gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
#!/usr/bin/env python
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import hpsdnclient.tests.data as test_data
import hpsdnclient.datatypes as datatypes
class JsonObjectTests(unittest.TestCase):
    """Tests the JsonObject class (attribute -> JSON serialization)."""

    def setUp(self):
        # Build a JsonObject carrying a scalar, a list, a nested dict and
        # a nested JsonObject so every value kind is serialized.
        self.json_object = datatypes.JsonObject()
        self.json_object.a = 0
        self.json_object.b = [1, 2, 3, 4]
        self.json_object.c = {"d": 5, "e": "six", "f": [7, "eight", 9]}
        metric_app = datatypes.JsonObjectFactory.create('MetricApp',
                                                        test_data.METRIC_APP)
        self.json_object.metric_app = metric_app
        # Expected JSON string rendering of the object built above.
        self.string = ('{\n'
                       ' "a": 0,\n'
                       ' "b": [\n'
                       ' 1,\n'
                       ' 2,\n'
                       ' 3,\n'
                       ' 4\n'
                       ' ],\n'
                       ' "c": {\n'
                       ' "d": 5,\n'
                       ' "e": "six",\n'
                       ' "f": [\n'
                       ' 7,\n'
                       ' "eight",\n'
                       ' 9\n'
                       ' ]\n'
                       ' },\n'
                       ' "metric_app": {\n'
                       ' "app_id": "com.hp.sdn.cloud",\n'
                       ' "app_name": "HP VAN SDN Cloud Controller"\n'
                       ' }\n'
                       '}')

    def test_to_json_string(self):
        """to_json_string() renders the expected JSON text."""
        result = self.json_object.to_json_string()
        expected = self.string
        # assertEqual replaces deprecated assertEquals (removed in 3.12).
        self.assertEqual(result, expected)

    def test_to_dict(self):
        """to_dict() renders the expected plain-dict representation."""
        result = self.json_object.to_dict()
        expected = {"a": 0,
                    "b": [1, 2, 3, 4],
                    "c": {"d": 5, "e": "six", "f": [7, "eight", 9]},
                    "metric_app": {
                        "app_id": "com.hp.sdn.cloud",
                        "app_name": "HP VAN SDN Cloud Controller",
                    }
                    }
        self.assertEqual(result, expected)
# Omitted test case for test_factory....
#factory method is tested by the child classes in the suite below
class FactoryTests(unittest.TestCase):
    """Tests the JsonObjectFactory."""

    def _test_type(self, data, datatype):
        """Create ``datatype`` from ``data`` via the factory and verify it.

        The created object must be an instance of ``datatype``.  If the
        type has an entry in ``datatypes.CLASS_MAP``, each mapped
        attribute that is present is additionally checked to be an
        instance of -- or a list of instances of -- its mapped datatype
        class.  The created object is returned so callers can make
        further assertions.
        """
        type_name = datatype.__name__
        obj = datatypes.JsonObjectFactory.create(type_name, data)
        self.assertTrue(isinstance(obj, datatype))
        try:
            class_map = datatypes.CLASS_MAP[type_name]
        except KeyError:
            # No attribute mapping registered for this type.
            return obj
        for key, cls_name in class_map.items():
            # getattr() replaces the original eval() calls: identical
            # behavior (including AttributeError on a missing name)
            # without dynamic code execution.
            attribute = getattr(obj, key)
            if attribute is None:
                continue
            cls = getattr(datatypes, cls_name)
            if isinstance(attribute, list):
                for item in attribute:
                    self.assertTrue(isinstance(item, cls))
            else:
                self.assertTrue(isinstance(attribute, cls))
        return obj

    def test_add_factory(self):
        """add_factory() registers a type under the given name."""
        datatypes.JsonObjectFactory.add_factory('Datapath', datatypes.Datapath)
        self.assertIn('Datapath', datatypes.JsonObjectFactory.factories)
        # assertEqual replaces deprecated assertEquals (removed in 3.12).
        self.assertEqual(datatypes.JsonObjectFactory.factories['Datapath'],
                         datatypes.Datapath)

    def test_factory_create(self):
        """create() registers the type and remaps reserved attribute names."""
        obj = self._test_type(test_data.SYSTEM, datatypes.System)
        self.assertIn('self_', dir(obj))
        self.assertIn('System', datatypes.JsonObjectFactory.factories)

    def test_create_license(self):
        self._test_type(test_data.LICENSE, datatypes.License)

    def test_create_app(self):
        self._test_type(test_data.APP, datatypes.App)

    def test_create_app_health(self):
        self._test_type(test_data.APP_HEALTH, datatypes.AppHealth)

    def test_create_audit_log(self):
        self._test_type(test_data.AUDIT_LOG, datatypes.AuditLogEntry)

    def test_create_system(self):
        self._test_type(test_data.SYSTEM, datatypes.System)

    def test_create_region(self):
        self._test_type(test_data.REGION, datatypes.Region)

    def test_create_team(self):
        self._test_type(test_data.TEAM, datatypes.Team)

    def test_create_alert(self):
        self._test_type(test_data.ALERT, datatypes.Alert)

    def test_create_alert_topic(self):
        self._test_type(test_data.ALERT_TOPIC, datatypes.AlertTopic)

    def test_create_alert_topic_listener(self):
        self._test_type(test_data.ALERT_TOPIC_LISTENER,
                        datatypes.AlertTopicListener)

    def test_create_metric_app(self):
        self._test_type(test_data.METRIC_APP, datatypes.MetricApp)

    def test_create_metric(self):
        self._test_type(test_data.METRIC, datatypes.Metric)

    def test_create_metric_values(self):
        self._test_type(test_data.METRIC_VALUES, datatypes.MetricValues)

    def test_create_controller_stats(self):
        self._test_type(test_data.CONTROLLER_STATS, datatypes.ControllerStats)

    def test_create_stats(self):
        self._test_type(test_data.STATS, datatypes.Stats)

    def test_create_port_stats(self):
        self._test_type(test_data.PORT_STATS, datatypes.PortStats)

    def test_create_group_stats(self):
        self._test_type(test_data.GROUP_STATS, datatypes.GroupStats)

    def test_create_meter_stats(self):
        self._test_type(test_data.METER_STATS, datatypes.MeterStats)

    def test_create_datapath(self):
        self._test_type(test_data.DATAPATH, datatypes.Datapath)

    def test_create_meter_features(self):
        self._test_type(test_data.METER_FEATURES, datatypes.MeterFeatures)

    def test_create_group_features(self):
        self._test_type(test_data.GROUP_FEATURES, datatypes.GroupFeatures)

    def test_create_port(self):
        self._test_type(test_data.PORT, datatypes.Port)

    def test_create_meter(self):
        self._test_type(test_data.METER, datatypes.Meter)

    def test_create_group(self):
        self._test_type(test_data.GROUP, datatypes.Group)

    def test_create_flow(self):
        obj = self._test_type(test_data.FLOW, datatypes.Flow)
        self.assertEqual(obj.actions.output, 2)

    def test_create_flow_multiple_action(self):
        obj = self._test_type(test_data.FLOW_MA, datatypes.Flow)
        self.assertEqual(obj.actions.output, [1, 2, 3])

    def test_create_cluster(self):
        self._test_type(test_data.CLUSTER, datatypes.Cluster)

    def test_create_link(self):
        self._test_type(test_data.LINK, datatypes.Link)

    def test_create_path(self):
        self._test_type(test_data.PATH, datatypes.Path)

    def test_create_node(self):
        self._test_type(test_data.NODE, datatypes.Node)

    def test_create_lldp(self):
        self._test_type(test_data.LLDP, datatypes.LldpProperties)

    def test_create_observation(self):
        self._test_type(test_data.OBSERVATION, datatypes.Observation)

    def test_create_packet(self):
        self._test_type(test_data.PACKET, datatypes.Packet)

    def test_create_next_hop(self):
        self._test_type(test_data.NEXT_HOP, datatypes.NextHop)
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.speech_v1p1beta1.types import resource
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.protobuf import wrappers_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
# Registers every message in this module with proto-plus under the
# ``google.cloud.speech.v1p1beta1`` proto package; names in the manifest
# must match the classes defined below.
__protobuf__ = proto.module(
    package="google.cloud.speech.v1p1beta1",
    manifest={
        "RecognizeRequest",
        "LongRunningRecognizeRequest",
        "TranscriptOutputConfig",
        "StreamingRecognizeRequest",
        "StreamingRecognitionConfig",
        "RecognitionConfig",
        "SpeakerDiarizationConfig",
        "RecognitionMetadata",
        "SpeechContext",
        "RecognitionAudio",
        "RecognizeResponse",
        "LongRunningRecognizeResponse",
        "LongRunningRecognizeMetadata",
        "StreamingRecognizeResponse",
        "StreamingRecognitionResult",
        "SpeechRecognitionResult",
        "SpeechRecognitionAlternative",
        "WordInfo",
    },
)
class RecognizeRequest(proto.Message):
    r"""The top-level message sent by the client for the ``Recognize``
    method.

    Attributes:
        config (google.cloud.speech_v1p1beta1.types.RecognitionConfig):
            Required. Provides information to the
            recognizer that specifies how to process the
            request.
        audio (google.cloud.speech_v1p1beta1.types.RecognitionAudio):
            Required. The audio data to be recognized.
    """

    # Message types are referenced by name so proto-plus resolves them
    # lazily against this module's manifest.
    config = proto.Field(proto.MESSAGE, number=1, message="RecognitionConfig",)
    audio = proto.Field(proto.MESSAGE, number=2, message="RecognitionAudio",)
class LongRunningRecognizeRequest(proto.Message):
    r"""The top-level message sent by the client for the
    ``LongRunningRecognize`` method.

    Attributes:
        config (google.cloud.speech_v1p1beta1.types.RecognitionConfig):
            Required. Provides information to the
            recognizer that specifies how to process the
            request.
        audio (google.cloud.speech_v1p1beta1.types.RecognitionAudio):
            Required. The audio data to be recognized.
        output_config (google.cloud.speech_v1p1beta1.types.TranscriptOutputConfig):
            Optional. Specifies an optional destination
            for the recognition results.
    """

    config = proto.Field(proto.MESSAGE, number=1, message="RecognitionConfig",)
    audio = proto.Field(proto.MESSAGE, number=2, message="RecognitionAudio",)
    # Field number 4 matches the .proto definition (3 is not used here).
    output_config = proto.Field(
        proto.MESSAGE, number=4, message="TranscriptOutputConfig",
    )
class TranscriptOutputConfig(proto.Message):
    r"""Specifies an optional destination for the recognition
    results.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        gcs_uri (str):
            Specifies a Cloud Storage URI for the recognition results.
            Must be specified in the format:
            ``gs://bucket_name/object_name``, and the bucket must
            already exist.

            This field is a member of `oneof`_ ``output_type``.
    """

    # Member of the ``output_type`` oneof: setting it clears any other
    # member of the same oneof.
    gcs_uri = proto.Field(proto.STRING, number=1, oneof="output_type",)
class StreamingRecognizeRequest(proto.Message):
    r"""The top-level message sent by the client for the
    ``StreamingRecognize`` method. Multiple
    ``StreamingRecognizeRequest`` messages are sent. The first message
    must contain a ``streaming_config`` message and must not contain
    ``audio_content``. All subsequent messages must contain
    ``audio_content`` and must not contain a ``streaming_config``
    message.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        streaming_config (google.cloud.speech_v1p1beta1.types.StreamingRecognitionConfig):
            Provides information to the recognizer that specifies how to
            process the request. The first ``StreamingRecognizeRequest``
            message must contain a ``streaming_config`` message.

            This field is a member of `oneof`_ ``streaming_request``.
        audio_content (bytes):
            The audio data to be recognized. Sequential chunks of audio
            data are sent in sequential ``StreamingRecognizeRequest``
            messages. The first ``StreamingRecognizeRequest`` message
            must not contain ``audio_content`` data and all subsequent
            ``StreamingRecognizeRequest`` messages must contain
            ``audio_content`` data. The audio bytes must be encoded as
            specified in ``RecognitionConfig``. Note: as with all bytes
            fields, proto buffers use a pure binary representation (not
            base64). See `content
            limits <https://cloud.google.com/speech-to-text/quotas#content>`__.

            This field is a member of `oneof`_ ``streaming_request``.
    """

    # Both fields belong to the ``streaming_request`` oneof: exactly one
    # of them may be set on any given request message.
    streaming_config = proto.Field(
        proto.MESSAGE,
        number=1,
        oneof="streaming_request",
        message="StreamingRecognitionConfig",
    )
    audio_content = proto.Field(proto.BYTES, number=2, oneof="streaming_request",)
class StreamingRecognitionConfig(proto.Message):
    r"""Provides information to the recognizer that specifies how to
    process the request.

    Attributes:
        config (google.cloud.speech_v1p1beta1.types.RecognitionConfig):
            Required. Provides information to the
            recognizer that specifies how to process the
            request.
        single_utterance (bool):
            If ``false`` or omitted, the recognizer will perform
            continuous recognition (continuing to wait for and process
            audio even if the user pauses speaking) until the client
            closes the input stream (gRPC API) or until the maximum time
            limit has been reached. May return multiple
            ``StreamingRecognitionResult``\ s with the ``is_final`` flag
            set to ``true``.

            If ``true``, the recognizer will detect a single spoken
            utterance. When it detects that the user has paused or
            stopped speaking, it will return an
            ``END_OF_SINGLE_UTTERANCE`` event and cease recognition. It
            will return no more than one ``StreamingRecognitionResult``
            with the ``is_final`` flag set to ``true``.

            The ``single_utterance`` field can only be used with
            specified models, otherwise an error is thrown. The
            ``model`` field in [``RecognitionConfig``][] must be set to:

            - ``command_and_search``
            - ``phone_call`` AND additional field
              ``useEnhanced``\ =\ ``true``
            - The ``model`` field is left undefined. In this case the
              API auto-selects a model based on any other parameters
              that you set in ``RecognitionConfig``.
        interim_results (bool):
            If ``true``, interim results (tentative hypotheses) may be
            returned as they become available (these interim results are
            indicated with the ``is_final=false`` flag). If ``false`` or
            omitted, only ``is_final=true`` result(s) are returned.
    """

    config = proto.Field(proto.MESSAGE, number=1, message="RecognitionConfig",)
    single_utterance = proto.Field(proto.BOOL, number=2,)
    interim_results = proto.Field(proto.BOOL, number=3,)
class RecognitionConfig(proto.Message):
    r"""Provides information to the recognizer that specifies how to
    process the request.

    Attributes:
        encoding (google.cloud.speech_v1p1beta1.types.RecognitionConfig.AudioEncoding):
            Encoding of audio data sent in all ``RecognitionAudio``
            messages. This field is optional for ``FLAC`` and ``WAV``
            audio files and required for all other audio formats. For
            details, see
            [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
        sample_rate_hertz (int):
            Sample rate in Hertz of the audio data sent in all
            ``RecognitionAudio`` messages. Valid values are: 8000-48000.
            16000 is optimal. For best results, set the sampling rate of
            the audio source to 16000 Hz. If that's not possible, use
            the native sample rate of the audio source (instead of
            re-sampling). This field is optional for FLAC and WAV audio
            files, but is required for all other audio formats. For
            details, see
            [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
        audio_channel_count (int):
            The number of channels in the input audio data. ONLY set
            this for MULTI-CHANNEL recognition. Valid values for
            LINEAR16 and FLAC are ``1``-``8``. Valid values for OGG_OPUS
            are '1'-'254'. Valid value for MULAW, AMR, AMR_WB and
            SPEEX_WITH_HEADER_BYTE is only ``1``. If ``0`` or omitted,
            defaults to one channel (mono). Note: We only recognize the
            first channel by default. To perform independent recognition
            on each channel set
            ``enable_separate_recognition_per_channel`` to 'true'.
        enable_separate_recognition_per_channel (bool):
            This needs to be set to ``true`` explicitly and
            ``audio_channel_count`` > 1 to get each channel recognized
            separately. The recognition result will contain a
            ``channel_tag`` field to state which channel that result
            belongs to. If this is not true, we will only recognize the
            first channel. The request is billed cumulatively for all
            channels recognized: ``audio_channel_count`` multiplied by
            the length of the audio.
        language_code (str):
            Required. The language of the supplied audio as a
            `BCP-47 <https://www.rfc-editor.org/rfc/bcp/bcp47.txt>`__
            language tag. Example: "en-US". See `Language
            Support <https://cloud.google.com/speech-to-text/docs/languages>`__
            for a list of the currently supported language codes.
        alternative_language_codes (Sequence[str]):
            A list of up to 3 additional
            `BCP-47 <https://www.rfc-editor.org/rfc/bcp/bcp47.txt>`__
            language tags, listing possible alternative languages of the
            supplied audio. See `Language
            Support <https://cloud.google.com/speech-to-text/docs/languages>`__
            for a list of the currently supported language codes. If
            alternative languages are listed, recognition result will
            contain recognition in the most likely language detected
            including the main language_code. The recognition result
            will include the language tag of the language detected in
            the audio. Note: This feature is only supported for Voice
            Command and Voice Search use cases and performance may vary
            for other use cases (e.g., phone call transcription).
        max_alternatives (int):
            Maximum number of recognition hypotheses to be returned.
            Specifically, the maximum number of
            ``SpeechRecognitionAlternative`` messages within each
            ``SpeechRecognitionResult``. The server may return fewer
            than ``max_alternatives``. Valid values are ``0``-``30``. A
            value of ``0`` or ``1`` will return a maximum of one. If
            omitted, will return a maximum of one.
        profanity_filter (bool):
            If set to ``true``, the server will attempt to filter out
            profanities, replacing all but the initial character in each
            filtered word with asterisks, e.g. "f***". If set to
            ``false`` or omitted, profanities won't be filtered out.
        adaptation (google.cloud.speech_v1p1beta1.types.SpeechAdaptation):
            Speech adaptation configuration improves the accuracy of
            speech recognition. For more information, see the `speech
            adaptation <https://cloud.google.com/speech-to-text/docs/adaptation>`__
            documentation. When speech adaptation is set it supersedes
            the ``speech_contexts`` field.
        transcript_normalization (google.cloud.speech_v1p1beta1.types.TranscriptNormalization):
            Use transcription normalization to
            automatically replace parts of the transcript
            with phrases of your choosing. For
            StreamingRecognize, this normalization only
            applies to stable partial transcripts (stability
            > 0.8) and final transcripts.
        speech_contexts (Sequence[google.cloud.speech_v1p1beta1.types.SpeechContext]):
            Array of
            [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext].
            A means to provide context to assist the speech recognition.
            For more information, see `speech
            adaptation <https://cloud.google.com/speech-to-text/docs/adaptation>`__.
        enable_word_time_offsets (bool):
            If ``true``, the top result includes a list of words and the
            start and end time offsets (timestamps) for those words. If
            ``false``, no word-level time offset information is
            returned. The default is ``false``.
        enable_word_confidence (bool):
            If ``true``, the top result includes a list of words and the
            confidence for those words. If ``false``, no word-level
            confidence information is returned. The default is
            ``false``.
        enable_automatic_punctuation (bool):
            If 'true', adds punctuation to recognition
            result hypotheses. This feature is only
            available in select languages. Setting this for
            requests in other languages has no effect at
            all. The default 'false' value does not add
            punctuation to result hypotheses.
        enable_spoken_punctuation (google.protobuf.wrappers_pb2.BoolValue):
            The spoken punctuation behavior for the call If not set,
            uses default behavior based on model of choice e.g.
            command_and_search will enable spoken punctuation by default
            If 'true', replaces spoken punctuation with the
            corresponding symbols in the request. For example, "how are
            you question mark" becomes "how are you?". See
            https://cloud.google.com/speech-to-text/docs/spoken-punctuation
            for support. If 'false', spoken punctuation is not replaced.
        enable_spoken_emojis (google.protobuf.wrappers_pb2.BoolValue):
            The spoken emoji behavior for the call
            If not set, uses default behavior based on model
            of choice If 'true', adds spoken emoji
            formatting for the request. This will replace
            spoken emojis with the corresponding Unicode
            symbols in the final transcript. If 'false',
            spoken emojis are not replaced.
        enable_speaker_diarization (bool):
            If 'true', enables speaker detection for each recognized
            word in the top alternative of the recognition result using
            a speaker_tag provided in the WordInfo. Note: Use
            diarization_config instead.
        diarization_speaker_count (int):
            If set, specifies the estimated number of speakers in the
            conversation. Defaults to '2'. Ignored unless
            enable_speaker_diarization is set to true. Note: Use
            diarization_config instead.
        diarization_config (google.cloud.speech_v1p1beta1.types.SpeakerDiarizationConfig):
            Config to enable speaker diarization and set
            additional parameters to make diarization better
            suited for your application. Note: When this is
            enabled, we send all the words from the
            beginning of the audio for the top alternative
            in every consecutive STREAMING responses. This
            is done in order to improve our speaker tags as
            our models learn to identify the speakers in the
            conversation over time. For non-streaming
            requests, the diarization results will be
            provided only in the top alternative of the
            FINAL SpeechRecognitionResult.
        metadata (google.cloud.speech_v1p1beta1.types.RecognitionMetadata):
            Metadata regarding this request.
        model (str):
            Which model to select for the given request. Select the
            model best suited to your domain to get best results. If a
            model is not explicitly specified, then we auto-select a
            model based on the parameters in the RecognitionConfig.

            .. raw:: html

                <table>
                  <tr>
                    <td><b>Model</b></td>
                    <td><b>Description</b></td>
                  </tr>
                  <tr>
                    <td><code>command_and_search</code></td>
                    <td>Best for short queries such as voice commands or voice search.</td>
                  </tr>
                  <tr>
                    <td><code>phone_call</code></td>
                    <td>Best for audio that originated from a phone call (typically
                    recorded at an 8khz sampling rate).</td>
                  </tr>
                  <tr>
                    <td><code>video</code></td>
                    <td>Best for audio that originated from video or includes multiple
                        speakers. Ideally the audio is recorded at a 16khz or greater
                        sampling rate. This is a premium model that costs more than the
                        standard rate.</td>
                  </tr>
                  <tr>
                    <td><code>default</code></td>
                    <td>Best for audio that is not one of the specific audio models.
                        For example, long-form audio. Ideally the audio is high-fidelity,
                        recorded at a 16khz or greater sampling rate.</td>
                  </tr>
                </table>
        use_enhanced (bool):
            Set to true to use an enhanced model for speech recognition.
            If ``use_enhanced`` is set to true and the ``model`` field
            is not set, then an appropriate enhanced model is chosen if
            an enhanced model exists for the audio.

            If ``use_enhanced`` is true and an enhanced version of the
            specified model does not exist, then the speech is
            recognized using the standard version of the specified
            model.
    """

    class AudioEncoding(proto.Enum):
        r"""The encoding of the audio data sent in the request.

        All encodings support only 1 channel (mono) audio, unless the
        ``audio_channel_count`` and
        ``enable_separate_recognition_per_channel`` fields are set.

        For best results, the audio source should be captured and
        transmitted using a lossless encoding (``FLAC`` or ``LINEAR16``).
        The accuracy of the speech recognition can be reduced if lossy
        codecs are used to capture or transmit audio, particularly if
        background noise is present. Lossy codecs include ``MULAW``,
        ``AMR``, ``AMR_WB``, ``OGG_OPUS``, ``SPEEX_WITH_HEADER_BYTE``,
        ``MP3``, and ``WEBM_OPUS``.

        The ``FLAC`` and ``WAV`` audio file formats include a header that
        describes the included audio content. You can request recognition
        for ``WAV`` files that contain either ``LINEAR16`` or ``MULAW``
        encoded audio. If you send ``FLAC`` or ``WAV`` audio file format in
        your request, you do not need to specify an ``AudioEncoding``; the
        audio encoding format is determined from the file header. If you
        specify an ``AudioEncoding`` when you send ``FLAC`` or ``WAV``
        audio, the encoding configuration must match the encoding described
        in the audio header; otherwise the request returns an
        [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]
        error code.
        """
        ENCODING_UNSPECIFIED = 0
        LINEAR16 = 1
        FLAC = 2
        MULAW = 3
        AMR = 4
        AMR_WB = 5
        OGG_OPUS = 6
        SPEEX_WITH_HEADER_BYTE = 7
        MP3 = 8
        WEBM_OPUS = 9

    # Field numbers are non-sequential because they mirror the .proto
    # definition, where fields were added over several API revisions.
    encoding = proto.Field(proto.ENUM, number=1, enum=AudioEncoding,)
    sample_rate_hertz = proto.Field(proto.INT32, number=2,)
    audio_channel_count = proto.Field(proto.INT32, number=7,)
    enable_separate_recognition_per_channel = proto.Field(proto.BOOL, number=12,)
    language_code = proto.Field(proto.STRING, number=3,)
    alternative_language_codes = proto.RepeatedField(proto.STRING, number=18,)
    max_alternatives = proto.Field(proto.INT32, number=4,)
    profanity_filter = proto.Field(proto.BOOL, number=5,)
    adaptation = proto.Field(
        proto.MESSAGE, number=20, message=resource.SpeechAdaptation,
    )
    transcript_normalization = proto.Field(
        proto.MESSAGE, number=24, message=resource.TranscriptNormalization,
    )
    speech_contexts = proto.RepeatedField(
        proto.MESSAGE, number=6, message="SpeechContext",
    )
    enable_word_time_offsets = proto.Field(proto.BOOL, number=8,)
    enable_word_confidence = proto.Field(proto.BOOL, number=15,)
    enable_automatic_punctuation = proto.Field(proto.BOOL, number=11,)
    enable_spoken_punctuation = proto.Field(
        proto.MESSAGE, number=22, message=wrappers_pb2.BoolValue,
    )
    enable_spoken_emojis = proto.Field(
        proto.MESSAGE, number=23, message=wrappers_pb2.BoolValue,
    )
    enable_speaker_diarization = proto.Field(proto.BOOL, number=16,)
    diarization_speaker_count = proto.Field(proto.INT32, number=17,)
    diarization_config = proto.Field(
        proto.MESSAGE, number=19, message="SpeakerDiarizationConfig",
    )
    metadata = proto.Field(proto.MESSAGE, number=9, message="RecognitionMetadata",)
    model = proto.Field(proto.STRING, number=13,)
    use_enhanced = proto.Field(proto.BOOL, number=14,)
class SpeakerDiarizationConfig(proto.Message):
    r"""Config to enable speaker diarization.

    Attributes:
        enable_speaker_diarization (bool):
            If 'true', enables speaker detection for each recognized
            word in the top alternative of the recognition result using
            a speaker_tag provided in the WordInfo.
        min_speaker_count (int):
            Minimum number of speakers in the
            conversation. This range gives you more
            flexibility by allowing the system to
            automatically determine the correct number of
            speakers. If not set, the default value is 2.
        max_speaker_count (int):
            Maximum number of speakers in the
            conversation. This range gives you more
            flexibility by allowing the system to
            automatically determine the correct number of
            speakers. If not set, the default value is 6.
        speaker_tag (int):
            Output only. Unused.
    """

    enable_speaker_diarization = proto.Field(proto.BOOL, number=1,)
    min_speaker_count = proto.Field(proto.INT32, number=2,)
    max_speaker_count = proto.Field(proto.INT32, number=3,)
    # Field number 4 is skipped in the .proto definition.
    speaker_tag = proto.Field(proto.INT32, number=5,)
class RecognitionMetadata(proto.Message):
    r"""Description of audio data to be recognized.

    Attributes:
        interaction_type (google.cloud.speech_v1p1beta1.types.RecognitionMetadata.InteractionType):
            The use case most closely describing the
            audio content to be recognized.
        industry_naics_code_of_audio (int):
            The industry vertical to which this speech
            recognition request most closely applies. This
            is most indicative of the topics contained in
            the audio. Use the 6-digit NAICS code to
            identify the industry vertical - see
            https://www.naics.com/search/.
        microphone_distance (google.cloud.speech_v1p1beta1.types.RecognitionMetadata.MicrophoneDistance):
            The audio type that most closely describes
            the audio being recognized.
        original_media_type (google.cloud.speech_v1p1beta1.types.RecognitionMetadata.OriginalMediaType):
            The original media the speech was recorded
            on.
        recording_device_type (google.cloud.speech_v1p1beta1.types.RecognitionMetadata.RecordingDeviceType):
            The type of device the speech was recorded
            with.
        recording_device_name (str):
            The device used to make the recording.
            Examples 'Nexus 5X' or 'Polycom SoundStation IP
            6000' or 'POTS' or 'VoIP' or 'Cardioid
            Microphone'.
        original_mime_type (str):
            Mime type of the original audio file. For example
            ``audio/m4a``, ``audio/x-alaw-basic``, ``audio/mp3``,
            ``audio/3gpp``. A list of possible audio mime types is
            maintained at
            http://www.iana.org/assignments/media-types/media-types.xhtml#audio
        obfuscated_id (int):
            Obfuscated (privacy-protected) ID of the
            user, to identify number of unique users using
            the service.
        audio_topic (str):
            Description of the content. Eg. "Recordings
            of federal supreme court hearings from 2012".
    """

    class InteractionType(proto.Enum):
        r"""Use case categories that the audio recognition request can be
        described by.
        """
        INTERACTION_TYPE_UNSPECIFIED = 0
        DISCUSSION = 1
        PRESENTATION = 2
        PHONE_CALL = 3
        VOICEMAIL = 4
        PROFESSIONALLY_PRODUCED = 5
        VOICE_SEARCH = 6
        VOICE_COMMAND = 7
        DICTATION = 8

    class MicrophoneDistance(proto.Enum):
        r"""Enumerates the types of capture settings describing an audio
        file.
        """
        MICROPHONE_DISTANCE_UNSPECIFIED = 0
        NEARFIELD = 1
        MIDFIELD = 2
        FARFIELD = 3

    class OriginalMediaType(proto.Enum):
        r"""The original media the speech was recorded on."""
        ORIGINAL_MEDIA_TYPE_UNSPECIFIED = 0
        AUDIO = 1
        VIDEO = 2

    class RecordingDeviceType(proto.Enum):
        r"""The type of device the speech was recorded with."""
        RECORDING_DEVICE_TYPE_UNSPECIFIED = 0
        SMARTPHONE = 1
        PC = 2
        PHONE_LINE = 3
        VEHICLE = 4
        OTHER_OUTDOOR_DEVICE = 5
        OTHER_INDOOR_DEVICE = 6

    # Field number 2 is skipped in the .proto definition.
    interaction_type = proto.Field(proto.ENUM, number=1, enum=InteractionType,)
    industry_naics_code_of_audio = proto.Field(proto.UINT32, number=3,)
    microphone_distance = proto.Field(proto.ENUM, number=4, enum=MicrophoneDistance,)
    original_media_type = proto.Field(proto.ENUM, number=5, enum=OriginalMediaType,)
    recording_device_type = proto.Field(proto.ENUM, number=6, enum=RecordingDeviceType,)
    recording_device_name = proto.Field(proto.STRING, number=7,)
    original_mime_type = proto.Field(proto.STRING, number=8,)
    obfuscated_id = proto.Field(proto.INT64, number=9,)
    audio_topic = proto.Field(proto.STRING, number=10,)
class SpeechContext(proto.Message):
    r"""Provides "hints" to the speech recognizer to favor specific
    words and phrases in the results.

    Attributes:
        phrases (Sequence[str]):
            A list of strings containing words and phrases "hints" so
            that the speech recognition is more likely to recognize
            them. This can be used to improve the accuracy for specific
            words and phrases, for example, if specific commands are
            typically spoken by the user. This can also be used to add
            additional words to the vocabulary of the recognizer. See
            `usage
            limits <https://cloud.google.com/speech-to-text/quotas#content>`__.

            List items can also be set to classes for groups of words
            that represent common concepts that occur in natural
            language. For example, rather than providing phrase hints
            for every month of the year, using the $MONTH class improves
            the likelihood of correctly transcribing audio that includes
            months.
        boost (float):
            Hint Boost. Positive value will increase the probability
            that a specific phrase will be recognized over other similar
            sounding phrases. The higher the boost, the higher the
            chance of false positive recognition as well. Negative boost
            values would correspond to anti-biasing. Anti-biasing is not
            enabled, so negative boost will simply be ignored. Though
            ``boost`` can accept a wide range of positive values, most
            use cases are best served with values between 0 and 20. We
            recommend using a binary search approach to finding the
            optimal value for your use case.
    """

    phrases = proto.RepeatedField(proto.STRING, number=1,)
    # Field numbers 2-3 are skipped in the .proto definition.
    boost = proto.Field(proto.FLOAT, number=4,)
class RecognitionAudio(proto.Message):
    r"""Contains audio data in the encoding specified in the
    ``RecognitionConfig``. Either ``content`` or ``uri`` must be
    supplied. Supplying both or neither returns
    [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
    See `content
    limits <https://cloud.google.com/speech-to-text/quotas#content>`__.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        content (bytes):
            The audio data bytes encoded as specified in
            ``RecognitionConfig``. Note: as with all bytes fields, proto
            buffers use a pure binary representation, whereas JSON
            representations use base64.

            This field is a member of `oneof`_ ``audio_source``.
        uri (str):
            URI that points to a file that contains audio data bytes as
            specified in ``RecognitionConfig``. The file must not be
            compressed (for example, gzip). Currently, only Google Cloud
            Storage URIs are supported, which must be specified in the
            following format: ``gs://bucket_name/object_name`` (other
            URI formats return
            [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]).
            For more information, see `Request
            URIs <https://cloud.google.com/storage/docs/reference-uris>`__.

            This field is a member of `oneof`_ ``audio_source``.
    """

    # Both fields belong to the ``audio_source`` oneof: exactly one of
    # inline bytes or a Cloud Storage URI may be set.
    content = proto.Field(proto.BYTES, number=1, oneof="audio_source",)
    uri = proto.Field(proto.STRING, number=2, oneof="audio_source",)
class RecognizeResponse(proto.Message):
    r"""The only message the ``Recognize`` method returns to the client:
    zero or more sequential ``SpeechRecognitionResult`` messages plus
    billing information.

    Attributes:
        results (Sequence[google.cloud.speech_v1p1beta1.types.SpeechRecognitionResult]):
            Transcription results, one per sequential portion of audio.
        total_billed_time (google.protobuf.duration_pb2.Duration):
            Billed audio seconds for this request, when available.
    """

    results = proto.RepeatedField(
        proto.MESSAGE, number=2, message="SpeechRecognitionResult"
    )
    total_billed_time = proto.Field(
        proto.MESSAGE, number=3, message=duration_pb2.Duration
    )
class LongRunningRecognizeResponse(proto.Message):
    r"""The only message ``LongRunningRecognize`` returns to the client.
    It holds the result as zero or more sequential
    ``SpeechRecognitionResult`` messages and is delivered in the
    ``result.response`` field of the ``Operation`` returned by the
    ``GetOperation`` call of the ``google::longrunning::Operations``
    service.

    Attributes:
        results (Sequence[google.cloud.speech_v1p1beta1.types.SpeechRecognitionResult]):
            Transcription results, one per sequential portion of audio.
        total_billed_time (google.protobuf.duration_pb2.Duration):
            Billed audio seconds for this request, when available.
        output_config (google.cloud.speech_v1p1beta1.types.TranscriptOutputConfig):
            Echo of the request's output config, if one was present.
        output_error (google.rpc.status_pb2.Status):
            Error detail when writing the transcript output failed.
    """

    results = proto.RepeatedField(
        proto.MESSAGE, number=2, message="SpeechRecognitionResult"
    )
    total_billed_time = proto.Field(
        proto.MESSAGE, number=3, message=duration_pb2.Duration
    )
    output_config = proto.Field(
        proto.MESSAGE, number=6, message="TranscriptOutputConfig"
    )
    output_error = proto.Field(proto.MESSAGE, number=7, message=status_pb2.Status)
class LongRunningRecognizeMetadata(proto.Message):
    r"""Progress report for a long-running ``LongRunningRecognize`` call,
    carried in the ``metadata`` field of the ``Operation`` returned by
    the ``GetOperation`` call of the
    ``google::longrunning::Operations`` service.

    Attributes:
        progress_percent (int):
            Approximate percentage of audio processed so far; guaranteed
            to be 100 once the audio is fully processed and results are
            available.
        start_time (google.protobuf.timestamp_pb2.Timestamp):
            When the request was received.
        last_update_time (google.protobuf.timestamp_pb2.Timestamp):
            Time of the most recent processing update.
        uri (str):
            Output only. URI of the audio file being transcribed; empty
            when the audio arrived as inline byte content.
        output_config (google.cloud.speech_v1p1beta1.types.TranscriptOutputConfig):
            Output only. Copy of the request's TranscriptOutputConfig,
            if it was set.
    """

    progress_percent = proto.Field(proto.INT32, number=1)
    start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp)
    last_update_time = proto.Field(
        proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp
    )
    uri = proto.Field(proto.STRING, number=4)
    output_config = proto.Field(
        proto.MESSAGE, number=5, message="TranscriptOutputConfig"
    )
class StreamingRecognizeResponse(proto.Message):
    r"""The only message streamed back to the client by
    ``StreamingRecognize``. A series of zero or more of these is
    returned; if there is no recognizable audio and ``single_utterance``
    is false, nothing is streamed back at all.

    In each response exactly one of these is set: ``error``,
    ``speech_event_type``, or one or more (repeated) ``results``.
    Interim results (``is_final: false``) carry a ``stability`` estimate
    and may be revised by later responses; final results
    (``is_final: true``) are settled and will not be returned again.
    Concatenating the top alternatives of the final results yields the
    full transcript. A UI might choose to display only interim portions
    with high ``stability``, since low-stability portions are very
    likely to change.

    Attributes:
        error (google.rpc.status_pb2.Status):
            If set, a [google.rpc.Status][google.rpc.Status] message
            describing the operation's error.
        results (Sequence[google.cloud.speech_v1p1beta1.types.StreamingRecognitionResult]):
            Zero or more results for consecutive portions of the audio
            currently being processed: at most one ``is_final=true``
            result (the newly settled portion) followed by zero or more
            ``is_final=false`` interim results.
        speech_event_type (google.cloud.speech_v1p1beta1.types.StreamingRecognizeResponse.SpeechEventType):
            Indicates the type of speech event.
        total_billed_time (google.protobuf.duration_pb2.Duration):
            Billed audio seconds for the stream; set only on the last
            response in the stream.
    """

    class SpeechEventType(proto.Enum):
        r"""Indicates the type of speech event."""
        SPEECH_EVENT_UNSPECIFIED = 0
        END_OF_SINGLE_UTTERANCE = 1

    error = proto.Field(proto.MESSAGE, number=1, message=status_pb2.Status)
    results = proto.RepeatedField(
        proto.MESSAGE, number=2, message="StreamingRecognitionResult"
    )
    speech_event_type = proto.Field(proto.ENUM, number=4, enum=SpeechEventType)
    total_billed_time = proto.Field(
        proto.MESSAGE, number=5, message=duration_pb2.Duration
    )
class StreamingRecognitionResult(proto.Message):
    r"""A streaming speech recognition result for a portion of the audio
    that is currently being processed.

    Attributes:
        alternatives (Sequence[google.cloud.speech_v1p1beta1.types.SpeechRecognitionAlternative]):
            Up to ``max_alternatives`` recognition hypotheses, ordered
            by accuracy with the most probable (as ranked by the
            recognizer) first.
        is_final (bool):
            ``False`` for an interim result that may still change;
            ``True`` once the service has settled this portion and will
            return no further hypotheses for it.
        stability (float):
            Estimated likelihood, from 0.0 (completely unstable) to 1.0
            (completely stable), that the recognizer will not change its
            guess about this interim result. Only provided when
            ``is_final=false``; the default 0.0 is a sentinel for
            "not set".
        result_end_time (google.protobuf.duration_pb2.Duration):
            End of this result as an offset from the beginning of the
            audio.
        channel_tag (int):
            For multi-channel audio, the channel number this result was
            recognized from; ranges from '1' to 'N' when
            audio_channel_count = N.
        language_code (str):
            Output only. The
            `BCP-47 <https://www.rfc-editor.org/rfc/bcp/bcp47.txt>`__
            tag of the language detected as most likely spoken in the
            audio.
    """

    alternatives = proto.RepeatedField(
        proto.MESSAGE, number=1, message="SpeechRecognitionAlternative"
    )
    is_final = proto.Field(proto.BOOL, number=2)
    stability = proto.Field(proto.FLOAT, number=3)
    result_end_time = proto.Field(
        proto.MESSAGE, number=4, message=duration_pb2.Duration
    )
    channel_tag = proto.Field(proto.INT32, number=5)
    language_code = proto.Field(proto.STRING, number=6)
class SpeechRecognitionResult(proto.Message):
    r"""A (non-streaming) speech recognition result for a portion of the
    audio.

    Attributes:
        alternatives (Sequence[google.cloud.speech_v1p1beta1.types.SpeechRecognitionAlternative]):
            Up to ``max_alternatives`` recognition hypotheses, ordered
            by accuracy with the most probable (as ranked by the
            recognizer) first.
        channel_tag (int):
            For multi-channel audio, the channel number this result was
            recognized from; ranges from '1' to 'N' when
            audio_channel_count = N.
        result_end_time (google.protobuf.duration_pb2.Duration):
            End of this result as an offset from the beginning of the
            audio.
        language_code (str):
            Output only. The
            `BCP-47 <https://www.rfc-editor.org/rfc/bcp/bcp47.txt>`__
            tag of the language detected as most likely spoken in the
            audio.
    """

    alternatives = proto.RepeatedField(
        proto.MESSAGE, number=1, message="SpeechRecognitionAlternative"
    )
    channel_tag = proto.Field(proto.INT32, number=2)
    result_end_time = proto.Field(
        proto.MESSAGE, number=4, message=duration_pb2.Duration
    )
    language_code = proto.Field(proto.STRING, number=5)
class SpeechRecognitionAlternative(proto.Message):
    r"""One hypothesis from the recognizer's n-best list.

    Attributes:
        transcript (str):
            The words the user spoke, as text.
        confidence (float):
            Estimated likelihood between 0.0 and 1.0 that the recognized
            words are correct; higher is better. Set only for the top
            alternative of a non-streaming result, or of a streaming
            result where ``is_final=true``. Not guaranteed to be
            accurate or even present; the default 0.0 is a sentinel for
            "not set".
        words (Sequence[google.cloud.speech_v1p1beta1.types.WordInfo]):
            Per-word details for each recognized word. Note: when
            ``enable_speaker_diarization`` is true this covers all words
            from the beginning of the audio.
    """

    transcript = proto.Field(proto.STRING, number=1)
    confidence = proto.Field(proto.FLOAT, number=2)
    words = proto.RepeatedField(proto.MESSAGE, number=3, message="WordInfo")
class WordInfo(proto.Message):
    r"""Word-specific information for recognized words.

    Attributes:
        start_time (google.protobuf.duration_pb2.Duration):
            Offset from the beginning of the audio to the start of the
            spoken word. Only set when
            ``enable_word_time_offsets=true`` and only in the top
            hypothesis; this is an experimental feature and the offset
            accuracy can vary.
        end_time (google.protobuf.duration_pb2.Duration):
            Offset from the beginning of the audio to the end of the
            spoken word. Same availability and accuracy caveats as
            ``start_time``.
        word (str):
            The recognized word itself.
        confidence (float):
            Estimated likelihood between 0.0 and 1.0 that the recognized
            words are correct; higher is better. Set only for the top
            alternative of a non-streaming result, or of a streaming
            result where ``is_final=true``. Not guaranteed to be
            accurate or even present; the default 0.0 is a sentinel for
            "not set".
        speaker_tag (int):
            Output only. Distinct integer assigned to each speaker in
            the audio, identifying which speaker spoke this word; ranges
            from '1' to diarization_speaker_count. Set only when
            enable_speaker_diarization = 'true' and only in the top
            alternative.
    """

    start_time = proto.Field(proto.MESSAGE, number=1, message=duration_pb2.Duration)
    end_time = proto.Field(proto.MESSAGE, number=2, message=duration_pb2.Duration)
    word = proto.Field(proto.STRING, number=3)
    confidence = proto.Field(proto.FLOAT, number=4)
    speaker_tag = proto.Field(proto.INT32, number=5)
# Re-export every message name registered in this module's protobuf manifest
# (the manifest is set by the proto.module(...) declaration, outside this chunk).
__all__ = tuple(sorted(__protobuf__.manifest))
# ---------------------------------------------------------------------------
"""
Query subclasses which provide extra functionality beyond simple data retrieval.
"""
from django.core.exceptions import FieldError
from django.db import connections
from django.db.models.query_utils import Q
from django.db.models.sql.constants import (
CURSOR, GET_ITERATOR_CHUNK_SIZE, NO_RESULTS,
)
from django.db.models.sql.query import Query
from django.utils import six
__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'AggregateQuery']
class DeleteQuery(Query):
    """
    Delete queries are done through this class, since they are more constrained
    than general queries.
    """
    compiler = 'SQLDeleteCompiler'
    def do_query(self, table, where, using):
        """
        Execute a DELETE against `table` with the given where clause and
        return the number of rows the cursor reports as deleted.
        """
        self.tables = [table]
        self.where = where
        cursor = self.get_compiler(using).execute_sql(CURSOR)
        # No cursor back from the compiler means nothing was deleted.
        return cursor.rowcount if cursor else 0
    def delete_batch(self, pk_list, using, field=None):
        """
        Set up and execute delete queries for all the objects in pk_list.
        More than one physical query may be executed if there are a
        lot of values in pk_list.
        """
        # number of objects deleted
        num_deleted = 0
        if not field:
            # Default to filtering on the primary key.
            field = self.get_meta().pk
        # Chunk the pk list so each query's IN (...) parameter list stays
        # bounded (GET_ITERATOR_CHUNK_SIZE values per physical query).
        for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            # Fresh where clause per chunk; add_q() would otherwise append
            # to the previous chunk's filter.
            self.where = self.where_class()
            self.add_q(Q(
                **{field.attname + '__in': pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]}))
            num_deleted += self.do_query(self.get_meta().db_table, self.where, using=using)
        return num_deleted
    def delete_qs(self, query, using):
        """
        Delete the queryset in one SQL query (if possible). For simple queries
        this is done by copying the query.query.where to self.query, for
        complex queries by using subquery.
        """
        innerq = query.query
        # Make sure the inner query has at least one table in use.
        innerq.get_initial_alias()
        # The same for our new query.
        self.get_initial_alias()
        innerq_used_tables = [t for t in innerq.tables
                              if innerq.alias_refcount[t]]
        if not innerq_used_tables or innerq_used_tables == self.tables:
            # There is only the base table in use in the query.
            self.where = innerq.where
        else:
            pk = query.model._meta.pk
            if not connections[using].features.update_can_self_select:
                # We can't do the delete using subquery.
                # Fall back to fetching the pks and deleting them in batches
                # (the backend cannot SELECT from the table being modified).
                values = list(query.values_list('pk', flat=True))
                if not values:
                    return 0
                return self.delete_batch(values, using)
            else:
                # Reduce the inner query to a pk-selecting subquery.
                innerq.clear_select_clause()
                innerq.select = [
                    pk.get_col(self.get_initial_alias())
                ]
                values = innerq
            self.where = self.where_class()
            self.add_q(Q(pk__in=values))
        cursor = self.get_compiler(using).execute_sql(CURSOR)
        return cursor.rowcount if cursor else 0
class UpdateQuery(Query):
    """
    Represents an "update" SQL query.
    """
    compiler = 'SQLUpdateCompiler'
    def __init__(self, *args, **kwargs):
        super(UpdateQuery, self).__init__(*args, **kwargs)
        self._setup_query()
    def _setup_query(self):
        """
        Runs on initialization and after cloning. Any attributes that would
        normally be set in __init__ should go in here, instead, so that they
        are also set up after a clone() call.
        """
        self.values = []
        self.related_ids = None
        if not hasattr(self, 'related_updates'):
            self.related_updates = {}
    def clone(self, klass=None, **kwargs):
        # Copy related_updates so the clone doesn't share (and mutate) ours.
        return super(UpdateQuery, self).clone(klass,
                related_updates=self.related_updates.copy(), **kwargs)
    def update_batch(self, pk_list, values, using):
        """
        Apply `values` to the rows whose pks are in pk_list, chunking the
        pk filter so each UPDATE's parameter list stays bounded.
        """
        self.add_update_values(values)
        for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            # Fresh where clause per chunk; add_q() would otherwise append.
            self.where = self.where_class()
            self.add_q(Q(pk__in=pk_list[offset: offset + GET_ITERATOR_CHUNK_SIZE]))
            self.get_compiler(using).execute_sql(NO_RESULTS)
    def add_update_values(self, values):
        """
        Convert a dictionary of field name to value mappings into an update
        query. This is the entry point for the public update() method on
        querysets.
        """
        values_seq = []
        for name, val in six.iteritems(values):
            field = self.get_meta().get_field(name)
            # NOTE(review): as written this expression is always True (if
            # field.concrete holds the first operand is True, otherwise
            # `not field.concrete` is True), so the FieldError below only
            # ever fires for many-to-many fields. This mirrors the upstream
            # code of this vintage — confirm before "simplifying".
            direct = not (field.auto_created and not field.concrete) or not field.concrete
            model = field.model._meta.concrete_model
            if not direct or (field.is_relation and field.many_to_many):
                raise FieldError(
                    'Cannot update model field %r (only non-relations and '
                    'foreign keys permitted).' % field
                )
            if model is not self.get_meta().model:
                # Field lives on an ancestor model; handle it in a separate
                # per-ancestor update query.
                self.add_related_update(model, field, val)
                continue
            values_seq.append((field, model, val))
        return self.add_update_fields(values_seq)
    def add_update_fields(self, values_seq):
        """
        Append a sequence of (field, model, value) triples to the internal list
        that will be used to generate the UPDATE query. Might be more usefully
        called add_update_targets() to hint at the extra information here.
        """
        self.values.extend(values_seq)
    def add_related_update(self, model, field, value):
        """
        Adds (name, value) to an update query for an ancestor model.
        Updates are coalesced so that we only run one update query per ancestor.
        """
        # The None placeholder keeps the (field, model, value) triple shape
        # used by add_update_fields.
        self.related_updates.setdefault(model, []).append((field, None, value))
    def get_related_updates(self):
        """
        Returns a list of query objects: one for each update required to an
        ancestor model. Each query will have the same filtering conditions as
        the current query but will only update a single table.
        """
        if not self.related_updates:
            return []
        result = []
        for model, values in six.iteritems(self.related_updates):
            query = UpdateQuery(model)
            query.values = values
            if self.related_ids is not None:
                # Restrict each ancestor update to the rows updated here.
                query.add_filter(('pk__in', self.related_ids))
            result.append(query)
        return result
class InsertQuery(Query):
    """Represents an INSERT SQL statement."""
    compiler = 'SQLInsertCompiler'
    def __init__(self, *args, **kwargs):
        super(InsertQuery, self).__init__(*args, **kwargs)
        # Pending insert state, filled in by insert_values().
        self.fields = []
        self.objs = []
    def clone(self, klass=None, **kwargs):
        """Copy the query, carrying the pending insert state across."""
        state = {
            'fields': self.fields[:],
            'objs': self.objs[:],
            'raw': self.raw,
        }
        # Caller-supplied kwargs take precedence over the copied state.
        state.update(kwargs)
        return super(InsertQuery, self).clone(klass, **state)
    def insert_values(self, fields, objs, raw=False):
        """
        Record the fields and model instances the compiler will turn into
        the INSERT statement.

        When ``raw`` is True the values are emitted verbatim in the SQL
        rather than bound as parameters — this is how NULL and DEFAULT
        keywords get into the query.
        """
        self.fields = fields
        self.objs = objs
        self.raw = raw
class AggregateQuery(Query):
    """
    An AggregateQuery takes another query as a parameter to the FROM
    clause and only selects the elements in the provided list.
    """
    compiler = 'SQLAggregateCompiler'
    def add_subquery(self, query, using):
        """
        Compile `query` for the `using` connection and stash its SQL and
        parameters as the subquery placed in this query's FROM clause.
        """
        # with_col_aliases keeps the subquery's column names unambiguous.
        self.subquery, self.sub_params = query.get_compiler(using).as_sql(
            with_col_aliases=True,
            subquery=True,
        )
# ---------------------------------------------------------------------------
# LATITUDE is NORTH-SOUTH e.g. 33 deg SOUTH (Sydney)
# LONGITUDE is EAST-WEST e.g. 151 deg EAST (Sydney)
from optparse import OptionParser
import datetime, math, sqlite3, sys
import networkx
#http://nodedangles.wordpress.com/2010/05/16/measuring-distance-from-a-point-to-a-line-segment/
# Options
# Module-level switches/handles shared by the functions and classes below.
debug = None   # truthy => draw every station marker (see drawStationAxis);
               # presumably meant to be set from the OptionParser — not
               # reassigned anywhere in this chunk, confirm against main().
dbConn = None  # lazily-created, module-wide sqlite3 connection (see getDbConn)
def getDbConn():
    """Return the shared SQLite connection, opening it on first use.

    The connection is cached in the module-level ``dbConn`` global; rows
    are returned as sqlite3.Row so SQL readers can access columns by name.
    """
    global dbConn
    # PEP 8: compare against the None singleton with `is`, not `==`.
    if dbConn is None:
        dbConn = sqlite3.connect('../data/visual-commute.db')
        dbConn.row_factory = sqlite3.Row
    return dbConn
def lineMagnitude(x1, y1, x2, y2):
    """Euclidean distance between the points (x1, y1) and (x2, y2)."""
    dx = x2 - x1
    dy = y2 - y1
    return math.sqrt(math.pow(dx, 2) + math.pow(dy, 2))
def distancePointLine(px, py, x1, y1, x2, y2):
    """Minimum distance from point (px, py) to the line segment
    (x1, y1)-(x2, y2), i.e. consecutive vertices in a polyline.

    Degenerate (near zero-length) segments yield the sentinel 9999.
    Port of Paul Bourke's point/line routine:
    http://local.wasp.uwa.edu.au/~pbourke/geometry/pointline/source.vba
    """
    def _mag(ax, ay, bx, by):
        # Same arithmetic as the module-level lineMagnitude helper.
        return math.sqrt(math.pow((bx - ax), 2) + math.pow((by - ay), 2))
    segLen = _mag(x1, y1, x2, y2)
    if segLen < 0.00000001:
        return 9999
    u = (((px - x1) * (x2 - x1)) + ((py - y1) * (y2 - y1))) / (segLen * segLen)
    if (u < 0.00001) or (u > 1):
        # Closest point does not fall within the segment: take the shorter
        # distance to an endpoint.
        return min(_mag(px, py, x1, y1), _mag(px, py, x2, y2))
    # Projection lands on the segment: distance to the foot of the
    # perpendicular.
    ix = x1 + u * (x2 - x1)
    iy = y1 + u * (y2 - y1)
    return _mag(px, py, ix, iy)
class PointInSpaceTime(object):
    """A named geographic point (lat/lon) at a particular time of day.

    Used both for stations loaded from the timetable db and for ad-hoc
    positions when locating which timetable segment is closest.
    """
    def __init__(self, name, lat, lon, timeOfDay):
        self.name = name
        self.lat = lat
        self.lon = lon
        # datetime.time object
        self.timeOfDay = timeOfDay
    def __str__(self):
        return "%s - lon: %s (E-W) lat: %s (N-S) at time: %s" % (self.name, self.lon, self.lat, self.timeOfDay)
    def distanceFrom(self, location):
        """Straight-line distance to another point on the lat/lon plane."""
        return lineMagnitude(x1=self.lon, y1=self.lat, x2=location.lon, y2=location.lat)
    def shortName(self):
        # Removes the trailing " Station", at least for now. More options possible
        return self.name.rsplit(" ", 1)[0]
    def findClosestSegment(self):
        """Return the TimetableSegment closest to this point at self.timeOfDay.

        Find possible segments from the db to avoid constructing segment
        objects for EVERYTHING in the db. If we don't get a match here
        within an acceptable threshold,
        perhaps because the train is late, we'll have to do a wider search.
        """
        conn = getDbConn()
        # BUG FIX: the departure-time predicate previously read
        # "a`ep.depTime" (a stray backquote corrupting the "dep" alias),
        # which sqlite would reject as a malformed identifier.
        segmentFinderSql = """
        select segmentId, dep.tripId, d.stationName as depStn, d.lat as depLat, d.lon as depLon, dep.depTime as depDepTime, a.stationName as arvStn, a.lat as arvLat, a.lon as arvLon, arv.depTime as arvDepTime
        from Station d, Station a, TripStop dep, TripStop arv, Segment
        where
        Segment.depTripStopId = dep.tripStopId and
        Segment.arvTripStopId = arv.tripStopId and
        d.stationId = dep.stationId and
        a.stationId = arv.stationId and
        dep.depTime <= ? and
        arv.depTime >= ?;
        """
        closestSegment = None
        closestDistance = 9999
        # Bind the same time to both placeholders: keep segments whose
        # departure has happened but whose arrival hasn't.
        for row in conn.execute(segmentFinderSql, (self.timeOfDay.strftime("%H:%M"), \
                self.timeOfDay.strftime("%H:%M"))):
            dt = datetime.datetime.strptime(row["depDepTime"], "%H:%M")
            startStation = PointInSpaceTime(row["depStn"], \
                lat=row["depLat"], lon=row["depLon"], \
                timeOfDay=datetime.time(dt.hour, dt.minute))
            dt = datetime.datetime.strptime(row["arvDepTime"], "%H:%M")
            endStation = PointInSpaceTime(row["arvStn"], \
                lat=row["arvLat"], lon=row["arvLon"], \
                timeOfDay=datetime.time(dt.hour, dt.minute))
            segment = TimetableSegment(startStation, endStation, None, None)
            d = segment.latLonDistanceFromPoint(lon=self.lon, lat=self.lat)
            if d < closestDistance:
                closestSegment = segment
                closestDistance = d
        return closestSegment
class HTMLdoc(object):
    """Owns the output HTML file: writes a fixed page header on
    construction, passes through arbitrary writes, and closes the
    document (and the file) in finalise().
    """
    def __init__(self, htmlFileName):
        self.htmlFile = open(htmlFileName, "w")
        self.htmlFile.write("""
        <HTML>
        <HEAD>
        <TITLE>Visual Timetable</TITLE>
        </HEAD>
        <BODY bgcolor="grey">
        """)
    def write(self, s):
        # All graph drawing funnels through here.
        self.htmlFile.write(s)
    def finalise(self):
        # Close the document tags, then the underlying file handle.
        self.write("</BODY></HTML>\n")
        self.htmlFile.close()
class BaseHTMLDistanceTimeGraph(object):
    """Orientation-independent part of the distance/time canvas graph.

    Emits an HTML5 <canvas> plus the JavaScript that draws trips onto it.
    Subclasses supply the orientation-specific pieces referenced here —
    ABBREV, the *_TEXTALIGN/*_TEXTBASELINE constants, datetimeToYPoint,
    drawHourGridLine, drawSubHourMarkers, the time-label/segment drawing
    methods, etc. (not all of them are visible in this chunk).
    """
    # Tick lengths (px) and label font size (pt) shared by all subclasses.
    MINOR_LINE_MARKER_LEN = 2
    MAJOR_LINE_MARKER_LEN = 4
    TEXT_HEIGHT = 10
    def __init__(self, htmlDoc, tripManager, canvasWidth, canvasHeight):
        """Emit the canvas element, the JS helper functions, and a grey
        background; the canvas id embeds ABBREV and the dimensions so
        several graphs can coexist on one page.
        """
        self.htmlDoc = htmlDoc
        self.tm = tripManager
        self.canvasWidth = canvasWidth
        self.canvasHeight = canvasHeight
        self.write("""
        <canvas id="%(a)scanvas%(w)sX%(h)s" width="%(w)s" height="%(h)s">
            <p>Your browser doesn't support canvas.</p>
        </canvas>
        <script type="text/javascript">
        function drawSegmentJS(context, departureXPoint, departureYPoint, arrivalXPoint, arrivalYPoint, lineColour, bulletColour)
        {
            context.save();
            context.strokeStyle = lineColour;
            context.fillStyle = bulletColour;
            context.beginPath();
            context.arc(departureXPoint, departureYPoint, 2, 0, 359, true);
            context.closePath();
            context.fill();
            context.beginPath();
            context.moveTo(departureXPoint,departureYPoint);
            context.lineTo(arrivalXPoint,arrivalYPoint);
            context.stroke();
            context.beginPath();
            context.arc(arrivalXPoint, arrivalYPoint, 2, 0, 359, true);
            context.closePath();
            context.fill();
            context.restore();
        }
        function drawHourGridLineJS(context, startX, startY, endX, endY)
        {
            context.save();
            context.strokeStyle = '#ECF1EF';
            context.beginPath();
            context.moveTo(startX, startY);
            context.lineTo(endX, endY);
            context.stroke();
            context.restore();
        }
        var drawingCanvas = document.getElementById('%(a)scanvas%(w)sX%(h)s');
        var ctx = drawingCanvas.getContext('2d');
        ctx.save();
        ctx.fillStyle = '#E6E6E6'
        ctx.fillRect(0,0,%(w)s,%(h)s);
        ctx.restore();
        // Preserve the original context
        ctx.save();
        ctx.font = '%(t)spt Arial';
        """ % {"w":self.canvasWidth, "h":self.canvasHeight, "t":self.TEXT_HEIGHT, "a":self.ABBREV})
    def write(self, s):
        # Convenience pass-through to the HTML document.
        self.htmlDoc.write(s)
    def getSegmentLineColour(self, lineId):
        """Canvas stroke colour for a segment belonging to train line lineId."""
        # FIXME - trips may or may not have more than one colour depending
        # on how many lines make up a trip (or whether a trip can actually
        # span more than one line
        if lineId in (1,2):
            return "#c5c5c5"
        elif lineId in (3,4):
            return "#fcb514"
        elif lineId == -1:
            # Interchange
            return "black"
        else:
            return "red"
        #return ["red", "orange", "yellow", "green", "blue", "purple"][random.randint(0, 5)]
    def getSegmentBulletColour(self, lineId):
        """Canvas fill colour for the endpoint bullets of a segment."""
        if lineId in (1,2):
            return "#fcb514"
        elif lineId in (3,4):
            return "#fcb514"
        elif lineId == -1:
            # Interchange
            return "black"
        else:
            return "green"
    def drawTrips(self):
        """Draw every trip, labelling start/end times only when the labels
        fit: if the minimum pixel gap between adjacent departure (or
        arrival) times is smaller than the text height, fall back to an
        hour axis on that side instead of per-trip labels.
        """
        allTrips = self.tm.getTrips()
        tripDepTimeList = []
        tripArvTimeList = []
        for trip in allTrips:
            ttSegments = trip.getSegments()
            tripDepTimeList.append(ttSegments[0].departurePoint.timeOfDay)
            tripArvTimeList.append(ttSegments[-1].arrivalPoint.timeOfDay)
        minDepTripDelta = self.tm.getMinTimeDeltaFromTimes(tripDepTimeList)
        minArvTripDelta = self.tm.getMinTimeDeltaFromTimes(tripArvTimeList)
        # get a point on the graph to see how many pixels the min deltas cover
        exampleDepPoint = datetime.datetime.combine(datetime.datetime.now(), tripDepTimeList[0])
        minPxBetweenDepPoints = self.datetimeToYPoint(exampleDepPoint + minDepTripDelta) - \
                self.datetimeToYPoint(exampleDepPoint)
        # Text height +1 pixel between means the times would be readable
        if minPxBetweenDepPoints < (self.TEXT_HEIGHT + 1):
            print "Departures are too close (%spx/%s). Use hour axis on the left" % \
                (minPxBetweenDepPoints, minDepTripDelta)
            drawStartLabel = False
        else:
            print "Departures are spaced well (%spx/%s). Use labels on the left" % \
                (minPxBetweenDepPoints, minDepTripDelta)
            drawStartLabel = True
        # get a point on the graph to see how many pixels the min deltas cover
        exampleArvPoint = datetime.datetime.combine(datetime.datetime.now(), tripArvTimeList[0])
        minPxBetweenArvPoints = self.datetimeToYPoint(exampleArvPoint + minArvTripDelta) - \
                self.datetimeToYPoint(exampleArvPoint)
        # Text height +1 pixel between means the times would be readable
        if minPxBetweenArvPoints < (self.TEXT_HEIGHT + 1):
            print "Arrivals are too close (%spx/%s). Use hour axis on the right" % \
                (minPxBetweenArvPoints, minArvTripDelta)
            drawEndLabel = False
        else:
            print "Arrivals are spaced well (%spx/%s). Use labels on the right" % \
                (minPxBetweenArvPoints, minArvTripDelta)
            drawEndLabel = True
        if not drawStartLabel:
            self.drawHourAxis()
        for trip in allTrips:
            self.drawTrip(trip, drawStartLabel, drawEndLabel)
        #[self.drawTrip(trip) for trip in self.tm.getTrips()]
    def finalise(self):
        # Restore the context saved in __init__ and close the script tag.
        self.write("""
        ctx.restore();
        </SCRIPT>
        """)
    def drawStationAxis(self):
        """Label stations along the station axis; all of them when the
        module-level `debug` flag is truthy, otherwise only the first
        and last.
        """
        self.write("ctx.save();\n")
        self.write("ctx.textAlign = '%s';\n" % (self.STATION_AXIS_TEXTALIGN,))
        self.write("ctx.textBaseline = '%s';\n" % (self.STATION_AXIS_TEXTBASELINE,))
        allStations = self.tm.getAllStationsInTrips()
        if debug:
            stationList = allStations
        else:
            stationList = (allStations[0], allStations[-1])
        for station in stationList:
            self.write("//Station marker: %s\n" % (station.shortName(), ))
            filltextArgs = (station.shortName(),) + self.stationOnStationAxisCoord(station.name)
            self.write("ctx.fillText('%s', %s, %s);\n" % filltextArgs)
        self.write("ctx.restore();\n")
    def drawHourAxis(self):
        """Draw hour grid lines, hour labels and sub-hour tick marks for
        every hour spanned by the trips.
        """
        # we need to show up to 59mins past the hour on the x axis
        self.write("ctx.save();\n")
        self.write("ctx.textAlign = '%s';\n" % (self.HOUR_AXIS_TEXTALIGN,))
        self.write("ctx.textBaseline = '%s';\n" % (self.HOUR_AXIS_TEXTBASELINE,))
        # Note that getMaxEndHour is the largest hour e.g if the largest time is
        # 7:15, then the largest hour is 7. This means that in order to get
        # the correct scaling factor, we need to add 1 to the maxhour because
        # getMaxEndHour + 1 because the range function isn't inclusive
        for hour in range(self.tm.getMinTripStartHour(), \
                self.tm.getMaxTripEndHour() + 1):
            self.drawHourGridLine(hour, hour)
            self.write("//Hour marker: %s\n" % (hour,))
            filltextArgs = (hour, ) + self.datetimeOnHourAxisCoord(datetime.time(hour))
            self.write("ctx.fillText('%s', %s, %s);\n" % filltextArgs)
            self.drawSubHourMarkers(hour)
        # NOTE(review): this `else` belongs to the `for` and, since the loop
        # has no `break`, it always runs after the loop — drawing one final
        # grid line/label at maxHour + 1. If the hour range were ever empty,
        # `hour` would be unbound here (NameError); presumably that cannot
        # happen with real trip data — confirm.
        else:
            self.drawHourGridLine(hour + 1, hour + 1)
            self.write("//Hour marker: %s\n" % (hour + 1,))
            filltextArgs = (hour + 1, ) + self.datetimeOnHourAxisCoord(datetime.time(hour + 1))
            self.write("ctx.fillText('%s', %s, %s);\n" % filltextArgs)
        self.write("ctx.restore();\n")
class TimeVertHTMLDistanceTimeGraph(BaseHTMLDistanceTimeGraph):
    """Distance/time graph with time running down the vertical axis and
    stations across the top.
    """
    # 2 characters wide (ish). Could be smaller if we know we only have 1 char in the hour label
    GRAPH_BORDER_PADDING_LEFT_PX = 35
    GRAPH_BORDER_PADDING_RIGHT_PX = 35
    # Leave room above the plot for station labels and their tick marks.
    GRAPH_BORDER_PADDING_TOP_PX = \
        BaseHTMLDistanceTimeGraph.MAJOR_LINE_MARKER_LEN + \
        BaseHTMLDistanceTimeGraph.TEXT_HEIGHT
    GRAPH_BORDER_PADDING_BOTTOM_PX = 5
    # Distinguishes this flavour's canvas id from other graphs on the page.
    ABBREV = "timeVert"
    # Station names sit above the plot, left-aligned on their x position.
    STATION_AXIS_TEXTALIGN = "left"
    STATION_AXIS_TEXTBASELINE = "bottom"
    # Hour labels sit left of the plot, vertically centred on the grid line.
    HOUR_AXIS_TEXTALIGN = "right"
    HOUR_AXIS_TEXTBASELINE = "middle"
    # Per-trip departure labels (left edge) and arrival labels (right edge).
    START_LABEL_TEXTALIGN = "right"
    START_LABEL_TEXTBASELINE = "middle"
    END_LABEL_TEXTALIGN = "left"
    END_LABEL_TEXTBASELINE = "middle"
    def __init__(self, htmlDoc, tripManager, canvasWidth, canvasHeight):
        """Set up the canvas (via the base class) and the axis origins."""
        super(TimeVertHTMLDistanceTimeGraph, self).__init__(htmlDoc, tripManager, canvasWidth, canvasHeight)
        # Maps station name -> x pixel; presumably filled in by
        # populateStationPointMap() (body not visible here) — confirm.
        self.stationXAxisPointMap = {}
        # Vertical (hour) axis at the left padding; horizontal (station)
        # axis at the top padding.
        self.xPointOfYAxis = self.GRAPH_BORDER_PADDING_LEFT_PX
        self.yPointOfXAxis = self.GRAPH_BORDER_PADDING_TOP_PX
def drawTrip(self, tripToDraw, drawStartLabel, drawEndLabel):
ttSegments = tripToDraw.getSegments()
tripDepPoint = ttSegments[0].departurePoint
tripArrPoint = ttSegments[-1].arrivalPoint
if drawStartLabel:
self.drawStartTimeLabel(tripDepPoint)
#self.drawSegment(TimetableSegment(tripDepPoint, tripArrPoint))
for segment in ttSegments:
self.drawSegment(segment)
if drawEndLabel:
self.drawEndTimeLabel(tripArrPoint)
    def drawGraph(self):
        """Draw the whole graph: station axis first, then every trip."""
        self.drawStationAxis()
        self.drawTrips()
def datetimeToYPoint(self, dt):
# Note that maxHour is the largest hour e.g if the largest time is
# 7:15, then the largest hour is 7. This means that in order to get
# the correct scaling factor, we need to add 1 to the maxhour because
# we need to show up to 59mins past the hour on the x axis
yScalingFactor = (self.canvasHeight - \
self.GRAPH_BORDER_PADDING_TOP_PX - \
self.GRAPH_BORDER_PADDING_BOTTOM_PX) / \
(self.tm.getMaxTripEndHour() + 1 - \
self.tm.getMinTripStartHour())
# round (down) it as we probably don't want to do subpixel stuff
return math.floor(self.GRAPH_BORDER_PADDING_TOP_PX + \
(dt.hour + dt.minute/60.0 - \
self.tm.getMinTripStartHour()) * \
yScalingFactor)
    def stationNameToXPoint(self, sn):
        """X pixel for station name `sn` (KeyError if the map isn't populated)."""
        return self.stationXAxisPointMap[sn]
    def stationOnStationAxisCoord(self, stationName):
        """(x, y) canvas coordinate of a station's label on the top axis."""
        return (self.stationNameToXPoint(stationName), self.yPointOfXAxis)
    def datetimeOnHourAxisCoord(self, dt):
        """(x, y) canvas coordinate of a time's label on the left hour axis."""
        return (self.xPointOfYAxis, self.datetimeToYPoint(dt))
def drawStartTimeLabel(self, pist):
self.write("ctx.save();\n")
self.write("ctx.textAlign = '%s';\n" % (self.START_LABEL_TEXTALIGN,))
self.write("ctx.textBaseline = '%s';\n" % (self.START_LABEL_TEXTBASELINE,))
labelXPoint = self.stationNameToXPoint(pist.name)
labelYPoint = self.datetimeToYPoint(pist.timeOfDay)
self.write("//Start label: %s\n" % (pist.timeOfDay.strftime("%H.%M"), ))
self.write("ctx.fillText('%s ', %s, %s);\n" % \
(pist.timeOfDay.strftime("%H.%M"), labelXPoint, labelYPoint))
self.write("ctx.restore();\n")
def drawEndTimeLabel(self, pist):
self.write("ctx.save();\n")
self.write("ctx.textAlign = '%s';\n" % (self.END_LABEL_TEXTALIGN,))
self.write("ctx.textBaseline = '%s';\n" % (self.END_LABEL_TEXTBASELINE,))
labelXPoint = self.stationNameToXPoint(pist.name)
labelYPoint = self.datetimeToYPoint(pist.timeOfDay)
self.write("//End label: %s\n" % (pist.timeOfDay.strftime("%H.%M"), ))
self.write("ctx.fillText(' %s', %s, %s);\n" % \
(pist.timeOfDay.strftime("%H.%M"), labelXPoint, labelYPoint))
self.write("ctx.restore();\n")
def drawSubHourMarkers(self, thisHour):
for minute in range(15, 60, 15):
self.write("//sub-hour line: %s:%s\n" % (thisHour, minute))
self.write("ctx.beginPath();\n")
# TEXT_HEIGH/2 because we want the dot to be aligned with the
# middle of the hour label
self.write("ctx.moveTo(%s,%s);\n" % \
(self.xPointOfYAxis - (self.TEXT_HEIGHT/2), \
self.datetimeToYPoint(datetime.time(thisHour, minute))))
self.write("ctx.lineTo(%s,%s);\n" % \
(self.xPointOfYAxis - (self.TEXT_HEIGHT/2) - self.MINOR_LINE_MARKER_LEN, \
self.datetimeToYPoint(datetime.time(thisHour, minute))))
self.write("ctx.stroke();\n")
def drawHourGridLine(self, hourLabel, thisHour):
self.write("//Grid line at %s\n" % (hourLabel,))
self.write("drawHourGridLineJS(ctx, %s, %s, %s, %s);\n" %
(self.xPointOfYAxis, \
self.datetimeToYPoint(datetime.time(thisHour)), \
self.canvasWidth - self.GRAPH_BORDER_PADDING_RIGHT_PX, \
self.datetimeToYPoint(datetime.time(thisHour))))
def populateStationPointMap(self):
departureStation = None
allStations = self.tm.getAllStationsInTrips()
if not allStations:
assert 0, "Trip Manager can't find any stations on the trips"
for arrivalStation in allStations:
if departureStation == None:
# this is the first station we've come across
xPointOfDepartureStation = self.xPointOfYAxis
departureStation = arrivalStation
continue
self.stationXAxisPointMap[departureStation.name] = xPointOfDepartureStation
xScalingFactor = (self.canvasWidth - \
self.GRAPH_BORDER_PADDING_LEFT_PX - \
self.GRAPH_BORDER_PADDING_RIGHT_PX)/self.tm.getMaxTripDistance()
xPointOfArrivalStation = math.floor(xPointOfDepartureStation + \
(departureStation.distanceFrom(arrivalStation) * \
xScalingFactor))
# ready for the next iteration
xPointOfDepartureStation = xPointOfArrivalStation
departureStation = arrivalStation
else:
self.stationXAxisPointMap[departureStation.name] = xPointOfDepartureStation
def drawSegment(self, timetableSegment):
# Note that because this draws a circle at the start and end of the
# segment, we're actually drawing circles for all but the first
# departure station and the last arrival station twice. meh.
self.write("//Segment: %s (%s) to %s (%s)\n" % \
(timetableSegment.departurePoint.name, \
timetableSegment.departurePoint.timeOfDay, \
timetableSegment.arrivalPoint.name, \
timetableSegment.arrivalPoint.timeOfDay))
departureXPoint = self.stationNameToXPoint(timetableSegment.departurePoint.name)
arrivalXPoint = self.stationNameToXPoint(timetableSegment.arrivalPoint.name)
departureYPoint = self.datetimeToYPoint(timetableSegment.departurePoint.timeOfDay)
arrivalYPoint = self.datetimeToYPoint(timetableSegment.arrivalPoint.timeOfDay)
self.write("drawSegmentJS(ctx, %s, %s, %s, %s, '%s', '%s');\n" % \
(departureXPoint, departureYPoint, \
arrivalXPoint, arrivalYPoint, \
self.getSegmentLineColour(timetableSegment.lineId), \
self.getSegmentBulletColour(timetableSegment.lineId)))
class TimetableSegment(object):
    """One leg of a trip: a departure point-in-space-time and an arrival one."""

    def __init__(self, departurePoint, arrivalPoint, tripId, lineId):
        self.departurePoint = departurePoint
        self.arrivalPoint = arrivalPoint
        self.tripId = tripId
        self.lineId = lineId

    def __str__(self):
        return "Segment from %s (%s) to %s (%s)" % (
            self.getDepartureName(), self.departurePoint.timeOfDay,
            self.getArrivalName(), self.arrivalPoint.timeOfDay)

    def getDepartureName(self):
        """Trip-qualified departure station name (used as a graph node name)."""
        return "%s on trip %s" % (self.departurePoint.name, self.tripId)

    def getArrivalName(self):
        """Trip-qualified arrival station name (used as a graph node name)."""
        return "%s on trip %s" % (self.arrivalPoint.name, self.tripId)

    def distance(self):
        """Distance covered by this segment."""
        return self.departurePoint.distanceFrom(self.arrivalPoint)

    def addAsDiGraphEdge(self, dg, ignoreLines):
        """Add this segment to DiGraph *dg* as an edge weighted in minutes.

        With ignoreLines=True the bare station names are used as node names;
        otherwise the trip-qualified names keep parallel trips distinct.
        """
        if ignoreLines:
            depName, arvName = self.departurePoint.name, self.arrivalPoint.name
        else:
            depName, arvName = self.getDepartureName(), self.getArrivalName()
        if depName not in dg:
            dg.add_node(depName, {"tripId": self.tripId, "pist": self.departurePoint})
        if arvName not in dg:
            dg.add_node(arvName, {"tripId": self.tripId, "pist": self.arrivalPoint})
        # Anchor both times-of-day to today's date so they can be subtracted.
        today = datetime.datetime.today()
        duration = (datetime.datetime.combine(today, self.arrivalPoint.timeOfDay) -
                    datetime.datetime.combine(today, self.departurePoint.timeOfDay))
        dg.add_edge(depName, arvName, weight=duration.seconds/60)

    def latLonDistanceFromPoint(self, lon, lat):
        """Distance from the point (lon, lat) to the line of this segment."""
        return distancePointLine(lon, lat,
                                 self.departurePoint.lon, self.departurePoint.lat,
                                 self.arrivalPoint.lon, self.arrivalPoint.lat)
class Trip(object):
    """A single timetabled run between two stations, backed by the Trip tables."""

    def __init__(self, tripId, startStation, endStation):
        self._tripId = tripId
        self._startStation = startStation
        self._endStation = endStation
        # Which line does this trip run on?
        lineSql = "select lineId from Trip where tripId = ?"
        self._lineId = getDbConn().execute(lineSql, (self._tripId,)).fetchone()["lineId"]
        # Lazily built, cached segment list (see getSegments()).
        self._segments = []

    def __str__(self):
        return "Trip (id:%s) from %s to %s, on line %s" % (
            self._tripId, self._startStation, self._endStation, self._lineId)

    def getTripId(self):
        return self._tripId

    def getSegments(self):
        """Return this trip's TimetableSegments from start to end station,
        building and caching them from the database on first call."""
        if not self._segments:
            conn = getDbConn()
            # TODO Should update this to use Segment table.
            tripStopSql = "select stationName, lat, lon, depTime from TripStop, Station where Station.stationId = TripStop.stationId and tripId = ?"
            previousStop = None
            started = False
            for row in conn.execute(tripStopSql, (self._tripId,)):
                if row["stationName"] == self._startStation:
                    started = True
                if not started:
                    # Still before the trip's start station.
                    continue
                parsed = datetime.datetime.strptime(row["depTime"], "%H:%M")
                thisStop = PointInSpaceTime(row["stationName"],
                                            lat=row["lat"], lon=row["lon"],
                                            timeOfDay=datetime.time(parsed.hour, parsed.minute))
                if previousStop:
                    self._segments.append(TimetableSegment(previousStop, thisStop, self._tripId, self._lineId))
                if row["stationName"] == self._endStation:
                    # The journey's over.
                    break
                previousStop = thisStop
        return self._segments

    def getTripDistance(self):
        """Total distance over all segments."""
        return sum(segment.distance() for segment in self.getSegments())

    def getStartHour(self):
        return self.getSegments()[0].departurePoint.timeOfDay.hour

    def getEndHour(self):
        return self.getSegments()[-1].arrivalPoint.timeOfDay.hour
class MultiTrip(object):
    """A journey made of one or more Trips joined at interchange stations."""

    def __init__(self):
        self._tripList = []
        # Cached result of getSegments(), built on first call.
        self._segList = []

    def __str__(self):
        return "MultiTrip with %s trips (%s)" % (
            len(self._tripList), ", ".join([str(t) for t in self._tripList]))

    def addTrip(self, tripId, startStation, endStation):
        """Append the Trip identified by *tripId* to this journey."""
        self._tripList.append(Trip(tripId, startStation, endStation))

    def getSegments(self):
        """Return the segments of every sub-trip in order, with a synthetic
        waiting segment (tripId/lineId of -1) inserted at each interchange.

        BUG FIX: the computed list is now stored in self._segList, so the
        cache checked at the top actually takes effect — previously the
        result was recomputed (and re-queried) on every call.
        """
        if not self._segList:
            segList = []
            lastInterchangeStation = None
            for t in self._tripList:
                segs = t.getSegments()
                if lastInterchangeStation:
                    # We need to indicate waiting time at the interchange.
                    segList.append(TimetableSegment(lastInterchangeStation,
                                                    segs[0].departurePoint, -1, -1))
                segList.extend(segs)
                lastInterchangeStation = segs[-1].arrivalPoint
            self._segList = segList
        return self._segList

    #XXX Three methods below pinched from Trip. Subclass?
    def getTripDistance(self):
        return sum([segment.distance() for segment in self.getSegments()])

    def getStartHour(self):
        return self.getSegments()[0].departurePoint.timeOfDay.hour

    def getEndHour(self):
        return self.getSegments()[-1].arrivalPoint.timeOfDay.hour
class TripManager(object):
    """Finds trips between two stations inside an hour window — including
    journeys that need a change of train — and answers aggregate questions
    (hour range, max distance, station list) used to lay out the graph.

    NOTE(review): this class uses Python 2 print statements and the
    networkx pre-2.0 API (``dg.node[...]``, ``add_node(name, attr_dict)``);
    both need attention before any interpreter/library upgrade.
    """

    def __init__(self, startHour, endHour, startStation, endStation):
        # Timeperiod will be something like "from xam to yam", which means
        # startHour is inclusive, but endHour is not.
        self.startHour = startHour
        self.endHour = endHour
        self.startStation = startStation
        self.endStation = endStation
        # Cached trip list, filled by getTrips().
        self._trips = []
        # While we develop the method
        #self.getTrips = self.getTripsDirect
        self.getTrips = self.getTripsWithChanges

    # Takes a list of datetime.time elements
    def getMinTimeDeltaFromTimes(self, dttList):
        """Return the smallest non-zero gap between consecutive times in
        *dttList*. NOTE: sorts *dttList* in place."""
        dttList.sort()
        lastTime = None
        # start with the maximum period of time represented on the graph
        # as the minTripDelta - if there is only one trip on the graph then
        # the gap calculation should make sense (unlike timedelta.max)
        minTripDelta = datetime.timedelta(hours=int(self.endHour) - int(self.startHour))
        for dtt in dttList:
            thisTime = datetime.timedelta(hours=dtt.hour, \
                minutes=dtt.minute)
            # Ignore this is the first reading
            # Ignore if it's the same time as this isn't a rendering problem
            if lastTime != None and lastTime != thisTime:
                gap = thisTime - lastTime
                minTripDelta = min(minTripDelta, gap)
            lastTime = thisTime
        return minTripDelta

    def getInterchangePointsOnLine(self, lineId):
        """Return the station names that are interchange points on *lineId*."""
        conn = getDbConn()
        interchangeList = []
        interchangeSql = """
            select stationName
            from InterchangeStation, Station
            where
                InterchangeStation.stationId = Station.stationId and
                lineId = ?
            """
        for row in conn.execute(interchangeSql, (lineId,)):
            interchangeList.append(row["stationName"])
        return interchangeList

    def getLinesContainingStation(self, stationName):
        """Return the id of every line whose trips stop at *stationName*."""
        conn = getDbConn()
        lineList = []
        lineSql = """
            select distinct(lineId)
            from Trip t, TripStop ts, Station s
            where
                t.tripId = ts.tripId and
                ts.stationId = s.stationId and
                s.stationName = ?
            """
        for row in conn.execute(lineSql, (stationName,)):
            lineList.append(row["lineId"])
        return lineList

    def getInterchangePoints(self):
        """Return the union of interchange stations on all lines through the
        start station and all lines through the end station."""
        #get a list of lines that include start point (startpoint lines)
        startPointLines = self.getLinesContainingStation(self.startStation)
        #get a list of lines that include the end point (endpoint lines)
        endPointLines = self.getLinesContainingStation(self.endStation)
        startPointLineInterchangePoints = set()
        endPointLineInterchangePoints = set()
        for line in startPointLines:
            [startPointLineInterchangePoints.add(p) for p in self.getInterchangePointsOnLine(line)]
        for line in endPointLines:
            [endPointLineInterchangePoints.add(p) for p in self.getInterchangePointsOnLine(line)]
        return startPointLineInterchangePoints.union(endPointLineInterchangePoints)

    def getTripsWithChanges(self):
        """Find journeys from startStation to endStation that may involve a
        change of train, returning (and caching) a list of MultiTrips.

        Strategy: for each candidate trip from the start to an interchange,
        build a weighted digraph of segments (edge weight = minutes), wire
        in zero-weight edges for the start/end stations and interchange
        partners, then read the shortest path back off as a MultiTrip.
        """
        if self._trips:
            return self._trips
        else:
            commonInterchangePoints = self.getInterchangePoints()
            # Map of tripId to a list of trip objects (starting from the start
            # station and ending at an interchange station)
            startToInterchangeTrips = {}
            for interchangePoint in commonInterchangePoints:
                # Get trips from the start station to the interchange point
                # (within the time bracket)
                for trip in self._getTripsDirectWithParams(self.startStation, \
                        self.startHour, interchangePoint, self.endHour):
                    # Form a list of trip objects from start station to interchange
                    # point, keyed by tripId (the same tripId can refer to multiple trips
                    # in this case as long as its finish station is different)
                    tripsWithTripId = startToInterchangeTrips.get(trip.getTripId(), [])
                    tripsWithTripId.append(trip)
                    startToInterchangeTrips[trip.getTripId()] = tripsWithTripId
            multiTripList = []
            for stitTripId, stitList in startToInterchangeTrips.items():
                #for stitTripId, stitList in [(41, startToInterchangeTrips[41])]:
                dg = networkx.DiGraph()
                # Start and end need to be treated like interchange points in that
                # we need to get to them regardless of the line (and the node name
                # includes lines). Start and stop stations don't have a tripId as
                # they'll be connected to some trip at the same station that does
                # have a tripId
                dg.add_node(self.startStation, {"tripId":None})
                dg.add_node(self.endStation, {"tripId":None})
                print "Processing Start to Interchange Trips for TripId %s" % (stitTripId,)
                interchangeFormat = "%s Interchange"
                # Add all the interchange points to the graph noting that
                # Interchange Points don't have a tripId
                [dg.add_node(interchangeFormat % (cip,), {"tripId":None}) for cip in commonInterchangePoints]
                for stit in stitList:
                    for seg in stit.getSegments():
                        # add all the segments to the graph
                        seg.addAsDiGraphEdge(dg, ignoreLines=False)
                        # Shortest path works from start station to end station
                        # and the node name doesn't have the trip number in them
                        # so we need to add the start station and end station
                        # to the graph somehow.
                        if seg.departurePoint.name == self.startStation:
                            dg.add_edge(self.startStation, seg.getDepartureName(), \
                                weight=0)
                        # add zero-weight edge from the station node to their
                        # corresponding interchange partner node.
                        # Only need to do this for arrival nodes because all nodes
                        # are represented by both departure and arrival with the
                        # exception of the initial departure point, and we'll
                        # never need to change trains at the initial departure
                        # point
                        if seg.arrivalPoint.name in commonInterchangePoints:
                            dg.add_edge(seg.getArrivalName(), \
                                interchangeFormat % (seg.arrivalPoint.name,), \
                                weight=0)
                            # From this interchange, find onward trips to the
                            # end station and splice the soonest one in.
                            interchangeToEndTrips = self._getTripsDirectWithParams( \
                                seg.arrivalPoint.name, \
                                seg.arrivalPoint.timeOfDay.hour, \
                                self.endStation, self.endHour)
                            if interchangeToEndTrips:
                                interchangeToEndTripId = interchangeToEndTrips[0].getTripId()
                                soonestArrivalSegOne = interchangeToEndTrips[0].getSegments()[0]
                                soonestArrivalLastSeg = interchangeToEndTrips[0].getSegments()[-1]
                                # Waiting time at the interchange = onward
                                # departure minus arrival here.
                                timeAtInterchange = \
                                    datetime.datetime.combine(datetime.datetime.today(), \
                                    soonestArrivalSegOne.departurePoint.timeOfDay) - \
                                    datetime.datetime.combine(datetime.datetime.today(), \
                                    seg.arrivalPoint.timeOfDay)
                                dg.add_node(soonestArrivalSegOne.getDepartureName(), \
                                    {"tripId":interchangeToEndTripId, \
                                    "pist":soonestArrivalSegOne.departurePoint})
                                dg.add_edge(interchangeFormat % (seg.arrivalPoint.name,), \
                                    soonestArrivalSegOne.getDepartureName(), \
                                    weight=timeAtInterchange.seconds/60)
                                [s.addAsDiGraphEdge(dg, ignoreLines=False) for s in interchangeToEndTrips[0].getSegments()]
                                #print "------Adding end edge: %s to %s" % (soonestArrivalLastSeg.getArrivalName(), self.endStation)
                                dg.add_edge(soonestArrivalLastSeg.getArrivalName(), self.endStation, \
                                    weight=0)
                            else:
                                #print "No matches from this interchange (%s) to the end (%s) > %sh and < %sh" % \
                                #    (seg.arrivalPoint.name, \
                                #    self.endStation, \
                                #    seg.arrivalPoint.timeOfDay.hour, \
                                #    self.endHour)
                                pass
                #print "########### Graph start ###########"
                #with open("dg.graphml", "w") as f:
                #    networkx.readwrite.graphml.write_graphml(dg, f)
                #networkx.readwrite.edgelist.write_edgelist(dg, sys.stdout)
                #networkx.readwrite.graphml.write_graphml(dg, sys.stdout)
                #print "########### Graph end ###########"
                #print "Nodes"
                #for n, ndata in dg.nodes(data=True):
                #    print "N: %s nData: %s" % (n, ndata)
                sp = networkx.shortest_path(dg, source=self.startStation, target=self.endStation, weight=True)
                print "Shortest path:", sp
                # Walk the shortest path, cutting it into Trips: a node with
                # a tripId continues/starts a trip, a tripId-less node (an
                # interchange, or the start/end) closes the current one.
                thisStartStationPIST = None
                thisStopStationPIST = None
                thisTripId = None
                mt = MultiTrip()
                for n in sp:
                    #print "Node '%s': %s" % (n, dg.node[n])
                    if dg.node[n]["tripId"] is None:
                        if thisTripId is None:
                            # At an interchange station (with the Trip already saved),
                            # or right at the start, with nothing to save.
                            #print "bail... tripId is None and thisTripId is none"
                            continue
                        else:
                            # We're at an interchange station. Save the last trip to the mt
                            # FIXME - populate lineId somehow
                            print "Adding to Multitrip: TripId %s" % (thisTripId,)
                            mt.addTrip(thisTripId, thisStartStationPIST.name, thisStopStationPIST.name)
                            thisTripId = None
                    else:
                        if dg.node[n]["tripId"] != thisTripId:
                            # We're onto a new trip.
                            thisStartStationPIST = dg.node[n]["pist"]
                            thisTripId = dg.node[n]["tripId"]
                        else:
                            # We're continuing a trip.
                            thisStopStationPIST = dg.node[n]["pist"]
                print mt
                multiTripList.append(mt)
            self._trips = multiTripList
            return self._trips

    def _getTripsDirectWithParams(self, startStation, startHour, endStation, endHour):
        """Return direct Trips from *startStation* (departing at or after
        *startHour*) to *endStation* (arriving before *endHour*), ordered
        soonest arrival first. Hours may be ints or 2-char strings."""
        #print "Trips with Params: %s (>=%sh) to %s (<%sh)" % \
        #    (startStation, startHour, endStation, endHour)
        # FIXME - make sure the calling functions pass in uniform data types
        if type(startHour) == int:
            startHour = "%02d" % (startHour,)
        if type(endHour) == int:
            endHour = "%02d" % (endHour,)
        assert type(startHour) == type("") and type(endHour) == type(""), "Hours need to be strings"
        assert len(startHour) == 2 and len(endHour) == 2, "Hours need to be two stringified numbers, zero padded"
        tripList = []
        conn = getDbConn()
        # Order by arrival time. It helps for multi-stage trips and does no
        # harm to other queries.
        tripsSql = """
            select dep.tripId, a.stationName, arv.depTime, d.stationName, dep.depTime, t.lineId
            from Station d, Station a, TripStop dep, TripStop arv, Trip t
            where
                -- start station and start hour
                d.stationName = ? and
                dep.depTime >= ? and
                -- end station and end hour
                a.stationName = ? and
                arv.depTime < ? and
                dep.depTime < arv.depTime and
                arv.tripId = dep.tripId and
                d.stationId = dep.stationId and
                a.stationId = arv.stationId and
                arv.tripId = t.tripId and
                dep.tripId = t.tripId
            order by arv.depTime
            """
        for row in conn.execute(tripsSql, (startStation, startHour, \
                endStation, endHour)):
            tripList.append(Trip(row["tripId"], startStation, endStation))
        #for t in tripList:
        #    print "- %s" % (t,)
        return tripList

    def getTripsDirect(self):
        """Return (and cache) direct trips between the configured stations."""
        if self._trips:
            return self._trips
        else:
            self._trips = self._getTripsDirectWithParams(self.startStation, self.startHour, \
                self.endStation, self.endHour)
            return self._trips

    def getMinTripStartHour(self):
        # What about the fact that midnight (0) comes after 11pm (23)?
        return reduce(min, [t.getStartHour() for t in self.getTrips()], 23)

    def getMaxTripEndHour(self):
        # What about the fact that 11pm (23) comes before midnight (0)?
        return reduce(max, [t.getEndHour() for t in self.getTrips()], 0)

    def getMaxTripDistance(self):
        """Return the largest total distance over all trips (0 if none)."""
        return reduce(max, [t.getTripDistance() for t in self.getTrips()], 0)

    def getAllStationsInTrips(self):
        """Return every point-in-space-time seen on any trip, in encounter
        order with duplicates removed."""
        # FIXME: What exactly should this do now, given trips aren't linear
        # i.e. the x-axis doesn't make sense now...
        allStations = []
        for trip in self.getTrips():
            for seg in trip.getSegments():
                if seg.arrivalPoint not in allStations:
                    allStations.append(seg.arrivalPoint)
                if seg.departurePoint not in allStations:
                    allStations.append(seg.departurePoint)
                # Ignore interchange segments because they aren't... hmm... do we want them?
                #if seg.lineId != -1:
                    #print "Segment on trip %s: %s" % (trip, seg)
                    # Ignoring the lines because we're trying to get a list
                    # of all stations to get ordering along the tracks so
                    # lines aren't helpful
        return allStations
def main():
    """Parse the command line, then render the distance/time graph at two
    canvas sizes (portrait and landscape) into ./canvas.html."""
    parser = OptionParser()
    parser.add_option("-f", "--from", dest="fromStation",
                      help="the origin station")
    parser.add_option("-t", "--to", dest="toStation",
                      help="the destination station")
    parser.add_option("-s", "--start-hour", dest="startHour",
                      help="the start time in the window")
    parser.add_option("-e", "--end-hour", dest="endHour",
                      help="the end time in the window")
    parser.add_option("-d", "--debug", dest="debug",
                      action="store_true", help="send debug messages to stdout")
    options, args = parser.parse_args()
    if not (options.startHour and options.endHour and options.fromStation and options.toStation):
        parser.error("Options -f, -t, -s and -e are compulsory")
    try:
        endHourNum = int(options.endHour)
        startHourNum = int(options.startHour)
        if not (0 <= endHourNum <= 23 and 0 <= startHourNum <= 23):
            parser.error("Options -s and -e must be greater than 0 and less than 24")
        # The db stores hours zero-padded to two digits.
        endHourStr = options.endHour.zfill(2)
        startHourStr = options.startHour.zfill(2)
    except ValueError:
        parser.error("Options -s and -e must be numbers")
    # Append " Station" if it's been left off, as full names are used in the db
    fromStationStr = options.fromStation
    if " Station" not in fromStationStr:
        fromStationStr += " Station"
    toStationStr = options.toStation
    if " Station" not in toStationStr:
        toStationStr += " Station"
    if options.debug:
        global debug
        debug = True
    me = PointInSpaceTime("me", lon=150.615, lat=-33.743, timeOfDay=datetime.time(6, 37))
    tm = TripManager(startHourStr, endHourStr, fromStationStr, toStationStr)
    doc = HTMLdoc("./canvas.html")
    for gClass, w, h in ((TimeVertHTMLDistanceTimeGraph, 320, 480),
                         (TimeVertHTMLDistanceTimeGraph, 480, 320)):
        g = gClass(doc, tm, canvasWidth=w, canvasHeight=h)
        g.populateStationPointMap()
        g.drawGraph()
        g.finalise()
    doc.finalise()
    getDbConn().close()
# Standard script entry point.
if __name__ == '__main__':
    main()
# Centre: -33.7500, 150.6500
| |
# (C) Fractal Industries, Inc. 2016
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# std
from collections import defaultdict
from functools import wraps
# 3rd party
from pysnmp.entity.rfc3413.oneliner import cmdgen
import pysnmp.proto.rfc1902 as snmp_type
from pysnmp.smi import builder
from pysnmp.smi.exval import noSuchInstance, noSuchObject
from pysnmp.error import PySnmpError
# project
from checks.network_checks import NetworkCheck, Status
from config import _is_affirmative
# Additional types that are not part of the SNMP protocol. cf RFC 2856
(CounterBasedGauge64, ZeroBasedCounter64) = builder.MibBuilder().importSymbols(
    "HCNUM-TC",
    "CounterBasedGauge64",
    "ZeroBasedCounter64")
# Metric type that we support
# pysnmp value-type names this check classifies as counter-style metrics.
SNMP_COUNTERS = frozenset([
    snmp_type.Counter32.__name__,
    snmp_type.Counter64.__name__,
    ZeroBasedCounter64.__name__])
# pysnmp value-type names this check classifies as gauge-style metrics.
SNMP_GAUGES = frozenset([
    snmp_type.Gauge32.__name__,
    snmp_type.Unsigned32.__name__,
    CounterBasedGauge64.__name__,
    snmp_type.Integer.__name__,
    snmp_type.Integer32.__name__])
# How many OIDs to request per SNMP command unless overridden in init_config.
DEFAULT_OID_BATCH_SIZE = 10
def reply_invalid(oid):
    """Return True when *oid* is a pysnmp noSuchInstance/noSuchObject marker."""
    return any(exval.isSameTypeWith(oid)
               for exval in (noSuchInstance, noSuchObject))
class SnmpCheck(NetworkCheck):
    """Agent check that polls devices over SNMP and submits the results."""

    SOURCE_TYPE_NAME = 'system'
    # pysnmp default values
    DEFAULT_RETRIES = 5
    DEFAULT_TIMEOUT = 1
    # Name of the reachability service check this check reports.
    SC_STATUS = 'snmp.can_check'
def __init__(self, name, init_config, agentConfig, instances):
for instance in instances:
if 'name' not in instance:
instance['name'] = self._get_instance_key(instance)
instance['skip_event'] = True
self.generators = {}
# Set OID batch size
self.oid_batch_size = int(init_config.get("oid_batch_size", DEFAULT_OID_BATCH_SIZE))
# Load Custom MIB directory
self.mibs_path = None
self.ignore_nonincreasing_oid = False
if init_config is not None:
self.mibs_path = init_config.get("mibs_folder")
self.ignore_nonincreasing_oid = _is_affirmative(
init_config.get("ignore_nonincreasing_oid", False))
NetworkCheck.__init__(self, name, init_config, agentConfig, instances)
def _load_conf(self, instance):
tags = instance.get("tags", [])
ip_address = instance["ip_address"]
metrics = instance.get('metrics', [])
timeout = int(instance.get('timeout', self.DEFAULT_TIMEOUT))
retries = int(instance.get('retries', self.DEFAULT_RETRIES))
enforce_constraints = _is_affirmative(instance.get('enforce_mib_constraints', True))
instance_key = instance['name']
cmd_generator = self.generators.get(instance_key, None)
if not cmd_generator:
cmd_generator = self.create_command_generator(self.mibs_path, self.ignore_nonincreasing_oid)
self.generators[instance_key] = cmd_generator
return cmd_generator, ip_address, tags, metrics, timeout, retries, enforce_constraints
def _get_instance_key(self, instance):
key = instance.get('name', None)
if key:
return key
host = instance.get('host', None)
ip = instance.get('ip_address', None)
port = instance.get('port', None)
if host and port:
key = "{host}:{port}".format(host=host, port=port)
elif ip and port:
key = "{host}:{port}".format(host=ip, port=port)
elif host:
key = host
elif ip:
key = ip
return key
def snmp_logger(self, func):
"""
Decorator to log, with DEBUG level, SNMP commands
"""
@wraps(func)
def wrapper(*args, **kwargs):
self.log.debug("Running SNMP command {0} on OIDS {1}"
.format(func.__name__, args[2:]))
result = func(*args, **kwargs)
self.log.debug("Returned vars: {0}".format(result[-1]))
return result
return wrapper
def create_command_generator(self, mibs_path, ignore_nonincreasing_oid):
'''
Create a command generator to perform all the snmp query.
If mibs_path is not None, load the mibs present in the custom mibs
folder. (Need to be in pysnmp format)
'''
cmd_generator = cmdgen.CommandGenerator()
cmd_generator.ignoreNonIncreasingOid = ignore_nonincreasing_oid
if mibs_path is not None:
mib_builder = cmd_generator.snmpEngine.msgAndPduDsp.\
mibInstrumController.mibBuilder
mib_sources = mib_builder.getMibSources() + \
(builder.DirMibSource(mibs_path), )
mib_builder.setMibSources(*mib_sources)
return cmd_generator
@classmethod
def get_auth_data(cls, instance):
'''
Generate a Security Parameters object based on the instance's
configuration.
See http://pysnmp.sourceforge.net/docs/current/security-configuration.html
'''
if "community_string" in instance:
# SNMP v1 - SNMP v2
# See http://pysnmp.sourceforge.net/docs/current/security-configuration.html
if int(instance.get("snmp_version", 2)) == 1:
return cmdgen.CommunityData(instance['community_string'],
mpModel=0)
return cmdgen.CommunityData(instance['community_string'], mpModel=1)
elif "user" in instance:
# SNMP v3
user = instance["user"]
auth_key = None
priv_key = None
auth_protocol = None
priv_protocol = None
if "authKey" in instance:
auth_key = instance["authKey"]
auth_protocol = cmdgen.usmHMACMD5AuthProtocol
if "privKey" in instance:
priv_key = instance["privKey"]
auth_protocol = cmdgen.usmHMACMD5AuthProtocol
priv_protocol = cmdgen.usmDESPrivProtocol
if "authProtocol" in instance:
auth_protocol = getattr(cmdgen, instance["authProtocol"])
if "privProtocol" in instance:
priv_protocol = getattr(cmdgen, instance["privProtocol"])
return cmdgen.UsmUserData(user,
auth_key,
priv_key,
auth_protocol,
priv_protocol)
else:
raise Exception("An authentication method needs to be provided")
@classmethod
def get_transport_target(cls, instance, timeout, retries):
'''
Generate a Transport target object based on the instance's configuration
'''
if "ip_address" not in instance:
raise Exception("An IP address needs to be specified")
ip_address = instance["ip_address"]
port = int(instance.get("port", 161)) # Default SNMP port
return cmdgen.UdpTransportTarget((ip_address, port), timeout=timeout, retries=retries)
def raise_on_error_indication(self, error_indication, instance):
if error_indication:
message = "{0} for instance {1}".format(error_indication,
instance["ip_address"])
instance["service_check_error"] = message
raise Exception(message)
    def check_table(self, instance, cmd_generator, oids, lookup_names,
                    timeout, retries, enforce_constraints=False):
        '''
        Perform a snmpwalk on the domain specified by the oids, on the device
        configured in instance.
        lookup_names is a boolean to specify whether or not to use the mibs to
        resolve the name and values.
        Returns a dictionary:
        dict[oid/metric_name][row index] = value
        In case of scalar objects, the row index is just 0
        '''
        # UPDATE: We used to perform only a snmpgetnext command to fetch metric values.
        # It returns the wrong value when the OID passed is referring to a specific leaf.
        # For example:
        # snmpgetnext -v2c -c public localhost:11111 1.3.6.1.2.1.25.4.2.1.7.222
        # iso.3.6.1.2.1.25.4.2.1.7.224 = INTEGER: 2
        # SOLUTION: perform a snmpget command and fall back to snmpgetnext if not found

        # Set aliases for snmpget and snmpgetnext with logging
        snmpget = self.snmp_logger(cmd_generator.getCmd)
        snmpgetnext = self.snmp_logger(cmd_generator.nextCmd)
        transport_target = self.get_transport_target(instance, timeout, retries)
        auth_data = self.get_auth_data(instance)

        first_oid = 0
        all_binds = []
        results = defaultdict(dict)
        # Query the OIDs in batches of self.oid_batch_size.
        while first_oid < len(oids):
            try:
                # Start with snmpget command
                error_indication, error_status, error_index, var_binds = snmpget(
                    auth_data,
                    transport_target,
                    *(oids[first_oid:first_oid + self.oid_batch_size]),
                    lookupValues=enforce_constraints,
                    lookupNames=lookup_names)

                # Raise on error_indication
                self.raise_on_error_indication(error_indication, instance)

                missing_results = []
                complete_results = []

                # Split the replies into answered OIDs and ones the device
                # reported as noSuchInstance/noSuchObject.
                for var in var_binds:
                    result_oid, value = var
                    if reply_invalid(value):
                        oid_tuple = result_oid.asTuple()
                        oid = ".".join([str(i) for i in oid_tuple])
                        missing_results.append(oid)
                    else:
                        complete_results.append(var)

                if missing_results:
                    # If we didn't catch the metric using snmpget, try snmpgetnext
                    error_indication, error_status, error_index, var_binds_table = snmpgetnext(
                        auth_data,
                        transport_target,
                        *missing_results,
                        lookupValues=enforce_constraints,
                        lookupNames=lookup_names)

                    # Raise on error_indication
                    self.raise_on_error_indication(error_indication, instance)

                    if error_status:
                        message = "{0} for instance {1}".format(error_status.prettyPrint(),
                                                                instance["ip_address"])
                        instance["service_check_error"] = message
                        self.warning(message)

                    # nextCmd returns a table (list of rows of var binds).
                    for table_row in var_binds_table:
                        complete_results.extend(table_row)

                all_binds.extend(complete_results)

            except PySnmpError as e:
                # Record the failure for the service check but keep going.
                if "service_check_error" not in instance:
                    instance["service_check_error"] = "Fail to collect some metrics: {0}".format(e)
                if "service_check_severity" not in instance:
                    instance["service_check_severity"] = Status.CRITICAL
                self.warning("Fail to collect some metrics: {0}".format(e))

            # if we fail move onto next batch
            first_oid = first_oid + self.oid_batch_size

        # if we've collected some variables, it's not that bad.
        if "service_check_severity" in instance and len(all_binds):
            instance["service_check_severity"] = Status.WARNING

        for result_oid, value in all_binds:
            if lookup_names:
                _, metric, indexes = result_oid.getMibSymbol()
                results[metric][indexes] = value
            else:
                oid = result_oid.asTuple()
                matching = ".".join([str(i) for i in oid])
                # NOTE(review): unlike the lookup_names branch this stores a
                # flat oid -> value mapping (no row-index level), so the
                # docstring's two-level shape only applies to named results.
                results[matching] = value
        self.log.debug("Raw results: {0}".format(results))
        return results
    def _check(self, instance):
        '''
        Perform two series of SNMP requests, one for all that have MIB associated
        and should be looked up and one for those specified by oids
        '''
        cmd_generator, ip_address, tags, metrics, timeout, retries, enforce_constraints = self._load_conf(instance)
        tags += ['snmp_device:{0}'.format(ip_address)]

        table_oids = []
        raw_oids = []

        # Check the metrics completely defined
        for metric in metrics:
            if 'MIB' in metric:
                try:
                    # A MIB metric must name either a table or a symbol.
                    assert "table" in metric or "symbol" in metric
                    to_query = metric.get("table", metric.get("symbol"))
                    table_oids.append(cmdgen.MibVariable(metric["MIB"], to_query))
                except Exception as e:
                    self.log.warning("Can't generate MIB object for variable : %s\n"
                                     "Exception: %s", metric, e)
            elif 'OID' in metric:
                raw_oids.append(metric['OID'])
            else:
                raise Exception('Unsupported metric in config file: %s' % metric)
        try:
            if table_oids:
                self.log.debug("Querying device %s for %s oids", ip_address, len(table_oids))
                table_results = self.check_table(instance, cmd_generator, table_oids, True, timeout, retries,
                                                 enforce_constraints=enforce_constraints)
                self.report_table_metrics(metrics, table_results, tags)

            if raw_oids:
                self.log.debug("Querying device %s for %s oids", ip_address, len(raw_oids))
                # Constraints are not enforced for raw-OID queries.
                raw_results = self.check_table(instance, cmd_generator, raw_oids, False, timeout, retries,
                                               enforce_constraints=False)
                self.report_raw_metrics(metrics, raw_results, tags)
        except Exception as e:
            if "service_check_error" not in instance:
                instance["service_check_error"] = "Fail to collect metrics for {0} - {1}".format(instance['name'], e)
            self.warning(instance["service_check_error"])
            return [(self.SC_STATUS, Status.CRITICAL, instance["service_check_error"])]
        finally:
            # Report service checks
            # NOTE(review): a `return` inside `finally` overrides both the
            # except-branch return above and any in-flight exception. That
            # looks deliberate here (always report a service check status)
            # but it does mean unexpected exceptions are swallowed.
            tags = ["snmp_device:%s" % ip_address]
            if "service_check_error" in instance:
                status = Status.DOWN
                if "service_check_severity" in instance:
                    status = instance["service_check_severity"]
                return [(self.SC_STATUS, status, instance["service_check_error"])]
            return [(self.SC_STATUS, Status.UP, None)]
def report_as_service_check(self, sc_name, status, instance, msg=None):
    """Emit *status* for service check *sc_name*, tagged with the device IP
    plus any custom tags configured on the instance."""
    all_tags = ['snmp_device:{0}'.format(instance["ip_address"])] + instance.get('tags', [])
    self.service_check(
        sc_name,
        NetworkCheck.STATUS_TO_SERVICE_CHECK[status],
        tags=all_tags,
        message=msg,
    )
def report_raw_metrics(self, metrics, results, tags):
    '''
    Report metrics that were configured as plain OIDs.

    The configured OID either matches a returned OID exactly or is a prefix
    of one; the metric name comes from the instance configuration.  Values
    are handed to the aggregator via submit_metric.
    '''
    for metric in metrics:
        forced_type = metric.get('forced_type')
        if 'OID' not in metric:
            # MIB-based metrics are handled by report_table_metrics.
            continue
        wanted = metric['OID']
        if wanted in results:
            value = results[wanted]
        else:
            # Fall back to a prefix match against the returned OIDs.
            candidates = [oid for oid in results if oid.startswith(wanted)]
            if not candidates:
                self.log.warning("No matching results found for oid %s",
                                 wanted)
                continue
            value = results[candidates[0]]
        self.submit_metric(metric.get('name', 'unnamed_metric'), value,
                           forced_type, tags)
def report_table_metrics(self, metrics, results, tags):
    '''
    For each of the metrics specified as needing to be resolved with mib,
    gather the tags requested in the instance conf for each row.
    Submit the results to the aggregator.
    '''
    for metric in metrics:
        forced_type = metric.get('forced_type')
        if 'table' in metric:
            # Split configured tags into index-based and column-based ones;
            # both are resolved per row by get_index_tags.
            index_based_tags = []
            column_based_tags = []
            for metric_tag in metric.get('metric_tags', []):
                tag_key = metric_tag['tag']
                if 'index' in metric_tag:
                    index_based_tags.append((tag_key, metric_tag.get('index')))
                elif 'column' in metric_tag:
                    column_based_tags.append((tag_key, metric_tag.get('column')))
                else:
                    self.log.warning("No indication on what value to use for this tag")
            for value_to_collect in metric.get("symbols", []):
                for index, val in results[value_to_collect].items():
                    metric_tags = tags + self.get_index_tags(index, results,
                                                             index_based_tags,
                                                             column_based_tags)
                    self.submit_metric(value_to_collect, val, forced_type, metric_tags)
        elif 'symbol' in metric:
            name = metric['symbol']
            # Materialize as a list: on Python 3, dict.items() returns a view
            # that cannot be indexed with result[0] below.
            result = list(results[name].items())
            if len(result) > 1:
                self.log.warning("Several rows corresponding while the metric is supposed to be a scalar")
                continue
            if not result:
                # Previously crashed with IndexError on an empty result set.
                self.log.warning("No results found for metric %s", name)
                continue
            val = result[0][1]
            self.submit_metric(name, val, forced_type, tags)
        elif 'OID' in metric:
            pass  # This one is already handled by the other batch of requests
        else:
            raise Exception('Unsupported metric in config file: %s' % metric)
def get_index_tags(self, index, results, index_tags, column_tags):
    '''
    Build the tag list for one table row (identified by ``index``) from
    ``results`` (the complete query results).

    - ``index_tags`` holds (tag_group, position) pairs: the tag value is the
      element at that (1-based) position of the index tuple.
      cf. 1 for ipVersion in the IP-MIB::ipSystemStatsTable for example
    - ``column_tags`` holds (tag_group, column) pairs: the tag value is taken
      from that column of the row, when present and valid.
      cf. ifDescr in the IF-MIB::ifTable for example
    '''
    tags = []
    for tag_group, position in index_tags:
        try:
            tag_value = index[position - 1].prettyPrint()
        except IndexError:
            self.log.warning("Not enough indexes, skipping this tag")
            continue
        tags.append("{0}:{1}".format(tag_group, tag_value))
    for tag_group, column in column_tags:
        try:
            raw_value = results[column][index]
        except KeyError:
            self.log.warning("Column %s not present in the table, skipping this tag", column)
            continue
        if reply_invalid(raw_value):
            self.log.warning("Can't deduct tag from column for tag %s",
                             tag_group)
            continue
        tags.append("{0}:{1}".format(tag_group, raw_value.prettyPrint()))
    return tags
def submit_metric(self, name, snmp_value, forced_type, tags=None):
    '''
    Convert the values reported as pysnmp-Managed Objects to values and
    report them to the aggregator.

    ``forced_type`` may be "gauge" or "counter" to override the type derived
    from the pysnmp class name; any other non-empty value raises.
    '''
    # None sentinel instead of a mutable default argument ([] would be
    # shared across calls).
    if tags is None:
        tags = []
    if reply_invalid(snmp_value):
        # Metrics not present in the queried object
        # Lazy %-style args: the message is only formatted if emitted.
        self.log.warning("No such Mib available: %s", name)
        return
    metric_name = self.normalize(name, prefix="snmp")
    if forced_type:
        forced = forced_type.lower()
        if forced == "gauge":
            self.gauge(metric_name, int(snmp_value), tags)
        elif forced == "counter":
            self.rate(metric_name, int(snmp_value), tags)
        else:
            self.warning("Invalid forced-type specified: {0} in {1}".format(forced_type, name))
            raise Exception("Invalid forced-type in config file: {0}".format(name))
        return
    # Ugly hack but couldn't find a cleaner way
    # Proper way would be to use the ASN1 method isSameTypeWith but it
    # wrongfully returns True in the case of CounterBasedGauge64
    # and Counter64 for example
    snmp_class = snmp_value.__class__.__name__
    if snmp_class in SNMP_COUNTERS:
        self.rate(metric_name, int(snmp_value), tags)
        return
    if snmp_class in SNMP_GAUGES:
        self.gauge(metric_name, int(snmp_value), tags)
        return
    self.log.warning("Unsupported metric type %s", snmp_class)
| |
import unittest
from urllib3._collections import (
HTTPHeaderDict,
RecentlyUsedContainer as Container
)
from urllib3.packages import six
xrange = six.moves.xrange
from nose.plugins.skip import SkipTest
class TestLRUContainer(unittest.TestCase):
    """Behavioural tests for RecentlyUsedContainer: size cap, LRU eviction
    order, deletion, and the dispose callback."""

    def test_maxsize(self):
        d = Container(5)

        for i in xrange(5):
            d[i] = str(i)

        self.assertEqual(len(d), 5)

        for i in xrange(5):
            self.assertEqual(d[i], str(i))

        # ``i`` leaks from the loop above (i == 4): inserting key 5 evicts
        # the least-recently-used key 0.
        d[i+1] = str(i+1)

        self.assertEqual(len(d), 5)
        self.assertFalse(0 in d)
        self.assertTrue(i+1 in d)

    def test_expire(self):
        d = Container(5)

        for i in xrange(5):
            d[i] = str(i)

        # Repeatedly access key 0 so it becomes most-recently-used.
        for i in xrange(5):
            d.get(0)

        # Add one more entry
        d[5] = '5'

        # Check state
        self.assertEqual(list(d.keys()), [2, 3, 4, 0, 5])

    def test_same_key(self):
        d = Container(5)

        # Re-assigning the same key must not grow the container.
        for i in xrange(10):
            d['foo'] = i

        self.assertEqual(list(d.keys()), ['foo'])
        self.assertEqual(len(d), 1)

    def test_access_ordering(self):
        d = Container(5)

        for i in xrange(10):
            d[i] = True

        # Keys should be ordered by access time
        self.assertEqual(list(d.keys()), [5, 6, 7, 8, 9])

        new_order = [7,8,6,9,5]
        for k in new_order:
            d[k]

        self.assertEqual(list(d.keys()), new_order)

    def test_delete(self):
        d = Container(5)

        for i in xrange(5):
            d[i] = True

        del d[0]
        self.assertFalse(0 in d)

        d.pop(1)
        self.assertFalse(1 in d)

        # pop with a default on a missing key must not raise
        d.pop(1, None)

    def test_get(self):
        d = Container(5)

        for i in xrange(5):
            d[i] = True

        r = d.get(4)
        self.assertEqual(r, True)

        r = d.get(5)
        self.assertEqual(r, None)

        r = d.get(5, 42)
        self.assertEqual(r, 42)

        self.assertRaises(KeyError, lambda: d[5])

    def test_disposal(self):
        evicted_items = []

        def dispose_func(arg):
            # Save the evicted datum for inspection
            evicted_items.append(arg)

        d = Container(5, dispose_func=dispose_func)
        for i in xrange(5):
            d[i] = i

        self.assertEqual(list(d.keys()), list(xrange(5)))
        self.assertEqual(evicted_items, []) # Nothing disposed

        d[5] = 5
        self.assertEqual(list(d.keys()), list(xrange(1, 6)))
        self.assertEqual(evicted_items, [0])

        del d[1]
        self.assertEqual(evicted_items, [0, 1])

        d.clear()
        self.assertEqual(evicted_items, [0, 1, 2, 3, 4, 5])

    def test_iter(self):
        d = Container()

        self.assertRaises(NotImplementedError, d.__iter__)
class NonMappingHeaderContainer(object):
    """A header-like duck type exposing only ``keys`` and ``__getitem__``,
    used to exercise HTTPHeaderDict interaction with non-Mapping objects."""

    def __init__(self, **kwargs):
        self._data = dict(kwargs)

    def keys(self):
        return self._data.keys()

    def __getitem__(self, key):
        return self._data[key]
class TestHTTPHeaderDict(unittest.TestCase):
    """Tests for HTTPHeaderDict: a case-insensitive, multi-valued mapping of
    HTTP header names to values."""

    def setUp(self):
        # Every test starts from a dict with two values under 'Cookie'.
        self.d = HTTPHeaderDict(Cookie='foo')
        self.d.add('cookie', 'bar')

    def test_create_from_kwargs(self):
        h = HTTPHeaderDict(ab=1, cd=2, ef=3, gh=4)
        self.assertEqual(len(h), 4)
        self.assertTrue('ab' in h)

    def test_create_from_dict(self):
        h = HTTPHeaderDict(dict(ab=1, cd=2, ef=3, gh=4))
        self.assertEqual(len(h), 4)
        self.assertTrue('ab' in h)

    def test_create_from_iterator(self):
        teststr = 'urllib3ontherocks'
        h = HTTPHeaderDict((c, c*5) for c in teststr)
        # Duplicate characters collapse into one (multi-valued) field.
        self.assertEqual(len(h), len(set(teststr)))

    def test_create_from_list(self):
        h = HTTPHeaderDict([('ab', 'A'), ('cd', 'B'), ('cookie', 'C'), ('cookie', 'D'), ('cookie', 'E')])
        self.assertEqual(len(h), 3)
        self.assertTrue('ab' in h)
        clist = h.getlist('cookie')
        self.assertEqual(len(clist), 3)
        self.assertEqual(clist[0], 'C')
        self.assertEqual(clist[-1], 'E')

    def test_create_from_headerdict(self):
        org = HTTPHeaderDict([('ab', 'A'), ('cd', 'B'), ('cookie', 'C'), ('cookie', 'D'), ('cookie', 'E')])
        h = HTTPHeaderDict(org)
        self.assertEqual(len(h), 3)
        self.assertTrue('ab' in h)
        clist = h.getlist('cookie')
        self.assertEqual(len(clist), 3)
        self.assertEqual(clist[0], 'C')
        self.assertEqual(clist[-1], 'E')
        self.assertFalse(h is org)
        self.assertEqual(h, org)

    def test_setitem(self):
        # __setitem__ replaces all existing values for the field.
        self.d['Cookie'] = 'foo'
        self.assertEqual(self.d['cookie'], 'foo')
        self.d['cookie'] = 'with, comma'
        self.assertEqual(self.d.getlist('cookie'), ['with, comma'])

    def test_update(self):
        self.d.update(dict(Cookie='foo'))
        self.assertEqual(self.d['cookie'], 'foo')
        self.d.update(dict(cookie='with, comma'))
        self.assertEqual(self.d.getlist('cookie'), ['with, comma'])

    def test_delitem(self):
        del self.d['cookie']
        self.assertFalse('cookie' in self.d)
        self.assertFalse('COOKIE' in self.d)

    def test_add_well_known_multiheader(self):
        self.d.add('COOKIE', 'asdf')
        self.assertEqual(self.d.getlist('cookie'), ['foo', 'bar', 'asdf'])
        self.assertEqual(self.d['cookie'], 'foo, bar, asdf')

    def test_add_comma_separated_multiheader(self):
        self.d.add('bar', 'foo')
        self.d.add('BAR', 'bar')
        self.d.add('Bar', 'asdf')
        self.assertEqual(self.d.getlist('bar'), ['foo', 'bar', 'asdf'])
        self.assertEqual(self.d['bar'], 'foo, bar, asdf')

    def test_extend_from_list(self):
        self.d.extend([('set-cookie', '100'), ('set-cookie', '200'), ('set-cookie', '300')])
        self.assertEqual(self.d['set-cookie'], '100, 200, 300')

    def test_extend_from_dict(self):
        self.d.extend(dict(cookie='asdf'), b='100')
        self.assertEqual(self.d['cookie'], 'foo, bar, asdf')
        self.assertEqual(self.d['b'], '100')
        self.d.add('cookie', 'with, comma')
        self.assertEqual(self.d.getlist('cookie'), ['foo', 'bar', 'asdf', 'with, comma'])

    def test_extend_from_container(self):
        h = NonMappingHeaderContainer(Cookie='foo', e='foofoo')
        self.d.extend(h)
        self.assertEqual(self.d['cookie'], 'foo, bar, foo')
        self.assertEqual(self.d['e'], 'foofoo')
        self.assertEqual(len(self.d), 2)

    def test_extend_from_headerdict(self):
        h = HTTPHeaderDict(Cookie='foo', e='foofoo')
        self.d.extend(h)
        self.assertEqual(self.d['cookie'], 'foo, bar, foo')
        self.assertEqual(self.d['e'], 'foofoo')
        self.assertEqual(len(self.d), 2)

    def test_copy(self):
        h = self.d.copy()
        self.assertTrue(self.d is not h)
        self.assertEqual(self.d, h)

    def test_getlist(self):
        self.assertEqual(self.d.getlist('cookie'), ['foo', 'bar'])
        self.assertEqual(self.d.getlist('Cookie'), ['foo', 'bar'])
        self.assertEqual(self.d.getlist('b'), [])
        self.d.add('b', 'asdf')
        self.assertEqual(self.d.getlist('b'), ['asdf'])

    def test_getlist_after_copy(self):
        self.assertEqual(self.d.getlist('cookie'), HTTPHeaderDict(self.d).getlist('cookie'))

    def test_equal(self):
        b = HTTPHeaderDict(cookie='foo, bar')
        c = NonMappingHeaderContainer(cookie='foo, bar')
        self.assertEqual(self.d, b)
        self.assertEqual(self.d, c)
        self.assertNotEqual(self.d, 2)

    def test_not_equal(self):
        b = HTTPHeaderDict(cookie='foo, bar')
        c = NonMappingHeaderContainer(cookie='foo, bar')
        self.assertFalse(self.d != b)
        self.assertFalse(self.d != c)
        self.assertNotEqual(self.d, 2)

    def test_pop(self):
        key = 'Cookie'
        a = self.d[key]
        b = self.d.pop(key)
        self.assertEqual(a, b)
        self.assertFalse(key in self.d)
        self.assertRaises(KeyError, self.d.pop, key)
        dummy = object()
        self.assertTrue(dummy is self.d.pop(key, dummy))

    def test_discard(self):
        self.d.discard('cookie')
        self.assertFalse('cookie' in self.d)
        # Discarding a missing key is a no-op.
        self.d.discard('cookie')

    def test_len(self):
        self.assertEqual(len(self.d), 1)
        self.d.add('cookie', 'bla')
        self.d.add('asdf', 'foo')
        # len determined by unique fieldnames
        self.assertEqual(len(self.d), 2)

    def test_repr(self):
        rep = "HTTPHeaderDict({'Cookie': 'foo, bar'})"
        self.assertEqual(repr(self.d), rep)

    def test_items(self):
        items = self.d.items()
        self.assertEqual(len(items), 2)
        self.assertEqual(items[0][0], 'Cookie')
        self.assertEqual(items[0][1], 'foo')
        self.assertEqual(items[1][0], 'Cookie')
        self.assertEqual(items[1][1], 'bar')

    def test_dict_conversion(self):
        # Also tested in connectionpool, needs to preserve case
        hdict = {'Content-Length': '0', 'Content-type': 'text/plain', 'Server': 'TornadoServer/1.2.3'}
        h = dict(HTTPHeaderDict(hdict).items())
        self.assertEqual(hdict, h)
        self.assertEqual(hdict, dict(HTTPHeaderDict(hdict)))

    def test_string_enforcement(self):
        # This currently throws AttributeError on key.lower(), should probably be something nicer
        self.assertRaises(Exception, self.d.__setitem__, 3, 5)
        self.assertRaises(Exception, self.d.add, 3, 4)
        self.assertRaises(Exception, self.d.__delitem__, 3)
        self.assertRaises(Exception, HTTPHeaderDict, {3: 3})

    def test_from_httplib_py2(self):
        if six.PY3:
            raise SkipTest("python3 has a different internal header implementation")
        # NOTE(review): the continuation lines below ('asdf') must start with
        # whitespace so http_client folds them into X-Some-Multiline — the
        # original leading whitespace was reconstructed; confirm upstream.
        msg = """
Server: nginx
Content-Type: text/html; charset=windows-1251
Connection: keep-alive
X-Some-Multiline: asdf
 asdf
 asdf
Set-Cookie: bb_lastvisit=1348253375; expires=Sat, 21-Sep-2013 18:49:35 GMT; path=/
Set-Cookie: bb_lastactivity=0; expires=Sat, 21-Sep-2013 18:49:35 GMT; path=/
www-authenticate: asdf
www-authenticate: bla
"""
        buffer = six.moves.StringIO(msg.lstrip().replace('\n', '\r\n'))
        msg = six.moves.http_client.HTTPMessage(buffer)
        d = HTTPHeaderDict.from_httplib(msg)
        self.assertEqual(d['server'], 'nginx')
        cookies = d.getlist('set-cookie')
        self.assertEqual(len(cookies), 2)
        self.assertTrue(cookies[0].startswith("bb_lastvisit"))
        self.assertTrue(cookies[1].startswith("bb_lastactivity"))
        self.assertEqual(d['x-some-multiline'].split(), ['asdf', 'asdf', 'asdf'])
        self.assertEqual(d['www-authenticate'], 'asdf, bla')
        self.assertEqual(d.getlist('www-authenticate'), ['asdf', 'bla'])
# Allow running this test module directly with ``python <file>``.
if __name__ == '__main__':
    unittest.main()
| |
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
to_datetime,
)
import pandas._testing as tm
from pandas.api.indexers import BaseIndexer
from pandas.core.groupby.groupby import get_groupby
class TestRolling:
def setup_method(self):
self.frame = DataFrame({"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)})
def test_mutated(self):
msg = r"groupby\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
self.frame.groupby("A", foo=1)
g = self.frame.groupby("A")
assert not g.mutated
g = get_groupby(self.frame, by="A", mutated=True)
assert g.mutated
def test_getitem(self):
g = self.frame.groupby("A")
g_mutated = get_groupby(self.frame, by="A", mutated=True)
expected = g_mutated.B.apply(lambda x: x.rolling(2).mean())
result = g.rolling(2).mean().B
tm.assert_series_equal(result, expected)
result = g.rolling(2).B.mean()
tm.assert_series_equal(result, expected)
result = g.B.rolling(2).mean()
tm.assert_series_equal(result, expected)
result = self.frame.B.groupby(self.frame.A).rolling(2).mean()
tm.assert_series_equal(result, expected)
def test_getitem_multiple(self):
# GH 13174
g = self.frame.groupby("A")
r = g.rolling(2, min_periods=0)
g_mutated = get_groupby(self.frame, by="A", mutated=True)
expected = g_mutated.B.apply(lambda x: x.rolling(2, min_periods=0).count())
result = r.B.count()
tm.assert_series_equal(result, expected)
result = r.B.count()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"f",
[
"sum",
"mean",
"min",
"max",
pytest.param(
"count",
marks=pytest.mark.filterwarnings("ignore:min_periods:FutureWarning"),
),
"kurt",
"skew",
],
)
def test_rolling(self, f):
g = self.frame.groupby("A")
r = g.rolling(window=4)
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.rolling(4), f)())
# groupby.apply doesn't drop the grouped-by column
expected = expected.drop("A", axis=1)
# GH 39732
expected_index = MultiIndex.from_arrays([self.frame["A"], range(40)])
expected.index = expected_index
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("f", ["std", "var"])
def test_rolling_ddof(self, f):
g = self.frame.groupby("A")
r = g.rolling(window=4)
result = getattr(r, f)(ddof=1)
expected = g.apply(lambda x: getattr(x.rolling(4), f)(ddof=1))
# groupby.apply doesn't drop the grouped-by column
expected = expected.drop("A", axis=1)
# GH 39732
expected_index = MultiIndex.from_arrays([self.frame["A"], range(40)])
expected.index = expected_index
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"interpolation", ["linear", "lower", "higher", "midpoint", "nearest"]
)
def test_rolling_quantile(self, interpolation):
g = self.frame.groupby("A")
r = g.rolling(window=4)
result = r.quantile(0.4, interpolation=interpolation)
expected = g.apply(
lambda x: x.rolling(4).quantile(0.4, interpolation=interpolation)
)
# groupby.apply doesn't drop the grouped-by column
expected = expected.drop("A", axis=1)
# GH 39732
expected_index = MultiIndex.from_arrays([self.frame["A"], range(40)])
expected.index = expected_index
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("f", ["corr", "cov"])
def test_rolling_corr_cov(self, f):
g = self.frame.groupby("A")
r = g.rolling(window=4)
result = getattr(r, f)(self.frame)
def func(x):
return getattr(x.rolling(4), f)(self.frame)
expected = g.apply(func)
# GH 39591: The grouped column should be all np.nan
# (groupby.apply inserts 0s for cov)
expected["A"] = np.nan
tm.assert_frame_equal(result, expected)
result = getattr(r.B, f)(pairwise=True)
def func(x):
return getattr(x.B.rolling(4), f)(pairwise=True)
expected = g.apply(func)
tm.assert_series_equal(result, expected)
def test_rolling_apply(self, raw):
g = self.frame.groupby("A")
r = g.rolling(window=4)
# reduction
result = r.apply(lambda x: x.sum(), raw=raw)
expected = g.apply(lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw))
# groupby.apply doesn't drop the grouped-by column
expected = expected.drop("A", axis=1)
# GH 39732
expected_index = MultiIndex.from_arrays([self.frame["A"], range(40)])
expected.index = expected_index
tm.assert_frame_equal(result, expected)
def test_rolling_apply_mutability(self):
# GH 14013
df = DataFrame({"A": ["foo"] * 3 + ["bar"] * 3, "B": [1] * 6})
g = df.groupby("A")
mi = MultiIndex.from_tuples(
[("bar", 3), ("bar", 4), ("bar", 5), ("foo", 0), ("foo", 1), ("foo", 2)]
)
mi.names = ["A", None]
# Grouped column should not be a part of the output
expected = DataFrame([np.nan, 2.0, 2.0] * 2, columns=["B"], index=mi)
result = g.rolling(window=2).sum()
tm.assert_frame_equal(result, expected)
# Call an arbitrary function on the groupby
g.sum()
# Make sure nothing has been mutated
result = g.rolling(window=2).sum()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("expected_value,raw_value", [[1.0, True], [0.0, False]])
def test_groupby_rolling(self, expected_value, raw_value):
# GH 31754
def foo(x):
return int(isinstance(x, np.ndarray))
df = DataFrame({"id": [1, 1, 1], "value": [1, 2, 3]})
result = df.groupby("id").value.rolling(1).apply(foo, raw=raw_value)
expected = Series(
[expected_value] * 3,
index=MultiIndex.from_tuples(((1, 0), (1, 1), (1, 2)), names=["id", None]),
name="value",
)
tm.assert_series_equal(result, expected)
def test_groupby_rolling_center_center(self):
# GH 35552
series = Series(range(1, 6))
result = series.groupby(series).rolling(center=True, window=3).mean()
expected = Series(
[np.nan] * 5,
index=MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3), (5, 4))),
)
tm.assert_series_equal(result, expected)
series = Series(range(1, 5))
result = series.groupby(series).rolling(center=True, window=3).mean()
expected = Series(
[np.nan] * 4,
index=MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3))),
)
tm.assert_series_equal(result, expected)
df = DataFrame({"a": ["a"] * 5 + ["b"] * 6, "b": range(11)})
result = df.groupby("a").rolling(center=True, window=3).mean()
expected = DataFrame(
[np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, 9, np.nan],
index=MultiIndex.from_tuples(
(
("a", 0),
("a", 1),
("a", 2),
("a", 3),
("a", 4),
("b", 5),
("b", 6),
("b", 7),
("b", 8),
("b", 9),
("b", 10),
),
names=["a", None],
),
columns=["b"],
)
tm.assert_frame_equal(result, expected)
df = DataFrame({"a": ["a"] * 5 + ["b"] * 5, "b": range(10)})
result = df.groupby("a").rolling(center=True, window=3).mean()
expected = DataFrame(
[np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, np.nan],
index=MultiIndex.from_tuples(
(
("a", 0),
("a", 1),
("a", 2),
("a", 3),
("a", 4),
("b", 5),
("b", 6),
("b", 7),
("b", 8),
("b", 9),
),
names=["a", None],
),
columns=["b"],
)
tm.assert_frame_equal(result, expected)
def test_groupby_rolling_center_on(self):
# GH 37141
df = DataFrame(
data={
"Date": date_range("2020-01-01", "2020-01-10"),
"gb": ["group_1"] * 6 + ["group_2"] * 4,
"value": range(10),
}
)
result = (
df.groupby("gb")
.rolling(6, on="Date", center=True, min_periods=1)
.value.mean()
)
expected = Series(
[1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 7.0, 7.5, 7.5, 7.5],
name="value",
index=MultiIndex.from_tuples(
(
("group_1", Timestamp("2020-01-01")),
("group_1", Timestamp("2020-01-02")),
("group_1", Timestamp("2020-01-03")),
("group_1", Timestamp("2020-01-04")),
("group_1", Timestamp("2020-01-05")),
("group_1", Timestamp("2020-01-06")),
("group_2", Timestamp("2020-01-07")),
("group_2", Timestamp("2020-01-08")),
("group_2", Timestamp("2020-01-09")),
("group_2", Timestamp("2020-01-10")),
),
names=["gb", "Date"],
),
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("min_periods", [5, 4, 3])
def test_groupby_rolling_center_min_periods(self, min_periods):
# GH 36040
df = DataFrame({"group": ["A"] * 10 + ["B"] * 10, "data": range(20)})
window_size = 5
result = (
df.groupby("group")
.rolling(window_size, center=True, min_periods=min_periods)
.mean()
)
result = result.reset_index()[["group", "data"]]
grp_A_mean = [1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 7.5, 8.0]
grp_B_mean = [x + 10.0 for x in grp_A_mean]
num_nans = max(0, min_periods - 3) # For window_size of 5
nans = [np.nan] * num_nans
grp_A_expected = nans + grp_A_mean[num_nans : 10 - num_nans] + nans
grp_B_expected = nans + grp_B_mean[num_nans : 10 - num_nans] + nans
expected = DataFrame(
{"group": ["A"] * 10 + ["B"] * 10, "data": grp_A_expected + grp_B_expected}
)
tm.assert_frame_equal(result, expected)
def test_groupby_subselect_rolling(self):
# GH 35486
df = DataFrame(
{"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0], "c": [10, 20, 30, 20]}
)
result = df.groupby("a")[["b"]].rolling(2).max()
expected = DataFrame(
[np.nan, np.nan, 2.0, np.nan],
columns=["b"],
index=MultiIndex.from_tuples(
((1, 0), (2, 1), (2, 3), (3, 2)), names=["a", None]
),
)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].rolling(2).max()
expected = Series(
[np.nan, np.nan, 2.0, np.nan],
index=MultiIndex.from_tuples(
((1, 0), (2, 1), (2, 3), (3, 2)), names=["a", None]
),
name="b",
)
tm.assert_series_equal(result, expected)
def test_groupby_rolling_custom_indexer(self):
# GH 35557
class SimpleIndexer(BaseIndexer):
def get_window_bounds(
self, num_values=0, min_periods=None, center=None, closed=None
):
min_periods = self.window_size if min_periods is None else 0
end = np.arange(num_values, dtype=np.int64) + 1
start = end.copy() - self.window_size
start[start < 0] = min_periods
return start, end
df = DataFrame(
{"a": [1.0, 2.0, 3.0, 4.0, 5.0] * 3}, index=[0] * 5 + [1] * 5 + [2] * 5
)
result = (
df.groupby(df.index)
.rolling(SimpleIndexer(window_size=3), min_periods=1)
.sum()
)
expected = df.groupby(df.index).rolling(window=3, min_periods=1).sum()
tm.assert_frame_equal(result, expected)
def test_groupby_rolling_subset_with_closed(self):
# GH 35549
df = DataFrame(
{
"column1": range(6),
"column2": range(6),
"group": 3 * ["A", "B"],
"date": [Timestamp("2019-01-01")] * 6,
}
)
result = (
df.groupby("group").rolling("1D", on="date", closed="left")["column1"].sum()
)
expected = Series(
[np.nan, 0.0, 2.0, np.nan, 1.0, 4.0],
index=MultiIndex.from_tuples(
[("A", Timestamp("2019-01-01"))] * 3
+ [("B", Timestamp("2019-01-01"))] * 3,
names=["group", "date"],
),
name="column1",
)
tm.assert_series_equal(result, expected)
def test_groupby_subset_rolling_subset_with_closed(self):
# GH 35549
df = DataFrame(
{
"column1": range(6),
"column2": range(6),
"group": 3 * ["A", "B"],
"date": [Timestamp("2019-01-01")] * 6,
}
)
result = (
df.groupby("group")[["column1", "date"]]
.rolling("1D", on="date", closed="left")["column1"]
.sum()
)
expected = Series(
[np.nan, 0.0, 2.0, np.nan, 1.0, 4.0],
index=MultiIndex.from_tuples(
[("A", Timestamp("2019-01-01"))] * 3
+ [("B", Timestamp("2019-01-01"))] * 3,
names=["group", "date"],
),
name="column1",
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func", ["max", "min"])
def test_groupby_rolling_index_changed(self, func):
# GH: #36018 nlevels of MultiIndex changed
ds = Series(
[1, 2, 2],
index=MultiIndex.from_tuples(
[("a", "x"), ("a", "y"), ("c", "z")], names=["1", "2"]
),
name="a",
)
result = getattr(ds.groupby(ds).rolling(2), func)()
expected = Series(
[np.nan, np.nan, 2.0],
index=MultiIndex.from_tuples(
[(1, "a", "x"), (2, "a", "y"), (2, "c", "z")], names=["a", "1", "2"]
),
name="a",
)
tm.assert_series_equal(result, expected)
def test_groupby_rolling_empty_frame(self):
# GH 36197
expected = DataFrame({"s1": []})
result = expected.groupby("s1").rolling(window=1).sum()
# GH 32262
expected = expected.drop(columns="s1")
# GH-38057 from_tuples gives empty object dtype, we now get float/int levels
# expected.index = MultiIndex.from_tuples([], names=["s1", None])
expected.index = MultiIndex.from_product(
[Index([], dtype="float64"), Index([], dtype="int64")], names=["s1", None]
)
tm.assert_frame_equal(result, expected)
expected = DataFrame({"s1": [], "s2": []})
result = expected.groupby(["s1", "s2"]).rolling(window=1).sum()
# GH 32262
expected = expected.drop(columns=["s1", "s2"])
expected.index = MultiIndex.from_product(
[
Index([], dtype="float64"),
Index([], dtype="float64"),
Index([], dtype="int64"),
],
names=["s1", "s2", None],
)
tm.assert_frame_equal(result, expected)
def test_groupby_rolling_string_index(self):
# GH: 36727
df = DataFrame(
[
["A", "group_1", Timestamp(2019, 1, 1, 9)],
["B", "group_1", Timestamp(2019, 1, 2, 9)],
["Z", "group_2", Timestamp(2019, 1, 3, 9)],
["H", "group_1", Timestamp(2019, 1, 6, 9)],
["E", "group_2", Timestamp(2019, 1, 20, 9)],
],
columns=["index", "group", "eventTime"],
).set_index("index")
groups = df.groupby("group")
df["count_to_date"] = groups.cumcount()
rolling_groups = groups.rolling("10d", on="eventTime")
result = rolling_groups.apply(lambda df: df.shape[0])
expected = DataFrame(
[
["A", "group_1", Timestamp(2019, 1, 1, 9), 1.0],
["B", "group_1", Timestamp(2019, 1, 2, 9), 2.0],
["H", "group_1", Timestamp(2019, 1, 6, 9), 3.0],
["Z", "group_2", Timestamp(2019, 1, 3, 9), 1.0],
["E", "group_2", Timestamp(2019, 1, 20, 9), 1.0],
],
columns=["index", "group", "eventTime", "count_to_date"],
).set_index(["group", "index"])
tm.assert_frame_equal(result, expected)
def test_groupby_rolling_no_sort(self):
# GH 36889
result = (
DataFrame({"foo": [2, 1], "bar": [2, 1]})
.groupby("foo", sort=False)
.rolling(1)
.min()
)
expected = DataFrame(
np.array([[2.0, 2.0], [1.0, 1.0]]),
columns=["foo", "bar"],
index=MultiIndex.from_tuples([(2, 0), (1, 1)], names=["foo", None]),
)
# GH 32262
expected = expected.drop(columns="foo")
tm.assert_frame_equal(result, expected)
def test_groupby_rolling_count_closed_on(self):
# GH 35869
df = DataFrame(
{
"column1": range(6),
"column2": range(6),
"group": 3 * ["A", "B"],
"date": date_range(end="20190101", periods=6),
}
)
result = (
df.groupby("group")
.rolling("3d", on="date", closed="left")["column1"]
.count()
)
expected = Series(
[np.nan, 1.0, 1.0, np.nan, 1.0, 1.0],
name="column1",
index=MultiIndex.from_tuples(
[
("A", Timestamp("2018-12-27")),
("A", Timestamp("2018-12-29")),
("A", Timestamp("2018-12-31")),
("B", Timestamp("2018-12-28")),
("B", Timestamp("2018-12-30")),
("B", Timestamp("2019-01-01")),
],
names=["group", "date"],
),
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
("func", "kwargs"),
[("rolling", {"window": 2, "min_periods": 1}), ("expanding", {})],
)
def test_groupby_rolling_sem(self, func, kwargs):
# GH: 26476
df = DataFrame(
[["a", 1], ["a", 2], ["b", 1], ["b", 2], ["b", 3]], columns=["a", "b"]
)
result = getattr(df.groupby("a"), func)(**kwargs).sem()
expected = DataFrame(
{"a": [np.nan] * 5, "b": [np.nan, 0.70711, np.nan, 0.70711, 0.70711]},
index=MultiIndex.from_tuples(
[("a", 0), ("a", 1), ("b", 2), ("b", 3), ("b", 4)], names=["a", None]
),
)
# GH 32262
expected = expected.drop(columns="a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
("rollings", "key"), [({"on": "a"}, "a"), ({"on": None}, "index")]
)
def test_groupby_rolling_nans_in_index(self, rollings, key):
# GH: 34617
df = DataFrame(
{
"a": to_datetime(["2020-06-01 12:00", "2020-06-01 14:00", np.nan]),
"b": [1, 2, 3],
"c": [1, 1, 1],
}
)
if key == "index":
df = df.set_index("a")
with pytest.raises(ValueError, match=f"{key} must be monotonic"):
df.groupby("c").rolling("60min", **rollings)
@pytest.mark.parametrize("group_keys", [True, False])
def test_groupby_rolling_group_keys(self, group_keys):
# GH 37641
# GH 38523: GH 37641 actually was not a bug.
# group_keys only applies to groupby.apply directly
arrays = [["val1", "val1", "val2"], ["val1", "val1", "val2"]]
index = MultiIndex.from_arrays(arrays, names=("idx1", "idx2"))
s = Series([1, 2, 3], index=index)
result = s.groupby(["idx1", "idx2"], group_keys=group_keys).rolling(1).mean()
expected = Series(
[1.0, 2.0, 3.0],
index=MultiIndex.from_tuples(
[
("val1", "val1", "val1", "val1"),
("val1", "val1", "val1", "val1"),
("val2", "val2", "val2", "val2"),
],
names=["idx1", "idx2", "idx1", "idx2"],
),
)
tm.assert_series_equal(result, expected)
def test_groupby_rolling_index_level_and_column_label(self):
# The groupby keys should not appear as a resulting column
arrays = [["val1", "val1", "val2"], ["val1", "val1", "val2"]]
index = MultiIndex.from_arrays(arrays, names=("idx1", "idx2"))
df = DataFrame({"A": [1, 1, 2], "B": range(3)}, index=index)
result = df.groupby(["idx1", "A"]).rolling(1).mean()
expected = DataFrame(
{"B": [0.0, 1.0, 2.0]},
index=MultiIndex.from_tuples(
[
("val1", 1, "val1", "val1"),
("val1", 1, "val1", "val1"),
("val2", 2, "val2", "val2"),
],
names=["idx1", "A", "idx1", "idx2"],
),
)
tm.assert_frame_equal(result, expected)
def test_groupby_rolling_resulting_multiindex(self):
# a few different cases checking the created MultiIndex of the result
# https://github.com/pandas-dev/pandas/pull/38057
# grouping by 1 columns -> 2-level MI as result
df = DataFrame({"a": np.arange(8.0), "b": [1, 2] * 4})
result = df.groupby("b").rolling(3).mean()
expected_index = MultiIndex.from_tuples(
[(1, 0), (1, 2), (1, 4), (1, 6), (2, 1), (2, 3), (2, 5), (2, 7)],
names=["b", None],
)
tm.assert_index_equal(result.index, expected_index)
# grouping by 2 columns -> 3-level MI as result
df = DataFrame({"a": np.arange(12.0), "b": [1, 2] * 6, "c": [1, 2, 3, 4] * 3})
result = df.groupby(["b", "c"]).rolling(2).sum()
expected_index = MultiIndex.from_tuples(
[
(1, 1, 0),
(1, 1, 4),
(1, 1, 8),
(1, 3, 2),
(1, 3, 6),
(1, 3, 10),
(2, 2, 1),
(2, 2, 5),
(2, 2, 9),
(2, 4, 3),
(2, 4, 7),
(2, 4, 11),
],
names=["b", "c", None],
)
tm.assert_index_equal(result.index, expected_index)
# grouping with 1 level on dataframe with 2-level MI -> 3-level MI as result
df = DataFrame({"a": np.arange(8.0), "b": [1, 2] * 4, "c": [1, 2, 3, 4] * 2})
df = df.set_index("c", append=True)
result = df.groupby("b").rolling(3).mean()
expected_index = MultiIndex.from_tuples(
[
(1, 0, 1),
(1, 2, 3),
(1, 4, 1),
(1, 6, 3),
(2, 1, 2),
(2, 3, 4),
(2, 5, 2),
(2, 7, 4),
],
names=["b", None, "c"],
)
tm.assert_index_equal(result.index, expected_index)
def test_groupby_rolling_object_doesnt_affect_groupby_apply(self):
# GH 39732
g = self.frame.groupby("A")
expected = g.apply(lambda x: x.rolling(4).sum()).index
_ = g.rolling(window=4)
result = g.apply(lambda x: x.rolling(4).sum()).index
tm.assert_index_equal(result, expected)
assert not g.mutated
assert not g.grouper.mutated
    @pytest.mark.parametrize(
        "columns", [MultiIndex.from_tuples([("A", ""), ("B", "C")]), ["A", "B"]]
    )
    def test_by_column_not_in_values(self, columns):
        # GH 32262
        # The grouped-by column must be excluded from the rolling result,
        # for both flat and MultiIndex column labels, and grouping must not
        # mutate the groupby's underlying object.
        df = DataFrame([[1, 0]] * 20 + [[2, 0]] * 12 + [[3, 0]] * 8, columns=columns)
        g = df.groupby("A")
        original_obj = g.obj.copy(deep=True)
        r = g.rolling(4)
        result = r.sum()
        assert "A" not in result.columns
        tm.assert_frame_equal(g.obj, original_obj)
def test_groupby_level(self):
# GH 38523, 38787
arrays = [
["Falcon", "Falcon", "Parrot", "Parrot"],
["Captive", "Wild", "Captive", "Wild"],
]
index = MultiIndex.from_arrays(arrays, names=("Animal", "Type"))
df = DataFrame({"Max Speed": [390.0, 350.0, 30.0, 20.0]}, index=index)
result = df.groupby(level=0)["Max Speed"].rolling(2).sum()
expected = Series(
[np.nan, 740.0, np.nan, 50.0],
index=MultiIndex.from_tuples(
[
("Falcon", "Falcon", "Captive"),
("Falcon", "Falcon", "Wild"),
("Parrot", "Parrot", "Captive"),
("Parrot", "Parrot", "Wild"),
],
names=["Animal", "Animal", "Type"],
),
name="Max Speed",
)
tm.assert_series_equal(result, expected)
    @pytest.mark.parametrize(
        "by, expected_data",
        [
            [["id"], {"num": [100.0, 150.0, 150.0, 200.0]}],
            [
                ["id", "index"],
                {
                    "date": [
                        Timestamp("2018-01-01"),
                        Timestamp("2018-01-02"),
                        Timestamp("2018-01-01"),
                        Timestamp("2018-01-02"),
                    ],
                    "num": [100.0, 200.0, 150.0, 250.0],
                },
            ],
        ],
    )
    def test_as_index_false(self, by, expected_data):
        # GH 39433
        # With as_index=False the group keys become regular columns of the
        # rolling result and the frame keeps its original index.
        data = [
            ["A", "2018-01-01", 100.0],
            ["A", "2018-01-02", 200.0],
            ["B", "2018-01-01", 150.0],
            ["B", "2018-01-02", 250.0],
        ]
        df = DataFrame(data, columns=["id", "date", "num"])
        df["date"] = to_datetime(df["date"])
        df = df.set_index(["date"])

        # group by actual Series objects (column / index level) rather than
        # labels, per the parametrized "by" spec
        gp_by = [getattr(df, attr) for attr in by]
        result = (
            df.groupby(gp_by, as_index=False).rolling(window=2, min_periods=1).mean()
        )

        expected = {"id": ["A", "A", "B", "B"]}
        expected.update(expected_data)
        expected = DataFrame(
            expected,
            index=df.index,
        )
        tm.assert_frame_equal(result, expected)
class TestExpanding:
    """Tests for groupby(...).expanding(...), validated against the
    equivalent per-group ``groupby.apply`` computation."""

    def setup_method(self):
        # 40 rows, 3 groups of sizes 20 / 12 / 8
        self.frame = DataFrame({"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)})

    @pytest.mark.parametrize(
        "f", ["sum", "mean", "min", "max", "count", "kurt", "skew"]
    )
    def test_expanding(self, f):
        # Each reduction must match applying the expanding op per group.
        g = self.frame.groupby("A")
        r = g.expanding()
        result = getattr(r, f)()
        expected = g.apply(lambda x: getattr(x.expanding(), f)())
        # groupby.apply doesn't drop the grouped-by column
        expected = expected.drop("A", axis=1)
        # GH 39732
        expected_index = MultiIndex.from_arrays([self.frame["A"], range(40)])
        expected.index = expected_index
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize("f", ["std", "var"])
    def test_expanding_ddof(self, f):
        # ddof must be forwarded through to the per-group expanding window.
        g = self.frame.groupby("A")
        r = g.expanding()
        result = getattr(r, f)(ddof=0)
        expected = g.apply(lambda x: getattr(x.expanding(), f)(ddof=0))
        # groupby.apply doesn't drop the grouped-by column
        expected = expected.drop("A", axis=1)
        # GH 39732
        expected_index = MultiIndex.from_arrays([self.frame["A"], range(40)])
        expected.index = expected_index
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize(
        "interpolation", ["linear", "lower", "higher", "midpoint", "nearest"]
    )
    def test_expanding_quantile(self, interpolation):
        # quantile interpolation modes must be forwarded as well.
        g = self.frame.groupby("A")
        r = g.expanding()
        result = r.quantile(0.4, interpolation=interpolation)
        expected = g.apply(
            lambda x: x.expanding().quantile(0.4, interpolation=interpolation)
        )
        # groupby.apply doesn't drop the grouped-by column
        expected = expected.drop("A", axis=1)
        # GH 39732
        expected_index = MultiIndex.from_arrays([self.frame["A"], range(40)])
        expected.index = expected_index
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize("f", ["corr", "cov"])
    def test_expanding_corr_cov(self, f):
        g = self.frame.groupby("A")
        r = g.expanding()
        # frame-vs-frame pairwise computation
        result = getattr(r, f)(self.frame)

        def func(x):
            return getattr(x.expanding(), f)(self.frame)

        expected = g.apply(func)
        # GH 39591: groupby.apply returns 1 instead of nan for windows
        # with all nan values
        null_idx = list(range(20, 61)) + list(range(72, 113))
        expected.iloc[null_idx, 1] = np.nan
        # GH 39591: The grouped column should be all np.nan
        # (groupby.apply inserts 0s for cov)
        expected["A"] = np.nan
        tm.assert_frame_equal(result, expected)

        # series-vs-self pairwise computation
        result = getattr(r.B, f)(pairwise=True)

        def func(x):
            return getattr(x.B.expanding(), f)(pairwise=True)

        expected = g.apply(func)
        tm.assert_series_equal(result, expected)

    def test_expanding_apply(self, raw):
        # "raw" fixture toggles ndarray vs Series input to the UDF.
        g = self.frame.groupby("A")
        r = g.expanding()

        # reduction
        result = r.apply(lambda x: x.sum(), raw=raw)
        expected = g.apply(lambda x: x.expanding().apply(lambda y: y.sum(), raw=raw))
        # groupby.apply doesn't drop the grouped-by column
        expected = expected.drop("A", axis=1)
        # GH 39732
        expected_index = MultiIndex.from_arrays([self.frame["A"], range(40)])
        expected.index = expected_index
        tm.assert_frame_equal(result, expected)
class TestEWM:
    """Tests for groupby(...).ewm(...), including time-based decay via the
    ``times`` argument (GH 40951)."""

    @pytest.mark.parametrize(
        "method, expected_data",
        [
            ["mean", [0.0, 0.6666666666666666, 1.4285714285714286, 2.2666666666666666]],
            ["std", [np.nan, 0.707107, 0.963624, 1.177164]],
            ["var", [np.nan, 0.5, 0.9285714285714286, 1.3857142857142857]],
        ],
    )
    def test_methods(self, method, expected_data):
        # GH 16037
        df = DataFrame({"A": ["a"] * 4, "B": range(4)})
        result = getattr(df.groupby("A").ewm(com=1.0), method)()
        expected = DataFrame(
            {"B": expected_data},
            index=MultiIndex.from_tuples(
                [
                    ("a", 0),
                    ("a", 1),
                    ("a", 2),
                    ("a", 3),
                ],
                names=["A", None],
            ),
        )
        tm.assert_frame_equal(result, expected)

        # cross-check values against the per-group apply equivalent
        expected = df.groupby("A").apply(lambda x: getattr(x.ewm(com=1.0), method)())
        # There may be a bug in the above statement; not returning the correct index
        tm.assert_frame_equal(result.reset_index(drop=True), expected)

    @pytest.mark.parametrize(
        "method, expected_data",
        [["corr", [np.nan, 1.0, 1.0, 1]], ["cov", [np.nan, 0.5, 0.928571, 1.385714]]],
    )
    def test_pairwise_methods(self, method, expected_data):
        # GH 16037
        # pairwise ops add a third index level naming the paired column
        df = DataFrame({"A": ["a"] * 4, "B": range(4)})
        result = getattr(df.groupby("A").ewm(com=1.0), method)()
        expected = DataFrame(
            {"B": expected_data},
            index=MultiIndex.from_tuples(
                [
                    ("a", 0, "B"),
                    ("a", 1, "B"),
                    ("a", 2, "B"),
                    ("a", 3, "B"),
                ],
                names=["A", None, None],
            ),
        )
        tm.assert_frame_equal(result, expected)

        expected = df.groupby("A").apply(lambda x: getattr(x.ewm(com=1.0), method)())
        tm.assert_frame_equal(result, expected)

    def test_times(self, times_frame):
        # GH 40951
        # decay based on the timestamps in column "C" of the fixture frame
        halflife = "23 days"
        result = times_frame.groupby("A").ewm(halflife=halflife, times="C").mean()
        expected = DataFrame(
            {
                "B": [
                    0.0,
                    0.507534,
                    1.020088,
                    1.537661,
                    0.0,
                    0.567395,
                    1.221209,
                    0.0,
                    0.653141,
                    1.195003,
                ]
            },
            index=MultiIndex.from_tuples(
                [
                    ("a", 0),
                    ("a", 3),
                    ("a", 6),
                    ("a", 9),
                    ("b", 1),
                    ("b", 4),
                    ("b", 7),
                    ("c", 2),
                    ("c", 5),
                    ("c", 8),
                ],
                names=["A", None],
            ),
        )
        tm.assert_frame_equal(result, expected)

    def test_times_vs_apply(self, times_frame):
        # GH 40951
        # the grouped ewm must agree with the per-group apply equivalent
        # (rows re-ordered to the groupby output order before comparing)
        halflife = "23 days"
        result = times_frame.groupby("A").ewm(halflife=halflife, times="C").mean()
        expected = (
            times_frame.groupby("A")
            .apply(lambda x: x.ewm(halflife=halflife, times="C").mean())
            .iloc[[0, 3, 6, 9, 1, 4, 7, 2, 5, 8]]
            .reset_index(drop=True)
        )
        tm.assert_frame_equal(result.reset_index(drop=True), expected)

    def test_times_array(self, times_frame):
        # GH 40951
        # passing times as a column label or as a raw array must agree
        halflife = "23 days"
        result = times_frame.groupby("A").ewm(halflife=halflife, times="C").mean()
        expected = (
            times_frame.groupby("A")
            .ewm(halflife=halflife, times=times_frame["C"].values)
            .mean()
        )
        tm.assert_frame_equal(result, expected)
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'ZerigoDNSDriver'
]
import copy
import base64
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from xml.etree import ElementTree as ET
from libcloud.utils.misc import merge_valid_keys, get_new_obj
from libcloud.utils.xml import findtext, findall
from libcloud.common.base import XmlResponse, ConnectionUserAndKey
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.common.types import MalformedResponseError, LazyList
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record
API_HOST = 'ns.zerigo.com'
API_VERSION = '1.1'
API_ROOT = '/api/%s/' % (API_VERSION)
VALID_ZONE_EXTRA_PARAMS = ['notes', 'tag-list', 'ns1', 'slave-nameservers']
VALID_RECORD_EXTRA_PARAMS = ['notes', 'ttl', 'priority']
# Number of items per page (maximum limit is 1000)
ITEMS_PER_PAGE = 100
class ZerigoError(LibcloudError):
    """Raised when the Zerigo API responds with one or more error
    messages in the response body."""

    def __init__(self, code, errors):
        self.code = code
        self.errors = errors or []

    def __str__(self):
        joined = ', '.join(self.errors)
        return 'Errors: %s' % joined

    def __repr__(self):
        return ('<ZerigoError response code=%s, errors count=%s>'
                % (self.code, len(self.errors)))
class ZerigoDNSResponse(XmlResponse):
    """Response class mapping Zerigo HTTP statuses to libcloud errors."""

    def success(self):
        # Zerigo uses 200 / 201 / 202 for successful calls.
        return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED]

    def parse_error(self):
        """
        Raise a driver-specific exception based on the response status.

        :raises InvalidCredsError: on 401.
        :raises ZoneDoesNotExistError / RecordDoesNotExistError: on 404,
            depending on the resource recorded in the connection context.
        :raises MalformedResponseError: if the error body is not valid XML.
        :raises ZerigoError: for other non-503 error statuses, carrying the
            <error> messages from the response body.
        """
        status = int(self.status)

        if status == 401:
            if not self.body:
                raise InvalidCredsError(str(self.status) + ': ' + self.error)
            else:
                raise InvalidCredsError(self.body)
        elif status == 404:
            context = self.connection.context
            if context['resource'] == 'zone':
                raise ZoneDoesNotExistError(value='', driver=self,
                                            zone_id=context['id'])
            elif context['resource'] == 'record':
                raise RecordDoesNotExistError(value='', driver=self,
                                              record_id=context['id'])
        elif status != 503:
            try:
                body = ET.XML(self.body)
            # Bug fix: a bare "except:" here also swallowed SystemExit and
            # KeyboardInterrupt; only genuine parse failures should be
            # converted into MalformedResponseError.
            except Exception:
                raise MalformedResponseError('Failed to parse XML',
                                             body=self.body)

            errors = []
            for error in findall(element=body, xpath='error'):
                errors.append(error.text)

            raise ZerigoError(code=status, errors=errors)

        return self.body
class ZerigoDNSConnection(ConnectionUserAndKey):
    """Connection class for the Zerigo DNS API using HTTP Basic auth."""

    host = API_HOST
    secure = True
    responseCls = ZerigoDNSResponse

    def add_default_headers(self, headers):
        """Attach an HTTP Basic Authorization header built from the
        configured user id and API key."""
        auth_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key)))
        headers['Authorization'] = 'Basic %s' % (auth_b64.decode('utf-8'))
        return headers

    def request(self, action, params=None, data='', headers=None,
                method='GET'):
        if not headers:
            headers = {}
        if not params:
            params = {}

        if method in ("POST", "PUT"):
            # Bug fix: the headers dict was previously *replaced* here
            # ("headers = {...}"), silently discarding any caller-supplied
            # headers on POST/PUT requests. Only the Content-Type entry
            # should be set.
            headers['Content-Type'] = 'application/xml; charset=UTF-8'
        return super(ZerigoDNSConnection, self).request(action=action,
                                                        params=params,
                                                        data=data,
                                                        method=method,
                                                        headers=headers)
class ZerigoDNSDriver(DNSDriver):
    """
    DNS driver for the Zerigo DNS API (version 1.1).
    """
    type = Provider.ZERIGO
    name = 'Zerigo DNS'
    website = 'http://www.zerigo.com/'
    connectionCls = ZerigoDNSConnection

    #: Mapping of libcloud RecordType values to Zerigo host-type strings.
    RECORD_TYPE_MAP = {
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.CNAME: 'CNAME',
        RecordType.MX: 'MX',
        RecordType.REDIRECT: 'REDIRECT',
        RecordType.TXT: 'TXT',
        RecordType.SRV: 'SRV',
        RecordType.NAPTR: 'NAPTR',
        RecordType.NS: 'NS',
        RecordType.PTR: 'PTR',
        RecordType.SPF: 'SPF',
        RecordType.GEO: 'GEO',
        RecordType.URL: 'URL',
    }

    def list_zones(self):
        """Return a lazily-paginated list of all zones."""
        value_dict = {'type': 'zones'}
        return LazyList(get_more=self._get_more, value_dict=value_dict)

    def list_records(self, zone):
        """Return a lazily-paginated list of the records in ``zone``."""
        value_dict = {'type': 'records', 'zone': zone}
        return LazyList(get_more=self._get_more, value_dict=value_dict)

    def get_zone(self, zone_id):
        """Fetch a single zone by id; raises ZoneDoesNotExistError on 404."""
        path = API_ROOT + 'zones/%s.xml' % (zone_id)
        self.connection.set_context({'resource': 'zone', 'id': zone_id})
        data = self.connection.request(path).object
        zone = self._to_zone(elem=data)
        return zone

    def get_record(self, zone_id, record_id):
        """Fetch a single record by id (the zone is fetched first so the
        returned Record can reference it)."""
        zone = self.get_zone(zone_id=zone_id)
        self.connection.set_context({'resource': 'record', 'id': record_id})
        path = API_ROOT + 'hosts/%s.xml' % (record_id)
        data = self.connection.request(path).object
        record = self._to_record(elem=data, zone=zone)
        return record

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        """
        Create a new zone.

        Provider API docs:
        https://www.zerigo.com/docs/apis/dns/1.1/zones/create
        """
        path = API_ROOT + 'zones.xml'
        zone_elem = self._to_zone_elem(domain=domain, type=type, ttl=ttl,
                                       extra=extra)
        data = self.connection.request(action=path,
                                       data=ET.tostring(zone_elem),
                                       method='POST').object
        zone = self._to_zone(elem=data)
        return zone

    def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
        """
        Update an existing zone. The domain itself cannot be changed.

        Provider API docs:
        https://www.zerigo.com/docs/apis/dns/1.1/zones/update
        """
        if domain:
            raise LibcloudError('Domain cannot be changed', driver=self)

        path = API_ROOT + 'zones/%s.xml' % (zone.id)
        zone_elem = self._to_zone_elem(domain=domain, type=type, ttl=ttl,
                                       extra=extra)
        response = self.connection.request(action=path,
                                           data=ET.tostring(zone_elem),
                                           method='PUT')
        assert response.status == httplib.OK

        # Build a fresh Zone object reflecting the update (the API returns
        # no body for PUT).
        merged = merge_valid_keys(params=copy.deepcopy(zone.extra),
                                  valid_keys=VALID_ZONE_EXTRA_PARAMS,
                                  extra=extra)
        updated_zone = get_new_obj(obj=zone, klass=Zone,
                                   attributes={'type': type,
                                               'ttl': ttl,
                                               'extra': merged})
        return updated_zone

    def create_record(self, name, zone, type, data, extra=None):
        """
        Create a new record.

        Provider API docs:
        https://www.zerigo.com/docs/apis/dns/1.1/hosts/create
        """
        path = API_ROOT + 'zones/%s/hosts.xml' % (zone.id)
        record_elem = self._to_record_elem(name=name, type=type, data=data,
                                           extra=extra)
        response = self.connection.request(action=path,
                                           data=ET.tostring(record_elem),
                                           method='POST')
        assert response.status == httplib.CREATED
        record = self._to_record(elem=response.object, zone=zone)
        return record

    def update_record(self, record, name=None, type=None, data=None,
                      extra=None):
        """Update an existing record; returns a new Record object with the
        updated attributes merged in."""
        path = API_ROOT + 'hosts/%s.xml' % (record.id)
        record_elem = self._to_record_elem(name=name, type=type, data=data,
                                           extra=extra)
        response = self.connection.request(action=path,
                                           data=ET.tostring(record_elem),
                                           method='PUT')
        assert response.status == httplib.OK

        merged = merge_valid_keys(params=copy.deepcopy(record.extra),
                                  valid_keys=VALID_RECORD_EXTRA_PARAMS,
                                  extra=extra)
        updated_record = get_new_obj(obj=record, klass=Record,
                                     attributes={'type': type,
                                                 'data': data,
                                                 'extra': merged})
        return updated_record

    def delete_zone(self, zone):
        """Delete a zone; returns True on HTTP 200."""
        path = API_ROOT + 'zones/%s.xml' % (zone.id)
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        response = self.connection.request(action=path, method='DELETE')
        return response.status == httplib.OK

    def delete_record(self, record):
        """Delete a record; returns True on HTTP 200."""
        path = API_ROOT + 'hosts/%s.xml' % (record.id)
        self.connection.set_context({'resource': 'record', 'id': record.id})
        response = self.connection.request(action=path, method='DELETE')
        return response.status == httplib.OK

    def ex_get_zone_by_domain(self, domain):
        """
        Retrieve a zone object by the domain name.
        """
        path = API_ROOT + 'zones/%s.xml' % (domain)
        self.connection.set_context({'resource': 'zone', 'id': domain})
        data = self.connection.request(path).object
        zone = self._to_zone(elem=data)
        return zone

    def ex_force_slave_axfr(self, zone):
        """
        Force a zone transfer.
        """
        path = API_ROOT + 'zones/%s/force_slave_axfr.xml' % (zone.id)
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        response = self.connection.request(path, method='POST')
        assert response.status == httplib.ACCEPTED
        return zone

    def _to_zone_elem(self, domain=None, type=None, ttl=None, extra=None):
        # Serialize zone attributes to the XML payload the API expects.
        zone_elem = ET.Element('zone', {})

        if domain:
            domain_elem = ET.SubElement(zone_elem, 'domain')
            domain_elem.text = domain

        if type:
            ns_type_elem = ET.SubElement(zone_elem, 'ns-type')

            if type == 'master':
                ns_type_elem.text = 'pri_sec'
            elif type == 'slave':
                # Slave zones need a primary nameserver to transfer from.
                if not extra or 'ns1' not in extra:
                    raise LibcloudError('ns1 extra attribute is required ' +
                                        'when zone type is slave', driver=self)

                ns_type_elem.text = 'sec'
                ns1_elem = ET.SubElement(zone_elem, 'ns1')
                ns1_elem.text = extra['ns1']
            elif type == 'std_master':
                # TODO: Each driver should provide supported zone types
                # Slave name servers are elsewhere
                if not extra or 'slave-nameservers' not in extra:
                    # Bug fix: error message previously read "whenzone".
                    raise LibcloudError('slave-nameservers extra ' +
                                        'attribute is required when zone ' +
                                        'type is std_master', driver=self)

                ns_type_elem.text = 'pri'
                slave_nameservers_elem = ET.SubElement(zone_elem,
                                                       'slave-nameservers')
                slave_nameservers_elem.text = extra['slave-nameservers']

        if ttl:
            default_ttl_elem = ET.SubElement(zone_elem, 'default-ttl')
            default_ttl_elem.text = str(ttl)

        if extra and 'tag-list' in extra:
            tags = extra['tag-list']

            tags_elem = ET.SubElement(zone_elem, 'tag-list')
            tags_elem.text = ' '.join(tags)

        return zone_elem

    def _to_record_elem(self, name=None, type=None, data=None, extra=None):
        # Serialize record attributes to the XML payload the API expects.
        record_elem = ET.Element('host', {})

        if name:
            name_elem = ET.SubElement(record_elem, 'hostname')
            name_elem.text = name

        if type:
            type_elem = ET.SubElement(record_elem, 'host-type')
            type_elem.text = self.RECORD_TYPE_MAP[type]

        if data:
            data_elem = ET.SubElement(record_elem, 'data')
            data_elem.text = data

        if extra:
            if 'ttl' in extra:
                ttl_elem = ET.SubElement(record_elem, 'ttl',
                                         {'type': 'integer'})
                ttl_elem.text = str(extra['ttl'])

            if 'priority' in extra:
                # Only MX and SRV records support priority
                priority_elem = ET.SubElement(record_elem, 'priority',
                                              {'type': 'integer'})
                priority_elem.text = str(extra['priority'])

            if 'notes' in extra:
                notes_elem = ET.SubElement(record_elem, 'notes')
                notes_elem.text = extra['notes']

        return record_elem

    def _to_zones(self, elem):
        zones = []

        for item in findall(element=elem, xpath='zone'):
            zone = self._to_zone(elem=item)
            zones.append(zone)

        return zones

    def _to_zone(self, elem):
        # Parse a single <zone> element into a Zone object.
        id = findtext(element=elem, xpath='id')
        domain = findtext(element=elem, xpath='domain')
        type = findtext(element=elem, xpath='ns-type')
        # Zerigo's "pri*" ns-types map to master, everything else to slave.
        type = 'master' if type.find('pri') == 0 else 'slave'
        ttl = findtext(element=elem, xpath='default-ttl')

        hostmaster = findtext(element=elem, xpath='hostmaster')
        custom_ns = findtext(element=elem, xpath='custom-ns')
        custom_nameservers = findtext(element=elem, xpath='custom-nameservers')
        notes = findtext(element=elem, xpath='notes')
        nx_ttl = findtext(element=elem, xpath='nx-ttl')
        slave_nameservers = findtext(element=elem, xpath='slave-nameservers')
        tags = findtext(element=elem, xpath='tag-list')
        tags = tags.split(' ') if tags else []

        extra = {'hostmaster': hostmaster, 'custom-ns': custom_ns,
                 'custom-nameservers': custom_nameservers, 'notes': notes,
                 'nx-ttl': nx_ttl, 'slave-nameservers': slave_nameservers,
                 'tags': tags}
        zone = Zone(id=str(id), domain=domain, type=type, ttl=int(ttl),
                    driver=self, extra=extra)
        return zone

    def _to_records(self, elem, zone):
        records = []

        for item in findall(element=elem, xpath='host'):
            record = self._to_record(elem=item, zone=zone)
            records.append(record)

        return records

    def _to_record(self, elem, zone):
        # Parse a single <host> element into a Record object.
        id = findtext(element=elem, xpath='id')
        name = findtext(element=elem, xpath='hostname')
        type = findtext(element=elem, xpath='host-type')
        type = self._string_to_record_type(type)
        data = findtext(element=elem, xpath='data')

        notes = findtext(element=elem, xpath='notes')
        state = findtext(element=elem, xpath='state')
        fqdn = findtext(element=elem, xpath='fqdn')
        priority = findtext(element=elem, xpath='priority')

        extra = {'notes': notes, 'state': state, 'fqdn': fqdn,
                 'priority': priority}

        record = Record(id=id, name=name, type=type, data=data,
                        zone=zone, driver=self, extra=extra)
        return record

    def _get_more(self, last_key, value_dict):
        # Note: last_key in this case really is a "last_page".
        # TODO: Update base driver and change last_key to something more
        # generic - e.g. marker
        params = {}
        params['per_page'] = ITEMS_PER_PAGE
        params['page'] = last_key + 1 if last_key else 1

        transform_func_kwargs = {}
        if value_dict['type'] == 'zones':
            path = API_ROOT + 'zones.xml'
            # Bug fix: pagination parameters were previously not passed for
            # the zones request, so every call fetched the first page.
            response = self.connection.request(path, params=params)
            transform_func = self._to_zones
        elif value_dict['type'] == 'records':
            zone = value_dict['zone']
            path = API_ROOT + 'zones/%s/hosts.xml' % (zone.id)
            self.connection.set_context({'resource': 'zone', 'id': zone.id})
            response = self.connection.request(path, params=params)
            transform_func = self._to_records
            transform_func_kwargs['zone'] = value_dict['zone']

        exhausted = False
        # Total number of matching items, reported by the API.
        result_count = int(response.headers.get('x-query-count', 0))
        transform_func_kwargs['elem'] = response.object

        if (params['page'] * ITEMS_PER_PAGE) >= result_count:
            exhausted = True

        if response.status == httplib.OK:
            items = transform_func(**transform_func_kwargs)
            return items, params['page'], exhausted
        else:
            return [], None, True
| |
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
The classes in this module support the creation of an ISO for a library
creation process.
The following tasks need to be performed:
* split library layout into quadrants
* check current state of the stock racks provided
* create ISO sample stock rack for each quadrant (barcodes are provided)
* create worklists for the ISO sample stock racks (sample transfer from
single-molecule-design stock rack to pool stock rack) for each quadrant
* create tube handler worklists
* create sample transfer robot files
* upload data to a ticket
AAB
"""
from datetime import datetime
from everest.entities.utils import get_root_aggregate
from everest.querying.specifications import cntd
from thelma.tools.base import BaseTool
from thelma.tools.libcreation.base \
import MOLECULE_DESIGN_TRANSFER_VOLUME
from thelma.tools.libcreation.base \
import get_source_plate_transfer_volume
from thelma.tools.libcreation.base \
import get_stock_pool_buffer_volume
from thelma.tools.libcreation.base import NUMBER_MOLECULE_DESIGNS
from thelma.tools.libcreation.base import NUMBER_SECTORS
from thelma.tools.libcreation.base import PREPARATION_PLATE_VOLUME
from thelma.tools.worklists.tubehandler import TubeTransferData
from thelma.tools.worklists.tubehandler import XL20WorklistWriter
from thelma.tools.writers import LINEBREAK_CHAR
from thelma.tools.writers import TxtWriter
from thelma.interfaces import ITube
from thelma.interfaces import ITubeRack
from thelma.entities.iso import ISO_STATUS
from thelma.entities.liquidtransfer import PlannedWorklist
__docformat__ = 'reStructuredText en'
__all__ = ['LibraryCreationWorklistWriter',
'LibraryCreationXL20ReportWriter',
'LibraryCreationCyBioOverviewWriter']
class LibraryCreationWorklistWriter(BaseTool):
"""
Writes the worklist files for a library creation ISO. This comprises:
- 4 tube handler worklists (one for each quadrant)
- 4 tube handler reports (one for each quadrant)
- overview file
:Note: The files for the CyBio worklists cannot be generated here, because
this requires the stock tubes to be transferred.
**Return Value:** The worklist files as zip stream (mapped onto file names).
"""
NAME = 'Library Creation Worklist Writer'
#: Label for the worklists of the pool sample stock racks. The label
#: will be appended by the source rack barcodes.
SAMPLE_STOCK_WORKLIST_LABEL = 'from_'
#: The delimiter for the source rack barcodes in the label of the
#: stock transfer worklists.
SAMPLE_STOCK_WORKLIST_DELIMITER = '-'
    #: File name for a tube handler worklist file. The placeholders contain
    #: the layout number, the library name and the quadrant number.
FILE_NAME_XL20_WORKLIST = '%s-%s_xl20_worklist_Q%i.csv'
    #: File name for a tube handler report file. The placeholders contain
    #: the layout number, the library name and the quadrant number.
FILE_NAME_XL20_REPORT = '%s-%s_xl20_report_Q%i.txt'
    #: File name for the CyBio instructions info file. The placeholders
    #: contain the layout number and the library name.
FILE_NAME_CYBIO = '%s-%s-CyBio_instructions.txt'
    def __init__(self, library_creation_iso, tube_destination_racks,
                 pool_stock_racks, parent=None):
        """
        Constructor.

        :param library_creation_iso: The library creation ISO for which to
            generate the worklist files.
        :type library_creation_iso:
            :class:`thelma.entities.library.LibraryCreationIso`
        :param tube_destination_racks: The barcodes for the destination
            rack for the single molecule design tube (these racks have to be
            empty).
        :type tube_destination_racks: map of barcode lists
            (:class:`basestring`) mapped onto sector indices.
        :param pool_stock_racks: The barcodes for the pool stock racks
            (these racks have to have empty tubes in defined positions).
        :type pool_stock_racks: map of barcodes
            (:class:`basestring`) mapped onto sector indices.
        :param parent: Optional parent tool (forwarded to
            :class:`BaseTool`).
        """
        BaseTool.__init__(self, parent=parent)
        #: The library creation ISO for which to generate the worklist files.
        self.library_creation_iso = library_creation_iso
        #: The barcodes for the destination rack for the single molecule
        #: design tube (these racks have to be empty).
        self.tube_destination_racks = tube_destination_racks
        #: The barcodes for the pool stock racks (these racks have to have
        #: empty tubes in defined positions).
        self.pool_stock_racks = pool_stock_racks
        #: The name of the library that is created here (set during run).
        self.library_name = None
        #: The layout number of the ISO (set during run).
        self.layout_number = None
        #: Stores the generated file streams (mapped onto file names).
        self.__file_map = None
        #: Maps tube racks onto barcodes.
        self.__rack_map = None
        #: The library layout for the ISO.
        self.__library_layout = None
        #: Maps library position onto sector indices.
        self.__library_sectors = None
        #: Maps translated library position onto sector indices.
        self.__translated_sectors = None
        #: Maps tube onto tube barcodes.
        self.__tube_map = dict()
        #: The tube transfer data items for the tube handler worklist writer
        #: sorted by sector index.
        self.__tube_transfers = None
        #: Stores the rack location for each source rack (single molecule
        #: design pools).
        self.__source_rack_locations = None
def reset(self):
BaseTool.reset(self)
self.library_name = None
self.layout_number = None
self.__rack_map = dict()
self.__library_layout = None
self.__library_sectors = None
self.__translated_sectors = dict()
self.__tube_map = dict()
self.__tube_transfers = dict()
self.__file_map = dict()
self.__source_rack_locations = dict()
    def run(self):
        """
        Creates the worklist files.

        The steps run in a fixed order; each one is skipped as soon as an
        earlier step has recorded an error. On success the file map
        (streams keyed by file name) becomes the tool's return value.
        """
        self.reset()
        self.add_info('Start worklist file generation ...')

        # Input validation and data fetching.
        self.__check_input()
        if not self.has_errors(): self.__get_tube_racks()
        if not self.has_errors(): self.__get_library_layout()
#        if not self.has_errors(): self.__find_ignored_positions()
        # Rack validation (both checks run; errors are accumulated).
        if not self.has_errors():
            self.__check_tube_destination_racks()
            self.__check_pool_stock_racks()
        # Entity creation and file writing.
        if not self.has_errors(): self.__create_sample_stock_racks()
        if not self.has_errors(): self.__fetch_tube_locations()
        if not self.has_errors(): self.__write_tube_handler_files()
        if not self.has_errors(): self.__write_cybio_overview_file()
        if not self.has_errors():
            self.return_value = self.__file_map
            self.add_info('Worklist file generation completed.')
    def __check_input(self):
        # Checks the initialisation values: the ISO must be queued, and both
        # rack maps must be non-empty dicts with int sector keys. Errors are
        # accumulated via add_error rather than raised.
        self.add_debug('Check input values ...')

        if self._check_input_class('library creation ISO',
                                   self.library_creation_iso, LibraryCreationIso):
            status = self.library_creation_iso.status
            # Worklists may only be generated for ISOs that have not been
            # processed yet.
            if not status == ISO_STATUS.QUEUED:
                msg = 'Unexpected ISO status: "%s"' % (status)
                self.add_error(msg)
            self.library_name = self.library_creation_iso.iso_request.\
                                plate_set_label
            self.layout_number = self.library_creation_iso.layout_number

        if self._check_input_class('tube destination rack map',
                                   self.tube_destination_racks, dict):
            # Each sector maps to a *list* of destination rack barcodes.
            for sector_index, barcode_list in \
                                    self.tube_destination_racks.iteritems():
                if not self._check_input_class(
                        'sector index in the tube destination map',
                        sector_index, int): break
                if not self._check_input_class(
                        'barcode list in the tube destination map',
                        barcode_list, list): break
            if not len(self.tube_destination_racks) > 0:
                msg = 'There are no barcodes in the destination rack map!'
                self.add_error(msg)

        if self._check_input_class('pool stock rack map', self.pool_stock_racks,
                                   dict):
            # Each sector maps to a *single* pool stock rack barcode.
            for sector_index, barcode in self.pool_stock_racks.iteritems():
                if not self._check_input_class(
                        'sector index in the pool stock rack map',
                        sector_index, int): break
                if not self._check_input_class(
                        'barcode in the pool stock rack map',
                        barcode, basestring): break
            if not len(self.pool_stock_racks) > 0:
                msg = 'There are no barcodes in the pool stock rack map!'
                self.add_error(msg)
def __get_tube_racks(self):
# Fetches the tubes rack for the rack barcodes.
self.add_debug('Fetch tube racks ...')
tube_rack_agg = get_root_aggregate(ITubeRack)
not_found = []
for barcode in self.pool_stock_racks.values():
rack = tube_rack_agg.get_by_slug(barcode)
if rack is None:
not_found.append(barcode)
else:
self.__rack_map[barcode] = rack
for barcode_list in self.tube_destination_racks.values():
for barcode in barcode_list:
rack = tube_rack_agg.get_by_slug(barcode)
if rack is None:
not_found.append(barcode)
else:
self.__rack_map[barcode] = rack
if len(not_found) > 0:
msg = 'The following racks have not been found in the DB: %s!' \
% (', '.join(sorted(not_found)))
self.add_error(msg)
    def __get_library_layout(self):
        # Fetches the library layout and sorts its positions into quadrants.
        # Empty sectors are removed; for populated sectors each library
        # position is additionally translated into the coordinate system of
        # the (smaller) sector rack.
        self.add_debug('Fetch library layout ...') #
        converter = LibraryLayoutConverter(
            self.library_creation_iso.rack_layout,
            parent=self)
        self.__library_layout = converter.get_result()

        if self.__library_layout is None:
            msg = 'Error when trying to convert library layout.'
            self.add_error(msg)
        else:
            self.__library_sectors = QuadrantIterator.sort_into_sectors(
                working_layout=self.__library_layout,
                number_sectors=NUMBER_SECTORS)
            del_sectors = []
            for sector_index, positions in self.__library_sectors.iteritems():
                # Sectors without any library positions are dropped below
                # (cannot delete while iterating).
                if len(positions) < 1:
                    del_sectors.append(sector_index)
                    continue
                # NOTE(review): ONE_TO_MANY with row/column modifier 0 is
                # presumed to map 384-well sector positions onto 96-well
                # rack positions - confirm against RackSectorTranslator.
                translator = RackSectorTranslator(
                    NUMBER_SECTORS,
                    sector_index,
                    0,
                    behaviour=RackSectorTranslator.ONE_TO_MANY)
                translated_positions = []
                for lib_pos in positions:
                    translated_pos = \
                        translator.translate(lib_pos.rack_position)
                    translated_positions.append(translated_pos)
                self.__translated_sectors[sector_index] = translated_positions
            for sector_index in del_sectors:
                del self.__library_sectors[sector_index]
    def __check_tube_destination_racks(self):
        # Makes sure there is the right number of tube destination racks for
        # each quadrant and that all racks are empty. One rack per molecule
        # design is required; non-empty racks are reported in one aggregated
        # error at the end.
        self.add_debug('Check tube destination racks ...')

        not_empty = []

        for sector_index, barcodes in self.tube_destination_racks.iteritems():
            # Sectors without library positions were removed earlier and
            # need no destination racks.
            if not self.__library_sectors.has_key(sector_index): continue
            if not len(barcodes) >= NUMBER_MOLECULE_DESIGNS:
                msg = 'You need to provide %i empty racks for each rack ' \
                      'sector. For sector %i you have only provided ' \
                      '%i barcodes.' % (NUMBER_MOLECULE_DESIGNS,
                                        (sector_index + 1), len(barcodes))
                self.add_error(msg)
            for barcode in barcodes:
                rack = self.__rack_map[barcode]
                # Any container at all means the rack is not usable.
                if len(rack.containers) > 0: not_empty.append(barcode)

        if len(not_empty) > 0:
            msg = 'The following tube destination racks you have chosen are ' \
                  'not empty: %s.' % (', '.join(sorted(not_empty)))
            self.add_error(msg)
    def __check_pool_stock_racks(self):
        # Checks whether the pool stock racks comply with their assumed
        # sector layouts and whether all tubes are empty. For every sector:
        # each expected position must hold a tube without sample volume
        # (error otherwise); tubes in positions that should be empty only
        # yield a warning.
        self.add_debug('Check pool stock racks ...')

        for sector_index, positions in self.__translated_sectors.iteritems():
            if not self.pool_stock_racks.has_key(sector_index):
                # NOTE(review): "break" aborts the whole check after the
                # first missing rack, so racks for later sectors are not
                # validated in that case.
                msg = 'Please provide a pool stock rack for sector %i!' \
                      % (sector_index + 1)
                self.add_error(msg)
                break
            barcode = self.pool_stock_racks[sector_index]
            rack = self.__rack_map[barcode]
            # Index the rack's tubes by their rack position.
            tube_map = dict()
            for tube in rack.containers:
                tube_map[tube.location.position] = tube
            tube_missing = []
            not_empty = []
            add_tube = []
            for rack_pos in get_positions_for_shape(rack.rack_shape):
                if rack_pos in positions:
                    # Expected position: a tube must be present ...
                    if not tube_map.has_key(rack_pos):
                        tube_missing.append(rack_pos.label)
                        continue
                    tube = tube_map[rack_pos]
                    # ... and must not contain any sample volume yet.
                    if tube.sample is None:
                        continue
                    elif tube.sample.volume > 0:
                        not_empty.append(rack_pos.label)
                elif tube_map.has_key(rack_pos):
                    # Unexpected tube outside the sector layout.
                    add_tube.append(rack_pos.label)

            if len(tube_missing) > 0:
                msg = 'There are some tubes missing in the pool stock rack ' \
                      'for sector %i (%s): %s.' % ((sector_index + 1),
                       barcode, ', '.join(sorted(tube_missing)))
                self.add_error(msg)
            if len(not_empty) > 0:
                msg = 'Some tubes in the pool stock rack for sector %i (%s) ' \
                      'are not empty: %s.' % ((sector_index + 1), barcode,
                                              ', '.join(sorted(not_empty)))
                self.add_error(msg)
            if len(add_tube) > 0:
                msg = 'There are some tubes in the stock rack for sector %i ' \
                      '(%s) that are located in positions that should be ' \
                      'empty: %s.' % ((sector_index + 1), barcode,
                                      ', '.join(sorted(add_tube)))
                self.add_warning(msg)
def __create_sample_stock_racks(self):
# Creates the ISO sample stock rack (= the pool stock racks) for the
# library. The stock rack list of the ISO has to be reset before
# (in case of update).
self.add_debug('Create pool stock racks ...')
worklists = self.__create_takeout_worklists()
issrs = self.library_creation_iso.iso_sample_stock_racks
if len(issrs) < 1:
for sector_index, barcode in self.pool_stock_racks.iteritems():
if not worklists.has_key(sector_index): continue
rack = self.__rack_map[barcode]
worklist = worklists[sector_index]
IsoSampleStockRack(iso=self.library_creation_iso,
rack=rack, sector_index=sector_index,
planned_worklist=worklist)
else:
for issr in issrs:
issr.worklist = worklists[issr.sector_index]
barcode = self.pool_stock_racks[issr.sector_index]
issr.rack = self.__rack_map[barcode]
def __create_takeout_worklists(self):
# Creates the container transfer for the stock sample worklists (this
# is in theory a 1-to-1 rack transfer, but since the sources are tubes
# that can be moved we use container transfers instead).
self.add_debug('Create stock take out worklists ...')
worklists = dict()
volume = MOLECULE_DESIGN_TRANSFER_VOLUME / VOLUME_CONVERSION_FACTOR
for sector_index in self.pool_stock_racks.keys():
if not self.__translated_sectors.has_key(sector_index): continue
positions = self.__translated_sectors[sector_index]
dest_rack_barcodes = self.tube_destination_racks[sector_index]
label = self.SAMPLE_STOCK_WORKLIST_LABEL \
+ (self.SAMPLE_STOCK_WORKLIST_DELIMITER.join(
dest_rack_barcodes))
worklist = PlannedWorklist(label=label)
for rack_pos in positions:
pct = PlannedContainerTransfer(volume=volume,
source_position=rack_pos,
target_position=rack_pos)
worklist.planned_transfers.append(pct)
worklists[sector_index] = worklist
return worklists
def __fetch_tube_locations(self):
# Fetches the rack barcode amd tube location for every scheduled
# tube.
self.add_debug('Fetch tube locations ...')
self.__fetch_tubes()
if not self.has_errors():
source_racks = set()
for tube in self.__tube_map.values():
source_rack = tube.location.rack
source_racks.add(source_rack)
self.__get_rack_locations(source_racks)
self.__create_tube_transfers()
def __fetch_tubes(self):
# Fetches tube (for location data), from the the DB. Uses the tube
# barcodes from the library layouts.
self.add_debug('Fetch tubes ...')
tube_barcodes = []
for lib_pos in self.__library_layout.working_positions():
for barcode in lib_pos.stock_tube_barcodes:
tube_barcodes.append(barcode)
tube_agg = get_root_aggregate(ITube)
tube_agg.filter = cntd(barcode=tube_barcodes)
iterator = tube_agg.iterator()
while True:
try:
tube = iterator.next()
except StopIteration:
break
else:
self.__tube_map[tube.barcode] = tube
if not len(tube_barcodes) == len(self.__tube_map):
missing_tubes = []
for tube_barcode in tube_barcodes:
if not self.__tube_map.has_key(tube_barcode):
missing_tubes.append(tube_barcode)
msg = 'Could not find tubes for the following tube barcodes: %s.' \
% (', '.join(sorted(missing_tubes)))
self.add_error(msg)
    def __create_tube_transfers(self):
        # Assigns each scheduled tube to its target position and records a
        # TubeTransferData item for it, grouped by sector.
        self.add_debug('Create tube transfer data ...')
        for sector_index, positions in self.__library_sectors.iteritems():
            # Translates 384-well library positions onto the single-sector
            # destination rack footprint.
            # NOTE(review): assumes ONE_TO_MANY translation matches the
            # destination rack shape - confirm against the rack setup.
            translator = RackSectorTranslator(number_sectors=NUMBER_SECTORS,
                                source_sector_index=sector_index,
                                target_sector_index=0,
                                enforce_type=RackSectorTranslator.ONE_TO_MANY)
            rack_barcodes = self.tube_destination_racks[sector_index]
            tube_transfers = []
            for lib_pos in positions:
                target_pos_384 = lib_pos.rack_position
                target_pos_96 = translator.translate(target_pos_384)
                # One transfer per single molecule design: design i goes to
                # destination rack i, all at the same translated position.
                for i in range(NUMBER_MOLECULE_DESIGNS):
                    tube_barcode = lib_pos.stock_tube_barcodes[i]
                    tube = self.__tube_map[tube_barcode]
                    tube_pos = tube.location.position
                    tube_rack = tube.location.rack
                    target_rack_barcode = rack_barcodes[i]
                    ttd = TubeTransferData(tube_barcode=tube_barcode,
                                src_rack_barcode=tube_rack.barcode,
                                src_pos=tube_pos,
                                trg_rack_barcode=target_rack_barcode,
                                trg_pos=target_pos_96)
                    tube_transfers.append(ttd)
            self.__tube_transfers[sector_index] = tube_transfers
def __get_rack_locations(self, source_racks):
# Returns a map that stores the rack location for each source rack
# (DB query).
self.add_debug('Fetch rack locations ...')
for src_rack in source_racks:
barcode = src_rack.barcode
loc = src_rack.location
if loc is None:
self.__source_rack_locations[barcode] = 'not found'
continue
name = loc.name
index = loc.index
if index is None or len(index) < 1:
self.__source_rack_locations[barcode] = name
else:
self.__source_rack_locations[barcode] = '%s, index: %s' \
% (name, index)
    def __write_tube_handler_files(self):
        # Creates the tube handler (XL20) worklist and report file streams
        # for every quadrant and stores them in the file map.
        self.add_debug('Write XL20 files ...')
        for sector_index, tube_transfers in self.__tube_transfers.iteritems():
            worklist_writer = XL20WorklistWriter(tube_transfers, parent=self)
            worklist_stream = worklist_writer.get_result()
            # get_result() returns None on failure.
            if worklist_stream is None:
                msg = 'Error when trying to write tube handler worklist ' \
                      'file for sector %i.' % (sector_index + 1)
                self.add_error(msg)
            else:
                fn = self.FILE_NAME_XL20_WORKLIST % (self.library_name,
                             self.layout_number, (sector_index + 1))
                self.__file_map[fn] = worklist_stream
            # NOTE(review): unlike the worklist writer above, the report
            # writer is not passed parent=self, so its internal messages are
            # not forwarded to this tool's log - confirm this is intended.
            report_writer = LibraryCreationXL20ReportWriter(
                tube_transfers, self.library_name, self.layout_number,
                sector_index, self.__source_rack_locations)
            report_stream = report_writer.get_result()
            if report_stream is None:
                msg = 'Error when trying to write tube handler report for ' \
                      'sector %i.' % (sector_index + 1)
                self.add_error(msg)
            else:
                fn = self.FILE_NAME_XL20_REPORT % (self.library_name,
                            self.layout_number, (sector_index + 1))
                self.__file_map[fn] = report_stream
def __write_cybio_overview_file(self):
# Generates the file stream with the CyBio instructions. This file is
# not created by a normal series worklist file writer because the
# tubes for the first steck (pool generation) are not in place yet.
self.add_debug('Generate CyBio info file ...')
writer = LibraryCreationCyBioOverviewWriter(
self.library_creation_iso, self.pool_stock_racks,
self.tube_destination_racks, parent=self)
stream = writer.get_result()
if stream is None:
msg = 'Error when trying to write CyBio info file.'
self.add_error(msg)
else:
fn = self.FILE_NAME_CYBIO % (self.library_name, self.layout_number)
self.__file_map[fn] = stream
class LibraryCreationXL20ReportWriter(TxtWriter):
    """
    Generates an overview for the tube handling of a particular quadrant
    in a library creation ISO.

    **Return Value:** stream (TXT)
    """
    NAME = 'Library Creation XL20 Report Writer'
    #: The main headline of the file.
    BASE_MAIN_HEADER = 'XL20 Worklist Generation Report / %s / %s'
    #: The header text for the general section.
    GENERAL_HEADER = 'General Settings'
    #: This line presents the library name.
    LIBRARY_LINE = 'ISO for Library: %s'
    #: This line presents the layout number.
    LAYOUT_NUMBER_LINE = 'Layout number: %i'
    #: This line presents the quadrant number.
    SECTOR_NUMBER_LINE = 'Sector number: %i'
    #: This line presents the total number of stock tubes used.
    TUBE_NO_LINE = 'Total number of tubes: %i'
    #: This line presents the transfer volume.
    VOLUME_LINE = 'Volume: %.1f ul'
    #: The header text for the destination racks section.
    DESTINATION_RACKS_HEADER = 'Destination Racks'
    #: The body for the destination racks section.
    DESTINATION_RACK_BASE_LINE = '%s'
    #: The header for the source racks section.
    SOURCE_RACKS_HEADER = 'Source Racks'
    #: The body for the source racks section.
    SOURCE_RACKS_BASE_LINE = '%s (%s)'

    def __init__(self, tube_transfers, library_name, layout_number,
                 sector_index, source_rack_locations, parent=None):
        """
        Constructor.

        :param tube_transfers: Define which tube goes where.
        :type tube_transfers: list of :class:`TubeTransferData`
        :param str library_name: The library we are creating.
        :param int layout_number: The layout for which we are creating racks.
        :param int sector_index: The sector we are dealing with.
        :param source_rack_locations: Maps rack locations onto rack barcodes.
        :type source_rack_locations: :class:`dict`
        :param parent: The parent tool collecting log messages (optional).
        """
        # Bug fix: "parent" used to be silently dropped here
        # (TxtWriter.__init__(self, parent=None)), detaching this writer
        # from the calling tool's message stack.
        TxtWriter.__init__(self, parent=parent)
        #: Define which tube goes where.
        self.tube_transfers = tube_transfers
        #: The library we are creating.
        self.library_name = library_name
        #: The layout for which we are creating racks
        self.layout_number = layout_number
        #: The sector we are dealing with.
        self.sector_index = sector_index
        #: Maps rack locations onto rack barcodes.
        self.source_rack_locations = source_rack_locations

    def _check_input(self):
        """
        Checks if the tool has obtained correct input values.
        """
        if self._check_input_class('tube transfer list', self.tube_transfers,
                                   list):
            for ttd in self.tube_transfers:
                if not self._check_input_class('tube transfer', ttd,
                                               TubeTransferData): break
        self._check_input_class('library name', self.library_name, basestring)
        self._check_input_class('layout number', self.layout_number, int)
        self._check_input_class('sector index', self.sector_index, int)
        self._check_input_class('rack location map',
                                self.source_rack_locations, dict)

    def _write_stream_content(self):
        """
        Writes into the streams (main headline plus the general,
        destination rack and source rack sections).
        """
        self.add_debug('Write stream ...')
        self.__write_main_headline()
        self.__write_general_section()
        self.__write_destination_racks_section()
        self.__write_source_racks_section()

    def __write_main_headline(self):
        # Writes the main head line (includes generation date and time).
        now = datetime.now()
        date_string = now.strftime('%d.%m.%Y')
        time_string = now.strftime('%H:%M')
        main_headline = self.BASE_MAIN_HEADER % (date_string, time_string)
        self._write_headline(main_headline, underline_char='=',
                             preceding_blank_lines=0, trailing_blank_lines=1)

    def __write_general_section(self):
        # The general section contains library name, sector index, layout
        # number, the number of tubes and the transfer volume.
        self._write_headline(self.GENERAL_HEADER, preceding_blank_lines=1)
        general_lines = [self.LIBRARY_LINE % (self.library_name),
                         self.LAYOUT_NUMBER_LINE % (self.layout_number),
                         self.SECTOR_NUMBER_LINE % (self.sector_index + 1),
                         self.TUBE_NO_LINE % (len(self.tube_transfers)),
                         self.VOLUME_LINE % (MOLECULE_DESIGN_TRANSFER_VOLUME)]
        self._write_body_lines(general_lines)

    def __write_destination_racks_section(self):
        # Writes the destination rack section. The barcodes are sorted so
        # that the report content is deterministic (sets have no defined
        # iteration order); this also matches the source rack section.
        barcodes = set()
        for ttd in self.tube_transfers:
            barcodes.add(ttd.trg_rack_barcode)
        self._write_headline(self.DESTINATION_RACKS_HEADER)
        lines = []
        for barcode in sorted(barcodes):
            lines.append(self.DESTINATION_RACK_BASE_LINE % (barcode))
        self._write_body_lines(lines)

    def __write_source_racks_section(self):
        # Writes the source rack section (barcode plus stored location).
        barcodes = set()
        for ttd in self.tube_transfers:
            barcodes.add(ttd.src_rack_barcode)
        sorted_barcodes = sorted(list(barcodes))
        self._write_headline(self.SOURCE_RACKS_HEADER)
        lines = []
        for barcode in sorted_barcodes:
            loc = self.source_rack_locations[barcode]
            if loc is None: loc = 'unknown location'
            lines.append(self.SOURCE_RACKS_BASE_LINE % (barcode, loc))
        self._write_body_lines(lines)
class LibraryCreationCyBioOverviewWriter(TxtWriter):
    """
    This tool writes a CyBio overview file for the CyBio steps involved in
    the creation of library plates.
    We do not use the normal series worklist writer here because the stock
    tubes for the single molecule designs are not in the right positions yet.
    **Return Value:** stream (TXT)
    """
    NAME = 'Library Creation CyBio Writer'
    #: Header for the pool creation section.
    HEADER_POOL_CREATION = 'Pool Creation'
    #: Header for the preparation plate transfer section.
    HEADER_SOURCE_CREATION = 'Transfer from Stock Rack to Preparation Plates'
    #: Header for the aliquot transfer section.
    HEADER_ALIQUOT_TRANSFER = 'Transfer to Library Aliquot Plates'
    #: Base line for transfer volumes.
    VOLUME_LINE = 'Volume: %.1f ul'
    #: Base line for buffer volumes.
    BUFFER_LINE = 'Assumed buffer volume: %.1f ul'
    #: Base line for source racks (singular, for prep plate creation).
    SOURCE_LINE = 'Source rack: %s'
    #: Base line for source racks (plural, for pool creation).
    SOURCE_LINE_PLURAL = 'Source racks: %s'
    #: Base line for target racks (singular).
    TARGET_LINE = 'Target rack: %s'
    #: Base line for target racks (plural, for aliquot section).
    TARGET_LINE_PLURAL = 'Target racks: %s'
    #: Base line for quadrant depictions.
    QUADRANT_LINE = 'Q%i:'
    def __init__(self, library_creation_iso, pool_stock_racks,
                 tube_destination_racks, parent=None):
        """
        Constructor.
        :param library_creation_iso: The library creation ISO for which to
            generate the file.
        :type library_creation_iso:
            :class:`thelma.entities.library.LibraryCreationIso`
        :param pool_stock_racks: The barcodes for the pool stock racks
            (these racks have to have empty tubes in defined positions).
        :type pool_stock_racks: map of barcodes
            (:class:`basestring`) mapped onto sector indices.
        :param tube_destination_racks: The barcodes for the destination
            rack for the single molecule design tube (these racks have to be
            empty).
        :type tube_destination_racks: map of barcode lists
            (:class:`basestring`) mapped onto sector indices.
        :param parent: The parent tool collecting log messages (optional).
        """
        TxtWriter.__init__(self, parent=parent)
        #: The library creation ISO for which to generate the file.
        self.library_creation_iso = library_creation_iso
        #: The barcodes for the destination racks for the single molecule
        #: design tubes.
        self.tube_destination_racks = tube_destination_racks
        #: The barcodes for the pool stock racks.
        self.pool_stock_racks = pool_stock_racks
        #: The library source (preparation) plates mapped onto
        #: rack sectors.
        self.__source_plates = None
    def reset(self):
        # Resets the writer state; the source plate map is rebuilt on run.
        TxtWriter.reset(self)
        self.__source_plates = dict()
    def _check_input(self):
        # Validates the ISO (must be queued) and the two rack maps
        # (key/value types and non-emptiness).
        self.add_debug('Check input values ...')
        if self._check_input_class('library creation ISO',
                                   self.library_creation_iso, LibraryCreationIso):
            status = self.library_creation_iso.status
            if not status == ISO_STATUS.QUEUED:
                msg = 'Unexpected ISO status: "%s"' % (status)
                self.add_error(msg)
        if self._check_input_class('tube destination rack map',
                                   self.tube_destination_racks, dict):
            for sector_index, barcode_list in \
                                    self.tube_destination_racks.iteritems():
                if not self._check_input_class(
                        'sector index in the tube destination map',
                        sector_index, int): break
                if not self._check_input_class(
                        'barcode list in the tube destination map',
                        barcode_list, list): break
            if not len(self.tube_destination_racks) > 0:
                msg = 'There are no barcodes in the destination rack map!'
                self.add_error(msg)
        if self._check_input_class('pool stock rack map', self.pool_stock_racks,
                                   dict):
            for sector_index, barcode in self.pool_stock_racks.iteritems():
                if not self._check_input_class(
                        'sector index in the pool stock rack map',
                        sector_index, int): break
                if not self._check_input_class(
                        'barcode in the pool stock rack map',
                        barcode, basestring): break
            if not len(self.pool_stock_racks) > 0:
                msg = 'There are no barcodes in the pool stock rack map!'
                self.add_error(msg)
    def _write_stream_content(self):
        """
        Writes into the streams: pool creation, preparation plate and
        aliquot transfer sections (in that order).
        """
        self.add_debug('Write stream ...')
        self.__write_pool_creation_section()
        self.__get_source_plates()
        self.__write_prep_creation_section()
        self.__write_aliquot_part()
    def __write_pool_creation_section(self):
        # This is the stock transfer part (creating pools from single
        # molecule designs): per sector, single-design tube racks are the
        # sources and the pool stock rack is the target.
        self.add_debug('Create pool section ...')
        self._write_headline(header_text=self.HEADER_POOL_CREATION,
                             preceding_blank_lines=0)
        lines = []
        volume_line = self.VOLUME_LINE % (MOLECULE_DESIGN_TRANSFER_VOLUME)
        volume_line += ' each'
        lines.append(volume_line)
        buffer_line = self.BUFFER_LINE % get_stock_pool_buffer_volume()
        lines.append(buffer_line)
        for sector_index in sorted(self.tube_destination_racks.keys()):
            lines.append('')
            lines.append(self.QUADRANT_LINE % (sector_index + 1))
            barcodes = self.tube_destination_racks[sector_index]
            target_rack = self.pool_stock_racks[sector_index]
            src_line = self.SOURCE_LINE_PLURAL % (', '.join(sorted(barcodes)))
            lines.append(src_line)
            lines.append(self.TARGET_LINE % (target_rack))
        self._write_body_lines(lines)
    def __get_source_plates(self):
        # Maps library source (preparation) plates onto sector indices.
        for lsp in self.library_creation_iso.library_source_plates:
            self.__source_plates[lsp.sector_index] = lsp.plate
    def __write_prep_creation_section(self):
        # This part deals with the transfer from pool stock racks to
        # preparation (source) plates.
        self.add_debug('Create source plate section ...')
        self._write_headline(header_text=self.HEADER_SOURCE_CREATION)
        lines = []
        transfer_volume = get_source_plate_transfer_volume()
        volume_line = self.VOLUME_LINE % (transfer_volume)
        lines.append(volume_line)
        buffer_volume = PREPARATION_PLATE_VOLUME - transfer_volume
        buffer_line = self.BUFFER_LINE % buffer_volume
        lines.append(buffer_line)
        for sector_index in sorted(self.pool_stock_racks.keys()):
            lines.append('')
            lines.append(self.QUADRANT_LINE % (sector_index + 1))
            pool_barcode = self.pool_stock_racks[sector_index]
            src_plate = self.__source_plates[sector_index]
            src_term = '%s (%s)' % (src_plate.barcode, src_plate.label)
            lines.append(self.SOURCE_LINE % (pool_barcode))
            lines.append(self.TARGET_LINE % (src_term))
        self._write_body_lines(lines)
    def __write_aliquot_part(self):
        # This part deals with the transfer from the preparation (source)
        # plates to the library aliquot plates.
        self.add_debug('Write aliquot transfer section ...')
        self._write_headline(self.HEADER_ALIQUOT_TRANSFER)
        lines = []
        lines.append(self.SOURCE_LINE_PLURAL % '')
        for sector_index in sorted(self.__source_plates.keys()):
            line = self.QUADRANT_LINE % ((sector_index + 1))
            src_plate = self.__source_plates[sector_index]
            src_term = ' %s (%s)' % (src_plate.barcode, src_plate.label)
            line += '%s' % (src_term)
            lines.append(line)
        aliquot_plates = dict()
        for iap in self.library_creation_iso.iso_aliquot_plates:
            plate = iap.plate
            aliquot_plates[plate.label] = plate.barcode
        lines.append(LINEBREAK_CHAR)
        lines.append(self.TARGET_LINE_PLURAL % '')
        for label in sorted(aliquot_plates.keys()):
            barcode = aliquot_plates[label]
            trg_term = '%s (%s)' % (barcode, label)
            lines.append(trg_term)
        self._write_body_lines(lines)
| |
##
# @file dct_unitest.py
# @author Yibo Lin
# @date Mar 2019
#
import pdb
import os
import sys
import numpy as np
import unittest
import torch
from torch.autograd import Function, Variable
import time
import scipy
from scipy import fftpack
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from dreamplace.ops.dct import dct2_fft2
from dreamplace.ops.dct import discrete_spectral_transform
from dreamplace.ops.dct import dct_lee
from dreamplace.ops.dct import dct
sys.path.pop()
dtype = torch.float32
class DCTOpTest(unittest.TestCase):
    """Checks the custom DCT/IDCT operator variants (N-FFT, 2N-FFT, Lee's
    algorithm and the 2D-FFT based implementation) against the reference
    implementations in discrete_spectral_transform, on the CPU and, when
    CUDA is available, on the GPU.

    Refactored from copy-pasted per-variant blocks into device-aware
    helpers; this also removes a mislabeled "cuda" debug print on the
    CPU fft2 path.
    """

    def _assert_transform(self, op, x, golden, **tolerances):
        """Run ``op.forward`` on ``x``, move the result to the CPU and
        compare it against the golden numpy array."""
        value = op.forward(x)
        np.testing.assert_allclose(value.cpu().data.numpy(), golden,
                                   **tolerances)

    def _check_all(self, op_factories, x, golden, **tolerances):
        """Check every operator built by ``op_factories`` on the CPU and,
        if CUDA is available, on the GPU.

        Each factory receives the input tensor so it can construct an
        operator for the matching device (needed by the fft2 variants,
        whose expk tables must live on the input's device).
        """
        for make_op in op_factories:
            self._assert_transform(make_op(x), x, golden, **tolerances)
        if torch.cuda.device_count():
            x_cuda = x.cuda()
            for make_op in op_factories:
                self._assert_transform(make_op(x_cuda), x_cuda, golden,
                                       **tolerances)

    def test_dctRandom(self):
        """1D DCT of a random matrix against the 2N-FFT reference."""
        N = 4
        x = torch.empty(N, N, dtype=dtype).uniform_(0, 10.0)
        golden_value = discrete_spectral_transform.dct_2N(x).data.numpy()
        self._check_all([lambda t: dct.DCT(algorithm='N'),
                         lambda t: dct.DCT(algorithm='2N'),
                         lambda t: dct_lee.DCT()],
                        x, golden_value, rtol=1e-6, atol=1e-5)

    def test_idctRandom(self):
        """1D IDCT round trip: forward via dct_N, inverse reference
        idct_2N."""
        N = 4
        x = torch.empty(N, N, dtype=dtype).uniform_(0, 10.0)
        y = discrete_spectral_transform.dct_N(x)
        golden_value = discrete_spectral_transform.idct_2N(y).data.numpy()
        self._check_all([lambda t: dct.IDCT(algorithm='N'),
                         lambda t: dct.IDCT(algorithm='2N'),
                         lambda t: dct_lee.IDCT()],
                        y, golden_value, rtol=1e-5)

    def test_dct2Random(self):
        """2D DCT (all four implementations) against dct2_N."""
        torch.manual_seed(10)
        M = 4
        N = 8
        x = torch.empty(M, N, dtype=dtype).uniform_(0, 10.0)
        expkM = discrete_spectral_transform.get_exact_expk(
                M, dtype=x.dtype, device=x.device)
        expkN = discrete_spectral_transform.get_exact_expk(
                N, dtype=x.dtype, device=x.device)
        golden_value = discrete_spectral_transform.dct2_N(x).data.numpy()
        self._check_all(
                [lambda t: dct.DCT2(algorithm='N'),
                 lambda t: dct.DCT2(algorithm='2N'),
                 lambda t: dct_lee.DCT2(),
                 lambda t: dct2_fft2.DCT2(expkM.to(t.device),
                                          expkN.to(t.device))],
                x, golden_value, rtol=1e-6, atol=1e-5)

    def test_idct2Random(self):
        """2D IDCT round trip via dct2_2N / idct2_2N (double precision
        integer-valued input)."""
        torch.manual_seed(10)
        M = 4
        N = 8
        x = torch.empty(M, N, dtype=torch.int32).random_(0, 10).double()
        expkM = discrete_spectral_transform.get_exact_expk(
                M, dtype=x.dtype, device=x.device)
        expkN = discrete_spectral_transform.get_exact_expk(
                N, dtype=x.dtype, device=x.device)
        y = discrete_spectral_transform.dct2_2N(x)
        golden_value = discrete_spectral_transform.idct2_2N(y).data.numpy()
        self._check_all(
                [lambda t: dct.IDCT2(algorithm='N'),
                 lambda t: dct.IDCT2(algorithm='2N'),
                 lambda t: dct_lee.IDCT2(),
                 lambda t: dct2_fft2.IDCT2(expkM.to(t.device),
                                           expkN.to(t.device))],
                y, golden_value, rtol=1e-6, atol=1e-5)

    def test_idxct2Random(self):
        """IDXCT against the reference idxt(x, 0)."""
        torch.manual_seed(10)
        M = 4
        N = 8
        x = torch.empty(M, N, dtype=torch.int32).random_(0, 10).double()
        golden_value = discrete_spectral_transform.idxt(x, 0).data.numpy()
        self._check_all([lambda t: dct.IDXCT(), lambda t: dct_lee.IDXCT()],
                        x, golden_value, atol=1e-14)
class DSTOpTest(unittest.TestCase):
    """Checks the custom DST/IDST/IDXST operators against reference values
    computed with scipy.fftpack and discrete_spectral_transform, on the CPU
    and, when CUDA is available, on the GPU.

    Refactored from copy-pasted per-variant blocks into device-aware
    helpers (same scheme as DCTOpTest; kept local so the class stands on
    its own).
    """

    def _assert_transform(self, op, x, golden, **tolerances):
        """Run ``op.forward`` on ``x``, move the result to the CPU and
        compare it against the golden numpy array."""
        value = op.forward(x)
        np.testing.assert_allclose(value.cpu().data.numpy(), golden,
                                   **tolerances)

    def _check_all(self, op_factories, x, golden, **tolerances):
        """Check every operator built by ``op_factories`` on the CPU and,
        if CUDA is available, on the GPU. Each factory receives the input
        tensor so it can build the operator for the matching device."""
        for make_op in op_factories:
            self._assert_transform(make_op(x), x, golden, **tolerances)
        if torch.cuda.device_count():
            x_cuda = x.cuda()
            for make_op in op_factories:
                self._assert_transform(make_op(x_cuda), x_cuda, golden,
                                       **tolerances)

    def test_dstRandom(self):
        """DST of a random matrix against scipy.fftpack.dst."""
        N = 4
        x = torch.empty(N, N, dtype=dtype).uniform_(0, 10.0)
        # The custom DST is scaled by 1/N relative to scipy's convention.
        golden_value = fftpack.dst(x.data.numpy()) / N
        self._check_all([lambda t: dct.DST(), lambda t: dct_lee.DST()],
                        x, golden_value, rtol=1e-5)

    def test_idstRandom(self):
        """IDST round trip: forward via scipy.fftpack.dst, inverse
        reference via scipy.fftpack.idst."""
        N = 4
        x = torch.empty(N, N, dtype=dtype).uniform_(0, 10.0)
        y = torch.from_numpy(fftpack.dst(x.data.numpy()))
        golden_value = fftpack.idst(y.data.numpy())
        self._check_all([lambda t: dct.IDST(), lambda t: dct_lee.IDST()],
                        y, golden_value, rtol=1e-5)

    def test_idxst2Random(self):
        """IDXST against the reference idxt(x, 1)."""
        torch.manual_seed(10)
        M = 4
        N = 8
        x = torch.empty(M, N, dtype=torch.int32).random_(0, 10).double()
        golden_value = discrete_spectral_transform.idxt(x, 1).data.numpy()
        self._check_all([lambda t: dct.IDXST(), lambda t: dct_lee.IDXST()],
                        x, golden_value, atol=1e-14)
class DXTOpTest(unittest.TestCase):
    """Checks the custom 2D inverse cosine/sine transform operators against
    the reference implementations in discrete_spectral_transform."""

    def _make_input(self):
        """Deterministic 4x8 integer-valued double tensor shared by all cases."""
        torch.manual_seed(10)
        x = torch.empty(4, 8, dtype=torch.int32).random_(0, 10).double()
        print("2D x")
        print(x)
        return x

    def _verify(self, op, x, golden, label, scale=1):
        """Run op.forward(x), print the result, compare to golden * scale."""
        result = op.forward(x)
        if result.is_cuda:
            result = result.cpu()
        print(label)
        print(result.data.numpy())
        np.testing.assert_allclose(result.data.numpy(), golden * scale, atol=1e-14)

    def test_idcct2Random(self):
        x = self._make_input()
        golden = discrete_spectral_transform.idcct2(x).data.numpy()
        print("2D golden_value")
        print(golden)
        self._verify(dct.IDCCT2(), x, golden, "dxt_value")
        self._verify(dct_lee.IDCCT2(), x, golden, "dxt_value")
        if torch.cuda.device_count():
            self._verify(dct.IDCCT2(), x.cuda(), golden, "dxt_value cuda")
            self._verify(dct_lee.IDCCT2(), x.cuda(), golden, "dxt_value cuda")

    def test_idcst2Random(self):
        x = self._make_input()
        golden = discrete_spectral_transform.idcst2(x).data.numpy()
        print("2D golden_value")
        print(golden)
        self._verify(dct.IDCST2(), x, golden, "dxt_value")
        self._verify(dct_lee.IDCST2(), x, golden, "dxt_value")
        if torch.cuda.device_count():
            self._verify(dct.IDCST2(), x.cuda(), golden, "dxt_value cuda")
            self._verify(dct_lee.IDCST2(), x.cuda(), golden, "dxt_value cuda")

    def test_idsct2Random(self):
        x = self._make_input()
        golden = discrete_spectral_transform.idsct2(x).data.numpy()
        print("2D golden_value")
        print(golden)
        self._verify(dct.IDSCT2(), x, golden, "dxt_value")
        self._verify(dct_lee.IDSCT2(), x, golden, "dxt_value")
        if torch.cuda.device_count():
            self._verify(dct.IDSCT2(), x.cuda(), golden, "dxt_value cuda")
            self._verify(dct_lee.IDSCT2(), x.cuda(), golden, "dxt_value cuda")

    def test_idct_idxstRandom(self):
        x = self._make_input()
        expk_m = discrete_spectral_transform.get_exact_expk(x.size(0), dtype=x.dtype, device=x.device)
        expk_n = discrete_spectral_transform.get_exact_expk(x.size(1), dtype=x.dtype, device=x.device)
        golden = discrete_spectral_transform.idct_idxst(x).data.numpy()
        print("2D golden_value")
        print(golden)
        # note the scale factor: the custom kernels return 2x the reference
        self._verify(dct.IDCT_IDXST(), x, golden, "2D dct.idct_idxst", scale=2)
        self._verify(dct2_fft2.IDCT_IDXST(expk_m, expk_n), x, golden,
                     "2D dct2_fft2.idct_idxst cuda", scale=2)
        if torch.cuda.device_count():
            self._verify(dct.IDCT_IDXST(), x.cuda(), golden,
                         "2D dct.idct_idxst cuda", scale=2)
            self._verify(dct2_fft2.IDCT_IDXST(expk_m.cuda(), expk_n.cuda()), x.cuda(), golden,
                         "2D dct2_fft2.idct_idxst cuda", scale=2)

    def test_idxst_idctRandom(self):
        x = self._make_input()
        expk_m = discrete_spectral_transform.get_exact_expk(x.size(0), dtype=x.dtype, device=x.device)
        expk_n = discrete_spectral_transform.get_exact_expk(x.size(1), dtype=x.dtype, device=x.device)
        golden = discrete_spectral_transform.idxst_idct(x).data.numpy()
        print("2D golden_value")
        print(golden)
        # note the scale factor: the custom kernels return 2x the reference
        self._verify(dct.IDXST_IDCT(), x, golden, "2D dct.idxst_idct", scale=2)
        self._verify(dct2_fft2.IDXST_IDCT(expk_m, expk_n), x, golden,
                     "2D dct2_fft2.idxst_idct cuda", scale=2)
        if torch.cuda.device_count():
            self._verify(dct.IDXST_IDCT(), x.cuda(), golden,
                         "2D dct.idxst_idct cuda", scale=2)
            self._verify(dct2_fft2.IDXST_IDCT(expk_m.cuda(), expk_n.cuda()), x.cuda(), golden,
                         "2D dct2_fft2.idxst_idct cuda", scale=2)
def eval_torch_rfft1d(x, runs):
    """Benchmark torch.rfft/irfft with signal_ndim=1 on `x`, averaged over `runs`."""
    # warm-up loop: excludes cuFFT plan creation from the timed section
    for i in range(100):
        a = torch.rfft(x, signal_ndim=1, onesided=True)
    torch.cuda.synchronize()
    tt = time.time()
    for i in range(runs):
        a = torch.rfft(x, signal_ndim=1, onesided=True)
    torch.cuda.synchronize()
    print("torch.rfft1d takes %.7f ms" % ((time.time()-tt)/runs*1000))
    # inverse transform; signal_sizes drops the batch dim
    # NOTE(review): assumes x is 2D with batched 1-D signals on the last dim — confirm
    b = torch.irfft(a, signal_ndim=1, onesided=True, signal_sizes=x.shape[1:])
    torch.cuda.synchronize()
    tt = time.time()
    for i in range(runs):
        b = torch.irfft(a, signal_ndim=1, onesided=True, signal_sizes=x.shape[1:])
    torch.cuda.synchronize()
    print("torch.irfft1d takes %.7f ms" % ((time.time()-tt)/runs*1000))
    print("")
def eval_torch_rfft2d(x, runs):
    """Benchmark torch.rfft/irfft with signal_ndim=2 on `x`, averaged over `runs`."""
    # warm-up loop: excludes cuFFT plan creation from the timed section
    for i in range(100):
        a = torch.rfft(x, signal_ndim=2, onesided=True)
    torch.cuda.synchronize()
    tt = time.time()
    for i in range(runs):
        a = torch.rfft(x, signal_ndim=2, onesided=True)
    torch.cuda.synchronize()
    print("torch.rfft2d takes %.7f ms" % ((time.time()-tt)/runs*1000))
    # inverse transform recovers the full 2D signal size
    b = torch.irfft(a, signal_ndim=2, onesided=True, signal_sizes=x.shape)
    torch.cuda.synchronize()
    tt = time.time()
    for i in range(runs):
        b = torch.irfft(a, signal_ndim=2, onesided=True, signal_sizes=x.shape)
    torch.cuda.synchronize()
    print("torch.irfft2d takes %.7f ms" % ((time.time()-tt)/runs*1000))
    print("")
def eval_dct2d(x, expk0, expk1, expkM, expkN, runs):
    """Benchmark all 2D DCT implementations (scipy, PyTorch composites, custom ops)."""
    # scipy reference runs on a host copy of the data
    x_numpy = x.data.cpu().numpy()
    torch.cuda.synchronize()
    tt = time.time()
    y = fftpack.dct(fftpack.dct(x_numpy.T, norm=None).T/x.size(1), norm=None)/x.size(0)
    torch.cuda.synchronize()
    print("CPU scipy.fftpack.dct2d takes %.7f ms" % ((time.time()-tt)*1000))
    # 9s for 200 iterations 1024x1024 on GTX 1080
    torch.cuda.synchronize()
    tt = time.time()
    # with torch.autograd.profiler.profile(use_cuda=True) as prof:
    for i in range(runs):
        y_2N = discrete_spectral_transform.dct2_2N(x, expk0=expk0, expk1=expk1)
    torch.cuda.synchronize()
    # print(prof)
    print("PyTorch: dct2d_2N takes %.7f ms" % ((time.time()-tt)/runs*1000))
    # 11s for 200 iterations 1024x1024 on GTX 1080
    # N-point variant needs reordering permutations for both dims
    perm0 = discrete_spectral_transform.get_perm(x.size(-2), dtype=torch.int64, device=x.device)
    perm1 = discrete_spectral_transform.get_perm(x.size(-1), dtype=torch.int64, device=x.device)
    torch.cuda.synchronize()
    tt = time.time()
    # with torch.autograd.profiler.profile(use_cuda=True) as prof:
    for i in range(runs):
        y_N = discrete_spectral_transform.dct2_N(x, perm0=perm0, expk0=expk0, perm1=perm1, expk1=expk1)
    torch.cuda.synchronize()
    # print(prof)
    print("PyTorch: dct2d_N takes %.7f ms" % ((time.time()-tt)/runs*1000))
    dct2func = dct.DCT2(expk0, expk1, algorithm='2N')
    torch.cuda.synchronize()
    tt = time.time()
    # with torch.autograd.profiler.profile(use_cuda=True) as prof:
    for i in range(runs):
        y_2N = dct2func.forward(x)
    torch.cuda.synchronize()
    # print(prof)
    print("DCT2d_2N Function takes %.7f ms" % ((time.time()-tt)/runs*1000))
    dct2func = dct.DCT2(expk0, expk1, algorithm='N')
    # first call outside the timed loop warms the op up
    y_N = dct2func.forward(x)
    torch.cuda.synchronize()
    # with torch.autograd.profiler.profile(use_cuda=True) as prof:
    tt = time.time()
    for i in range(runs):
        y_N = dct2func.forward(x)
    torch.cuda.synchronize()
    # print(prof)
    print("DCT2d_N Function takes %.7f ms" % ((time.time()-tt)/runs*1000))
    # The implementation below only supports float64 by now
    dct2func = dct_lee.DCT2(expk0, expk1)
    torch.cuda.synchronize()
    tt = time.time()
    # with torch.autograd.profiler.profile(use_cuda=True) as prof:
    for i in range(runs):
        y_N = dct2func.forward(x)
    torch.cuda.synchronize()
    # print(prof)
    print("DCT2d_Lee Function takes %.7f ms" % ((time.time()-tt)/runs*1000))
    dct2func = dct2_fft2.DCT2(expkM, expkN)
    # warm-up call before timing
    y = dct2func.forward(x)
    torch.cuda.synchronize()
    tt = time.time()
    for i in range(runs):
        y_test = dct2func.forward(x)
    torch.cuda.synchronize()
    print("DCT2_FFT2 Function takes %.7f ms" % ((time.time()-tt)/runs*1000))
    print("")
def eval_idct2d(x, expk0, expk1, expkM, expkN, runs):
    """Benchmark all 2D inverse-DCT implementations, averaged over `runs`."""
    # warm-up call before timing
    y_N = discrete_spectral_transform.idct2_N(x, expk0=expk0, expk1=expk1)
    torch.cuda.synchronize()
    tt = time.time()
    for i in range(runs):
        y_N = discrete_spectral_transform.idct2_N(x, expk0=expk0, expk1=expk1)
    torch.cuda.synchronize()
    print("PyTorch idct2_N takes %.7f ms" % ((time.time()-tt)/runs*1000))
    idct2func = dct.IDCT2(expk0, expk1, algorithm='2N')
    y_N = idct2func.forward(x)
    torch.cuda.synchronize()
    tt = time.time()
    for i in range(runs):
        y_N = idct2func.forward(x)
    torch.cuda.synchronize()
    print("IDCT2_2N Function takes %.7f ms" % ((time.time()-tt)/runs*1000))
    idct2func = dct.IDCT2(expk0, expk1, algorithm='N')
    y_N = idct2func.forward(x)
    torch.cuda.synchronize()
    tt = time.time()
    # with torch.autograd.profiler.profile(use_cuda=True) as prof:
    for i in range(runs):
        # normalization folded into the timed loop
        y_N = idct2func.forward(x)/x.size(0)/x.size(1)/4
    torch.cuda.synchronize()
    # print(prof)
    print("IDCT2_N Function takes %.7f ms" % ((time.time()-tt)/runs*1000))
    dct2func = dct2_fft2.IDCT2(expkM, expkN)
    y = dct2func.forward(x)
    torch.cuda.synchronize()
    tt = time.time()
    for i in range(runs):
        y_test = dct2func.forward(x)
    torch.cuda.synchronize()
    print("IDCT2_FFT2 Function takes %.7f ms" % ((time.time()-tt)/runs*1000))
    print("")
def eval_idxt2d(x, expk0, expk1, expkM, expkN, runs):
    """Benchmark the mixed IDXST_IDCT / IDCT_IDXST operator variants."""
    # first call outside the timed loop warms each op up
    dct2func = dct.IDXST_IDCT(expk0, expk1)
    y = dct2func.forward(x)
    torch.cuda.synchronize()
    tt = time.time()
    for i in range(runs):
        y_test = dct2func.forward(x)
    torch.cuda.synchronize()
    print("dct.IDXST_IDCT Function takes %.7f ms" % ((time.time()-tt)/runs*1000))
    y_N = discrete_spectral_transform.idxst_idct(x, expk_0=expk0, expk_1=expk1)
    torch.cuda.synchronize()
    tt = time.time()
    for i in range(runs):
        y_N = discrete_spectral_transform.idxst_idct(x, expk_0=expk0, expk_1=expk1)
    torch.cuda.synchronize()
    print("PyTorch: idxst_idct takes %.7f ms" % ((time.time()-tt)/runs*1000))
    dct2func = dct2_fft2.IDXST_IDCT(expkM, expkN)
    y = dct2func.forward(x)
    torch.cuda.synchronize()
    tt = time.time()
    for i in range(runs):
        y_test = dct2func.forward(x)
    torch.cuda.synchronize()
    print("dct2_fft2.IDXST_IDCT takes %.7f ms" % ((time.time()-tt)/runs*1000))
    dct2func = dct.IDCT_IDXST(expk0, expk1)
    y = dct2func.forward(x)
    torch.cuda.synchronize()
    tt = time.time()
    for i in range(runs):
        y_test = dct2func.forward(x)
    torch.cuda.synchronize()
    print("dct.IDCT_IDXST takes %.7f ms" % ((time.time()-tt)/runs*1000))
    y_N = discrete_spectral_transform.idct_idxst(x, expk_0=expk0, expk_1=expk1)
    torch.cuda.synchronize()
    tt = time.time()
    for i in range(runs):
        y_N = discrete_spectral_transform.idct_idxst(x, expk_0=expk0, expk_1=expk1)
    torch.cuda.synchronize()
    print("PyTorch: idct_idxst takes %.7f ms" % ((time.time()-tt)/runs*1000))
    dct2func = dct2_fft2.IDCT_IDXST(expkM, expkN)
    y = dct2func.forward(x)
    torch.cuda.synchronize()
    tt = time.time()
    for i in range(runs):
        y_test = dct2func.forward(x)
    torch.cuda.synchronize()
    print("dct2_fft2.IDCT_IDXST takes %.7f ms" % ((time.time()-tt)/runs*1000))
    print("")
def eval_others(x, expk0, expk1, expkM, expkN, runs):
    """Benchmark the remaining 2D transforms (idcct2, idcst2, idsct2)."""
    # warm-up call before timing each variant
    y_N = discrete_spectral_transform.idcct2(x, expk_0=expk0, expk_1=expk1)
    torch.cuda.synchronize()
    tt = time.time()
    # with torch.autograd.profiler.profile(use_cuda=True) as prof:
    for i in range(runs):
        y_N = discrete_spectral_transform.idcct2(x, expk_0=expk0, expk_1=expk1)
    torch.cuda.synchronize()
    # print(prof)
    print("idcct2 takes %.7f ms" % ((time.time()-tt)/runs*1000))
    func = dct.IDCCT2(expk0, expk1)
    y_N = func.forward(x)
    torch.cuda.synchronize()
    tt = time.time()
    # with torch.autograd.profiler.profile(use_cuda=True) as prof:
    for i in range(runs):
        y_N = func.forward(x)
    torch.cuda.synchronize()
    # print(prof)
    print("IDCCT2 Function takes %.7f ms" % ((time.time()-tt)/runs*1000))
    y_N = discrete_spectral_transform.idcst2(x, expk_0=expk0, expk_1=expk1)
    torch.cuda.synchronize()
    tt = time.time()
    # with torch.autograd.profiler.profile(use_cuda=True) as prof:
    for i in range(runs):
        y_N = discrete_spectral_transform.idcst2(x, expk_0=expk0, expk_1=expk1)
    torch.cuda.synchronize()
    # print(prof)
    print("idcst2 takes %.7f ms" % ((time.time()-tt)/runs*1000))
    func = dct.IDCST2(expk0, expk1)
    y_N = func.forward(x)
    torch.cuda.synchronize()
    tt = time.time()
    # with torch.autograd.profiler.profile(use_cuda=True) as prof:
    for i in range(runs):
        y_N = func.forward(x)
    torch.cuda.synchronize()
    # print(prof)
    print("IDCST2 Function takes %.7f ms" % ((time.time()-tt)/runs*1000))
    y_N = discrete_spectral_transform.idsct2(x, expk_0=expk0, expk_1=expk1)
    torch.cuda.synchronize()
    tt = time.time()
    # with torch.autograd.profiler.profile(use_cuda=True) as prof:
    for i in range(runs):
        y_N = discrete_spectral_transform.idsct2(x, expk_0=expk0, expk_1=expk1)
    torch.cuda.synchronize()
    # print(prof)
    print("idsct2 takes %.7f ms" % ((time.time()-tt)/runs*1000))
    func = dct.IDSCT2(expk0, expk1)
    y_N = func.forward(x)
    torch.cuda.synchronize()
    tt = time.time()
    # with torch.autograd.profiler.profile(use_cuda=True) as prof:
    for i in range(runs):
        y_N = func.forward(x)
    torch.cuda.synchronize()
    # print(prof)
    print("IDSCT2 Function takes %.7f ms" % ((time.time()-tt)/runs*1000))
    print("")
def eval_runtime():
    """Run every benchmark on a 1024x1024 float64 CUDA tensor."""
    runs = 100
    M, N = 1024, 1024
    x = torch.empty(M, N, dtype=torch.float64).uniform_(0, 10.0).cuda()
    print("M = {}, N = {}".format(M, N))

    # 2cos(), 2sin() tables used by dct / dct_lee
    expk0 = discrete_spectral_transform.get_expk(M, dtype=x.dtype, device=x.device)
    expk1 = discrete_spectral_transform.get_expk(N, dtype=x.dtype, device=x.device)
    # cos(), -sin() tables used by the fft2-based ops
    expkM = discrete_spectral_transform.get_exact_expk(M, dtype=x.dtype, device=x.device)
    expkN = discrete_spectral_transform.get_exact_expk(N, dtype=x.dtype, device=x.device)

    for bench in (eval_torch_rfft1d, eval_torch_rfft2d):
        bench(x, runs)
    for bench in (eval_dct2d, eval_idct2d, eval_idxt2d, eval_others):
        bench(x, expk0, expk1, expkM, expkN, runs)
if __name__ == '__main__':
    # make both the unit tests and the benchmarks reproducible
    torch.manual_seed(10)
    np.random.seed(10)
    print("usage: python dct_unitest.py test|eval")
    mode = sys.argv[1] if len(sys.argv) > 1 else "test"
    if mode == "eval":
        eval_runtime()
    else:
        unittest.main()
| |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
frappe.translate
~~~~~~~~~~~~~~~~
Translation tools for frappe
"""
import frappe, os, re, codecs, json
from frappe.utils.jinja import render_include
from frappe.utils import strip
from jinja2 import TemplateError
import itertools, operator
def guess_language(lang_list=None):
	"""Set `frappe.local.lang` from HTTP headers at beginning of request"""
	lang_codes = frappe.request.accept_languages.values()
	if not lang_codes:
		return frappe.local.lang

	guess = None
	if not lang_list:
		lang_list = get_all_languages() or []

	for candidate in lang_codes:
		code = candidate.strip()
		if code == "en" or code in lang_list:
			guess = code
			break

		# fall back to the parent language (pt) when only a variant (pt-BR) is sent
		if "-" in code:
			parent = code.split("-")[0]
			if parent in lang_list:
				guess = parent
				break

	return guess or frappe.local.lang
def get_user_lang(user=None):
	"""Set frappe.local.lang from user preferences on session beginning or resumption"""
	user = user or frappe.session.user

	# cached per user
	lang = frappe.cache().hget("lang", user)
	if not lang:
		# prefer the language set on the user profile
		user_lang = frappe.db.get_value("User", user, "language")
		if user_lang and user_lang != "Loading...":
			lang = get_lang_dict().get(user_lang) or frappe.local.lang
		else:
			# otherwise the system default
			lang = frappe.db.get_default("lang") or frappe.local.lang

		frappe.cache().hset("lang", user, lang or "en")

	return lang
def set_default_language(language):
	"""Set the system-wide default language (given its full name)."""
	lang_code = get_lang_dict()[language]
	frappe.local.lang = lang_code
	frappe.db.set_default("lang", lang_code)
def get_all_languages():
	"""Return the list of known language codes (first token of each line)."""
	return [line.split()[0] for line in get_lang_info()]
def get_lang_dict():
	"""Return all languages as {full name: code}, e.g. `{"english": "en"}`."""
	out = {}
	for line in get_lang_info():
		parts = line.split(None, 1)
		out[parts[1]] = parts[0]
	return out
def get_language_from_code(lang):
	"""Return the full language name for a language code, or None if unknown."""
	code_map = dict(line.split(None, 1) for line in get_lang_info())
	return code_map.get(lang)
def get_lang_info():
	"""Return the lines of `sites/languages.txt`, cached under "langinfo"."""
	def _load():
		return frappe.get_file_items(os.path.join(frappe.local.sites_path, "languages.txt"))
	return frappe.cache().get_value("langinfo", _load)
def get_dict(fortype, name=None):
	"""Returns translation dict for a type of object.

	:param fortype: must be one of `doctype`, `page`, `report`, `include`, `jsfile`, `boot`
	:param name: name of the document for which assets are to be returned.
	"""
	fortype = fortype.lower()
	cache = frappe.cache()
	asset_key = fortype + ":" + (name or "-")
	translation_assets = cache.hget("translation_assets", frappe.local.lang) or {}

	if asset_key not in translation_assets:
		# default to an empty list so an unrecognized `fortype` yields an
		# empty dict instead of raising NameError below
		messages = []
		if fortype=="doctype":
			messages = get_messages_from_doctype(name)
		elif fortype=="page":
			messages = get_messages_from_page(name)
		elif fortype=="report":
			messages = get_messages_from_report(name)
		elif fortype=="include":
			messages = get_messages_from_include_files()
		elif fortype=="jsfile":
			messages = get_messages_from_file(name)
		elif fortype=="boot":
			# boot bundles include-file messages plus doctype/role/module names
			messages = get_messages_from_include_files()
			messages += frappe.db.sql("select 'DocType:', name from tabDocType")
			messages += frappe.db.sql("select 'Role:', name from tabRole")
			messages += frappe.db.sql("select 'Module:', name from `tabModule Def`")

		translation_assets[asset_key] = make_dict_from_messages(messages)
		translation_assets[asset_key].update(get_dict_from_hooks(fortype, name))
		cache.hset("translation_assets", frappe.local.lang, translation_assets)

	return translation_assets[asset_key]
def get_dict_from_hooks(fortype, name):
	"""Collect translations contributed via the `get_translated_dict` hook for
	the given object type and name."""
	out = {}
	hooks = frappe.get_hooks("get_translated_dict")
	for key in hooks:
		if key == (fortype, name):
			for method in hooks[key]:
				out.update(frappe.get_attr(method)())
	return out
def add_lang_dict(code):
	"""Extracts messages and returns Javascript code snippet to be appened at the end
	of the given script

	:param code: Javascript code snippet to which translations needs to be appended."""
	extracted = extract_messages_from_code(code)
	messages = [message for _, message in extracted]
	translations = make_dict_from_messages(messages)
	return code + "\n\n$.extend(frappe._messages, %s)" % json.dumps(translations)
def make_dict_from_messages(messages, full_dict=None):
	"""Return translated messages as a dict in the language specified in
	`frappe.local.lang`.

	:param messages: list of (position, message) tuples
	:param full_dict: optional pre-loaded translation dict; loaded for the
		current language when omitted.
	"""
	out = {}
	# identity check, not equality: an explicitly-passed empty dict is respected
	if full_dict is None:
		full_dict = get_full_dict(frappe.local.lang)

	for m in messages:
		if m[1] in full_dict:
			out[m[1]] = full_dict[m[1]]

	return out
def get_lang_js(fortype, name):
	"""Returns code snippet to be appended at the end of a JS script.

	:param fortype: Type of object, e.g. `DocType`
	:param name: Document name
	"""
	translations = json.dumps(get_dict(fortype, name))
	return "\n\n$.extend(frappe._messages, %s)" % translations
def get_full_dict(lang):
	"""Load and return the entire translations dictionary for a language from
	:meth:`frappe.cache`

	:param lang: Language Code, e.g. `hi`
	"""
	# English (or no language) needs no translation
	if not lang or lang == "en":
		return {}

	if not frappe.local.lang_full_dict:
		# try the shared cache first, then rebuild from the csv files
		cached = frappe.cache().hget("lang_full_dict", lang)
		if cached:
			frappe.local.lang_full_dict = cached
		else:
			frappe.local.lang_full_dict = load_lang(lang)
			frappe.cache().hset("lang_full_dict", lang, frappe.local.lang_full_dict)

	return frappe.local.lang_full_dict
def load_lang(lang, apps=None):
	"""Combine all translations from `.csv` files in all `apps`"""
	out = {}
	for app in (apps or frappe.get_all_apps(True)):
		path = os.path.join(frappe.get_pymodule_path(app), "translations", lang + ".csv")
		if not os.path.exists(path):
			continue

		for row in read_csv_file(path):
			if len(row)==3:
				# row is (source file + line number, message, translation)
				out[row[1]] = strip(row[2])
			elif len(row)==2:
				# row is (message, translation)
				out[row[0]] = strip(row[1])
			else:
				raise Exception("Bad translation in '{app}' for language '{lang}': {values}".format(
					app=app, lang=lang, values=repr(row).encode("utf-8")
				))

	return out
def clear_cache():
	"""Clear all translation assets from :meth:`frappe.cache`"""
	cache = frappe.cache()
	for key in ("langinfo", "lang_full_dict", "translation_assets"):
		cache.delete_key(key)
def get_messages_for_app(app):
	"""Returns all messages (list) for a specified `app`.

	Collects from doctypes, pages, reports, bundled js files and server-side
	code, then deduplicates on the message text."""
	messages = []
	# quoted, title-cased module names for the SQL IN clauses below
	modules = ", ".join(['"{}"'.format(m.title().replace("_", " ")) \
		for m in frappe.local.app_modules[app]])

	# doctypes
	if modules:
		for name in frappe.db.sql_list("""select name from tabDocType
			where module in ({})""".format(modules)):
			messages.extend(get_messages_from_doctype(name))

	# pages
	for name, title in frappe.db.sql("""select name, title from tabPage
		where module in ({})""".format(modules)):
		messages.append((None, title or name))
		messages.extend(get_messages_from_page(name))

	# reports
	for name in frappe.db.sql_list("""select tabReport.name from tabDocType, tabReport
		where tabReport.ref_doctype = tabDocType.name
		and tabDocType.module in ({})""".format(modules)):
		messages.append((None, name))
		messages.extend(get_messages_from_report(name))

	# sanity check: every collected entry must be a (position, message) tuple
	for i in messages:
		if not isinstance(i, tuple):
			raise Exception

	# app_include_files
	messages.extend(get_all_messages_from_js_files(app))

	# server_messages
	messages.extend(get_server_messages(app))
	return deduplicate_messages(messages)
def get_messages_from_doctype(name):
	"""Extract all translatable messages for a doctype. Includes labels, Python code,
	Javascript code, html templates.

	:param name: DocType name."""
	meta = frappe.get_meta(name)

	# removed dead `messages = []` that was immediately overwritten
	messages = [meta.name, meta.module]

	if meta.description:
		messages.append(meta.description)

	# translations of field labels, description and options
	for d in meta.get("fields"):
		messages.extend([d.label, d.description])

		if d.fieldtype=='Select' and d.options:
			options = d.options.split('\n')
			# icon-select fields hold icon class names, not user-facing text
			if "icon" not in options[0]:
				messages.extend(options)

	# translations of roles
	for d in meta.get("permissions"):
		if d.role:
			messages.append(d.role)

	messages = [message for message in messages if message]
	messages = [('DocType: ' + name, message) for message in messages if is_translatable(message)]

	# extract from js, py files
	doctype_file_path = frappe.get_module_path(meta.module, "doctype", meta.name, meta.name)
	messages.extend(get_messages_from_file(doctype_file_path + ".js"))
	messages.extend(get_messages_from_file(doctype_file_path + "_list.js"))
	messages.extend(get_messages_from_file(doctype_file_path + "_list.html"))
	messages.extend(get_messages_from_file(doctype_file_path + "_calendar.js"))
	return messages
def get_messages_from_page(name):
	"""Returns all translatable strings from a :class:`frappe.core.doctype.Page`.

	:param name: Page name. Delegates to `_get_messages_from_page_or_report`."""
	return _get_messages_from_page_or_report("Page", name)
def get_messages_from_report(name):
	"""Returns all translatable strings from a :class:`frappe.core.doctype.Report`"""
	report = frappe.get_doc("Report", name)
	messages = _get_messages_from_page_or_report(
		"Report", name, frappe.db.get_value("DocType", report.ref_doctype, "module"))

	# TODO position here!
	if report.query:
		# column labels in query reports look like "Label:Fieldtype"
		messages.extend(
			(None, message)
			for message in re.findall('"([^:,^"]*):', report.query)
			if is_translatable(message))

	messages.append((None, report.report_name))
	return messages
def _get_messages_from_page_or_report(doctype, name, module=None):
	"""Collect translatable strings from the `.py`, `.js` and `.html` files of a
	Page or Report document."""
	module = module or frappe.db.get_value(doctype, name, "module")
	doc_path = frappe.get_module_path(module, doctype, name)

	messages = get_messages_from_file(os.path.join(doc_path, frappe.scrub(name) + ".py"))
	if os.path.exists(doc_path):
		for filename in os.listdir(doc_path):
			if filename.endswith((".js", ".html")):
				messages += get_messages_from_file(os.path.join(doc_path, filename))

	return messages
def get_server_messages(app):
	"""Extracts all translatable strings (tagged with :func:`frappe._`) from Python modules inside an app"""
	messages = []
	for basepath, folders, files in os.walk(frappe.get_pymodule_path(app)):
		# do not descend into vcs metadata, static assets or gettext folders
		for dontwalk in (".git", "public", "locale"):
			if dontwalk in folders:
				folders.remove(dontwalk)

		for f in files:
			if f.endswith((".py", ".html", ".js")):
				messages.extend(get_messages_from_file(os.path.join(basepath, f)))

	return messages
def get_messages_from_include_files(app_name=None):
	"""Returns messages from js files included at time of boot like desk.min.js for desk and web"""
	messages = []
	included = (frappe.get_hooks("app_include_js", app_name=app_name) or []) \
		+ (frappe.get_hooks("web_include_js", app_name=app_name) or [])
	for file in included:
		messages.extend(get_messages_from_file(os.path.join(frappe.local.sites_path, file)))

	return messages
def get_all_messages_from_js_files(app_name=None):
	"""Extracts all translatable strings from app `.js` files"""
	messages = []
	for app in ([app_name] if app_name else frappe.get_installed_apps()):
		public_path = frappe.get_app_path(app, "public")
		if not os.path.exists(public_path):
			continue

		for basepath, folders, files in os.walk(public_path):
			# bundled third-party libraries are not translated
			if "frappe/public/js/lib" in basepath:
				continue

			for fname in files:
				if fname.endswith((".js", ".html")):
					messages.extend(get_messages_from_file(os.path.join(basepath, fname)))

	return messages
def get_messages_from_file(path):
	"""Returns a list of translatable strings from a code file

	:param path: path of the code file
	"""
	apps_path = get_bench_dir()
	if not os.path.exists(path):
		# missing files are silently skipped
		return []

	with open(path, 'r') as sourcefile:
		code = sourcefile.read()

	# position is recorded as "<relative path> +<offset>"
	return [
		(os.path.relpath(" +".join([path, str(pos)]), apps_path), message)
		for pos, message in extract_messages_from_code(code, path.endswith(".py"))
	]
def extract_messages_from_code(code, is_py=False):
	"""Extracts translatable strings from a code file.

	:param code: code from which translatable strings are to be extracted
	:param is_py: include messages in triple quotes e.g. `_('''message''')`"""
	try:
		code = render_include(code)
	except TemplateError:
		# Exception will occur when it encounters John Resig's microtemplating code
		pass

	messages = []
	# raw strings so the regex escapes are passed through to the re engine
	# (non-raw '_\(' relies on Python ignoring unknown escapes, which is deprecated)
	messages += [(m.start(), m.groups()[0]) for m in re.compile(r'_\("([^"]*)"').finditer(code)]
	messages += [(m.start(), m.groups()[0]) for m in re.compile(r"_\('([^']*)'").finditer(code)]
	if is_py:
		messages += [(m.start(), m.groups()[0]) for m in re.compile(r'_\("{3}([^"]*)"{3}.*\)').finditer(code)]

	messages = [(pos, message) for pos, message in messages if is_translatable(message)]
	return pos_to_line_no(messages, code)
def is_translatable(m):
	"""A string is translatable when it contains at least one lowercase letter
	and is not an icon class, a pixel size, or an `eval:` expression."""
	if not re.search("[a-z]", m):
		return False
	if m.startswith("icon-") or m.startswith("eval:") or m.endswith("px"):
		return False
	return True
def pos_to_line_no(messages, code):
	"""Convert (character offset, message) pairs into (line number, message)
	pairs, 1-indexed, sorted by offset."""
	result = []
	newline_offsets = [m.start() for m in re.compile('\\n').finditer(code)]
	line = 1
	idx = 0
	for pos, message in sorted(messages, key=lambda item: item[0]):
		# advance past every newline that precedes this offset
		while idx < len(newline_offsets) and pos > newline_offsets[idx]:
			line += 1
			idx += 1
		result.append((line, message))
	return result
def read_csv_file(path):
	"""Read CSV file and return as list of list (rows of unicode values).

	:param path: File path"""
	from csv import reader
	# read the whole file as unicode so control characters can be stripped
	# before csv parsing
	with codecs.open(path, 'r', 'utf-8') as msgfile:
		data = msgfile.read()

		# some tools embed ASCII file/group separators (chr(28)/chr(29));
		# seen in japanese translation files — drop them
		data = data.replace(chr(28), "").replace(chr(29), "")
		# Python 2 csv.reader cannot consume unicode: encode to utf-8 for
		# parsing, then decode each cell back to unicode
		data = reader([r.encode('utf-8') for r in data.splitlines()])
		newdata = [[unicode(val, 'utf-8') for val in row] for row in data]
	return newdata
def write_csv_file(path, app_messages, lang_dict):
	"""Write translation CSV file.

	:param path: File path, usually `[app]/translations`.
	:param app_messages: Translatable strings for this app.
	:param lang_dict: Full translated dict.
	"""
	# sort by message text (second tuple element); Python 2 cmp-style sort
	app_messages.sort(lambda x,y: cmp(x[1], y[1]))
	from csv import writer
	# binary mode: Python 2 csv writes pre-encoded byte strings
	with open(path, 'wb') as msgfile:
		w = writer(msgfile, lineterminator='\n')
		for p, m in app_messages:
			t = lang_dict.get(m, '')
			# strip whitespaces inside placeholders: "{ 0 }" -> "{0}"
			t = re.sub('{\s?([0-9]+)\s?}', "{\g<1>}", t)
			# row: (source position, message, translation)
			w.writerow([p.encode('utf-8') if p else '', m.encode('utf-8'), t.encode('utf-8')])
def get_untranslated(lang, untranslated_file, get_all=False):
	"""Returns all untranslated strings for a language and writes in a file

	:param lang: Language code.
	:param untranslated_file: Output file path.
	:param get_all: Return all strings, translated or not."""
	clear_cache()
	apps = frappe.get_all_apps(True)

	messages = []
	untranslated = []
	for app in apps:
		messages.extend(get_messages_for_app(app))

	messages = deduplicate_messages(messages)

	def escape_newlines(s):
		# encode linebreaks so each message stays on a single line in the
		# output file; undone by restore_newlines in update_translations
		return (s.replace("\\\n", "|||||")
				.replace("\\n", "||||")
				.replace("\n", "|||"))

	if get_all:
		# parenthesized single-arg print: identical output under Python 2,
		# and valid syntax under Python 3 (the statement form is not)
		print(str(len(messages)) + " messages")
		with open(untranslated_file, "w") as f:
			for m in messages:
				# replace \n with ||| so that internal linebreaks don't get split
				f.write((escape_newlines(m[1]) + os.linesep).encode("utf-8"))
	else:
		full_dict = get_full_dict(lang)

		for m in messages:
			if not full_dict.get(m[1]):
				untranslated.append(m[1])

		if untranslated:
			print(str(len(untranslated)) + " missing translations of " + str(len(messages)))
			with open(untranslated_file, "w") as f:
				for m in untranslated:
					# replace \n with ||| so that internal linebreaks don't get split
					f.write((escape_newlines(m) + os.linesep).encode("utf-8"))
		else:
			print("all translated!")
def update_translations(lang, untranslated_file, translated_file):
	"""Update translations from a source and target file for a given language.

	:param lang: Language code (e.g. `en`).
	:param untranslated_file: File path with the messages in English.
	:param translated_file: File path with messages in language to be updated."""
	clear_cache()
	full_dict = get_full_dict(lang)

	def restore_newlines(s):
		# undo the ||| escaping done by get_untranslated; longest separator
		# first so "|||||" is not consumed as "|||" + "||". The space-padded
		# variants handle translation tools that inserted spaces into the runs.
		return (s.replace("|||||", "\\\n")
				.replace("| | | | |", "\\\n")
				.replace("||||", "\\n")
				.replace("| | | |", "\\n")
				.replace("|||", "\n")
				.replace("| | |", "\n"))

	translation_dict = {}
	# the two files are parallel: line N of one translates line N of the other
	for key, value in zip(frappe.get_file_items(untranslated_file, ignore_empty_lines=False),
		frappe.get_file_items(translated_file, ignore_empty_lines=False)):

		# undo hack in get_untranslated
		translation_dict[restore_newlines(key)] = restore_newlines(value)

	full_dict.update(translation_dict)

	for app in frappe.get_all_apps(True):
		write_translations_file(app, lang, full_dict)
def rebuild_all_translation_files():
	"""Rebuild all translation files: `[app]/translations/[lang].csv`."""
	# regenerate the CSV for every (language, app) combination
	for language in get_all_languages():
		for installed_app in frappe.get_all_apps():
			write_translations_file(installed_app, language)
def write_translations_file(app, lang, full_dict=None, app_messages=None):
	"""Write a translation file for a given language.

	:param app: `app` for which translations are to be written.
	:param lang: Language code.
	:param full_dict: Full translated language dict (optional).
	:param app_messages: Source strings (optional).
	"""
	if not app_messages:
		app_messages = get_messages_for_app(app)
	# nothing to translate for this app
	if not app_messages:
		return
	# translations live at [app]/translations/[lang].csv
	tpath = frappe.get_pymodule_path(app, "translations")
	frappe.create_folder(tpath)
	write_csv_file(os.path.join(tpath, lang + ".csv"),
		app_messages, full_dict or get_full_dict(lang))
def send_translations(translation_dict):
	"""Append translated dict in `frappe.local.response`"""
	# merge into the response payload, creating the key on first use
	frappe.local.response.setdefault("__messages", {}).update(translation_dict)
def deduplicate_messages(messages):
	"""Return `messages` with duplicate source strings removed.

	Messages are `(position, source_text)` tuples; for each distinct
	source text only the first tuple (in stable sorted order of source
	text) is kept.

	:param messages: List of `(position, source_text)` tuples."""
	ret = []
	op = operator.itemgetter(1)
	messages = sorted(messages, key=op)
	for k, g in itertools.groupby(messages, op):
		# `next(g)` (builtin) works on Python 2.6+ and 3.x, unlike the
		# Python-2-only `g.next()` method call
		ret.append(next(g))
	return ret
def get_bench_dir():
	"""Return the bench directory (four levels above the frappe package file)."""
	return os.path.join(frappe.__file__, os.pardir, os.pardir, os.pardir, os.pardir)
def rename_language(old_name, new_name):
	"""Rename a language everywhere it is referenced.

	Updates the System Settings default language (if it matches) and the
	language of every user record."""
	if frappe.db.get_single_value("System Settings", "language") == old_name:
		frappe.db.set_value("System Settings", "System Settings", "language", new_name)
	frappe.db.sql(
		"""update `tabUser` set language=%(new_name)s where language=%(old_name)s""",
		{"new_name": new_name, "old_name": old_name})
| |
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Unit test for config files."""
import StringIO
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.rdfvalues import anomaly as rdf_anomaly
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import config_file as rdf_config_file
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import protodict as rdf_protodict
from grr.parsers import config_file
CFG = """
# A comment.
Protocol 2 # Another comment.
Ciphers aes128-ctr,aes256-ctr,aes128-cbc,aes256-cbc
ServerKeyBits 768
Port 22
Port 2222,10222
# Make life easy for root. It's hard running a server.
Match User root
PermitRootLogin yes
# Oh yeah, this is an excellent way to protect that root account.
Match Address 192.168.3.12
PermitRootLogin no
Protocol 1 # Not a valid match group entry.
"""
class SshdConfigTest(test_lib.GRRBaseTest):
  """Test parsing of an sshd configuration."""

  def GetConfig(self):
    """Read in the test configuration file."""
    parser = config_file.SshdConfigParser()
    results = list(parser.Parse(None, StringIO.StringIO(CFG), None))
    # The parser emits a single result for the whole config.
    self.assertEqual(1, len(results))
    return results[0]

  def testParseConfig(self):
    """Ensure we can extract sshd settings."""
    result = self.GetConfig()
    self.assertTrue(isinstance(result, rdf_config_file.SshdConfig))
    self.assertItemsEqual([2], result.config.protocol)
    expect = ["aes128-ctr", "aes256-ctr", "aes128-cbc", "aes256-cbc"]
    self.assertItemsEqual(expect, result.config.ciphers)

  def testFindNumericValues(self):
    """Keywords with numeric settings are converted to integers."""
    result = self.GetConfig()
    self.assertEqual(768, result.config.serverkeybits)
    # Both "Port" lines are merged; comma-separated values are split.
    self.assertItemsEqual([22, 2222, 10222], result.config.port)

  def testParseMatchGroups(self):
    """Match groups are added to separate sections."""
    result = self.GetConfig()
    # Multiple Match groups found.
    self.assertEqual(2, len(result.matches))
    # Config options set per Match group.
    block_1, block_2 = result.matches
    self.assertEqual("user root", block_1.criterion)
    self.assertEqual("address 192.168.3.12", block_2.criterion)
    self.assertEqual(True, block_1.config.permitrootlogin)
    self.assertEqual(False, block_2.config.permitrootlogin)
    # "Protocol" is not a valid keyword inside a Match block, so it is ignored.
    self.assertFalse(block_1.config.protocol)
class FieldParserTests(test_lib.GRRBaseTest):
  """Test the field parser."""

  def testParser(self):
    # Exercises continuations (backslash-newline), comments, quoting and
    # multiple separator characters in one pass.
    test_data = r"""
    each of these words:should;be \
    fields # but not these ones \n, or \ these.
    this should be another entry "with this quoted text as one field"
    'an entry'with" only two" fields ;; and not this comment.
    """
    expected = [["each", "of", "these", "words", "should", "be", "fields"],
                ["this", "should", "be", "another", "entry",
                 "with this quoted text as one field"],
                ["an entrywith only two", "fields"]]
    cfg = config_file.FieldParser(sep=["[ \t\f\v]+", ":", ";"],
                                  comments=["#", ";;"])
    results = cfg.ParseEntries(test_data)
    for i, expect in enumerate(expected):
      self.assertItemsEqual(expect, results[i])

  def testNoFinalTerminator(self):
    # Input without a trailing newline must still produce an entry.
    test_data = "you forgot a newline"
    expected = [["you", "forgot", "a", "newline"]]
    cfg = config_file.FieldParser()
    results = cfg.ParseEntries(test_data)
    for i, expect in enumerate(expected):
      self.assertItemsEqual(expect, results[i])

  def testWhitespaceDoesntNukeNewline(self):
    # With the default separator, trailing spaces don't merge lines ...
    test_data = "trailing spaces \nno trailing spaces\n"
    expected = [["trailing", "spaces"], ["no", "trailing", "spaces"]]
    results = config_file.FieldParser().ParseEntries(test_data)
    for i, expect in enumerate(expected):
      self.assertItemsEqual(expect, results[i])
    # ... but a separator of r"\s+" swallows the newline and merges them.
    expected = [["trailing", "spaces", "no", "trailing", "spaces"]]
    results = config_file.FieldParser(sep=r"\s+").ParseEntries(test_data)
    for i, expect in enumerate(expected):
      self.assertItemsEqual(expect, results[i])
class KeyValueParserTests(test_lib.GRRBaseTest):
  """Test the key/value parser."""

  def testParser(self):
    # Covers continuations, comments, a bad line with no key, and a
    # quoted key with no value.
    test_data = r"""
    key1 = a list of \
      fields # but not \n this, or \ this.

    # Nothing here.
    key 2:another entry
    = # Bad line
    'a key'with" no" value field ;; and not this comment.
    """
    expected = [{"key1": ["a", "list", "of", "fields"]},
                {"key 2": ["another", "entry"]},
                {"a keywith no value field": []}]
    cfg = config_file.KeyValueParser(kv_sep=["=", ":"], comments=["#", ";;"])
    results = cfg.ParseEntries(test_data)
    for i, expect in enumerate(expected):
      self.assertDictEqual(expect, results[i])
class NfsExportParserTests(test_lib.GRRBaseTest):
  """Test the NFS exports parser."""

  def testParseNfsExportFile(self):
    test_data = r"""
    /path/to/foo -rw,sync host1(ro) host2
    /path/to/bar *.example.org(all_squash,ro) \
       192.168.1.0/24 (rw) # Mistake here - space makes this default.
    """
    exports = StringIO.StringIO(test_data)
    parser = config_file.NfsExportsParser()
    results = list(parser.Parse(None, exports, None))
    # First export: defaults from the -rw,sync flags, two clients.
    self.assertEqual("/path/to/foo", results[0].share)
    self.assertItemsEqual(["rw", "sync"], results[0].defaults)
    self.assertEqual("host1", results[0].clients[0].host)
    self.assertItemsEqual(["ro"], results[0].clients[0].options)
    self.assertEqual("host2", results[0].clients[1].host)
    self.assertItemsEqual([], results[0].clients[1].options)
    # Second export: the space before "(rw)" makes rw a default, not a
    # client option for 192.168.1.0/24.
    self.assertEqual("/path/to/bar", results[1].share)
    self.assertItemsEqual(["rw"], results[1].defaults)
    self.assertEqual("*.example.org", results[1].clients[0].host)
    self.assertItemsEqual(["all_squash", "ro"], results[1].clients[0].options)
    self.assertEqual("192.168.1.0/24", results[1].clients[1].host)
    self.assertItemsEqual([], results[1].clients[1].options)
class MtabParserTests(test_lib.GRRBaseTest):
  """Test the mtab and proc/mounts parser."""

  def testParseMountData(self):
    test_data = r"""
    rootfs / rootfs rw 0 0
    arnie@host.example.org:/users/arnie /home/arnie/remote fuse.sshfs rw,nosuid,nodev,max_read=65536 0 0
    /dev/sr0 /media/USB\040Drive vfat ro,nosuid,nodev
    """
    exports = StringIO.StringIO(test_data)
    parser = config_file.MtabParser()
    results = list(parser.Parse(None, exports, None))
    self.assertEqual("rootfs", results[0].device)
    self.assertEqual("/", results[0].mount_point)
    self.assertEqual("rootfs", results[0].type)
    self.assertTrue(results[0].options.rw)
    self.assertFalse(results[0].options.ro)
    self.assertEqual("arnie@host.example.org:/users/arnie", results[1].device)
    self.assertEqual("/home/arnie/remote", results[1].mount_point)
    self.assertEqual("fuse.sshfs", results[1].type)
    self.assertTrue(results[1].options.rw)
    self.assertTrue(results[1].options.nosuid)
    self.assertTrue(results[1].options.nodev)
    # key=value options are kept as a list of string values.
    self.assertEqual(["65536"], results[1].options.max_read)
    self.assertEqual("/dev/sr0", results[2].device)
    # The octal escape \040 is decoded back to a space.
    self.assertEqual("/media/USB Drive", results[2].mount_point)
    self.assertEqual("vfat", results[2].type)
    self.assertTrue(results[2].options.ro)
    self.assertTrue(results[2].options.nosuid)
    self.assertTrue(results[2].options.nodev)
class MountCmdTests(test_lib.GRRBaseTest):
  """Test the mount command parser."""

  def testParseMountData(self):
    # Same expectations as MtabParserTests, but for `mount` output format.
    test_data = r"""
    rootfs on / type rootfs (rw)
    arnie@host.example.org:/users/arnie on /home/arnie/remote type fuse.sshfs (rw,nosuid,nodev,max_read=65536)
    /dev/sr0 on /media/USB Drive type vfat (ro,nosuid,nodev)
    """
    parser = config_file.MountCmdParser()
    results = list(parser.Parse("/bin/mount", [], test_data, "", 0, 5, None))
    self.assertEqual("rootfs", results[0].device)
    self.assertEqual("/", results[0].mount_point)
    self.assertEqual("rootfs", results[0].type)
    self.assertTrue(results[0].options.rw)
    self.assertFalse(results[0].options.ro)
    self.assertEqual("arnie@host.example.org:/users/arnie", results[1].device)
    self.assertEqual("/home/arnie/remote", results[1].mount_point)
    self.assertEqual("fuse.sshfs", results[1].type)
    self.assertTrue(results[1].options.rw)
    self.assertTrue(results[1].options.nosuid)
    self.assertTrue(results[1].options.nodev)
    self.assertEqual(["65536"], results[1].options.max_read)
    self.assertEqual("/dev/sr0", results[2].device)
    # Mount point containing a literal space is preserved.
    self.assertEqual("/media/USB Drive", results[2].mount_point)
    self.assertEqual("vfat", results[2].type)
    self.assertTrue(results[2].options.ro)
    self.assertTrue(results[2].options.nosuid)
    self.assertTrue(results[2].options.nodev)
class RsyslogParserTests(test_lib.GRRBaseTest):
  """Test the rsyslog parser."""

  def testParseRsyslog(self):
    # One line per rsyslog action type: TCP/UDP forwarding, pipe, discard,
    # script, file, wall broadcast and async (dash-prefixed) file.
    test_data = r"""
    $SomeDirective
    daemon.* @@tcp.example.com.:514;RSYSLOG_ForwardFormat
    syslog.debug,info @udp.example.com.:514;RSYSLOG_ForwardFormat
    kern.* |/var/log/pipe
    news,uucp.* ~
    user.* ^/usr/bin/log2cowsay
    *.* /var/log/messages
    *.emerg *
    mail.* -/var/log/maillog
    """
    log_conf = StringIO.StringIO(test_data)
    parser = config_file.RsyslogParser()
    results = list(parser.ParseMultiple([None], [log_conf], None))
    self.assertEqual(1, len(results))
    # Targets are returned in input order.
    tcp, udp, pipe, null, script, fs, wall, async_fs = [
        target for target in results[0].targets]
    self.assertEqual("daemon", tcp.facility)
    self.assertEqual("*", tcp.priority)
    self.assertEqual("TCP", tcp.transport)
    self.assertEqual("tcp.example.com.:514", tcp.destination)
    self.assertEqual("syslog", udp.facility)
    self.assertEqual("debug,info", udp.priority)
    self.assertEqual("UDP", udp.transport)
    self.assertEqual("udp.example.com.:514", udp.destination)
    self.assertEqual("kern", pipe.facility)
    self.assertEqual("*", pipe.priority)
    self.assertEqual("PIPE", pipe.transport)
    self.assertEqual("/var/log/pipe", pipe.destination)
    # "~" discards messages: transport NONE, no destination.
    self.assertEqual("news,uucp", null.facility)
    self.assertEqual("*", null.priority)
    self.assertEqual("NONE", null.transport)
    self.assertFalse(null.destination)
    self.assertEqual("user", script.facility)
    self.assertEqual("*", script.priority)
    self.assertEqual("SCRIPT", script.transport)
    self.assertEqual("/usr/bin/log2cowsay", script.destination)
    self.assertEqual("*", fs.facility)
    self.assertEqual("*", fs.priority)
    self.assertEqual("FILE", fs.transport)
    self.assertEqual("/var/log/messages", fs.destination)
    self.assertEqual("*", wall.facility)
    self.assertEqual("emerg", wall.priority)
    self.assertEqual("WALL", wall.transport)
    self.assertEqual("*", wall.destination)
    self.assertEqual("mail", async_fs.facility)
    self.assertEqual("*", async_fs.priority)
    self.assertEqual("FILE", async_fs.transport)
    self.assertEqual("/var/log/maillog", async_fs.destination)
class APTPackageSourceParserTests(test_lib.GRRBaseTest):
  """Test the APT package source lists parser."""

  def testPackageSourceData(self):
    # One-line sources.list format: deb/deb-src lines with optional
    # [options], various transports, plus malformed lines that must yield
    # no URIs.
    test_data = r"""
    # Security updates
    deb http://security.debian.org/ wheezy/updates main contrib non-free
    deb-src [arch=amd64,trusted=yes] ftp://security.debian.org/ wheezy/updates main contrib non-free

    ## Random comment

    # Different transport protocols below
    deb ssh://ftp.debian.org/debian wheezy main contrib non-free
    deb-src file:/mnt/deb-sources-files/ wheezy main contrib non-free

    # correct - referencing root file system
    deb-src file:/

    # incorrect
    deb-src http://

    # Bad lines below - these shouldn't get any URIs back
    deb
    deb-src [arch=i386]
    deb-src abcdefghijklmnopqrstuvwxyz
    """
    file_obj = StringIO.StringIO(test_data)
    pathspec = rdf_paths.PathSpec(path="/etc/apt/sources.list")
    stat = rdf_client.StatEntry(pathspec=pathspec)
    parser = config_file.APTPackageSourceParser()
    results = list(parser.Parse(stat, file_obj, None))
    result = [d for d in results if isinstance(d,
                                               rdf_protodict.AttributedDict)][0]
    self.assertEqual("/etc/apt/sources.list", result.filename)
    # Only the five well-formed URIs survive; "http://" has no host/path.
    self.assertEqual(5, len(result.uris))
    self.assertEqual("http", result.uris[0].transport)
    self.assertEqual("security.debian.org", result.uris[0].host)
    self.assertEqual("/", result.uris[0].path)
    self.assertEqual("ftp", result.uris[1].transport)
    self.assertEqual("security.debian.org", result.uris[1].host)
    self.assertEqual("/", result.uris[1].path)
    self.assertEqual("ssh", result.uris[2].transport)
    self.assertEqual("ftp.debian.org", result.uris[2].host)
    self.assertEqual("/debian", result.uris[2].path)
    # file: URIs have an empty host.
    self.assertEqual("file", result.uris[3].transport)
    self.assertEqual("", result.uris[3].host)
    self.assertEqual("/mnt/deb-sources-files/", result.uris[3].path)
    self.assertEqual("file", result.uris[4].transport)
    self.assertEqual("", result.uris[4].host)
    self.assertEqual("/", result.uris[4].path)

  def testEmptySourceData(self):
    # URI/URIs keys without values (including trailing whitespace) must
    # produce an empty URI list, not bogus entries.
    test_data = ("# comment 1\n"
                 "# deb http://security.debian.org/ wheezy/updates main\n"
                 "URI :\n"
                 "URI:\n"
                 "# Trailing whitespace on purpose\n"
                 "URI: \n"
                 "\n"
                 "URIs :\n"
                 "URIs:\n"
                 "# Trailing whitespace on purpose\n"
                 "URIs: \n"
                 "# comment 2\n")
    file_obj = StringIO.StringIO(test_data)
    pathspec = rdf_paths.PathSpec(path="/etc/apt/sources.list.d/test.list")
    stat = rdf_client.StatEntry(pathspec=pathspec)
    parser = config_file.APTPackageSourceParser()
    results = list(parser.Parse(stat, file_obj, None))
    result = [d for d in results if isinstance(d,
                                               rdf_protodict.AttributedDict)][0]
    self.assertEqual("/etc/apt/sources.list.d/test.list", result.filename)
    self.assertEqual(0, len(result.uris))

  def testRFC822StyleSourceDataParser(self):
    """Test source list formated as per rfc822 style."""
    # Deb822 format: URIs may continue over indented lines; URLs inside
    # Description must not be picked up.
    test_data = r"""
    # comment comment comment
    Types: deb deb-src
    URIs: http://example.com/debian
     http://1.example.com/debian1
       http://2.example.com/debian2
      http://willdetect.example.com/debian-strange
    URIs : ftp://3.example.com/debian3
     http://4.example.com/debian4
      blahblahblahblahblahlbha
     http://willdetect2.example.com/debian-w2
       http://willdetect3.example.com/debian-w3
    URI
    URI : ssh://5.example.com/debian5
    Suites: stable testing
    Sections: component1 component2
    Description: short
     long long long
    [option1]: [option1-value]

    deb-src [arch=amd64,trusted=yes] ftp://security.debian.org/ wheezy/updates main contrib non-free

    # comment comment comment
    Types: deb
    URI:ftp://another.example.com/debian2
    Suites: experimental
    Sections: component1 component2
    Enabled: no
    Description: http://debian.org
     This URL shouldn't be picked up by the parser
    [option1]: [option1-value]
    """
    file_obj = StringIO.StringIO(test_data)
    pathspec = rdf_paths.PathSpec(path="/etc/apt/sources.list.d/rfc822.list")
    stat = rdf_client.StatEntry(pathspec=pathspec)
    parser = config_file.APTPackageSourceParser()
    results = list(parser.Parse(stat, file_obj, None))
    result = [d for d in results if isinstance(d,
                                               rdf_protodict.AttributedDict)][0]
    self.assertEqual("/etc/apt/sources.list.d/rfc822.list", result.filename)
    self.assertEqual(11, len(result.uris))
    # The one-line deb-src entry is parsed first ...
    self.assertEqual("ftp", result.uris[0].transport)
    self.assertEqual("security.debian.org", result.uris[0].host)
    self.assertEqual("/", result.uris[0].path)
    # ... followed by the deb822-style URIs in declaration order.
    self.assertEqual("http", result.uris[1].transport)
    self.assertEqual("example.com", result.uris[1].host)
    self.assertEqual("/debian", result.uris[1].path)
    self.assertEqual("http", result.uris[2].transport)
    self.assertEqual("1.example.com", result.uris[2].host)
    self.assertEqual("/debian1", result.uris[2].path)
    self.assertEqual("http", result.uris[3].transport)
    self.assertEqual("2.example.com", result.uris[3].host)
    self.assertEqual("/debian2", result.uris[3].path)
    self.assertEqual("http", result.uris[4].transport)
    self.assertEqual("willdetect.example.com", result.uris[4].host)
    self.assertEqual("/debian-strange", result.uris[4].path)
    self.assertEqual("ftp", result.uris[5].transport)
    self.assertEqual("3.example.com", result.uris[5].host)
    self.assertEqual("/debian3", result.uris[5].path)
    self.assertEqual("http", result.uris[6].transport)
    self.assertEqual("4.example.com", result.uris[6].host)
    self.assertEqual("/debian4", result.uris[6].path)
    self.assertEqual("http", result.uris[7].transport)
    self.assertEqual("willdetect2.example.com", result.uris[7].host)
    self.assertEqual("/debian-w2", result.uris[7].path)
    self.assertEqual("http", result.uris[8].transport)
    self.assertEqual("willdetect3.example.com", result.uris[8].host)
    self.assertEqual("/debian-w3", result.uris[8].path)
    self.assertEqual("ssh", result.uris[9].transport)
    self.assertEqual("5.example.com", result.uris[9].host)
    self.assertEqual("/debian5", result.uris[9].path)
    self.assertEqual("ftp", result.uris[10].transport)
    self.assertEqual("another.example.com", result.uris[10].host)
    self.assertEqual("/debian2", result.uris[10].path)
class YumPackageSourceParserTests(test_lib.GRRBaseTest):
  """Test the Yum package source lists parser."""

  def testPackageSourceData(self):
    # .repo format: baseurl values (including continuation lines and
    # spacing variants around "=") must all be collected.
    test_data = r"""
    # comment 1
    [centosdvdiso]
    name=CentOS DVD ISO
    baseurl=file:///mnt
     http://mirror1.centos.org/CentOS/6/os/i386/
    baseurl =ssh://mirror2.centos.org/CentOS/6/os/i386/
    enabled=1
    gpgcheck=1
    gpgkey=file:///mnt/RPM-GPG-KEY-CentOS-6

    # comment2
    [examplerepo]
    name=Example Repository
    baseurl = https://mirror3.centos.org/CentOS/6/os/i386/
    enabled=1
    gpgcheck=1
    gpgkey=http://mirror.centos.org/CentOS/6/os/i386/RPM-GPG-KEY-CentOS-6
    """
    file_obj = StringIO.StringIO(test_data)
    pathspec = rdf_paths.PathSpec(path="/etc/yum.repos.d/test1.repo")
    stat = rdf_client.StatEntry(pathspec=pathspec)
    parser = config_file.YumPackageSourceParser()
    results = list(parser.Parse(stat, file_obj, None))
    result = [d for d in results if isinstance(d,
                                               rdf_protodict.AttributedDict)][0]
    self.assertEqual("/etc/yum.repos.d/test1.repo", result.filename)
    self.assertEqual(4, len(result.uris))
    # file:// URIs have an empty host component.
    self.assertEqual("file", result.uris[0].transport)
    self.assertEqual("", result.uris[0].host)
    self.assertEqual("/mnt", result.uris[0].path)
    self.assertEqual("http", result.uris[1].transport)
    self.assertEqual("mirror1.centos.org", result.uris[1].host)
    self.assertEqual("/CentOS/6/os/i386/", result.uris[1].path)
    self.assertEqual("ssh", result.uris[2].transport)
    self.assertEqual("mirror2.centos.org", result.uris[2].host)
    self.assertEqual("/CentOS/6/os/i386/", result.uris[2].path)
    self.assertEqual("https", result.uris[3].transport)
    self.assertEqual("mirror3.centos.org", result.uris[3].host)
    self.assertEqual("/CentOS/6/os/i386/", result.uris[3].path)

  def testEmptySourceData(self):
    # baseurl keys with no value (and a bare "baseurl") must yield no URIs.
    test_data = ("# comment 1\n"
                 "baseurl=\n"
                 "# Trailing whitespace on purpose\n"
                 "baseurl= \n"
                 "# Trailing whitespace on purpose\n"
                 "baseurl = \n"
                 "baseurl\n"
                 "# comment 2\n")
    file_obj = StringIO.StringIO(test_data)
    pathspec = rdf_paths.PathSpec(path="/etc/yum.repos.d/emptytest.repo")
    stat = rdf_client.StatEntry(pathspec=pathspec)
    parser = config_file.YumPackageSourceParser()
    results = list(parser.Parse(stat, file_obj, None))
    result = [d for d in results if isinstance(d,
                                               rdf_protodict.AttributedDict)][0]
    self.assertEqual("/etc/yum.repos.d/emptytest.repo", result.filename)
    self.assertEqual(0, len(result.uris))
class CronAtAllowDenyParserTests(test_lib.GRRBaseTest):
  """Test the cron/at allow/deny parser."""

  def testParseCronData(self):
    # Lines with a single token are users; multi-token lines are treated
    # as dodgy and reported via an Anomaly. Duplicates are collapsed.
    test_data = r"""root
user
user2 user3
root
hi hello
user
pparth"""
    file_obj = StringIO.StringIO(test_data)
    pathspec = rdf_paths.PathSpec(path="/etc/at.allow")
    stat = rdf_client.StatEntry(pathspec=pathspec)
    parser = config_file.CronAtAllowDenyParser()
    results = list(parser.Parse(stat, file_obj, None))
    result = [d for d in results if isinstance(d,
                                               rdf_protodict.AttributedDict)][0]
    filename = result.filename
    users = result.users
    self.assertEqual("/etc/at.allow", filename)
    self.assertEqual(sorted(["root", "user", "pparth"]), sorted(users))
    anomalies = [a for a in results if isinstance(a, rdf_anomaly.Anomaly)]
    self.assertEqual(1, len(anomalies))
    anom = anomalies[0]
    self.assertEqual("Dodgy entries in /etc/at.allow.", anom.symptom)
    self.assertEqual(sorted(["user2 user3", "hi hello"]), sorted(anom.finding))
    self.assertEqual(pathspec, anom.reference_pathspec)
    self.assertEqual("PARSER_ANOMALY", anom.type)
class NtpParserTests(test_lib.GRRBaseTest):
  """Test the ntp.conf parser."""

  def testParseNtpConfig(self):
    test_data = r"""
    # Time servers
    server 1.2.3.4 iburst
    server 4.5.6.7 iburst
    server 8.9.10.11 iburst
    server pool.ntp.org iburst
    server 2001:1234:1234:2::f iburst

    # Drift file
    driftfile /var/lib/ntp/ntp.drift

    restrict default nomodify noquery nopeer

    # Guard against monlist NTP reflection attacks.
    disable monitor

    # Enable the creation of a peerstats file
    enable stats
    statsdir /var/log/ntpstats
    filegen peerstats file peerstats type day link enable

    # Test only.
    ttl 127 88
    broadcastdelay 0.01
    """
    conffile = StringIO.StringIO(test_data)
    parser = config_file.NtpdParser()
    results = list(parser.Parse(None, conffile, None))
    # We expect some results.
    self.assertTrue(results)
    # There should be only one result.
    self.assertEqual(1, len(results))
    # Now that we are sure, just use that single result for easy of reading.
    results = results[0]
    # Check all the expected "simple" config keywords are present.
    expected_config_keywords = set(
        ["driftfile", "statsdir", "filegen", "ttl",
         "broadcastdelay"]) | set(parser._defaults.keys())
    self.assertEqual(expected_config_keywords, set(results.config.keys()))
    # Check all the expected "keyed" config keywords are present.
    self.assertTrue(results.server)
    self.assertTrue(results.restrict)
    # And check one that isn't in the config, isn't in out result.
    self.assertFalse(results.trap)
    # Check we got all the "servers".
    servers = ["1.2.3.4", "4.5.6.7", "8.9.10.11", "pool.ntp.org",
               "2001:1234:1234:2::f"]
    self.assertItemsEqual(servers, [r.address for r in results.server])
    # In our test data, they all have "iburst" as an arg. Check that is found.
    for r in results.server:
      self.assertEqual("iburst", r.options)
    # Check a few values were parsed correctly.
    self.assertEqual("/var/lib/ntp/ntp.drift", results.config["driftfile"])
    self.assertEqual("/var/log/ntpstats", results.config["statsdir"])
    self.assertEqual("peerstats file peerstats type day link enable",
                     results.config["filegen"])
    self.assertEqual(1, len(results.restrict))
    self.assertEqual("default", results.restrict[0].address)
    self.assertEqual("nomodify noquery nopeer", results.restrict[0].options)
    # A option that can have a list of integers.
    self.assertEqual([127, 88], results.config["ttl"])
    # An option that should only have a single float.
    self.assertEqual([0.01], results.config["broadcastdelay"])
    # Check the modified defaults: "disable monitor" / "enable stats".
    self.assertFalse(results.config["monitor"])
    self.assertTrue(results.config["stats"])
    # Check an unlisted defaults are unmodified.
    self.assertFalse(results.config["kernel"])
    self.assertTrue(results.config["auth"])
def main(args):
  """Entry point: delegate to the GRR test runner."""
  test_lib.main(args)

if __name__ == "__main__":
  flags.StartMain(main)
| |
#!/usr/bin/env python
"""Django model to DOT (Graphviz) converter
by Antonio Cavedoni <antonio@cavedoni.org>
Make sure your DJANGO_SETTINGS_MODULE is set to your project or
place this script in the same directory of the project and call
the script like this:
$ python modelviz.py [-h] [-a] [-d] [-g] [-i <model_names>] <app_label> ... <app_label> > <filename>.dot
$ dot <filename>.dot -Tpng -o <filename>.png
options:
-h, --help
show this help message and exit.
-a, --all_applications
show models from all applications.
-d, --disable_fields
don't show the class member fields.
-g, --group_models
draw an enclosing box around models from the same app.
-i, --include_models=User,Person,Car
only include selected models in graph.
"""
__version__ = "0.9"
__svnid__ = "$Id$"
__license__ = "Python"
__author__ = "Antonio Cavedoni <http://cavedoni.com/>"
__contributors__ = [
"Stefano J. Attardi <http://attardi.org/>",
"limodou <http://www.donews.net/limodou/>",
"Carlo C8E Miron",
"Andre Campos <cahenan@gmail.com>",
"Justin Findlay <jfindlay@gmail.com>",
"Alexander Houben <alexander@houben.ch>",
"Bas van Oostveen <v.oostveen@gmail.com>",
]
import getopt, sys
from django.core.management import setup_environ
from django.utils.encoding import mark_safe
try:
import settings
except ImportError:
pass
else:
setup_environ(settings)
from django.template import Template, Context
from django.db import models
from django.db.models import get_models
from django.db.models.fields.related import \
ForeignKey, OneToOneField, ManyToManyField
try:
from django.db.models.fields.generic import GenericRelation
except ImportError:
from django.contrib.contenttypes.generic import GenericRelation
head_template = """
digraph name {
fontname = "Helvetica"
fontsize = 8
node [
fontname = "Helvetica"
fontsize = 8
shape = "plaintext"
]
edge [
fontname = "Helvetica"
fontsize = 8
]
"""
body_template = """
{% if use_subgraph %}
subgraph {{ cluster_app_name }} {
label=<
<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER"
><FONT FACE="Helvetica Bold" COLOR="Black" POINT-SIZE="12"
>{{ app_name }}</FONT></TD></TR>
</TABLE>
>
color=olivedrab4
style="rounded"
{% endif %}
{% for model in models %}
{{ model.app_name }}_{{ model.name }} [label=<
<TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4"
><FONT FACE="Helvetica Bold" COLOR="white"
>{{ model.name }}</FONT></TD></TR>
{% if not disable_fields %}
{% for field in model.fields %}
<TR><TD ALIGN="LEFT" BORDER="0"
><FONT {% if field.blank %}COLOR="#7B7B7B" {% endif %}FACE="Helvetica Bold">{{ field.name }}</FONT
></TD>
<TD ALIGN="LEFT"
><FONT {% if field.blank %}COLOR="#7B7B7B" {% endif %}FACE="Helvetica Bold">{{ field.type }}</FONT
></TD></TR>
{% endfor %}
{% endif %}
</TABLE>
>]
{% endfor %}
{% if use_subgraph %}
}
{% endif %}
"""
rel_template = """
{% for model in models %}
{% for relation in model.relations %}
{% if relation.needs_node %}
{{ relation.target }} [label=<
<TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0">
<TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4"
><FONT FACE="Helvetica Bold" COLOR="white"
>{{ relation.target }}</FONT></TD></TR>
</TABLE>
>]
{% endif %}
{{ model.app_name }}_{{ model.name }} -> {{ relation.target_app }}_{{ relation.target }}
[label="{{ relation.name }}"] {{ relation.arrows }};
{% endfor %}
{% endfor %}
"""
tail_template = """
}
"""
def generate_dot(app_labels, **kwargs):
    """Return a Graphviz DOT description of the models in `app_labels`.

    Keyword options mirror the command-line flags:
    `disable_fields`, `include_models`, `all_applications`, `group_models`.
    """
    disable_fields = kwargs.get('disable_fields', False)
    include_models = kwargs.get('include_models', [])
    all_applications = kwargs.get('all_applications', False)
    use_subgraph = kwargs.get('group_models', False)

    dot = head_template

    apps = []
    if all_applications:
        apps = models.get_apps()

    # Explicit app labels are appended after (and deduplicated against)
    # the all-applications list.
    for app_label in app_labels:
        app = models.get_app(app_label)
        if not app in apps:
            apps.append(app)

    graphs = []
    for app in apps:
        graph = Context({
            'name': '"%s"' % app.__name__,
            'app_name': "%s" % app.__name__.rsplit('.', 1)[0],
            'cluster_app_name': "cluster_%s" % app.__name__.replace(".", "_"),
            'disable_fields': disable_fields,
            'use_subgraph': use_subgraph,
            'models': []
        })

        for appmodel in get_models(app):
            model = {
                'app_name': app.__name__.replace(".", "_"),
                'name': appmodel.__name__,
                'fields': [],
                'relations': []
            }

            # consider given model name ?
            def consider(model_name):
                return not include_models or model_name in include_models

            if not consider(appmodel._meta.object_name):
                continue

            # model attributes
            # NOTE: closes over the loop variable `field` assigned below.
            def add_attributes():
                model['fields'].append({
                    'name': field.name,
                    'type': type(field).__name__,
                    'blank': field.blank
                })

            for field in appmodel._meta.fields:
                add_attributes()

            if appmodel._meta.many_to_many:
                for field in appmodel._meta.many_to_many:
                    add_attributes()

            # relations
            # NOTE(review): also closes over `field`; `extras` carries the
            # Graphviz arrow attributes for the relation type.
            def add_relation(extras=""):
                _rel = {
                    'target_app': field.rel.to.__module__.replace('.','_'),
                    'target': field.rel.to.__name__,
                    'type': type(field).__name__,
                    'name': field.name,
                    'arrows': extras,
                    'needs_node': True
                }
                if _rel not in model['relations'] and consider(_rel['target']):
                    model['relations'].append(_rel)

            for field in appmodel._meta.fields:
                if isinstance(field, ForeignKey):
                    add_relation()
                elif isinstance(field, OneToOneField):
                    add_relation('[arrowhead=none arrowtail=none]')

            if appmodel._meta.many_to_many:
                for field in appmodel._meta.many_to_many:
                    if isinstance(field, ManyToManyField):
                        add_relation('[arrowhead=normal arrowtail=normal]')
                    elif isinstance(field, GenericRelation):
                        add_relation(mark_safe('[style="dotted"] [arrowhead=normal arrowtail=normal]'))
            graph['models'].append(model)
        graphs.append(graph)

    # Collect the names of every model that will be drawn, so relation
    # targets already in the graph don't get duplicate placeholder nodes.
    nodes = []
    for graph in graphs:
        nodes.extend([e['name'] for e in graph['models']])
    for graph in graphs:
        # don't draw duplication nodes because of relations
        for model in graph['models']:
            for relation in model['relations']:
                if relation['target'] in nodes:
                    relation['needs_node'] = False
        # render templates
        t = Template(body_template)
        dot += '\n' + t.render(graph)

    for graph in graphs:
        t = Template(rel_template)
        dot += '\n' + t.render(graph)

    dot += '\n' + tail_template

    return dot
def main():
    """Parse command-line options and print the DOT graph to stdout."""
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hadgi:",
                    ["help", "all_applications", "disable_fields", "group_models", "include_models="])
    except getopt.GetoptError, error:
        print __doc__
        sys.exit(error)

    kwargs = {}
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print __doc__
            sys.exit()
        if opt in ("-a", "--all_applications"):
            kwargs['all_applications'] = True
        if opt in ("-d", "--disable_fields"):
            kwargs['disable_fields'] = True
        if opt in ("-g", "--group_models"):
            kwargs['group_models'] = True
        if opt in ("-i", "--include_models"):
            kwargs['include_models'] = arg.split(',')

    # No explicit app labels and no -a flag: nothing to graph, show usage.
    if not args and not kwargs.get('all_applications', False):
        print __doc__
        sys.exit()
    print generate_dot(args, **kwargs)

if __name__ == "__main__":
    main()
| |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Create a blockwise lower-triangular operator from `LinearOperators`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_algebra
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorBlockLowerTriangular"]
@tf_export("linalg.LinearOperatorBlockLowerTriangular")
class LinearOperatorBlockLowerTriangular(linear_operator.LinearOperator):
"""Combines `LinearOperators` into a blockwise lower-triangular matrix.
This operator is initialized with a nested list of linear operators, which
are combined into a new `LinearOperator` whose underlying matrix
representation is square and has each operator on or below the main diagonal,
and zero's elsewhere. Each element of the outer list is a list of
`LinearOperators` corresponding to a row-partition of the blockwise structure.
The number of `LinearOperator`s in row-partition `i` must be equal to `i`.
For example, a blockwise `3 x 3` `LinearOperatorBlockLowerTriangular` is
initialized with the list `[[op_00], [op_10, op_11], [op_20, op_21, op_22]]`,
where the `op_ij`, `i < 3, j <= i`, are `LinearOperator` instances. The
`LinearOperatorBlockLowerTriangular` behaves as the following blockwise
matrix, where `0` represents appropriately-sized [batch] matrices of zeros:
```none
[[op_00, 0, 0],
[op_10, op_11, 0],
[op_20, op_21, op_22]]
```
Each `op_jj` on the diagonal is required to represent a square matrix, and
hence will have shape `batch_shape_j + [M_j, M_j]`. `LinearOperator`s in row
`j` of the blockwise structure must have `range_dimension` equal to that of
`op_jj`, and `LinearOperators` in column `j` must have `domain_dimension`
equal to that of `op_jj`.
If each `op_jj` on the diagonal has shape `batch_shape_j + [M_j, M_j]`, then
the combined operator has shape `broadcast_batch_shape + [sum M_j, sum M_j]`,
where `broadcast_batch_shape` is the mutual broadcast of `batch_shape_j`,
`j = 0, 1, ..., J`, assuming the intermediate batch shapes broadcast.
Even if the combined shape is well defined, the combined operator's
methods may fail due to lack of broadcasting ability in the defining
operators' methods.
For example, to create a 4 x 4 linear operator combined of three 2 x 2
operators:
>>> operator_0 = tf.linalg.LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
>>> operator_1 = tf.linalg.LinearOperatorFullMatrix([[1., 0.], [0., 1.]])
>>> operator_2 = tf.linalg.LinearOperatorLowerTriangular([[5., 6.], [7., 8]])
>>> operator = LinearOperatorBlockLowerTriangular(
... [[operator_0], [operator_1, operator_2]])
>>> operator.to_dense()
<tf.Tensor: shape=(4, 4), dtype=float32, numpy=
array([[1., 2., 0., 0.],
[3., 4., 0., 0.],
[1., 0., 5., 0.],
[0., 1., 7., 8.]], dtype=float32)>
>>> operator.shape
TensorShape([4, 4])
>>> operator.log_abs_determinant()
<tf.Tensor: shape=(), dtype=float32, numpy=4.3820267>
>>> x0 = [[1., 6.], [-3., 4.]]
>>> x1 = [[0., 2.], [4., 0.]]
>>> x = tf.concat([x0, x1], 0) # Shape [2, 4] Tensor
>>> operator.matmul(x)
<tf.Tensor: shape=(4, 2), dtype=float32, numpy=
array([[-5., 14.],
[-9., 34.],
[ 1., 16.],
[29., 18.]], dtype=float32)>
The above `matmul` is equivalent to:
>>> tf.concat([operator_0.matmul(x0),
... operator_1.matmul(x0) + operator_2.matmul(x1)], axis=0)
<tf.Tensor: shape=(4, 2), dtype=float32, numpy=
array([[-5., 14.],
[-9., 34.],
[ 1., 16.],
[29., 18.]], dtype=float32)>
#### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [M, N], with b >= 0
x.shape = [B1,...,Bb] + [N, R], with R >= 0.
```
For example:
Create a [2, 3] batch of 4 x 4 linear operators:
>>> matrix_44 = tf.random.normal(shape=[2, 3, 4, 4])
>>> operator_44 = tf.linalg.LinearOperatorFullMatrix(matrix_44)
Create a [1, 3] batch of 5 x 4 linear operators:
>>> matrix_54 = tf.random.normal(shape=[1, 3, 5, 4])
>>> operator_54 = tf.linalg.LinearOperatorFullMatrix(matrix_54)
Create a [1, 3] batch of 5 x 5 linear operators:
>>> matrix_55 = tf.random.normal(shape=[1, 3, 5, 5])
>>> operator_55 = tf.linalg.LinearOperatorFullMatrix(matrix_55)
Combine to create a [2, 3] batch of 9 x 9 operators:
>>> operator_99 = LinearOperatorBlockLowerTriangular(
... [[operator_44], [operator_54, operator_55]])
>>> operator_99.shape
TensorShape([2, 3, 9, 9])
Create a shape [2, 1, 9] batch of vectors and apply the operator to it.
>>> x = tf.random.normal(shape=[2, 1, 9])
>>> y = operator_99.matvec(x)
>>> y.shape
TensorShape([2, 3, 9])
Create a blockwise list of vectors and apply the operator to it. A blockwise
list is returned.
>>> x4 = tf.random.normal(shape=[2, 1, 4])
>>> x5 = tf.random.normal(shape=[2, 3, 5])
>>> y_blockwise = operator_99.matvec([x4, x5])
>>> y_blockwise[0].shape
TensorShape([2, 3, 4])
>>> y_blockwise[1].shape
TensorShape([2, 3, 5])
#### Performance
Suppose `operator` is a `LinearOperatorBlockLowerTriangular` consisting of `D`
row-partitions and `D` column-partitions, such that the total number of
operators is `N = D * (D + 1) // 2`.
* `operator.matmul` has complexity equal to the sum of the `matmul`
complexities of the individual operators.
* `operator.solve` has complexity equal to the sum of the `solve` complexities
of the operators on the diagonal and the `matmul` complexities of the
operators off the diagonal.
* `operator.determinant` has complexity equal to the sum of the `determinant`
complexities of the operators on the diagonal.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
             operators,
             is_non_singular=None,
             is_self_adjoint=None,
             is_positive_definite=None,
             is_square=None,
             name="LinearOperatorBlockLowerTriangular"):
  r"""Initialize a `LinearOperatorBlockLowerTriangular`.

  `LinearOperatorBlockLowerTriangular` is initialized with a list of lists of
  operators `[[op_0], [op_1, op_2], [op_3, op_4, op_5],...]`.

  Args:
    operators: Iterable of iterables of `LinearOperator` objects, each with
      the same `dtype`. Each element of `operators` corresponds to a row-
      partition, in top-to-bottom order. The operators in each row-partition
      are filled in left-to-right. For example,
      `operators = [[op_0], [op_1, op_2], [op_3, op_4, op_5]]` creates a
      `LinearOperatorBlockLowerTriangular` with full block structure
      `[[op_0, 0, 0], [op_1, op_2, 0], [op_3, op_4, op_5]]`. The number of
      operators in the `i`th row must be equal to `i`, such that each
      operator falls on or below the diagonal of the blockwise structure.
      `LinearOperator`s that fall on the diagonal (the last elements of each
      row) must be square. The other `LinearOperator`s must have domain
      dimension equal to the domain dimension of the `LinearOperator`s in
      the same column-partition, and range dimension equal to the range
      dimension of the `LinearOperator`s in the same row-partition.
    is_non_singular: Expect that this operator is non-singular.
    is_self_adjoint: Expect that this operator is equal to its hermitian
      transpose.
    is_positive_definite: Expect that this operator is positive definite,
      meaning the quadratic form `x^H A x` has positive real part for all
      nonzero `x`. Note that we do not require the operator to be
      self-adjoint to be positive-definite. See:
      https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
    is_square: Expect that this operator acts like square [batch] matrices.
      This will raise a `ValueError` if set to `False`.
    name: A name for this `LinearOperator`.

  Raises:
    TypeError: If all operators do not have the same `dtype`.
    ValueError: If `operators` is empty, contains an erroneous number of
      elements, or contains operators with incompatible shapes.
  """
  # Validate operators.
  check_ops.assert_proper_iterable(operators)
  for row in operators:
    check_ops.assert_proper_iterable(row)
  # Materialize each row as a list so it can be indexed repeatedly below.
  operators = [list(row) for row in operators]
  if not operators:
    raise ValueError(
        "Expected a non-empty list of operators. Found: {}".format(operators))
  self._operators = operators
  # The final operator in each row-partition lies on the main diagonal.
  self._diagonal_operators = [row[-1] for row in operators]
  # dtype of the first operator is the reference; _validate_dtype checks
  # that every other operator agrees with it.
  dtype = operators[0][0].dtype
  self._validate_dtype(dtype)
  is_non_singular = self._validate_non_singular(is_non_singular)
  self._validate_num_operators()
  self._validate_operator_dimensions()
  is_square = self._validate_square(is_square)
  with ops.name_scope(name):
    super(LinearOperatorBlockLowerTriangular, self).__init__(
        dtype=dtype,
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square,
        name=name)
def _validate_num_operators(self):
  """Check that row-partition `i` (zero-based) holds `i + 1` operators.

  Raises:
    ValueError: If any row of `self.operators` has the wrong length.
  """
  for i, row in enumerate(self.operators):
    if len(row) != i + 1:
      # State both the expected and the actual count, using one-based row
      # numbering consistently, so the message is unambiguous. The original
      # message mixed zero-based wording with a one-based row number and
      # never stated the expected count.
      raise ValueError(
          "Row-partition {} of `operators` must contain {} blocks "
          "(`LinearOperator` instances), but contains {} blocks.".format(
              i + 1, i + 1, len(row)))
def _validate_operator_dimensions(self):
  """Check that `operators` have compatible dimensions.

  Each off-diagonal operator must share its `domain_dimension` with the
  operator directly above it and its `range_dimension` with the operator
  directly to its right.

  Raises:
    ValueError: If any pair of adjacent operators has mismatched dimensions.
  """
  # Row 0 has a single (diagonal) block, so start from row 1; only
  # off-diagonal blocks (j < i) need checking against their neighbors.
  for i in range(1, len(self.operators)):
    for j in range(i):
      op = self.operators[i][j]
      # `above_op` is the operator directly above `op` in the blockwise
      # structure, in row partition `i-1`, column partition `j`. `op` should
      # have the same `domain_dimension` as `above_op`.
      above_op = self.operators[i - 1][j]
      # `right_op` is the operator to the right of `op` in the blockwise
      # structure, in row partition `i`, column partition `j+1`. `op` should
      # have the same `range_dimension` as `right_op`.
      right_op = self.operators[i][j + 1]
      # Dimensions may be statically unknown (None); only compare when both
      # sides are known.
      if (op.domain_dimension is not None and
          above_op.domain_dimension is not None):
        if op.domain_dimension != above_op.domain_dimension:
          raise ValueError(
              "Operator domain dimensions {} and {} must be equal to fit a "
              "blockwise structure.".format(
                  op.domain_dimension, above_op.domain_dimension))
      if (op.range_dimension is not None and
          right_op.range_dimension is not None):
        if op.range_dimension != right_op.range_dimension:
          raise ValueError(
              "Operator range dimensions {} and {} must be equal to fit a "
              "blockwise structure.".format(
                  op.range_dimension, right_op.range_dimension))
# pylint: disable=g-bool-id-comparison
def _validate_non_singular(self, is_non_singular):
  """Reconcile the `is_non_singular` hint with the diagonal operators.

  A blockwise triangular operator is non-singular exactly when every
  diagonal block is non-singular, so conclusive diagonal hints override
  the caller-supplied hint.

  Args:
    is_non_singular: Caller-provided hint (`True`, `False`, or `None`).

  Returns:
    The reconciled hint.

  Raises:
    ValueError: If the caller's hint contradicts the diagonal operators.
  """
  if all(op.is_non_singular for op in self._diagonal_operators):
    if is_non_singular is False:
      raise ValueError(
          "A blockwise lower-triangular operator with non-singular "
          "operators on the main diagonal is always non-singular.")
    return True
  if any(op.is_non_singular is False for op in self._diagonal_operators):
    if is_non_singular is True:
      raise ValueError(
          "A blockwise lower-triangular operator with a singular operator "
          "on the main diagonal is always singular.")
    return False
  # Diagonal hints are inconclusive: preserve whatever the caller asserted
  # rather than silently dropping it (the original fell through to `None`).
  return is_non_singular
def _validate_square(self, is_square):
  """Verify that this operator may be treated as square; returns `True`.

  Raises:
    ValueError: If `is_square` is explicitly `False`, or any diagonal
      block is known to be non-square.
  """
  if is_square is False:
    raise ValueError("`LinearOperatorBlockLowerTriangular` must be square.")
  # Every diagonal block must be square for the blockwise structure to be.
  non_square_diagonals = [
      op for op in self._diagonal_operators if op.is_square is False]
  if non_square_diagonals:
    raise ValueError(
        "Matrices on the diagonal (the final elements of each row-partition "
        "in the `operators` list) must be square.")
  return True
# pylint: enable=g-bool-id-comparison
def _validate_dtype(self, dtype):
  """Check that every operator has the expected `dtype`.

  Args:
    dtype: The reference `DType` (that of `operators[0][0]`).

  Raises:
    TypeError: If any operator has a different dtype.
  """
  for i, row in enumerate(self.operators):
    for operator in row:
      if operator.dtype != dtype:
        # Materialize the (name, dtype) pairs as a list: the original used
        # a generator expression, which `str.format` renders as
        # `<generator object ...>` instead of the offending operators.
        name_type = [str((o.name, o.dtype)) for o in row]
        raise TypeError(
            "Expected all operators to have the same dtype. Found {} in row "
            "{} and {} in row 0.".format(name_type, i, str(dtype)))
@property
def operators(self):
  """The nested list of `LinearOperator` row-partitions, top to bottom."""
  return self._operators
def _block_range_dimensions(self):
  # Static range `Dimension` of each diagonal block, top to bottom.
  return [op.range_dimension for op in self._diagonal_operators]
def _block_domain_dimensions(self):
  # Static domain `Dimension` of each diagonal block, top to bottom.
  return [op.domain_dimension for op in self._diagonal_operators]
def _block_range_dimension_tensors(self):
  # Dynamic (graph-time) range dimension of each diagonal block.
  return [op.range_dimension_tensor() for op in self._diagonal_operators]
def _block_domain_dimension_tensors(self):
  # Dynamic (graph-time) domain dimension of each diagonal block.
  return [op.domain_dimension_tensor() for op in self._diagonal_operators]
def _shape(self):
  """Static shape: broadcast batch shape + [sum domain, sum range]."""
  # Final matrix shape: the diagonal blocks' dimensions sum along each axis.
  matrix_shape = tensor_shape.TensorShape(
      [sum(self._block_domain_dimensions()),
       sum(self._block_range_dimensions())])
  # Fold every operator's batch shape into one broadcast batch shape;
  # `broadcast_shape` raises if any pair is incompatible.
  batch_shape = self.operators[0][0].batch_shape
  remaining_operators = [op for row in self.operators[1:] for op in row]
  for op in remaining_operators:
    batch_shape = common_shapes.broadcast_shape(batch_shape, op.batch_shape)
  return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
  """Dynamic shape `Tensor`; prefers the static shape when fully known."""
  # Cheap path: skip dynamic broadcasting if the static shape is complete.
  if self.shape.is_fully_defined():
    return ops.convert_to_tensor(
        self.shape.as_list(), dtype=dtypes.int32, name="shape")
  matrix_shape = array_ops.stack(
      [sum(self._block_domain_dimension_tensors()),
       sum(self._block_range_dimension_tensors())])
  # Fold every operator's dynamic batch shape into one broadcast shape.
  batch_shape = self.operators[0][0].batch_shape_tensor()
  remaining_operators = [op for row in self.operators[1:] for op in row]
  for op in remaining_operators:
    batch_shape = array_ops.broadcast_dynamic_shape(
        batch_shape, op.batch_shape_tensor())
  return array_ops.concat((batch_shape, matrix_shape), 0)
def matmul(self, x, adjoint=False, adjoint_arg=False, name="matmul"):
  """Transform [batch] matrix `x` with left multiplication: `x --> Ax`.

  ```python
  # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
  operator = LinearOperator(...)
  operator.shape = [..., M, N]

  X = ... # shape [..., N, R], batch matrix, R > 0.

  Y = operator.matmul(X)
  Y.shape
  ==> [..., M, R]

  Y[..., :, r] = sum_j A[..., :, j] X[j, r]
  ```

  Args:
    x: `LinearOperator`, `Tensor` with compatible shape and same `dtype` as
      `self`, or a blockwise iterable of `LinearOperator`s or `Tensor`s. See
      class docstring for definition of shape compatibility.
    adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.
    adjoint_arg: Python `bool`. If `True`, compute `A x^H` where `x^H` is
      the hermitian transpose (transposition and complex conjugation).
    name: A name for this `Op`.

  Returns:
    A `LinearOperator` or `Tensor` with shape `[..., M, R]` and same `dtype`
    as `self`, or if `x` is blockwise, a list of `Tensor`s with shapes that
    concatenate to `[..., M, R]`.
  """
  # Operator-operator product: delegate to the algebra registry so that a
  # (possibly structured) LinearOperator is returned instead of a Tensor.
  if isinstance(x, linear_operator.LinearOperator):
    left_operator = self.adjoint() if adjoint else self
    right_operator = x.adjoint() if adjoint_arg else x

    if (right_operator.range_dimension is not None and
        left_operator.domain_dimension is not None and
        right_operator.range_dimension != left_operator.domain_dimension):
      raise ValueError(
          "Operators are incompatible. Expected `x` to have dimension"
          " {} but got {}.".format(
              left_operator.domain_dimension, right_operator.range_dimension))
    with self._name_scope(name):
      return linear_operator_algebra.matmul(left_operator, right_operator)

  with self._name_scope(name):
    arg_dim = -1 if adjoint_arg else -2
    block_dimensions = (self._block_range_dimensions() if adjoint
                        else self._block_domain_dimensions())
    if linear_operator_util.arg_is_blockwise(block_dimensions, x, arg_dim):
      # Blockwise input: normalize each block to a Tensor and check its
      # dtype and block dimension; blocks stay separate through _matmul.
      for i, block in enumerate(x):
        if not isinstance(block, linear_operator.LinearOperator):
          block = ops.convert_to_tensor(block)
          self._check_input_dtype(block)
          block_dimensions[i].assert_is_compatible_with(block.shape[arg_dim])
          x[i] = block
    else:
      # Single dense input: validate dtype and overall dimension.
      x = ops.convert_to_tensor(x, name="x")
      self._check_input_dtype(x)
      op_dimension = (self.range_dimension if adjoint
                      else self.domain_dimension)
      op_dimension.assert_is_compatible_with(x.shape[arg_dim])
    return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
  """Blockwise matmul: returns a list for blockwise `x`, else a Tensor."""
  arg_dim = -1 if adjoint_arg else -2
  block_dimensions = (self._block_range_dimensions() if adjoint
                      else self._block_domain_dimensions())
  blockwise_arg = linear_operator_util.arg_is_blockwise(
      block_dimensions, x, arg_dim)
  if blockwise_arg:
    split_x = x
  else:
    split_dim = -1 if adjoint_arg else -2
    # Split input by columns if adjoint_arg is True, else rows
    split_x = linear_operator_util.split_arg_into_blocks(
        self._block_domain_dimensions(),
        self._block_domain_dimension_tensors,
        x, axis=split_dim)

  result_list = []
  # Iterate over row-partitions (i.e. column-partitions of the adjoint).
  if adjoint:
    for index in range(len(self.operators)):
      # Begin with the operator on the diagonal and apply it to the
      # respective `rhs` block.
      result = self.operators[index][index].matmul(
          split_x[index], adjoint=adjoint, adjoint_arg=adjoint_arg)

      # Iterate top to bottom over the operators in the remainder of the
      # column-partition (i.e. left to right over the row-partition of the
      # adjoint), apply the operator to the respective `rhs` block and
      # accumulate the sum. For example, given the
      # `LinearOperatorBlockLowerTriangular`:
      #
      # op = [[A, 0, 0],
      #       [B, C, 0],
      #       [D, E, F]]
      #
      # if `index = 1`, the following loop calculates:
      # `y_1 = (C.matmul(x_1, adjoint=adjoint) +
      #         E.matmul(x_2, adjoint=adjoint)`,
      # where `x_1` and `x_2` are splits of `x`.
      for j in range(index + 1, len(self.operators)):
        result += self.operators[j][index].matmul(
            split_x[j], adjoint=adjoint, adjoint_arg=adjoint_arg)
      result_list.append(result)
  else:
    for row in self.operators:
      # Begin with the left-most operator in the row-partition and apply it
      # to the first `rhs` block.
      result = row[0].matmul(
          split_x[0], adjoint=adjoint, adjoint_arg=adjoint_arg)
      # Iterate left to right over the operators in the remainder of the row
      # partition, apply the operator to the respective `rhs` block, and
      # accumulate the sum.
      for j, operator in enumerate(row[1:]):
        result += operator.matmul(
            split_x[j + 1], adjoint=adjoint, adjoint_arg=adjoint_arg)
      result_list.append(result)

  # Blockwise callers get the per-block results; dense callers get the
  # batch-broadcast blocks concatenated back into one Tensor.
  if blockwise_arg:
    return result_list

  result_list = linear_operator_util.broadcast_matrix_batch_dims(
      result_list)
  return array_ops.concat(result_list, axis=-2)
def matvec(self, x, adjoint=False, name="matvec"):
  """Transform [batch] vector `x` with left multiplication: `x --> Ax`.

  ```python
  # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
  operator = LinearOperator(...)

  X = ... # shape [..., N], batch vector

  Y = operator.matvec(X)
  Y.shape
  ==> [..., M]

  Y[..., :] = sum_j A[..., :, j] X[..., j]
  ```

  Args:
    x: `Tensor` with compatible shape and same `dtype` as `self`, or an
      iterable of `Tensor`s. `Tensor`s are treated as [batch] vectors,
      meaning for every set of leading dimensions, the last dimension
      defines a vector.
      See class docstring for definition of compatibility.
    adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.
    name: A name for this `Op`.

  Returns:
    A `Tensor` with shape `[..., M]` and same `dtype` as `self`, or if `x`
    is blockwise, a list of `Tensor`s with shapes that concatenate to
    `[..., M]`.
  """
  with self._name_scope(name):
    block_dimensions = (self._block_range_dimensions() if adjoint
                        else self._block_domain_dimensions())
    if linear_operator_util.arg_is_blockwise(block_dimensions, x, -1):
      # Blockwise input: validate each block, then reduce matvec to matmul
      # by appending a singleton column axis to every block.
      for i, block in enumerate(x):
        if not isinstance(block, linear_operator.LinearOperator):
          block = ops.convert_to_tensor(block)
          self._check_input_dtype(block)
          block_dimensions[i].assert_is_compatible_with(block.shape[-1])
          x[i] = block
      x_mat = [block[..., array_ops.newaxis] for block in x]
      y_mat = self.matmul(x_mat, adjoint=adjoint)
      return [array_ops.squeeze(y, axis=-1) for y in y_mat]

    # Dense input: same column-axis trick on the single Tensor.
    x = ops.convert_to_tensor(x, name="x")
    self._check_input_dtype(x)
    op_dimension = (self.range_dimension if adjoint
                    else self.domain_dimension)
    op_dimension.assert_is_compatible_with(x.shape[-1])
    x_mat = x[..., array_ops.newaxis]
    y_mat = self.matmul(x_mat, adjoint=adjoint)
    return array_ops.squeeze(y_mat, axis=-1)
def _determinant(self):
  """Determinant: the product of the diagonal blocks' determinants."""
  if all(op.is_positive_definite for op in self._diagonal_operators):
    # When the diagonal blocks are positive definite, exponentiating the
    # summed log-determinants matches the original code path.
    return math_ops.exp(self._log_abs_determinant())
  determinants = [op.determinant() for op in self._diagonal_operators]
  result = determinants[0]
  for determinant in determinants[1:]:
    result *= determinant
  return result
def _log_abs_determinant(self):
  """Log |det|: sum of the diagonal blocks' log-abs-determinants."""
  log_determinants = [
      op.log_abs_determinant() for op in self._diagonal_operators]
  result = log_determinants[0]
  for log_determinant in log_determinants[1:]:
    result += log_determinant
  return result
def solve(self, rhs, adjoint=False, adjoint_arg=False, name="solve"):
  """Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`.

  The returned `Tensor` will be close to an exact solution if `A` is well
  conditioned. Otherwise closeness will vary. See class docstring for
  details.

  Given the blockwise `n + 1`-by-`n + 1` linear operator:

  op = [[A_00     0  ...     0  ...    0],
        [A_10  A_11  ...     0  ...    0],
        ...
        [A_k0  A_k1  ...  A_kk  ...    0],
        ...
        [A_n0  A_n1  ...  A_nk  ... A_nn]]

  we find `x = op.solve(y)` by observing that

  `y_k = A_k0.matmul(x_0) + A_k1.matmul(x_1) + ... + A_kk.matmul(x_k)`

  and therefore

  `x_k = A_kk.solve(y_k -
                    A_k0.matmul(x_0) - ... - A_k(k-1).matmul(x_(k-1)))`

  where `x_k` and `y_k` are the `k`th blocks obtained by decomposing `x`
  and `y` along their appropriate axes.

  We first solve `x_0 = A_00.solve(y_0)`. Proceeding inductively, we solve
  for `x_k`, `k = 1..n`, given `x_0..x_(k-1)`.

  The adjoint case is solved similarly, beginning with
  `x_n = A_nn.solve(y_n, adjoint=True)` and proceeding backwards.

  Examples:

  ```python
  # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
  operator = LinearOperator(...)
  operator.shape = [..., M, N]

  # Solve R > 0 linear systems for every member of the batch.
  RHS = ... # shape [..., M, R]

  X = operator.solve(RHS)
  # X[..., :, r] is the solution to the r'th linear system
  # sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r]

  operator.matmul(X)
  ==> RHS
  ```

  Args:
    rhs: `Tensor` with same `dtype` as this operator and compatible shape,
      or a list of `Tensor`s. `Tensor`s are treated like a [batch] matrices
      meaning for every set of leading dimensions, the last two dimensions
      defines a matrix.
      See class docstring for definition of compatibility.
    adjoint: Python `bool`. If `True`, solve the system involving the
      adjoint of this `LinearOperator`: `A^H X = rhs`.
    adjoint_arg: Python `bool`. If `True`, solve `A X = rhs^H` where `rhs^H`
      is the hermitian transpose (transposition and complex conjugation).
    name: A name scope to use for ops added by this method.

  Returns:
    `Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`, or if `rhs`
    is blockwise, a list of `Tensor`s with shapes that concatenate to
    `[..., N, R]`.

  Raises:
    NotImplementedError: If `self.is_non_singular` or `is_square` is False.
  """
  if self.is_non_singular is False:
    raise NotImplementedError(
        "Exact solve not implemented for an operator that is expected to "
        "be singular.")
  if self.is_square is False:
    raise NotImplementedError(
        "Exact solve not implemented for an operator that is expected to "
        "not be square.")
  # Operator rhs: delegate to the algebra registry, which may return a
  # structured LinearOperator instead of a dense Tensor.
  if isinstance(rhs, linear_operator.LinearOperator):
    left_operator = self.adjoint() if adjoint else self
    right_operator = rhs.adjoint() if adjoint_arg else rhs

    if (right_operator.range_dimension is not None and
        left_operator.domain_dimension is not None and
        right_operator.range_dimension != left_operator.domain_dimension):
      raise ValueError(
          "Operators are incompatible. Expected `rhs` to have dimension"
          " {} but got {}.".format(
              left_operator.domain_dimension, right_operator.range_dimension))
    with self._name_scope(name):
      return linear_operator_algebra.solve(left_operator, right_operator)

  with self._name_scope(name):
    block_dimensions = (self._block_domain_dimensions() if adjoint
                        else self._block_range_dimensions())
    arg_dim = -1 if adjoint_arg else -2
    blockwise_arg = linear_operator_util.arg_is_blockwise(
        block_dimensions, rhs, arg_dim)

    if blockwise_arg:
      # Blockwise input: validate each block and take adjoints blockwise
      # if requested.
      for i, block in enumerate(rhs):
        if not isinstance(block, linear_operator.LinearOperator):
          block = ops.convert_to_tensor(block)
          self._check_input_dtype(block)
          block_dimensions[i].assert_is_compatible_with(block.shape[arg_dim])
          rhs[i] = block
      if adjoint_arg:
        split_rhs = [linalg.adjoint(y) for y in rhs]
      else:
        split_rhs = rhs
    else:
      # Dense input: validate, take the adjoint if requested, then split
      # into row blocks matching the diagonal operators' dimensions.
      rhs = ops.convert_to_tensor(rhs, name="rhs")
      self._check_input_dtype(rhs)
      op_dimension = (self.domain_dimension if adjoint
                      else self.range_dimension)
      op_dimension.assert_is_compatible_with(rhs.shape[arg_dim])

      rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
      split_rhs = linear_operator_util.split_arg_into_blocks(
          self._block_domain_dimensions(),
          self._block_domain_dimension_tensors,
          rhs, axis=-2)

    solution_list = []
    if adjoint:
      # For an adjoint blockwise lower-triangular linear operator, the
      # system must be solved bottom to top. Iterate backwards over rows of
      # the adjoint (i.e. columns of the non-adjoint operator).
      for index in reversed(range(len(self.operators))):
        y = split_rhs[index]
        # Iterate top to bottom over the operators in the off-diagonal
        # portion of the column-partition (i.e. row-partition of the
        # adjoint), apply the operator to the respective block of the
        # solution found in previous iterations, and subtract the result
        # from the `rhs` block. For example, let `A`, `B`, and `D` be the
        # linear operators in the top row-partition of the adjoint of
        # `LinearOperatorBlockLowerTriangular([[A], [B, C], [D, E, F]])`,
        # and `x_1` and `x_2` be blocks of the solution found in previous
        # iterations of the outer loop. The following loop
        # (when `index == 0`) expresses
        # `Ax_0 + Bx_1 + Dx_2 = y_0` as `Ax_0 = y_0*`, where
        # `y_0* = y_0 - Bx_1 - Dx_2`.
        for j in reversed(range(index + 1, len(self.operators))):
          # `solution_list` is built in reverse order here, so the solution
          # for column `j` lives at position `len(self.operators) - 1 - j`.
          y -= self.operators[j][index].matmul(
              solution_list[len(self.operators) - 1 - j],
              adjoint=adjoint)
        # Continuing the example above, solve `Ax_0 = y_0*` for `x_0`.
        solution_list.append(
            self._diagonal_operators[index].solve(y, adjoint=adjoint))
      solution_list.reverse()
    else:
      # Iterate top to bottom over the row-partitions.
      for row, y in zip(self.operators, split_rhs):
        # Iterate left to right over the operators in the off-diagonal
        # portion of the row-partition, apply the operator to the block of
        # the solution found in previous iterations, and subtract the
        # result from the `rhs` block. For example, let `D`, `E`, and `F`
        # be the linear operators in the bottom row-partition of
        # `LinearOperatorBlockLowerTriangular([[A], [B, C], [D, E, F]])`
        # and `x_0` and `x_1` be blocks of the solution found in previous
        # iterations of the outer loop. The following loop
        # (when `index == 2`), expresses
        # `Dx_0 + Ex_1 + Fx_2 = y_2` as `Fx_2 = y_2*`, where
        # `y_2* = y_2 - Dx_0 - Ex_1`.
        for i, operator in enumerate(row[:-1]):
          y -= operator.matmul(solution_list[i], adjoint=adjoint)
        # Continuing the example above, solve `Fx_2 = y_2*` for `x_2`.
        solution_list.append(row[-1].solve(y, adjoint=adjoint))

    if blockwise_arg:
      return solution_list

    solution_list = linear_operator_util.broadcast_matrix_batch_dims(
        solution_list)
    return array_ops.concat(solution_list, axis=-2)
def solvevec(self, rhs, adjoint=False, name="solve"):
  """Solve single equation with best effort: `A X = rhs`.

  The returned `Tensor` will be close to an exact solution if `A` is well
  conditioned. Otherwise closeness will vary. See class docstring for
  details.

  Examples:

  ```python
  # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
  operator = LinearOperator(...)
  operator.shape = [..., M, N]

  # Solve one linear system for every member of the batch.
  RHS = ... # shape [..., M]

  X = operator.solvevec(RHS)
  # X is the solution to the linear system
  # sum_j A[..., :, j] X[..., j] = RHS[..., :]

  operator.matvec(X)
  ==> RHS
  ```

  Args:
    rhs: `Tensor` with same `dtype` as this operator, or list of `Tensor`s
      (for blockwise operators). `Tensor`s are treated as [batch] vectors,
      meaning for every set of leading dimensions, the last dimension
      defines a vector. See class docstring for definition of compatibility
      regarding batch dimensions.
    adjoint: Python `bool`. If `True`, solve the system involving the
      adjoint of this `LinearOperator`: `A^H X = rhs`.
    name: A name scope to use for ops added by this method.

  Returns:
    `Tensor` with shape `[...,N]` and same `dtype` as `rhs`, or if `rhs` is
    blockwise, a list of `Tensor`s with shapes that concatenate to
    `[..., N]`.

  Raises:
    NotImplementedError: If `self.is_non_singular` or `is_square` is False.
  """
  with self._name_scope(name):
    block_dimensions = (self._block_domain_dimensions() if adjoint
                        else self._block_range_dimensions())
    if linear_operator_util.arg_is_blockwise(block_dimensions, rhs, -1):
      # Blockwise input: validate each block, then reduce solvevec to solve
      # by appending a singleton column axis to every block.
      for i, block in enumerate(rhs):
        if not isinstance(block, linear_operator.LinearOperator):
          block = ops.convert_to_tensor(block)
          self._check_input_dtype(block)
          block_dimensions[i].assert_is_compatible_with(block.shape[-1])
          rhs[i] = block
      rhs_mat = [array_ops.expand_dims(block, axis=-1) for block in rhs]
      solution_mat = self.solve(rhs_mat, adjoint=adjoint)
      return [array_ops.squeeze(x, axis=-1) for x in solution_mat]

    # Dense input: same column-axis trick on the single Tensor.
    rhs = ops.convert_to_tensor(rhs, name="rhs")
    self._check_input_dtype(rhs)
    op_dimension = (self.domain_dimension if adjoint
                    else self.range_dimension)
    op_dimension.assert_is_compatible_with(rhs.shape[-1])
    rhs_mat = array_ops.expand_dims(rhs, axis=-1)
    solution_mat = self.solve(rhs_mat, adjoint=adjoint)
    return array_ops.squeeze(solution_mat, axis=-1)
def _diag_part(self):
  """Concatenation of the diagonal blocks' diagonal parts."""
  # Extend the axis, since `broadcast_matrix_batch_dims` treats all but the
  # final two dimensions as batch dimensions.
  diag_list = [
      op.diag_part()[..., array_ops.newaxis]
      for op in self._diagonal_operators]
  diag_list = linear_operator_util.broadcast_matrix_batch_dims(diag_list)
  stacked_diagonal = array_ops.concat(diag_list, axis=-2)
  return array_ops.squeeze(stacked_diagonal, axis=-1)
def _trace(self):
  """Trace: the sum of the diagonal blocks' traces."""
  block_traces = [op.trace() for op in self._diagonal_operators]
  result = block_traces[0]
  for block_trace in block_traces[1:]:
    result += block_trace
  return result
def _to_dense(self):
  """Dense [batch] matrix: blocks in place, zeros above the diagonal."""
  num_cols = 0
  dense_rows = []
  # Broadcast all blocks to a common batch shape, flattened in row-major
  # order over the triangular structure.
  flat_broadcast_operators = linear_operator_util.broadcast_matrix_batch_dims(
      [op.to_dense() for row in self.operators for op in row])  # pylint: disable=g-complex-comprehension
  # Row i starts at triangular-number offset i*(i+1)/2 and holds i+1 blocks.
  broadcast_operators = [
      flat_broadcast_operators[i * (i + 1) // 2:(i + 1) * (i + 2) // 2]
      for i in range(len(self.operators))]
  for row_blocks in broadcast_operators:
    batch_row_shape = array_ops.shape(row_blocks[0])[:-1]
    # `num_cols` accumulates the width covered so far; the rest of this
    # dense row is padded with zeros (the upper-triangular part).
    num_cols += array_ops.shape(row_blocks[-1])[-1]
    zeros_to_pad_after_shape = array_ops.concat(
        [batch_row_shape,
         [self.domain_dimension_tensor() - num_cols]], axis=-1)
    zeros_to_pad_after = array_ops.zeros(
        shape=zeros_to_pad_after_shape, dtype=self.dtype)
    row_blocks.append(zeros_to_pad_after)
    dense_rows.append(array_ops.concat(row_blocks, axis=-1))
  mat = array_ops.concat(dense_rows, axis=-2)
  mat.set_shape(self.shape)
  return mat
def _assert_non_singular(self):
  """Groups the non-singularity assertions of all diagonal blocks."""
  assertions = [
      op.assert_non_singular() for op in self._diagonal_operators]
  return control_flow_ops.group(assertions)
def _eigvals(self):
  """Concatenation of the diagonal blocks' eigenvalues."""
  # Extend the axis for broadcasting: `broadcast_matrix_batch_dims` treats
  # all but the final two dimensions as batch dimensions.
  eig_list = [
      op.eigvals()[..., array_ops.newaxis]
      for op in self._diagonal_operators]
  eig_list = linear_operator_util.broadcast_matrix_batch_dims(eig_list)
  stacked_eigs = array_ops.concat(eig_list, axis=-2)
  return array_ops.squeeze(stacked_eigs, axis=-1)
| |
import os
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard import exceptions
DEBUG = False
TEMPLATE_DEBUG = DEBUG
COMPRESS_OFFLINE = True
# NOTE(review): '*' disables Django's Host-header validation; this assumes an
# upstream proxy filters the Host header -- confirm for this deployment.
ALLOWED_HOSTS = ['*']
# Set SSL proxy settings:
# For Django 1.4+ pass this header from the proxy after terminating the SSL,
# and don't forget to strip it from the client's request.
# For more information see:
# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
# If Horizon is being served through SSL, then uncomment the following two
# settings to better secure the cookies from security exploits
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
# Overrides for OpenStack API versions. Use this setting to force the
# OpenStack dashboard to use a specific API version for a given service API.
# NOTE: The version should be formatted as it appears in the URL for the
# service API. For example, The identity service APIs have inconsistent
# use of the decimal point, so valid options would be "2.0" or "3".
OPENSTACK_API_VERSIONS = {
    {% if horizon.keystone_api_version == 3 -%}
    "identity": 3,
    {% else %}
    # "identity": 3,
    {% endif %}
    "volume": 2
}
# Set this to True if running on multi-domain model. When this is enabled, it
# will require user to enter the Domain name in addition to username for login.
# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
# Overrides the default domain used when running on single-domain model
# with Keystone V3. All entities will be created in the default domain.
# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
# Default OpenStack Dashboard configuration.
HORIZON_CONFIG = {
    'dashboards': ('project', 'admin', 'settings',),
    'default_dashboard': 'project',
    'user_home': 'openstack_dashboard.views.get_user_home',
    'ajax_queue_limit': 10,
    'auto_fade_alerts': {
        'delay': 3000,
        'fade_duration': 1500,
        'types': ['alert-success', 'alert-info']
    },
    'help_url': "http://docs.openstack.org",
    'exceptions': {'recoverable': exceptions.RECOVERABLE,
                   'not_found': exceptions.NOT_FOUND,
                   'unauthorized': exceptions.UNAUTHORIZED},
}
# Specify a regular expression to validate user passwords.
# HORIZON_CONFIG["password_validator"] = {
# "regex": '.*',
# "help_text": _("Your password does not meet the requirements.")
# }
# Disable simplified floating IP address management for deployments with
# multiple floating IP pools or complex network requirements.
# HORIZON_CONFIG["simple_ip_management"] = False
# Turn off browser autocompletion for the login form if so desired.
# HORIZON_CONFIG["password_autocomplete"] = "off"
LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
# Set custom secret key:
# You can either set it to a specific value or you can let horizon generate a
# default secret key that is unique on this machine, i.e. regardless of the
# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, there
# may be situations where you would want to set this explicitly, e.g. when
# multiple dashboard instances are distributed on different machines (usually
# behind a load-balancer). Either you have to make sure that a session gets all
# requests routed to the same dashboard instance or you set the same SECRET_KEY
# for all of them.
from horizon.utils import secret_key
# NOTE(review): secret_key appears unused because SECRET_KEY is rendered from
# the deployment template below -- confirm before removing the import.
SECRET_KEY = "{{ secrets.horizon_secret_key }}"
{% macro memcached_hosts() -%}
{% for host in groups['controller'] -%}
{% if loop.last -%}
'{{ hostvars[host][primary_interface]['ipv4']['address'] }}:{{ memcached.port }}'
{%- else -%}
'{{ hostvars[host][primary_interface]['ipv4']['address'] }}:{{ memcached.port }}',
{%- endif -%}
{% endfor -%}
{% endmacro -%}
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
    'default': {
        'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION' : [
            {{ memcached_hosts() }}
        ]
    }
}
# Send email to the console by default
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Or send them to /dev/null
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
# Configure these for your outgoing email host
# EMAIL_HOST = 'smtp.my-company.com'
# EMAIL_PORT = 25
# EMAIL_HOST_USER = 'djangomail'
# EMAIL_HOST_PASSWORD = 'top-secret!'
# For multiple regions uncomment this configuration, and add (endpoint, title).
# AVAILABLE_REGIONS = [
# ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
# ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
# ]
{% if horizon.keystone_api_version == 3 -%}
AVAILABLE_REGIONS = [
    ('https://{{ endpoints.main }}:5001/v3', 'RegionOne'),
]
{% endif -%}
OPENSTACK_HOST = "{{ endpoints.main }}"
OPENSTACK_KEYSTONE_URL = "https://%s:5001/v{{horizon.keystone_api_version}}" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "service"
SESSION_TIMEOUT = {{ horizon.session_timeout }}
# Disable SSL certificate checks (useful for self-signed certificates):
OPENSTACK_SSL_NO_VERIFY = True
# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
# capabilities of the auth backend for Keystone.
# If Keystone has been configured to use LDAP as the auth backend then set
# can_edit_user to False and name to 'ldap'.
#
# TODO(tres): Remove these once Keystone has an API to identify auth backend.
OPENSTACK_KEYSTONE_BACKEND = {
    'name': 'native',
    'can_edit_user': True,
    'can_edit_project': True,
    'can_edit_domain': True
}
OPENSTACK_HYPERVISOR_FEATURES = {
    'can_set_mount_point': False,
}
# The OPENSTACK_QUANTUM_NETWORK settings can be used to enable optional
# services provided by quantum. Currently only the load balancer service
# is available.
OPENSTACK_NEUTRON_NETWORK = {
    'enable_lb': False,
    'enable_quotas': True
}
# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
# in the Keystone service catalog. Use this setting when Horizon is running
# external to the OpenStack environment. The default is 'internalURL'.
#OPENSTACK_ENDPOINT_TYPE = "publicURL"
# The number of objects (Swift containers/objects or images) to display
# on a single page before providing a paging element (a "more" link)
# to paginate results.
API_RESULT_LIMIT = 1000
API_RESULT_PAGE_SIZE = 20
# The timezone of the server. This should correspond with the timezone
# of your entire OpenStack installation, and hopefully be in UTC.
TIME_ZONE = "UTC"
LOGGING = {
    'version': 1,
    # When set to True this will disable all logging except
    # for loggers specified in this configuration dictionary. Note that
    # if nothing is specified here and disable_existing_loggers is True,
    # django.db.backends will still log unless it is disabled explicitly.
    'disable_existing_loggers': False,
    'handlers': {
        'null': {
            'level': 'DEBUG',
            'class': 'django.utils.log.NullHandler',
        },
        'console': {
            # Set the level to "DEBUG" for verbose output logging.
            'level': 'INFO',
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        # Logging from django.db.backends is VERY verbose, send to null
        # by default.
        'django.db.backends': {
            'handlers': ['null'],
            'propagate': False,
        },
        'requests': {
            'handlers': ['null'],
            'propagate': False,
        },
        'horizon': {
            'handlers': ['console'],
            'propagate': False,
        },
        'openstack_dashboard': {
            'handlers': ['console'],
            'propagate': False,
        },
        'novaclient': {
            'handlers': ['console'],
            'propagate': False,
        },
        'cinderclient': {
            'handlers': ['console'],
            'propagate': False,
        },
        'keystoneclient': {
            'handlers': ['console'],
            'propagate': False,
        },
        'glanceclient': {
            'handlers': ['console'],
            'propagate': False,
        },
        'nose.plugins.manager': {
            'handlers': ['console'],
            'propagate': False,
        }
    }
}
from openstack_dashboard.settings import STATICFILES_DIRS, STATIC_ROOT
{% if openstack_install_method == 'package' %}
STATIC_ROOT='/opt/bbc/openstack-{{ openstack_package_version }}/horizon/static'
{% else %}
STATIC_ROOT='/opt/stack/horizon/static'
{% endif %}
STATICFILES_DIRS.append(('/etc/openstack-dashboard/static'))
| |
"""Test OpenZWave Websocket API."""
from openzwavemqtt.const import (
ATTR_CODE_SLOT,
ATTR_LABEL,
ATTR_OPTIONS,
ATTR_POSITION,
ATTR_VALUE,
ValueType,
)
from homeassistant.components.ozw.const import ATTR_CONFIG_PARAMETER
from homeassistant.components.ozw.lock import ATTR_USERCODE
from homeassistant.components.ozw.websocket_api import (
ATTR_IS_AWAKE,
ATTR_IS_BEAMING,
ATTR_IS_FAILED,
ATTR_IS_FLIRS,
ATTR_IS_ROUTING,
ATTR_IS_SECURITYV1,
ATTR_IS_ZWAVE_PLUS,
ATTR_NEIGHBORS,
ATTR_NODE_BASIC_STRING,
ATTR_NODE_BAUD_RATE,
ATTR_NODE_GENERIC_STRING,
ATTR_NODE_QUERY_STAGE,
ATTR_NODE_SPECIFIC_STRING,
ID,
NODE_ID,
OZW_INSTANCE,
PARAMETER,
TYPE,
VALUE,
)
from homeassistant.components.websocket_api.const import (
ERR_INVALID_FORMAT,
ERR_NOT_FOUND,
ERR_NOT_SUPPORTED,
)
from .common import MQTTMessage, setup_ozw
from tests.async_mock import patch
async def test_websocket_api(hass, generic_data, hass_ws_client):
    """Test the ozw websocket api.

    Exercises the full command surface (instances, network/node status,
    statistics, metadata, config parameters) against the generic fixture
    network, checking both success and error responses. The websocket IDs
    are arbitrary but must be unique per command on one connection.
    """
    await setup_ozw(hass, fixture=generic_data)
    client = await hass_ws_client(hass)
    # Test instance list
    await client.send_json({ID: 4, TYPE: "ozw/get_instances"})
    msg = await client.receive_json()
    assert len(msg["result"]) == 1
    result = msg["result"][0]
    assert result[OZW_INSTANCE] == 1
    assert result["Status"] == "driverAllNodesQueried"
    assert result["OpenZWave_Version"] == "1.6.1008"
    # Test network status
    await client.send_json({ID: 5, TYPE: "ozw/network_status"})
    msg = await client.receive_json()
    result = msg["result"]
    assert result["Status"] == "driverAllNodesQueried"
    assert result[OZW_INSTANCE] == 1
    # Test node status
    await client.send_json({ID: 6, TYPE: "ozw/node_status", NODE_ID: 32})
    msg = await client.receive_json()
    result = msg["result"]
    assert result[OZW_INSTANCE] == 1
    assert result[NODE_ID] == 32
    assert result[ATTR_NODE_QUERY_STAGE] == "Complete"
    assert result[ATTR_IS_ZWAVE_PLUS]
    assert result[ATTR_IS_AWAKE]
    assert not result[ATTR_IS_FAILED]
    assert result[ATTR_NODE_BAUD_RATE] == 100000
    assert result[ATTR_IS_BEAMING]
    assert not result[ATTR_IS_FLIRS]
    assert result[ATTR_IS_ROUTING]
    assert not result[ATTR_IS_SECURITYV1]
    assert result[ATTR_NODE_BASIC_STRING] == "Routing Slave"
    assert result[ATTR_NODE_GENERIC_STRING] == "Binary Switch"
    assert result[ATTR_NODE_SPECIFIC_STRING] == "Binary Power Switch"
    assert result[ATTR_NEIGHBORS] == [1, 33, 36, 37, 39]
    # Unknown node id must yield a not-found error, not a result.
    await client.send_json({ID: 7, TYPE: "ozw/node_status", NODE_ID: 999})
    msg = await client.receive_json()
    result = msg["error"]
    assert result["code"] == ERR_NOT_FOUND
    # Test node statistics
    await client.send_json({ID: 8, TYPE: "ozw/node_statistics", NODE_ID: 39})
    msg = await client.receive_json()
    result = msg["result"]
    assert result[OZW_INSTANCE] == 1
    assert result[NODE_ID] == 39
    assert result["send_count"] == 57
    assert result["sent_failed"] == 0
    assert result["retries"] == 1
    assert result["last_request_rtt"] == 26
    assert result["last_response_rtt"] == 38
    assert result["average_request_rtt"] == 29
    assert result["average_response_rtt"] == 37
    assert result["received_packets"] == 3594
    assert result["received_dup_packets"] == 12
    assert result["received_unsolicited"] == 3546
    # Test node metadata
    await client.send_json({ID: 9, TYPE: "ozw/node_metadata", NODE_ID: 39})
    msg = await client.receive_json()
    result = msg["result"]
    assert result["metadata"]["ProductPic"] == "images/aeotec/zwa002.png"
    await client.send_json({ID: 10, TYPE: "ozw/node_metadata", NODE_ID: 999})
    msg = await client.receive_json()
    result = msg["error"]
    assert result["code"] == ERR_NOT_FOUND
    # Test network statistics
    await client.send_json({ID: 11, TYPE: "ozw/network_statistics"})
    msg = await client.receive_json()
    result = msg["result"]
    assert result["readCnt"] == 92220
    assert result[OZW_INSTANCE] == 1
    assert result["node_count"] == 5
    # Test get nodes
    await client.send_json({ID: 12, TYPE: "ozw/get_nodes"})
    msg = await client.receive_json()
    result = msg["result"]
    assert len(result) == 5
    assert result[2][ATTR_IS_AWAKE]
    assert not result[1][ATTR_IS_FAILED]
    # Test get config parameters
    await client.send_json({ID: 13, TYPE: "ozw/get_config_parameters", NODE_ID: 39})
    msg = await client.receive_json()
    result = msg["result"]
    assert len(result) == 8
    for config_param in result:
        assert config_param["type"] in (
            ValueType.LIST.value,
            ValueType.BOOL.value,
            ValueType.INT.value,
            ValueType.BYTE.value,
            ValueType.SHORT.value,
            ValueType.BITSET.value,
        )
    # Test set config parameter
    config_param = result[0]
    current_val = config_param[ATTR_VALUE]
    # Pick a list option whose value differs from the current one, then a
    # distinct label, so both set-by-value and set-by-label are exercised.
    new_val = next(
        option["Value"]
        for option in config_param[ATTR_OPTIONS]
        if option["Label"] != current_val
    )
    new_label = next(
        option["Label"]
        for option in config_param[ATTR_OPTIONS]
        if option["Label"] != current_val and option["Value"] != new_val
    )
    await client.send_json(
        {
            ID: 14,
            TYPE: "ozw/set_config_parameter",
            NODE_ID: 39,
            PARAMETER: config_param[ATTR_CONFIG_PARAMETER],
            VALUE: new_val,
        }
    )
    msg = await client.receive_json()
    assert msg["success"]
    await client.send_json(
        {
            ID: 15,
            TYPE: "ozw/set_config_parameter",
            NODE_ID: 39,
            PARAMETER: config_param[ATTR_CONFIG_PARAMETER],
            VALUE: new_label,
        }
    )
    msg = await client.receive_json()
    assert msg["success"]
    # Test OZW Instance not found error
    await client.send_json(
        {ID: 16, TYPE: "ozw/get_config_parameters", OZW_INSTANCE: 999, NODE_ID: 1}
    )
    msg = await client.receive_json()
    result = msg["error"]
    assert result["code"] == ERR_NOT_FOUND
    # Test OZW Node not found error
    await client.send_json(
        {
            ID: 18,
            TYPE: "ozw/set_config_parameter",
            NODE_ID: 999,
            PARAMETER: 0,
            VALUE: "test",
        }
    )
    msg = await client.receive_json()
    result = msg["error"]
    assert result["code"] == ERR_NOT_FOUND
    # Test parameter not found
    await client.send_json(
        {
            ID: 19,
            TYPE: "ozw/set_config_parameter",
            NODE_ID: 39,
            PARAMETER: 45,
            VALUE: "test",
        }
    )
    msg = await client.receive_json()
    result = msg["error"]
    assert result["code"] == ERR_NOT_FOUND
    # Test list value not found
    await client.send_json(
        {
            ID: 20,
            TYPE: "ozw/set_config_parameter",
            NODE_ID: 39,
            PARAMETER: config_param[ATTR_CONFIG_PARAMETER],
            VALUE: "test",
        }
    )
    msg = await client.receive_json()
    result = msg["error"]
    assert result["code"] == ERR_NOT_FOUND
    # Test value type invalid
    await client.send_json(
        {
            ID: 21,
            TYPE: "ozw/set_config_parameter",
            NODE_ID: 39,
            PARAMETER: 3,
            VALUE: 0,
        }
    )
    msg = await client.receive_json()
    result = msg["error"]
    assert result["code"] == ERR_NOT_SUPPORTED
    # Test invalid bitset format
    await client.send_json(
        {
            ID: 22,
            TYPE: "ozw/set_config_parameter",
            NODE_ID: 39,
            PARAMETER: 3,
            VALUE: {ATTR_POSITION: 1, ATTR_VALUE: True, ATTR_LABEL: "test"},
        }
    )
    msg = await client.receive_json()
    result = msg["error"]
    assert result["code"] == ERR_INVALID_FORMAT
    # Test valid bitset format passes validation
    # (parameter 10000 does not exist, so the reply is NOT_FOUND rather than
    # INVALID_FORMAT -- proving the bitset payload itself validated).
    await client.send_json(
        {
            ID: 23,
            TYPE: "ozw/set_config_parameter",
            NODE_ID: 39,
            PARAMETER: 10000,
            VALUE: {ATTR_POSITION: 1, ATTR_VALUE: True},
        }
    )
    msg = await client.receive_json()
    result = msg["error"]
    assert result["code"] == ERR_NOT_FOUND
async def test_ws_locks(hass, lock_data, hass_ws_client):
    """Test lock websocket apis."""
    await setup_ozw(hass, fixture=lock_data)
    ws_client = await hass_ws_client(hass)

    async def _send_and_expect_success(payload):
        # Every lock command here is expected to succeed against the fixture.
        await ws_client.send_json(payload)
        reply = await ws_client.receive_json()
        assert reply["success"]

    await _send_and_expect_success(
        {
            ID: 1,
            TYPE: "ozw/get_code_slots",
            NODE_ID: 10,
        }
    )
    await _send_and_expect_success(
        {
            ID: 2,
            TYPE: "ozw/set_usercode",
            NODE_ID: 10,
            ATTR_CODE_SLOT: 1,
            ATTR_USERCODE: "1234",
        }
    )
    await _send_and_expect_success(
        {
            ID: 3,
            TYPE: "ozw/clear_usercode",
            NODE_ID: 10,
            ATTR_CODE_SLOT: 1,
        }
    )
async def test_refresh_node(hass, generic_data, sent_messages, hass_ws_client):
    """Test the ozw refresh node api.

    Subscribing via refresh_node_info should forward node-update events for
    the requested node only; updates for other nodes must be filtered out.
    """
    receive_message = await setup_ozw(hass, fixture=generic_data)
    client = await hass_ws_client(hass)
    # Send the refresh_node_info command
    await client.send_json({ID: 9, TYPE: "ozw/refresh_node_info", NODE_ID: 39})
    msg = await client.receive_json()
    # Exactly one MQTT command should have been published by the subscription.
    assert len(sent_messages) == 1
    assert msg["success"]
    # Receive a mock status update from OZW
    message = MQTTMessage(
        topic="OpenZWave/1/node/39/",
        payload={"NodeID": 39, "NodeQueryStage": "initializing"},
    )
    message.encode()
    receive_message(message)
    # Verify we got expected data on the websocket
    msg = await client.receive_json()
    result = msg["event"]
    assert result["type"] == "node_updated"
    assert result["node_query_stage"] == "initializing"
    # Send another mock status update from OZW
    message = MQTTMessage(
        topic="OpenZWave/1/node/39/",
        payload={"NodeID": 39, "NodeQueryStage": "versions"},
    )
    message.encode()
    receive_message(message)
    # Send a mock status update for a different node
    message = MQTTMessage(
        topic="OpenZWave/1/node/35/",
        payload={"NodeID": 35, "NodeQueryStage": "fake_shouldnt_be_received"},
    )
    message.encode()
    receive_message(message)
    # Verify we received the message for node 39 but not for node 35
    # (if node 35's update leaked through, it would arrive first here).
    msg = await client.receive_json()
    result = msg["event"]
    assert result["type"] == "node_updated"
    assert result["node_query_stage"] == "versions"
async def test_refresh_node_unsubscribe(hass, generic_data, hass_ws_client):
    """Test unsubscribing the ozw refresh node api."""
    await setup_ozw(hass, fixture=generic_data)
    ws_client = await hass_ws_client(hass)
    with patch("openzwavemqtt.OZWOptions.listen") as listen_mock:
        # Subscribe to node updates via the refresh_node_info command.
        await ws_client.send_json({ID: 9, TYPE: "ozw/refresh_node_info", NODE_ID: 39})
        await ws_client.receive_json()
        # Tear the subscription down again.
        await ws_client.send_json({ID: 10, TYPE: "unsubscribe_events", "subscription": 9})
        await ws_client.receive_json()
        # The unsubscribe callback registered with OZWOptions.listen ran.
        assert listen_mock.return_value.called
| |
# This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from androguard.core import bytecode
from androguard.core import androconf
from androguard.core.bytecodes.dvm_permissions import DVM_PERMISSIONS
from androguard.util import read
import StringIO
from struct import pack, unpack
from xml.sax.saxutils import escape
from zlib import crc32
import re
from xml.dom import minidom
NS_ANDROID_URI = 'http://schemas.android.com/apk/res/android'
# Which zip implementation the APK class uses by default:
# 0: chilkat
# 1: default python zipfile module
# 2: patch zipfile module
ZIPMODULE = 1

import sys
if sys.hexversion < 0x2070000:
    # On Python < 2.7, prefer the third-party chilkat zip implementation when
    # it is installed; fall back to stdlib zipfile otherwise.
    try:
        import chilkat
        ZIPMODULE = 0
        # UNLOCK : change it with your valid key !
        try:
            CHILKAT_KEY = read("key.txt")
        except Exception:
            # No key file present -- use the chilkat trial key.
            CHILKAT_KEY = "testme"
    except ImportError:
        ZIPMODULE = 1
else:
    ZIPMODULE = 1
################################################### CHILKAT ZIP FORMAT #####################################################
class ChilkatZip(object):
    """Zip accessor backed by the chilkat library (Python < 2.7 fallback).

    Exposes the subset of a zipfile-like interface used by :class:`APK`
    (``namelist``/``read``) plus in-memory modification helpers.
    """

    def __init__(self, raw):
        """Open a zip archive from the raw bytes *raw*."""
        self.files = []
        self.zip = chilkat.CkZip()

        self.zip.UnlockComponent(CHILKAT_KEY)
        self.zip.OpenFromMemory(raw, len(raw))

        # Walk the entry list once and cache the file names.
        filename = chilkat.CkString()
        e = self.zip.FirstEntry()
        # PEP 8: compare against None with `is`, not `==`/`!=`.
        while e is not None:
            e.get_FileName(filename)
            self.files.append(filename.getString())
            e = e.NextEntry()

    def delete(self, patterns):
        """Delete every entry whose name matches the regex *patterns*."""
        el = []
        filename = chilkat.CkString()
        e = self.zip.FirstEntry()
        while e is not None:
            e.get_FileName(filename)
            if re.match(patterns, filename.getString()) is not None:
                el.append(e)
            e = e.NextEntry()
        # Collect first, delete after: deleting while iterating would
        # invalidate the entry cursor.
        for i in el:
            self.zip.DeleteEntry(i)

    def remplace_file(self, filename, buff):
        """Replace the content of *filename* with *buff*.

        :return: the chilkat status on success, False if the entry is missing
        """
        entry = self.zip.GetEntryByName(filename)
        if entry is not None:
            obj = chilkat.CkByteData()
            obj.append2(buff, len(buff))
            return entry.ReplaceData(obj)
        return False

    def write(self):
        """Serialize the (possibly modified) archive back to bytes."""
        obj = chilkat.CkByteData()
        self.zip.WriteToMemory(obj)
        return obj.getBytes()

    def namelist(self):
        """Return the list of file names in the archive."""
        return self.files

    def read(self, elem):
        """Return the decompressed content of the entry named *elem*."""
        e = self.zip.GetEntryByName(elem)
        s = chilkat.CkByteData()
        e.Inflate(s)
        return s.getBytes()
def sign_apk(filename, keystore, storepass):
    """Sign an APK in place by invoking jarsigner.

    :param filename: path of the APK to sign
    :param keystore: path of the Java keystore holding the signing key
    :param storepass: keystore password

    The jarsigner binary is taken from androconf.CONF["PATH_JARSIGNER"].
    """
    from subprocess import Popen, PIPE, STDOUT
    # NOTE: the previous code bound this to `compile`, shadowing the builtin,
    # and unpacked communicate() into (stdout, stderr) even though
    # stderr=STDOUT guarantees the second element is always None.
    proc = Popen([androconf.CONF["PATH_JARSIGNER"],
                  "-sigalg",
                  "MD5withRSA",
                  "-digestalg",
                  "SHA1",
                  "-storepass",
                  storepass,
                  "-keystore",
                  keystore,
                  filename,
                  "alias_name"],
                 stdout=PIPE, stderr=STDOUT)
    # stderr is merged into stdout; output is intentionally discarded.
    stdout, _ = proc.communicate()
######################################################## APK FORMAT ########################################################
class APK(object):
    """
    This class can access to all elements in an APK file

    :param filename: specify the path of the file, or raw data
    :param raw: specify if the filename is a path or raw data (optional)
    :param mode: specify the mode to open the file (optional)
    :param magic_file: specify the magic file (optional)
    :param zipmodule: specify the type of zip module to use (0:chilkat, 1:zipfile, 2:patch zipfile)

    :type filename: string
    :type raw: boolean
    :type mode: string
    :type magic_file: string
    :type zipmodule: int

    :Example:
      APK("myfile.apk")
      APK(read("myfile.apk"), raw=True)
    """
    def __init__(self, filename, raw=False, mode="r", magic_file=None, zipmodule=ZIPMODULE):
        self.filename = filename
        # Parsed XML documents, AXML printers and resource tables, keyed by
        # the file name inside the archive.
        self.xml = {}
        self.axml = {}
        self.arsc = {}
        self.package = ""
        self.androidversion = {}
        self.permissions = []
        self.valid_apk = False
        # name -> magic type string / crc32, filled lazily by get_files_types.
        self.files = {}
        self.files_crc32 = {}
        self.magic_file = magic_file
        if raw == True:
            self.__raw = filename
        else:
            self.__raw = read(filename)
        # Pick the zip backend (see module-level ZIPMODULE).
        self.zipmodule = zipmodule
        if zipmodule == 0:
            self.zip = ChilkatZip(self.__raw)
        elif zipmodule == 2:
            from androguard.patch import zipfile
            self.zip = zipfile.ZipFile(StringIO.StringIO(self.__raw), mode=mode)
        else:
            import zipfile
            self.zip = zipfile.ZipFile(StringIO.StringIO(self.__raw), mode=mode)
        # Parse the binary AndroidManifest.xml and extract package name,
        # version info and requested permissions.
        for i in self.zip.namelist():
            if i == "AndroidManifest.xml":
                self.axml[i] = AXMLPrinter(self.zip.read(i))
                try:
                    self.xml[i] = minidom.parseString(self.axml[i].get_buff())
                except:
                    # Malformed/undecodable manifest: keep the APK object but
                    # mark the entry unusable (valid_apk stays False).
                    self.xml[i] = None
                if self.xml[i] != None:
                    self.package = self.xml[i].documentElement.getAttribute("package")
                    self.androidversion["Code"] = self.xml[i].documentElement.getAttributeNS(NS_ANDROID_URI, "versionCode")
                    self.androidversion["Name"] = self.xml[i].documentElement.getAttributeNS(NS_ANDROID_URI, "versionName")
                    for item in self.xml[i].getElementsByTagName('uses-permission'):
                        self.permissions.append(str(item.getAttributeNS(NS_ANDROID_URI, "name")))
                    self.valid_apk = True
        self.get_files_types()
    def get_AndroidManifest(self):
        """
        Return the Android Manifest XML file

        :rtype: xml object
        """
        return self.xml["AndroidManifest.xml"]
    def is_valid_APK(self):
        """
        Return true if the APK is valid, false otherwise

        :rtype: boolean
        """
        return self.valid_apk
    def get_filename(self):
        """
        Return the filename of the APK

        :rtype: string
        """
        return self.filename
    def get_package(self):
        """
        Return the name of the package

        :rtype: string
        """
        return self.package
    def get_androidversion_code(self):
        """
        Return the android version code

        :rtype: string
        """
        return self.androidversion["Code"]
    def get_androidversion_name(self):
        """
        Return the android version name

        :rtype: string
        """
        return self.androidversion["Name"]
    def get_files(self):
        """
        Return the files inside the APK

        :rtype: a list of strings
        """
        return self.zip.namelist()
    def get_files_types(self):
        """
        Return the files inside the APK with their associated types (by using python-magic)

        :rtype: a dictionary
        """
        try:
            import magic
        except ImportError:
            # no lib magic ! Record crc32 anyway and mark types as unknown.
            for i in self.get_files():
                buffer = self.zip.read(i)
                self.files_crc32[i] = crc32(buffer)
                self.files[i] = "Unknown"
            return self.files
        # Cached from a previous call.
        if self.files != {}:
            return self.files
        # Distinguish the two incompatible `magic` bindings: python-magic
        # defines MagicException, the file(1) binding does not.
        builtin_magic = 0
        try:
            getattr(magic, "MagicException")
        except AttributeError:
            builtin_magic = 1
        if builtin_magic:
            ms = magic.open(magic.MAGIC_NONE)
            ms.load()
            for i in self.get_files():
                buffer = self.zip.read(i)
                self.files[i] = ms.buffer(buffer)
                self.files[i] = self._patch_magic(buffer, self.files[i])
                self.files_crc32[i] = crc32(buffer)
        else:
            m = magic.Magic(magic_file=self.magic_file)
            for i in self.get_files():
                buffer = self.zip.read(i)
                self.files[i] = m.from_buffer(buffer)
                self.files[i] = self._patch_magic(buffer, self.files[i])
                self.files_crc32[i] = crc32(buffer)
        return self.files
    def _patch_magic(self, buffer, orig):
        """Refine generic magic results for Android-specific formats."""
        if ("Zip" in orig) or ("DBase" in orig):
            val = androconf.is_android_raw(buffer)
            if val == "APK":
                if androconf.is_valid_android_raw(buffer):
                    return "Android application package file"
            elif val == "AXML":
                return "Android's binary XML"
        return orig
    def get_files_crc32(self):
        """Return a dict of file name -> crc32, computing it if needed."""
        if self.files_crc32 == {}:
            self.get_files_types()
        return self.files_crc32
    def get_files_information(self):
        """
        Return the files inside the APK with their associated types and crc32

        :rtype: string, string, int
        """
        if self.files == {}:
            self.get_files_types()
        for i in self.get_files():
            try:
                yield i, self.files[i], self.files_crc32[i]
            except KeyError:
                yield i, "", ""
    def get_raw(self):
        """
        Return raw bytes of the APK

        :rtype: string
        """
        return self.__raw
    def get_file(self, filename):
        """
        Return the raw data of the specified filename

        :rtype: string
        """
        try:
            return self.zip.read(filename)
        except KeyError:
            return ""
    def get_dex(self):
        """
        Return the raw data of the classes dex file

        :rtype: string
        """
        return self.get_file("classes.dex")
    def get_elements(self, tag_name, attribute):
        """
        Return elements in xml files which match with the tag name and the specific attribute

        :param tag_name: a string which specify the tag name
        :param attribute: a string which specify the attribute
        """
        l = []
        for i in self.xml:
            for item in self.xml[i].getElementsByTagName(tag_name):
                value = item.getAttributeNS(NS_ANDROID_URI, attribute)
                value = self.format_value( value )
                l.append( str( value ) )
        return l
    def format_value(self, value):
        """Expand a shorthand component name to its fully-qualified form.

        ".Foo" and "Foo" become "<package>.Foo"; dotted names are untouched.
        """
        if len(value) > 0:
            if value[0] == ".":
                value = self.package + value
            else:
                v_dot = value.find(".")
                if v_dot == 0:
                    # NOTE(review): unreachable -- find(".") == 0 implies
                    # value[0] == ".", already handled above.
                    value = self.package + "." + value
                elif v_dot == -1:
                    value = self.package + "." + value
        return value
    def get_element(self, tag_name, attribute):
        """
        Return element in xml files which match with the tag name and the specific attribute

        :param tag_name: specify the tag name
        :type tag_name: string
        :param attribute: specify the attribute
        :type attribute: string

        :rtype: string
        """
        for i in self.xml:
            for item in self.xml[i].getElementsByTagName(tag_name):
                value = item.getAttributeNS(NS_ANDROID_URI, attribute)
                if len(value) > 0:
                    return value
        return None
    def get_main_activity(self):
        """
        Return the name of the main activity

        :rtype: string
        """
        # Main activity = the activity that has both the MAIN action and the
        # LAUNCHER category in its intent filters.
        x = set()
        y = set()
        for i in self.xml:
            for item in self.xml[i].getElementsByTagName("activity"):
                for sitem in item.getElementsByTagName( "action" ):
                    val = sitem.getAttributeNS(NS_ANDROID_URI, "name" )
                    if val == "android.intent.action.MAIN":
                        x.add( item.getAttributeNS(NS_ANDROID_URI, "name" ) )
                for sitem in item.getElementsByTagName( "category" ):
                    val = sitem.getAttributeNS(NS_ANDROID_URI, "name" )
                    if val == "android.intent.category.LAUNCHER":
                        y.add( item.getAttributeNS(NS_ANDROID_URI, "name" ) )
        z = x.intersection(y)
        if len(z) > 0:
            return self.format_value(z.pop())
        return None
    def get_activities(self):
        """
        Return the android:name attribute of all activities

        :rtype: a list of string
        """
        return self.get_elements("activity", "name")
    def get_services(self):
        """
        Return the android:name attribute of all services

        :rtype: a list of string
        """
        return self.get_elements("service", "name")
    def get_receivers(self):
        """
        Return the android:name attribute of all receivers

        :rtype: a list of string
        """
        return self.get_elements("receiver", "name")
    def get_providers(self):
        """
        Return the android:name attribute of all providers

        :rtype: a list of string
        """
        return self.get_elements("provider", "name")
    def get_intent_filters(self, category, name):
        """Return the intent-filter actions/categories of one component.

        :param category: component tag name ("activity", "service", ...)
        :param name: fully-qualified component name to match
        :rtype: dict with optional "action" and "category" lists
        """
        d = {}
        d["action"] = []
        d["category"] = []
        for i in self.xml:
            for item in self.xml[i].getElementsByTagName(category):
                if self.format_value(item.getAttributeNS(NS_ANDROID_URI, "name")) == name:
                    for sitem in item.getElementsByTagName("intent-filter"):
                        for ssitem in sitem.getElementsByTagName("action"):
                            if ssitem.getAttributeNS(NS_ANDROID_URI, "name") not in d["action"]:
                                d["action"].append(ssitem.getAttributeNS(NS_ANDROID_URI, "name"))
                        for ssitem in sitem.getElementsByTagName("category"):
                            if ssitem.getAttributeNS(NS_ANDROID_URI, "name") not in d["category"]:
                                d["category"].append(ssitem.getAttributeNS(NS_ANDROID_URI, "name"))
        # Drop empty lists so callers can treat absence as "no filters".
        if not d["action"]:
            del d["action"]
        if not d["category"]:
            del d["category"]
        return d
    def get_permissions(self):
        """
        Return permissions

        :rtype: list of string
        """
        return self.permissions
    def get_details_permissions(self):
        """
        Return permissions with details

        :rtype: list of string
        """
        l = {}
        for i in self.permissions:
            # Look up by the short name (text after the final dot).
            perm = i
            pos = i.rfind(".")
            if pos != -1:
                perm = i[pos+1:]
            try:
                l[ i ] = DVM_PERMISSIONS["MANIFEST_PERMISSION"][ perm ]
            except KeyError:
                l[ i ] = [ "normal", "Unknown permission from android reference", "Unknown permission from android reference" ]
        return l
    def get_max_sdk_version(self):
        """
        Return the android:maxSdkVersion attribute

        :rtype: string
        """
        return self.get_element("uses-sdk", "maxSdkVersion")
    def get_min_sdk_version(self):
        """
        Return the android:minSdkVersion attribute

        :rtype: string
        """
        return self.get_element("uses-sdk", "minSdkVersion")
    def get_target_sdk_version(self):
        """
        Return the android:targetSdkVersion attribute

        :rtype: string
        """
        return self.get_element( "uses-sdk", "targetSdkVersion" )
    def get_libraries(self):
        """
        Return the android:name attributes for libraries

        :rtype: list
        """
        return self.get_elements( "uses-library", "name" )
    def get_certificate(self, filename):
        """
        Return a certificate object by giving the name in the apk file

        :return: tuple (success flag, chilkat certificate object)
        """
        import chilkat
        cert = chilkat.CkCert()
        f = self.get_file(filename)
        success = cert.LoadFromBinary2(f, len(f))
        return success, cert
    def new_zip(self, filename, deleted_files=None, new_files={}):
        """
        Create a new zip file

        :param filename: the output filename of the zip
        :param deleted_files: a regex pattern to remove specific file
        :param new_files: a dictionary of new files

        :type filename: string
        :type deleted_files: None or a string
        :type new_files: a dictionary (key:filename, value:content of the file)
        """
        # NOTE(review): the mutable default for new_files is safe only
        # because it is never mutated here.
        if self.zipmodule == 2:
            from androguard.patch import zipfile
            zout = zipfile.ZipFile(filename, 'w')
        else:
            import zipfile
            zout = zipfile.ZipFile(filename, 'w')
        for item in self.zip.infolist():
            # NOTE(review): when deleted_files is None, NO entries are copied
            # and an empty zip is written -- looks like a latent bug; confirm
            # whether the guard should instead wrap only the regex match.
            if deleted_files != None:
                if re.match(deleted_files, item.filename) == None:
                    if item.filename in new_files:
                        zout.writestr(item, new_files[item.filename])
                    else:
                        buffer = self.zip.read(item.filename)
                        zout.writestr(item, buffer)
        zout.close()
    def get_android_manifest_axml(self):
        """
        Return the :class:`AXMLPrinter` object which corresponds to the AndroidManifest.xml file

        :rtype: :class:`AXMLPrinter`
        """
        try:
            return self.axml["AndroidManifest.xml"]
        except KeyError:
            return None
    def get_android_manifest_xml(self):
        """
        Return the xml object which corresponds to the AndroidManifest.xml file

        :rtype: object
        """
        try:
            return self.xml["AndroidManifest.xml"]
        except KeyError:
            return None
    def get_android_resources(self):
        """
        Return the :class:`ARSCParser` object which corresponds to the resources.arsc file

        :rtype: :class:`ARSCParser`
        """
        try:
            return self.arsc["resources.arsc"]
        except KeyError:
            # Parse lazily on first access and cache the result.
            try:
                self.arsc["resources.arsc"] = ARSCParser(self.zip.read("resources.arsc"))
                return self.arsc["resources.arsc"]
            except KeyError:
                return None
    def get_signature_name(self):
        """Return the archive name of the META-INF/*.RSA signature file, or None."""
        signature_expr = re.compile("^(META-INF/)(.*)(\.RSA)$")
        for i in self.get_files():
            if signature_expr.search(i):
                return i
        return None
    def get_signature(self):
        """Return the raw content of the META-INF/*.RSA signature file, or None."""
        signature_expr = re.compile("^(META-INF/)(.*)(\.RSA)$")
        for i in self.get_files():
            if signature_expr.search(i):
                return self.get_file(i)
        return None
    def show(self):
        """Print a human-readable summary of the APK (files, permissions, components)."""
        self.get_files_types()
        print "FILES: "
        for i in self.get_files():
            try:
                print "\t", i, self.files[i], "%x" % self.files_crc32[i]
            except KeyError:
                print "\t", i, "%x" % self.files_crc32[i]
        print "PERMISSIONS: "
        details_permissions = self.get_details_permissions()
        for i in details_permissions:
            print "\t", i, details_permissions[i]
        print "MAIN ACTIVITY: ", self.get_main_activity()
        print "ACTIVITIES: "
        activities = self.get_activities()
        for i in activities:
            filters = self.get_intent_filters("activity", i)
            print "\t", i, filters or ""
        print "SERVICES: "
        services = self.get_services()
        for i in services:
            filters = self.get_intent_filters("service", i)
            print "\t", i, filters or ""
        print "RECEIVERS: "
        receivers = self.get_receivers()
        for i in receivers:
            filters = self.get_intent_filters("receiver", i)
            print "\t", i, filters or ""
        print "PROVIDERS: ", self.get_providers()
def show_Certificate(cert):
    """Print the issuer and subject distinguished-name fields of *cert* to
    stdout. Python 2 only.

    Assumes cert exposes issuerC()/issuerCN()/... and subjectC()/... style
    accessors (certificate wrapper object) -- TODO confirm against callers.
    """
    print "Issuer: C=%s, CN=%s, DN=%s, E=%s, L=%s, O=%s, OU=%s, S=%s" % (cert.issuerC(), cert.issuerCN(), cert.issuerDN(), cert.issuerE(), cert.issuerL(), cert.issuerO(), cert.issuerOU(), cert.issuerS())
    print "Subject: C=%s, CN=%s, DN=%s, E=%s, L=%s, O=%s, OU=%s, S=%s" % (cert.subjectC(), cert.subjectCN(), cert.subjectDN(), cert.subjectE(), cert.subjectL(), cert.subjectO(), cert.subjectOU(), cert.subjectS())
######################################################## AXML FORMAT ########################################################
# Translated from http://code.google.com/p/android4me/source/browse/src/android/content/res/AXmlResourceParser.java
# Flag bit in the string pool header: when set, strings are stored as
# length-prefixed UTF-8 instead of UTF-16-LE (see StringBlock.m_isUTF8).
UTF8_FLAG = 0x00000100
class StringBlock(object):
    """Parser for a string pool chunk of a binary AXML/ARSC file.

    The raw string data is kept as a flat list of signed bytes
    (self.m_strings); individual strings are decoded lazily by getString()
    and memoized in self._cache. Python 2 only (uses `unicode`, integer `/`
    and print statements).
    """
    def __init__(self, buff):
        self.start = buff.get_idx()
        self._cache = {}
        # chunk header: int16 type, int16 header size, int32 total chunk size
        self.header = unpack('<h', buff.read(2))[0]
        self.header_size = unpack('<h', buff.read(2))[0]
        self.chunkSize = unpack('<i', buff.read(4))[0]
        self.stringCount = unpack('<i', buff.read(4))[0]
        self.styleOffsetCount = unpack('<i', buff.read(4))[0]
        self.flags = unpack('<i', buff.read(4))[0]
        self.m_isUTF8 = ((self.flags & UTF8_FLAG) != 0)
        self.stringsOffset = unpack('<i', buff.read(4))[0]
        self.stylesOffset = unpack('<i', buff.read(4))[0]
        self.m_stringOffsets = []
        self.m_styleOffsets = []
        self.m_strings = []
        self.m_styles = []
        for i in range(0, self.stringCount):
            self.m_stringOffsets.append(unpack('<i', buff.read(4))[0])
        for i in range(0, self.styleOffsetCount):
            self.m_styleOffsets.append(unpack('<i', buff.read(4))[0])
        # string data runs from stringsOffset up to stylesOffset (or to the
        # end of the chunk when there is no style data)
        size = self.chunkSize - self.stringsOffset
        if self.stylesOffset != 0:
            size = self.stylesOffset - self.stringsOffset
        # FIXME
        if (size % 4) != 0:
            androconf.warning("ooo")
        for i in range(0, size):
            self.m_strings.append(unpack('=b', buff.read(1))[0])
        if self.stylesOffset != 0:
            size = self.chunkSize - self.stylesOffset
            # FIXME
            if (size % 4) != 0:
                androconf.warning("ooo")
            for i in range(0, size / 4):
                self.m_styles.append(unpack('<i', buff.read(4))[0])
    def getString(self, idx):
        """Decode and return string *idx* from the pool ("" when out of
        range); results are cached per index."""
        if idx in self._cache:
            return self._cache[idx]
        if idx < 0 or not self.m_stringOffsets or idx >= len(self.m_stringOffsets):
            return ""
        offset = self.m_stringOffsets[idx]
        if not self.m_isUTF8:
            # UTF-16 entry: uint16 length prefix (in code units), then data
            length = self.getShort2(self.m_strings, offset)
            offset += 2
            self._cache[idx] = self.decode(self.m_strings, offset, length)
        else:
            # UTF-8 entry: two varint prefixes (char count, byte length)
            offset += self.getVarint(self.m_strings, offset)[1]
            varint = self.getVarint(self.m_strings, offset)
            offset += varint[1]
            length = varint[0]
            self._cache[idx] = self.decode2(self.m_strings, offset, length)
        return self._cache[idx]
    def getStyle(self, idx):
        # debugging helper only: dumps raw style data for *idx* to stdout
        print idx
        print idx in self.m_styleOffsets, self.m_styleOffsets[idx]
        print self.m_styles[0]
    def decode(self, array, offset, length):
        """Decode a UTF-16-LE string of *length* code units, stopping early
        at an embedded double-NUL terminator."""
        length = length * 2
        length = length + length % 2
        data = ""
        for i in range(0, length):
            t_data = pack("=b", self.m_strings[offset + i])
            data += unicode(t_data, errors='ignore')
            if data[-2:] == "\x00\x00":
                break
        end_zero = data.find("\x00\x00")
        if end_zero != -1:
            data = data[:end_zero]
        return data.decode("utf-16", 'replace')
    def decode2(self, array, offset, length):
        """Decode a UTF-8 string of *length* bytes."""
        data = ""
        for i in range(0, length):
            t_data = pack("=b", self.m_strings[offset + i])
            data += unicode(t_data, errors='ignore')
        return data.decode("utf-8", 'replace')
    def getVarint(self, array, offset):
        """Read a 1- or 2-byte length prefix; returns (value, bytes_used)."""
        val = array[offset]
        more = (val & 0x80) != 0
        val &= 0x7f
        if not more:
            return val, 1
        return val << 8 | array[offset + 1] & 0xff, 2
    def getShort(self, array, offset):
        # reads a uint16 out of an int32 array indexed by byte offset
        value = array[offset / 4]
        if ((offset % 4) / 2) == 0:
            return value & 0xFFFF
        else:
            return value >> 16
    def getShort2(self, array, offset):
        # little-endian uint16 from two consecutive bytes
        return (array[offset + 1] & 0xff) << 8 | array[offset] & 0xff
    def show(self):
        # debugging helper: dump header fields and every decoded string
        print "StringBlock", hex(self.start), hex(self.header), hex(self.header_size), hex(self.chunkSize), hex(self.stringsOffset), self.m_stringOffsets
        for i in range(0, len(self.m_stringOffsets)):
            print i, repr(self.getString(i))
# Field offsets inside one attribute record (5 uint32 words per attribute).
ATTRIBUTE_IX_NAMESPACE_URI = 0
ATTRIBUTE_IX_NAME = 1
ATTRIBUTE_IX_VALUE_STRING = 2
ATTRIBUTE_IX_VALUE_TYPE = 3
ATTRIBUTE_IX_VALUE_DATA = 4
ATTRIBUTE_LENGHT = 5  # record length in words (sic: "LENGHT")
# Chunk type identifiers of the binary XML format.
CHUNK_AXML_FILE = 0x00080003
CHUNK_RESOURCEIDS = 0x00080180
CHUNK_XML_FIRST = 0x00100100
CHUNK_XML_START_NAMESPACE = 0x00100100
CHUNK_XML_END_NAMESPACE = 0x00100101
CHUNK_XML_START_TAG = 0x00100102
CHUNK_XML_END_TAG = 0x00100103
CHUNK_XML_TEXT = 0x00100104
CHUNK_XML_LAST = 0x00100104
# Event codes returned by AXMLParser.next().
START_DOCUMENT = 0
END_DOCUMENT = 1
START_TAG = 2
END_TAG = 3
TEXT = 4
class AXMLParser(object):
    """Pull parser for Android binary XML (AXML).

    Call next() repeatedly; it returns START_DOCUMENT, START_TAG, END_TAG,
    TEXT or END_DOCUMENT, and the getters expose the data of the current
    event. All names/values are indices into the string pool (self.sb).
    Python 2 only (integer `/` in doNext).
    """
    def __init__(self, raw_buff):
        self.reset()
        self.valid_axml = True
        self.buff = bytecode.BuffHandle(raw_buff)
        axml_file = unpack('<L', self.buff.read(4))[0]
        if axml_file == CHUNK_AXML_FILE:
            self.buff.read(4)  # skip the total file size
            self.sb = StringBlock(self.buff)
            self.m_resourceIDs = []
            self.m_prefixuri = {}     # prefix index -> uri index
            self.m_uriprefix = {}     # uri index -> prefix index
            self.m_prefixuriL = []    # namespace stack of (prefix, uri)
            self.visited_ns = []
        else:
            self.valid_axml = False
            androconf.warning("Not a valid xml file")
    def is_valid(self):
        # False when the magic number did not match CHUNK_AXML_FILE
        return self.valid_axml
    def reset(self):
        # clear per-event state; -1 means "no value"
        self.m_event = -1
        self.m_lineNumber = -1
        self.m_name = -1
        self.m_namespaceUri = -1
        self.m_attributes = []
        self.m_idAttribute = -1
        self.m_classAttribute = -1
        self.m_styleAttribute = -1
    def next(self):
        """Advance to the next event and return its event code."""
        self.doNext()
        return self.m_event
    def doNext(self):
        """Consume chunks from the buffer until a reportable event is found."""
        if self.m_event == END_DOCUMENT:
            return
        event = self.m_event
        self.reset()
        while True:
            chunkType = -1
            # Fake END_DOCUMENT event.
            if event == END_TAG:
                pass
            # START_DOCUMENT
            if event == START_DOCUMENT:
                chunkType = CHUNK_XML_START_TAG
            else:
                if self.buff.end():
                    self.m_event = END_DOCUMENT
                    break
                chunkType = unpack('<L', self.buff.read(4))[0]
            if chunkType == CHUNK_RESOURCEIDS:
                chunkSize = unpack('<L', self.buff.read(4))[0]
                # FIXME
                if chunkSize < 8 or chunkSize % 4 != 0:
                    androconf.warning("Invalid chunk size")
                for i in range(0, chunkSize / 4 - 2):
                    self.m_resourceIDs.append(unpack('<L', self.buff.read(4))[0])
                continue
            # FIXME
            if chunkType < CHUNK_XML_FIRST or chunkType > CHUNK_XML_LAST:
                androconf.warning("invalid chunk type")
            # Fake START_DOCUMENT event.
            if chunkType == CHUNK_XML_START_TAG and event == -1:
                self.m_event = START_DOCUMENT
                break
            self.buff.read(4)  # /*chunkSize*/
            lineNumber = unpack('<L', self.buff.read(4))[0]
            self.buff.read(4)  # 0xFFFFFFFF
            if chunkType == CHUNK_XML_START_NAMESPACE or chunkType == CHUNK_XML_END_NAMESPACE:
                if chunkType == CHUNK_XML_START_NAMESPACE:
                    prefix = unpack('<L', self.buff.read(4))[0]
                    uri = unpack('<L', self.buff.read(4))[0]
                    self.m_prefixuri[prefix] = uri
                    self.m_uriprefix[uri] = prefix
                    self.m_prefixuriL.append((prefix, uri))
                    self.ns = uri
                else:
                    self.ns = -1
                    self.buff.read(4)
                    self.buff.read(4)
                    (prefix, uri) = self.m_prefixuriL.pop()
                    #del self.m_prefixuri[ prefix ]
                    #del self.m_uriprefix[ uri ]
                # namespace chunks are consumed silently; no event is emitted
                continue
            self.m_lineNumber = lineNumber
            if chunkType == CHUNK_XML_START_TAG:
                self.m_namespaceUri = unpack('<L', self.buff.read(4))[0]
                self.m_name = unpack('<L', self.buff.read(4))[0]
                # FIXME
                self.buff.read(4)  # flags
                attributeCount = unpack('<L', self.buff.read(4))[0]
                # high 16 bits: 1-based index of the "id" attribute
                self.m_idAttribute = (attributeCount >> 16) - 1
                attributeCount = attributeCount & 0xFFFF
                self.m_classAttribute = unpack('<L', self.buff.read(4))[0]
                self.m_styleAttribute = (self.m_classAttribute >> 16) - 1
                self.m_classAttribute = (self.m_classAttribute & 0xFFFF) - 1
                for i in range(0, attributeCount * ATTRIBUTE_LENGHT):
                    self.m_attributes.append(unpack('<L', self.buff.read(4))[0])
                # the value-type word stores the type in its top byte
                for i in range(ATTRIBUTE_IX_VALUE_TYPE, len(self.m_attributes), ATTRIBUTE_LENGHT):
                    self.m_attributes[i] = self.m_attributes[i] >> 24
                self.m_event = START_TAG
                break
            if chunkType == CHUNK_XML_END_TAG:
                self.m_namespaceUri = unpack('<L', self.buff.read(4))[0]
                self.m_name = unpack('<L', self.buff.read(4))[0]
                self.m_event = END_TAG
                break
            if chunkType == CHUNK_XML_TEXT:
                self.m_name = unpack('<L', self.buff.read(4))[0]
                # FIXME
                self.buff.read(4)
                self.buff.read(4)
                self.m_event = TEXT
                break
    def getPrefixByUri(self, uri):
        """Return the prefix string-index for a uri string-index, or -1."""
        try:
            return self.m_uriprefix[uri]
        except KeyError:
            return -1
    def getPrefix(self):
        """Return the namespace prefix of the current tag ('' when none)."""
        try:
            return self.sb.getString(self.m_uriprefix[self.m_namespaceUri])
        except KeyError:
            return u''
    def getName(self):
        """Return the current tag name ('' outside START_TAG/END_TAG)."""
        if self.m_name == -1 or (self.m_event != START_TAG and self.m_event != END_TAG):
            return u''
        return self.sb.getString(self.m_name)
    def getText(self):
        """Return the current text content ('' outside TEXT events)."""
        if self.m_name == -1 or self.m_event != TEXT:
            return u''
        return self.sb.getString(self.m_name)
    def getNamespacePrefix(self, pos):
        prefix = self.m_prefixuriL[pos][0]
        return self.sb.getString(prefix)
    def getNamespaceUri(self, pos):
        uri = self.m_prefixuriL[pos][1]
        return self.sb.getString(uri)
    def getXMLNS(self):
        """Serialize xmlns declarations not yet emitted for this document."""
        buff = ""
        for i in self.m_uriprefix:
            if i not in self.visited_ns:
                buff += "xmlns:%s=\"%s\"\n" % (self.sb.getString(self.m_uriprefix[i]), self.sb.getString(self.m_prefixuri[self.m_uriprefix[i]]))
                self.visited_ns.append(i)
        return buff
    def getNamespaceCount(self, pos):
        # not implemented
        pass
    def getAttributeOffset(self, index):
        """Return the word offset of attribute *index* in m_attributes."""
        # FIXME
        if self.m_event != START_TAG:
            androconf.warning("Current event is not START_TAG.")
        offset = index * 5
        # FIXME
        if offset >= len(self.m_attributes):
            androconf.warning("Invalid attribute index")
        return offset
    def getAttributeCount(self):
        """Number of attributes of the current START_TAG (-1 otherwise)."""
        if self.m_event != START_TAG:
            return -1
        return len(self.m_attributes) / ATTRIBUTE_LENGHT
    def getAttributePrefix(self, index):
        offset = self.getAttributeOffset(index)
        uri = self.m_attributes[offset + ATTRIBUTE_IX_NAMESPACE_URI]
        prefix = self.getPrefixByUri(uri)
        if prefix == -1:
            return ""
        return self.sb.getString(prefix)
    def getAttributeName(self, index):
        offset = self.getAttributeOffset(index)
        name = self.m_attributes[offset+ATTRIBUTE_IX_NAME]
        if name == -1:
            return ""
        return self.sb.getString( name )
    def getAttributeValueType(self, index):
        offset = self.getAttributeOffset(index)
        return self.m_attributes[offset+ATTRIBUTE_IX_VALUE_TYPE]
    def getAttributeValueData(self, index):
        offset = self.getAttributeOffset(index)
        return self.m_attributes[offset+ATTRIBUTE_IX_VALUE_DATA]
    def getAttributeValue(self, index):
        """Return the string value of attribute *index* when it is a
        TYPE_STRING value; other types are handled by AXMLPrinter."""
        offset = self.getAttributeOffset(index)
        valueType = self.m_attributes[offset+ATTRIBUTE_IX_VALUE_TYPE]
        if valueType == TYPE_STRING:
            valueString = self.m_attributes[offset+ATTRIBUTE_IX_VALUE_STRING]
            return self.sb.getString( valueString )
        # WIP
        return ""
        #int valueData=m_attributes[offset+ATTRIBUTE_IX_VALUE_DATA];
        #return TypedValue.coerceToString(valueType,valueData);
# Res_value.dataType codes (mirrors android.util.TypedValue).
TYPE_ATTRIBUTE = 2
TYPE_DIMENSION = 5
TYPE_FIRST_COLOR_INT = 28
TYPE_FIRST_INT = 16
TYPE_FLOAT = 4
TYPE_FRACTION = 6
TYPE_INT_BOOLEAN = 18
TYPE_INT_COLOR_ARGB4 = 30
TYPE_INT_COLOR_ARGB8 = 28
TYPE_INT_COLOR_RGB4 = 31
TYPE_INT_COLOR_RGB8 = 29
TYPE_INT_DEC = 16
TYPE_INT_HEX = 17
TYPE_LAST_COLOR_INT = 31
TYPE_LAST_INT = 31
TYPE_NULL = 0
TYPE_REFERENCE = 1
TYPE_STRING = 3
# Lookup tables for decoding packed "complex" values (dimensions/fractions):
# radix multipliers indexed by the 2-bit radix field, and the unit suffixes
# indexed by the low 4 bits of the value.
RADIX_MULTS = [0.00390625, 3.051758E-005, 1.192093E-007, 4.656613E-010]
DIMENSION_UNITS = ["px", "dip", "sp", "pt", "in", "mm"]
FRACTION_UNITS = ["%", "%p"]
COMPLEX_UNIT_MASK = 15
def complexToFloat(xcomplex):
    """Decode the mantissa of a packed complex value into a float."""
    mantissa = xcomplex & 0xFFFFFF00
    radix = (xcomplex >> 4) & 3
    return float(mantissa) * RADIX_MULTS[radix]
class AXMLPrinter(object):
    """Decode a binary AndroidManifest.xml (AXML) buffer into textual XML.

    The whole document is rendered eagerly in __init__ by driving an
    AXMLParser event loop; the result is accumulated in self.buff and can be
    retrieved raw (get_buff) or via minidom (get_xml / get_xml_obj).
    """
    def __init__(self, raw_buff):
        self.axml = AXMLParser(raw_buff)
        self.xmlns = False
        self.buff = u''
        # `while True and ...` simplified; stops immediately on invalid AXML.
        while self.axml.is_valid():
            _type = self.axml.next()
            if _type == START_DOCUMENT:
                self.buff += u'<?xml version="1.0" encoding="utf-8"?>\n'
            elif _type == START_TAG:
                self.buff += u'<' + self.getPrefix(self.axml.getPrefix()) + self.axml.getName() + u'\n'
                self.buff += self.axml.getXMLNS()
                for i in range(0, self.axml.getAttributeCount()):
                    self.buff += "%s%s=\"%s\"\n" % (self.getPrefix(
                        self.axml.getAttributePrefix(i)), self.axml.getAttributeName(i), self._escape(self.getAttributeValue(i)))
                self.buff += u'>\n'
            elif _type == END_TAG:
                self.buff += "</%s%s>\n" % (self.getPrefix(self.axml.getPrefix()), self.axml.getName())
            elif _type == TEXT:
                self.buff += "%s\n" % self.axml.getText()
            elif _type == END_DOCUMENT:
                break
    # pleed patch
    def _escape(self, s):
        """XML-escape an attribute value (&, <, >, double and single quotes).

        Bug fix: the previous implementation chained five no-op str.replace
        calls (the entity replacement text had been lost, e.g.
        s.replace('&', '&')), so quotes ended up unescaped inside the
        double-quoted attribute values emitted by __init__. Delegating to
        saxutils.escape with explicit quote entities restores proper escaping
        without double-escaping ampersands.
        """
        return escape(s, {'"': "&quot;", "'": "&apos;"})
    def get_buff(self):
        """Return the reconstructed XML document as UTF-8 bytes."""
        return self.buff.encode('utf-8')
    def get_xml(self):
        """Return the document pretty-printed by minidom (UTF-8 bytes)."""
        return minidom.parseString(self.get_buff()).toprettyxml(encoding="utf-8")
    def get_xml_obj(self):
        """Return the document parsed into a minidom Document object."""
        return minidom.parseString(self.get_buff())
    def getPrefix(self, prefix):
        """Format a namespace prefix for serialization: '' or 'prefix:'."""
        if prefix is None or len(prefix) == 0:
            return u''
        return prefix + u':'
    def getAttributeValue(self, index):
        """Render attribute *index* of the current tag in source style:
        strings, ?attr/@resource references, floats, hex/bool ints,
        dimensions, fractions, colors and plain ints."""
        _type = self.axml.getAttributeValueType(index)
        _data = self.axml.getAttributeValueData(index)
        if _type == TYPE_STRING:
            return self.axml.getAttributeValue(index)
        elif _type == TYPE_ATTRIBUTE:
            return "?%s%08X" % (self.getPackage(_data), _data)
        elif _type == TYPE_REFERENCE:
            return "@%s%08X" % (self.getPackage(_data), _data)
        elif _type == TYPE_FLOAT:
            # reinterpret the 32-bit payload as an IEEE float
            return "%f" % unpack("=f", pack("=L", _data))[0]
        elif _type == TYPE_INT_HEX:
            return "0x%08X" % _data
        elif _type == TYPE_INT_BOOLEAN:
            if _data == 0:
                return "false"
            return "true"
        elif _type == TYPE_DIMENSION:
            return "%f%s" % (complexToFloat(_data), DIMENSION_UNITS[_data & COMPLEX_UNIT_MASK])
        elif _type == TYPE_FRACTION:
            return "%f%s" % (complexToFloat(_data) * 100, FRACTION_UNITS[_data & COMPLEX_UNIT_MASK])
        elif _type >= TYPE_FIRST_COLOR_INT and _type <= TYPE_LAST_COLOR_INT:
            return "#%08X" % _data
        elif _type >= TYPE_FIRST_INT and _type <= TYPE_LAST_INT:
            return "%d" % androconf.long2int(_data)
        # WIP: remaining typed values are dumped raw
        return "<0x%X, type 0x%02X>" % (_data, _type)
    def getPackage(self, id):
        """Return 'android:' for framework resource ids (package id 0x01)."""
        if id >> 24 == 1:
            return "android:"
        return ""
# Chunk type identifiers from AOSP ResourceTypes.h (ResChunk_header.type).
RES_NULL_TYPE = 0x0000
RES_STRING_POOL_TYPE = 0x0001
RES_TABLE_TYPE = 0x0002
RES_XML_TYPE = 0x0003
# Chunk types in RES_XML_TYPE
RES_XML_FIRST_CHUNK_TYPE = 0x0100
RES_XML_START_NAMESPACE_TYPE= 0x0100
RES_XML_END_NAMESPACE_TYPE = 0x0101
RES_XML_START_ELEMENT_TYPE = 0x0102
RES_XML_END_ELEMENT_TYPE = 0x0103
RES_XML_CDATA_TYPE = 0x0104
RES_XML_LAST_CHUNK_TYPE = 0x017f
# This contains a uint32_t array mapping strings in the string
# pool back to resource identifiers. It is optional.
RES_XML_RESOURCE_MAP_TYPE = 0x0180
# Chunk types in RES_TABLE_TYPE
RES_TABLE_PACKAGE_TYPE = 0x0200
RES_TABLE_TYPE_TYPE = 0x0201
RES_TABLE_TYPE_SPEC_TYPE = 0x0202
class ARSCParser(object):
    """Parser for a resources.arsc resource table.

    __init__ walks every package chunk eagerly and stores the raw parsed
    objects in self.packages[package_name] (a flat, ordered list); _analyse()
    lazily builds self.values[package][locale][type] on first use by the
    get_*_resources accessors. Python 2 only (uses `except Exception, why`).
    """
    def __init__(self, raw_buff):
        self.analyzed = False
        self.buff = bytecode.BuffHandle(raw_buff)
        #print "SIZE", hex(self.buff.size())
        self.header = ARSCHeader(self.buff)
        self.packageCount = unpack('<i', self.buff.read(4))[0]
        #print hex(self.packageCount)
        # global value string pool, then per-package type/key pools below
        self.stringpool_main = StringBlock(self.buff)
        self.next_header = ARSCHeader(self.buff)
        self.packages = {}
        self.values = {}
        for i in range(0, self.packageCount):
            current_package = ARSCResTablePackage(self.buff)
            package_name = current_package.get_name()
            self.packages[package_name] = []
            mTableStrings = StringBlock(self.buff)
            mKeyStrings = StringBlock(self.buff)
            #self.stringpool_main.show()
            #self.mTableStrings.show()
            #self.mKeyStrings.show()
            self.packages[package_name].append(current_package)
            self.packages[package_name].append(mTableStrings)
            self.packages[package_name].append(mKeyStrings)
            pc = PackageContext(current_package, self.stringpool_main, mTableStrings, mKeyStrings)
            current = self.buff.get_idx()
            while not self.buff.end():
                header = ARSCHeader(self.buff)
                self.packages[package_name].append(header)
                if header.type == RES_TABLE_TYPE_SPEC_TYPE:
                    self.packages[package_name].append(ARSCResTypeSpec(self.buff, pc))
                elif header.type == RES_TABLE_TYPE_TYPE:
                    a_res_type = ARSCResType(self.buff, pc)
                    self.packages[package_name].append(a_res_type)
                    # entry offsets table, then the entries themselves
                    entries = []
                    for i in range(0, a_res_type.entryCount):
                        current_package.mResId = current_package.mResId & 0xffff0000 | i
                        entries.append((unpack('<i', self.buff.read(4))[0], current_package.mResId))
                    self.packages[package_name].append(entries)
                    for entry, res_id in entries:
                        if self.buff.end():
                            break
                        if entry != -1:
                            # offset -1 marks a NO_ENTRY slot
                            ate = ARSCResTableEntry(self.buff, res_id, pc)
                            self.packages[package_name].append(ate)
                elif header.type == RES_TABLE_PACKAGE_TYPE:
                    break
                else:
                    androconf.warning("unknown type")
                    break
                # jump to the next chunk using the declared chunk size
                current += header.size
                self.buff.set_idx(current)
    def _analyse(self):
        """Populate self.values[package][locale][type] from the raw parsed
        chunk list; runs at most once (guarded by self.analyzed)."""
        if self.analyzed:
            return
        self.analyzed = True
        for package_name in self.packages:
            self.values[package_name] = {}
            # skip the 3 leading entries (package, table pool, key pool)
            nb = 3
            for header in self.packages[package_name][nb:]:
                if isinstance(header, ARSCHeader):
                    if header.type == RES_TABLE_TYPE_TYPE:
                        a_res_type = self.packages[package_name][nb + 1]
                        if a_res_type.config.get_language() not in self.values[package_name]:
                            self.values[package_name][a_res_type.config.get_language()] = {}
                            self.values[package_name][a_res_type.config.get_language()]["public"] = []
                        c_value = self.values[package_name][a_res_type.config.get_language()]
                        entries = self.packages[package_name][nb + 2]
                        nb_i = 0
                        for entry, res_id in entries:
                            if entry != -1:
                                ate = self.packages[package_name][nb + 3 + nb_i]
                                #print ate.is_public(), a_res_type.get_type(), ate.get_value(), hex(ate.mResId)
                                if ate.get_index() != -1:
                                    c_value["public"].append((a_res_type.get_type(), ate.get_value(), ate.mResId))
                                if a_res_type.get_type() not in c_value:
                                    c_value[a_res_type.get_type()] = []
                                if a_res_type.get_type() == "string":
                                    c_value["string"].append(self.get_resource_string(ate))
                                elif a_res_type.get_type() == "id":
                                    if not ate.is_complex():
                                        c_value["id"].append(self.get_resource_id(ate))
                                elif a_res_type.get_type() == "bool":
                                    if not ate.is_complex():
                                        c_value["bool"].append(self.get_resource_bool(ate))
                                elif a_res_type.get_type() == "integer":
                                    c_value["integer"].append(self.get_resource_integer(ate))
                                elif a_res_type.get_type() == "color":
                                    c_value["color"].append(self.get_resource_color(ate))
                                elif a_res_type.get_type() == "dimen":
                                    c_value["dimen"].append(self.get_resource_dimen(ate))
                                #elif a_res_type.get_type() == "style":
                                #    c_value["style"].append(self.get_resource_style(ate))
                                nb_i += 1
                nb += 1
    def get_resource_string(self, ate):
        """[name, string value] for a string entry."""
        return [ate.get_value(), ate.get_key_data()]
    def get_resource_id(self, ate):
        """[name] or [name, 'true'/'false'] for an id entry."""
        x = [ate.get_value()]
        if ate.key.get_data() == 0:
            x.append("false")
        elif ate.key.get_data() == 1:
            x.append("true")
        return x
    def get_resource_bool(self, ate):
        """[name, 'true'/'false'] for a bool entry (-1 encodes true)."""
        x = [ate.get_value()]
        if ate.key.get_data() == 0:
            x.append("false")
        elif ate.key.get_data() == -1:
            x.append("true")
        return x
    def get_resource_integer(self, ate):
        """[name, raw integer payload]."""
        return [ate.get_value(), ate.key.get_data()]
    def get_resource_color(self, ate):
        """[name, '#AARRGGBB'] built from the 32-bit payload."""
        entry_data = ate.key.get_data()
        return [ate.get_value(), "#%02x%02x%02x%02x" % (((entry_data >> 24) & 0xFF), ((entry_data >> 16) & 0xFF), ((entry_data >> 8) & 0xFF), (entry_data & 0xFF))]
    def get_resource_dimen(self, ate):
        """[name, '<float><unit>']; falls back to the raw payload when the
        unit index is out of range."""
        try:
            return [ate.get_value(), "%s%s" % (complexToFloat(ate.key.get_data()), DIMENSION_UNITS[ate.key.get_data() & COMPLEX_UNIT_MASK])]
        except Exception, why:
            androconf.warning(why.__str__())
            return [ate.get_value(), ate.key.get_data()]
    # FIXME
    def get_resource_style(self, ate):
        # style decoding is not implemented yet
        return ["", ""]
    def get_packages_names(self):
        """All package names found in the table."""
        return self.packages.keys()
    def get_locales(self, package_name):
        """All locales ('\\x00\\x00' is the default one) of a package."""
        self._analyse()
        return self.values[package_name].keys()
    def get_types(self, package_name, locale):
        """All resource type names present for a package/locale."""
        self._analyse()
        return self.values[package_name][locale].keys()
    def get_public_resources(self, package_name, locale='\x00\x00'):
        """Render the public resources of a package/locale as a UTF-8
        public.xml document."""
        self._analyse()
        buff = '<?xml version="1.0" encoding="utf-8"?>\n'
        buff += '<resources>\n'
        try:
            for i in self.values[package_name][locale]["public"]:
                buff += '<public type="%s" name="%s" id="0x%08x" />\n' % (i[0], i[1], i[2])
        except KeyError:
            pass
        buff += '</resources>\n'
        return buff.encode('utf-8')
    def get_string_resources(self, package_name, locale='\x00\x00'):
        """Render string resources as a UTF-8 strings.xml document."""
        self._analyse()
        buff = '<?xml version="1.0" encoding="utf-8"?>\n'
        buff += '<resources>\n'
        try:
            for i in self.values[package_name][locale]["string"]:
                buff += '<string name="%s">%s</string>\n' % (i[0], i[1])
        except KeyError:
            pass
        buff += '</resources>\n'
        return buff.encode('utf-8')
    def get_strings_resources(self):
        """Render the string resources of every package/locale as one
        UTF-8 XML document."""
        self._analyse()
        buff = '<?xml version="1.0" encoding="utf-8"?>\n'
        buff += "<packages>\n"
        for package_name in self.get_packages_names():
            buff += "<package name=\"%s\">\n" % package_name
            for locale in self.get_locales(package_name):
                buff += "<locale value=%s>\n" % repr(locale)
                buff += '<resources>\n'
                try:
                    for i in self.values[package_name][locale]["string"]:
                        buff += '<string name="%s">%s</string>\n' % (i[0], i[1])
                except KeyError:
                    pass
                buff += '</resources>\n'
                buff += '</locale>\n'
            buff += "</package>\n"
        buff += "</packages>\n"
        return buff.encode('utf-8')
    def get_id_resources(self, package_name, locale='\x00\x00'):
        """Render id resources as a UTF-8 ids.xml document."""
        self._analyse()
        buff = '<?xml version="1.0" encoding="utf-8"?>\n'
        buff += '<resources>\n'
        try:
            for i in self.values[package_name][locale]["id"]:
                if len(i) == 1:
                    buff += '<item type="id" name="%s"/>\n' % (i[0])
                else:
                    buff += '<item type="id" name="%s">%s</item>\n' % (i[0], i[1])
        except KeyError:
            pass
        buff += '</resources>\n'
        return buff.encode('utf-8')
    def get_bool_resources(self, package_name, locale='\x00\x00'):
        """Render bool resources as a UTF-8 bools.xml document."""
        self._analyse()
        buff = '<?xml version="1.0" encoding="utf-8"?>\n'
        buff += '<resources>\n'
        try:
            for i in self.values[package_name][locale]["bool"]:
                buff += '<bool name="%s">%s</bool>\n' % (i[0], i[1])
        except KeyError:
            pass
        buff += '</resources>\n'
        return buff.encode('utf-8')
    def get_integer_resources(self, package_name, locale='\x00\x00'):
        """Render integer resources as a UTF-8 integers.xml document."""
        self._analyse()
        buff = '<?xml version="1.0" encoding="utf-8"?>\n'
        buff += '<resources>\n'
        try:
            for i in self.values[package_name][locale]["integer"]:
                buff += '<integer name="%s">%s</integer>\n' % (i[0], i[1])
        except KeyError:
            pass
        buff += '</resources>\n'
        return buff.encode('utf-8')
    def get_color_resources(self, package_name, locale='\x00\x00'):
        """Render color resources as a UTF-8 colors.xml document."""
        self._analyse()
        buff = '<?xml version="1.0" encoding="utf-8"?>\n'
        buff += '<resources>\n'
        try:
            for i in self.values[package_name][locale]["color"]:
                buff += '<color name="%s">%s</color>\n' % (i[0], i[1])
        except KeyError:
            pass
        buff += '</resources>\n'
        return buff.encode('utf-8')
    def get_dimen_resources(self, package_name, locale='\x00\x00'):
        """Render dimension resources as a UTF-8 dimens.xml document."""
        self._analyse()
        buff = '<?xml version="1.0" encoding="utf-8"?>\n'
        buff += '<resources>\n'
        try:
            for i in self.values[package_name][locale]["dimen"]:
                buff += '<dimen name="%s">%s</dimen>\n' % (i[0], i[1])
        except KeyError:
            pass
        buff += '</resources>\n'
        return buff.encode('utf-8')
    def get_id(self, package_name, rid, locale='\x00\x00'):
        """Return the (type, name, id) public entry matching resource id
        *rid*, or None."""
        self._analyse()
        try:
            for i in self.values[package_name][locale]["public"]:
                if i[2] == rid:
                    return i
        except KeyError:
            return None
    def get_string(self, package_name, name, locale='\x00\x00'):
        """Return the [name, value] string entry called *name*, or None."""
        self._analyse()
        try:
            for i in self.values[package_name][locale]["string"]:
                if i[0] == name:
                    return i
        except KeyError:
            return None
    def get_items(self, package_name):
        """Return the raw ordered parse list of a package."""
        self._analyse()
        return self.packages[package_name]
class PackageContext(object):
    """Bundle of the string pools and package needed while decoding one
    resource package; also proxies the package's running resource id."""
    def __init__(self, current_package, stringpool_main, mTableStrings, mKeyStrings):
        self.current_package = current_package
        self.stringpool_main = stringpool_main   # global value strings
        self.mTableStrings = mTableStrings       # type-name strings
        self.mKeyStrings = mKeyStrings           # entry-key strings
    def get_mResId(self):
        """Current resource id counter of the wrapped package."""
        return self.current_package.mResId
    def set_mResId(self, mResId):
        """Replace the resource id counter on the wrapped package."""
        self.current_package.mResId = mResId
class ARSCHeader(object):
    """Generic resource chunk header: int16 type, int16 header size and
    int32 total chunk size, read sequentially from *buff*."""
    def __init__(self, buff):
        self.start = buff.get_idx()
        # type + header_size are two consecutive little-endian int16s
        self.type, self.header_size = unpack('<hh', buff.read(4))
        self.size = unpack('<i', buff.read(4))[0]
class ARSCResTablePackage(object):
    """A ResTable_package chunk body: package id, UTF-16 name (fixed 256
    bytes) and the offsets/counts of its type and key string pools."""
    def __init__(self, buff):
        self.start = buff.get_idx()
        self.id = unpack('<i', buff.read(4))[0]
        self.name = buff.readNullString(256)
        self.typeStrings = unpack('<i', buff.read(4))[0]
        self.lastPublicType = unpack('<i', buff.read(4))[0]
        self.keyStrings = unpack('<i', buff.read(4))[0]
        self.lastPublicKey = unpack('<i', buff.read(4))[0]
        # running resource id counter: package id in the top byte; type and
        # entry are or-ed in later by ARSCResType/ARSCParser
        self.mResId = self.id << 24
        #print "ARSCResTablePackage", hex(self.start), hex(self.id), hex(self.mResId), repr(self.name.decode("utf-16", errors='replace')), hex(self.typeStrings), hex(self.lastPublicType), hex(self.keyStrings), hex(self.lastPublicKey)
    def get_name(self):
        """Decode the fixed-size UTF-16 name field up to its NUL terminator."""
        name = self.name.decode("utf-16", 'replace')
        name = name[:name.find("\x00")]
        return name
class ARSCResTypeSpec(object):
    """A ResTable_typeSpec chunk body: type id plus one uint32 of
    configuration-change flags per entry of that type."""
    def __init__(self, buff, parent=None):
        self.start = buff.get_idx()
        self.parent = parent  # PackageContext of the enclosing package
        self.id = unpack('<b', buff.read(1))[0]
        self.res0 = unpack('<b', buff.read(1))[0]  # reserved, must be 0
        self.res1 = unpack('<h', buff.read(2))[0]  # reserved, must be 0
        self.entryCount = unpack('<i', buff.read(4))[0]
        #print "ARSCResTypeSpec", hex(self.start), hex(self.id), hex(self.res0), hex(self.res1), hex(self.entryCount), "table:" + self.parent.mTableStrings.getString(self.id - 1)
        self.typespec_entries = []
        for i in range(0, self.entryCount):
            self.typespec_entries.append(unpack('<i', buff.read(4))[0])
class ARSCResType(object):
    """A ResTable_type chunk body: type id, entry count/offset and the
    configuration (locale etc.) this batch of entries applies to."""
    def __init__(self, buff, parent=None):
        self.start = buff.get_idx()
        self.parent = parent  # PackageContext of the enclosing package
        self.id = unpack('<b', buff.read(1))[0]
        self.res0 = unpack('<b', buff.read(1))[0]  # reserved
        self.res1 = unpack('<h', buff.read(2))[0]  # reserved
        self.entryCount = unpack('<i', buff.read(4))[0]
        self.entriesStart = unpack('<i', buff.read(4))[0]
        # fold this type id into the package's running resource id
        self.mResId = (0xff000000 & self.parent.get_mResId()) | self.id << 16
        self.parent.set_mResId(self.mResId)
        #print "ARSCResType", hex(self.start), hex(self.id), hex(self.res0), hex(self.res1), hex(self.entryCount), hex(self.entriesStart), hex(self.mResId), "table:" + self.parent.mTableStrings.getString(self.id - 1)
        self.config = ARSCResTableConfig(buff)
    def get_type(self):
        """Resolve the type name (e.g. "string") via the type string pool;
        type ids are 1-based, hence the -1."""
        return self.parent.mTableStrings.getString(self.id - 1)
class ARSCResTableConfig(object):
    """A ResTable_config record (the device configuration a resource entry
    targets). Only the first 36 bytes are interpreted; any declared excess
    is consumed as padding so the stream stays aligned."""
    def __init__(self, buff):
        self.start = buff.get_idx()
        self.size = unpack('<i', buff.read(4))[0]
        self.imsi = unpack('<i', buff.read(4))[0]
        self.locale = unpack('<i', buff.read(4))[0]
        self.screenType = unpack('<i', buff.read(4))[0]
        self.input = unpack('<i', buff.read(4))[0]
        self.screenSize = unpack('<i', buff.read(4))[0]
        self.version = unpack('<i', buff.read(4))[0]
        # later additions to the struct; only present for larger sizes
        self.screenConfig = 0
        self.screenSizeDp = 0
        if self.size >= 32:
            self.screenConfig = unpack('<i', buff.read(4))[0]
            if self.size >= 36:
                self.screenSizeDp = unpack('<i', buff.read(4))[0]
        # skip any fields newer than this parser knows about
        self.exceedingSize = self.size - 36
        if self.exceedingSize > 0:
            androconf.warning("too much bytes !")
            self.padding = buff.read(self.exceedingSize)
        #print "ARSCResTableConfig", hex(self.start), hex(self.size), hex(self.imsi), hex(self.locale), repr(self.get_language()), repr(self.get_country()), hex(self.screenType), hex(self.input), hex(self.screenSize), hex(self.version), hex(self.screenConfig), hex(self.screenSizeDp)
    def get_language(self):
        """Two-char language code packed in the low 16 bits of locale
        ('\\x00\\x00' for the default configuration)."""
        x = self.locale & 0x0000ffff
        return chr(x & 0x00ff) + chr((x & 0xff00) >> 8)
    def get_country(self):
        """Two-char country code packed in the high 16 bits of locale."""
        x = (self.locale & 0xffff0000) >> 16
        return chr(x & 0x00ff) + chr((x & 0xff00) >> 8)
class ARSCResTableEntry(object):
    """A ResTable_entry: size/flags/key-index header followed by either a
    simple Res_value (self.key) or a complex map (self.item)."""
    def __init__(self, buff, mResId, parent=None):
        self.start = buff.get_idx()
        self.mResId = mResId
        self.parent = parent  # PackageContext of the enclosing package
        self.size = unpack('<h', buff.read(2))[0]
        self.flags = unpack('<h', buff.read(2))[0]
        self.index = unpack('<i', buff.read(4))[0]  # index into key strings
        #print "ARSCResTableEntry", hex(self.start), hex(self.mResId), hex(self.size), hex(self.flags), hex(self.index), self.is_complex()#, hex(self.mResId)
        # flag bit 0 = FLAG_COMPLEX: only one of item/key is set, so
        # get_key_data() is valid only when is_complex() is False
        if self.flags & 1:
            self.item = ARSCComplex(buff, parent)
        else:
            self.key = ARSCResStringPoolRef(buff, self.parent)
    def get_index(self):
        """Raw key-string index of this entry."""
        return self.index
    def get_value(self):
        """Entry name, resolved via the package key string pool."""
        return self.parent.mKeyStrings.getString(self.index)
    def get_key_data(self):
        """String value of a simple entry (AttributeError when complex)."""
        return self.key.get_data_value()
    def is_public(self):
        # flag bit 1 = FLAG_PUBLIC; 0/2 means no other flags are set
        return self.flags == 0 or self.flags == 2
    def is_complex(self):
        """True when the entry holds a complex (map) value."""
        return (self.flags & 1) == 1
class ARSCComplex(object):
    """A ResTable_map_entry: parent resource id plus *count* pairs of
    (name resource id, Res_value)."""
    def __init__(self, buff, parent=None):
        self.start = buff.get_idx()
        self.parent = parent  # PackageContext of the enclosing package
        self.id_parent = unpack('<i', buff.read(4))[0]
        self.count = unpack('<i', buff.read(4))[0]
        self.items = []
        for i in range(0, self.count):
            self.items.append((unpack('<i', buff.read(4))[0], ARSCResStringPoolRef(buff, self.parent)))
        #print "ARSCComplex", hex(self.start), self.id_parent, self.count, repr(self.parent.mKeyStrings.getString(self.id_parent))
class ARSCResStringPoolRef(object):
    """A Res_value record: 3 skipped bytes (uint16 size + uint8 res0),
    a 1-byte type code and a 4-byte payload."""
    def __init__(self, buff, parent=None):
        self.start = buff.get_idx()
        self.parent = parent  # PackageContext of the enclosing package
        self.skip_bytes = buff.read(3)
        self.data_type = unpack('<b', buff.read(1))[0]
        self.data = unpack('<i', buff.read(4))[0]
        #print "ARSCResStringPoolRef", hex(self.start), hex(self.data_type), hex(self.data)#, "key:" + self.parent.mKeyStrings.getString(self.index), self.parent.stringpool_main.getString(self.data)
    def get_data_value(self):
        """Resolve the payload as an index into the main string pool
        (only meaningful for string-typed values)."""
        return self.parent.stringpool_main.getString(self.data)
    def get_data(self):
        """Raw 32-bit payload."""
        return self.data
    def get_data_type(self):
        """Type code of the value (one of the TYPE_* constants)."""
        return self.data_type
def get_arsc_info(arscobj):
    """Render every package/locale/type of an :class:`ARSCParser` as a
    tab-indented text tree and return it as a single string."""
    lines = []
    for package in arscobj.get_packages_names():
        lines.append(package + ":\n")
        for locale in arscobj.get_locales(package):
            lines.append("\t" + repr(locale) + ":\n")
            for ttype in arscobj.get_types(package, locale):
                lines.append("\t\t" + ttype + ":\n")
                try:
                    # resource types without a get_<type>_resources accessor
                    # (e.g. "style") raise AttributeError and are skipped
                    res = getattr(arscobj, "get_" + ttype + "_resources")(package, locale)
                    for row in res.decode("utf-8", 'replace').split("\n"):
                        lines.append("\t\t\t" + row + "\n")
                except AttributeError:
                    pass
    return "".join(lines)
| |
import functools
import sys
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.utils import (
ContextVar,
capture_internal_exceptions,
event_from_exception,
)
from sentry_sdk._compat import PY2, reraise, iteritems
from sentry_sdk.tracing import Span
from sentry_sdk.integrations._wsgi_common import _filter_headers
from sentry_sdk._types import MYPY
if MYPY:
from typing import Callable
from typing import Dict
from typing import Iterator
from typing import Any
from typing import Tuple
from typing import Optional
from typing import TypeVar
from sentry_sdk.utils import ExcInfo
from sentry_sdk._types import EventProcessor
T = TypeVar("T")
U = TypeVar("U")
E = TypeVar("E")
# Re-entrancy guard: set while a SentryWsgiMiddleware call is on the stack so
# stacked/wrapped WSGI apps don't instrument the same request twice.
_wsgi_middleware_applied = ContextVar("sentry_wsgi_middleware_applied")
if PY2:
    def wsgi_decoding_dance(s, charset="utf-8", errors="replace"):
        # type: (str, str, str) -> str
        # Python 2: WSGI environ values are byte strings; decode directly.
        return s.decode(charset, errors)
else:
    def wsgi_decoding_dance(s, charset="utf-8", errors="replace"):
        # type: (str, str, str) -> str
        # Python 3: PEP 3333 environ values are "bytes in disguise" decoded
        # as latin1; round-trip through latin1 to re-decode with the real
        # charset.
        return s.encode("latin1").decode(charset, errors)
def get_host(environ):
    # type: (Dict[str, str]) -> str
    """Return the host for the given WSGI environment. Yanked from Werkzeug."""
    host = environ.get("HTTP_HOST")
    if host:
        # Strip a redundant default port from the Host header.
        scheme = environ["wsgi.url_scheme"]
        if scheme == "http" and host.endswith(":80"):
            return host[:-3]
        if scheme == "https" and host.endswith(":443"):
            return host[:-4]
        return host
    server = environ.get("SERVER_NAME")
    if not server:
        # In spite of the WSGI spec, SERVER_NAME might not be present.
        return "unknown"
    if (environ["wsgi.url_scheme"], environ["SERVER_PORT"]) in (
        ("https", "443"),
        ("http", "80"),
    ):
        return server
    return server + ":" + environ["SERVER_PORT"]
def get_request_url(environ):
    # type: (Dict[str, str]) -> str
    """Return the absolute URL without query string for the given WSGI
    environment."""
    path = wsgi_decoding_dance(environ.get("PATH_INFO") or "").lstrip("/")
    return "%s://%s/%s" % (environ.get("wsgi.url_scheme"), get_host(environ), path)
class SentryWsgiMiddleware(object):
    """WSGI middleware that captures unhandled exceptions, attaches request
    data to events and traces each request as an http.server span."""
    __slots__ = ("app",)
    def __init__(self, app):
        # type: (Callable[[Dict[str, str], Callable[..., Any]], Any]) -> None
        self.app = app
    def __call__(self, environ, start_response):
        # type: (Dict[str, str], Callable[..., Any]) -> _ScopedResponse
        # Guard against double instrumentation when several integrations wrap
        # the same app; the outermost application of the middleware wins.
        if _wsgi_middleware_applied.get(False):
            return self.app(environ, start_response)
        _wsgi_middleware_applied.set(True)
        try:
            # Derive a fresh hub so scope changes stay local to this request.
            hub = Hub(Hub.current)
            with hub:
                with capture_internal_exceptions():
                    with hub.configure_scope() as scope:
                        scope.clear_breadcrumbs()
                        scope._name = "wsgi"
                        scope.add_event_processor(_make_wsgi_event_processor(environ))
                # Continue an incoming trace (sentry-trace header) if present.
                span = Span.continue_from_environ(environ)
                span.op = "http.server"
                span.transaction = "generic WSGI request"
                with hub.start_span(span) as span:
                    try:
                        rv = self.app(
                            environ,
                            functools.partial(
                                _sentry_start_response, start_response, span
                            ),
                        )
                    except BaseException:
                        # capture, then re-raise with the original traceback
                        reraise(*_capture_exception(hub))
        finally:
            _wsgi_middleware_applied.set(False)
        # Wrap the response iterable so streaming errors are captured too.
        return _ScopedResponse(hub, rv)
def _sentry_start_response(
    old_start_response, span, status, response_headers, exc_info=None
):
    # type: (Callable[[str, U, Optional[E]], T], Span, str, U, Optional[E]) -> T
    """start_response wrapper that records the HTTP status on the span
    before delegating to the real start_response."""
    with capture_internal_exceptions():
        # "200 OK" -> 200; a parse failure must never break the response.
        span.set_http_status(int(status.split(" ", 1)[0]))
    return old_start_response(status, response_headers, exc_info)
def _get_environ(environ):
    # type: (Dict[str, str]) -> Iterator[Tuple[str, str]]
    """Yield the whitelisted ``(key, value)`` pairs from the WSGI environ."""
    wanted = ["SERVER_NAME", "SERVER_PORT"]
    if _should_send_default_pii():
        # REMOTE_ADDR makes debugging proxy setups much easier; the proxy
        # headers themselves are reported with the other HTTP headers.
        wanted.append("REMOTE_ADDR")

    for name in wanted:
        if name in environ:
            yield name, environ[name]
# `get_headers` comes from `werkzeug.datastructures.EnvironHeaders`
#
# We need this function because Django does not give us a "pure" http header
# dict. So we might as well use it for all WSGI integrations.
def _get_headers(environ):
    # type: (Dict[str, str]) -> Iterator[Tuple[str, str]]
    """Yield only proper HTTP headers from the environ, with CGI-style
    ``HTTP_FOO_BAR`` keys rewritten to ``Foo-Bar``."""
    bare = ("CONTENT_TYPE", "CONTENT_LENGTH")
    shadowed = ("HTTP_CONTENT_TYPE", "HTTP_CONTENT_LENGTH")

    for raw_key, value in iteritems(environ):
        raw_key = str(raw_key)
        if raw_key in bare:
            # Content-Type/Content-Length appear without the HTTP_ prefix.
            yield raw_key.replace("_", "-").title(), value
        elif raw_key.startswith("HTTP_") and raw_key not in shadowed:
            yield raw_key[5:].replace("_", "-").title(), value
def get_client_ip(environ):
    # type: (Dict[str, str]) -> Optional[Any]
    """Best-effort guess of the client IP from common proxy headers.

    Not suitable for security-sensitive use (headers are client-forgeable),
    but good enough for the event payload.
    """
    forwarded = environ.get("HTTP_X_FORWARDED_FOR")
    if forwarded is not None:
        # First entry is the originating client in the X-Forwarded-For chain.
        return forwarded.split(",")[0].strip()

    real_ip = environ.get("HTTP_X_REAL_IP")
    if real_ip is not None:
        return real_ip

    return environ.get("REMOTE_ADDR")
def _capture_exception(hub):
    # type: (Hub) -> ExcInfo
    """Capture the in-flight exception on *hub* and return ``sys.exc_info()``
    so the caller can re-raise it."""
    exc_info = sys.exc_info()

    # The client may have been unbound while the response was streaming.
    client = hub.client
    if client is None:
        return exc_info

    exc = exc_info[1]
    # SystemExit with code 0/None is normal shutdown, not an error.
    if isinstance(exc, SystemExit) and exc.code in (0, None):
        return exc_info

    event, hint = event_from_exception(
        exc_info,
        client_options=client.options,
        mechanism={"type": "wsgi", "handled": False},
    )
    hub.capture_event(event, hint=hint)

    return exc_info
class _ScopedResponse(object):
    """Wraps a WSGI response iterable so each chunk is pulled inside the
    request's hub, and exceptions raised during streaming or close() are
    captured (and re-raised)."""

    __slots__ = ("_response", "_hub")

    def __init__(self, hub, response):
        # type: (Hub, Iterator[bytes]) -> None
        self._hub = hub
        self._response = response

    def __iter__(self):
        # type: () -> Iterator[bytes]
        iterator = iter(self._response)

        while True:
            # Only next() runs under the hub; the yield itself happens
            # outside the `with` block.
            with self._hub:
                try:
                    chunk = next(iterator)
                except StopIteration:
                    break
                except BaseException:
                    reraise(*_capture_exception(self._hub))

            yield chunk

    def close(self):
        # type: () -> None
        with self._hub:
            try:
                self._response.close()  # type: ignore
            except AttributeError:
                # The wrapped response is not required to provide close().
                pass
            except BaseException:
                reraise(*_capture_exception(self._hub))
def _make_wsgi_event_processor(environ):
    # type: (Dict[str, str]) -> EventProcessor
    """Build an event processor that attaches request data from *environ*.

    The interesting values are extracted eagerly, before the closure is
    created, for two reasons: holding the environ would keep a strong
    reference to everything in it (possibly large objects) if the scope/hub
    is never torn down properly, and uWSGI cannot cope with the environ
    outliving the request (https://github.com/unbit/uwsgi/issues/1950).
    """
    client_ip = get_client_ip(environ)
    url = get_request_url(environ)
    query = environ.get("QUERY_STRING")
    http_method = environ.get("REQUEST_METHOD")
    env_subset = dict(_get_environ(environ))
    header_map = _filter_headers(dict(_get_headers(environ)))

    def event_processor(event, hint):
        # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
        with capture_internal_exceptions():
            # Assign key by key so a failure halfway through still leaves
            # the event with partial request data.
            request_info = event.setdefault("request", {})

            if _should_send_default_pii():
                user_info = event.setdefault("user", {})
                if client_ip:
                    user_info["ip_address"] = client_ip

            request_info["url"] = url
            request_info["query_string"] = query
            request_info["method"] = http_method
            request_info["env"] = env_subset
            request_info["headers"] = header_map

        return event

    return event_processor
| |
"""Tests for the aws component config and setup."""
from asynctest import patch as async_patch, MagicMock, CoroutineMock
from homeassistant.components import aws
from homeassistant.setup import async_setup_component
class MockAioSession:
    """Stand-in for ``aiobotocore.AioSession`` used by these tests."""

    def __init__(self, *args, **kwargs):
        """Create one coroutine mock per AWS service call we exercise."""
        self.get_user = CoroutineMock()  # iam
        self.invoke = CoroutineMock()  # lambda
        self.publish = CoroutineMock()  # sns
        self.send_message = CoroutineMock()  # sqs

    def create_client(self, *args, **kwargs):  # pylint: disable=no-self-use
        """Return an async-context-manager mock exposing the service calls."""
        enter = CoroutineMock(
            return_value=CoroutineMock(
                get_user=self.get_user,
                invoke=self.invoke,
                publish=self.publish,
                send_message=self.send_message,
            )
        )
        return MagicMock(__aenter__=enter, __aexit__=CoroutineMock())
async def test_empty_config(hass):
    """An empty ``aws:`` entry gets a default profile created for it."""
    with async_patch("aiobotocore.AioSession", new=MockAioSession):
        await async_setup_component(hass, "aws", {"aws": {}})
        await hass.async_block_till_done()

        sessions = hass.data[aws.DATA_SESSIONS]
        assert sessions is not None
        assert len(sessions) == 1
        default_session = sessions.get("default")
        assert isinstance(default_session, MockAioSession)

        # The auto-created default profile is never validated against IAM.
        default_session.get_user.assert_not_awaited()
async def test_empty_credential(hass):
    """A default profile is created when no credential section is given."""
    config = {
        "aws": {
            "notify": [
                {
                    "service": "lambda",
                    "name": "New Lambda Test",
                    "region_name": "us-east-1",
                }
            ]
        }
    }
    with async_patch("aiobotocore.AioSession", new=MockAioSession):
        await async_setup_component(hass, "aws", config)
        await hass.async_block_till_done()

        sessions = hass.data[aws.DATA_SESSIONS]
        assert sessions is not None
        assert len(sessions) == 1
        default_session = sessions.get("default")
        assert isinstance(default_session, MockAioSession)

        assert hass.services.has_service("notify", "new_lambda_test") is True
        await hass.services.async_call(
            "notify",
            "new_lambda_test",
            {"message": "test", "target": "ARN"},
            blocking=True,
        )
        default_session.invoke.assert_awaited_once()
async def test_profile_credential(hass):
    """A credential referencing a profile name creates a named session."""
    config = {
        "aws": {
            "credentials": {"name": "test", "profile_name": "test-profile"},
            "notify": [
                {
                    "service": "sns",
                    "credential_name": "test",
                    "name": "SNS Test",
                    "region_name": "us-east-1",
                }
            ],
        }
    }
    with async_patch("aiobotocore.AioSession", new=MockAioSession):
        await async_setup_component(hass, "aws", config)
        await hass.async_block_till_done()

        sessions = hass.data[aws.DATA_SESSIONS]
        assert sessions is not None
        assert len(sessions) == 1
        test_session = sessions.get("test")
        assert isinstance(test_session, MockAioSession)

        assert hass.services.has_service("notify", "sns_test") is True
        await hass.services.async_call(
            "notify",
            "sns_test",
            {"title": "test", "message": "test", "target": "ARN"},
            blocking=True,
        )
        test_session.publish.assert_awaited_once()
async def test_access_key_credential(hass):
    """Access-key credentials create their own named session."""
    config = {
        "aws": {
            "credentials": [
                {"name": "test", "profile_name": "test-profile"},
                {
                    "name": "key",
                    "aws_access_key_id": "test-key",
                    "aws_secret_access_key": "test-secret",
                },
            ],
            "notify": [
                {
                    "service": "sns",
                    "credential_name": "key",
                    "name": "SNS Test",
                    "region_name": "us-east-1",
                }
            ],
        }
    }
    with async_patch("aiobotocore.AioSession", new=MockAioSession):
        await async_setup_component(hass, "aws", config)
        await hass.async_block_till_done()

        # One session per configured credential.
        sessions = hass.data[aws.DATA_SESSIONS]
        assert sessions is not None
        assert len(sessions) == 2
        key_session = sessions.get("key")
        assert isinstance(key_session, MockAioSession)

        assert hass.services.has_service("notify", "sns_test") is True
        await hass.services.async_call(
            "notify",
            "sns_test",
            {"title": "test", "message": "test", "target": "ARN"},
            blocking=True,
        )
        key_session.publish.assert_awaited_once()
async def test_notify_credential(hass):
    """A notify platform can carry its access key inline."""
    config = {
        "aws": {
            "notify": [
                {
                    "service": "sqs",
                    "credential_name": "test",
                    "name": "SQS Test",
                    "region_name": "us-east-1",
                    "aws_access_key_id": "some-key",
                    "aws_secret_access_key": "some-secret",
                }
            ]
        }
    }
    with async_patch("aiobotocore.AioSession", new=MockAioSession):
        await async_setup_component(hass, "aws", config)
        await hass.async_block_till_done()

        sessions = hass.data[aws.DATA_SESSIONS]
        assert sessions is not None
        assert len(sessions) == 1
        assert isinstance(sessions.get("default"), MockAioSession)

        assert hass.services.has_service("notify", "sqs_test") is True
        await hass.services.async_call(
            "notify", "sqs_test", {"message": "test", "target": "ARN"}, blocking=True
        )
async def test_notify_credential_profile(hass):
    """A notify platform can reference an AWS profile directly."""
    config = {
        "aws": {
            "notify": [
                {
                    "service": "sqs",
                    "name": "SQS Test",
                    "region_name": "us-east-1",
                    "profile_name": "test",
                }
            ]
        }
    }
    with async_patch("aiobotocore.AioSession", new=MockAioSession):
        await async_setup_component(hass, "aws", config)
        await hass.async_block_till_done()

        sessions = hass.data[aws.DATA_SESSIONS]
        assert sessions is not None
        assert len(sessions) == 1
        assert isinstance(sessions.get("default"), MockAioSession)

        assert hass.services.has_service("notify", "sqs_test") is True
        await hass.services.async_call(
            "notify", "sqs_test", {"message": "test", "target": "ARN"}, blocking=True
        )
async def test_credential_skip_validate(hass):
    """``validate: false`` suppresses the IAM get_user validation call."""
    config = {
        "aws": {
            "credentials": [
                {
                    "name": "key",
                    "aws_access_key_id": "not-valid",
                    "aws_secret_access_key": "dont-care",
                    "validate": False,
                }
            ]
        }
    }
    with async_patch("aiobotocore.AioSession", new=MockAioSession):
        await async_setup_component(hass, "aws", config)
        await hass.async_block_till_done()

        sessions = hass.data[aws.DATA_SESSIONS]
        assert sessions is not None
        assert len(sessions) == 1
        key_session = sessions.get("key")
        assert isinstance(key_session, MockAioSession)
        key_session.get_user.assert_not_awaited()
| |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JSON Web Tokens
Provides support for creating (encoding) and verifying (decoding) JWTs,
especially JWTs generated and consumed by Google infrastructure.
See `rfc7519`_ for more details on JWTs.
To encode a JWT use :func:`encode`::
from google.auth import crypt
from google.auth import jwt
signer = crypt.Signer(private_key)
payload = {'some': 'payload'}
encoded = jwt.encode(signer, payload)
To decode a JWT and verify claims use :func:`decode`::
claims = jwt.decode(encoded, certs=public_certs)
You can also skip verification::
claims = jwt.decode(encoded, verify=False)
.. _rfc7519: https://tools.ietf.org/html/rfc7519
"""
import base64
import collections
import collections.abc
import copy
import datetime
import json

import cachetools
from six.moves import urllib

from google.auth import _helpers
from google.auth import _service_account_info
from google.auth import crypt
from google.auth import exceptions
import google.auth.credentials
_DEFAULT_TOKEN_LIFETIME_SECS = 3600  # 1 hour in seconds
# Default bound for OnDemandCredentials' per-audience JWT LRU cache.
_DEFAULT_MAX_CACHE_SIZE = 10
def encode(signer, payload, header=None, key_id=None):
    """Make a signed JWT.

    Args:
        signer (google.auth.crypt.Signer): The signer used to sign the JWT.
        payload (Mapping[str, str]): The JWT payload.
        header (Mapping[str, str]): Additional JWT header payload.
        key_id (str): The key id to add to the JWT header. If the
            signer has a key id it will be used as the default. If this is
            specified it will override the signer's key id.

    Returns:
        bytes: The encoded JWT.
    """
    if header is None:
        header = {}
    else:
        # Work on a copy so a caller-supplied mapping is not mutated by the
        # 'typ'/'alg'/'kid' entries added below.
        header = dict(header)

    if key_id is None:
        key_id = signer.key_id

    header.update({'typ': 'JWT', 'alg': 'RS256'})

    if key_id is not None:
        header['kid'] = key_id

    # JWT = base64url(header) . base64url(payload) . base64url(signature)
    segments = [
        base64.urlsafe_b64encode(json.dumps(header).encode('utf-8')),
        base64.urlsafe_b64encode(json.dumps(payload).encode('utf-8')),
    ]

    # The signature covers the first two (encoded) segments joined by '.'.
    signing_input = b'.'.join(segments)
    signature = signer.sign(signing_input)
    segments.append(base64.urlsafe_b64encode(signature))

    return b'.'.join(segments)
def _decode_jwt_segment(encoded_section):
    """Decode one base64url-encoded JWT segment into its JSON value."""
    raw = _helpers.padded_urlsafe_b64decode(encoded_section)
    try:
        return json.loads(raw.decode('utf-8'))
    except ValueError:
        raise ValueError('Can\'t parse segment: {0}'.format(raw))
def _unverified_decode(token):
    """Split and decode a JWT without performing any verification.

    Args:
        token (Union[str, bytes]): The encoded JWT.

    Returns:
        Tuple[str, str, str, str]: header, payload, signed_section, and
            signature.

    Raises:
        ValueError: if there are an incorrect amount of segments in the token.
    """
    token = _helpers.to_bytes(token)

    pieces = token.split(b'.')
    if len(pieces) != 3:
        raise ValueError(
            'Wrong number of segments in token: {0}'.format(token))

    encoded_header, encoded_payload, signature = pieces
    # The signature covers the header and payload exactly as transmitted.
    signed_section = encoded_header + b'.' + encoded_payload
    signature = _helpers.padded_urlsafe_b64decode(signature)

    header = _decode_jwt_segment(encoded_header)
    payload = _decode_jwt_segment(encoded_payload)

    return header, payload, signed_section, signature
def decode_header(token):
    """Return the decoded header of a token.

    No verification is done. This is useful to extract the key id from
    the header in order to acquire the appropriate certificate to verify
    the token.

    Args:
        token (Union[str, bytes]): the encoded JWT.

    Returns:
        Mapping: The decoded JWT header.
    """
    return _unverified_decode(token)[0]
def _verify_iat_and_exp(payload):
    """Check the ``iat`` (Issued At) and ``exp`` (Expires) claims against now.

    Args:
        payload (Mapping[str, str]): The JWT payload.

    Raises:
        ValueError: if a required claim is missing, the token is used too
            early, or the token has expired.
    """
    now = _helpers.datetime_to_secs(_helpers.utcnow())

    # Both claims must be present.
    for claim in ('iat', 'exp'):
        if claim not in payload:
            raise ValueError(
                'Token does not contain required claim {}'.format(claim))

    # Reject tokens issued in the future, allowing a little clock skew.
    iat = payload['iat']
    earliest = iat - _helpers.CLOCK_SKEW_SECS
    if now < earliest:
        raise ValueError('Token used too early, {} < {}'.format(now, iat))

    # Reject expired tokens, again allowing for clock skew.
    exp = payload['exp']
    latest = exp + _helpers.CLOCK_SKEW_SECS
    if latest < now:
        raise ValueError('Token expired, {} < {}'.format(latest, now))
def decode(token, certs=None, verify=True, audience=None):
    """Decode and verify a JWT.

    Args:
        token (str): The encoded JWT.
        certs (Union[str, bytes, Mapping[str, Union[str, bytes]]]): The
            certificate used to validate the JWT signature. If bytes or
            string, it must be the public key certificate in PEM format.
            If a mapping, it must be a mapping of key IDs to public key
            certificates in PEM format. The mapping must contain the same
            key ID that's specified in the token's header.
        verify (bool): Whether to perform signature and claim validation.
            Verification is done by default.
        audience (str): The audience claim, 'aud', that this JWT should
            contain. If None then the JWT's 'aud' parameter is not verified.

    Returns:
        Mapping[str, str]: The deserialized JSON payload in the JWT.

    Raises:
        ValueError: if any verification checks failed.
    """
    header, payload, signed_section, signature = _unverified_decode(token)

    if not verify:
        return payload

    # If certs is specified as a dictionary of key IDs to certificates, then
    # use the certificate identified by the key ID in the token header.
    # Note: ``collections.Mapping`` was removed in Python 3.10; the ABC lives
    # in ``collections.abc``.
    if isinstance(certs, collections.abc.Mapping):
        key_id = header.get('kid')
        if key_id:
            if key_id not in certs:
                raise ValueError(
                    'Certificate for key id {} not found.'.format(key_id))
            certs_to_check = [certs[key_id]]
        # If there's no key id in the header, check against all of the certs.
        else:
            certs_to_check = certs.values()
    else:
        certs_to_check = certs

    # Verify that the signature matches the message.
    if not crypt.verify_signature(signed_section, signature, certs_to_check):
        raise ValueError('Could not verify token signature.')

    # Verify the issued-at and expiration times in the payload.
    _verify_iat_and_exp(payload)

    # Check audience.
    if audience is not None:
        claim_audience = payload.get('aud')
        if audience != claim_audience:
            raise ValueError(
                'Token has wrong audience {}, expected {}'.format(
                    claim_audience, audience))

    return payload
class Credentials(google.auth.credentials.Signing,
                  google.auth.credentials.Credentials):
    """Credentials that use a JWT as the bearer token.

    These credentials require an "audience" claim. This claim identifies the
    intended recipient of the bearer token.

    The constructor arguments determine the claims for the JWT that is
    sent with requests. Usually, you'll construct these credentials with
    one of the helper constructors as shown in the next section.

    To create JWT credentials using a Google service account private key
    JSON file::

        audience = 'https://pubsub.googleapis.com/google.pubsub.v1.Publisher'
        credentials = jwt.Credentials.from_service_account_file(
            'service-account.json',
            audience=audience)

    If you already have the service account file loaded and parsed::

        service_account_info = json.load(open('service_account.json'))
        credentials = jwt.Credentials.from_service_account_info(
            service_account_info,
            audience=audience)

    Both helper methods pass on arguments to the constructor, so you can
    specify the JWT claims::

        credentials = jwt.Credentials.from_service_account_file(
            'service-account.json',
            audience=audience,
            additional_claims={'meta': 'data'})

    You can also construct the credentials directly if you have a
    :class:`~google.auth.crypt.Signer` instance::

        credentials = jwt.Credentials(
            signer,
            issuer='your-issuer',
            subject='your-subject',
            audience=audience)

    The claims are considered immutable. If you want to modify the claims,
    you can easily create another instance using :meth:`with_claims`::

        new_audience = (
            'https://pubsub.googleapis.com/google.pubsub.v1.Subscriber')
        new_credentials = credentials.with_claims(audience=new_audience)
    """

    def __init__(self, signer, issuer, subject, audience,
                 additional_claims=None,
                 token_lifetime=_DEFAULT_TOKEN_LIFETIME_SECS):
        """
        Args:
            signer (google.auth.crypt.Signer): The signer used to sign JWTs.
            issuer (str): The `iss` claim.
            subject (str): The `sub` claim.
            audience (str): the `aud` claim. The intended audience for the
                credentials.
            additional_claims (Mapping[str, str]): Any additional claims for
                the JWT payload.
            token_lifetime (int): The amount of time in seconds for
                which the token is valid. Defaults to 1 hour.
        """
        super(Credentials, self).__init__()
        self._signer = signer
        self._issuer = issuer
        self._subject = subject
        self._audience = audience
        self._token_lifetime = token_lifetime

        # Normalized here rather than via a (shared) mutable default argument.
        if additional_claims is None:
            additional_claims = {}

        self._additional_claims = additional_claims

    @classmethod
    def _from_signer_and_info(cls, signer, info, **kwargs):
        """Creates a Credentials instance from a signer and service account
        info.

        Args:
            signer (google.auth.crypt.Signer): The signer used to sign JWTs.
            info (Mapping[str, str]): The service account info.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.jwt.Credentials: The constructed credentials.

        Raises:
            ValueError: If the info is not in the expected format.
        """
        # The service account email doubles as both issuer and subject
        # unless the caller overrides them.
        kwargs.setdefault('subject', info['client_email'])
        kwargs.setdefault('issuer', info['client_email'])
        return cls(signer, **kwargs)

    @classmethod
    def from_service_account_info(cls, info, **kwargs):
        """Creates an Credentials instance from a dictionary.

        Args:
            info (Mapping[str, str]): The service account info in Google
                format.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.jwt.Credentials: The constructed credentials.

        Raises:
            ValueError: If the info is not in the expected format.
        """
        signer = _service_account_info.from_dict(
            info, require=['client_email'])
        return cls._from_signer_and_info(signer, info, **kwargs)

    @classmethod
    def from_service_account_file(cls, filename, **kwargs):
        """Creates a Credentials instance from a service account .json file
        in Google format.

        Args:
            filename (str): The path to the service account .json file.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.jwt.Credentials: The constructed credentials.
        """
        info, signer = _service_account_info.from_filename(
            filename, require=['client_email'])
        return cls._from_signer_and_info(signer, info, **kwargs)

    @classmethod
    def from_signing_credentials(cls, credentials, audience, **kwargs):
        """Creates a new :class:`google.auth.jwt.Credentials` instance from an
        existing :class:`google.auth.credentials.Signing` instance.

        The new instance will use the same signer as the existing instance and
        will use the existing instance's signer email as the issuer and
        subject by default.

        Example::

            svc_creds = service_account.Credentials.from_service_account_file(
                'service_account.json')
            audience = (
                'https://pubsub.googleapis.com/google.pubsub.v1.Publisher')
            jwt_creds = jwt.Credentials.from_signing_credentials(
                svc_creds, audience=audience)

        Args:
            credentials (google.auth.credentials.Signing): The credentials to
                use to construct the new credentials.
            audience (str): the `aud` claim. The intended audience for the
                credentials.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.jwt.Credentials: A new Credentials instance.
        """
        kwargs.setdefault('issuer', credentials.signer_email)
        kwargs.setdefault('subject', credentials.signer_email)
        return cls(
            credentials.signer,
            audience=audience,
            **kwargs)

    def with_claims(self, issuer=None, subject=None, audience=None,
                    additional_claims=None):
        """Returns a copy of these credentials with modified claims.

        Args:
            issuer (str): The `iss` claim. If unspecified the current issuer
                claim will be used.
            subject (str): The `sub` claim. If unspecified the current subject
                claim will be used.
            audience (str): the `aud` claim. If unspecified the current
                audience claim will be used.
            additional_claims (Mapping[str, str]): Any additional claims for
                the JWT payload. This will be merged with the current
                additional claims.

        Returns:
            google.auth.jwt.Credentials: A new credentials instance.
        """
        # Deep-copy so the new instance never shares claim state with this one.
        new_additional_claims = copy.deepcopy(self._additional_claims)
        new_additional_claims.update(additional_claims or {})

        return Credentials(
            self._signer,
            issuer=issuer if issuer is not None else self._issuer,
            subject=subject if subject is not None else self._subject,
            audience=audience if audience is not None else self._audience,
            additional_claims=new_additional_claims)

    def _make_jwt(self):
        """Make a signed JWT.

        Returns:
            Tuple[bytes, datetime]: The encoded JWT and the expiration.
        """
        now = _helpers.utcnow()
        lifetime = datetime.timedelta(seconds=self._token_lifetime)
        expiry = now + lifetime

        # 'iat'/'exp' are seconds since the epoch (via datetime_to_secs).
        payload = {
            'iss': self._issuer,
            'sub': self._subject,
            'iat': _helpers.datetime_to_secs(now),
            'exp': _helpers.datetime_to_secs(expiry),
            'aud': self._audience,
        }

        payload.update(self._additional_claims)

        jwt = encode(self._signer, payload)

        return jwt, expiry

    def refresh(self, request):
        """Refreshes the access token.

        Args:
            request (Any): Unused.
        """
        # pylint: disable=unused-argument
        # (pylint doesn't correctly recognize overridden methods.)
        # Refreshing is purely local: mint a new self-signed JWT.
        self.token, self.expiry = self._make_jwt()

    @_helpers.copy_docstring(google.auth.credentials.Signing)
    def sign_bytes(self, message):
        return self._signer.sign(message)

    @property
    @_helpers.copy_docstring(google.auth.credentials.Signing)
    def signer_email(self):
        return self._issuer

    @property
    @_helpers.copy_docstring(google.auth.credentials.Signing)
    def signer(self):
        return self._signer
class OnDemandCredentials(
        google.auth.credentials.Signing,
        google.auth.credentials.Credentials):
    """On-demand JWT credentials.

    Like :class:`Credentials`, this class uses a JWT as the bearer token for
    authentication. However, this class does not require the audience at
    construction time. Instead, it will generate a new token on-demand for
    each request using the request URI as the audience. It caches tokens
    so that multiple requests to the same URI do not incur the overhead
    of generating a new token every time.

    This behavior is especially useful for `gRPC`_ clients. A gRPC service may
    have multiple audience and gRPC clients may not know all of the audiences
    required for accessing a particular service. With these credentials,
    no knowledge of the audiences is required ahead of time.

    .. _grpc: http://www.grpc.io/
    """

    def __init__(self, signer, issuer, subject,
                 additional_claims=None,
                 token_lifetime=_DEFAULT_TOKEN_LIFETIME_SECS,
                 max_cache_size=_DEFAULT_MAX_CACHE_SIZE):
        """
        Args:
            signer (google.auth.crypt.Signer): The signer used to sign JWTs.
            issuer (str): The `iss` claim.
            subject (str): The `sub` claim.
            additional_claims (Mapping[str, str]): Any additional claims for
                the JWT payload.
            token_lifetime (int): The amount of time in seconds for
                which the token is valid. Defaults to 1 hour.
            max_cache_size (int): The maximum number of JWT tokens to keep in
                cache. Tokens are cached using :class:`cachetools.LRUCache`.
        """
        super(OnDemandCredentials, self).__init__()
        self._signer = signer
        self._issuer = issuer
        self._subject = subject
        self._token_lifetime = token_lifetime

        # Normalized here rather than via a (shared) mutable default argument.
        if additional_claims is None:
            additional_claims = {}

        self._additional_claims = additional_claims
        # audience (str) -> (jwt (bytes), expiry (datetime)).
        self._cache = cachetools.LRUCache(maxsize=max_cache_size)

    @classmethod
    def _from_signer_and_info(cls, signer, info, **kwargs):
        """Creates an OnDemandCredentials instance from a signer and service
        account info.

        Args:
            signer (google.auth.crypt.Signer): The signer used to sign JWTs.
            info (Mapping[str, str]): The service account info.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.jwt.OnDemandCredentials: The constructed credentials.

        Raises:
            ValueError: If the info is not in the expected format.
        """
        # The service account email doubles as both issuer and subject
        # unless the caller overrides them.
        kwargs.setdefault('subject', info['client_email'])
        kwargs.setdefault('issuer', info['client_email'])
        return cls(signer, **kwargs)

    @classmethod
    def from_service_account_info(cls, info, **kwargs):
        """Creates an OnDemandCredentials instance from a dictionary.

        Args:
            info (Mapping[str, str]): The service account info in Google
                format.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.jwt.OnDemandCredentials: The constructed credentials.

        Raises:
            ValueError: If the info is not in the expected format.
        """
        signer = _service_account_info.from_dict(
            info, require=['client_email'])
        return cls._from_signer_and_info(signer, info, **kwargs)

    @classmethod
    def from_service_account_file(cls, filename, **kwargs):
        """Creates an OnDemandCredentials instance from a service account .json
        file in Google format.

        Args:
            filename (str): The path to the service account .json file.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.jwt.OnDemandCredentials: The constructed credentials.
        """
        info, signer = _service_account_info.from_filename(
            filename, require=['client_email'])
        return cls._from_signer_and_info(signer, info, **kwargs)

    @classmethod
    def from_signing_credentials(cls, credentials, **kwargs):
        """Creates a new :class:`google.auth.jwt.OnDemandCredentials` instance
        from an existing :class:`google.auth.credentials.Signing` instance.

        The new instance will use the same signer as the existing instance and
        will use the existing instance's signer email as the issuer and
        subject by default.

        Example::

            svc_creds = service_account.Credentials.from_service_account_file(
                'service_account.json')
            jwt_creds = jwt.OnDemandCredentials.from_signing_credentials(
                svc_creds)

        Args:
            credentials (google.auth.credentials.Signing): The credentials to
                use to construct the new credentials.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            google.auth.jwt.Credentials: A new Credentials instance.
        """
        kwargs.setdefault('issuer', credentials.signer_email)
        kwargs.setdefault('subject', credentials.signer_email)
        return cls(credentials.signer, **kwargs)

    def with_claims(self, issuer=None, subject=None, additional_claims=None):
        """Returns a copy of these credentials with modified claims.

        Args:
            issuer (str): The `iss` claim. If unspecified the current issuer
                claim will be used.
            subject (str): The `sub` claim. If unspecified the current subject
                claim will be used.
            additional_claims (Mapping[str, str]): Any additional claims for
                the JWT payload. This will be merged with the current
                additional claims.

        Returns:
            google.auth.jwt.OnDemandCredentials: A new credentials instance.
        """
        # Deep-copy so the new instance never shares claim state with this one.
        new_additional_claims = copy.deepcopy(self._additional_claims)
        new_additional_claims.update(additional_claims or {})

        return OnDemandCredentials(
            self._signer,
            issuer=issuer if issuer is not None else self._issuer,
            subject=subject if subject is not None else self._subject,
            additional_claims=new_additional_claims,
            max_cache_size=self._cache.maxsize)

    @property
    def valid(self):
        """Checks the validity of the credentials.

        These credentials are always valid because they generate tokens on
        demand, per request.
        """
        return True

    def _make_jwt_for_audience(self, audience):
        """Make a new JWT for the given audience.

        Args:
            audience (str): The intended audience.

        Returns:
            Tuple[bytes, datetime]: The encoded JWT and the expiration.
        """
        now = _helpers.utcnow()
        lifetime = datetime.timedelta(seconds=self._token_lifetime)
        expiry = now + lifetime

        # 'iat'/'exp' are seconds since the epoch (via datetime_to_secs).
        payload = {
            'iss': self._issuer,
            'sub': self._subject,
            'iat': _helpers.datetime_to_secs(now),
            'exp': _helpers.datetime_to_secs(expiry),
            'aud': audience,
        }

        payload.update(self._additional_claims)

        jwt = encode(self._signer, payload)

        return jwt, expiry

    def _get_jwt_for_audience(self, audience):
        """Get a JWT For a given audience.

        If there is already an existing, non-expired token in the cache for
        the audience, that token is used. Otherwise, a new token will be
        created.

        Args:
            audience (str): The intended audience.

        Returns:
            bytes: The encoded JWT.
        """
        token, expiry = self._cache.get(audience, (None, None))

        # Mint (and cache) a fresh token on a cache miss or if the cached
        # token has already expired.
        if token is None or expiry < _helpers.utcnow():
            token, expiry = self._make_jwt_for_audience(audience)
            self._cache[audience] = token, expiry

        return token

    def refresh(self, request):
        """Raises an exception, these credentials can not be directly
        refreshed.

        Args:
            request (Any): Unused.

        Raises:
            google.auth.RefreshError
        """
        # pylint: disable=unused-argument
        # (pylint doesn't correctly recognize overridden methods.)
        raise exceptions.RefreshError(
            'OnDemandCredentials can not be directly refreshed.')

    def before_request(self, request, method, url, headers):
        """Performs credential-specific before request logic.

        Args:
            request (Any): Unused. JWT credentials do not need to make an
                HTTP request to refresh.
            method (str): The request's HTTP method.
            url (str): The request's URI. This is used as the audience claim
                when generating the JWT.
            headers (Mapping): The request's headers.
        """
        # pylint: disable=unused-argument
        # (pylint doesn't correctly recognize overridden methods.)
        parts = urllib.parse.urlsplit(url)
        # Strip query string and fragment
        audience = urllib.parse.urlunsplit(
            (parts.scheme, parts.netloc, parts.path, None, None))
        token = self._get_jwt_for_audience(audience)
        self.apply(headers, token=token)

    @_helpers.copy_docstring(google.auth.credentials.Signing)
    def sign_bytes(self, message):
        return self._signer.sign(message)

    @property
    @_helpers.copy_docstring(google.auth.credentials.Signing)
    def signer_email(self):
        return self._issuer

    @property
    @_helpers.copy_docstring(google.auth.credentials.Signing)
    def signer(self):
        return self._signer
| |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
import mock
import webapp2
import webtest
# pylint: disable=unused-import
from dashboard import mock_oauth2_decorator
# pylint: enable=unused-import
from dashboard import associate_alerts
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models.subscription import Subscription
from dashboard.services import issue_tracker_service
class AssociateAlertsTest(testing_common.TestCase):
  """Tests for the /associate_alerts handler.

  The handler links Anomaly entities to an issue-tracker bug ID, asking for
  confirmation when the selected alerts' revision ranges do not overlap the
  ranges of alerts already on the bug.
  """

  def setUp(self):
    super(AssociateAlertsTest, self).setUp()
    # Serve the handler under test through a real WSGI app so requests go
    # through the full webapp2 routing/response cycle.
    app = webapp2.WSGIApplication([(
        '/associate_alerts', associate_alerts.AssociateAlertsHandler)])
    self.testapp = webtest.TestApp(app)
    testing_common.SetSheriffDomains(['chromium.org'])
    self.SetCurrentUser('foo@chromium.org', is_admin=True)

  def _AddTests(self):
    """Adds sample Tests and returns a list of their keys."""
    testing_common.AddTests(['ChromiumGPU'], ['linux-release'], {
        'scrolling-benchmark': {
            'first_paint': {},
            'mean_frame_time': {},
        }
    })
    return list(map(utils.TestKey, [
        'ChromiumGPU/linux-release/scrolling-benchmark/first_paint',
        'ChromiumGPU/linux-release/scrolling-benchmark/mean_frame_time',
    ]))

  def _AddAnomalies(self):
    """Adds sample Anomaly data and returns a dict of revision to key."""
    subscription = Subscription(
        name='Chromium Perf Sheriff',
        notification_email='sullivan@google.com'
    )
    test_keys = self._AddTests()
    key_map = {}

    # Add anomalies to the two tests alternately.
    for end_rev in range(10000, 10120, 10):
      test_key = test_keys[0] if end_rev % 20 == 0 else test_keys[1]
      anomaly_key = anomaly.Anomaly(
          start_revision=(end_rev - 5), end_revision=end_rev, test=test_key,
          median_before_anomaly=100, median_after_anomaly=200,
          subscriptions=[subscription],
          subscription_names=[subscription.name],
      ).put()
      key_map[end_rev] = anomaly_key.urlsafe()

    # Add an anomaly that overlaps.
    anomaly_key = anomaly.Anomaly(
        start_revision=9990, end_revision=9996, test=test_keys[0],
        median_before_anomaly=100, median_after_anomaly=200,
        subscriptions=[subscription],
        subscription_names=[subscription.name],
    ).put()
    key_map[9996] = anomaly_key.urlsafe()

    # Add an anomaly that overlaps and has bug ID.
    anomaly_key = anomaly.Anomaly(
        start_revision=9990, end_revision=9997, test=test_keys[0],
        median_before_anomaly=100, median_after_anomaly=200, bug_id=12345,
        subscriptions=[subscription],
        subscription_names=[subscription.name],
    ).put()
    key_map[9997] = anomaly_key.urlsafe()
    return key_map

  def testGet_NoKeys_ShowsError(self):
    """A request without anomaly keys renders an error div."""
    response = self.testapp.get('/associate_alerts')
    self.assertIn('<div class="error">', response.body)

  def testGet_SameAsPost(self):
    """GET and POST are handled identically by this endpoint."""
    get_response = self.testapp.get('/associate_alerts')
    post_response = self.testapp.post('/associate_alerts')
    self.assertEqual(get_response.body, post_response.body)

  def testGet_InvalidBugId_ShowsError(self):
    """A non-numeric bug_id parameter renders an error message."""
    key_map = self._AddAnomalies()
    response = self.testapp.get(
        '/associate_alerts?keys=%s&bug_id=foo' % key_map[9996])
    self.assertIn('<div class="error">', response.body)
    self.assertIn('Invalid bug ID', response.body)

  # Mocks fetching bugs from issue tracker.
  # NOTE(review): the patch target begins with 'services.' while this module
  # imports 'dashboard.services.issue_tracker_service' -- confirm this path
  # matches how discovery.build is resolved at runtime.
  @mock.patch('services.issue_tracker_service.discovery.build',
              mock.MagicMock())
  @mock.patch.object(
      issue_tracker_service.IssueTrackerService, 'List',
      mock.MagicMock(return_value={
          'items': [
              {
                  'id': 12345,
                  'summary': '5% regression in bot/suite/x at 10000:20000',
                  'state': 'open',
                  'status': 'New',
                  'author': {'name': 'exam...@google.com'},
              },
              {
                  'id': 13579,
                  'summary': '1% regression in bot/suite/y at 10000:20000',
                  'state': 'closed',
                  'status': 'WontFix',
                  'author': {'name': 'exam...@google.com'},
              },
          ]}))
  def testGet_NoBugId_ShowsDialog(self):
    # When a GET request is made with some anomaly keys but no bug ID,
    # A HTML form is shown for the user to input a bug number.
    key_map = self._AddAnomalies()
    response = self.testapp.get('/associate_alerts?keys=%s' % key_map[10000])
    # The response contains a table of recent bugs and a form.
    self.assertIn('12345', response.body)
    self.assertIn('13579', response.body)
    self.assertIn('<form', response.body)

  def testGet_WithBugId_AlertIsAssociatedWithBugId(self):
    # When the bug ID is given and the alerts overlap, then the Anomaly
    # entities are updated and there is a response indicating success.
    key_map = self._AddAnomalies()
    response = self.testapp.get(
        '/associate_alerts?keys=%s,%s&bug_id=12345' % (
            key_map[9996], key_map[10000]))
    # The response page should have a bug number.
    self.assertIn('12345', response.body)
    # The Anomaly entities should be updated.
    for anomaly_entity in anomaly.Anomaly.query().fetch():
      if anomaly_entity.end_revision in (10000, 9996):
        self.assertEqual(12345, anomaly_entity.bug_id)
      elif anomaly_entity.end_revision != 9997:
        self.assertIsNone(anomaly_entity.bug_id)

  def testGet_TargetBugHasNoAlerts_DoesNotAskForConfirmation(self):
    # Associating alert with bug ID that has no alerts is always OK.
    key_map = self._AddAnomalies()
    response = self.testapp.get(
        '/associate_alerts?keys=%s,%s&bug_id=578' % (
            key_map[9996], key_map[10000]))
    # The response page should have a bug number.
    self.assertIn('578', response.body)
    # The Anomaly entities should be updated.
    self.assertEqual(
        578, anomaly.Anomaly.query(
            anomaly.Anomaly.end_revision == 9996).get().bug_id)
    self.assertEqual(
        578, anomaly.Anomaly.query(
            anomaly.Anomaly.end_revision == 10000).get().bug_id)

  def testGet_NonOverlappingAlerts_AsksForConfirmation(self):
    # Associating alert with bug ID that contains non-overlapping revision
    # ranges should show a confirmation page.
    key_map = self._AddAnomalies()
    response = self.testapp.get(
        '/associate_alerts?keys=%s,%s&bug_id=12345' % (
            key_map[10000], key_map[10010]))
    # The response page should show confirmation page.
    self.assertIn('Do you want to continue?', response.body)
    # The Anomaly entities should not be updated.
    for anomaly_entity in anomaly.Anomaly.query().fetch():
      if anomaly_entity.end_revision != 9997:
        self.assertIsNone(anomaly_entity.bug_id)

  def testGet_WithConfirm_AssociatesWithNewBugId(self):
    # Associating alert with bug ID and with confirmed non-overlapping revision
    # range should update alert with bug ID.
    key_map = self._AddAnomalies()
    response = self.testapp.get(
        '/associate_alerts?confirm=true&keys=%s,%s&bug_id=12345' % (
            key_map[10000], key_map[10010]))
    # The response page should have the bug number.
    self.assertIn('12345', response.body)
    # The Anomaly entities should be updated.
    for anomaly_entity in anomaly.Anomaly.query().fetch():
      if anomaly_entity.end_revision in (10000, 10010):
        self.assertEqual(12345, anomaly_entity.bug_id)
      elif anomaly_entity.end_revision != 9997:
        self.assertIsNone(anomaly_entity.bug_id)

  def testRevisionRangeFromSummary(self):
    # If the summary is in the expected format, a pair is returned.
    self.assertEqual(
        (10000, 10500),
        associate_alerts._RevisionRangeFromSummary(
            '1% regression in bot/my_suite/test at 10000:10500'))
    # Otherwise None is returned.
    self.assertIsNone(
        associate_alerts._RevisionRangeFromSummary(
            'Regression in rev ranges 12345 to 20000'))

  def testRangesOverlap_NonOverlapping_ReturnsFalse(self):
    """Disjoint ranges do not overlap, regardless of argument order."""
    self.assertFalse(associate_alerts._RangesOverlap((1, 5), (6, 9)))
    self.assertFalse(associate_alerts._RangesOverlap((6, 9), (1, 5)))

  def testRangesOverlap_NoneGiven_ReturnsFalse(self):
    """A missing range never overlaps anything."""
    self.assertFalse(associate_alerts._RangesOverlap((1, 5), None))
    self.assertFalse(associate_alerts._RangesOverlap(None, (1, 5)))
    self.assertFalse(associate_alerts._RangesOverlap(None, None))

  def testRangesOverlap_OneIncludesOther_ReturnsTrue(self):
    # True if one range envelopes the other.
    self.assertTrue(associate_alerts._RangesOverlap((1, 9), (2, 5)))
    self.assertTrue(associate_alerts._RangesOverlap((2, 5), (1, 9)))

  def testRangesOverlap_PartlyOverlap_ReturnsTrue(self):
    """Partially intersecting ranges overlap."""
    self.assertTrue(associate_alerts._RangesOverlap((1, 6), (5, 9)))
    self.assertTrue(associate_alerts._RangesOverlap((5, 9), (1, 6)))

  def testRangesOverlap_CommonBoundary_ReturnsTrue(self):
    """Ranges sharing exactly one endpoint count as overlapping."""
    self.assertTrue(associate_alerts._RangesOverlap((1, 6), (6, 9)))
    self.assertTrue(associate_alerts._RangesOverlap((6, 9), (1, 6)))
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| |
import numpy
from srxraylib.sources import srfunc
from srxraylib.util.h5_simple_writer import H5SimpleWriter
from scipy.interpolate import interp1d
from scipy.integrate import cumtrapz
import scipy.constants as codata
from orangecontrib.xoppy.util.fit_gaussian2d import fit_gaussian2d, info_params, twoD_Gaussian
from oasys.util.oasys_util import get_fwhm
# --------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------
def xoppy_calc_bm(MACHINE_NAME="ESRF bending magnet",RB_CHOICE=0,MACHINE_R_M=25.0,BFIELD_T=0.8,\
                  BEAM_ENERGY_GEV=6.04,CURRENT_A=0.1,HOR_DIV_MRAD=1.0,VER_DIV=0,\
                  PHOT_ENERGY_MIN=100.0,PHOT_ENERGY_MAX=100000.0,NPOINTS=500,LOG_CHOICE=1,\
                  PSI_MRAD_PLOT=1.0,PSI_MIN=-1.0,PSI_MAX=1.0,PSI_NPOINTS=500,TYPE_CALC=0,FILE_DUMP=0):
    """Compute bending-magnet emission with srfunc.

    Depending on TYPE_CALC the returned a6 array holds:
      0 - flux/power vs photon energy (Psi treatment selected by VER_DIV),
      1 - angular distribution integrated over all energies,
      2 - angular distribution at a single energy (PHOT_ENERGY_MIN),
      3 - flux as a function of both angle Psi and photon energy.

    Args:
        MACHINE_NAME: label only; not used in the computation.
        RB_CHOICE: 0 = use MACHINE_R_M directly, 1 = derive radius from BFIELD_T.
        VER_DIV: vertical-divergence treatment for TYPE_CALC == 0 (0..3).
        LOG_CHOICE: 0 = linear photon-energy grid, otherwise logarithmic.
        FILE_DUMP: if truthy, also write the results to "bm.spec".

    Returns:
        tuple (a6.T, fm, a, energy_ev); fm and a are only populated for
        TYPE_CALC == 3, otherwise None.

    NOTE(review): if TYPE_CALC is outside 0..3, a6/coltitles are never
    assigned and the function raises NameError — confirm callers only pass
    0..3.
    """
    # Lorentz factor (electron energy in GeV, codata_mee in MeV)
    gamma = BEAM_ENERGY_GEV*1e3 / srfunc.codata_mee

    r_m = MACHINE_R_M # magnetic radius in m
    if RB_CHOICE == 1:
        # derive the radius from the field: r = m c / (e B) * sqrt(gamma^2 - 1)
        r_m = srfunc.codata_me * srfunc.codata_c / srfunc.codata_ec / BFIELD_T * numpy.sqrt(gamma * gamma - 1)

    # calculate critical energy in eV
    ec_m = 4.0*numpy.pi*r_m/3.0/numpy.power(gamma,3) # wavelength in m
    ec_ev = srfunc.m2ev / ec_m

    # outputs that are only filled for TYPE_CALC == 3 (kept None otherwise)
    fm = None
    a = None
    energy_ev = None

    if TYPE_CALC == 0:
        if LOG_CHOICE == 0:
            energy_ev = numpy.linspace(PHOT_ENERGY_MIN,PHOT_ENERGY_MAX,NPOINTS) # photon energy grid
        else:
            energy_ev = numpy.logspace(numpy.log10(PHOT_ENERGY_MIN),numpy.log10(PHOT_ENERGY_MAX),NPOINTS) # photon energy grid

        # total, parallel and perpendicular polarization spectra
        a5 = srfunc.sync_ene(VER_DIV, energy_ev, ec_ev=ec_ev, polarization=0, \
            e_gev=BEAM_ENERGY_GEV, i_a=CURRENT_A, hdiv_mrad=HOR_DIV_MRAD, \
            psi_min=PSI_MIN, psi_max=PSI_MAX, psi_npoints=PSI_NPOINTS)

        a5par = srfunc.sync_ene(VER_DIV, energy_ev, ec_ev=ec_ev, polarization=1, \
            e_gev=BEAM_ENERGY_GEV, i_a=CURRENT_A, hdiv_mrad=HOR_DIV_MRAD, \
            psi_min=PSI_MIN, psi_max=PSI_MAX, psi_npoints=PSI_NPOINTS)

        a5per = srfunc.sync_ene(VER_DIV, energy_ev, ec_ev=ec_ev, polarization=2, \
            e_gev=BEAM_ENERGY_GEV, i_a=CURRENT_A, hdiv_mrad=HOR_DIV_MRAD, \
            psi_min=PSI_MIN, psi_max=PSI_MAX, psi_npoints=PSI_NPOINTS)

        # column titles depend on how the vertical divergence was treated
        if VER_DIV == 0:
            coltitles=['Photon Energy [eV]','Photon Wavelength [A]','E/Ec','Flux_spol/Flux_total','Flux_ppol/Flux_total','Flux[Phot/sec/0.1%bw]','Power[Watts/eV]']
            title='integrated in Psi,'
        if VER_DIV == 1:
            coltitles=['Photon Energy [eV]','Photon Wavelength [A]','E/Ec','Flux_spol/Flux_total','Flux_ppol/Flux_total','Flux[Phot/sec/0.1%bw/mrad(Psi)]','Power[Watts/eV/mrad(Psi)]']
            title='at Psi=0,'
        if VER_DIV == 2:
            coltitles=['Photon Energy [eV]','Photon Wavelength [A]','E/Ec','Flux_spol/Flux_total','Flux_ppol/Flux_total','Flux[Phot/sec/0.1%bw]','Power[Watts/eV]']
            title='in Psi=[%e,%e]'%(PSI_MIN,PSI_MAX)
        if VER_DIV == 3:
            coltitles=['Photon Energy [eV]','Photon Wavelength [A]','E/Ec','Flux_spol/Flux_total','Flux_ppol/Flux_total','Flux[Phot/sec/0.1%bw/mrad(Psi)]','Power[Watts/eV/mrad(Psi)]']
            title='at Psi=%e mrad'%(PSI_MIN)

        a6=numpy.zeros((7,len(energy_ev)))
        a1 = energy_ev
        a6[0,:] = (a1)
        a6[1,:] = srfunc.m2ev * 1e10 / (a1)  # wavelength in Angstrom
        a6[2,:] = (a1)/ec_ev # E/Ec
        a6[3,:] = numpy.array(a5par)/numpy.array(a5)
        a6[4,:] = numpy.array(a5per)/numpy.array(a5)
        a6[5,:] = numpy.array(a5)
        a6[6,:] = numpy.array(a5)*1e3 * srfunc.codata_ec  # flux -> power per eV

    if TYPE_CALC == 1:  # angular distributions over over all energies
        angle_mrad = numpy.linspace(-PSI_MRAD_PLOT, +PSI_MRAD_PLOT,NPOINTS) # angle grid

        a6 = numpy.zeros((6,NPOINTS))
        a6[0,:] = angle_mrad # angle in mrad
        a6[1,:] = angle_mrad*gamma/1e3 # Psi[rad]*Gamma
        a6[2,:] = srfunc.sync_f(angle_mrad * gamma / 1e3)
        a6[3,:] = srfunc.sync_f(angle_mrad * gamma / 1e3, polarization=1)
        a6[4,:] = srfunc.sync_f(angle_mrad * gamma / 1e3, polarization=2)
        a6[5,:] = srfunc.sync_ang(0, angle_mrad, i_a=CURRENT_A, hdiv_mrad=HOR_DIV_MRAD, e_gev=BEAM_ENERGY_GEV, r_m=r_m)

        coltitles=['Psi[mrad]','Psi[rad]*Gamma','F','F s-pol','F p-pol','Power[Watts/mrad(Psi)]']

    if TYPE_CALC == 2:  # angular distributions at a single energy
        angle_mrad = numpy.linspace(-PSI_MRAD_PLOT, +PSI_MRAD_PLOT,NPOINTS) # angle grid

        a6 = numpy.zeros((7,NPOINTS))
        a6[0,:] = angle_mrad # angle in mrad
        a6[1,:] = angle_mrad*gamma/1e3 # Psi[rad]*Gamma
        a6[2,:] = srfunc.sync_f(angle_mrad * gamma / 1e3)
        a6[3,:] = srfunc.sync_f(angle_mrad * gamma / 1e3, polarization=1)
        a6[4,:] = srfunc.sync_f(angle_mrad * gamma / 1e3, polarization=2)
        # single-energy flux vs angle; the energy used is PHOT_ENERGY_MIN
        tmp = srfunc.sync_ang(1, angle_mrad, energy=PHOT_ENERGY_MIN, i_a=CURRENT_A, hdiv_mrad=HOR_DIV_MRAD, e_gev=BEAM_ENERGY_GEV, ec_ev=ec_ev)
        tmp.shape = -1
        a6[5,:] = tmp
        a6[6,:] = a6[5,:] * srfunc.codata_ec * 1e3

        coltitles=['Psi[mrad]','Psi[rad]*Gamma','F','F s-pol','F p-pol','Flux[Ph/sec/0.1%bw/mrad(Psi)]','Power[Watts/eV/mrad(Psi)]']

    if TYPE_CALC == 3:  # angular,energy distributions flux
        angle_mrad = numpy.linspace(-PSI_MRAD_PLOT, +PSI_MRAD_PLOT,NPOINTS) # angle grid

        if LOG_CHOICE == 0:
            energy_ev = numpy.linspace(PHOT_ENERGY_MIN,PHOT_ENERGY_MAX,NPOINTS) # photon energy grid
        else:
            energy_ev = numpy.logspace(numpy.log10(PHOT_ENERGY_MIN),numpy.log10(PHOT_ENERGY_MAX),NPOINTS) # photon energy grid

        # fm[angle,energy]
        fm = srfunc.sync_ene(4, energy_ev, ec_ev=ec_ev, e_gev=BEAM_ENERGY_GEV, i_a=CURRENT_A, \
            hdiv_mrad=HOR_DIV_MRAD, psi_min=PSI_MIN, psi_max=PSI_MAX, psi_npoints=PSI_NPOINTS)

        a = numpy.linspace(PSI_MIN,PSI_MAX,PSI_NPOINTS)

        # flatten the 2D (Psi, energy) grid into rows of (Psi, E, power, flux)
        a6 = numpy.zeros((4,len(a)*len(energy_ev)))
        ij = -1
        for i in range(len(a)):
            for j in range(len(energy_ev)):
                ij += 1
                a6[0,ij] = a[i]
                a6[1,ij] = energy_ev[j]
                a6[2,ij] = fm[i,j] * srfunc.codata_ec * 1e3
                a6[3,ij] = fm[i,j]

        coltitles=['Psi [mrad]','Photon Energy [eV]','Power [Watts/eV/mrad(Psi)]','Flux [Ph/sec/0.1%bw/mrad(Psi)]']

    # write spec file
    ncol = len(coltitles)
    npoints = len(a6[0,:])

    if FILE_DUMP:
        outFile = "bm.spec"
        f = open(outFile,"w")
        f.write("#F "+outFile+"\n")
        f.write("\n")
        f.write("#S 1 bm results\n")
        f.write("#N %d\n"%(ncol))
        f.write("#L")
        for i in range(ncol):
            f.write("  "+coltitles[i])
        f.write("\n")

        for i in range(npoints):
            f.write((" %e "*ncol+"\n")%(tuple(a6[:,i].tolist())))
        f.close()
        print("File written to disk: " + outFile)

    # power sum rule only makes sense for a linear (equally spaced) grid
    if TYPE_CALC == 0:
        if LOG_CHOICE == 0:
            print("\nPower from integral of spectrum: %15.3f W"%(a5.sum() * 1e3*srfunc.codata_ec * (energy_ev[1]-energy_ev[0])))

    return a6.T, fm, a, energy_ev
# --------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------
def xoppy_calc_wigg(FIELD=0,NPERIODS=12,ULAMBDA=0.125,K=14.0,ENERGY=6.04,PHOT_ENERGY_MIN=100.0,\
                    PHOT_ENERGY_MAX=100100.0,NPOINTS=100,NTRAJPOINTS=101,CURRENT=200.0,FILE="?"):
    """Compute a wiggler emission spectrum with srfunc.

    First computes the electron trajectory (written to "xwiggler_traj.spec"),
    then the spectrum (written to "xwiggler.spec").

    Args:
        FIELD: 0 = sinusoidal field from K value, 1 = field map read from
            FILE, 2 = field from harmonics (inData left empty here).
        NPERIODS, ULAMBDA, K: wiggler periods, period length [m], K value.
        ENERGY: electron energy [GeV].
        PHOT_ENERGY_MIN, PHOT_ENERGY_MAX, NPOINTS: photon energy grid [eV].
        NTRAJPOINTS: number of trajectory points.
        CURRENT: electron current [mA] (converted to A for srfunc).
        FILE: field-map file, used only when FIELD == 1.

    Returns:
        tuple (e, f0, p0, cumulated_power): photon energies, flux, power
        density and cumulated power (0.0 if the cumulation failed).
    """
    print("Inside xoppy_calc_wigg. ")

    outFileTraj = "xwiggler_traj.spec"
    outFile = "xwiggler.spec"

    if FIELD == 0:
        t0,p = srfunc.wiggler_trajectory(b_from=0, nPer=NPERIODS, nTrajPoints=NTRAJPOINTS, \
                                         ener_gev=ENERGY, per=ULAMBDA, kValue=K, \
                                         trajFile=outFileTraj)
    if FIELD == 1:
        # magnetic field from B(s) map
        t0,p = srfunc.wiggler_trajectory(b_from=1, nPer=NPERIODS, nTrajPoints=NTRAJPOINTS, \
                                         ener_gev=ENERGY, inData=FILE, trajFile=outFileTraj)
    if FIELD == 2:
        # magnetic field from harmonics
        # hh = srfunc.wiggler_harmonics(b_t,Nh=41,fileOutH="tmp.h")
        t0,p = srfunc.wiggler_trajectory(b_from=2, nPer=NPERIODS, nTrajPoints=NTRAJPOINTS, \
                                         ener_gev=ENERGY, per=ULAMBDA, inData="", trajFile=outFileTraj)
    print(p)
    #
    # now spectra
    #
    e, f0, p0 = srfunc.wiggler_spectrum(t0, enerMin=PHOT_ENERGY_MIN, enerMax=PHOT_ENERGY_MAX, nPoints=NPOINTS, \
                                        electronCurrent=CURRENT*1e-3, outFile=outFile, elliptical=False)

    # BUG FIX: the original used a bare "except:" and, on failure, set
    # cumulated_power to the float 0.0 and then indexed it with [-1] in the
    # print below, raising TypeError.  Compute the printable total inside
    # the try block instead.
    try:
        cumulated_power = p0.cumsum() * numpy.abs(e[0] - e[1])
        total_power = cumulated_power[-1]
    except Exception:
        cumulated_power = 0.0
        total_power = 0.0
    print("\nPower from integral of spectrum (sum rule): %8.3f W" % (total_power))
    return e, f0, p0 , cumulated_power
def trapezoidal_rule_2d_1darrays(data2D,h=None,v=None):
    """Integrate a 2D array over both axes with the trapezoidal rule.

    Args:
        data2D: array of shape (len(h), len(v)).
        h: sample points along axis 0; defaults to 0..n-1.
        v: sample points along axis 1; defaults to 0..n-1.

    Returns:
        Scalar double integral of data2D over the (h, v) grid.
    """
    n_h, n_v = data2D.shape
    if h is None:
        h = numpy.arange(n_h)
    if v is None:
        v = numpy.arange(n_v)
    # integrate along v for each row, then integrate the result along h
    per_row = numpy.trapz(data2D, v, axis=1)
    return numpy.trapz(per_row, h, axis=0)
#
#
#
def xoppy_calc_wiggler_radiation(
        ELECTRONENERGY           = 3.0,
        ELECTRONCURRENT          = 0.1,
        PERIODID                 = 0.120,
        NPERIODS                 = 37.0,
        KV                       = 22.416,
        DISTANCE                 = 30.0,
        HSLITPOINTS              = 500,
        VSLITPOINTS              = 500,
        PHOTONENERGYMIN          = 100.0,
        PHOTONENERGYMAX          = 100100.0,
        PHOTONENERGYPOINTS       = 101,
        NTRAJPOINTS              = 1001,
        FIELD                    = 0,
        FILE                     = "/Users/srio/Oasys/Bsin.txt",
        POLARIZATION             = 0, # 0=total, 1=parallel (s), 2=perpendicular (p)
        SHIFT_X_FLAG             = 0,
        SHIFT_X_VALUE            = 0.0,
        SHIFT_BETAX_FLAG         = 0,
        SHIFT_BETAX_VALUE        = 0.0,
        CONVOLUTION              = 1,
        PASSEPARTOUT             = 3.0,
        h5_file                  = "wiggler_radiation.h5",
        h5_entry_name            = "XOPPY_RADIATION",
        h5_initialize            = True,
        h5_parameters            = None,
        do_plot                  = False,
        ):
    """Compute the photon-energy-resolved angular emission of a wiggler.

    For each photon energy the vertical angular profile is obtained from
    srfunc.sync_ang, the horizontal profile by interpolating the intensity
    along the (multivalued) trajectory angle and optionally convolving it
    with the natural emission cone; the two are combined as an outer
    product and normalized so that each 2D slice integrates to the
    spectrum flux.

    Args:
        ELECTRONENERGY: electron energy [GeV].
        ELECTRONCURRENT: electron current [A].
        PERIODID, NPERIODS, KV: wiggler period [m], number of periods, K.
        DISTANCE: observation distance [m], used to convert angles to mm.
        HSLITPOINTS, VSLITPOINTS: points of the horizontal/vertical grids.
        PHOTONENERGYMIN/MAX/POINTS: photon energy grid [eV].
        NTRAJPOINTS: trajectory points.
        FIELD: 0 = sinusoidal, 1 = field map from FILE, 2 = not implemented.
        POLARIZATION: 0=total, 1=parallel (s), 2=perpendicular (p).
        SHIFT_X_FLAG/VALUE, SHIFT_BETAX_FLAG/VALUE: trajectory shifts
            passed through to srfunc.wiggler_trajectory.
        CONVOLUTION: if truthy, convolve the horizontal profile with the
            natural divergence kernel.
        PASSEPARTOUT: half-width of the angular window in units of the
            estimated divergence sigma.
        h5_file, h5_entry_name, h5_initialize, h5_parameters: HDF5 output
            control; empty h5_file disables writing.
        do_plot: if True, show diagnostic plots.

    Returns:
        tuple (e, h, v, p, traj): photon energies [eV], horizontal and
        vertical positions [mm] at DISTANCE, the radiation stack
        p[energy, h, v], and the trajectory array.
    """
    # calculate wiggler trajectory
    if FIELD == 0:
        (traj, pars) = srfunc.wiggler_trajectory(
            b_from            = 0,
            inData            = "",
            nPer              = int(NPERIODS), #37,
            nTrajPoints       = NTRAJPOINTS,
            ener_gev          = ELECTRONENERGY,
            per               = PERIODID,
            kValue            = KV,
            trajFile          = "",
            shift_x_flag      = SHIFT_X_FLAG,
            shift_x_value     = SHIFT_X_VALUE,
            shift_betax_flag  = SHIFT_BETAX_FLAG,
            shift_betax_value = SHIFT_BETAX_VALUE)
    if FIELD == 1:
        # magnetic field from B(s) map
        (traj, pars) = srfunc.wiggler_trajectory(
            b_from            = 1,
            nPer              = 1,
            nTrajPoints       = NTRAJPOINTS,
            ener_gev          = ELECTRONENERGY,
            inData            = FILE,
            trajFile          = "",
            shift_x_flag      = SHIFT_X_FLAG,
            shift_x_value     = SHIFT_X_VALUE,
            shift_betax_flag  = SHIFT_BETAX_FLAG,
            # BUG FIX: the original passed SHIFT_BETAX_FLAG here (copy-paste
            # error), so the betax *value* shift was silently ignored for
            # field maps.
            shift_betax_value = SHIFT_BETAX_VALUE)
    if FIELD == 2:
        # BUG FIX: the original was raise("Not implemented"), which raises
        # a TypeError (strings are not exceptions in Python 3).
        raise NotImplementedError("Not implemented")

    energy, flux, power = srfunc.wiggler_spectrum(traj,
        enerMin         = PHOTONENERGYMIN,
        enerMax         = PHOTONENERGYMAX,
        nPoints         = PHOTONENERGYPOINTS,
        electronCurrent = ELECTRONCURRENT,
        outFile         = "",
        elliptical      = False,
        polarization    = POLARIZATION)

    # BUG FIX (twice below): the originals used bare "except:" and, on
    # failure, set cumulated_power to the float 0.0 and then indexed it with
    # [-1] in the print, raising TypeError.  Compute the printable total
    # inside the try block instead.
    try:
        cumulated_power = power.cumsum() * numpy.abs(energy[0] - energy[1])
        total_power = cumulated_power[-1]
    except Exception:
        cumulated_power = 0.0
        total_power = 0.0
    print("\nPower from integral of spectrum (sum rule): %8.3f W" % (total_power))
    try:
        cumulated_power = cumtrapz(power, energy, initial=0)
        total_power = cumulated_power[-1]
    except Exception:
        cumulated_power = 0.0
        total_power = 0.0
    print("Power from integral of spectrum (trapezoid rule): %8.3f W" % (total_power))

    codata_mee = 1e-6 * codata.m_e * codata.c ** 2 / codata.e  # electron rest energy in MeV
    gamma = ELECTRONENERGY * 1e3 / codata_mee

    Y = traj[1, :].copy()      # longitudinal coordinate s [m]
    divX = traj[3, :].copy()   # horizontal angle along the trajectory
    By = traj[7, :].copy()     # magnetic field along the trajectory

    # rho = (1e9 / codata.c) * ELECTRONENERGY / By
    # Ec0 = 3 * codata.h * codata.c * gamma**3 / (4 * numpy.pi * rho) / codata.e
    # Ec = 665.0 * ELECTRONENERGY**2 * numpy.abs(By)
    # Ecmax = 665.0 * ELECTRONENERGY** 2 * (numpy.abs(By)).max()

    # local critical energy along the trajectory (coeff ~665.0)
    coeff = 3 / (4 * numpy.pi) * codata.h * codata.c**2 / codata_mee ** 3 / codata.e
    Ec = coeff * ELECTRONENERGY ** 2 * numpy.abs(By)
    Ecmax = coeff * ELECTRONENERGY ** 2 * (numpy.abs(By)).max()

    # approx formula for divergence (first formula in pag 43 of Tanaka's paper)
    sigmaBp = 0.597 / gamma * numpy.sqrt(Ecmax / PHOTONENERGYMIN)

    # angular windows: vertical is +/- PASSEPARTOUT*sigmaBp; horizontal adds
    # the trajectory's own angular excursion
    divXX = numpy.linspace(divX.min() - PASSEPARTOUT * sigmaBp, divX.max() + PASSEPARTOUT * sigmaBp, HSLITPOINTS)
    divZZ = numpy.linspace(-PASSEPARTOUT * sigmaBp, PASSEPARTOUT * sigmaBp, VSLITPOINTS)

    e = numpy.linspace(PHOTONENERGYMIN, PHOTONENERGYMAX, PHOTONENERGYPOINTS)
    p = numpy.zeros((PHOTONENERGYPOINTS, HSLITPOINTS, VSLITPOINTS))

    for i in range(e.size):
        Ephoton = e[i]

        # vertical divergence: weighted mean critical energy, then sync_ang
        intensity = srfunc.sync_g1(Ephoton / Ec, polarization=POLARIZATION)
        Ecmean = (Ec * intensity).sum() / intensity.sum()
        fluxDivZZ = srfunc.sync_ang(1, divZZ * 1e3, polarization=POLARIZATION,
                                    e_gev=ELECTRONENERGY, i_a=ELECTRONCURRENT, hdiv_mrad=1.0,
                                    energy=Ephoton, ec_ev=Ecmean)

        if do_plot:
            from srxraylib.plot.gol import plot
            plot(divZZ, fluxDivZZ, title="min intensity %f" % fluxDivZZ.min(), xtitle="divZ", ytitle="fluxDivZZ", show=1)

        # horizontal divergence after Tanaka (kept disabled for reference)
        if False:
            e_over_ec = Ephoton / Ecmax
            uudlim = 1.0 / gamma
            uud = numpy.linspace(-uudlim*0.99, uudlim*0.99, divX.size)
            uu = e_over_ec / numpy.sqrt(1 - gamma**2 * uud**2)
            plot(uud, 2 * numpy.pi / numpy.sqrt(3) * srfunc.sync_g1(uu))

        # horizontal divergence: map intensity along the (multivalued)
        # trajectory angle onto the regular divXX grid
        intensity_interpolated = interpolate_multivalued_function(divX, intensity, divXX, Y, )

        if CONVOLUTION:
            intensity_interpolated.shape = -1
            # convolve with the natural emission cone (centered kernel)
            divXX_window = divXX[-1] - divXX[0]
            divXXCC = numpy.linspace(-0.5 * divXX_window, 0.5 * divXX_window, divXX.size)
            fluxDivZZCC = srfunc.sync_ang(1, divXXCC * 1e3, polarization=POLARIZATION,
                                          e_gev=ELECTRONENERGY, i_a=ELECTRONCURRENT, hdiv_mrad=1.0,
                                          energy=Ephoton, ec_ev=Ecmax)
            fluxDivZZCC.shape = -1

            intensity_convolved = numpy.convolve(intensity_interpolated/intensity_interpolated.max(),
                                                 fluxDivZZCC/fluxDivZZCC.max(),
                                                 mode='same')
        else:
            intensity_convolved = intensity_interpolated

        if i == 0:
            print("\n\n============ sizes vs photon energy =======================")
            print("Photon energy/eV  FWHM X'/urad  FWHM Y'/urad  FWHM X/mm  FWHM Z/mm ")
        print("%16.3f %12.3f %12.3f %9.2f %9.2f" %
              (Ephoton,
               1e6 * get_fwhm(intensity_convolved, divXX)[0],
               1e6 * get_fwhm(fluxDivZZ, divZZ)[0],
               1e3 * get_fwhm(intensity_convolved, divXX)[0] * DISTANCE,
               1e3 * get_fwhm(fluxDivZZ, divZZ)[0] * DISTANCE))

        if do_plot:
            # NOTE(review): fluxDivZZCC only exists when CONVOLUTION is on;
            # plotting with do_plot and CONVOLUTION=0 would raise NameError
            # (same as the original code).
            plot(divX, intensity/intensity.max(),
                 divXX, intensity_interpolated/intensity_interpolated.max(),
                 divXX, intensity_convolved/intensity_convolved.max(),
                 divXX, fluxDivZZCC/fluxDivZZCC.max(),
                 title="min intensity %f, Ephoton=%6.2f" % (intensity.min(), Ephoton), xtitle="divX", ytitle="intensity",
                 legend=["orig", "interpolated", "convolved", "kernel"], show=1)

        # combine H * V
        INTENSITY = numpy.outer(intensity_convolved/intensity_convolved.max(), fluxDivZZ/fluxDivZZ.max())
        p[i, :, :] = INTENSITY

        if do_plot:
            from srxraylib.plot.gol import plot_image, plot_surface, plot_show
            plot_image(INTENSITY, divXX, divZZ, aspect='auto', title="E=%6.2f" % Ephoton, show=1)
            # to create oasys icon...
            # plot_surface(INTENSITY, divXX, divZZ, title="", show=0)
            # import matplotlib.pylab as plt
            # plt.xticks([])
            # plt.yticks([])
            # plt.axis('off')
            # plt.tick_params(axis='both', left='off', top='off', right='off', bottom='off', labelleft='off',
            #                 labeltop='off', labelright='off', labelbottom='off')
            #
            # plot_show()

    h = divXX * DISTANCE * 1e3  # in mm for the h5 file
    v = divZZ * DISTANCE * 1e3  # in mm for the h5 file
    print("\nWindow size: %f mm [H] x %f mm [V]" % (h[-1] - h[0], v[-1] - v[0]))
    print("Window size: %g rad [H] x %g rad [V]" % (divXX[-1] - divXX[0], divZZ[-1] - divZZ[0]))

    # normalization: each 2D slice integrates (trapezoid) to the spectrum flux
    for i in range(e.size):
        INTENSITY = p[i, :, :]
        # norm = INTENSITY.sum() * (h[1] - h[0]) * (v[1] - v[0])
        norm = trapezoidal_rule_2d_1darrays(INTENSITY, h, v)
        p[i, :, :] = INTENSITY / norm * flux[i]

    # fit the power density with a 2D Gaussian (best-effort; failures are
    # tolerated and only skip the fit outputs)
    fit_ok = False
    try:
        power = p.sum(axis=0) * (e[1] - e[0]) * codata.e * 1e3
        print("\n\n============= Fitting power density to a 2D Gaussian. ==============\n")
        print("Please use these results with care: check if the original data looks like a Gaussian.")
        fit_parameters = fit_gaussian2d(power, h, v)
        print(info_params(fit_parameters))
        H, V = numpy.meshgrid(h, v)
        data_fitted = twoD_Gaussian((H, V), *fit_parameters)
        print("  Total power (sum rule) in the fitted data [W]: ", data_fitted.sum()*(h[1]-h[0])*(v[1]-v[0]))
        # plot_image(data_fitted.reshape((h.size,v.size)),h, v,title="FIT")
        print("====================================================\n")
        fit_ok = True
    except Exception:
        pass

    # output file
    if h5_file != "":
        try:
            if h5_initialize:
                h5w = H5SimpleWriter.initialize_file(h5_file, creator="xoppy_wigglers.py")
            else:
                h5w = H5SimpleWriter(h5_file, None)
            h5w.create_entry(h5_entry_name, nx_default=None)
            h5w.add_stack(e, h, v, p, stack_name="Radiation", entry_name=h5_entry_name,
                          title_0="Photon energy [eV]",
                          title_1="X gap [mm]",
                          title_2="Y gap [mm]")
            h5w.create_entry("parameters", root_entry=h5_entry_name, nx_default=None)
            if h5_parameters is not None:
                for key in h5_parameters.keys():
                    h5w.add_key(key, h5_parameters[key], entry_name=h5_entry_name+"/parameters")
            h5w.create_entry("trajectory", root_entry=h5_entry_name, nx_default="transversal trajectory")
            h5w.add_key("traj", traj, entry_name=h5_entry_name + "/trajectory")
            h5w.add_dataset(traj[1, :], traj[0, :], dataset_name="transversal trajectory", entry_name=h5_entry_name + "/trajectory", title_x="s [m]", title_y="X [m]")
            h5w.add_dataset(traj[1, :], traj[3, :], dataset_name="transversal velocity", entry_name=h5_entry_name + "/trajectory", title_x="s [m]", title_y="Vx/c")
            h5w.add_dataset(traj[1, :], traj[7, :], dataset_name="Magnetic field",
                            entry_name=h5_entry_name + "/trajectory", title_x="s [m]", title_y="Bz [T]")
            if fit_ok:
                h5w.add_image(power, h, v, image_name="PowerDensity", entry_name=h5_entry_name, title_x="X [mm]", title_y="Y [mm]")
                h5w.add_image(data_fitted.reshape(h.size, v.size), h, v, image_name="PowerDensityFit", entry_name=h5_entry_name, title_x="X [mm]", title_y="Y [mm]")
                h5w.add_key("fit_info", info_params(fit_parameters), entry_name=h5_entry_name+"/PowerDensityFit")
            print("File written to disk: %s" % h5_file)
        except Exception:
            # best-effort output; a failed write should not lose the results
            print("ERROR initializing h5 file")

    return e, h, v, p, traj
#
# auxiliar functions
#
def interpolate_multivalued_function(divX, intensity, divX_i, s):
    """Interpolate intensity(divX) onto divX_i when divX is multivalued.

    The trajectory angle divX is not monotonic in the trajectory coordinate
    s, so intensity as a function of divX is multivalued.  The curve is
    split into monotonic branches at the sign changes of d(divX)/ds, each
    branch is interpolated independently (zero outside its support), and
    the branch contributions are summed.

    Args:
        divX: angles along the trajectory (possibly non-monotonic).
        intensity: intensity at each trajectory point.
        divX_i: target angle grid.
        s: trajectory coordinate used for the derivative of divX.

    Returns:
        numpy array of summed interpolated intensities on divX_i.
    """
    divXprime = numpy.gradient(divX, s)  # derivative d(divX)/ds
    # branch boundaries: indices where the derivative changes sign
    knots = crossings_nonzero_all(divXprime)
    knots.insert(0, 0)
    knots.append(len(divXprime))

    divX_split = numpy.split(divX, knots)
    intensity_split = numpy.split(intensity, knots)
    # BUG FIX: the original split ``intensity`` again here instead of ``s``.
    # Only the number of branches was used below, so results were unchanged,
    # but the per-branch coordinate arrays were wrong.
    s_split = numpy.split(s, knots)

    # plot(s, divX/divX.max(),
    #      s,divXprime/divXprime.max(),
    #      s[(knots[0]):(knots[1])], (divX/divX.max())[(knots[0]):(knots[1])],
    #      s[(knots[-2]):(knots[-1])], (divX / divX.max())[(knots[-2]):(knots[-1])],
    #      title='derivative',legend=["divX","divXprime","branch 1","branch N"])

    intensity_interpolated = numpy.zeros_like(divX_i)
    for i in range(len(s_split)):
        # branches with fewer than 3 points carry no useful information
        if divX_split[i].size > 2:
            fintensity = interp1d(divX_split[i], intensity_split[i], kind='linear', axis=-1, copy=True,
                                  bounds_error=False, fill_value=0.0, assume_sorted=False)
            intensity_interpolated += fintensity(divX_i)
    return intensity_interpolated
def crossings_nonzero_all(data):
    """Return the indices i where data changes sign between i and i+1.

    We suppose the array does not contain exact zeros (see
    https://stackoverflow.com/questions/3843017/efficiently-detect-sign-changes-in-python).

    Args:
        data: 1D numpy array of nonzero values.

    Returns:
        list of int indices of the element preceding each sign change.
    """
    signs = numpy.sign(data)
    # adjacent entries with different sign mark a zero crossing
    (crossing_idx,) = numpy.nonzero(signs[:-1] != signs[1:])
    return crossing_idx.tolist()
def create_magnetic_field_for_bending_magnet(do_plot=False,filename="",B0=-1.0,divergence=1e-3,radius=10.0,npoints=500):
    """Build a hard-edge magnetic field profile B(y) for a bending magnet.

    The magnet length is L = radius * divergence; the field is B0 inside
    |y| <= L/2 and zero outside.  The sampling window is 10% wider than L.

    Args:
        do_plot: if True, display the profile with srxraylib's plot.
        filename: if non-empty, write two-column "y B" text to this file.
        B0: field value inside the magnet [T].
        divergence: angular coverage of the magnet [rad].
        radius: bending radius [m].
        npoints: number of sample points.

    Returns:
        tuple (y, B) of numpy arrays: positions [m] and field values [T].
    """
    L = radius * divergence
    Lmax = numpy.abs(L * 1.1)
    y = numpy.linspace(-Lmax / 2, Lmax / 2, npoints)
    B = y * 0.0 + B0
    # zero the field outside the magnet's extent
    ybad = numpy.where(numpy.abs(y) > numpy.abs(L / 2))
    B[ybad] = 0
    if do_plot:
        from srxraylib.plot.gol import plot
        plot(y, B, xtitle="y [m]", ytitle="B [T]", title=filename)
    if filename != "":
        # BUG FIX: use a context manager so the file is closed even if a
        # write fails (the original left the handle open on error).
        with open(filename, "w") as f:
            for i in range(y.size):
                f.write("%f  %f\n" % (y[i], B[i]))
        print("File written to disk: %s" % filename)
    return y, B
def trapezoidal_rule_2d_1darrays(data2D,h=None,v=None):
    """Double-integrate data2D over its two axes by the trapezoidal rule.

    h and v are the sample coordinates of axis 0 and axis 1; when omitted
    they default to unit-spaced indices.

    NOTE(review): this is an exact duplicate of the helper of the same name
    defined earlier in this module; consider removing one copy.
    """
    if h is None:
        h = numpy.arange(data2D.shape[0])
    if v is None:
        v = numpy.arange(data2D.shape[1])
    # first collapse the v axis, then integrate the remaining 1D profile
    integrated_over_v = numpy.trapz(data2D, x=v, axis=1)
    return numpy.trapz(integrated_over_v, x=h, axis=0)
if __name__ == "__main__":
    from srxraylib.plot.gol import plot, plot_image, plot_scatter, plot_show, set_qt
    set_qt()

    # Other usage examples (plain wiggler runs, creating a bending-magnet
    # field file and running with FIELD=1, and a fully spelled-out call with
    # per-key keyword arguments) existed here as commented-out code; see the
    # repository history if they are needed again.

    #
    # script to make the calculations (created by XOPPY:wiggler_radiation)
    #
    h5_parameters = {
        "ELECTRONENERGY": 6.0,
        "ELECTRONCURRENT": 0.2,
        "PERIODID": 0.15,
        "NPERIODS": 10.0,
        "KV": 21.015,
        "FIELD": 0,  # 0= sinusoidal, 1=from file
        "FILE": '',
        "POLARIZATION": 0,  # 0=total, 1=s, 2=p
        "DISTANCE": 30.0,
        "HSLITPOINTS": 500,
        "VSLITPOINTS": 500,
        "PHOTONENERGYMIN": 100.0,
        "PHOTONENERGYMAX": 100100.0,
        "PHOTONENERGYPOINTS": 101,
        "SHIFT_X_FLAG": 0,
        "SHIFT_X_VALUE": 0.0,
        "SHIFT_BETAX_FLAG": 0,
        "SHIFT_BETAX_VALUE": 0.0,
        "CONVOLUTION": 1,
    }

    # every key above is also a keyword argument of
    # xoppy_calc_wiggler_radiation, so the dict can be splatted directly
    e, h, v, p, traj = xoppy_calc_wiggler_radiation(
        h5_file="wiggler_radiation.h5",
        h5_entry_name="XOPPY_RADIATION",
        h5_initialize=True,
        h5_parameters=h5_parameters,
        do_plot=0,
        PASSEPARTOUT=1,
        **h5_parameters)

    # example plot
    from srxraylib.plot.gol import plot_image
    plot_image(p[0], h, v, title="Flux [photons/s] per 0.1 bw per mm2 at %9.3f eV" % (25100.0), xtitle="H [mm]",
               ytitle="V [mm]")
    #
    # end script
    #
| |
"""tests basic polymorphic mapper loading/saving, minimal relationships"""
from sqlalchemy import exc as sa_exc
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy.orm import create_session
from sqlalchemy.orm import mapper
from sqlalchemy.orm import polymorphic_union
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing.schema import Column
class Person(fixtures.ComparableEntity):
    """Base class of the polymorphic hierarchy; mapped to ``people``."""
    pass
class Engineer(Person):
    """Joined-table subclass of Person; mapped to ``engineers``."""
    pass
class Manager(Person):
    """Joined-table subclass of Person; mapped to ``managers``."""
    pass
class Boss(Manager):
    """Third-level subclass of Manager; mapped to ``boss``."""
    pass
class Company(fixtures.ComparableEntity):
    """Owner of Person rows via the ``employees`` relationship; mapped to
    ``companies``."""
    pass
class PolymorphTest(fixtures.MappedTest):
    """Shared fixture base: defines the joined-table-inheritance schema
    (companies / people / engineers / managers / boss) used by the
    polymorphic tests below."""

    @classmethod
    def define_tables(cls, metadata):
        # Tables are published as module-level globals because the test
        # classes below reference them directly when building mappers.
        global companies, people, engineers, managers, boss
        companies = Table(
            "companies",
            metadata,
            Column(
                "company_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("name", String(50)),
        )
        # Base table of the hierarchy; "type" is the polymorphic
        # discriminator column.
        people = Table(
            "people",
            metadata,
            Column(
                "person_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column(
                "company_id",
                Integer,
                ForeignKey("companies.company_id"),
                nullable=False,
            ),
            Column("name", String(50)),
            Column("type", String(30)),
        )
        # Joined-table subclasses share their primary key with "people".
        engineers = Table(
            "engineers",
            metadata,
            Column(
                "person_id",
                Integer,
                ForeignKey("people.person_id"),
                primary_key=True,
            ),
            Column("status", String(30)),
            Column("engineer_name", String(50)),
            Column("primary_language", String(50)),
        )
        managers = Table(
            "managers",
            metadata,
            Column(
                "person_id",
                Integer,
                ForeignKey("people.person_id"),
                primary_key=True,
            ),
            Column("status", String(30)),
            Column("manager_name", String(50)),
        )
        # Third inheritance level: boss extends managers.
        boss = Table(
            "boss",
            metadata,
            Column(
                "boss_id",
                Integer,
                ForeignKey("managers.person_id"),
                primary_key=True,
            ),
            Column("golf_swing", String(30)),
        )
class InsertOrderTest(PolymorphTest):
    def test_insert_order(self):
        """test that classes of multiple types mix up mapper inserts
        so that insert order of individual tables is maintained"""
        # UNION of the three concrete selections forms the polymorphic
        # selectable; "pjoin" is its alias name.
        person_join = polymorphic_union(
            {
                "engineer": people.join(engineers),
                "manager": people.join(managers),
                "person": people.select(people.c.type == "person"),
            },
            None,
            "pjoin",
        )
        person_mapper = mapper(
            Person,
            people,
            with_polymorphic=("*", person_join),
            polymorphic_on=person_join.c.type,
            polymorphic_identity="person",
        )
        mapper(
            Engineer,
            engineers,
            inherits=person_mapper,
            polymorphic_identity="engineer",
        )
        mapper(
            Manager,
            managers,
            inherits=person_mapper,
            polymorphic_identity="manager",
        )
        mapper(
            Company,
            companies,
            properties={
                "employees": relationship(
                    Person, backref="company", order_by=person_join.c.person_id
                )
            },
        )
        session = create_session()
        c = Company(name="company1")
        # Interleave Manager / Engineer / plain Person instances so the
        # flush must keep per-table insert ordering across types.
        c.employees.append(
            Manager(
                status="AAB",
                manager_name="manager1",
                name="pointy haired boss",
            )
        )
        c.employees.append(
            Engineer(
                status="BBA",
                engineer_name="engineer1",
                primary_language="java",
                name="dilbert",
            )
        )
        c.employees.append(Person(status="HHH", name="joesmith"))
        c.employees.append(
            Engineer(
                status="CGG",
                engineer_name="engineer2",
                primary_language="python",
                name="wally",
            )
        )
        c.employees.append(
            Manager(status="ABA", manager_name="manager2", name="jsmith")
        )
        session.add(c)
        session.flush()
        session.expunge_all()
        # Round trip: the reloaded Company must compare equal to the
        # original object graph.
        eq_(session.query(Company).get(c.company_id), c)
@testing.combinations(
    ("lazy", True), ("nonlazy", False), argnames="lazy_relationship", id_="ia"
)
@testing.combinations(
    ("redefine", True),
    ("noredefine", False),
    argnames="redefine_colprop",
    id_="ia",
)
@testing.combinations(
    ("unions", True),
    ("unions", False),
    ("joins", False),
    ("auto", False),
    ("none", False),
    argnames="with_polymorphic,include_base",
    id_="rr",
)
class RoundTripTest(PolymorphTest):
    """Round-trip persistence/query tests run across a matrix of lazy vs.
    eager relationship loading, redefined column property, and
    with_polymorphic strategy (matrix supplied by the combinations
    decorators above)."""

    # These are overwritten per generated variant by testing.combinations.
    lazy_relationship = None
    include_base = None
    redefine_colprop = None
    with_polymorphic = None

    run_inserts = "once"
    run_deletes = None
    run_setup_mappers = "once"

    @classmethod
    def setup_mappers(cls):
        include_base = cls.include_base
        lazy_relationship = cls.lazy_relationship
        redefine_colprop = cls.redefine_colprop
        with_polymorphic = cls.with_polymorphic
        # Build the polymorphic selectable according to the strategy under
        # test: explicit UNION, chained outer joins, automatic ("*"), or
        # none at all.
        if with_polymorphic == "unions":
            if include_base:
                person_join = polymorphic_union(
                    {
                        "engineer": people.join(engineers),
                        "manager": people.join(managers),
                        "person": people.select(people.c.type == "person"),
                    },
                    None,
                    "pjoin",
                )
            else:
                person_join = polymorphic_union(
                    {
                        "engineer": people.join(engineers),
                        "manager": people.join(managers),
                    },
                    None,
                    "pjoin",
                )
            manager_join = people.join(managers).outerjoin(boss)
            person_with_polymorphic = ["*", person_join]
            manager_with_polymorphic = ["*", manager_join]
        elif with_polymorphic == "joins":
            person_join = (
                people.outerjoin(engineers).outerjoin(managers).outerjoin(boss)
            )
            manager_join = people.join(managers).outerjoin(boss)
            person_with_polymorphic = ["*", person_join]
            manager_with_polymorphic = ["*", manager_join]
        elif with_polymorphic == "auto":
            person_with_polymorphic = "*"
            manager_with_polymorphic = "*"
        else:
            person_with_polymorphic = None
            manager_with_polymorphic = None
        if redefine_colprop:
            # Map people.name under a different attribute name to exercise
            # column property redefinition.
            person_mapper = mapper(
                Person,
                people,
                with_polymorphic=person_with_polymorphic,
                polymorphic_on=people.c.type,
                polymorphic_identity="person",
                properties={"person_name": people.c.name},
            )
        else:
            person_mapper = mapper(
                Person,
                people,
                with_polymorphic=person_with_polymorphic,
                polymorphic_on=people.c.type,
                polymorphic_identity="person",
            )
        mapper(
            Engineer,
            engineers,
            inherits=person_mapper,
            polymorphic_identity="engineer",
        )
        mapper(
            Manager,
            managers,
            inherits=person_mapper,
            with_polymorphic=manager_with_polymorphic,
            polymorphic_identity="manager",
        )
        mapper(Boss, boss, inherits=Manager, polymorphic_identity="boss")
        mapper(
            Company,
            companies,
            properties={
                "employees": relationship(
                    Person,
                    lazy=lazy_relationship,
                    cascade="all, delete-orphan",
                    backref="company",
                    order_by=people.c.person_id,
                )
            },
        )

    @classmethod
    def insert_data(cls, connection):
        redefine_colprop = cls.redefine_colprop
        include_base = cls.include_base
        if redefine_colprop:
            person_attribute_name = "person_name"
        else:
            person_attribute_name = "name"
        employees = [
            Manager(
                status="AAB",
                manager_name="manager1",
                **{person_attribute_name: "pointy haired boss"}
            ),
            Engineer(
                status="BBA",
                engineer_name="engineer1",
                primary_language="java",
                **{person_attribute_name: "dilbert"}
            ),
        ]
        # A plain Person row only exists in the "include_base" variants.
        if include_base:
            employees.append(Person(**{person_attribute_name: "joesmith"}))
        employees += [
            Engineer(
                status="CGG",
                engineer_name="engineer2",
                primary_language="python",
                **{person_attribute_name: "wally"}
            ),
            Manager(
                status="ABA",
                manager_name="manager2",
                **{person_attribute_name: "jsmith"}
            ),
        ]
        session = Session(connection)
        c = Company(name="company1")
        c.employees = employees
        session.add(c)
        session.commit()

    @testing.fixture
    def get_dilbert(self):
        # Returns a helper that loads the "dilbert" Engineer from a given
        # session, honoring the redefined column property when applicable.
        def run(session):
            if self.redefine_colprop:
                person_attribute_name = "person_name"
            else:
                person_attribute_name = "name"
            dilbert = (
                session.query(Engineer)
                .filter_by(**{person_attribute_name: "dilbert"})
                .one()
            )
            return dilbert

        return run

    def test_lazy_load(self):
        lazy_relationship = self.lazy_relationship
        with_polymorphic = self.with_polymorphic
        if self.redefine_colprop:
            person_attribute_name = "person_name"
        else:
            person_attribute_name = "name"
        session = create_session()
        dilbert = (
            session.query(Engineer)
            .filter_by(**{person_attribute_name: "dilbert"})
            .one()
        )
        employees = session.query(Person).order_by(Person.person_id).all()
        company = session.query(Company).first()
        eq_(session.query(Person).get(dilbert.person_id), dilbert)
        session.expunge_all()
        eq_(
            session.query(Person)
            .filter(Person.person_id == dilbert.person_id)
            .one(),
            dilbert,
        )
        session.expunge_all()

        def go():
            cc = session.query(Company).get(company.company_id)
            eq_(cc.employees, employees)

        # Expected statement counts depend on whether the collection loads
        # lazily and whether a polymorphic selectable is configured.
        if not lazy_relationship:
            if with_polymorphic != "none":
                self.assert_sql_count(testing.db, go, 1)
            else:
                self.assert_sql_count(testing.db, go, 2)
        else:
            if with_polymorphic != "none":
                self.assert_sql_count(testing.db, go, 2)
            else:
                self.assert_sql_count(testing.db, go, 3)

    def test_baseclass_lookup(self, get_dilbert):
        session = Session()
        dilbert = get_dilbert(session)
        if self.redefine_colprop:
            person_attribute_name = "person_name"
        else:
            person_attribute_name = "name"
        # test selecting from the query, using the base
        # mapped table (people) as the selection criterion.
        # in the case of the polymorphic Person query,
        # the "people" selectable should be adapted to be "person_join"
        eq_(
            session.query(Person)
            .filter(getattr(Person, person_attribute_name) == "dilbert")
            .first(),
            dilbert,
        )

    def test_subclass_lookup(self, get_dilbert):
        session = Session()
        dilbert = get_dilbert(session)
        if self.redefine_colprop:
            person_attribute_name = "person_name"
        else:
            person_attribute_name = "name"
        eq_(
            session.query(Engineer)
            .filter(getattr(Person, person_attribute_name) == "dilbert")
            .first(),
            dilbert,
        )

    def test_baseclass_base_alias_filter(self, get_dilbert):
        session = Session()
        dilbert = get_dilbert(session)
        # test selecting from the query, joining against
        # an alias of the base "people" table. test that
        # the "palias" alias does *not* get sucked up
        # into the "person_join" conversion.
        palias = people.alias("palias")
        dilbert = session.query(Person).get(dilbert.person_id)
        is_(
            dilbert,
            session.query(Person)
            .filter(
                (palias.c.name == "dilbert")
                & (palias.c.person_id == Person.person_id)
            )
            .first(),
        )

    def test_subclass_base_alias_filter(self, get_dilbert):
        session = Session()
        dilbert = get_dilbert(session)
        palias = people.alias("palias")
        is_(
            dilbert,
            session.query(Engineer)
            .filter(
                (palias.c.name == "dilbert")
                & (palias.c.person_id == Person.person_id)
            )
            .first(),
        )

    def test_baseclass_sub_table_filter(self, get_dilbert):
        session = Session()
        dilbert = get_dilbert(session)
        is_(
            dilbert,
            session.query(Person)
            .filter(
                (Engineer.engineer_name == "engineer1")
                & (engineers.c.person_id == people.c.person_id)
            )
            .first(),
        )

    def test_subclass_getitem(self, get_dilbert):
        session = Session()
        dilbert = get_dilbert(session)
        is_(
            dilbert,
            session.query(Engineer).filter(
                Engineer.engineer_name == "engineer1"
            )[0],
        )

    def test_primary_table_only_for_requery(self):
        session = Session()
        if self.redefine_colprop:
            person_attribute_name = "person_name"
        else:
            person_attribute_name = "name"
        dilbert = (  # noqa
            session.query(Person)
            .filter(getattr(Person, person_attribute_name) == "dilbert")
            .first()
        )

        def go():
            # assert that only primary table is queried for
            # already-present-in-session
            (
                session.query(Person)
                .filter(getattr(Person, person_attribute_name) == "dilbert")
                .first()
            )

        self.assert_sql_count(testing.db, go, 1)

    def test_standalone_orphans(self):
        if self.redefine_colprop:
            person_attribute_name = "person_name"
        else:
            person_attribute_name = "name"
        session = Session()
        # A Boss with no Company is an orphan under the
        # "all, delete-orphan" cascade; the flush must fail.
        daboss = Boss(
            status="BBB",
            manager_name="boss",
            golf_swing="fore",
            **{person_attribute_name: "daboss"}
        )
        session.add(daboss)
        assert_raises(sa_exc.DBAPIError, session.flush)
| |
import os
import cv2
import re
import time
from threading import RLock
import numpy as np
from codependent_thread import CodependentThread
from image_misc import cv2_imshow_rgb, read_cam_frame, crop_to_square
from misc import tsplit, get_files_list
import caffe
class InputImageFetcher(CodependentThread):
    '''Fetches images from a webcam or loads from a directory.'''

    def __init__(self, settings):
        CodependentThread.__init__(self, settings.input_updater_heartbeat_required)
        self.daemon = True
        # Single lock guarding all mutable state shared with other threads.
        self.lock = RLock()
        self.quit = False
        # Monotonic frame counter; -1 means "no frame produced yet".
        self.latest_frame_idx = -1
        self.latest_frame_data = None
        self.latest_frame_is_from_cam = False
        # True for loading from file, False for loading from camera
        self.static_file_mode = True
        self.settings = settings
        # True for streching the image, False for cropping largest square
        self.static_file_stretch_mode = self.settings.static_file_stretch_mode
        # Cam input
        self.capture_device = settings.input_updater_capture_device
        self.no_cam_present = (self.capture_device is None) # Disable all cam functionality
        self.bound_cap_device = None
        self.sleep_after_read_frame = settings.input_updater_sleep_after_read_frame
        self.latest_cam_frame = None
        self.freeze_cam = False
        # Static file input
        # latest image filename selected, used to avoid reloading
        self.latest_static_filename = None
        # latest loaded image frame, holds the pixels and used to force reloading
        self.latest_static_frame = None
        # latest label for loaded image
        self.latest_label = None
        # keeps current index of loaded file, doesn't seem important
        self.static_file_idx = None
        # contains the requested number of increaments for file index
        self.static_file_idx_increment = 0
        self.available_files, self.labels = get_files_list(self.settings)

    def bind_camera(self):
        # Due to OpenCV limitations, this should be called from the main thread
        print 'InputImageFetcher: bind_camera starting'
        if self.no_cam_present:
            print 'InputImageFetcher: skipping camera bind (device: None)'
        else:
            self.bound_cap_device = cv2.VideoCapture(self.capture_device)
            if self.bound_cap_device.isOpened():
                print 'InputImageFetcher: capture device %s is open' % self.capture_device
            else:
                # Fall back to no-camera mode so the rest of the app keeps
                # working without a device.
                print '\n\nWARNING: InputImageFetcher: capture device %s failed to open! Camera will not be available!\n\n' % self.capture_device
                self.bound_cap_device = None
                self.no_cam_present = True
        print 'InputImageFetcher: bind_camera finished'

    def free_camera(self):
        # Due to OpenCV limitations, this should be called from the main thread
        if self.no_cam_present:
            print 'InputImageFetcher: skipping camera free (device: None)'
        else:
            print 'InputImageFetcher: freeing camera'
            del self.bound_cap_device # free the camera
            self.bound_cap_device = None
        print 'InputImageFetcher: camera freed'

    def set_mode_static(self):
        # Switch the frame source to image files on disk.
        with self.lock:
            self.static_file_mode = True

    def set_mode_cam(self):
        # Switch the frame source to the webcam, if one is available.
        with self.lock:
            if self.no_cam_present:
                print 'WARNING: ignoring set_mode_cam, no cam present'
            else:
                self.static_file_mode = False
                assert self.bound_cap_device != None, 'Call bind_camera first'

    def toggle_input_mode(self):
        with self.lock:
            if self.static_file_mode:
                self.set_mode_cam()
            else:
                self.set_mode_static()

    def set_mode_stretch_on(self):
        with self.lock:
            if not self.static_file_stretch_mode:
                self.static_file_stretch_mode = True
                self.latest_static_frame = None # Force reload
                self.latest_label = None
                #self.latest_frame_is_from_cam = True # Force reload

    def set_mode_stretch_off(self):
        with self.lock:
            if self.static_file_stretch_mode:
                self.static_file_stretch_mode = False
                self.latest_static_frame = None # Force reload
                self.latest_label = None
                #self.latest_frame_is_from_cam = True # Force reload

    def toggle_stretch_mode(self):
        with self.lock:
            if self.static_file_stretch_mode:
                self.set_mode_stretch_off()
            else:
                self.set_mode_stretch_on()

    def run(self):
        '''Thread main loop: publish frames from disk or camera until asked
        to quit or the codependent heartbeat times out.'''
        while not self.quit and not self.is_timed_out():
            #start_time = time.time()
            if self.static_file_mode:
                self.check_increment_and_load_image()
            else:
                if self.freeze_cam and self.latest_cam_frame is not None:
                    # If static file mode was switched to cam mode but cam is still frozen, we need to push the cam frame again
                    if not self.latest_frame_is_from_cam:
                        # future feature: implement more interesting combination of using a camera in sieamese mode
                        if self.settings.is_siamese:
                            im = (self.latest_cam_frame, self.latest_cam_frame)
                        else:
                            im = self.latest_cam_frame
                        self._increment_and_set_frame(im, True)
                else:
                    frame_full = read_cam_frame(self.bound_cap_device, color=not self.settings._calculated_is_gray_model)
                    #print '====> just read frame', frame_full.shape
                    frame = crop_to_square(frame_full)
                    with self.lock:
                        self.latest_cam_frame = frame
                        if self.settings.is_siamese:
                            im = (self.latest_cam_frame, self.latest_cam_frame)
                        else:
                            im = self.latest_cam_frame
                        self._increment_and_set_frame(im, True)
            time.sleep(self.sleep_after_read_frame)
            #print 'Reading one frame took', time.time() - start_time
        print 'InputImageFetcher: exiting run method'
        #print 'InputImageFetcher: read', self.read_frames, 'frames'

    def get_frame(self):
        '''Fetch the latest frame_idx and frame. The idx increments
        any time the frame data changes. If the idx is < 0, the frame
        is not valid.
        '''
        with self.lock:
            return (self.latest_frame_idx, self.latest_frame_data, self.latest_label, self.latest_static_filename)

    def increment_static_file_idx(self, amount = 1):
        # Queue a (possibly negative) file-index change; the worker thread
        # applies it in check_increment_and_load_image().
        with self.lock:
            self.static_file_idx_increment += amount

    def next_image(self):
        if self.static_file_mode:
            self.increment_static_file_idx(1)
        else:
            # In cam mode, "next" just switches back to static file mode.
            self.static_file_mode = True

    def prev_image(self):
        if self.static_file_mode:
            self.increment_static_file_idx(-1)
        else:
            self.static_file_mode = True

    def _increment_and_set_frame(self, frame, from_cam):
        # Publish a new frame atomically and bump the frame counter.
        assert frame is not None
        with self.lock:
            self.latest_frame_idx += 1
            self.latest_frame_data = frame
            self.latest_frame_is_from_cam = from_cam

    def check_increment_and_load_image(self):
        '''Apply any queued file-index increments and (re)load the current
        static image when needed, publishing it as the latest frame.'''
        with self.lock:
            if (self.static_file_idx_increment == 0 and
                self.static_file_idx is not None and
                not self.latest_frame_is_from_cam and
                self.latest_static_frame is not None):
                # Skip if a static frame is already loaded and there is no increment
                return
            assert len(self.available_files) != 0, ('Error: No files found in %s matching %s (current working directory is %s)' %
                                                    (self.settings.static_files_dir, self.settings.static_files_regexp, os.getcwd()))
            if self.static_file_idx is None:
                self.static_file_idx = 0
            # Wrap around the file list in either direction.
            self.static_file_idx = (self.static_file_idx + self.static_file_idx_increment) % len(self.available_files)
            self.static_file_idx_increment = 0
            if self.latest_static_filename != self.available_files[self.static_file_idx] or self.latest_static_frame is None:
                self.latest_static_filename = self.available_files[self.static_file_idx]
                failed = False
                try:
                    if self.settings.is_siamese:
                        # loading two images for siamese network
                        im1 = caffe.io.load_image(os.path.join(self.settings.static_files_dir, self.latest_static_filename[0]), color=not self.settings._calculated_is_gray_model)
                        im2 = caffe.io.load_image(os.path.join(self.settings.static_files_dir, self.latest_static_filename[1]), color=not self.settings._calculated_is_gray_model)
                        if not self.static_file_stretch_mode:
                            im1 = crop_to_square(im1)
                            im2 = crop_to_square(im2)
                        im = (im1,im2)
                    else:
                        im = caffe.io.load_image(os.path.join(self.settings.static_files_dir, self.latest_static_filename), color=not self.settings._calculated_is_gray_model)
                        if not self.static_file_stretch_mode:
                            im = crop_to_square(im)
                except Exception as e:
                    # NOTE(review): broad catch; the actual exception is
                    # discarded -- consider printing/logging e.
                    failed = True
                    print 'Failed loading data'
                if not failed:
                    self.latest_static_frame = im
                    # if we have labels, keep it
                    if self.labels:
                        self.latest_label = self.labels[self.static_file_idx]
            self._increment_and_set_frame(self.latest_static_frame, False)
| |
from abc import ABCMeta, abstractmethod
from datetime import datetime
try:
from cdecimal import Decimal
except ImportError:
from decimal import Decimal
from ..tools.lxml_tools import clean_html
from ..tools.text import find_number, drop_space
from .decorator import default, empty, cached, bind_item
from .const import NULL
from .error import ChoiceFieldError
# Py2/Py3-compatible ABC base: calling ABCMeta directly builds a class whose
# metaclass is ABCMeta without using the Py3-only ``metaclass=`` syntax.
metaclass_ABCMeta = ABCMeta('metaclass_ABCMeta', (object, ), {})
class Field(metaclass_ABCMeta):
    """
    All custom fields should extend this class, and override the get method.
    """

    def __init__(self, xpath=None, default=NULL, empty_default=NULL,
                 processor=None, **kwargs):
        # XPath expression subclasses use to select the raw value.
        self.xpath_exp = xpath
        # Value substituted by the @default decorator when nothing is found.
        self.default = default
        # Value substituted by the @empty decorator for empty raw text.
        self.empty_default = empty_default
        # Optional callable applied to the extracted value (see process()).
        self.processor = processor

    @abstractmethod
    def __get__(self, obj, objtype):
        # Subclasses implement the actual extraction logic here.
        pass

    def __set__(self, obj, value):
        # Data-descriptor assignment stores the value in the owning item's
        # cache. NOTE(review): ``attr_name`` is not set in this class --
        # presumably assigned externally by the item machinery; confirm.
        obj._cache[self.attr_name] = value

    def process(self, value):
        """Apply the optional processor callable to *value*, if any."""
        if self.processor:
            return self.processor(value)
        else:
            return value
class NullField(Field):
    # Always yields process(None); the decorator stack adds caching,
    # default substitution, empty handling and item binding.
    @cached
    @default
    @empty
    @bind_item
    def __get__(self, item, itemtype):
        return self.process(None)
class ItemListField(Field):
    """Field which wraps every node matched by the XPath into an instance
    of ``item_cls`` and yields the resulting list."""

    def __init__(self, xpath, item_cls, *args, **kwargs):
        self.item_cls = item_cls
        super(ItemListField, self).__init__(xpath, *args, **kwargs)

    @cached
    @default
    @empty
    @bind_item
    def __get__(self, item, itemtype):
        selectors = item._selector.select(self.xpath_exp)
        return self.process([self.item_cls(sel.node) for sel in selectors])
class IntegerField(Field):
    """Field which extracts text via XPath and converts it to ``int``.

    Extra keyword options:
      find_number -- extract the first number found in the text
      ignore_spaces -- drop whitespace before parsing
      ignore_chars -- iterable of characters to remove before parsing
      multiple -- return a list of ints, one per matched node
    """

    def __init__(self, *args, **kwargs):
        self.find_number = kwargs.get('find_number', False)
        self.ignore_spaces = kwargs.get('ignore_spaces', False)
        self.ignore_chars = kwargs.get('ignore_chars', None)
        self.multiple = kwargs.get('multiple', False)
        super(IntegerField, self).__init__(*args, **kwargs)

    def get_raw_values(self, item):
        # Raw text of every node matched by the XPath expression.
        return item._selector.select(self.xpath_exp).text_list()

    def get_raw_value(self, item):
        # Raw text of the first node matched by the XPath expression.
        return item._selector.select(self.xpath_exp).text()

    @cached
    @default
    @empty
    @bind_item
    def __get__(self, item, itemtype):
        if self.multiple:
            result = []
            for raw_value in self.get_raw_values(item):
                result.append(self.process_raw_value(raw_value))
            return result
        else:
            raw_value = self.get_raw_value(item)
            return self.process_raw_value(raw_value)

    def process_raw_value(self, value):
        """Convert one raw text chunk into an int (or empty_default)."""
        # Empty string maps to the configured empty_default, if any.
        if self.empty_default is not NULL:
            if value == "":
                return self.empty_default
        if self.find_number or self.ignore_spaces or self.ignore_chars:
            return find_number(self.process(value), ignore_spaces=self.ignore_spaces,
                               ignore_chars=self.ignore_chars)
        else:
            # TODO: process ignore_chars and ignore_spaces in this case too
            # BUGFIX: the loop below used the undefined name ``ignore_chars``
            # (a NameError at runtime); it must read ``self.ignore_chars``.
            # Note this branch is only reached when ignore_chars is falsy,
            # so the cleanup is effectively defensive.
            if self.ignore_chars:
                for char in self.ignore_chars:
                    value = value.replace(char, '')
            if self.ignore_spaces:
                value = drop_space(value)
            return int(self.process(value).strip())
class DecimalField(Field):
    """Field which converts the selected text into ``Decimal``."""

    def __init__(self, *args, **kwargs):
        self.multiple = kwargs.get('multiple', False)
        super(DecimalField, self).__init__(*args, **kwargs)

    def get_raw_values(self, item):
        # Text content of every matched node.
        return item._selector.select(self.xpath_exp).text_list()

    def get_raw_value(self, item):
        # Text content of the first matched node.
        return item._selector.select(self.xpath_exp).text()

    @cached
    @default
    @empty
    @bind_item
    def __get__(self, item, itemtype):
        if not self.multiple:
            return self.process_raw_value(self.get_raw_value(item))
        return [self.process_raw_value(raw) for raw in self.get_raw_values(item)]

    def process_raw_value(self, value):
        # Empty text maps to empty_default when one was configured.
        if self.empty_default is not NULL and value == "":
            return self.empty_default
        return Decimal(self.process(value).strip())
class StringField(Field):
    """Field which returns the selected text after optional processing."""

    def __init__(self, *args, **kwargs):
        self.normalize_space = kwargs.pop('normalize_space', True)
        self.multiple = kwargs.get('multiple', False)
        super(StringField, self).__init__(*args, **kwargs)

    @cached
    @default
    @empty
    @bind_item
    def __get__(self, item, itemtype):
        if not self.multiple:
            return self.process_raw_value(self.get_raw_value(item))
        return [self.process_raw_value(raw) for raw in self.get_raw_values(item)]

    def process_raw_value(self, value):
        return self.process(value)

    def get_raw_values(self, item):
        # Text of every node matched by the XPath expression.
        return item._selector.select(self.xpath_exp).text_list()

    def get_raw_value(self, item):
        # Text of the first node matched by the XPath expression.
        return item._selector.select(self.xpath_exp).text()
class HTMLField(Field):
    def __init__(self, *args, **kwargs):
        # safe_attrs: when given, the extracted HTML is run through
        # clean_html() before processing.
        self.safe_attrs = kwargs.pop('safe_attrs', None)
        super(HTMLField, self).__init__(*args, **kwargs)

    @cached
    @default
    @empty
    @bind_item
    def __get__(self, item, itemtype):
        """Return the HTML of the selected node (cleaned if safe_attrs set)."""
        value = item._selector.select(self.xpath_exp).html()
        if self.safe_attrs is not None:
            # NOTE(review): self.safe_attrs is checked but never forwarded to
            # clean_html(); presumably it should be passed along (e.g.
            # safe_attrs=self.safe_attrs) -- confirm against the clean_html
            # signature in ..tools.lxml_tools.
            return self.process(clean_html(value, output_encoding='unicode'))
        else:
            return self.process(value)
class ChoiceField(Field):
    """Field which maps the extracted text onto a value from the required
    ``choices`` mapping; unknown keys raise ``ChoiceFieldError``."""

    def __init__(self, *args, **kwargs):
        self.choices = kwargs.pop('choices')
        super(ChoiceField, self).__init__(*args, **kwargs)

    @cached
    @default
    @empty
    @bind_item
    def __get__(self, item, itemtype):
        raw = item._selector.select(self.xpath_exp).text()
        key = self.process(raw)
        try:
            return self.choices[key]
        except KeyError:
            raise ChoiceFieldError('Unknown choice: %s' % key)
class RegexField(Field):
    def __init__(self, xpath, regex, *args, **kwargs):
        # regex: a pre-compiled pattern; group(1) becomes the field value.
        self.regex = regex
        super(RegexField, self).__init__(xpath, *args, **kwargs)

    @cached
    @default
    @bind_item
    def __get__(self, item, itemtype):
        """Return the first capture group of ``regex`` applied to the text."""
        value = item._selector.select(self.xpath_exp).text()
        match = self.regex.search(value)
        if match:
            return self.process(match.group(1))
        else:
            # NOTE(review): DataNotFound is not imported anywhere in this
            # module, so this path raises NameError instead of the intended
            # exception -- add the proper import.
            raise DataNotFound('Could not find regex')
class DateTimeField(Field):
    def __init__(self, xpath, datetime_format='Y-m-d', *args, **kwargs):
        # NOTE(review): the default format lacks strptime's "%" directives
        # (should presumably be '%Y-%m-%d'); callers appear expected to
        # always pass an explicit datetime_format -- confirm.
        self.datetime_format = datetime_format
        super(DateTimeField, self).__init__(xpath, *args, **kwargs)

    @cached
    @default
    @bind_item
    def __get__(self, item, itemtype):
        """Parse the selected text into ``datetime`` via datetime_format."""
        value = item._selector.select(self.xpath_exp).text()
        return datetime.strptime(self.process(value),
                                 self.datetime_format)
class DateField(Field):
    def __init__(self, xpath, date_format='Y-m-d', *args, **kwargs):
        # NOTE(review): same as DateTimeField -- the default format has no
        # "%" directives; confirm callers always pass date_format.
        self.date_format = date_format
        super(DateField, self).__init__(xpath, *args, **kwargs)

    @cached
    @default
    @bind_item
    def __get__(self, item, itemtype):
        """Parse the selected text and return only the ``date`` part."""
        value = item._selector.select(self.xpath_exp).text()
        return datetime.strptime(self.process(value),
                                 self.date_format).date()
class FuncField(Field):
    """Field whose value is produced by a user-supplied callable.

    When ``pass_item`` is true the callable receives ``(item, selector)``,
    otherwise only the selector.
    """

    def __init__(self, func, pass_item=False, *args, **kwargs):
        self.func = func
        self.pass_item = pass_item
        super(FuncField, self).__init__(*args, **kwargs)

    @cached
    @default
    @bind_item
    def __get__(self, item, itemtype):
        call_args = (item, item._selector) if self.pass_item else (item._selector,)
        return self.process(self.func(*call_args))
class BooleanField(Field):
    # Truth value is simply whether the XPath matches anything.
    @cached
    @default
    @bind_item
    def __get__(self, item, itemtype):
        """Return True if the XPath expression selects at least one node."""
        return item._selector.select(self.xpath_exp).exists()
| |
"""
Validation methods for the NAPALM base.
See: https://napalm.readthedocs.io/en/latest/validate.html
"""
import yaml
import copy
import re
from napalm.base.exceptions import ValidationException
# We put it here to compile it only once
# Matches e.g. "<=42" or "!=3.14": group(1) is the operator, group(2) the
# numeric literal. Used by _compare_numeric().
numeric_compare_regex = re.compile(r"^(<|>|<=|>=|==|!=)(\d+(\.\d+){0,1})$")
def _get_validation_file(validation_file):
    """Read *validation_file* and return its parsed YAML content.

    Raises ValidationException when the file is missing or is not valid YAML.
    """
    try:
        with open(validation_file, "r") as stream:
            try:
                return yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                raise ValidationException(exc)
    except IOError:
        raise ValidationException("File {0} not found.".format(validation_file))
def _mode(mode_string):
mode = {"strict": False}
for m in mode_string.split():
if m not in mode.keys():
raise ValidationException("mode '{}' not recognized".format(m))
mode[m] = True
return mode
def _compare_getter_list(src, dst, mode):
    """Validate expected list *src* against actual list *dst*.

    Each expected element must match (via compare()) some element of *dst*;
    matched elements are consumed from *dst*, so duplicates are matched
    one-to-one. In strict mode, unmatched leftovers in *dst* make the
    result non-compliant.
    """
    result = {"complies": True, "present": [], "missing": [], "extra": []}
    for src_element in src:
        found = False
        i = 0
        while True:
            try:
                intermediate_match = compare(src_element, dst[i])
                # compare() returns either a bool or a nested result dict;
                # a dict counts as a match only when it complies.
                if (
                    isinstance(intermediate_match, dict)
                    and intermediate_match["complies"]
                    or not isinstance(intermediate_match, dict)
                    and intermediate_match
                ):
                    found = True
                    result["present"].append(src_element)
                    # Consume the matched element so it can't match twice.
                    dst.pop(i)
                    break
                else:
                    i += 1
            except IndexError:
                # Ran off the end of dst without a match.
                break
        if not found:
            result["complies"] = False
            result["missing"].append(src_element)
    if mode["strict"] and dst:
        result["extra"] = dst
        result["complies"] = False
    return result
def _compare_getter_dict(src, dst, mode):
    """Validate expected dict *src* against actual dict *dst*.

    Keys missing from *dst* mark the result non-compliant; in strict mode
    any extra keys left over in *dst* do as well. Per-key outcomes are
    recorded under "present".
    """
    result = {"complies": True, "present": {}, "missing": [], "extra": []}
    dst = copy.deepcopy(dst)  # Otherwise we are going to modify a "live" object
    for key, src_element in src.items():
        try:
            dst_element = dst.pop(key)
            result["present"][key] = {}
            intermediate_result = compare(src_element, dst_element)
            # compare() returns a nested dict for structured expectations
            # and a plain bool for scalar ones.
            if isinstance(intermediate_result, dict):
                nested = True
                complies = intermediate_result["complies"]
                if not complies:
                    result["present"][key]["diff"] = intermediate_result
            else:
                complies = intermediate_result
                nested = False
                if not complies:
                    result["present"][key]["expected_value"] = src_element
                    result["present"][key]["actual_value"] = dst_element
            if not complies:
                result["complies"] = False
            result["present"][key]["complies"] = complies
            result["present"][key]["nested"] = nested
        except KeyError:
            result["missing"].append(key)
            result["complies"] = False
    if mode["strict"] and dst:
        result["extra"] = list(dst.keys())
        result["complies"] = False
    return result
def compare(src, dst):
    """Recursively compare expected value *src* against actual value *dst*.

    Returns a bool for scalar comparisons, or a result dict (carrying a
    "complies" key) when *src* is a dict validation structure. String
    expectations support numeric operators ("<", ">", ...), ranges
    ("a<->b"), and otherwise are treated as regular expressions with a
    plain-equality fallback.
    """
    # (Removed: a leftover Python-2 ``src = str(src)`` no-op that converted
    # str to str.)
    if isinstance(src, dict):
        # "_mode" carries comparison flags (e.g. "strict"), not data.
        mode = _mode(src.pop("_mode", ""))
        if "list" in src.keys():
            if not isinstance(dst, list):
                # This can happen with nested lists
                return False
            return _compare_getter_list(src["list"], dst, mode)
        return _compare_getter_dict(src, dst, mode)
    elif isinstance(src, str):
        if src.startswith("<") or src.startswith(">"):
            cmp_result = _compare_numeric(src, dst)
            return cmp_result
        elif "<->" in src and len(src.split("<->")) == 2:
            cmp_result = _compare_range(src, dst)
            return cmp_result
        else:
            # Regex match first; on no match, fall back to plain equality.
            m = re.search(src, str(dst))
            if m:
                return bool(m)
            else:
                return src == dst
    elif type(src) == type(dst) == list:
        # Pairwise-compare two plain lists of dicts, regex-matching each
        # expected value against the actual one.
        pairs = zip(src, dst)
        diff_lists = [
            [(k, x[k], y[k]) for k in x if not re.search(x[k], y[k])]
            for x, y in pairs
            if x != y
        ]
        return empty_tree(diff_lists)
    else:
        return src == dst
def _compare_numeric(src_num, dst_num):
    """Compare numerical values. You can use '<%d','>%d'."""
    # Operator token -> float comparison dunder.
    comparisons = {
        "<": "__lt__",
        ">": "__gt__",
        ">=": "__ge__",
        "<=": "__le__",
        "==": "__eq__",
        "!=": "__ne__",
    }
    collected = float(dst_num)
    match = numeric_compare_regex.match(src_num)
    if match is None:
        raise ValueError(
            "Failed numeric comparison. Collected: {}. Expected: {}".format(
                collected, src_num
            )
        )
    operator_token = match.group(1)
    threshold = float(match.group(2))
    return getattr(collected, comparisons[operator_token])(threshold)
def _compare_range(src_num, dst_num):
"""Compare value against a range of values. You can use '%d<->%d'."""
dst_num = float(dst_num)
match = src_num.split("<->")
if len(match) != 2:
error = "Failed range comparison. Collected: {}. Expected: {}".format(
dst_num, src_num
)
raise ValueError(error)
if float(match[0]) <= dst_num <= float(match[1]):
return True
else:
return False
def empty_tree(input_list):
    """Return True when *input_list* contains only (nested) empty lists."""
    return all(
        isinstance(element, list) and empty_tree(element)
        for element in input_list
    )
def compliance_report(cls, validation_file=None, validation_source=None):
    """Compare getter output of device *cls* against a validation source.

    Either *validation_file* (a YAML path) or *validation_source* (an
    already-parsed structure) must be supplied. Returns a report dict with
    one entry per checked getter plus "skipped" and overall "complies".
    """
    report = {}
    if validation_file:
        validation_source = _get_validation_file(validation_file)
    # Otherwise we are going to modify a "live" object
    validation_source = copy.deepcopy(validation_source)
    for validation_check in validation_source:
        for getter, expected_results in validation_check.items():
            if getter == "get_config":
                # TBD
                pass
            else:
                # "_name" lets a check appear under a custom report key;
                # "_kwargs" is forwarded to the getter call.
                key = expected_results.pop("_name", "") or getter
                try:
                    kwargs = expected_results.pop("_kwargs", {})
                    actual_results = getattr(cls, getter)(**kwargs)
                    report[key] = compare(expected_results, actual_results)
                except NotImplementedError:
                    # Getter not supported by this driver: record, don't fail.
                    report[key] = {"skipped": True, "reason": "NotImplemented"}
    # Skipped entries have no "complies" key, so they default to True here.
    complies = all([e.get("complies", True) for e in report.values()])
    report["skipped"] = [k for k, v in report.items() if v.get("skipped", False)]
    report["complies"] = complies
    return report
| |
from __future__ import unicode_literals
import logging
import six
import time
import traceback
from six.moves.urllib_parse import unquote, urlencode
from twisted.internet import defer
from autobahn.twisted.websocket import WebSocketServerProtocol, WebSocketServerFactory, ConnectionDeny
from .utils import parse_x_forwarded_for
logger = logging.getLogger(__name__)
class WebSocketProtocol(WebSocketServerProtocol):
    """
    Protocol which supports WebSockets and forwards incoming messages to
    the websocket channels.
    """

    # If we should send no more messages (e.g. we error-closed the socket)
    muted = False

    def set_main_factory(self, main_factory):
        """Attach the main server factory and grab its channel layer."""
        self.main_factory = main_factory
        self.channel_layer = self.main_factory.channel_layer

    def onConnect(self, request):
        """
        Handle the WebSocket opening handshake.

        Builds the connection-info dict, sends it on the
        ``websocket.connect`` channel and returns a Deferred that is
        called back (accept) or errbacked (reject) once the application
        responds via serverAccept()/serverReject().

        Raises ConnectionDeny(503) immediately if the channel layer is
        at capacity.
        """
        self.request = request
        self.packets_received = 0
        self.protocol_to_accept = None
        self.socket_opened = time.time()
        self.last_data = time.time()
        try:
            # Sanitize and decode headers
            self.clean_headers = []
            for name, value in request.headers.items():
                name = name.encode("ascii")
                # Prevent CVE-2015-0219
                if b"_" in name:
                    continue
                self.clean_headers.append((name.lower(), value.encode("latin1")))
            # Reconstruct query string
            # TODO: get autobahn to provide it raw
            query_string = urlencode(request.params, doseq=True).encode("ascii")
            # Make sending channel
            self.reply_channel = self.main_factory.make_send_channel()
            # Tell main factory about it
            self.main_factory.reply_protocols[self.reply_channel] = self
            # Get client address if possible
            if hasattr(self.transport.getPeer(), "host") and hasattr(self.transport.getPeer(), "port"):
                self.client_addr = [self.transport.getPeer().host, self.transport.getPeer().port]
                self.server_addr = [self.transport.getHost().host, self.transport.getHost().port]
            else:
                self.client_addr = None
                self.server_addr = None
            if self.main_factory.proxy_forwarded_address_header:
                # NOTE(review): this reads self.http_headers (autobahn's raw
                # header mapping) rather than the sanitized clean_headers
                # built above — confirm this is what parse_x_forwarded_for
                # expects.
                self.client_addr = parse_x_forwarded_for(
                    self.http_headers,
                    self.main_factory.proxy_forwarded_address_header,
                    self.main_factory.proxy_forwarded_port_header,
                    self.client_addr
                )
            # Make initial request info dict from request (we only have it here)
            self.path = request.path.encode("ascii")
            self.request_info = {
                "path": self.unquote(self.path),
                "headers": self.clean_headers,
                "query_string": self.unquote(query_string),
                "client": self.client_addr,
                "server": self.server_addr,
                "reply_channel": self.reply_channel,
                "order": 0,
            }
        except Exception:
            # Exceptions here are not displayed right, just 500.
            # Turn them into an ERROR log, then re-raise unchanged.
            logger.error(traceback.format_exc())
            raise
        ws_protocol = None
        for header, value in self.clean_headers:
            if header == b'sec-websocket-protocol':
                protocols = [x.strip() for x in self.unquote(value).split(",")]
                for protocol in protocols:
                    if protocol in self.factory.protocols:
                        ws_protocol = protocol
                        break
        # Work out what subprotocol we will accept, if any
        if ws_protocol and ws_protocol in self.factory.protocols:
            self.protocol_to_accept = ws_protocol
        else:
            self.protocol_to_accept = None
        # Send over the connect message
        try:
            self.channel_layer.send("websocket.connect", self.request_info)
        except self.channel_layer.ChannelFull:
            # You have to consume websocket.connect according to the spec,
            # so drop the connection.
            self.muted = True
            # logger.warn is deprecated (removed in Python 3.13); use warning.
            logger.warning("WebSocket force closed for %s due to connect backpressure", self.reply_channel)
            # Send code 503 "Service Unavailable" with close.
            raise ConnectionDeny(code=503, reason="Connection queue at capacity")
        else:
            self.factory.log_action("websocket", "connecting", {
                "path": self.request.path,
                "client": "%s:%s" % tuple(self.client_addr) if self.client_addr else None,
            })
        # Make a deferred and return it - we'll either call it or err it later on
        self.handshake_deferred = defer.Deferred()
        return self.handshake_deferred

    @classmethod
    def unquote(cls, value):
        """
        Python 2 and 3 compat layer for utf-8 unquoting
        """
        if six.PY2:
            return unquote(value).decode("utf8")
        else:
            return unquote(value.decode("ascii"))

    def onOpen(self):
        """Log that the handshake completed and the channel is open."""
        # Send news that this channel is open
        logger.debug("WebSocket %s open and established", self.reply_channel)
        self.factory.log_action("websocket", "connected", {
            "path": self.request.path,
            "client": "%s:%s" % tuple(self.client_addr) if self.client_addr else None,
        })

    def onMessage(self, payload, isBinary):
        """
        Forward an incoming WebSocket frame onto the websocket.receive
        channel, as "bytes" for binary frames or "text" for text frames.
        Mutes and closes the socket on channel backpressure.
        """
        # If we're muted, do nothing.
        if self.muted:
            logger.debug("Muting incoming frame on %s", self.reply_channel)
            return
        logger.debug("WebSocket incoming frame on %s", self.reply_channel)
        self.packets_received += 1
        self.last_data = time.time()
        try:
            if isBinary:
                self.channel_layer.send("websocket.receive", {
                    "reply_channel": self.reply_channel,
                    "path": self.unquote(self.path),
                    "order": self.packets_received,
                    "bytes": payload,
                })
            else:
                self.channel_layer.send("websocket.receive", {
                    "reply_channel": self.reply_channel,
                    "path": self.unquote(self.path),
                    "order": self.packets_received,
                    "text": payload.decode("utf8"),
                })
        except self.channel_layer.ChannelFull:
            # You have to consume websocket.receive according to the spec,
            # so drop the connection.
            self.muted = True
            # logger.warn is deprecated (removed in Python 3.13); use warning.
            logger.warning("WebSocket force closed for %s due to receive backpressure", self.reply_channel)
            # Send code 1013 "try again later" with close.
            self.sendCloseFrame(code=1013, isReply=False)

    def serverAccept(self):
        """
        Called when we get a message saying to accept the connection.
        """
        self.handshake_deferred.callback(self.protocol_to_accept)
        logger.debug("WebSocket %s accepted by application", self.reply_channel)

    def serverReject(self):
        """
        Called when we get a message saying to reject the connection.
        """
        self.handshake_deferred.errback(ConnectionDeny(code=403, reason="Access denied"))
        self.cleanup()
        logger.debug("WebSocket %s rejected by application", self.reply_channel)
        self.factory.log_action("websocket", "rejected", {
            "path": self.request.path,
            "client": "%s:%s" % tuple(self.client_addr) if self.client_addr else None,
        })

    def serverSend(self, content, binary=False):
        """
        Server-side channel message to send a message.

        Implicitly accepts the connection if it is still handshaking.
        """
        if self.state == self.STATE_CONNECTING:
            self.serverAccept()
        self.last_data = time.time()
        logger.debug("Sent WebSocket packet to client for %s", self.reply_channel)
        if binary:
            self.sendMessage(content, binary)
        else:
            self.sendMessage(content.encode("utf8"), binary)

    def serverClose(self, code=True):
        """
        Server-side channel message to close the socket.

        A bare True means "normal closure" (1000); any other value is
        passed through as the close code.
        """
        code = 1000 if code is True else code
        self.sendClose(code=code)

    def onClose(self, wasClean, code, reason):
        """
        Notify the application of the closed socket (unless muted) and
        clean up factory bookkeeping.
        """
        self.cleanup()
        if hasattr(self, "reply_channel"):
            logger.debug("WebSocket closed for %s", self.reply_channel)
            try:
                if not self.muted:
                    self.channel_layer.send("websocket.disconnect", {
                        "reply_channel": self.reply_channel,
                        "code": code,
                        "path": self.unquote(self.path),
                        "order": self.packets_received + 1,
                    })
            except self.channel_layer.ChannelFull:
                # Best-effort: a full channel on disconnect is not fatal.
                pass
            self.factory.log_action("websocket", "disconnected", {
                "path": self.request.path,
                "client": "%s:%s" % tuple(self.client_addr) if self.client_addr else None,
            })
        else:
            logger.debug("WebSocket closed before handshake established")

    def cleanup(self):
        """
        Call to clean up this socket after it's closed.
        """
        if hasattr(self, "reply_channel"):
            del self.factory.reply_protocols[self.reply_channel]

    def duration(self):
        """
        Returns the time since the socket was opened
        """
        return time.time() - self.socket_opened

    def check_ping(self):
        """
        Checks to see if we should send a keepalive ping/deny socket connection
        """
        # If we're still connecting, deny the connection
        if self.state == self.STATE_CONNECTING:
            if self.duration() > self.main_factory.websocket_connect_timeout:
                self.serverReject()
        elif self.state == self.STATE_OPEN:
            if (time.time() - self.last_data) > self.main_factory.ping_interval:
                self._sendAutoPing()
                self.last_data = time.time()
class WebSocketFactory(WebSocketServerFactory):
    """
    WebSocketServerFactory subclass that keeps a reference to the "main"
    server factory so individual WebSocket protocols can reach it for
    reply-channel bookkeeping.
    """

    def __init__(self, main_factory, *args, **kwargs):
        # Stash the main factory before delegating to autobahn's init.
        self.main_factory = main_factory
        WebSocketServerFactory.__init__(self, *args, **kwargs)

    def log_action(self, *args, **kwargs):
        # Proxy logging straight through to the main factory.
        self.main_factory.log_action(*args, **kwargs)
| |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
    # On Python 3, expose the builtins module under the Python 2 name so the
    # generated classes below can always reference __builtin__.property, and
    # alias the removed "long" type to int.
    import builtins as __builtin__
    long = int
elif six.PY2:
    import __builtin__
from . import rsvp_te
from . import segment_routing
class signaling_protocols(PybindBase):
    """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/mpls/signaling-protocols. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  YANG Description: top-level signaling protocol configuration
    """

    # Per-instance storage is restricted to the path helper, extension
    # methods and the two child containers.  NOTE: "__rsvp_te" and
    # "__segment_routing" are name-mangled to _signaling_protocols__*.
    __slots__ = ("_path_helper", "_extmethods", "__rsvp_te", "__segment_routing")
    _yang_name = "signaling-protocols"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        # Build the default (empty) child containers for this YANG container.
        self._path_helper = False
        self._extmethods = False
        self.__rsvp_te = YANGDynClass(
            base=rsvp_te.rsvp_te,
            is_container="container",
            yang_name="rsvp-te",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__segment_routing = YANGDynClass(
            base=segment_routing.segment_routing,
            is_container="container",
            yang_name="segment-routing",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        # Copy-constructor path: a single positional argument must be an
        # object exposing every element of this container; its changed
        # elements are copied in via the generated _set_* methods.
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Only copy elements the source object actually changed.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Path of this container within the YANG data tree; delegates to the
        # parent when one is attached, otherwise returns the absolute path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances", "network-instance", "mpls", "signaling-protocols"
            ]

    def _get_rsvp_te(self):
        """
    Getter method for rsvp_te, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te (container)
    YANG Description: RSVP-TE global signaling protocol configuration
        """
        return self.__rsvp_te

    def _set_rsvp_te(self, v, load=False):
        """
    Setter method for rsvp_te, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_rsvp_te is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_rsvp_te() directly.
    YANG Description: RSVP-TE global signaling protocol configuration
        """
        # Presumably unwraps an already-typed value to its underlying type
        # before re-wrapping below — standard pyangbind-generated pattern.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=rsvp_te.rsvp_te,
                is_container="container",
                yang_name="rsvp-te",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """rsvp_te must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=rsvp_te.rsvp_te, is_container='container', yang_name="rsvp-te", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__rsvp_te = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_rsvp_te(self):
        # Reset the child container back to its default (empty) state.
        self.__rsvp_te = YANGDynClass(
            base=rsvp_te.rsvp_te,
            is_container="container",
            yang_name="rsvp-te",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_segment_routing(self):
        """
    Getter method for segment_routing, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing (container)
    YANG Description: MPLS-specific Segment Routing configuration and operational state
parameters
        """
        return self.__segment_routing

    def _set_segment_routing(self, v, load=False):
        """
    Setter method for segment_routing, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_segment_routing is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_segment_routing() directly.
    YANG Description: MPLS-specific Segment Routing configuration and operational state
parameters
        """
        # Presumably unwraps an already-typed value to its underlying type
        # before re-wrapping below — standard pyangbind-generated pattern.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=segment_routing.segment_routing,
                is_container="container",
                yang_name="segment-routing",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """segment_routing must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=segment_routing.segment_routing, is_container='container', yang_name="segment-routing", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__segment_routing = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_segment_routing(self):
        # Reset the child container back to its default (empty) state.
        self.__segment_routing = YANGDynClass(
            base=segment_routing.segment_routing,
            is_container="container",
            yang_name="segment-routing",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    # Public properties (via __builtin__ so the names survive the py2/3 shim).
    rsvp_te = __builtin__.property(_get_rsvp_te, _set_rsvp_te)
    segment_routing = __builtin__.property(_get_segment_routing, _set_segment_routing)

    # Ordered registry of this container's YANG elements.
    _pyangbind_elements = OrderedDict(
        [("rsvp_te", rsvp_te), ("segment_routing", segment_routing)]
    )
from . import rsvp_te
from . import segment_routing
class signaling_protocols(PybindBase):
    """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/mpls/signaling-protocols. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  YANG Description: top-level signaling protocol configuration
    """

    # Per-instance storage is restricted to the path helper, extension
    # methods and the two child containers.  NOTE: "__rsvp_te" and
    # "__segment_routing" are name-mangled to _signaling_protocols__*.
    __slots__ = ("_path_helper", "_extmethods", "__rsvp_te", "__segment_routing")
    _yang_name = "signaling-protocols"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        # Build the default (empty) child containers for this YANG container.
        self._path_helper = False
        self._extmethods = False
        self.__rsvp_te = YANGDynClass(
            base=rsvp_te.rsvp_te,
            is_container="container",
            yang_name="rsvp-te",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__segment_routing = YANGDynClass(
            base=segment_routing.segment_routing,
            is_container="container",
            yang_name="segment-routing",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        # Copy-constructor path: a single positional argument must be an
        # object exposing every element of this container; its changed
        # elements are copied in via the generated _set_* methods.
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Only copy elements the source object actually changed.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Path of this container within the YANG data tree; delegates to the
        # parent when one is attached, otherwise returns the absolute path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances", "network-instance", "mpls", "signaling-protocols"
            ]

    def _get_rsvp_te(self):
        """
    Getter method for rsvp_te, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te (container)
    YANG Description: RSVP-TE global signaling protocol configuration
        """
        return self.__rsvp_te

    def _set_rsvp_te(self, v, load=False):
        """
    Setter method for rsvp_te, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/rsvp_te (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_rsvp_te is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_rsvp_te() directly.
    YANG Description: RSVP-TE global signaling protocol configuration
        """
        # Presumably unwraps an already-typed value to its underlying type
        # before re-wrapping below — standard pyangbind-generated pattern.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=rsvp_te.rsvp_te,
                is_container="container",
                yang_name="rsvp-te",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """rsvp_te must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=rsvp_te.rsvp_te, is_container='container', yang_name="rsvp-te", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__rsvp_te = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_rsvp_te(self):
        # Reset the child container back to its default (empty) state.
        self.__rsvp_te = YANGDynClass(
            base=rsvp_te.rsvp_te,
            is_container="container",
            yang_name="rsvp-te",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_segment_routing(self):
        """
    Getter method for segment_routing, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing (container)
    YANG Description: MPLS-specific Segment Routing configuration and operational state
parameters
        """
        return self.__segment_routing

    def _set_segment_routing(self, v, load=False):
        """
    Setter method for segment_routing, mapped from YANG variable /network_instances/network_instance/mpls/signaling_protocols/segment_routing (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_segment_routing is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_segment_routing() directly.
    YANG Description: MPLS-specific Segment Routing configuration and operational state
parameters
        """
        # Presumably unwraps an already-typed value to its underlying type
        # before re-wrapping below — standard pyangbind-generated pattern.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=segment_routing.segment_routing,
                is_container="container",
                yang_name="segment-routing",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """segment_routing must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=segment_routing.segment_routing, is_container='container', yang_name="segment-routing", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__segment_routing = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_segment_routing(self):
        # Reset the child container back to its default (empty) state.
        self.__segment_routing = YANGDynClass(
            base=segment_routing.segment_routing,
            is_container="container",
            yang_name="segment-routing",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    # Public properties (via __builtin__ so the names survive the py2/3 shim).
    rsvp_te = __builtin__.property(_get_rsvp_te, _set_rsvp_te)
    segment_routing = __builtin__.property(_get_segment_routing, _set_segment_routing)

    # Ordered registry of this container's YANG elements.
    _pyangbind_elements = OrderedDict(
        [("rsvp_te", rsvp_te), ("segment_routing", segment_routing)]
    )
| |
# slicer imports
from __main__ import vtk, slicer
# python includes
import sys
import time
class Helper(object):
    '''
    Static helper utilities for the Slicer VMTK modules: console logging,
    VMTK availability probing, fiducial/coordinate conversion and ROI
    extraction.  NOTE: this is legacy Python 2 code (print statements)
    targeting the old VTK 5 pipeline API (SetInput/Update).
    '''
    @staticmethod
    def Info(message):
        '''
        Print *message* to stdout with a "[VMTK <timestamp>]:" prefix and
        flush immediately so it appears in the Slicer console.
        '''
        print "[VMTK " + time.strftime("%m/%d/%Y %H:%M:%S") + "]: " + str(message)
        sys.stdout.flush()
    @staticmethod
    def Debug(message):
        '''
        Print a timestamped DEBUG message when the hard-coded
        showDebugOutput flag is on, then flush stdout.
        '''
        showDebugOutput = 1
        # NOTE(review): this local import is unused — the call below uses
        # time.strftime from the module-level import.
        from time import strftime
        if showDebugOutput:
            print "[VMTK " + time.strftime("%m/%d/%Y %H:%M:%S") + "] DEBUG: " + str(message)
            sys.stdout.flush()
    @staticmethod
    def CreateSpace(n):
        '''
        Return a string of *n* spaces (used for indenting log output).
        '''
        spacer = ""
        for s in range(n):
            spacer += " "
        return spacer
    @staticmethod
    def CheckIfVmtkIsInstalled():
        '''
        Return True if the VMTK extension appears usable.

        Tries to instantiate a VMTK filter; any exception (including the
        NameError raised when the vtkvmtk symbols were never imported,
        presumably by a wildcard import elsewhere in the module) is taken
        to mean VMTK is not installed.
        '''
        vmtkInstalled = True
        try:
            fastMarching = vtkvmtkFastMarchingUpwindGradientImageFilter()
            fastMarching = None
        except Exception:
            vmtkInstalled = False
        return vmtkInstalled
    @staticmethod
    def convertFiducialHierarchyToVtkIdList(hierarchyNode,volumeNode):
        '''
        Convert the fiducial points of a markups node into a vtkIdList of
        point ids within *volumeNode*'s image data.

        Returns an empty vtkIdList if either argument is missing or of the
        wrong node type.  NOTE(review): despite the name, this expects a
        vtkMRMLMarkupsFiducialNode, not a hierarchy node.
        '''
        outputIds = vtk.vtkIdList()
        if not hierarchyNode or not volumeNode:
            return outputIds
        if isinstance(hierarchyNode,slicer.vtkMRMLMarkupsFiducialNode) and isinstance(volumeNode,slicer.vtkMRMLScalarVolumeNode):
            image = volumeNode.GetImageData()
            # now we have the children which are fiducialNodes - let's loop!
            for n in range(hierarchyNode.GetNumberOfFiducials()):
                currentCoordinatesRAS = [0,0,0]
                # grab the current coordinates
                hierarchyNode.GetNthFiducialPosition(n,currentCoordinatesRAS)
                # convert the RAS to IJK
                currentCoordinatesIJK = Helper.ConvertRAStoIJK(volumeNode,currentCoordinatesRAS)
                # strip the last element since we need a 3based tupel
                currentCoordinatesIJKlist = (int(currentCoordinatesIJK[0]),int(currentCoordinatesIJK[1]),int(currentCoordinatesIJK[2]))
                outputIds.InsertNextId(int(image.ComputePointId(currentCoordinatesIJKlist)))
        # IdList was created, return it even if it might be empty
        return outputIds
    @staticmethod
    def ConvertRAStoIJK(volumeNode,rasCoordinates):
        '''
        Convert a RAS coordinate (list of 3 or 4 values) to IJK space using
        the volume's RAS-to-IJK matrix.  Mutates *rasCoordinates* in place
        by appending the homogeneous 1 when only 3 values are given.
        Returns the 4-component homogeneous IJK coordinate.
        '''
        rasToIjkMatrix = vtk.vtkMatrix4x4()
        volumeNode.GetRASToIJKMatrix(rasToIjkMatrix)
        # the RAS coordinates need to be 4
        if len(rasCoordinates) < 4:
            rasCoordinates.append(1)
        ijkCoordinates = rasToIjkMatrix.MultiplyPoint(rasCoordinates)
        return ijkCoordinates
    @staticmethod
    def extractROI(originalVolumeID,newVolumeID,rasCoordinates,diameter):
        '''
        Reslice a cubic region of interest centered at *rasCoordinates*
        out of the volume *originalVolumeID* and store the result in the
        (pre-existing) volume *newVolumeID*.

        The box half-width is diameter*4*maxSpacing in each axis
        (presumably a heuristic margin — confirm against callers).  The
        output image has unit spacing/origin; the real geometry is carried
        by the IJK<->RAS matrices set on the new volume.
        '''
        originalVolume = slicer.mrmlScene.GetNodeByID(originalVolumeID)
        newVolume = slicer.mrmlScene.GetNodeByID(newVolumeID)
        # code below converted from cropVolume module by A. Fedorov
        # optimized after that :)
        inputRASToIJK = vtk.vtkMatrix4x4()
        inputIJKToRAS = vtk.vtkMatrix4x4()
        outputIJKToRAS = vtk.vtkMatrix4x4()
        outputRASToIJK = vtk.vtkMatrix4x4()
        volumeXform = vtk.vtkMatrix4x4()
        T = vtk.vtkMatrix4x4()
        originalVolume.GetRASToIJKMatrix(inputRASToIJK)
        originalVolume.GetIJKToRASMatrix(inputIJKToRAS)
        outputIJKToRAS.Identity()
        outputRASToIJK.Identity()
        volumeXform.Identity()
        T.Identity()
        # if the originalVolume is under a transform
        volumeTransformNode = originalVolume.GetParentTransformNode()
        if volumeTransformNode:
            volumeTransformNode.GetMatrixTransformToWorld(volumeXform)
            volumeXform.Invert()
        maxSpacing = max(originalVolume.GetSpacing())
        # build our box
        rX = diameter*4*maxSpacing
        rY = diameter*4*maxSpacing
        rZ = diameter*4*maxSpacing
        cX = rasCoordinates[0]
        cY = rasCoordinates[1]
        cZ = rasCoordinates[2]
        inputSpacingX = originalVolume.GetSpacing()[0]
        inputSpacingY = originalVolume.GetSpacing()[1]
        inputSpacingZ = originalVolume.GetSpacing()[2]
        # output extent: box size (2r per axis) expressed in input voxels
        outputExtentX = int(2.0*rX/inputSpacingX)
        outputExtentY = int(2.0*rY/inputSpacingY)
        outputExtentZ = int(2.0*rZ/inputSpacingZ)
        # configure spacing
        outputIJKToRAS.SetElement(0,0,inputSpacingX)
        outputIJKToRAS.SetElement(1,1,inputSpacingY)
        outputIJKToRAS.SetElement(2,2,inputSpacingZ)
        # configure origin
        outputIJKToRAS.SetElement(0,3,(cX-rX+inputSpacingX*0.5))
        outputIJKToRAS.SetElement(1,3,(cY-rY+inputSpacingY*0.5))
        outputIJKToRAS.SetElement(2,3,(cZ-rZ+inputSpacingZ*0.5))
        outputRASToIJK.DeepCopy(outputIJKToRAS)
        outputRASToIJK.Invert()
        # compose reslice transform: output IJK -> RAS -> (world) -> input IJK
        T.DeepCopy(outputIJKToRAS)
        T.Multiply4x4(volumeXform,T,T)
        T.Multiply4x4(inputRASToIJK,T,T)
        resliceT = vtk.vtkTransform()
        resliceT.SetMatrix(T)
        # legacy VTK 5 pipeline API (SetInput / UpdateWholeExtent)
        reslicer = vtk.vtkImageReslice()
        reslicer.SetInterpolationModeToLinear()
        reslicer.SetInput(originalVolume.GetImageData())
        reslicer.SetOutputExtent(0,int(outputExtentX),0,int(outputExtentY),0,int(outputExtentZ))
        reslicer.SetOutputOrigin(0,0,0)
        reslicer.SetOutputSpacing(1,1,1)
        #reslicer.SetOutputOrigin(image.GetOrigin())
        #reslicer.SetOutputSpacing(image.GetSpacing())
        reslicer.SetResliceTransform(resliceT)
        reslicer.UpdateWholeExtent()
        # normalize origin/spacing of the resliced image; geometry lives in
        # the matrices assigned to the new volume below
        changer = vtk.vtkImageChangeInformation()
        changer.SetInput(reslicer.GetOutput())
        changer.SetOutputOrigin(0,0,0)
        changer.SetOutputSpacing(1,1,1)
        #changer.SetOutputOrigin(image.GetOrigin())
        # changer.SetOutputSpacing(image.GetSpacing())
        changer.Update()
        outImageData = vtk.vtkImageData()
        outImageData.DeepCopy(changer.GetOutput())
        outImageData.Update()
        newVolume.SetAndObserveImageData(outImageData)
        newVolume.SetIJKToRASMatrix(outputIJKToRAS)
        newVolume.SetRASToIJKMatrix(outputRASToIJK)
        newVolume.Modified()
| |
######################################################################
# Copyright 2016, 2017 John J. Rofrano. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
######################################################################
"""
Order Store Service with UI
Paths:
------
GET / - Displays a UI for Selenium testing
GET /orders - Returns a list all of the Orders
GET /orders/{id} - Returns the Order with a given id number
POST /orders - creates a new Order record in the database
PUT /orders/{id} - updates a Order record in the database
DELETE /orders/{id} - deletes a Order record in the database
"""
import sys
import logging
from flask import jsonify, request, json, url_for, make_response, abort
from flask_api import status # HTTP Status Codes
from werkzeug.exceptions import NotFound
from app.models import Order
from . import app
# Error handlers require app to be initialized so we must import
# them only after we have initialized the Flask app instance
import error_handlers
######################################################################
# GET HEALTH CHECK
######################################################################
@app.route('/healthcheck')
def healthcheck():
    """Health-check endpoint: report that the service is alive."""
    body = jsonify(status=200, message='Healthy')
    return make_response(body, status.HTTP_200_OK)
######################################################################
# GET INDEX
######################################################################
@app.route('/')
def index():
    """Serve the static UI page used for Selenium testing."""
    return app.send_static_file('index.html')
######################################################################
# LIST ALL ORDERS
######################################################################
@app.route('/orders', methods=['GET'])
def list_orders():
    """
    Retrieve a list of Orders
    This endpoint will return all Orders unless a query parameter is specified
    ---
    tags:
      - Orders
    description: The Orders endpoint allows you to query Orders
    parameters:
      - name: name
        in: query
        description: the customer of Order you are looking for
        required: false
        type: string
      - name: time
        in: query
        description: the time of Order you are looking for
        required: false
        type: string
      - name: status
        in: query
        description: the status of the order
        required: false
        type: boolean
    responses:
      200:
        description: An array of Orders
        schema:
          type: array
          items:
            schema:
              id: Order
              properties:
                id:
                  type: integer
                  description: unique id assigned internally by service
                name:
                  type: string
                  description: the customer's name
                time:
                  type: string
                  description: the time of an order placed
                status:
                  type: boolean
                  description: the status of the order
    """
    # "time" wins over "name" when both query parameters are supplied.
    time_filter = request.args.get('time')
    name_filter = request.args.get('name')
    if time_filter:
        orders = Order.find_by_time(time_filter)
    elif name_filter:
        orders = Order.find_by_name(name_filter)
    else:
        orders = Order.all()
    payload = [order.serialize() for order in orders]
    return make_response(jsonify(payload), status.HTTP_200_OK)
######################################################################
# RETRIEVE AN ORDER
######################################################################
@app.route('/orders/<int:id>', methods=['GET'])
def get_orders(id):
    """
    Retrieve a single Order
    This endpoint will return a Order based on it's id
    ---
    tags:
      - Orders
    produces:
      - application/json
    parameters:
      - name: id
        in: path
        description: ID of Order to retrieve
        type: integer
        required: true
    responses:
      200:
        description: Order returned
        schema:
          id: Order
          properties:
            id:
              type: integer
              description: unique id assigned internally by service
            name:
              type: string
              description: the order's customer name
            time:
              type: string
              description: the time of order placed
            status:
              type: boolean
              description: the status of the order
      404:
        description: Order not found
    """
    order = Order.find(id)
    if order:
        return make_response(jsonify(order.serialize()), status.HTTP_200_OK)
    # Unknown id -> 404 via the NotFound handler.
    raise NotFound("Order with id '{}' was not found.".format(id))
######################################################################
# ADD A NEW ORDER
######################################################################
@app.route('/orders', methods=['POST'])
def create_orders():
    """
    Creates an Order
    This endpoint will create an Order based on the data in the body that is posted
    ---
    tags:
      - Orders
    consumes:
      - application/json
    produces:
      - application/json
    parameters:
      - in: body
        name: body
        required: true
        schema:
          id: data
          required:
            - name
            - time
            - status
          properties:
            name:
              type: string
              description: customer name for the order
            time:
              type: string
              description: the time of order placed
            status:
              type: boolean
              description: the status of the order
    responses:
      201:
        description: Order created
        schema:
          id: Order
          properties:
            id:
              type: integer
              description: unique id assigned internally by service
            name:
              type: string
              description: the Order's customer name
            time:
              type: string
              description: the time of order placed
            status:
              type: boolean
              description: the status of the order
      400:
        description: Bad Request (the posted data was not valid)
    """
    # Accept either a UI form post or a JSON API call.
    content_type = request.headers.get('Content-Type')
    if content_type == 'application/x-www-form-urlencoded':
        app.logger.info('Getting data from form submit')
        data = {
            'name': request.form['name'],
            'time': request.form['time'],
            'status': True,
        }
    else:
        app.logger.info('Getting data from API call')
        data = request.get_json()
    app.logger.info(data)
    order = Order()
    order.deserialize(data)
    order.save()
    location_url = url_for('get_orders', id=order.id, _external=True)
    response_body = order.serialize()
    return make_response(
        jsonify(response_body), status.HTTP_201_CREATED,
        {'Location': location_url}
    )
######################################################################
# UPDATE AN EXISTING ORDER
######################################################################
@app.route('/orders/<int:id>', methods=['PUT'])
def update_orders(id):
    """
    Update an Order
    This endpoint will update an Order based on the body that is posted
    ---
    tags:
      - Orders
    consumes:
      - application/json
    produces:
      - application/json
    parameters:
      - name: id
        in: path
        description: ID of order to retrieve
        type: integer
        required: true
      - in: body
        name: body
        schema:
          id: data
          required:
            - name
            - time
            - status
          properties:
            name:
              type: string
              description: name for the Order
            time:
              type: string
              description: the time of order placed
            status:
              type: boolean
              description: the status of the order
    responses:
      200:
        description: Order Updated
        schema:
          id: Order
          properties:
            id:
              type: integer
              description: unique id assigned internally by service
            name:
              type: string
              description: the order's customer name
            time:
              type: string
              description: the time of order placed
            status:
              type: boolean
              description: the status of the order
      400:
        description: Bad Request (the posted data was not valid)
    """
    # PUT requires a JSON body; anything else is a 415.
    check_content_type('application/json')
    order = Order.find(id)
    if not order:
        raise NotFound("Order with id '{}' was not found.".format(id))
    data = request.get_json()
    app.logger.info(data)
    order.deserialize(data)
    # Re-pin the id so the body cannot reassign the resource identity.
    order.id = id
    order.save()
    return make_response(jsonify(order.serialize()), status.HTTP_200_OK)
######################################################################
# DELETE AN ORDER
######################################################################
@app.route('/orders/<int:id>', methods=['DELETE'])
def delete_orders(id):
    """
    Delete an Order
    This endpoint will delete an Order based on the id specified in the path
    ---
    tags:
      - Orders
    description: Deletes an Order from the database
    parameters:
      - name: id
        in: path
        description: ID of order to delete
        type: integer
        required: true
    responses:
      204:
        description: Order deleted
    """
    # DELETE is idempotent: an unknown id still returns 204 No Content.
    order = Order.find(id)
    if order:
        order.delete()
    return make_response('', status.HTTP_204_NO_CONTENT)
######################################################################
# PURCHASE AN ORDER
######################################################################
@app.route('/orders/<int:id>/purchase', methods=['PUT'])
def purchase_orders(id):
    """ Purchasing an Order clears its status flag. """
    order = Order.find(id)
    # Guard clauses: unknown id -> 404, already-purchased order -> 400.
    if not order:
        abort(status.HTTP_404_NOT_FOUND,
              "Order with id '{}' was not found.".format(id))
    if not order.status:
        abort(status.HTTP_400_BAD_REQUEST,
              "Order with id '{}' is not status.".format(id))
    order.status = False
    order.save()
    return make_response(jsonify(order.serialize()), status.HTTP_200_OK)
######################################################################
# DELETE ALL ORDER DATA (for testing only)
######################################################################
@app.route('/orders/reset', methods=['DELETE'])
def orders_reset():
    """ Wipes every Order from the database (test support endpoint). """
    Order.remove_all()
    return make_response('', status.HTTP_204_NO_CONTENT)
######################################################################
#  U T I L I T Y   F U N C T I O N S
######################################################################
@app.before_first_request
def init_db(redis=None):
    """ Initialize the model; a Redis client may be injected for tests. """
    Order.init_db(redis)
# load sample data
def data_load(payload):
    """ Loads an Order into the database.

    payload is expected to carry 'name' and 'time' keys; the id of 0 is
    a placeholder replaced on save.
    """
    order = Order(0, payload['name'], payload['time'])
    order.save()
def data_reset():
    """ Removes all Orders from the database (test support helper). """
    Order.remove_all()
def check_content_type(content_type):
    """ Aborts with 415 unless the request carries the expected media type.

    :param content_type: the exact Content-Type header value required
    """
    # Use .get() rather than indexing: a request with no Content-Type
    # header at all would otherwise raise KeyError instead of a clean 415.
    request_type = request.headers.get('Content-Type')
    if request_type == content_type:
        return
    app.logger.error('Invalid Content-Type: %s', request_type)
    abort(status.HTTP_415_UNSUPPORTED_MEDIA_TYPE, 'Content-Type must be {}'.format(content_type))
#@app.before_first_request
def initialize_logging(log_level=logging.INFO):
    """ Initializes the default logging to STDOUT.

    :param log_level: the logging level applied to both the root config
        and the Flask app logger (default logging.INFO)
    """
    if not app.debug:
        # Fixed: 'print <expr>' is a Python-2-only statement; the call
        # form is valid under both Python 2 and 3.
        print('Setting up logging...')
        # Set up default logging for submodules to use STDOUT
        # datefmt='%m/%d/%Y %I:%M:%S %p'
        fmt = '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'
        logging.basicConfig(stream=sys.stdout, level=log_level, format=fmt)
        # Make a new log handler that uses STDOUT
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(logging.Formatter(fmt))
        handler.setLevel(log_level)
        # Remove the Flask default handlers and use our own
        handler_list = list(app.logger.handlers)
        for log_handler in handler_list:
            app.logger.removeHandler(log_handler)
        app.logger.addHandler(handler)
        app.logger.setLevel(log_level)
        app.logger.info('Logging handler established')
| |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mention memory encoder."""
import copy
import os
from absl.testing import absltest
from absl.testing import parameterized
from flax import jax_utils
import jax
import jax.numpy as jnp
from language.mentionmemory.encoders import mention_memory_encoder # pylint: disable=unused-import
from language.mentionmemory.tasks import mention_memory_task
from language.mentionmemory.utils import checkpoint_utils
from language.mentionmemory.utils import test_utils
import ml_collections
import numpy as np
# easiest to define as constant here
# NOTE(review): presumably the token width of a mention span — confirm
# against the data pipeline that produces mention positions.
MENTION_SIZE = 2
class MentionMemoryEncoderTest(parameterized.TestCase):
  """Tests for mention memory encoder."""

  # Deliberately tiny hyperparameters so the test runs quickly on CPU.
  encoder_config = {
      'dtype': 'float32',
      'vocab_size': 1000,
      'memory_key_dim': 4,
      'memory_value_dim': 4,
      'separate_memory_values': False,
      'memory_update_type': 'additive',
      'memory_update_config': {},
      'same_passage_memory_policy': 'disallow',
      'k_top_device': 2,
      'rows': 4,
      'splits': 2,
      'max_length': 128,
      'max_positions': 128,
      'hidden_size': 4,
      'intermediate_dim': 8,
      'num_attention_heads': 2,
      'num_initial_layers': 1,
      'num_final_layers': 1,
      'dropout_rate': 0.1,
      'n_memory_text_entities': 2,
      'final_k_top_device': 2,
      'final_splits': 2,
  }

  model_config = {
      'encoder_config': encoder_config,
  }

  config = {
      'model_config': model_config,
      'seed': 0,
      'per_device_batch_size': 2,
      'samples_per_example': 1,
      'mask_rate': 0.2,
      'mention_mask_rate': 0.2,
      'mlm_weight': 0.5,
      'el_im_weight': 0.25,
      'coref_res_weight': 0.25,
      'max_mention_targets': 5,
      'max_mlm_targets': 25,
      'max_mentions': 10,
      # Enable same-entity-set-retrieval loss so `memory_text_entities`
      # will be loaded.
      'same_entity_set_retrieval_weight': 0.1,
  }

  n_devices = 4
  table_size = 1024

  text_length = 100
  n_mentions = 5
  n_linked_mentions = 3

  @parameterized.parameters(
      {},
      {'separate_memory_values': True},
      {'num_intermediate_layers': 1},
  )
  def test_model_shape(
      self,
      separate_memory_values=False,
      num_intermediate_layers=None,
  ):
    """Test loss function runs and produces expected values."""
    config = copy.deepcopy(self.config)
    config['model_config']['encoder_config'][
        'separate_memory_values'] = separate_memory_values
    config['model_config']['encoder_config'][
        'num_intermediate_layers'] = num_intermediate_layers
    config = ml_collections.FrozenConfigDict(config)

    model_config = config.model_config
    encoder_config = model_config.encoder_config
    rows = encoder_config.rows
    preprocess_fn = mention_memory_task.MentionMemoryTask.make_preprocess_fn(config)  # pylint: disable=line-too-long
    collater_fn = mention_memory_task.MentionMemoryTask.make_collater_fn(config)

    test_utils.force_multi_devices(self.n_devices)
    devices = jax.local_devices()

    model = mention_memory_encoder.MentionMemoryEncoder(**encoder_config)
    dummy_input = mention_memory_task.MentionMemoryTask.dummy_input(config)
    dummy_input = jax.device_put_replicated(dummy_input, devices)
    init_rng = jax.random.PRNGKey(0)
    split_rng = jax.random.split(init_rng, self.n_devices)

    # Memory table is laid out as (rows, cols, dim) per device.
    memory_table = np.random.rand(rows, self.table_size // rows,
                                  encoder_config.memory_key_dim)
    memory_keys = jax.device_put_replicated(memory_table, devices)
    memory_values = memory_table.reshape(-1, encoder_config.memory_key_dim)
    memory_values = jax.device_put_replicated(memory_values, devices)
    memory_identifiers = np.arange(self.table_size)
    memory_identifiers = jax.device_put_replicated(memory_identifiers, devices)
    memory_entity_ids = memory_identifiers
    memory_text_entities = np.zeros(
        (self.table_size, encoder_config.n_memory_text_entities),
        dtype=np.int32)
    memory_text_entities = jax.device_put_replicated(memory_text_entities,
                                                     devices)

    def model_init(*args, **kwargs):
      return model.init(*args, method=model.forward, **kwargs)

    initial_variables = jax.pmap(
        model_init, 'batch', static_broadcasted_argnums=2)(
            split_rng,
            dummy_input,
            True,
        )
    initial_variables = {'params': initial_variables['params']}
    initial_variables['constants'] = {
        'memory_keys': memory_keys,
        'memory_values': memory_values,
        'memory_identifiers': memory_identifiers,
        'memory_entity_ids': memory_entity_ids,
        'memory_text_entities': memory_text_entities,
    }

    raw_example = test_utils.gen_mention_pretraining_sample(
        self.text_length,
        self.n_mentions,
        self.n_linked_mentions,
        max_length=encoder_config.max_length)
    processed_example = preprocess_fn(raw_example)
    batch = {
        key: np.tile(value, (config.per_device_batch_size, 1))
        for key, value in processed_example.items()
    }
    batch = collater_fn(batch)
    batch = {
        key: test_utils.tensor_to_numpy(value) for key, value in batch.items()
    }
    batch = {
        key: jax.device_put_replicated(value, devices)
        for key, value in batch.items()
    }

    def model_apply(*args, **kwargs):
      return model.apply(*args, method=model.forward, **kwargs)

    papply = jax.pmap(model_apply, 'batch', static_broadcasted_argnums=(2))
    encoded_output, loss_helpers, _ = papply(
        {
            'params': initial_variables['params'],
            'constants': initial_variables['constants'],
        },
        batch,
        True,
    )

    self.assertEqual(encoded_output.shape,
                     (self.n_devices, config.per_device_batch_size,
                      encoder_config.max_length, encoder_config.hidden_size))

    memory_value_dim = encoder_config.memory_value_dim
    memory_key_dim = encoder_config.memory_key_dim
    memory_size = memory_value_dim if memory_value_dim else memory_key_dim
    self.assertEqual(loss_helpers['target_mention_encodings'].shape,
                     (self.n_devices, config.max_mention_targets *
                      config.per_device_batch_size, memory_size))

  @parameterized.parameters(
      {},
      {'separate_memory_values': True},
      {'memory_only': True},
  )
  def test_load_weights(self, separate_memory_values=False, memory_only=False):
    """Test saving and loading model recovers original parameters."""
    config = copy.deepcopy(self.config)
    config['model_config']['encoder_config'][
        'separate_memory_values'] = separate_memory_values
    config = ml_collections.ConfigDict(config)

    model_config = config.model_config
    encoder_config = model_config.encoder_config
    rows = encoder_config.rows

    test_utils.force_multi_devices(self.n_devices)
    devices = jax.local_devices()

    model = mention_memory_encoder.MentionMemoryEncoder(**encoder_config)
    dummy_input = mention_memory_task.MentionMemoryTask.dummy_input(config)
    dummy_input = jax.device_put_replicated(dummy_input, devices)
    init_rng = jax.random.PRNGKey(0)
    split_rng = jax.random.split(init_rng, self.n_devices)
    memory_table = np.random.rand(rows, self.table_size // rows,
                                  encoder_config.memory_key_dim)
    memory_keys = jax.device_put_replicated(memory_table, devices)
    memory_values = memory_table.reshape(-1, encoder_config.memory_key_dim)
    memory_values = jax.device_put_replicated(memory_values, devices)
    memory_identifiers = np.arange(self.table_size)
    memory_identifiers = jax.device_put_replicated(memory_identifiers, devices)
    memory_entity_ids = memory_identifiers
    memory_text_entities = np.zeros(
        (self.table_size, encoder_config.n_memory_text_entities),
        dtype=np.int32)
    memory_text_entities = jax.device_put_replicated(memory_text_entities,
                                                     devices)

    def model_init(*args, **kwargs):
      return model.init(*args, method=model.forward, **kwargs)

    initial_variables = jax.pmap(
        model_init, 'batch', static_broadcasted_argnums=2)(
            split_rng,
            dummy_input,
            True,
        )
    initial_variables = {'params': initial_variables['params']}
    initial_variables['constants'] = {
        'memory_keys': memory_keys,
        'memory_values': memory_values,
        'memory_identifiers': memory_identifiers,
        'memory_entity_ids': memory_entity_ids,
        'memory_text_entities': memory_text_entities,
    }

    n_shards = 4

    tempdir_obj = self.create_tempdir()
    tempdir = tempdir_obj.full_path

    memory_key_base = os.path.join(tempdir, 'memory_keys')
    memory_value_base = os.path.join(tempdir, 'memory_values')
    memory_id_base = os.path.join(tempdir, 'memory_id')
    memory_entity_id_base = os.path.join(tempdir, 'memory_entity_id')
    memory_text_entities_base = os.path.join(tempdir, 'memory_text_entities')

    unreplicated_variables = jax_utils.unreplicate(initial_variables)
    unreplicated_variables['params'] = unreplicated_variables[
        'params'].unfreeze()

    if memory_only:
      load_weights = 'memory_only'
    else:
      load_weights = os.path.join(tempdir, 'weights')
      checkpoint_utils.save_weights(load_weights,
                                    unreplicated_variables['params'])

    memory_keys = initial_variables['constants']['memory_keys']
    memory_keys = memory_keys.reshape(n_shards, -1,
                                      encoder_config.memory_key_dim)
    memory_values = initial_variables['constants']['memory_values']
    memory_values = memory_values.reshape(n_shards, -1,
                                          encoder_config.memory_key_dim)
    memory_ids = initial_variables['constants']['memory_identifiers'].reshape(
        n_shards, -1)
    memory_entity_ids = initial_variables['constants'][
        'memory_entity_ids'].reshape(n_shards, -1)
    memory_text_entities = initial_variables['constants'][
        'memory_text_entities'].reshape(n_shards, -1,
                                        encoder_config.n_memory_text_entities)

    for shard in range(n_shards):
      np.save(memory_key_base + str(shard), memory_keys[shard])
      np.save(memory_value_base + str(shard), memory_values[shard])
      np.save(memory_id_base + str(shard), memory_ids[shard])
      # Fixed: the original saved the entity-id shard twice in a row.
      np.save(memory_entity_id_base + str(shard), memory_entity_ids[shard])
      np.save(memory_text_entities_base + str(shard),
              memory_text_entities[shard])

    config.memory_key_pattern = memory_key_base + '*'
    config.memory_value_pattern = memory_value_base + '*'
    config.memory_id_pattern = memory_id_base + '*'
    config.memory_entity_id_pattern = memory_entity_id_base + '*'
    config.memory_text_entities_pattern = memory_text_entities_base + '*'
    config.load_weights = load_weights

    loaded_variables = mention_memory_encoder.MentionMemoryEncoder.load_weights(
        config)

    arrayeq = lambda x, y: jnp.all(x == y)
    # `memory_values` is only loaded separately when separate_memory_values
    # is set, so drop it from the comparison otherwise.
    constants = {
        key: value
        for key, value in initial_variables['constants'].items()
        if not (key == 'memory_values' and not separate_memory_values)
    }
    comparison_variables = {'constants': constants}
    if not memory_only:
      comparison_variables['params'] = initial_variables['params'].unfreeze()

    # Fixed: assertTrue on the raw tree (a non-empty dict) was vacuously
    # true; reduce the per-leaf comparisons to a single boolean first.
    self.assertTrue(
        jax.tree_util.tree_all(
            jax.tree_map(arrayeq, loaded_variables, comparison_variables)))
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
| |
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import routes
import six
from six.moves import range
import webob.dec
from nova.api import auth as api_auth
from nova.api import openstack as openstack_api
from nova.api.openstack import api_version_request as api_version
from nova.api.openstack import compute
from nova.api.openstack.compute import versions
from nova.api.openstack import urlmap
from nova.api.openstack import wsgi as os_wsgi
from nova.api import wsgi
from nova.compute import flavors
from nova.compute import vm_states
import nova.conf
from nova import context
from nova.db.sqlalchemy import models
from nova import exception as exc
from nova.network.security_group import security_group_base
from nova import objects
from nova.objects import base
from nova import quota
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_network
from nova.tests.unit.objects import test_keypair
from nova import utils
CONF = nova.conf.CONF
QUOTAS = quota.QUOTAS

# Well-known identifiers shared by many API unit tests.
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_PROJECT_ID = '6a6a9c9eee154e9cb8cec487b98d36ab'
FAKE_USER_ID = '5fae60f5cf4642609ddd31f71748beac'
# Cache for get_fake_uuid(): maps an integer token to a generated uuid.
FAKE_UUIDS = {}
@webob.dec.wsgify
def fake_wsgi(self, req):
    """WSGI pass-through: hand every request to the wrapped application."""
    return self.application
def wsgi_app_v21(fake_auth_context=None, v2_compatible=False,
                 custom_routes=None):
    """Build a v2.1 compute API WSGI app wired up for tests.

    :param fake_auth_context: RequestContext to inject; a default fake
        admin-token context is created when None
    :param v2_compatible: wrap the router in the legacy-v2 compatibility
        layer
    :param custom_routes: extra routes passed to APIRouterV21
    :returns: a URLMap serving the app at /v2, /v2.1 and versions at /
    """
    inner_app_v21 = compute.APIRouterV21(custom_routes=custom_routes)

    if v2_compatible:
        inner_app_v21 = openstack_api.LegacyV2CompatibleWrapper(inner_app_v21)

    if fake_auth_context is not None:
        ctxt = fake_auth_context
    else:
        ctxt = context.RequestContext('fake', 'fake', auth_token=True)
    api_v21 = openstack_api.FaultWrapper(
        api_auth.InjectContext(ctxt, inner_app_v21))
    mapper = urlmap.URLMap()
    # Both legacy /v2 and /v2.1 serve the same v2.1 router.
    mapper['/v2'] = api_v21
    mapper['/v2.1'] = api_v21
    mapper['/'] = openstack_api.FaultWrapper(versions.Versions())
    return mapper
def stub_out_key_pair_funcs(testcase, have_key_pair=True, **kwargs):
    """Stub the keypair DB API.

    With have_key_pair=True the user appears to own one keypair named
    'key'; otherwise the keypair list is empty. Extra kwargs are merged
    into the fake keypair dicts.
    """
    def key_pair(context, user_id):
        return [dict(test_keypair.fake_keypair,
                     name='key', public_key='public_key', **kwargs)]

    def one_key_pair(context, user_id, name):
        # Only 'key' and 'new-key' resolve; anything else is a miss.
        if name in ['key', 'new-key']:
            return dict(test_keypair.fake_keypair,
                        name=name, public_key='public_key', **kwargs)
        else:
            raise exc.KeypairNotFound(user_id=user_id, name=name)

    def no_key_pair(context, user_id):
        return []

    if have_key_pair:
        testcase.stub_out('nova.db.api.key_pair_get_all_by_user', key_pair)
        testcase.stub_out('nova.db.api.key_pair_get', one_key_pair)
    else:
        testcase.stub_out('nova.db.api.key_pair_get_all_by_user', no_key_pair)
def stub_out_trusted_certs(test, certs=None):
    """Stub trusted-cert lookups to return the given certificate ids.

    :param test: test case providing ``stub_out``
    :param certs: list of certificate ids; defaults to an empty list
    """
    trusted_certs = certs if certs else []

    def fake_trusted_certs(cls, context, instance_uuid):
        return objects.TrustedCerts(ids=trusted_certs)

    def fake_instance_extra(context, instance_uuid, columns):
        # Fixed: the original used ``columns is ['trusted_certs']`` —
        # identity comparison with a fresh list literal is never true,
        # so the short-form branch was unreachable. Compare by value.
        if columns == ['trusted_certs']:
            return {'trusted_certs': trusted_certs}
        else:
            return {'numa_topology': None,
                    'pci_requests': None,
                    'flavor': None,
                    'vcpu_model': None,
                    'trusted_certs': trusted_certs,
                    'migration_context': None}

    test.stub_out('nova.objects.TrustedCerts.get_by_instance_uuid',
                  fake_trusted_certs)
    test.stub_out('nova.db.instance_extra_get_by_instance_uuid',
                  fake_instance_extra)
def stub_out_instance_quota(test, allowed, quota, resource='instances'):
    """Stub QUOTAS.reserve so requests above ``allowed`` raise OverQuota.

    :param allowed: number of ``resource`` units the fake reservation
        permits before failing
    :param quota: quota limit reported in the raised OverQuota
    :param resource: which resource the limit applies to
    """
    def fake_reserve(context, **deltas):
        requested = deltas.pop(resource, 0)
        if requested > allowed:
            quotas = dict(instances=1, cores=1, ram=1)
            quotas[resource] = quota
            usages = dict(instances=dict(in_use=0, reserved=0),
                          cores=dict(in_use=0, reserved=0),
                          ram=dict(in_use=0, reserved=0))
            # Fabricate usage numbers consistent with the failure: 90% of
            # the quota counted as in_use (less what was allowed), 10%
            # reserved.
            usages[resource]['in_use'] = (quotas[resource] * 9 // 10 - allowed)
            usages[resource]['reserved'] = quotas[resource] // 10
            raise exc.OverQuota(overs=[resource], quotas=quotas,
                                usages=usages)
    test.stub_out('nova.quota.QUOTAS.reserve', fake_reserve)
def stub_out_networking(test):
    """Pin the local IPv4 address lookup to the loopback address."""
    test.stub_out('oslo_utils.netutils.get_my_ipv4',
                  lambda: '127.0.0.1')
def stub_out_compute_api_snapshot(test):
    """Replace compute API snapshot() with a canned-response fake."""

    def snapshot(self, context, instance, name, extra_properties=None):
        # Mirror glance's rejection of image names that are too long.
        if len(name) > 256:
            raise exc.Invalid
        return {'id': '123', 'status': 'ACTIVE', 'name': name,
                'properties': extra_properties}

    test.stub_out('nova.compute.api.API.snapshot', snapshot)
class stub_out_compute_api_backup(object):
    """Capture compute API backup() calls and return a canned response."""

    def __init__(self, test):
        # Remembers the extra_properties of the most recent backup() call.
        self.extra_props_last_call = None
        test.stub_out('nova.compute.api.API.backup', self.backup)

    def backup(self, context, instance, name, backup_type, rotation,
               extra_properties=None):
        self.extra_props_last_call = extra_properties
        properties = {'backup_type': backup_type, 'rotation': rotation}
        if extra_properties:
            properties.update(extra_properties)
        return {'id': '123', 'status': 'ACTIVE', 'name': name,
                'properties': properties}
def stub_out_nw_api_get_instance_nw_info(test, num_networks=1, func=None):
    """Delegate to fake_network's nw-info stub.

    NOTE(review): num_networks and func are accepted for caller
    compatibility but ignored here — confirm callers don't rely on them.
    """
    fake_network.stub_out_nw_api_get_instance_nw_info(test)
def stub_out_nw_api(test, cls=None, private=None, publics=None):
    """Replace the network API (neutron or nova-net) with a no-op fake.

    :param cls: alternative fake class; defaults to the inline Fake
    :param private: unused fixed IP default kept for caller compatibility
    :param publics: floating IPs the fake reports for any fixed address
    """
    if not private:
        private = '192.168.0.3'
    if not publics:
        publics = ['1.2.3.4']

    class Fake(object):
        def __init__(self):
            pass

        def get_instance_nw_info(*args, **kwargs):
            pass

        def get_floating_ips_by_fixed_address(*args, **kwargs):
            return publics

        def validate_networks(self, context, networks, max_count):
            # Pretend every requested count is valid.
            return max_count

        def create_resource_requests(self, context, requested_networks,
                                     pci_requests):
            pass

    if cls is None:
        cls = Fake
    # Patch whichever backend the deployment is configured for.
    if CONF.use_neutron:
        test.stub_out('nova.network.neutronv2.api.API', cls)
    else:
        test.stub_out('nova.network.api.API', cls)
    fake_network.stub_out_nw_api_get_instance_nw_info(test)
def stub_out_secgroup_api(test, security_groups=None):
    """Stub the security group API for both nova-network and neutron.

    :param security_groups: list returned for per-instance lookups;
        defaults to an empty list
    """
    class FakeSecurityGroupAPI(security_group_base.SecurityGroupBase):
        """This handles both nova-network and neutron style security group APIs
        """
        def get_instances_security_groups_bindings(
                self, context, servers, detailed=False):
            # This method shouldn't be called unless using neutron.
            if not CONF.use_neutron:
                raise Exception('Invalid security group API call for nova-net')
            instances_security_group_bindings = {}
            if servers:
                # Every server maps to an empty binding list.
                instances_security_group_bindings = {
                    server['id']: [] for server in servers
                }
            return instances_security_group_bindings

        def get_instance_security_groups(
                self, context, instance, detailed=False):
            return security_groups if security_groups is not None else []

    if CONF.use_neutron:
        test.stub_out(
            'nova.network.security_group.neutron_driver.SecurityGroupAPI',
            FakeSecurityGroupAPI)
    else:
        test.stub_out(
            'nova.compute.api.SecurityGroupAPI', FakeSecurityGroupAPI)
class FakeToken(object):
    """Dict-accessible token record; ids come from a class-level counter."""

    id_count = 0

    def __getitem__(self, key):
        # Allow token['field'] as an alias for token.field.
        return getattr(self, key)

    def __init__(self, **kwargs):
        FakeToken.id_count += 1
        self.id = FakeToken.id_count
        for name, value in kwargs.items():
            setattr(self, name, value)
class FakeRequestContext(context.RequestContext):
    """RequestContext that always carries an auth token."""

    def __init__(self, *args, **kwargs):
        # Supply a default token only when the caller didn't pass one.
        kwargs.setdefault('auth_token', 'fake_auth_token')
        super(FakeRequestContext, self).__init__(*args, **kwargs)
class HTTPRequest(os_wsgi.Request):
    """Request factory pre-populated with a fake nova context."""

    @classmethod
    def blank(cls, *args, **kwargs):
        # Pop the test-only knobs before delegating to the real blank().
        defaults = {'base_url': 'http://localhost/v2'}
        use_admin_context = kwargs.pop('use_admin_context', False)
        project_id = kwargs.pop('project_id', 'fake')
        version = kwargs.pop('version', os_wsgi.DEFAULT_API_VERSION)
        defaults.update(kwargs)
        out = super(HTTPRequest, cls).blank(*args, **defaults)
        out.environ['nova.context'] = FakeRequestContext(
            user_id='fake_user',
            project_id=project_id,
            is_admin=use_admin_context)
        out.api_version_request = api_version.APIVersionRequest(version)
        return out
class HTTPRequestV21(HTTPRequest):
    """Alias kept so tests can be explicit about targeting the v2.1 API."""
    pass
class TestRouter(wsgi.Router):
    """Router exposing a single controller as the 'tests' resource."""

    def __init__(self, controller, mapper=None):
        if not mapper:
            mapper = routes.Mapper()
        mapper.resource("test", "tests",
                        controller=os_wsgi.Resource(controller))
        super(TestRouter, self).__init__(mapper)
class FakeAuthDatabase(object):
    """In-memory stand-in for the auth-token table.

    Each created token is stored in the class-level ``data`` dict under
    two keys: its token hash and ``'id_<n>'``.
    """

    data = {}

    @staticmethod
    def auth_token_get(context, token_hash):
        return FakeAuthDatabase.data.get(token_hash)

    @staticmethod
    def auth_token_create(context, token):
        fake_token = FakeToken(created_at=timeutils.utcnow(), **token)
        store = FakeAuthDatabase.data
        store[fake_token.token_hash] = fake_token
        store['id_%i' % fake_token.id] = fake_token
        return fake_token

    @staticmethod
    def auth_token_destroy(context, token_id):
        store = FakeAuthDatabase.data
        token = store.get('id_%i' % token_id)
        if token and token.token_hash in store:
            del store[token.token_hash]
            del store['id_%i' % token_id]
def create_info_cache(nw_cache):
    """Build an instance ``info_cache`` dict from a network-cache value.

    ``nw_cache`` may be None (a default single-NIC cache with one IPv4
    and one IPv6 address is fabricated), an already-serialized string,
    or a structure that gets JSON-encoded here.
    """
    if nw_cache is None:
        fixed = lambda ip: {'address': ip, 'type': 'fixed'}
        nw_cache = [
            {'address': 'aa:aa:aa:aa:aa:aa',
             'id': 1,
             'network': {'bridge': 'br0',
                         'id': 1,
                         'label': 'test1',
                         'subnets': [
                             {'cidr': '192.168.1.0/24',
                              'ips': [fixed('192.168.1.100')]},
                             {'cidr': 'b33f::/64',
                              'ips': [fixed('2001:db8:0:1::1')]}]}}]

    if not isinstance(nw_cache, six.string_types):
        nw_cache = jsonutils.dumps(nw_cache)

    return {
        "info_cache": {
            "network_info": nw_cache,
            "deleted": False,
            "created_at": None,
            "deleted_at": None,
            "updated_at": None,
        }
    }
def get_fake_uuid(token=0):
    """Return the uuid for *token*, generating and caching it on first use."""
    try:
        return FAKE_UUIDS[token]
    except KeyError:
        FAKE_UUIDS[token] = uuidutils.generate_uuid()
        return FAKE_UUIDS[token]
def fake_instance_get(**kwargs):
    """Return a DB-API stub that always yields one stub instance.

    kwargs are forwarded to stub_instance(); project_id defaults to
    'fake' unless overridden.
    """
    def _return_server(context, uuid, columns_to_join=None, use_slave=False):
        if 'project_id' not in kwargs:
            kwargs['project_id'] = 'fake'
        return stub_instance(1, **kwargs)
    return _return_server
def fake_compute_get(**kwargs):
    """Return a compute-API stub yielding one stub Instance object."""
    def _return_server_obj(context, uuid, expected_attrs=None):
        return stub_instance_obj(context, **kwargs)
    return _return_server_obj
def fake_actions_to_locked_server(self, context, instance, *args, **kwargs):
    """Stub that simulates acting on a locked server: always raises."""
    raise exc.InstanceIsLocked(instance_uuid=instance['uuid'])
def fake_instance_get_all_by_filters(num_servers=5, **kwargs):
    """Return a DB-API stub yielding ``num_servers`` stub instances.

    The stub honors ``marker`` (only instances *after* the marker uuid
    are returned; MarkerNotFound is raised if the marker never appears)
    and ``limit``. Remaining kwargs are forwarded to stub_instance().
    """
    def _return_servers(context, *args, **kwargs):
        servers_list = []
        marker = None
        limit = None
        found_marker = False

        if "marker" in kwargs:
            marker = kwargs["marker"]
        if "limit" in kwargs:
            limit = kwargs["limit"]

        # Strip DB-API-only keywords that stub_instance() doesn't accept.
        if 'columns_to_join' in kwargs:
            kwargs.pop('columns_to_join')

        if 'use_slave' in kwargs:
            kwargs.pop('use_slave')

        if 'sort_keys' in kwargs:
            kwargs.pop('sort_keys')

        if 'sort_dirs' in kwargs:
            kwargs.pop('sort_dirs')

        if 'cell_mappings' in kwargs:
            kwargs.pop('cell_mappings')

        for i in range(num_servers):
            uuid = get_fake_uuid(i)
            server = stub_instance(id=i + 1, uuid=uuid,
                                   **kwargs)
            servers_list.append(server)
            if marker is not None and uuid == marker:
                # Restart accumulation so only servers after the marker
                # end up in the result.
                found_marker = True
                servers_list = []
        if marker is not None and not found_marker:
            raise exc.MarkerNotFound(marker=marker)
        if limit is not None:
            servers_list = servers_list[:limit]
        return servers_list
    return _return_servers
def fake_compute_get_all(num_servers=5, **kwargs):
    """Return a compute-API stub yielding an InstanceList of stubs.

    NOTE(review): num_servers and the extra kwargs are not forwarded to
    the underlying fake_instance_get_all_by_filters() — confirm whether
    callers expect them to take effect.
    """
    def _return_servers_objs(context, search_opts=None, limit=None,
                             marker=None, expected_attrs=None, sort_keys=None,
                             sort_dirs=None):
        db_insts = fake_instance_get_all_by_filters()(None,
                                                      limit=limit,
                                                      marker=marker)
        expected = ['metadata', 'system_metadata', 'flavor',
                    'info_cache', 'security_groups']
        return base.obj_make_list(context, objects.InstanceList(),
                                  objects.Instance, db_insts,
                                  expected_attrs=expected)
    return _return_servers_objs
def stub_instance(id=1, user_id=None, project_id=None, host=None,
                  node=None, vm_state=None, task_state=None,
                  reservation_id="", uuid=FAKE_UUID, image_ref="10",
                  flavor_id="1", name=None, key_name='',
                  access_ipv4=None, access_ipv6=None, progress=0,
                  auto_disk_config=False, display_name=None,
                  display_description=None,
                  include_fake_metadata=True, config_drive=None,
                  power_state=None, nw_cache=None, metadata=None,
                  security_groups=None, root_device_name=None,
                  limit=None, marker=None,
                  launched_at=timeutils.utcnow(),
                  terminated_at=timeutils.utcnow(),
                  availability_zone='', locked_by=None, cleaned=False,
                  memory_mb=0, vcpus=0, root_gb=0, ephemeral_gb=0,
                  instance_type=None, launch_index=0, kernel_id="",
                  ramdisk_id="", user_data=None, system_metadata=None,
                  services=None, trusted_certs=None):
    """Build a fake DB-layer instance dict for API tests.

    Nearly every column can be overridden through a keyword argument;
    the defaults describe a minimal ACTIVE instance owned by
    'fake_user'/'fake_project'.

    NOTE(review): the launched_at/terminated_at defaults call utcnow()
    at import time, so every stub shares one fixed timestamp — this
    appears deliberate for deterministic tests; confirm before changing.
    """
    if user_id is None:
        user_id = 'fake_user'
    if project_id is None:
        project_id = 'fake_project'

    if metadata:
        metadata = [{'key': k, 'value': v} for k, v in metadata.items()]
    elif include_fake_metadata:
        metadata = [models.InstanceMetadata(key='seq', value=str(id))]
    else:
        metadata = []

    inst_type = flavors.get_flavor_by_flavor_id(int(flavor_id))
    sys_meta = flavors.save_flavor_info({}, inst_type)
    sys_meta.update(system_metadata or {})

    if host is not None:
        host = str(host)

    if key_name:
        key_data = 'FAKE'
    else:
        key_data = ''

    if security_groups is None:
        security_groups = [{"id": 1, "name": "test", "description": "Foo:",
                            "project_id": "project", "user_id": "user",
                            "created_at": None, "updated_at": None,
                            "deleted_at": None, "deleted": False}]

    # ReservationID isn't sent back, hack it in there.
    server_name = name or "server%s" % id
    if reservation_id != "":
        server_name = "reservation_%s" % (reservation_id, )

    info_cache = create_info_cache(nw_cache)

    if instance_type is None:
        instance_type = flavors.get_default_flavor()
    flavorinfo = jsonutils.dumps({
        'cur': instance_type.obj_to_primitive(),
        'old': None,
        'new': None,
    })

    instance = {
        "id": int(id),
        "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
        "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
        "deleted_at": datetime.datetime(2010, 12, 12, 10, 0, 0),
        "deleted": None,
        "user_id": user_id,
        "project_id": project_id,
        "image_ref": image_ref,
        "kernel_id": kernel_id,
        "ramdisk_id": ramdisk_id,
        "launch_index": launch_index,
        "key_name": key_name,
        "key_data": key_data,
        "config_drive": config_drive,
        "vm_state": vm_state or vm_states.ACTIVE,
        "task_state": task_state,
        "power_state": power_state,
        "memory_mb": memory_mb,
        "vcpus": vcpus,
        "root_gb": root_gb,
        "ephemeral_gb": ephemeral_gb,
        "ephemeral_key_uuid": None,
        "hostname": display_name or server_name,
        "host": host,
        "node": node,
        "instance_type_id": 1,
        "instance_type": inst_type,
        "user_data": user_data,
        "reservation_id": reservation_id,
        "mac_address": "",
        "launched_at": launched_at,
        "terminated_at": terminated_at,
        "availability_zone": availability_zone,
        "display_name": display_name or server_name,
        "display_description": display_description,
        "locked": locked_by is not None,
        "locked_by": locked_by,
        "metadata": metadata,
        "access_ip_v4": access_ipv4,
        "access_ip_v6": access_ipv6,
        "uuid": uuid,
        "progress": progress,
        "auto_disk_config": auto_disk_config,
        "name": "instance-%s" % id,
        "shutdown_terminate": True,
        "disable_terminate": False,
        "security_groups": security_groups,
        "root_device_name": root_device_name,
        "system_metadata": utils.dict_to_metadata(sys_meta),
        "pci_devices": [],
        "vm_mode": "",
        "default_swap_device": "",
        "default_ephemeral_device": "",
        "launched_on": "",
        "cell_name": "",
        "architecture": "",
        "os_type": "",
        "extra": {"numa_topology": None,
                  "pci_requests": None,
                  "flavor": flavorinfo,
                  "trusted_certs": trusted_certs,
                  },
        "cleaned": cleaned,
        "services": services,
        "tags": [],
    }

    instance.update(info_cache)
    instance['info_cache']['instance_uuid'] = instance['uuid']

    return instance
def stub_instance_obj(ctxt, *args, **kwargs):
    """Build an objects.Instance from the stub_instance() db-style dict.

    All positional/keyword args are forwarded to stub_instance().
    """
    db_inst = stub_instance(*args, **kwargs)
    attrs = ['metadata', 'system_metadata', 'flavor',
             'info_cache', 'security_groups', 'tags']
    inst = objects.Instance._from_db_object(
        ctxt, objects.Instance(), db_inst, expected_attrs=attrs)
    inst.fault = None
    services = db_inst["services"]
    if services is not None:
        # This ensures services there if one wanted so
        inst.services = services
    return inst
def stub_volume(id, **kwargs):
    """Return a fake volume dict; any default field can be overridden
    via keyword arguments."""
    volume = dict(
        id=id,
        user_id='fakeuser',
        project_id='fakeproject',
        host='fakehost',
        size=1,
        availability_zone='fakeaz',
        status='fakestatus',
        attach_status='attached',
        name='vol name',
        display_name='displayname',
        display_description='displaydesc',
        created_at=datetime.datetime(1999, 1, 1, 1, 1, 1),
        snapshot_id=None,
        volume_type_id='fakevoltype',
        volume_metadata=[],
        volume_type={'name': 'vol_type_name'},
        multiattach=False,
        attachments={
            'fakeuuid': {'mountpoint': '/'},
            'fakeuuid2': {'mountpoint': '/dev/sdb'},
        },
    )
    volume.update(kwargs)
    return volume
def stub_volume_create(self, context, size, name, description, snapshot,
                       **param):
    """Fake volume-create API: returns stub volume '1' carrying the
    requested size/name/description and optional snapshot id."""
    vol = stub_volume('1')
    vol.update({
        'size': size,
        'display_name': name,
        'display_description': description,
    })
    # snapshot may be None or a dict without an 'id' key; both mean
    # "no snapshot".
    try:
        snapshot_id = snapshot['id']
    except (KeyError, TypeError):
        snapshot_id = None
    vol['snapshot_id'] = snapshot_id
    vol['availability_zone'] = param.get('availability_zone', 'fakeaz')
    return vol
def stub_volume_update(self, context, *args, **param):
    """No-op stand-in for the volume-update API."""
    return None
def stub_volume_get(self, context, volume_id):
    """Fake volume lookup: always succeeds with a stub volume dict."""
    vol = stub_volume(volume_id)
    return vol
def stub_volume_get_all(context, search_opts=None):
    """Fake volume listing spanning several projects (ids 100-102)."""
    projects = ('fake', 'superfake', 'superduperfake')
    return [stub_volume(100 + idx, project_id=proj)
            for idx, proj in enumerate(projects)]
def stub_volume_check_attach(self, context, *args, **param):
    """No-op stand-in for the volume attach pre-check."""
    return None
def stub_snapshot(id, **kwargs):
    """Return a fake snapshot dict; defaults can be overridden via kwargs."""
    snapshot = dict(
        id=id,
        volume_id=12,
        status='available',
        volume_size=100,
        created_at=timeutils.utcnow(),
        display_name='Default name',
        display_description='Default description',
        project_id='fake',
    )
    snapshot.update(kwargs)
    return snapshot
def stub_snapshot_create(self, context, volume_id, name, description):
    """Fake snapshot create: returns stub snapshot 100 with the
    requested volume id, name and description."""
    overrides = dict(volume_id=volume_id,
                     display_name=name,
                     display_description=description)
    return stub_snapshot(100, **overrides)
def stub_compute_volume_snapshot_create(self, context, volume_id, create_info):
    """Fake compute-side snapshot create with a fixed snapshot id."""
    snapshot = {'id': "421752a6-acf6-4b2d-bc7a-119f9148cd8c",
                'volumeId': volume_id}
    return {'snapshot': snapshot}
def stub_snapshot_delete(self, context, snapshot_id):
    """Fake snapshot delete; id '-1' simulates a missing snapshot."""
    if snapshot_id != '-1':
        return
    raise exc.SnapshotNotFound(snapshot_id=snapshot_id)
def stub_compute_volume_snapshot_delete(self, context, volume_id, snapshot_id,
                                        delete_info):
    """No-op stand-in for the compute-side snapshot delete."""
    return None
def stub_snapshot_get(self, context, snapshot_id):
    """Fake snapshot lookup; id '-1' simulates a missing snapshot."""
    if snapshot_id != '-1':
        return stub_snapshot(snapshot_id)
    raise exc.SnapshotNotFound(snapshot_id=snapshot_id)
def stub_snapshot_get_all(self, context):
    """Fake snapshot listing spanning several projects (ids 100-102)."""
    projects = ('fake', 'superfake', 'superduperfake')
    return [stub_snapshot(100 + idx, project_id=proj)
            for idx, proj in enumerate(projects)]
def stub_bdm_get_all_by_instance_uuids(context, instance_uuids,
                                       use_slave=False):
    """Fake BDM listing: two volume-backed BDMs per instance, with ids
    numbered sequentially from 1 across all instances."""
    result = []
    bdm_id = 1
    for instance_uuid in instance_uuids:
        # add two BDMs per instance
        for _ in (0, 1):
            result.append(fake_block_device.FakeDbBlockDeviceDict({
                'id': bdm_id,
                'source_type': 'volume',
                'destination_type': 'volume',
                'volume_id': 'volume_id%d' % (bdm_id),
                'instance_uuid': instance_uuid,
            }))
            bdm_id += 1
    return result
def fake_not_implemented(*args, **kwargs):
    """Stub that unconditionally raises NotImplementedError, regardless
    of the arguments it is called with."""
    raise NotImplementedError()
# Canned Flavor objects used by the flavor stubs below, keyed by the string
# flavorid.  Flavor '1' is enabled and carries extra specs; flavor '2' is
# disabled, has an empty extra_specs dict and a human-readable description.
FLAVORS = {
    '1': objects.Flavor(
        id=1,
        name='flavor 1',
        memory_mb=256,
        vcpus=1,
        root_gb=10,
        ephemeral_gb=20,
        flavorid='1',
        swap=10,
        rxtx_factor=1.0,
        vcpu_weight=None,
        disabled=False,
        is_public=True,
        description=None,
        extra_specs={"key1": "value1", "key2": "value2"}
    ),
    '2': objects.Flavor(
        id=2,
        name='flavor 2',
        memory_mb=512,
        vcpus=1,
        root_gb=20,
        ephemeral_gb=10,
        flavorid='2',
        swap=5,
        rxtx_factor=None,
        vcpu_weight=None,
        disabled=True,
        is_public=True,
        description='flavor 2 description',
        extra_specs={}
    ),
    }
def stub_out_flavor_get_by_flavor_id(test):
    """Stub nova.objects.Flavor.get_by_flavor_id to serve from FLAVORS."""
    @staticmethod
    def _fake_get(context, flavor_id, read_deleted=None):
        # An unknown flavor id surfaces as KeyError, just like a miss.
        return FLAVORS[flavor_id]
    test.stub_out('nova.objects.Flavor.get_by_flavor_id', _fake_get)
def stub_out_flavor_get_all(test):
    # Stub nova.objects.FlavorList.get_all with an in-memory implementation
    # backed by the module-level FLAVORS dict.  It reproduces the filtering
    # (min_memory_mb/min_root_gb), sorting and marker/limit pagination
    # behaviour the tests rely on.
    @staticmethod
    def fake_get_all(context, inactive=False, filters=None,
                     sort_key='flavorid', sort_dir='asc', limit=None,
                     marker=None):
        if marker in ['99999']:
            # Sentinel marker used by tests to exercise the not-found path.
            raise exc.MarkerNotFound(marker)
        def reject_min(db_attr, filter_attr):
            # NOTE: closes over 'flavor' (the loop variable below) and
            # 'filters'; True when the flavor's attribute is below the
            # requested minimum and the flavor must be skipped.
            return (filter_attr in filters and
                    getattr(flavor, db_attr) < int(filters[filter_attr]))
        filters = filters or {}
        res = []
        for flavor in FLAVORS.values():
            if reject_min('memory_mb', 'min_memory_mb'):
                continue
            elif reject_min('root_gb', 'min_root_gb'):
                continue
            res.append(flavor)
        res = sorted(res, key=lambda item: getattr(item, sort_key))
        output = []
        # With no marker, start emitting from the first flavor; otherwise
        # skip everything up to and including the marker flavorid.
        marker_found = True if marker is None else False
        for flavor in res:
            if not marker_found and marker == flavor.flavorid:
                marker_found = True
            elif marker_found:
                if limit is None or len(output) < int(limit):
                    output.append(flavor)
        return objects.FlavorList(objects=output)
    test.stub_out('nova.objects.FlavorList.get_all', fake_get_all)
| |
#
# Copyright 2014 Rackspace, Inc
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from oslo_config import cfg
import six
from ironic.common import pxe_utils
from ironic.conductor import task_manager
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.objects import utils as object_utils
CONF = cfg.CONF
class TestPXEUtils(db_base.DbTestCase):
    """Unit tests for the PXE/iPXE config helpers in ironic.common.pxe_utils."""

    def setUp(self):
        super(TestPXEUtils, self).setUp()
        mgr_utils.mock_the_extension_manager(driver="fake")
        # Template options shared by the PXE, iPXE and agent configs.
        common_pxe_options = {
            'deployment_aki_path': u'/tftpboot/1be26c0b-03f2-4d2e-ae87-'
                                   u'c02d7f33c123/deploy_kernel',
            'aki_path': u'/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/'
                        u'kernel',
            'pxe_append_params': 'test_param',
            'deployment_ari_path': u'/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7'
                                   u'f33c123/deploy_ramdisk',
            'root_device': 'vendor=fake,size=123',
            'ipa-api-url': 'http://192.168.122.184:6385',
        }
        self.pxe_options = {
            'deployment_key': '0123456789ABCDEFGHIJKLMNOPQRSTUV',
            'ari_path': u'/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/'
                        u'ramdisk',
            'iscsi_target_iqn': u'iqn-1be26c0b-03f2-4d2e-ae87-c02d7f33'
                                u'c123',
            'deployment_id': u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
            'ironic_api_url': 'http://192.168.122.184:6385',
            'disk': 'cciss/c0d0,sda,hda,vda',
            'boot_option': 'netboot',
            'ipa-driver-name': 'pxe_ssh',
        }
        self.pxe_options.update(common_pxe_options)
        self.pxe_options_bios = {
            'boot_mode': 'bios',
        }
        self.pxe_options_bios.update(self.pxe_options)
        self.pxe_options_uefi = {
            'boot_mode': 'uefi',
        }
        self.pxe_options_uefi.update(self.pxe_options)
        self.agent_pxe_options = {
            'ipa-driver-name': 'agent_ipmitool',
        }
        self.agent_pxe_options.update(common_pxe_options)
        # iPXE serves images over HTTP instead of TFTP.
        self.ipxe_options = self.pxe_options.copy()
        self.ipxe_options.update({
            'deployment_aki_path': 'http://1.2.3.4:1234/deploy_kernel',
            'deployment_ari_path': 'http://1.2.3.4:1234/deploy_ramdisk',
            'aki_path': 'http://1.2.3.4:1234/kernel',
            'ari_path': 'http://1.2.3.4:1234/ramdisk',
        })
        self.node = object_utils.create_test_node(self.context)

    def _read_expected_template(self, path):
        """Return the expected rendered template at ``path``, with the
        trailing newline stripped.

        Uses a context manager so the file handle is closed deterministically
        (the previous ``open(...).read()`` pattern leaked the handle).
        """
        with open(path) as template_file:
            return template_file.read().rstrip()

    def test__build_pxe_config(self):
        """BIOS options render the stock pxe_config template."""
        rendered_template = pxe_utils._build_pxe_config(
            self.pxe_options_bios, CONF.pxe.pxe_config_template,
            '{{ ROOT }}', '{{ DISK_IDENTIFIER }}')
        expected_template = self._read_expected_template(
            'ironic/tests/drivers/pxe_config.template')
        self.assertEqual(six.text_type(expected_template), rendered_template)

    def test__build_pxe_config_with_agent(self):
        """Agent options render the agent_pxe_config template."""
        rendered_template = pxe_utils._build_pxe_config(
            self.agent_pxe_options, CONF.agent.agent_pxe_config_template,
            '{{ ROOT }}', '{{ DISK_IDENTIFIER }}')
        expected_template = self._read_expected_template(
            'ironic/tests/drivers/agent_pxe_config.template')
        self.assertEqual(six.text_type(expected_template), rendered_template)

    def test__build_ipxe_config(self):
        # NOTE(lucasagomes): iPXE is just an extension of the PXE driver,
        # it doesn't have it's own configuration option for template.
        # More info:
        # http://docs.openstack.org/developer/ironic/deploy/install-guide.html
        self.config(
            pxe_config_template='ironic/drivers/modules/ipxe_config.template',
            group='pxe'
        )
        self.config(http_url='http://1.2.3.4:1234', group='deploy')
        rendered_template = pxe_utils._build_pxe_config(
            self.ipxe_options, CONF.pxe.pxe_config_template,
            '{{ ROOT }}', '{{ DISK_IDENTIFIER }}')
        expected_template = self._read_expected_template(
            'ironic/tests/drivers/ipxe_config.template')
        self.assertEqual(six.text_type(expected_template), rendered_template)

    def test__build_elilo_config(self):
        """UEFI options render the elilo template."""
        pxe_opts = self.pxe_options
        pxe_opts['boot_mode'] = 'uefi'
        rendered_template = pxe_utils._build_pxe_config(
            pxe_opts, CONF.pxe.uefi_pxe_config_template,
            '{{ ROOT }}', '{{ DISK_IDENTIFIER }}')
        expected_template = self._read_expected_template(
            'ironic/tests/drivers/elilo_efi_pxe_config.template')
        self.assertEqual(six.text_type(expected_template), rendered_template)

    def test__build_grub_config(self):
        """UEFI options render the GRUB template (note the (( )) markers)."""
        pxe_opts = self.pxe_options
        pxe_opts['boot_mode'] = 'uefi'
        pxe_opts['tftp_server'] = '192.0.2.1'
        grub_tmplte = "ironic/drivers/modules/pxe_grub_config.template"
        rendered_template = pxe_utils._build_pxe_config(
            pxe_opts, grub_tmplte, '(( ROOT ))', '(( DISK_IDENTIFIER ))')
        expected_template = self._read_expected_template(
            'ironic/tests/drivers/pxe_grub_config.template')
        self.assertEqual(six.text_type(expected_template), rendered_template)

    @mock.patch('ironic.common.utils.create_link_without_raise', autospec=True)
    @mock.patch('ironic.common.utils.unlink_without_raise', autospec=True)
    @mock.patch('ironic.drivers.utils.get_node_mac_addresses', autospec=True)
    def test__write_mac_pxe_configs(self, get_macs_mock, unlink_mock,
                                    create_link_mock):
        """Plain PXE creates one 01-<mac> link per port MAC."""
        macs = [
            '00:11:22:33:44:55:66',
            '00:11:22:33:44:55:67'
        ]
        get_macs_mock.return_value = macs
        create_link_calls = [
            mock.call(u'/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/tftpboot/pxelinux.cfg/01-00-11-22-33-44-55-66'),
            mock.call(u'/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/tftpboot/pxelinux.cfg/01-00-11-22-33-44-55-67')
        ]
        unlink_calls = [
            mock.call('/tftpboot/pxelinux.cfg/01-00-11-22-33-44-55-66'),
            mock.call('/tftpboot/pxelinux.cfg/01-00-11-22-33-44-55-67'),
        ]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            pxe_utils._link_mac_pxe_configs(task)
        unlink_mock.assert_has_calls(unlink_calls)
        create_link_mock.assert_has_calls(create_link_calls)

    @mock.patch('ironic.common.utils.create_link_without_raise', autospec=True)
    @mock.patch('ironic.common.utils.unlink_without_raise', autospec=True)
    @mock.patch('ironic.drivers.utils.get_node_mac_addresses', autospec=True)
    def test__write_mac_ipxe_configs(self, get_macs_mock, unlink_mock,
                                     create_link_mock):
        """iPXE creates both hyphenated and bare-hex links per MAC."""
        self.config(ipxe_enabled=True, group='pxe')
        macs = [
            '00:11:22:33:44:55:66',
            '00:11:22:33:44:55:67'
        ]
        get_macs_mock.return_value = macs
        create_link_calls = [
            mock.call(u'/httpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/httpboot/pxelinux.cfg/00-11-22-33-44-55-66'),
            mock.call(u'/httpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/httpboot/pxelinux.cfg/00112233445566'),
            mock.call(u'/httpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/httpboot/pxelinux.cfg/00-11-22-33-44-55-67'),
            mock.call(u'/httpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/httpboot/pxelinux.cfg/00112233445567'),
        ]
        unlink_calls = [
            mock.call('/httpboot/pxelinux.cfg/00-11-22-33-44-55-66'),
            mock.call('/httpboot/pxelinux.cfg/00112233445566'),
            mock.call('/httpboot/pxelinux.cfg/00-11-22-33-44-55-67'),
            mock.call('/httpboot/pxelinux.cfg/00112233445567'),
        ]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            pxe_utils._link_mac_pxe_configs(task)
        unlink_mock.assert_has_calls(unlink_calls)
        create_link_mock.assert_has_calls(create_link_calls)

    @mock.patch('ironic.common.utils.create_link_without_raise', autospec=True)
    @mock.patch('ironic.common.utils.unlink_without_raise', autospec=True)
    @mock.patch('ironic.common.dhcp_factory.DHCPFactory.provider',
                autospec=True)
    def test__link_ip_address_pxe_configs(self, provider_mock, unlink_mock,
                                          create_link_mock):
        """IP-address based linking creates a <ip>.conf symlink."""
        ip_address = '10.10.0.1'
        address = "aa:aa:aa:aa:aa:aa"
        object_utils.create_test_port(self.context, node_id=self.node.id,
                                      address=address)
        provider_mock.get_ip_addresses.return_value = [ip_address]
        create_link_calls = [
            mock.call(u'/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      u'/tftpboot/10.10.0.1.conf'),
        ]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            pxe_utils._link_ip_address_pxe_configs(task, False)
        unlink_mock.assert_called_once_with('/tftpboot/10.10.0.1.conf')
        create_link_mock.assert_has_calls(create_link_calls)

    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    @mock.patch.object(pxe_utils, '_build_pxe_config', autospec=True)
    @mock.patch('oslo_utils.fileutils.ensure_tree', autospec=True)
    def test_create_pxe_config(self, ensure_tree_mock, build_mock,
                               write_mock):
        """create_pxe_config builds, writes and places the BIOS config."""
        build_mock.return_value = self.pxe_options_bios
        with task_manager.acquire(self.context, self.node.uuid) as task:
            pxe_utils.create_pxe_config(task, self.pxe_options_bios,
                                        CONF.pxe.pxe_config_template)
            build_mock.assert_called_with(self.pxe_options_bios,
                                          CONF.pxe.pxe_config_template,
                                          '{{ ROOT }}',
                                          '{{ DISK_IDENTIFIER }}')
        ensure_calls = [
            mock.call(os.path.join(CONF.pxe.tftp_root, self.node.uuid)),
            mock.call(os.path.join(CONF.pxe.tftp_root, 'pxelinux.cfg'))
        ]
        ensure_tree_mock.assert_has_calls(ensure_calls)
        pxe_cfg_file_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
        write_mock.assert_called_with(pxe_cfg_file_path, self.pxe_options_bios)

    @mock.patch('ironic.common.pxe_utils._link_ip_address_pxe_configs',
                autospec=True)
    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    @mock.patch('ironic.common.pxe_utils._build_pxe_config', autospec=True)
    @mock.patch('oslo_utils.fileutils.ensure_tree', autospec=True)
    def test_create_pxe_config_uefi_elilo(self, ensure_tree_mock, build_mock,
                                          write_mock, link_ip_configs_mock):
        """UEFI + elilo template links by IP with hex-name variant enabled."""
        build_mock.return_value = self.pxe_options_uefi
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.properties['capabilities'] = 'boot_mode:uefi'
            pxe_utils.create_pxe_config(task, self.pxe_options_uefi,
                                        CONF.pxe.uefi_pxe_config_template)
            ensure_calls = [
                mock.call(os.path.join(CONF.pxe.tftp_root, self.node.uuid)),
                mock.call(os.path.join(CONF.pxe.tftp_root, 'pxelinux.cfg'))
            ]
            ensure_tree_mock.assert_has_calls(ensure_calls)
            build_mock.assert_called_with(self.pxe_options_uefi,
                                          CONF.pxe.uefi_pxe_config_template,
                                          '{{ ROOT }}',
                                          '{{ DISK_IDENTIFIER }}')
            link_ip_configs_mock.assert_called_once_with(task, True)
        pxe_cfg_file_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
        write_mock.assert_called_with(pxe_cfg_file_path, self.pxe_options_uefi)

    @mock.patch('ironic.common.pxe_utils._link_ip_address_pxe_configs',
                autospec=True)
    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    @mock.patch('ironic.common.pxe_utils._build_pxe_config', autospec=True)
    @mock.patch('oslo_utils.fileutils.ensure_tree', autospec=True)
    def test_create_pxe_config_uefi_grub(self, ensure_tree_mock, build_mock,
                                         write_mock, link_ip_configs_mock):
        """UEFI + GRUB template uses (( )) markers and non-hex IP links."""
        build_mock.return_value = self.pxe_options_uefi
        grub_tmplte = "ironic/drivers/modules/pxe_grub_config.template"
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.properties['capabilities'] = 'boot_mode:uefi'
            pxe_utils.create_pxe_config(task, self.pxe_options_uefi,
                                        grub_tmplte)
            ensure_calls = [
                mock.call(os.path.join(CONF.pxe.tftp_root, self.node.uuid)),
                mock.call(os.path.join(CONF.pxe.tftp_root, 'pxelinux.cfg'))
            ]
            ensure_tree_mock.assert_has_calls(ensure_calls)
            build_mock.assert_called_with(self.pxe_options_uefi,
                                          grub_tmplte,
                                          '(( ROOT ))',
                                          '(( DISK_IDENTIFIER ))')
            link_ip_configs_mock.assert_called_once_with(task, False)
        pxe_cfg_file_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
        write_mock.assert_called_with(pxe_cfg_file_path, self.pxe_options_uefi)

    @mock.patch('ironic.common.utils.rmtree_without_raise', autospec=True)
    @mock.patch('ironic.common.utils.unlink_without_raise', autospec=True)
    def test_clean_up_pxe_config(self, unlink_mock, rmtree_mock):
        """Cleanup removes the per-MAC link and the node's tftp dir."""
        address = "aa:aa:aa:aa:aa:aa"
        object_utils.create_test_port(self.context, node_id=self.node.id,
                                      address=address)
        with task_manager.acquire(self.context, self.node.uuid) as task:
            pxe_utils.clean_up_pxe_config(task)
        unlink_mock.assert_called_once_with("/tftpboot/pxelinux.cfg/01-%s"
                                            % address.replace(':', '-'))
        rmtree_mock.assert_called_once_with(
            os.path.join(CONF.pxe.tftp_root, self.node.uuid))

    def test__get_pxe_mac_path(self):
        mac = '00:11:22:33:44:55:66'
        self.assertEqual('/tftpboot/pxelinux.cfg/01-00-11-22-33-44-55-66',
                         pxe_utils._get_pxe_mac_path(mac))

    def test__get_pxe_mac_path_ipxe(self):
        """iPXE MAC paths are lowercased and drop the '01-' ARP prefix."""
        self.config(ipxe_enabled=True, group='pxe')
        self.config(http_root='/httpboot', group='deploy')
        mac = '00:11:22:33:AA:BB:CC'
        self.assertEqual('/httpboot/pxelinux.cfg/00-11-22-33-aa-bb-cc',
                         pxe_utils._get_pxe_mac_path(mac))

    def test__get_pxe_ip_address_path(self):
        ipaddress = '10.10.0.1'
        self.assertEqual('/tftpboot/10.10.0.1.conf',
                         pxe_utils._get_pxe_ip_address_path(ipaddress, False))

    def test_get_root_dir(self):
        expected_dir = '/tftproot'
        self.config(ipxe_enabled=False, group='pxe')
        self.config(tftp_root=expected_dir, group='pxe')
        self.assertEqual(expected_dir, pxe_utils.get_root_dir())

    def test_get_root_dir_ipxe(self):
        expected_dir = '/httpboot'
        self.config(ipxe_enabled=True, group='pxe')
        self.config(http_root=expected_dir, group='deploy')
        self.assertEqual(expected_dir, pxe_utils.get_root_dir())

    def test_get_pxe_config_file_path(self):
        self.assertEqual(os.path.join(CONF.pxe.tftp_root,
                                      self.node.uuid,
                                      'config'),
                         pxe_utils.get_pxe_config_file_path(self.node.uuid))

    def test_dhcp_options_for_instance(self):
        """Plain PXE DHCP options carry bootfile and tftp server info."""
        self.config(tftp_server='192.0.2.1', group='pxe')
        self.config(pxe_bootfile_name='fake-bootfile', group='pxe')
        expected_info = [{'opt_name': 'bootfile-name',
                          'opt_value': 'fake-bootfile'},
                         {'opt_name': 'server-ip-address',
                          'opt_value': '192.0.2.1'},
                         {'opt_name': 'tftp-server',
                          'opt_value': '192.0.2.1'}
                         ]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertEqual(expected_info,
                             pxe_utils.dhcp_options_for_instance(task))

    def _test_get_deploy_kr_info(self, expected_dir):
        """Common assertions for get_deploy_kr_info under a given root."""
        node_uuid = 'fake-node'
        driver_info = {
            'deploy_kernel': 'glance://deploy-kernel',
            'deploy_ramdisk': 'glance://deploy-ramdisk',
        }
        expected = {
            'deploy_kernel': ('glance://deploy-kernel',
                              expected_dir + '/fake-node/deploy_kernel'),
            'deploy_ramdisk': ('glance://deploy-ramdisk',
                               expected_dir + '/fake-node/deploy_ramdisk'),
        }
        kr_info = pxe_utils.get_deploy_kr_info(node_uuid, driver_info)
        self.assertEqual(expected, kr_info)

    def test_get_deploy_kr_info(self):
        expected_dir = '/tftp'
        self.config(tftp_root=expected_dir, group='pxe')
        self._test_get_deploy_kr_info(expected_dir)

    def test_get_deploy_kr_info_ipxe(self):
        expected_dir = '/http'
        self.config(ipxe_enabled=True, group='pxe')
        self.config(http_root=expected_dir, group='deploy')
        self._test_get_deploy_kr_info(expected_dir)

    def test_get_deploy_kr_info_bad_driver_info(self):
        """Missing deploy_kernel/deploy_ramdisk keys raise KeyError."""
        self.config(tftp_root='/tftp', group='pxe')
        node_uuid = 'fake-node'
        driver_info = {}
        self.assertRaises(KeyError,
                          pxe_utils.get_deploy_kr_info,
                          node_uuid,
                          driver_info)

    def test_dhcp_options_for_instance_ipxe(self):
        """iPXE DHCP options differ per DHCP provider (isc vs neutron)."""
        self.config(tftp_server='192.0.2.1', group='pxe')
        self.config(pxe_bootfile_name='fake-bootfile', group='pxe')
        self.config(ipxe_enabled=True, group='pxe')
        self.config(http_url='http://192.0.3.2:1234', group='deploy')
        self.config(ipxe_boot_script='/test/boot.ipxe', group='pxe')
        self.config(dhcp_provider='isc', group='dhcp')
        expected_boot_script_url = 'http://192.0.3.2:1234/boot.ipxe'
        expected_info = [{'opt_name': '!175,bootfile-name',
                          'opt_value': 'fake-bootfile'},
                         {'opt_name': 'server-ip-address',
                          'opt_value': '192.0.2.1'},
                         {'opt_name': 'tftp-server',
                          'opt_value': '192.0.2.1'},
                         {'opt_name': 'bootfile-name',
                          'opt_value': expected_boot_script_url}]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertItemsEqual(expected_info,
                                  pxe_utils.dhcp_options_for_instance(task))
        self.config(dhcp_provider='neutron', group='dhcp')
        expected_boot_script_url = 'http://192.0.3.2:1234/boot.ipxe'
        expected_info = [{'opt_name': 'tag:!ipxe,bootfile-name',
                          'opt_value': 'fake-bootfile'},
                         {'opt_name': 'server-ip-address',
                          'opt_value': '192.0.2.1'},
                         {'opt_name': 'tftp-server',
                          'opt_value': '192.0.2.1'},
                         {'opt_name': 'tag:ipxe,bootfile-name',
                          'opt_value': expected_boot_script_url}]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertItemsEqual(expected_info,
                                  pxe_utils.dhcp_options_for_instance(task))

    @mock.patch('ironic.common.utils.rmtree_without_raise', autospec=True)
    @mock.patch('ironic.common.utils.unlink_without_raise', autospec=True)
    @mock.patch('ironic.common.dhcp_factory.DHCPFactory.provider')
    def test_clean_up_pxe_config_uefi(self, provider_mock, unlink_mock,
                                      rmtree_mock):
        """UEFI cleanup removes both the dotted and hex IP config links."""
        ip_address = '10.10.0.1'
        address = "aa:aa:aa:aa:aa:aa"
        properties = {'capabilities': 'boot_mode:uefi'}
        object_utils.create_test_port(self.context, node_id=self.node.id,
                                      address=address)
        provider_mock.get_ip_addresses.return_value = [ip_address]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.properties = properties
            pxe_utils.clean_up_pxe_config(task)
            unlink_calls = [
                mock.call('/tftpboot/10.10.0.1.conf'),
                mock.call('/tftpboot/0A0A0001.conf')
            ]
            unlink_mock.assert_has_calls(unlink_calls)
            rmtree_mock.assert_called_once_with(
                os.path.join(CONF.pxe.tftp_root, self.node.uuid))

    @mock.patch('ironic.common.utils.rmtree_without_raise')
    @mock.patch('ironic.common.utils.unlink_without_raise')
    @mock.patch('ironic.common.dhcp_factory.DHCPFactory.provider')
    def test_clean_up_pxe_config_uefi_instance_info(self,
                                                    provider_mock, unlink_mock,
                                                    rmtree_mock):
        """UEFI-via-instance_info cleanup behaves like capability-based."""
        ip_address = '10.10.0.1'
        address = "aa:aa:aa:aa:aa:aa"
        object_utils.create_test_port(self.context, node_id=self.node.id,
                                      address=address)
        provider_mock.get_ip_addresses.return_value = [ip_address]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.instance_info['deploy_boot_mode'] = 'uefi'
            pxe_utils.clean_up_pxe_config(task)
            unlink_calls = [
                mock.call('/tftpboot/10.10.0.1.conf'),
                mock.call('/tftpboot/0A0A0001.conf')
            ]
            unlink_mock.assert_has_calls(unlink_calls)
            rmtree_mock.assert_called_once_with(
                os.path.join(CONF.pxe.tftp_root, self.node.uuid))
| |
import requests
import xmltodict
import json
import os
class Gateway:
    '''Client for the TankLink SOAP gateway web service.'''

    def __init__(self):
        '''Establish a gateway connection using a connection URL and auth
        credentials.'''
        self.url = 'http://dev.tanklink.com/latlontdg/service.asmx?WSDL'

    def gateway_request(self, string):
        '''Make a request to the gateway web service with a specific soap
        envelope string.  Returns the soap response string, or the marker
        string 'gateway request error' on failure.'''
        headers = {'content-type': 'text/xml'}
        #headers = {'content-type': 'application/soap+xml'}
        try:
            response = requests.post(self.url, data=string, headers=headers)
            return str(response.content)
        except Exception:
            # Narrowed from a bare "except:" so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            return 'gateway request error'

    def parse_response(self, respinput):
        '''Parse the response xml string into an ordered dictionary
        (via xmltodict), starting at the soap:Body node.
        Returns {'error': 'parse error'} when the body cannot be parsed.'''
        beg_tag_str = '<soap:Body>'
        end_tag_str = '</soap:Body>'
        try:
            body_xml = respinput[respinput.find(beg_tag_str):
                                 respinput.find(end_tag_str)
                                 + len(end_tag_str)]
            return xmltodict.parse(body_xml)
        except Exception:
            # Narrowed from a bare "except:".
            return {'error': 'parse error'}

    def parse_dictionary(self, pdinput):
        '''Convert the dict output from parse_response to a pretty-printed
        JSON string.'''
        return json.dumps(pdinput, sort_keys=True, indent=4)

    def _write_resp_json(self, resp, suffix):
        '''Write *resp* to data/<root-key><suffix>.json, overwriting any
        existing file.  The name comes from the first key under soap:Body.
        Returns True on success, False when the path cannot be opened.
        NOTE(review): a missing 'soap:Body' key still raises KeyError,
        matching the historical behavior of both save methods.'''
        filename = ''
        for key in resp['soap:Body']:
            filename = 'data/' + str(key) + suffix + '.json'
            break
        try:
            # 'with' closes the handle; the previous open()-without-close
            # leaked a file descriptor per save.
            with open(filename, 'w') as writefile:
                writefile.write(json.dumps(resp, sort_keys=True, indent=4))
            return True
        except FileNotFoundError:
            return False

    def save_resp_json(self, resp):
        '''Save the dictionary formatted response from parse_response to a
        local JSON file.  Returns bool status of the json file write.'''
        return self._write_resp_json(resp, '')

    def save_resp_unique_json(self, resp, uniqueid):
        '''Save the dictionary formatted response to a unique JSON file
        whose name includes *uniqueid*.
        Returns bool status of the json file write.'''
        return self._write_resp_json(resp, uniqueid)

    def delete_resp_unique_json(self, delfile):
        '''Delete the local JSON file (cleanup / empty-record removal).
        Returns bool status of the json file delete.'''
        try:
            os.remove(delfile)
            return True
        except FileNotFoundError:
            return False

    def test_connect(self):
        '''Simple test to make sure the gateway is up and running.
        Returns true if OK'''
        # TODO: Need to complete this
        return True
class Process():
    '''Process class for processing data in JSON obtained from Gateway'''

    def __init__(self):
        '''Create process for reading data from json'''
        self.tankjsonfile = 'data/GetTankResponse.json'
        self.inventoryfile = 'data/GetInventoryResponse.json'
        self.tankgenlatlonplus = 'data/GetTankGeneralLatLonPlusResponse{0}.json'
        self.invcalcalrmfile = 'data/GetInventoryCalcAlarmResponse.json'
        self.invcalcalrmfilelatest = 'data/GetInventoryCalcAlarmResponse_latest.json'
        self.uniqueinvcalcalrmfile = 'data/GetInventoryCalcAlarmResponse{0}.json'

    def get_json_file(self, filestring):
        '''Load a JSON file into a dict.  Takes the filename string.
        Returns a dict; an empty dict when the file does not exist.'''
        try:
            # 'with' guarantees the handle is closed (the old code never
            # closed the file it opened).
            with open(filestring, 'r') as openfile:
                return json.loads(openfile.read())
        except FileNotFoundError:
            # BUG FIX: this used to return the *string* '{ }', which made
            # every caller fail with TypeError on ['soap:Body'] access
            # instead of the KeyError they actually handle.
            return {}

    def _calcalarm_list(self, jsonfromfile):
        '''Return the CalcAlarmInventory list from a parsed calc-alarm
        response dict.  Raises KeyError when the path is absent.'''
        return (jsonfromfile['soap:Body']['GetInventoryCalcAlarmResponse']
                ['GetInventoryCalcAlarmResult']['CalcAlarmInventory'])

    def _count_calcalarm(self, filestring):
        '''Count CalcAlarmInventory entries with a valid iInventoryID in the
        given file.  Returns 0 when the file or path is missing.'''
        jsonfromfile = self.get_json_file(filestring)
        listcount = 0
        try:
            for item in self._calcalarm_list(jsonfromfile):
                # Only count entries carrying real data (ie. not xsi:nil).
                if item['iInventoryID']:
                    listcount += 1
        except KeyError:
            pass
            #log key error, ignore?
        return listcount

    def _get_transactid(self, filestring):
        '''Return the iTransactionId string from the given calc-alarm file,
        or an empty string when absent.'''
        jsonfromfile = self.get_json_file(filestring)
        try:
            return (jsonfromfile['soap:Body']
                    ['GetInventoryCalcAlarmResponse']['iTransactionId'])
        except KeyError:
            return ''

    def get_tank_list(self):
        '''Get tank list. Returns list of tank IDs'''
        jsonfromfile = self.get_json_file(self.tankjsonfile)
        returnlist = []
        try:
            tanks = (jsonfromfile['soap:Body']['GetTankResponse']
                     ['GetTankResult']['Tank'])  # returns list
            for tank in tanks:
                if tank['iTankID']:
                    returnlist.append(tank['iTankID'])
        except KeyError:
            pass
            #log key error, ignore?
        return returnlist

    def get_inventorycalcalrm_transactID(self):
        '''Get inventory calc alarm. Returns only the TransactionID string'''
        return self._get_transactid(self.invcalcalrmfile)

    def get_inventorycalcalrm_unique_transactID(self, prevtransactid):
        '''Get UNIQUE inventory calc alarm. Returns only the unique
        TransactionID string'''
        return self._get_transactid(
            self.uniqueinvcalcalrmfile.format(prevtransactid))

    def count_inventorycalcalrm(self):
        '''Get inventory calc alarm list. Returns a count of the items in
        the list as an int'''
        return self._count_calcalarm(self.invcalcalrmfile)

    def count_inventorycalcalrmlatest(self):
        '''Get inventory calc alarm list from latest file. Returns a count
        of the items in the list as an int'''
        return self._count_calcalarm(self.invcalcalrmfilelatest)

    def count_inventorycalcalrm_unique(self, prevtransactid):
        '''Get UNIQUE inventory calc alarm list. Returns a count of the
        items in the unique list as an int (only entries with valid data,
        ie. not xsi:nil, are counted).'''
        return self._count_calcalarm(
            self.uniqueinvcalcalrmfile.format(prevtransactid))

    def get_inventory_list(self):
        '''Get inventory list. Returns list of inventory IDs'''
        jsonfromfile = self.get_json_file(self.inventoryfile)
        returnlist = []
        try:
            inventories = (jsonfromfile['soap:Body']['GetInventoryResponse']
                           ['GetInventoryResult']['Inventory'])  # returns list
            for inv in inventories:
                if inv['iInventoryID']:
                    returnlist.append(inv['iInventoryID'])
        except KeyError:
            pass
            #log key error, ignore?
        return returnlist

    def get_tankinv_list(self):
        '''Get tank inventory list.
        Returns a list of {tankID: [inventoryID, ...]} dicts, one per tank.
        NOTE: uses the GetInventoryCalcAlarm LATEST file instead of
        GetInventory or GetInventoryCalcAlarm.'''
        tankjsonfromfile = self.get_json_file(self.tankjsonfile)
        invjsonfromfile = self.get_json_file(self.invcalcalrmfilelatest)
        returnlist = []
        # These two lookups intentionally propagate when the files are
        # missing or malformed (matching the historical behavior of this
        # method, which never guarded the top-level access).
        tanklistfromjson = (tankjsonfromfile['soap:Body']['GetTankResponse']
                            ['GetTankResult']['Tank'])  # returns list
        invlistfromjson = self._calcalarm_list(invjsonfromfile)
        for tank in tanklistfromjson:
            try:
                if tank['iTankID']:
                    # Collect every inventory id recorded for this tank.
                    invids = []
                    for inv in invlistfromjson:
                        try:
                            if (str(tank['iTankID']) == str(inv['iTankID'])
                                    and inv['iInventoryID']):
                                invids.append(inv['iInventoryID'])
                        except KeyError:
                            pass
                            #log key error?
                    returnlist.append({tank['iTankID']: invids})
            except KeyError:
                pass
                #log key error?
        return returnlist

    def get_latestinvid_bytank(self, tankidstr):
        '''Get the latest (highest-numbered) inventory id for a tank id.
        Returns the inventory id string, '' when the tank is unknown.'''
        returnlatestinvstr = ''
        bothlist = self.get_tankinv_list()
        for item in bothlist:
            try:
                if item[tankidstr]:
                    highestvalue = 0
                    for value in item[tankidstr]:
                        if int(value) > highestvalue:
                            highestvalue = int(value)
                    returnlatestinvstr = str(highestvalue)
            except KeyError:
                pass
        return returnlatestinvstr

    def get_grossvol_byinvid(self, invidstr):
        '''Get the gross volume for the given inventory id string.
        NOTE: reads the GetInventoryCalcAlarm LATEST file.
        Returns the gross inventory value as string, '' when not found.'''
        jsonfromfile = self.get_json_file(self.invcalcalrmfilelatest)
        try:
            for item in self._calcalarm_list(jsonfromfile):
                if str(item['iInventoryID']) == invidstr:
                    return str(item['dGrossVolume'])
        except KeyError:
            pass
            #log key error, ignore?
        return ''

    def get_tankname_bytankid(self, tankidstr, tankdict):
        '''Get the tank name attribute from the parsed tank dictionary and
        tank id string.  Returns the tank name, '' when not found.'''
        try:
            result = (tankdict['soap:Body']
                      ['GetTankGeneralLatLonPlusResponse']
                      ['GetTankGeneralLatLonPlusResult'])  # returns dict
            if str(result['iTankID']) == tankidstr:
                return str(result['sTankName'])
        except KeyError:
            pass
            #log key error, ignore?
        return ''

    def get_tankname_bytankid_file(self, tankidstr):
        '''Get the tank name attribute for a tank id string, reading the
        per-tank LatLonPlus json file.  Returns the tank name.'''
        uniquefilestr = self.tankgenlatlonplus.format(tankidstr)
        jsonfromfile = self.get_json_file(uniquefilestr)
        return self.get_tankname_bytankid(tankidstr, jsonfromfile)

    def get_tankalrm_byinvid(self, invalrmidstr):
        '''Get the alarm status for a tank by the inventory id string.
        NOTE: reads the GetInventoryCalcAlarm LATEST file.
        Returns the alarm bits as string, '' when not found.'''
        jsonfromfile = self.get_json_file(self.invcalcalrmfilelatest)
        try:
            for item in self._calcalarm_list(jsonfromfile):
                if str(item['iInventoryID']) == invalrmidstr:
                    return str(item['iCalcAlarmBits'])
        except KeyError:
            print('key error')
        return ''
| |
import numpy as np
"""
This file defines layer types that are commonly used for recurrent neural
networks.
"""
def rnn_step_forward(x, prev_h, Wx, Wh, b):
    """
    Run the forward pass for a single timestep of a vanilla RNN that uses a
    tanh activation function.

    The input data has dimension D, the hidden state has dimension H, and we
    use a minibatch size of N.

    Inputs:
    - x: Input data for this timestep, of shape (N, D).
    - prev_h: Hidden state from previous timestep, of shape (N, H)
    - Wx: Weight matrix for input-to-hidden connections, of shape (D, H)
    - Wh: Weight matrix for hidden-to-hidden connections, of shape (H, H)
    - b: Biases of shape (H,)

    Returns a tuple of:
    - next_h: Next hidden state, of shape (N, H)
    - cache: Tuple of values needed for the backward pass.
    """
    affine = x.dot(Wx) + prev_h.dot(Wh) + b
    next_h = np.tanh(affine)
    return next_h, (x, prev_h, Wx, Wh, next_h)
def rnn_step_backward(dnext_h, cache):
    """
    Backward pass for a single timestep of a vanilla RNN.

    Inputs:
    - dnext_h: Gradient of loss with respect to next hidden state, (N, H)
    - cache: Cache object from the forward pass

    Returns a tuple of:
    - dx: Gradients of input data, of shape (N, D)
    - dprev_h: Gradients of previous hidden state, of shape (N, H)
    - dWx: Gradients of input-to-hidden weights, of shape (D, H)
    - dWh: Gradients of hidden-to-hidden weights, of shape (H, H)
    - db: Gradients of bias vector, of shape (H,)
    """
    x, prev_h, Wx, Wh, next_h = cache
    # Backprop through tanh: d/dz tanh(z) = 1 - tanh(z)^2.
    dz = (1.0 - next_h ** 2) * dnext_h
    dx = dz.dot(Wx.T)
    dprev_h = dz.dot(Wh.T)
    dWx = x.T.dot(dz)
    dWh = prev_h.T.dot(dz)
    db = dz.sum(axis=0)
    return dx, dprev_h, dWx, dWh, db
def rnn_forward(x, h0, Wx, Wh, b):
    """
    Run a vanilla RNN forward on an entire sequence of data. We assume an
    input sequence composed of T vectors, each of dimension D. The RNN uses a
    hidden size of H, and we work over a minibatch containing N sequences.

    Inputs:
    - x: Input data for the entire timeseries, of shape (N, T, D).
    - h0: Initial hidden state, of shape (N, H)
    - Wx: Weight matrix for input-to-hidden connections, of shape (D, H)
    - Wh: Weight matrix for hidden-to-hidden connections, of shape (H, H)
    - b: Biases of shape (H,)

    Returns a tuple of:
    - h: Hidden states for the entire timeseries, of shape (N, T, H).
    - cache: Values needed in the backward pass (one per-step cache per t)
    """
    N, T, D = x.shape
    H = h0.shape[1]
    h = np.zeros((N, T, H))
    cache = []
    prev = h0
    for t in range(T):
        prev, step_cache = rnn_step_forward(x[:, t, :], prev, Wx, Wh, b)
        h[:, t, :] = prev
        cache.append(step_cache)
    return h, cache
def rnn_backward(dh, cache):
    """
    Compute the backward pass for a vanilla RNN over an entire sequence.

    Inputs:
    - dh: Upstream gradients of all hidden states, of shape (N, T, H)
    - cache: Per-step caches from rnn_forward

    Returns a tuple of:
    - dx: Gradient of inputs, of shape (N, T, D)
    - dh0: Gradient of initial hidden state, of shape (N, H)
    - dWx: Gradient of input-to-hidden weights, of shape (D, H)
    - dWh: Gradient of hidden-to-hidden weights, of shape (H, H)
    - db: Gradient of biases, of shape (H,)
    """
    N, T, H = dh.shape
    x0 = cache[0][0]
    D = x0.shape[1]
    dx = np.zeros((N, T, D), dtype=x0.dtype)
    # Gradient flowing back through the recurrence; zero past the last step.
    dh_step = np.zeros((N, H))
    dWx = dWh = db = 0
    for t in reversed(range(T)):
        dx[:, t, :], dh_step, dWx_t, dWh_t, db_t = \
            rnn_step_backward(dh[:, t, :] + dh_step, cache[t])
        dWx = dWx + dWx_t
        dWh = dWh + dWh_t
        db = db + db_t
    # After t == 0, dh_step is the gradient w.r.t. the initial state h0.
    return dx, dh_step, dWx, dWh, db
def word_embedding_forward(x, W):
    """
    Forward pass for word embeddings. We operate on minibatches of size N
    where each sequence has length T. We assume a vocabulary of V words,
    assigning each to a vector of dimension D.

    Inputs:
    - x: Integer array of shape (N, T) giving indices of words. Each element
      idx of x must be in the range 0 <= idx < V.
    - W: Weight matrix of shape (V, D) giving word vectors for all words.

    Returns a tuple of:
    - out: Array of shape (N, T, D) giving word vectors for all input words.
    - cache: Values needed for the backward pass
    """
    # Fancy indexing broadcasts the (N, T) index array over the rows of W.
    out = W[x, :]
    return out, (x, W)
def word_embedding_backward(dout, cache):
    """
    Backward pass for word embeddings. We cannot back-propagate into the
    words since they are integers, so we only return gradient for the word
    embedding matrix.

    Inputs:
    - dout: Upstream gradients of shape (N, T, D)
    - cache: Values from the forward pass

    Returns:
    - dW: Gradient of word embedding matrix, of shape (V, D).
    """
    x, W = cache
    dW = np.zeros(W.shape, dtype=W.dtype)
    # Indices in x may repeat, so use the unbuffered scatter-add np.add.at
    # instead of dW[x] += dout (which would drop duplicate contributions).
    np.add.at(dW, x, dout)
    return dW
def sigmoid(x):
    """
    Numerically stable logistic sigmoid.

    exp() is only ever evaluated on non-positive arguments, so it cannot
    overflow: x >= 0 uses 1 / (1 + e^-x) and x < 0 uses e^x / (1 + e^x).
    """
    nonneg = x >= 0
    neg = x < 0
    expterm = np.zeros_like(x)
    expterm[nonneg] = np.exp(-x[nonneg])
    expterm[neg] = np.exp(x[neg])
    numer = np.ones_like(x)
    numer[neg] = expterm[neg]
    return numer / (1 + expterm)
def lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b):
    """
    Forward pass for a single timestep of an LSTM.

    The input data has dimension D, the hidden state has dimension H, and we
    use a minibatch size of N.

    Inputs:
    - x: Input data, of shape (N, D)
    - prev_h: Previous hidden state, of shape (N, H)
    - prev_c: previous cell state, of shape (N, H)
    - Wx: Input-to-hidden weights, of shape (D, 4H)
    - Wh: Hidden-to-hidden weights, of shape (H, 4H)
    - b: Biases, of shape (4H,)

    Returns a tuple of:
    - next_h: Next hidden state, of shape (N, H)
    - next_c: Next cell state, of shape (N, H)
    - cache: Tuple of values needed for backward pass.
    """
    H = prev_h.shape[1]
    a = x.dot(Wx) + prev_h.dot(Wh) + b
    # Column blocks of the affine output are the i, f, o, g pre-activations.
    i = sigmoid(a[:, :H])
    f = sigmoid(a[:, H:2 * H])
    o = sigmoid(a[:, 2 * H:3 * H])
    g = np.tanh(a[:, 3 * H:])
    next_c = f * prev_c + i * g
    tanh_next_c = np.tanh(next_c)
    next_h = o * tanh_next_c
    cache = (x, prev_h, prev_c, Wx, Wh, b, i, f, o, g, tanh_next_c)
    return next_h, next_c, cache
def lstm_step_backward(dnext_h, dnext_c, cache):
    """
    Backward pass for a single timestep of an LSTM.
    Inputs:
    - dnext_h: Gradients of next hidden state, of shape (N, H)
    - dnext_c: Gradients of next cell state, of shape (N, H)
    - cache: Values from the forward pass
    Returns a tuple of:
    - dx: Gradient of input data, of shape (N, D)
    - dprev_h: Gradient of previous hidden state, of shape (N, H)
    - dprev_c: Gradient of previous cell state, of shape (N, H)
    - dWx: Gradient of input-to-hidden weights, of shape (D, 4H)
    - dWh: Gradient of hidden-to-hidden weights, of shape (H, 4H)
    - db: Gradient of biases, of shape (4H,)
    """
    x, prev_h, prev_c, Wx, Wh, b, i, f, o, g, tanh_next_c = cache
    # Fold the hidden-state gradient into the cell-state gradient:
    # next_h = o * tanh(next_c), so next_c also receives
    # dnext_h * o * (1 - tanh(next_c)^2).
    dnext_c = dnext_c + dnext_h * o * (1 - np.square(tanh_next_c))
    # Gate gradients from next_c = f * prev_c + i * g and next_h = o * tanh(next_c).
    di = dnext_c * g
    df = dnext_c * prev_c
    do = dnext_h * tanh_next_c
    dg = dnext_c * i
    # Backprop through the gate nonlinearities (sigmoid for i/f/o, tanh for g).
    daffine_i = di * i * (1 - i)
    daffine_f = df * f * (1 - f)
    daffine_o = do * o * (1 - o)
    daffine_g = dg * (1 - np.square(g))
    # Reassemble the (N, 4H) pre-activation gradient in i, f, o, g order,
    # matching the np.split layout used by the forward pass.
    daffine = np.concatenate((daffine_i, daffine_f, daffine_o, daffine_g), axis=1)
    dx = np.dot(daffine, Wx.T)
    dprev_h = np.dot(daffine, Wh.T)
    # Note dnext_c was augmented above, so this includes the hidden-path term.
    dprev_c = dnext_c * f
    dWx = np.dot(x.T, daffine)
    dWh = np.dot(prev_h.T, daffine)
    db = np.sum(daffine, axis=0)
    return dx, dprev_h, dprev_c, dWx, dWh, db
def lstm_forward(x, h0, Wx, Wh, b):
    """
    Forward pass for an LSTM over an entire sequence of data.

    We assume an input sequence composed of T vectors, each of dimension D.
    The LSTM uses a hidden size of H over a minibatch of N sequences.

    Note that the initial cell state is internal and set to zero; the cell
    state is not returned and is not accessed from outside.

    Inputs:
    - x: Input data of shape (N, T, D)
    - h0: Initial hidden state of shape (N, H)
    - Wx: Weights for input-to-hidden connections, of shape (D, 4H)
    - Wh: Weights for hidden-to-hidden connections, of shape (H, 4H)
    - b: Biases of shape (4H,)

    Returns a tuple of:
    - h: Hidden states for all timesteps of all sequences, of shape (N, T, H)
    - cache: Values needed for the backward pass (one per-step cache per t).
    """
    N, T, D = x.shape
    H = h0.shape[1]
    h = np.zeros((N, T, H))
    cache = []
    prev_h, prev_c = h0, np.zeros_like(h0)
    for t in range(T):
        prev_h, prev_c, step_cache = \
            lstm_step_forward(x[:, t, :], prev_h, prev_c, Wx, Wh, b)
        h[:, t, :] = prev_h
        cache.append(step_cache)
    return h, cache
def lstm_backward(dh, cache):
    """
    Backward pass for an LSTM over an entire sequence of data.

    Inputs:
    - dh: Upstream gradients of hidden states, of shape (N, T, H)
    - cache: Values from the forward pass

    Returns a tuple of:
    - dx: Gradient of input data of shape (N, T, D)
    - dh0: Gradient of initial hidden state of shape (N, H)
    - dWx: Gradient of input-to-hidden weight matrix of shape (D, 4H)
    - dWh: Gradient of hidden-to-hidden weight matrix of shape (H, 4H)
    - db: Gradient of biases, of shape (4H,)
    """
    N, T, H = dh.shape
    x0 = cache[0][0]
    (_, D), dtype = x0.shape, x0.dtype
    dx = np.zeros((N, T, D), dtype=dtype)
    # Start at the last timestep; the cell-state gradient flowing in from
    # "after" the sequence is zero.
    dx[:, T - 1, :], dh_prev, dc_prev, dWx, dWh, db = \
        lstm_step_backward(dh[:, T - 1, :], np.zeros((N, H)), cache[T - 1])
    for t in np.arange(T - 2, -1, -1):
        dx[:, t, :], dh_prev, dc_prev, dWx_temp, dWh_temp, db_temp = \
            lstm_step_backward(dh[:, t, :] + dh_prev, dc_prev, cache[t])
        dWx += dWx_temp
        dWh += dWh_temp
        db += db_temp
    # BUGFIX: removed an unreachable second `return dx, dh0, ...` that
    # referenced an undefined name dh0. After the t == 0 step, dh_prev is
    # the gradient with respect to the initial hidden state h0.
    return dx, dh_prev, dWx, dWh, db
def temporal_affine_forward(x, w, b):
    """
    Forward pass for a temporal affine layer.

    The input is a set of D-dimensional vectors arranged into a minibatch of
    N timeseries, each of length T. An affine function transforms each vector
    into a new vector of dimension M.

    Inputs:
    - x: Input data of shape (N, T, D)
    - w: Weights of shape (D, M)
    - b: Biases of shape (M,)

    Returns a tuple of:
    - out: Output data of shape (N, T, M)
    - cache: Values needed for the backward pass
    """
    N, T, D = x.shape
    M = w.shape[1]
    # Collapse the batch and time axes so a single matmul handles all steps.
    out = np.dot(x.reshape(-1, D), w).reshape(N, T, M) + b
    return out, (x, w, b, out)
def temporal_affine_backward(dout, cache):
    """
    Backward pass for temporal affine layer.

    Input:
    - dout: Upstream gradients of shape (N, T, M)
    - cache: Values from forward pass

    Returns a tuple of:
    - dx: Gradient of input, of shape (N, T, D)
    - dw: Gradient of weights, of shape (D, M)
    - db: Gradient of biases, of shape (M,)
    """
    x, w, b, out = cache
    N, T, D = x.shape
    M = b.shape[0]
    # Flatten batch and time so gradients reduce to plain matmuls.
    dout_flat = dout.reshape(-1, M)
    dx = dout_flat.dot(w.T).reshape(N, T, D)
    dw = x.reshape(-1, D).T.dot(dout_flat)
    db = dout_flat.sum(axis=0)
    return dx, dw, db
def temporal_softmax_loss(x, y, mask, verbose=False):
    """
    A temporal version of softmax loss for use in RNNs. We assume that we are
    making predictions over a vocabulary of size V for each timestep of a
    timeseries of length T, over a minibatch of size N. The input x gives
    scores for all vocabulary elements at all timesteps, and y gives the
    indices of the ground-truth element at each timestep. We use a
    cross-entropy loss at each timestep, summing the loss over all timesteps
    and averaging across the minibatch.

    The optional mask argument tells us which elements should contribute to
    the loss (sequences of different length may have been padded with NULL
    tokens).

    Inputs:
    - x: Input scores, of shape (N, T, V)
    - y: Ground-truth indices, of shape (N, T) where each element is in the
      range 0 <= y[i, t] < V
    - mask: Boolean array of shape (N, T) where mask[i, t] tells whether or
      not the scores at x[i, t] should contribute to the loss.
    - verbose: If True, print the shape of the flat gradient (debug aid).

    Returns a tuple of:
    - loss: Scalar giving loss
    - dx: Gradient of loss with respect to scores x.
    """
    N, T, V = x.shape

    x_flat = x.reshape(N * T, V)
    y_flat = y.reshape(N * T)
    mask_flat = mask.reshape(N * T)

    # Shift scores for numerical stability before exponentiating.
    probs = np.exp(x_flat - np.max(x_flat, axis=1, keepdims=True))
    probs /= np.sum(probs, axis=1, keepdims=True)
    # Cross-entropy on the kept timesteps, averaged over the minibatch.
    loss = -np.sum(mask_flat * np.log(probs[np.arange(N * T), y_flat])) / N

    dx_flat = probs.copy()
    dx_flat[np.arange(N * T), y_flat] -= 1
    dx_flat /= N
    dx_flat *= mask_flat[:, None]

    if verbose:
        # BUGFIX: was a Python 2 print statement (syntax error on Python 3).
        print('dx_flat: ', dx_flat.shape)

    dx = dx_flat.reshape(N, T, V)
    return loss, dx
| |
#!/usr/bin/env python
# coding=UTF-8
# Title: handler.py
# Description: This file contains all tornado.web.RequestHandler classes used in this application
# Author David Nellessen <david.nellessen@familo.net>
# Date: 12.01.15
# Note:
# ==============================================================================
# Import modules
from tornado import web, gen, escape
from tornado.escape import utf8
import logging
import phonenumbers
import pygeoip
from tornado.iostream import StreamClosedError
class BaseHandler(web.RequestHandler):
    """
    A base handler providing localization features, phone number validation
    and formation as well as use of service limitation based on IP addresses.
    It also implements support for JSONP (for cross-domain requests).
    """

    # Whether to try to guess the user's country for phone number parsing.
    guess_country = True

    # Country used when every guessing strategy fails.
    default_country = 'DE'

    def __init__(self, application, request, **kwargs):
        super(BaseHandler, self).__init__(application, request, **kwargs)
        self.counter = {}

    def write(self, chunk):
        """
        Overwrites the default write method to support JSONP.

        Dicts are JSON-encoded; if a 'callback' query parameter is present
        the payload is wrapped in a JSONP function call.
        """
        if self._finished:
            raise RuntimeError("Cannot write() after finish(). May be caused "
                               "by using async operations without the "
                               "@asynchronous decorator.")
        if isinstance(chunk, dict):
            chunk = escape.json_encode(chunk)
            self.set_header("Content-Type", "application/json; charset=UTF-8")
        callback = self.get_argument('callback', None)
        if callback:
            chunk = callback + '(' + chunk + ');'
        chunk = utf8(chunk)
        self._write_buffer.append(chunk)

    def get_browser_locale_code(self):
        """
        Determines the user's locale from ``Accept-Language`` header.

        This is similar to tornado.web.get_browser_locale except it
        returns the code and not a Locale instance. Also this will return
        a result whether a translation for this language was loaded or not.
        See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
        """
        if "Accept-Language" in self.request.headers:
            languages = self.request.headers["Accept-Language"].split(",")
            locales = []
            for language in languages:
                parts = language.strip().split(";")
                if len(parts) > 1 and parts[1].startswith("q="):
                    try:
                        score = float(parts[1][2:])
                    except (ValueError, TypeError):
                        score = 0.0
                else:
                    score = 1.0
                locales.append((parts[0], score))
            if locales:
                # Highest q-value wins.
                locales.sort(key=lambda pair: pair[1], reverse=True)
                logging.debug(locales)
                codes = [l[0] for l in locales]
                return codes[0]
        return self.__class__.default_country

    def get_user_country_by_ip(self):
        """
        Determines the user's country by his IP-address. This will return
        the country code or None if not found.
        """
        # BUGFIX: `country` was unbound (NameError) when both the IPv4 and
        # IPv6 GeoIP lookups raised GeoIPError.
        country = None
        try:
            country = self.application.geo_ip.country_code_by_addr(
                self.request.remote_ip)
        except pygeoip.GeoIPError:
            try:
                country = self.application.geo_ipv6.country_code_by_addr(
                    self.request.remote_ip)
            except pygeoip.GeoIPError:
                pass
        if not country:
            logging.warning('Could not locate country for ' + self.request.remote_ip)
            return None
        else:
            logging.debug('Determined country by IP address: ' + country)
            return country

    def parse_phonenumber(self, number):
        """
        Validates and parses a phonenumber. It will return a
        phone number object or False if parsing failed.

        If the phone number is not given in full international notion the
        country will be guessed if the class attribute guess_country is True.
        Guessing will be done as follows:
        1. If a query string parameter 'country' is given as a country code
           (i.e. 'US', 'DE', ...) it will be used.
        2. If no parameter country is given the country will be determined by
           the remote IP address.
        3. Otherwise the country determined by the request header
           Accept-Language will be used.
        4. As a fall-back the classes default_country attribute will be used.
        """
        try:
            return phonenumbers.parse(number)
        except Exception:  # not in international notion: guess the country
            if self.__class__.guess_country:
                country_code = self.get_argument('country', None)
                if country_code is None:
                    country_code = self.get_user_country_by_ip()
                if country_code is None:
                    code = self.get_browser_locale_code().replace('-', '_')
                    parts = code.split('_')
                    if len(parts) > 1:
                        country_code = parts[1]
                if country_code is None:
                    country_code = self.__class__.default_country
                country_code = country_code.upper()
                logging.debug("Final country code: " + country_code)
            else:
                country_code = self.__class__.default_country
            # Parse the phone number into international notion.
            try:
                return phonenumbers.parse(number, country_code)
            except Exception:
                return False

    @gen.coroutine
    def limit_call(self, chash=None, amount=2, expire=10):
        """
        Use this function to limit user requests. Returns True if this function
        was called less then 'amount' times in the last 'expire' seconds with
        the same value 'chash' and the same remote IP address or False
        otherwise.
        """
        key = 'limit_call_' + chash + '_' + self.request.remote_ip
        redis = self.application.redis
        try:
            current_value = yield gen.Task(redis.get, key)
        except StreamClosedError:
            # Redis connection dropped: reconnect once and retry.
            yield gen.Task(self.application.redis_reconnect)
            redis = self.application.redis
            current_value = yield gen.Task(redis.get, key)
        if current_value is not None and int(current_value) >= amount:
            logging.info('Call Limitation acceded: ' + key)
            raise gen.Return(False)
        else:
            yield gen.Task(redis.incr, key)
            # Only set the TTL when the counter is created.
            if not current_value:
                yield gen.Task(redis.expire, key, expire)
            raise gen.Return(True)
class DLRHandler(web.RequestHandler):
    """
    Handles delivery receipts.
    """
    def get(self):
        """
        All delivery receipts will be send as HTTP-GET requests.
        """
        # TODO: Parse request!
        # Currently the receipt is only acknowledged and logged, not stored.
        logging.info('Received DLR. Not yet parsed though.')
class NumberValidationHandler(BaseHandler):
    """
    Validates a phone number.
    """

    # Per-IP rate limit: at most limit_amount calls per limit_expires seconds.
    limit_amount = 10
    limit_expires = 3600

    @gen.coroutine
    def get(self):
        """
        Validates a phone number given as the query string parameter 'number'.

        If the phone number is not given in full international notion the
        country will be guessed if the class attribute guess_country is True
        (see BaseHandler.parse_phonenumber for the guessing strategy).
        """
        # Enforce the per-IP rate limit first.
        if self.limit_amount:
            allowed = yield self.limit_call('number_validation',
                                            self.limit_amount,
                                            self.limit_expires)
            if not allowed:
                self.finish({'status': 'error',
                             'error': 'limit_acceded'})
                return
        # Decode request's query string parameters.
        number = self.get_argument('number', None)
        if not number:
            self.finish({'status': 'error',
                         'error': 'number_missing'})
            return
        logging.debug('Received number {} for validation'.format(number))
        parsed = self.parse_phonenumber(number)
        if parsed:
            number = phonenumbers.format_number(
                parsed, phonenumbers.PhoneNumberFormat.INTERNATIONAL)
        else:
            number = False
        self.finish({'status': 'ok',
                     'number': number})
class SimpleMessageHandler(BaseHandler):
    """
    Sends a fixed example message to a phone number given in the request.
    """

    message = 'This is an Example Message'
    sender = 'Put a sender title or number here'
    # Per-IP rate limit: at most limit_amount calls per limit_expires seconds.
    limit_amount = 10
    limit_expires = 3600

    @gen.coroutine
    def get(self):
        """Send the configured message to the 'receiver' query parameter."""
        # Enforce the per-IP rate limit first.
        if self.limit_amount:
            allowed = yield self.limit_call('example_handler',
                                            self.limit_amount,
                                            self.limit_expires)
            if not allowed:
                self.finish({'status': 'error',
                             'error': 'limit_acceded'})
                return
        # Get receiver's phone number as 'receiver' parameter.
        receiver = self.get_argument('receiver', None)
        if not receiver:
            self.finish({'status': 'error',
                         'error': 'receiver_missing'})
            return
        # Parse the given phone number.
        receiverobj = self.parse_phonenumber(receiver)
        if not receiverobj:
            self.finish({'status': 'error',
                         'error': 'receiver_validation'})
            return
        # International format for display, E.164 for the SMS gateway.
        receiver_nice = phonenumbers.format_number(
            receiverobj, phonenumbers.PhoneNumberFormat.INTERNATIONAL)
        receiver = phonenumbers.format_number(
            receiverobj, phonenumbers.PhoneNumberFormat.E164)
        # Send message to receiver via the Nexmo client.
        result = yield gen.Task(self.application.nexmo_client.send_message,
                                self.__class__.sender, receiver,
                                self.__class__.message)
        # Process result.
        if result:
            self.finish({'status': 'ok',
                         'message': 'Message sent',
                         'number': receiver_nice})
        else:
            self.finish({'status': 'error',
                         'error': 'nexmo_error',
                         'message': 'Nexmo Service Error',
                         'number': receiver_nice})
| |
"""Generate metafile data for use in BitTorrent applications
These data structures are generalizations of the original BitTorrent and
BitTornado makemetafile.py behaviors.
"""
import os
import re
import time
import hashlib
from .TypedCollections import TypedDict, TypedList, SplitList
from .bencode import BencodedFile
def get_piece_len(size):
    """Choose a piece size appropriate for a torrent of the given size.

    Parameters
        long    size    - size of files described by torrent

    Return
        long    - size of pieces to hash
    """
    # (lower bound on total size, piece-length exponent), largest first.
    thresholds = ((8 * 2 ** 30, 21),    # > 8G file  ->   2M pieces
                  (2 * 2 ** 30, 20),    # > 2G file  ->   1M pieces
                  (512 * 2 ** 20, 19),  # >512M file -> 512K pieces
                  (64 * 2 ** 20, 18),   # > 64M file -> 256K pieces
                  (16 * 2 ** 20, 17),   # > 16M file -> 128K pieces
                  (4 * 2 ** 20, 16))    # >  4M file ->  64K pieces
    for bound, exponent in thresholds:
        if size > bound:
            return 2 ** exponent
    return 2 ** 15                      # <= 4M file ->  32K pieces
def check_type(obj, types, errmsg='', pred=lambda x: False):
    """Raise ValueError if obj does not match type or triggers predicate"""
    if isinstance(obj, types) and not pred(obj):
        return
    raise ValueError(errmsg)
# Safe path component: must not begin with '/', '\', '.', or '~' and must not
# contain path separators -- guards against directory traversal in torrents.
VALID_NAME = re.compile(r'^[^/\\.~][^/\\]*$')
def check_info(info):
    """Validate torrent metainfo dictionary

    Raises ValueError on the first malformed or unsafe field found.
    """
    valid_name = re.compile(r'^[^/\\.~][^/\\]*$')
    berr = 'bad metainfo - '
    check_type(info, dict, berr + 'not a dictionary')
    check_type(info.get('pieces'), bytes, berr + 'bad pieces key',
               lambda x: len(x) % 20 != 0)
    check_type(info.get('piece length'), int, berr + 'illegal piece length',
               lambda x: x <= 0)
    name = info.get('name')
    check_type(name, str, berr + 'bad name')
    if valid_name.match(name) is None:
        raise ValueError('name %s disallowed for security reasons' % name)
    # Exactly one of 'files' (multi-file) and 'length' (single-file) allowed.
    if ('files' in info) == ('length' in info):
        raise ValueError('single/multiple file mix')
    if 'length' in info:
        check_type(info['length'], int, berr + 'bad length',
                   lambda x: x < 0)
        return
    files = info.get('files')
    check_type(files, list)
    seen = set()
    for finfo in files:
        check_type(finfo, dict, berr + 'bad file value')
        check_type(finfo.get('length'), int, berr + 'bad length',
                   lambda x: x < 0)
        path = finfo.get('path')
        check_type(path, list, berr + 'bad path', lambda x: x == [])
        for directory in path:
            check_type(directory, str, berr + 'bad path dir')
            if valid_name.match(directory) is None:
                raise ValueError('path {} disallowed for security reasons'
                                 ''.format(directory))
        key = tuple(path)
        if key in seen:
            raise ValueError('bad metainfo - duplicate path')
        seen.add(key)
class PieceHasher(object):
    """Wrapper for SHA1 hash with a maximum length"""
    def __init__(self, pieceLength, hashtype=hashlib.sha1):
        self.pieceLength = pieceLength  # bytes per piece
        self._hashtype = hashtype       # hash constructor (defaults to SHA1)
        self._hash = hashtype()         # hash of the current partial piece
        self.done = 0                   # bytes hashed into the current piece
        self.pieces = []                # digests of completed pieces

    def resetHash(self):
        """Set hash to initial state"""
        self._hash = self._hashtype()
        self.done = 0

    def update(self, data, progress=lambda x: None):
        """Add data to PieceHasher, splitting pieces if necessary.

        Progress function that accepts a number of (new) bytes hashed
        is optional
        """
        tofinish = self.pieceLength - self.done  # bytes to finish a piece
        # Split data based on the number of bytes to finish the current piece
        # If data is less than needed, remainder will be empty
        init, remainder = data[:tofinish], data[tofinish:]
        # Hash initial segment
        self._hash.update(init)
        progress(len(init))
        self.done += len(init)
        # Hash remainder, if present
        if remainder:
            toHash = len(remainder)
            # Create a new hash for each piece of data present
            hashes = [self._hashtype(remainder[i:i + self.pieceLength])
                      for i in range(0, toHash, self.pieceLength)]
            progress(toHash)
            self.done = toHash % self.pieceLength
            self.pieces.append(self._hash.digest())
            if self.done == 0:
                # BUGFIX: when the remainder is an exact multiple of the
                # piece length, every chunk hash is a complete piece. The old
                # code kept the last one as the "partial" piece with done == 0
                # and its digest was silently dropped from the piece list.
                self.pieces.extend(piece.digest() for piece in hashes)
                self.resetHash()
            else:
                self._hash = hashes[-1]
                self.pieces.extend(piece.digest() for piece in hashes[:-1])
        # If the piece is finished, reinitialize
        elif self.done == self.pieceLength:
            self.pieces.append(self._hash.digest())
            self.resetHash()

    def __nonzero__(self):
        """Evaluate to true if any data has been hashed (Python 2)"""
        return bool(self.pieces) or self.done != 0

    # BUGFIX: Python 3 uses __bool__; without this alias an empty hasher was
    # always truthy on Python 3.
    __bool__ = __nonzero__

    def __repr__(self):
        return "<PieceHasher[{:d}] ({})>".format(
            len(self.pieces), self._hash.hexdigest())

    def __bytes__(self):
        """Print concatenated digests of pieces and current digest, if
        nonzero"""
        excess = []
        if self.done > 0:
            excess.append(self._hash.digest())
        return b''.join(self.pieces + excess)

    @property
    def digest(self):
        """Current hash digest as a byte string"""
        return self._hash.digest()

    @property
    def hashtype(self):
        """Name of the hash function being used"""
        return self._hash.name
class Info(TypedDict): # pylint: disable=R0904
"""Info - information associated with a .torrent file
Info attributes
str name - name of file/dir being hashed
long size - total size of files to be described
dict[] fs - metadata about files described
long totalhashed - portion of total data hashed
PieceHasher hasher - object to manage hashed files
"""
class Files(TypedList):
class File(TypedDict):
class Path(TypedList):
valtype = str
valconst = lambda s, x: VALID_NAME.match(x)
typemap = {'length': int, 'path': Path}
valtype = File
typemap = {'name': str, 'piece length': int, 'pieces': bytes,
'files': Files, 'length': int}
def __init__(self, name, size=None,
progress=lambda x: None, progress_percent=False, **params):
"""
Parameters
str source - source file name (last path element)
int size - total size of files to be described
f() progress - callback function to report progress
bool progress_percent - flag for reporting percentage or change
"""
super(Info, self).__init__()
if not params and not isinstance(name, (str, bytes)):
params = name
# Accept iterables
if not isinstance(params, dict):
params = dict(params)
name = params.pop('name', None)
size = params.pop('size', None)
progress = params.pop('progress', lambda x: None)
progress_percent = params.pop('progress_percent', False)
if isinstance(name, bytes):
name = name.decode()
self['name'] = name
if 'files' in params:
self['files'] = params['files']
self['length'] = sum(entry['length']
for entry in self._get('files'))
elif 'length' in params:
self['length'] = params['length']
self['files'] = [{'path': [self['name']],
'length': self._get('length')}]
else:
self['files'] = []
self['length'] = size
if 'pieces' in params:
pieces = params['pieces']
# 'piece length' can't be made a variable
self.hasher = PieceHasher(params['piece length'])
self.hasher.pieces = [pieces[i:i + 20]
for i in range(0, len(pieces), 20)]
self.totalhashed = self._get('length')
elif size:
# BitTorrent/BitTornado have traditionally allowed this parameter
piece_len_exp = params.get('piece_size_pow2')
if piece_len_exp is not None and piece_len_exp != 0:
piece_length = 2 ** piece_len_exp
else:
piece_length = get_piece_len(size)
self.totalhashed = 0
self.hasher = PieceHasher(piece_length)
# Progress for this function updates the total amount hashed
# Call the given progress function according to whether it accpts
# percent or update
if progress_percent:
assert self._get('length')
def totalprogress(update, self=self, base=progress):
"""Update totalhashed and use percentage progress callback"""
self.totalhashed += update
base(self.totalhashed / self._get('length'))
self.progress = totalprogress
else:
def updateprogress(update, self=self, base=progress):
"""Update totalhashed and use update progress callback"""
self.totalhashed += update
base(update)
self.progress = updateprogress
def __contains__(self, key):
"""Test whether a key is in the Info dict"""
files = self._get('files')
if key == 'files':
return len(files) != 1
elif key == 'length':
return len(files) == 1
else:
return key in self.valid_keys
def __getitem__(self, key):
"""Retrieve value associated with key in Info dict"""
if key not in self.valid_keys:
raise KeyError('Invalid Info key')
if key == 'piece length':
return self.hasher.pieceLength
elif key == 'pieces':
return bytes(self.hasher)
elif key == 'files':
if 'files' not in self:
raise KeyError('files')
elif key == 'length':
if 'length' not in self:
raise KeyError('length')
return super(Info, self).__getitem__(key)
def keys(self):
"""Return iterator over keys in Info dict"""
keys = self.valid_keys.copy()
if 'files' in self:
keys.remove('length')
else:
keys.remove('files')
return iter(keys)
def values(self):
"""Return iterator over values in Info dict"""
return (self[key] for key in self.keys())
def items(self):
"""Return iterator over items in Info dict"""
return ((key, self[key]) for key in self.keys())
def get(self, key, default=None):
"""Return value associated with key in Info dict, or default, if
unavailable"""
try:
return self[key]
except KeyError:
return default
def _get(self, *args, **kwargs):
return super(Info, self).get(*args, **kwargs)
def add_file_info(self, size, path):
"""Add file information to torrent.
Parameters
long size size of file (in bytes)
str[] path file path e.g. ['path','to','file.ext']
"""
self._get('files').append({'length': size, 'path': path})
def add_data(self, data):
"""Process a segment of data.
Note that the sequence of calls to this function is sensitive to
order and concatenation. Treat it as a rolling hashing function, as
it uses one.
The length of data is relatively unimportant, though exact
multiples of the hasher's pieceLength will slightly improve
performance. The largest possible pieceLength (2**21 bytes == 2MB)
would be a reasonable default.
Parameters
str data - an arbitrarily long segment of the file to
be hashed
"""
self.hasher.update(data, self.progress)
    def resume(self, location):
        """Rehash last piece to prepare PieceHasher to accept more data
        Parameters
            str location - base path for hashed files"""
        # Number of bytes past the last complete piece boundary
        # NOTE(review): reads 'length' via _get, bypassing the single/multi
        # file check in __contains__ - confirm the underlying dict always
        # stores a 'length' entry
        excessLength = self._get('length') % self.hasher.pieceLength
        # Nothing to rehash if the hasher already holds buffered data or
        # the data ends exactly on a piece boundary
        if self.hasher.done != 0 or excessLength == 0:
            return
        seek = 0
        # Construct list of files needed to provide the leftover data
        # (walk the file list backwards until the partial piece is covered)
        rehash = []
        for entry in self._get('files')[::-1]:
            rehash.insert(0, entry)
            excessLength -= entry['length']
            if excessLength < 0:
                # The first file of the partial piece is entered mid-file
                seek = -excessLength
                break
        # Final piece digest to compare new hash digest against
        validator = self.hasher.pieces.pop()
        for entry in rehash:
            path = os.path.join(location, *entry['path'])
            with open(path, 'rb') as tohash:
                tohash.seek(seek)
                self.hasher.update(tohash.read())
            seek = 0
        # If the rehash disagrees, restore the hasher state and bail out
        if self.hasher.digest != validator:
            self.hasher.resetHash()
            self.hasher.pieces.append(validator)
            raise ValueError("Location does not produce same hash")
class MetaInfo(TypedDict, BencodedFile):
    """A constrained metainfo dictionary"""
    class AnnounceList(SplitList):
        # 'announce-list' value: '|' separates tracker tiers
        class AnnounceTier(SplitList):
            # A single tier: ','-separated tracker URLs
            splitchar = ','
            valtype = str
        splitchar = '|'
        valtype = AnnounceTier
    class HTTPList(SplitList):
        # 'httpseeds' value: '|'-separated list of seed URLs
        splitchar = '|'
        valtype = str
    # Allowed keys and the types their values are coerced to
    typemap = {'info': Info, 'announce': str, 'creation date': int,
               'comment': str, 'announce-list': AnnounceList,
               'httpseeds': HTTPList}
    def __init__(self, *args, **kwargs):
        super(MetaInfo, self).__init__(*args, **kwargs)
        # Stamp newly created metainfo with the current time
        if 'creation date' not in self:
            self['creation date'] = int(time.time())
| |
#!/usr/bin/python
#
# -*- coding: utf-8 -*-
#
# web-backup.py
#
# This script iterates through a config file of web services/apps
# that needs backing up, using the workflow:
#
# # mkdir /tmp/webservice
# # service stop webservice
# # dbdump db_name /tmp/webservice/db_name.sql
# # rsync -a /var/www/webservice /tmp/webservice/web-root
# # tar cjpf /backup/webservice/webservice-date.tar.bz2 /tmp/webservice
#
# Imports
import os
import sys
import time
from configobj import ConfigObj
import argparse
import logging
import shlex
from subprocess import Popen, PIPE, STDOUT
# Read arguments
parser = argparse.ArgumentParser()
parser.add_argument('--config',
                    help='Config file, defaults to "./web-backup.cfg"',
                    type=argparse.FileType('r'),
                    default='web-backup.cfg'
                    )
# Check for config file
# NOTE(review): argparse reports bad arguments by raising SystemExit, not
# argparse.ArgumentError - confirm this handler is ever reached
try:
    args = parser.parse_args()
except argparse.ArgumentError:
    parser.print_help()
    sys.exit(1)
print('\nWeb Backup started!\n')
# Read configuration file
config = ConfigObj(args.config)
# General settings
generalSettings = config['general']
# Backup jobs
backupJobs = config['jobs']
# Variables
backupDir = generalSettings['backup_dir']
tempDir = generalSettings['temp_dir']
logDir = generalSettings['log_dir']
logFile = generalSettings['log_dir'] + '/web-backup.log'
# Map the configured log level onto the logging module's constants
# (anything other than 'debug'/'warning' falls back to INFO)
if generalSettings['log_level'] == 'debug':
    logLevel = logging.DEBUG
elif generalSettings['log_level'] == 'warning':
    logLevel = logging.WARNING
else:
    logLevel = logging.INFO
errors = []  # collects per-job error messages for the final summary
# Check log dir
if not os.path.exists(logDir):
    try:
        os.makedirs(logDir)
    except OSError:
        print('Unable to create "' + logDir + '"!\n')
        sys.exit(1)
# Setup logging
# NOTE(review): '%T' in datefmt relies on platform strftime support
logging.basicConfig(
    filename=logFile,
    format='%(asctime)s:%(levelname)s:%(message)s',
    datefmt='%Y-%m-%d %T',
    level=logLevel,
)
logging.info('Starting backup run')
logging.debug('Running with settings:')
logging.debug('backupDir: ' + backupDir)
logging.debug('tempDir:' + tempDir)
logging.debug('logDir: ' + logDir)
logging.debug('logFile: ' + logFile)
logging.debug('logLevel: ' + str(logLevel))
# Functions
def checkDirectory(directory):
    """Ensure that *directory* exists, creating it if necessary.

    Returns:
        int: 0 if the directory exists or was created, 1 on failure
        (including the case where the path exists but is not a directory).
    """
    # EAFP: attempt the creation first to avoid the check-then-create race
    # the old exists()-then-makedirs() sequence had
    try:
        os.makedirs(directory)
        logging.debug('Created "' + directory + '"')
        return 0
    except OSError:
        # makedirs raises OSError both when the path already exists and on
        # a genuine failure - only the latter is an error
        if os.path.isdir(directory):
            logging.debug('Directory "' + directory + '" already exists')
            return 0
        logging.critical('Unable to create "' + directory + '"!')
        return 1
def runCommand(command):
    """Run *command* (a shell-style string) without a shell.

    Captured output is logged: stderr at WARNING, stdout at DEBUG.

    Returns:
        int: 0 on success, 1 if the command exits with a non-zero status.
    """
    cmd = shlex.split(command)
    logging.debug('Running command: "' + command + '"')
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    # communicate() drains both pipes concurrently; reading stderr fully
    # before stdout (as before) can deadlock once either buffer fills up
    stdout, stderr = proc.communicate()
    for line in stderr.splitlines():
        # decode so we log text rather than a bytes repr like "b'...'"
        logging.warning(line.decode('utf-8', 'replace').strip())
    for line in stdout.splitlines():
        logging.debug(line.decode('utf-8', 'replace').strip())
    if proc.returncode != 0:
        logging.critical('Command failed with return code "' +
                         str(proc.returncode) + '"')
        return 1
    else:
        logging.debug('Command successfully finished with returncode "' +
                      str(proc.returncode) + '"')
        return 0
# Classes
class webBackup:
    """A single web service/app backup job.

    Workflow: stop the service, dump its database, copy the web root and
    any extra directories into a temporary directory, restart the service,
    archive the temporary directory into the backup directory and finally
    remove the temporary directory.
    """
    def __init__(self, name, backupDir, tempDir, stopCmd, startCmd, dumpCmd, webRoot, extraDirs):
        """Store the job settings and log them at debug level.

        Empty-string settings disable the corresponding step in
        run_backup().
        """
        self.name = name
        self.backupDir = backupDir
        self.tempDir = tempDir
        self.stopCmd = stopCmd
        self.startCmd = startCmd
        self.dumpCmd = dumpCmd
        self.webRoot = webRoot
        self.extraDirs = extraDirs
        # Debug log settings
        logging.debug('Backup job name: ' + self.name)
        logging.debug('Backup dir: ' + self.backupDir)
        logging.debug('Temp dir: ' + self.tempDir)
        logging.debug('Stop command: ' + self.stopCmd)
        logging.debug('Start command: ' + self.startCmd)
        logging.debug('DB dump command: ' + self.dumpCmd)
        logging.debug('Web root dir: ' + self.webRoot)
        logging.debug('Extra dirs: ' + str(self.extraDirs))
    def run_backup(self):
        """Run the full backup workflow for this job.

        Returns:
            tuple: (0, 'Success!') on success, or (1, <error message>)
            for the first step that fails.
        """
        timeStamp = time.strftime("%Y-%m-%d_%H-%M-%S")
        target = self.name + "_" + timeStamp
        tempTarget = self.tempDir + "/" + target
        # Create temp dir
        logging.debug('Creating temp target directory: "' + tempTarget + '"')
        checkDirectory(tempTarget)
        # Run the stop command
        if self.stopCmd:
            logging.info('Running stop command')
            status = runCommand(self.stopCmd)
            if status == 1:
                msg = 'Stop command failed!'
                logging.critical(msg)
                return (1, msg)
        else:
            logging.debug('No stop command defined')
        # Run database backup
        if self.dumpCmd:
            logging.info('Running DB dump')
            # NOTE(review): runCommand executes without a shell, so the '>'
            # below is passed to the dump command as a literal argument
            # instead of redirecting its output - confirm the configured
            # dump command supports this (e.g. mysqldump --result-file) or
            # run the dump through a shell
            fullCmd = self.dumpCmd + '> ' + \
                tempTarget + '/dbdump_' + \
                timeStamp + '.sql'
            logging.debug('Full dump command: "' + fullCmd + '"')
            status = runCommand(fullCmd)
            if status == 1:
                msg = 'Database dump failed!'
                logging.critical(msg)
                return (1, msg)
        else:
            logging.debug('No DB dump command defined')
        # Backup web-root
        if self.webRoot:
            logging.info('Running web root backup')
            fullCmd = 'cp -a ' + self.webRoot + ' ' + tempTarget + '/'
            status = runCommand(fullCmd)
            if status == 1:
                msg = 'Web root backup failed!'
                logging.critical(msg)
                return (1, msg)
        else:
            logging.debug('No web root defined')
        # Backup extra dirs
        if self.extraDirs:
            logging.info('Running extra dirs backup')
            for directory in self.extraDirs:
                fullCmd = 'cp -a ' + directory + ' ' + tempTarget + '/'
                status = runCommand(fullCmd)
                if status == 1:
                    msg = 'Extra dir backup failed!'
                    logging.critical(msg)
                    return (1, msg)
        else:
            logging.debug('No extra dirs defined')
        # Run the start command
        if self.startCmd:
            logging.info('Running start command')
            status = runCommand(self.startCmd)
            if status == 1:
                msg = 'Start command failed!'
                logging.critical(msg)
                return (1, msg)
        else:
            logging.debug('No start command defined')
        # Create archive
        fullBackupDir = self.backupDir + '/' + self.name
        checkDirectory(fullBackupDir)
        logging.info('Creating archive')
        # Bug fix: the extension previously lacked the leading dot and
        # produced files named "<name>_<date>tar.bz2"
        fullCmd = 'tar cjpf ' + fullBackupDir + '/' + target + '.tar.bz2 ' + \
            tempTarget
        status = runCommand(fullCmd)
        if status == 1:
            msg = 'Archive failed!'
            logging.critical(msg)
            return (1, msg)
        # Remove temp dir
        logging.info('Removing temp dir')
        fullCmd = 'rm -rf ' + tempTarget
        status = runCommand(fullCmd)
        if status == 1:
            msg = 'Removing temp dir failed!'
            logging.critical(msg)
            return (1, msg)
        return (0, 'Success!')
# Main
# Run backups
for job in backupJobs.keys():
    logging.info('Running ' + job + ' job')
    # Per-job settings; empty strings disable the corresponding step
    name = job
    jobParams = {
        'service_stop_command': '',
        'service_start_command': '',
        'web_root': '',
        'db_dump_command': '',
        'extra_dirs': '',
    }
    # Fill in whatever the config provides for this job.
    # Bug fix: .iteritems() is Python-2-only; iterating the keys works on
    # both Python 2 and 3 (only values are rebound, so this is safe).
    for param in jobParams:
        try:
            jobParams[param] = backupJobs[job][param]
        except KeyError:
            logging.debug('Setting "' + param + '" not set')
    # Run backup
    backup = webBackup(
        name,
        backupDir,
        tempDir,
        jobParams['service_stop_command'],
        jobParams['service_start_command'],
        jobParams['db_dump_command'],
        jobParams['web_root'],
        jobParams['extra_dirs'],
    )
    status = backup.run_backup()
    if status[0] != 0:
        print('Backup failed! Job: "' + name + '"')
        print('Error: ' + status[1])
        # Add error to errors list
        errors.append('Job : "' + name + '"' + ' Error: ' + status[1])
    else:
        print('Job: "' + name + '" successful\n')
# Check for errors and exit
if errors:
    print('Finished with errors')
    logging.warning('Backup run finished with errors!')
    for error in errors:
        print(error)
        logging.info(error)
    print('\n')
    sys.exit(1)
else:
    print('Finished successfully\n')
    logging.info('Backup run finished successfully')
    sys.exit(0)
| |
""" This module contains several of the most important classes used in cellpy.
It also contains functions that are used by readers and utils. And it has the file-
version definitions.
"""
import collections
import datetime
import importlib
import logging
import os
import pickle
import sys
import time
import warnings
from functools import wraps
import numpy as np
import pandas as pd
from scipy import interpolate
from cellpy.exceptions import NullData
from cellpy.parameters import prms
from cellpy.parameters.internal_settings import (
ATTRS_CELLPYFILE,
cellpy_limits,
cellpy_units,
get_headers_normal,
get_headers_step_table,
get_headers_summary,
)
CELLPY_FILE_VERSION = 6
MINIMUM_CELLPY_FILE_VERSION = 4
STEP_TABLE_VERSION = 5
RAW_TABLE_VERSION = 5
SUMMARY_TABLE_VERSION = 5
PICKLE_PROTOCOL = 4
HEADERS_NORMAL = get_headers_normal()
HEADERS_SUMMARY = get_headers_summary()
HEADERS_STEP_TABLE = get_headers_step_table()
# https://stackoverflow.com/questions/60067953/
# 'is-it-possible-to-specify-the-pickle-protocol-when-writing-pandas-to-hdf5
class PickleProtocol:
    """Context manager that temporarily pins ``pickle.HIGHEST_PROTOCOL``.

    On entry the pickle module is reloaded (restoring pristine module
    state) and HIGHEST_PROTOCOL is set to the requested level; on exit the
    module is reloaded again and the value captured at construction time
    is restored.
    """
    def __init__(self, level):
        self.level = level
        self.previous = pickle.HIGHEST_PROTOCOL
    def __enter__(self):
        importlib.reload(pickle)
        pickle.HIGHEST_PROTOCOL = self.level
    def __exit__(self, *exc):
        importlib.reload(pickle)
        pickle.HIGHEST_PROTOCOL = self.previous
def pickle_protocol(level):
    """Return a context manager pinning pickle.HIGHEST_PROTOCOL to *level*."""
    return PickleProtocol(level)
class FileID(object):
    """Class for storing information about the raw-data files.
    This class is used for storing and handling raw-data file information.
    It is important to keep track of when the data was extracted from the
    raw-data files so that it is easy to know if the hdf5-files used for
    storing "treated" data are up-to-date.
    Attributes:
        name (str): Filename of the raw-data file.
        full_name (str): Filename including path of the raw-data file.
        size (float): Size of the raw-data file.
        last_modified (datetime): Last time of modification of the raw-data
            file.
        last_accessed (datetime): last time of access of the raw-data file.
        last_info_changed (datetime): st_ctime of the raw-data file.
        location (str): Location of the raw-data file.
    """
    def __init__(self, filename=None):
        """Create a FileID, optionally populated from *filename*.
        Args:
            filename (str): path to the raw-data file; if None or not an
                existing file, all attributes get their default values.
        """
        # Defaults; overwritten below when the file exists on disk
        self.name = None
        self.full_name = None
        self.size = 0
        self.last_modified = None
        self.last_accessed = None
        self.last_info_changed = None
        self.location = None
        self._last_data_point = 0  # to be used later when updating is implemented
        if filename and os.path.isfile(filename):
            # Reuse populate() instead of duplicating the stat logic here
            self.populate(filename)
    def __str__(self):
        txt = "\n<fileID>\n"
        txt += f"full name: {self.full_name}\n"
        txt += f"name: {self.name}\n"
        txt += f"location: {self.location}\n"
        if self.last_modified is not None:
            txt += f"modified: {self.last_modified}\n"
        else:
            txt += "modified: NAN\n"
        if self.size is not None:
            txt += f"size: {self.size}\n"
        else:
            txt += "size: NAN\n"
        txt += f"last data point: {self.last_data_point}\n"
        return txt
    @property
    def last_data_point(self):
        # TODO: consider including a method here to find the last data point (raw data)
        # ideally, this value should be set when loading the raw data before
        # merging files (if it consists of several files)
        return self._last_data_point
    @last_data_point.setter
    def last_data_point(self, value):
        self._last_data_point = value
    def populate(self, filename):
        """Finds the file-stats and populates the class with stat values.
        Args:
            filename (str): name of the file.
        """
        if os.path.isfile(filename):
            fid_st = os.stat(filename)
            self.name = os.path.abspath(filename)
            self.full_name = filename
            self.size = fid_st.st_size
            self.last_modified = fid_st.st_mtime
            self.last_accessed = fid_st.st_atime
            self.last_info_changed = fid_st.st_ctime
            self.location = os.path.dirname(filename)
    def get_raw(self):
        """Get a list with information about the file.
        The returned list contains name, size, last_modified and location.
        """
        return [self.name, self.size, self.last_modified, self.location]
    def get_name(self):
        """Get the filename."""
        return self.name
    def get_size(self):
        """Get the size of the file."""
        return self.size
    def get_last(self):
        """Get last modification time of the file."""
        return self.last_modified
class Cell(object):
    """Object to store data for a test.
    This class is used for storing all the relevant data for a 'run', i.e. all
    the data collected by the tester as stored in the raw-files.
    Attributes:
        cell_no (int): cell number.
        mass (float): mass of active electrode material [mg].
        raw (pandas.DataFrame): contains the experimental data points.
        summary (pandas.DataFrame): contains summary of the data pr. cycle.
        steps (pandas.DataFrame): information for each step, used for
            defining type of step (charge, discharge, etc.)
    """
    def _repr_html_(self):
        # Rich representation for Jupyter: meta-data attributes first, then
        # describe()/head() views of the summary, steps and raw data-frames
        obj = f"<b>Cell-object</b> id={hex(id(self))}"
        txt = "<p>"
        for p in dir(self):
            if not p.startswith("_"):
                if p not in ["raw", "summary", "steps", "logger"]:
                    value = self.__getattribute__(p)
                    txt += f"<b>{p}</b>: {value}<br>"
        txt += "</p>"
        try:
            raw_txt = f"<p><b>raw data-frame (summary)</b><br>{self.raw.describe()._repr_html_()}</p>"
            raw_txt += f"<p><b>raw data-frame (head)</b><br>{self.raw.head()._repr_html_()}</p>"
        except AttributeError:
            raw_txt = "<p><b>raw data-frame </b><br> not found!</p>"
        except ValueError:
            raw_txt = "<p><b>raw data-frame </b><br> does not contain any columns!</p>"
        try:
            summary_txt = f"<p><b>summary data-frame (summary)</b><br>{self.summary.describe()._repr_html_()}</p>"
            summary_txt += f"<p><b>summary data-frame (head)</b><br>{self.summary.head()._repr_html_()}</p>"
        except AttributeError:
            summary_txt = "<p><b>summary data-frame </b><br> not found!</p>"
        except ValueError:
            summary_txt = (
                "<p><b>summary data-frame </b><br> does not contain any columns!</p>"
            )
        try:
            steps_txt = f"<p><b>steps data-frame (summary)</b><br>{self.steps.describe()._repr_html_()}</p>"
            steps_txt += f"<p><b>steps data-frame (head)</b><br>{self.steps.head()._repr_html_()}</p>"
        except AttributeError:
            steps_txt = "<p><b>steps data-frame </b><br> not found!</p>"
        except ValueError:
            steps_txt = (
                "<p><b>steps data-frame </b><br> does not contain any columns!</p>"
            )
        return obj + txt + summary_txt + steps_txt + raw_txt
    def __init__(self, **kwargs):
        """Create a Cell; keyword arguments matching existing attribute
        names override the defaults set below."""
        self.logger = logging.getLogger(__name__)
        self.logger.debug("created DataSet instance")
        # meta-data
        self.cell_no = None
        self.mass = prms.Materials.default_mass  # active material (in mg)
        self.tot_mass = prms.Materials.default_mass  # total material (in mg)
        self.no_cycles = 0.0
        self.charge_steps = None
        self.discharge_steps = None
        self.ir_steps = None
        self.ocv_steps = None
        self.nom_cap = prms.DataSet.nom_cap  # mAh/g (for finding c-rates)
        self.mass_given = False
        self.material = prms.Materials.default_material
        self.merged = False
        self.file_errors = None  # not in use at the moment
        self.loaded_from = None  # loaded from (can be list if merged)
        self.channel_index = None
        self.channel_number = None
        self.creator = None
        self.item_ID = None
        self.schedule_file_name = None
        self.start_datetime = None
        self.test_ID = None
        self.name = None
        # new meta data
        self.cycle_mode = prms.Reader.cycle_mode
        self.active_electrode_area = None  # [cm2]
        self.active_electrode_thickness = None  # [micron]
        self.electrolyte_type = None  #
        self.electrolyte_volume = None  # [micro-liter]
        self.active_electrode_type = None
        self.counter_electrode_type = None
        self.reference_electrode_type = None
        self.experiment_type = None
        self.cell_type = None
        self.separator_type = None
        self.active_electrode_current_collector = None
        self.reference_electrode_current_collector = None
        self.comment = None
        # custom meta-data: only attributes already defined above can be
        # overridden; unknown keyword arguments are silently ignored
        for k in kwargs:
            if hasattr(self, k):
                setattr(self, k, kwargs[k])
        # methods in CellpyData to update if adding new attributes:
        # ATTRS_CELLPYFILE
        # place to put "checks" etc:
        # _extract_meta_from_cellpy_file
        # _create_infotable()
        self.raw_data_files = []
        self.raw_data_files_length = []
        self.raw_units = cellpy_units
        self.raw_limits = cellpy_limits
        # self.data = collections.OrderedDict()  # not used
        # self.summary = collections.OrderedDict()  # not used
        self.raw = pd.DataFrame()
        self.summary = pd.DataFrame()
        # self.summary_made = False  # Should be removed
        self.steps = pd.DataFrame()  # is this used? - check!
        # self.step_table_made = False  # Should be removed
        # self.parameter_table = collections.OrderedDict()
        self.summary_table_version = SUMMARY_TABLE_VERSION
        self.step_table_version = STEP_TABLE_VERSION
        self.cellpy_file_version = CELLPY_FILE_VERSION
        self.raw_table_version = RAW_TABLE_VERSION
    @staticmethod
    def _header_str(hdr):
        # Render a centered section header used by __str__
        txt = "\n"
        txt += 80 * "-" + "\n"
        txt += f" {hdr} ".center(80) + "\n"
        txt += 80 * "-" + "\n"
        return txt
    def __str__(self):
        txt = "<DataSet>\n"
        txt += "loaded from file\n"
        if isinstance(self.loaded_from, (list, tuple)):
            for f in self.loaded_from:
                txt += str(f)
                txt += "\n"
        else:
            txt += str(self.loaded_from)
            txt += "\n"
        txt += "\n* GLOBAL\n"
        txt += f"material: {self.material}\n"
        txt += f"mass (active): {self.mass}\n"
        txt += f"test ID: {self.test_ID}\n"
        txt += f"mass (total): {self.tot_mass}\n"
        txt += f"nominal capacity: {self.nom_cap}\n"
        txt += f"channel index: {self.channel_index}\n"
        txt += f"DataSet name: {self.name}\n"
        txt += f"creator: {self.creator}\n"
        txt += f"schedule file name: {self.schedule_file_name}\n"
        try:
            if self.start_datetime:
                start_datetime_str = xldate_as_datetime(self.start_datetime)
            else:
                start_datetime_str = "Not given"
        except AttributeError:
            start_datetime_str = "NOT READABLE YET"
        txt += f"start-date: {start_datetime_str}\n"
        txt += self._header_str("DATA")
        try:
            txt += str(self.raw.describe())
        except (AttributeError, ValueError):
            txt += "EMPTY (Not processed yet)\n"
        txt += self._header_str("SUMMARY")
        try:
            txt += str(self.summary.describe())
        except (AttributeError, ValueError):
            txt += "EMPTY (Not processed yet)\n"
        txt += self._header_str("STEP TABLE")
        try:
            txt += str(self.steps.describe())
            txt += str(self.steps.head())
        except (AttributeError, ValueError):
            txt += "EMPTY (Not processed yet)\n"
        txt += self._header_str("RAW UNITS")
        try:
            # NOTE(review): this section prints the raw data-frame under the
            # "RAW UNITS" header - should it show self.raw_units instead?
            txt += str(self.raw.describe())
            txt += str(self.raw.head())
        except (AttributeError, ValueError):
            txt += "EMPTY (Not processed yet)\n"
        return txt
    @property
    def summary_made(self):
        """check if the summary table exists"""
        try:
            empty = self.summary.empty
        except AttributeError:
            empty = True
        return not empty
    @property
    def steps_made(self):
        """check if the step table exists"""
        try:
            empty = self.steps.empty
        except AttributeError:
            empty = True
        return not empty
    @property
    def no_data(self):
        # TODO: @jepe should consider renaming this to be in-line with "steps_made" etc. (or renaming steps_made and
        # summary_made to e.g. no_steps, no_summary)
        try:
            empty = self.raw.empty
        except AttributeError:
            empty = True
        return empty
def identify_last_data_point(data):
    """Find the last data point and store it in the fid instance"""
    logging.debug("searching for last data point")
    hdr = HEADERS_NORMAL.data_point_txt
    try:
        raw = data.raw
        if hdr in raw.columns:
            candidate = raw[hdr].max()
        else:
            candidate = raw.index.max()
    except AttributeError:
        logging.debug("AttributeError - setting last data point to 0")
        candidate = 0
    # Guard against NaN or non-positive values (NaN comparisons are False)
    last_data_point = candidate if candidate > 0 else 0
    data.raw_data_files[0].last_data_point = last_data_point
    logging.debug(f"last data point: {last_data_point}")
    return data
def check64bit(current_system="python"):
"""checks if you are on a 64 bit platform"""
if current_system == "python":
return sys.maxsize > 2147483647
elif current_system == "os":
import platform
pm = platform.machine()
if pm != ".." and pm.endswith("64"): # recent Python (not Iron)
return True
else:
if "PROCESSOR_ARCHITEW6432" in os.environ:
return True # 32 bit program running on 64 bit Windows
try:
# 64 bit Windows 64 bit program
return os.environ["PROCESSOR_ARCHITECTURE"].endswith("64")
except IndexError:
pass # not Windows
try:
# this often works in Linux
return "64" in platform.architecture()[0]
except Exception:
# is an older version of Python, assume also an older os@
# (best we can guess)
return False
def humanize_bytes(b, precision=1):
    """Return a humanized string representation of a number of b.

    Args:
        b (int): number of bytes.
        precision (int): number of decimals in the formatted value.

    Returns:
        str: e.g. "1.5 kB" for b=1536.
    """
    abbrevs = (
        (1 << 50, "PB"),
        (1 << 40, "TB"),
        (1 << 30, "GB"),
        (1 << 20, "MB"),
        (1 << 10, "kB"),
        (1, "b"),
    )
    if b == 1:
        return "1 byte"
    for factor, suffix in abbrevs:
        if b >= factor:
            break
    # Bug fix: use true division; floor division (b // factor) always
    # produced ".0" fractions and made the precision argument meaningless
    return "%.*f %s" % (precision, b / factor, suffix)
def xldate_as_datetime(xldate, datemode=0, option="to_datetime"):
    """Converts a xls date stamp to a more sensible format.
    Args:
        xldate (str): date stamp in Excel format.
        datemode (int): 0 for 1900-based, 1 for 1904-based.
        option (str): option in ("to_datetime", "to_float", "to_string"),
            return value
    Returns:
        datetime (datetime object, float, or string).
    """
    # This does not work for numpy-arrays
    if option == "to_float":
        # Bug fix: the Unix epoch (1970-01-01) is Excel serial 25569 in the
        # 1900 date system (epoch 1899-12-30), not 25589; the old constant
        # made this branch inconsistent with the to_datetime branch below
        d = (xldate - 25569) * 86400.0
    else:
        try:
            d = datetime.datetime(1899, 12, 30) + datetime.timedelta(
                days=xldate + 1462 * datemode
            )
            # date_format = "%Y-%m-%d %H:%M:%S:%f" # with microseconds,
            # excel cannot cope with this!
            if option == "to_string":
                date_format = "%Y-%m-%d %H:%M:%S"  # without microseconds
                d = d.strftime(date_format)
        except TypeError:
            logging.info(f"The date is not of correct type [{xldate}]")
            d = xldate
    return d
def convert_to_mAhg(c, mass=1.0):
    """Convert capacity in Ah to specific capacity in mAh/g.

    Args:
        c (float or numpy array): capacity in Ah.
        mass (float): mass in mg.

    Returns:
        float: 1000000 * c / mass
    """
    scale_factor = 1_000_000
    return scale_factor * c / mass
def collect_ocv_curves():
    """Collect open-circuit-voltage curves (not implemented yet)."""
    raise NotImplementedError
def collect_capacity_curves(
    data,
    direction="charge",
    trim_taper_steps=None,
    steps_to_skip=None,
    steptable=None,
    max_cycle_number=None,
    **kwargs,
):
    """Create a list of pandas.DataFrames, one for each charge step.
    The DataFrames are named by its cycle number.
    Input: CellpyData
    Returns: list of pandas.DataFrames,
        list of cycle numbers,
        minimum voltage value,
        maximum voltage value"""
    # TODO: should allow for giving cycle numbers as input (e.g. cycle=[1, 2, 10]
    # or cycle=2), not only max_cycle_number
    minimum_v_value = np.Inf
    maximum_v_value = -np.Inf
    charge_list = []
    # An explicit cycle list may be given via the 'cycle' keyword argument;
    # otherwise all cycle numbers in the data set are used
    cycles = kwargs.pop("cycle", None)
    if cycles is None:
        cycles = data.get_cycle_numbers()
    if max_cycle_number is None:
        max_cycle_number = max(cycles)
    for cycle in cycles:
        # NOTE(review): 'break' assumes cycles is sorted ascending - confirm
        if cycle > max_cycle_number:
            break
        try:
            if direction == "charge":
                q, v = data.get_ccap(
                    cycle,
                    trim_taper_steps=trim_taper_steps,
                    steps_to_skip=steps_to_skip,
                    steptable=steptable,
                )
            else:
                q, v = data.get_dcap(
                    cycle,
                    trim_taper_steps=trim_taper_steps,
                    steps_to_skip=steps_to_skip,
                    steptable=steptable,
                )
        except NullData as e:
            # Keep the result aligned with the cycle numbers by appending
            # an empty frame for cycles without data
            logging.warning(e)
            d = pd.DataFrame()
            d.name = cycle
            charge_list.append(d)
        else:
            d = pd.DataFrame({"q": q, "v": v})
            # d.name = f"{cycle}"
            d.name = cycle
            charge_list.append(d)
            # Track the global voltage range across all collected cycles
            v_min = v.min()
            v_max = v.max()
            if v_min < minimum_v_value:
                minimum_v_value = v_min
            if v_max > maximum_v_value:
                maximum_v_value = v_max
    return charge_list, cycles, minimum_v_value, maximum_v_value
def interpolate_y_on_x(
    df,
    x=None,
    y=None,
    new_x=None,
    dx=10.0,
    number_of_points=None,
    direction=1,
    **kwargs,
):
    """Interpolate a column based on another column.
    Args:
        df: DataFrame with the (cycle) data.
        x: Column name for the x-value (defaults to the first column).
        y: Column name for the y-value (defaults to the second column).
        new_x (numpy array or None): Interpolate using these new x-values
            instead of generating x-values based on dx or number_of_points.
        dx: step-value (defaults to 10.0)
        number_of_points: number of points for interpolated values (use
            instead of dx and overrides dx if given).
        direction (-1,1): if direction is negative, then invert the
            x-values before interpolating.
        **kwargs: arguments passed to scipy.interpolate.interp1d
    Returns: DataFrame with interpolated y-values based on given or
        generated x-values.
    """
    # TODO: allow for giving a fixed interpolation range (x-values).
    #   Remember to treat extrapolation properly (e.g. replace with NaN?).
    x = df.columns[0] if x is None else x
    y = df.columns[1] if y is None else y
    x_data = df[x].values
    y_data = df[y].values
    if direction > 0:
        start, stop, step = x_data.min(), x_data.max(), dx
    else:
        # Reverse the generated range (and the step) for descending data
        start, stop, step = x_data.max(), x_data.min(), -dx
    # Out-of-range x-values yield NaN by default instead of raising
    bounds_error = kwargs.pop("bounds_error", False)
    interpolator = interpolate.interp1d(
        x_data, y_data, bounds_error=bounds_error, **kwargs
    )
    if new_x is None:
        if number_of_points:
            new_x = np.linspace(start, stop, number_of_points)
        else:
            new_x = np.arange(start, stop, step)
    return pd.DataFrame({x: new_x, y: interpolator(new_x)})
def group_by_interpolate(
    df,
    x=None,
    y=None,
    group_by=None,
    number_of_points=100,
    tidy=False,
    individual_x_cols=False,
    header_name="Unit",
    dx=10.0,
    generate_new_x=True,
):
    """Do a pandas.DataFrame.group_by and perform interpolation for all groups.
    This function is a wrapper around an internal interpolation function in
    cellpy (that uses scipy.interpolate.interp1d) that combines doing a group-by
    operation and interpolation.
    Args:
        df (pandas.DataFrame): the dataframe to morph.
        x (str): the header for the x-value
            (defaults to normal header step_time_txt) (remark that the default
            group_by column is the cycle column, and each cycle normally
            consist of several steps (so you risk interpolating / merging
            several curves on top of each other (not good)).
        y (str): the header for the y-value
            (defaults to normal header voltage_txt).
        group_by (str): the header to group by
            (defaults to normal header cycle_index_txt)
        number_of_points (int): if generating new x-column, how many values it
            should contain.
        tidy (bool): return the result in tidy (i.e. long) format.
        individual_x_cols (bool): return as xy xy xy ... data.
        header_name (str): name for the second level of the columns (only
            applies for xy xy xy ... data) (defaults to "Unit").
        dx (float): if generating new x-column and number_of_points is None or
            zero, distance between the generated values.
        generate_new_x (bool): create a new x-column by
            using the x-min and x-max values from the original dataframe where
            the method is set by the number_of_points key-word:
            1) if number_of_points is not None (default is 100):
                ```
                new_x = np.linspace(x_max, x_min, number_of_points)
                ```
            2) else:
                ```
                new_x = np.arange(x_max, x_min, dx)
                ```
    Returns: pandas.DataFrame with interpolated x- and y-values. The returned
        dataframe is in tidy (long) format for tidy=True.
    """
    # TODO: @jepe - create more tests
    time_00 = time.time()
    # Default headers: step time vs voltage, grouped per cycle
    if x is None:
        x = HEADERS_NORMAL.step_time_txt
    if y is None:
        y = HEADERS_NORMAL.voltage_txt
    if group_by is None:
        group_by = [HEADERS_NORMAL.cycle_index_txt]
    if not isinstance(group_by, (list, tuple)):
        group_by = [group_by]
    if not generate_new_x:
        # check if it makes sense
        if (not tidy) and (not individual_x_cols):
            # The wide pivot output below requires a shared x-axis, so a
            # common x-column must be generated in this case
            logging.warning("Unlogical condition")
            generate_new_x = True
    new_x = None
    if generate_new_x:
        # Note: the generated axis runs from x_max down to x_min
        x_max = df[x].max()
        x_min = df[x].min()
        if number_of_points:
            new_x = np.linspace(x_max, x_min, number_of_points)
        else:
            new_x = np.arange(x_max, x_min, dx)
    new_dfs = []
    keys = []
    for name, group in df.groupby(group_by):
        keys.append(name)
        if not isinstance(name, (list, tuple)):
            name = [name]
        # Interpolate each group onto the (shared or per-group) x-values
        new_group = interpolate_y_on_x(
            group, x=x, y=y, new_x=new_x, number_of_points=number_of_points, dx=dx
        )
        if tidy or (not tidy and not individual_x_cols):
            # Re-attach the group-by key columns (needed for pivoting and
            # for the tidy output)
            for i, j in zip(group_by, name):
                new_group[i] = j
        new_dfs.append(new_group)
    if tidy:
        new_df = pd.concat(new_dfs)
    else:
        if individual_x_cols:
            # Wide xy-xy-xy layout with a two-level column index
            new_df = pd.concat(new_dfs, axis=1, keys=keys)
            group_by.append(header_name)
            new_df.columns.names = group_by
        else:
            # Wide layout: one column per group, indexed by the x-values
            new_df = pd.concat(new_dfs)
            new_df = new_df.pivot(index=x, columns=group_by[0], values=y)
    time_01 = time.time() - time_00
    logging.debug(f"duration: {time_01} seconds")
    return new_df
| |
"""Provide add-on management."""
from __future__ import annotations
import asyncio
from dataclasses import dataclass
from enum import Enum
from functools import partial, wraps
from typing import Any, Callable, TypeVar, cast
from homeassistant.components.hassio import (
async_create_backup,
async_get_addon_discovery_info,
async_get_addon_info,
async_install_addon,
async_restart_addon,
async_set_addon_options,
async_start_addon,
async_stop_addon,
async_uninstall_addon,
async_update_addon,
)
from homeassistant.components.hassio.handler import HassioAPIError
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.singleton import singleton
from .const import ADDON_SLUG, CONF_ADDON_DEVICE, CONF_ADDON_NETWORK_KEY, DOMAIN, LOGGER
F = TypeVar("F", bound=Callable[..., Any]) # pylint: disable=invalid-name
DATA_ADDON_MANAGER = f"{DOMAIN}_addon_manager"
@singleton(DATA_ADDON_MANAGER)
@callback
def get_addon_manager(hass: HomeAssistant) -> AddonManager:
    """Get the add-on manager.
    The singleton decorator stores the instance under DATA_ADDON_MANAGER so
    only one AddonManager exists per Home Assistant instance.
    """
    return AddonManager(hass)
def api_error(error_message: str) -> Callable[[F], F]:
    """Handle HassioAPIError and raise a specific AddonError.

    Decorator factory: the wrapped coroutine re-raises any HassioAPIError
    as AddonError, prefixed with *error_message*.
    """

    def handle_hassio_api_error(func: F) -> F:
        """Handle a HassioAPIError."""

        # functools.wraps preserves the wrapped method's name/docstring,
        # which the bare wrapper previously clobbered
        @wraps(func)
        async def wrapper(*args, **kwargs):  # type: ignore
            """Wrap an add-on manager method."""
            try:
                return_value = await func(*args, **kwargs)
            except HassioAPIError as err:
                raise AddonError(f"{error_message}: {err}") from err
            return return_value

        return cast(F, wrapper)

    return handle_hassio_api_error
@dataclass
class AddonInfo:
    """Represent the current add-on info state."""
    # Current add-on configuration options
    options: dict[str, Any]
    # Derived state (see AddonManager.async_get_addon_state)
    state: AddonState
    # True when a newer add-on version is available
    update_available: bool
    # Installed version, or None if the add-on is not installed
    version: str | None
class AddonState(Enum):
    """Represent the current state of the add-on."""
    # INSTALLING/UPDATING reflect in-flight manager tasks; the other
    # states are derived from the supervisor's add-on info
    NOT_INSTALLED = "not_installed"
    INSTALLING = "installing"
    UPDATING = "updating"
    NOT_RUNNING = "not_running"
    RUNNING = "running"
class AddonManager:
"""Manage the add-on.
Methods may raise AddonError.
Only one instance of this class may exist
to keep track of running add-on tasks.
"""
    def __init__(self, hass: HomeAssistant) -> None:
        """Set up the add-on manager."""
        self._hass = hass
        # Handles to currently scheduled add-on operations; at most one of
        # each kind is in flight at a time (see the async_schedule_* methods)
        self._install_task: asyncio.Task | None = None
        self._restart_task: asyncio.Task | None = None
        self._start_task: asyncio.Task | None = None
        self._update_task: asyncio.Task | None = None
def task_in_progress(self) -> bool:
"""Return True if any of the add-on tasks are in progress."""
return any(
task and not task.done()
for task in (
self._install_task,
self._start_task,
self._update_task,
)
)
@api_error("Failed to get Z-Wave JS add-on discovery info")
async def async_get_addon_discovery_info(self) -> dict:
"""Return add-on discovery info."""
discovery_info = await async_get_addon_discovery_info(self._hass, ADDON_SLUG)
if not discovery_info:
raise AddonError("Failed to get Z-Wave JS add-on discovery info")
discovery_info_config: dict = discovery_info["config"]
return discovery_info_config
@api_error("Failed to get the Z-Wave JS add-on info")
async def async_get_addon_info(self) -> AddonInfo:
"""Return and cache Z-Wave JS add-on info."""
addon_info: dict = await async_get_addon_info(self._hass, ADDON_SLUG)
addon_state = self.async_get_addon_state(addon_info)
return AddonInfo(
options=addon_info["options"],
state=addon_state,
update_available=addon_info["update_available"],
version=addon_info["version"],
)
    @callback
    def async_get_addon_state(self, addon_info: dict[str, Any]) -> AddonState:
        """Return the current state of the Z-Wave JS add-on.
        Derived from the supervisor info dict, then overridden by any
        in-flight install/update task (later checks take precedence).
        """
        addon_state = AddonState.NOT_INSTALLED
        if addon_info["version"] is not None:
            addon_state = AddonState.NOT_RUNNING
            if addon_info["state"] == "started":
                addon_state = AddonState.RUNNING
        if self._install_task and not self._install_task.done():
            addon_state = AddonState.INSTALLING
        if self._update_task and not self._update_task.done():
            addon_state = AddonState.UPDATING
        return addon_state
@api_error("Failed to set the Z-Wave JS add-on options")
async def async_set_addon_options(self, config: dict) -> None:
"""Set Z-Wave JS add-on options."""
options = {"options": config}
await async_set_addon_options(self._hass, ADDON_SLUG, options)
@api_error("Failed to install the Z-Wave JS add-on")
async def async_install_addon(self) -> None:
"""Install the Z-Wave JS add-on."""
await async_install_addon(self._hass, ADDON_SLUG)
@callback
def async_schedule_install_addon(self, catch_error: bool = False) -> asyncio.Task:
"""Schedule a task that installs the Z-Wave JS add-on.
Only schedule a new install task if the there's no running task.
"""
if not self._install_task or self._install_task.done():
LOGGER.info("Z-Wave JS add-on is not installed. Installing add-on")
self._install_task = self._async_schedule_addon_operation(
self.async_install_addon, catch_error=catch_error
)
return self._install_task
@callback
def async_schedule_install_setup_addon(
self, usb_path: str, network_key: str, catch_error: bool = False
) -> asyncio.Task:
"""Schedule a task that installs and sets up the Z-Wave JS add-on.
Only schedule a new install task if the there's no running task.
"""
if not self._install_task or self._install_task.done():
LOGGER.info("Z-Wave JS add-on is not installed. Installing add-on")
self._install_task = self._async_schedule_addon_operation(
self.async_install_addon,
partial(self.async_configure_addon, usb_path, network_key),
self.async_start_addon,
catch_error=catch_error,
)
return self._install_task
@api_error("Failed to uninstall the Z-Wave JS add-on")
async def async_uninstall_addon(self) -> None:
"""Uninstall the Z-Wave JS add-on."""
await async_uninstall_addon(self._hass, ADDON_SLUG)
@api_error("Failed to update the Z-Wave JS add-on")
async def async_update_addon(self) -> None:
"""Update the Z-Wave JS add-on if needed."""
addon_info = await self.async_get_addon_info()
if addon_info.version is None:
raise AddonError("Z-Wave JS add-on is not installed")
if not addon_info.update_available:
return
await self.async_create_backup()
await async_update_addon(self._hass, ADDON_SLUG)
@callback
def async_schedule_update_addon(self, catch_error: bool = False) -> asyncio.Task:
"""Schedule a task that updates and sets up the Z-Wave JS add-on.
Only schedule a new update task if the there's no running task.
"""
if not self._update_task or self._update_task.done():
LOGGER.info("Trying to update the Z-Wave JS add-on")
self._update_task = self._async_schedule_addon_operation(
self.async_update_addon,
catch_error=catch_error,
)
return self._update_task
@api_error("Failed to start the Z-Wave JS add-on")
async def async_start_addon(self) -> None:
"""Start the Z-Wave JS add-on."""
await async_start_addon(self._hass, ADDON_SLUG)
@api_error("Failed to restart the Z-Wave JS add-on")
async def async_restart_addon(self) -> None:
"""Restart the Z-Wave JS add-on."""
await async_restart_addon(self._hass, ADDON_SLUG)
@callback
def async_schedule_start_addon(self, catch_error: bool = False) -> asyncio.Task:
"""Schedule a task that starts the Z-Wave JS add-on.
Only schedule a new start task if the there's no running task.
"""
if not self._start_task or self._start_task.done():
LOGGER.info("Z-Wave JS add-on is not running. Starting add-on")
self._start_task = self._async_schedule_addon_operation(
self.async_start_addon, catch_error=catch_error
)
return self._start_task
@callback
def async_schedule_restart_addon(self, catch_error: bool = False) -> asyncio.Task:
"""Schedule a task that restarts the Z-Wave JS add-on.
Only schedule a new restart task if the there's no running task.
"""
if not self._restart_task or self._restart_task.done():
LOGGER.info("Restarting Z-Wave JS add-on")
self._restart_task = self._async_schedule_addon_operation(
self.async_restart_addon, catch_error=catch_error
)
return self._restart_task
@api_error("Failed to stop the Z-Wave JS add-on")
async def async_stop_addon(self) -> None:
"""Stop the Z-Wave JS add-on."""
await async_stop_addon(self._hass, ADDON_SLUG)
async def async_configure_addon(self, usb_path: str, network_key: str) -> None:
"""Configure and start Z-Wave JS add-on."""
addon_info = await self.async_get_addon_info()
new_addon_options = {
CONF_ADDON_DEVICE: usb_path,
CONF_ADDON_NETWORK_KEY: network_key,
}
if new_addon_options != addon_info.options:
await self.async_set_addon_options(new_addon_options)
@callback
def async_schedule_setup_addon(
self, usb_path: str, network_key: str, catch_error: bool = False
) -> asyncio.Task:
"""Schedule a task that configures and starts the Z-Wave JS add-on.
Only schedule a new setup task if the there's no running task.
"""
if not self._start_task or self._start_task.done():
LOGGER.info("Z-Wave JS add-on is not running. Starting add-on")
self._start_task = self._async_schedule_addon_operation(
partial(self.async_configure_addon, usb_path, network_key),
self.async_start_addon,
catch_error=catch_error,
)
return self._start_task
@api_error("Failed to create a backup of the Z-Wave JS add-on.")
async def async_create_backup(self) -> None:
"""Create a partial backup of the Z-Wave JS add-on."""
addon_info = await self.async_get_addon_info()
name = f"addon_{ADDON_SLUG}_{addon_info.version}"
LOGGER.debug("Creating backup: %s", name)
await async_create_backup(
self._hass,
{"name": name, "addons": [ADDON_SLUG]},
partial=True,
)
@callback
def _async_schedule_addon_operation(
self, *funcs: Callable, catch_error: bool = False
) -> asyncio.Task:
"""Schedule an add-on task."""
async def addon_operation() -> None:
"""Do the add-on operation and catch AddonError."""
for func in funcs:
try:
await func()
except AddonError as err:
if not catch_error:
raise
LOGGER.error(err)
break
return self._hass.async_create_task(addon_operation())
class AddonError(HomeAssistantError):
    """Represent an error with Z-Wave JS add-on.

    Raised when a Supervisor add-on API call fails (wrapped by the
    api_error decorator) or when discovery info is missing.
    """
| |
from django.utils import timezone
from fbmbot.models import Item
from fbmbot.utils import post_facebook
from common import States,Commands
from matcher import find_match
from processor.utils import get_url,DEFAULT_IMG_URL
DEFAULT_DESCRIPTION = "Your Item Description"


def fb_helper_btn(title, url, payload, web_url=True):
    """Build a Messenger button dict.

    With web_url=True produce a link button using ``url``; otherwise a
    postback button carrying ``payload``.
    """
    if web_url:
        return {"type": "web_url", "title": title, "url": url}
    return {"type": "postback", "title": title, "payload": payload}
def fb_helper_element(title, item_url="", image_url="", subtitle="", buttons=None):
    """Build a generic-template element dict, skipping empty optional fields."""
    element = {"title": title}
    # Only non-empty strings are included, in the same order as before.
    for key, value in (("image_url", image_url),
                       ("subtitle", subtitle),
                       ("item_url", item_url)):
        if len(value) > 0:
            element[key] = value
    if buttons:
        element["buttons"] = buttons
    return element
def fb_helper_playload_btn(text, buttons=None):
    """Build a button-template payload dict."""
    return dict(template_type="button", text=text, buttons=buttons)
def fb_helper_playload_generic(elements=None):
    """Build a generic-template (carousel) payload dict."""
    return dict(template_type="generic", elements=elements)
def fb_msg(type, payload, notification="REGULAR"):
    """Wrap *payload* into a Messenger message dict for the given message type.

    Unknown types yield None. ``notification`` is accepted for interface
    compatibility but not used here.
    """
    # NOTE: the parameter name "type" shadows the builtin; kept because
    # callers pass it as a keyword argument.
    if type == "text":
        return {"text": payload}
    if type in ("image", "template"):
        inner = {"url": payload} if type == "image" else payload
        return {"attachment": {"type": type, "payload": inner}}
def welcome_msg():
    """Greeting message exposing the two entry-point postback buttons."""
    entry_buttons = [
        fb_helper_btn("Start Trading", "", "btn_start_trade", False),
        fb_helper_btn("How it works", "", "btn_instructions", False),
    ]
    payload = fb_helper_playload_btn(
        "Hi I am Tradeit! Start with any of these buttons", entry_buttons
    )
    return fb_msg("template", payload)
def update_user():
    """Placeholder: no user-update behavior is implemented yet."""
    return 0
def instructions():
    """Three-card carousel explaining how the bot is used."""
    upload_card = fb_helper_element(
        "Upload an Item",
        "",
        "https://scontent-sin1-1.xx.fbcdn.net/v/t34.0-12/13246140_120300000003876160_922088848_n.png?oh=89f8c0de998cc6dee876ea789aaca439&oe=573A0165",
    )
    trade_card = fb_helper_element(
        "Start Trading",
        "",
        "https://scontent-sin1-1.xx.fbcdn.net/v/t34.0-12/13180928_120300000004207234_2003940716_n.png?oh=ac058cf6890d30bf2837a534c82f1e65&oe=5739C174",
    )
    link_buttons = [
        fb_helper_btn("Add item to trade", "", "btn_create_new", False),
        fb_helper_btn("View Inventory", "", "btn_inventory", False),
        fb_helper_btn("Set Location", "", "btn_set_location", False),
    ]
    links_card = fb_helper_element("You can use these links", "", "", "", link_buttons)
    return fb_msg(
        "template",
        fb_helper_playload_generic([upload_card, trade_card, links_card]),
    )
def help():
    """Short text listing the chat commands the bot understands."""
    # NOTE: intentionally shadows the builtin help(); the name is part of
    # the module interface (dispatched from process_for_reply).
    text = (
        "Hey there! Don't worry you can use these cmds:\n"
        "Trade: 'start trading'\n"
        "View inventory: 'inventory'"
    )
    return fb_msg("text", text)
def inventory(user):
    """Carousel of the user's items, or a text fallback when there are none."""
    elements = [
        fb_helper_element(
            item.description,
            "",
            item.image_url,
            "",
            [
                fb_helper_btn(
                    "Trade This", "", "btn_start_trade_{}".format(item.id), False
                ),
                fb_helper_btn(
                    "Edit This", "", "btn_edit_{}".format(item.id), False
                ),
            ],
        )
        for item in user.item_set.all()
    ]
    if not elements:
        return fb_msg("text", "No items so far")
    return fb_msg("template", fb_helper_playload_generic(elements))
def create_item(user, cmd_args):
    """Create a new Item for *user* from an incoming image attachment.

    :param user: BotUser who sent the image
    :param cmd_args: dict holding 'img_url' (Messenger attachment list) and
        optionally 'description'
    :return: one-card template message prompting for a description

    Side effects: persists the Item and puts the user into ADD_NEW state.
    """
    item = Item.objects.create(
        owner=user,
        image_url=cmd_args['img_url'][0]['payload']['url'],
        description=cmd_args.get("description", DEFAULT_DESCRIPTION),
        date_created=timezone.now(),
    )
    user.last_state = States.ADD_NEW
    user.save()
    return fb_msg(
        "template",
        fb_helper_playload_generic(
            [
                fb_helper_element(
                    item.description,
                    "",
                    item.image_url,
                    "Add Item description by replying",
                    [
                        fb_helper_btn(
                            "Cancel",
                            "",
                            # Bug fix: was '"btn_delete_{}",format(item.id)' — a
                            # comma instead of a dot, which passed 5 positional
                            # args to fb_helper_btn and raised TypeError.
                            "btn_delete_{}".format(item.id),
                            False,
                        )
                    ],
                )
            ]
        ),
    )
def edit_item(user, cmd_args):
    """Apply edits (image and/or description) to an item and show an edit card.

    :param user: BotUser owning the item
    :param cmd_args: dict that may carry 'id', an image attachment and 'text'
    :return: one-card template message with Delete/Save (and maybe Trade) buttons
    """
    if cmd_args.get("id"):
        item = Item.objects.get(id=cmd_args.get("id"))
    else:
        # Fall back to the item currently flagged as being edited.
        item = user.item_set.filter(is_editing=True)[0]
    new_url = get_url(cmd_args, use_default=False)
    item.image_url = new_url if new_url else item.image_url
    item.description = cmd_args.get("text", item.description)
    # Bug fix: the function mutated the model instance but never called
    # save(), so the user's edits were silently dropped.
    item.save()
    button_ls = [
        fb_helper_btn("Delete", "", "btn_delete_{}".format(item.id), False),
        # NOTE(review): payload was '"btn_cancel".format(item.id)' — the
        # .format() was a no-op (no placeholder), so the payload is plain
        # "btn_cancel". If a per-item save payload was intended, it should
        # be something like "btn_save_{}" — confirm with the postback handler.
        fb_helper_btn("Save", "", "btn_cancel", False),
    ]
    # Offer trading only once both image and description are customized.
    if item.image_url != DEFAULT_IMG_URL and item.description != DEFAULT_DESCRIPTION:
        button_ls.append(
            fb_helper_btn("Trade This", "", "btn_start_trade_{}".format(item.id), False)
        )
    return fb_msg(
        "template",
        fb_helper_playload_generic(
            [
                fb_helper_element(
                    item.description,
                    "",
                    item.image_url,
                    "Change information by replying with text or image",
                    button_ls,
                )
            ]
        ),
    )
def delete_item(user,cmdargs):
    """Delete an item for *user*. Not implemented yet — currently a no-op."""
    pass
def location_saved():
    """Acknowledge that the user's location was stored."""
    return fb_msg("text", "Your Location is saved as _____")
def change_location():
    """Card showing the current (placeholder) location and asking for a new one."""
    location_card = fb_helper_element(
        "Your Location is _____",
        "",
        "http://static.independent.co.uk/s3fs-public/thumbnails/image/2015/03/08/09/emmawatson.jpg",
        "Send us your location to change it",
    )
    return fb_msg("template", fb_helper_playload_generic([location_card]))
# helper function
def get_match_msg(user, item):
    """One-card template presenting *item* (with *user*'s location) and
    Accept/Reject postback buttons."""
    decision_buttons = [
        fb_helper_btn("Accept", "", "btn_accept", False),
        fb_helper_btn("Reject", "", "btn_reject", False),
    ]
    match_card = fb_helper_element(
        item.description, "", item.image_url, user.location, decision_buttons
    )
    return fb_msg("template", fb_helper_playload_generic([match_card]))
def start_trading(user):
    """Activate the user's current item and look for a trade match.

    On a match, pushes a card (each side seeing the other's item) to both
    users directly via post_facebook(). Otherwise returns a "searching"
    template message.

    NOTE(review): the matched branch implicitly returns None, so the caller
    (process_for_reply) posts nothing back for the triggering request —
    confirm this is intended.
    """
    #if no item create new
    #if id is set use item(id)
    # Assumes at least one active item exists — IndexError otherwise.
    active_item = user.item_set.filter(active=True)[0]
    active_item.is_editing = False
    active_item.save()
    matched_items = find_match(active_item)
    user.last_state = States.START_TRADING
    user.save()
    if (matched_items):
        # Send each party a card showing the other side's item/location.
        matched_item = matched_items[0]
        other_user = matched_item.owner
        msg = get_match_msg(user=other_user,item=matched_item)
        post_facebook(fbid=user.fb_user_id,msg_dict=msg)
        msg = get_match_msg(user=user,item=active_item)
        post_facebook(fbid=other_user.fb_user_id,msg_dict=msg)
    else:
        return fb_msg(type="template",
                      payload=fb_helper_playload_btn(
                          "searching for match...",
                          [
                              fb_helper_btn("Cancel","","btn_cancel",False)
                          ]
                      )
                      )
def waiting():
    """Tell the user we are waiting on the counterparty's decision."""
    cancel_button = fb_helper_btn("Cancel & Reject", "", "btn_reject", False)
    return fb_msg(
        "template",
        fb_helper_playload_btn(
            "Waiting for the other user to response", [cancel_button]
        ),
    )
def rejected():
    """Notify the user their offer was rejected and offer a retry button."""
    retry_button = fb_helper_btn("Find Another", "", "btn_start_trade", False)
    return fb_msg(
        "template",
        fb_helper_playload_btn("The other user rejected :(", [retry_button]),
    )
def cancel_trading(user):
    """Deactivate the user's active (non-deleted) items and reset chat state."""
    for item in user.item_set.filter(active=True, deleted=False):
        item.active = False
        item.save()
    user.last_state = States.STATIC
    user.save()
    return fb_msg("text", "Trading Cancelled...")
def success():
    """Both sides accepted — hand out a link button to start a chat."""
    chat_button = fb_helper_btn(
        "Start Chat", "https://www.facebook.com/messages/", "", True
    )
    return fb_msg(
        "template",
        fb_helper_playload_btn(
            "Congratulations! You can chat with the user with the link",
            [chat_button],
        ),
    )
def delete():
    """Confirm an item deletion to the user."""
    return fb_msg("text", "Item Deleted")
def default():
    """Fallback reply for unrecognized commands."""
    return fb_msg("text", "Wa Ham Zi Tou Ahhhhhh")
def rejecttrade():
    """Confirm to the rejecting user that the offer was declined."""
    #msg other user to tell him that he is rejected
    retry_button = fb_helper_btn("Find Another", "", "btn_start_trade", False)
    return fb_msg(
        "template",
        fb_helper_playload_btn("You rejected the offer", [retry_button]),
    )
def process_for_reply(command, command_args, user, **kwargs):
    """Route *command* to its handler and return the reply message.

    :param command: command string (or Commands constant)
    :param command_args: dict of parsed command arguments
    :param user: BotUser
    :return: a dict as message to be posted back, or None if the handler
        produced nothing to post
    """
    handlers = {
        "welcome": welcome_msg,
        "instructions": instructions,
        "help": help,
        "inventory": lambda: inventory(user),
        "createitem": lambda: create_item(user, command_args),
        "edititem": lambda: edit_item(user, cmd_args=command_args),
        "locationsaved": location_saved,
        "changelocation": change_location,
        "starttrading": lambda: start_trading(user),
        "rejected": rejected,
        "waiting": waiting,
        Commands.CANCEL_TRADING: lambda: cancel_trading(user),
        "success": success,
    }
    # Unknown commands fall through to the default reply.
    return handlers.get(command, default)()
| |
import time, sys, os, copy
import datetime, pprint
import asyncio
import logging as loggingmod
from functools import wraps, partial
from biothings.utils.common import get_timestamp, get_random_string, timesofar, iter_n
from biothings.utils.mongo import get_src_conn, get_src_dump
from biothings.utils.dataload import merge_struct
from biothings.utils.manager import BaseSourceManager, \
ManagerError, ResourceNotFound
from .storage import IgnoreDuplicatedStorage, MergerStorage, \
BasicStorage, NoBatchIgnoreDuplicatedStorage, \
NoStorage
from biothings.utils.loggers import HipchatHandler, get_logger
from biothings import config
logging = config.logger
class ResourceNotReady(Exception):
    """Raised when a resource can't be uploaded yet (missing src_dump info,
    missing data folder, unfinished download, or a job already running)."""
    pass
class ResourceError(Exception):
    """Raised on upload errors (e.g. missing or empty temp collection when
    switching collections)."""
    pass
def upload_worker(name, storage_class, loaddata_func, col_name,
                  batch_size, batch_num, *args):
    """
    Pickable job launcher, typically running from multiprocessing.
    storage_class will instanciate with col_name, the destination
    collection name. loaddata_func is the parsing/loading function,
    called with *args
    """
    try:
        docs = loaddata_func(*args)
        db_storage = storage_class(None, col_name, loggingmod)
        return db_storage.process(docs, batch_size)
    except Exception as e:
        # Log to a per-batch file so failures are traceable per worker.
        batch_logger = get_logger("%s_batch_%s" % (name, batch_num), config.LOG_FOLDER)
        batch_logger.exception(e)
        raise
class DocSourceMaster(dict):
    '''A class to manage various doc data sources.'''
    # TODO: fix this delayed import
    from biothings import config
    # Target collection/database for the src_master documents.
    __collection__ = config.DATA_SRC_MASTER_COLLECTION
    __database__ = config.DATA_SRC_DATABASE
    # presumably mongokit-style document flags — TODO confirm against the ODM in use
    use_dot_notation = True
    use_schemaless = True
    # Expected shape of a master document (see generate_doc_src_master()).
    structure = {
        'name': str,
        'timestamp': datetime.datetime,
    }
class BaseSourceUploader(object):
    '''
    Default datasource uploader. Database storage can be done
    in batch or line by line. Duplicated records aren't not allowed
    '''
    # TODO: fix this delayed import
    from biothings import config
    __database__ = config.DATA_SRC_DATABASE

    # define storage strategy, override in subclass as necessary
    storage_class = BasicStorage

    # Will be override in subclasses
    # name of the resource and collection name used to store data
    # (see regex_name though for exceptions)
    name = None
    # if several resources, this one if the main name,
    # it's also the _id of the resource in src_dump collection
    # if set to None, it will be set to the value of variable "name"
    main_source = None
    # in case resource used split collections (so data is spread accross
    # different colleciton, regex_name should be specified so all those split
    # collections can be found using it (used when selecting mappers for instance)
    regex_name = None

    keep_archive = 10  # number of archived collection to keep. Oldest get dropped first.

    def __init__(self, db_conn_info, data_root, collection_name=None, log_folder=None, *args, **kwargs):
        """db_conn_info is a database connection info tuple (host,port) to fetch/store
        information about the datasource's state data_root is the root folder containing
        all resources. It will generate its own data folder from this point"""
        # non-pickable attributes (see __getattr__, prepare() and unprepare())
        self.init_state()
        self.db_conn_info = db_conn_info
        self.timestamp = datetime.datetime.now()
        self.t0 = time.time()
        # main_source at object level so it's part of pickling data
        # otherwise it won't be set properly when using multiprocessing
        # note: "name" is always defined at class level so pickle knows
        # how to restore it
        self.main_source = self.__class__.main_source or self.__class__.name
        self.src_root_folder = os.path.join(data_root, self.main_source)
        self.log_folder = log_folder or config.LOG_FOLDER
        self.logfile = None
        self.temp_collection_name = None
        self.collection_name = collection_name or self.name
        self.data_folder = None
        self.prepared = False

    @property
    def fullname(self):
        """Return "main_source.name" for sub-resources, else just the name."""
        if self.main_source != self.name:
            name = "%s.%s" % (self.main_source, self.name)
        else:
            name = self.name
        return name

    @classmethod
    def create(klass, db_conn_info, data_root, *args, **kwargs):
        """
        Factory-like method, just return an instance of this uploader
        (used by SourceManager, may be overridden in sub-class to generate
        more than one instance per class, like a true factory.
        This is usefull when a resource is splitted in different collection but the
        data structure doesn't change (it's really just data splitted accros
        multiple collections, usually for parallelization purposes).
        Instead of having actual class for each split collection, factory
        will generate them on-the-fly.
        """
        return klass(db_conn_info, data_root, *args, **kwargs)

    def init_state(self):
        """Initialize the dict holding non-picklable attributes (lazily
        restored through __getattr__/prepare())."""
        self._state = {
            "db": None,
            "conn": None,
            "collection": None,
            "src_dump": None,
            "logger": None,
        }

    def prepare(self, state=None):
        """Sync uploader information with database (or given state dict).

        state=None replaces the previous mutable-default `state={}`;
        behavior is unchanged (an empty dict was falsy too).
        """
        if self.prepared:
            return
        if state:
            # let's be explicit, _state takes what it wants
            for k in self._state:
                self._state[k] = state[k]
            return
        self._state["conn"] = get_src_conn()
        self._state["db"] = self.conn[self.__class__.__database__]
        self._state["collection"] = self.db[self.collection_name]
        self._state["src_dump"] = self.prepare_src_dump()
        self._state["logger"] = self.setup_log()
        self.data_folder = self.src_doc.get("data_folder")
        # flag ready
        self.prepared = True

    def unprepare(self):
        """
        reset anything that's not pickable (so self can be pickled)
        return what's been reset as a dict, so self can be restored
        once pickled
        """
        state = {
            "db": self._state["db"],
            "conn": self._state["conn"],
            "collection": self._state["collection"],
            "src_dump": self._state["src_dump"],
            "logger": self._state["logger"],
        }
        for k in state:
            self._state[k] = None
        self.prepared = False
        return state

    def get_pinfo(self):
        """
        Return dict containing information about the current process
        (used to report in the hub)
        """
        return {"category": "uploader",
                "source": self.fullname,
                "step": "",
                "description": ""}

    def check_ready(self, force=False):
        """Raise ResourceNotReady unless the source is ready to be uploaded.

        force=True skips the download-status and already-running checks.
        """
        if not self.src_doc:
            raise ResourceNotReady("Missing information for source '%s' to start upload" % self.main_source)
        if not self.src_doc.get("data_folder"):
            raise ResourceNotReady("No data folder found for resource '%s'" % self.name)
        if not force and not self.src_doc.get("download", {}).get("status") == "success":
            raise ResourceNotReady("No successful download found for resource '%s'" % self.name)
        if not os.path.exists(self.src_root_folder):
            # Bug fix: the format string has two placeholders but was given a
            # single argument ("% self.name"), raising TypeError instead of
            # the intended ResourceNotReady message.
            raise ResourceNotReady("Data folder '%s' doesn't exist for resource '%s'"
                                   % (self.src_root_folder, self.name))
        job = self.src_doc.get("upload", {}).get("job", {}).get(self.name)
        if not force and job:
            raise ResourceNotReady("Resource '%s' is already being uploaded (job: %s)" % (self.name, job))

    def load_data(self, data_folder):
        """Parse data inside data_folder and return structure ready to be
        inserted in database"""
        raise NotImplementedError("Implement in subclass")

    @classmethod
    def get_mapping(cls):
        """Return ES mapping"""
        # param renamed self->cls (it's a classmethod); call sites are
        # positional so this is interface-compatible
        return {}  # default to nothing...

    def make_temp_collection(self):
        '''Create a temp collection for dataloading, e.g., entrez_geneinfo_INEMO.'''
        if self.temp_collection_name:
            # already set
            return
        self.temp_collection_name = self.collection_name + '_temp_' + get_random_string()
        return self.temp_collection_name

    def clean_archived_collections(self):
        """Drop old archive collections (keeping self.keep_archive newest)
        and all temp collections for this source."""
        # archived collections look like...
        prefix = "%s_archive_" % self.name
        cols = [c for c in self.db.collection_names() if c.startswith(prefix)]
        tmp_prefix = "%s_temp_" % self.name
        tmp_cols = [c for c in self.db.collection_names() if c.startswith(tmp_prefix)]
        # timestamp is what's after _archive_, YYYYMMDD, so we can sort it safely
        cols = sorted(cols, reverse=True)
        to_drop = cols[self.keep_archive:] + tmp_cols
        for colname in to_drop:
            self.logger.info("Cleaning old archive/temp collection '%s'" % colname)
            self.db[colname].drop()

    def switch_collection(self):
        '''after a successful loading, rename temp_collection to regular collection name,
        and renaming existing collection to a temp name for archiving purpose.
        '''
        if self.temp_collection_name and self.db[self.temp_collection_name].count() > 0:
            if self.collection.count() > 0:
                # renaming existing collections
                new_name = '_'.join([self.collection_name, 'archive', get_timestamp(), get_random_string()])
                self.collection.rename(new_name, dropTarget=True)
            self.db[self.temp_collection_name].rename(self.collection_name)
        else:
            raise ResourceError("No temp collection (or it's empty)")

    def post_update_data(self, steps, force, batch_size, job_manager, **kwargs):
        """Override as needed to perform operations after
        data has been uploaded"""
        pass

    @asyncio.coroutine
    def update_data(self, batch_size, job_manager):
        """
        Iterate over load_data() to pull data and store it
        """
        pinfo = self.get_pinfo()
        pinfo["step"] = "update_data"
        got_error = False
        self.unprepare()
        job = yield from job_manager.defer_to_process(
            pinfo,
            partial(
                upload_worker,
                self.fullname,
                self.__class__.storage_class,
                self.load_data,
                self.temp_collection_name,
                batch_size,
                1,  # no batch, just #1
                self.data_folder
            )
        )

        def uploaded(f):
            nonlocal got_error
            # upload_worker returns the stored-document count on success
            if type(f.result()) != int:
                # (message fix: closing parenthesis was missing)
                got_error = Exception("upload error (should have a int as returned value got %s)" % repr(f.result()))

        job.add_done_callback(uploaded)
        yield from job
        if got_error:
            raise got_error
        self.switch_collection()

    def generate_doc_src_master(self):
        """Build the src_master document (name, timestamp, mapping, metadata)."""
        _doc = {"_id": str(self.name),
                # "x and x or y" simplified to the equivalent "x or y"
                "name": self.regex_name or str(self.name),
                "timestamp": datetime.datetime.now()}
        # store mapping
        _doc['mapping'] = self.__class__.get_mapping()
        # type of id being stored in these docs
        if hasattr(self.__class__, '__metadata__'):
            _doc.update(self.__class__.__metadata__)
        return _doc

    def update_master(self):
        """Generate and persist the src_master document."""
        _doc = self.generate_doc_src_master()
        self.save_doc_src_master(_doc)

    def save_doc_src_master(self, _doc):
        """Insert or replace _doc (keyed by _id) in the src_master collection."""
        coll = self.conn[DocSourceMaster.__database__][DocSourceMaster.__collection__]
        dkey = {"_id": _doc["_id"]}
        prev = coll.find_one(dkey)
        if prev:
            coll.replace_one(dkey, _doc)
        else:
            coll.insert_one(_doc)

    def register_status(self, status, **extra):
        """
        Register step status, ie. status for a sub-resource
        """
        upload_info = {"status": status}
        upload_info.update(extra)
        job_key = "upload.jobs.%s" % self.name
        if status == "uploading":
            # record some "in-progress" information
            upload_info['step'] = self.name  # this is the actual collection name
            upload_info['temp_collection'] = self.temp_collection_name
            upload_info['pid'] = os.getpid()
            upload_info['logfile'] = self.logfile
            upload_info['started_at'] = datetime.datetime.now()
            self.src_dump.update_one({"_id": self.main_source}, {"$set": {job_key: upload_info}})
        else:
            # only register time when it's a final state
            # also, keep previous uploading information
            upd = {}
            for k, v in upload_info.items():
                upd["%s.%s" % (job_key, k)] = v
            t1 = round(time.time() - self.t0, 0)
            upd["%s.status" % job_key] = status
            upd["%s.time" % job_key] = timesofar(self.t0)
            upd["%s.time_in_s" % job_key] = t1
            upd["%s.step" % job_key] = self.name  # collection name
            self.src_dump.update_one({"_id": self.main_source}, {"$set": upd})

    @asyncio.coroutine
    def load(self, steps=["data", "post", "master", "clean"], force=False,
             batch_size=10000, job_manager=None, **kwargs):
        """
        Main resource load process, reads data from doc_c using chunk sized as batch_size.
        steps defines the different processes used to laod the resource:
        - "data"   : will store actual data into single collections
        - "post"   : will perform post data load operations
        - "master" : will register the master document in src_master
        - "clean"  : will drop old archive/temp collections
        """
        self.logger.info("Uploading '%s' (collection: %s)" % (self.name, self.collection_name))
        # sanity check before running
        self.check_ready(force)
        # check what to do
        if type(steps) == str:
            steps = steps.split(",")
        update_data = "data" in steps
        update_master = "master" in steps
        post_update_data = "post" in steps
        clean_archives = "clean" in steps
        strargs = "[steps=%s]" % ",".join(steps)
        try:
            if not self.temp_collection_name:
                self.make_temp_collection()
            self.db[self.temp_collection_name].drop()  # drop all existing records just in case.
            self.register_status("uploading")
            if update_data:
                # unsync to make it pickable
                state = self.unprepare()
                yield from self.update_data(batch_size, job_manager, **kwargs)
                self.prepare(state)
            if update_master:
                self.update_master()
            if post_update_data:
                got_error = False
                self.unprepare()
                pinfo = self.get_pinfo()
                pinfo["step"] = "post_update_data"
                f2 = yield from job_manager.defer_to_thread(
                    pinfo,
                    partial(self.post_update_data, steps, force, batch_size, job_manager, **kwargs))

                def postupdated(f):
                    # Bug fix: without "nonlocal" this assignment created a
                    # variable local to the callback, so errors raised by
                    # post_update_data were silently ignored below.
                    nonlocal got_error
                    if f.exception():
                        got_error = f.exception()

                f2.add_done_callback(postupdated)
                yield from f2
                if got_error:
                    raise got_error
            cnt = self.db[self.collection_name].count()
            if clean_archives:
                self.clean_archived_collections()
            self.register_status("success", count=cnt)
            self.logger.info("success %s" % strargs, extra={"notify": True})
        except Exception as e:
            self.register_status("failed", err=str(e))
            self.logger.exception("failed %s: %s" % (strargs, e), extra={"notify": True})
            raise

    def prepare_src_dump(self):
        """Sync with src_dump collection, collection information (src_doc)
        Return src_dump collection"""
        src_dump = get_src_dump()
        self.src_doc = src_dump.find_one({'_id': self.main_source})
        return src_dump

    def setup_log(self):
        """Setup and return a logger instance (file handler + hipchat handler)."""
        import logging as logging_mod
        if not os.path.exists(self.src_root_folder):
            os.makedirs(self.src_root_folder)
        self.logfile = os.path.join(self.log_folder, 'upload_%s_%s.log' % (self.fullname, time.strftime("%Y%m%d", self.timestamp.timetuple())))
        fmt = logging_mod.Formatter('%(asctime)s [%(process)d:%(threadName)s] - %(name)s - %(levelname)s -- %(message)s', datefmt="%H:%M:%S")
        fh = logging_mod.FileHandler(self.logfile)
        fh.setFormatter(fmt)
        fh.name = "logfile"
        nh = HipchatHandler(config.HIPCHAT_CONFIG)
        nh.setFormatter(fmt)
        nh.name = "hipchat"
        logger = logging_mod.getLogger("%s_upload" % self.fullname)
        logger.setLevel(logging_mod.DEBUG)
        # avoid duplicated handlers when setup_log() runs more than once
        if not fh.name in [h.name for h in logger.handlers]:
            logger.addHandler(fh)
        if not nh.name in [h.name for h in logger.handlers]:
            logger.addHandler(nh)
        return logger

    def __getattr__(self, attr):
        """This catches access to unpicabkle attributes. If unset,
        will call sync to restore them."""
        # tricky: self._state will always exist when the instance is create
        # through __init__(). But... when pickling the instance, __setstate__
        # is used to restore attribute on an instance that's hasn't been though
        # __init__() constructor. So we raise an error here to tell pickle not
        # to restore this attribute (it'll be set after)
        if attr == "_state":
            raise AttributeError(attr)
        if attr in self._state:
            if not self._state[attr]:
                self.prepare()
            return self._state[attr]
        else:
            raise AttributeError(attr)
class NoBatchIgnoreDuplicatedSourceUploader(BaseSourceUploader):
    '''Same as default uploader, but will store records and ignore if
    any duplicated error occuring (use with caution...). Storage
    is done line by line (slow, not using a batch) but preserve order
    of data in input file.
    '''
    # Only the storage strategy differs from BaseSourceUploader.
    storage_class = NoBatchIgnoreDuplicatedStorage
class IgnoreDuplicatedSourceUploader(BaseSourceUploader):
    '''Same as default uploader, but will store records and ignore if
    any duplicated error occuring (use with caution...). Storage
    is done using batch and unordered bulk operations.
    '''
    # Only the storage strategy differs from BaseSourceUploader.
    storage_class = IgnoreDuplicatedStorage
class MergerSourceUploader(BaseSourceUploader):
    """Uploader using MergerStorage as its storage strategy.

    Presumably merges documents sharing the same _id instead of
    rejecting duplicates — confirm against MergerStorage's implementation.
    """
    storage_class = MergerStorage
class DummySourceUploader(BaseSourceUploader):
    """
    Dummy uploader, won't upload any data, assuming data is already there
    but make sure every other bit of information is there for the overall process
    (usefull when online data isn't available anymore)
    """

    def prepare_src_dump(self):
        """Return src_dump, creating a minimal record for this source if absent."""
        src_dump = get_src_dump()
        # just populate/initiate an src_dump record (b/c no dump before) if needed
        query = {'_id': self.main_source}
        self.src_doc = src_dump.find_one(query)
        if not self.src_doc:
            src_dump.save({"_id": self.main_source})
            self.src_doc = src_dump.find_one(query)
        return src_dump

    def check_ready(self, force=False):
        """No dump exists for dummy sources, so there is nothing to validate."""
        # bypass checks about src_dump
        pass

    @asyncio.coroutine
    def update_data(self, batch_size, job_manager=None, release=None):
        """Record the given release and verify data is already in place."""
        assert release, "Dummy uploader requires 'release' argument to be specified"
        self.logger.info("Dummy uploader, nothing to upload")
        # by-pass register_status and store release here (it's usually done by dumpers but
        # dummy uploaders have no dumper associated b/c it's collection-only resource)
        self.src_dump.update_one({'_id': self.main_source}, {"$set": {"release": release}})
        # sanity check, dummy uploader, yes, but make sure data is there
        assert self.collection.count() > 0, "No data found in collection '%s' !!!" % self.collection_name
class ParallelizedSourceUploader(BaseSourceUploader):
    """Uploader splitting the load into multiple parallel upload_worker
    processes, one per argument tuple returned by jobs()."""

    def jobs(self):
        """Return list of (*arguments) passed to self.load_data, in order. for
        each parallelized jobs. Ex: [(x,1),(y,2),(z,3)]"""
        raise NotImplementedError("implement me in subclass")

    @asyncio.coroutine
    def update_data(self, batch_size, job_manager=None):
        jobs = []
        job_params = self.jobs()
        got_error = False
        # make sure we don't use any of self reference in the following loop
        fullname = copy.deepcopy(self.fullname)
        storage_class = copy.deepcopy(self.__class__.storage_class)
        load_data = copy.deepcopy(self.load_data)
        temp_collection_name = copy.deepcopy(self.temp_collection_name)
        state = self.unprepare()
        # important: within this loop, "self" should never be used to make sure we don't
        # instantiate unpicklable attributes (via via autoset attributes, see prepare())
        # because there could a race condition where an error would cause self to log a statement
        # (logger is unpicklable) while at the same another job from the loop would be
        # subtmitted to job_manager causing a error due to that logger attribute)
        # in other words: once unprepared, self should never be changed until all
        # jobs are submitted
        for bnum,args in enumerate(job_params):
            pinfo = self.get_pinfo()
            pinfo["step"] = "update_data"
            pinfo["description"] = "%s" % str(args)
            job = yield from job_manager.defer_to_process(
                pinfo,
                partial(
                    # pickable worker
                    upload_worker,
                    # worker name
                    fullname,
                    # storage class
                    storage_class,
                    # loading func
                    load_data,
                    # dest collection name
                    temp_collection_name,
                    # batch size
                    batch_size,
                    # batch num
                    bnum,
                    # and finally *args passed to loading func
                    *args
                    )
                )
            jobs.append(job)
            # raise error as soon as we know
            if got_error:
                raise got_error
            def batch_uploaded(f,name,batch_num):
                # important: don't even use "self" ref here to make sure jobs can be submitted
                # (see comment above, before loop)
                nonlocal got_error
                try:
                    # upload_worker returns an int (stored count) on success
                    if type(f.result()) != int:
                        got_error = Exception("Batch #%s failed while uploading source '%s' [%s]" % (batch_num, name, f.result()))
                except Exception as e:
                    got_error = e
            job.add_done_callback(partial(batch_uploaded,name=fullname,batch_num=bnum))
        if jobs:
            yield from asyncio.gather(*jobs)
            if got_error:
                raise got_error
            self.switch_collection()
            self.clean_archived_collections()
class NoDataSourceUploader(BaseSourceUploader):
    """Uploader that neither uploads nor even assumes any actual data
    exists (different from DummySourceUploader on this point). Useful
    when only side information such as the mapping (get_mapping()) must
    be registered while the data itself does not come from an actual
    upload (ie. generated).
    """
    storage_class = NoStorage
    @asyncio.coroutine
    def update_data(self, batch_size, job_manager=None):
        # nothing to store; just trace that we were invoked
        self.logger.debug("No data to upload, skip")
##############################
import aiocron
class UploaderManager(BaseSourceManager):
    '''
    After registering datasources, manager will orchestrate source uploading.
    '''
    SOURCE_CLASS = BaseSourceUploader
    def __init__(self,poll_schedule=None,*args,**kwargs):
        # poll_schedule: cron-like spec consumed by poll() via aiocron
        super(UploaderManager,self).__init__(*args,**kwargs)
        self.poll_schedule = poll_schedule
    def filter_class(self,klass):
        """Keep only fully-functional uploader classes (those with a name)."""
        if klass.name is None:
            # usually a base defined in an uploader, which then is subclassed in same
            # module. Kind of intermediate, not fully functional class
            logging.debug("%s has no 'name' defined, skip it" % klass)
            return None
        else:
            return klass
    def create_instance(self,klass):
        """Instantiate an uploader with this manager's DB connection info."""
        res = klass.create(db_conn_info=self.conn.address,data_root=config.DATA_ARCHIVE_ROOT)
        return res
    def register_classes(self,klasses):
        """Index uploader classes by main_source (or name) and register
        them against the connection."""
        for klass in klasses:
            if klass.main_source:
                self.register.setdefault(klass.main_source,[]).append(klass)
            else:
                self.register.setdefault(klass.name,[]).append(klass)
            self.conn.register(klass)
    def register_status(self,src_name,status,**extra):
        """
        Register overall status for resource
        """
        src_dump = get_src_dump()
        upload_info = {'status': status}
        upload_info.update(extra)
        if status == "uploading":
            upload_info["jobs"] = {}
            # unflag "need upload"
            src_dump.update_one({"_id" : src_name},{"$unset" : {"pending_to_upload":None}})
            src_dump.update_one({"_id" : src_name},{"$set" : {"upload" : upload_info}})
        else:
            # we want to keep information
            upd = {}
            for k,v in upload_info.items():
                upd["upload.%s" % k] = v
            src_dump.update_one({"_id" : src_name},{"$set" : upd})
    def upload_all(self,raise_on_error=False,**kwargs):
        """
        Trigger upload processes for all registered resources.
        **kwargs are passed to upload_src() method
        """
        # NOTE(review): raise_on_error is currently unused here — confirm
        # whether it should be forwarded to upload_src()
        jobs = []
        for src in self.register:
            job = self.upload_src(src, **kwargs)
            jobs.extend(job)
        return asyncio.gather(*jobs)
    def upload_src(self, src, *args, **kwargs):
        """
        Trigger upload for registered resource named 'src'.
        Other args are passed to uploader's load() method
        """
        try:
            klasses = self[src]
        except KeyError:
            raise ResourceNotFound("Can't find '%s' in registered sources (whether as main or sub-source)" % src)
        jobs = []
        try:
            self.register_status(src,"uploading")
            for i,klass in enumerate(klasses):
                job = self.job_manager.submit(partial(
                    self.create_and_load,klass,job_manager=self.job_manager,*args,**kwargs))
                jobs.append(job)
            tasks = asyncio.gather(*jobs)
            def done(f):
                # runs when all sub-uploads finished; records final status
                try:
                    # just consume the result to raise exception
                    # if there were an error... (what an api...)
                    f.result()
                    self.register_status(src,"success")
                    logging.info("success",extra={"notify":True})
                except Exception as e:
                    self.register_status(src,"failed",err=str(e))
                    logging.exception("failed: %s" % e,extra={"notify":True})
            tasks.add_done_callback(done)
            return jobs
        except Exception as e:
            self.register_status(src,"failed",err=str(e))
            self.logger.exception("Error while uploading '%s': %s" % (src,e),extra={"notify":True})
            raise
    @asyncio.coroutine
    def create_and_load(self,klass,*args,**kwargs):
        """Instantiate klass (possibly yielding several uploaders) and run
        each one's load() sequentially."""
        insts = self.create_instance(klass)
        if type(insts) != list:
            insts = [insts]
        for inst in insts:
            yield from inst.load(*args,**kwargs)
    def poll(self):
        """Periodically scan src_dump for sources flagged
        'pending_to_upload' and trigger their upload."""
        if not self.poll_schedule:
            raise ManagerError("poll_schedule is not defined")
        src_dump = get_src_dump()
        @asyncio.coroutine
        def check_pending_to_upload():
            sources = [src['_id'] for src in src_dump.find({'pending_to_upload': True}) if type(src['_id']) == str]
            logging.info("Found %d resources to upload (%s)" % (len(sources),repr(sources)))
            for src_name in sources:
                logging.info("Launch upload for '%s'" % src_name)
                try:
                    self.upload_src(src_name)
                except ResourceNotFound:
                    logging.error("Resource '%s' needs upload but is not registered in manager" % src_name)
        # keep a reference so the cron job isn't garbage-collected
        cron = aiocron.crontab(self.poll_schedule,func=partial(check_pending_to_upload),
                               start=True, loop=self.job_manager.loop)
| |
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright 2013-2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import git
import os
import logging
import zuul.model
def reset_repo_to_head(repo):
    """Hard-reset *repo* to HEAD, tolerating git's 'unstaged changes
    remain' exit status (1)."""
    # The trailing '--' lets us reset even when a file named 'HEAD'
    # exists in the repository root.  GitPython cannot currently be told
    # to always include the disambiguator itself; this workaround should
    # become unnecessary if this PR merges:
    # https://github.com/gitpython-developers/GitPython/pull/319
    try:
        repo.git.reset('--hard', 'HEAD', '--')
    except git.GitCommandError as err:
        # git nowadays may use 1 as status to indicate there are still
        # unstaged modifications after the reset
        if err.status == 1:
            return
        raise
class ZuulReference(git.Reference):
    """Git reference rooted at refs/zuul; may only point at commits."""
    _common_path_default = "refs/zuul"
    _points_to_commits_only = True
class Repo(object):
    """Wrapper around a local git clone of a single remote repository.

    The clone is created lazily: the constructor attempts it, but a
    failure there is only logged, and createRepoObject() retries before
    each operation that needs an actual git.Repo handle.
    """
    log = logging.getLogger("zuul.Repo")

    def __init__(self, remote, local, email, username):
        self.remote_url = remote
        self.local_path = local
        self.email = email
        self.username = username
        self._initialized = False
        try:
            self._ensure_cloned()
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt and
            # SystemExit still propagate.  Cloning will be retried by
            # createRepoObject() on the next operation.
            self.log.exception("Unable to initialize repo for %s" % remote)

    def _ensure_cloned(self):
        """Clone the remote if needed and apply user git config once."""
        repo_is_cloned = os.path.exists(os.path.join(self.local_path, '.git'))
        if self._initialized and repo_is_cloned:
            return
        # If the repo does not exist, clone the repo.
        if not repo_is_cloned:
            self.log.debug("Cloning from %s to %s" % (self.remote_url,
                                                      self.local_path))
            git.Repo.clone_from(self.remote_url, self.local_path)
        repo = git.Repo(self.local_path)
        if self.email:
            repo.config_writer().set_value('user', 'email',
                                           self.email)
        if self.username:
            repo.config_writer().set_value('user', 'name',
                                           self.username)
        repo.config_writer().write()
        self._initialized = True

    def isInitialized(self):
        """Return True once the clone and its config have been set up."""
        return self._initialized

    def createRepoObject(self):
        """Return a git.Repo for the local clone, cloning if necessary.

        Previously a bare "except:" swallowed the failure and execution
        fell through to "return repo" with "repo" unbound, masking the
        real error behind an UnboundLocalError.  Now the original
        exception is logged and re-raised.
        """
        try:
            self._ensure_cloned()
            repo = git.Repo(self.local_path)
        except Exception:
            self.log.exception("Unable to initialize repo for %s" %
                               self.local_path)
            raise
        return repo

    def reset(self):
        """Fetch from origin and reset local heads to the remote state."""
        self.log.debug("Resetting repository %s" % self.local_path)
        self.update()
        repo = self.createRepoObject()
        origin = repo.remotes.origin
        for ref in origin.refs:
            if ref.remote_head == 'HEAD':
                continue
            repo.create_head(ref.remote_head, ref, force=True)
        # try reset to remote HEAD (usually origin/master)
        # If it fails, pick the first reference
        try:
            repo.head.reference = origin.refs['HEAD']
        except IndexError:
            repo.head.reference = origin.refs[0]
        reset_repo_to_head(repo)
        repo.git.clean('-x', '-f', '-d')

    def prune(self):
        """Delete remote-tracking refs that no longer exist on origin."""
        repo = self.createRepoObject()
        origin = repo.remotes.origin
        stale_refs = origin.stale_refs
        if stale_refs:
            self.log.debug("Pruning stale refs: %s", stale_refs)
            git.refs.RemoteReference.delete(repo, *stale_refs)

    def getBranchHead(self, branch):
        """Return the commit at the tip of the local *branch*."""
        repo = self.createRepoObject()
        branch_head = repo.heads[branch]
        return branch_head.commit

    def hasBranch(self, branch):
        """Return True if origin has a branch named *branch*."""
        repo = self.createRepoObject()
        origin = repo.remotes.origin
        return branch in origin.refs

    def getCommitFromRef(self, refname):
        """Return the commit *refname* points at, or None if unknown."""
        repo = self.createRepoObject()
        if refname not in repo.refs:
            return None
        ref = repo.refs[refname]
        return ref.commit

    def checkout(self, ref):
        """Point HEAD at *ref*, hard-reset, and return the new commit."""
        repo = self.createRepoObject()
        self.log.debug("Checking out %s" % ref)
        repo.head.reference = ref
        reset_repo_to_head(repo)
        return repo.head.commit

    def cherryPick(self, ref):
        """Fetch *ref* and cherry-pick it onto the current HEAD."""
        repo = self.createRepoObject()
        self.log.debug("Cherry-picking %s" % ref)
        self.fetch(ref)
        repo.git.cherry_pick("FETCH_HEAD")
        return repo.head.commit

    def merge(self, ref, strategy=None):
        """Fetch *ref* and merge it (optionally with -s *strategy*)."""
        repo = self.createRepoObject()
        args = []
        if strategy:
            args += ['-s', strategy]
        args.append('FETCH_HEAD')
        self.fetch(ref)
        self.log.debug("Merging %s with args %s" % (ref, args))
        repo.git.merge(*args)
        return repo.head.commit

    def fetch(self, ref):
        """Fetch *ref* from origin into FETCH_HEAD."""
        repo = self.createRepoObject()
        # The git.remote.fetch method may read in git progress info and
        # interpret it improperly causing an AssertionError. Because the
        # data was fetched properly subsequent fetches don't seem to fail.
        # So try again if an AssertionError is caught.
        origin = repo.remotes.origin
        try:
            origin.fetch(ref)
        except AssertionError:
            origin.fetch(ref)

    def fetchFrom(self, repository, refspec):
        """Fetch *refspec* from an arbitrary *repository* URL."""
        repo = self.createRepoObject()
        repo.git.fetch(repository, refspec)

    def createZuulRef(self, ref, commit='HEAD'):
        """Create refs/zuul/*ref* at *commit* and return that commit."""
        repo = self.createRepoObject()
        self.log.debug("CreateZuulRef %s at %s on %s" % (ref, commit, repo))
        ref = ZuulReference.create(repo, ref, commit)
        return ref.commit

    def push(self, local, remote):
        """Push local ref *local* to remote ref *remote* on origin."""
        repo = self.createRepoObject()
        self.log.debug("Pushing %s:%s to %s" % (local, remote,
                                                self.remote_url))
        repo.remotes.origin.push('%s:%s' % (local, remote))

    def update(self):
        """Fetch refs and tags from origin."""
        repo = self.createRepoObject()
        self.log.debug("Updating repository %s" % self.local_path)
        origin = repo.remotes.origin
        if repo.git.version_info[:2] < (1, 9):
            # Before 1.9, 'git fetch --tags' did not include the
            # behavior covered by 'git --fetch', so we run both
            # commands in that case. Starting with 1.9, 'git fetch
            # --tags' is all that is necessary. See
            # https://github.com/git/git/blob/master/Documentation/RelNotes/1.9.0.txt#L18-L20
            origin.fetch()
        origin.fetch(tags=True)
class Merger(object):
    """Maintains local clones for a set of projects and merges incoming
    change items into them, recording the result under per-item Zuul refs."""
    log = logging.getLogger("zuul.Merger")
    def __init__(self, working_root, connections, email, username):
        self.repos = {}
        self.working_root = working_root
        if not os.path.exists(working_root):
            os.makedirs(working_root)
        self._makeSSHWrappers(working_root, connections)
        self.email = email
        self.username = username
    def _makeSSHWrappers(self, working_root, connections):
        """Write one GIT_SSH wrapper per connection that declares an sshkey."""
        for connection_name, connection in connections.items():
            sshkey = connection.connection_config.get('sshkey')
            if sshkey:
                self._makeSSHWrapper(sshkey, working_root, connection_name)
    def _makeSSHWrapper(self, key, merge_root, connection_name='default'):
        """Create an executable wrapper script forcing ssh to use *key*."""
        wrapper_name = '.ssh_wrapper_%s' % connection_name
        name = os.path.join(merge_root, wrapper_name)
        fd = open(name, 'w')
        fd.write('#!/bin/bash\n')
        # NOTE(review): $@ is unquoted, so arguments containing whitespace
        # would be word-split by the shell — confirm git never passes such
        # arguments before tightening this.
        fd.write('ssh -i %s $@\n' % key)
        fd.close()
        os.chmod(name, 0o755)
    def addProject(self, project, url):
        """Create (or clone) the repo for *project* and cache it; returns
        the Repo or None on failure."""
        repo = None
        try:
            path = os.path.join(self.working_root, project)
            repo = Repo(url, path, self.email, self.username)
            self.repos[project] = repo
        except Exception:
            self.log.exception("Unable to add project %s" % project)
        return repo
    def getRepo(self, project, url):
        """Return the cached Repo for *project*, creating it from *url*
        when first seen."""
        if project in self.repos:
            return self.repos[project]
        if not url:
            raise Exception("Unable to set up repo for project %s"
                            " without a url" % (project,))
        return self.addProject(project, url)
    def updateRepo(self, project, url):
        """Best-effort fetch of the latest remote state for *project*."""
        repo = self.getRepo(project, url)
        try:
            self.log.info("Updating local repository %s", project)
            repo.update()
        except Exception:
            self.log.exception("Unable to update %s", project)
    def _mergeChange(self, item, ref):
        """Check out *ref* and apply *item* using its merge mode; returns
        the resulting commit or None on any failure."""
        repo = self.getRepo(item['project'], item['url'])
        try:
            repo.checkout(ref)
        except Exception:
            self.log.exception("Unable to checkout %s" % ref)
            return None
        try:
            mode = item['merge_mode']
            if mode == zuul.model.MERGER_MERGE:
                commit = repo.merge(item['refspec'])
            elif mode == zuul.model.MERGER_MERGE_RESOLVE:
                commit = repo.merge(item['refspec'], 'resolve')
            elif mode == zuul.model.MERGER_CHERRY_PICK:
                commit = repo.cherryPick(item['refspec'])
            else:
                raise Exception("Unsupported merge mode: %s" % mode)
        except git.GitCommandError:
            # Log git exceptions at debug level because they are
            # usually benign merge conflicts
            self.log.debug("Unable to merge %s" % item, exc_info=True)
            return None
        except Exception:
            self.log.exception("Exception while merging a change:")
            return None
        return commit
    def _setGitSsh(self, connection_name):
        """Point the GIT_SSH env var at the wrapper for this connection
        (or clear it when no wrapper exists)."""
        wrapper_name = '.ssh_wrapper_%s' % connection_name
        name = os.path.join(self.working_root, wrapper_name)
        if os.path.isfile(name):
            os.environ['GIT_SSH'] = name
        elif 'GIT_SSH' in os.environ:
            del os.environ['GIT_SSH']
    def _mergeItem(self, item, recent):
        """Merge one item on top of the most recent commit recorded in
        *recent* for its (project, branch), updating *recent* and the
        Zuul refs of every project-branch seen so far."""
        self.log.debug("Processing refspec %s for project %s / %s ref %s" %
                       (item['refspec'], item['project'], item['branch'],
                        item['ref']))
        self._setGitSsh(item['connection_name'])
        repo = self.getRepo(item['project'], item['url'])
        key = (item['project'], item['branch'])
        # See if we have a commit for this change already in this repo
        zuul_ref = item['branch'] + '/' + item['ref']
        commit = repo.getCommitFromRef(zuul_ref)
        if commit:
            self.log.debug("Found commit %s for ref %s" % (commit, zuul_ref))
            # Store this as the most recent commit for this
            # project-branch
            recent[key] = commit
            return commit
        self.log.debug("Unable to find commit for ref %s" % (zuul_ref,))
        # We need to merge the change
        # Get the most recent commit for this project-branch
        base = recent.get(key)
        if not base:
            # There is none, so use the branch tip
            # we need to reset here in order to call getBranchHead
            self.log.debug("No base commit found for %s" % (key,))
            try:
                repo.reset()
            except Exception:
                self.log.exception("Unable to reset repo %s" % repo)
                return None
            base = repo.getBranchHead(item['branch'])
        else:
            self.log.debug("Found base commit %s for %s" % (base, key,))
        # Merge the change
        commit = self._mergeChange(item, base)
        if not commit:
            return None
        # Store this commit as the most recent for this project-branch
        recent[key] = commit
        # Set the Zuul ref for this item to point to the most recent
        # commits of each project-branch
        for key, mrc in recent.items():
            project, branch = key
            try:
                repo = self.getRepo(project, None)
                zuul_ref = branch + '/' + item['ref']
                repo.createZuulRef(zuul_ref, mrc)
            except Exception:
                self.log.exception("Unable to set zuul ref %s for "
                                   "item %s" % (zuul_ref, item))
                return None
        return commit
    def mergeChanges(self, items):
        """Merge *items* in order; returns the final commit sha, or None
        as soon as one item fails to merge."""
        recent = {}
        commit = None
        for item in items:
            if item.get("number") and item.get("patchset"):
                self.log.debug("Merging for change %s,%s." %
                               (item["number"], item["patchset"]))
            elif item.get("newrev") and item.get("oldrev"):
                self.log.debug("Merging for rev %s with oldrev %s." %
                               (item["newrev"], item["oldrev"]))
            commit = self._mergeItem(item, recent)
            if not commit:
                return None
        return commit.hexsha
| |
from __future__ import division
import random
import warnings
import numpy as np
from numpy.testing import assert_allclose, raises
from nose.plugins.attrib import attr
from menpo.testing import is_same_array
from menpo.image import Image, MaskedImage
from menpo.feature import (hog, lbp, es, igo, daisy, no_op, normalize,
normalize_norm, normalize_std, normalize_var)
import menpo.io as mio
def test_imagewindowiterator_hog_padding():
    """Dense HOG with padding: output grid equals the per-axis count of
    window steps over random image sizes."""
    n_cases = 5
    image_width = np.random.randint(50, 250, [n_cases, 1])
    image_height = np.random.randint(50, 250, [n_cases, 1])
    window_step_horizontal = np.random.randint(1, 10, [n_cases, 1])
    window_step_vertical = np.random.randint(1, 10, [n_cases, 1])
    for i in range(n_cases):
        image = MaskedImage(np.random.randn(1, image_height[i, 0],
                                            image_width[i, 0]))
        hog_im = hog(image, mode='dense',
                     window_step_vertical=window_step_vertical[i, 0],
                     window_step_horizontal=window_step_horizontal[i, 0],
                     window_step_unit='pixels', padding=True)
        n_windows_horizontal = len(range(0, image_width[i, 0],
                                         window_step_horizontal[i, 0]))
        n_windows_vertical = len(range(0, image_height[i, 0],
                                       window_step_vertical[i, 0]))
        assert_allclose(hog_im.shape, (n_windows_vertical,
                                       n_windows_horizontal))
def test_windowiterator_hog_no_padding():
    """Dense HOG without padding: grid counts start at window size - 1."""
    n_cases = 5
    image_width = np.random.randint(50, 250, [n_cases, 1])
    image_height = np.random.randint(50, 250, [n_cases, 1])
    window_step_horizontal = np.random.randint(1, 10, [n_cases, 1])
    window_step_vertical = np.random.randint(1, 10, [n_cases, 1])
    window_width = np.random.randint(3, 20, [n_cases, 1])
    window_height = np.random.randint(3, 20, [n_cases, 1])
    for i in range(n_cases):
        image = MaskedImage(np.random.randn(1, image_height[i, 0],
                                            image_width[i, 0]))
        hog_img = hog(image, mode='dense', cell_size=3, block_size=1,
                      window_height=window_height[i, 0],
                      window_width=window_width[i, 0], window_unit='pixels',
                      window_step_vertical=window_step_vertical[i, 0],
                      window_step_horizontal=window_step_horizontal[i, 0],
                      window_step_unit='pixels', padding=False)
        n_windows_horizontal = len(range(window_width[i, 0] - 1,
                                         image_width[i, 0],
                                         window_step_horizontal[i, 0]))
        n_windows_vertical = len(range(window_height[i, 0] - 1,
                                       image_height[i, 0],
                                       window_step_vertical[i, 0]))
        assert_allclose(hog_img.shape, (n_windows_vertical,
                                        n_windows_horizontal))
def test_windowiterator_lbp_padding():
    """LBP with padding: output grid equals the per-axis step counts."""
    n_cases = 5
    image_width = np.random.randint(50, 250, [n_cases, 1])
    image_height = np.random.randint(50, 250, [n_cases, 1])
    window_step_horizontal = np.random.randint(1, 10, [n_cases, 1])
    window_step_vertical = np.random.randint(1, 10, [n_cases, 1])
    for i in range(n_cases):
        image = MaskedImage(np.random.randn(1, image_height[i, 0],
                                            image_width[i, 0]))
        lbp_img = lbp(image, window_step_vertical=window_step_vertical[i, 0],
                      window_step_horizontal=window_step_horizontal[i, 0],
                      window_step_unit='pixels', padding=True)
        n_windows_horizontal = len(range(0, image_width[i, 0],
                                         window_step_horizontal[i, 0]))
        n_windows_vertical = len(range(0, image_height[i, 0],
                                       window_step_vertical[i, 0]))
        assert_allclose(lbp_img.shape, (n_windows_vertical,
                                        n_windows_horizontal))
def test_windowiterator_lbp_no_padding():
    """LBP without padding: window size is 2*radius + 1."""
    n_cases = 5
    image_width = np.random.randint(50, 250, [n_cases, 1])
    image_height = np.random.randint(50, 250, [n_cases, 1])
    window_step_horizontal = np.random.randint(1, 10, [n_cases, 1])
    window_step_vertical = np.random.randint(1, 10, [n_cases, 1])
    radius = np.random.randint(3, 5, [n_cases, 1])
    for i in range(n_cases):
        image = Image(np.random.randn(1, image_height[i, 0],
                                      image_width[i, 0]))
        lbp_img = lbp(image, radius=radius[i, 0], samples=8,
                      window_step_vertical=window_step_vertical[i, 0],
                      window_step_horizontal=window_step_horizontal[i, 0],
                      window_step_unit='pixels', padding=False)
        window_size = 2 * radius[i, 0] + 1
        n_windows_horizontal = len(range(window_size - 1, image_width[i, 0],
                                         window_step_horizontal[i, 0]))
        n_windows_vertical = len(range(window_size - 1, image_height[i, 0],
                                       window_step_vertical[i, 0]))
        assert_allclose(lbp_img.shape, (n_windows_vertical,
                                        n_windows_horizontal))
def test_hog_channels_dalaltriggs():
    """Dalal-Triggs HOG: channel count follows the block-grid formula."""
    n_cases = 3
    cell_size = np.random.randint(1, 10, [n_cases, 1])
    block_size = np.random.randint(1, 3, [n_cases, 1])
    num_bins = np.random.randint(7, 9, [n_cases, 1])
    channels = np.random.randint(1, 4, [n_cases, 1])
    for i in range(n_cases):
        image = MaskedImage(np.random.randn(channels[i, 0], 40, 40))
        block_size_pixels = cell_size[i, 0] * block_size[i, 0]
        window_width = np.random.randint(block_size_pixels, 40, 1)
        window_height = np.random.randint(block_size_pixels, 40, 1)
        hog_img = hog(image, mode='dense', algorithm='dalaltriggs',
                      cell_size=cell_size[i, 0], block_size=block_size[i, 0],
                      num_bins=num_bins[i, 0], window_height=window_height[0],
                      window_width=window_width[0], window_unit='pixels',
                      window_step_vertical=3, window_step_horizontal=3,
                      window_step_unit='pixels', padding=True)
        length_per_block = block_size[i, 0] * block_size[i, 0] * num_bins[i, 0]
        n_blocks_horizontal = len(range(block_size_pixels - 1, window_width[0],
                                        cell_size[i, 0]))
        n_blocks_vertical = len(range(block_size_pixels - 1, window_height[0],
                                      cell_size[i, 0]))
        n_channels = n_blocks_horizontal * n_blocks_vertical * length_per_block
        assert_allclose(hog_img.n_channels, n_channels)
def test_hog_channels_zhuramanan():
    """Zhu-Ramanan HOG: 31 values per block over the computed block grid."""
    n_cases = 3
    cell_size = np.random.randint(2, 10, [n_cases])
    channels = np.random.randint(1, 4, [n_cases])
    for i in range(n_cases):
        image = MaskedImage(np.random.randn(channels[i], 40, 40))
        win_width = np.random.randint(3 * cell_size[i], 40, 1)
        win_height = np.random.randint(3 * cell_size[i], 40, 1)
        hog_img = hog(image, mode='dense', algorithm='zhuramanan',
                      cell_size=cell_size[i],
                      window_height=win_height[0],
                      window_width=win_width[0],
                      window_unit='pixels', window_step_vertical=3,
                      window_step_horizontal=3,
                      window_step_unit='pixels', padding=True, verbose=True)
        length_per_block = 31
        n_blocks_horizontal = np.floor((win_width[0] / cell_size[i]) + 0.5) - 2
        n_blocks_vertical = np.floor((win_height[0] / cell_size[i]) + 0.5) - 2
        n_channels = n_blocks_horizontal * n_blocks_vertical * length_per_block
        assert_allclose(hog_img.n_channels, n_channels)
@attr('cyvlfeat')
def test_dsift_channels():
    """Dense SIFT: channels = spatial bins x orientation bins."""
    from menpo.feature import dsift
    n_cases = 3
    num_bins_horizontal = np.random.randint(1, 3, [n_cases, 1])
    num_bins_vertical = np.random.randint(1, 3, [n_cases, 1])
    num_or_bins = np.random.randint(7, 9, [n_cases, 1])
    cell_size_horizontal = np.random.randint(1, 10, [n_cases, 1])
    cell_size_vertical = np.random.randint(1, 10, [n_cases, 1])
    channels = np.random.randint(1, 4, [n_cases])
    for i in range(n_cases):
        image = MaskedImage(np.random.randn(channels[i], 40, 40))
        dsift_img = dsift(image, window_step_horizontal=1,
                          window_step_vertical=1,
                          num_bins_horizontal=num_bins_horizontal[i, 0],
                          num_bins_vertical=num_bins_vertical[i, 0],
                          num_or_bins=num_or_bins[i, 0],
                          cell_size_horizontal=cell_size_horizontal[i, 0],
                          cell_size_vertical=cell_size_vertical[i, 0])
        n_channels = (num_bins_horizontal[i, 0] * num_bins_vertical[i, 0] *
                      num_or_bins[i, 0])
        assert_allclose(dsift_img.n_channels, n_channels)
def test_lbp_channels():
    """LBP: one output channel per (radius, samples) combination per input
    channel."""
    n_cases = 3
    n_combs = np.random.randint(1, 6, [n_cases, 1])
    channels = np.random.randint(1, 4, [n_cases, 1])
    for i in range(n_cases):
        radius = random.sample(range(1, 10), n_combs[i, 0])
        samples = random.sample(range(4, 12), n_combs[i, 0])
        image = MaskedImage(np.random.randn(channels[i, 0], 40, 40))
        lbp_img = lbp(image, radius=radius, samples=samples,
                      window_step_vertical=3, window_step_horizontal=3,
                      window_step_unit='pixels', padding=True)
        assert_allclose(lbp_img.n_channels, n_combs[i, 0] * channels[i, 0])
def test_igo_channels():
    """IGO doubles channels (quadruples with double_angles), keeps shape."""
    n_cases = 3
    channels = np.random.randint(1, 10, [n_cases, 1])
    for i in range(n_cases):
        image = Image(np.random.randn(channels[i, 0], 40, 40))
        igo_img = igo(image)
        igo2_img = igo(image, double_angles=True)
        assert_allclose(igo_img.shape, image.shape)
        assert_allclose(igo2_img.shape, image.shape)
        assert_allclose(igo_img.n_channels, 2 * channels[i, 0])
        assert_allclose(igo2_img.n_channels, 4 * channels[i, 0])
def test_es_channels():
    """ES doubles the channel count and keeps the spatial shape."""
    n_cases = 3
    channels = np.random.randint(1, 10, [n_cases, 1])
    for i in range(n_cases):
        image = Image(np.random.randn(channels[i, 0], 40, 40))
        es_img = es(image)
        assert_allclose(es_img.shape, image.shape)
        assert_allclose(es_img.n_channels, 2 * channels[i, 0])
def test_daisy_channels():
    """DAISY: channels = (rings*histograms + 1) * orientations."""
    n_cases = 3
    rings = np.random.randint(1, 3, [n_cases, 1])
    orientations = np.random.randint(1, 7, [n_cases, 1])
    histograms = np.random.randint(1, 6, [n_cases, 1])
    channels = np.random.randint(1, 5, [n_cases, 1])
    for i in range(n_cases):
        image = Image(np.random.randn(channels[i, 0], 40, 40))
        daisy_img = daisy(image, step=4, rings=rings[i, 0],
                          orientations=orientations[i, 0],
                          histograms=histograms[i, 0])
        assert_allclose(daisy_img.shape, (3, 3))
        assert_allclose(daisy_img.n_channels,
                        ((rings[i, 0]*histograms[i, 0]+1)*orientations[i, 0]))
def test_igo_values():
    """igo on known 2x2 images yields the expected gradient orientations."""
    expected = np.array(
        [[[0.70710678, 0.70710678],
          [-0.70710678, -0.70710678]],
         [[0.70710678, -0.70710678],
          [0.70710678, -0.70710678]]])
    assert_allclose(igo(Image([[1., 2.], [2., 1.]])).pixels, expected)
    expected = np.array([[[0., 0.], [0., 0.]], [[1., 1.], [1., 1.]]])
    assert_allclose(igo(Image([[0., 0.], [0., 0.]])).pixels, expected)
def test_es_values():
    """es on a known 2x2 image matches the analytic edge responses."""
    k = 1.0 / (2 * (2 ** 0.5))
    expected = np.array([[[k, -k], [k, -k]], [[k, k], [-k, -k]]])
    assert_allclose(es(Image([[1., 2.], [2., 1.]])).pixels, expected)
def test_daisy_values():
    """Spot-check DAISY descriptor values on a fixed 4x4 image."""
    source = Image([[1., 2., 3., 4.], [2., 1., 3., 4.], [1., 2., 3., 4.],
                    [2., 1., 3., 4.]])
    feat = daisy(source, step=1, rings=2, radius=1, orientations=8,
                 histograms=8)
    spot_checks = {(10, 0, 0): 0.001355, (20, 0, 1): 0.032237,
                   (30, 1, 0): 0.002032, (40, 1, 1): 0.000163}
    for index, value in spot_checks.items():
        assert_allclose(np.around(feat.pixels[index], 6), value)
@attr('cyvlfeat')
def test_dsift_values():
    """Spot-check dense-SIFT values on a fixed 4x4 image."""
    from menpo.feature import dsift
    # Equivalent to the transpose of image in Matlab
    source = Image([[1, 2, 3, 4], [2, 1, 3, 4], [1, 2, 3, 4], [2, 1, 3, 4]])
    feat = dsift(source, cell_size_horizontal=2, cell_size_vertical=2)
    spot_checks = {(0, 0, 0): 19.719786, (1, 0, 1): 141.535736,
                   (0, 1, 0): 184.377472, (5, 1, 1): 39.04007}
    for index, value in spot_checks.items():
        assert_allclose(np.around(feat.pixels[index], 6), value,
                        rtol=1e-04)
def test_lbp_values():
    """lbp on fixed 3x3 images returns the known code per mapping type."""
    cases = [
        ([[0., 6., 0.], [5., 18., 13.], [0., 20., 0.]], 'none', 8.),
        ([[0., 6., 0.], [5., 25., 13.], [0., 20., 0.]], 'riu2', 0.),
        ([[0., 6., 0.], [5., 13., 13.], [0., 20., 0.]], 'u2', 8.),
        ([[0., 6., 0.], [5., 6., 13.], [0., 20., 0.]], 'ri', 4.),
    ]
    for pixels, mapping, expected in cases:
        lbp_img = lbp(Image(pixels), radius=1, samples=4,
                      mapping_type=mapping, padding=False)
        assert_allclose(lbp_img.pixels, expected)
def test_constrain_landmarks():
    """After crop/resize some landmarks fall outside the image; constraining
    them to the feature-image bounds brings the out-of-bounds count to 0."""
    breaking_bad = mio.import_builtin_asset('breakingbad.jpg').as_masked()
    breaking_bad = breaking_bad.crop_to_landmarks(boundary=20)
    breaking_bad = breaking_bad.resize([50, 50])
    breaking_bad.constrain_mask_to_landmarks()
    hog_b = hog(breaking_bad, mode='sparse')
    # NOTE(review): both checks index points[:, 0]; the second comparison
    # (against shape[0]) looks like a copy-paste that was meant to use
    # points[:, 1]. The expected count of 12 below may have been recorded
    # with the current indexing, so confirm before changing either line.
    x = np.where(hog_b.landmarks['PTS'].points[:, 0] > hog_b.shape[1] - 1)
    y = np.where(hog_b.landmarks['PTS'].points[:, 0] > hog_b.shape[0] - 1)
    assert_allclose(len(x[0]) + len(y[0]), 12)
    hog_b = hog(breaking_bad, mode='sparse')
    hog_b.landmarks['PTS'] = hog_b.landmarks['PTS'].constrain_to_bounds(
        hog_b.bounds())
    x = np.where(hog_b.landmarks['PTS'].points[:, 0] > hog_b.shape[1] - 1)
    y = np.where(hog_b.landmarks['PTS'].points[:, 0] > hog_b.shape[0] - 1)
    assert_allclose(len(x[0]) + len(y[0]), 0)
def test_no_op():
    """no_op returns an equal image backed by a fresh pixel array."""
    image = Image([[1., 2.], [2., 1.]])
    passthrough = no_op(image)
    assert_allclose(passthrough.pixels, image.pixels)
    assert not is_same_array(passthrough.pixels, image.pixels)
def test_normalize_no_scale_all():
    """normalize() with no scaling subtracts the global mean (13.)."""
    # dtype=np.float relied on the deprecated alias removed in NumPy 1.24;
    # the builtin float is the documented, behaviorally-identical spelling.
    pixels = np.arange(27, dtype=float).reshape([3, 3, 3])
    image = Image(pixels, copy=False)
    new_image = normalize(image, scale_func=None, mode='all')
    assert_allclose(new_image.pixels, pixels - 13.)
def test_normalize_norm_all():
    """normalize_norm over the whole image yields unit Frobenius norm."""
    pixels = np.arange(27, dtype=float).reshape([3, 3, 3])
    image = Image(pixels, copy=False)
    new_image = normalize_norm(image, mode='all')
    assert_allclose(np.linalg.norm(new_image.pixels), 1.)
def test_normalize_norm_channels():
    """normalize_norm per channel yields unit norm in every channel."""
    pixels = np.arange(27, dtype=float).reshape([3, 3, 3])
    image = Image(pixels, copy=False)
    new_image = normalize_norm(image, mode='per_channel')
    assert_allclose(np.linalg.norm(new_image.pixels[0]), 1.)
    assert_allclose(np.linalg.norm(new_image.pixels[1]), 1.)
    assert_allclose(np.linalg.norm(new_image.pixels[2]), 1.)
def test_normalize_std_all():
    """normalize_std over the whole image yields unit std deviation."""
    pixels = np.arange(27, dtype=float).reshape([3, 3, 3])
    image = Image(pixels, copy=False)
    new_image = normalize_std(image, mode='all')
    assert_allclose(np.std(new_image.pixels), 1.)
def test_normalize_std_channels():
    """normalize_std per channel yields unit std in every channel."""
    pixels = np.arange(27, dtype=float).reshape([3, 3, 3])
    image = Image(pixels, copy=False)
    new_image = normalize_std(image, mode='per_channel')
    assert_allclose(np.std(new_image.pixels[0]), 1.)
    assert_allclose(np.std(new_image.pixels[1]), 1.)
    assert_allclose(np.std(new_image.pixels[2]), 1.)
def test_normalize_var_all():
    """normalize_var over the whole image matches the expected variance."""
    pixels = np.arange(27, dtype=float).reshape([3, 3, 3])
    image = Image(pixels, copy=False)
    new_image = normalize_var(image, mode='all')
    assert_allclose(np.var(new_image.pixels), 0.01648, atol=1e-3)
def test_normalize_var_channels():
    """normalize_var per channel matches the expected per-channel variance."""
    pixels = np.arange(27, dtype=float).reshape([3, 3, 3])
    image = Image(pixels, copy=False)
    new_image = normalize_var(image, mode='per_channel')
    assert_allclose(np.var(new_image.pixels[0]), 0.15, atol=1e-5)
    assert_allclose(np.var(new_image.pixels[1]), 0.15, atol=1e-5)
    assert_allclose(np.var(new_image.pixels[2]), 0.15, atol=1e-5)
def test_normalize_no_scale_per_channel():
    """Per-channel no-scale normalization subtracts each channel's mean.

    This function was previously defined twice verbatim; the second
    definition shadowed the first, so only one copy is kept. Also uses
    builtin float instead of the np.float alias removed in NumPy 1.24.
    """
    pixels = np.arange(27, dtype=float).reshape([3, 3, 3])
    image = Image(pixels, copy=False)
    new_image = normalize(image, scale_func=None, mode='per_channel')
    assert_allclose(new_image.pixels[0], pixels[0] - 4.)
    assert_allclose(new_image.pixels[1], pixels[1] - 13.)
    assert_allclose(new_image.pixels[2], pixels[2] - 22.)
def test_normalize_scale_all():
pixels = np.arange(27, dtype=np.float).reshape([3, 3, 3])
dummy_scale = lambda *a, **kwargs: np.array(2.0)
image = Image(pixels, copy=False)
new_image = normalize(image, scale_func=dummy_scale, mode='all')
assert_allclose(new_image.pixels, (pixels - 13.0) / 2.0)
def test_normalize_scale_per_channel():
    """A custom scale_func divides each centred channel by its result."""
    # np.float was removed in NumPy 1.24 -> use the builtin float.
    pixels = np.arange(27, dtype=float).reshape([3, 3, 3])
    image = Image(pixels, copy=False)
    dummy_scale = lambda *a, **kwargs: np.array(2.0)
    new_image = normalize(image, scale_func=dummy_scale, mode='per_channel')
    # Channel means are 4, 13 and 22.
    assert_allclose(new_image.pixels[0], (pixels[0] - 4.) / 2.0)
    assert_allclose(new_image.pixels[1], (pixels[1] - 13.) / 2.0)
    assert_allclose(new_image.pixels[2], (pixels[2] - 22.) / 2.0)
@raises(ValueError)
def test_normalize_unknown_mode_raises():
    """An unrecognised ``mode`` string must raise ValueError."""
    img = Image.init_blank((2, 2))
    normalize(img, mode='fake')
@raises(ValueError)
def test_normalize_0_variance_raises():
    """A scale function that returns zero must raise ValueError by default."""
    img = Image.init_blank((2, 2))
    zero_scale = lambda *args, **kwargs: np.array(0.0)
    normalize(img, scale_func=zero_scale)
def test_normalize_0_variance_warning():
    """With error_on_divide_by_zero=False a zero per-channel scale only
    warns; the offending channel is centred but left unscaled."""
    # np.float was removed in NumPy 1.24 -> use the builtin float.
    pixels = np.arange(8, dtype=float).reshape([2, 2, 2])
    image = Image(pixels, copy=False)
    # First channel scaled by 2, second channel would divide by zero.
    dummy_scale = lambda *a, **kwargs: np.array([2.0, 0.0])
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        new_image = normalize(image, scale_func=dummy_scale,
                              error_on_divide_by_zero=False,
                              mode='per_channel')
    assert_allclose(new_image.pixels[0], [[-0.75, -0.25], [0.25, 0.75]])
    assert_allclose(new_image.pixels[1], [[-1.5, -0.5], [0.5, 1.5]])
| |
from neupy import layers
from neupy.utils import function_name_scope
__all__ = ('resnet50',)
@function_name_scope
def ResidualUnit(n_input_filters, stride=1, rate=1, has_branch=False,
                 name=None):
    """Build one bottleneck residual unit (1x1 -> 3x3 -> 1x1 convolutions
    plus a shortcut connection, combined with an elementwise sum).

    Parameters
    ----------
    n_input_filters : int
        Number of filters in the two bottleneck convolutions; the unit's
        output has ``4 * n_input_filters`` filters.
    stride : int
        Stride applied in the first 1x1 convolution (and in the shortcut
        convolution when ``has_branch=True``). Defaults to ``1``.
    rate : int
        Dilation rate of the 3x3 convolution. Defaults to ``1``.
    has_branch : bool
        When ``True`` the shortcut gets its own 1x1 convolution + batch
        norm (needed whenever the number of filters or the stride changes).
    name : str
        Suffix used to build the layer names (e.g. ``'2a'``).
        NOTE(review): the default ``None`` would crash the string
        concatenations below -- callers always pass a name; confirm before
        relying on the default.
    """
    def bn_name(index):
        # Batch-norm layer names follow the Caffe ResNet convention,
        # e.g. 'bn2a_branch2a'.
        return 'bn' + name + '_branch' + index
    def conv_name(index):
        # Convolution layer names, e.g. 'res2a_branch2a'.
        return 'res' + name + '_branch' + index
    # Bottleneck expansion factor of 4, as in the original ResNet paper.
    n_output_filters = 4 * n_input_filters
    main_branch = layers.join(
        # The main purpose of this 1x1 convolution layer is to
        # reduce number of filters. For instance, for the tensor with
        # 256 filters it can be reduced to 64. This trick allows to
        # reduce computation by factor of 4.
        layers.Convolution(
            size=(1, 1, n_input_filters),
            stride=stride,
            bias=None,
            name=conv_name('2a'),
        ),
        layers.BatchNorm(name=bn_name('2a')),
        layers.Relu(),
        # This convolution layer applies 3x3 filter in order to
        # extract features.
        layers.Convolution(
            (3, 3, n_input_filters),
            padding='same',
            dilation=rate,
            bias=None,
            name=conv_name('2b'),
        ),
        layers.BatchNorm(name=bn_name('2b')),
        layers.Relu(),
        # Last layer reverses operations of the first layer. In this
        # case we increase number of filters. For instance, from previously
        # obtained 64 filters we can increase it back to the 256 filters
        layers.Convolution(
            (1, 1, n_output_filters),
            bias=None,
            name=conv_name('2c')
        ),
        layers.BatchNorm(name=bn_name('2c')),
    )
    if has_branch:
        # Projection shortcut: match the main branch's filter count/stride.
        residual_branch = layers.join(
            layers.Convolution(
                (1, 1, n_output_filters),
                stride=stride,
                bias=None,
                name=conv_name('1'),
            ),
            layers.BatchNorm(name=bn_name('1')),
        )
    else:
        # Empty list defines residual connection, meaning that
        # output from this branch would be equal to its input
        residual_branch = layers.Identity('residual-' + name)
    return layers.join(
        # For the output from two branches we just combine results
        # with simple elementwise sum operation. The main purpose of
        # the residual connection is to build shortcuts for the
        # gradient during backpropagation.
        (main_branch | residual_branch),
        layers.Elementwise(),
        layers.Relu(),
    )
def resnet50(input_shape=(224, 224, 3), include_global_pool=True,
             in_out_ratio=32):
    """
    ResNet50 network architecture with random parameters. Parameters
    can be loaded using ``neupy.storage`` module.

    ResNet50 has roughly 25.5 million parameters.

    Parameters
    ----------
    input_shape : tuple
        Network's input shape. Defaults to ``(224, 224, 3)``.

    include_global_pool : bool
        Specifies if returned output should include global pooling
        layer. Defaults to ``True``.

    in_out_ratio : {4, 8, 16, 32}
        Every layer that applies strides reduces height and width per every
        image. There are 5 of these layers in Resnet and at the end each
        dimensions gets reduced by ``32``. For example, 224x224 image
        will be reduced to 7x7 image patches. This parameter specifies
        what level of reduction we want to obtain after we've propagated
        network through all the convolution layers.

    Notes
    -----
    Because of the global pooling layer, ResNet50 can be applied to
    the images with variable sizes. The only limitation is that image
    size should be bigger than 32x32, otherwise network won't be able
    to apply all transformations to the image.

    Examples
    --------
    ResNet-50 for ImageNet classification

    >>> from neupy import architectures, algorithms
    >>>
    >>> resnet = architectures.resnet50()
    >>> resnet
    (?, 224, 224, 3) -> [... 187 layers ...] -> (?, 1000)
    >>>
    >>> optimizer = algorithms.Momentum(resnet)

    ResNet-50 for custom classification task

    >>> from neupy import architectures
    >>> resnet = architectures.resnet50(include_global_pool=False)
    >>> resnet
    (?, 224, 224, 3) -> [... 185 layers ...] -> (?, 7, 7, 2048)
    >>>
    >>> from neupy.layers import *
    >>> resnet = resnet >> GlobalPooling('avg') >> Softmax(21)
    (?, 224, 224, 3) -> [... 187 layers ...] -> (?, 21)

    ResNet-50 for image segmentation

    >>> from neupy import architectures
    >>> resnet = architectures.resnet50(
    ...     include_global_pool=False,
    ...     in_out_ratio=8,
    ... )
    >>> resnet
    (?, 224, 224, 3) -> [... 185 layers ...] -> (?, 28, 28, 2048)

    See Also
    --------
    :architecture:`vgg16` : VGG16 network
    :architecture:`squeezenet` : SqueezeNet network
    :architecture:`resnet50` : ResNet-50 network

    References
    ----------
    Deep Residual Learning for Image Recognition.
    https://arxiv.org/abs/1512.03385
    """
    # For smaller in_out_ratio values the later stages trade strides for
    # dilated (atrous) convolutions, which keeps the receptive field while
    # preserving spatial resolution (useful for segmentation).
    in_out_configs = {
        4: {'strides': [1, 1, 1], 'rates': [2, 4, 8]},
        8: {'strides': [2, 1, 1], 'rates': [1, 2, 4]},
        16: {'strides': [2, 2, 1], 'rates': [1, 1, 2]},
        32: {'strides': [2, 2, 2], 'rates': [1, 1, 1]},
    }
    if in_out_ratio not in in_out_configs:
        # Fixed typo ("folowing") and show a plain sorted list instead of
        # a dict_keys(...) repr in the error message.
        raise ValueError(
            "Expected one of the following in_out_ratio values: {}, got "
            "{} instead.".format(sorted(in_out_configs), in_out_ratio))

    strides = in_out_configs[in_out_ratio]['strides']
    rates = in_out_configs[in_out_ratio]['rates']

    resnet = layers.join(
        layers.Input(input_shape),

        # Convolutional layer reduces image's height and width by a factor
        # of 2 (because of the stride)
        # from (3, 224, 224) to (64, 112, 112)
        layers.Convolution(
            (7, 7, 64), stride=2, bias=None,
            padding='same', name='conv1'
        ),
        layers.BatchNorm(name='bn_conv1'),
        layers.Relu(),

        # Stride equal to 2 reduces image size by a factor of two
        # from (64, 112, 112) to (64, 56, 56)
        layers.MaxPooling((3, 3), stride=2, padding="same"),

        # The branch option applies extra convolution + batch
        # normalization transformations to the residual
        ResidualUnit(64, name='2a', has_branch=True),
        ResidualUnit(64, name='2b'),
        ResidualUnit(64, name='2c'),

        # When stride=2 reduces width and height by factor of 2
        ResidualUnit(128, stride=strides[0], name='3a', has_branch=True),
        ResidualUnit(128, rate=rates[0], name='3b'),
        ResidualUnit(128, rate=rates[0], name='3c'),
        ResidualUnit(128, rate=rates[0], name='3d'),

        # When stride=2 reduces width and height by factor of 2
        ResidualUnit(256, rate=rates[0], name='4a',
                     stride=strides[1], has_branch=True),
        ResidualUnit(256, rate=rates[1], name='4b'),
        ResidualUnit(256, rate=rates[1], name='4c'),
        ResidualUnit(256, rate=rates[1], name='4d'),
        ResidualUnit(256, rate=rates[1], name='4e'),
        ResidualUnit(256, rate=rates[1], name='4f'),

        # When stride=2 reduces width and height by factor of 2
        ResidualUnit(512, rate=rates[1], name='5a',
                     stride=strides[2], has_branch=True),
        ResidualUnit(512, rate=rates[2], name='5b'),
        ResidualUnit(512, rate=rates[2], name='5c'),
    )

    if include_global_pool:
        resnet = layers.join(
            resnet,
            # Since the final residual unit has 2048 output filters, global
            # pooling will replace every output image with single average
            # value. Despite input image size, output from this layer always
            # will be a vector with 2048 values.
            layers.GlobalPooling('avg'),
            layers.Softmax(1000, name='fc1000'),
        )
    return resnet
| |
#!/usr/bin/env python
# Created by: Robert Cimrman, 05.12.2005
"""Benchamrks for umfpack module"""
from __future__ import division, print_function, absolute_import
from optparse import OptionParser
import time
import urllib.request, urllib.parse, urllib.error
import gzip
import numpy as np
import sys
import scipy.sparse as sp
import scipy.sparse.linalg.dsolve.umfpack as um
import scipy.linalg as nla
defaultURL = 'http://www.cise.ufl.edu/research/sparse/HBformat/'
usage = """%%prog [options] <matrix file name> [<matrix file name>, ...]
<matrix file name> can be a local or distant (gzipped) file
default url is:
%s
supported formats are:
triplet .. [nRow, nCol, nItem] followed by 'nItem' * [ir, ic, value]
hb .. Harwell-Boeing format N/A
""" % defaultURL
##
# 05.12.2005, c
def read_triplet(fd):
    """Read a sparse matrix in simple triplet format from a file object.

    Expected layout: a header line ``nRow nCol``, a line holding ``nItem``,
    then ``nItem`` lines of ``row col value``.

    Returns a ``scipy.sparse.csc_matrix`` of shape ``(nRow, nCol)``.
    """
    nRow, nCol = map(int, fd.readline().split())
    nItem = int(fd.readline())
    ij = np.zeros((nItem, 2), np.int32)
    val = np.zeros((nItem,), np.float64)
    # Read exactly nItem entries so trailing blank lines cannot overrun
    # the preallocated arrays (fd.readlines() could yield extra lines).
    for ii in range(nItem):
        aux = fd.readline().split()
        ij[ii] = int(aux[0]), int(aux[1])
        val[ii] = float(aux[2])
    # Modern scipy constructor: (data, (row, col)) with shape=; the old
    # ``dims``/``nzmax`` keywords were removed from scipy long ago.
    mtx = sp.csc_matrix((val, (ij[:, 0], ij[:, 1])), shape=(nRow, nCol))
    return mtx
##
# 06.12.2005, c
def read_triplet2(fd):
    """Vectorised variant of :func:`read_triplet`.

    The original implementation called ``io.read_array`` -- a module that
    was never imported here and an API that was removed from scipy.
    ``numpy.loadtxt`` is the supported replacement for columnar text data.
    """
    nRow, nCol = map(int, fd.readline().split())
    nItem = int(fd.readline())
    # reshape(-1, 3) guards the nItem == 1 case where loadtxt returns 1-D.
    data = np.loadtxt(fd).reshape(-1, 3)
    ij = data[:nItem, :2].astype(np.int32)
    val = data[:nItem, 2].astype(np.float64)
    mtx = sp.csc_matrix((val, (ij[:, 0], ij[:, 1])), shape=(nRow, nCol))
    return mtx
formatMap = {'triplet' : read_triplet}
##
# 05.12.2005, c
def readMatrix(matrixName, options):
    """Fetch (optionally over HTTP) and parse a sparse matrix file.

    Parameters
    ----------
    matrixName : str
        Local path or URL of the (possibly gzipped) matrix file.
    options : optparse.Values
        Parsed command-line options; uses ``default_url`` and ``format``.

    Returns the parsed sparse matrix. Raises ValueError for an
    unsupported format.
    """
    if options.default_url:
        matrixName = defaultURL + matrixName
        print('url:', matrixName)

    if matrixName.startswith(('http://', 'https://')):
        # The file-level imports are Python-3-only (urllib.request), so the
        # old Python 2 urllib.urlretrieve fallback was dead code.
        fileName, status = urllib.request.urlretrieve(matrixName)
    else:
        fileName = matrixName
    print('file:', fileName)

    try:
        readMatrix = formatMap[options.format]
    except KeyError:
        # Narrow except: a bare ``except:`` would also have masked
        # unrelated errors such as AttributeError on ``options``.
        raise ValueError('unsupported format: %s' % options.format)
    print('format:', options.format)

    print('reading...')
    # Open in text mode ('rt') so gzipped files decode to str on Python 3;
    # binary-mode lines would break the int()/float() parsing.
    if fileName.endswith('.gz'):
        fd = gzip.open(fileName, 'rt')
    else:
        fd = open(fileName)
    try:
        mtx = readMatrix(fd)
    finally:
        # Close the file even when parsing fails.
        fd.close()
    print('ok')

    return mtx
##
# 05.12.2005, c
def main():
    """Parse options, solve every requested matrix with umfpack (optionally
    comparing against scipy's sparse direct solver) and report timings."""
    parser = OptionParser(usage=usage)
    parser.add_option("-c", "--compare",
                      action="store_true", dest="compare",
                      default=False,
                      help="compare with default scipy.sparse solver [default: %default]")
    parser.add_option("-p", "--plot",
                      action="store_true", dest="plot",
                      default=False,
                      help="plot time statistics [default: %default]")
    parser.add_option("-d", "--default-url",
                      action="store_true", dest="default_url",
                      default=False,
                      help="use default url [default: %default]")
    parser.add_option("-f", "--format", type=type(''),
                      dest="format", default='triplet',
                      help="matrix format [default: %default]")
    (options, args) = parser.parse_args()

    if len(args) >= 1:
        matrixNames = args
    else:
        parser.print_help()
        return

    sizes, nnzs, times, errors = [], [], [], []
    legends = ['umfpack', 'sparse.solve']
    for ii, matrixName in enumerate(matrixNames):
        print('*' * 50)
        mtx = readMatrix(matrixName, options)

        sizes.append(mtx.shape)
        nnzs.append(mtx.nnz)
        # Row 0: umfpack, row 1: scipy comparison (when --compare).
        tts = np.zeros((2,), dtype=np.double)
        times.append(tts)
        err = np.zeros((2, 2), dtype=np.double)
        errors.append(err)

        print('size              : %s (%d nnz)' % (mtx.shape, mtx.nnz))

        # Manufactured solution of all ones, so the exact answer is known.
        sol0 = np.ones((mtx.shape[0],), dtype=np.double)
        rhs = mtx * sol0

        umfpack = um.UmfpackContext()

        # time.clock() was removed in Python 3.8; perf_counter() is the
        # documented replacement for interval timing.
        tt = time.perf_counter()
        sol = umfpack(um.UMFPACK_A, mtx, rhs, autoTranspose=True)
        tts[0] = time.perf_counter() - tt
        print("umfpack           : %.2f s" % tts[0])

        error = mtx * sol - rhs
        err[0, 0] = nla.norm(error)
        print('||Ax-b|| :', err[0, 0])

        error = sol0 - sol
        err[0, 1] = nla.norm(error)
        print('||x - x_{exact}|| :', err[0, 1])

        if options.compare:
            tt = time.perf_counter()
            # scipy.sparse has no ``solve``; spsolve is the sparse direct
            # solver this benchmark was meant to compare against.
            sol = sp.linalg.spsolve(mtx, rhs)
            tts[1] = time.perf_counter() - tt
            print("sparse.solve      : %.2f s" % tts[1])

            error = mtx * sol - rhs
            err[1, 0] = nla.norm(error)
            print('||Ax-b|| :', err[1, 0])

            error = sol0 - sol
            err[1, 1] = nla.norm(error)
            print('||x - x_{exact}|| :', err[1, 1])

    if options.plot:
        try:
            import pylab
        except ImportError:
            raise ImportError("could not import pylab")
        times = np.array(times)
        print(times)
        pylab.plot(times[:, 0], 'b-o')
        if options.compare:
            pylab.plot(times[:, 1], 'r-s')
        else:
            del legends[1]

        print(legends)

        ax = pylab.axis()
        y2 = 0.5 * (ax[3] - ax[2])
        xrng = list(range(len(nnzs)))
        # max(..., 1) avoids ZeroDivisionError when only one matrix was run.
        denom = max(len(xrng) - 1, 1)
        for ii in xrng:
            yy = y2 + 0.4 * (ax[3] - ax[2]) \
                * np.sin(ii * 2 * np.pi / denom)
            if options.compare:
                pylab.text(ii + 0.02, yy,
                           '%s\n%.2e err_umf\n%.2e err_sp'
                           % (sizes[ii], np.sum(errors[ii][0, :]),
                              np.sum(errors[ii][1, :])))
            else:
                pylab.text(ii + 0.02, yy,
                           '%s\n%.2e err_umf'
                           % (sizes[ii], np.sum(errors[ii][0, :])))
            pylab.plot([ii, ii], [ax[2], ax[3]], 'k:')
        pylab.xticks(xrng, ['%d' % (nnzs[ii]) for ii in xrng])
        pylab.xlabel('nnz')
        pylab.ylabel('time [s]')
        pylab.legend(legends)
        pylab.axis([ax[0] - 0.05, ax[1] + 1, ax[2], ax[3]])
        pylab.show()
# Script entry point: run the benchmark when executed directly.
if __name__ == '__main__':
    main()
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for GradientDescent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class GradientDescentOptimizerTest(test.TestCase):
  """Functional tests for the Keras v2 SGD optimizer without momentum."""

  @test_util.run_in_graph_and_eager_modes
  def testBasic(self):
    """One SGD step with a constant scalar learning rate."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        sgd = gradient_descent.SGD(3.0)
        sgd_op = sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
        self.evaluate(variables.global_variables_initializer())
        # Run 1 step of sgd
        self.evaluate(sgd_op)
        # Validate updated params: var -= lr * grad
        self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
                                           self.evaluate(var0))
        self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
                                           self.evaluate(var1))

  @test_util.run_in_graph_and_eager_modes
  def testBasicWithLearningRateDecay(self):
    """Two SGD steps with decay: effective lr is lr / (1 + decay * step)."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        learning_rate = 3.0
        decay = 0.5
        sgd = gradient_descent.SGD(learning_rate=learning_rate, decay=decay)
        # In graph mode the same op is re-run; in eager mode apply_gradients
        # must be called once per step.
        if not context.executing_eagerly():
          sgd_op = sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
        self.evaluate(variables.global_variables_initializer())
        # Run 2 steps of sgd
        if not context.executing_eagerly():
          self.evaluate(sgd_op)
        else:
          sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
        # Validate updated params after the first step (effective lr = 3.0)
        self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
                                           self.evaluate(var0))
        self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
                                           self.evaluate(var1))
        if not context.executing_eagerly():
          self.evaluate(sgd_op)
        else:
          sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
        # Validate updated params after the second step (effective lr = 2.0
        # because of decay=0.5 at iteration 1)
        self.assertAllCloseAccordingToType(
            [1.0 - 3.0 * 0.1 - 2.0 * 0.1, 2.0 - 3.0 * 0.1 - 2.0 * 0.1],
            self.evaluate(var0))
        self.assertAllCloseAccordingToType(
            [3.0 - 3.0 * 0.01 - 2.0 * 0.01, 4.0 - 3.0 * 0.01 - 2.0 * 0.01],
            self.evaluate(var1))

  @test_util.run_in_graph_and_eager_modes
  def testBasicCallableParams(self):
    """The learning rate may be supplied as a zero-argument callable."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        lr = lambda: 3.0
        sgd = gradient_descent.SGD(lr)
        sgd_op = sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
        self.evaluate(variables.global_variables_initializer())
        # Run 1 step of sgd
        self.evaluate(sgd_op)
        # Validate updated params
        self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
                                           self.evaluate(var0))
        self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
                                           self.evaluate(var1))

  @test_util.run_in_graph_and_eager_modes
  def testMinimizeResourceVariable(self):
    """minimize() computes gradients of a callable loss and applies them."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0], dtype=dtype)
        x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
        loss = lambda: math_ops.matmul(var0, x) + var1  # pylint: disable=cell-var-from-loop
        sgd = gradient_descent.SGD(1.0)
        sgd_op = sgd.minimize(loss, [var0, var1])
        self.evaluate(variables.global_variables_initializer())
        # Run 1 step of sgd
        self.evaluate(sgd_op)
        # Validate updated params: d(loss)/d(var0) = x^T, d(loss)/d(var1) = 1
        self.assertAllCloseAccordingToType([[1.0 - 4.0, 2.0 - 5.0]],
                                           self.evaluate(var0))
        self.assertAllCloseAccordingToType([3.0 - 1.0], self.evaluate(var1))

  @test_util.run_deprecated_v1
  def testMinimizeSparseResourceVariable(self):
    """minimize() through an embedding_lookup produces sparse gradients."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0], dtype=dtype)
        x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
        def loss():
          pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)  # pylint: disable=cell-var-from-loop
          pred += var1  # pylint: disable=cell-var-from-loop
          return pred * pred
        sgd_op = gradient_descent.SGD(1.0).minimize(loss, [var0, var1])
        self.evaluate(variables.global_variables_initializer())
        # Run 1 step of sgd
        self.evaluate(sgd_op)
        # Validate updated params: pred = 4 + 10 + 3 = 17, grad = 2 * pred
        np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
        np_grad = 2 * np_pred
        self.assertAllCloseAccordingToType(
            [[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], self.evaluate(var0))
        self.assertAllCloseAccordingToType([3.0 - np_grad], self.evaluate(var1))

  def testTensorLearningRate(self):
    """The learning rate may be supplied as a constant tensor."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        lrate = constant_op.constant(3.0)
        sgd_op = gradient_descent.SGD(lrate).apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        self.evaluate(variables.global_variables_initializer())
        # Run 1 step of sgd
        self.evaluate(sgd_op)
        # Validate updated params
        self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
                                           self.evaluate(var0))
        self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
                                           self.evaluate(var1))

  @test_util.run_deprecated_v1
  def testGradWrtRef(self):
    """Gradients computed w.r.t. ref variables equal 1 for a sum loss."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        opt = gradient_descent.SGD(3.0)
        values = [1.0, 3.0]
        vars_ = [variables.Variable([v], dtype=dtype) for v in values]
        loss = lambda: vars_[0] + vars_[1]  # pylint: disable=cell-var-from-loop
        grads_and_vars = opt._compute_gradients(loss, vars_)
        self.evaluate(variables.global_variables_initializer())
        for grad, _ in grads_and_vars:
          self.assertAllCloseAccordingToType([1.0], self.evaluate(grad))

  def testSparseBasic(self):
    """IndexedSlices gradients only update the referenced rows."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
        var1 = variables.Variable([[3.0], [4.0]], dtype=dtype)
        grads0 = ops.IndexedSlices(
            constant_op.constant([0.1], shape=[1, 1], dtype=dtype),
            constant_op.constant([0]), constant_op.constant([2, 1]))
        grads1 = ops.IndexedSlices(
            constant_op.constant([0.01], shape=[1, 1], dtype=dtype),
            constant_op.constant([1]), constant_op.constant([2, 1]))
        sgd_op = gradient_descent.SGD(3.0).apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        self.evaluate(variables.global_variables_initializer())
        # Run 1 step of sgd
        self.evaluate(sgd_op)
        # Validate updated params: only row 0 of var0 / row 1 of var1 change
        self.assertAllCloseAccordingToType([[1.0 - 3.0 * 0.1], [2.0]],
                                           self.evaluate(var0))
        self.assertAllCloseAccordingToType([[3.0], [4.0 - 3.0 * 0.01]],
                                           self.evaluate(var1))

  @test_util.run_deprecated_v1
  def testSparseBasicWithLearningRateDecay(self):
    """Two sparse steps with decay; second step uses the decayed rate."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
        var1 = variables.Variable([[3.0], [4.0]], dtype=dtype)
        grads0 = ops.IndexedSlices(
            constant_op.constant([0.1], shape=[1, 1], dtype=dtype),
            constant_op.constant([0]), constant_op.constant([2, 1]))
        grads1 = ops.IndexedSlices(
            constant_op.constant([0.01], shape=[1, 1], dtype=dtype),
            constant_op.constant([1]), constant_op.constant([2, 1]))
        sgd_op = gradient_descent.SGD(
            3.0, decay=0.5).apply_gradients(
                zip([grads0, grads1], [var0, var1]))
        self.evaluate(variables.global_variables_initializer())
        # Run 2 steps of sgd
        self.evaluate(sgd_op)
        # Validate updated params after the first step
        self.assertAllCloseAccordingToType([[1.0 - 3.0 * 0.1], [2.0]],
                                           self.evaluate(var0))
        self.assertAllCloseAccordingToType([[3.0], [4.0 - 3.0 * 0.01]],
                                           self.evaluate(var1))
        self.evaluate(sgd_op)
        # Validate updated params after the second (decayed, lr = 2.0) step
        self.assertAllCloseAccordingToType(
            [[1.0 - 3.0 * 0.1 - 2.0 * 0.1], [2.0]], self.evaluate(var0))
        self.assertAllCloseAccordingToType(
            [[3.0], [4.0 - 3.0 * 0.01 - 2.0 * 0.01]], self.evaluate(var1))

  def testCapturingInDefunWhileExecutingEagerly(self):
    """Optimizer state must not leak graph tensors into later eager calls."""
    with context.eager_mode():
      optimizer = gradient_descent.SGD(1.0)

      def step():
        # A fresh variable each call: v = 1, loss = v**2, grad = 2.
        self.v = resource_variable_ops.ResourceVariable(1.0)
        with backprop.GradientTape() as tape:
          loss = self.v**2
        grad = tape.gradient(loss, self.v)
        optimizer.apply_gradients([(grad, self.v)])
        return self.v.read_value()

      compiled_step = function.defun(step)

      self.assertEqual(float(step()), -1.0)
      self.assertEqual(float(compiled_step()), -1.0)
      # This shouldn't fail; in particular, the learning rate tensor should
      # be an EagerTensor once again, not a graph Tensor.
      self.assertEqual(float(step()), -1.0)

  def testConstructSGDWithLR(self):
    """The legacy ``lr`` kwarg is accepted and wins over ``learning_rate``."""
    opt = gradient_descent.SGD(lr=1.0)
    self.assertEqual(opt.lr, 1.0)
    opt_2 = gradient_descent.SGD(learning_rate=0.1, lr=1.0)
    self.assertEqual(opt_2.lr, 1.0)
    opt_3 = gradient_descent.SGD(learning_rate=0.1)
    self.assertEqual(opt_3.lr, 0.1)
class MomentumOptimizerTest(test.TestCase):
def _update_nesterov_momentum_numpy(self, var, accum, g, lr, momentum):
accum = accum * momentum - g * lr
var += (accum * momentum - g * lr)
return var, accum
  @test_util.run_in_graph_and_eager_modes
  def testBasic(self):
    """Two momentum steps; checks both the accumulator slots and the vars."""
    for _, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
      with self.cached_session():
        var0 = resource_variable_ops.ResourceVariable([1.0, 2.0],
                                                      dtype=dtype,
                                                      name="var0")
        var1 = resource_variable_ops.ResourceVariable([3.0, 4.0],
                                                      dtype=dtype,
                                                      name="var1")
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        learning_rate = 2.0
        momentum = 0.9
        mom_opt = gradient_descent.SGD(
            learning_rate=learning_rate, momentum=momentum)
        # self.assertFalse(mom_opt._initial_decay)
        mom_update = mom_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        # Check we have slots
        slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
        # Step 1: the momentum accumulators where 0. So we should see a normal
        # update: v -= grad * learning_rate
        self.evaluate(variables.global_variables_initializer())
        self.evaluate(mom_update)
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(
            np.array([-0.2, -0.2]), self.evaluate(slot0))
        self.assertAllCloseAccordingToType(
            np.array([-0.02, -0.02]), self.evaluate(slot1))
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(
            np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
            self.evaluate(var0))
        self.assertAllCloseAccordingToType(
            np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
            self.evaluate(var1))
        # Step 2: the momentum accumulators contain the previous update.
        self.evaluate(mom_update)
        # NOTE(review): in eager mode apply_gradients below runs an extra
        # step on top of the evaluate above -- confirm this matches the
        # expectations asserted underneath before relying on this test.
        if context.executing_eagerly():
          mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * (-0.2) - 2.0 * 0.1), (0.9 * (-0.2) - 2.0 * 0.1)]),
            self.evaluate(slot0))
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * (-0.02) - 2.0 * 0.01),
                      (0.9 * (-0.02) - 2.0 * 0.01)]), self.evaluate(slot1))
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(
            np.array([
                1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
                2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
            ]), self.evaluate(var0))
        self.assertAllCloseAccordingToType(
            np.array([
                2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
                3.98 - ((0.9 * 0.01 + 0.01) * 2.0)
            ]), self.evaluate(var1))
  @test_util.run_deprecated_v1
  def testNesterovMomentum(self):
    """Four Nesterov steps compared against the NumPy reference update."""
    for dtype in [dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var0 = resource_variable_ops.ResourceVariable([1.0, 2.0],
                                                      dtype=dtype,
                                                      name="var0")
        var1 = resource_variable_ops.ResourceVariable([3.0, 4.0],
                                                      dtype=dtype,
                                                      name="var1")
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        # Gradients of this loss are 10*var0 and the constant 3.
        loss = lambda: 5 * var0 * var0 + 3 * var1  # pylint: disable=cell-var-from-loop
        mom_op = gradient_descent.SGD(
            learning_rate=2.0, momentum=0.9, nesterov=True)
        opt_op = mom_op.minimize(loss, [var0, var1])
        variables.global_variables_initializer().run()
        for _ in range(1, 5):
          opt_op.run()
          var0_np, accum0_np = self._update_nesterov_momentum_numpy(
              var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
          var1_np, accum1_np = self._update_nesterov_momentum_numpy(
              var1_np, accum1_np, 3, 2.0, 0.9)
          self.assertAllClose(var0_np, self.evaluate(var0))
          self.assertAllClose(var1_np, self.evaluate(var1))
  @test_util.run_deprecated_v1
  def testSparseNesterovMomentum(self):
    """Nesterov updates fed as IndexedSlices match the NumPy reference."""
    for dtype in [dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        # First pass: pre-compute the gradient sequence with the reference
        # implementation (grads depend on the evolving var0).
        grads = []
        for t in range(1, 5):
          grads.append(var0_np * 10)
          var0_np, accum0_np = self._update_nesterov_momentum_numpy(
              var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
          var1_np, accum1_np = self._update_nesterov_momentum_numpy(
              var1_np, accum1_np, 3, 2.0, 0.9)
        # Reset the reference state for the actual comparison run.
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        var0 = resource_variable_ops.ResourceVariable(
            var0_np, dtype=dtype, name="var0")
        var1 = resource_variable_ops.ResourceVariable(
            var1_np, dtype=dtype, name="var1")
        mom_op = gradient_descent.SGD(
            learning_rate=2.0, momentum=0.9, nesterov=True)
        # Feed the dense gradients through a placeholder wrapped as
        # IndexedSlices covering every row.
        x_feed = array_ops.placeholder(dtype)
        y_feed = ops.IndexedSlices(x_feed, constant_op.constant([0, 1]),
                                   constant_op.constant([2]))
        grads_and_vars = [(y_feed, var0),
                          (constant_op.constant([3.0, 3.0], dtype=dtype), var1)]
        opt_update = mom_op.apply_gradients(grads_and_vars)
        variables.global_variables_initializer().run()
        for t in range(1, 5):
          opt_update.run(feed_dict={x_feed: grads[t - 1]})
          var0_np, accum0_np = self._update_nesterov_momentum_numpy(
              var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
          var1_np, accum1_np = self._update_nesterov_momentum_numpy(
              var1_np, accum1_np, 3, 2.0, 0.9)
          self.assertAllClose(var0_np, self.evaluate(var0))
          self.assertAllClose(var1_np, self.evaluate(var1))
  # NOTE(review): stacking run_in_graph_and_eager_modes on top of
  # run_deprecated_v1 is unusual -- confirm both decorators are intended.
  @test_util.run_in_graph_and_eager_modes
  @test_util.run_deprecated_v1
  def testMinimizeSparseResourceVariable(self):
    """One momentum=0 step through an embedding_lookup-based squared loss."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      # This test invokes the ResourceSparseApplyMomentum operation, which
      # did not have a registered GPU kernel as of April 2018. With graph
      # execution, the placement algorithm notices this and automatically
      # places the variable in CPU (host) memory. With eager execution,
      # the variable would be placed in GPU memory if available, which
      # would then conflict with the future invocation of the
      # ResourceSparseApplyMomentum operation.
      # To work around this discrepancy, for now we force the variable
      # to be placed on CPU.
      with ops.device("/cpu:0"):
        var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)

        # pylint: disable=cell-var-from-loop
        def loss():
          x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
          pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
          return pred * pred

        # pylint: enable=cell-var-from-loop
        opt = gradient_descent.SGD(learning_rate=1.0, momentum=0.0)
        sgd_op = opt.minimize(loss, [var0])
        self.evaluate(variables.global_variables_initializer())
        # Run 1 step of sgd
        self.evaluate(sgd_op)
        # Validate updated params: pred = 14, grad = 2*pred*[4, 5]
        self.assertAllCloseAccordingToType([[-111, -138]], self.evaluate(var0))
  @test_util.run_in_graph_and_eager_modes(reset_test=True)
  def testMinimizeWith2DIndicesForEmbeddingLookup(self):
    """A 2-D index tensor in embedding_lookup still yields correct updates."""
    # This test invokes the ResourceSparseApplyMomentum operation, which
    # did not have a registered GPU kernel as of April 2018. With graph
    # execution, the placement algorithm notices this and automatically
    # places the variable in CPU (host) memory. With eager execution,
    # the variable would be placed in GPU memory if available, which
    # would then conflict with the future invocation of the
    # ResourceSparseApplyMomentum operation.
    # To work around this discrepancy, for now we force the variable
    # to be placed on CPU.
    with ops.device("/cpu:0"):
      var0 = resource_variable_ops.ResourceVariable(array_ops.ones([2, 2]))

      def loss():
        return math_ops.reduce_sum(embedding_ops.embedding_lookup(var0, [[1]]))

      opt = gradient_descent.SGD(learning_rate=1.0, momentum=0.0)
      sgd_op = opt.minimize(loss, [var0])
      self.evaluate(variables.global_variables_initializer())
      self.evaluate(sgd_op)
      # Only row 1 receives a gradient (of ones), so only row 1 changes.
      self.assertAllCloseAccordingToType([[1, 1], [0, 0]], self.evaluate(var0))
@test_util.run_deprecated_v1
def testTensorLearningRateAndMomentum(self):
  """Momentum updates are correct when lr and momentum are Tensors."""
  for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
    with self.cached_session():
      var0 = variables.Variable([1.0, 2.0], dtype=dtype)
      var1 = variables.Variable([3.0, 4.0], dtype=dtype)
      grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
      grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
      # Hyperparameters passed as constant Tensors rather than Python floats.
      mom_opt = gradient_descent.SGD(
          learning_rate=constant_op.constant(2.0),
          momentum=constant_op.constant(0.9))
      mom_update = mom_opt.apply_gradients(
          zip([grads0, grads1], [var0, var1]))
      variables.global_variables_initializer().run()
      # Check we have slots
      slot0 = mom_opt.get_slot(var0, "momentum")
      self.assertEqual(slot0.get_shape(), var0.get_shape())
      slot1 = mom_opt.get_slot(var1, "momentum")
      self.assertEqual(slot1.get_shape(), var1.get_shape())
      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([3.0, 4.0], self.evaluate(var1))
      # Step 1: the momentum accumulators where 0. So we should see a normal
      # update: v -= grad * learning_rate
      mom_update.run()
      # Check that the momentum accumulators have been updated.
      self.assertAllCloseAccordingToType(
          np.array([-0.2, -0.2]), self.evaluate(slot0))
      self.assertAllCloseAccordingToType(
          np.array([-0.02, -0.02]), self.evaluate(slot1))
      # Check that the parameters have been updated.
      self.assertAllCloseAccordingToType(
          np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
          self.evaluate(var0))
      self.assertAllCloseAccordingToType(
          np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
          self.evaluate(var1))
      # Step 2: the momentum accumulators contain the previous update.
      mom_update.run()
      # Check that the momentum accumulators have been updated.
      self.assertAllCloseAccordingToType(
          np.array([(0.9 * (-0.2) - 2.0 * 0.1), (0.9 * (-0.2) - 2.0 * 0.1)]),
          self.evaluate(slot0))
      self.assertAllCloseAccordingToType(
          np.array([(0.9 * (-0.02) - 2.0 * 0.01),
                    (0.9 * (-0.02) - 2.0 * 0.01)]), self.evaluate(slot1))
      # Check that the parameters have been updated.
      self.assertAllCloseAccordingToType(
          np.array([
              1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
              2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
          ]), self.evaluate(var0))
      self.assertAllCloseAccordingToType(
          np.array([
              2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
              3.98 - ((0.9 * 0.01 + 0.01) * 2.0)
          ]), self.evaluate(var1))
@test_util.run_deprecated_v1
def testSparse(self):
  """Momentum correctly applies IndexedSlices (sparse) gradients.

  Rows that receive no gradient must keep zero accumulators and
  unchanged parameter values.
  """
  for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
    with self.cached_session():
      var0 = variables.Variable(array_ops.zeros([4, 2], dtype=dtype))
      var1 = variables.Variable(constant_op.constant(1.0, dtype, [4, 2]))
      # grads0 touches only row 1; grads1 touches rows 2 and 3.
      grads0 = ops.IndexedSlices(
          constant_op.constant([[.1, .1]], dtype=dtype),
          constant_op.constant([1]), constant_op.constant([4, 2]))
      grads1 = ops.IndexedSlices(
          constant_op.constant([[.01, .01], [.01, .01]], dtype=dtype),
          constant_op.constant([2, 3]), constant_op.constant([4, 2]))
      mom_opt = gradient_descent.SGD(learning_rate=2.0, momentum=0.9)
      mom_update = mom_opt.apply_gradients(
          zip([grads0, grads1], [var0, var1]))
      variables.global_variables_initializer().run()
      # Check we have slots
      slot0 = mom_opt.get_slot(var0, "momentum")
      self.assertEqual(slot0.get_shape(), var0.get_shape())
      slot1 = mom_opt.get_slot(var1, "momentum")
      self.assertEqual(slot1.get_shape(), var1.get_shape())
      # Fetch params to validate initial values
      self.assertAllClose([0, 0], self.evaluate(var0)[0])
      self.assertAllClose([0, 0], self.evaluate(var0)[1])
      self.assertAllClose([1, 1], self.evaluate(var1)[2])
      # Step 1: the momentum accumulators are 0. So we should see a normal
      # update: v -= grad * learning_rate
      mom_update.run()
      # Check that the momentum accumulators have been updated.
      # Row 0 received no gradient, so its accumulator stays zero.
      self.assertAllCloseAccordingToType(
          np.array([0, 0]),
          self.evaluate(slot0)[0])
      self.assertAllCloseAccordingToType(
          np.array([-2.0 * .1, -2.0 * .1]),
          self.evaluate(slot0)[1])
      self.assertAllCloseAccordingToType(
          np.array([-2.0 * .01, -2.0 * .01]),
          self.evaluate(slot1)[2])
      # Check that the parameters have been updated.
      self.assertAllCloseAccordingToType(
          np.array([0, 0]),
          self.evaluate(var0)[0])
      self.assertAllCloseAccordingToType(
          np.array([-(0.1 * 2.0), -(0.1 * 2.0)]),
          self.evaluate(var0)[1])
      self.assertAllCloseAccordingToType(
          np.array([1.0 - (0.01 * 2.0), 1.0 - (0.01 * 2.0)]),
          self.evaluate(var1)[2])
      # Step 2: the momentum accumulators contain the previous update.
      mom_update.run()
      # Check that the momentum accumulators have been updated.
      self.assertAllClose(np.array([0, 0]), self.evaluate(slot0)[0])
      self.assertAllCloseAccordingToType(
          np.array([(0.9 * (-0.2) - 2.0 * 0.1), (0.9 * (-0.2) - 2.0 * 0.1)]),
          self.evaluate(slot0)[1])
      self.assertAllCloseAccordingToType(
          np.array([(0.9 * (-0.02) - 2.0 * 0.01),
                    (0.9 * (-0.02) - 2.0 * 0.01)]),
          self.evaluate(slot1)[2])
      # Check that the parameters have been updated.
      self.assertAllClose(np.array([0, 0]), self.evaluate(var0)[0])
      self.assertAllCloseAccordingToType(
          np.array([
              -(0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
              -(0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
          ]),
          self.evaluate(var0)[1])
      self.assertAllCloseAccordingToType(
          np.array([
              0.98 - ((0.9 * 0.01 + 0.01) * 2.0),
              0.98 - ((0.9 * 0.01 + 0.01) * 2.0)
          ]),
          self.evaluate(var1)[2])
@test_util.run_deprecated_v1
def testSharing(self):
  """Two apply_gradients ops from one optimizer share the same slot variables.

  Running update1 then update2 must accumulate momentum exactly as two
  consecutive steps of a single update op would.
  """
  for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
    with self.cached_session():
      var0 = variables.Variable([1.0, 2.0], dtype=dtype)
      var1 = variables.Variable([3.0, 4.0], dtype=dtype)
      grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
      grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
      mom_opt = gradient_descent.SGD(learning_rate=2.0, momentum=0.9)
      mom_update1 = mom_opt.apply_gradients(
          zip([grads0, grads1], [var0, var1]))
      mom_update2 = mom_opt.apply_gradients(
          zip([grads0, grads1], [var0, var1]))
      variables.global_variables_initializer().run()
      slot0 = mom_opt.get_slot(var0, "momentum")
      self.assertEqual(slot0.get_shape(), var0.get_shape())
      slot1 = mom_opt.get_slot(var1, "momentum")
      self.assertEqual(slot1.get_shape(), var1.get_shape())
      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([3.0, 4.0], self.evaluate(var1))
      # Step 1: the momentum accumulators where 0. So we should see a normal
      # update: v -= grad * learning_rate
      mom_update1.run()
      # Check that the momentum accumulators have been updated.
      self.assertAllCloseAccordingToType(
          np.array([-0.2, -0.2]), self.evaluate(slot0))
      self.assertAllCloseAccordingToType(
          np.array([-0.02, -0.02]), self.evaluate(slot1))
      # Check that the parameters have been updated.
      self.assertAllCloseAccordingToType(
          np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
          self.evaluate(var0))
      self.assertAllCloseAccordingToType(
          np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
          self.evaluate(var1))
      # Step 2: the second momentum accumulators contain the previous update.
      mom_update2.run()
      # Check that the momentum accumulators have been updated.
      self.assertAllCloseAccordingToType(
          np.array([(0.9 * (-0.2) - 2.0 * 0.1), (0.9 * (-0.2) - 2.0 * 0.1)]),
          self.evaluate(slot0))
      self.assertAllCloseAccordingToType(
          np.array([(0.9 * (-0.02) - 2.0 * 0.01),
                    (0.9 * (-0.02) - 2.0 * 0.01)]), self.evaluate(slot1))
      # Check that the parameters have been updated.
      self.assertAllCloseAccordingToType(
          np.array([
              1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
              2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
          ]), self.evaluate(var0))
      self.assertAllCloseAccordingToType(
          np.array([
              2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
              3.98 - ((0.9 * 0.01 + 0.01) * 2.0)
          ]), self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes
def testConfig(self):
  """get_config/from_config round-trips learning_rate, momentum and nesterov."""
  with self.cached_session():
    opt = gradient_descent.SGD(learning_rate=1.0, momentum=0.9, nesterov=True)
    config = opt.get_config()
    opt2 = gradient_descent.SGD.from_config(config)
    # assert both are equal float values.
    self.assertEqual(
        opt._get_hyper("learning_rate"), opt2._get_hyper("learning_rate"))
    self.assertEqual(opt._get_hyper("momentum"), opt2._get_hyper("momentum"))
    # self.assertEqual(opt._get_hyper("decay"), opt2._get_hyper("decay"))
    var0 = variables.Variable([[1.0], [2.0]], dtype=dtypes.float32)
    loss = lambda: 3 * var0
    # learning rate variable created when calling minimize.
    opt.minimize(loss, [var0])
    self.evaluate(variables.global_variables_initializer())
    # Round-trip again now that the hyperparameters are backed by variables.
    config = opt.get_config()
    opt3 = gradient_descent.SGD.from_config(config)
    self.assertEqual(
        self.evaluate(opt._get_hyper("learning_rate")),
        opt3._get_hyper("learning_rate"))
    self.assertEqual(
        self.evaluate(opt._get_hyper("momentum")),
        opt3._get_hyper("momentum"))
    # self.assertEqual(
    #     self.evaluate(opt._get_hyper("decay")), opt3._get_hyper("decay"))
    self.assertTrue(opt3.nesterov)
def testNesterovWithoutMomentum(self):
  """Constructing SGD with an out-of-range momentum raises ValueError."""
  # NOTE(review): despite the name, this exercises the momentum range
  # validation ("must be between") with momentum=2.0 — confirm intent.
  # assertRaisesRegexp is a deprecated alias; use assertRaisesRegex.
  with self.assertRaisesRegex(ValueError, "must be between"):
    gradient_descent.SGD(learning_rate=1.0, momentum=2.0)
def testConstructMomentumWithLR(self):
  """The legacy `lr` kwarg is honored and takes precedence over learning_rate."""
  opt = gradient_descent.SGD(lr=1.0, momentum=0.9)
  self.assertEqual(opt.lr, 1.0)
  # When both are given, `lr` wins.
  opt_2 = gradient_descent.SGD(learning_rate=0.1, momentum=0.9, lr=1.0)
  self.assertEqual(opt_2.lr, 1.0)
  opt_3 = gradient_descent.SGD(learning_rate=0.1, momentum=0.9)
  self.assertEqual(opt_3.lr, 0.1)
if __name__ == "__main__":
  # Run all test cases via the TensorFlow test runner when executed directly.
  test.main()
| |
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import re
import fnmatch
import sys
import subprocess
import datetime
import os
################################################################################
# file filtering
################################################################################

# fnmatch globs for files that must never be examined.
EXCLUDE = [
    # auto generated:
    'src/qt/bitcoinstrings.cpp',
    'src/chainparamsseeds.h',
    # other external copyrights:
    'src/tinyformat.h',
    'test/functional/test_framework/bignum.py',
    # python init:
    '*__init__.py',
]
EXCLUDE_COMPILED = re.compile('|'.join(fnmatch.translate(glob)
                                       for glob in EXCLUDE))
# Directories (git subtrees) that are excluded wholesale.
EXCLUDE_DIRS = [
    # git subtrees
    "src/crypto/ctaes/",
    "src/leveldb/",
    "src/secp256k1/",
    "src/univalue/",
]
# fnmatch globs for the source-file types that are examined.
INCLUDE = ['*.h', '*.cpp', '*.cc', '*.c', '*.py']
INCLUDE_COMPILED = re.compile('|'.join(fnmatch.translate(glob)
                                       for glob in INCLUDE))

def applies_to_file(filename):
    """Return True when *filename* passes the include/exclude filters."""
    if any(filename.startswith(subtree) for subtree in EXCLUDE_DIRS):
        return False
    return (EXCLUDE_COMPILED.match(filename) is None and
            INCLUDE_COMPILED.match(filename) is not None)
################################################################################
# obtain list of files in repo according to INCLUDE and EXCLUDE
################################################################################
GIT_LS_CMD = 'git ls-files --full-name'.split(' ')
GIT_TOPLEVEL_CMD = 'git rev-parse --show-toplevel'.split(' ')
def call_git_ls(base_directory):
    """Return repo-relative paths of all tracked files under *base_directory*."""
    out = subprocess.check_output([*GIT_LS_CMD, base_directory])
    # Drop the empty entry produced by the output's trailing newline.
    return [f for f in out.decode("utf-8").split('\n') if f != '']
def call_git_toplevel():
    "Returns the absolute path to the project root"
    return subprocess.check_output(GIT_TOPLEVEL_CMD).strip().decode("utf-8")
def get_filenames_to_examine(base_directory):
    "Returns an array of absolute paths to any project files in the base_directory that pass the include/exclude filters"
    root = call_git_toplevel()
    filenames = call_git_ls(base_directory)
    return sorted([os.path.join(root, filename) for filename in filenames if
                   applies_to_file(filename)])
################################################################################
# define and compile regexes for the patterns we are looking for
################################################################################
# Raw string so the escaped parens reach the regex engine instead of being
# invalid string escapes (a SyntaxWarning on modern Python). Value unchanged.
COPYRIGHT_WITH_C = r'Copyright \(c\)'
COPYRIGHT_WITHOUT_C = 'Copyright'
ANY_COPYRIGHT_STYLE = '(%s|%s)' % (COPYRIGHT_WITH_C, COPYRIGHT_WITHOUT_C)
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
YEAR_LIST = '(%s)(, %s)+' % (YEAR, YEAR)
ANY_YEAR_STYLE = '(%s|%s)' % (YEAR_RANGE, YEAR_LIST)
ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE = ("%s %s" % (ANY_COPYRIGHT_STYLE,
                                                ANY_YEAR_STYLE))
ANY_COPYRIGHT_COMPILED = re.compile(ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE)
def compile_copyright_regex(copyright_style, year_style, name):
    """Compile a regex matching '<style> <years>[,] <holder name>'."""
    return re.compile('%s %s,? %s' % (copyright_style, year_style, name))
# Holder names are regex fragments terminated by a literal newline; dots that
# must match literally are escaped with an explicit backslash (\\.) so the
# string values are identical to before but without invalid string escapes.
EXPECTED_HOLDER_NAMES = [
    "Satoshi Nakamoto\n",
    "The Bitcoin Core developers\n",
    "Bitcoin Core Developers\n",
    "BitPay Inc\\.\n",
    "University of Illinois at Urbana-Champaign\\.\n",
    "Pieter Wuille\n",
    # NOTE(review): the '.' below is an unescaped regex dot (matches any
    # character); left as-is to preserve behavior.
    "Wladimir J. van der Laan\n",
    "Jeff Garzik\n",
    "Jan-Klaas Kollhof\n",
    "Sam Rushing\n",
    "ArtForz -- public domain half-a-node\n",
    "Intel Corporation",
    "The Zcash developers",
    "Jeremy Rubin",
]
# Per-holder compiled patterns for the three recognized notice styles.
DOMINANT_STYLE_COMPILED = {}
YEAR_LIST_STYLE_COMPILED = {}
WITHOUT_C_STYLE_COMPILED = {}
for holder_name in EXPECTED_HOLDER_NAMES:
    DOMINANT_STYLE_COMPILED[holder_name] = (
        compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_RANGE, holder_name))
    YEAR_LIST_STYLE_COMPILED[holder_name] = (
        compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_LIST, holder_name))
    WITHOUT_C_STYLE_COMPILED[holder_name] = (
        compile_copyright_regex(COPYRIGHT_WITHOUT_C, ANY_YEAR_STYLE,
                                holder_name))
################################################################################
# search file contents for copyright message of particular category
################################################################################
def get_count_of_copyrights_of_any_style_any_holder(contents):
    """Count every copyright notice of any recognized style in *contents*."""
    return len(ANY_COPYRIGHT_COMPILED.findall(contents))
def file_has_dominant_style_copyright_for_holder(contents, holder_name):
    """True if *contents* carries a '(c)' + year-range notice for the holder."""
    return DOMINANT_STYLE_COMPILED[holder_name].search(contents) is not None
def file_has_year_list_style_copyright_for_holder(contents, holder_name):
    """True if *contents* carries a '(c)' + year-list notice for the holder."""
    return YEAR_LIST_STYLE_COMPILED[holder_name].search(contents) is not None
def file_has_without_c_style_copyright_for_holder(contents, holder_name):
    """True if *contents* carries a notice without '(c)' for the holder."""
    return WITHOUT_C_STYLE_COMPILED[holder_name].search(contents) is not None
################################################################################
# get file info
################################################################################
def read_file(filename):
    """Read and return the entire contents of *filename* as UTF-8 text.

    Uses a context manager so the handle is closed deterministically; the
    original relied on garbage collection to close the file.
    """
    with open(filename, 'r', encoding="utf8") as f:
        return f.read()
def gather_file_info(filename):
    """Read *filename* and return a dict of copyright statistics for it.

    Keys: 'filename', 'contents', 'all_copyrights' (raw notice count),
    three per-holder style maps, and 'classified_copyrights' — the number
    of holders for which at least one style matched (later compared with
    the raw count to flag unexpected holder names).
    """
    info = {}
    info['filename'] = filename
    c = read_file(filename)
    info['contents'] = c
    info['all_copyrights'] = get_count_of_copyrights_of_any_style_any_holder(c)
    info['classified_copyrights'] = 0
    info['dominant_style'] = {}
    info['year_list_style'] = {}
    info['without_c_style'] = {}
    for holder_name in EXPECTED_HOLDER_NAMES:
        has_dominant_style = (
            file_has_dominant_style_copyright_for_holder(c, holder_name))
        has_year_list_style = (
            file_has_year_list_style_copyright_for_holder(c, holder_name))
        has_without_c_style = (
            file_has_without_c_style_copyright_for_holder(c, holder_name))
        info['dominant_style'][holder_name] = has_dominant_style
        info['year_list_style'][holder_name] = has_year_list_style
        info['without_c_style'][holder_name] = has_without_c_style
        # One increment per holder with any recognized style, not per notice.
        if has_dominant_style or has_year_list_style or has_without_c_style:
            info['classified_copyrights'] = info['classified_copyrights'] + 1
    return info
################################################################################
# report execution
################################################################################
# 79-character horizontal rule used to delimit report sections
# (equivalent to the previous '-'.join of 80 empty strings).
SEPARATOR = '-' * 79
def print_filenames(filenames, verbose):
    """Print one tab-indented filename per line when *verbose* is set."""
    if verbose:
        for filename in filenames:
            print("\t%s" % filename)
def print_report(file_infos, verbose):
    """Print the full copyright report for the gathered *file_infos*.

    Sections: examined-file counts bucketed by number of notices, then a
    per-holder breakdown for each of the three recognized styles, then
    files whose notices exceed the classified count (unexpected holders).
    """
    print(SEPARATOR)
    examined = [i['filename'] for i in file_infos]
    print("%d files examined according to INCLUDE and EXCLUDE fnmatch rules" %
          len(examined))
    print_filenames(examined, verbose)
    print(SEPARATOR)
    print('')
    # Bucket files by how many copyright notices they contain (0..3, 4+).
    zero_copyrights = [i['filename'] for i in file_infos if
                       i['all_copyrights'] == 0]
    print("%4d with zero copyrights" % len(zero_copyrights))
    print_filenames(zero_copyrights, verbose)
    one_copyright = [i['filename'] for i in file_infos if
                     i['all_copyrights'] == 1]
    print("%4d with one copyright" % len(one_copyright))
    print_filenames(one_copyright, verbose)
    two_copyrights = [i['filename'] for i in file_infos if
                      i['all_copyrights'] == 2]
    print("%4d with two copyrights" % len(two_copyrights))
    print_filenames(two_copyrights, verbose)
    three_copyrights = [i['filename'] for i in file_infos if
                        i['all_copyrights'] == 3]
    print("%4d with three copyrights" % len(three_copyrights))
    print_filenames(three_copyrights, verbose)
    four_or_more_copyrights = [i['filename'] for i in file_infos if
                               i['all_copyrights'] >= 4]
    print("%4d with four or more copyrights" % len(four_or_more_copyrights))
    print_filenames(four_or_more_copyrights, verbose)
    print('')
    print(SEPARATOR)
    print('Copyrights with dominant style:\ne.g. "Copyright (c)" and '
          '"<year>" or "<startYear>-<endYear>":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        dominant_style = [i['filename'] for i in file_infos if
                          i['dominant_style'][holder_name]]
        if len(dominant_style) > 0:
            print("%4d with '%s'" % (len(dominant_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(dominant_style, verbose)
    print('')
    print(SEPARATOR)
    print('Copyrights with year list style:\ne.g. "Copyright (c)" and '
          '"<year1>, <year2>, ...":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        year_list_style = [i['filename'] for i in file_infos if
                           i['year_list_style'][holder_name]]
        if len(year_list_style) > 0:
            print("%4d with '%s'" % (len(year_list_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(year_list_style, verbose)
    print('')
    print(SEPARATOR)
    print('Copyrights with no "(c)" style:\ne.g. "Copyright" and "<year>" or '
          '"<startYear>-<endYear>":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        without_c_style = [i['filename'] for i in file_infos if
                           i['without_c_style'][holder_name]]
        if len(without_c_style) > 0:
            print("%4d with '%s'" % (len(without_c_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(without_c_style, verbose)
    print('')
    print(SEPARATOR)
    # More raw notices than classified holder matches => unknown holder names.
    unclassified_copyrights = [i['filename'] for i in file_infos if
                               i['classified_copyrights'] < i['all_copyrights']]
    print("%d with unexpected copyright holder names" %
          len(unclassified_copyrights))
    print_filenames(unclassified_copyrights, verbose)
    print(SEPARATOR)
def exec_report(base_directory, verbose):
    """Gather copyright info for every examined file and print the report."""
    filenames = get_filenames_to_examine(base_directory)
    file_infos = [gather_file_info(f) for f in filenames]
    print_report(file_infos, verbose)
################################################################################
# report cmd
################################################################################
REPORT_USAGE = """
Produces a report of all copyright header notices found inside the source files
of a repository.
Usage:
$ ./copyright_header.py report <base_directory> [verbose]
Arguments:
<base_directory> - The base directory of a bitcoin source code repository.
[verbose] - Includes a list of every file of each subcategory in the report.
"""
def report_cmd(argv):
    """Handle the 'report' subcommand: validate arguments, run the report.

    Exits with a usage or error message on bad arguments.
    """
    if len(argv) == 2:
        sys.exit(REPORT_USAGE)
    base_directory = argv[2]
    if not os.path.exists(base_directory):
        sys.exit("*** bad <base_directory>: %s" % base_directory)
    if len(argv) == 3:
        verbose = False
    elif argv[3] == 'verbose':
        verbose = True
    else:
        # Bug fix: report the unrecognized optional argument (argv[3]),
        # not the base directory (argv[2]).
        sys.exit("*** unknown argument: %s" % argv[3])
    exec_report(base_directory, verbose)
################################################################################
# query git for year of last change
################################################################################
# Retained for backward compatibility; call_git_log now builds its argument
# list directly so filenames containing spaces are handled correctly.
GIT_LOG_CMD = "git log --pretty=format:%%ai %s"
def call_git_log(filename):
    """Return the author-date lines of 'git log' for *filename*.

    The command is passed to subprocess as an argument list (not split on
    spaces), fixing the previous breakage on filenames with spaces.
    """
    out = subprocess.check_output(
        ['git', 'log', '--pretty=format:%ai', filename])
    return out.decode("utf-8").split('\n')
def get_git_change_years(filename):
    """Return the years (as strings) in which *filename* changed per git."""
    # Filter empty lines: with no history 'git log' prints nothing, and
    # ''.split('\n') yields [''], which the previous len()==0 check missed.
    git_log_lines = [line for line in call_git_log(filename) if line]
    if len(git_log_lines) == 0:
        # No history: fall back to the current year. Returned as a string
        # for consistency with the parsed years (callers compare strings).
        return [str(datetime.date.today().year)]
    # timestamp is in ISO 8601 format. e.g. "2016-09-05 14:25:32 -0600"
    return [line.split(' ')[0].split('-')[0] for line in git_log_lines]
def get_most_recent_git_change_year(filename):
    """Return the most recent year in which *filename* was changed."""
    return max(get_git_change_years(filename))
################################################################################
# read and write to file
################################################################################
def read_file_lines(filename):
    """Return the contents of *filename* as a list of lines (newlines kept).

    Uses a context manager so the handle is closed even if reading fails.
    """
    with open(filename, 'r', encoding="utf8") as f:
        return f.readlines()
def write_file_lines(filename, file_lines):
    """Overwrite *filename* with the concatenation of *file_lines*."""
    with open(filename, 'w', encoding="utf8") as f:
        f.write(''.join(file_lines))
################################################################################
# update header years execution
################################################################################
# Raw string so the escaped parens reach the regex engine instead of being
# invalid string escapes; value unchanged.
COPYRIGHT = r'Copyright \(c\)'
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
HOLDER = 'The Bitcoin Core developers'
UPDATEABLE_LINE_COMPILED = re.compile(' '.join([COPYRIGHT, YEAR_RANGE, HOLDER]))
def get_updatable_copyright_line(file_lines):
    """Return (index, line) of the first updatable Bitcoin Core copyright
    line in *file_lines*, or (None, None) when there is none."""
    for index, line in enumerate(file_lines):
        if UPDATEABLE_LINE_COMPILED.search(line) is not None:
            return index, line
    return None, None
def parse_year_range(year_range):
    """Split '<start>' or '<start>-<end>' into a (start, end) string tuple."""
    pieces = year_range.split('-')
    if len(pieces) == 1:
        return pieces[0], pieces[0]
    return pieces[0], pieces[1]
def year_range_to_str(start_year, end_year):
    """Render a year range, collapsing identical start/end to one year."""
    if start_year == end_year:
        return start_year
    return "%s-%s" % (start_year, end_year)
def create_updated_copyright_line(line, last_git_change_year):
    """Return *line* with its year range extended to *last_git_change_year*.

    Whatever precedes the copyright notice (the comment leader, which
    varies per file) is preserved; the line is returned unchanged when it
    is already up to date.
    """
    copyright_splitter = 'Copyright (c) '
    pieces = line.split(copyright_splitter)
    leader = pieces[0]
    remainder = pieces[1]
    tokens = remainder.split(' ')
    start_year, end_year = parse_year_range(tokens[0])
    if end_year == last_git_change_year:
        return line
    updated_range = year_range_to_str(start_year, last_git_change_year)
    return (leader + copyright_splitter + updated_range + ' ' +
            ' '.join(tokens[1:]))
def update_updatable_copyright(filename):
    """Bring the Bitcoin Core copyright year of *filename* up to date.

    Reads the file, finds the first updatable copyright line, and rewrites
    the file only when the year range actually needs extending. Prints one
    action message per file either way.
    """
    file_lines = read_file_lines(filename)
    index, line = get_updatable_copyright_line(file_lines)
    if not line:
        print_file_action_message(filename, "No updatable copyright.")
        return
    last_git_change_year = get_most_recent_git_change_year(filename)
    new_line = create_updated_copyright_line(line, last_git_change_year)
    if line == new_line:
        print_file_action_message(filename, "Copyright up-to-date.")
        return
    file_lines[index] = new_line
    write_file_lines(filename, file_lines)
    print_file_action_message(filename,
                              "Copyright updated! -> %s" % last_git_change_year)
def exec_update_header_year(base_directory):
    """Run the copyright-year update over every examined file."""
    for filename in get_filenames_to_examine(base_directory):
        update_updatable_copyright(filename)
################################################################################
# update cmd
################################################################################
UPDATE_USAGE = """
Updates all the copyright headers of "The Bitcoin Core developers" which were
changed in a year more recent than is listed. For example:
// Copyright (c) <firstYear>-<lastYear> The Bitcoin Core developers
will be updated to:
// Copyright (c) <firstYear>-<lastModifiedYear> The Bitcoin Core developers
where <lastModifiedYear> is obtained from the 'git log' history.
This subcommand also handles copyright headers that have only a single year. In those cases:
// Copyright (c) <year> The Bitcoin Core developers
will be updated to:
// Copyright (c) <year>-<lastModifiedYear> The Bitcoin Core developers
where the update is appropriate.
Usage:
$ ./copyright_header.py update <base_directory>
Arguments:
<base_directory> - The base directory of a bitcoin source code repository.
"""
def print_file_action_message(filename, action):
    """Print *filename* left-justified to 52 columns, then the action taken."""
    print("{:<52} {}".format(filename, action))
def update_cmd(argv):
    """Handle the 'update' subcommand: validate arguments, run the update."""
    if len(argv) != 3:
        sys.exit(UPDATE_USAGE)
    base_directory = argv[2]
    if os.path.exists(base_directory):
        exec_update_header_year(base_directory)
    else:
        sys.exit("*** bad base_directory: %s" % base_directory)
################################################################################
# inserted copyright header format
################################################################################
def get_header_lines(header, start_year, end_year):
    """Instantiate a header template: strip its surrounding blank lines,
    fill in the year range, and return newline-terminated lines."""
    body = header.split('\n')[1:-1]
    # The first template line carries the %s placeholder for the year range.
    body[0] = body[0] % year_range_to_str(start_year, end_year)
    return [text + '\n' for text in body]
CPP_HEADER = '''
// Copyright (c) %s The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_cpp_header_lines_to_insert(start_year, end_year):
    """C++-style header lines, reversed so repeated insertion at a fixed
    index reproduces the original order."""
    return reversed(get_header_lines(CPP_HEADER, start_year, end_year))
PYTHON_HEADER = '''
# Copyright (c) %s The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_python_header_lines_to_insert(start_year, end_year):
    """Python-style header lines, reversed for fixed-index insertion."""
    return reversed(get_header_lines(PYTHON_HEADER, start_year, end_year))
################################################################################
# query git for year of last change
################################################################################
def get_git_change_year_range(filename):
    """Return (earliest, latest) year in which *filename* changed per git."""
    years = get_git_change_years(filename)
    return min(years), max(years)
################################################################################
# check for existing core copyright
################################################################################
def file_already_has_core_copyright(file_lines):
    """True if any line matches the Bitcoin Core copyright pattern."""
    index, _ = get_updatable_copyright_line(file_lines)
    return index is not None
################################################################################
# insert header execution
################################################################################
def file_has_hashbang(file_lines):
    """True when the first line is a hashbang ('#!') with content after it."""
    if not file_lines:
        return False
    first_line = file_lines[0]
    # A bare '#!' (two characters or fewer) does not count.
    return len(first_line) > 2 and first_line.startswith('#!')
def insert_python_header(filename, file_lines, start_year, end_year):
    """Insert the Python-style header, below a hashbang line if present."""
    if file_has_hashbang(file_lines):
        insert_idx = 1
    else:
        insert_idx = 0
    header_lines = get_python_header_lines_to_insert(start_year, end_year)
    # Lines arrive reversed, so inserting each at the same index restores
    # the original order.
    for line in header_lines:
        file_lines.insert(insert_idx, line)
    write_file_lines(filename, file_lines)
def insert_cpp_header(filename, file_lines, start_year, end_year):
    """Insert the C++-style header at the very top of the file."""
    header_lines = get_cpp_header_lines_to_insert(start_year, end_year)
    # Reversed input + repeated insert at index 0 restores original order.
    for line in header_lines:
        file_lines.insert(0, line)
    write_file_lines(filename, file_lines)
def exec_insert_header(filename, style):
    """Insert a Bitcoin Core copyright header into *filename*.

    Exits if the file already carries a Bitcoin Core copyright. The year
    range spans the file's first to last change according to git history.
    """
    file_lines = read_file_lines(filename)
    if file_already_has_core_copyright(file_lines):
        sys.exit('*** %s already has a copyright by The Bitcoin Core developers'
                 % (filename))
    start_year, end_year = get_git_change_year_range(filename)
    if style == 'python':
        insert_python_header(filename, file_lines, start_year, end_year)
    else:
        insert_cpp_header(filename, file_lines, start_year, end_year)
################################################################################
# insert cmd
################################################################################
INSERT_USAGE = """
Inserts a copyright header for "The Bitcoin Core developers" at the top of the
file in either Python or C++ style as determined by the file extension. If the
file is a Python file and it has a '#!' starting the first line, the header is
inserted in the line below it.
The copyright dates will be set to be:
"<year_introduced>-<current_year>"
where <year_introduced> is according to the 'git log' history. If
<year_introduced> is equal to <current_year>, the date will be set to be:
"<current_year>"
If the file already has a copyright for "The Bitcoin Core developers", the
script will exit.
Usage:
$ ./copyright_header.py insert <file>
Arguments:
<file> - A source file in the bitcoin repository.
"""
def insert_cmd(argv):
    """Handle the 'insert' subcommand: validate the file, insert a header."""
    if len(argv) != 3:
        sys.exit(INSERT_USAGE)
    filename = argv[2]
    if not os.path.isfile(filename):
        sys.exit("*** bad filename: %s" % filename)
    _, extension = os.path.splitext(filename)
    if extension not in ['.h', '.cpp', '.cc', '.c', '.py']:
        sys.exit("*** cannot insert for file extension %s" % extension)
    style = 'python' if extension == '.py' else 'cpp'
    exec_insert_header(filename, style)
################################################################################
# UI
################################################################################
USAGE = """
copyright_header.py - utilities for managing copyright headers of 'The Bitcoin
Core developers' in repository source files.
Usage:
$ ./copyright_header <subcommand>
Subcommands:
report
update
insert
To see subcommand usage, run them without arguments.
"""
SUBCOMMANDS = ['report', 'update', 'insert']
if __name__ == "__main__":
    # Dispatch to the subcommand handler; print usage when the subcommand
    # is missing or unrecognized.
    if len(sys.argv) == 1:
        sys.exit(USAGE)
    subcommand = sys.argv[1]
    if subcommand not in SUBCOMMANDS:
        sys.exit(USAGE)
    if subcommand == 'report':
        report_cmd(sys.argv)
    elif subcommand == 'update':
        update_cmd(sys.argv)
    elif subcommand == 'insert':
        insert_cmd(sys.argv)
| |
"""
channels.models
~~~~~~~~~~~~~~~
:copyright: (c) 2012 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from datetime import datetime, timedelta
from disqusapi import Paginator
from flask import session
from channels.app import app, disqusapi, schedule
from channels.oauth import api_call
from channels.utils import avatar_hash, datestr_to_datetime, from_cache, secure_avatar
from channels.views import threads, posts, users
class User:
    """Cached access to Disqus users, backed by the channels.views store."""
    @classmethod
    def format(cls, user):
        """Reduce a raw API user dict to the cached representation."""
        return {
            'id': user['id'],
            'avatar': secure_avatar(user['avatar']['cache']),
            'name': user['username'],
            'hash': avatar_hash(user)
        }
    @classmethod
    def save(cls, user):
        """Cache the formatted user, scored by the current time, return it."""
        result = cls.format(user)
        # NOTE(review): '%s' is a platform-specific strftime extension
        # (epoch seconds) and '%m' is the month-of-year — verify this
        # "<seconds>.<month>" score format is intentional.
        users.add(result, datetime.utcnow().strftime('%s.%m'))
        return result
    @classmethod
    def list(cls, offset=0, limit=100):
        """List cached users."""
        return users.list(offset=offset, limit=limit)
    @classmethod
    def list_by_thread(cls, thread_id, offset=0, limit=100):
        """List cached users filtered by *thread_id*."""
        return users.list(thread_id=thread_id, offset=offset, limit=limit)
    @classmethod
    def get_by_id(cls, user_id):
        """Fetch a cached user by id."""
        return users.get(user_id)
class Category:
    """Access to Disqus forum categories, cached once at import time."""
    @classmethod
    def list(cls):
        """Fetch all categories for the configured forum from the API."""
        return list(disqusapi.categories.list(forum=app.config['DISQUS_FORUM'], method='GET', limit=100))
    @classmethod
    def get(cls, name):
        """Look up a cached category by title; raises KeyError if unknown."""
        return cls.cache[name]
# Title -> category mapping, built at import time via the view cache helper.
Category.cache = dict((c['title'], c) for c in from_cache(Category.list))
class Thread:
    """Cached access to Disqus threads, backed by the channels.views stores."""
    @classmethod
    def format(cls, thread):
        """Reduce a raw API thread dict to the cached representation.

        Expects thread['createdAt'] to already be a datetime (see save).
        """
        return {
            'id': thread['id'],
            'title': thread['title'],
            'createdAtISO': thread['createdAt'].isoformat(),
            'category': thread['category'],
            'link': thread['link'],
            'posts': thread['posts'],
        }
    @classmethod
    def save(cls, thread):
        """Cache *thread*, normalizing createdAt to a datetime first."""
        dt = datestr_to_datetime(thread['createdAt'])
        thread['createdAt'] = dt
        result = cls.format(thread)
        # NOTE(review): '%s' is a platform-specific strftime extension
        # (epoch seconds) and '%m' the month — verify this sort-score
        # format is intentional.
        score = dt.strftime('%s.%m')
        threads.add(result, score)
        threads.add_to_set(result['id'], thread['posts'], _key='posts')
        return result
    @classmethod
    def get(cls, thread_id):
        """Return a cached thread, fetching it from the API on a miss."""
        result = threads.get(thread_id)
        if result is None:
            thread = disqusapi.threads.details(thread=thread_id, forum=app.config['DISQUS_FORUM'])
            result = cls.save(thread)
        return result
    @classmethod
    def list_by_author(cls, author_id, offset=0, limit=100):
        """List threads by *author_id*; on a cache miss, fetch and cache them.

        Only the currently authenticated user's threads may be listed.
        """
        assert author_id == session['auth']['user_id']
        result = threads.list(author_id=author_id, offset=offset, limit=limit)
        if result is None:
            result = []
            for thread in api_call(disqusapi.users.listActiveThreads, forum=app.config['DISQUS_FORUM'], method='GET'):
                result.append(Thread.save(thread))
                score = thread['createdAt'].strftime('%s.%m')
                threads.add_to_set(thread['id'], score, author_id=author_id)
            result.reverse()
        return result
    @classmethod
    def list(cls, offset=0, limit=100):
        """List threads in the 'General' category, fetching on a cache miss."""
        result = threads.list(offset=offset, limit=limit)
        if result is None:
            result = []
            for thread in Paginator(disqusapi.threads.list, forum=app.config['DISQUS_FORUM'], category=Category.get('General')['id'], method='GET'):
                result.append(Thread.save(thread))
            result.reverse()
        return result
    @classmethod
    def search(cls, query, limit=100):
        """Full-text search cached threads by title."""
        return threads.search(query, 'title', limit=limit)
class Session:
@classmethod
def list(cls, offset=0, limit=100):
    """Return cached thread dicts for every talk in the schedule.

    Threads missing from the cache are fetched from the Disqus API and
    cached. Each returned thread carries its schedule entry under
    'session'. Order follows the schedule (start time, then room).
    Note: *offset* is currently unused.
    """
    thread_ids = []
    # Python 2 dict iteration (itervalues/iteritems) is used throughout.
    for talk in sorted(schedule.itervalues(), key=lambda x: (x['start'], x['room'])):
        thread_ids.append(talk['disqus:thread']['id'])
    if not thread_ids:
        return []
    thread_ids = thread_ids[:limit]
    result = threads.get_many(thread_ids)
    missing_thread_ids = [t for t, v in result.iteritems() if not v]
    if missing_thread_ids:
        thread_list = Paginator(disqusapi.threads.list, thread=missing_thread_ids, forum=app.config['DISQUS_FORUM'])
        for thread in thread_list:
            result[thread['id']] = Thread.save(thread)
    for thread in result.itervalues():
        if not thread:
            continue
        # NOTE(review): schedule appears to be keyed by thread link — confirm.
        thread['session'] = schedule.get(thread.get('link'))
    return [result[t] for t in thread_ids if result.get(t)]
@classmethod
def list_active(cls, offset=0, limit=100):
start = datetime.utcnow() - timedelta(minutes=10)
end = start + timedelta(minutes=20)
thread_ids = []
for talk in sorted(schedule.itervalues(), key=lambda x: (x['start'], x['room'])):
if talk['start'] > start and talk['start'] < end:
thread_ids.append(talk['disqus:thread']['id'])
if not thread_ids:
return []
thread_ids = thread_ids[:limit]
result = threads.get_many(thread_ids)
missing_thread_ids = [t for t, v in result.iteritems() if not v]
if missing_thread_ids:
thread_list = disqusapi.threads.list(thread=missing_thread_ids, forum=app.config['DISQUS_FORUM'])
for thread in thread_list:
result[thread['id']] = Thread.save(thread)
for thread in result.itervalues():
if not thread:
continue
thread['session'] = schedule.get(thread.get('link'))
return [result[t] for t in thread_ids if result.get(t)]
@classmethod
def list_upcoming(cls, offset=0, limit=100):
start = datetime.utcnow() - timedelta(minutes=10)
# end = start + timedelta(minutes=30)
num = 0
thread_ids = []
for talk in sorted(schedule.itervalues(), key=lambda x: (x['start'], x['room'])):
if talk['end'] > start: # and talk['start'] < end:
thread_ids.append(talk['disqus:thread']['id'])
if num > limit:
return
if not thread_ids:
return []
thread_ids = thread_ids[:limit]
result = threads.get_many(thread_ids)
missing_thread_ids = [t for t, v in result.iteritems() if not v]
if missing_thread_ids:
thread_list = disqusapi.threads.list(thread=missing_thread_ids, forum=app.config['DISQUS_FORUM'])
for thread in thread_list:
result[thread['id']] = Thread.save(thread)
for thread in result.itervalues():
if not thread:
continue
thread['session'] = schedule.get(thread.get('link'))
return [result[t] for t in thread_ids if result.get(t)]
class Post:
    """Cache/persistence helpers for Disqus posts."""

    @classmethod
    def format(cls, post):
        """Reduce a raw Disqus post payload to the fields we persist.

        Expects ``post['createdAt']`` to already be a ``datetime``
        (``save()`` converts it before calling this).
        """
        avatar = secure_avatar(post['author']['avatar']['cache'])
        return {
            'id': post['id'],
            'avatar': avatar,
            'name': post['author']['username'],
            'createdAtISO': post['createdAt'].isoformat(),
            'message': post['message'],
            'hash': avatar_hash(post['author'])
        }

    @classmethod
    def save(cls, post, incr_posts=True):
        """Normalize and persist a post, updating the thread's indexes.

        :param incr_posts: bump the parent thread's post counters; pass
            ``False`` when backfilling posts whose counts Disqus already
            reports (see ``list_by_thread``).
        """
        dt = datestr_to_datetime(post['createdAt'])
        post['createdAt'] = dt
        result = cls.format(post)
        # NOTE(review): '%s.%m' is epoch-seconds '.' month-number -- possibly
        # meant '%f' as a sub-second tiebreaker; preserved as-is. TODO confirm.
        score = dt.strftime('%s.%m')
        posts.add(result, score, thread_id=post['thread'])
        if incr_posts:
            threads.incr_counter(post['thread'], 'posts', 1)
            threads.incr_in_set(post['thread'], 1, _key='posts')
        # Bump the thread's recent-activity score (once -- the old code added
        # the same (member, score) pair twice, which was redundant).
        threads.add_to_set(post['thread'], score)
        user = User.save(post['author'])
        users.add_to_set(user['id'], score, thread_id=post['thread'])
        # BUGFIX: index by the author's *id*, not the whole author dict --
        # Thread.list_by_author queries this set with a user-id string.
        threads.add_to_set(post['thread'], score, author_id=user['id'])
        return result

    @classmethod
    def list_by_thread(cls, thread_id, offset=0, limit=100):
        """Posts for a thread, backfilling the cache from Disqus on a miss."""
        result = posts.list(thread_id=thread_id, offset=offset, limit=limit)
        if result is None:
            result = []
            paginator = Paginator(disqusapi.threads.listPosts, thread=thread_id)
            for post in paginator:
                # Skip guest posts: they have no stable author to index.
                if post['author']['isAnonymous']:
                    continue
                result.append(cls.save(post, incr_posts=False))
        return result
| |
#!/usr/bin/env python
# encoding: utf-8
from os.path import abspath, expanduser, join as pjoin
import sys
from importlib import import_module
import numpy as np
import cv2
import rospy
from tf import TransformListener, Exception as TFException
from tf.transformations import quaternion_about_axis
from rospkg import RosPack
from cv_bridge import CvBridge
import message_filters
from sensor_msgs.msg import Image as ROSImage, CameraInfo
from geometry_msgs.msg import PoseArray, Pose, Point, Quaternion, QuaternionStamped
from biternion.msg import HeadOrientations
from visualization_msgs.msg import Marker
import DeepFried2 as df
from common import bit2deg, ensemble_biternions, subtractbg, cutout
# Distinguish between STRANDS and SPENCER.
try:
from rwth_perception_people_msgs.msg import UpperBodyDetector
from spencer_tracking_msgs.msg import TrackedPersons2d, TrackedPersons
HAS_TRACKED_PERSONS = True
except ImportError:
from upper_body_detector.msg import UpperBodyDetector
from mdl_people_tracker.msg import TrackedPersons2d
HAS_TRACKED_PERSONS = False
def get_rects(msg, with_depth=False):
    """Extract (x, y, w, h) boxes from a detection/tracking message.

    With ``with_depth`` the depth estimate is appended as a fifth element.
    Raises TypeError for unsupported message types.
    """
    if isinstance(msg, TrackedPersons2d):
        rects = []
        for p2d in msg.boxes:
            rect = (p2d.x, p2d.y, p2d.w, p2d.h)
            if with_depth:
                rect = rect + (p2d.depth,)
            rects.append(rect)
        return rects
    if isinstance(msg, UpperBodyDetector):
        columns = [msg.pos_x, msg.pos_y, msg.width, msg.height]
        if with_depth:
            columns.append(msg.median_depth)
        return list(zip(*columns))
    raise TypeError("Unknown source type: {}".format(type(msg)))
class Predictor(object):
    """ROS node that predicts head orientations for detected upper bodies.

    Subscribes to (approximately) synchronized detections, RGB, depth and
    camera-info topics, runs a DeepFried2 "biternion" network on each
    detection cutout, and publishes angles, a debug image, 3D pose arrows
    and (optionally) re-oriented 3D tracks.
    """

    def __init__(self):
        rospy.loginfo("Initializing biternion predictor")
        self.counter = 0  # Number of synchronized callbacks processed.

        modelname = rospy.get_param("~model", "head_50_50")
        weightsname = abspath(expanduser(rospy.get_param("~weights", ".")))
        rospy.loginfo("Predicting using {} & {}".format(modelname, weightsname))

        topic = rospy.get_param("~topic", "/biternion")
        self.pub = rospy.Publisher(topic, HeadOrientations, queue_size=3)
        self.pub_vis = rospy.Publisher(topic + '/image', ROSImage, queue_size=3)
        self.pub_pa = rospy.Publisher(topic + "/pose", PoseArray, queue_size=3)

        # Ugly workaround for "jumps back in time" that the synchronizer sometime does.
        self.last_stamp = rospy.Time()

        # Create and load the network.
        netlib = import_module(modelname)
        self.net = netlib.mknet()
        self.net.__setstate__(np.load(weightsname))
        self.net.evaluate()

        self.aug = netlib.mkaug(None, None)
        self.preproc = netlib.preproc
        self.getrect = netlib.getrect

        # Do a fake forward-pass for precompilation.
        im = cutout(np.zeros((480,640,3), np.uint8), 0, 0, 150, 450)
        im = next(self.aug.augimg_pred(self.preproc(im), fast=True))
        self.net.forward(np.array([im]))
        rospy.loginfo("BiternionNet initialized")

        # Detection source: "tra" (tracker boxes) or "ubd" (upper-body detector).
        src = rospy.get_param("~src", "tra")
        subs = []
        if src == "tra":
            subs.append(message_filters.Subscriber(rospy.get_param("~tra", "/TODO"), TrackedPersons2d))
        elif src == "ubd":
            subs.append(message_filters.Subscriber(rospy.get_param("~ubd", "/upper_body_detector/detections"), UpperBodyDetector))
        else:
            raise ValueError("Unknown source type: " + src)

        rgb = rospy.get_param("~rgb", "/head_xtion/rgb/image_rect_color")
        subs.append(message_filters.Subscriber(rgb, ROSImage))
        subs.append(message_filters.Subscriber(rospy.get_param("~d", "/head_xtion/depth/image_rect_meters"), ROSImage))
        # camera_info is assumed to live next to the RGB topic.
        subs.append(message_filters.Subscriber('/'.join(rgb.split('/')[:-1] + ['camera_info']), CameraInfo))

        # Optional 3D track re-orientation (SPENCER stack only).
        tra3d = rospy.get_param("~tra3d", "")
        if src == "tra" and tra3d and HAS_TRACKED_PERSONS:
            self.pub_tracks = rospy.Publisher(topic + "/tracks", TrackedPersons, queue_size=3)
            subs.append(message_filters.Subscriber(tra3d, TrackedPersons))
            self.listener = TransformListener()
        else:
            self.pub_tracks = None

        ts = message_filters.ApproximateTimeSynchronizer(subs, queue_size=5, slop=0.5)
        ts.registerCallback(self.cb)

    def cb(self, src, rgb, d, caminfo, *more):
        """Synchronized-message callback: predict and publish head angles.

        ``more`` holds the optional TrackedPersons message when 3D track
        re-orientation is enabled in __init__.
        """
        # Ugly workaround because approximate sync sometimes jumps back in time.
        if rgb.header.stamp <= self.last_stamp:
            rospy.logwarn("Jump back in time detected and dropped like it's hot")
            return
        self.last_stamp = rgb.header.stamp

        detrects = get_rects(src)

        # Early-exit to minimize CPU usage if possible.
        #if len(detrects) == 0:
        #    return

        # If nobody's listening, why should we be computing?
        listeners = sum(p.get_num_connections() for p in (self.pub, self.pub_vis, self.pub_pa))
        if self.pub_tracks is not None:
            listeners += self.pub_tracks.get_num_connections()
        if listeners == 0:
            return

        header = rgb.header
        bridge = CvBridge()
        rgb = bridge.imgmsg_to_cv2(rgb)[:,:,::-1]  # Need to do BGR-RGB conversion manually.
        d = bridge.imgmsg_to_cv2(d)

        # Build the minibatch of depth-background-subtracted head cutouts.
        imgs = []
        for detrect in detrects:
            detrect = self.getrect(*detrect)
            det_rgb = cutout(rgb, *detrect)
            det_d = cutout(d, *detrect)

            # Preprocess and stick into the minibatch.
            im = subtractbg(det_rgb, det_d, 1.0, 0.5)
            im = self.preproc(im)
            imgs.append(im)

        sys.stderr.write("\r{}".format(self.counter)) ; sys.stderr.flush()
        self.counter += 1

        # TODO: We could further optimize by putting all augmentations in a
        # single batch and doing only one forward pass. Should be easy.
        if len(detrects):
            bits = [self.net.forward(batch) for batch in self.aug.augbatch_pred(np.array(imgs), fast=True)]
            preds = bit2deg(ensemble_biternions(bits)) - 90  # Subtract 90 to correct for "my weird" origin.
            # print(preds)
        else:
            preds = []

        if 0 < self.pub.get_num_connections():
            self.pub.publish(HeadOrientations(
                header=header,
                angles=list(preds),
                # NOTE(review): confidence is a hard-coded constant, not a
                # model output -- TODO confirm downstream consumers know this.
                confidences=[0.83] * len(imgs)
            ))

        # Visualization
        if 0 < self.pub_vis.get_num_connections():
            rgb_vis = rgb[:,:,::-1].copy()
            for detrect, alpha in zip(detrects, preds):
                l, t, w, h = self.getrect(*detrect)
                # Arrow endpoint derived from the predicted angle (image y is down).
                px = int(round(np.cos(np.deg2rad(alpha))*w/2))
                py = -int(round(np.sin(np.deg2rad(alpha))*h/2))
                cv2.rectangle(rgb_vis, (detrect[0], detrect[1]), (detrect[0]+detrect[2],detrect[1]+detrect[3]), (0,255,255), 1)
                cv2.rectangle(rgb_vis, (l,t), (l+w,t+h), (0,255,0), 2)
                cv2.line(rgb_vis, (l+w//2, t+h//2), (l+w//2+px,t+h//2+py), (0,255,0), 2)
                # cv2.putText(rgb_vis, "{:.1f}".format(alpha), (l, t+25), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,255), 2)
            vismsg = bridge.cv2_to_imgmsg(rgb_vis, encoding='rgb8')
            vismsg.header = header  # TODO: Seems not to work!
            self.pub_vis.publish(vismsg)

        if 0 < self.pub_pa.get_num_connections():
            # Back-project box centres to 3D with the pinhole model (K matrix).
            fx, cx = caminfo.K[0], caminfo.K[2]
            fy, cy = caminfo.K[4], caminfo.K[5]

            poseArray = PoseArray(header=header)
            for (dx, dy, dw, dh, dd), alpha in zip(get_rects(src, with_depth=True), preds):
                dx, dy, dw, dh = self.getrect(dx, dy, dw, dh)
                # PoseArray message for boundingbox centres
                poseArray.poses.append(Pose(
                    position=Point(
                        x=dd*((dx+dw/2.0-cx)/fx),
                        y=dd*((dy+dh/2.0-cy)/fy),
                        z=dd
                    ),
                    # TODO: Use global UP vector (0,0,1) and transform into frame used by this message.
                    orientation=Quaternion(*quaternion_about_axis(np.deg2rad(alpha), [0, -1, 0]))
                ))
            self.pub_pa.publish(poseArray)

        if len(more) == 1 and self.pub_tracks is not None and 0 < self.pub_tracks.get_num_connections():
            t3d = more[0]
            try:
                self.listener.waitForTransform(header.frame_id, t3d.header.frame_id, rospy.Time(), rospy.Duration(1))
                # NOTE(review): assumes t3d.tracks and preds are aligned 1:1
                # in the same order as the 2D detections -- TODO confirm.
                for track, alpha in zip(t3d.tracks, preds):
                    track.pose.pose.orientation = self.listener.transformQuaternion(t3d.header.frame_id, QuaternionStamped(
                        header=header,
                        # TODO: Same as above!
                        quaternion=Quaternion(*quaternion_about_axis(np.deg2rad(alpha), [0, -1, 0]))
                    )).quaternion
                self.pub_tracks.publish(t3d)
            except TFException:
                pass
if __name__ == "__main__":
    rospy.init_node("biternion_predict")
    # Add the "models" directory to the path!
    # (import_module() in Predictor.__init__ resolves the model by name.)
    sys.path.append(pjoin(RosPack().get_path('biternion'), 'scripts'))
    sys.path.append(pjoin(RosPack().get_path('biternion'), 'models'))
    p = Predictor()
    rospy.spin()  # Blocks until the node is shut down.
    # NOTE(review): `counter` increments once per synchronized callback, not
    # per detection, so "UBDs" here may misstate the count -- TODO confirm.
    rospy.loginfo("Predicted a total of {} UBDs.".format(p.counter))
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import logging
import multiprocessing
import os
import signal
import threading
from contextlib import redirect_stderr, redirect_stdout, suppress
from datetime import timedelta
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import List, Optional, Set, Tuple
from setproctitle import setproctitle
from sqlalchemy import func, or_
from sqlalchemy.orm.session import Session
from airflow import models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException, TaskNotFound
from airflow.models import DAG, DagModel, SlaMiss, errors
from airflow.models.dagbag import DagBag
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.callback_requests import (
CallbackRequest,
DagCallbackRequest,
SlaCallbackRequest,
TaskCallbackRequest,
)
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.session import provide_session
from airflow.utils.state import State
# Shorthand alias for the TaskInstance ORM model, used in the queries below.
TI = models.TaskInstance
class DagFileProcessorProcess(LoggingMixin, MultiprocessingStartMethodMixin):
    """Runs DAG processing in a separate process using DagFileProcessor

    :param file_path: a Python file containing Airflow DAG definitions
    :type file_path: str
    :param pickle_dags: whether to serialize the DAG objects to the DB
    :type pickle_dags: bool
    :param dag_ids: If specified, only look at these DAG ID's
    :type dag_ids: List[str]
    :param callback_requests: failure callback to execute
    :type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
    """

    # Counter that increments every time an instance of this class is created
    class_creation_counter = 0

    def __init__(
        self,
        file_path: str,
        pickle_dags: bool,
        dag_ids: Optional[List[str]],
        callback_requests: List[CallbackRequest],
    ):
        super().__init__()
        self._file_path = file_path
        self._pickle_dags = pickle_dags
        self._dag_ids = dag_ids
        self._callback_requests = callback_requests

        # The process that was launched to process the given file.
        self._process: Optional[multiprocessing.process.BaseProcess] = None
        # The result of DagFileProcessor.process_file(file_path).
        self._result: Optional[Tuple[int, int]] = None
        # Whether the process is done running.
        self._done = False
        # When the process started.
        self._start_time: Optional[datetime.datetime] = None
        # This ID is use to uniquely name the process / thread that's launched
        # by this processor instance
        self._instance_id = DagFileProcessorProcess.class_creation_counter

        # Read end of the pipe used to receive the child's result.
        self._parent_channel: Optional[MultiprocessingConnection] = None
        DagFileProcessorProcess.class_creation_counter += 1

    @property
    def file_path(self) -> str:
        """Path of the DAG file this processor was created for."""
        return self._file_path

    @staticmethod
    def _run_file_processor(
        result_channel: MultiprocessingConnection,
        parent_channel: MultiprocessingConnection,
        file_path: str,
        pickle_dags: bool,
        dag_ids: Optional[List[str]],
        thread_name: str,
        callback_requests: List[CallbackRequest],
    ) -> None:
        """
        Process the given file.

        :param result_channel: the connection to use for passing back the result
        :type result_channel: multiprocessing.Connection
        :param parent_channel: the parent end of the channel to close in the child
        :type parent_channel: multiprocessing.Connection
        :param file_path: the file to process
        :type file_path: str
        :param pickle_dags: whether to pickle the DAGs found in the file and
            save them to the DB
        :type pickle_dags: bool
        :param dag_ids: if specified, only examine DAG ID's that are
            in this list
        :type dag_ids: list[str]
        :param thread_name: the name to use for the process that is launched
        :type thread_name: str
        :param callback_requests: failure callback to execute
        :type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
        :return: the process that was launched
        :rtype: multiprocessing.Process
        """
        # This helper runs in the newly created process
        log: logging.Logger = logging.getLogger("airflow.processor")

        # Since we share all open FDs from the parent, we need to close the parent side of the pipe here in
        # the child, else it won't get closed properly until we exit.
        log.info("Closing parent pipe")

        parent_channel.close()
        del parent_channel

        set_context(log, file_path)
        setproctitle(f"airflow scheduler - DagFileProcessor {file_path}")

        try:
            # redirect stdout/stderr to log
            with redirect_stdout(StreamLogWriter(log, logging.INFO)), redirect_stderr(
                StreamLogWriter(log, logging.WARN)
            ), Stats.timer() as timer:
                # Re-configure the ORM engine as there are issues with multiple processes
                settings.configure_orm()

                # Change the thread name to differentiate log lines. This is
                # really a separate process, but changing the name of the
                # process doesn't work, so changing the thread name instead.
                threading.current_thread().name = thread_name

                log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
                dag_file_processor = DagFileProcessor(dag_ids=dag_ids, log=log)
                result: Tuple[int, int] = dag_file_processor.process_file(
                    file_path=file_path,
                    pickle_dags=pickle_dags,
                    callback_requests=callback_requests,
                )
                result_channel.send(result)
            log.info("Processing %s took %.3f seconds", file_path, timer.duration)
        except Exception:
            # Log exceptions through the logging framework.
            log.exception("Got an exception! Propagating...")
            raise
        finally:
            # We re-initialized the ORM within this Process above so we need to
            # tear it down manually here
            settings.dispose_orm()

            result_channel.close()

    def start(self) -> None:
        """Launch the process and start processing the DAG."""
        start_method = self._get_multiprocessing_start_method()
        context = multiprocessing.get_context(start_method)

        # One-way pipe: the child sends its (dags found, import errors)
        # result back through _child_channel; we read it in `done`.
        _parent_channel, _child_channel = context.Pipe(duplex=False)
        process = context.Process(
            target=type(self)._run_file_processor,
            args=(
                _child_channel,
                _parent_channel,
                self.file_path,
                self._pickle_dags,
                self._dag_ids,
                f"DagFileProcessor{self._instance_id}",
                self._callback_requests,
            ),
            name=f"DagFileProcessor{self._instance_id}-Process",
        )
        self._process = process
        self._start_time = timezone.utcnow()
        process.start()

        # Close the child side of the pipe now the subprocess has started -- otherwise this would prevent it
        # from closing in some cases
        _child_channel.close()
        del _child_channel

        # Don't store it on self until after we've started the child process - we don't want to keep it from
        # getting GCd/closed
        self._parent_channel = _parent_channel

    def kill(self) -> None:
        """Kill the process launched to process the file, and ensure consistent state."""
        if self._process is None:
            raise AirflowException("Tried to kill before starting!")
        self._kill_process()

    def terminate(self, sigkill: bool = False) -> None:
        """
        Terminate (and then kill) the process launched to process the file.

        :param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
        :type sigkill: bool
        """
        if self._process is None or self._parent_channel is None:
            raise AirflowException("Tried to call terminate before starting!")

        self._process.terminate()
        # Arbitrarily wait 5s for the process to die
        with suppress(TimeoutError):
            self._process._popen.wait(5)  # type: ignore
        if sigkill:
            self._kill_process()
        self._parent_channel.close()

    def _kill_process(self) -> None:
        """SIGKILL the child process (if still alive) and close our pipe end."""
        if self._process is None:
            raise AirflowException("Tried to kill process before starting!")

        if self._process.is_alive() and self._process.pid:
            self.log.warning("Killing DAGFileProcessorProcess (PID=%d)", self._process.pid)
            os.kill(self._process.pid, signal.SIGKILL)
        if self._parent_channel:
            self._parent_channel.close()

    @property
    def pid(self) -> int:
        """
        :return: the PID of the process launched to process the given file
        :rtype: int
        """
        if self._process is None or self._process.pid is None:
            raise AirflowException("Tried to get PID before starting!")
        return self._process.pid

    @property
    def exit_code(self) -> Optional[int]:
        """
        After the process is finished, this can be called to get the return code

        :return: the exit code of the process
        :rtype: int
        """
        if self._process is None:
            raise AirflowException("Tried to get exit code before starting!")
        if not self._done:
            raise AirflowException("Tried to call retcode before process was finished!")
        return self._process.exitcode

    @property
    def done(self) -> bool:
        """
        Check if the process launched to process this file is done.

        :return: whether the process is finished running
        :rtype: bool
        """
        if self._process is None or self._parent_channel is None:
            raise AirflowException("Tried to see if it's done before starting!")

        if self._done:
            return True

        # Normal completion path: the child sent its result over the pipe.
        if self._parent_channel.poll():
            try:
                self._result = self._parent_channel.recv()
                self._done = True
                self.log.debug("Waiting for %s", self._process)
                self._process.join()
                self._parent_channel.close()
                return True
            except EOFError:
                # If we get an EOFError, it means the child end of the pipe has been closed. This only happens
                # in the finally block. But due to a possible race condition, the process may have not yet
                # terminated (it could be doing cleanup/python shutdown still). So we kill it here after a
                # "suitable" timeout.
                self._done = True
                # Arbitrary timeout -- error/race condition only, so this doesn't need to be tunable.
                self._process.join(timeout=5)
                if self._process.is_alive():
                    # Didn't shut down cleanly - kill it
                    self._kill_process()

        # Fallback: the child exited without ever sending a result.
        if not self._process.is_alive():
            self._done = True
            self.log.debug("Waiting for %s", self._process)
            self._process.join()
            self._parent_channel.close()
            return True

        return False

    @property
    def result(self) -> Optional[Tuple[int, int]]:
        """
        :return: result of running DagFileProcessor.process_file()
        :rtype: tuple[int, int] or None
        """
        if not self.done:
            raise AirflowException("Tried to get the result before it's done!")
        return self._result

    @property
    def start_time(self) -> datetime.datetime:
        """
        :return: when this started to process the file
        :rtype: datetime
        """
        if self._start_time is None:
            raise AirflowException("Tried to get start time before it started!")
        return self._start_time

    @property
    def waitable_handle(self):
        """OS-level handle that becomes ready when the child process exits
        (suitable for ``multiprocessing.connection.wait``)."""
        return self._process.sentinel
class DagFileProcessor(LoggingMixin):
    """
    Process a Python file containing Airflow DAGs.

    This includes:
    1. Execute the file and look for DAG objects in the namespace.
    2. Execute any Callbacks if passed to DagFileProcessor.process_file
    3. Serialize the DAGs and save it to DB (or update existing record in the DB).
    4. Pickle the DAG and save it to the DB (if necessary).
    5. Record any errors importing the file into ORM

    Returns a tuple of 'number of dags found' and 'the count of import errors'

    :param dag_ids: If specified, only look at these DAG ID's
    :type dag_ids: List[str]
    :param log: Logger to save the processing process
    :type log: logging.Logger
    """

    UNIT_TEST_MODE: bool = conf.getboolean('core', 'UNIT_TEST_MODE')

    def __init__(self, dag_ids: Optional[List[str]], log: logging.Logger):
        super().__init__()
        self.dag_ids = dag_ids
        self._log = log

    @provide_session
    def manage_slas(self, dag: DAG, session: Session = None) -> None:
        """
        Finding all tasks that have SLAs defined, and sending alert emails
        where needed. New SLA misses are also recorded in the database.

        We are assuming that the scheduler runs often, so we only check for
        tasks that should have succeeded in the past hour.
        """
        self.log.info("Running SLA Checks for %s", dag.dag_id)
        if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
            self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
            return

        # Latest SUCCESS/SKIPPED execution date per task -- the anchor from
        # which expected schedule points are enumerated below.
        qry = (
            session.query(TI.task_id, func.max(TI.execution_date).label('max_ti'))
            .with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
            .filter(TI.dag_id == dag.dag_id)
            .filter(or_(TI.state == State.SUCCESS, TI.state == State.SKIPPED))
            .filter(TI.task_id.in_(dag.task_ids))
            .group_by(TI.task_id)
            .subquery('sq')
        )

        max_tis: List[TI] = (
            session.query(TI)
            .filter(
                TI.dag_id == dag.dag_id,
                TI.task_id == qry.c.task_id,
                TI.execution_date == qry.c.max_ti,
            )
            .all()
        )

        ts = timezone.utcnow()
        for ti in max_tis:
            task = dag.get_task(ti.task_id)
            if not task.sla:
                continue

            if not isinstance(task.sla, timedelta):
                raise TypeError(
                    f"SLA is expected to be timedelta object, got "
                    f"{type(task.sla)} in {task.dag_id}:{task.task_id}"
                )

            # Walk every schedule point since the last success; a run whose
            # following schedule + SLA is already past is recorded as a miss.
            # NOTE(review): dag.following_schedule() may return None for
            # non-time-based schedules, which would make `<` raise --
            # TODO confirm only scheduled DAGs reach this code.
            dttm = dag.following_schedule(ti.execution_date)
            while dttm < ts:
                following_schedule = dag.following_schedule(dttm)
                if following_schedule + task.sla < ts:
                    session.merge(
                        SlaMiss(task_id=ti.task_id, dag_id=ti.dag_id, execution_date=dttm, timestamp=ts)
                    )
                dttm = dag.following_schedule(dttm)
        session.commit()

        # All recorded misses that have not yet been notified about.
        slas: List[SlaMiss] = (
            session.query(SlaMiss)
            .filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id)  # noqa
            .all()
        )
        if slas:
            sla_dates: List[datetime.datetime] = [sla.execution_date for sla in slas]
            fetched_tis: List[TI] = (
                session.query(TI)
                .filter(TI.state != State.SUCCESS, TI.execution_date.in_(sla_dates), TI.dag_id == dag.dag_id)
                .all()
            )
            # TIs for tasks removed from the DAG are deleted; the rest are
            # reported as "blocking" the missed runs.
            blocking_tis: List[TI] = []
            for ti in fetched_tis:
                if ti.task_id in dag.task_ids:
                    ti.task = dag.get_task(ti.task_id)
                    blocking_tis.append(ti)
                else:
                    session.delete(ti)
                    session.commit()

            task_list = "\n".join(sla.task_id + ' on ' + sla.execution_date.isoformat() for sla in slas)
            blocking_task_list = "\n".join(
                ti.task_id + ' on ' + ti.execution_date.isoformat() for ti in blocking_tis
            )
            # Track whether email or any alert notification sent
            # We consider email or the alert callback as notifications
            email_sent = False
            notification_sent = False
            if dag.sla_miss_callback:
                # Execute the alert callback
                self.log.info('Calling SLA miss callback')
                try:
                    dag.sla_miss_callback(dag, task_list, blocking_task_list, slas, blocking_tis)
                    notification_sent = True
                except Exception:
                    self.log.exception("Could not call sla_miss_callback for DAG %s", dag.dag_id)
            email_content = f"""\
            Here's a list of tasks that missed their SLAs:
            <pre><code>{task_list}\n<code></pre>
            Blocking tasks:
            <pre><code>{blocking_task_list}<code></pre>
            Airflow Webserver URL: {conf.get(section='webserver', key='base_url')}
            """

            tasks_missed_sla = []
            for sla in slas:
                try:
                    task = dag.get_task(sla.task_id)
                except TaskNotFound:
                    # task already deleted from DAG, skip it
                    self.log.warning(
                        "Task %s doesn't exist in DAG anymore, skipping SLA miss notification.", sla.task_id
                    )
                    continue
                tasks_missed_sla.append(task)

            # Union of all alert addresses configured on the missed tasks.
            emails: Set[str] = set()
            for task in tasks_missed_sla:
                if task.email:
                    if isinstance(task.email, str):
                        emails |= set(get_email_address_list(task.email))
                    elif isinstance(task.email, (list, tuple)):
                        emails |= set(task.email)
            if emails:
                try:
                    send_email(emails, f"[airflow] SLA miss on DAG={dag.dag_id}", email_content)
                    email_sent = True
                    notification_sent = True
                except Exception:
                    Stats.incr('sla_email_notification_failure')
                    self.log.exception("Could not send SLA Miss email notification for DAG %s", dag.dag_id)
            # If we sent any notification, update the sla_miss table
            if notification_sent:
                for sla in slas:
                    sla.email_sent = email_sent
                    sla.notification_sent = True
                    session.merge(sla)
            session.commit()

    @staticmethod
    def update_import_errors(session: Session, dagbag: DagBag) -> None:
        """
        For the DAGs in the given DagBag, record any associated import errors and clears
        errors for files that no longer have them. These are usually displayed through the
        Airflow UI so that users know that there are issues parsing DAGs.

        :param session: session for ORM operations
        :type session: sqlalchemy.orm.session.Session
        :param dagbag: DagBag containing DAGs with import errors
        :type dagbag: airflow.DagBag
        """
        # Clear the errors of the processed files
        for dagbag_file in dagbag.file_last_changed:
            session.query(errors.ImportError).filter(errors.ImportError.filename == dagbag_file).delete()

        # Add the errors of the processed files
        for filename, stacktrace in dagbag.import_errors.items():
            session.add(
                errors.ImportError(filename=filename, timestamp=timezone.utcnow(), stacktrace=stacktrace)
            )
        session.commit()

    @provide_session
    def execute_callbacks(
        self, dagbag: DagBag, callback_requests: List[CallbackRequest], session: Session = None
    ) -> None:
        """
        Execute on failure callbacks. These objects can come from SchedulerJob or from
        DagFileProcessorManager.

        :param dagbag: Dag Bag of dags
        :param callback_requests: failure callbacks to execute
        :type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
        :param session: DB session.
        """
        for request in callback_requests:
            self.log.debug("Processing Callback Request: %s", request)
            try:
                if isinstance(request, TaskCallbackRequest):
                    self._execute_task_callbacks(dagbag, request)
                elif isinstance(request, SlaCallbackRequest):
                    # NOTE(review): dags.get() may return None if the dag_id is
                    # not in this file's bag; manage_slas would then fail on
                    # dag.dag_id (caught and logged below) -- TODO confirm.
                    self.manage_slas(dagbag.dags.get(request.dag_id))
                elif isinstance(request, DagCallbackRequest):
                    self._execute_dag_callbacks(dagbag, request, session)
            except Exception:
                self.log.exception(
                    "Error executing %s callback for file: %s",
                    request.__class__.__name__,
                    request.full_filepath,
                )
        session.commit()

    @provide_session
    def _execute_dag_callbacks(self, dagbag: DagBag, request: DagCallbackRequest, session: Session):
        """Run the DAG-level success/failure callback for the requested run."""
        dag = dagbag.dags[request.dag_id]
        dag_run = dag.get_dagrun(execution_date=request.execution_date, session=session)
        dag.handle_callback(
            dagrun=dag_run, success=not request.is_failure_callback, reason=request.msg, session=session
        )

    def _execute_task_callbacks(self, dagbag: DagBag, request: TaskCallbackRequest):
        """Run the task-level failure callback if the task still exists in the DAG."""
        simple_ti = request.simple_task_instance
        if simple_ti.dag_id in dagbag.dags:
            dag = dagbag.dags[simple_ti.dag_id]
            if simple_ti.task_id in dag.task_ids:
                task = dag.get_task(simple_ti.task_id)
                if request.is_failure_callback:
                    ti = TI(task, simple_ti.execution_date)
                    # TODO: Use simple_ti to improve performance here in the future
                    ti.refresh_from_db()
                    ti.handle_failure_with_callback(error=request.msg, test_mode=self.UNIT_TEST_MODE)
                    self.log.info('Executed failure callback for %s in state %s', ti, ti.state)

    @provide_session
    def process_file(
        self,
        file_path: str,
        callback_requests: List[CallbackRequest],
        pickle_dags: bool = False,
        session: Session = None,
    ) -> Tuple[int, int]:
        """
        Process a Python file containing Airflow DAGs.

        This includes:

        1. Execute the file and look for DAG objects in the namespace.
        2. Execute any Callbacks if passed to this method.
        3. Serialize the DAGs and save it to DB (or update existing record in the DB).
        4. Pickle the DAG and save it to the DB (if necessary).
        5. Record any errors importing the file into ORM

        :param file_path: the path to the Python file that should be executed
        :type file_path: str
        :param callback_requests: failure callback to execute
        :type callback_requests: List[airflow.utils.dag_processing.CallbackRequest]
        :param pickle_dags: whether serialize the DAGs found in the file and
            save them to the db
        :type pickle_dags: bool
        :param session: Sqlalchemy ORM Session
        :type session: Session
        :return: number of dags found, count of import errors
        :rtype: Tuple[int, int]
        """
        self.log.info("Processing file %s for tasks to queue", file_path)

        try:
            dagbag = DagBag(file_path, include_examples=False, include_smart_sensor=False)
        except Exception:
            self.log.exception("Failed at reloading the DAG file %s", file_path)
            Stats.incr('dag_file_refresh_error', 1, 1)
            return 0, 0

        if len(dagbag.dags) > 0:
            self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
        else:
            self.log.warning("No viable dags retrieved from %s", file_path)
            self.update_import_errors(session, dagbag)
            return 0, len(dagbag.import_errors)

        self.execute_callbacks(dagbag, callback_requests)

        # Save individual DAGs in the ORM
        dagbag.sync_to_db()

        if pickle_dags:
            paused_dag_ids = DagModel.get_paused_dag_ids(dag_ids=dagbag.dag_ids)

            unpaused_dags: List[DAG] = [
                dag for dag_id, dag in dagbag.dags.items() if dag_id not in paused_dag_ids
            ]

            for dag in unpaused_dags:
                dag.pickle(session)

        # Record import errors into the ORM
        try:
            self.update_import_errors(session, dagbag)
        except Exception:
            self.log.exception("Error logging import errors!")

        return len(dagbag.dags), len(dagbag.import_errors)
| |
# Copyright 2011-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import contextlib
import os
import socket
import threading
from bson import DEFAULT_CODEC_OPTIONS
from bson.py3compat import u, itervalues
from pymongo import auth, helpers, thread_util
from pymongo.errors import (AutoReconnect,
ConnectionFailure,
DocumentTooLarge,
NetworkTimeout,
NotMasterError,
OperationFailure)
from pymongo.ismaster import IsMaster
from pymongo.monotonic import time as _time
from pymongo.network import (command,
receive_message,
socket_closed)
from pymongo.read_preferences import ReadPreference
from pymongo.server_type import SERVER_TYPE
# If the first getaddrinfo call of this interpreter's life is on a thread,
# while the main thread holds the import lock, getaddrinfo deadlocks trying
# to import the IDNA codec. Import it here, where presumably we're on the
# main thread, to avoid the deadlock. See PYTHON-607.
u('foo').encode('idna')

# Prefer the stdlib's hostname verification when the ssl module provides it;
# otherwise fall back to the bundled pure-Python implementation.
try:
    from ssl import match_hostname, CertificateError
except ImportError:
    # These don't require the ssl module
    from pymongo.ssl_match_hostname import match_hostname, CertificateError
def _raise_connection_failure(address, error):
    """Re-raise a socket-level error as the appropriate ConnectionFailure.

    Timeouts become NetworkTimeout; any other socket error becomes
    AutoReconnect. The raised message includes the server address.
    """
    host, port = address
    # Unix domain sockets have no port component, so omit it from the message.
    if port is None:
        message = '%s: %s' % (host, error)
    else:
        message = '%s:%d: %s' % (host, port, error)
    exc_class = (NetworkTimeout if isinstance(error, socket.timeout)
                 else AutoReconnect)
    raise exc_class(message)
class PoolOptions(object):
    """Read-only settings that govern connection-pool behavior.

    All values are supplied at construction time and exposed through
    read-only properties; instances are effectively immutable.
    """

    __slots__ = ('__max_pool_size', '__connect_timeout', '__socket_timeout',
                 '__wait_queue_timeout', '__wait_queue_multiple',
                 '__ssl_context', '__ssl_match_hostname', '__socket_keepalive')

    def __init__(self, max_pool_size=100, connect_timeout=None,
                 socket_timeout=None, wait_queue_timeout=None,
                 wait_queue_multiple=None, ssl_context=None,
                 ssl_match_hostname=True, socket_keepalive=False):
        # Stash each option on a private slotted attribute; the public
        # surface is the read-only properties below.
        self.__socket_keepalive = socket_keepalive
        self.__ssl_match_hostname = ssl_match_hostname
        self.__ssl_context = ssl_context
        self.__wait_queue_multiple = wait_queue_multiple
        self.__wait_queue_timeout = wait_queue_timeout
        self.__socket_timeout = socket_timeout
        self.__connect_timeout = connect_timeout
        self.__max_pool_size = max_pool_size

    @property
    def max_pool_size(self):
        """The greatest number of connections the pool opens simultaneously.
        When set, operations block while `max_pool_size` connections are
        outstanding.
        """
        return self.__max_pool_size

    @property
    def connect_timeout(self):
        """Seconds allowed for opening a connection before timing out."""
        return self.__connect_timeout

    @property
    def socket_timeout(self):
        """Seconds allowed for a socket send or receive before timing out."""
        return self.__socket_timeout

    @property
    def wait_queue_timeout(self):
        """Seconds a thread waits for a free socket when the pool is empty."""
        return self.__wait_queue_timeout

    @property
    def wait_queue_multiple(self):
        """Multiplied by max_pool_size to cap the number of threads that may
        wait for a socket at one time.
        """
        return self.__wait_queue_multiple

    @property
    def ssl_context(self):
        """An SSLContext instance, or None when TLS is not in use."""
        return self.__ssl_context

    @property
    def ssl_match_hostname(self):
        """Whether to call ssl.match_hostname when cert_reqs is not
        ssl.CERT_NONE.
        """
        return self.__ssl_match_hostname

    @property
    def socket_keepalive(self):
        """Whether to send periodic keepalive probes to detect dead
        connections.
        """
        return self.__socket_keepalive
class SocketInfo(object):
    """Store a socket with some metadata.

    :Parameters:
      - `sock`: a raw socket object
      - `pool`: a Pool instance
      - `ismaster`: optional IsMaster instance, response to ismaster on `sock`
      - `address`: the server's (host, port)
    """
    def __init__(self, sock, pool, ismaster, address):
        self.sock = sock
        self.address = address
        # Credentials this socket has authenticated with; kept in sync with
        # the client's credential cache by check_auth().
        self.authset = set()
        self.closed = False
        self.last_checkout = _time()
        # Server limits and capabilities from the ismaster response; each is
        # None when no handshake was performed.
        self.is_writable = ismaster.is_writable if ismaster else None
        self.max_wire_version = ismaster.max_wire_version if ismaster else None
        self.max_bson_size = ismaster.max_bson_size if ismaster else None
        self.max_message_size = ismaster.max_message_size if ismaster else None
        self.max_write_batch_size = (
            ismaster.max_write_batch_size if ismaster else None)
        if ismaster:
            self.is_mongos = ismaster.server_type == SERVER_TYPE.Mongos
        else:
            self.is_mongos = None

        # The pool's pool_id changes with each reset() so we can close sockets
        # created before the last reset.
        self.pool_id = pool.pool_id

    def command(self, dbname, spec, slave_ok=False,
                read_preference=ReadPreference.PRIMARY,
                codec_options=DEFAULT_CODEC_OPTIONS, check=True,
                allowable_errors=None, check_keys=False):
        """Execute a command or raise ConnectionFailure or OperationFailure.

        :Parameters:
          - `dbname`: name of the database on which to run the command
          - `spec`: a command document as a dict, SON, or mapping object
          - `slave_ok`: whether to set the SlaveOkay wire protocol bit
          - `read_preference`: a read preference
          - `codec_options`: a CodecOptions instance
          - `check`: raise OperationFailure if there are errors
          - `allowable_errors`: errors to ignore if `check` is True
          - `check_keys`: if True, check `spec` for invalid keys
        """
        try:
            return command(self.sock, dbname, spec, slave_ok,
                           self.is_mongos, read_preference, codec_options,
                           check, allowable_errors, self.address, True,
                           check_keys)
        # An OperationFailure means the server responded; the connection is
        # still healthy, so propagate without closing the socket.
        except OperationFailure:
            raise
        # Catch socket.error, KeyboardInterrupt, etc. and close ourselves.
        except BaseException as error:
            self._raise_connection_failure(error)

    def send_message(self, message, max_doc_size):
        """Send a raw BSON message or raise ConnectionFailure.

        If a network exception is raised, the socket is closed.

        :Parameters:
          - `message`: bytes, a complete wire-protocol message.
          - `max_doc_size`: size in bytes of the largest document in `message`.
        """
        if (self.max_bson_size is not None
                and max_doc_size > self.max_bson_size):
            # Fix: the first fragment must end with a space so the implicitly
            # concatenated message reads "...server supports...", not
            # "...serversupports...".
            raise DocumentTooLarge(
                "BSON document too large (%d bytes) - the connected server "
                "supports BSON document sizes up to %d bytes." %
                (max_doc_size, self.max_bson_size))

        try:
            self.sock.sendall(message)
        except BaseException as error:
            self._raise_connection_failure(error)

    def receive_message(self, operation, request_id):
        """Receive a raw BSON message or raise ConnectionFailure.

        If any exception is raised, the socket is closed.
        """
        try:
            return receive_message(self.sock, operation, request_id)
        except BaseException as error:
            self._raise_connection_failure(error)

    def legacy_write(self, request_id, msg, max_doc_size, with_last_error):
        """Send OP_INSERT, etc., optionally returning response as a dict.

        Can raise ConnectionFailure or OperationFailure.

        :Parameters:
          - `request_id`: an int.
          - `msg`: bytes, an OP_INSERT, OP_UPDATE, or OP_DELETE message,
            perhaps with a getlasterror command appended.
          - `max_doc_size`: size in bytes of the largest document in `msg`.
          - `with_last_error`: True if a getlasterror command is appended.
        """
        if not with_last_error and not self.is_writable:
            # Write won't succeed, bail as if we'd done a getlasterror.
            raise NotMasterError("not master")

        self.send_message(msg, max_doc_size)
        if with_last_error:
            # Operation code 1 is OP_REPLY.
            response = self.receive_message(1, request_id)
            return helpers._check_gle_response(response)

    def write_command(self, request_id, msg):
        """Send "insert" etc. command, returning response as a dict.

        Can raise ConnectionFailure or OperationFailure.

        :Parameters:
          - `request_id`: an int.
          - `msg`: bytes, the command message.
        """
        self.send_message(msg, 0)
        response = helpers._unpack_response(self.receive_message(1, request_id))
        # Commands reply with exactly one document.
        assert response['number_returned'] == 1
        result = response['data'][0]

        # Raises NotMasterError or OperationFailure.
        helpers._check_command_response(result)
        return result

    def check_auth(self, all_credentials):
        """Update this socket's authentication.

        Log in or out to bring this socket's credentials up to date with
        those provided. Can raise ConnectionFailure or OperationFailure.

        :Parameters:
          - `all_credentials`: dict, maps auth source to MongoCredential.
        """
        if all_credentials or self.authset:
            cached = set(itervalues(all_credentials))
            authset = self.authset.copy()

            # Logout any credentials that no longer exist in the cache.
            for credentials in authset - cached:
                auth.logout(credentials.source, self)
                self.authset.discard(credentials)

            # Log in with any credentials not yet active on this socket.
            for credentials in cached - authset:
                auth.authenticate(credentials, self)
                self.authset.add(credentials)

    def authenticate(self, credentials):
        """Log in to the server and store these credentials in `authset`.

        Can raise ConnectionFailure or OperationFailure.

        :Parameters:
          - `credentials`: A MongoCredential.
        """
        auth.authenticate(credentials, self)
        self.authset.add(credentials)

    def close(self):
        self.closed = True
        # Avoid exceptions on interpreter shutdown.
        try:
            self.sock.close()
        except:
            pass

    def _raise_connection_failure(self, error):
        # Catch *all* exceptions from socket methods and close the socket. In
        # regular Python, socket operations only raise socket.error, even if
        # the underlying cause was a Ctrl-C: a signal raised during socket.recv
        # is expressed as an EINTR error from poll. See internal_select_ex() in
        # socketmodule.c. All error codes from poll become socket.error at
        # first. Eventually in PyEval_EvalFrameEx the interpreter checks for
        # signals and throws KeyboardInterrupt into the current frame on the
        # main thread.
        #
        # But in Gevent and Eventlet, the polling mechanism (epoll, kqueue,
        # ...) is called in Python code, which experiences the signal as a
        # KeyboardInterrupt from the start, rather than as an initial
        # socket.error, so we catch that, close the socket, and reraise it.
        self.close()
        if isinstance(error, socket.error):
            _raise_connection_failure(self.address, error)
        else:
            raise error

    def __eq__(self, other):
        # Identity is defined by the underlying socket object.
        return self.sock == other.sock

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.sock)

    def __repr__(self):
        return "SocketInfo(%s)%s at %s" % (
            repr(self.sock),
            self.closed and " CLOSED" or "",
            id(self)
        )
def _create_connection(address, options):
"""Given (host, port) and PoolOptions, connect and return a socket object.
Can raise socket.error.
This is a modified version of create_connection from CPython >= 2.6.
"""
host, port = address
# Check if dealing with a unix domain socket
if host.endswith('.sock'):
if not hasattr(socket, "AF_UNIX"):
raise ConnectionFailure("UNIX-sockets are not supported "
"on this system")
sock = socket.socket(socket.AF_UNIX)
try:
sock.connect(host)
return sock
except socket.error:
sock.close()
raise
# Don't try IPv6 if we don't support it. Also skip it if host
# is 'localhost' (::1 is fine). Avoids slow connect issues
# like PYTHON-356.
family = socket.AF_INET
if socket.has_ipv6 and host != 'localhost':
family = socket.AF_UNSPEC
err = None
for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
af, socktype, proto, dummy, sa = res
sock = socket.socket(af, socktype, proto)
try:
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
sock.settimeout(options.connect_timeout)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE,
options.socket_keepalive)
sock.connect(sa)
return sock
except socket.error as e:
err = e
sock.close()
if err is not None:
raise err
else:
# This likely means we tried to connect to an IPv6 only
# host with an OS/kernel or Python interpreter that doesn't
# support IPv6. The test case is Jython2.5.1 which doesn't
# support IPv6 at all.
raise socket.error('getaddrinfo failed')
def _configured_socket(address, options):
    """Given (host, port) and PoolOptions, return a configured socket.

    Can raise socket.error, ConnectionFailure, or CertificateError.

    Sets socket's SSL and timeout options.
    """
    sock = _create_connection(address, options)
    context = options.ssl_context

    if context is not None:
        # Negotiate TLS over the freshly connected raw socket.
        try:
            sock = context.wrap_socket(sock)
        except IOError as exc:
            sock.close()
            raise ConnectionFailure("SSL handshake failed: %s" % (str(exc),))
        if context.verify_mode and options.ssl_match_hostname:
            # Verify the certificate matches the hostname we dialed.
            try:
                match_hostname(sock.getpeercert(), hostname=address[0])
            except CertificateError:
                sock.close()
                raise

    sock.settimeout(options.socket_timeout)
    return sock
# Do *not* explicitly inherit from object or Jython won't call __del__
# http://bugs.jython.org/issue1057
class Pool:
    """A pool of sockets connected to one server address."""

    def __init__(self, address, options, handshake=True):
        """
        :Parameters:
          - `address`: a (hostname, port) tuple
          - `options`: a PoolOptions instance
          - `handshake`: whether to call ismaster for each new SocketInfo
        """
        # Check a socket's health with socket_closed() every once in a while.
        # Can override for testing: 0 to always check, None to never check.
        self._check_interval_seconds = 1
        # Idle sockets ready for checkout; guarded by self.lock.
        self.sockets = set()
        self.lock = threading.Lock()

        # Keep track of resets, so we notice sockets created before the most
        # recent reset and close them.
        self.pool_id = 0
        # Owning process id; compared against os.getpid() to detect forks.
        self.pid = os.getpid()
        self.address = address
        self.opts = options
        self.handshake = handshake

        # max_waiters bounds how many threads may block waiting for a
        # socket; None means unbounded.
        if (self.opts.wait_queue_multiple is None or
                self.opts.max_pool_size is None):
            max_waiters = None
        else:
            max_waiters = (
                self.opts.max_pool_size * self.opts.wait_queue_multiple)

        # Limits the number of concurrently checked-out sockets.
        self._socket_semaphore = thread_util.create_semaphore(
            self.opts.max_pool_size, max_waiters)

    def reset(self):
        # Bump pool_id so sockets checked out before this reset are
        # discarded when returned; close the idle ones now.
        with self.lock:
            self.pool_id += 1
            self.pid = os.getpid()
            sockets, self.sockets = self.sockets, set()

        # Close outside the lock; these sockets are no longer reachable
        # from the pool.
        for sock_info in sockets:
            sock_info.close()

    def connect(self):
        """Connect to Mongo and return a new SocketInfo.

        Can raise ConnectionFailure or CertificateError.

        Note that the pool does not keep a reference to the socket -- you
        must call return_socket() when you're done with it.
        """
        sock = None
        try:
            sock = _configured_socket(self.address, self.opts)
            if self.handshake:
                # Learn server limits and type via the ismaster command.
                ismaster = IsMaster(command(sock, 'admin', {'ismaster': 1},
                                            False, False,
                                            ReadPreference.PRIMARY,
                                            DEFAULT_CODEC_OPTIONS))
            else:
                ismaster = None
            return SocketInfo(sock, self, ismaster, self.address)
        except socket.error as error:
            if sock is not None:
                sock.close()
            _raise_connection_failure(self.address, error)

    @contextlib.contextmanager
    def get_socket(self, all_credentials, checkout=False):
        """Get a socket from the pool. Use with a "with" statement.

        Returns a :class:`SocketInfo` object wrapping a connected
        :class:`socket.socket`.

        This method should always be used in a with-statement::

            with pool.get_socket(credentials, checkout) as socket_info:
                socket_info.send_message(msg)
                data = socket_info.receive_message(op_code, request_id)

        The socket is logged in or out as needed to match ``all_credentials``
        using the correct authentication mechanism for the server's wire
        protocol version.

        Can raise ConnectionFailure or OperationFailure.

        :Parameters:
          - `all_credentials`: dict, maps auth source to MongoCredential.
          - `checkout` (optional): keep socket checked out.
        """
        # First get a socket, then attempt authentication. Simplifies
        # semaphore management in the face of network errors during auth.
        sock_info = self._get_socket_no_auth()
        try:
            sock_info.check_auth(all_credentials)
            yield sock_info
        except:
            # Exception in caller. Decrement semaphore.
            self.return_socket(sock_info)
            raise
        else:
            if not checkout:
                self.return_socket(sock_info)

    def _get_socket_no_auth(self):
        """Get or create a SocketInfo. Can raise ConnectionFailure."""
        # We use the pid here to avoid issues with fork / multiprocessing.
        # See test.test_client:TestClient.test_fork for an example of
        # what could go wrong otherwise
        if self.pid != os.getpid():
            self.reset()

        # Get a free socket or create one.
        if not self._socket_semaphore.acquire(
                True, self.opts.wait_queue_timeout):
            self._raise_wait_queue_timeout()

        # We've now acquired the semaphore and must release it on error.
        try:
            try:
                # set.pop() isn't atomic in Jython less than 2.7, see
                # http://bugs.jython.org/issue1854
                with self.lock:
                    sock_info, from_pool = self.sockets.pop(), True
            except KeyError:
                # Can raise ConnectionFailure or CertificateError.
                sock_info, from_pool = self.connect(), False

            if from_pool:
                # Can raise ConnectionFailure.
                sock_info = self._check(sock_info)
        except:
            self._socket_semaphore.release()
            raise

        sock_info.last_checkout = _time()
        return sock_info

    def return_socket(self, sock_info):
        """Return the socket to the pool, or if it's closed discard it."""
        if self.pid != os.getpid():
            # Forked since checkout: discard everything.
            self.reset()
        else:
            if sock_info.pool_id != self.pool_id:
                # Socket predates the last reset; it may be stale.
                sock_info.close()
            elif not sock_info.closed:
                with self.lock:
                    self.sockets.add(sock_info)

        self._socket_semaphore.release()

    def _check(self, sock_info):
        """This side-effecty function checks if this pool has been reset since
        the last time this socket was used, or if the socket has been closed by
        some external network error, and if so, attempts to create a new socket.
        If this connection attempt fails we reset the pool and reraise the
        ConnectionFailure.

        Checking sockets lets us avoid seeing *some*
        :class:`~pymongo.errors.AutoReconnect` exceptions on server
        hiccups, etc. We only do this if it's been > 1 second since
        the last socket checkout, to keep performance reasonable - we
        can't avoid AutoReconnects completely anyway.
        """
        error = False

        # How long since socket was last checked out.
        age = _time() - sock_info.last_checkout
        if (self._check_interval_seconds is not None
                and (
                    0 == self._check_interval_seconds
                    or age > self._check_interval_seconds)):
            if socket_closed(sock_info.sock):
                sock_info.close()
                error = True

        if not error:
            return sock_info
        else:
            # Stale socket: replace it with a fresh connection.
            return self.connect()

    def _raise_wait_queue_timeout(self):
        raise ConnectionFailure(
            'Timed out waiting for socket from pool with max_size %r and'
            ' wait_queue_timeout %r' % (
                self.opts.max_pool_size, self.opts.wait_queue_timeout))

    def __del__(self):
        # Avoid ResourceWarnings in Python 3
        for sock_info in self.sockets:
            sock_info.close()
| |
"""
The MIT License (MIT)
Copyright (c) Serenity Software, LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from SereneRegistry import registry
from cahoots.parsers.base import BaseParser
from cahoots.data import DataHandler
from datetime import timedelta, datetime
import dateutil.parser as dateUtilParser
from pyparsing import\
Or, \
CaselessLiteral, \
ParseException, \
Word, \
originalTextFor, \
ZeroOrMore, \
nums, \
alphas, \
StringEnd
class DateParser(BaseParser):
    """Determines if given data is a date."""

    @staticmethod
    def bootstrap(config):
        """
        This method is statically called to bootstrap a parser

        :param config: cahoots config
        :type config: cahoots.config.BaseConfig
        """
        # Both plural and singular forms, so "3 days" and "a day" both match.
        time_scales = [
            'microseconds',
            'milliseconds',
            'seconds',
            'minutes',
            'hours',
            'days',
            'weeks',
            'years',
            'microsecond',
            'millisecond',
            'second',
            'minute',
            'hour',
            'day',
            'week',
            'year',
        ]

        # <number> <timescale> <preposition>
        # 3 seconds until / 50 seconds since
        pre_timedeltas = Or(
            [DateParser.create_pre_timedelta_literal(t) for t in time_scales]
        )
        # The trailing Word captures the date portion of the phrase.
        pre_timedelta_phrases = \
            pre_timedeltas + Word(alphas + nums + " .,;-/'")
        registry.set('DP_pre_timedelta_phrases', pre_timedelta_phrases)

        # <operator> <number> <timescale>
        # plus 5 hours / - 17 days
        post_timedelta_phrases = Or(
            [DateParser.create_post_timedelta_literal(t) for t in time_scales]
        )
        registry.set('DP_post_timedelta_phrases', post_timedelta_phrases)

    @staticmethod
    def create_pre_timedelta_literal(tok):
        """
        Detects <number> <timescale> <preposition>

        :param tok: the token we want to produce a detector for
        :type tok: str
        :return: parser element matching the delta-prefix phrase
        :rtype: pyparsing.And
        """
        # Matches "5", "1,000", "2.5", or the articles "a"/"an" (meaning 1).
        delta = originalTextFor(Or([
            Word(nums) +
            ZeroOrMore(',' + Word(nums+',')) +
            ZeroOrMore('.' + Word(nums)),
            CaselessLiteral('an'),
            CaselessLiteral('a')
        ])) + CaselessLiteral(tok) + DateParser.get_preposition_literals()
        delta.setName('pre' + tok).\
            setParseAction(DateParser.generate_pre_timedelta)
        return delta

    @staticmethod
    def generate_pre_timedelta(toks):
        """
        Generates a timedelta object for a delta-prefix match

        :param toks: (number, timescale, preposition) tokens from the match
        :type toks: pyparsing.ParseResults
        :return: the signed time delta the phrase describes
        :rtype: datetime.timedelta
        """
        # These prepositions point into the past, so the delta is negated.
        minus_prepositions = [
            'until',
            'before',
            'to',
            'from',
        ]
        number, timescale, preposition = toks
        number = DateParser.get_number_value(number)
        if preposition in minus_prepositions:
            number = -number
        return DateParser.determine_timescale_delta(timescale, number)

    @staticmethod
    def create_post_timedelta_literal(tok):
        """
        Detects <plus/minus> <number> <timescale>

        :param tok: the token we want to produce a detector for
        :type tok: str
        :return: parser element matching the delta-suffix phrase
        :rtype: pyparsing.Or
        """
        # StringEnd ensures the phrase terminates the input.
        delta = Or(
            [CaselessLiteral(t) for t in ['+', '-', 'plus', 'minus']]
        ) + originalTextFor(Or([
            Word(nums) +
            ZeroOrMore(',' + Word(nums+',')) +
            ZeroOrMore('.' + Word(nums)),
            CaselessLiteral('an'),
            CaselessLiteral('a')
        ])) + CaselessLiteral(tok) + StringEnd()
        delta.setName('post' + tok).\
            setParseAction(DateParser.generate_post_timedelta)
        return delta

    @staticmethod
    def generate_post_timedelta(toks):
        """
        Generates a timedelta object for a delta-suffix match

        :param toks: (operator, number, timescale) tokens from the match
        :type toks: pyparsing.ParseResults
        :return: the signed time delta the phrase describes
        :rtype: datetime.timedelta
        """
        operator, number, timescale = toks
        number = DateParser.get_number_value(number)
        if operator in ['minus', '-']:
            number = -number
        return DateParser.determine_timescale_delta(timescale, number)

    @staticmethod
    def get_preposition_literals():
        """
        Generates the prepositions parser and returns it

        :return: the parser for prepositions
        :rtype: pyparsing.Or
        """
        # Cache in the registry: building the Or is relatively expensive and
        # it is needed once per timescale.
        if registry.test('DP_prepositions'):
            return registry.get('DP_prepositions')

        prepositions = \
            Or([CaselessLiteral(s) for s in DataHandler().get_prepositions()])
        registry.set('DP_prepositions', prepositions)
        return prepositions

    @staticmethod
    def get_number_value(number):
        """
        Turns a provided number into a proper float

        :param number: number as string
        :type number: str
        :return: the number in numeric form
        :rtype: float
        """
        if number in ['a', 'an']:
            # The articles "a"/"an" mean exactly one.
            number = 1.0
        else:
            # Strip grouping commas and keep only digits and the decimal dot.
            number = \
                float("".join([char for char in number if char in nums+'.']))
        return number

    @staticmethod
    def determine_timescale_delta(timescale, number):
        """
        Gets a timedelta representing the change desired

        :param timescale: natural language timescale
        :type timescale: str
        :param number: number of "timescales"
        :type number: float
        :return: the timedelta for this timescale
        :rtype: timedelta
        """
        # Normalize singular forms ("day") to the plural keyword ("days").
        if timescale[-1:] != 's':
            timescale += 's'

        if timescale == 'years':
            # timedelta has no "years" argument; approximate a year as
            # 365 days (matches the original behavior).
            return timedelta(days=365*number)

        if timescale in ('microseconds', 'milliseconds', 'seconds',
                         'minutes', 'hours', 'days', 'weeks'):
            # These timescales map directly onto timedelta keyword arguments.
            return timedelta(**{timescale: number})

        # Unrecognized timescale: contribute no offset.
        return timedelta()

    def __init__(self, config):
        """
        :param config: cahoots config
        :type config: cahoots.config.BaseConfig
        """
        BaseParser.__init__(self, config, "Date", 0)

    @classmethod
    def natural_parse(cls, data):
        """
        Parse out natural-language strings like "yesterday", "next week", etc

        :param data: potential natural language value
        :type data: str
        :return: the value of the language string, or False when unrecognized
        :rtype: datetime or bool
        """
        data = data.lower()
        today = datetime.today()

        if data in ('now', 'current time'):
            return datetime.now()

        # Phrases expressible as a whole-day offset from today.
        day_offsets = {
            'today': 0,
            'tomorrow': 1,
            'yesterday': -1,
            'next week': 7,
            'last week': -7,
            'next year': 365,
            'last year': -365,
        }
        if data in day_offsets:
            return today + timedelta(day_offsets[data])

        return False

    def date_parse(self, data):
        """
        Uses the dateUtilParser to determine what our date is

        :param data: string that might be a date
        :type data: str
        :return: ('Natural'|'Standard', parsed date) tuple, or False
        :rtype: tuple or bool
        """
        parsed_date = self.natural_parse(data)
        if parsed_date:
            return ('Natural', parsed_date)

        try:
            return ('Standard', dateUtilParser.parse(data))
        except BaseException:
            # dateutil raises assorted errors on non-dates; treat as no match.
            pass

        return False

    def parse(self, data_string):
        """
        parses for dates

        :param data_string: the string we want to parse
        :type data_string: str
        :return: yields parse result(s) if there are any
        :rtype: ParseResult
        """
        data_string = data_string.strip()

        # Too short to be a date, or too long to be worth trying.
        if len(data_string) < 3 or len(data_string) > 50:
            return

        # Just date detection
        parsed_date = self.date_parse(data_string)
        if parsed_date:
            yield self.result(parsed_date[0], 100, parsed_date[1])
            return

        # Looking for <number> <timescale> <prepositions> <datetime>
        pre_timedelta_phrases = registry.get('DP_pre_timedelta_phrases')
        try:
            pre_delta = pre_timedelta_phrases.parseString(data_string)
        except ParseException:
            pass
        else:
            # pre_delta[0] is the timedelta, pre_delta[1] the date text.
            parsed_date = self.date_parse(pre_delta[1])
            if parsed_date:
                try:
                    yield self.result(
                        "Number Timescale Preposition Date",
                        100,
                        parsed_date[1] + pre_delta[0]
                    )
                except OverflowError:
                    # The delta pushed the date outside datetime's range.
                    pass
                return

        # Looking for <datetime> <plus/minus> <number> <timescale>
        post_timedelta_phrases = registry.get('DP_post_timedelta_phrases')
        post_deltas = list(post_timedelta_phrases.scanString(data_string))
        if len(post_deltas) == 1:
            for token, start, _ in post_deltas:
                # Everything before the delta phrase should be the date.
                parsed_date = self.date_parse(data_string[0:start].strip())
                if parsed_date:
                    try:
                        yield self.result(
                            "Date Operator Number Timescale",
                            100,
                            parsed_date[1] + token.pop()
                        )
                    except OverflowError:
                        # The delta pushed the date outside datetime's range.
                        pass
                    return
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Functional operations.
## Higher Order Operators
TensorFlow provides several higher order operators to simplify the common
map-reduce programming patterns.
@@map_fn
@@foldl
@@foldr
@@scan
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_functional_ops import *
# pylint: enable=wildcard-import
# pylint: disable=unused-import
from tensorflow.python.ops.gen_functional_ops import _symbolic_gradient
# pylint: enable=unused-import
# TODO(yuanbyu, mrry): Handle stride to support sliding windows.
def foldl(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
          swap_memory=False, name=None):
  """foldl on the list of tensors unpacked from `elems` on dimension 0.

  This foldl operator repeatedly applies the callable `fn` to a sequence
  of elements from first to last. The elements are made of the tensors
  unpacked from `elems` on dimension 0. The callable fn takes two tensors as
  arguments. The first argument is the accumulated value computed from the
  preceding invocation of fn. If `initializer` is None, `elems` must contain
  at least one element, and its first element is used as the initializer.

  Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
  of the result tensor is `fn(initializer, values[0]).shape`.

  Args:
    fn: The callable to be performed.
    elems: A tensor to be unpacked on dimension 0.
    initializer: (optional) The initial value for the accumulator.
    parallel_iterations: (optional) The number of iterations allowed to run
      in parallel.
    back_prop: (optional) True enables back propagation.
    swap_memory: (optional) True enables GPU-CPU memory swapping.
    name: (optional) Name prefix for the returned tensors.

  Returns:
    A tensor resulting from applying `fn` consecutively to the list of tensors
    unpacked from `elems`, from first to last.

  Raises:
    TypeError: if `fn` is not callable.

  Example:
    ```python
    elems = [1, 2, 3, 4, 5, 6]
    sum = foldl(lambda a, x: a + x, elems)
    # sum == 21
    ```
  """
  if not callable(fn):
    raise TypeError("fn must be callable.")

  # TODO(ebrevdo): Change to using colocate_with here and in other methods.
  with vs.variable_op_scope([elems], name, "foldl") as varscope:
    # Any get_variable calls fn will cache the first call locally
    # and not issue repeated network I/O requests for each iteration.
    if varscope.caching_device is None:
      varscope.set_caching_device(lambda op: op.device)

    # Convert elems to tensor array so elements can be read by index
    # inside the while loop.
    elems = ops.convert_to_tensor(elems, name="elems")
    n = array_ops.shape(elems)[0]
    elems_ta = tensor_array_ops.TensorArray(dtype=elems.dtype, size=n,
                                            dynamic_size=False,
                                            infer_shape=True)
    elems_ta = elems_ta.unpack(elems)

    # With no initializer, seed the accumulator with the first element
    # and start folding from index 1; otherwise start from index 0.
    if initializer is None:
      a = elems_ta.read(0)
      i = constant_op.constant(1)
    else:
      a = ops.convert_to_tensor(initializer)
      i = constant_op.constant(0)

    def compute(i, a):
      # One fold step: combine the accumulator with element i.
      a = fn(a, elems_ta.read(i))
      return [i + 1, a]

    _, r_a = control_flow_ops.while_loop(
        lambda i, a: i < n, compute, [i, a],
        parallel_iterations=parallel_iterations,
        back_prop=back_prop,
        swap_memory=swap_memory)
    return r_a
def foldr(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
          swap_memory=False, name=None):
  """foldr on the list of tensors unpacked from `elems` on dimension 0.

  This foldr operator repeatedly applies the callable `fn` to a sequence
  of elements from last to first. The elements are made of the tensors
  unpacked from `elems`. The callable fn takes two tensors as arguments.
  The first argument is the accumulated value computed from the preceding
  invocation of fn. If `initializer` is None, `elems` must contain at least
  one element, and its first element is used as the initializer.

  Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
  of the result tensor is `fn(initializer, values[0]).shape`.

  Args:
    fn: The callable to be performed.
    elems: A tensor that is unpacked into a sequence of tensors to apply `fn`.
    initializer: (optional) The initial value for the accumulator.
    parallel_iterations: (optional) The number of iterations allowed to run
      in parallel.
    back_prop: (optional) True enables back propagation.
    swap_memory: (optional) True enables GPU-CPU memory swapping.
    name: (optional) Name prefix for the returned tensors.

  Returns:
    A tensor resulting from applying `fn` consecutively to the list of tensors
    unpacked from `elems`, from last to first.

  Raises:
    TypeError: if `fn` is not callable.

  Example:
    ```python
    elems = [1, 2, 3, 4, 5, 6]
    sum = foldr(lambda a, x: a + x, elems)
    # sum == 21
    ```
  """
  if not callable(fn):
    raise TypeError("fn must be callable.")

  with vs.variable_op_scope([elems], name, "foldr") as varscope:
    # Any get_variable calls fn will cache the first call locally
    # and not issue repeated network I/O requests for each iteration.
    if varscope.caching_device is None:
      varscope.set_caching_device(lambda op: op.device)

    # Convert elems to tensor array so elements can be read by index
    # inside the while loop.
    elems = ops.convert_to_tensor(elems, name="elems")
    n = array_ops.shape(elems)[0]
    elems_ta = tensor_array_ops.TensorArray(dtype=elems.dtype, size=n,
                                            dynamic_size=False,
                                            infer_shape=True)
    elems_ta = elems_ta.unpack(elems)

    # With no initializer, seed the accumulator with the last element and
    # fold the remaining n-1 elements; otherwise fold all n elements.
    # `i` is one past the next element to consume, counting down to 0.
    if initializer is None:
      i = n - 1
      a = elems_ta.read(i)
    else:
      i = n
      a = ops.convert_to_tensor(initializer)

    def compute(i, a):
      # One fold step, walking backward: combine accumulator with
      # element i-1.
      i -= 1
      a = fn(a, elems_ta.read(i))
      return [i, a]

    _, r_a = control_flow_ops.while_loop(
        lambda i, a: i > 0, compute, [i, a],
        parallel_iterations=parallel_iterations,
        back_prop=back_prop,
        swap_memory=swap_memory)
    return r_a
def map_fn(fn, elems, dtype=None, parallel_iterations=10, back_prop=True,
           swap_memory=False, name=None):
  """map on the list of tensors unpacked from `elems` on dimension 0.

  This map operator repeatedly applies the callable `fn` to a sequence of
  elements from first to last. The elements are made of the tensors unpacked
  from `elems`. `dtype` is the data type of the return value of `fn`. Users
  must provide `dtype` if it is different from the data type of `elems`.

  Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
  of the result tensor is `[len(values)] + fn(values[0]).shape`.

  Args:
    fn: The callable to be performed.
    elems: A tensor to be unpacked to apply `fn`.
    dtype: (optional) The output type of `fn`.
    parallel_iterations: (optional) The number of iterations allowed to run
      in parallel.
    back_prop: (optional) True enables back propagation.
    swap_memory: (optional) True enables GPU-CPU memory swapping.
    name: (optional) Name prefix for the returned tensors.

  Returns:
    A tensor that packs the results of applying `fn` to the list of tensors
    unpacked from `elems`, from first to last.

  Raises:
    TypeError: if `fn` is not callable.

  Example:
    ```python
    elems = [1, 2, 3, 4, 5, 6]
    squares = map_fn(lambda x: x * x, elems)
    # squares == [1, 4, 9, 16, 25, 36]
    ```
  """
  # NOTE(review): this uses the pre-1.0 TensorFlow API (variable_op_scope,
  # TensorArray.unpack/pack); modern TF renamed these (variable_scope,
  # unstack/stack).
  if not callable(fn):
    raise TypeError("fn must be callable.")
  with vs.variable_op_scope([elems], name, "map") as varscope:
    # Any get_variable calls in fn will cache the first call locally
    # and not issue repeated network I/O requests for each iteration.
    if varscope.caching_device is None:
      varscope.set_caching_device(lambda op: op.device)
    elems = ops.convert_to_tensor(elems, name="elems")
    # If the caller did not supply an output dtype, assume fn preserves the
    # element dtype.
    dtype = dtype if dtype else elems.dtype
    # Convert elems to tensor array so individual elements can be read by
    # dynamic index inside the while_loop.
    n = array_ops.shape(elems)[0]
    elems_ta = tensor_array_ops.TensorArray(dtype=elems.dtype, size=n,
                                            dynamic_size=False,
                                            infer_shape=True)
    elems_ta = elems_ta.unpack(elems)
    i = constant_op.constant(0)
    # Accumulator TensorArray holding fn's result for each element.
    acc_ta = tensor_array_ops.TensorArray(dtype=dtype, size=n,
                                          dynamic_size=False,
                                          infer_shape=True)
    def compute(i, ta):
      # Loop body: apply fn to element i and store the result.
      ta = ta.write(i, fn(elems_ta.read(i)))
      return [i + 1, ta]
    _, r_a = control_flow_ops.while_loop(
        lambda i, a: i < n, compute, [i, acc_ta],
        parallel_iterations=parallel_iterations,
        back_prop=back_prop,
        swap_memory=swap_memory)
    result = r_a.pack()
    # Restore the statically-known leading dimension lost inside the loop.
    result.set_shape(elems.get_shape().with_rank_at_least(1)[0:1].concatenate(
        result.get_shape()[1:]))
    return result
def scan(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
         swap_memory=False, name=None):
  """scan on the list of tensors unpacked from `elems` on dimension 0.

  This scan operator repeatedly applies the callable `fn` to a sequence
  of elements from first to last. The elements are made of the tensors
  unpacked from `elems` on dimension 0. The callable fn takes two tensors as
  arguments. The first argument is the accumulated value computed from the
  preceding invocation of fn. If `initializer` is None, `elems` must contain
  at least one element, and its first element is used as the initializer.

  Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
  of the result tensor is `[len(values)] + fn(initializer, values[0]).shape`.

  Args:
    fn: The callable to be performed.
    elems: A tensor to be unpacked on dimension 0.
    initializer: (optional) The initial value for the accumulator.
    parallel_iterations: (optional) The number of iterations allowed to run
      in parallel.
    back_prop: (optional) True enables back propagation.
    swap_memory: (optional) True enables GPU-CPU memory swapping.
    name: (optional) Name prefix for the returned tensors.

  Returns:
    A tensor that packs the results of applying `fn` to the list of tensors
    unpacked from `elems`, from first to last.

  Raises:
    TypeError: if `fn` is not callable.

  Example:
    ```python
    elems = [1, 2, 3, 4, 5, 6]
    sum = scan(lambda a, x: a + x, elems)
    # sum == [1, 3, 6, 10, 15, 21]
    ```
  """
  # NOTE(review): pre-1.0 TensorFlow API (variable_op_scope,
  # TensorArray.unpack/pack).
  if not callable(fn):
    raise TypeError("fn must be callable.")
  with vs.variable_op_scope([elems], name, "scan") as varscope:
    # Any get_variable calls in fn will cache the first call locally
    # and not issue repeated network I/O requests for each iteration.
    if varscope.caching_device is None:
      varscope.set_caching_device(lambda op: op.device)
    # Convert elems to tensor array.
    elems = ops.convert_to_tensor(elems, name="elems")
    n = array_ops.shape(elems)[0]
    elems_ta = tensor_array_ops.TensorArray(dtype=elems.dtype, size=n,
                                            dynamic_size=False,
                                            infer_shape=True)
    elems_ta = elems_ta.unpack(elems)
    if initializer is None:
      # With no initializer, the first element seeds the accumulator and
      # the loop starts at index 1.
      a = elems_ta.read(0)
      i = constant_op.constant(1)
    else:
      a = ops.convert_to_tensor(initializer)
      i = constant_op.constant(0)
    # Create a tensor array to store the intermediate values.
    acc_ta = tensor_array_ops.TensorArray(dtype=a.dtype, size=n,
                                          dynamic_size=False,
                                          infer_shape=True)
    if initializer is None:
      # Slot 0 of the output is the seed element itself.
      acc_ta = acc_ta.write(0, a)
    def compute(i, a, ta):
      # Loop body: fold element i into the accumulator and record it.
      a = fn(a, elems_ta.read(i))
      ta = ta.write(i, a)
      return [i + 1, a, ta]
    _, _, r_a = control_flow_ops.while_loop(
        lambda i, a, ta: i < n, compute, [i, a, acc_ta],
        parallel_iterations=parallel_iterations,
        back_prop=back_prop, swap_memory=swap_memory)
    result = r_a.pack()
    # Restore the statically-known leading dimension lost inside the loop.
    result.set_shape(elems.get_shape().with_rank_at_least(1)[0:1].concatenate(
        result.get_shape()[1:]))
    return result
@ops.RegisterShape("SymbolicGradient")
def _symbolic_gradient_shape(op):
  """Shape function for the SymbolicGradient op.

  If (u, v) = f(x, y, z), then _symbolic_gradient(f) maps
  (x, y, z, du, dv) -> (dx, dy, dz); each output gradient therefore has
  the shape of the corresponding input.
  """
  num_grads = len(op.outputs)
  return [inp.get_shape() for inp in op.inputs[:num_grads]]
| |
# Copyright (c) 2015-2020 by Rocky Bernstein
# Copyright (c) 2005 by Dan Pascu <dan@windowmaker.org>
# Copyright (c) 2000-2002 by hartmut Goebel <h.goebel@crazy-compilers.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Python 3.7 bytecode scanner/deparser base.
Also we *modify* the instruction sequence to assist deparsing code.
For example:
- we add "COME_FROM" instructions to help in figuring out
conditional branching and looping.
- LOAD_CONSTs are classified further into the type of thing
they load:
lambda's, genexpr's, {dict,set,list} comprehension's,
- PARAMETER counts appended {CALL,MAKE}_FUNCTION, BUILD_{TUPLE,SET,SLICE}
Finally we save token information.
"""
from xdis import iscode, instruction_size, Instruction
from xdis.bytecode import _get_const_info
from uncompyle6.scanner import Token
import xdis
# Get all the opcodes into globals
import xdis.opcodes.opcode_37 as op3
from uncompyle6.scanner import Scanner
import sys
globals().update(op3.opmap)
class Scanner37Base(Scanner):
def __init__(self, version, show_asm=None, is_pypy=False):
    """Build the opcode classification sets used by ingest() and the
    control-flow analysis below.

    version:  bytecode version being scanned (a float, e.g. 3.7)
    show_asm: None, "before", "after", or "both" -- disassembly dumping
    is_pypy:  True when scanning PyPy-generated bytecode
    """
    super(Scanner37Base, self).__init__(version, show_asm, is_pypy)

    # Create opcode classification sets
    # Note: super initialization above initializes self.opc

    # Ops that start SETUP_ ... We will COME_FROM with these names
    # Some blocks and END_ statements. And they can start
    # a new statement
    if self.version < 3.8:
        setup_ops = [
            self.opc.SETUP_LOOP,
            self.opc.SETUP_EXCEPT,
            self.opc.SETUP_FINALLY,
        ]
        self.setup_ops_no_loop = frozenset(setup_ops) - frozenset(
            [self.opc.SETUP_LOOP]
        )
    else:
        setup_ops = [self.opc.SETUP_FINALLY]
        self.setup_ops_no_loop = frozenset(setup_ops)
        # Add back these opcodes which help us detect "break" and
        # "continue" statements via parsing.
        # (3.8 dropped BREAK_LOOP/CONTINUE_LOOP; we re-register their
        # pre-3.8 opcode numbers as phony entries.)
        self.opc.BREAK_LOOP = 80
        self.opc.CONTINUE_LOOP = 119
        pass

    setup_ops.append(self.opc.SETUP_WITH)
    self.setup_ops = frozenset(setup_ops)
    # The two POP_JUMP_IF_{FALSE,TRUE} opcodes.
    self.pop_jump_tf = frozenset([self.opc.PJIF, self.opc.PJIT])
    self.not_continue_follow = ("END_FINALLY", "POP_BLOCK")

    # Opcodes that can start a statement.
    statement_opcodes = [
        self.opc.POP_BLOCK,
        self.opc.STORE_FAST,
        self.opc.DELETE_FAST,
        self.opc.STORE_DEREF,
        self.opc.STORE_GLOBAL,
        self.opc.DELETE_GLOBAL,
        self.opc.STORE_NAME,
        self.opc.DELETE_NAME,
        self.opc.STORE_ATTR,
        self.opc.DELETE_ATTR,
        self.opc.STORE_SUBSCR,
        self.opc.POP_TOP,
        self.opc.DELETE_SUBSCR,
        self.opc.END_FINALLY,
        self.opc.RETURN_VALUE,
        self.opc.RAISE_VARARGS,
        self.opc.PRINT_EXPR,
        self.opc.JUMP_ABSOLUTE,
        # These are phony for 3.8+
        self.opc.BREAK_LOOP,
        self.opc.CONTINUE_LOOP,
    ]

    self.statement_opcodes = frozenset(statement_opcodes) | self.setup_ops_no_loop

    # Opcodes that can start a "store" non-terminal.
    # FIXME: JUMP_ABSOLUTE is weird. What's up with that?
    self.designator_ops = frozenset(
        [
            self.opc.STORE_FAST,
            self.opc.STORE_NAME,
            self.opc.STORE_GLOBAL,
            self.opc.STORE_DEREF,
            self.opc.STORE_ATTR,
            self.opc.STORE_SUBSCR,
            self.opc.UNPACK_SEQUENCE,
            self.opc.JUMP_ABSOLUTE,
            self.opc.UNPACK_EX,
        ]
    )

    # Short-circuit jumps that leave their operand on the stack.
    self.jump_if_pop = frozenset(
        [self.opc.JUMP_IF_FALSE_OR_POP, self.opc.JUMP_IF_TRUE_OR_POP]
    )

    self.pop_jump_if_pop = frozenset(
        [
            self.opc.JUMP_IF_FALSE_OR_POP,
            self.opc.JUMP_IF_TRUE_OR_POP,
            self.opc.POP_JUMP_IF_TRUE,
            self.opc.POP_JUMP_IF_FALSE,
        ]
    )
    # Not really a set, but still classification-like
    self.statement_opcode_sequences = [
        (self.opc.POP_JUMP_IF_FALSE, self.opc.JUMP_FORWARD),
        (self.opc.POP_JUMP_IF_FALSE, self.opc.JUMP_ABSOLUTE),
        (self.opc.POP_JUMP_IF_TRUE, self.opc.JUMP_FORWARD),
        (self.opc.POP_JUMP_IF_TRUE, self.opc.JUMP_ABSOLUTE),
    ]

    # FIXME: remove this and use instead info from xdis.
    # Opcodes that take a variable number of arguments
    # (expr's)
    varargs_ops = set(
        [
            self.opc.BUILD_LIST,
            self.opc.BUILD_TUPLE,
            self.opc.BUILD_SET,
            self.opc.BUILD_SLICE,
            self.opc.BUILD_MAP,
            self.opc.UNPACK_SEQUENCE,
            self.opc.RAISE_VARARGS,
        ]
    )

    varargs_ops.add(self.opc.CALL_METHOD)
    varargs_ops |= set(
        [
            self.opc.BUILD_SET_UNPACK,
            self.opc.BUILD_MAP_UNPACK,  # we will handle this later
            self.opc.BUILD_LIST_UNPACK,
            self.opc.BUILD_TUPLE_UNPACK,
        ]
    )
    varargs_ops.add(self.opc.BUILD_CONST_KEY_MAP)

    # Below is in bit order, "default = bit 0, closure = bit 3
    self.MAKE_FUNCTION_FLAGS = tuple(
        """
 default keyword-only annotation closure""".split()
    )

    self.varargs_ops = frozenset(varargs_ops)
    # FIXME: remove the above in favor of:
    # self.varargs_ops = frozenset(self.opc.hasvargs)
    return
def ingest(self, co, classname=None, code_objects={}, show_asm=None):
    """
    Pick out tokens from an uncompyle6 code object, and transform them,
    returning a list of uncompyle6 Token's.

    The transformations are made to assist the deparsing grammar.
    Specificially:
       -  various types of LOAD_CONST's are categorized in terms of what they load
       -  COME_FROM instructions are added to assist parsing control structures
       -  MAKE_FUNCTION and FUNCTION_CALLS append the number of positional arguments
       -  some EXTENDED_ARGS instructions are removed

    Also, when we encounter certain tokens, we add them to a set which will cause custom
    grammar rules. Specifically, variable arg tokens like MAKE_FUNCTION or BUILD_LIST
    cause specific rules for the specific number of arguments they take.
    """

    def tokens_append(j, token):
        # Append token and keep offset->token-index map in sync;
        # returns the updated running index j.
        tokens.append(token)
        self.offset2tok_index[token.offset] = j
        j += 1
        assert j == len(tokens)
        return j

    if not show_asm:
        show_asm = self.show_asm

    bytecode = self.build_instructions(co)

    # show_asm = 'both'
    if show_asm in ("both", "before"):
        for instr in bytecode.get_instructions(co):
            print(instr.disassemble())

    # "customize" is in the process of going away here
    customize = {}
    if self.is_pypy:
        customize["PyPy"] = 0

    # Scan for assertions. Later we will
    # turn 'LOAD_GLOBAL' to 'LOAD_ASSERT'.
    # 'LOAD_ASSERT' is used in assert statements.
    self.load_asserts = set()

    # list of tokens/instructions
    tokens = []
    self.offset2tok_index = {}

    n = len(self.insts)
    for i, inst in enumerate(self.insts):

        # We need to detect the difference between:
        #   raise AssertionError
        #  and
        #   assert ...
        # If we have a JUMP_FORWARD after the
        # RAISE_VARARGS then we have a "raise" statement
        # else we have an "assert" statement.
        assert_can_follow = inst.opname == "POP_JUMP_IF_TRUE" and i + 1 < n
        if assert_can_follow:
            next_inst = self.insts[i + 1]
            if (
                next_inst.opname == "LOAD_GLOBAL"
                and next_inst.argval == "AssertionError"
                and inst.argval
            ):
                raise_idx = self.offset2inst_index[self.prev_op[inst.argval]]
                raise_inst = self.insts[raise_idx]
                if raise_inst.opname.startswith("RAISE_VARARGS"):
                    self.load_asserts.add(next_inst.offset)
                pass
            pass

    # Operand values in Python wordcode are small. As a result,
    # there are these EXTENDED_ARG instructions - way more than
    # before 3.6. These cause a lot of parsing pain.
    # To simplify things we want to untangle this. We also
    # do this loop before we compute jump targets.
    for i, inst in enumerate(self.insts):

        # One artifact of the "too-small" operand problem, is that
        # some backward jumps, are turned into forward jumps to another
        # "extended arg" backward jump to the same location.
        if inst.opname == "JUMP_FORWARD":
            jump_inst = self.insts[self.offset2inst_index[inst.argval]]
            if jump_inst.has_extended_arg and jump_inst.opname.startswith("JUMP"):
                # Create combination of the jump-to instruction and
                # this one. Keep the position information of this instruction,
                # but the operator and operand properties come from the other
                # instruction
                self.insts[i] = Instruction(
                    jump_inst.opname,
                    jump_inst.opcode,
                    jump_inst.optype,
                    jump_inst.inst_size,
                    jump_inst.arg,
                    jump_inst.argval,
                    jump_inst.argrepr,
                    jump_inst.has_arg,
                    inst.offset,
                    inst.starts_line,
                    inst.is_jump_target,
                    inst.has_extended_arg,
                )

    # Get jump targets
    # Format: {target offset: [jump offsets]}
    jump_targets = self.find_jump_targets(show_asm)
    # print("XXX2", jump_targets)

    last_op_was_break = False

    j = 0
    for i, inst in enumerate(self.insts):

        argval = inst.argval
        op = inst.opcode

        if inst.opname == "EXTENDED_ARG":
            # FIXME: The EXTENDED_ARG is used to signal annotation
            # parameters
            if i + 1 < n and self.insts[i + 1].opcode != self.opc.MAKE_FUNCTION:
                continue

        if inst.offset in jump_targets:
            jump_idx = 0
            # We want to process COME_FROMs to the same offset to be in *descending*
            # offset order so we have the larger range or biggest instruction interval
            # last. (I think they are sorted in increasing order, but for safety
            # we sort them). That way, specific COME_FROM tags will match up
            # properly. For example, a "loop" with an "if" nested in it should have the
            # "loop" tag last so the grammar rule matches that properly.
            for jump_offset in sorted(jump_targets[inst.offset], reverse=True):
                come_from_name = "COME_FROM"

                opname = self.opname_for_offset(jump_offset)
                if opname == "EXTENDED_ARG":
                    k = xdis.next_offset(op, self.opc, jump_offset)
                    opname = self.opname_for_offset(k)

                if opname.startswith("SETUP_"):
                    come_from_type = opname[len("SETUP_") :]
                    come_from_name = "COME_FROM_%s" % come_from_type
                    pass
                elif inst.offset in self.except_targets:
                    come_from_name = "COME_FROM_EXCEPT_CLAUSE"
                j = tokens_append(
                    j,
                    Token(
                        come_from_name,
                        jump_offset,
                        repr(jump_offset),
                        offset="%s_%s" % (inst.offset, jump_idx),
                        has_arg=True,
                        opc=self.opc,
                        has_extended_arg=False,
                    ),
                )
                jump_idx += 1
                pass
            pass
        elif inst.offset in self.else_start:
            end_offset = self.else_start[inst.offset]
            j = tokens_append(
                j,
                Token(
                    "ELSE",
                    None,
                    repr(end_offset),
                    offset="%s" % (inst.offset),
                    has_arg=True,
                    opc=self.opc,
                    has_extended_arg=inst.has_extended_arg,
                ),
            )
            pass

        pattr = inst.argrepr
        opname = inst.opname

        if op in self.opc.CONST_OPS:
            const = argval
            if iscode(const):
                # Classify the kind of code object being loaded so the
                # grammar can give each its own rule.
                if const.co_name == "<lambda>":
                    assert opname == "LOAD_CONST"
                    opname = "LOAD_LAMBDA"
                elif const.co_name == "<genexpr>":
                    opname = "LOAD_GENEXPR"
                elif const.co_name == "<dictcomp>":
                    opname = "LOAD_DICTCOMP"
                elif const.co_name == "<setcomp>":
                    opname = "LOAD_SETCOMP"
                elif const.co_name == "<listcomp>":
                    opname = "LOAD_LISTCOMP"
                else:
                    opname = "LOAD_CODE"
                # verify() uses 'pattr' for comparison, since 'attr'
                # now holds Code(const) and thus can not be used
                # for comparison (todo: think about changing this)
                # pattr = 'code_object @ 0x%x %s->%s' %\
                # (id(const), const.co_filename, const.co_name)
                pattr = "<code_object " + const.co_name + ">"
            elif isinstance(const, str):
                opname = "LOAD_STR"
            else:
                if isinstance(inst.arg, int) and inst.arg < len(co.co_consts):
                    argval, _ = _get_const_info(inst.arg, co.co_consts)
                # Why don't we use _ above for "pattr" rather than "const"?
                # This *is* a little hoaky, but we have to coordinate with
                # other parts like n_LOAD_CONST in pysource.py for example.
                pattr = const
                pass
        elif opname == "IMPORT_NAME":
            if "." in inst.argval:
                opname = "IMPORT_NAME_ATTR"
                pass
        elif opname in ("MAKE_FUNCTION", "MAKE_CLOSURE"):
            # Decode the MAKE_FUNCTION flag bits into a list and encode the
            # flags value into the token name.
            flags = argval
            opname = "MAKE_FUNCTION_%d" % (flags)
            attr = []
            for flag in self.MAKE_FUNCTION_FLAGS:
                bit = flags & 1
                attr.append(bit)
                flags >>= 1
            attr = attr[:4]  # remove last value: attr[5] == False
            j = tokens_append(
                j,
                Token(
                    opname=opname,
                    attr=attr,
                    pattr=pattr,
                    offset=inst.offset,
                    linestart=inst.starts_line,
                    op=op,
                    has_arg=inst.has_arg,
                    opc=self.opc,
                    has_extended_arg=inst.has_extended_arg,
                ),
            )
            continue
        elif op in self.varargs_ops:
            pos_args = argval
            if self.is_pypy and not pos_args and opname == "BUILD_MAP":
                opname = "BUILD_MAP_n"
            else:
                opname = "%s_%d" % (opname, pos_args)
        elif self.is_pypy and opname == "JUMP_IF_NOT_DEBUG":
            # The value in the dict is in special cases in semantic actions, such
            # as JUMP_IF_NOT_DEBUG. The value is not used in these cases, so we put
            # in arbitrary value 0.
            customize[opname] = 0
        elif opname == "UNPACK_EX":
            # FIXME: try with scanner and parser by
            # changing argval
            before_args = argval & 0xFF
            after_args = (argval >> 8) & 0xFF
            pattr = "%d before vararg, %d after" % (before_args, after_args)
            argval = (before_args, after_args)
            opname = "%s_%d+%d" % (opname, before_args, after_args)
        elif op == self.opc.JUMP_ABSOLUTE:
            # Further classify JUMP_ABSOLUTE into backward jumps
            # which are used in loops, and "CONTINUE" jumps which
            # may appear in a "continue" statement.  The loop-type
            # and continue-type jumps will help us classify loop
            # boundaries The continue-type jumps help us get
            # "continue" statements with would otherwise be turned
            # into a "pass" statement because JUMPs are sometimes
            # ignored in rules as just boundary overhead.  In
            # comprehensions we might sometimes classify JUMP_BACK
            # as CONTINUE, but that's okay since we add a grammar
            # rule for that.
            pattr = argval
            target = self.get_target(inst.offset)
            if target <= inst.offset:
                next_opname = self.insts[i + 1].opname

                # 'Continue's include jumps to loops that are not
                # and the end of a block which follow with POP_BLOCK and COME_FROM_LOOP.
                # If the JUMP_ABSOLUTE is to a FOR_ITER and it is followed by another JUMP_FORWARD
                # then we'll take it as a "continue".
                is_continue = (
                    self.insts[self.offset2inst_index[target]].opname == "FOR_ITER"
                    and self.insts[i + 1].opname == "JUMP_FORWARD"
                )

                if self.version < 3.8 and (
                    is_continue
                    or (
                        inst.offset in self.stmts
                        and (
                            inst.starts_line
                            and next_opname not in self.not_continue_follow
                        )
                    )
                ):
                    opname = "CONTINUE"
                else:
                    opname = "JUMP_BACK"
                    # FIXME: this is a hack to catch stuff like:
                    #   if x: continue
                    # the "continue" is not on a new line.
                    # There are other situations where we don't catch
                    # CONTINUE as well.
                    if tokens[-1].kind == "JUMP_BACK" and tokens[-1].attr <= argval:
                        if tokens[-2].kind == "BREAK_LOOP":
                            del tokens[-1]
                        else:
                            # intern is used because we are changing the *previous* token
                            tokens[-1].kind = sys.intern("CONTINUE")
                if last_op_was_break and opname == "CONTINUE":
                    # BREAK_LOOP followed by CONTINUE: drop the CONTINUE.
                    last_op_was_break = False
                    continue
        elif inst.offset in self.load_asserts:
            opname = "LOAD_ASSERT"

        last_op_was_break = opname == "BREAK_LOOP"
        j = tokens_append(
            j,
            Token(
                opname=opname,
                attr=argval,
                pattr=pattr,
                offset=inst.offset,
                linestart=inst.starts_line,
                op=op,
                has_arg=inst.has_arg,
                opc=self.opc,
                has_extended_arg=inst.has_extended_arg,
            ),
        )
        pass

    if show_asm in ("both", "after"):
        for t in tokens:
            print(t.format(line_prefix=""))
        print()
    return tokens, customize
def find_jump_targets(self, debug):
    """
    Detect all offsets in a byte code which are jump targets
    where we might insert a COME_FROM instruction.

    Return a dict mapping each target offset to the list of offsets
    that jump to it.  An instruction can be jumped to from multiple
    instructions.
    """
    code = self.code
    n = len(code)
    self.structs = [{"type": "root", "start": 0, "end": n - 1}]

    # All loop entry points
    self.loops = []
    # Map fixed jumps to their real destination
    self.fixed_jumps = {}
    self.except_targets = {}
    self.ignore_if = set()
    self.build_statement_indices()
    self.else_start = {}

    # Containers filled by detect_control_flow()
    self.not_continue = set()
    self.return_end_ifs = set()
    self.setup_loop_targets = {}  # target given setup_loop offset
    self.setup_loops = {}  # setup_loop offset given target

    targets = {}
    for i, inst in enumerate(self.insts):
        offset = inst.offset
        op = inst.opcode

        # FIXME: this code is going to get removed.
        # Determine structures and fix jumps in Python versions
        # since 2.3
        self.detect_control_flow(offset, targets, i)

        if inst.has_arg:
            label = self.fixed_jumps.get(offset)
            oparg = inst.arg
            if self.code[offset] == self.opc.EXTENDED_ARG:
                # Skip past the EXTENDED_ARG prefix to the real instruction.
                j = xdis.next_offset(op, self.opc, offset)
                next_offset = xdis.next_offset(op, self.opc, j)
            else:
                next_offset = xdis.next_offset(op, self.opc, offset)

            if label is None:
                if op in self.opc.hasjrel and op != self.opc.FOR_ITER:
                    label = next_offset + oparg
                elif op in self.opc.hasjabs:
                    if op in self.jump_if_pop:
                        if oparg > offset:
                            label = oparg

            if label is not None and label != -1:
                targets[label] = targets.get(label, []) + [offset]
            elif op == self.opc.END_FINALLY and offset in self.fixed_jumps:
                label = self.fixed_jumps[offset]
                targets[label] = targets.get(label, []) + [offset]
                pass
            pass
        pass  # for loop

    # DEBUG:
    if debug in ("both", "after"):
        import pprint as pp

        pp.pprint(self.structs)

    return targets
def build_statement_indices(self):
    """Compute self.stmts, the set of bytecode offsets that start a
    statement, and self.next_stmt, a list mapping each offset to the
    offset of the next statement start.
    """
    code = self.code
    start = 0
    end = codelen = len(code)

    # Compose preliminary list of indices with statements,
    # using plain statement opcodes
    prelim = self.inst_matches(start, end, self.statement_opcodes)

    # Initialize final container with statements with
    # preliminary data
    stmts = self.stmts = set(prelim)

    # Same for opcode sequences
    pass_stmts = set()
    for sequence in self.statement_opcode_sequences:
        for i in self.op_range(start, end - (len(sequence) + 1)):
            match = True
            for elem in sequence:
                if elem != code[i]:
                    match = False
                    break
                i += instruction_size(code[i], self.opc)

            if match is True:
                i = self.prev_op[i]
                stmts.add(i)
                pass_stmts.add(i)

    # Initialize statement list with the full data we've gathered so far
    if pass_stmts:
        stmt_offset_list = list(stmts)
        stmt_offset_list.sort()
    else:
        stmt_offset_list = prelim

    # 'List-map' which contains offset of start of
    # next statement, when op offset is passed as index
    self.next_stmt = slist = []
    last_stmt_offset = -1
    i = 0

    # Go through all statement offsets
    for stmt_offset in stmt_offset_list:
        # Process absolute jumps, but do not remove 'pass' statements
        # from the set
        if (
            code[stmt_offset] == self.opc.JUMP_ABSOLUTE
            and stmt_offset not in pass_stmts
        ):
            # If absolute jump occurs in forward direction or it takes off from the
            # same line as previous statement, this is not a statement
            # FIXME: 0 isn't always correct
            target = self.get_target(stmt_offset)
            if (
                target > stmt_offset
                or self.lines[last_stmt_offset].l_no == self.lines[stmt_offset].l_no
            ):
                stmts.remove(stmt_offset)
                continue

            # Rewind ops till we encounter non-JUMP_ABSOLUTE one
            j = self.prev_op[stmt_offset]
            while code[j] == self.opc.JUMP_ABSOLUTE:
                j = self.prev_op[j]

            # If we got here, then it's list comprehension which
            # is not a statement too
            if code[j] == self.opc.LIST_APPEND:
                stmts.remove(stmt_offset)
                continue

        # Exclude ROT_TWO + POP_TOP
        elif (
            code[stmt_offset] == self.opc.POP_TOP
            and code[self.prev_op[stmt_offset]] == self.opc.ROT_TWO
        ):
            stmts.remove(stmt_offset)
            continue

        # Exclude FOR_ITER + designators
        elif code[stmt_offset] in self.designator_ops:
            j = self.prev_op[stmt_offset]
            while code[j] in self.designator_ops:
                j = self.prev_op[j]

            if code[j] == self.opc.FOR_ITER:
                stmts.remove(stmt_offset)
                continue

        # Add to list another list with offset of current statement,
        # equal to length of previous statement
        slist += [stmt_offset] * (stmt_offset - i)
        last_stmt_offset = stmt_offset
        i = stmt_offset

    # Finish filling the list for last statement
    slist += [codelen] * (codelen - len(slist))
def detect_control_flow(self, offset, targets, inst_index):
    """
    Detect type of block structures and their boundaries to fix optimized jumps
    in python2.3+

    Fills in self.structs, self.fixed_jumps, self.except_targets,
    self.ignore_if, self.not_continue and self.return_end_ifs as a
    side effect.
    """
    code = self.code
    inst = self.insts[inst_index]
    op = inst.opcode

    # Detect parent structure
    parent = self.structs[0]
    start = parent["start"]
    end = parent["end"]

    # Pick inner-most parent for our offset
    for struct in self.structs:
        current_start = struct["start"]
        current_end = struct["end"]
        if (current_start <= offset < current_end) and (
            current_start >= start and current_end <= end
        ):
            start = current_start
            end = current_end
            parent = struct

    if self.version < 3.8 and op == self.opc.SETUP_LOOP:
        # We categorize loop types: 'for', 'while', 'while 1' with
        # possibly suffixes '-loop' and '-else'
        # Try to find the jump_back instruction of the loop.
        # It could be a return instruction.

        start += inst.inst_size
        target = self.get_target(offset)
        end = self.restrict_to_parent(target, parent)
        self.setup_loops[target] = offset

        if target != end:
            self.fixed_jumps[offset] = end

        (line_no, next_line_byte) = self.lines[offset]
        jump_back = self.last_instr(
            start, end, self.opc.JUMP_ABSOLUTE, next_line_byte, False
        )

        if jump_back:
            jump_forward_offset = xdis.next_offset(
                code[jump_back], self.opc, jump_back
            )
        else:
            jump_forward_offset = None

        return_val_offset1 = self.prev[self.prev[end]]

        if (
            jump_back
            and jump_back != self.prev_op[end]
            and self.is_jump_forward(jump_forward_offset)
        ):
            if code[self.prev_op[end]] == self.opc.RETURN_VALUE or (
                code[self.prev_op[end]] == self.opc.POP_BLOCK
                and code[return_val_offset1] == self.opc.RETURN_VALUE
            ):
                jump_back = None

        if not jump_back:
            # loop suite ends in return
            jump_back = self.last_instr(start, end, self.opc.RETURN_VALUE)
            if not jump_back:
                return

            jb_inst = self.get_inst(jump_back)
            jump_back = self.next_offset(jb_inst.opcode, jump_back)

            if_offset = None
            if code[self.prev_op[next_line_byte]] not in self.pop_jump_tf:
                if_offset = self.prev[next_line_byte]
            if if_offset:
                loop_type = "while"
                self.ignore_if.add(if_offset)
            else:
                loop_type = "for"
            target = next_line_byte
            end = xdis.next_offset(code[jump_back], self.opc, jump_back)
        else:
            if self.get_target(jump_back) >= next_line_byte:
                jump_back = self.last_instr(
                    start, end, self.opc.JUMP_ABSOLUTE, start, False
                )

            jb_inst = self.get_inst(jump_back)

            jb_next_offset = self.next_offset(jb_inst.opcode, jump_back)
            if end > jb_next_offset and self.is_jump_forward(end):
                if self.is_jump_forward(jb_next_offset):
                    if self.get_target(jb_next_offset) == self.get_target(end):
                        self.fixed_jumps[offset] = jb_next_offset
                        end = jb_next_offset
            elif target < offset:
                self.fixed_jumps[offset] = jb_next_offset
                end = jb_next_offset

            target = self.get_target(jump_back)

            if code[target] in (self.opc.FOR_ITER, self.opc.GET_ITER):
                loop_type = "for"
            else:
                loop_type = "while"
                test = self.prev_op[next_line_byte]

                if test == offset:
                    loop_type = "while 1"
                elif self.code[test] in self.opc.JUMP_OPs:
                    self.ignore_if.add(test)
                    test_target = self.get_target(test)
                    if test_target > (jump_back + 3):
                        jump_back = test_target
            self.not_continue.add(jump_back)
        self.loops.append(target)
        self.structs.append(
            {"type": loop_type + "-loop", "start": target, "end": jump_back}
        )
        after_jump_offset = xdis.next_offset(code[jump_back], self.opc, jump_back)
        if after_jump_offset != end:
            self.structs.append(
                {
                    "type": loop_type + "-else",
                    "start": after_jump_offset,
                    "end": end,
                }
            )
    elif op in self.pop_jump_tf:
        target = inst.argval
        self.fixed_jumps[offset] = target

    elif self.version < 3.8 and op == self.opc.SETUP_EXCEPT:
        target = self.get_target(offset)
        end = self.restrict_to_parent(target, parent)
        self.fixed_jumps[offset] = end
    elif op == self.opc.POP_EXCEPT:
        next_offset = xdis.next_offset(op, self.opc, offset)
        target = self.get_target(next_offset)
        if target > next_offset:
            next_op = code[next_offset]
            if (
                self.opc.JUMP_ABSOLUTE == next_op
                and self.opc.END_FINALLY
                != code[xdis.next_offset(next_op, self.opc, next_offset)]
            ):
                self.fixed_jumps[next_offset] = target
                self.except_targets[target] = next_offset

    elif op == self.opc.SETUP_FINALLY:
        target = self.get_target(offset)
        end = self.restrict_to_parent(target, parent)
        self.fixed_jumps[offset] = end
    elif op in self.jump_if_pop:
        target = self.get_target(offset)
        if target > offset:
            unop_target = self.last_instr(
                offset, target, self.opc.JUMP_FORWARD, target
            )
            if unop_target and code[unop_target + 3] != self.opc.ROT_TWO:
                self.fixed_jumps[offset] = unop_target
            else:
                self.fixed_jumps[offset] = self.restrict_to_parent(target, parent)
                pass
            pass
    else:
        # 3.5+ has Jump optimization which too often causes RETURN_VALUE to get
        # misclassified as RETURN_END_IF. Handle that here.
        # In RETURN_VALUE, JUMP_ABSOLUTE, RETURN_VALUE is never RETURN_END_IF
        if op == self.opc.RETURN_VALUE:
            next_offset = xdis.next_offset(op, self.opc, offset)
            if next_offset < len(code) and (
                code[next_offset] == self.opc.JUMP_ABSOLUTE
                and offset in self.return_end_ifs
            ):
                self.return_end_ifs.remove(offset)
                pass
            pass
        elif op == self.opc.JUMP_FORWARD:
            # If we have:
            #     JUMP_FORWARD x, [non-jump, insns], RETURN_VALUE, x:
            # then RETURN_VALUE is not RETURN_END_IF
            rtarget = self.get_target(offset)
            rtarget_prev = self.prev[rtarget]
            if (
                code[rtarget_prev] == self.opc.RETURN_VALUE
                and rtarget_prev in self.return_end_ifs
            ):
                i = rtarget_prev
                while i != offset:
                    if code[i] in [op3.JUMP_FORWARD, op3.JUMP_ABSOLUTE]:
                        return
                    i = self.prev[i]
                self.return_end_ifs.remove(rtarget_prev)
            pass
        pass
    return
def is_jump_back(self, offset, extended_arg):
    """
    Return True if the instruction at `offset` is a JUMP_ABSOLUTE whose
    target precedes `offset` -- i.e. a backward jump such as the one
    that closes a loop body.  Any other opcode returns False.
    """
    if self.code[offset] != self.opc.JUMP_ABSOLUTE:
        return False
    return self.get_target(offset, extended_arg) < offset
def next_except_jump(self, start):
    """
    Return the next jump that was generated by an except SomeException:
    construct in a try...except...else clause or None if not found.
    """
    if self.code[start] == self.opc.DUP_TOP:
        # An "except SomeException:" clause starts with DUP_TOP; the
        # POP_JUMP_IF_FALSE that follows the exception match is the jump
        # we are after.
        except_match = self.first_instr(
            start, len(self.code), self.opc.POP_JUMP_IF_FALSE
        )
        if except_match:
            jmp = self.prev_op[self.get_target(except_match)]
            self.ignore_if.add(except_match)
            self.not_continue.add(jmp)
            return jmp

    # Bare "except:" clause: scan for the END_FINALLY that closes this
    # handler, skipping over any nested SETUP_ (non-loop) blocks.
    count_END_FINALLY = 0
    count_SETUP_ = 0
    for i in self.op_range(start, len(self.code)):
        op = self.code[i]
        if op == self.opc.END_FINALLY:
            if count_END_FINALLY == count_SETUP_:
                assert self.code[self.prev_op[i]] in frozenset(
                    [
                        self.opc.JUMP_ABSOLUTE,
                        self.opc.JUMP_FORWARD,
                        self.opc.RETURN_VALUE,
                    ]
                )
                self.not_continue.add(self.prev_op[i])
                return self.prev_op[i]
            count_END_FINALLY += 1
        # BUG FIX: the attribute set up in __init__ is "setup_ops_no_loop";
        # the original referenced the nonexistent "setup_opts_no_loop",
        # raising AttributeError whenever a nested SETUP_ op was scanned.
        elif op in self.setup_ops_no_loop:
            count_SETUP_ += 1
if __name__ == "__main__":
    # Self-demo: disassemble and tokenize this very frame's code object.
    from uncompyle6 import PYTHON_VERSION

    if PYTHON_VERSION >= 3.7:
        import inspect

        co = inspect.currentframe().f_code

        tokens, customize = Scanner37Base(PYTHON_VERSION).ingest(co)
        for t in tokens:
            print(t)
    else:
        # BUG FIX: the original mixed an f-string placeholder with
        # %-formatting ("...{PYTHON_VERSION}." % PYTHON_VERSION); since the
        # string had no conversion specifier, the % raised TypeError on the
        # very path meant to report the version.
        print(
            "Need to be Python 3.7 or greater to demo; I am version %s."
            % PYTHON_VERSION
        )
    pass
| |
#!/usr/bin/env python
'''
left elbow is in upper arm controller
Created March, 2012
@author: Peter Heim
r_shoulder.py - gateway to Arduino based arm controller
Copyright (c) 2011 Peter Heim. All right reserved.
Borrowed heavily from Mike Feguson's ArbotiX base_controller.py code.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Vanadium Labs LLC nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL VANADIUM LABS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import rospy
import tf
import math
from math import sin, cos, pi, radians, degrees
import sys
import time
from std_msgs.msg import String
from std_msgs.msg import Float64, Float32
from dynamixel_msgs.msg import MotorState
from dynamixel_msgs.msg import JointState
#from sensor_msgs.msg import JointState
from SerialDataGateway import SerialDataGateway
class R_shoulder(object):
'''
Helper class for communicating with an R_shoulder board over serial port
'''
def _HandleReceivedLine(self, line):
self._Counter = self._Counter + 1
#rospy.logwarn(str(self._Counter) + " " + line)
#if (self._Counter % 50 == 0):
self._SerialPublisher.publish(String(str(self._Counter) + ", in: " + line))
if (len(line) > 0):
lineParts = line.split('\t')
if (lineParts[0] == 'p5'):
self._BroadcastJointStateinfo_P5(lineParts)
return
if (lineParts[0] == 'p6'):
self._BroadcastJointStateinfo_P6(lineParts)
return
if (lineParts[0] == 'p7'):
self._BroadcastJointStateinfo_P7(lineParts)
return
if (lineParts[0] == 'p8'):
self._BroadcastJointStateinfo_P8(lineParts)
return
if (lineParts[0] == 'p9'):
self._BroadcastJointStateinfo_P9(lineParts)
return
    def _BroadcastJointStateinfo_P5(self, lineParts):
        """Publish motor/joint state for right_arm_rotate_joint from a 'p5' line.

        lineParts[1] is the raw position (degrees * 10), lineParts[3] the load.
        Any parse/attribute failure is swallowed by the bare except and logged.
        """
        partsCount = len(lineParts)
        #rospy.logwarn(partsCount)
        # NOTE(review): this guard does nothing -- 'pass' falls straight
        # through to the parsing below; a short line is instead caught by the
        # bare except. Presumably 'return' was intended; confirm.
        if (partsCount < 7):
            pass
        try:
            # Raw reading (degrees * 10) -> radians, negated with a 0.65 rad
            # offset for this joint's mounting.
            P1 = 0-((radians((float(lineParts[1])))/10)-0.65)
            P2 = self.right_rotate #0-((float(lineParts[2])* 0.00174532925)-1.57)
            P3 = float(lineParts[3])
            P4 = 0
            val = [P1, P2, P3, P4]
            Motor_State = MotorState()
            Motor_State.id = 11
            Motor_State.goal = P2
            Motor_State.position = P1
            Motor_State.speed = P4
            Motor_State.load = P3
            Motor_State.moving = 0
            Motor_State.timestamp = time.time()
            self.P5_MotorPublisher.publish(Motor_State)
            #rospy.logwarn(Motor_State)
            self._right_rotate_Publisher.publish(P1)
            Joint_State = JointState()
            Joint_State.name = "right_arm_rotate_joint"
            Joint_State.goal_pos = P2
            Joint_State.current_pos = P1
            Joint_State.velocity = P4
            Joint_State.load = P3
            Joint_State.error = P1 - P2
            Joint_State.is_moving = 0
            Joint_State.header.stamp = rospy.Time.now()
            self._P5_JointPublisher.publish(Joint_State)
            #rospy.logwarn(val)
        except:
            rospy.logwarn("Unexpected error:right_arm_rotate_joint" + str(sys.exc_info()[0]))
    def _BroadcastJointStateinfo_P6(self, lineParts):
        """Publish motor/joint state for left_arm_rotate_joint from a 'p6' line.

        Same structure as the P5 broadcaster; only the conversion constants
        and the target publishers differ. Errors are logged via the bare
        except, never raised.
        """
        partsCount = len(lineParts)
        #rospy.logwarn(partsCount)
        # NOTE(review): 'pass' makes this length guard a no-op; confirm
        # whether 'return' was intended.
        if (partsCount < 7):
            pass
        try:
            # Raw reading (degrees * 10) -> radians with a -2.08 rad offset.
            P1 = (radians((float(lineParts[1])))/10)-2.08
            P2 = self.left_rotate #0-((float(lineParts[2])* 0.00174532925)-1.57)
            P3 = float(lineParts[3])
            P4 = 0
            val = [P1, P2, P3, P4]
            Motor_State = MotorState()
            Motor_State.id = 11
            Motor_State.goal = P2
            Motor_State.position = P1
            Motor_State.speed = P4
            Motor_State.load = P3
            Motor_State.moving = 0
            Motor_State.timestamp = time.time()
            self.P6_MotorPublisher.publish(Motor_State)
            self._left_rotate_Publisher.publish(P1)
            Joint_State = JointState()
            Joint_State.name = "left_arm_rotate_joint"
            Joint_State.goal_pos = P2
            Joint_State.current_pos = P1
            Joint_State.velocity = P4
            Joint_State.load = P3
            Joint_State.error = P1 - P2
            Joint_State.is_moving = 0
            Joint_State.header.stamp = rospy.Time.now()
            self._P6_JointPublisher.publish(Joint_State)
            #rospy.logwarn(val)
        except:
            rospy.logwarn("Unexpected error:left_arm_rotate_joint" + str(sys.exc_info()[0]))
    def _BroadcastJointStateinfo_P7(self, lineParts):
        """Publish motor/joint state for right_arm_elbow_joint from a 'p7' line."""
        partsCount = len(lineParts)
        #rospy.logwarn(partsCount)
        # NOTE(review): no-op length guard ('pass'); confirm intent.
        if (partsCount < 7):
            pass
        try:
            #P1 = 0-(radians(float(lineParts[1])))/10
            # Raw reading (degrees * 10) -> radians, negated, +1.57 rad offset.
            P1 = 0 - (radians((float(lineParts[1])))/10)+1.57
            P2 = self.right_elbow #0-((float(lineParts[2])* 0.00174532925)-0.67)
            P3 = float(lineParts[3])
            P4 = 0
            val = [P1, P2, P3, P4]
            Motor_State = MotorState()
            Motor_State.id = 11
            Motor_State.goal = P2
            Motor_State.position = P1
            Motor_State.speed = P4
            Motor_State.load = P3
            Motor_State.moving = 0
            Motor_State.timestamp = time.time()
            self.P7_MotorPublisher.publish(Motor_State)
            self._right_elbow_Publisher.publish(P1)
            Joint_State = JointState()
            Joint_State.name = "right_arm_elbow_joint"
            Joint_State.goal_pos = P2
            Joint_State.current_pos = P1
            Joint_State.velocity = P4
            Joint_State.load = P3
            Joint_State.error = P1 - P2
            Joint_State.is_moving = 0
            Joint_State.header.stamp = rospy.Time.now()
            self._P7_JointPublisher.publish(Joint_State)
            #rospy.logwarn(val)
        except:
            rospy.logwarn("Unexpected error:right_arm_elbow_joint" + str(sys.exc_info()[0]))
    def _BroadcastJointStateinfo_P8(self, lineParts):
        """Publish motor/joint state from a 'p8' line (left elbow channel).

        NOTE(review): this publishes on the *left* elbow publisher but labels
        the JointState "right_arm_elbow_joint" and logs errors under the same
        name -- looks like a copy/paste slip from P7; confirm with consumers.
        Also verify self.P8_MotorPublisher / self._left_elbow_Publisher /
        self._P8_JointPublisher are actually created in __init__, otherwise
        every call dies in the bare except with an AttributeError.
        """
        partsCount = len(lineParts)
        #rospy.logwarn(partsCount)
        # NOTE(review): no-op length guard ('pass'); confirm intent.
        if (partsCount < 7):
            pass
        try:
            # Both position and goal come from the line here (0.001745... is
            # pi/1800, i.e. degrees*10 -> radians), offset by -1.57 and negated.
            P1 = 0-((float(lineParts[1])* 0.00174532925)-1.57)
            P2 = 0-((float(lineParts[2])* 0.00174532925)-1.57)
            P3 = float(lineParts[3])
            P4 = 0
            val = [P1, P2, P3, P4]
            Motor_State = MotorState()
            Motor_State.id = 11
            Motor_State.goal = P2
            Motor_State.position = P1
            Motor_State.speed = P4
            Motor_State.load = P3
            Motor_State.moving = 0
            Motor_State.timestamp = time.time()
            self.P8_MotorPublisher.publish(Motor_State)
            self._left_elbow_Publisher.publish(P1)
            Joint_State = JointState()
            Joint_State.name = "right_arm_elbow_joint"
            Joint_State.goal_pos = P2
            Joint_State.current_pos = P1
            Joint_State.velocity = P4
            Joint_State.load = P3
            Joint_State.error = P1 - P2
            Joint_State.is_moving = 0
            Joint_State.header.stamp = rospy.Time.now()
            self._P8_JointPublisher.publish(Joint_State)
            #rospy.logwarn(val)
        except:
            rospy.logwarn("Unexpected error:right_arm_elbow_joint" + str(sys.exc_info()[0]))
    def _BroadcastJointStateinfo_P9(self, lineParts):
        """Publish placeholder state for pan_joint from a 'p9' line.

        Positions are hard-coded to 1.57 rad (the real conversions are
        commented out), so this currently reports a fixed pose.
        NOTE(review): publishes to self._pan_jount_Publisher (note the
        'jount' typo) -- verify this attribute is created in __init__;
        if not, the bare except logs an AttributeError every call.
        """
        partsCount = len(lineParts)
        #rospy.logwarn(partsCount)
        # NOTE(review): no-op length guard ('pass'); confirm intent.
        if (partsCount < 7):
            pass
        try:
            P1 = 1.57#0-((float(lineParts[1])* 0.00174532925)-1.57)
            P2 = 1.57#0-((float(lineParts[2])* 0.00174532925)-1.57)
            P3 = 0#float(lineParts[3])
            P4 = 0
            val = [P1, P2, P3, P4]
            Motor_State = MotorState()
            Motor_State.id = 11
            Motor_State.goal = P2
            Motor_State.position = P1
            Motor_State.speed = P4
            Motor_State.load = P3
            Motor_State.moving = 0
            Motor_State.timestamp = time.time()
            self.P9_MotorPublisher.publish(Motor_State)
            self._pan_jount_Publisher.publish(P1)
            Joint_State = JointState()
            Joint_State.name = "pan_joint"
            Joint_State.goal_pos = P2
            Joint_State.current_pos = P1
            Joint_State.velocity = P4
            Joint_State.load = P3
            Joint_State.error = P1 - P2
            Joint_State.is_moving = 0
            Joint_State.header.stamp = rospy.Time.now()
            self._P9_JointPublisher.publish(Joint_State)
            #rospy.logwarn(val)
        except:
            rospy.logwarn("Unexpected error:pan_joint" + str(sys.exc_info()[0]))
def _WriteSerial(self, message):
self._SerialPublisher.publish(String(str(self._Counter) + ", out: " + message))
self._SerialDataGateway.Write(message)
def __init__(self,):
'''
Initializes the receiver class.
port: The serial port to listen to.
baudrate: Baud rate for the serial communication
'''
#port = rospy.get_param("~port", "/dev/ttyACM0")
#baud = int(rospy.get_param("~baud", "115200"))
#self.name = name
self.rate = rospy.get_param("~rate", 100.0)
self.fake = rospy.get_param("~sim", False)
self.cal_pan = rospy.get_param("~cal_pan", 0)
self.cal_tilt = rospy.get_param("~cal_tilt", 0)
self.cal_lift = rospy.get_param("~cal_lift", 0)
self.cal_rotate = rospy.get_param("~cal_rotate", 0)
self.cal_elbow = rospy.get_param("~cal_elbow", 0)
self.right_rotate = 0
self.left_rotate = 0
self.right_elbow = 0
#name = rospy.get_param("~name")
self._Counter = 0
rospy.init_node('lower_arms')
port = rospy.get_param("~port", "/dev/ttyACM1")
baudRate = int(rospy.get_param("~baudRate", 115200))
rospy.logwarn("Starting lower arms with serial port: " + port + ", baud rate: " + str(baudRate))
# subscriptions
rospy.Subscriber('right_arm_rotate_joint/command',Float64, self._HandleJoint_5_Command)
rospy.Subscriber('left_arm_rotate_joint/command',Float64, self._HandleJoint_6_Command)
rospy.Subscriber('right_arm_elbow_joint/command',Float64, self._HandleJoint_7_Command)
rospy.Subscriber('pan_joint/command',Float64, self._HandleJoint_8_Command)
self._SerialPublisher = rospy.Publisher('arm_lower', String, queue_size=5)
self.P5_MotorPublisher = rospy.Publisher("/right_arm_rotate/motor_state", MotorState, queue_size=5)
self.P6_MotorPublisher = rospy.Publisher("/left_arm_rotate/motor_state", MotorState, queue_size=5)
self.P7_MotorPublisher = rospy.Publisher("/right_arm_elbow/motor_state", MotorState, queue_size=5)
#self.P8_MotorPublisher = rospy.Publisher("/left_arm_elbow/motor_state", MotorState, queue_size=5)
self.P9_MotorPublisher = rospy.Publisher("/pan/motor_state", MotorState, queue_size=5)
self._P5_JointPublisher = rospy.Publisher("/right_arm_rotate_joint/state", JointState, queue_size=5)
self._P6_JointPublisher = rospy.Publisher("/left_arm_rotate_joint/state", JointState, queue_size=5)
self._P7_JointPublisher = rospy.Publisher("/right_arm_elbow_joint/state", JointState, queue_size=5)
#self._P8_JointPublisher = rospy.Publisher("/left_arm_elbow_joint/state", JointState, queue_size=5)
self._P9_JointPublisher = rospy.Publisher("/pan_joint/state", JointState, queue_size=5)
self._right_rotate_Publisher = rospy.Publisher("right_rotate", Float32, queue_size=5)
self._right_elbow_Publisher = rospy.Publisher("right_elbow", Float32, queue_size=5)
self._left_rotate_Publisher = rospy.Publisher("left_rotate", Float32, queue_size=5)
#self._left_elbow_Publisher = rospy.Publisher("left_elbow", Float32, queue_size=5)
self._left_rotate_Publisher = rospy.Publisher("pan", Float32, queue_size=5)
self._SerialDataGateway = SerialDataGateway(port, baudRate, self._HandleReceivedLine)
def Start(self):
rospy.loginfo("Starting start function")
self._SerialDataGateway.Start()
message = 'r \r'
self._WriteSerial(message)
def Stop(self):
rospy.loginfo("Stopping")
message = 'r \r'
self._WriteSerial(message)
sleep(5)
self._SerialDataGateway.Stop()
def _HandleJoint_5_Command(self, Command):
""" Handle movement requests.
right_arm_rotate_joint
send message in degrees * 10
"""
v = Command.data # angel request in radians
self.right_rotate = v
v1 =int(1023 -((v + 4.5) * 195.3786081396))#convert encoder value
if v1 < 70: v1 = 100 #degrees * 10
if v1 > 1000: v1 = 1000 #degrees * 10
message = 'j5 %d \r' % (v1)#% self._GetBaseAndExponents((v1)
rospy.logwarn("Sending right_arm_rotate_joint command: " + (message))
self._WriteSerial(message)
def _HandleJoint_6_Command(self, Command):
""" Handle movement requests.
left_arm_rotate_joint
send message in degrees * 10
"""
v = Command.data # angel request in radians
self.left_rotate = v
v1 =int((degrees(v))*10)+200 #(1023 -((v + 4.5) * 195.3786081396))#convert encoder value
#if v1 < 100: v1 = 100 #degrees * 10
#if v1 > 1000: v1 = 1000 #degrees * 10
message = 'j7 %d \r' % (v1)#% self._GetBaseAndExponents((v1)
rospy.logwarn("Sending left_arm_rotate_joint command : " + (message))
self._WriteSerial(message)
def _HandleJoint_7_Command(self, Command):
""" Handle movement requests.
right_arm_elbow_joint
send message in degrees * 10
"""
v = Command.data # angel request in radians
self.right_elbow = v
v1 =int(1023 -((v + 2.6) * 195.3786081396))#convert encoder value
if v1 < 100: v1 = 100 #degrees * 10
if v1 > 1000: v1 = 1000 #degrees * 10
message = 'j6 %d \r' % (v1)#% self._GetBaseAndExponents((v1)
rospy.logwarn("Sending right_arm_elbow_joint command: " + (message))
self._WriteSerial(message)
def _HandleJoint_8_Command(self, Command):
""" Handle movement requests.
pan_joint
send message in degrees * 10
"""
v = Command.data # angel request in radians
v1 =int(1023 -((v + 2.6) * 195.3786081396))#convert encoder value
if v1 < 100: v1 = 100 #degrees * 10
if v1 > 1000: v1 = 1000 #degrees * 10
message = 'j8 %d \r' % (v1)#% self._GetBaseAndExponents((v1)
rospy.logwarn("Sending pan_joint command: " + (message))
self._WriteSerial(message)
def _GetBaseAndExponent(self, floatValue, resolution=4):
'''
Converts a float into a tuple holding two integers:
The base, an integer with the number of digits equaling resolution.
The exponent indicating what the base needs to multiplied with to get
back the original float value with the specified resolution.
'''
if (floatValue == 0.0):
return (0, 0)
else:
exponent = int(1.0 + math.log10(abs(floatValue)))
multiplier = math.pow(10, resolution - exponent)
base = int(floatValue * multiplier)
return(base, exponent - resolution)
def _GetBaseAndExponents(self, floatValues, resolution=4):
'''
Converts a list or tuple of floats into a tuple holding two integers for each float:
The base, an integer with the number of digits equaling resolution.
The exponent indicating what the base needs to multiplied with to get
back the original float value with the specified resolution.
'''
baseAndExponents = []
for floatValue in floatValues:
baseAndExponent = self._GetBaseAndExponent(floatValue)
baseAndExponents.append(baseAndExponent[0])
baseAndExponents.append(baseAndExponent[1])
return tuple(baseAndExponents)
if __name__ == '__main__':
    # Run the node until ROS shuts us down; stop the serial link cleanly.
    node = R_shoulder()
    try:
        node.Start()
        rospy.spin()
    except rospy.ROSInterruptException:
        node.Stop()
| |
#!/usr/bin/env python
import sys
import os
import json
import argparse
import subprocess
import logging
import logging.handlers
import time
import re
import requests
import tenacity
from pexpect import pxssh
import pexpect
from aeon.opx.device import Device
from paramiko import AuthenticationException
from paramiko.ssh_exception import NoValidConnectionsError
from aeon.exceptions import LoginNotReadyError
# Default timing parameters (seconds) for the bootstrap process.
_DEFAULTS = {
    'init-delay': 5,  # wait before starting the bootstrap conversation
    'reload-delay': 10 * 60,  # how long to retry reconnecting after a reload
}
# ##### -----------------------------------------------------------------------
# #####
# ##### Command Line Arguments
# #####
# ##### -----------------------------------------------------------------------
def cli_parse(cmdargs=None):
    """Parse command-line arguments for the OPX bootstrapper.

    Args:
        cmdargs (list): argument vector; defaults to sys.argv[1:].

    Returns:
        argparse.Namespace with the parsed options.
    """
    psr = argparse.ArgumentParser(
        prog='opx_bootstrap',
        description="Aeon-ZTP bootstrapper for OPX",
        add_help=True)

    psr.add_argument(
        '--target', required=True,
        help='hostname or ip_addr of target device')

    psr.add_argument(
        '--server', required=True,
        help='Aeon-ZTP host:port')

    psr.add_argument(
        '--topdir', required=True,
        help='Aeon-ZTP install directory')

    psr.add_argument(
        '--logfile',
        help='name of log file')

    psr.add_argument(
        '--reload-delay',
        type=int, default=_DEFAULTS['reload-delay'],
        # BUG FIX: help text read "about of time/s"
        help="amount of time/s to try to reconnect to device after reload")

    psr.add_argument(
        '--init-delay',
        type=int, default=_DEFAULTS['init-delay'],
        help="amount of time/s to wait before starting the bootstrap process")

    # ##### -------------------------
    # ##### authentication
    # ##### -------------------------

    group = psr.add_argument_group('authentication')

    group.add_argument(
        '--user', help='login user-name')

    group.add_argument(
        '-U', '--env-user',
        help='Username environment variable')

    group.add_argument(
        '-P', '--env-passwd',
        required=True,
        help='Passwd environment variable')

    return psr.parse_args(cmdargs)
class OpxBootstrap(object):
    def __init__(self, server, cli_args):
        """Prepare logging, credentials, and run parameters.

        Args:
            server (str): Aeon-ZTP 'host:port' used for all REST calls.
            cli_args (argparse.Namespace): parsed command-line options.
        """
        self.server = server
        self.cli_args = cli_args
        self.target = self.cli_args.target
        self.os_name = 'opx'
        self.progname = '%s-bootstrap' % self.os_name
        self.logfile = self.cli_args.logfile
        self.log = self.setup_logging(logname=self.progname, logfile=self.logfile)
        # get_user_and_passwd exits the process if credentials are missing.
        self.user, self.passwd = self.get_user_and_passwd()
        # Filled in later by check_os_install_and_finally / wait_for_device.
        self.image_name = None
        self.finally_script = None
        self.dev = None
def setup_logging(self, logname, logfile=None):
log = logging.getLogger(name=logname)
log.setLevel(logging.INFO)
fmt = logging.Formatter(
'%(name)s %(levelname)s {target}: %(message)s'
.format(target=self.target))
if logfile:
handler = logging.FileHandler(self.logfile)
else:
handler = logging.handlers.SysLogHandler(address='/dev/log')
handler.setFormatter(fmt)
log.addHandler(handler)
return log
    def get_ssh_session(self, user=None, password=None, onie=False, sudo=False):
        """Open a pxssh session to the target device.

        Args:
            user (str): login user name.
            password (str): login password; key-based auth when omitted.
            onie (bool): True when the device is in ONIE rescue mode --
                skips prompt reset and matches the ONIE prompt instead.
            sudo (bool): escalate to root after login; on failure the
                process exits via exit_results.

        Returns:
            The logged-in pxssh session.
        """
        ssh = pxssh.pxssh(options={"StrictHostKeyChecking": "no", "UserKnownHostsFile": "/dev/null"})
        # Uncomment for debugging when running opx_bootstrap.py from bash
        # ssh.logfile = sys.stdout
        if password:
            ssh.login(self.target, user, password=password, auto_prompt_reset=not onie)
        else:
            ssh.login(self.target, user, auto_prompt_reset=not onie)
        if onie:
            ssh.PROMPT = 'ONIE:.*#'
        if sudo:
            rootprompt = re.compile('root@.*[#]')
            ssh.sendline('sudo -s')
            # Either we get a root prompt directly or sudo asks for a password.
            i = ssh.expect([rootprompt, 'assword.*: '])
            if i == 0:
                # Password not required
                pass
            elif i == 1:
                # Sending sudo password
                ssh.sendline(self.passwd)
                j = ssh.expect([rootprompt, 'Sorry, try again'])
                if j == 0:
                    pass
                elif j == 1:
                    errmsg = 'Bad sudo password.'
                    self.exit_results(results=dict(
                        ok=False,
                        error_type='install',
                        message=errmsg))
            else:
                errmsg = 'Unable to obtain root privileges.'
                self.exit_results(results=dict(
                    ok=False,
                    error_type='install',
                    message=errmsg))
            # Re-learn the (now root) shell prompt and confirm escalation.
            ssh.set_unique_prompt()
            ssh.sendline('whoami')
            ssh.expect('root')
            self.log.info('Logged in as root')
        ssh.sendline('\n')
        ssh.prompt()
        return ssh
# ##### -----------------------------------------------------------------------
# #####
# ##### REST API functions
# #####
# ##### -----------------------------------------------------------------------
def post_device_facts(self):
facts = self.dev.facts
facts['ip_addr'] = self.dev.target
facts = json.dumps(facts)
dev_data = dict(
ip_addr=self.dev.target,
serial_number=self.dev.facts['serial_number'],
hw_model=self.dev.facts['hw_model'],
os_version=self.dev.facts['os_version'],
os_name=self.os_name,
facts=facts)
dev_data['image_name'] = self.image_name
dev_data['finally_script'] = self.finally_script
requests.put(url='http://%s/api/devices/facts' % self.server, json=dev_data)
def post_device_status(self, message=None, state=None):
if not (self.dev or self.target):
self.log.error('Either dev or target is required to post device status. Message was: {}'.format(message))
return
requests.put(
url='http://%s/api/devices/status' % self.server,
json=dict(
os_name=self.os_name,
ip_addr=self.target or self.dev.target,
state=state, message=message))
# ##### -----------------------------------------------------------------------
# #####
# ##### Utility Functions
# #####
# ##### -----------------------------------------------------------------------
def exit_results(self, results, exit_error=None):
if results['ok']:
msg = 'bootstrap completed OK'
self.post_device_status(message=msg, state='DONE')
self.log.info(msg)
sys.exit(0)
else:
msg = results['message']
self.post_device_status(message=msg, state='FAILED')
self.log.error(msg)
sys.exit(exit_error or 1)
def get_user_and_passwd(self):
user = self.cli_args.user or os.getenv(self.cli_args.env_user)
passwd = os.getenv(self.cli_args.env_passwd)
if not user:
errmsg = "login user-name missing"
self.log.error(errmsg)
self.exit_results(results=dict(
ok=False,
error_type='login',
message=errmsg))
if not passwd:
errmsg = "login user-password missing"
self.log.error(errmsg)
self.exit_results(results=dict(
ok=False,
error_type='login',
message=errmsg))
return user, passwd
    def wait_for_device(self, countdown, poll_delay, msg=None):
        """Poll until the target is reachable over SSH, then record the Device.

        Args:
            countdown (int): total seconds to keep retrying connection errors.
            poll_delay (int): seconds between attempts; also the per-attempt
                timeout handed to Device.
            msg (str): optional fixed status message; when omitted a default
                including the remaining time is generated each attempt.

        Exits via exit_results on authentication failure, or when the
        countdown is exhausted by connection/not-ready errors.
        """
        dev = None
        # first we need to wait for the device to be 'reachable' via the API.
        # we'll use the probe error to detect if it is or not
        while not dev:
            new_msg = msg or 'Waiting for device access via SSH. Timeout remaining: {} seconds'.format(countdown)
            self.post_device_status(message=new_msg, state='AWAIT-ONLINE')
            self.log.info(new_msg)
            try:
                dev = Device(self.target, user=self.user, passwd=self.passwd,
                             timeout=poll_delay)
            except AuthenticationException as e:
                # Bad credentials will not fix themselves -- abort immediately.
                self.log.info('Authentication exception reported: {} \n args: {}'.format(e, e.args))
                self.exit_results(results=dict(
                    ok=False,
                    error_type='login',
                    message='Unauthorized - check user/password'))
            except NoValidConnectionsError as e:
                countdown -= poll_delay
                if countdown <= 0:
                    self.exit_results(results=dict(
                        ok=False,
                        error_type='login',
                        message='Failed to connect to target %s within reload countdown' % self.target))
            except LoginNotReadyError as e:
                countdown -= poll_delay
                if countdown <= 0:
                    self.exit_results(results=dict(
                        ok=False,
                        error_type='login',
                        message='Failed to connect to target %s within reload countdown' % self.target))
            # NOTE(review): this sleep also runs after a successful connect,
            # adding one extra poll_delay before the loop exits.
            time.sleep(poll_delay)
        self.dev = dev
        self.post_device_facts()
    def wait_for_onie_rescue(self, countdown, poll_delay, user='root'):
        """Polls for SSH access to OPX device in ONIE rescue mode.

        Args:
            countdown (int): Countdown in seconds to wait for device to become reachable.
            poll_delay (int): Countdown in seconds between poll attempts.
            user (str): SSH username to use. Defaults to 'root'.

        Returns:
            True once an ONIE-rescue SSH session is established; otherwise
            exits the process via exit_results.
        """
        while countdown >= 0:
            try:
                msg = 'OPX installation in progress. Waiting for ONIE rescue mode. Timeout remaining: {} seconds'.format(
                    countdown)
                self.post_device_status(message=msg, state='AWAIT-ONLINE')
                self.log.info(msg)
                ssh = pxssh.pxssh(options={"StrictHostKeyChecking": "no", "UserKnownHostsFile": "/dev/null"})
                ssh.login(self.target, user, auto_prompt_reset=False)
                ssh.PROMPT = 'ONIE:.*#'
                ssh.sendline('\n')
                ssh.prompt()
                return True
            except (pexpect.pxssh.ExceptionPxssh, pexpect.exceptions.EOF) as e:
                # Connection refused / dropped just means the device is still
                # rebooting; burn poll_delay off the countdown and retry.
                if (str(e) == 'Could not establish connection to host') or isinstance(e, pexpect.exceptions.EOF):
                    countdown -= poll_delay
                    time.sleep(poll_delay)
                else:
                    self.log.error('Error accessing {} in ONIE rescue mode: {}.'.format(self.target, str(e)))
                    self.exit_results(results=dict(
                        ok=False,
                        error_type='login',
                        message='Error accessing {} in ONIE rescue mode: {}.'.format(self.target, str(e))))
        # while/else: runs only when the countdown expires without a login.
        else:
            self.log.error('Device {} not reachable in ONIE rescue mode within reload countdown.'.format(self.target))
            self.exit_results(results=dict(
                ok=False,
                error_type='login',
                message='Device {} not reachable in ONIE rescue mode within reload countdown.'.format(self.target)))
# ##### -----------------------------------------------------------------------
# #####
# ##### OS install process
# #####
# ##### -----------------------------------------------------------------------
    def check_os_install_and_finally(self):
        """Run the aztp_os_selector helper against the device facts.

        Stores the selected image_name / finally_script on self, re-posts
        the device facts, and returns the parsed selector results. Exits
        via exit_results when the helper's stdout is not valid JSON.
        """
        profile_dir = os.path.join(self.cli_args.topdir, 'etc', 'profiles', self.os_name)
        conf_fpath = os.path.join(profile_dir, 'os-selector.cfg')
        # Device facts are handed to the selector as a JSON argument.
        cmd = "{topdir}/bin/aztp_os_selector.py -j '{dev_json}' -c {config}".format(
            topdir=self.cli_args.topdir,
            dev_json=json.dumps(self.dev.facts),
            config=conf_fpath)
        self.log.info('os-select: [%s]' % cmd)
        child = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
        _stdout, _stderr = child.communicate()
        self.log.info('os-select rc={}, stdout={}'.format(child.returncode, _stdout))
        self.log.info('os-select stderr={}'.format(_stderr))
        try:
            results = json.loads(_stdout)
            image_name = results.get('image_name', None)
            finally_script = results.get('finally', None)
            self.image_name = image_name
            self.finally_script = finally_script
            self.post_device_facts()
            return results
        except Exception as exc:
            errmsg = 'Unable to load os-select output as JSON: {}\n {}'.format(_stdout, str(exc))
            self.exit_results(results=dict(
                ok=False,
                error_type='install',
                message=errmsg
            ), exit_error=errmsg)
    # Cannot mock out retry decorator in unittest.
    # Retry wrapper function around do_onie_install to avoid long unittest times.
    # NOTE(review): tenacity.wait_fixed takes seconds, so 15000 waits ~4.2
    # hours between attempts -- 15 was presumably intended; confirm.
    @tenacity.retry(wait=tenacity.wait_fixed(15000), stop=tenacity.stop_after_attempt(3))
    def onie_install(self, *args, **kwargs):
        # Retrying (3 attempts) pass-through to do_onie_install.
        self.do_onie_install(**kwargs)
def do_onie_install(self, user='root'):
"""Initiates install in ONIE-RESCUE mode.
Args:
dev (Device object): OPX device object
user (str): ONIE rescue mode user
"""
msg = 'Beginning OPX download and installation.'
self.post_device_status(message=msg, state='ONIE-RESCUE')
self.log.info(msg)
ssh = self.get_ssh_session(user=user, onie=True)
def start_installation():
# Start installation process
ssh.sendline('onie-nos-install http://{server}/images/{os_name}/{image_name}'
.format(server=self.cli_args.server, os_name=self.os_name, image_name=self.image_name))
# 'installer' means that the download has started
ssh.expect('installer', timeout=30)
msg = 'OPX image download has started. Will timeout if not completed within 10 minutes.'
self.log.info(msg)
self.post_device_status(message=msg, state='OS-INSTALL')
check_install_status()
msg = 'OPX download complete. Executing installer. Will timeout if not completed within 20 minutes.'
self.log.info(msg)
self.post_device_status(message=msg, state='OS-INSTALL')
# Indicates that the image has been downloaded and verified
ssh.expect('Installation finished. No error reported.', timeout=20 * 60)
ssh.prompt()
ssh.sendline('reboot')
msg = 'OPX download completed and verified, reboot initiated.'
self.log.info(msg)
self.post_device_status(message=msg, state='OS-INSTALL')
ssh.close()
@tenacity.retry(wait=tenacity.wait_fixed(5),
stop=tenacity.stop_after_delay(10 * 60),
retry=tenacity.retry_if_exception(pexpect.exceptions.TIMEOUT))
def check_install_status():
"""
Check to see that either the install has started, or that the download has timed out.
:return:
"""
# 'Executing installer' means that the download has finished
i = ssh.expect(['Verifying image checksum...OK', 'download timed out'], timeout=5)
if i == 0:
pass
if i == 1:
msg = 'Download timed out: http://{server}/images/{os_name}/{image_name}'.format(
server=self.cli_args.server, os_name=self.os_name, image_name=self.image_name)
self.log.info(msg)
self.exit_results(results=dict(ok=False, error_type='install', message=msg))
try:
start_installation()
except pxssh.ExceptionPxssh as e:
self.log.info(str(e))
self.exit_results(results=dict(ok=False, error_type='install', message=str(e)))
    def install_os(self):
        """Reboot the device into ONIE rescue mode and install self.image_name.

        Exits via exit_results when the image file is missing locally or the
        SSH conversation fails.
        """
        vendor_dir = os.path.join(self.cli_args.topdir, 'vendor_images', self.os_name)
        image_fpath = os.path.join(vendor_dir, self.image_name)
        if not os.path.exists(image_fpath):
            errmsg = 'Image file does not exist: %s' % image_fpath
            self.log.error(errmsg)
            self.exit_results(results=dict(
                ok=False, error_type='install',
                message=errmsg))
        msg = 'Installing OPX image=[%s] ... this can take up to 30 min.' % self.image_name
        self.log.info(msg)
        self.post_device_status(message=msg, state='OS-INSTALL')
        try:
            ssh = self.get_ssh_session(user=self.user, password=self.passwd, sudo=True)
            # Arrange for the next boot to land in ONIE rescue mode.
            ssh.sendline('grub-reboot --boot-directory=/mnt/boot ONIE')
            ssh.prompt()
            ssh.sendline('/mnt/onie-boot/onie/tools/bin/onie-boot-mode -o rescue')
            ssh.prompt()
            ssh.sendline('reboot')
        except pxssh.ExceptionPxssh as e:
            self.log.info(str(e))
            self.exit_results(results=dict(ok=False, error_type='install', message=str(e)))
        msg = 'Booting into ONIE rescue mode to install OS: %s' % self.image_name
        self.log.info(msg)
        self.post_device_status(message=msg, state='OS-INSTALL')
        time.sleep(60)
        # Wait for ONIE rescue mode
        self.wait_for_onie_rescue(countdown=300, poll_delay=10, user='root')
        # Download and verify OS
        self.onie_install()
        # Wait for onie-rescue shell to terminate
        time.sleep(60)
        # Wait for device to come back online after OS install
        self.wait_for_device(countdown=10 * 60, poll_delay=30)
def ensure_os_version(self):
self.check_os_install_and_finally()
if not self.image_name:
self.log.info('no software install required')
return self.dev
self.log.info('software image install required: %s' % self.image_name)
self.install_os()
self.log.info('software install OK')
# ##### -----------------------------------------------------------------------
# #####
# ##### !!! MAIN !!!
# #####
# ##### -----------------------------------------------------------------------
def main():
    """Command-line entry point: bootstrap one OPX device end to end."""
    cli_args = cli_parse()
    self_server = cli_args.server
    opxboot = OpxBootstrap(self_server, cli_args)
    if not os.path.isdir(cli_args.topdir):
        opxboot.exit_results(dict(
            ok=False,
            error_type='args',
            message='{} is not a directory'.format(cli_args.topdir)))
    opxboot.post_device_status(message='bootstrap started, waiting for device access', state='START')
    opxboot.wait_for_device(countdown=cli_args.reload_delay, poll_delay=10, msg='Waiting for device access')
    # Give the device time to stabilize since SSH may not be reliable yet.
    time.sleep(30)
    opxboot.log.info("proceeding with bootstrap")
    # Virtual devices cannot go through ONIE, so only run the os-selector.
    if opxboot.dev.facts['virtual']:
        opxboot.log.info('Virtual device. No OS upgrade necessary.')
        opxboot.check_os_install_and_finally()
    else:
        opxboot.ensure_os_version()
    opxboot.log.info("bootstrap process finished")
    opxboot.exit_results(dict(ok=True))
if __name__ == '__main__':
    main()
| |
from __future__ import print_function
import time
import os
import sys
# PyObjC bridge imports: Cocoa classes used directly by the test windows.
from Foundation import NSObject, NSURL, NSString
from AppKit import NSApplication, NSView, NSColor, NSImage, NSCursor, NSSegmentedControl, NSSegmentSwitchTrackingSelectOne, NSRectFill, NSToolbarPrintItemIdentifier, NSToolbarFlexibleSpaceItemIdentifier, NSToolbarCustomizeToolbarItemIdentifier, NSImageNameInfo
import vanilla
# Reload vanilla so library edits are picked up when this test is re-run
# from a live interpreter session.
try:
    reload(vanilla)
except NameError:
    # the built-in 'reload' was moved to importlib with Python 3.4
    from importlib import reload
    reload(vanilla)
from vanilla import *
from vanilla.test.testStackView import TestStackView
from vanilla.test.testGridView import TestGridView
import objc
objc.setVerbose(True)
# Locate the bundled test icon relative to the installed vanilla package;
# fall back to the standard "Info" system image when it is absent.
vanillaPath = os.path.realpath(vanilla.__file__)
vanillaPath = os.path.dirname(os.path.dirname(os.path.dirname(vanillaPath)))
iconPath = os.path.join(vanillaPath, "Data", "testIcon.tif")
iconName = None
if not os.path.exists(iconPath):
    iconPath = None
    iconName = NSImageNameInfo
sizeStyles = ["regular", "small", "mini"]  # control sizes exercised below
listOptions = list(sys.modules.keys())  # arbitrary strings for list widgets
sortedListOptions = sorted(listOptions)
class BaseTest(object):

    """Shared helpers for the interactive vanilla test windows."""

    def drawGrid(self):
        """Overlay a 10pt alignment grid of hair-lines on self.w."""
        width, height = self.w.getPosSize()[2:]
        step = 10
        for col in range(1, int(width / step)):
            line = VerticalLine((step * col, 0, 1, height))
            setattr(self.w, "vline%d" % col, line)
        for row in range(1, int(height / step)):
            line = HorizontalLine((0, step * row, width, 1))
            setattr(self.w, "hline%d" % row, line)

    def basicCallback(self, sender):
        print(sender)

    def titleCallback(self, sender):
        print(sender, sender.getTitle())

    def getCallback(self, sender):
        print(sender, sender.get())
class WindowTest(BaseTest):

    """Demo window exercising windows, sheets, drawers, and floating windows."""

    def __init__(self, textured=False):
        # Alternate the textured appearance each time a sibling is spawned.
        self.textured = textured
        self.w = Window((200, 130), "Window Test", textured=textured)
        self.w.windowButton = Button((10, 10, -10, 20), "Window", callback=self.windowCallback)
        self.w.sheetButton = Button((10, 40, -10, 20), "Sheet", callback=self.sheetCallback)
        self.w.drawerButton = Button((10, 70, -10, 20), "Drawer", callback=self.drawerCallback)
        self.w.floatButton = Button((10, 100, -10, 20), "Floating Window", callback=self.floatCallback)
        self.w.open()

    def windowCallback(self, sender):
        # Spawn a new test window with the opposite textured setting.
        WindowTest(not self.textured)

    def sheetCallback(self, sender):
        self.sheet = Sheet((300, 100), self.w)
        self.sheet.closeButton = Button((10, -30, -10, 20), "Close", callback=self._closeSheet)
        self.sheet.open()

    def _closeSheet(self, sender):
        self.sheet.close()
        del self.sheet

    def drawerCallback(self, sender):
        # Drawers are created lazily on first use, one per window edge.
        if not hasattr(self, "drawer1"):
            self.drawer1 = Drawer((50, 50), self.w, preferredEdge="left")
            self.drawer2 = Drawer((50, 50), self.w, preferredEdge="top")
            self.drawer3 = Drawer((50, 50), self.w, preferredEdge="right")
            self.drawer4 = Drawer((50, 50), self.w, preferredEdge="bottom")
        self.drawer1.toggle()
        self.drawer2.toggle()
        self.drawer3.toggle()
        self.drawer4.toggle()

    def floatCallback(self, sender):
        floater = FloatingWindow((100, 100))
        floater.open()
class TextTest(BaseTest):

    # Exercises the text controls (TextBox, SearchBox, EditText, ComboBox,
    # TextEditor) in each size style.

    def __init__(self, drawGrid=False):
        self.w = Window((440, 190), "Text Test")
        _top = 10
        top = _top
        # First column: static text, then search boxes. The per-style
        # heights follow the Aqua control metrics.
        textSizeStyles = [("regular", 17), ("small", 14), ("mini", 11)]
        for sizeStyle, height in textSizeStyles:
            attrName = "TextBox_%s" % sizeStyle
            button = TextBox((10, top, 100, height), attrName, sizeStyle=sizeStyle)
            setattr(self.w, attrName, button)
            top += 30
        textSizeStyles = [("regular", 22), ("small", 19), ("mini", 15)]
        for sizeStyle, height in textSizeStyles:
            attrName = "SearchBox_%s" % sizeStyle
            button = SearchBox((10, top, 100, height), attrName, callback=self.getCallback, sizeStyle=sizeStyle)
            setattr(self.w, attrName, button)
            top += 30
        # Second column: editable text fields and combo boxes.
        top = _top
        textSizeStyles = [("regular", 22), ("small", 19), ("mini", 16)]
        for sizeStyle, height in textSizeStyles:
            attrName = "EditText_%s" % sizeStyle
            button = EditText((120, top, 100, height), attrName, callback=self.getCallback, sizeStyle=sizeStyle)
            setattr(self.w, attrName, button)
            top += 30
        textSizeStyles = [("regular", 21), ("small", 17), ("mini", 14)]
        for sizeStyle, height in textSizeStyles:
            attrName = "ComboBox_%s" % sizeStyle
            button = ComboBox((120, top, 100, height), items=listOptions, callback=self.getCallback, sizeStyle=sizeStyle)
            setattr(self.w, attrName, button)
            top += 30
        # Multi-line editor filled with a handy long text fixture.
        self.w.TextEditor = TextEditor((240, 10, 190, 170), sys.copyright, callback=self.getCallback)
        if drawGrid:
            self.drawGrid()
        self.w.open()
class ButtonTest(BaseTest):

    """Exercises buttons, segmented controls, sliders, pop-ups, checkboxes,
    level indicators and path controls in every size style."""

    def __init__(self, drawGrid=False):
        self.w = Window((440, 800), "Button Test")
        _top = 10
        top = _top
        # -- push buttons, one per size style ---------------------------
        buttonSizeStyles = [("regular", 20), ("small", 17), ("mini", 14)]
        for sizeStyle, height in buttonSizeStyles:
            attrName = "Button_%s" % sizeStyle
            button = Button((10, top, 150, height), attrName, callback=self.titleCallback, sizeStyle=sizeStyle)
            setattr(self.w, attrName, button)
            top += 30
        height = 20
        for sizeStyle in sizeStyles:
            attrName = "SquareButton_%s" % sizeStyle
            button = SquareButton((10, top, 150, height), attrName, callback=self.titleCallback, sizeStyle=sizeStyle)
            setattr(self.w, attrName, button)
            top += 30
        # -- image buttons: (title, bordered, imagePosition) combinations
        settings = [(None, False, "top"),
                    (None, True, "top"),
                    ("bop", True, "top"),
                    ("bop", True, "bottom"),
                    ("bop", True, "left"),
                    ("bop", True, "right"),]
        for title, bordered, imagePosition in settings:
            attrName = "ImageButton_%s_%s_%s" % (title, bordered, imagePosition)
            button = ImageButton((10, top, 150, 50), title=title, imagePath=iconPath, imageNamed=iconName, bordered=bordered, imagePosition=imagePosition, callback=self.basicCallback)
            setattr(self.w, attrName, button)
            top += 60
        # -- segmented buttons (third segment disabled) ------------------
        segmentedControlSizeStyles = [("regular", 20), ("small", 17), ("mini", 14)]
        descriptions = [{"title":"One"}, {"title":"Two"}, {"title":"3", "enabled":False}]
        for sizeStyle, height in segmentedControlSizeStyles:
            attrName = "SegmentedButton_%s" % sizeStyle
            button = SegmentedButton((10, top, 150, height), descriptions, callback=None, sizeStyle=sizeStyle)
            setattr(self.w, attrName, button)
            top += 30
        # -- vertical sliders: no ticks, right ticks, left ticks ---------
        top = _top
        _left = 170
        left = _left
        height = 100
        sliderSizeStyles = [("regular", 15), ("small", 11), ("mini", 10)]
        for sizeStyle, width in sliderSizeStyles:
            attrName = "VSlider_noTicks_%s" % sizeStyle
            button = Slider((left, top, width, height), 0, 100, callback=self.getCallback, sizeStyle=sizeStyle)
            setattr(self.w, attrName, button)
            left += 30
        sliderSizeStyles = [("regular", 23), ("small", 17), ("mini", 16)]
        for sizeStyle, width in sliderSizeStyles:
            attrName = "VSlider_rightTicks_%s" % sizeStyle
            button = Slider((left, top, width, height), 0, 100, tickMarkCount=10, callback=self.getCallback, sizeStyle=sizeStyle)
            setattr(self.w, attrName, button)
            left += 30
        for sizeStyle, width in sliderSizeStyles:
            attrName = "VSlider_leftTicks_%s" % sizeStyle
            button = Slider((left, top, width, height), 0, 100, tickMarkCount=10, callback=self.getCallback, sizeStyle=sizeStyle)
            button.setTickMarkPosition("left")
            setattr(self.w, attrName, button)
            left += 30
        # -- horizontal sliders: no ticks, below ticks, above ticks ------
        left = _left
        width = 260
        top = 130
        sliderSizeStyles = [("regular", 15), ("small", 12), ("mini", 10)]
        for sizeStyle, height in sliderSizeStyles:
            attrName = "HSlider_noTicks_%s" % sizeStyle
            button = Slider((left, top, width, height), 0, 100, callback=self.getCallback, sizeStyle=sizeStyle)
            setattr(self.w, attrName, button)
            top += 30
        sliderSizeStyles = [("regular", 24), ("small", 17), ("mini", 16)]
        for sizeStyle, height in sliderSizeStyles:
            attrName = "HSlider_belowTicks_%s" % sizeStyle
            button = Slider((left, top, width, height), 0, 100, tickMarkCount=30, callback=self.getCallback, sizeStyle=sizeStyle)
            setattr(self.w, attrName, button)
            top += 30
        for sizeStyle, height in sliderSizeStyles:
            attrName = "HSlider_aboveTicks_%s" % sizeStyle
            button = Slider((left, top, width, height), 0, 100, tickMarkCount=30, callback=self.getCallback, sizeStyle=sizeStyle)
            button.setTickMarkPosition("top")
            setattr(self.w, attrName, button)
            top += 30
        # -- pop-up buttons and checkboxes -------------------------------
        _top = top
        popupSizeStyles = [("regular", 20), ("small", 17), ("mini", 15)]
        width = 120
        for sizeStyle, height in popupSizeStyles:
            attrName = "PopUpButton_%s" % sizeStyle
            button = PopUpButton((left, top, width, height), listOptions, callback=self.getCallback, sizeStyle=sizeStyle)
            setattr(self.w, attrName, button)
            top += 30
        top = _top
        checkboxSizeStyles = [("regular", 22), ("small", 18), ("mini", 11)]
        width = 125
        left = 300
        # BUG FIX: this loop previously iterated popupSizeStyles, so
        # checkboxSizeStyles was defined but never used and the checkboxes
        # were created with pop-up-button heights.
        for sizeStyle, height in checkboxSizeStyles:
            attrName = "CheckBox_%s" % sizeStyle
            button = CheckBox((left, top, width, height), attrName, callback=self.getCallback, sizeStyle=sizeStyle)
            setattr(self.w, attrName, button)
            top += 30
        # -- level indicators: discrete column, continuous column --------
        _top = top
        left = _left
        width = 120
        self.w.DLevelIndicator = LevelIndicator((left, top, width, 18), style="discrete",
                                                value=5, warningValue=7, criticalValue=9,
                                                callback=self.getCallback)
        top += 30
        self.w.DLevelIndicator_ticksAbove = LevelIndicator((left, top, width, 25), style="discrete",
                                                           value=5, warningValue=7, criticalValue=9,
                                                           tickMarkPosition="above", minorTickMarkCount=5, majorTickMarkCount=3, callback=self.getCallback)
        top += 30
        self.w.DLevelIndicator_ticksBelow = LevelIndicator((left, top, width, 25), style="discrete",
                                                           value=5, warningValue=7, criticalValue=9,
                                                           tickMarkPosition="below", minorTickMarkCount=5, majorTickMarkCount=3, callback=self.getCallback)
        left = 300
        top = _top
        width = 120
        self.w.CLevelIndicator = LevelIndicator((left, top, width, 16), style="continuous",
                                                value=5, warningValue=7, criticalValue=9,
                                                callback=self.getCallback)
        top += 30
        self.w.CLevelIndicator_ticksAbove = LevelIndicator((left, top, width, 23), style="continuous",
                                                           value=5, warningValue=7, criticalValue=9,
                                                           tickMarkPosition="above", minorTickMarkCount=5, majorTickMarkCount=3, callback=self.getCallback)
        top += 30
        self.w.CLevelIndicator_ticksBelow = LevelIndicator((left, top, width, 23), style="continuous",
                                                           value=5, warningValue=7, criticalValue=9,
                                                           tickMarkPosition="below", minorTickMarkCount=5, majorTickMarkCount=3, callback=self.getCallback)
        # -- path controls, pointing at this file -------------------------
        _top = 660
        top = _top
        pathControlSizeStyles = [("regular", 22), ("small", 20), ("mini", 18)]
        left = 10
        width = -10
        urlPath = __file__
        for sizeStyle, height in pathControlSizeStyles:
            attrName = "PathControl_%s" % sizeStyle
            button = PathControl((left, top, width, height), urlPath, callback=None, sizeStyle=sizeStyle)
            setattr(self.w, attrName, button)
            top += 30
        if drawGrid:
            self.drawGrid()
        self.w.open()
class ListTest(BaseTest):

    # Exercises List with plain items, multiple columns, and custom cell
    # types, stacked inside a horizontal SplitView.

    def __init__(self, drawGrid=False):
        self.w = Window((600, 500), "List Test", minSize=(400, 400))
        # Single-column list of loaded module names.
        simpleList = List((0, 0, 0, 0), listOptions, enableTypingSensitivity=True)
        # Two-column list: module name plus the module's file name.
        multiItems = [
            {"name": name, "path": os.path.basename(getattr(module, "__file__", "Unknown"))}
            for name, module in sys.modules.items()
        ]
        columnDescriptions = [
            {"title": "Module Name", "key": "name"},
            {"title": "File Name", "key": "path"}
        ]
        multiList = List((0, 0, 0, 0), multiItems, columnDescriptions=columnDescriptions, enableTypingSensitivity=True)
        # Image fixture for the ImageListCell column (file or named image,
        # matching the module-level icon fallback).
        if iconPath:
            image = NSImage.alloc().initWithContentsOfFile_(iconPath)
        else:
            image = NSImage.imageNamed_(iconName)
        # List mixing slider, checkbox, image and segmented-button cells.
        miscItems = [
            {"slider": 50, "sliderWithTicks" : 50, "checkBox": False, "image" : image, "segment" : 0},
            {"slider": 20, "sliderWithTicks" : 20, "checkBox": True, "image" : image, "segment" : 1},
            {"slider": 70, "sliderWithTicks" : 70, "checkBox": False, "image" : image, "segment" : 2},
            {"slider": 20, "sliderWithTicks" : 20, "checkBox": True, "image" : image, "segment" : 0},
            {"slider": 10, "sliderWithTicks" : 10, "checkBox": True, "image" : image, "segment" : 1},
            {"slider": 90, "sliderWithTicks" : 90, "checkBox": False, "image" : image, "segment" : 2},
        ]
        columnDescriptions = [
            {"title": "SliderListCell", "key": "slider", "cell": SliderListCell()},
            {"title": "SliderListCell", "key": "sliderWithTicks", "cell": SliderListCell(tickMarkCount=10, stopOnTickMarks=True)},
            {"title": "CheckBoxListCell", "key": "checkBox", "cell": CheckBoxListCell()},
            {"title": "ImageListCell", "key": "image", "cell": ImageListCell()},
            {"title": "SegmentedButtonListCell", "key": "segment", "cell": SegmentedButtonListCell([dict(title="0"), dict(title="1"), dict(title="2")]), "binding": "selectedIndex"},
        ]
        miscCellList = List((0, 0, 0, 0), items=miscItems, columnDescriptions=columnDescriptions)
        paneDescriptions = [
            dict(view=simpleList, identifier="simpleList"),
            dict(view=multiList, identifier="multiList"),
            dict(view=miscCellList, identifier="miscCellList"),
        ]
        # only add the ListIndicator tests if the controls are available
        # (LevelIndicatorListCell raises NameError on older vanilla).
        try:
            listIndicatorItems = [
                {"discrete": 3, "continuous": 4, "rating": 1, "relevancy": 9},
                {"discrete": 8, "continuous": 3, "rating": 5, "relevancy": 5},
                {"discrete": 3, "continuous": 7, "rating": 3, "relevancy": 4},
                {"discrete": 2, "continuous": 5, "rating": 4, "relevancy": 7},
                {"discrete": 6, "continuous": 9, "rating": 3, "relevancy": 2},
                {"discrete": 4, "continuous": 0, "rating": 6, "relevancy": 8},
            ]
            columnDescriptions = [
                {"title": "discrete",
                 "cell": LevelIndicatorListCell(style="discrete", warningValue=7, criticalValue=9)},
                {"title": "continuous",
                 "cell": LevelIndicatorListCell(style="continuous", warningValue=7, criticalValue=9)},
                {"title": "rating",
                 "cell": LevelIndicatorListCell(style="rating", maxValue=6)},
                {"title": "relevancy",
                 "cell": LevelIndicatorListCell(style="relevancy")},
            ]
            levelIndicatorList = List((0, 0, 0, 0), items=listIndicatorItems, columnDescriptions=columnDescriptions)
            paneDescriptions.append(dict(view=levelIndicatorList, identifier="levelIndicatorList"))
        except NameError:
            pass
        self.w.splitView = SplitView((0, 0, -0, -0), paneDescriptions, isVertical=False)
        if drawGrid:
            self.drawGrid()
        self.w.open()
class BrowserTest(BaseTest):

    """Window hosting an ObjectBrowser inspecting the vanilla module."""

    def __init__(self, drawGrid=False):
        self.w = Window((440, 500), "Browser Test", minSize=(400, 400))
        # Bind the package object locally so the browser has a root.
        import vanilla
        self.w.browser = ObjectBrowser((0, 0, 0, 0), vanilla)
        if drawGrid:
            self.drawGrid()
        self.w.open()
class TestCustomNSView(NSView):

    # A raw NSView that fills itself with a 5x5 grid of random colors.
    # Used to verify that plain Cocoa views can be embedded in vanilla's
    # ScrollView and SplitView wrappers.

    def viewDidEndLiveResize(self):
        self._recalcSize()

    def _recalcSize(self):
        # XXX Note that this is specific for embedding in a ScrollView,
        # it may behave strangely when used in another context.
        w, h = self.superview().visibleRect()[1]
        self.setFrame_(((0, 0), (w, h)))

    def drawRect_(self, rect):
        # Track the superview while the user drags the window edge.
        if self.inLiveResize():
            self._recalcSize()
        from random import random
        # Red base coat, then a 5x5 patchwork of random colors on top.
        NSColor.redColor().set()
        NSRectFill(self.bounds())
        width, height = self.frame()[1]
        w = width / 5
        h = height / 5
        for xI in range(5):
            for yI in range(5):
                x = xI * w
                # Flip the y origin: Cocoa coordinates grow upward.
                y = height - (yI * h) - h
                r = ((x, y), (w, h))
                NSColor.colorWithDeviceRed_green_blue_alpha_(random(), random(), random(), 1.0).set()
                NSRectFill(r)
class ViewTest(BaseTest):

    # Exercises container views: Tabs (nested), Box, ScrollView and
    # SplitView hosting custom NSViews.

    def __init__(self, drawGrid=False):
        self.w = Window((450, 350), "View Test", minSize=(350, 300))
        self.w.tabs = Tabs((10, 10, 220, 120), ["Small", "Mini"])
        self.w.tabs[0].tabs = Tabs((10, 10, -10, -10), ["One", "Two", "Three"], sizeStyle="small")
        self.w.tabs[1].tabs = Tabs((10, 10, -10, -10), ["One", "Two", "Three"], sizeStyle="mini")
        self.w.box = Box((10, 140, 220, 70), "Box")
        self.w.box.box = Box((10, 10, -10, -10))
        # Keep references to the raw NSViews on self so they outlive the
        # constructor.
        self.scrollViewNSView = TestCustomNSView.alloc().initWithFrame_(((0, 0), (500, 500)))
        self.w.scrollView = ScrollView((240, 10, 200, 200), self.scrollViewNSView, backgroundColor=NSColor.redColor())
        self.splitViewNSView1 = TestCustomNSView.alloc().initWithFrame_(((0, 0), (0, 0)))
        self.splitViewNSView2 = TestCustomNSView.alloc().initWithFrame_(((0, 0), (0, 0)))
        view1 = ScrollView((0, 0, 0, 50), self.splitViewNSView1, autohidesScrollers=True, backgroundColor=NSColor.redColor())
        view2 = ScrollView((0, 0, 0, -10), self.splitViewNSView2, autohidesScrollers=True, backgroundColor=NSColor.redColor())
        paneDescriptions = [
            dict(view=view1, identifier="view1"),
            dict(view=view2, identifier="view2"),
        ]
        self.w.splitView = SplitView((10, 220, -10, -10), paneDescriptions)
        if drawGrid:
            self.drawGrid()
        self.w.open()
class ToolbarTest(BaseTest):

    # Exercises Window.addToolbar with image items, a custom-view item and
    # the standard Cocoa toolbar items.

    def __init__(self, drawGrid=False):
        self.w = Window((350, 20), "Toolbar Test", minSize=(250, 20))
        # Raw NSSegmentedControl used as the custom-view toolbar item.
        customView = NSSegmentedControl.alloc().initWithFrame_(((0, 0), (100, 30)))
        cell = customView.cell()
        cell.setTrackingMode_(NSSegmentSwitchTrackingSelectOne)
        cell.setSegmentCount_(2)
        cell.setImage_forSegment_(NSCursor.arrowCursor().image(), 0)
        cell.setImage_forSegment_(NSCursor.crosshairCursor().image(), 1)
        customView.sizeToFit()
        # Items pass both imagePath and imageNamed; only one of the two is
        # non-None (see the module-level icon fallback).
        toolbarItems = [
            {"itemIdentifier": "Test Item One",
             "label": "Test One",
             "imagePath": iconPath,
             "imageNamed": iconName,
             "callback": self.basicCallback},
            {"itemIdentifier": "Test Item Two",
             "label": "Test Two",
             "imagePath": iconPath,
             "imageNamed": iconName,
             "callback": self.basicCallback},
            # No label here: exercises the unlabeled-item path.
            {"itemIdentifier": "Test Item Three",
             "imagePath": iconPath,
             "imageNamed": iconName,
             "callback": self.basicCallback},
            {"itemIdentifier": "Test Item Four",
             "label": "Test Four",
             "view": customView,
             "callback": self.basicCallback},
            {"itemIdentifier": NSToolbarPrintItemIdentifier, "visibleByDefault": False},
            {"itemIdentifier": NSToolbarFlexibleSpaceItemIdentifier},
            {"itemIdentifier": NSToolbarCustomizeToolbarItemIdentifier},
        ]
        self.w.addToolbar("Vanilla Test Toolbar", toolbarItems=toolbarItems)
        self.w.open()
class MiscTest(BaseTest):

    # Exercises ProgressSpinner, ProgressBar and ColorWell.

    def __init__(self, drawGrid=False):
        self.w = Window((150, 180), "Misc. Test")
        self.w.spinner1 = ProgressSpinner((10, 10, 32, 32), sizeStyle="regular")
        self.w.spinner2 = ProgressSpinner((50, 10, 16, 16), sizeStyle="small")
        self.w.bar1 = ProgressBar((10, 50, -10, 16))
        self.w.bar2 = ProgressBar((10, 70, -10, 10), isIndeterminate=True, sizeStyle="small")
        self.w.progressStartButton = Button((10, 90, -10, 20), "Start Progress", callback=self.startProgress)
        self.w.colorWell = ColorWell((10, 130, -10, -10), callback=self.getCallback, color=NSColor.redColor())
        if drawGrid:
            self.drawGrid()
        self.w.open()

    def startProgress(self, sender):
        # NOTE: the sleeps run on the main thread, so the UI blocks while
        # the determinate bar fills -- acceptable for a manual test.
        self.w.spinner1.start()
        self.w.spinner2.start()
        self.w.bar2.start()
        for i in range(10):
            self.w.bar1.increment(10)
            time.sleep(.1)
        time.sleep(.5)
        self.w.spinner1.stop()
        self.w.spinner2.stop()
        self.w.bar2.stop()
        self.w.bar1.set(0)
class _VanillaTestViewForSplitView(NSView):

    # Plain NSView filled with ``self.color`` plus a thick black border.
    # The ``color`` attribute is attached externally by TestSplitSubview.

    def drawRect_(self, rect):
        from AppKit import NSRectFill, NSBezierPath, NSColor
        self.color.set()
        NSRectFill(self.bounds())
        NSColor.blackColor().set()
        p = NSBezierPath.bezierPathWithRect_(self.bounds())
        p.setLineWidth_(10)
        p.stroke()
class TestSplitSubview(VanillaBaseObject):

    # Minimal vanilla wrapper around the colored NSView above, used as
    # SplitView pane content.

    def __init__(self, posSize, color):
        self._setupView(_VanillaTestViewForSplitView, posSize)
        self._nsObject.color = color
class TestSplitView(BaseTest):

    # Exercises nested SplitViews with collapsible and min-sized panes.

    def __init__(self, drawGrid=False):
        self.w = Window((600, 500), "", minSize=(300, 250))
        grp = Group((0, 0, 0, 0))
        grp.button = Button((10, 10, -10, 20), "Toggle", self.buttonCallback)
        self.view1 = TestSplitSubview((0, 0, 0, 0), NSColor.redColor())
        # Inner (vertical) split: collapsible red pane, button group,
        # min-sized green pane, yellow filler pane.
        paneDescriptions2 = [
            dict(view=self.view1, canCollapse=True, size=50, identifier="pane1"),
            dict(view=grp, identifier="pane2"),
            dict(view=TestSplitSubview((0, 0, 0, 0), NSColor.greenColor()), minSize=50, identifier="pane3"),
            dict(view=TestSplitSubview((0, 0, 0, 0), NSColor.yellowColor()), identifier="pane4"),
        ]
        self.nestedSplit = SplitView((0, 0, 0, 0), paneDescriptions2, isVertical=True)
        # Outer (horizontal) split: nested split above a magenta pane.
        paneDescriptions1 = [
            dict(view=self.nestedSplit, identifier="pane5"),
            dict(view=TestSplitSubview((0, 0, 0, 0), NSColor.magentaColor()), minSize=100, size=100, canCollapse=True, identifier="pane6"),
        ]
        self.w.splitView = SplitView((10, 10, -10, -10), paneDescriptions1, isVertical=False)
        if drawGrid:
            self.drawGrid()
        self.w.open()

    def buttonCallback(self, sender):
        # Collapse/expand the red pane inside the nested split.
        self.nestedSplit.togglePane("pane1")
class Test(object):

    """Launcher palette with one button per interactive test window."""

    def __init__(self):
        self.w = FloatingWindow((200, 300, 120, 400))
        self.w.drawGrid = CheckBox((10, 10, -10, 22), "Draw Grid", value=False)
        self.w.windows = Button((10, 40, -10, 20), "Windows", callback=self.openTestCallback)
        self.w.geometry = Button((10, 70, -10, 20), "Geometry", callback=self.openTestCallback)
        self.w.text = Button((10, 100, -10, 20), "Text", callback=self.openTestCallback)
        self.w.buttons = Button((10, 130, -10, 20), "Buttons", callback=self.openTestCallback)
        self.w.list = Button((10, 160, -10, 20), "List", callback=self.openTestCallback)
        self.w.browser = Button((10, 190, -10, 20), "Browser", callback=self.openTestCallback)
        self.w.view = Button((10, 220, -10, 20), "Views", callback=self.openTestCallback)
        self.w.toolbar = Button((10, 250, -10, 20), "Toolbar", callback=self.openTestCallback)
        self.w.misc = Button((10, 280, -10, 20), "Misc.", callback=self.openTestCallback)
        self.w.split = Button((10, 310, -10, 20), "SplitView", callback=self.openTestCallback)
        self.w.stack = Button((10, 340, -10, 20), "StackView", callback=self.openTestCallback)
        self.w.grid = Button((10, 370, -10, 20), "GridView", callback=self.openTestCallback)
        self.w.open()

    def openTestCallback(self, sender):
        """Open the test window matching the clicked button's title.

        Errors while building a test window are printed instead of
        propagated so one broken test cannot kill the launcher.
        """
        title = sender.getTitle()
        try:
            if title == "Windows":
                WindowTest()
            elif title == "Geometry":
                from vanilla.test.testGeometry import TestGeometry
                TestGeometry()
            elif title == "Text":
                TextTest(self.w.drawGrid.get())
            elif title == "Buttons":
                ButtonTest(self.w.drawGrid.get())
            elif title == "List":
                ListTest(self.w.drawGrid.get())
            elif title == "Browser":
                BrowserTest(self.w.drawGrid.get())
            elif title == "Views":
                ViewTest(self.w.drawGrid.get())
            elif title == "Toolbar":
                ToolbarTest(self.w.drawGrid.get())
            elif title == "Misc.":
                MiscTest(self.w.drawGrid.get())
            elif title == "SplitView":
                TestSplitView(self.w.drawGrid.get())
            elif title == "StackView":
                TestStackView()
            elif title == "GridView":
                TestGridView()
        # BUG FIX: was a bare ``except:``, which also swallowed SystemExit
        # and KeyboardInterrupt; catch Exception and keep the traceback.
        except Exception:
            import traceback
            print(traceback.format_exc())
if __name__ == "__main__":
    # Run inside vanilla's test harness, which supplies the NSApplication
    # event loop the windows above need.
    from vanilla.test.testTools import executeVanillaTest
    executeVanillaTest(Test)
| |
"""
Load PP files, plot, and save.
8 km difference (experiment minus reference run).
"""
import os, sys
#%matplotlib inline
#%pylab inline
import matplotlib
#matplotlib.use('Agg')
# Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
#from matplotlib import figure
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import iris.unit as unit
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
from dateutil import tz
#import multiprocessing as mp
import gc
import types
import scipy.interpolate
import pdb
save_path='/nfs/a90/eepdw/Figures/EMBRACE/'
# Helper modules loaded by absolute path (imp.load_source predates
# importlib; this script targets Python 2).
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_title.py')
model_name_convert_legend = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_legend.py')
unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/unrotate_pole.py')
# PP-file stems: filled field (temperature) and contour field (stash 408).
pp_file_contourf = 'temp_on_p_levs_mean'
pp_file_contour ='408_on_p_levs_mean'
plot_diag='temp'
#plot_diags=['sp_hum']
plot_levels = [925]  # pressure levels (hPa) to plot
#experiment_ids = ['dkmbq', 'dklyu']
experiment_ids = ['djzny', 'djznw', 'djznq', 'djzns', 'dklwu', 'dklzq'] # All minus large 2
#Experiment_ids = ['djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
#experiment_ids = ['djzny', 'djznq', 'djzns', 'dkjxq', 'dklwu', 'dklzq', 'dkbhu',] # All 12
#experiment_ids = ['dkbhu', 'dkjxq']
#experiment_ids = ['dkmbq', 'dklyu', 'djznw', 'djzny', 'djznq', 'djzns', 'dklwu', 'dklzq'] # All minus large 2
#experiment_ids = ['dklyu, dkmgw']
# NOTE(review): this assignment overrides the list built above -- only
# these two experiments are actually plotted.
experiment_ids = ['dkmgw', 'dklyu']
#experiment_ids = ['dklyu']
diff_id='dkmbq'  # reference run subtracted from every experiment
#min_contour = 0
#max_contour = 3
#tick_interval=0.3
#clevs = np.linspace(min_contour, max_contour,64)
#cmap=cm.s3pcpn_l
cmap = plt.cm.RdBu_r  # diverging map, suits signed differences
#ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
pp_file_path = '/nfs/a90/eepdw/Data/EMBRACE/'
# Degrees trimmed off the top/bottom of the map extent.
degs_crop_top = 1.7
degs_crop_bottom = 2.5
from iris.coord_categorisation import add_categorised_coord
# def add_hour_of_day(cube, coord, name='hour'):
# add_categorised_coord(cube, name, coord,
# lambda coord, x: coord.units.num2date(x).hour)
figprops = dict(figsize=(8,8), dpi=100)
#cmap=cm.s3pcpn_l
un = unit.Unit('hours since 1970-01-01 00:00:00',calendar='gregorian')
dx, dy = 10, 10
divisor=10 # for lat/lon rounding
# Map extent (degrees) and tick positions rounded to the divisor.
lon_high = 101.866
lon_low = 64.115
lat_high = 33.
lat_low =-6.79
lon_low_tick=lon_low -(lon_low%divisor)
lon_high_tick=math.ceil(lon_high/divisor)*divisor
lat_low_tick=lat_low - (lat_low%divisor)
lat_high_tick=math.ceil(lat_high/divisor)*divisor
def main():
    # For each pressure level and experiment, plot the experiment-minus-
    # reference difference: filled temperature (or humidity), geopotential
    # height contours, and wind vectors regridded to a coarse grid.
    for p_level in plot_levels:
        # Set pressure height contour min/max
        if p_level == 925:
            clev_min = -72.
            clev_max = 72.
        elif p_level == 850:
            clev_min = -72.
            clev_max = 72.
        elif p_level == 700:
            clev_min = -72.
            clev_max = 72.
        elif p_level == 500:
            clev_min = -72.
            clev_max = 72.
        else:
            # Python 2 print statement -- this script targets py2 only.
            print 'Contour min/max not set for this pressure level'
        # Set potential temperature min/max
        if p_level == 925:
            clevpt_min = -3.
            clevpt_max = 3.
        elif p_level == 850:
            clevpt_min = -3.
            clevpt_max = 3.
        elif p_level == 700:
            clevpt_min = -3.
            clevpt_max = 3.
        elif p_level == 500:
            clevpt_min = -3.
            clevpt_max = 3.
        else:
            print 'Potential temperature min/max not set for this pressure level'
        # Set specific humidity min/max
        if p_level == 925:
            clevsh_min = -0.0025
            clevsh_max = 0.0025
        elif p_level == 850:
            clevsh_min = -0.0025
            clevsh_max = 0.0025
        elif p_level == 700:
            clevsh_min = -0.0025
            clevsh_max = 0.0025
        elif p_level == 500:
            clevsh_min = -0.0025
            clevsh_max = 0.0025
        else:
            print 'Specific humidity min/max not set for this pressure level'
        #clevs_col = np.arange(clev_min, clev_max)
        # Geopotential-height difference contour levels, every 4 units.
        clevs_lin = np.arange(clev_min, clev_max, 4.)
        p_level_constraint = iris.Constraint(pressure=p_level)
        #for plot_diag in plot_diags:
        for experiment_id in experiment_ids:
            expmin1 = experiment_id[:-1]
            diffmin1 = diff_id[:-1]
            # Filled-field cubes for the experiment and the reference run.
            pfile = '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s_%s.pp' % (expmin1, experiment_id, experiment_id, pp_file_contourf)
            pfile_diff = '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s_%s.pp' % (diffmin1, diff_id, diff_id, pp_file_contourf)
            pcube_contourf = iris.load_cube(pfile, p_level_constraint)
            #pcube_contourf=iris.analysis.maths.multiply(pcube_contourf,3600)
            pcube_contourf_diff = iris.load_cube(pfile_diff, p_level_constraint)
            #pcube_contourf_diff=iris.analysis.maths.multiply(pcube_contourf_diff,3600)
            #pdb.set_trace()
            # Geopotential-height (stash 408) cubes for both runs.
            height_pp_file = '%s_%s.pp' % (experiment_id, pp_file_contour)
            height_pfile = '%s%s/%s/%s' % (pp_file_path, expmin1, experiment_id, height_pp_file)
            height_pp_file_diff = '%s_%s.pp' % (diff_id, pp_file_contour)
            height_pfile_diff = '%s%s/%s/%s' % (pp_file_path, diffmin1, diff_id, height_pp_file_diff)
            pcube_contour = iris.load_cube(height_pfile, p_level_constraint)
            pcube_contour_diff = iris.load_cube(height_pfile_diff, p_level_constraint)
            #pdb.set_trace()
            # Replace the full fields with experiment-minus-reference.
            pcube_contourf=pcube_contourf-pcube_contourf_diff
            pcube_contour=pcube_contour-pcube_contour_diff
            del pcube_contourf_diff, pcube_contour_diff
            #pdb.set_trace()
            #time_coords = pcube_contourf.coord('time')
            #iris.coord_categorisation.add_day_of_year(pcube_contourf, time_coords, name='day_of_year')
            #time_coords = pcube_contour.coord('time')
            #iris.coord_categorisation.add_day_of_year(pcube_contour, time_coords, name='day_of_year')
            # Wind cubes (stash 30201) for both runs.
            fu = '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s_30201_mean.pp' \
                % (expmin1, experiment_id, experiment_id)
            fu_diff = '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s_30201_mean.pp' \
                % (diffmin1, diff_id, diff_id)
            #pdb.set_trace()
            # NOTE(review): assumes iris.load returns exactly (u, v) in
            # this order -- load order is not guaranteed; verify.
            u_wind,v_wind = iris.load(fu, p_level_constraint)
            u_wind_diff,v_wind_diff = iris.load(fu_diff, p_level_constraint)
            u_wind = u_wind - u_wind_diff
            v_wind = v_wind - v_wind_diff
            del u_wind_diff, v_wind_diff
            for t, time_cube in enumerate(pcube_contourf.slices(['grid_latitude', 'grid_longitude'])):
                #pdb.set_trace()
                #height_cube_slice = pcube_contour.extract(iris.Constraint(day_of_year=time_cube.coord('day_of_year').points))
                # NOTE(review): height/wind are NOT sliced per time step --
                # the same (mean) cubes are redrawn on every pass.
                height_cube_slice = pcube_contour
                u_wind_slice = u_wind
                v_wind_slice = v_wind
                #pdb.set_trace()
                # Get time of averagesfor plot title
                #h = un.num2date(np.array(time_cube.coord('time').points, dtype=float)[0]).strftime('%d%b')
                #Convert to India time
                # from_zone = tz.gettz('UTC')
                # to_zone = tz.gettz('Asia/Kolkata')
                # h_utc = un.num2date(np.array(time_cube.coord('day_of_year').points, dtype=float)[0]).replace(tzinfo=from_zone)
                # h_local = h_utc.astimezone(to_zone).strftime('%H%M')
                ### Winds
                # Unrotate the wind grid from rotated-pole to true lat/lon.
                cs_w = u_wind_slice.coord_system('CoordSystem')
                lat_w = u_wind_slice.coord('grid_latitude').points
                lon_w = u_wind_slice.coord('grid_longitude').points
                lons_w, lats_w = np.meshgrid(lon_w, lat_w)
                lons_w,lats_w = iris.analysis.cartography.unrotate_pole(lons_w,lats_w, cs_w.grid_north_pole_longitude, cs_w.grid_north_pole_latitude)
                lon_w=lons_w[0]
                lat_w=lats_w[:,0]
                ### Regrid winds to 2 degree spacing
                lat_wind_1deg = np.arange(lat_low,lat_high, 2)
                lon_wind_1deg = np.arange(lon_low,lon_high, 2)
                #pdb.set_trace()
                lons_wi, lats_wi = np.meshgrid(lon_wind_1deg, lat_wind_1deg)
                fl_la_lo = (lats_w.flatten(),lons_w.flatten())
                # NOTE(review): p_levs/sc are computed but never used below.
                p_levs = u_wind_slice.coord('pressure').points
                sc = np.searchsorted(p_levs, p_level)
                u = scipy.interpolate.griddata(fl_la_lo, u_wind_slice.data.flatten(), (lats_wi, lons_wi), method='linear')
                v = scipy.interpolate.griddata(fl_la_lo, v_wind_slice.data.flatten(), (lats_wi, lons_wi), method='linear')
                ################################### # PLOT ##############################################
                fig = plt.figure(**figprops)
                #cmap=plt.cm.RdBu_r
                ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lon_low,lon_high,lat_low+degs_crop_bottom,lat_high-degs_crop_top))
                # Basemap instance is used only to project the wind vectors.
                m =\
                    Basemap(llcrnrlon=lon_low,llcrnrlat=lat_low,urcrnrlon=lon_high,urcrnrlat=lat_high, rsphere = 6371229)
                #pdb.set_trace()
                # lat = pcube_contourf.coord('grid_latitude').points
                # lon = pcube_contourf.coord('grid_longitude').points
                # cs = cube.coord_system('CoordSystem')
                # lons, lats = np.meshgrid(lon, lat)
                # lons, lats = iris.analysis.cartography.unrotate_pole\
                # (lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
                # x,y = m(lons,lats)
                #x_w,y_w = m(lons_wi, lats_wi)
                # Pick color scale and labels for the chosen diagnostic.
                if plot_diag=='temp':
                    min_contour = clevpt_min
                    max_contour = clevpt_max
                    cb_label='K'
                    main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), potential temperature (colours),\
and wind (vectors)'
                    tick_interval=2
                    clev_number=max_contour-min_contour+1
                elif plot_diag=='sp_hum':
                    min_contour = clevsh_min
                    max_contour = clevsh_max
                    cb_label='kg/kg'
                    main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), specific humidity (colours),\
and wind (vectors)'
                    tick_interval=0.002
                    clev_number=max_contour-min_contour+0.001
                # NOTE(review): the clev_number-based levels are dead code;
                # they are immediately overwritten by the fixed 32 levels.
                clevs = np.linspace(min_contour, max_contour, clev_number)
                clevs = np.linspace(min_contour, max_contour, 32)
                #clevs=np.linspace(-10.,10.,32)
                # #clevs = np.linspace(-3, 3, 32)
                # cont = plt.contourf(x,y,time_cube.data, clevs, cmap=cmap, extend='both')
                #cont = iplt.contourf(time_cube, clevs, cmap=cmap, extend='both')
                # Unrotate the scalar-field grid and draw filled contours.
                lat = time_cube.coord('grid_latitude').points
                lon = time_cube.coord('grid_longitude').points
                lons, lats = np.meshgrid(lon, lat)
                cs = time_cube.coord_system('CoordSystem')
                lons,lats = iris.analysis.cartography.unrotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
                cont = plt.contourf(lons, lats, time_cube.data, clevs, cmap=cmap, extend='both')
                #pdb.set_trace()
                # Geopotential-height difference as labeled grey contours.
                cs_lin = plt.contour(lons, lats, height_cube_slice.data, clevs_lin,colors='#262626',linewidths=1.)
                plt.clabel(cs_lin, fontsize=14, fmt='%d', color='black')
                #del time_cube
                #plt.clabel(cont, fmt='%d')
                #ax.stock_img()
                ax.coastlines(resolution='110m', color='#262626')
                # Gridlines with labels on the left/bottom edges only.
                gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--')
                gl.xlabels_top = False
                gl.ylabels_right = False
                #gl.xlines = False
                dx, dy = 10, 10
                gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+dx,dx))
                gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+dy,dy))
                gl.xformatter = LONGITUDE_FORMATTER
                gl.yformatter = LATITUDE_FORMATTER
                gl.xlabel_style = {'size': 12, 'color':'#262626'}
                #gl.xlabel_style = {'color': '#262626', 'weight': 'bold'}
                gl.ylabel_style = {'size': 12, 'color':'#262626'}
                # Wind-difference vectors on the coarse grid, with a key.
                x_w,y_w = m(lons_wi, lats_wi)
                wind = m.quiver(x_w,y_w, u, v,scale=75, color='#262626' )
                qk = plt.quiverkey(wind, 0.1, 0.1, 1, '5 m/s', labelpos='W')
                cbar = fig.colorbar(cont, orientation='horizontal', pad=0.05, extend='both')
                # NOTE(review): the first set_label is overwritten by the
                # cube-units label on the very next line.
                cbar.set_label('%s' % cb_label, fontsize=10, color='#262626')
                cbar.set_label(time_cube.units, fontsize=10, color='#262626')
                cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
                ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
                cbar.set_ticklabels(['${%.1f}$' % i for i in ticks])
                cbar.ax.tick_params(labelsize=10, color='#262626')
                #main_title='Mean Rainfall for EMBRACE Period -%s UTC (%s IST)' % (h, h_local)
                #main_title=time_cube.standard_name.title().replace('_',' ')
                #model_info = re.sub(r'[(\']', ' ', model_info)
                #model_info = re.sub(r'[\',)]', ' ', model_info)
                #print model_info
                file_save_name = '%s_minus_%s_%s_and_%s_%s_hPa_geop_height_and_wind' \
                    % (experiment_id, diff_id, pp_file_contour, pp_file_contourf, p_level)
                save_dir = '%s%s/%s_and_%s' % (save_path, experiment_id, pp_file_contour, pp_file_contourf)
                if not os.path.exists('%s' % save_dir): os.makedirs('%s' % (save_dir))
                #plt.show()
                plt.title('%s-%s' % (str(model_name_convert_legend.main(experiment_id)), str(model_name_convert_legend.main(diff_id))))
                fig.savefig('%s/%s.png' % (save_dir, file_save_name), format='png', bbox_inches='tight')
                #fig.savefig('%s/%s_short_title.png' % (save_dir, file_save_name) , format='png', bbox_inches='tight')
                #plt.show()
                #model_info=re.sub('(.{68} )', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL)
                #plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=16)
                #fig.savefig('%s/%s.png' % (save_dir, file_save_name), format='png', bbox_inches='tight')
                # Release figure memory before the next time slice.
                fig.clf()
                plt.close()
                #del time_cube
                gc.collect()
if __name__ == '__main__':
    main()
    # Leftover multiprocessing experiment, intentionally disabled:
    #proc=mp.Process(target=worker)
    #proc.daemon=True
    #proc.start()
    #proc.join()
| |
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Openstack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import re
import sys
import traceback
from oslo.config import cfg
import six
from six import moves
from contractor.openstack.common.gettextutils import _
from contractor.openstack.common import importutils
from contractor.openstack.common import jsonutils
from contractor.openstack.common import local
# Default strftime format for %(asctime)s when no date format is configured.
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
# Keys whose values must never appear in clear text in log output.
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
# Templates cover, in order: shell-style key='value' assignments, XML
# elements, JSON/dict-style "key": "value" entries, and Python-repr
# entries with an optional u'' prefix. Groups 1/2 keep the delimiters so
# the substitution only replaces the secret itself.
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
                    r'(<%(key)s>).*?(</%(key)s>)',
                    r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
                    r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']
for key in _SANITIZE_KEYS:
    for pattern in _FORMAT_PATTERNS:
        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
        _SANITIZE_PATTERNS.append(reg_ex)
# CLI flags shared by most OpenStack services (-d / -v verbosity control).
common_cli_opts = [
    cfg.BoolOpt('debug',
                short='d',
                default=False,
                help='Print debugging output (set logging level to '
                     'DEBUG instead of default WARNING level).'),
    cfg.BoolOpt('verbose',
                short='v',
                default=False,
                help='Print more verbose output (set logging level to '
                     'INFO instead of default WARNING level).'),
]
# CLI options controlling log destinations (file, dir, syslog) and the
# deprecated flat log-format string.
logging_cli_opts = [
    cfg.StrOpt('log-config-append',
               metavar='PATH',
               deprecated_name='log-config',
               help='The name of logging configuration file. It does not '
                    'disable existing loggers, but just appends specified '
                    'logging configuration to any other existing logging '
                    'options. Please see the Python logging module '
                    'documentation for details on logging configuration '
                    'files.'),
    cfg.StrOpt('log-format',
               default=None,
               metavar='FORMAT',
               help='DEPRECATED. '
                    'A logging.Formatter log message format string which may '
                    'use any of the available logging.LogRecord attributes. '
                    'This option is deprecated. Please use '
                    'logging_context_format_string and '
                    'logging_default_format_string instead.'),
    cfg.StrOpt('log-date-format',
               default=_DEFAULT_LOG_DATE_FORMAT,
               metavar='DATE_FORMAT',
               help='Format string for %%(asctime)s in log records. '
                    'Default: %(default)s'),
    cfg.StrOpt('log-file',
               metavar='PATH',
               deprecated_name='logfile',
               help='(Optional) Name of log file to output to. '
                    'If no default is set, logging will go to stdout.'),
    cfg.StrOpt('log-dir',
               deprecated_name='logdir',
               help='(Optional) The base directory used for relative '
                    '--log-file paths'),
    cfg.BoolOpt('use-syslog',
                default=False,
                help='Use syslog for logging.'),
    cfg.StrOpt('syslog-log-facility',
               default='LOG_USER',
               help='syslog facility to receive log lines')
]
generic_log_opts = [
    cfg.BoolOpt('use_stderr',
                default=True,
                help='Log output to standard error')
]
# Config-file-only options: context-aware format strings, per-logger
# default levels, and error/deprecation handling policy.
log_opts = [
    cfg.StrOpt('logging_context_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [%(request_id)s %(user_identity)s] '
                       '%(instance)s%(message)s',
               help='format string to use for log messages with context'),
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [-] %(instance)s%(message)s',
               help='format string to use for log messages without context'),
    cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
               help='data to append to log format when level is DEBUG'),
    cfg.StrOpt('logging_exception_prefix',
               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
                       '%(instance)s',
               help='prefix each line of exception output with this format'),
    cfg.ListOpt('default_log_levels',
                default=[
                    'amqp=WARN',
                    'amqplib=WARN',
                    'boto=WARN',
                    'qpid=WARN',
                    'sqlalchemy=WARN',
                    'suds=INFO',
                    'iso8601=WARN',
                ],
                help='list of logger=LEVEL pairs'),
    cfg.BoolOpt('publish_errors',
                default=False,
                help='publish error events'),
    cfg.BoolOpt('fatal_deprecations',
                default=False,
                help='make deprecations fatal'),
    # NOTE(mikal): there are two options here because sometimes we are handed
    # a full instance (and could include more information), and other times we
    # are just handed a UUID for the instance.
    cfg.StrOpt('instance_format',
               default='[instance: %(uuid)s] ',
               help='If an instance is passed with the log message, format '
                    'it like this'),
    cfg.StrOpt('instance_uuid_format',
               default='[instance: %(uuid)s] ',
               help='If an instance UUID is passed with the log message, '
                    'format it like this'),
]
# Register all option groups on the global config object at import time.
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
# Use the stdlib NullHandler when available, otherwise provide a minimal
# compatible stand-in.
try:
    NullHandler = logging.NullHandler
except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
    class NullHandler(logging.Handler):
        """No-op handler: swallows every record without formatting it."""

        def handle(self, record):
            pass

        def emit(self, record):
            pass

        def createLock(self):
            # No I/O happens, so no lock is needed.
            self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
    """Resolve the log destination from CONF.log_file / CONF.log_dir.

    :param binary: program name used to synthesize "<binary>.log" when
                   only a log directory is configured; defaults to the
                   running script's name.
    :returns: absolute/relative log file path, or None for stdout logging.
    """
    logfile = CONF.log_file
    logdir = CONF.log_dir
    if logfile:
        return os.path.join(logdir, logfile) if logdir else logfile
    if logdir:
        name = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, name),)
    return None
def mask_password(message, secret="***"):
    """Replace password values in *message* with *secret*.

    :param message: The string which includes security information.
    :param secret: value with which to replace passwords.
    :returns: The unicode value of message with the password fields masked.

    For example:

    >>> mask_password("'adminPass' : 'aaaaa'")
    "'adminPass' : '***'"
    >>> mask_password("'admin_pass' : 'aaaaa'")
    "'admin_pass' : '***'"
    >>> mask_password('"password" : "aaaaa"')
    '"password" : "***"'
    >>> mask_password("'original_password' : 'aaaaa'")
    "'original_password' : '***'"
    >>> mask_password("u'original_password' : u'aaaaa'")
    "u'original_password' : u'***'"
    """
    message = six.text_type(message)
    # Cheap containment scan first: only run the regex machinery when at
    # least one sensitive key actually appears in the message.
    if not any(sensitive in message for sensitive in _SANITIZE_KEYS):
        return message
    # Keep the surrounding delimiters (groups 1 and 2), swap the value.
    replacement = r'\g<1>' + secret + r'\g<2>'
    for compiled in _SANITIZE_PATTERNS:
        message = compiled.sub(replacement, message)
    return message
class BaseLoggerAdapter(logging.LoggerAdapter):
    """LoggerAdapter that adds an audit() method for the synthesized
    AUDIT level (INFO + 1) registered at module import.
    """

    def audit(self, msg, *args, **kwargs):
        self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
    """Adapter that defers creating the real logger until first use.

    Intentionally does not call BaseLoggerAdapter.__init__: the wrapped
    logger is built lazily by the ``logger`` property via getLogger().
    """

    def __init__(self, name='unknown', version='unknown'):
        self._logger = None  # real ContextAdapter, created on demand
        self.extra = {}
        self.name = name
        self.version = version

    @property
    def logger(self):
        # Create and cache the real logger on first access.
        if not self._logger:
            self._logger = getLogger(self.name, self.version)
        return self._logger
class ContextAdapter(BaseLoggerAdapter):
    """LoggerAdapter that folds request-context data into each record.

    The extra fields it injects (request_id, user_identity, instance,
    project, version) are what the logging_context_format_string /
    logging_default_format_string options refer to.
    """

    # Alias so callers may use .warn() as well as .warning().
    warn = logging.LoggerAdapter.warning

    def __init__(self, logger, project_name, version_string):
        self.logger = logger
        self.project = project_name
        self.version = version_string

    @property
    def handlers(self):
        return self.logger.handlers

    def deprecated(self, msg, *args, **kwargs):
        """Log a deprecation warning, or raise if CONF.fatal_deprecations."""
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)
        else:
            self.warn(stdmsg, *args, **kwargs)

    def process(self, msg, kwargs):
        """Inject context/instance data into kwargs['extra'] for a record."""
        # NOTE(mrodden): catch any Message/other object and
        #                coerce to unicode before they can get
        #                to the python logging and possibly
        #                cause string encoding trouble
        if not isinstance(msg, six.string_types):
            msg = six.text_type(msg)

        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']

        # Explicit context kwarg wins; otherwise fall back to the
        # thread-local store.
        context = kwargs.pop('context', None)
        if not context:
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))

        # Build the "[instance: <uuid>] " prefix from either a full
        # instance dict or a bare UUID.
        instance = kwargs.pop('instance', None)
        instance_uuid = (extra.get('instance_uuid', None) or
                         kwargs.pop('instance_uuid', None))
        instance_extra = ''
        if instance:
            instance_extra = CONF.instance_format % instance
        elif instance_uuid:
            instance_extra = (CONF.instance_uuid_format
                              % {'uuid': instance_uuid})
        extra['instance'] = instance_extra

        extra.setdefault('user_identity', kwargs.pop('user_identity', None))

        extra['project'] = self.project
        extra['version'] = self.version
        # Copy of everything so far, exposed to formatters as record.extra.
        extra['extra'] = extra.copy()
        return msg, kwargs
class JSONFormatter(logging.Formatter):
    """Formatter that serializes every LogRecord to a JSON document."""

    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but its still there
        #                since logging.config.fileConfig passes it.
        self.datefmt = datefmt

    def formatException(self, ei, strip_newlines=True):
        """Return the traceback as a list of lines (blank lines dropped)."""
        lines = traceback.format_exception(*ei)
        if strip_newlines:
            # Each formatted entry may span several lines; split and
            # filter out empties, then flatten back into one list.
            lines = [moves.filter(
                lambda x: x,
                line.rstrip().splitlines()) for line in lines]
            lines = list(itertools.chain(*lines))
        return lines

    def format(self, record):
        """Serialize the record's attributes (and traceback) as JSON."""
        message = {'message': record.getMessage(),
                   'asctime': self.formatTime(record, self.datefmt),
                   'name': record.name,
                   'msg': record.msg,
                   'args': record.args,
                   'levelname': record.levelname,
                   'levelno': record.levelno,
                   'pathname': record.pathname,
                   'filename': record.filename,
                   'module': record.module,
                   'lineno': record.lineno,
                   'funcname': record.funcName,
                   'created': record.created,
                   'msecs': record.msecs,
                   'relative_created': record.relativeCreated,
                   'thread': record.thread,
                   'thread_name': record.threadName,
                   'process_name': record.processName,
                   'process': record.process,
                   'traceback': None}
        # ContextAdapter.process() stores its context dict on record.extra.
        if hasattr(record, 'extra'):
            message['extra'] = record.extra
        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)
        return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
    """Build a sys.excepthook that logs uncaught exceptions as CRITICAL.

    The full exc_info is attached only when CONF.verbose is set.
    """
    def logging_excepthook(exc_type, value, tb):
        extra = {}
        if CONF.verbose:
            extra['exc_info'] = (exc_type, value, tb)
        getLogger(product_name).critical(str(value), **extra)
    return logging_excepthook
class LogConfigError(Exception):
    """Raised when a logging configuration file cannot be parsed."""

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        details = {'log_config': self.log_config, 'err_msg': self.err_msg}
        return self.message % details
def _load_log_config(log_config_append):
    """Apply a fileConfig-style logging config without disabling loggers.

    :raises LogConfigError: if the file cannot be parsed.
    """
    try:
        logging.config.fileConfig(log_config_append,
                                  disable_existing_loggers=False)
    except moves.configparser.Error as exc:
        raise LogConfigError(log_config_append, str(exc))
def setup(product_name):
    """Setup logging.

    Uses the log-config-append file when provided, otherwise wires
    handlers straight from CONF, and installs an excepthook that logs
    uncaught exceptions under *product_name*.
    """
    config_file = CONF.log_config_append
    if config_file:
        _load_log_config(config_file)
    else:
        _setup_logging_from_conf()
    sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
    """Override the default value of logging_context_format_string."""
    cfg.set_defaults(log_opts,
                     logging_context_format_string=
                     logging_context_format_string)
def _find_facility_from_conf():
    """Map CONF.syslog_log_facility to a SysLogHandler facility value.

    Accepts either a SysLogHandler attribute name (e.g. 'LOG_USER') or a
    key from SysLogHandler.facility_names.

    :returns: the numeric syslog facility.
    :raises TypeError: if the configured facility is unknown.
    """
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)

    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)

    if facility is None:
        # NOTE: list() is required on Python 3, where dict.keys() returns
        # a view object that has no extend() method.
        valid_facilities = list(facility_names.keys())
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))

    return facility
def _setup_logging_from_conf():
    """Wire root-logger handlers, formatters and levels from CONF.

    Handler selection: syslog (optional) + file (if a path resolves) +
    stderr (if use_stderr) or stdout as a last resort, plus an optional
    error-publishing handler.
    """
    log_root = getLogger(None).logger
    # Start from a clean slate so repeated setup() calls don't stack
    # duplicate handlers.
    for handler in log_root.handlers:
        log_root.removeHandler(handler)

    if CONF.use_syslog:
        facility = _find_facility_from_conf()
        syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                facility=facility)
        log_root.addHandler(syslog)

    logpath = _get_log_file_path()
    if logpath:
        # WatchedFileHandler reopens the file if it is rotated externally.
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)

    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)

    elif not logpath:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)

    if CONF.publish_errors:
        handler = importutils.import_object(
            "contractor.openstack.common.log_handler.PublishErrorsHandler",
            logging.ERROR)
        log_root.addHandler(handler)

    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently.  This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(datefmt=datefmt))

    # Root level: debug > verbose > default WARNING.
    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)

    # Apply per-logger overrides ("logger=LEVEL" pairs).
    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        level = logging.getLevelName(level_name)
        logger = logging.getLogger(mod)
        logger.setLevel(level)
# Cache of ContextAdapter instances, keyed by logger name.
_loggers = {}


def getLogger(name='unknown', version='unknown'):
    """Return the cached ContextAdapter for *name*, creating it on miss."""
    try:
        return _loggers[name]
    except KeyError:
        adapter = ContextAdapter(logging.getLogger(name), name, version)
        _loggers[name] = adapter
        return adapter
def getLazyLogger(name='unknown', version='unknown'):
    """Returns lazy logger.

    Creates a pass-through logger that does not create the real logger
    until it is really needed and delegates all calls to the real logger
    once it is created.
    """
    return LazyAdapter(name, version)
class WritableLogger(object):
    """A thin wrapper that responds to `write` and logs.

    Useful for pointing file-like APIs (e.g. WSGI server output) at a
    logger: every write() becomes one log record at the given level.
    """

    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, msg):
        self.logger.log(self.level, msg)
class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.

    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string.  You can also specify
    logging_debug_format_suffix to append extra formatting if the log level is
    debug.

    For information about what variables are available for the formatter see:
    http://docs.python.org/library/logging.html#formatter
    """

    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""
        # NOTE(sdague): default the fancier formating params
        # to an empty string so we don't throw an exception if
        # they get used
        for key in ('instance', 'color'):
            if key not in record.__dict__:
                record.__dict__[key] = ''

        # Pick the format string per record: a request_id means the
        # context-aware format applies.
        if record.__dict__.get('request_id', None):
            self._fmt = CONF.logging_context_format_string
        else:
            self._fmt = CONF.logging_default_format_string

        if (record.levelno == logging.DEBUG and
                CONF.logging_debug_format_suffix):
            self._fmt += " " + CONF.logging_debug_format_suffix

        # Cache this on the record, Logger will respect our formated copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)

    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        if not record:
            return logging.Formatter.formatException(self, exc_info)

        stringbuffer = moves.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, stringbuffer)
        lines = stringbuffer.getvalue().split('\n')
        stringbuffer.close()

        # Compute asctime only when the prefix actually references it.
        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
            record.asctime = self.formatTime(record, self.datefmt)

        formatted_lines = []
        for line in lines:
            # Prefix every traceback line (e.g. "... TRACE <name> ...").
            pl = CONF.logging_exception_prefix % record.__dict__
            fl = '%s%s' % (pl, line)
            formatted_lines.append(fl)
        return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
    """StreamHandler that exposes an ANSI color code as %(color)s."""

    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m',  # GREEN
        logging.INFO: '\033[00;36m',  # CYAN
        logging.AUDIT: '\033[01;36m',  # BOLD CYAN
        logging.WARN: '\033[01;33m',  # BOLD YELLOW
        logging.ERROR: '\033[01;31m',  # BOLD RED
        logging.CRITICAL: '\033[01;31m',  # BOLD RED
    }

    def format(self, record):
        # Use .get() so records logged at a custom/unlisted level (e.g.
        # logging.log(15, ...)) do not raise KeyError inside the handler;
        # such records are simply emitted without a color code.
        record.color = self.LEVEL_COLORS.get(record.levelno, '')
        return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
    """Raised by ContextAdapter.deprecated() when fatal_deprecations is on."""

    message = _("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        super(Exception, self).__init__(self.message % dict(msg=msg))
| |
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from django.urls import reverse
from django.utils.http import urlunquote
import mock
from horizon import forms
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks import tests
from openstack_dashboard.test import helpers as test
from openstack_dashboard import usage
# Template and URL shared by the index-view tests below.
INDEX_TEMPLATE = 'horizon/common/_data_table_view.html'
INDEX_URL = reverse('horizon:admin:networks:index')
class NetworkTests(test.BaseAdminViewTests):
    def _stub_is_extension_supported(self, features):
        """Stub neutron.is_extension_supported from a {alias: bool} map,
        counting how often each alias is queried.
        """
        self._features = features
        self._feature_call_counts = collections.defaultdict(int)

        def fake_extension_supported(request, alias):
            self._feature_call_counts[alias] += 1
            return self._features[alias]

        self.mock_is_extension_supported.side_effect = fake_extension_supported
    def _check_is_extension_supported(self, expected_count):
        """Assert per-alias query counts recorded by the stub above."""
        self.assertEqual(expected_count, self._feature_call_counts)
    @test.create_mocks({api.neutron: ('network_list',
                                      'list_dhcp_agent_hosting_networks',
                                      'is_extension_supported'),
                        api.keystone: ('tenant_list',),
                        usage.quotas: ('tenant_quota_usages',)})
    def test_index(self):
        """Index view lists all networks and queries quotas/agents per net."""
        tenants = self.tenants.list()
        quota_data = self.quota_usages.first()
        self.mock_network_list.return_value = self.networks.list()
        self.mock_tenant_list.return_value = [tenants, False]
        self._stub_is_extension_supported(
            {'network_availability_zone': True,
             'dhcp_agent_scheduler': True})
        self.mock_tenant_quota_usages.return_value = quota_data
        self.mock_list_dhcp_agent_hosting_networks.return_value = \
            self.agents.list()

        res = self.client.get(INDEX_URL)

        self.assertTemplateUsed(res, INDEX_TEMPLATE)
        networks = res.context['networks_table'].data
        self.assertItemsEqual(networks, self.networks.list())

        self.mock_network_list.assert_called_once_with(test.IsHttpRequest())
        self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest())
        # dhcp_agent_scheduler: once up front plus once per listed network.
        self._check_is_extension_supported(
            {'network_availability_zone': 1,
             'dhcp_agent_scheduler': len(self.networks.list()) + 1})
        self.mock_tenant_quota_usages.assert_has_calls(
            [mock.call(test.IsHttpRequest(), tenant_id=network.tenant_id,
                       targets=('subnet', ))
             for network in self.networks.list()])
        self.assertEqual(len(self.networks.list()),
                         self.mock_tenant_quota_usages.call_count)
        self.mock_list_dhcp_agent_hosting_networks.assert_has_calls(
            [mock.call(test.IsHttpRequest(), network.id)
             for network in self.networks.list()])
        self.assertEqual(len(self.networks.list()),
                         self.mock_list_dhcp_agent_hosting_networks.call_count)
    @test.create_mocks({api.neutron: ('network_list',
                                      'is_extension_supported',)})
    def test_index_network_list_exception(self):
        """A neutron failure yields an empty table plus one error message."""
        self.mock_network_list.side_effect = self.exceptions.neutron
        self._stub_is_extension_supported(
            {'network_availability_zone': True,
             'dhcp_agent_scheduler': True})

        res = self.client.get(INDEX_URL)

        self.assertTemplateUsed(res, INDEX_TEMPLATE)
        self.assertEqual(len(res.context['networks_table'].data), 0)
        self.assertMessageCount(res, error=1)
        self.mock_network_list.assert_called_once_with(test.IsHttpRequest())
        self._check_is_extension_supported(
            {'network_availability_zone': 1,
             'dhcp_agent_scheduler': 1})
    @test.create_mocks({api.neutron: ('network_get',
                                      'is_extension_supported'),
                        usage.quotas: ('tenant_quota_usages',)})
    def test_network_detail_new(self, mac_learning=False):
        """Detail view shows the network name/status and checks extensions."""
        network = self.networks.first()
        quota_data = self.quota_usages.first()
        self.mock_network_get.return_value = network
        self.mock_tenant_quota_usages.return_value = quota_data
        self._stub_is_extension_supported(
            {'network-ip-availability': True,
             'network_availability_zone': True,
             'mac-learning': mac_learning,
             'dhcp_agent_scheduler': True})

        url = urlunquote(reverse('horizon:admin:networks:detail',
                                 args=[network.id]))
        res = self.client.get(url)

        network = res.context['network']
        self.assertEqual(self.networks.first().name_or_id, network.name_or_id)
        self.assertEqual(self.networks.first().status_label,
                         network.status_label)
        self.assertTemplateUsed(res, 'horizon/common/_detail.html')

        # network_get is invoked twice during detail rendering.
        self.assert_mock_multiple_calls_with_same_arguments(
            self.mock_network_get, 2,
            mock.call(test.IsHttpRequest(), network.id))
        self.mock_tenant_quota_usages.assert_called_once_with(
            test.IsHttpRequest(), tenant_id=network.tenant_id,
            targets=('subnet',))
        self._check_is_extension_supported(
            {'network-ip-availability': 1,
             'network_availability_zone': 1,
             'mac-learning': 1,
             'dhcp_agent_scheduler': 1})
    def test_network_detail_subnets_tab(self):
        """Subnets tab, mac-learning extension disabled."""
        self._test_network_detail_subnets_tab()
    def test_network_detail_subnets_tab_with_mac_learning(self):
        """Subnets tab, mac-learning extension enabled."""
        self._test_network_detail_subnets_tab(mac_learning=True)
    @test.create_mocks({api.neutron: ('network_get',
                                      'subnet_list',
                                      'show_network_ip_availability',
                                      'is_extension_supported'),
                        usage.quotas: ('tenant_quota_usages',)})
    def _test_network_detail_subnets_tab(self, mac_learning=False):
        """Subnets tab renders the network's subnets and IP availability."""
        network = self.networks.first()
        ip_availability = self.ip_availability.get()
        quota_data = self.quota_usages.first()
        self.mock_show_network_ip_availability.return_value = ip_availability
        self.mock_network_get.return_value = network
        self.mock_subnet_list.return_value = [self.subnets.first()]
        self._stub_is_extension_supported(
            {'network-ip-availability': True,
             'mac-learning': mac_learning,
             'network_availability_zone': True,
             'dhcp_agent_scheduler': True})
        self.mock_tenant_quota_usages.return_value = quota_data

        url = urlunquote(reverse('horizon:admin:networks:subnets_tab',
                                 args=[network.id]))
        res = self.client.get(url)

        self.assertTemplateUsed(res, 'horizon/common/_detail.html')
        subnets = res.context['subnets_table'].data
        self.assertItemsEqual(subnets, [self.subnets.first()])

        self.mock_show_network_ip_availability.assert_called_once_with(
            test.IsHttpRequest(), network.id)
        self.assert_mock_multiple_calls_with_same_arguments(
            self.mock_network_get, 2,
            mock.call(test.IsHttpRequest(), network.id))
        self.mock_subnet_list.assert_called_once_with(test.IsHttpRequest(),
                                                      network_id=network.id)
        # network-ip-availability is queried by both the tab group and the
        # subnets tab itself, hence 2.
        self._check_is_extension_supported(
            {'network-ip-availability': 2,
             'mac-learning': 1,
             'network_availability_zone': 1,
             'dhcp_agent_scheduler': 1})
        self.assert_mock_multiple_calls_with_same_arguments(
            self.mock_tenant_quota_usages, 3,
            mock.call(test.IsHttpRequest(), tenant_id=network.tenant_id,
                      targets=('subnet',)))
    @test.create_mocks({api.neutron: ('network_get',
                                      'port_list',
                                      'is_extension_supported'),
                        usage.quotas: ('tenant_quota_usages',)})
    def test_network_detail_ports_tab(self, mac_learning=False):
        """Ports tab lists the network's ports and checks subnet/port quotas."""
        network = self.networks.first()
        quota_data = self.neutron_quota_usages.first()
        self.mock_network_get.return_value = network
        self.mock_port_list.return_value = [self.ports.first()]
        self.mock_tenant_quota_usages.return_value = quota_data
        self._stub_is_extension_supported(
            {'network-ip-availability': True,
             'mac-learning': mac_learning,
             'network_availability_zone': True,
             'dhcp_agent_scheduler': True})

        url = reverse('horizon:admin:networks:ports_tab',
                      args=[network.id])
        res = self.client.get(urlunquote(url))

        self.assertTemplateUsed(res, 'horizon/common/_detail.html')
        ports = res.context['ports_table'].data
        self.assertItemsEqual(ports, [self.ports.first()])

        self.assert_mock_multiple_calls_with_same_arguments(
            self.mock_network_get, 2,
            mock.call(test.IsHttpRequest(), network.id))
        self.mock_port_list.assert_called_once_with(test.IsHttpRequest(),
                                                    network_id=network.id)
        # Quota usage is fetched once for subnets and twice for ports.
        self.assertEqual(3, self.mock_tenant_quota_usages.call_count)
        self.mock_tenant_quota_usages.assert_has_calls([
            mock.call(test.IsHttpRequest(), tenant_id=network.tenant_id,
                      targets=('subnet',)),
            mock.call(test.IsHttpRequest(), tenant_id=network.tenant_id,
                      targets=('port',)),
            mock.call(test.IsHttpRequest(), tenant_id=network.tenant_id,
                      targets=('port',)),
        ])
        self._check_is_extension_supported(
            {'network-ip-availability': 1,
             'mac-learning': 1,
             'network_availability_zone': 1,
             'dhcp_agent_scheduler': 1})
    @test.create_mocks({api.neutron: ('network_get',
                                      'is_extension_supported',
                                      'list_dhcp_agent_hosting_networks',),
                        usage.quotas: ('tenant_quota_usages',)})
    def test_network_detail_agents_tab(self, mac_learning=False):
        """Agents tab lists the DHCP agents hosting the network."""
        network = self.networks.first()
        quota_data = self.quota_usages.first()
        self._stub_is_extension_supported(
            {'network-ip-availability': True,
             'mac-learning': mac_learning,
             'network_availability_zone': True,
             'dhcp_agent_scheduler': True})
        self.mock_list_dhcp_agent_hosting_networks.return_value = \
            self.agents.list()
        self.mock_network_get.return_value = network
        self.mock_tenant_quota_usages.return_value = quota_data

        url = reverse('horizon:admin:networks:agents_tab', args=[network.id])
        res = self.client.get(urlunquote(url))

        self.assertTemplateUsed(res, 'horizon/common/_detail.html')
        result_agents = res.context['agents_table'].data
        expected_agents = self.agents.list()
        self.assertItemsEqual(result_agents, expected_agents)

        # dhcp_agent_scheduler is checked by both the tab group and this tab.
        self._check_is_extension_supported(
            {'network-ip-availability': 1,
             'mac-learning': 1,
             'network_availability_zone': 1,
             'dhcp_agent_scheduler': 2})
        self.mock_list_dhcp_agent_hosting_networks.assert_called_once_with(
            test.IsHttpRequest(), network.id)
        self.mock_network_get.assert_called_once_with(
            test.IsHttpRequest(), network.id)
        self.mock_tenant_quota_usages.assert_called_once_with(
            test.IsHttpRequest(), tenant_id=network.tenant_id,
            targets=('subnet',))
    def test_network_detail_subnets_tab_network_exception(self):
        """network_get failure, mac-learning disabled."""
        self._test_network_detail_subnets_tab_network_exception()
    def test_network_detail_network_exception_with_mac_learning(self):
        """network_get failure, mac-learning enabled."""
        self._test_network_detail_subnets_tab_network_exception(
            mac_learning=True)
    @test.create_mocks({api.neutron: ('network_get',
                                      'subnet_list',
                                      'is_extension_supported',
                                      'show_network_ip_availability')})
    def _test_network_detail_subnets_tab_network_exception(self,
                                                           mac_learning=False):
        """A network_get failure on the subnets tab redirects to the index."""
        network_id = self.networks.first().id
        ip_availability = self.ip_availability.get()
        self.mock_show_network_ip_availability.return_value = ip_availability
        self.mock_network_get.side_effect = self.exceptions.neutron
        self.mock_subnet_list.return_value = [self.subnets.first()]
        self._stub_is_extension_supported(
            {'network-ip-availability': True,
             'mac-learning': mac_learning})

        url = urlunquote(reverse('horizon:admin:networks:subnets_tab',
                                 args=[network_id]))
        res = self.client.get(url)

        redir_url = INDEX_URL
        self.assertRedirectsNoFollow(res, redir_url)

        self.mock_show_network_ip_availability.assert_called_once_with(
            test.IsHttpRequest(), network_id)
        self.mock_network_get.assert_called_once_with(test.IsHttpRequest(),
                                                      network_id)
        self.mock_subnet_list.assert_called_once_with(test.IsHttpRequest(),
                                                      network_id=network_id)
        self._check_is_extension_supported(
            {'network-ip-availability': 2,
             'mac-learning': 1})
    def test_network_detail_subnets_tab_subnet_exception(self):
        """subnet_list failure, mac-learning disabled."""
        self._test_network_detail_subnets_tab_subnet_exception()
    def test_network_detail_subnets_tab_subnet_exception_w_mac_learning(self):
        """subnet_list failure, mac-learning enabled."""
        self._test_network_detail_subnets_tab_subnet_exception(
            mac_learning=True)
@test.create_mocks({api.neutron: ('network_get',
'subnet_list',
'show_network_ip_availability',
'is_extension_supported'),
usage.quotas: ('tenant_quota_usages',)})
def _test_network_detail_subnets_tab_subnet_exception(self,
mac_learning=False):
network = self.networks.first()
quota_data = self.quota_usages.first()
self.mock_show_network_ip_availability.return_value = \
self.ip_availability.get()
self.mock_network_get.return_value = network
self.mock_subnet_list.side_effect = self.exceptions.neutron
self._stub_is_extension_supported(
{'network-ip-availability': True,
'mac-learning': mac_learning,
'dhcp_agent_scheduler': True,
'network_availability_zone': True})
self.mock_tenant_quota_usages.return_value = quota_data
url = urlunquote(reverse('horizon:admin:networks:subnets_tab',
args=[network.id]))
res = self.client.get(url)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
subnets = res.context['subnets_table'].data
self.assertEqual(len(subnets), 0)
self.mock_show_network_ip_availability.assert_called_once_with(
test.IsHttpRequest(), network.id)
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_network_get, 2,
mock.call(test.IsHttpRequest(), network.id))
self.mock_subnet_list.assert_called_once_with(test.IsHttpRequest(),
network_id=network.id)
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_tenant_quota_usages, 3,
mock.call(test.IsHttpRequest(), tenant_id=network.tenant_id,
targets=('subnet',)))
self._stub_is_extension_supported(
{'network-ip-availability': 1,
'mac-learning': 1,
'dhcp_agent_scheduler': 2,
'network_availability_zone': 1})
    def test_network_detail_port_exception(self):
        """port-related failure path, mac-learning disabled."""
        self._test_network_detail_subnets_tab_port_exception()
    def test_network_detail_subnets_tab_port_exception_with_mac_learning(self):
        """port-related failure path, mac-learning enabled."""
        self._test_network_detail_subnets_tab_port_exception(mac_learning=True)
    @test.create_mocks({api.neutron: ('network_get',
                                      'subnet_list',
                                      'is_extension_supported',
                                      'show_network_ip_availability'),
                        usage.quotas: ('tenant_quota_usages',)})
    def _test_network_detail_subnets_tab_port_exception(self,
                                                        mac_learning=False):
        """Subnets tab still renders its subnets despite the port failure."""
        network = self.networks.first()
        ip_availability = self.ip_availability.get()
        quota_data = self.quota_usages.first()
        self.mock_show_network_ip_availability.return_value = ip_availability
        self.mock_network_get.return_value = network
        self.mock_subnet_list.return_value = [self.subnets.first()]
        self._stub_is_extension_supported(
            {'network-ip-availability': True,
             'mac-learning': mac_learning,
             'network_availability_zone': True,
             'dhcp_agent_scheduler': True})
        self.mock_tenant_quota_usages.return_value = quota_data

        url = urlunquote(reverse('horizon:admin:networks:subnets_tab',
                                 args=[network.id]))
        res = self.client.get(url)

        self.assertTemplateUsed(res, 'horizon/common/_detail.html')
        subnets = res.context['subnets_table'].data
        self.assertItemsEqual(subnets, [self.subnets.first()])

        self.mock_show_network_ip_availability.assert_called_once_with(
            test.IsHttpRequest(), network.id)
        self.assert_mock_multiple_calls_with_same_arguments(
            self.mock_network_get, 2,
            mock.call(test.IsHttpRequest(), network.id))
        self.mock_subnet_list.assert_called_once_with(test.IsHttpRequest(),
                                                      network_id=network.id)
        self._check_is_extension_supported(
            {'network-ip-availability': 2,
             'mac-learning': 1,
             'network_availability_zone': 1,
             'dhcp_agent_scheduler': 1})
        self.assert_mock_multiple_calls_with_same_arguments(
            self.mock_tenant_quota_usages, 3,
            mock.call(test.IsHttpRequest(), tenant_id=network.tenant_id,
                      targets=('subnet',)))
@test.create_mocks({api.neutron: ('is_extension_supported',),
api.keystone: ('tenant_list',)})
def test_network_create_get(self):
tenants = self.tenants.list()
self.mock_tenant_list.return_value = [tenants, False]
self._stub_is_extension_supported(
{'provider': True,
'network_availability_zone': False,
'subnet_allocation': False})
url = reverse('horizon:admin:networks:create')
res = self.client.get(url)
self.assertTemplateUsed(res, 'horizon/common/_workflow_base.html')
self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest())
self._check_is_extension_supported(
{'provider': 1,
'network_availability_zone': 2,
'subnet_allocation': 1})
    @test.create_mocks({api.neutron: ('network_create',
                                      'is_extension_supported',
                                      'subnetpool_list'),
                        api.keystone: ('tenant_list',)})
    def test_network_create_post(self):
        """POSTing valid create data calls network_create and redirects."""
        tenants = self.tenants.list()
        tenant_id = self.tenants.first().id
        network = self.networks.first()
        self.mock_tenant_list.return_value = [tenants, False]
        self._stub_is_extension_supported(
            {'provider': True,
             'network_availability_zone': False,
             'subnet_allocation': True})
        self.mock_subnetpool_list.return_value = self.subnetpools.list()
        self.mock_network_create.return_value = network
        form_data = {'tenant_id': tenant_id,
                     'name': network.name,
                     'admin_state': network.admin_state_up,
                     'external': True,
                     'shared': True,
                     'network_type': 'local'}
        url = reverse('horizon:admin:networks:create')
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
        self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest())
        self.mock_subnetpool_list.assert_called_once_with(test.IsHttpRequest())
        # Form field names are mapped to neutron API attribute names.
        params = {'name': network.name,
                  'tenant_id': tenant_id,
                  'admin_state_up': network.admin_state_up,
                  'router:external': True,
                  'shared': True,
                  'provider:network_type': 'local'}
        self.mock_network_create.assert_called_once_with(test.IsHttpRequest(),
                                                         **params)
        self._check_is_extension_supported(
            {'provider': 3,
             'network_availability_zone': 2,
             'subnet_allocation': 1})
@test.create_mocks({api.neutron: ('network_create',
'is_extension_supported',
'list_availability_zones',
'subnetpool_list'),
api.keystone: ('tenant_list',)})
def test_network_create_post_with_az(self):
tenants = self.tenants.list()
tenant_id = self.tenants.first().id
network = self.networks.first()
self.mock_tenant_list.return_value = [tenants, False]
self._stub_is_extension_supported(
{'provider': True,
'network_availability_zone': True,
'subnet_allocation': True})
self.mock_list_availability_zones.return_value = \
self.neutron_availability_zones.list()
self.mock_subnetpool_list.return_value = self.subnetpools.list()
self.mock_network_create.return_value = network
form_data = {'tenant_id': tenant_id,
'name': network.name,
'admin_state': network.admin_state_up,
'external': True,
'shared': True,
'network_type': 'local',
'az_hints': ['nova']}
url = reverse('horizon:admin:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest())
self._stub_is_extension_supported(
{'provider': 1,
'network_availability_zone': 1,
'subnet_allocation': 1})
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_list_availability_zones, 2,
mock.call(test.IsHttpRequest(), "network", "available"))
self.mock_subnetpool_list.assert_called_once_with(test.IsHttpRequest())
params = {'name': network.name,
'tenant_id': tenant_id,
'admin_state_up': network.admin_state_up,
'router:external': True,
'shared': True,
'provider:network_type': 'local',
'availability_zone_hints': ['nova']}
self.mock_network_create.assert_called_once_with(test.IsHttpRequest(),
**params)
    @test.create_mocks({api.neutron: ('network_create',
                                      'subnet_create',
                                      'is_extension_supported',
                                      'subnetpool_list'),
                        api.keystone: ('tenant_list',)})
    def test_network_create_post_with_subnet(self):
        """Creating a network with 'with_subnet' also creates the subnet."""
        tenants = self.tenants.list()
        tenant_id = self.tenants.first().id
        network = self.networks.first()
        subnet = self.subnets.first()
        self._stub_is_extension_supported(
            {'provider': True,
             'network_availability_zone': False,
             'subnet_allocation': True})
        self.mock_tenant_list.return_value = [tenants, False]
        self.mock_subnetpool_list.return_value = self.subnetpools.list()
        self.mock_network_create.return_value = network
        self.mock_subnet_create.return_value = subnet
        form_data = {'tenant_id': tenant_id,
                     'name': network.name,
                     'admin_state': network.admin_state_up,
                     'external': True,
                     'shared': True,
                     'network_type': 'local',
                     'with_subnet': True}
        # Merge in the subnet-step form fields for the workflow.
        form_data.update(tests.form_data_subnet(subnet, allocation_pools=[]))
        url = reverse('horizon:admin:networks:create')
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
        self._check_is_extension_supported(
            {'provider': 3,
             'network_availability_zone': 2,
             'subnet_allocation': 1})
        self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest())
        self.mock_subnetpool_list.assert_called_once_with(test.IsHttpRequest())
        params = {'name': network.name,
                  'tenant_id': tenant_id,
                  'admin_state_up': network.admin_state_up,
                  'router:external': True,
                  'shared': True,
                  'provider:network_type': 'local'}
        self.mock_network_create.assert_called_once_with(test.IsHttpRequest(),
                                                         **params)
        subnet_params = {'name': subnet.name,
                         'network_id': subnet.network_id,
                         'cidr': subnet.cidr,
                         'enable_dhcp': subnet.enable_dhcp,
                         'gateway_ip': subnet.gateway_ip,
                         'ip_version': subnet.ip_version}
        self.mock_subnet_create.assert_called_once_with(test.IsHttpRequest(),
                                                        **subnet_params)
    @test.create_mocks({api.neutron: ('network_create',
                                      'is_extension_supported',
                                      'subnetpool_list'),
                        api.keystone: ('tenant_list',)})
    def test_network_create_post_network_exception(self):
        """A neutron error on create is reported via a message, not a form
        error, and the user is still redirected to the index page."""
        tenants = self.tenants.list()
        tenant_id = self.tenants.first().id
        network = self.networks.first()
        self.mock_tenant_list.return_value = [tenants, False]
        self._stub_is_extension_supported(
            {'provider': True,
             'network_availability_zone': False,
             'subnet_allocation': True})
        self.mock_subnetpool_list.return_value = self.subnetpools.list()
        self.mock_network_create.side_effect = self.exceptions.neutron
        form_data = {'tenant_id': tenant_id,
                     'name': network.name,
                     'admin_state': network.admin_state_up,
                     'external': True,
                     'shared': False,
                     'network_type': 'local'}
        url = reverse('horizon:admin:networks:create')
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
        self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest())
        self._check_is_extension_supported(
            {'provider': 3,
             'network_availability_zone': 2,
             'subnet_allocation': 1})
        self.mock_subnetpool_list.assert_called_once_with(test.IsHttpRequest())
        params = {'name': network.name,
                  'tenant_id': tenant_id,
                  'admin_state_up': network.admin_state_up,
                  'router:external': True,
                  'shared': False,
                  'provider:network_type': 'local'}
        self.mock_network_create.assert_called_once_with(test.IsHttpRequest(),
                                                         **params)
    @test.create_mocks({api.neutron: ('is_extension_supported',),
                        api.keystone: ('tenant_list',)})
    def test_network_create_vlan_segmentation_id_invalid(self):
        """A VLAN id outside 1-4094 must produce a workflow error."""
        tenants = self.tenants.list()
        tenant_id = self.tenants.first().id
        network = self.networks.first()
        self.mock_tenant_list.return_value = [tenants, False]
        self._stub_is_extension_supported(
            {'network_availability_zone': False,
             'subnet_allocation': False,
             'provider': True})
        form_data = {'tenant_id': tenant_id,
                     'name': network.name,
                     'admin_state': network.admin_state_up,
                     'external': True,
                     'shared': False,
                     'network_type': 'vlan',
                     'physical_network': 'default',
                     # One above the maximum valid VLAN id (4094).
                     'segmentation_id': 4095}
        url = reverse('horizon:admin:networks:create')
        res = self.client.post(url, form_data)
        self.assertWorkflowErrors(res, 1)
        self.assertContains(res, "1 through 4094")
        self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest())
        self._check_is_extension_supported(
            {'network_availability_zone': 2,
             'subnet_allocation': 1,
             'provider': 2})
    @test.create_mocks({api.neutron: ('is_extension_supported',),
                        api.keystone: ('tenant_list',)})
    def test_network_create_gre_segmentation_id_invalid(self):
        """A GRE key outside 1..2**32-1 must produce a workflow error."""
        tenants = self.tenants.list()
        tenant_id = self.tenants.first().id
        network = self.networks.first()
        self.mock_tenant_list.return_value = [tenants, False]
        self._stub_is_extension_supported(
            {'network_availability_zone': False,
             'subnet_allocation': False,
             'provider': True})
        form_data = {'tenant_id': tenant_id,
                     'name': network.name,
                     'admin_state': network.admin_state_up,
                     'external': True,
                     'shared': False,
                     'network_type': 'gre',
                     'physical_network': 'default',
                     # One above the maximum valid GRE key (2**32 - 1).
                     'segmentation_id': (2 ** 32) + 1}
        url = reverse('horizon:admin:networks:create')
        res = self.client.post(url, form_data)
        self.assertWorkflowErrors(res, 1)
        self.assertContains(res, "1 through %s" % ((2 ** 32) - 1))
        self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest())
        self._check_is_extension_supported(
            {'network_availability_zone': 2,
             'subnet_allocation': 1,
             'provider': 2})
    @test.create_mocks({api.neutron: ('is_extension_supported',),
                        api.keystone: ('tenant_list',)})
    @test.update_settings(
        OPENSTACK_NEUTRON_NETWORK={
            'segmentation_id_range': {'vxlan': [10, 20]}})
    def test_network_create_vxlan_segmentation_id_custom(self):
        """The configured custom VXLAN id range [10, 20] is enforced."""
        tenants = self.tenants.list()
        tenant_id = self.tenants.first().id
        network = self.networks.first()
        self.mock_tenant_list.return_value = [tenants, False]
        self._stub_is_extension_supported(
            {'network_availability_zone': False,
             'subnet_allocation': False,
             'provider': True})
        form_data = {'tenant_id': tenant_id,
                     'name': network.name,
                     'admin_state': network.admin_state_up,
                     'external': True,
                     'shared': False,
                     'network_type': 'vxlan',
                     'physical_network': 'default',
                     # One below the configured minimum (10).
                     'segmentation_id': 9}
        url = reverse('horizon:admin:networks:create')
        res = self.client.post(url, form_data)
        self.assertWorkflowErrors(res, 1)
        self.assertContains(res, "10 through 20")
        self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest())
        self._check_is_extension_supported(
            {'network_availability_zone': 2,
             'subnet_allocation': 1,
             'provider': 2})
    @test.create_mocks({api.neutron: ('is_extension_supported',),
                        api.keystone: ('tenant_list',)})
    @test.update_settings(
        OPENSTACK_NEUTRON_NETWORK={
            'supported_provider_types': []})
    def test_network_create_no_provider_types(self):
        """With no supported provider types the type field becomes hidden."""
        tenants = self.tenants.list()
        self.mock_tenant_list.return_value = [tenants, False]
        self._stub_is_extension_supported(
            {'network_availability_zone': False,
             'subnet_allocation': False,
             'provider': True})
        url = reverse('horizon:admin:networks:create')
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'horizon/common/_workflow_base.html')
        self.assertContains(
            res,
            '<input type="hidden" name="network_type" id="id_network_type" />',
            html=True)
        self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest())
        self._check_is_extension_supported(
            {'network_availability_zone': 2,
             'subnet_allocation': 1,
             'provider': 1})
    @test.create_mocks({api.neutron: ('is_extension_supported',),
                        api.keystone: ('tenant_list',)})
    @test.update_settings(
        OPENSTACK_NEUTRON_NETWORK={
            'supported_provider_types': ['local', 'flat', 'gre']})
    def test_network_create_unsupported_provider_types(self):
        """The type choices are limited to the configured provider types."""
        tenants = self.tenants.list()
        self.mock_tenant_list.return_value = [tenants, False]
        self._stub_is_extension_supported(
            {'network_availability_zone': False,
             'subnet_allocation': False,
             'provider': True})
        url = reverse('horizon:admin:networks:create')
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'horizon/common/_workflow_base.html')
        network_type = res.context['form'].fields['network_type']
        self.assertListEqual(list(network_type.choices), [('local', 'Local'),
                                                          ('flat', 'Flat'),
                                                          ('gre', 'GRE')])
        self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest())
        self._check_is_extension_supported(
            {'network_availability_zone': 2,
             'subnet_allocation': 1,
             'provider': 1})
@test.create_mocks({api.neutron: ('network_get',)})
def test_network_update_get(self):
network = self.networks.first()
self.mock_network_get.return_value = network
url = reverse('horizon:admin:networks:update', args=[network.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'admin/networks/update.html')
self.mock_network_get.assert_called_once_with(test.IsHttpRequest(),
network.id,
expand_subnet=False)
@test.create_mocks({api.neutron: ('network_get',)})
def test_network_update_get_exception(self):
network = self.networks.first()
self.mock_network_get.side_effect = self.exceptions.neutron
url = reverse('horizon:admin:networks:update', args=[network.id])
res = self.client.get(url)
redir_url = INDEX_URL
self.assertRedirectsNoFollow(res, redir_url)
self.mock_network_get.assert_called_once_with(test.IsHttpRequest(),
network.id,
expand_subnet=False)
    @test.create_mocks({api.neutron: ('network_update',
                                      'network_get',)})
    def test_network_update_post(self):
        """POSTing the update form calls network_update with mapped params."""
        network = self.networks.first()
        self.mock_network_update.return_value = network
        self.mock_network_get.return_value = network
        form_data = {'network_id': network.id,
                     'name': network.name,
                     'tenant_id': network.tenant_id,
                     'admin_state': network.admin_state_up,
                     'shared': True,
                     'external': True}
        url = reverse('horizon:admin:networks:update', args=[network.id])
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, INDEX_URL)
        # 'external' on the form maps to the 'router:external' API attribute.
        params = {'name': network.name,
                  'shared': True,
                  'admin_state_up': network.admin_state_up,
                  'router:external': True}
        self.mock_network_update.assert_called_once_with(test.IsHttpRequest(),
                                                         network.id,
                                                         **params)
        self.mock_network_get.assert_called_once_with(test.IsHttpRequest(),
                                                      network.id,
                                                      expand_subnet=False)
    @test.create_mocks({api.neutron: ('network_update',
                                      'network_get',)})
    def test_network_update_post_exception(self):
        """A neutron error on update still redirects back to the index."""
        network = self.networks.first()
        params = {'name': network.name,
                  'shared': False,
                  'admin_state_up': network.admin_state_up,
                  'router:external': False}
        self.mock_network_update.side_effect = self.exceptions.neutron
        self.mock_network_get.return_value = network
        form_data = {'network_id': network.id,
                     'name': network.name,
                     'tenant_id': network.tenant_id,
                     'admin_state': network.admin_state_up,
                     'shared': False,
                     'external': False}
        url = reverse('horizon:admin:networks:update', args=[network.id])
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, INDEX_URL)
        self.mock_network_update.assert_called_once_with(test.IsHttpRequest(),
                                                         network.id,
                                                         **params)
        self.mock_network_get.assert_called_once_with(test.IsHttpRequest(),
                                                      network.id,
                                                      expand_subnet=False)
    @test.create_mocks({api.neutron: ('network_list',
                                      'network_delete',
                                      'list_dhcp_agent_hosting_networks',
                                      'is_extension_supported'),
                        api.keystone: ('tenant_list',)})
    def test_delete_network(self):
        """The table delete action removes the network and redirects."""
        tenants = self.tenants.list()
        network = self.networks.first()
        self.mock_list_dhcp_agent_hosting_networks.return_value = \
            self.agents.list()
        self._stub_is_extension_supported(
            {'network_availability_zone': True,
             'dhcp_agent_scheduler': True})
        self.mock_tenant_list.return_value = [tenants, False]
        self.mock_network_list.return_value = [network]
        self.mock_network_delete.return_value = None
        # Row action name encodes the target network id.
        form_data = {'action': 'networks__delete__%s' % network.id}
        res = self.client.post(INDEX_URL, form_data)
        self.assertRedirectsNoFollow(res, INDEX_URL)
        self.mock_list_dhcp_agent_hosting_networks.assert_called_once_with(
            test.IsHttpRequest(), network.id)
        self._check_is_extension_supported(
            {'network_availability_zone': 1,
             'dhcp_agent_scheduler': 2})
        self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest())
        self.mock_network_list.assert_called_once_with(test.IsHttpRequest())
        self.mock_network_delete.assert_called_once_with(test.IsHttpRequest(),
                                                         network.id)
    @test.create_mocks({api.neutron: ('network_list',
                                      'network_delete',
                                      'list_dhcp_agent_hosting_networks',
                                      'is_extension_supported'),
                        api.keystone: ('tenant_list',)})
    def test_delete_network_exception(self):
        """A neutron error during delete still redirects to the index."""
        tenants = self.tenants.list()
        network = self.networks.first()
        self.mock_list_dhcp_agent_hosting_networks.return_value = \
            self.agents.list()
        self._stub_is_extension_supported(
            {'network_availability_zone': True,
             'dhcp_agent_scheduler': True})
        self.mock_tenant_list.return_value = [tenants, False]
        self.mock_network_list.return_value = [network]
        self.mock_network_delete.side_effect = self.exceptions.neutron
        form_data = {'action': 'networks__delete__%s' % network.id}
        res = self.client.post(INDEX_URL, form_data)
        self.assertRedirectsNoFollow(res, INDEX_URL)
        self.mock_list_dhcp_agent_hosting_networks.assert_called_once_with(
            test.IsHttpRequest(), network.id)
        self._check_is_extension_supported(
            {'network_availability_zone': 1,
             'dhcp_agent_scheduler': 2})
        self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest())
        self.mock_network_list.assert_called_once_with(test.IsHttpRequest())
        self.mock_network_delete.assert_called_once_with(test.IsHttpRequest(),
                                                         network.id)
    @test.create_mocks({api.neutron: ('is_extension_supported',)})
    @test.update_settings(FILTER_DATA_FIRST={'admin.networks': True})
    def test_networks_list_with_admin_filter_first(self):
        """With FILTER_DATA_FIRST set, the index lists nothing until a
        filter is applied (no network_list call is mocked or expected)."""
        self._stub_is_extension_supported(
            {'network_availability_zone': True,
             'dhcp_agent_scheduler': True})
        res = self.client.get(reverse('horizon:admin:networks:index'))
        self.assertTemplateUsed(res, INDEX_TEMPLATE)
        networks = res.context['networks_table'].data
        self.assertItemsEqual(networks, [])
        self._check_is_extension_supported(
            {'network_availability_zone': 1,
             'dhcp_agent_scheduler': 1})
    @test.create_mocks({api.keystone: ('tenant_list',),
                        api.neutron: ('is_extension_supported',)})
    def test_networks_list_with_non_exist_tenant_filter(self):
        """Filtering by an unknown project name yields an empty table."""
        self._stub_is_extension_supported(
            {'network_availability_zone': True,
             'dhcp_agent_scheduler': True})
        self.mock_tenant_list.return_value = [self.tenants.list(), False]
        # First POST stores the filter; the subsequent GET applies it.
        self.client.post(
            reverse('horizon:admin:networks:index'),
            data={'networks__filter_admin_networks__q_field': 'project',
                  'networks__filter_admin_networks__q': 'non_exist_tenant'})
        res = self.client.get(reverse('horizon:admin:networks:index'))
        self.assertTemplateUsed(res, INDEX_TEMPLATE)
        networks = res.context['networks_table'].data
        self.assertItemsEqual(networks, [])
        self._check_is_extension_supported(
            {'network_availability_zone': 2,
             'dhcp_agent_scheduler': 2})
        self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest())
    @test.create_mocks({api.neutron: ('is_extension_supported',),
                        api.keystone: ('tenant_list',)})
    def test_network_create_without_physical_networks(self):
        """With no configured physical networks the field is a free-form
        CharField rather than a choice field."""
        tenants = self.tenants.list()
        self.mock_tenant_list.return_value = [tenants, False]
        self._stub_is_extension_supported(
            {'provider': True,
             'network_availability_zone': False,
             'subnet_allocation': False})
        url = reverse('horizon:admin:networks:create')
        res = self.client.get(url)
        physical_network = res.context['form'].fields['physical_network']
        self.assertEqual(type(physical_network), forms.CharField)
        self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest())
        self._check_is_extension_supported(
            {'provider': 1,
             'network_availability_zone': 2,
             'subnet_allocation': 1})
    @test.create_mocks({api.neutron: ('is_extension_supported',),
                        api.keystone: ('tenant_list',)})
    @test.update_settings(
        OPENSTACK_NEUTRON_NETWORK={
            'physical_networks': ['default', 'test']})
    def test_network_create_with_physical_networks(self):
        """Configured physical networks turn the field into a choice list."""
        tenants = self.tenants.list()
        self.mock_tenant_list.return_value = [tenants, False]
        self._stub_is_extension_supported(
            {'provider': True,
             'network_availability_zone': False,
             'subnet_allocation': False})
        url = reverse('horizon:admin:networks:create')
        res = self.client.get(url)
        physical_network = res.context['form'].fields['physical_network']
        self.assertEqual(type(physical_network), forms.ThemableChoiceField)
        self.assertListEqual(list(physical_network.choices),
                             [('default', 'default'), ('test', 'test')])
        self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest())
        self._check_is_extension_supported(
            {'provider': 1,
             'network_availability_zone': 2,
             'subnet_allocation': 1})
| |
import pygame
import time
class Background(pygame.sprite.Sprite):
    """Background sprite: loads an image and anchors it at *location*."""

    def __init__(self, image, location):
        # Initialise the base Sprite before loading our surface.
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load(image)
        self.rect = self.image.get_rect()
        self.rect.topleft = location
class DrawButton:
    """A clickable rectangular button with centred text and a click sound.

    (position_x, position_y) is the button's *centre*; the rectangle is
    derived from it and (b_width, b_height).
    """

    def __init__(self, screen, b_color, t_color, text, b_width, b_height, position_x, position_y):
        self.screen = screen
        self.b_color = b_color  # button fill colour
        self.t_color = t_color  # text colour
        self.text = text
        self.b_width = b_width
        self.b_height = b_height
        self.position_x = position_x
        self.position_y = position_y
        self.clicked = False  # latch used by singleclick()
        self.sound = LoadSound("./assets/sounds/click.wav")
        self.font = pygame.font.SysFont("Times", 40)
        self.draw()

    def draw(self, image=""):
        # NOTE(review): 'self.b_color or image' only uses *image* when the
        # colour is falsy, and would then hand a string to draw.rect --
        # confirm the intended fallback behaviour.
        pygame.draw.rect(self.screen, self.b_color or image,
                         [self.position_x - self.b_width*0.5, self.position_y - self.b_height*0.5, self.b_width, self.b_height], 0)
        text = self.font.render(str(self.text), 1, self.t_color)
        # Centre the rendered text inside the button rectangle.
        self.screen.blit(text, (self.position_x - self.b_width*0.5 + self.b_width*0.5 - text.get_width()*0.5,
                                self.position_y - self.b_height * 0.5 + self.b_height*0.5 - text.get_height()*0.5))

    def collision(self, new_color=(0, 0, 0)):
        # Check for collision with mouse and change background color.
        # Returns True on a click; otherwise implicitly returns None.
        mouse = pygame.mouse.get_pos()
        if (mouse[0] in range(int(self.position_x - self.b_width*0.5), int(self.position_x - self.b_width*0.5 + self.b_width))) \
                and (mouse[1] in range(int(self.position_y - self.b_height * 0.5), int( self.position_y - self.b_height * 0.5 + self.b_height))):
            self.b_color = new_color
            self.draw()
            # If pressed on a button change state
            if pygame.mouse.get_pressed()[0]:
                self.sound.play()
                # Crude debounce so one physical press fires once.
                time.sleep(0.3)
                return True

    def singleclick(self, new_color=(0,0,0)):
        # Fire only once until self.clicked is reset by the caller.
        if self.collision(new_color) and self.clicked == False:
            self.clicked = True
            return True
        else:
            return False
def checkCollision(mouse, posX, posY, width, height):
    """Return whether *mouse* (x, y) lies inside a width x height box
    centred on (*posX*, *posY*).

    Uses integer ``range`` membership, matching the pixel-grid semantics
    used elsewhere in this module.
    """
    half_w = width * 0.5
    half_h = height * 0.5
    in_x = mouse[0] in range(int(posX - half_w), int(posX - half_w + width))
    in_y = mouse[1] in range(int(posY - half_h), int(posY - half_h + height))
    return in_x and in_y
class DrawText:
    """Render a line of text centred at (position_x, position_y).

    BUG FIX: the original draw() overwrote ``self.text`` with the rendered
    pygame Surface, so any second draw() call crashed (``font.render`` would
    receive a Surface instead of a string).  The text is now rendered into a
    local surface, keeping ``self.text`` a string and making draw() safely
    re-callable.
    """

    def __init__(self, screen, text, color, position_x, position_y, transparent=1):
        self.screen = screen
        self.text = text
        self.color = color
        self.position_x = position_x
        self.position_y = position_y
        self.transparent = transparent  # antialias flag passed to font.render
        self.font = pygame.font.SysFont("Times", 40)
        self.draw()

    def draw(self):
        # Render into a local surface; never clobber self.text.
        surface = self.font.render(self.text, self.transparent, self.color)
        self.screen.blit(surface,
                         (self.position_x - surface.get_width() * 0.5,
                          self.position_y - surface.get_height() * 0.5))
class DrawImage:
    """Load an image and immediately blit it centred at (x, y)."""

    def __init__(self, screen, image, position_x, position_y):
        self.screen = screen
        self.image = pygame.image.load(image)
        self.position_x = position_x
        self.position_y = position_y
        self.draw()

    def draw(self):
        """Blit the image so its centre sits at the stored position."""
        img_w, img_h = self.image.get_rect().size
        self.screen.blit(self.image,
                         (self.position_x - img_w * 0.5,
                          self.position_y - img_h * 0.5))
class BaseImage:
    """An image positioned by its centre point; drawing is left to callers
    (unlike DrawImage, nothing is blitted at construction time)."""

    def __init__(self, screen, image, position_x, position_y):
        self.screen = screen
        self.image = pygame.image.load(image)
        self.position_x = position_x
        self.position_y = position_y

    def draw(self):
        """Blit the image centred on (position_x, position_y)."""
        img_w, img_h = self.image.get_rect().size
        self.screen.blit(self.image,
                         (self.position_x - img_w * 0.5,
                          self.position_y - img_h * 0.5))
class DrawCard(BaseImage):
    """A card image scaled to one third of its source size, drawn centred,
    with click detection."""

    def __init__(self, screen, image, posX, postY, cardId):
        super(DrawCard, self).__init__(screen, image, posX, postY)
        # Scale the loaded card down to a third of its original size.
        size = self.image.get_rect().size
        self.image = pygame.transform.scale(self.image, (int(size[0] / 3), int(size[1] / 3)))
        size = self.image.get_rect().size
        self.cardId = cardId
        self.width = size[0]
        self.height = size[1]
        self.draw()

    def collision(self):
        # Returns True when clicked; otherwise implicitly returns None.
        mouse = pygame.mouse.get_pos()
        if checkCollision(mouse, self.position_x, self.position_y, self.width, self.height):
            if pygame.mouse.get_pressed()[0]:
                # Crude debounce against repeat-fire while held down.
                time.sleep(0.3)
                return True
class Player:
    """State and movement for one player token on the grid board."""

    def __init__(self, player_id, name, score, position = (0,11), roll = 0):
        self.id = player_id
        self.name = name
        self.score = score
        self.position = position
        self.roll = roll
        self.category = 0
        # Grid coordinates; (0, 11) is the starting cell below the board.
        self.x = 0
        self.y = 11
        self.rect = (self.x, self.y)  # NOTE(review): never updated after init
        self.moved = True  # True means no move is pending
        self.steps = 0
        self.direction = None

    def directionset(self, direction):
        # Pending move direction: a string, or a sequence whose first
        # element is the direction name (both forms accepted by update()).
        self.direction = direction

    def relocate(self, c, x, y):
        # NOTE(review): writes self.location, while __init__ defines
        # self.position -- confirm which attribute callers actually read.
        self.c = c
        self.x = x
        self.y = y
        self.location = (x,y)

    def add_category(self, category):
        self.category = category

    def add_steps(self, steps):
        self.steps = steps

    def add_type(self, type):
        # 'type' shadows the builtin of the same name.
        self.type = type

    def canmove(self):
        # Despite the name, this marks the player as *ready* to move.
        self.moved = False

    def update(self, screen, width, height, grid_height=10):
        """Apply a pending move, draw the token, return True on a win."""
        print(self.direction)
        if self.moved == False:
            if self.direction == "Left" or self.direction[0] == "Left":
                self.x -= 1
                self.moved = True
            elif self.direction == "Right" or self.direction[0] == "Right":
                self.x += 1
                self.moved = True
            elif self.direction == "Up" or self.direction[0] == "Up":
                self.y -= 1
                self.moved = True
            elif self.direction == "Down" or self.direction[0] == "Down":
                self.y += 1
                self.moved = True
        print(self.x)
        print(self.y)
        # Draw the token as a small white outlined square at the grid cell.
        pygame.draw.rect(screen, (255, 255,255),
                         [width / 20 + width / 8 * self.x,
                          height / grid_height * self.y + height / 50, 8,
                          8], 2)
        if self.y < 0:
            # Moving above the top row wins the game.
            # NOTE(review): drawTextInRect builds a pygame.Rect from this
            # 2-tuple; pygame.Rect requires 4 values -- confirm this path
            # actually works at runtime.
            drawTextInRect(screen, "Player {} Wins!".format(self.name), (0, 0, 0), (width / 2, height / 2),
                           pygame.font.SysFont("Arial", 40))
            print("Terminate Game")
            return True
class Point:
    """A highlightable grid point in one of the four category columns."""

    def __init__(self, x, y, category, highlight):
        self.x = x
        self.y = y
        self.category = category
        # 0 = normal, 1 = highlighted (doubles the drawn size, turns blue).
        self.highlight = highlight

    def toggle_highlight(self):
        """Flip the highlight state between 0 and 1.

        BUG FIX: this method was originally named ``highlight`` and was
        shadowed by the instance attribute of the same name assigned in
        ``__init__``; calling it raised TypeError ('int' object is not
        callable).  Renamed so it is reachable -- no caller could have used
        the old name successfully.
        """
        if self.highlight == 0:
            self.highlight = 1
        else:
            self.highlight = 0

    def drawself(self, screen, width, height, grid_height=10):
        """Draw the point; only points on the board (coords >= 0) render."""
        if self.x >= 0 and self.y >= 0:
            pygame.draw.rect(
                screen,
                (0, 0, (0 + self.highlight) * 255),
                [width / 20 + width / 4 * self.category + width / 8 * self.x,
                 height / grid_height * self.y + height / 50,
                 8 * (1 + self.highlight),
                 8 * (1 + self.highlight)],
                2)
        else:
            print("Player is not in game yet")
class Grid:
    """The four-category game board: coloured columns, points and players."""

    def __init__(self, grid_width=2, grid_height=10):
        self.points =[]
        self.players =[]
        self.grid_width = grid_width
        self.grid_height = grid_height
        # Background colours of the four category columns
        # (red, blue, yellow, green).
        self.colorlist = ((255,0,0), (0,0,255), (255, 255, 0), (0,255, 0))

    def addplayer(self, player):
        # Keep the roster duplicate-free.
        if not self.players.__contains__(player):
            self.players.append(player)

    #draw the grid and update whilst checking if someone wins
    #if someone wins, def returns True
    def draw(self, screen, width, height):
        """Draw the column backgrounds and all grid points."""
        #draw backgroundcolors
        i = 1
        for counter in range(0,4):
            pygame.draw.rect(screen, self.colorlist[counter], [i, 0, width / 4, height], 0)
            i += width / 4
        #TODO fix player highlight and movement
        # NOTE(review): each draw() call appends fresh Point pairs to
        # self.points, so the list grows on every frame -- confirm intended.
        for c in range(0,4):
            templist = []
            for x in range(0, self.grid_width):
                for y in range(0, self.grid_height):
                    Point(x, y ,c, 0).drawself(screen, width, height, self.grid_height)
                    templist.append(Point(x, y ,c, 0))
                    templist.append(Point(x, y ,c, 1))
            self.points.append(templist)
# draw some text into an area of a surface
# automatically wraps words
# returns any text that didn't get blitted
def drawTextInRect(surface, text, color, rect, font, aa=False, bkg=None):
    """Word-wrap *text* into *rect* on *surface*; return the unblitted tail.

    Lines are broken at the last space that still fits in the rect width;
    wrapping stops once the next line would overflow the rect bottom.
    """
    rect = pygame.Rect(rect)
    y = rect.top
    lineSpacing = -2  # negative spacing packs lines slightly tighter

    # get the height of the font
    fontHeight = font.size("Tg")[1]

    while text:
        i = 1

        # determine if the row of text will be outside our area
        if y + fontHeight > rect.bottom:
            break

        # determine maximum width of line
        while font.size(text[:i])[0] < rect.width and i < len(text):
            i += 1

        # if we've wrapped the text, then adjust the wrap to the last word
        if i < len(text):
            i = text.rfind(" ", 0, i) + 1

        # render the line and blit it to the surface
        if bkg:
            image = font.render(text[:i], 1, color, bkg)
            image.set_colorkey(bkg)
        else:
            image = font.render(text[:i], aa, color)

        surface.blit(image, (rect.left, y))
        y += fontHeight + lineSpacing

        # remove the text we just blitted
        text = text[i:]

    return text
class getPressed:
    """One-shot mouse-press check; all work happens in __init__.

    NOTE(review): ``self.timer`` is set to 0.0 immediately before the
    ``self.timer > waittime`` comparison, so the first branch can never be
    taken within a single construction; each instantiation runs the ``else``
    arm once and only polls the mouse if ``boolswitch`` was already True
    (which it never is here).  Confirm whether this class was meant to be
    re-entered/polled rather than constructed fresh each time.
    """

    def __init__(self, waittime):
        self.done = False
        self.boolswitch = False
        self.timer = 0.0
        self.clock = time.time()
        self.x = time  # NOTE(review): binds the module; overwritten below
        if self.timer > waittime:
            self.boolswitch = True
            self.timer = 0.0
        else:
            # Accumulate the elapsed time since the last clock sample.
            self.x = time.time()
            self.timer += (self.x - self.clock)
            self.clock = self.x
        if self.boolswitch:
            self.click = pygame.mouse.get_pressed()
            if self.click[0] == 1:
                self.boolswitch = False
                self.done = True
                self.clock = time.time()
class LoadSound:
    """Thin wrapper around pygame.mixer.Sound with a play/stop latch so a
    sound is not restarted while it is flagged as playing."""

    def __init__(self, file, volume=1.0, loop=0):
        self.file = file
        self.volume = volume
        self.loop = loop
        self.music = pygame.mixer.Sound(self.file)
        self.is_playing = False

    def play(self):
        """Start playback unless the latch says we are already playing."""
        if not self.is_playing:
            self.music.set_volume(self.volume)
            self.music.play(self.loop)
            self.is_playing = True

    def stop(self):
        """Stop playback and clear the latch."""
        self.music.stop()
        self.is_playing = False
| |
##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2014, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import re
import sys
import functools
import collections
import Gaffer
import GafferUI
from Qt import QtWidgets
## A class for laying out widgets to represent all the plugs held on a particular parent.
#
# Per-plug metadata support :
#
# - "<layoutName>:index" controls ordering of plugs within the layout
# - "<layoutName>:section" places the plug in a named section of the layout
# - "<layoutName>:divider" specifies whether or not a plug should be followed by a divider
# - "<layoutName>:activator" the name of an activator to control editability
# - "<layoutName>:visibilityActivator" the name of an activator to control visibility
# - "<layoutName>:accessory" groups as an accessory to the previous widget
# - "<layoutName>:width" gives a specific width to the plug's widget
#
# Per-parent metadata support :
#
# - "<layoutName>:section:sectionName:summary" dynamic metadata entry returning a
# string to be used as a summary for the section.
# - "<layoutName>:section:sectionName:collapsed" boolean indicating whether or
# not a section should be collapsed initially.
# - "<layoutName>:activator:activatorName" a dynamic boolean metadata entry to control
# the activation of plugs within the layout
# - "<layoutName>:activators" a dynamic metadata entry returning a CompoundData of booleans
# for several named activators.
#
# ## Custom widgets
#
# Custom widgets unassociated with any specific plugs may also be added to plug layouts.
# This can be useful when customising user interfaces for a particular facility - for instance
# to display asset management information for each node.
#
# A custom widget is specified using parent metadata entries starting with
# "<layoutName>:customWidget:Name:" prefixes, where "Name" is a unique identifier for the
# custom widget :
#
# - "<layoutName>:customWidget:Name:widgetType" specifies a string containing the fully qualified
# name of a python callable which will be used to create the widget. This callable will be passed
# the same parent GraphComponent (node or plug) that the PlugLayout is being created for.
# - "<layoutName>:customWidget:Name:*" as for the standard per-plug "<layoutName>:*" metadata, so custom
# widgets may be assigned to a section, reordered, given activators etc.
#
class PlugLayout( GafferUI.Widget ) :
	"""Lays out widgets to represent all the plugs held on a particular
	parent (a Gaffer.Node or Gaffer.Plug). The layout is driven entirely
	by the metadata conventions documented at the top of this file, and
	rebuilds itself automatically as plugs and metadata change.
	"""

	# We use this when we can't find a ScriptNode to provide the context.
	__fallbackContext = Gaffer.Context()

	def __init__( self, parent, orientation = GafferUI.ListContainer.Orientation.Vertical, layoutName = "layout", rootSection = "", embedded = False, **kw ) :

		assert( isinstance( parent, ( Gaffer.Node, Gaffer.Plug ) ) )

		# embedded indicates that the PlugLayout is embedded in another layout
		# which affects how the widget is built
		self.__embedded = embedded

		# A node viewed from its root gets a tabbed layout; a plug (or a
		# node viewed from a subsection) gets a collapsible layout.
		self.__layout = _TabLayout( orientation, embedded = embedded ) if isinstance( parent, Gaffer.Node ) and not rootSection else _CollapsibleLayout( orientation )

		GafferUI.Widget.__init__( self, self.__layout, **kw )

		self.__parent = parent
		self.__readOnly = False
		self.__layoutName = layoutName
		# not to be confused with __rootSection, which holds an actual _Section object
		self.__rootSectionName = rootSection

		# we need to connect to the childAdded/childRemoved signals on
		# the parent so we can update the ui when plugs are added and removed.
		parent.childAddedSignal().connect( Gaffer.WeakMethod( self.__childAddedOrRemoved ), scoped = False )
		parent.childRemovedSignal().connect( Gaffer.WeakMethod( self.__childAddedOrRemoved ), scoped = False )

		# since our layout is driven by metadata, we must respond dynamically
		# to changes in that metadata.
		Gaffer.Metadata.plugValueChangedSignal().connect( Gaffer.WeakMethod( self.__plugMetadataChanged ), scoped = False )

		# and since our activations are driven by plug values, we must respond
		# when the plugs are dirtied.
		self.__node().plugDirtiedSignal().connect( Gaffer.WeakMethod( self.__plugDirtied ), scoped = False )

		# frequently events that trigger a ui update come in batches, so we
		# perform the update lazily using a LazyMethod. the dirty variables
		# keep track of the work we'll need to do in the update.
		self.__layoutDirty = True
		self.__activationsDirty = True
		self.__summariesDirty = True

		# mapping from layout item to widget, where the key is either a plug or
		# the name of a custom widget (as returned by layoutOrder()).
		self.__widgets = {}
		self.__rootSection = _Section( self.__parent )

		# set up an appropriate default context in which to view the plugs.
		scriptNode = self.__node() if isinstance( self.__node(), Gaffer.ScriptNode ) else self.__node().scriptNode()
		self.setContext( scriptNode.context() if scriptNode is not None else self.__fallbackContext )

		# schedule our first update, which will take place when we become
		# visible for the first time.
		self.__updateLazily()

	def getReadOnly( self ) :
		"""Returns the read-only state applied to all widgets in the layout."""

		return self.__readOnly

	def setReadOnly( self, readOnly ) :
		"""Applies (or removes) a read-only state on every widget in the layout."""

		if readOnly == self.getReadOnly() :
			return

		self.__readOnly = readOnly
		for widget in self.__widgets.values() :
			self.__applyReadOnly( widget, self.__readOnly )

	def getContext( self ) :
		"""Returns the Context in which the plugs are being viewed."""

		return self.__context

	def setContext( self, context ) :
		"""Sets the Context in which the plugs are viewed, propagating it to
		all existing widgets. Activations and summaries are recomputed when
		the context changes."""

		self.__context = context
		self.__contextChangedConnection = self.__context.changedSignal().connect( Gaffer.WeakMethod( self.__contextChanged ) )

		for widget in self.__widgets.values() :
			self.__applyContext( widget, context )

	## Returns a PlugValueWidget representing the specified child plug.
	# Because the layout is built lazily on demand, this might return None due
	# to the user not having opened up the ui - in this case lazy=False may
	# be passed to force the creation of the ui.
	def plugValueWidget( self, childPlug, lazy=True ) :

		if not lazy :
			self.__updateLazily.flush( self )

		w = self.__widgets.get( childPlug, None )
		if w is None :
			return w
		elif isinstance( w, GafferUI.PlugValueWidget ) :
			return w
		else :
			# e.g. a PlugWidget wrapping the actual PlugValueWidget.
			return w.plugValueWidget()

	## Returns the custom widget registered with the specified name.
	# Because the layout is built lazily on demand, this might return None due
	# to the user not having opened up the ui - in this case lazy=False may
	# be passed to force the creation of the ui.
	def customWidget( self, name, lazy=True ) :

		if not lazy :
			self.__updateLazily.flush( self )

		return self.__widgets.get( name )

	## Returns the list of section names that will be used when laying
	# out the plugs of the specified parent. The sections are returned
	# in the order in which they will be created.
	@classmethod
	def layoutSections( cls, parent, includeCustomWidgets = False, layoutName = "layout" ) :

		# An OrderedDict is used as an ordered set : first appearance wins.
		d = collections.OrderedDict()
		for item in cls.layoutOrder( parent, includeCustomWidgets, layoutName = layoutName ) :
			sectionPath = cls.__staticSectionPath( item, parent, layoutName )
			sectionName = ".".join( sectionPath )
			d[sectionName] = 1

		return d.keys()

	## Returns the child plugs of the parent in the order in which they
	# will be laid out, based on "<layoutName>:index" Metadata entries. If
	# includeCustomWidgets is True, then the positions of custom widgets
	# are represented by the appearance of the names of the widgets as
	# strings within the list. If a section name is specified, then the
	# result will be filtered to include only items in that section.
	@classmethod
	def layoutOrder( cls, parent, includeCustomWidgets = False, section = None, layoutName = "layout", rootSection = "" ) :

		items = parent.children( Gaffer.Plug )
		# plugs prefixed with "__" are private by convention and never shown.
		items = [ plug for plug in items if not plug.getName().startswith( "__" ) ]

		if includeCustomWidgets :
			for name in Gaffer.Metadata.registeredValues( parent ) :
				m = re.match( layoutName + ":customWidget:(.+):widgetType", name )
				if m and cls.__metadataValue( parent, name ) :
					items.append( m.group( 1 ) )

		# Stable sort on the "<layoutName>:index" metadata, defaulting to the
		# natural child order. Negative indices count from the end.
		itemsAndIndices = [ list( x ) for x in enumerate( items ) ]
		for itemAndIndex in itemsAndIndices :
			index = cls.__staticItemMetadataValue( itemAndIndex[1], "index", parent, layoutName )
			if index is not None :
				index = index if index >= 0 else sys.maxsize + index
				itemAndIndex[0] = index

		itemsAndIndices.sort( key = lambda x : x[0] )

		if section is not None :
			sectionPath = section.split( "." ) if section else []
			itemsAndIndices = [ x for x in itemsAndIndices if cls.__staticSectionPath( x[1], parent, layoutName ) == sectionPath ]

		if rootSection :
			# Fixed misplaced parentheses : this was previously written as
			# `rootSection.split( "." if rootSection else [] )`, which only
			# worked because this branch is already guarded by `if rootSection`.
			rootSectionPath = rootSection.split( "." )
			itemsAndIndices = [ x for x in itemsAndIndices if cls.__staticSectionPath( x[1], parent, layoutName )[:len(rootSectionPath)] == rootSectionPath ]

		return [ x[1] for x in itemsAndIndices ]

	@GafferUI.LazyMethod()
	def __updateLazily( self ) :

		self.__update()

	def __update( self ) :
		# Performs whatever work the dirty flags have accumulated, then asks
		# the layout to rebuild itself from the _Section tree.

		if self.__layoutDirty :
			self.__updateLayout()
			self.__layoutDirty = False

		if self.__activationsDirty :
			self.__updateActivations()
			self.__activationsDirty = False

		if self.__summariesDirty :
			self.__updateSummariesWalk( self.__rootSection )
			self.__summariesDirty = False

		# delegate to our layout class to create a concrete
		# layout from the section definitions.
		self.__layout.update( self.__rootSection )

	def __updateLayout( self ) :

		# get the items to lay out - these are a combination
		# of plugs and strings representing custom widgets.
		items = self.layoutOrder( self.__parent, includeCustomWidgets = True, layoutName = self.__layoutName, rootSection = self.__rootSectionName )

		# ditch widgets we don't need any more
		itemsSet = set( items )
		self.__widgets = { k : v for k, v in self.__widgets.items() if k in itemsSet }

		# ditch widgets whose metadata type has changed - we must recreate these.
		self.__widgets = {
			k : v for k, v in self.__widgets.items()
			if isinstance( k, str ) or v is not None and Gaffer.Metadata.value( k, "plugValueWidget:type" ) == v.__plugValueWidgetType
		}

		# make (or reuse existing) widgets for each item, and sort them into
		# sections.
		rootSectionDepth = self.__rootSectionName.count( "." ) + 1 if self.__rootSectionName else 0
		self.__rootSection.clear()
		for item in items :
			if item not in self.__widgets :
				if isinstance( item, Gaffer.Plug ) :
					widget = self.__createPlugWidget( item )
				else :
					widget = self.__createCustomWidget( item )
				self.__widgets[item] = widget
			else :
				widget = self.__widgets[item]

			if widget is None :
				continue

			section = self.__rootSection
			for sectionName in self.__sectionPath( item )[rootSectionDepth:] :
				section = section.subsection( sectionName )

			# "accessory" items are packed into a horizontal row together
			# with the widget that precedes them.
			if len( section.widgets ) and self.__itemMetadataValue( item, "accessory" ) :
				if isinstance( section.widgets[-1], _AccessoryRow ) :
					section.widgets[-1].append( widget )
				else :
					row = _AccessoryRow()
					row.append( section.widgets[-1] )
					row.append( widget )
					section.widgets[-1] = row
			else :
				section.widgets.append( widget )

			if self.__itemMetadataValue( item, "divider" ) :
				section.widgets.append( GafferUI.Divider(
					GafferUI.Divider.Orientation.Horizontal if self.__layout.orientation() == GafferUI.ListContainer.Orientation.Vertical else GafferUI.Divider.Orientation.Vertical
				) )

	def __updateActivations( self ) :

		with self.getContext() :
			# Must scope the context when getting activators, because they are typically
			# computed from the plug values, and may therefore trigger a compute.
			activators = self.__metadataValue( self.__parent, self.__layoutName + ":activators" ) or {}
			activators = { k : v.value for k, v in activators.items() } # convert CompoundData of BoolData to dict of booleans

		def active( activatorName ) :
			# Looks up (and caches) the named activator, falling back to the
			# individual "<layoutName>:activator:<name>" metadata entry.
			result = True
			if activatorName :
				result = activators.get( activatorName )
				if result is None :
					with self.getContext() :
						result = self.__metadataValue( self.__parent, self.__layoutName + ":activator:" + activatorName )
					result = result if result is not None else False
					activators[activatorName] = result

			return result

		for item, widget in self.__widgets.items() :
			if widget is not None :
				widget.setEnabled( active( self.__itemMetadataValue( item, "activator" ) ) )
				widget.setVisible( active( self.__itemMetadataValue( item, "visibilityActivator" ) ) )

	def __updateSummariesWalk( self, section ) :

		with self.getContext() :
			# Must scope the context because summaries are typically
			# generated from plug values, and may therefore trigger
			# a compute.
			section.summary = self.__metadataValue( self.__parent, self.__layoutName + ":section:" + section.fullName + ":summary" ) or ""

		for subsection in section.subsections.values() :
			self.__updateSummariesWalk( subsection )

	def __import( self, path ) :
		# Imports and returns the object named by a fully qualified path
		# such as "module.submodule.callable".

		path = path.split( "." )
		result = __import__( path[0] )
		for n in path[1:] :
			result = getattr( result, n )

		return result

	def __createPlugWidget( self, plug ) :

		result = GafferUI.PlugValueWidget.create( plug )
		if result is None :
			return result

		width = self.__itemMetadataValue( plug, "width" )
		if width is not None :
			result._qtWidget().setFixedWidth( width )
			if result._qtWidget().layout() is not None :
				result._qtWidget().layout().setSizeConstraint( QtWidgets.QLayout.SetDefaultConstraint )

		# Wrap in a PlugWidget to provide a label, unless labelling has been
		# suppressed via empty "label" metadata or the widget labels itself.
		if isinstance( result, GafferUI.PlugValueWidget ) and not result.hasLabel() and self.__itemMetadataValue( plug, "label" ) != "" :
			result = GafferUI.PlugWidget( result )
			if self.__layout.orientation() == GafferUI.ListContainer.Orientation.Horizontal :
				# undo the annoying fixed size the PlugWidget has applied
				# to the label.
				## \todo Shift all the label size fixing out of PlugWidget and just fix the
				# widget here if we're in a vertical orientation.
				QWIDGETSIZE_MAX = 16777215 # qt #define not exposed by PyQt or PySide
				result.labelPlugValueWidget().label()._qtWidget().setFixedWidth( QWIDGETSIZE_MAX )

		self.__applyReadOnly( result, self.getReadOnly() )
		self.__applyContext( result, self.getContext() )

		# Store the metadata value that controlled the type created, so we can compare to it
		# in the future to determine if we can reuse the widget.
		result.__plugValueWidgetType = Gaffer.Metadata.value( plug, "plugValueWidget:type" )

		return result

	def __createCustomWidget( self, name ) :

		widgetType = self.__itemMetadataValue( name, "widgetType" )
		widgetClass = self.__import( widgetType )

		result = widgetClass( self.__parent )
		self.__applyContext( result, self.getContext() )

		return result

	def __node( self ) :
		# The node whose UI is being displayed - either the parent itself,
		# or the node owning the parent plug.

		return self.__parent if isinstance( self.__parent, Gaffer.Node ) else self.__parent.node()

	@classmethod
	def __metadataValue( cls, plugOrNode, name ) :

		return Gaffer.Metadata.value( plugOrNode, name )

	@classmethod
	def __staticItemMetadataValue( cls, item, name, parent, layoutName ) :
		# Returns per-item layout metadata, where item is either a plug or
		# the string name of a custom widget.

		if isinstance( item, Gaffer.Plug ) :
			v = Gaffer.Metadata.value( item, layoutName + ":" + name )
			if v is None and name in ( "divider", "label" ) :
				# Backwards compatibility with old unprefixed metadata names.
				v = Gaffer.Metadata.value( item, name )
			return v
		else :
			return cls.__metadataValue( parent, layoutName + ":customWidget:" + item + ":" + name )

	def __itemMetadataValue( self, item, name ) :

		return self.__staticItemMetadataValue( item, name, parent = self.__parent, layoutName = self.__layoutName )

	@classmethod
	def __staticSectionPath( cls, item, parent, layoutName ) :

		m = None
		if isinstance( parent, Gaffer.Node ) :
			# Backwards compatibility with old metadata entry
			## \todo Remove
			m = cls.__staticItemMetadataValue( item, "nodeUI:section", parent, layoutName )
			if m == "header" :
				m = ""

		if m is None :
			m = cls.__staticItemMetadataValue( item, "section", parent, layoutName )

		return m.split( "." ) if m else []

	def __sectionPath( self, item ) :

		return self.__staticSectionPath( item, parent = self.__parent, layoutName = self.__layoutName )

	def __childAddedOrRemoved( self, *unusedArgs ) :

		# typically many children are added and removed at once, so we
		# do a lazy update, batching several changes into one rebuild
		# once the upheaval is over.
		self.__layoutDirty = True
		self.__updateLazily()

	def __applyReadOnly( self, widget, readOnly ) :

		if widget is None :
			return

		if hasattr( widget, "setReadOnly" ) :
			widget.setReadOnly( readOnly )
		elif isinstance( widget, GafferUI.PlugWidget ) :
			widget.labelPlugValueWidget().setReadOnly( readOnly )
			widget.plugValueWidget().setReadOnly( readOnly )
		elif hasattr( widget, "plugValueWidget" ) :
			widget.plugValueWidget().setReadOnly( readOnly )

	def __applyContext( self, widget, context ) :

		if hasattr( widget, "setContext" ) :
			widget.setContext( context )
		elif isinstance( widget, GafferUI.PlugWidget ) :
			widget.labelPlugValueWidget().setContext( context )
			widget.plugValueWidget().setContext( context )
		elif hasattr( widget, "plugValueWidget" ) :
			widget.plugValueWidget().setContext( context )

	def __plugMetadataChanged( self, nodeTypeId, plugPath, key, plug ) :

		parentAffected = isinstance( self.__parent, Gaffer.Plug ) and Gaffer.MetadataAlgo.affectedByChange( self.__parent, nodeTypeId, plugPath, plug )
		childAffected = Gaffer.MetadataAlgo.childAffectedByChange( self.__parent, nodeTypeId, plugPath, plug )
		if not parentAffected and not childAffected :
			return

		if key in (
			"divider",
			self.__layoutName + ":divider",
			self.__layoutName + ":index",
			self.__layoutName + ":section",
			self.__layoutName + ":accessory",
			"plugValueWidget:type"
		) :
			# we often see sequences of several metadata changes - so
			# we schedule a lazy update to batch them into one ui update.
			self.__layoutDirty = True
			self.__updateLazily()
		elif re.match( self.__layoutName + ":section:.*:summary", key ) :
			self.__summariesDirty = True
			self.__updateLazily()

	def __plugDirtied( self, plug ) :

		if not self.visible() or plug.direction() != plug.Direction.In :
			return

		self.__activationsDirty = True
		self.__summariesDirty = True
		self.__updateLazily()

	def __contextChanged( self, context, name ) :

		self.__activationsDirty = True
		self.__summariesDirty = True
		self.__updateLazily()
class _AccessoryRow( GafferUI.ListContainer ) :
	"""A horizontal row used to pack a widget together with the "accessory"
	widgets that follow it in the layout."""

	def __init__( self, **kw ) :

		horizontal = GafferUI.ListContainer.Orientation.Horizontal
		GafferUI.ListContainer.__init__( self, horizontal, spacing = 4, **kw )
# The _Section class provides a simple abstract representation of a hierarchical
# layout. Each section contains a list of widgets to be displayed in that section,
# and an OrderedDict of named subsections.
class _Section( object ) :
	"""A node in the abstract layout hierarchy : an ordered list of widgets
	plus named subsections, each of which is itself a _Section. Also provides
	persistence of per-section UI state via (non-persistent) metadata on the
	parent GraphComponent."""

	def __init__( self, _parent, _fullName = "" ) :

		self.__parent = _parent
		self.fullName = _fullName

		self.clear()

	def subsection( self, name ) :
		"""Returns the named child section, creating it on first access."""

		existing = self.subsections.get( name )
		if existing is not None :
			return existing

		childFullName = self.fullName + "." + name if self.fullName else name
		child = _Section( self.__parent, childFullName )
		self.subsections[name] = child

		return child

	def clear( self ) :
		"""Resets the section to an empty state."""

		self.widgets = []
		self.subsections = collections.OrderedDict()
		self.summary = ""

	def saveState( self, name, value ) :
		"""Stores a piece of UI state (e.g. the current tab) against the parent."""

		Gaffer.Metadata.registerValue( self.__parent, self.__stateName( name ), value, persistent = False )

	def restoreState( self, name ) :
		"""Returns state previously stored by saveState(), or None if there is none."""

		return Gaffer.Metadata.value( self.__parent, self.__stateName( name ) )

	def __stateName( self, name ) :

		return "layout:section:" + self.fullName + ":" + name
# The PlugLayout class deals with all the details of plugs, metadata and
# signals to define an abstract layout in terms of _Sections. It then
# delegates to the _Layout classes to create an actual layout in terms
# of Widgets. This allows us to present different layouts based on whether
# or not the parent is a node (tabbed layout) or a plug (collapsible layout).
class _Layout( GafferUI.Widget ) :
	"""Abstract base class for the concrete layouts (_TabLayout and
	_CollapsibleLayout). Stores the orientation and defines the update()
	interface used by PlugLayout to (re)build widgets from a _Section tree."""
	def __init__( self, topLevelWidget, orientation, **kw ) :
		GafferUI.Widget.__init__( self, topLevelWidget, **kw )
		self.__orientation = orientation
	def orientation( self ) :
		# The ListContainer.Orientation this layout was constructed with.
		return self.__orientation
	def update( self, section ) :
		# Must be implemented by derived classes to build the concrete
		# widget hierarchy for the given _Section and its subsections.
		raise NotImplementedError
class _TabLayout( _Layout ) :
	"""Concrete layout used at the root of a node's UI. Top-level widgets
	are placed in a column above a TabbedContainer that holds one tab per
	subsection. The current tab is remembered via _Section.saveState()."""

	def __init__( self, orientation, embedded = False, **kw ) :

		self.__embedded = embedded

		self.__mainColumn = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical )

		_Layout.__init__( self, self.__mainColumn, orientation, **kw )

		with self.__mainColumn :
			self.__widgetsColumn = GafferUI.ListContainer( self.orientation(), spacing = 4, borderWidth = 4 )
			self.__tabbedContainer = GafferUI.TabbedContainer()

		# if the TabLayout is embedded, we want to restrict the maximum width/height depending on the orientation
		if self.__embedded :
			if self.orientation() == GafferUI.ListContainer.Orientation.Vertical :
				self.__tabbedContainer._qtWidget().setSizePolicy( QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum ) )
			else :
				self.__tabbedContainer._qtWidget().setSizePolicy( QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Expanding ) )

		self.__currentTabChangedConnection = self.__tabbedContainer.currentChangedSignal().connect(
			Gaffer.WeakMethod( self.__currentTabChanged )
		)

	def update( self, section ) :

		self.__section = section
		self.__widgetsColumn[:] = section.widgets

		existingTabs = collections.OrderedDict()
		for tab in self.__tabbedContainer[:] :
			existingTabs[self.__tabbedContainer.getLabel( tab )] = tab

		updatedTabs = collections.OrderedDict()
		for name, subsection in section.subsections.items() :
			tab = existingTabs.get( name )
			if tab is None :
				# Use scroll bars only when the TabLayout is not embedded
				if self.__embedded :
					# Fixed : `GafferUI.Frame.BorderStyle.None` is a SyntaxError in
					# Python 3 because `None` is a keyword, so the enum value must
					# be looked up by name. Newer Gaffer exposes it as `None_`.
					borderStyleNone = getattr( GafferUI.Frame.BorderStyle, "None_", None )
					if borderStyleNone is None :
						borderStyleNone = getattr( GafferUI.Frame.BorderStyle, "None" )
					tab = GafferUI.Frame( borderWidth = 0, borderStyle = borderStyleNone )
				else :
					tab = GafferUI.ScrolledContainer( borderWidth = 8 )
					if self.orientation() == GafferUI.ListContainer.Orientation.Vertical :
						tab.setHorizontalMode( GafferUI.ScrollMode.Never )
					else :
						tab.setVerticalMode( GafferUI.ScrollMode.Never )
				tab.setChild( _CollapsibleLayout( self.orientation() ) )
			tab.getChild().update( subsection )
			updatedTabs[name] = tab

		# Only rebuild the tab set when the labels actually changed, and block
		# the changed signal so the rebuild doesn't clobber the saved state.
		if existingTabs.keys() != updatedTabs.keys() :
			with Gaffer.BlockedConnection( self.__currentTabChangedConnection ) :
				del self.__tabbedContainer[:]
				for name, tab in updatedTabs.items() :
					self.__tabbedContainer.append( tab, label = name )

		for index, subsection in enumerate( section.subsections.values() ) :
			## \todo Consider how/if we should add a public tooltip API to TabbedContainer.
			self.__tabbedContainer._qtWidget().setTabToolTip( index, subsection.summary )

		if not len( existingTabs ) :
			# First build : restore the previously selected tab, if any.
			currentTabIndex = self.__section.restoreState( "currentTab" ) or 0
			if currentTabIndex < len( self.__tabbedContainer ) :
				self.__tabbedContainer.setCurrent( self.__tabbedContainer[currentTabIndex] )

		self.__widgetsColumn.setVisible( len( section.widgets ) )
		self.__tabbedContainer.setVisible( len( self.__tabbedContainer ) )

	def __currentTabChanged( self, tabbedContainer, currentTab ) :

		self.__section.saveState( "currentTab", tabbedContainer.index( currentTab ) )
class _CollapsibleLayout( _Layout ) :
	"""Concrete layout used for plugs (and for node subsections inside a tab).
	Widgets are placed in a column, with one cached Collapsible widget per
	subsection. Collapsed state is persisted via _Section.saveState()."""
	def __init__( self, orientation, **kw ) :
		self.__column = GafferUI.ListContainer( orientation, spacing = 4 )
		_Layout.__init__( self, self.__column, orientation, **kw )
		self.__collapsibles = {} # Indexed by section name
	def update( self, section ) :
		# Rebuilds the column contents : the section's own widgets followed
		# by one Collapsible per subsection (reused across updates).
		widgets = list( section.widgets )
		for name, subsection in section.subsections.items() :
			collapsible = self.__collapsibles.get( name )
			if collapsible is None :
				collapsible = GafferUI.Collapsible( name, _CollapsibleLayout( self.orientation() ), collapsed = True )
				# Hack to add margins at the top and bottom but not at the sides.
				## \todo This is exposed in the public API via the borderWidth
				# parameter to the Collapsible. That parameter sucks because a) it
				# makes a margin rather than a border, and b) it doesn't allow per-edge
				# control. Either make that make sense, or remove it and find a way
				# of deferring all this to the style.
				collapsible._qtWidget().layout().setContentsMargins( 0, 2, 0, 2 )
				collapsible.setCornerWidget( GafferUI.Label(), True )
				## \todo This is fighting the default sizing applied in the Label constructor. Really we need a standard
				# way of controlling size behaviours for all widgets in the public API.
				collapsible.getCornerWidget()._qtWidget().setSizePolicy( QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Fixed )
				# Sections default to collapsed; only an explicit saved False expands.
				if subsection.restoreState( "collapsed" ) is False :
					collapsible.setCollapsed( False )
				collapsible.stateChangedSignal().connect(
					functools.partial( Gaffer.WeakMethod( self.__collapsibleStateChanged ), subsection = subsection ),
					scoped = False
				)
				self.__collapsibles[name] = collapsible
			collapsible.getChild().update( subsection )
			# Show the section summary (if any) in the corner widget.
			collapsible.getCornerWidget().setText(
				"<small>" + " ( " + subsection.summary + " )</small>" if subsection.summary else ""
			)
			widgets.append( collapsible )
		self.__column[:] = widgets
	def __collapsibleStateChanged( self, collapsible, subsection ) :
		# Remember the collapsed state so a later rebuild can restore it.
		subsection.saveState( "collapsed", collapsible.getCollapsed() )
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/AllergyIntolerance) on 2017-03-22.
# 2017, SMART Health IT.
from . import domainresource
class AllergyIntolerance(domainresource.DomainResource):
    """ Allergy or Intolerance (generally: Risk of adverse reaction to a substance).
    Risk of harmful or undesirable, physiological response which is unique to
    an individual and associated with exposure to a substance.
    """
    # NOTE(review): generated code (see the "Generated from FHIR" header above) -
    # changes here will be lost on regeneration.
    resource_type = "AllergyIntolerance"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.assertedDate = None
        """ Date record was believed accurate.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.asserter = None
        """ Source of the information about the allergy.
        Type `FHIRReference` referencing `Patient, RelatedPerson, Practitioner` (represented as `dict` in JSON). """
        self.category = None
        """ food | medication | environment | biologic.
        List of `str` items. """
        self.clinicalStatus = None
        """ active | inactive | resolved.
        Type `str`. """
        self.code = None
        """ Code that identifies the allergy or intolerance.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.criticality = None
        """ low | high | unable-to-assess.
        Type `str`. """
        self.identifier = None
        """ External ids for this item.
        List of `Identifier` items (represented as `dict` in JSON). """
        self.lastOccurrence = None
        """ Date(/time) of last known occurrence of a reaction.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.note = None
        """ Additional text not captured in other fields.
        List of `Annotation` items (represented as `dict` in JSON). """
        self.onsetAge = None
        """ When allergy or intolerance was identified.
        Type `Age` (represented as `dict` in JSON). """
        self.onsetDateTime = None
        """ When allergy or intolerance was identified.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.onsetPeriod = None
        """ When allergy or intolerance was identified.
        Type `Period` (represented as `dict` in JSON). """
        self.onsetRange = None
        """ When allergy or intolerance was identified.
        Type `Range` (represented as `dict` in JSON). """
        self.onsetString = None
        """ When allergy or intolerance was identified.
        Type `str`. """
        self.patient = None
        """ Who the sensitivity is for.
        Type `FHIRReference` referencing `Patient` (represented as `dict` in JSON). """
        self.reaction = None
        """ Adverse Reaction Events linked to exposure to substance.
        List of `AllergyIntoleranceReaction` items (represented as `dict` in JSON). """
        self.recorder = None
        """ Who recorded the sensitivity.
        Type `FHIRReference` referencing `Practitioner, Patient` (represented as `dict` in JSON). """
        self.type = None
        """ allergy | intolerance - Underlying mechanism (if known).
        Type `str`. """
        self.verificationStatus = None
        """ unconfirmed | confirmed | refuted | entered-in-error.
        Type `str`. """
        # The superclass applies jsondict to the attributes declared above.
        super(AllergyIntolerance, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        # Extends the inherited property list with this resource's elements.
        # Each tuple appears to be (attribute, json name, type, is_list,
        # "one of many" group, required) - confirm against the base class's
        # elementProperties documentation before relying on this.
        js = super(AllergyIntolerance, self).elementProperties()
        js.extend([
            ("assertedDate", "assertedDate", fhirdate.FHIRDate, False, None, False),
            ("asserter", "asserter", fhirreference.FHIRReference, False, None, False),
            ("category", "category", str, True, None, False),
            ("clinicalStatus", "clinicalStatus", str, False, None, False),
            ("code", "code", codeableconcept.CodeableConcept, False, None, False),
            ("criticality", "criticality", str, False, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("lastOccurrence", "lastOccurrence", fhirdate.FHIRDate, False, None, False),
            ("note", "note", annotation.Annotation, True, None, False),
            ("onsetAge", "onsetAge", age.Age, False, "onset", False),
            ("onsetDateTime", "onsetDateTime", fhirdate.FHIRDate, False, "onset", False),
            ("onsetPeriod", "onsetPeriod", period.Period, False, "onset", False),
            ("onsetRange", "onsetRange", range.Range, False, "onset", False),
            ("onsetString", "onsetString", str, False, "onset", False),
            ("patient", "patient", fhirreference.FHIRReference, False, None, True),
            ("reaction", "reaction", AllergyIntoleranceReaction, True, None, False),
            ("recorder", "recorder", fhirreference.FHIRReference, False, None, False),
            ("type", "type", str, False, None, False),
            ("verificationStatus", "verificationStatus", str, False, None, True),
        ])
        return js
from . import backboneelement
class AllergyIntoleranceReaction(backboneelement.BackboneElement):
    """ Adverse Reaction Events linked to exposure to substance.
    Details about each adverse reaction event linked to exposure to the
    identified substance.
    """
    # NOTE(review): generated code (see the "Generated from FHIR" header above) -
    # changes here will be lost on regeneration.
    resource_type = "AllergyIntoleranceReaction"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.description = None
        """ Description of the event as a whole.
        Type `str`. """
        self.exposureRoute = None
        """ How the subject was exposed to the substance.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.manifestation = None
        """ Clinical symptoms/signs associated with the Event.
        List of `CodeableConcept` items (represented as `dict` in JSON). """
        self.note = None
        """ Text about event not captured in other fields.
        List of `Annotation` items (represented as `dict` in JSON). """
        self.onset = None
        """ Date(/time) when manifestations showed.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.severity = None
        """ mild | moderate | severe (of event as a whole).
        Type `str`. """
        self.substance = None
        """ Specific substance or pharmaceutical product considered to be
        responsible for event.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        # The superclass applies jsondict to the attributes declared above.
        super(AllergyIntoleranceReaction, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        # Extends the inherited property list with this element's fields.
        # Tuple layout matches AllergyIntolerance.elementProperties above.
        js = super(AllergyIntoleranceReaction, self).elementProperties()
        js.extend([
            ("description", "description", str, False, None, False),
            ("exposureRoute", "exposureRoute", codeableconcept.CodeableConcept, False, None, False),
            ("manifestation", "manifestation", codeableconcept.CodeableConcept, True, None, True),
            ("note", "note", annotation.Annotation, True, None, False),
            ("onset", "onset", fhirdate.FHIRDate, False, None, False),
            ("severity", "severity", str, False, None, False),
            ("substance", "substance", codeableconcept.CodeableConcept, False, None, False),
        ])
        return js
import sys
try:
from . import age
except ImportError:
age = sys.modules[__package__ + '.age']
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + '.annotation']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import range
except ImportError:
range = sys.modules[__package__ + '.range']
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
import logging
from itertools import product
from airflow.operators.s3_to_hive_operator import S3ToHiveTransfer
from collections import OrderedDict
from airflow.exceptions import AirflowException
from tempfile import NamedTemporaryFile, mkdtemp
from gzip import GzipFile
import bz2
import shutil
import filecmp
import errno
try:
import boto3
from moto import mock_s3
except ImportError:
mock_s3 = None
class S3ToHiveTransferTest(unittest.TestCase):
    """Unit tests for the S3ToHiveTransfer operator.

    ``setUp`` materialises a small tab-delimited sample data set in a
    temporary directory as .txt, .gz, .GZ and .bz2 files, each both with
    and without a header row.  Files are registered in ``self.fn`` keyed
    by (extension, header) via ``_set_fn``/``_get_fn``.
    """

    def setUp(self):
        """Create sample files and the default operator kwargs."""
        # Registry of sample files: key built by _get_key(ext, header).
        self.fn = {}
        self.task_id = 'S3ToHiveTransferTest'
        self.s3_key = 'S32hive_test_file'
        self.field_dict = OrderedDict([('Sno', 'BIGINT'), ('Some,Text', 'STRING')])
        self.hive_table = 'S32hive_test_table'
        self.delimiter = '\t'
        self.create = True
        self.recreate = True
        self.partition = {'ds': 'STRING'}
        self.headers = True
        self.check_headers = True
        self.wildcard_match = False
        self.input_compressed = False
        # Default operator kwargs; individual tests tweak these per scenario.
        self.kwargs = {'task_id': self.task_id,
                       's3_key': self.s3_key,
                       'field_dict': self.field_dict,
                       'hive_table': self.hive_table,
                       'delimiter': self.delimiter,
                       'create': self.create,
                       'recreate': self.recreate,
                       'partition': self.partition,
                       'headers': self.headers,
                       'check_headers': self.check_headers,
                       'wildcard_match': self.wildcard_match,
                       'input_compressed': self.input_compressed
                       }
        try:
            header = "Sno\tSome,Text \n".encode()
            line1 = "1\tAirflow Test\n".encode()
            line2 = "2\tS32HiveTransfer\n".encode()
            self.tmp_dir = mkdtemp(prefix='test_tmps32hive_')
            # create sample txt, gz and bz2 with and without headers
            with NamedTemporaryFile(mode='wb+',
                                    dir=self.tmp_dir,
                                    delete=False) as f_txt_h:
                self._set_fn(f_txt_h.name, '.txt', True)
                f_txt_h.writelines([header, line1, line2])
            fn_gz = self._get_fn('.txt', True) + ".gz"
            with GzipFile(filename=fn_gz, mode="wb") as f_gz_h:
                self._set_fn(fn_gz, '.gz', True)
                f_gz_h.writelines([header, line1, line2])
            # Upper-case extension variant exercises case-insensitive handling.
            fn_gz_upper = self._get_fn('.txt', True) + ".GZ"
            with GzipFile(filename=fn_gz_upper, mode="wb") as f_gz_upper_h:
                self._set_fn(fn_gz_upper, '.GZ', True)
                f_gz_upper_h.writelines([header, line1, line2])
            fn_bz2 = self._get_fn('.txt', True) + '.bz2'
            with bz2.BZ2File(filename=fn_bz2, mode="wb") as f_bz2_h:
                self._set_fn(fn_bz2, '.bz2', True)
                f_bz2_h.writelines([header, line1, line2])
            # create sample txt, bz and bz2 without header
            with NamedTemporaryFile(mode='wb+', dir=self.tmp_dir, delete=False) as f_txt_nh:
                self._set_fn(f_txt_nh.name, '.txt', False)
                f_txt_nh.writelines([line1, line2])
            fn_gz = self._get_fn('.txt', False) + ".gz"
            with GzipFile(filename=fn_gz, mode="wb") as f_gz_nh:
                self._set_fn(fn_gz, '.gz', False)
                f_gz_nh.writelines([line1, line2])
            fn_gz_upper = self._get_fn('.txt', False) + ".GZ"
            with GzipFile(filename=fn_gz_upper, mode="wb") as f_gz_upper_nh:
                self._set_fn(fn_gz_upper, '.GZ', False)
                f_gz_upper_nh.writelines([line1, line2])
            fn_bz2 = self._get_fn('.txt', False) + '.bz2'
            with bz2.BZ2File(filename=fn_bz2, mode="wb") as f_bz2_nh:
                self._set_fn(fn_bz2, '.bz2', False)
                f_bz2_nh.writelines([line1, line2])
        # Base Exception so it catches Keyboard Interrupt
        except BaseException as e:
            logging.error(e)
            self.tearDown()

    def tearDown(self):
        """Remove the temp directory; tolerate it already being gone."""
        try:
            shutil.rmtree(self.tmp_dir)
        except OSError as e:
            # ENOENT - no such file or directory
            if e.errno != errno.ENOENT:
                raise e

    # Helper method to create a dictionary of file names and
    # file types (file extension and header)
    def _set_fn(self, fn, ext, header):
        key = self._get_key(ext, header)
        self.fn[key] = fn

    # Helper method to fetch a file of a
    # certain format (file extension and header)
    def _get_fn(self, ext, header):
        key = self._get_key(ext, header)
        return self.fn[key]

    @staticmethod
    def _get_key(ext, header):
        # Key format: "<ext>_h" (with header) or "<ext>_nh" (without).
        key = ext + "_" + ('h' if header else 'nh')
        return key

    @staticmethod
    def _check_file_equality(fn_1, fn_2, ext):
        """Compare two files; gz files are decompressed before comparing."""
        # gz files contain mtime and filename in the header that
        # causes filecmp to return False even if contents are identical
        # Hence decompress to test for equality
        if ext.lower() == '.gz':
            with GzipFile(fn_1, 'rb') as f_1, NamedTemporaryFile(mode='wb') as f_txt_1:
                with GzipFile(fn_2, 'rb') as f_2, NamedTemporaryFile(mode='wb') as f_txt_2:
                    shutil.copyfileobj(f_1, f_txt_1)
                    shutil.copyfileobj(f_2, f_txt_2)
                    f_txt_1.flush()
                    f_txt_2.flush()
                    return filecmp.cmp(f_txt_1.name, f_txt_2.name, shallow=False)
        else:
            return filecmp.cmp(fn_1, fn_2, shallow=False)

    def test_bad_parameters(self):
        """check_headers without headers must be rejected at construction."""
        self.kwargs['check_headers'] = True
        self.kwargs['headers'] = False
        self.assertRaisesRegexp(AirflowException,
                                "To check_headers.*",
                                S3ToHiveTransfer,
                                **self.kwargs)

    def test__get_top_row_as_list(self):
        """Top row is split on the configured delimiter."""
        self.kwargs['delimiter'] = '\t'
        fn_txt = self._get_fn('.txt', True)
        header_list = S3ToHiveTransfer(**self.kwargs). \
            _get_top_row_as_list(fn_txt)
        self.assertEqual(header_list, ['Sno', 'Some,Text'],
                         msg="Top row from file doesnt matched expected value")
        self.kwargs['delimiter'] = ','
        header_list = S3ToHiveTransfer(**self.kwargs). \
            _get_top_row_as_list(fn_txt)
        self.assertEqual(header_list, ['Sno\tSome', 'Text'],
                         msg="Top row from file doesnt matched expected value")

    def test__match_headers(self):
        """Header must match field_dict keys exactly, in order."""
        self.kwargs['field_dict'] = OrderedDict([('Sno', 'BIGINT'),
                                                 ('Some,Text', 'STRING')])
        self.assertTrue(S3ToHiveTransfer(**self.kwargs).
                        _match_headers(['Sno', 'Some,Text']),
                        msg="Header row doesnt match expected value")
        # Testing with different column order
        self.assertFalse(S3ToHiveTransfer(**self.kwargs).
                         _match_headers(['Some,Text', 'Sno']),
                         msg="Header row doesnt match expected value")
        # Testing with extra column in header
        self.assertFalse(S3ToHiveTransfer(**self.kwargs).
                         _match_headers(['Sno', 'Some,Text', 'ExtraColumn']),
                         msg="Header row doesnt match expected value")

    def test__delete_top_row_and_compress(self):
        """Stripping the header then compressing matches the headerless fixtures."""
        s32hive = S3ToHiveTransfer(**self.kwargs)
        # Testing gz file type
        fn_txt = self._get_fn('.txt', True)
        gz_txt_nh = s32hive._delete_top_row_and_compress(fn_txt,
                                                         '.gz',
                                                         self.tmp_dir)
        fn_gz = self._get_fn('.gz', False)
        self.assertTrue(self._check_file_equality(gz_txt_nh, fn_gz, '.gz'),
                        msg="gz Compressed file not as expected")
        # Testing bz2 file type
        bz2_txt_nh = s32hive._delete_top_row_and_compress(fn_txt,
                                                          '.bz2',
                                                          self.tmp_dir)
        fn_bz2 = self._get_fn('.bz2', False)
        self.assertTrue(self._check_file_equality(bz2_txt_nh, fn_bz2, '.bz2'),
                        msg="bz2 Compressed file not as expected")

    @unittest.skipIf(mock is None, 'mock package not present')
    @unittest.skipIf(mock_s3 is None, 'moto package not present')
    @mock.patch('airflow.operators.s3_to_hive_operator.HiveCliHook')
    @mock_s3
    def test_execute(self, mock_hiveclihook):
        """End-to-end execute() against a moto-mocked S3 bucket."""
        conn = boto3.client('s3')
        conn.create_bucket(Bucket='bucket')
        # Testing txt, zip, bz2 files with and without header row
        for (ext, has_header) in product(['.txt', '.gz', '.bz2', '.GZ'], [True, False]):
            self.kwargs['headers'] = has_header
            self.kwargs['check_headers'] = has_header
            logging.info("Testing {0} format {1} header".
                         format(ext,
                                ('with' if has_header else 'without'))
                         )
            self.kwargs['input_compressed'] = ext.lower() != '.txt'
            self.kwargs['s3_key'] = 's3://bucket/' + self.s3_key + ext
            ip_fn = self._get_fn(ext, self.kwargs['headers'])
            op_fn = self._get_fn(ext, False)
            # Upload the file into the Mocked S3 bucket
            conn.upload_file(ip_fn, 'bucket', self.s3_key + ext)
            # file parameter to HiveCliHook.load_file is compared
            # against expected file output
            mock_hiveclihook().load_file.side_effect = \
                lambda *args, **kwargs: self.assertTrue(
                    self._check_file_equality(args[0], op_fn, ext),
                    msg='{0} output file not as expected'.format(ext))
            # Execute S3ToHiveTransfer
            s32hive = S3ToHiveTransfer(**self.kwargs)
            s32hive.execute(None)

    @unittest.skipIf(mock is None, 'mock package not present')
    @unittest.skipIf(mock_s3 is None, 'moto package not present')
    @mock.patch('airflow.operators.s3_to_hive_operator.HiveCliHook')
    @mock_s3
    def test_execute_with_select_expression(self, mock_hiveclihook):
        """execute() with select_expression must call S3Hook.select_key correctly."""
        conn = boto3.client('s3')
        conn.create_bucket(Bucket='bucket')
        select_expression = "SELECT * FROM S3Object s"
        bucket = 'bucket'
        # Only testing S3ToHiveTransfer calls S3Hook.select_key with
        # the right parameters and its execute method succeeds here,
        # since Moto doesn't support select_object_content as of 1.3.2.
        for (ext, has_header) in product(['.txt', '.gz', '.GZ'], [True, False]):
            input_compressed = ext.lower() != '.txt'
            key = self.s3_key + ext
            self.kwargs['check_headers'] = False
            self.kwargs['headers'] = has_header
            self.kwargs['input_compressed'] = input_compressed
            self.kwargs['select_expression'] = select_expression
            self.kwargs['s3_key'] = 's3://{0}/{1}'.format(bucket, key)
            ip_fn = self._get_fn(ext, has_header)
            # Upload the file into the Mocked S3 bucket
            conn.upload_file(ip_fn, bucket, key)
            input_serialization = {
                'CSV': {'FieldDelimiter': self.delimiter}
            }
            if input_compressed:
                input_serialization['CompressionType'] = 'GZIP'
            if has_header:
                input_serialization['CSV']['FileHeaderInfo'] = 'USE'
            # Confirm that select_key was called with the right params
            with mock.patch('airflow.hooks.S3_hook.S3Hook.select_key',
                            return_value="") as mock_select_key:
                # Execute S3ToHiveTransfer
                s32hive = S3ToHiveTransfer(**self.kwargs)
                s32hive.execute(None)
                mock_select_key.assert_called_once_with(
                    bucket_name=bucket, key=key,
                    expression=select_expression,
                    input_serialization=input_serialization
                )
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| |
# -*- coding: utf-8 -*-
# Script to start deployment api and make request to it.
import argparse
import base64
import datetime
import logging
import os
import errno
import shutil
import subprocess
import tempfile
import threading
from functools import partial
from multiprocessing import Process
from time import sleep
from google.auth.transport.requests import Request
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from prometheus_client import start_http_server, Gauge, Counter
import requests
import yaml
import google.auth
import google.auth.compute_engine.credentials
import google.auth.iam
import google.oauth2.credentials
import google.oauth2.service_account
from retrying import retry
from kubeflow.testing import test_util
# Directory containing this script; used to resolve repo-relative paths.
FILE_PATH = os.path.dirname(os.path.abspath(__file__))
# Local working directory for ssl certs downloaded from GCS.
SSL_DIR = os.path.join(FILE_PATH, "sslcert")
# GCS bucket where ssl certs are persisted between test rounds.
SSL_BUCKET = 'kubeflow-ci-deploy-cert'
IAM_SCOPE = 'https://www.googleapis.com/auth/iam'
OAUTH_TOKEN_URI = 'https://www.googleapis.com/oauth2/v4/token'
# HTTP method used when probing the IAP-protected endpoint.
METHOD = 'GET'
# Prometheus metrics exported by the prober / load test (see main()).
SERVICE_HEALTH = Gauge(
    'deployment_service_status',
    '0: normal; 1: deployment not successful; 2: service down')
PROBER_HEALTH = Gauge('prober_health', '0: normal; 1: not working')
LOADTEST_HEALTH = Gauge('loadtest_health', '0: normal; 1: not working')
LOADTEST_SUCCESS = Gauge('loadtest_success',
                         'number of successful requests in current load test')
SUCCESS_COUNT = Counter('deployment_success_count',
                        'accumulative count of successful deployment')
FAILURE_COUNT = Counter('deployment_failure_count',
                        'accumulative count of failed deployment')
# Zones that load-test deployments are rotated through (see getZone).
LOADTEST_ZONE = [
    'us-central1-a', 'us-central1-c', 'us-east1-c', 'us-east1-d', 'us-west1-b'
]
class requestThread(threading.Thread):
  """Thread that POSTs a single deployment request to the kfctl service.

  Used by the load test to fire many deployment requests concurrently.
  On a non-200 response or any exception, SERVICE_HEALTH is set to 2
  (service down); the thread never raises.
  """

  def __init__(self, target_url, req_data, google_open_id_connect_token):
    threading.Thread.__init__(self)
    # Hostname of the deployment service (no scheme prefix).
    self.target_url = target_url
    # JSON payload for the e2eDeploy call, built by prepare_request_data().
    self.req_data = req_data
    # IAP bearer token used to authenticate the request.
    self.google_open_id_connect_token = google_open_id_connect_token

  def run(self):
    """Perform the POST; record failure via SERVICE_HEALTH instead of raising."""
    try:
      resp = requests.post(
          "https://%s/kfctl/e2eDeploy" % self.target_url,
          json=self.req_data,
          headers={
              'Authorization':
                  'Bearer {}'.format(self.google_open_id_connect_token)
          })
      if resp.status_code != 200:
        logging.error("request failed:%s\n request data:%s"
                      % (resp, self.req_data))
        # Mark service down if return code abnormal
        SERVICE_HEALTH.set(2)
    except Exception as e:
      logging.error(e)
      SERVICE_HEALTH.set(2)
def may_get_env_var(name):
  """Return the value of environment variable *name*.

  Logs that the variable is set and returns its value; raises a plain
  Exception when the variable is unset or empty.
  """
  value = os.getenv(name)
  if not value:
    raise Exception("%s not set" % name)
  logging.info("%s is set" % name)
  return value
def getZone(args, deployment):
  """Pick the GCE zone for *deployment*.

  Load-test deployments are spread across LOADTEST_ZONE using the last
  character of the deployment name; every other mode uses args.zone.
  """
  if args.mode != "loadtest":
    return args.zone
  idx = int(deployment[-1]) % len(LOADTEST_ZONE)
  return LOADTEST_ZONE[idx]
def get_target_url(args):
  """Return the deployment-service hostname for the current test mode.

  Raises:
    RuntimeError: if args.mode has no associated default target url.
  """
  urls = {
      "loadtest": "deploy-staging.kubeflow.cloud",
      "prober": "deploy.kubeflow.cloud",
  }
  try:
    return urls[args.mode]
  except KeyError:
    raise RuntimeError("No default target url for test mode %s !" % args.mode)
def prepare_request_data(args, deployment):
  """Build the JSON payload for a kfctl e2eDeploy request.

  Reads the default app config from gcp_prototype.yaml, customises it for
  this deployment (email, ip name, hostname, registry version), and
  collects the access token / oauth client credentials the deployment
  service needs.

  Args:
    args: parsed CLI args; uses email, project, kfversion, sa_client_id,
      and (via getZone) mode and zone.
    deployment: deployment name; also used to derive ip name and hostname.

  Returns:
    dict ready to be POSTed to /kfctl/e2eDeploy.
  """
  logging.info("prepare deploy call data")
  with open(
      os.path.join(FILE_PATH, "../bootstrap/config/gcp_prototype.yaml"),
      'r') as conf_input:
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input; tolerable here only because the file ships in-repo.
    defaultApp = yaml.load(conf_input)["defaultApp"]
  # Patch the per-deployment parameters into the prototype config.
  for param in defaultApp["parameters"]:
    if param["name"] == "acmeEmail":
      param["value"] = args.email
    if param["name"] == "ipName":
      param["value"] = deployment + "-ip"
    if param["name"] == "hostname":
      param["value"] = "%s.endpoints.%s.cloud.goog" % (deployment, args.project)
  defaultApp['registries'][0]['version'] = args.kfversion
  # Access token for the deployment service to act on our behalf.
  access_token = util_run(
      'gcloud auth application-default print-access-token'.split(' '),
      cwd=FILE_PATH)
  client_id = may_get_env_var("CLIENT_ID")
  client_secret = may_get_env_var("CLIENT_SECRET")
  credentials = GoogleCredentials.get_application_default()
  crm = discovery.build('cloudresourcemanager', 'v1', credentials=credentials)
  # Needed for the ProjectNumber field below.
  project = crm.projects().get(projectId=args.project).execute()
  logging.info("project info: %s", project)
  request_data = {
      "AppConfig": defaultApp,
      "Apply": True,
      "AutoConfigure": True,
      "ClientId": base64.b64encode(client_id.encode()).decode("utf-8"),
      "ClientSecret": base64.b64encode(client_secret.encode()).decode("utf-8"),
      "Cluster": deployment,
      "Email": args.email,
      "IpName": deployment + '-ip',
      "Name": deployment,
      "Namespace": 'kubeflow',
      "Project": args.project,
      "ProjectNumber": project["projectNumber"],
      # service account client id of account: kubeflow-testing@kubeflow-ci.iam.gserviceaccount.com
      "SAClientId": args.sa_client_id,
      "Token": access_token,
      "Zone": getZone(args, deployment)
  }
  return request_data
def make_e2e_call(args):
  """Clean up any previous deployment, then POST an in-cluster deploy request.

  Talks to the kubeflow-controller service inside the cluster (plain HTTP,
  no auth token needed).

  Raises:
    RuntimeError: if cleanup fails or the service returns non-200.
  """
  if not clean_up_resource(args, set([args.deployment])):
    raise RuntimeError("Failed to cleanup resource")
  req_data = prepare_request_data(args, args.deployment)
  resp = requests.post(
      "http://kubeflow-controller.%s.svc.cluster.local:8080/kfctl/e2eDeploy" %
      args.namespace,
      json=req_data)
  if resp.status_code != 200:
    raise RuntimeError("deploy request received status code: %s, message: %s" %
                       (resp.status_code, resp.text))
  logging.info("deploy call done")
# Make 1 deployment request to service url, return if request call successful.
def make_prober_call(args, service_account_credentials):
  """Send one authenticated deploy request to the public deployment service.

  Returns:
    bool: True on HTTP 200; False otherwise (SERVICE_HEALTH is set to 2
    to flag the service as down).
  """
  logging.info("start new prober call")
  req_data = prepare_request_data(args, args.deployment)
  # IAP-protected endpoint: authenticate with an OIDC token.
  google_open_id_connect_token = get_google_open_id_connect_token(
      service_account_credentials)
  try:
    resp = requests.post(
        "https://%s/kfctl/e2eDeploy" % get_target_url(args),
        json=req_data,
        headers={
            'Authorization': 'Bearer {}'.format(google_open_id_connect_token)
        })
    if resp.status_code != 200:
      # Mark service down if return code abnormal
      SERVICE_HEALTH.set(2)
      return False
  except Exception as e:
    logging.error(e)
    SERVICE_HEALTH.set(2)
    return False
  logging.info("prober call done")
  return True
# For each deployment, make a request to service url, return if all requests call successful.
def make_loadtest_call(args, service_account_credentials, projects, deployments):
  """Fire one deploy request per (project, deployment) pair in parallel.

  Each request runs in its own requestThread; a failing thread marks
  SERVICE_HEALTH as down rather than raising.

  Returns:
    bool: True when no thread flagged the service down; False otherwise.
  """
  logging.info("start new load test call")
  google_open_id_connect_token = get_google_open_id_connect_token(
      service_account_credentials)
  threads = []
  for project in projects:
    args.project = project
    for deployment in deployments:
      req_data = prepare_request_data(args, deployment)
      threads.append(
          requestThread(
              get_target_url(args), req_data, google_open_id_connect_token))
  for t in threads:
    t.start()
  for t in threads:
    t.join()
  # NOTE(review): peeks at a private attribute of the prometheus Gauge to
  # learn whether any thread set SERVICE_HEALTH to 2.
  if SERVICE_HEALTH._value.get() == 2:
    return False
  logging.info("load test call done")
  return True
def get_gcs_path(mode, project, deployment):
  """Return the bucket-relative GCS folder holding this deployment's certs."""
  segments = (SSL_BUCKET, mode, project, deployment)
  return os.path.join(*segments)
# Insert ssl cert into GKE cluster
def insert_ssl_cert(args, deployment):
  """Download this deployment's ssl certs from GCS and create them in GKE.

  Waits (up to 10 minutes) for the deployment-manager deployment to reach
  DONE so the GKE cluster exists, copies the cert files from GCS into a
  local scratch dir, then kubectl-creates them via create_secret().

  Returns:
    bool: True on success, and also when no cert exists in GCS (nothing
    to insert); False when creating the secret failed.
  """
  logging.info("Wait till deployment is done and GKE cluster is up")
  credentials = GoogleCredentials.get_application_default()
  service = discovery.build('deploymentmanager', 'v2', credentials=credentials)
  # Wait up to 10 minutes till GKE cluster up and available.
  end_time = datetime.datetime.now() + datetime.timedelta(minutes=10)
  while datetime.datetime.now() < end_time:
    sleep(5)
    try:
      request = service.deployments().get(
          project=args.project, deployment=deployment)
      response = request.execute()
      if response['operation']['status'] != 'DONE':
        logging.info("Deployment running")
        continue
    except Exception as e:
      logging.info("Deployment hasn't started")
      continue
    break
  # Start from a clean scratch directory for this deployment's certs.
  ssl_local_dir = os.path.join(SSL_DIR, args.project, deployment)
  if os.path.exists(ssl_local_dir):
    shutil.rmtree(ssl_local_dir)
  os.makedirs(ssl_local_dir)
  logging.info("donwload ssl cert and insert to GKE cluster")
  try:
    # TODO: switch to client lib
    gcs_path = get_gcs_path(args.mode, args.project, deployment)
    util_run(("gsutil cp gs://%s/* %s" % (gcs_path, ssl_local_dir)).split(' '))
  except Exception:
    # No cert in GCS is not an error: a fresh cert will be issued later.
    logging.warning("ssl cert for %s doesn't exist in gcs" % args.mode)
    # clean up local dir
    shutil.rmtree(ssl_local_dir)
    return True
  try:
    create_secret(args, deployment, ssl_local_dir)
  except Exception as e:
    logging.error(e)
    return False
  return True
# Retry every 2s for up to 15s: gcloud/kubectl can be flaky right after
# cluster creation.
@retry(wait_fixed=2000, stop_max_delay=15000)
def create_secret(args, deployment, ssl_local_dir):
  """kubectl-create the downloaded ssl secrets in the deployment's cluster."""
  util_run(
      ("gcloud container clusters get-credentials %s --zone %s --project %s" %
       (deployment, getZone(args, deployment), args.project)).split(' '))
  util_run(("kubectl create -f %s" % ssl_local_dir).split(' '))
# deployments: set(string) which contains all deployment names in current test round.
def check_deploy_status(args, deployments):
  """Poll each deployment's IAP endpoint until it serves 200 or we time out.

  Deployments that become reachable also get their ssl secrets exported
  from the cluster and uploaded to GCS for reuse by later rounds.

  Args:
    args: parsed CLI args; uses project, mode, iap_wait_min.
    deployments: set of deployment names for the current round.

  Returns:
    int: number of deployments whose endpoint became reachable.
  """
  num_deployments = len(deployments)
  logging.info("check deployment status")
  service_account_credentials = get_service_account_credentials("CLIENT_ID")
  google_open_id_connect_token = get_google_open_id_connect_token(
      service_account_credentials)
  # Wait up to 30 minutes for IAP access test.
  num_req = 0
  end_time = datetime.datetime.now() + datetime.timedelta(
      minutes=args.iap_wait_min)
  success_deploy = set()
  while datetime.datetime.now() < end_time and len(deployments) > 0:
    sleep(10)
    num_req += 1
    for deployment in deployments:
      url = "https://%s.endpoints.%s.cloud.goog" % (deployment, args.project)
      logging.info("Trying url: %s", url)
      try:
        resp = requests.request(
            METHOD,
            url,
            headers={
                'Authorization':
                    'Bearer {}'.format(google_open_id_connect_token)
            },
            verify=False)
        if resp.status_code == 200:
          success_deploy.add(deployment)
          logging.info("IAP is ready for %s!", url)
        else:
          logging.info(
              "%s: IAP not ready, request number: %s" % (deployment, num_req))
      except Exception:
        logging.info("%s: IAP not ready, exception caught, request number: %s" %
                     (deployment, num_req))
    # Stop polling deployments that already succeeded.
    deployments = deployments.difference(success_deploy)
  # Persist the issued ssl secrets to GCS for reuse in later test rounds.
  for deployment in success_deploy:
    try:
      ssl_local_dir = os.path.join(SSL_DIR, args.project, deployment)
      try:
        os.makedirs(ssl_local_dir)
      except OSError as exc:  # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(ssl_local_dir):
          pass
        else:
          raise
      util_run((
          "gcloud container clusters get-credentials %s --zone %s --project %s"
          % (deployment, getZone(args, deployment), args.project)).split(' '))
      for sec in ["envoy-ingress-tls", "letsencrypt-prod-secret"]:
        sec_data = util_run(
            ("kubectl get secret %s -n kubeflow -o yaml" % sec).split(' '))
        with open(os.path.join(ssl_local_dir, sec + ".yaml"),
                  'w+') as sec_file:
          sec_file.write(sec_data)
          sec_file.close()
      # TODO: switch to client lib
      gcs_path = get_gcs_path(args.mode, args.project, deployment)
      util_run(
          ("gsutil cp %s/* gs://%s/" % (ssl_local_dir, gcs_path)).split(' '))
    except Exception:
      logging.error("%s: failed uploading ssl cert" % deployment)
  # return number of successful deployments
  return num_deployments - len(deployments)
def get_service_account_credentials(client_id_key):
  """Build service-account credentials capable of minting IAP OIDC tokens.

  Args:
    client_id_key: name of the env var holding the IAP OAuth client id,
      used as the target_audience claim.

  Returns:
    google.oauth2.service_account.Credentials for the signing account.

  Raises:
    Exception: if application-default credentials are end-user credentials
      (only service accounts are supported).
  """
  # Figure out what environment we're running in and get some preliminary
  # information about the service account.
  credentials, _ = google.auth.default(scopes=[IAM_SCOPE])
  if isinstance(credentials, google.oauth2.credentials.Credentials):
    raise Exception('make_iap_request is only supported for service '
                    'accounts.')
  # For service account's using the Compute Engine metadata service,
  # service_account_email isn't available until refresh is called.
  credentials.refresh(Request())
  signer_email = credentials.service_account_email
  if isinstance(credentials,
                google.auth.compute_engine.credentials.Credentials):
    # GCE metadata credentials cannot sign locally; use the IAM signBlob API.
    signer = google.auth.iam.Signer(Request(), credentials, signer_email)
  else:
    # A Signer object can sign a JWT using the service account's key.
    signer = credentials.signer
  # Construct OAuth 2.0 service account credentials using the signer
  # and email acquired from the bootstrap credentials.
  return google.oauth2.service_account.Credentials(
      signer,
      signer_email,
      token_uri=OAUTH_TOKEN_URI,
      additional_claims={'target_audience': may_get_env_var(client_id_key)})
def get_google_open_id_connect_token(service_account_credentials):
  """Exchange service-account credentials for a Google-signed OIDC id_token.

  NOTE(review): relies on private google.oauth2 helpers
  (_make_authorization_grant_assertion, _client._token_endpoint_request);
  may break across google-auth library upgrades.
  """
  service_account_jwt = (
      service_account_credentials._make_authorization_grant_assertion())
  request = google.auth.transport.requests.Request()
  body = {
      'assertion': service_account_jwt,
      'grant_type': google.oauth2._client._JWT_GRANT_TYPE,
  }
  token_response = google.oauth2._client._token_endpoint_request(
      request, OAUTH_TOKEN_URI, body)
  return token_response['id_token']
def delete_gcloud_resource(args, keyword, filter='', dlt_params=None):
  """List and delete all gcloud compute resources of one kind.

  Args:
    args: parsed CLI args; only args.project is used.
    keyword: gcloud compute resource kind, e.g. "forwarding-rules".
    filter: extra text appended verbatim to the list command.  The name is
      kept (it shadows the builtin) because callers pass it as a keyword
      argument.
    dlt_params: optional extra argv entries for the delete command,
      e.g. ["--global"].  A None default replaces the old mutable-list
      default argument.
  """
  # TODO: switch to client lib
  extra_params = list(dlt_params) if dlt_params else []
  get_cmd = 'gcloud compute %s list --project=%s --format="value(name)"' % (
      keyword, args.project)
  elements = util_run(get_cmd + filter, shell=True)
  for element in elements.split('\n'):
    if not element:
      # Skip blank lines (e.g. when no resources of this kind exist)
      # instead of issuing a delete call that is guaranteed to fail.
      continue
    dlt_cmd = 'gcloud compute %s delete -q --project=%s %s' % (
        keyword, args.project, element)
    try:
      util_run(dlt_cmd.split(' ') + extra_params)
    except Exception as e:
      # Best-effort cleanup: log and keep deleting the rest.
      logging.warning('Cannot remove %s %s' % (keyword, element))
      logging.warning(e)
def clean_up_resource(args, deployments):
  """Clean up deployment / app config from previous test

  Args:
    args: The args from ArgParse.
    deployments set(string): which contains all deployment names in current test round.
  Returns:
    bool: True if cleanup is done
  """
  logging.info(
      "Clean up project resource (backend service and deployment)")
  # Will reuse source repo for continuous tests
  # Within 7 days after repo deleted, source repo won't allow recreation with same name
  # Delete deployment
  credentials = GoogleCredentials.get_application_default()
  service = discovery.build('deploymentmanager', 'v2', credentials=credentials)
  delete_done = False
  for deployment in deployments:
    try:
      request = service.deployments().delete(
          project=args.project, deployment=deployment)
      request.execute()
    except Exception as e:
      # Already-absent deployments are fine; nothing to delete.
      logging.info("Deployment doesn't exist, continue")
  # wait up to 10 minutes till delete finish.
  end_time = datetime.datetime.now() + datetime.timedelta(minutes=10)
  while datetime.datetime.now() < end_time:
    sleep(10)
    try:
      request = service.deployments().list(project=args.project)
      response = request.execute()
      # Done once none of our deployments appear in the project listing.
      if ('deployments' not in response) or (len(deployments & set(
          d['name'] for d in response['deployments'])) == 0):
        delete_done = True
        break
    except Exception:
      logging.info("Failed listing current deployments, retry in 10 seconds")
  # Delete forwarding-rules
  delete_gcloud_resource(args, 'forwarding-rules', dlt_params=['--global'])
  # Delete target-http-proxies
  delete_gcloud_resource(args, 'target-http-proxies')
  # Delete target-https-proxies
  delete_gcloud_resource(args, 'target-https-proxies')
  # Delete url-maps
  delete_gcloud_resource(args, 'url-maps')
  # Delete backend-services
  delete_gcloud_resource(args, 'backend-services', dlt_params=['--global'])
  # Delete instance-groups
  for zone in LOADTEST_ZONE:
    delete_gcloud_resource(
        args,
        'instance-groups unmanaged',
        filter=' --filter=INSTANCES:0',
        dlt_params=['--zone=' + zone])
  # Delete ssl-certificates
  delete_gcloud_resource(args, 'ssl-certificates')
  # Delete health-checks
  delete_gcloud_resource(args, 'health-checks')
  if not delete_done:
    logging.error("failed to clean up resources for project %s deployments %s",
                  args.project, deployments)
  return delete_done
def util_run(command,
             cwd=None,
             env=None,
             shell=False,
             polling_interval=datetime.timedelta(seconds=1)):
  """Run a subprocess and return its combined stdout/stderr.

  Args:
    command: list of argv entries (or a single string when shell=True).
    cwd: working directory for the child process.
    env: environment mapping; defaults to the current os.environ.
    shell: passed through to subprocess.Popen.
    polling_interval: how long to sleep between liveness polls.

  Returns:
    str: the captured output, newline-joined.

  Raises:
    subprocess.CalledProcessError: if the command exits non-zero.
  """
  # With shell=True the command is a string; joining it char-by-char in the
  # log would be garbage, so only join real argv lists.
  cmd_text = command if isinstance(command, str) else " ".join(command)
  logging.info("Running: %s \ncwd=%s", cmd_text, cwd)
  if not env:
    env = os.environ
  else:
    keys = sorted(env.keys())
    lines = []
    for k in keys:
      lines.append("{0}={1}".format(k, env[k]))
    logging.info("Running: Environment:\n%s", "\n".join(lines))
  # universal_newlines=True makes stdout yield str lines, so the '' EOF
  # sentinels below terminate correctly under Python 3 (the old code read
  # bytes but compared against str sentinels and str-stripped them).
  process = subprocess.Popen(
      command,
      cwd=cwd,
      env=env,
      shell=shell,
      universal_newlines=True,
      stdout=subprocess.PIPE,
      stderr=subprocess.STDOUT)
  output = []
  while process.poll() is None:
    process.stdout.flush()
    for line in iter(process.stdout.readline, ''):
      output.append(line.strip('\n'))
    sleep(polling_interval.total_seconds())
  # Drain anything emitted between the last poll and process exit.
  process.stdout.flush()
  for line in iter(process.stdout.readline, ''):
    output.append(line.strip('\n'))
  if process.returncode != 0:
    raise subprocess.CalledProcessError(
        process.returncode, "cmd: {0} exited with code {1}".format(
            cmd_text, process.returncode), "\n".join(output))
  return "\n".join(output)
def clean_up_project_resource(args, projects, deployments):
  """Run clean_up_resource for every project, one child process each.

  Each worker is started inside the loop so it captures the args.project
  value current at fork time; then all workers are joined.
  """
  workers = []
  for project in projects:
    args.project = project
    worker = Process(target=partial(clean_up_resource, args, deployments))
    workers.append(worker)
    worker.start()
  for worker in workers:
    worker.join()
def upload_load_test_ssl_cert(args, projects, deployments):
  """Insert the ssl cert for every (project, deployment) pair.

  Mutates args.project per iteration, as the helper functions read the
  current project from args.
  """
  for proj in projects:
    args.project = proj
    for dep in deployments:
      insert_ssl_cert(args, dep)
def check_load_test_results(args, projects, deployments):
  """Verify IAP availability for every load-test deployment.

  Iterates over the projects, delegating to check_deploy_status per
  project, and updates the load-test Prometheus metrics.
  """
  num_deployments = len(deployments)
  total_success = 0
  # deadline for checking all the results.
  end_time = datetime.datetime.now() + datetime.timedelta(
      minutes=args.iap_wait_min)
  for project in projects:
    args.project = project
    # set the deadline for each check.
    now = datetime.datetime.now()
    if end_time < now:
      args.iap_wait_min = 1
    else:
      delta = end_time - now
      args.iap_wait_min = delta.seconds / 60 + 1
    num_success = check_deploy_status(args, deployments)
    total_success += num_success
    logging.info("%s out of %s deployments succeed for project %s",
                 num_success, num_deployments, project)
    # Subsequent projects only wait 1 minute: the long IAP propagation
    # wait already happened while checking the first project.
    args.iap_wait_min = 1
    # NOTE(review): the gauge and the success/failure counters reflect only
    # the current (ultimately the last) project's result, not total_success
    # across projects -- confirm whether that is intended.
    LOADTEST_SUCCESS.set(num_success)
    if num_success == num_deployments:
      SUCCESS_COUNT.inc()
    else:
      FAILURE_COUNT.inc()
  logging.info("%s out of %s deployments succeed in total",
               total_success, num_deployments * len(projects))
def run_load_test(args):
  """Drive one full load-test round: clean, deploy, verify, clean again.

  Exposes Prometheus metrics on :8000 and returns early (recording a
  failure) if the initial batch of deploy requests fails.
  """
  num_deployments = args.number_deployments_per_project
  num_projects = args.number_projects
  start_http_server(8000)
  LOADTEST_SUCCESS.set(num_deployments)
  LOADTEST_HEALTH.set(0)
  service_account_credentials = get_service_account_credentials(
      "SERVICE_CLIENT_ID")
  # Deployment names kubeflow1..kubeflowN; projects <prefix>1..<prefix>M.
  deployments = set(
      ['kubeflow' + str(i) for i in range(1, num_deployments + 1)])
  projects = [args.project_prefix + str(i)
              for i in range(1, num_projects + 1)]
  logging.info("deployments: %s" % deployments)
  logging.info("projects: %s" % projects)
  clean_up_project_resource(args, projects, deployments)
  if not make_loadtest_call(
      args, service_account_credentials, projects, deployments):
    LOADTEST_SUCCESS.set(0)
    FAILURE_COUNT.inc()
    logging.error("load test request failed")
    return
  upload_load_test_ssl_cert(args, projects, deployments)
  check_load_test_results(args, projects, deployments)
  clean_up_project_resource(args, projects, deployments)
def run_e2e_test(args):
  """One-shot end-to-end test: deploy, insert certs, verify the IAP endpoint.

  Raises:
    RuntimeError: if the IAP endpoint never becomes reachable in time.
  """
  # Give the in-cluster deployment service time to come up before calling it.
  sleep(args.wait_sec)
  make_e2e_call(args)
  insert_ssl_cert(args, args.deployment)
  if not check_deploy_status(args, set([args.deployment])):
    raise RuntimeError("IAP endpoint not ready after 30 minutes, time out...")
  logging.info("Test finished.")
def wrap_test(args):
  """Run the tests given by args.func and output artifacts as necessary.

  Always writes a junit XML result file to args.artifacts_dir, even when
  the wrapped test raises.
  """
  test_name = "bootstrapper"
  test_case = test_util.TestCase()
  test_case.class_name = "KubeFlow"
  test_case.name = args.workflow_name + "-" + test_name
  try:
    def run():
      args.func(args)
    test_util.wrap_test(run, test_case)
  finally:
    # Test grid has problems with underscores in the name.
    # https://github.com/kubeflow/kubeflow/issues/631
    # TestGrid currently uses the regex junit_(^_)*.xml so we only
    # want one underscore after junit.
    junit_name = test_case.name.replace("_", "-")
    junit_path = os.path.join(args.artifacts_dir,
                              "junit_{0}.xml".format(junit_name))
    logging.info("Writing test results to %s", junit_path)
    test_util.create_junit_xml_file([test_case], junit_path)
def main(unparsed_args=None):
  """Entry point: parse CLI flags and dispatch to the selected test mode.

  Modes:
    e2e      -- one-shot deployment test, wrapped for junit reporting.
    prober   -- endless deploy-and-probe loop exporting Prometheus
                metrics on :8000.
    loadtest -- fan out many deployments across projects (run_load_test).
  """
  parser = argparse.ArgumentParser(
      description="Start deployment api and make request to it.")
  parser.add_argument(
      "--deployment",
      default="periodic-test",
      type=str,
      help="Deployment name.")
  parser.add_argument(
      "--email",
      default="google-kubeflow-support@google.com",
      type=str,
      help="Email used during e2e test")
  parser.add_argument(
      "--project",
      default="kubeflow-ci-deployment",
      type=str,
      help="e2e test project id")
  parser.add_argument(
      "--project_prefix",
      default="kf-gcp-deploy-test",
      type=str,
      help="project prefix for load test")
  parser.add_argument(
      "--number_projects",
      default="2",
      type=int,
      help="number of projects used in load test")
  parser.add_argument(
      "--number_deployments_per_project",
      default="5",
      type=int,
      help="number of deployments per project used in load test")
  parser.add_argument(
      "--namespace",
      default="",
      type=str,
      help="namespace where deployment service is running")
  # Fixed help text: this flag is a wait in seconds (used before the e2e
  # test and between prober rounds), not an oauth client secret.
  parser.add_argument(
      "--wait_sec",
      default=120,
      type=int,
      help="seconds to wait before the e2e test / between prober rounds")
  parser.add_argument(
      "--iap_wait_min", default=30, type=int, help="minutes to wait for IAP")
  parser.add_argument(
      "--zone", default="us-east1-d", type=str, help="GKE cluster zone")
  parser.add_argument(
      "--sa_client_id",
      default="111670663612681935351",
      type=str,
      help="Service account client id")
  # Fixed help text: this is the kubeflow registry version (see
  # prepare_request_data), not a service account client id.
  parser.add_argument(
      "--kfversion",
      default="v0.4.1",
      type=str,
      help="kubeflow registry version used for the deployment")
  parser.add_argument(
      "--mode",
      default="e2e",
      type=str,
      help="offer three test mode: e2e, prober, and loadtest")
  # args for e2e test
  parser.set_defaults(func=run_e2e_test)
  parser.add_argument(
      "--artifacts_dir",
      default="",
      type=str,
      help="Directory to use for artifacts that should be preserved after "
      "the test runs. Defaults to test_dir if not set.")
  parser.add_argument(
      "--workflow_name",
      default="deployapp",
      type=str,
      help="The name of the workflow.")
  args = parser.parse_args(args=unparsed_args)
  if not args.artifacts_dir:
    args.artifacts_dir = tempfile.gettempdir()
  # Authenticate gcloud with the CI service account before any gcloud call.
  util_run(
      ('gcloud auth activate-service-account --key-file=' +
       may_get_env_var("GOOGLE_APPLICATION_CREDENTIALS")).split(' '),
      cwd=FILE_PATH)
  if args.mode == "e2e":
    wrap_test(args)
  if args.mode == "prober":
    # Export Prometheus metrics and loop forever: clean up, deploy, insert
    # certs, probe the IAP endpoint, and record health/success metrics.
    start_http_server(8000)
    SERVICE_HEALTH.set(0)
    PROBER_HEALTH.set(0)
    service_account_credentials = get_service_account_credentials(
        "SERVICE_CLIENT_ID")
    while True:
      sleep(args.wait_sec)
      if not clean_up_resource(args, set([args.deployment])):
        PROBER_HEALTH.set(1)
        FAILURE_COUNT.inc()
        logging.error(
            "request cleanup failed, retry in %s seconds" % args.wait_sec)
        continue
      PROBER_HEALTH.set(0)
      if make_prober_call(args, service_account_credentials):
        if insert_ssl_cert(args, args.deployment):
          PROBER_HEALTH.set(0)
        else:
          PROBER_HEALTH.set(1)
          FAILURE_COUNT.inc()
          logging.error("request insert_ssl_cert failed, retry in %s seconds" %
                        args.wait_sec)
          continue
        if check_deploy_status(args, set([args.deployment])):
          SERVICE_HEALTH.set(0)
          SUCCESS_COUNT.inc()
        else:
          SERVICE_HEALTH.set(1)
          FAILURE_COUNT.inc()
      else:
        SERVICE_HEALTH.set(2)
        FAILURE_COUNT.inc()
        logging.error(
            "prober request failed, retry in %s seconds" % args.wait_sec)
  if args.mode == "loadtest":
    run_load_test(args)
if __name__ == '__main__':
  # Configure root logging before main() so early messages are captured.
  logging.basicConfig(
      level=logging.INFO,
      format=('%(levelname)s|%(asctime)s'
              '|%(pathname)s|%(lineno)d| %(message)s'),
      datefmt='%Y-%m-%dT%H:%M:%S',
  )
  # Silence noisy discovery-cache messages from googleapiclient.
  logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR)
  logging.getLogger().setLevel(logging.INFO)
  main()
| |
# neural.py
#
#
# Created by Phoenix on 10/13/15.
#
from tools import rotate2D, sum_vector, neg_vector
class Brain:
    """A hexagonal grid of cells, every cell connected to every other.

    ``num`` is the number of hexagonal rings placed around a single
    centre cell; the total cell count is the centred hexagonal number
    1 + 3*num*(num+1).
    """
    def __init__(self,name,num):
        self.__name = name
        self.__ring = 1 + num
        # Centred hexagonal number: 1 + 3*(1+num)*num cells in total.
        self.__cell_num = 1 + 3*(1+num)*num
        # All cells start at the origin; real coordinates are assigned below.
        self.__cells = [Cell(i,(0.,0.)) for i in range(self.__cell_num)]
        # Set the coordinate of cells
        self.__cells[0].set_Cord((0.,0.,))
        cell_count = 1
        for i in range(num):
            # Ring i+1 starts on the positive x axis at radius i+1.
            vect1 = (1.*(i+1),0.)
            cell_count += 1
            self.__cells[cell_count-1].set_Cord(vect1)
            # Walk the first half of the ring: three edges of the hexagon,
            # turning 60 degrees between edges (first step direction is 120).
            vect2 = rotate2D((1.,0.),120)
            for j in range(i+1):
                vect1 = sum_vector(vect1,vect2)
                cell_count += 1
                self.__cells[cell_count-1].set_Cord(vect1)
            vect2 = rotate2D(vect2,60)
            for j in range(i+1):
                vect1 = sum_vector(vect1,vect2)
                cell_count += 1
                self.__cells[cell_count-1].set_Cord(vect1)
            vect2 = rotate2D(vect2,60)
            for j in range(i):
                vect1 = sum_vector(vect1,vect2)
                cell_count += 1
                self.__cells[cell_count-1].set_Cord(vect1)
            # The second half of the ring is the first half reflected
            # through the origin (neg_vector of the cell placed 3*(i+1)
            # positions earlier).
            for j in range(3*(i+1)):
                cell_count += 1
                cord = neg_vector(self.__cells[cell_count-1 - 3*(i+1)].get_Cord())
                self.__cells[cell_count-1].set_Cord(cord)
        # set initial connection of cells
        for i in range(self.__cell_num):
            for j in range(self.__cell_num):
                if i != j:
                    self.__cells[i].ConnectTo(self.__cells[j])
    def get_AllBond(self):
        """Return a flat list of every cell's outgoing bonds."""
        bonds = []
        for i in range(self.__cell_num):
            cell_bonds = self.__cells[i].get_Bonds()
            for k in range(len(cell_bonds)):
                bonds.append(cell_bonds[k])
        return bonds
    def get_AllCell(self):
        """Return the list of all Cell objects."""
        return self.__cells
    def get_Cell(self,ind):
        """Return the cell at index ``ind``."""
        return self.__cells[ind]
    def get_CellNum(self):
        """Return the total number of cells."""
        return self.__cell_num
class Cell:
    """A neuron-like node in the Brain grid.

    Tracks its coordinate, the coordinates of cells it connects to/from,
    the outgoing Bond objects, and an accumulated Charge that is considered
    "over charged" once it reaches ``__threshold``.
    """

    def __init__(self, name, cord):
        self.__name = name
        self.__cord = cord
        self.__connect_to = []     # coordinates of cells this cell feeds
        self.__connect_from = []   # coordinates of cells feeding this cell
        self.__cell_size = 1
        self.__dendrite_num = 0    # count of incoming connections
        self.__axon_num = 0        # count of outgoing connections
        self.__bond = []           # Bond objects for outgoing connections
        self.__charge = Charge(self.__cord, 0.)
        self.__threshold = 1.      # firing threshold used by check_OverCharge()

    def ConnectTo(self, targ):
        """Create an outgoing Bond to *targ* and register the reverse link."""
        cord = targ.get_Cord()
        self.__axon_num += 1
        self.__connect_to.append(cord)
        self.__bond.append(Bond(self, targ))
        targ.ConnectFrom(self)

    def ConnectFrom(self, targ):
        """Record an incoming connection from *targ* (called by ConnectTo)."""
        cord = targ.get_Cord()
        self.__dendrite_num += 1
        self.__connect_from.append(cord)

    def get_CellInfo(self):
        """Print a human-readable summary of this cell."""
        print ()
        print (' Cell Name: ',self.__name,'( Cord: ',self.__cord,')')
        print (' Connect From: ',self.__dendrite_num,self.__connect_from)
        print (' Connect to: ',self.__axon_num,self.__connect_to)
        print ('Strength of Bond: ',self.get_BondInfo())

    def get_Name(self):
        return self.__name

    def get_Cord(self):
        return self.__cord

    def set_Cord(self, cord):
        self.__cord = cord

    def get_AxonNum(self):
        return self.__axon_num

    def get_DendriteNum(self):
        return self.__dendrite_num

    def get_ConnectFrom(self):
        return self.__connect_from

    def get_ConnectTo(self):
        return self.__connect_to

    def get_Bonds(self):
        return self.__bond

    def get_BondNum(self):
        return len(self.__bond)

    def get_BondInfo(self):
        """Return the strength of every outgoing bond."""
        return [bond.get_Strength() for bond in self.__bond]

    def get_BondCord(self):
        """Return ([x...], [y...]) start coordinates of every outgoing bond.

        BUG FIX: the original subscripted the bound methods themselves
        (``self.get_Bonds[i]`` and ``get_Start[0]``), which raises
        ``TypeError: 'method' object is not subscriptable`` for any
        non-empty bond list.  The methods must be called.
        """
        x = [bond.get_Start()[0] for bond in self.__bond]
        y = [bond.get_Start()[1] for bond in self.__bond]
        return x, y

    def get_Charge(self):
        return self.__charge

    def obtain_Charge(self, inte):
        """Add intensity *inte* to the accumulated charge."""
        self.__charge.marge(inte)

    def release_Charge(self):
        """Discharge by one threshold's worth of intensity."""
        self.__charge.set_Inte(self.__charge.get_Inte() - self.__threshold)

    def check_OverCharge(self):
        """Return True when the accumulated charge reached the threshold.

        The original returned ``True`` or (implicitly) ``None``; an explicit
        bool is backward compatible for the truthiness tests callers use.
        """
        return self.__charge.get_Inte() >= self.__threshold
class Bond:
    """A directed connection from one cell to another.

    Strength is the reciprocal of the Euclidean distance between the two
    cells and is adjusted multiplicatively by Weaken/StrengthenBond.
    """

    def __init__(self, cell, targ):
        self.__targID = targ.get_Name()
        self.__start = cell.get_Cord()
        self.__end = targ.get_Cord()
        dx = self.__start[0] - self.__end[0]
        dy = self.__start[1] - self.__end[1]
        self.__distance = (dx**2 + dy**2)**0.5
        self.__strength = 1./self.__distance
        # NOTE(review): appears to duplicate __strength — kept for
        # backward compatibility with any external readers.
        self.strekey = 1./self.__distance

    def __iter__(self):
        # A Bond iterates over itself (endlessly yields itself).
        return self

    def __next__(self):
        return self

    def WeakenBond(self):
        """Decay the strength by 10%."""
        self.__strength = self.__strength * 0.9

    def StrengthenBond(self):
        """Reinforce the strength by 5%."""
        self.__strength = self.__strength * 1.05

    def get_Start(self):
        return self.__start

    def get_End(self):
        return self.__end

    def get_Cord(self):
        """Return ([x_start, x_end], [y_start, y_end]) for plotting."""
        xs = [self.__start[0], self.__end[0]]
        ys = [self.__start[1], self.__end[1]]
        return xs, ys

    def get_Strength(self):
        return self.__strength

    def get_Distance(self):
        return self.__distance

    def get_TargID(self):
        return self.__targID
class Charge:
    """A scalar charge intensity located at a coordinate."""

    def __init__(self, cord, intensity):
        self.__cord = cord
        self.__inte = intensity

    def marge(self, pulse):
        """Merge *pulse* into the intensity (method name kept for
        compatibility with existing callers)."""
        self.__inte += pulse

    def move(self, bond):
        # NOTE(review): the fetched end-coordinate and strength are
        # currently discarded — this method looks unfinished.
        cord = bond.get_End()
        stre = bond.get_Strength()

    def get_Inte(self):
        return self.__inte

    def set_Inte(self, inte):
        self.__inte = inte
# phenomenon
def charg_seperate(cells,targID):
    """Fire the cell at *targID* and propagate charge along strong bonds.

    Every outgoing bond with strength >= 0.8 delivers strength**2 charge
    to its target cell and is itself strengthened; the firing cell then
    releases one threshold's worth of charge.  Returns the IDs of bonded
    cells that are now over their own threshold.
    """
    print('============= Charge release from',targID,' =============')
    source = cells[targID]
    bonds = source.get_Bonds()
    # Conduct charge over sufficiently strong bonds only.
    for bond in bonds:
        strength = bond.get_Strength()
        if strength >= 0.8:
            cells[bond.get_TargID()].obtain_Charge(strength**2)
            bond.StrengthenBond()
    source.release_Charge()
    # Report every bonded target that is now over-charged.
    return [bond.get_TargID() for bond in bonds
            if cells[bond.get_TargID()].check_OverCharge()]
# def move_Charge(self,ID):
# print('============= Charge release from',ID,' =============')
# over_charg = []
# cell = self.get_Cell(ID)
# bonds = cell.get_Bonds()
# for i in range(cell.get_AxonNum()):
# bond = bonds[i]
# targ_cell = self.get_Cell(bond.get_TargID())
# targ_cell.obtain_Charge(bond.get_Strength())
# if targ_cell.check_Charge():
# over_charg.append(bond.get_TargID())
# print (bond.get_Strength())
# bond.StrengthenBond()
# print ('Charge goes to: ',targ_cell.get_Name(),bond.get_Strength())
# cell.release_Charge()
# return over_charg
| |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Add denormalised lookup columns to the profiles_flatvalue table.

        NOTE(review): ``max_length`` is passed as the *string* ``'255'``
        rather than the integer 255 — South tolerates it, but confirm
        before regenerating this migration.
        """
        # Adding field 'FlatValue.indicator_slug'
        db.add_column(u'profiles_flatvalue', 'indicator_slug', self.gf('django.db.models.fields.CharField')(default='', max_length='255', db_index=True), keep_default=False)
        # Adding field 'FlatValue.geography_name'
        db.add_column(u'profiles_flatvalue', 'geography_name', self.gf('django.db.models.fields.CharField')(default='', max_length='255'), keep_default=False)
        # Adding field 'FlatValue.geography_slug'
        db.add_column(u'profiles_flatvalue', 'geography_slug', self.gf('django.db.models.fields.CharField')(default='', max_length='255', db_index=True), keep_default=False)
        # Adding field 'FlatValue.geometry_id'
        db.add_column(u'profiles_flatvalue', 'geometry_id', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True), keep_default=False)
    def backwards(self, orm):
        """Reverse migration: drop the columns added by ``forwards``."""
        # Deleting field 'FlatValue.indicator_slug'
        db.delete_column(u'profiles_flatvalue', 'indicator_slug')
        # Deleting field 'FlatValue.geography_name'
        db.delete_column(u'profiles_flatvalue', 'geography_name')
        # Deleting field 'FlatValue.geography_slug'
        db.delete_column(u'profiles_flatvalue', 'geography_slug')
        # Deleting field 'FlatValue.geometry_id'
        db.delete_column(u'profiles_flatvalue', 'geometry_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 11, 29, 15, 38, 51, 541949)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 11, 29, 15, 38, 51, 541195)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'maps.shapefile': {
'Meta': {'object_name': 'ShapeFile'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'geo_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geo_meta_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'geom_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'shape_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'zoom_threshold': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
u'profiles.customvalue': {
'Meta': {'object_name': 'CustomValue'},
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_value': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'supress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_operator': ('django.db.models.fields.CharField', [], {'max_length': "'255'"})
},
u'profiles.datadomain': {
'Meta': {'ordering': "['weight']", 'object_name': 'DataDomain'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Indicator']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'subdomain_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subdomains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'symmetrical': 'False', 'blank': 'True'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.datapoint': {
'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'DataPoint'},
'change_from_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_from'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
'change_to_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_to'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']", 'null': 'True'})
},
u'profiles.datasource': {
'Meta': {'object_name': 'DataSource'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'profiles.denominator': {
'Meta': {'object_name': 'Denominator'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'multiplier': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sort': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.denominatorpart': {
'Meta': {'object_name': 'DenominatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.IndicatorPart']"})
},
u'profiles.flatvalue': {
'Meta': {'object_name': 'FlatValue'},
'display_title': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'f_moe': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_number': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_percent': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'geography': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'geography_name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'geography_slug': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'geometry_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'indicator_slug': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'time_key': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'value_type': ('django.db.models.fields.CharField', [], {'max_length': "'100'"})
},
u'profiles.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataSource']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'shapefile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maps.ShapeFile']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'}),
'summary_level': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'profiles.georecord': {
'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geo_searchable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']"}),
'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'})
},
u'profiles.indicator': {
'Meta': {'object_name': 'Indicator'},
'data_domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_distribution': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_generated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'default': "'U.S. Census Bureau'", 'max_length': '300', 'blank': 'True'}),
'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
},
u'profiles.indicatordomain': {
'Meta': {'object_name': 'IndicatorDomain'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.indicatorpart': {
'Meta': {'object_name': 'IndicatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']"})
},
u'profiles.legendoption': {
'Meta': {'object_name': 'LegendOption'},
'bin_options': ('django.db.models.fields.TextField', [], {'default': "''"}),
'bin_type': ('django.db.models.fields.CharField', [], {'default': "'jenks'", 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.precalculatedvalue': {
'Meta': {'object_name': 'PrecalculatedValue'},
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'geo_record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'table': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'profiles.taskstatus': {
'Meta': {'object_name': 'TaskStatus'},
'error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
't_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'task': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'profiles.time': {
'Meta': {'object_name': 'Time'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
},
u'profiles.value': {
'Meta': {'object_name': 'Value'},
'datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataPoint']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
}
}
complete_apps = ['profiles']
| |
import socket
import logging
import traceback
from kombu.syn import blocking
from kombu.utils.finalize import Finalize
from celery import beat
from celery import concurrency as _concurrency
from celery import registry
from celery import platforms
from celery import signals
from celery.app import app_or_default
from celery.exceptions import SystemTerminate
from celery.log import SilenceRepeated
from celery.utils import noop, instantiate
from celery.worker import state
from celery.worker.buckets import TaskBucket, FastQueue
RUN = 0x1
CLOSE = 0x2
TERMINATE = 0x3
#: List of signals to reset when a child process starts.
WORKER_SIGRESET = frozenset(["SIGTERM",
"SIGHUP",
"SIGTTIN",
"SIGTTOU",
"SIGUSR1"])
#: List of signals to ignore when a child process starts.
WORKER_SIGIGNORE = frozenset(["SIGINT"])
def process_initializer(app, hostname):
    """Initializes the process so it can be used to process tasks.

    Used for multiprocessing environments.

    Resets/ignores the configured worker signals, sets the process title
    (``celeryd``), runs the loader's worker initialization and emits the
    :data:`~celery.signals.worker_process_init` signal.

    :param app: app instance passed to ``app_or_default`` (``None`` means
        the default app).
    :param hostname: node name embedded into the process title.
    """
    app = app_or_default(app)
    app.set_current()
    platforms.signals.reset(*WORKER_SIGRESET)
    platforms.signals.ignore(*WORKER_SIGIGNORE)
    platforms.set_mp_process_title("celeryd", hostname=hostname)
    # This is for Windows and other platforms not supporting
    # fork(). Note that init_worker makes sure it's only
    # run once per process.
    app.loader.init_worker()
    signals.worker_process_init.send(sender=None)
class WorkController(object):
"""Unmanaged worker instance."""
RUN = RUN
CLOSE = CLOSE
TERMINATE = TERMINATE
#: The number of simultaneous processes doing work (default:
#: :setting:`CELERYD_CONCURRENCY`)
concurrency = None
#: The loglevel used (default: :const:`logging.INFO`)
loglevel = logging.ERROR
#: The logfile used, if no logfile is specified it uses `stderr`
#: (default: :setting:`CELERYD_LOG_FILE`).
logfile = None
#: If :const:`True`, celerybeat is embedded, running in the main worker
#: process as a thread.
embed_clockservice = None
#: Enable the sending of monitoring events, these events can be captured
#: by monitors (celerymon).
send_events = False
#: The :class:`logging.Logger` instance used for logging.
logger = None
#: The pool instance used.
pool = None
#: The internal queue object that holds tasks ready for immediate
#: processing.
ready_queue = None
#: Instance of :class:`celery.worker.mediator.Mediator`.
mediator = None
#: Consumer instance.
consumer = None
_state = None
_running = 0
def __init__(self, concurrency=None, logfile=None, loglevel=None,
send_events=None, hostname=None, ready_callback=noop,
embed_clockservice=False, pool_cls=None, consumer_cls=None,
mediator_cls=None, eta_scheduler_cls=None,
schedule_filename=None, task_time_limit=None,
task_soft_time_limit=None, max_tasks_per_child=None,
pool_putlocks=None, db=None, prefetch_multiplier=None,
eta_scheduler_precision=None, disable_rate_limits=None,
autoscale=None, autoscaler_cls=None, scheduler_cls=None,
app=None):
self.app = app_or_default(app)
conf = self.app.conf
# Options
self.loglevel = loglevel or self.loglevel
self.concurrency = concurrency or conf.CELERYD_CONCURRENCY
self.logfile = logfile or conf.CELERYD_LOG_FILE
self.logger = self.app.log.get_default_logger()
if send_events is None:
send_events = conf.CELERY_SEND_EVENTS
self.send_events = send_events
self.pool_cls = _concurrency.get_implementation(
pool_cls or conf.CELERYD_POOL)
self.consumer_cls = consumer_cls or conf.CELERYD_CONSUMER
self.mediator_cls = mediator_cls or conf.CELERYD_MEDIATOR
self.eta_scheduler_cls = eta_scheduler_cls or \
conf.CELERYD_ETA_SCHEDULER
self.autoscaler_cls = autoscaler_cls or \
conf.CELERYD_AUTOSCALER
self.schedule_filename = schedule_filename or \
conf.CELERYBEAT_SCHEDULE_FILENAME
self.scheduler_cls = scheduler_cls or conf.CELERYBEAT_SCHEDULER
self.hostname = hostname or socket.gethostname()
self.embed_clockservice = embed_clockservice
self.ready_callback = ready_callback
self.task_time_limit = task_time_limit or \
conf.CELERYD_TASK_TIME_LIMIT
self.task_soft_time_limit = task_soft_time_limit or \
conf.CELERYD_TASK_SOFT_TIME_LIMIT
self.max_tasks_per_child = max_tasks_per_child or \
conf.CELERYD_MAX_TASKS_PER_CHILD
self.pool_putlocks = pool_putlocks or \
conf.CELERYD_POOL_PUTLOCKS
self.eta_scheduler_precision = eta_scheduler_precision or \
conf.CELERYD_ETA_SCHEDULER_PRECISION
self.prefetch_multiplier = prefetch_multiplier or \
conf.CELERYD_PREFETCH_MULTIPLIER
self.timer_debug = SilenceRepeated(self.logger.debug,
max_iterations=10)
self.db = db or conf.CELERYD_STATE_DB
self.disable_rate_limits = disable_rate_limits or \
conf.CELERY_DISABLE_RATE_LIMITS
self._finalize = Finalize(self, self.stop, exitpriority=1)
self._finalize_db = None
if self.db:
persistence = state.Persistent(self.db)
self._finalize_db = Finalize(persistence, persistence.save,
exitpriority=5)
# Queues
if self.disable_rate_limits:
self.ready_queue = FastQueue()
self.ready_queue.put = self.process_task
else:
self.ready_queue = TaskBucket(task_registry=registry.tasks)
self.logger.debug("Instantiating thread components...")
# Threads + Pool + Consumer
self.autoscaler = None
max_concurrency = None
min_concurrency = concurrency
if autoscale:
max_concurrency, min_concurrency = autoscale
self.pool = instantiate(self.pool_cls, min_concurrency,
logger=self.logger,
initializer=process_initializer,
initargs=(self.app, self.hostname),
maxtasksperchild=self.max_tasks_per_child,
timeout=self.task_time_limit,
soft_timeout=self.task_soft_time_limit,
putlocks=self.pool_putlocks)
self.priority_timer = instantiate(self.pool.Timer)
if not self.eta_scheduler_cls:
# Default Timer is set by the pool, as e.g. eventlet
# needs a custom implementation.
self.eta_scheduler_cls = self.pool.Timer
self.autoscaler = None
if autoscale:
self.autoscaler = instantiate(self.autoscaler_cls, self.pool,
max_concurrency=max_concurrency,
min_concurrency=min_concurrency,
logger=self.logger)
self.mediator = None
if not self.disable_rate_limits:
self.mediator = instantiate(self.mediator_cls, self.ready_queue,
app=self.app,
callback=self.process_task,
logger=self.logger)
self.scheduler = instantiate(self.eta_scheduler_cls,
precision=eta_scheduler_precision,
on_error=self.on_timer_error,
on_tick=self.on_timer_tick)
self.beat = None
if self.embed_clockservice:
self.beat = beat.EmbeddedService(app=self.app,
logger=self.logger,
schedule_filename=self.schedule_filename,
scheduler_cls=self.scheduler_cls)
prefetch_count = self.concurrency * self.prefetch_multiplier
self.consumer = instantiate(self.consumer_cls,
self.ready_queue,
self.scheduler,
logger=self.logger,
hostname=self.hostname,
send_events=self.send_events,
init_callback=self.ready_callback,
initial_prefetch_count=prefetch_count,
pool=self.pool,
priority_timer=self.priority_timer,
app=self.app,
controller=self)
# The order is important here;
# the first in the list is the first to start,
# and they must be stopped in reverse order.
self.components = filter(None, (self.pool,
self.mediator,
self.scheduler,
self.beat,
self.autoscaler,
self.consumer))
def start(self):
"""Starts the workers main loop."""
self._state = self.RUN
try:
for i, component in enumerate(self.components):
self.logger.debug("Starting thread %s..." % (
component.__class__.__name__))
self._running = i + 1
blocking(component.start)
except SystemTerminate:
self.terminate()
raise
except:
self.stop()
raise
    def process_task(self, request):
        """Process task by sending it to the pool of workers.

        Internal (non-terminal) errors are logged and swallowed so the
        consumer loop keeps running; terminal signals shut the worker down.
        """
        try:
            request.task.execute(request, self.pool,
                                 self.loglevel, self.logfile)
        # NOTE(review): clause order matters here.  ``except Exception``
        # appears first but presumably does not shadow the clauses below
        # because SystemTerminate looks like a SystemExit/BaseException
        # subclass (not an Exception subclass) -- confirm against the
        # project's exception definitions.
        except Exception, exc:
            # Task-level failure: log loudly but keep the worker alive.
            self.logger.critical("Internal error %s: %s\n%s" % (
                exc.__class__, exc, traceback.format_exc()))
        except SystemTerminate:
            # Explicit terminate request: hard shutdown, then exit.
            self.terminate()
            raise SystemExit()
        except BaseException, exc:
            # SystemExit / KeyboardInterrupt etc.: graceful stop, re-raise.
            self.stop()
            raise exc
def stop(self, in_sighandler=False):
"""Graceful shutdown of the worker server."""
if not in_sighandler or self.pool.signal_safe:
blocking(self._shutdown, warm=True)
def terminate(self, in_sighandler=False):
"""Not so graceful shutdown of the worker server."""
if not in_sighandler or self.pool.signal_safe:
blocking(self._shutdown, warm=False)
def _shutdown(self, warm=True):
what = (warm and "stopping" or "terminating").capitalize()
if self._state != self.RUN or self._running != len(self.components):
# Not fully started, can safely exit.
self._state = self.TERMINATE
return
self._state = self.CLOSE
signals.worker_shutdown.send(sender=self)
for component in reversed(self.components):
self.logger.debug("%s thread %s..." % (
what, component.__class__.__name__))
stop = component.stop
if not warm:
stop = getattr(component, "terminate", None) or stop
stop()
self.priority_timer.stop()
self.consumer.close_connection()
self._state = self.TERMINATE
def on_timer_error(self, exc_info):
_, exc, _ = exc_info
self.logger.error("Timer error: %r" % (exc, ))
def on_timer_tick(self, delay):
self.timer_debug("Scheduler wake-up! Next eta %s secs." % delay)
| |
#!/usr/bin/env python
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Help include git hash in tensorflow bazel build.
This creates symlinks from the internal git repository directory so
that the build system can see changes in the version state. We also
remember what branch git was on so when the branch changes we can
detect that the ref file is no longer correct (so we can suggest users
run ./configure again).
NOTE: this script is only used in opensource.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import subprocess
import shutil
def parse_branch_ref(filename):
  """Given a filename of a .git/HEAD file return ref path.

  In particular, if git is in detached head state, this will
  return None. If git is in attached head, it will return
  the branch reference. E.g. if on 'master', the HEAD will
  contain 'ref: refs/heads/master' so 'refs/heads/master'
  will be returned.

  Example: parse_branch_ref(".git/HEAD")
  Args:
    filename: file to treat as a git HEAD file
  Returns:
    None if detached head, otherwise ref subpath
  Raises:
    RuntimeError: if the HEAD file is unparseable.
  """
  # Fix: read via a context manager so the file handle is closed
  # deterministically instead of leaking until garbage collection.
  with open(filename) as head_file:
    data = head_file.read().strip()
  items = data.split(" ")
  if len(items) == 1:
    # Detached head: HEAD holds only a commit hash.
    return None
  elif len(items) == 2 and items[0] == "ref:":
    return items[1].strip()
  else:
    raise RuntimeError("Git directory has unparseable HEAD")
def configure(src_base_path, debug=False):
  """Configure `src_base_path` to embed git hashes if available.

  Recreates <src>/tensorflow/tools/git/gen containing either symlinks to
  the repository's HEAD / branch-ref files (so the build can observe
  version changes) or empty placeholder files when there is no .git
  directory.  A spec.json describing the repo state is always written.

  Args:
    src_base_path: root of the source tree to configure.
    debug: if True, print details about the generated files.

  Raises:
    RuntimeError: if the gen path cannot be removed or recreated.
  """
  # TODO(aselle): No files generated or symlinked here are deleted by
  # the build system. I don't know of a way to do it in bazel. It
  # should only be a problem if somebody moves a sandbox directory
  # without running ./configure again.
  git_path = os.path.join(src_base_path, ".git")
  gen_path = os.path.join(src_base_path, "tensorflow", "tools", "git", "gen")

  # Remove and recreate the path
  if os.path.exists(gen_path):
    if os.path.isdir(gen_path):
      try:
        shutil.rmtree(gen_path)
      except OSError:
        raise RuntimeError("Cannot delete directory %s due to permission "
                           "error, inspect and remove manually" % gen_path)
    else:
      # Bug fix: the original passed a second, separately-%-formatted string
      # as another RuntimeError argument, which raised TypeError ("not all
      # arguments converted") instead of this message.
      raise RuntimeError("Cannot delete non-directory %s, inspect "
                         "and remove manually" % gen_path)
  os.makedirs(gen_path)
  if not os.path.isdir(gen_path):
    raise RuntimeError("gen_git_source.py: Failed to create dir")

  # file that specifies what the state of the git repo is
  spec = {}

  # value file names will be mapped to the keys
  link_map = {"head": None, "branch_ref": None}

  if not os.path.isdir(git_path):
    # No git directory
    spec["git"] = False
    open(os.path.join(gen_path, "head"), "w").write("")
    open(os.path.join(gen_path, "branch_ref"), "w").write("")
  else:
    # Git directory, possibly detached or attached
    spec["git"] = True
    spec["path"] = src_base_path
    git_head_path = os.path.join(git_path, "HEAD")
    spec["branch"] = parse_branch_ref(git_head_path)
    link_map["head"] = git_head_path
    if spec["branch"] is not None:
      # attached method
      link_map["branch_ref"] = os.path.join(git_path, *
                                            os.path.split(spec["branch"]))

  # Create symlinks or dummy files
  for target, src in link_map.items():
    if src is None:
      open(os.path.join(gen_path, target), "w").write("")
    else:
      try:
        # In python 3.5, symlink function exists even on Windows. But requires
        # Windows Admin privileges, otherwise an OSError will be thrown.
        if hasattr(os, 'symlink'):
          os.symlink(src, os.path.join(gen_path, target))
        else:
          shutil.copy2(src, os.path.join(gen_path, target))
      except OSError:
        shutil.copy2(src, os.path.join(gen_path, target))

  json.dump(spec, open(os.path.join(gen_path, "spec.json"), "w"), indent=2)
  if debug:
    print("gen_git_source.py: list %s" % gen_path)
    # Bug fix: the original used `"...%s" + repr(...)`, printing a literal
    # "%s" followed by the listing instead of formatting it in.
    print("gen_git_source.py: %s" % repr(os.listdir(gen_path)))
    print("gen_git_source.py: spec is %r" % spec)
def get_git_version(git_base_path):
  """Get the git version from the repository.

  This function runs `git describe ...` in the path given as `git_base_path`.
  This will return a string of the form:
  <base-tag>-<number of commits since tag>-<shortened sha hash>

  For example, 'v0.10.0-1585-gbb717a6' means v0.10.0 was the last tag when
  compiled. 1585 commits are after that commit tag, and we can get back to this
  version by running `git checkout gbb717a6`.

  Args:
    git_base_path: where the .git directory is located

  Returns:
    A bytestring representing the git version, or b"unknown" when the
    version cannot be determined.
  """
  unknown_label = b"unknown"
  try:
    val = bytes(subprocess.check_output([
        "git", str("--git-dir=%s/.git" % git_base_path),
        str("--work-tree=" + git_base_path), "describe", "--long", "--tags"
    ]).strip())
    return val if val else unknown_label
  except (subprocess.CalledProcessError, OSError):
    # CalledProcessError: not a git checkout, or no tags to describe.
    # OSError (robustness fix): the `git` binary itself is missing --
    # previously this propagated instead of falling back to "unknown".
    return unknown_label
def write_version_info(filename, git_version):
  """Write a c file that defines the version functions.

  Args:
    filename: filename to write to.
    git_version: the result of a git describe, as a bytestring.
  """
  if b"\"" in git_version or b"\\" in git_version:
    git_version = b"git_version_is_invalid"  # do not cause build to fail!
  # Bug fix: under Python 3, formatting a bytes object with "%s" embeds its
  # repr (b'...') into the generated C source; decode explicitly instead.
  contents = """/* Generated by gen_git_source.py */
const char* tf_git_version() {return "%s";}
const char* tf_compiler_version() {return __VERSION__;}
""" % git_version.decode("utf-8")
  with open(filename, "w") as version_file:
    version_file.write(contents)
def generate(arglist):
  """Generate version_info.cc as given `destination_file`.

  Args:
    arglist: sequence of (spec, head_symlink, ref_symlink, destination_file):
      `spec` -- filename of a JSON dictionary with keys:
        'git' bool that is true if the source is in a git repo
        'path' base path of the source code
        'branch' the name of the ref specification of the current branch/tag
      `head_symlink` -- filename to HEAD, cross-referenced against the
        branch recorded in the json spec.
      `ref_symlink` -- unused here; the build system watches that file to
        detect new commits.
      `destination_file` -- where version_info.cc is written.

  Raises:
    RuntimeError: if the recorded branch no longer matches HEAD, meaning
      ./configure must be re-run.
  """
  spec_path, head_symlink, _unused_ref, dest_file = arglist
  spec = json.load(open(spec_path))

  if not spec["git"]:
    # Not a git checkout: there is no version to discover.
    write_version_info(dest_file, b"unknown")
    return

  recorded_branch = spec["branch"]
  current_branch = parse_branch_ref(head_symlink)
  if current_branch != recorded_branch:
    raise RuntimeError(
        "Run ./configure again, branch was '%s' but is now '%s'" %
        (recorded_branch, current_branch))
  write_version_info(dest_file, get_git_version(spec["path"]))
def raw_generate(output_file):
  """Simple generator used for cmake/make build systems.

  This does not create any symlinks. It requires the build system
  to build unconditionally.

  Args:
    output_file: Output filename for the version info cc
  """
  # Describe the repository rooted at the current working directory.
  write_version_info(output_file, get_git_version("."))
def _arg_to_bool(value):
  # Bug fix: argparse `type=bool` is a trap -- bool("False") is True because
  # every non-empty string is truthy, so `--debug False` enabled debugging.
  # Interpret common false-y spellings explicitly; the empty string stays
  # False, as before.
  return value.lower() not in ("", "0", "false", "no")


parser = argparse.ArgumentParser(description="""Git hash injection into bazel.
If used with --configure <path> will search for git directory and put symlinks
into source so that a bazel genrule can call --generate""")
parser.add_argument(
    "--debug",
    type=_arg_to_bool,
    help="print debugging information about paths",
    default=False)
parser.add_argument(
    "--configure", type=str,
    help="Path to configure as a git repo dependency tracking sentinel")
parser.add_argument(
    "--generate",
    type=str,
    help="Generate given spec-file, HEAD-symlink-file, ref-symlink-file",
    nargs="+")
parser.add_argument(
    "--raw_generate",
    type=str,
    help="Generate version_info.cc (simpler version used for cmake/make)")
args = parser.parse_args()

# Exactly one mode must be selected; they are checked in priority order.
if args.configure is not None:
  configure(args.configure, debug=args.debug)
elif args.generate is not None:
  generate(args.generate)
elif args.raw_generate is not None:
  raw_generate(args.raw_generate)
else:
  raise RuntimeError("--configure or --generate or --raw_generate "
                     "must be used")
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from contextlib import contextmanager
import datetime
import hashlib
import os
import StringIO
import tempfile
import time
import fixtures
import stubout
from glance.common import exception
from glance import image_cache
#NOTE(bcwaldon): This is imported to load the registry config options
import glance.registry
import glance.store.filesystem as fs_store
import glance.store.s3 as s3_store
from glance.tests import utils as test_utils
from glance.tests.utils import skip_if_disabled, xattr_writes_supported
# Canonical 1 KiB payload ('*' * 1024) cached as fake image data in the
# tests below; sizes asserted against the cache are multiples of this.
FIXTURE_LENGTH = 1024
FIXTURE_DATA = '*' * FIXTURE_LENGTH
class ImageCacheTestCase(object):
    """Driver-agnostic image-cache test mixin.

    Concrete subclasses provide ``self.cache`` (an ImageCache) and
    ``self.cache_dir`` in their setUp, plus the ``disabled`` /
    ``disabled_message`` flags consumed by ``@skip_if_disabled``.
    Python 2-era code: uses ``StringIO``, ``xrange`` and iterator
    ``.next()``.
    """

    def _setup_fixture_file(self):
        # Seed the cache with image id 1 containing the fixture payload.
        FIXTURE_FILE = StringIO.StringIO(FIXTURE_DATA)
        self.assertFalse(self.cache.is_cached(1))
        self.assertTrue(self.cache.cache_image_file(1, FIXTURE_FILE))
        self.assertTrue(self.cache.is_cached(1))

    @skip_if_disabled
    def test_is_cached(self):
        """Verify is_cached(1) returns 0, then add something to the cache
        and verify is_cached(1) returns 1.
        """
        self._setup_fixture_file()

    @skip_if_disabled
    def test_read(self):
        """Verify is_cached(1) returns 0, then add something to the cache
        and verify after a subsequent read from the cache that
        is_cached(1) returns 1.
        """
        self._setup_fixture_file()
        buff = StringIO.StringIO()
        with self.cache.open_for_read(1) as cache_file:
            for chunk in cache_file:
                buff.write(chunk)
        self.assertEqual(FIXTURE_DATA, buff.getvalue())

    @skip_if_disabled
    def test_open_for_read(self):
        """Test convenience wrapper for opening a cache file via
        its image identifier.
        """
        self._setup_fixture_file()
        buff = StringIO.StringIO()
        with self.cache.open_for_read(1) as cache_file:
            for chunk in cache_file:
                buff.write(chunk)
        self.assertEqual(FIXTURE_DATA, buff.getvalue())

    @skip_if_disabled
    def test_get_image_size(self):
        """Test convenience wrapper for querying cache file size via
        its image identifier.
        """
        self._setup_fixture_file()
        size = self.cache.get_image_size(1)
        self.assertEqual(FIXTURE_LENGTH, size)

    @skip_if_disabled
    def test_delete(self):
        """Test delete method that removes an image from the cache."""
        self._setup_fixture_file()
        self.cache.delete_cached_image(1)
        self.assertFalse(self.cache.is_cached(1))

    @skip_if_disabled
    def test_delete_all(self):
        """Test delete method that removes an image from the cache."""
        for image_id in (1, 2):
            self.assertFalse(self.cache.is_cached(image_id))
        for image_id in (1, 2):
            FIXTURE_FILE = StringIO.StringIO(FIXTURE_DATA)
            self.assertTrue(self.cache.cache_image_file(image_id,
                                                        FIXTURE_FILE))
        for image_id in (1, 2):
            self.assertTrue(self.cache.is_cached(image_id))
        self.cache.delete_all_cached_images()
        for image_id in (1, 2):
            self.assertFalse(self.cache.is_cached(image_id))

    @skip_if_disabled
    def test_clean_stalled(self):
        """Test the clean method removes expected images."""
        incomplete_file_path = os.path.join(self.cache_dir, 'incomplete', '1')
        incomplete_file = open(incomplete_file_path, 'w')
        incomplete_file.write(FIXTURE_DATA)
        incomplete_file.close()
        self.assertTrue(os.path.exists(incomplete_file_path))
        # stall_time=0 means any incomplete file is considered stalled.
        self.cache.clean(stall_time=0)
        self.assertFalse(os.path.exists(incomplete_file_path))

    @skip_if_disabled
    def test_clean_stalled_nonzero_stall_time(self):
        """
        Test the clean method removes the stalled images as expected
        """
        incomplete_file_path_1 = os.path.join(self.cache_dir,
                                              'incomplete', '1')
        incomplete_file_path_2 = os.path.join(self.cache_dir,
                                              'incomplete', '2')
        for f in (incomplete_file_path_1, incomplete_file_path_2):
            incomplete_file = open(f, 'w')
            incomplete_file.write(FIXTURE_DATA)
            incomplete_file.close()
        # Back-date file 1 by one day so only it exceeds the stall window.
        mtime = os.path.getmtime(incomplete_file_path_1)
        pastday = datetime.datetime.fromtimestamp(mtime) - \
            datetime.timedelta(days=1)
        atime = int(time.mktime(pastday.timetuple()))
        mtime = atime
        os.utime(incomplete_file_path_1, (atime, mtime))
        self.assertTrue(os.path.exists(incomplete_file_path_1))
        self.assertTrue(os.path.exists(incomplete_file_path_2))
        self.cache.clean(stall_time=3600)
        self.assertFalse(os.path.exists(incomplete_file_path_1))
        self.assertTrue(os.path.exists(incomplete_file_path_2))

    @skip_if_disabled
    def test_prune(self):
        """
        Test that pruning the cache works as expected...
        """
        self.assertEqual(0, self.cache.get_cache_size())
        # Add a bunch of images to the cache. The max cache
        # size for the cache is set to 5KB and each image is
        # 1K. We add 10 images to the cache and then we'll
        # prune it. We should see only 5 images left after
        # pruning, and the images that are least recently accessed
        # should be the ones pruned...
        for x in xrange(10):
            FIXTURE_FILE = StringIO.StringIO(FIXTURE_DATA)
            self.assertTrue(self.cache.cache_image_file(x,
                                                        FIXTURE_FILE))
        self.assertEqual(10 * 1024, self.cache.get_cache_size())
        # OK, hit the images that are now cached...
        for x in xrange(10):
            buff = StringIO.StringIO()
            with self.cache.open_for_read(x) as cache_file:
                for chunk in cache_file:
                    buff.write(chunk)
        self.cache.prune()
        self.assertEqual(5 * 1024, self.cache.get_cache_size())
        for x in xrange(0, 5):
            self.assertFalse(self.cache.is_cached(x),
                             "Image %s was cached!" % x)
        for x in xrange(5, 10):
            self.assertTrue(self.cache.is_cached(x),
                            "Image %s was not cached!" % x)

    @skip_if_disabled
    def test_prune_to_zero(self):
        """Test that an image_cache_max_size of 0 doesn't kill the pruner

        This is a test specifically for LP #1039854
        """
        self.assertEqual(0, self.cache.get_cache_size())
        FIXTURE_FILE = StringIO.StringIO(FIXTURE_DATA)
        self.assertTrue(self.cache.cache_image_file('xxx', FIXTURE_FILE))
        self.assertEqual(1024, self.cache.get_cache_size())
        # OK, hit the image that is now cached...
        buff = StringIO.StringIO()
        with self.cache.open_for_read('xxx') as cache_file:
            for chunk in cache_file:
                buff.write(chunk)
        self.config(image_cache_max_size=0)
        self.cache.prune()
        self.assertEqual(0, self.cache.get_cache_size())
        self.assertFalse(self.cache.is_cached('xxx'))

    @skip_if_disabled
    def test_queue(self):
        """
        Test that queueing works properly
        """
        self.assertFalse(self.cache.is_cached(1))
        self.assertFalse(self.cache.is_queued(1))
        FIXTURE_FILE = StringIO.StringIO(FIXTURE_DATA)
        self.assertTrue(self.cache.queue_image(1))
        self.assertTrue(self.cache.is_queued(1))
        self.assertFalse(self.cache.is_cached(1))
        # Should not return True if the image is already
        # queued for caching...
        self.assertFalse(self.cache.queue_image(1))
        self.assertFalse(self.cache.is_cached(1))
        # Test that we return False if we try to queue
        # an image that has already been cached
        self.assertTrue(self.cache.cache_image_file(1, FIXTURE_FILE))
        self.assertFalse(self.cache.is_queued(1))
        self.assertTrue(self.cache.is_cached(1))
        self.assertFalse(self.cache.queue_image(1))
        self.cache.delete_cached_image(1)
        for x in xrange(3):
            self.assertTrue(self.cache.queue_image(x))
        # NOTE: queued image ids come back as strings, in queue order.
        self.assertEqual(self.cache.get_queued_images(),
                         ['0', '1', '2'])

    def test_open_for_write_good(self):
        """
        Test to see if open_for_write works in normal case
        """
        # test a good case
        image_id = '1'
        self.assertFalse(self.cache.is_cached(image_id))
        with self.cache.driver.open_for_write(image_id) as cache_file:
            cache_file.write('a')
        self.assertTrue(self.cache.is_cached(image_id),
                        "Image %s was NOT cached!" % image_id)
        # make sure it has tidied up
        incomplete_file_path = os.path.join(self.cache_dir,
                                            'incomplete', image_id)
        invalid_file_path = os.path.join(self.cache_dir, 'invalid', image_id)
        self.assertFalse(os.path.exists(incomplete_file_path))
        self.assertFalse(os.path.exists(invalid_file_path))

    def test_open_for_write_with_exception(self):
        """
        Test to see if open_for_write works in a failure case for each driver
        This case is where an exception is raised while the file is being
        written. The image is partially filled in cache and filling wont resume
        so verify the image is moved to invalid/ directory
        """
        # test a case where an exception is raised while the file is open
        image_id = '1'
        self.assertFalse(self.cache.is_cached(image_id))
        try:
            with self.cache.driver.open_for_write(image_id) as cache_file:
                raise IOError
        except Exception as e:
            self.assertEqual(type(e), IOError)
        self.assertFalse(self.cache.is_cached(image_id),
                         "Image %s was cached!" % image_id)
        # make sure it has tidied up
        incomplete_file_path = os.path.join(self.cache_dir,
                                            'incomplete', image_id)
        invalid_file_path = os.path.join(self.cache_dir, 'invalid', image_id)
        self.assertFalse(os.path.exists(incomplete_file_path))
        self.assertTrue(os.path.exists(invalid_file_path))

    def test_caching_iterator(self):
        """
        Test to see if the caching iterator interacts properly with the driver
        When the iterator completes going through the data the driver should
        have closed the image and placed it correctly
        """
        # test a case where an exception NOT raised while the file is open,
        # and a consuming iterator completes
        def consume(image_id):
            data = ['a', 'b', 'c', 'd', 'e', 'f']
            checksum = None
            caching_iter = self.cache.get_caching_iter(image_id, checksum,
                                                       iter(data))
            self.assertEqual(list(caching_iter), data)
        image_id = '1'
        self.assertFalse(self.cache.is_cached(image_id))
        consume(image_id)
        self.assertTrue(self.cache.is_cached(image_id),
                        "Image %s was NOT cached!" % image_id)
        # make sure it has tidied up
        incomplete_file_path = os.path.join(self.cache_dir,
                                            'incomplete', image_id)
        invalid_file_path = os.path.join(self.cache_dir, 'invalid', image_id)
        self.assertFalse(os.path.exists(incomplete_file_path))
        self.assertFalse(os.path.exists(invalid_file_path))

    def test_caching_iterator_handles_backend_failure(self):
        """
        Test that when the backend fails, caching_iter does not continue trying
        to consume data, and rolls back the cache.
        """
        def faulty_backend():
            data = ['a', 'b', 'c', 'Fail', 'd', 'e', 'f']
            for d in data:
                if d == 'Fail':
                    raise exception.GlanceException('Backend failure')
                yield d

        def consume(image_id):
            caching_iter = self.cache.get_caching_iter(image_id, None,
                                                       faulty_backend())
            # excercise the caching_iter
            list(caching_iter)
        image_id = '1'
        self.assertRaises(exception.GlanceException, consume, image_id)
        # make sure bad image was not cached
        self.assertFalse(self.cache.is_cached(image_id))

    def test_caching_iterator_falloffend(self):
        """
        Test to see if the caching iterator interacts properly with the driver
        in a case where the iterator is only partially consumed. In this case
        the image is only partially filled in cache and filling wont resume.
        When the iterator goes out of scope the driver should have closed the
        image and moved it from incomplete/ to invalid/
        """
        # test a case where a consuming iterator just stops.
        def falloffend(image_id):
            data = ['a', 'b', 'c', 'd', 'e', 'f']
            checksum = None
            caching_iter = self.cache.get_caching_iter(image_id, checksum,
                                                       iter(data))
            self.assertEqual(caching_iter.next(), 'a')
        image_id = '1'
        self.assertFalse(self.cache.is_cached(image_id))
        falloffend(image_id)
        self.assertFalse(self.cache.is_cached(image_id),
                         "Image %s was cached!" % image_id)
        # make sure it has tidied up
        incomplete_file_path = os.path.join(self.cache_dir,
                                            'incomplete', image_id)
        invalid_file_path = os.path.join(self.cache_dir, 'invalid', image_id)
        self.assertFalse(os.path.exists(incomplete_file_path))
        self.assertTrue(os.path.exists(invalid_file_path))

    def test_gate_caching_iter_good_checksum(self):
        # A matching md5 checksum lets the image land in the cache.
        image = "12345678990abcdefghijklmnop"
        image_id = 123
        md5 = hashlib.md5()
        md5.update(image)
        checksum = md5.hexdigest()
        cache = image_cache.ImageCache()
        img_iter = cache.get_caching_iter(image_id, checksum, image)
        for chunk in img_iter:
            pass
        # checksum is valid, fake image should be cached:
        self.assertTrue(cache.is_cached(image_id))

    def test_gate_caching_iter_fs_chunked_file(self):
        """Tests get_caching_iter when using a filesystem ChunkedFile"""
        image_id = 123
        with tempfile.NamedTemporaryFile() as test_data_file:
            test_data_file.write(FIXTURE_DATA)
            test_data_file.seek(0)
            image = fs_store.ChunkedFile(test_data_file.name)
            md5 = hashlib.md5()
            md5.update(FIXTURE_DATA)
            checksum = md5.hexdigest()
            cache = image_cache.ImageCache()
            img_iter = cache.get_caching_iter(image_id, checksum, image)
            for chunk in img_iter:
                pass
            # checksum is valid, fake image should be cached:
            self.assertTrue(cache.is_cached(image_id))

    def test_gate_caching_iter_s3_chunked_file(self):
        """Tests get_caching_iter when using an S3 ChunkedFile"""
        image_id = 123
        with tempfile.NamedTemporaryFile() as test_data_file:
            test_data_file.write(FIXTURE_DATA)
            test_data_file.seek(0)
            image = s3_store.ChunkedFile(test_data_file)
            md5 = hashlib.md5()
            md5.update(FIXTURE_DATA)
            checksum = md5.hexdigest()
            cache = image_cache.ImageCache()
            img_iter = cache.get_caching_iter(image_id, checksum, image)
            for chunk in img_iter:
                pass
            # checksum is valid, fake image should be cached:
            self.assertTrue(cache.is_cached(image_id))

    def test_gate_caching_iter_bad_checksum(self):
        # A checksum mismatch must abort caching with GlanceException.
        image = "12345678990abcdefghijklmnop"
        image_id = 123
        checksum = "foobar"  # bad.
        cache = image_cache.ImageCache()
        img_iter = cache.get_caching_iter(image_id, checksum, image)

        def reader():
            for chunk in img_iter:
                pass
        self.assertRaises(exception.GlanceException, reader)
        # checksum is invalid, caching will fail:
        self.assertFalse(cache.is_cached(image_id))
class TestImageCacheXattr(test_utils.BaseTestCase,
                          ImageCacheTestCase):

    """Tests image caching when xattr is used in cache"""

    def setUp(self):
        """
        Test to see if the pre-requisites for the image cache
        are working (python-xattr installed and xattr support on the
        filesystem)
        """
        super(TestImageCacheXattr, self).setUp()
        # NOTE(review): this reads 'disable' but the skip machinery below
        # sets 'disabled' -- confirm the attribute name is intentional.
        if getattr(self, 'disable', False):
            return
        self.cache_dir = self.useFixture(fixtures.TempDir()).path
        if not getattr(self, 'inited', False):
            try:
                import xattr
            except ImportError:
                # python-xattr missing: mark the whole mixin as skipped.
                self.inited = True
                self.disabled = True
                self.disabled_message = ("python-xattr not installed.")
                return
        self.inited = True
        self.disabled = False
        self.config(image_cache_dir=self.cache_dir,
                    image_cache_driver='xattr',
                    image_cache_max_size=1024 * 5)
        self.cache = image_cache.ImageCache()
        # Even with python-xattr present, the backing filesystem must
        # actually support extended attributes.
        if not xattr_writes_supported(self.cache_dir):
            self.inited = True
            self.disabled = True
            self.disabled_message = ("filesystem does not support xattr")
            return
class TestImageCacheSqlite(test_utils.BaseTestCase,
                           ImageCacheTestCase):

    """Tests image caching when SQLite is used in cache"""

    def setUp(self):
        """
        Test to see if the pre-requisites for the image cache
        are working (python-sqlite3 installed)
        """
        super(TestImageCacheSqlite, self).setUp()
        # NOTE(review): reads 'disable' while the code below sets
        # 'disabled' -- confirm the attribute name is intentional.
        if getattr(self, 'disable', False):
            return
        if not getattr(self, 'inited', False):
            try:
                import sqlite3
            except ImportError:
                # sqlite3 missing: mark the whole mixin as skipped.
                self.inited = True
                self.disabled = True
                self.disabled_message = ("python-sqlite3 not installed.")
                return
        self.inited = True
        self.disabled = False
        self.cache_dir = self.useFixture(fixtures.TempDir()).path
        self.config(image_cache_dir=self.cache_dir,
                    image_cache_driver='sqlite',
                    image_cache_max_size=1024 * 5)
        self.cache = image_cache.ImageCache()
class TestImageCacheNoDep(test_utils.BaseTestCase):
    """Driver-failure tests that stub out ImageCache.init_driver so no
    real cache driver (xattr/sqlite) is needed."""

    def setUp(self):
        super(TestImageCacheNoDep, self).setUp()
        self.driver = None

        # Closure trick: `self2` is the ImageCache instance being built,
        # while `self` is this test case -- whatever the test assigns to
        # self.driver BEFORE constructing ImageCache becomes its driver.
        def init_driver(self2):
            self2.driver = self.driver
        self.stubs = stubout.StubOutForTesting()
        self.stubs.Set(image_cache.ImageCache, 'init_driver', init_driver)
        self.addCleanup(self.stubs.UnsetAll)

    def test_get_caching_iter_when_write_fails(self):
        # A failing write must not break iteration: the consumer still
        # receives every chunk even though caching is abandoned.
        class FailingFile(object):
            def write(self, data):
                if data == "Fail":
                    raise IOError

        class FailingFileDriver(object):
            def is_cacheable(self, *args, **kwargs):
                return True

            @contextmanager
            def open_for_write(self, *args, **kwargs):
                yield FailingFile()
        self.driver = FailingFileDriver()
        cache = image_cache.ImageCache()
        data = ['a', 'b', 'c', 'Fail', 'd', 'e', 'f']
        caching_iter = cache.get_caching_iter('dummy_id', None, iter(data))
        self.assertEqual(list(caching_iter), data)

    def test_get_caching_iter_when_open_fails(self):
        # If opening the cache file fails outright, iteration must still
        # pass all data through untouched.
        class OpenFailingDriver(object):
            def is_cacheable(self, *args, **kwargs):
                return True

            @contextmanager
            def open_for_write(self, *args, **kwargs):
                raise IOError
        self.driver = OpenFailingDriver()
        cache = image_cache.ImageCache()
        data = ['a', 'b', 'c', 'd', 'e', 'f']
        caching_iter = cache.get_caching_iter('dummy_id', None, iter(data))
        self.assertEqual(list(caching_iter), data)
| |
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import abc
import re
from collections import OrderedDict
from sqlite3 import Cursor
from typing import Any, Dict, Generator, List, Optional, Sequence, Type, cast
import typepy
from typepy.type import AbstractType
from .core import SimpleSQLite
from .error import DatabaseError
from .query import Attr, AttrList, Value, WhereQuery
def dict_factory(cursor: Cursor, row: Sequence) -> Dict:
    """sqlite3 row factory mapping each column name (first element of each
    ``cursor.description`` entry) to the corresponding value in ``row``."""
    column_names = (description[0] for description in cursor.description)
    return dict(zip(column_names, row))
class Column(metaclass=abc.ABCMeta):
    """Abstract description of a single table column.

    Concrete subclasses (``Integer``, ``Real``, ``Text``, ``Blob``) supply
    the SQLite storage type and the ``typepy`` class used to validate
    values before insertion.
    """

    # Modernization fix: ``abc.abstractproperty`` has been deprecated since
    # Python 3.3; stack @property over @abc.abstractmethod instead.
    @property
    @abc.abstractmethod
    def sqlite_datatype(self) -> str:
        """SQLite storage-class keyword for this column (e.g. ``"TEXT"``)."""
        return ""

    @property
    @abc.abstractmethod
    def typepy_class(self):
        """``typepy`` type class used to validate values for this column."""
        return None

    @property
    def not_null(self) -> bool:
        """Whether the column is declared NOT NULL."""
        return self.__not_null

    def __init__(
        self,
        attr_name=None,
        not_null=False,
        primary_key=False,
        unique=False,
        autoincrement=False,
        default=None,
    ):
        self.__header_name = attr_name
        self.__not_null = not_null
        self.__primary_key = primary_key
        self.__unique = unique
        self.__autoincrement = autoincrement
        # A NOT NULL column never carries a DEFAULT clause here: the
        # default is dropped when not_null is set.
        self.__default_value = None if self.__not_null else default

    def get_header(self, attr_name: str) -> str:
        """Return the column header: the explicit ``attr_name`` passed at
        construction time, or the model attribute name otherwise."""
        if self.__header_name:
            return self.__header_name

        return attr_name

    def get_desc(self) -> str:
        """Build the column description used inside CREATE TABLE, e.g.
        ``"INTEGER NOT NULL UNIQUE AUTOINCREMENT"``."""
        constraints = [self.sqlite_datatype]

        if self.__primary_key:
            constraints.append("PRIMARY KEY")
        else:
            # NOT NULL / UNIQUE are only emitted for non-key columns.
            if self.__not_null:
                constraints.append("NOT NULL")
            if self.__unique:
                constraints.append("UNIQUE")

        if self.__autoincrement and self.sqlite_datatype == "INTEGER":
            # SQLite only allows AUTOINCREMENT on INTEGER columns.
            constraints.append("AUTOINCREMENT")

        if self.__default_value is not None:
            constraints.append(f"DEFAULT {Value(self.__default_value)}")

        return " ".join(constraints)
class Integer(Column):
    """Column stored as SQLite ``INTEGER``, validated via ``typepy.Integer``."""

    @property
    def sqlite_datatype(self) -> str:
        return "INTEGER"

    @property
    def typepy_class(self) -> Type[AbstractType]:
        return typepy.Integer
class Real(Column):
    """Column stored as SQLite ``REAL``, validated via ``typepy.RealNumber``."""

    @property
    def sqlite_datatype(self) -> str:
        return "REAL"

    @property
    def typepy_class(self) -> Type[AbstractType]:
        return typepy.RealNumber
class Text(Column):
    """Column stored as SQLite ``TEXT``, validated via ``typepy.String``."""

    @property
    def sqlite_datatype(self) -> str:
        return "TEXT"

    @property
    def typepy_class(self) -> Type[AbstractType]:
        return typepy.String
class Blob(Column):
    """Column stored as SQLite ``BLOB``, validated via ``typepy.Bytes``."""

    @property
    def sqlite_datatype(self) -> str:
        return "BLOB"

    @property
    def typepy_class(self) -> Type[AbstractType]:
        return typepy.Bytes
class Model:
__connection = None
__is_hidden = False
__table_name = None
__attr_names = None
@classmethod
def attach(cls, database_src: SimpleSQLite, is_hidden: bool = False) -> None:
cls.__connection = SimpleSQLite(database_src)
cls.__is_hidden = is_hidden
@classmethod
def get_table_name(cls) -> str:
if cls.__table_name:
return cls.__table_name
table_name = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1_\2", cls.__name__)
table_name = re.sub(r"([a-z\d])([A-Z])", r"\1_\2", table_name)
table_name = table_name.replace("-", "_").lower()
if cls.__is_hidden:
table_name = f"_{table_name:s}_"
cls.__table_name = table_name
return cls.__table_name
@classmethod
def get_attr_names(cls) -> List[str]:
if cls.__attr_names:
return cls.__attr_names
cls.__attr_names = [attr_name for attr_name in cls.__dict__ if cls.__is_attr(attr_name)]
return cls.__attr_names
@classmethod
def create(cls) -> None:
cls.__validate_connection()
assert cls.__connection # to avoid type check error
attr_descs = []
for attr_name in cls.get_attr_names():
col = cls._get_col(attr_name)
attr_descs.append(
"{attr} {constraints}".format(
attr=Attr(col.get_header(attr_name)), constraints=col.get_desc()
)
)
cls.__connection.create_table(cls.get_table_name(), attr_descs)
@classmethod
def select(cls, where: Optional[WhereQuery] = None, extra: None = None) -> Generator:
cls.__validate_connection()
assert cls.__connection # to avoid type check error
try:
stash_row_factory = cls.__connection.connection.row_factory # type: ignore
cls.__connection.set_row_factory(dict_factory)
result = cls.__connection.select(
select=AttrList(
[
cls._get_col(attr_name).get_header(attr_name)
for attr_name in cls.get_attr_names()
]
),
table_name=cls.get_table_name(),
where=where,
extra=extra,
)
assert result # to avoid type check error
for record in result.fetchall():
yield cls(**record)
finally:
cls.__connection.set_row_factory(stash_row_factory)
@classmethod
def insert(cls, model_obj: "Model") -> None:
cls.__validate_connection()
assert cls.__connection # to avoid type check error
if type(model_obj).__name__ != cls.__name__:
raise TypeError(
"unexpected type: expected={}, actual={}".format(
cls.__name__, type(model_obj).__name__
)
)
record = {}
for attr_name in cls.get_attr_names():
value = getattr(model_obj, attr_name)
if value is None:
continue
cls.__validate_value(attr_name, value)
record[cls._get_col(attr_name).get_header(attr_name)] = value
cls.__connection.insert(cls.get_table_name(), record)
@classmethod
def commit(cls) -> None:
cls.__validate_connection()
assert cls.__connection # to avoid type check error
cls.__connection.commit()
@classmethod
def fetch_schema(cls):
return cls.__connection.schema_extractor.fetch_table_schema(cls.get_table_name())
@classmethod
def fetch_num_records(cls, where: None = None) -> int:
assert cls.__connection # to avoid type check error
return cast(int, cls.__connection.fetch_num_records(cls.get_table_name(), where=where))
    @classmethod
    def attr_to_header(cls, attr_name: str) -> str:
        """Return the table-column header declared for *attr_name*."""
        return cls._get_col(attr_name).get_header(attr_name)
def as_dict(self) -> Dict:
record = OrderedDict()
for attr_name in self.get_attr_names():
value = getattr(self, attr_name)
if value is None:
continue
record[self.attr_to_header(attr_name)] = value
return record
def __init__(self, *args, **kwargs) -> None:
for attr_name in self.get_attr_names():
value = kwargs.get(attr_name)
if value is None:
value = kwargs.get(self.attr_to_header(attr_name))
setattr(self, attr_name, value)
def __eq__(self, other) -> bool:
if type(self) != type(other):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other) -> bool:
if type(self) != type(other):
return True
return self.__dict__ != other.__dict__
def __repr__(self) -> str:
return "{name:s} ({attributes:s})".format(
name=type(self).__name__,
attributes=", ".join([f"{key}={value}" for key, value in self.as_dict().items()]),
)
    @classmethod
    def __validate_connection(cls) -> None:
        """Fail fast with a domain error when no connection is attached.

        Raises:
            DatabaseError: when ``attach`` has not been called yet.
        """
        if cls.__connection is None:
            raise DatabaseError("SimpleSQLite connection required. you need to call attach first")
    @classmethod
    def __validate_value(cls, attr_name: str, value: Any) -> None:
        """Validate *value* against the column declared for *attr_name*.

        ``None`` is accepted for nullable columns; otherwise validation is
        delegated to the column's typepy class, which raises on mismatch.
        Note that ``None`` on a NOT NULL column still reaches the typepy
        validator rather than being rejected here.
        """
        column = cls._get_col(attr_name)
        if value is None and not column.not_null:
            return
        column.typepy_class(value).validate()
    @classmethod
    def __is_attr(cls, attr_name: str) -> bool:
        """Return True when *attr_name* names a user-declared column attribute.

        Filters out dunder names, name-mangled Model internals
        (``_Model__*``), and callables such as methods.
        """
        private_var_regexp = re.compile(f"^_{Model.__name__}__[a-zA-Z]+")
        return (
            not attr_name.startswith("__")
            and private_var_regexp.search(attr_name) is None
            and not callable(cls.__dict__.get(attr_name))
        )
@classmethod
def _get_col(cls, attr_name: str) -> Column:
if attr_name not in cls.get_attr_names():
raise ValueError(f"invalid attribute: {attr_name}")
return cls.__dict__[attr_name]
| |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file contains helper methods used in creating a release.
import re
import sys
from subprocess import Popen, PIPE
try:
from jira.client import JIRA
# Old versions have JIRAError in exceptions package, new (0.5+) in utils.
try:
from jira.exceptions import JIRAError
except ImportError:
from jira.utils import JIRAError
except ImportError:
print("This tool requires the jira-python library")
print("Install using 'sudo pip install jira'")
sys.exit(-1)
try:
from github import Github
from github import GithubException
except ImportError:
print("This tool requires the PyGithub library")
print("Install using 'sudo pip install PyGithub'")
sys.exit(-1)
try:
import unidecode
except ImportError:
print("This tool requires the unidecode library to decode obscure github usernames")
print("Install using 'sudo pip install unidecode'")
sys.exit(-1)
if sys.version < '3':
input = raw_input # noqa
# Contributors list file name
contributors_file_name = "contributors.txt"
# Prompt the user to answer yes or no until they do so
def yesOrNoPrompt(msg):
    """Return True for a "y" answer and False for "n", re-prompting otherwise.

    Re-prompting now uses a loop instead of recursion, so an arbitrarily
    long run of invalid answers cannot exhaust the recursion limit.
    """
    response = input("%s [y/n]: " % msg)
    while response != "y" and response != "n":
        response = input("%s [y/n]: " % msg)
    return response == "y"
# Utility functions run git commands (written with Git 1.8.5)
def run_cmd(cmd):
    """Run *cmd* (an argv list) and return its captured stdout."""
    process = Popen(cmd, stdout=PIPE)
    stdout, _ = process.communicate()
    return stdout
def run_cmd_error(cmd):
    """Run *cmd* (an argv list) and return its captured stderr; stdout is discarded."""
    process = Popen(cmd, stdout=PIPE, stderr=PIPE)
    _, stderr = process.communicate()
    return stderr
def get_date(commit_hash):
    # Return the committer date of *commit_hash* as reported by
    # `git show --pretty=format:%cd` (raw Popen output; bytes on Python 3).
    return run_cmd(["git", "show", "--quiet", "--pretty=format:%cd", commit_hash])
def tag_exists(tag):
    """Return True when git can show *tag*, i.e. the tag exists.

    `run_cmd_error` returns bytes under Python 3, so the previous
    ``"error" not in stderr`` substring test raised TypeError there;
    decode before testing.
    """
    stderr = run_cmd_error(["git", "show", tag])
    if isinstance(stderr, bytes):
        stderr = stderr.decode("utf-8", errors="replace")
    return "error" not in stderr
# A type-safe representation of a commit
class Commit:
    """Value object describing one git commit.

    Carries the short hash, author, title, and (optionally) the pull
    request number parsed from the commit body.
    """

    def __init__(self, _hash, author, title, pr_number=None):
        self._hash = _hash
        self.author = author
        self.title = title
        self.pr_number = pr_number

    def get_hash(self):
        return self._hash

    def get_author(self):
        return self.author

    def get_title(self):
        return self.title

    def get_pr_number(self):
        return self.pr_number

    def __str__(self):
        # Trailing PR marker is empty (leaving a trailing space) when the
        # commit did not close a pull request.
        suffix = "(Closes #%s)" % self.pr_number if self.pr_number else ""
        return "%s %s %s %s" % (self._hash, self.author, self.title, suffix)
# Return all commits that belong to the specified tag.
#
# Under the hood, this runs a `git log` on that tag and parses the fields
# from the command output to construct a list of Commit objects. Note that
# because certain fields reside in the commit description and cannot be parsed
# through the Github API itself, we need to do some intelligent regex parsing
# to extract those fields.
#
# This is written using Git 1.8.5.
def get_commits(tag):
    """Parse `git log <tag>` output into a list of Commit objects.

    NOTE(review): run_cmd returns bytes under Python 3 while the markers
    below are str; this presumably relies on Python 2 (or decoded output)
    -- confirm before running under Python 3.
    """
    # Unique sentinels so commits and fields can be split reliably even
    # when titles/bodies contain arbitrary text.
    commit_start_marker = "|=== COMMIT START MARKER ===|"
    commit_end_marker = "|=== COMMIT END MARKER ===|"
    field_end_marker = "|=== COMMIT FIELD END MARKER ===|"
    # %h short hash, %an author name, %s subject, %b body.
    log_format =\
        commit_start_marker + "%h" +\
        field_end_marker + "%an" +\
        field_end_marker + "%s" +\
        commit_end_marker + "%b"
    output = run_cmd(["git", "log", "--quiet", "--pretty=format:" + log_format, tag])
    commits = []
    raw_commits = [c for c in output.split(commit_start_marker) if c]
    for commit in raw_commits:
        if commit.count(commit_end_marker) != 1:
            print("Commit end marker not found in commit: ")
            for line in commit.split("\n"):
                print(line)
            sys.exit(1)
        # Separate commit digest from the body
        # From the digest we extract the hash, author and the title
        # From the body, we extract the PR number and the github username
        [commit_digest, commit_body] = commit.split(commit_end_marker)
        if commit_digest.count(field_end_marker) != 2:
            sys.exit("Unexpected format in commit: %s" % commit_digest)
        [_hash, author, title] = commit_digest.split(field_end_marker)
        # The PR number and github username is in the commit message
        # itself and cannot be accessed through any Github API
        pr_number = None
        match = re.search("Closes #([0-9]+) from ([^/\\s]+)/", commit_body)
        if match:
            [pr_number, github_username] = match.groups()
            # If the author name is not valid, use the github
            # username so we can translate it properly later
            if not is_valid_author(author):
                author = github_username
        # Guard against special characters
        try:  # Python 2
            author = unicode(author, "UTF-8")
        except NameError:  # Python 3
            author = str(author)
        author = unidecode.unidecode(author).strip()
        commit = Commit(_hash, author, title, pr_number)
        commits.append(commit)
    return commits
# Maintain a mapping for translating issue types to contributions in the release notes
# This serves an additional function of warning the user against unknown issue types
# Note: This list is partially derived from this link:
# https://issues.apache.org/jira/plugins/servlet/project-config/SPARK/issuetypes
# Keep these in lower case
# NOTE(review): "task"/"sub-task" map to singular "improvement" while
# "improvement" maps to plural "improvements" -- confirm whether this
# inconsistency is intentional before changing the data.
known_issue_types = {
    "bug": "bug fixes",
    "build": "build fixes",
    "dependency upgrade": "build fixes",
    "improvement": "improvements",
    "new feature": "new features",
    "documentation": "documentation",
    "test": "test",
    "task": "improvement",
    "sub-task": "improvement"
}

# Maintain a mapping for translating component names when creating the release notes
# This serves an additional function of warning the user against unknown components
# Note: This list is largely derived from this link:
# https://issues.apache.org/jira/plugins/servlet/project-config/SPARK/components
CORE_COMPONENT = "Core"
known_components = {
    "block manager": CORE_COMPONENT,
    "build": CORE_COMPONENT,
    "deploy": CORE_COMPONENT,
    "documentation": CORE_COMPONENT,
    "examples": CORE_COMPONENT,
    "graphx": "GraphX",
    "input/output": CORE_COMPONENT,
    "java api": "Java API",
    "k8s": "Kubernetes",
    "kubernetes": "Kubernetes",
    "mesos": "Mesos",
    "ml": "MLlib",
    "mllib": "MLlib",
    "project infra": "Project Infra",
    "pyspark": "PySpark",
    "shuffle": "Shuffle",
    "spark core": CORE_COMPONENT,
    "spark shell": CORE_COMPONENT,
    "sql": "SQL",
    "streaming": "Streaming",
    "web ui": "Web UI",
    "windows": "Windows",
    "yarn": "YARN"
}
# Translate issue types using a format appropriate for writing contributions
# If an unknown issue type is encountered, warn the user
def translate_issue_type(issue_type, issue_id, warnings):
    """Map a JIRA issue type to its release-notes category.

    Unknown types are returned lowercased as-is after recording a
    warning in *warnings*.
    """
    issue_type = issue_type.lower()
    try:
        return known_issue_types[issue_type]
    except KeyError:
        warnings.append("Unknown issue type \"%s\" (see %s)" % (issue_type, issue_id))
        return issue_type
# Translate component names using a format appropriate for writing contributions
# If an unknown component is encountered, warn the user
def translate_component(component, commit_hash, warnings):
    """Map a raw component name to its canonical release-notes name.

    Unknown components are returned lowercased as-is after recording a
    warning in *warnings*.
    """
    component = component.lower()
    try:
        return known_components[component]
    except KeyError:
        warnings.append("Unknown component \"%s\" (see %s)" % (component, commit_hash))
        return component
# Parse components in the commit message
# The returned components are already filtered and translated
def find_components(commit, commit_hash):
    r"""Extract known [component] tags from a commit title/message.

    The previous pattern r"\[\w*\]" kept the surrounding brackets in each
    match, so no match could ever equal a bracket-free key of
    known_components and the function always returned []. Capturing only
    the inner text fixes the filter.
    """
    tags = re.findall(r"\[(\w+)\]", commit.lower())
    return [translate_component(tag, commit_hash, [])
            for tag in tags if tag in known_components]
# Join a list of strings in a human-readable manner
# e.g. ["Juice"] -> "Juice"
# e.g. ["Juice", "baby"] -> "Juice and baby"
# e.g. ["Juice", "baby", "moon"] -> "Juice, baby, and moon"
def nice_join(str_list):
    """Join *str_list* (any iterable of strings) with commas and "and"."""
    items = list(str_list)  # sometimes it's a set
    if not items:
        return ""
    if len(items) == 1:
        return items[0]
    if len(items) == 2:
        return "%s and %s" % (items[0], items[1])
    return "%s, and %s" % (", ".join(items[:-1]), items[-1])
# Return the full name of the specified user on Github
# If the user doesn't exist, return None
def get_github_name(author, github_client):
    """Look up *author*'s display name via the Github API.

    Returns None when no client is supplied or the user does not exist;
    any Github error other than HTTP 404 is re-raised.
    """
    if github_client:
        try:
            return github_client.get_user(author).name
        except GithubException as e:
            # If this is not a "not found" exception
            if e.status != 404:
                raise e
    return None
# Return the full name of the specified user on JIRA
# If the user doesn't exist, return None
def get_jira_name(author, jira_client):
    """Look up *author*'s display name via the JIRA API.

    Returns None when no client is supplied or the user does not exist;
    any JIRA error other than HTTP 404 is re-raised.
    """
    if jira_client:
        try:
            return jira_client.user(author).displayName
        except JIRAError as e:
            # If this is not a "not found" exception
            if e.status_code != 404:
                raise e
    return None
# Return whether the given name is in the form <First Name><space><Last Name>
def is_valid_author(author):
    """Heuristic validity check: contains a space and no digits."""
    if not author:
        return False
    contains_space = " " in author
    contains_digit = re.search("[0-9]", author) is not None
    return contains_space and not contains_digit
# Capitalize the first letter of each word in the given author name
def capitalize_author(author):
    """Uppercase the first character of every space-separated word.

    Returns None for falsy input. Only the first character of each word
    is touched; the remainder is preserved verbatim. Empty fragments from
    repeated spaces are dropped.
    """
    if not author:
        return None
    fixed = [word[0].capitalize() + word[1:] for word in author.split(" ") if word]
    return " ".join(fixed)
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from textwrap import dedent
from pants.backend.jvm.subsystems.scala_platform import ScalaPlatform
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.backend.jvm.tasks.scalastyle import FileExcluder, Scalastyle
from pants.base.exceptions import TaskError
from pants.java.jar.jar_dependency import JarDependency
from pants_test.jvm.nailgun_task_test_base import NailgunTaskTestBase
from pants_test.subsystem.subsystem_util import init_subsystem
from pants_test.task_test_base import ensure_cached
logger = logging.getLogger(__name__)
class ScalastyleTest(NailgunTaskTestBase):
    """Tests for the class Scalastyle.

    Changes from the original: deprecated ``assertEquals`` aliases are
    replaced by ``assertEqual``, and the regex excludes fixture uses a raw
    string so ``\\.`` is not an invalid escape sequence.
    """

    @classmethod
    def task_type(cls):
        return Scalastyle

    #
    # Internal test helper section
    #

    def _create_scalastyle_config_file(self, rules=None):
        """Write a scalastyle_config.xml enabling the given checker rules."""
        # put a default rule there if rules are not specified.
        rules = rules or ['org.scalastyle.scalariform.ImportGroupingChecker']
        rule_section_xml = ''
        for rule in rules:
            rule_section_xml += dedent("""
              <check level="error" class="{rule}" enabled="true"></check>
            """.format(rule=rule))
        return self.create_file(
            relpath='scalastyle_config.xml',
            contents=dedent("""
              <scalastyle commentFilter="enabled">
                <name>Test Scalastyle configuration</name>
                {rule_section_xml}
              </scalastyle>
            """.format(rule_section_xml=rule_section_xml)))

    def _create_scalastyle_excludes_file(self, exclude_patterns=None):
        """Write an excludes file containing one pattern per line."""
        return self.create_file(
            relpath='scalastyle_excludes.txt',
            contents='\n'.join(exclude_patterns) if exclude_patterns else '')

    def _create_context(self, scalastyle_config=None, excludes=None, target_roots=None):
        # If config is not specified, then we override pants.ini scalastyle such that
        # we have a default scalastyle config xml but with empty excludes.
        self.set_options(skip=False, config=scalastyle_config, excludes=excludes)
        return self.context(target_roots=target_roots)

    def _create_scalastyle_task(self, scalastyle_config):
        """Prepare a Scalastyle task instance wired to the given config."""
        return self.prepare_execute(self._create_context(scalastyle_config))

    def setUp(self):
        super(ScalastyleTest, self).setUp()
        # Default scalastyle config (import grouping rule) and no excludes.
        init_subsystem(ScalaPlatform, {
            ScalaPlatform.options_scope: {
                'version': '2.11'
            }
        })

    def test_initialize_config_no_config_settings(self):
        with self.assertRaises(Scalastyle.UnspecifiedConfig):
            self._create_scalastyle_task(scalastyle_config=None).validate_scalastyle_config()

    def test_initialize_config_config_setting_exist_but_invalid(self):
        with self.assertRaises(Scalastyle.MissingConfig):
            self._create_scalastyle_task(
                scalastyle_config='file_does_not_exist.xml').validate_scalastyle_config()

    def test_excludes_setting_exists_but_invalid(self):
        with self.assertRaises(TaskError):
            FileExcluder('file_does_not_exist.txt', logger)

    def test_excludes_parsed_loaded_correctly(self):
        # Raw string: the patterns contain regex escapes such as "\.".
        excludes_text = dedent(r'''
            # ignore C++
            .*\.cpp
            # ignore python
            .*\.py''')
        excluder = FileExcluder(self._create_scalastyle_excludes_file([excludes_text]), logger)
        self.assertEqual(2, len(excluder.excludes))
        self.assertTrue(excluder.should_include('com/some/org/x.scala'))
        self.assertFalse(excluder.should_include('com/some/org/y.cpp'))
        self.assertFalse(excluder.should_include('z.py'))

    def custom_scala_platform_setup(self):
        # We don't need to specify :scalac or :scala-repl since they are never being fetched.
        self.make_target('//:scala-library',
                         JarLibrary,
                         jars=[JarDependency('org.scala-lang', 'scala-library', '2.10')],
                         )
        self.make_target('//:scalastyle',
                         JarLibrary,
                         jars=[JarDependency('org.scalastyle', 'scalastyle_2.10', '0.3.2')],
                         )
        init_subsystem(ScalaPlatform, {
            ScalaPlatform.options_scope: {
                'version': 'custom'
            }
        })

    def test_get_non_synthetic_scala_targets(self):
        # scala_library - should remain.
        scala_target = self.make_target('a/scala:s', ScalaLibrary, sources=['Source.scala'])
        # scala_library but with java sources - should be filtered
        scala_target_with_java_source = self.make_target('a/scala_java:sj',
                                                         ScalaLibrary,
                                                         sources=['Source.java'])
        # java_library - should be filtered
        java_target = self.make_target('a/java:j', JavaLibrary, sources=['Source.java'])
        # synthetic scala_library - should be filtered
        synthetic_scala_target = self.make_target('a/synthetic_scala:ss',
                                                  ScalaLibrary,
                                                  sources=['SourceGenerated.scala'],
                                                  derived_from=scala_target)
        result_targets = Scalastyle.get_non_synthetic_scala_targets([java_target,
                                                                    scala_target,
                                                                    scala_target_with_java_source,
                                                                    synthetic_scala_target])
        # Only the scala target should remain
        self.assertEqual(1, len(result_targets))
        self.assertEqual(scala_target, result_targets[0])

    def test_get_non_excluded_scala_sources(self):
        # this scala target has mixed *.scala and *.java sources.
        # the *.java source should be filtered out.
        scala_target_1 = self.make_target('a/scala_1:s1',
                                          ScalaLibrary,
                                          sources=['Source1.java', 'Source1.scala'])
        # this scala target has single *.scala source but will be excluded out
        # by the [scalastyle].[excludes] setting.
        scala_target_2 = self.make_target('a/scala_2:s2', ScalaLibrary, sources=['Source2.scala'])
        # Create a custom context so we can manually inject scala targets
        # with mixed sources in them to test the source filtering logic.
        context = self._create_context(
            scalastyle_config=self._create_scalastyle_config_file(),
            excludes=self._create_scalastyle_excludes_file(['a/scala_2/Source2.scala']),
            target_roots=[
                scala_target_1,
                scala_target_2
            ]
        )
        # Remember, we have the extra 'scala-library' dep target.
        self.assertEqual(3, len(context.targets()))
        # Now create the task and run the scala source and exclusion filtering.
        task = self.prepare_execute(context)
        result_sources = task.get_non_excluded_scala_sources(
            task.create_file_excluder(),
            task.get_non_synthetic_scala_targets(context.targets()))
        # Only the scala source from target 1 should remain
        self.assertEqual(1, len(result_sources))
        self.assertEqual('a/scala_1/Source1.scala', result_sources[0])

    @ensure_cached(Scalastyle, expected_num_artifacts=1)
    def test_end_to_end_pass(self):
        # Create a scala source that would PASS ImportGroupingChecker rule.
        self.create_file(
            relpath='a/scala/pass.scala',
            contents=dedent("""
              import java.util
              object HelloWorld {
                def main(args: Array[String]) {
                  println("Hello, world!")
                }
              }
            """))
        scala_target = self.make_target('a/scala:pass', ScalaLibrary, sources=['pass.scala'])
        context = self._create_context(scalastyle_config=self._create_scalastyle_config_file(),
                                       target_roots=[scala_target])
        self.execute(context)

    def test_custom_end_to_end_pass(self):
        # Override the default version set in setUp().
        self.custom_scala_platform_setup()
        # Create a scala source that would PASS ImportGroupingChecker rule.
        self.create_file(
            relpath='a/scala/pass.scala',
            contents=dedent("""
              import java.util
              object HelloWorld {
                def main(args: Array[String]) {
                  println("Hello, world!")
                }
              }
            """))
        scala_target = self.make_target('a/scala:pass', ScalaLibrary, sources=['pass.scala'])
        context = self._create_context(scalastyle_config=self._create_scalastyle_config_file(),
                                       target_roots=[scala_target])
        self.execute(context)

    def test_fail(self):
        # Create a scala source that would FAIL ImportGroupingChecker rule.
        self.create_file(
            relpath='a/scala/fail.scala',
            contents=dedent("""
              import java.io._
              object HelloWorld {
                def main(args: Array[String]) {
                  println("Hello, world!")
                }
              }
              import java.util._
            """))
        scala_target = self.make_target('a/scala:fail', ScalaLibrary, sources=['fail.scala'])
        context = self._create_context(target_roots=[scala_target])
        with self.assertRaises(TaskError):
            self.execute(context)
| |
import asyncio
import json
import logging
import signal
import ssl
import time
from collections import defaultdict, namedtuple
from functools import partial
from again.utils import natural_sort
from aiohttp import web
from .packet import ControlPacket
from .pinger import TCPPinger
from .protocol_factory import get_trellio_protocol
from .utils.log import setup_logging
import os
# One registered service instance: identity (name/version), declared
# dependencies, network location, unique node id and transport type.
Service = namedtuple('Service', ['name', 'version', 'dependencies', 'host', 'port', 'node_id', 'type'])


def tree():
    # Autovivifying nested dict: any missing key yields another tree().
    return defaultdict(tree)
def json_file_to_dict(_file: str) -> dict:
    """Load the JSON document at path *_file* and return the parsed object."""
    with open(_file) as config_file:
        return json.load(config_file)
class Repository:
    """In-memory store of registered services, their dependencies,
    event subscriptions and uptime bookkeeping for the registry."""

    def __init__(self):
        # name -> version -> [(host, port, node_id, type), ...]
        self._registered_services = defaultdict(lambda: defaultdict(list))
        # "name/version" -> [node_id, ...] awaiting activation
        self._pending_services = defaultdict(list)
        # "name/version" -> list of dependency descriptors
        self._service_dependencies = {}
        # name -> version -> endpoint -> [(name, version, host, port, node_id, strategy), ...]
        self._subscribe_list = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
        self._uptimes = tree()
        self.logger = logging.getLogger(__name__)

    def register_service(self, service: Service):
        """Record a service instance and mark its node as pending activation."""
        service_name = self._get_full_service_name(service.name, service.version)
        service_entry = (service.host, service.port, service.node_id, service.type)
        self._registered_services[service.name][service.version].append(service_entry)
        # in future there can be multiple nodes for same service, for load balancing purposes
        self._pending_services[service_name].append(service.node_id)
        self._uptimes[service_name][service.host] = {
            'uptime': int(time.time()),
            'node_id': service.node_id
        }
        if len(service.dependencies):
            # BUG FIX: the guard previously looked up the bare service.name,
            # but this dict is keyed by "name/version" (service_name), so the
            # check never matched and dependencies were always overwritten.
            if not self._service_dependencies.get(service_name):
                self._service_dependencies[service_name] = service.dependencies

    def is_pending(self, name, version):
        """Return True while any instance of name/version awaits activation."""
        return self._get_full_service_name(name, version) in self._pending_services

    def add_pending_service(self, name, version, node_id):
        self._pending_services[self._get_full_service_name(name, version)].append(node_id)

    def get_pending_services(self):
        """Return the pending services as (name, version) tuples."""
        return [self._split_key(k) for k in self._pending_services.keys()]

    def get_pending_instances(self, name, version):
        return self._pending_services.get(self._get_full_service_name(name, version), [])

    def remove_pending_instance(self, name, version, node_id):
        """Drop one pending node; forget the service once no nodes remain."""
        self.get_pending_instances(name, version).remove(node_id)
        if not len(self.get_pending_instances(name, version)):
            self._pending_services.pop(self._get_full_service_name(name, version))

    def get_instances(self, name, version):
        return self._registered_services[name][version]

    def get_versioned_instances(self, name, version):
        """Return instances for the best non-breaking (same major) version."""
        version = self._get_non_breaking_version(version, list(self._registered_services[name].keys()))
        return self._registered_services[name][version]

    def get_consumers(self, name, service_version):
        """Return the set of (name, version) that depend on name/service_version."""
        consumers = set()
        for _name, dependencies in self._service_dependencies.items():
            for dependency in dependencies:
                if dependency['name'] == name and dependency['version'] == service_version:
                    consumers.add(self._split_key(_name))
        return consumers

    def get_dependencies(self, name, version):
        return self._service_dependencies.get(self._get_full_service_name(name, version), [])

    def get_node(self, node_id):
        """Return a Service describing *node_id*, or None if unknown."""
        for name, versions in self._registered_services.items():
            for version, instances in versions.items():
                for host, port, node, service_type in instances:
                    if node_id == node:
                        return Service(name, version, [], host, port, node, service_type)
        return None

    def remove_node(self, node_id):
        """Remove the instance for *node_id* and record its downtime."""
        thehost = None
        for name, versions in self._registered_services.items():
            for version, instances in versions.items():
                for instance in instances:
                    host, port, node, service_type = instance
                    if node_id == node:
                        thehost = host
                        instances.remove(instance)
                        break
        for name, nodes in self._uptimes.items():
            for host, uptimes in nodes.items():
                if host == thehost and uptimes['node_id'] == node_id:
                    uptimes['downtime'] = int(time.time())
        self.log_uptimes()
        return None

    def get_uptimes(self):
        return self._uptimes

    def log_uptimes(self):
        """Emit one stats log record per known service host."""
        for name, nodes in self._uptimes.items():
            for host, d in nodes.items():
                now = int(time.time())
                live = d.get('downtime', 0) < d['uptime']
                uptime = now - d['uptime'] if live else 0
                logd = {'service_name': name.split('/')[0], 'hostname': host, 'status': live,
                        'uptime': int(uptime)}
                logging.getLogger('stats').info(logd)

    def xsubscribe(self, name, version, host, port, node_id, endpoints):
        """Register *node_id* as subscriber to each given event endpoint."""
        entry = (name, version, host, port, node_id)
        for endpoint in endpoints:
            self._subscribe_list[endpoint['name']][endpoint['version']][endpoint['endpoint']].append(
                entry + (endpoint['strategy'],))

    def get_subscribers(self, name, version, endpoint):
        return self._subscribe_list[name][version][endpoint]

    def _get_non_breaking_version(self, version, versions):
        """Prefer an exact match, else the newest same-major version."""
        if version in versions:
            return version
        versions.sort(key=natural_sort, reverse=True)
        for v in versions:
            if self._is_non_breaking(v, version):
                return v
        return version

    @staticmethod
    def _is_non_breaking(v, version):
        # Same major version is treated as compatible.
        return version.split('.')[0] == v.split('.')[0]

    @staticmethod
    def _get_full_service_name(name: str, version):
        return '{}/{}'.format(name, version)

    @staticmethod
    def _split_key(key: str):
        return tuple(key.split('/'))
class Registry:
def __init__(self, ip, port, repository: Repository):
self._ip = ip
self._port = port
self._loop = asyncio.get_event_loop()
self._client_protocols = {}
self._service_protocols = {}
self._repository = repository
self._tcp_pingers = {}
self._http_pingers = {}
self.logger = logging.getLogger()
try:
config = json_file_to_dict('./config.json')
self._ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
self._ssl_context.load_cert_chain(config['SSL_CERTIFICATE'], config['SSL_KEY'])
except:
self._ssl_context = None
def _create_http_app(self):
app = web.Application()
registry_dump_handle.registry = self
app.router.add_get('/registry/', registry_dump_handle)
handler = app.make_handler(access_log=self.logger)
task = asyncio.get_event_loop().create_server(handler, self._ip, os.environ.get('TRELLIO_HTTP_PORT', 4501))
http_server = asyncio.get_event_loop().run_until_complete(task)
return http_server
def start(self):
setup_logging("registry")
self._loop.add_signal_handler(getattr(signal, 'SIGINT'), partial(self._stop, 'SIGINT'))
self._loop.add_signal_handler(getattr(signal, 'SIGTERM'), partial(self._stop, 'SIGTERM'))
registry_coroutine = self._loop.create_server(
partial(get_trellio_protocol, self), self._ip, self._port, ssl=self._ssl_context)
server = self._loop.run_until_complete(registry_coroutine)
http_server = self._create_http_app()
try:
self._loop.run_forever()
except Exception as e:
print(e)
finally:
server.close()
http_server.close()
self._loop.run_until_complete(server.wait_closed())
self._loop.close()
def _stop(self, signame: str):
print('\ngot signal {} - exiting'.format(signame))
self._loop.stop()
def receive(self, packet: dict, protocol, transport):
request_type = packet['type']
if request_type in ['register', 'get_instances', 'xsubscribe', 'get_subscribers']:
for_log = {}
params = packet['params']
for_log["caller_name"] = params['name'] + '/' + params['version']
for_log["caller_address"] = transport.get_extra_info("peername")[0]
for_log["request_type"] = request_type
self.logger.debug(for_log)
if request_type == 'register':
packet['params']['host'] = transport.get_extra_info("peername")[0]
self.register_service(packet, protocol)
elif request_type == 'get_instances':
self.get_service_instances(packet, protocol)
elif request_type == 'xsubscribe':
self._xsubscribe(packet)
elif request_type == 'get_subscribers':
self.get_subscribers(packet, protocol)
elif request_type == 'pong':
self._ping(packet)
elif request_type == 'ping':
self._handle_ping(packet, protocol)
elif request_type == 'uptime_report':
self._get_uptime_report(packet, protocol)
def deregister_service(self, host, port, node_id):
service = self._repository.get_node(node_id)
self._tcp_pingers.pop(node_id, None)
self._http_pingers.pop((host, port), None)
if service:
for_log = {"caller_name": service.name + '/' + service.version, "caller_address": service.host,
"request_type": 'deregister'}
self.logger.debug(for_log)
self._repository.remove_node(node_id)
if service is not None:
self._service_protocols.pop(node_id, None)
self._client_protocols.pop(node_id, None)
self._notify_consumers(service.name, service.version, node_id)
if not len(self._repository.get_instances(service.name, service.version)):
consumers = self._repository.get_consumers(service.name, service.version)
for consumer_name, consumer_version in consumers:
for _, _, node_id, _ in self._repository.get_instances(consumer_name, consumer_version):
self._repository.add_pending_service(consumer_name, consumer_version, node_id)
def register_service(self, packet: dict, registry_protocol):
params = packet['params']
service = Service(params['name'], params['version'], params['dependencies'], params['host'], params['port'],
params['node_id'], params['type'])
self._repository.register_service(service)
self._client_protocols[params['node_id']] = registry_protocol
if params['node_id'] not in self._service_protocols.keys():
self._connect_to_service(params['host'], params['port'], params['node_id'], params['type'])
self._handle_pending_registrations()
self._inform_consumers(service)
def _inform_consumers(self, service: Service):
consumers = self._repository.get_consumers(service.name, service.version)
for service_name, service_version in consumers:
if not self._repository.is_pending(service_name, service_version):
instances = self._repository.get_instances(service_name, service_version)
for host, port, node, type in instances:
protocol = self._client_protocols[node]
protocol.send(ControlPacket.new_instance(
service.name, service.version, service.host, service.port, service.node_id, service.type))
def _send_activated_packet(self, name, version, node):
protocol = self._client_protocols.get(node, None)
if protocol:
packet = self._make_activated_packet(name, version)
protocol.send(packet)
def _handle_pending_registrations(self):
for name, version in self._repository.get_pending_services():
dependencies = self._repository.get_dependencies(name, version) # list
should_activate = True
for dependency in dependencies:
instances = self._repository.get_versioned_instances(dependency['name'], dependency['version']) # list
tcp_instances = [instance for instance in instances if instance[3] == 'tcp']
if not len(
tcp_instances): # means the dependency doesn't have an activated tcp service, so registration
# pending
should_activate = False
break
for node in self._repository.get_pending_instances(name, version): # node is node id
if should_activate:
self._send_activated_packet(name, version, node)
self._repository.remove_pending_instance(name, version, node)
self.logger.info('%s activated', (name, version))
else:
self.logger.info('%s can\'t register because it depends on %s', (name, version), dependency)
def _make_activated_packet(self, name, version):
dependencies = self._repository.get_dependencies(name, version)
instances = {
(dependency['name'], dependency['version']): self._repository.get_versioned_instances(dependency['name'],
dependency['version'])
for dependency in dependencies}
return ControlPacket.activated(instances)
def _connect_to_service(self, host, port, node_id, service_type):
if service_type == 'tcp':
if node_id not in self._service_protocols:
coroutine = self._loop.create_connection(partial(get_trellio_protocol, self), host, port)
future = asyncio.ensure_future(coroutine)
future.add_done_callback(partial(self._handle_service_connection, node_id, host, port))
elif service_type == 'http':
pass
# if not (host, port) in self._http_pingers:
# pinger = HTTPPinger(host, port, node_id, self)
# self._http_pingers[(host, port)] = pinger
# pinger.ping()
def _handle_service_connection(self, node_id, host, port, future):
transport, protocol = future.result()
self._service_protocols[node_id] = protocol
pinger = TCPPinger(host, port, node_id, protocol, self)
self._tcp_pingers[node_id] = pinger
pinger.ping()
def _notify_consumers(self, name, version, node_id):
packet = ControlPacket.deregister(name, version, node_id)
for consumer_name, consumer_version in self._repository.get_consumers(name, version):
for host, port, node, service_type in self._repository.get_instances(consumer_name, consumer_version):
protocol = self._client_protocols[node]
protocol.send(packet)
def get_service_instances(self, packet, registry_protocol):
params = packet['params']
name, version = params['name'].lower(), params['version']
instances = self._repository.get_instances(name, version)
instance_packet = ControlPacket.send_instances(name, version, packet['request_id'], instances)
registry_protocol.send(instance_packet)
def get_subscribers(self, packet, protocol):
params = packet['params']
request_id = packet['request_id']
name, version, endpoint = params['name'].lower(), params['version'], params['endpoint']
subscribers = self._repository.get_subscribers(name, version, endpoint)
packet = ControlPacket.subscribers(name, version, endpoint, request_id, subscribers)
protocol.send(packet)
def on_timeout(self, host, port, node_id):
service = self._repository.get_node(node_id)
self.logger.debug('%s timed out', service)
self.deregister_service(host, port, node_id)
def _ping(self, packet):
pinger = self._tcp_pingers[packet['node_id']]
pinger.pong_received()
def _pong(self, packet, protocol):
    """Reply to a ping packet with a pong carrying the same node id."""
    protocol.send(ControlPacket.pong(packet['node_id']))
def _xsubscribe(self, packet):
    """Register an event subscription described by the packet's params."""
    p = packet['params']
    # 'events' holds the endpoints the subscriber is interested in.
    self._repository.xsubscribe(
        p['name'], p['version'], p['host'], p['port'], p['node_id'], p['events'])
def _get_uptime_report(self, packet, protocol):
    """Send the repository's collected uptime statistics to the requester."""
    protocol.send(ControlPacket.uptime(self._repository.get_uptimes()))
def periodic_uptime_logger(self):
    """Log node uptimes now and reschedule this method every 300 seconds."""
    self._repository.log_uptimes()
    # Re-arm on the current event loop; 5 minutes between snapshots.
    asyncio.get_event_loop().call_later(300, self.periodic_uptime_logger)
def _handle_ping(self, packet, protocol):
    """Responds to pings from registry_client only if the node_ids present
    in the ping payload are registered.

    :param packet: The 'ping' packet received
    :param protocol: The protocol on which the pong should be sent
    """
    if 'payload' in packet:
        # Pong only when every node mentioned in the payload is known
        # to the repository; a single unknown node suppresses the pong.
        get_node = self._repository.get_node
        if all(get_node(node_id) is not None
               for node_id in packet['payload'].values()):
            self._pong(packet, protocol)
    else:
        # No payload to validate: always pong.
        self._pong(packet, protocol)
async def registry_dump_handle(request):
    """Read-only HTTP handler that dumps the registry state as JSON.

    Expects the Registry instance to have been attached beforehand as
    ``registry_dump_handle.registry``.

    :param request: the incoming aiohttp request (unused)
    :return: ``web.Response`` with the serialized registry state
    """
    registry = registry_dump_handle.registry
    repo = registry._repository
    response_dict = {
        'registered_services': repo._registered_services,
        'uptimes': repo._uptimes,
        'service_dependencies': repo._service_dependencies,
    }
    # BUG FIX: a successful read must report 200 OK; the previous code
    # returned 400 (Bad Request) with a valid body.
    return web.Response(status=200, content_type='application/json',
                        body=json.dumps(response_dict).encode())
if __name__ == '__main__':
    from setproctitle import setproctitle
    # Name the process so it is identifiable in ps/top output.
    setproctitle("trellio-registry")
    # NOTE(review): a None host presumably binds all interfaces -- confirm
    # against Registry.start before relying on it.
    REGISTRY_HOST = None
    REGISTRY_PORT = 4500
    registry = Registry(REGISTRY_HOST, REGISTRY_PORT, Repository())
    # Start the 5-minute uptime logging loop, then serve forever.
    registry.periodic_uptime_logger()
    registry.start()
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
#
# Copyright 2014 Robert Bird
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# blar.py
# Version 0.1
# A simple program for converting log files to a streaming UTF genome and analyzing it.
# Syntax is: python blar.py -i <file_name> or python blar.py -h for help & options
import sys
import xxhash
import numpy
import fileinput
import locale
import codecs
import unicodedata
import math
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import operator
import time
import argparse
# Is odd...roughly twice as fast as mod test
def is_odd(num):
    """Return 1 when *num* is odd, else 0 (low-bit test, per the note
    above: roughly twice as fast as a modulo test)."""
    return 1 if num & 1 else 0
# Moving average
def moving_average(values, window):
    """Simple moving average of *values* over *window* points.

    Returns only the 'valid' part of the convolution, i.e. an array of
    length len(values) - window + 1.
    """
    kernel = numpy.full(window, 1.0 / window)
    return numpy.convolve(values, kernel, 'valid')
# Calculate vector magnitude
def magnitude(V):
    """Euclidean length of vector *V*."""
    squared_total = sum(x * x for x in numpy.nditer(V))
    return numpy.sqrt(squared_total)
# Vector normalizer
def normalize(V):
    """Scale vector *V* to unit length, returning a list of components."""
    length = magnitude(V)
    return [numpy.divide(component, length) for component in numpy.nditer(V)]
# Feature hash input. Has the effect of quantizing inputs with graceful random collisions
def feature_hash_string(s, window, dim):
    """Feature-hash the *window*-character shingles of string *s* into a
    *dim*-dimensional count vector.

    Has the effect of quantizing inputs with graceful random collisions.

    :param s: input string (one log line)
    :param window: shingle width in characters (Markov-chain order)
    :param dim: length of the returned vector
    :return: numpy array of per-bucket shingle counts
    """
    counts = {}
    for bucket in range(0, dim):
        counts[bucket] = 0
    length = len(s)
    # NOTE(review): the range stops at length - window, which skips the
    # final shingle -- possibly an off-by-one; confirm before changing
    # since fixing it would alter every genome.
    for x in range(0, length - window):
        # BUG FIX: xxh64() returns a hash object; the integer digest must
        # be extracted before the modulo can bucket it.
        key = xxhash.xxh64(s[x:x + window]).intdigest() % dim
        counts[key] += 1
    return numpy.asarray(counts.values())
# Use random projection for LSH and output a UTF char for the hash
def locality_hash_vector(v, width):
    """Locality-sensitive hash of vector *v* via random projection,
    returned as a single unicode character encoding the *width*-bit code.

    :param v: feature vector to hash
    :param width: number of projection bits (alphabet width)
    :return: one unicode char whose code point is the LSH bit pattern
    """
    # Removed the dead `start = time.clock()` leftover from timing runs.
    code = 0
    for x in range(0, width):
        # Sign of the projection onto the x-th random vector gives one bit;
        # bit for x == 0 ends up most significant, matching the original
        # string-join construction.
        projection = numpy.dot(PROJECTION_VECTORS[x], v)
        code = (code << 1) | (0 if projection < 0 else 1)
    # Return unicode char equal to the LSH
    return unichr(code)
# MAIN
#
# Handle encodings for STDOUT vs other
# if sys.stdout.isatty():
# default_encoding = sys.stdout.encoding
# else:
# default_encoding = locale.getpreferredencoding()
# Process command line arguments
# CLI definition. Several numeric options use 0 / 0. as a "not set"
# sentinel; the effective defaults are resolved after parsing (below).
parser = argparse.ArgumentParser()
parser.add_argument(
    '-a',
    action='store',
    dest='a_width',
    default=0,
    type=int,
    help='Set the nucleotide alphabet width in bits',
)
parser.add_argument(
    '-f',
    action='store',
    dest='f_width',
    default=0,
    type=int,
    help='Set the feature hash vector width in array length',
)
parser.add_argument(
    '-v',
    action='store',
    dest='v_width',
    default=0,
    type=int,
    help='Set the n-gram vectorization length for line processing',
)
parser.add_argument(
    '-c',
    action='store',
    dest='c_width',
    default=0,
    type=int,
    help='Set the width of codons in nucleotides',
)
parser.add_argument(
    '-C',
    action='store_true',
    dest='C',
    default=False,
    help='Display charts; default off'
)
parser.add_argument(
    '-m',
    action='store',
    dest='ma_window',
    default=100,
    type=int,
    help='Set the window size for moving average display',
)
parser.add_argument(
    '-g',
    action='store',
    dest='granularity',
    default=2,
    type=int,
    help='Sets the granularity factor for auto-tuning, from 1-N'
)
parser.add_argument(
    '-t',
    action='store',
    dest='i_threshold',
    default=0.,
    type=float,
    help='Sets the threshold for printing interesting items in std. deviations, default 2.0'
)
parser.add_argument(
    '-p',
    action='store_false',
    dest='hard',
    required=False,
    default=True,
    help='Do not use hard projection vectors',
)
parser.add_argument(
    '-i',
    action='store',
    dest='f_name',
    required=True,
    help='Input file name'
)
# args is a plain dict of option values keyed by dest name.
args = vars(parser.parse_args())
# File handling
f = open(args['f_name'], 'r')
# Read in the file once and build a list of line offsets for display use later.
# line_offset[i] is the byte offset of line i so we can seek() back to it
# when printing interesting lines.
line_offset = []
offset = 0
file_length = 0
for line in f:
    line_offset.append(offset)
    offset += len(line)
    file_length += 0x1
print '# of lines: ', file_length
f.seek(0)
# Assign argument values; autotune feature_width & alphabet_width
# based on file length re: Shannon, et al.
granularity = args['granularity']
ma_window = args['ma_window']
if args['c_width'] > 0:
    codon_width = args['c_width']
else:
    codon_width = 4
if args['v_width'] > 0:
    vect_width = args['v_width']
else:
    vect_width = 4
if args['f_width'] > 0:
    feature_width = args['f_width']
else:
    # Feature vector width scales with log2 of the line count, times the
    # requested granularity factor.
    print 'Autotuning feature hash vector width'
    feature_width = granularity * int(math.ceil(math.log(file_length,
                                      2)))
if args['a_width'] > 0:
    alphabet_width = args['a_width']
else:
    # Alphabet width (LSH bits) grows as sqrt(log2(lines)).
    print 'Autotuning alphabet width'
    alphabet_width = int(math.ceil(math.sqrt(math.log(file_length, 2))))
if args['i_threshold'] != 0.:
    interestingness_threshold = args['i_threshold']
else:
    interestingness_threshold = 2.0
print 'Feature width: ', feature_width
print 'Alphabet width: ', alphabet_width
print 'Codon width: ', codon_width
print 'Granularity: ', granularity
# Generate random unit normal comparison vectors for random projection
PROJECTION_VECTORS = []
if args['hard']:
    # "Hard" vectors: entries flipped to +/-1, seeded per-vector so runs
    # are reproducible.
    for vector in range(0, alphabet_width):
        numpy.random.seed(vector)
        v = numpy.random.randint(2, size=feature_width)
        counter = 0
        for e in v:
            if e == 0:
                v[counter] = -0x1
                counter += 0x1
            else:
                v[counter] = 0x1
                counter += 0x1
        PROJECTION_VECTORS.append(v)
else:
    # Soft vectors: unit-normalized Gaussian rows.
    for vector in range(0, alphabet_width):
        numpy.random.seed(vector)
        PROJECTION_VECTORS.append(normalize(numpy.random.randn(0x1,
                                  feature_width)))
# CREATE GENOME
#
# Note irritating list method to guarantee unicode handling beyond
# UTF8 rather than concatenating a string which is nicer for STDOUT.
# To say I'm tempted to cap it at 8 is an understatement, it doesn't
# seem necessary for an effective tool
start = time.clock()
genome = []
# One LSH "nucleotide" character per input line: hash the line's shingles
# into a feature vector, then project it down to an alphabet_width-bit code.
for line in f:
    genome.append(locality_hash_vector(feature_hash_string(line,
                  vect_width, feature_width), alphabet_width))
genome_len = len(genome)
print 'File genome assembly time: ', round(time.clock() - start), 's'
print 'Genome length: ', genome_len
# SCORING
#
# Find codon counts over genome; create stream of windows for scoring display
counts = {}
codon_stream = []
# Slide a codon-sized window over the genome, counting each codon and
# recording the stream of windows for scoring below.
for x in range(0, len(genome) - codon_width + 0x1):
    window = []
    for shift in range(0, codon_width):
        window.append(genome[x + shift])
    key = ''.join(window)
    if key in counts:
        counts[key] += 0x1
    else:
        counts[key] = 0x1
    codon_stream.append(key)
# Find coding cost for each codon and create scoring map.
# Score is the Shannon self-information (in bits) of each codon.
scoring_map = {}
for key in counts.keys():
    scoring_map[key] = -math.log(float(counts[key]) / (len(genome)
                       - codon_width + 0x1), 2)
# Normalize scores to 0..1
#
# for key in scoring_map:
#     scoring_map[key] = (scoring_map[key] - min) / max
# Floor scores; use if not normalizing to reduce visual noise
# NOTE(review): this assignment shadows the builtin `min`.
min = numpy.min(scoring_map.values())
for key in scoring_map:
    scoring_map[key] = scoring_map[key] - min
# Create stream of scores; Apply exponential decay to scores as symbols re-occur
codon_score_stream = []
decay = {}
score = 0
for e in codon_stream:
    if e in decay:
        decay[e] += 0x1
    else:
        decay[e] = 0x1
    # Halve the score on each re-occurrence, with the decay capped at 2**16.
    if decay[e] < 16:
        score = 1.0 / 2 ** (decay[e] - 0x1) * scoring_map[e]
    else:
        score = 1.0 / 2 ** 16 * scoring_map[e]
    codon_score_stream.append(score)
max_anomaly = numpy.max(codon_score_stream)
print 'Max anomaly score: ', max_anomaly, 'bits'
# Find some robust statistics to pick out interesting items for printing
# Too expensive for files of any significant length
#
# start = time.clock()
# median_score = numpy.median(codon_score_stream)
# cr_s_estimator = 0
# med_dists = []
# for e in codon_score_stream:
# e_dist = []
# for all in codon_score_stream:
# e_dist.append(e - all)
# # print e_dist
# med_dists.append(numpy.median(e_dist))
# cr_s_estimator = numpy.median(med_dists)
# print 'Median: ', median_score
# print 'CR Estimator: ', cr_s_estimator
# print 'Calc time: ', time.clock() - start
# Find some Gaussian statistics
# Mean/stddev of the score stream drive the "interesting" threshold below.
average_score = numpy.average(codon_score_stream)
std_dev_score = numpy.std(codon_score_stream)
print 'Average score: ', average_score
print 'Std. deviation: ', std_dev_score
# PRINTING
#
# Should be replaced with some kind of auto scrolling line explorer
# where you highlight a region in the chart and it displays those lines + n extra
# from the file. It's more useful to view the lines in a viewer for scanning
#
# Scan over file and print out interesting lines. Does not use linecache to avoid memory caching file
tracker = 0
interesting = 0
# A line is "interesting" if its score exceeds mean + threshold * stddev;
# seek back to its recorded offset to print the original text.
for e in codon_score_stream:
    if e > average_score + interestingness_threshold * std_dev_score:
        f.seek(line_offset[tracker])
        print tracker, ',', f.readline().rstrip(), ',', e
        interesting += 0x1
    tracker += 0x1
f.seek(0)
print '# of interesting things: ', interesting
f.close()
# CHARTING
#
if args['C'] == True:
    # Create an LSH bucket counter for charting
    buckets = {}
    for e in genome:
        if e in buckets:
            buckets[e] += 0x1
        else:
            buckets[e] = 0x1
    # Create moving average list for charting
    streamMA = moving_average(codon_score_stream, ma_window)
    # Two panes: smoothed anomaly score on top, log10 bucket counts below.
    fig = plt.figure()
    score_plt = fig.add_subplot(211)
    score_plt.plot(streamMA)
    score_plt.set_ylabel('Anomaly Score')
    score_plt.set_ylim(0., numpy.max(streamMA) + 0x1)
    score_plt.set_xlabel('Line number')
    bucket_plt = fig.add_subplot(212)
    bucket_plt.bar(range(len(buckets.keys())), [math.log(float(y), 10)
                   for y in buckets.values()], 0x1)
    bucket_plt.set_ylim(0., numpy.max([math.log(float(y), 10) for y in
                        buckets.values()]) + 0x1)
    bucket_plt.set_xlim(0., 2 ** alphabet_width)
    bucket_plt.set_xlabel('LSH Bin number')
    bucket_plt.set_ylabel('Counts (log10)')
    plt.show()
| |
"""
Title: Neural Style Transfer with AdaIN
Author: [Aritra Roy Gosthipaty](https://twitter.com/arig23498), [Ritwik Raha](https://twitter.com/ritwik_raha)
Date created: 2021/11/08
Last modified: 2021/11/08
Description: Neural Style Transfer with Adaptive Instance Normalization.
"""
"""
# Introduction
[Neural Style Transfer](https://www.tensorflow.org/tutorials/generative/style_transfer)
is the process of transferring the style of one image onto the content
of another. This was first introduced in the seminal paper
["A Neural Algorithm of Artistic Style"](https://arxiv.org/abs/1508.06576)
by Gatys et al. A major limitation of the technique proposed in this
work is in its runtime, as the algorithm uses a slow iterative
optimization process.
Follow-up papers that introduced
[Batch Normalization](https://arxiv.org/abs/1502.03167),
[Instance Normalization](https://arxiv.org/abs/1701.02096) and
[Conditional Instance Normalization](https://arxiv.org/abs/1610.07629)
allowed Style Transfer to be performed in new ways, no longer
requiring a slow iterative process.
Following these papers, the authors Xun Huang and Serge
Belongie propose
[Adaptive Instance Normalization](https://arxiv.org/abs/1703.06868) (AdaIN),
which allows arbitrary style transfer in real time.
In this example we implement Adaptive Instance Normalization
for Neural Style Transfer. We show in the below figure the output
of our AdaIN model trained for
only **30 epochs**.

You can also try out the model with your own images with this
[Hugging Face demo](https://huggingface.co/spaces/ariG23498/nst).
"""
"""
# Setup
We begin with importing the necessary packages. We also set the
seed for reproducibility. The global variables are hyperparameters
which we can change as we like.
"""
import os
import glob
import imageio
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import tensorflow_datasets as tfds
from tensorflow.keras import layers
# Defining the global variables.
IMAGE_SIZE = (224, 224)  # every image is resized to this (height, width)
BATCH_SIZE = 64
# Training for a single epoch for time constraint.
# Please use at least 30 epochs to see good results.
EPOCHS = 1
AUTOTUNE = tf.data.AUTOTUNE  # let tf.data tune parallelism and prefetching
"""
## Style transfer sample gallery
For Neural Style Transfer we need style images and content images. In
this example we will use the
[Best Artworks of All Time](https://www.kaggle.com/ikarus777/best-artworks-of-all-time)
as our style dataset and
[Pascal VOC](https://www.tensorflow.org/datasets/catalog/voc)
as our content dataset.
This is a deviation from the original paper implementation by the
authors, where they use
[WIKI-Art](https://paperswithcode.com/dataset/wikiart) as style and
[MSCOCO](https://cocodataset.org/#home) as content datasets
respectively. We do this to create a minimal yet reproducible example.
## Downloading the dataset from Kaggle
The [Best Artworks of All Time](https://www.kaggle.com/ikarus777/best-artworks-of-all-time)
dataset is hosted on Kaggle and one can easily download it in Colab by
following these steps:
- Follow the instructions [here](https://github.com/Kaggle/kaggle-api)
in order to obtain your Kaggle API keys in case you don't have them.
- Use the following command to upload the Kaggle API keys.
```python
from google.colab import files
files.upload()
```
- Use the following commands to move the API keys to the proper
directory and download the dataset.
```shell
$ mkdir ~/.kaggle
$ cp kaggle.json ~/.kaggle/
$ chmod 600 ~/.kaggle/kaggle.json
$ kaggle datasets download ikarus777/best-artworks-of-all-time
$ unzip -qq best-artworks-of-all-time.zip
$ rm -rf images
$ mv resized artwork
$ rm best-artworks-of-all-time.zip artists.csv
```
"""
"""
## `tf.data` pipeline
In this section, we will build the `tf.data` pipeline for the project.
For the style dataset, we decode, convert and resize the images from
the folder. For the content images we are already presented with a
`tf.data` dataset as we use the `tfds` module.
After we have our style and content data pipeline ready, we zip the
two together to obtain the data pipeline that our model will consume.
"""
def decode_and_resize(image_path):
    """Decodes and resizes an image from the image file path.

    Args:
        image_path: The image file path.

    Returns:
        A float32 image resized to IMAGE_SIZE.
    """
    image = tf.io.read_file(image_path)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.convert_image_dtype(image, dtype="float32")
    image = tf.image.resize(image, IMAGE_SIZE)
    return image
def extract_image_from_voc(element):
    """Extracts the image tensor from a PascalVOC dataset element.

    Args:
        element: A dictionary of data containing an "image" key.

    Returns:
        A float32 image resized to IMAGE_SIZE.
    """
    image = element["image"]
    image = tf.image.convert_image_dtype(image, dtype="float32")
    image = tf.image.resize(image, IMAGE_SIZE)
    return image
# Get the image file paths for the style images.
style_images = os.listdir("artwork/resized")
style_images = [os.path.join("artwork/resized", path) for path in style_images]
# Split the style images into train (80%), val (10%) and test (10%).
total_style_images = len(style_images)
train_style = style_images[: int(0.8 * total_style_images)]
val_style = style_images[int(0.8 * total_style_images) : int(0.9 * total_style_images)]
test_style = style_images[int(0.9 * total_style_images) :]
# Build the style and content tf.data datasets. Every dataset repeats
# forever; the effective epoch length is set later via steps_per_epoch.
train_style_ds = (
    tf.data.Dataset.from_tensor_slices(train_style)
    .map(decode_and_resize, num_parallel_calls=AUTOTUNE)
    .repeat()
)
train_content_ds = tfds.load("voc", split="train").map(extract_image_from_voc).repeat()
val_style_ds = (
    tf.data.Dataset.from_tensor_slices(val_style)
    .map(decode_and_resize, num_parallel_calls=AUTOTUNE)
    .repeat()
)
val_content_ds = (
    tfds.load("voc", split="validation").map(extract_image_from_voc).repeat()
)
test_style_ds = (
    tf.data.Dataset.from_tensor_slices(test_style)
    .map(decode_and_resize, num_parallel_calls=AUTOTUNE)
    .repeat()
)
test_content_ds = (
    tfds.load("voc", split="test")
    .map(extract_image_from_voc, num_parallel_calls=AUTOTUNE)
    .repeat()
)
# Zipping the style and content datasets so each element is a
# (style_image, content_image) pair, shuffled, batched and prefetched.
train_ds = (
    tf.data.Dataset.zip((train_style_ds, train_content_ds))
    .shuffle(BATCH_SIZE * 2)
    .batch(BATCH_SIZE)
    .prefetch(AUTOTUNE)
)
val_ds = (
    tf.data.Dataset.zip((val_style_ds, val_content_ds))
    .shuffle(BATCH_SIZE * 2)
    .batch(BATCH_SIZE)
    .prefetch(AUTOTUNE)
)
test_ds = (
    tf.data.Dataset.zip((test_style_ds, test_content_ds))
    .shuffle(BATCH_SIZE * 2)
    .batch(BATCH_SIZE)
    .prefetch(AUTOTUNE)
)
"""
## Visualizing the data
It is always better to visualize the data before training. To ensure
the correctness of our preprocessing pipeline, we visualize 10 samples
from our dataset.
"""
# Sanity-check the pipeline by plotting 10 (style, content) pairs.
style, content = next(iter(train_ds))
fig, axes = plt.subplots(nrows=10, ncols=2, figsize=(5, 30))
[ax.axis("off") for ax in np.ravel(axes)]
for (axis, style_image, content_image) in zip(axes, style[0:10], content[0:10]):
    (ax_style, ax_content) = axis
    ax_style.imshow(style_image)
    ax_style.set_title("Style Image")
    ax_content.imshow(content_image)
    ax_content.set_title("Content Image")
"""
## Architecture
The style transfer network takes a content image and a style image as
inputs and outputs the style transferred image. The authors of AdaIN
propose a simple encoder-decoder structure for achieving this.

The content image (`C`) and the style image (`S`) are both fed to the
encoder networks. The output from these encoder networks (feature maps)
are then fed to the AdaIN layer. The AdaIN layer computes a combined
feature map. This feature map is then fed into a randomly initialized
decoder network that serves as the generator for the neural style
transfered image.

The style feature map (`fs`) and the content feature map (`fc`) are
fed to the AdaIN layer. This layer produces the combined feature map
`t`. The function `g` represents the decoder (generator) network.
"""
"""
### Encoder
The encoder is a part of the pretrained (pretrained on
[imagenet](https://www.image-net.org/)) VGG19 model. We slice the
model from the `block4-conv1` layer. The output layer is as suggested
by the authors in their paper.
"""
def get_encoder():
    """Build the frozen VGG19 encoder truncated at `block4_conv1`."""
    backbone = keras.applications.VGG19(
        include_top=False, weights="imagenet", input_shape=(*IMAGE_SIZE, 3),
    )
    backbone.trainable = False
    truncated = keras.Model(backbone.input, backbone.get_layer("block4_conv1").output)
    inputs = layers.Input([*IMAGE_SIZE, 3])
    return keras.Model(inputs, truncated(inputs), name="mini_vgg19")
"""
### Adaptive Instance Normalization
The AdaIN layer takes in the features
of the content and style image. The layer can be defined via the
following equation:

where `sigma` is the standard deviation and `mu` is the mean for the
concerned variable. In the above equation the mean and variance of the
content feature map `fc` is aligned with the mean and variance of the
style feature maps `fs`.
It is important to note that the AdaIN layer proposed by the authors
uses no other parameters apart from mean and variance. The layer also
does not have any trainable parameters. This is why we use a
*Python function* instead of using a *Keras layer*. The function takes
style and content feature maps, computes the mean and standard deviation
of the images and returns the adaptive instance normalized feature map.
"""
def get_mean_std(x, epsilon=1e-5):
    """Per-sample spatial mean and (epsilon-stabilized) standard deviation
    of a feature map."""
    # Reduce over the spatial axes only, keeping dims for broadcasting.
    mean, variance = tf.nn.moments(x, axes=[1, 2], keepdims=True)
    return mean, tf.sqrt(variance + epsilon)
def ada_in(style, content):
    """Computes the AdaIN feature map: the content features re-statisticized
    with the style features' mean and standard deviation.

    Args:
        style: The style feature map.
        content: The content feature map.

    Returns:
        The AdaIN feature map.
    """
    c_mean, c_std = get_mean_std(content)
    s_mean, s_std = get_mean_std(style)
    return s_std * (content - c_mean) / c_std + s_mean
"""
### Decoder
The authors specify that the decoder network must mirror the encoder
network. We have symmetrically inverted the encoder to build our
decoder. We have used `UpSampling2D` layers to increase the spatial
resolution of the feature maps.
Note that the authors warn against using any normalization layer
in the decoder network, and do indeed go on to show that including
batch normalization or instance normalization hurts the performance
of the overall network.
This is the only portion of the entire architecture that is trainable.
"""
def get_decoder():
    """Build the trainable decoder: a mirror of the truncated VGG19 encoder
    with UpSampling2D layers and no normalization layers."""
    conv = {"kernel_size": 3, "strides": 1, "padding": "same", "activation": "relu"}
    return keras.Sequential(
        [
            layers.InputLayer((None, None, 512)),
            layers.Conv2D(filters=512, **conv),
            layers.UpSampling2D(),
            layers.Conv2D(filters=256, **conv),
            layers.Conv2D(filters=256, **conv),
            layers.Conv2D(filters=256, **conv),
            layers.Conv2D(filters=256, **conv),
            layers.UpSampling2D(),
            layers.Conv2D(filters=128, **conv),
            layers.Conv2D(filters=128, **conv),
            layers.UpSampling2D(),
            layers.Conv2D(filters=64, **conv),
            # Final RGB projection squashed to [0, 1].
            layers.Conv2D(
                filters=3,
                kernel_size=3,
                strides=1,
                padding="same",
                activation="sigmoid",
            ),
        ]
    )
"""
### Loss functions
Here we build the loss functions for the neural style transfer model.
The authors propose to use a pretrained VGG-19 to compute the loss
function of the network. It is important to keep in mind that this
will be used for training only the decoder network. The total
loss (`Lt`) is a weighted combination of content loss (`Lc`) and style
loss (`Ls`). The `lambda` term is used to vary the amount of style
transfered.

### Content Loss
This is the Euclidean distance between the content image features
and the features of the neural style transferred image.

Here the authors propose to use the output from the AdaIn layer `t` as
the content target rather than using features of the original image as
target. This is done to speed up convergence.
### Style Loss
Rather than using the more commonly used
[Gram Matrix](https://mathworld.wolfram.com/GramMatrix.html),
the authors propose to compute the difference between the statistical features
(mean and variance) which makes it conceptually cleaner. This can be
easily visualized via the following equation:

where `theta` denotes the layers in VGG-19 used to compute the loss.
In this case this corresponds to:
- `block1_conv1`
- `block1_conv2`
- `block1_conv3`
- `block1_conv4`
"""
def get_loss_net():
    """Build the frozen VGG19 that exposes the four conv-block outputs used
    to compute the style/content losses."""
    backbone = keras.applications.VGG19(
        include_top=False, weights="imagenet", input_shape=(*IMAGE_SIZE, 3)
    )
    backbone.trainable = False
    tap_layers = ["block1_conv1", "block2_conv1", "block3_conv1", "block4_conv1"]
    taps = [backbone.get_layer(name).output for name in tap_layers]
    multi_output = keras.Model(backbone.input, taps)
    inputs = layers.Input([*IMAGE_SIZE, 3])
    return keras.Model(inputs, multi_output(inputs), name="loss_net")
"""
## Neural Style Transfer
This is the trainer module. We wrap the encoder and decoder inside of
a `tf.keras.Model` subclass. This allows us to customize what happens
in the `model.fit()` loop.
"""
class NeuralStyleTransfer(tf.keras.Model):
    """Trainer for AdaIN style transfer.

    The encoder and loss network are frozen; only the decoder is trained.
    The duplicated forward/loss pipeline previously copied between
    train_step and test_step is factored into private helpers.
    """

    def __init__(self, encoder, decoder, loss_net, style_weight, **kwargs):
        super().__init__(**kwargs)
        self.encoder = encoder
        self.decoder = decoder
        self.loss_net = loss_net
        self.style_weight = style_weight

    def compile(self, optimizer, loss_fn):
        """Attach the optimizer/loss and create the running-mean trackers."""
        super().compile()
        self.optimizer = optimizer
        self.loss_fn = loss_fn
        self.style_loss_tracker = keras.metrics.Mean(name="style_loss")
        self.content_loss_tracker = keras.metrics.Mean(name="content_loss")
        self.total_loss_tracker = keras.metrics.Mean(name="total_loss")

    def _compute_losses(self, style, content):
        """Shared forward pass: encode -> AdaIN -> decode -> VGG losses.

        Returns (loss_content, loss_style, total_loss)."""
        # Encode the style and content image.
        style_encoded = self.encoder(style)
        content_encoded = self.encoder(content)
        # The AdaIN feature map `t` doubles as the content target, as
        # proposed by the authors to speed up convergence.
        t = ada_in(style=style_encoded, content=content_encoded)
        # Generate the neural style transferred image.
        reconstructed_image = self.decoder(t)
        # Compute the losses.
        reconstructed_vgg_features = self.loss_net(reconstructed_image)
        style_vgg_features = self.loss_net(style)
        loss_content = self.loss_fn(t, reconstructed_vgg_features[-1])
        # Style loss compares mean/std statistics at each tapped VGG layer.
        loss_style = 0.0
        for inp, out in zip(style_vgg_features, reconstructed_vgg_features):
            mean_inp, std_inp = get_mean_std(inp)
            mean_out, std_out = get_mean_std(out)
            loss_style += self.loss_fn(mean_inp, mean_out) + self.loss_fn(
                std_inp, std_out
            )
        loss_style = self.style_weight * loss_style
        total_loss = loss_content + loss_style
        return loss_content, loss_style, total_loss

    def _update_trackers(self, loss_content, loss_style, total_loss):
        """Update the loss trackers and return the metrics dict."""
        self.style_loss_tracker.update_state(loss_style)
        self.content_loss_tracker.update_state(loss_content)
        self.total_loss_tracker.update_state(total_loss)
        return {
            "style_loss": self.style_loss_tracker.result(),
            "content_loss": self.content_loss_tracker.result(),
            "total_loss": self.total_loss_tracker.result(),
        }

    def train_step(self, inputs):
        style, content = inputs
        with tf.GradientTape() as tape:
            loss_content, loss_style, total_loss = self._compute_losses(
                style, content
            )
        # Only the decoder is trainable; encoder and loss net stay frozen.
        trainable_vars = self.decoder.trainable_variables
        gradients = tape.gradient(total_loss, trainable_vars)
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        return self._update_trackers(loss_content, loss_style, total_loss)

    def test_step(self, inputs):
        style, content = inputs
        # Same forward pass as training, without gradient computation.
        loss_content, loss_style, total_loss = self._compute_losses(
            style, content
        )
        return self._update_trackers(loss_content, loss_style, total_loss)

    @property
    def metrics(self):
        return [
            self.style_loss_tracker,
            self.content_loss_tracker,
            self.total_loss_tracker,
        ]
"""
## Train Monitor callback
This callback is used to visualize the style transfer output of
the model at the end of each epoch. The objective of style transfer cannot be
quantified properly, and is to be subjectively evaluated by an audience.
For this reason, visualization is a key aspect of evaluating the model.
"""
# Fixed (style, content) batch reused at every epoch end for visual checks.
test_style, test_content = next(iter(test_ds))


class TrainMonitor(tf.keras.callbacks.Callback):
    """Plots the style, content and stylized images after each epoch."""

    def on_epoch_end(self, epoch, logs=None):
        # Encode the style and content image.
        test_style_encoded = self.model.encoder(test_style)
        test_content_encoded = self.model.encoder(test_content)
        # Compute the AdaIN features.
        test_t = ada_in(style=test_style_encoded, content=test_content_encoded)
        test_reconstructed_image = self.model.decoder(test_t)
        # Plot the Style, Content and the NST image.
        fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(20, 5))
        ax[0].imshow(tf.keras.preprocessing.image.array_to_img(test_style[0]))
        ax[0].set_title(f"Style: {epoch:03d}")
        ax[1].imshow(tf.keras.preprocessing.image.array_to_img(test_content[0]))
        ax[1].set_title(f"Content: {epoch:03d}")
        ax[2].imshow(
            tf.keras.preprocessing.image.array_to_img(test_reconstructed_image[0])
        )
        ax[2].set_title(f"NST: {epoch:03d}")
        plt.show()
        plt.close()
"""
## Train the model
In this section, we define the optimizer, the loss function, and the
trainer module. We compile the trainer module with the optimizer and
the loss function and then train it.
*Note*: We train the model for a single epoch due to time constraints,
but we will need to train it for at least 30 epochs to see good results.
"""
optimizer = keras.optimizers.Adam(learning_rate=1e-5)
loss_fn = keras.losses.MeanSquaredError()
encoder = get_encoder()
loss_net = get_loss_net()
decoder = get_decoder()
model = NeuralStyleTransfer(
    encoder=encoder, decoder=decoder, loss_net=loss_net, style_weight=4.0
)
model.compile(optimizer=optimizer, loss_fn=loss_fn)
# steps_per_epoch/validation_steps are required because the datasets repeat().
history = model.fit(
    train_ds,
    epochs=EPOCHS,
    steps_per_epoch=50,
    validation_data=val_ds,
    validation_steps=50,
    callbacks=[TrainMonitor()],
)
"""
## Inference
After we train the model, we now need to run inference with it. We will
pass arbitrary content and style images from the test dataset and take a look at
the output images.
*NOTE*: To try out the model on your own images, you can use this
[Hugging Face demo](https://huggingface.co/spaces/ariG23498/nst).
"""
# Run one test batch through the trained encoder/decoder and plot
# (style, content, stylized) triplets for the first 10 samples.
for style, content in test_ds.take(1):
    style_encoded = model.encoder(style)
    content_encoded = model.encoder(content)
    t = ada_in(style=style_encoded, content=content_encoded)
    reconstructed_image = model.decoder(t)
fig, axes = plt.subplots(nrows=10, ncols=3, figsize=(10, 30))
[ax.axis("off") for ax in np.ravel(axes)]
for axis, style_image, content_image, reconstructed_image in zip(
    axes, style[0:10], content[0:10], reconstructed_image[0:10]
):
    (ax_style, ax_content, ax_reconstructed) = axis
    ax_style.imshow(style_image)
    ax_style.set_title("Style Image")
    ax_content.imshow(content_image)
    ax_content.set_title("Content Image")
    ax_reconstructed.imshow(reconstructed_image)
    ax_reconstructed.set_title("NST Image")
"""
## Conclusion
Adaptive Instance Normalization allows arbitrary style transfer in
real time. It is also important to note that the novel proposition of
the authors is to achieve this only by aligning the statistical
features (mean and standard deviation) of the style and the content
images.
*Note*: AdaIN also serves as the base for
[Style-GANs](https://arxiv.org/abs/1812.04948).
## Reference
- [TF implementation](https://github.com/ftokarev/tf-adain)
## Acknowledgement
We thank [Luke Wood](https://lukewood.xyz) for his
detailed review.
"""
| |
# -*- coding: utf-8 -*-
"""
This script requires a third-party module, 'requests'.
You can get it from PyPI, i.e. you can install it using
easy_install or pip.
http://docs.python-requests.org/en/v0.10.4/
Original source code is written by shin1ogawa, which is in Java.
https://gist.github.com/1899391
Materials for this session are available at the following URLs:
- Hands on material: http://goo.gl/oAhzI
- Google APIs Console: https://code.google.com/apis/console/
- Google APIs Explorer: http://code.google.com/apis/explorer/
- OAuth 2.0 Playground: https://code.google.com/oauthplayground/
"""
__author__ = "@ymotongpoo"
from urllib import urlencode
import json
from subprocess import Popen
import time, sha, jwt, hashlib, requests, OpenSSL
from oauth2client import anyjson
# OAuth 2.0 client credentials as issued by the Google APIs Console.
# SECURITY NOTE(review): a real client secret and API key are committed here
# in plain text; they should be revoked and loaded from the environment or a
# config file instead.
client_email = "222573514309@developer.gserviceaccount.com"
client_id='222573514309.apps.googleusercontent.com'
client_secret='r4BerSFPl7p6bHr2uYK4MHik'
user_agent='gcal_tests/v01'
developerKey='AIzaSyB101MP8UXS7I8jIgJ0IYEDhr3arua5mB0'
api_key = developerKey
# JWT-style issued-at / expiry timestamps in whole seconds; exp is one hour out.
iat = int("{0}".format(time.time())[:10])
exp = iat + 3600
#redirect_uri = "urn:localhost" #"urn:ietf:wg:oauth:2.0:oob"
#redirect_uri = "urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob"
redirect_uri = r"http://localhost:8000"
base_url = r"https://accounts.google.com/o/oauth2/"
# Filled in lazily by the flow functions below.
authorization_code = ""
access_token = ""
"""
Retrieving authorization_code from authorization API.
"""
def retrieve_authorization_code():
    """Obtain an OAuth 2.0 authorization code interactively.

    Builds the consent-screen URL, opens it in the default browser and asks
    the user to paste the resulting code back on stdin.
    """
    authorization_code_req = {
        "response_type": "code",
        "client_id": client_id,
        "redirect_uri": redirect_uri,
        "scope": (r"https://www.googleapis.com/auth/userinfo.profile" +
                  r" https://www.googleapis.com/auth/userinfo.email" +
                  r" https://www.googleapis.com/auth/calendar")
    }
    # Don't follow the redirect: we only want the Location header that points
    # at Google's consent page.
    r = requests.get(base_url + "auth?%s" % urlencode(authorization_code_req), allow_redirects=False)
    print "Request Gotten"
    url = r.headers.get('location')
    print url
    # 'open' is the OS X URL launcher -- assumes a Mac; TODO confirm.
    Popen(["open", url])
    authorization_code = raw_input("\nAuthorization Code >>> ")
    return authorization_code
"""
Retrieving access_token and refresh_token from Token API.
"""
def retrieve_tokens(authorization_code):
    """Exchange an authorization code for OAuth 2.0 tokens.

    POSTs the code to Google's token endpoint and returns the decoded JSON
    response as a dict (contains 'access_token' and, on first authorization,
    'refresh_token').
    """
    access_token_req = {
        "code": authorization_code,
        "client_id": client_id,
        "client_secret": client_secret,
        "redirect_uri": redirect_uri,
        "grant_type": "authorization_code",
    }
    # NOTE: 'requests' computes the Content-Length header itself.  The
    # previous version injected a bogus 'content-length' *form field* into
    # the POST body, which the token endpoint does not expect; it is gone.
    r = requests.post(base_url + "token", data=access_token_req)
    return json.loads(r.text)
"""
Sample code of fetching user information from userinfo API.
"""
def get_userinfo():
    """Fetch and print the authenticated user's profile (userinfo v2 API)."""
    global authorization_code
    authorization_code = retrieve_authorization_code()
    tokens = retrieve_tokens(authorization_code)
    # NOTE(review): unlike get_calendar_list(), access_token is rebound
    # locally here and the module-level value stays untouched.
    access_token = tokens['access_token']
    authorization_header = {"Authorization": "OAuth %s" % access_token}
    r = requests.get("https://www.googleapis.com/oauth2/v2/userinfo", headers=authorization_header)
    print r.text
def get_calendar_list():
    """Return the raw JSON text of the user's calendar list (Calendar v3).

    Side effect: runs the full consent flow and stores the resulting code
    and token in the module-level globals for later reuse.
    """
    global authorization_code
    global access_token
    authorization_code = retrieve_authorization_code()
    tokens = retrieve_tokens(authorization_code)
    access_token = tokens['access_token']
    authorization_header = {"Authorization": "OAuth %s" % access_token}
    r = requests.get("https://www.googleapis.com/calendar/v3/users/me/calendarList", headers=authorization_header)
    return r.text
def _get_start_end_time(event):
try:
if event['start'].has_key('date'):
start = event['start']['date']
elif event['start'].has_key('dateTime'):
start = event['start']['dateTime']
else:
start = 'N/A'
if event['end'].has_key('date'):
end = event['end']['date']
elif event['end'].has_key('dateTime'):
end = event['end']['dateTime']
else:
end = 'N/A'
return start, end
except:
return event['etag'], event['status']
def get_events_list():
    """Print every event (summary plus start/end) of every calendar.

    Reuses the module-level authorization_code/access_token when already
    set so the user only goes through the consent flow once.  Returns the
    decoded events of the *last* calendar processed.
    """
    global authorization_code
    global access_token
    data = json.loads(get_calendar_list())
    for calendar in data['items']:
        calendar_id = calendar['id']
        print calendar['summary']
        if authorization_code == "" or access_token == "":
            authorization_code = retrieve_authorization_code()
            tokens = retrieve_tokens(authorization_code)
            access_token = tokens['access_token']
        authorization_header = {"Authorization": "OAuth %s" % access_token}
        url = ("https://www.googleapis.com/calendar/v3/calendars/%s/events?key=%s" % ((calendar_id), (api_key)))
        r = requests.get(url, headers=authorization_header)
        events = json.loads(r.text)
        try:
            for event in events['items']:
                print event.get('summary', '(Event title not set)')
                if event['status'] != 'cancelled':
                    start, end = _get_start_end_time(event)
                    print " start : ", start, " end : ", end
        except:
            # NOTE(review): bare except silently hides API errors (e.g. a
            # response with no 'items'); narrowing to KeyError would be safer.
            pass
    return events
def main():
    """Entry point: dump all calendars and their events to stdout."""
    get_events_list()
# NOTE(review): the oauth2client flow code below this guard runs at import
# time anyway, because it is plain module-level code.
if __name__ == '__main__':
    main()
import os.path
# NOTE(review): this takes the dirname of the *home directory* itself
# (e.g. '/home'), which looks unintended -- probably meant the script's own
# directory via os.path.dirname(os.path.realpath(__file__)).  Confirm.
here = os.path.dirname(os.path.realpath(os.path.expanduser('~')))
storage_file = os.path.join(here, 'calendar.dat')
import gflags
import httplib2
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
FLAGS = gflags.FLAGS
# Set up a Flow object to be used if we need to authenticate. This
# sample uses OAuth 2.0, and we set up the OAuth2WebServerFlow with
# the information it needs to authenticate. Note that it is called
# the Web Server Flow, but it can also handle the flow for native
# applications
# The client_id and client_secret are copied from the API Access tab on
# the Google APIs Console
FLOW = OAuth2WebServerFlow(
    client_id=client_id,
    client_secret=client_secret,
    scope='https://www.googleapis.com/auth/calendar',
    user_agent=user_agent)
# To disable the local server feature, uncomment the following line:
FLAGS.auth_local_webserver = False
# If the Credentials don't exist or are invalid, run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage(storage_file)
credentials = storage.get()
if credentials is None or credentials.invalid == True:
    credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
# Build a service object for interacting with the API. Visit
# the Google APIs Console
# to get a developerKey for your own application.
service = build(serviceName='calendar', version='v3', http=http,
                developerKey=developerKey)
| |
#!/usr/bin/python
# interpolate scalar gradient onto nedelec space
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
from dolfin import *
Print = PETSc.Sys.Print
# from MatrixOperations import *
import numpy as np
#import matplotlib.pylab as plt
import PETScIO as IO
import common
import scipy
import scipy.io
import time
import BiLinear as forms
import IterOperations as Iter
import MatrixOperations as MO
import CheckPetsc4py as CP
import ExactSol
import Solver as S
import MHDmatrixPrecondSetup as PrecondSetup
import NSprecondSetup
import MHDprec as MHDpreconditioner
import memory_profiler
import gc
import MHDmulti
import MHDmatrixSetup as MHDsetup
#@profile
def foo():
    """Mesh-refinement study for a 3D MHD Picard iteration.

    For m-1 successively refined unit-cube meshes, assembles the coupled
    Navier-Stokes / Maxwell system, runs a Picard loop until the update
    norm drops below tol, and prints a pandas table of solver statistics.
    Python 2 only (xrange, print statements).
    """
    m = 7
    # Per-level error/bookkeeping arrays (mostly unused; the error section
    # below is commented out).
    errL2u =np.zeros((m-1,1))
    errH1u =np.zeros((m-1,1))
    errL2p =np.zeros((m-1,1))
    errL2b =np.zeros((m-1,1))
    errCurlb =np.zeros((m-1,1))
    errL2r =np.zeros((m-1,1))
    errH1r =np.zeros((m-1,1))
    l2uorder = np.zeros((m-1,1))
    H1uorder =np.zeros((m-1,1))
    l2porder = np.zeros((m-1,1))
    l2border = np.zeros((m-1,1))
    Curlborder =np.zeros((m-1,1))
    l2rorder = np.zeros((m-1,1))
    H1rorder = np.zeros((m-1,1))
    NN = np.zeros((m-1,1))
    DoF = np.zeros((m-1,1))
    Velocitydim = np.zeros((m-1,1))
    Magneticdim = np.zeros((m-1,1))
    Pressuredim = np.zeros((m-1,1))
    Lagrangedim = np.zeros((m-1,1))
    Wdim = np.zeros((m-1,1))
    iterations = np.zeros((m-1,1))
    SolTime = np.zeros((m-1,1))
    udiv = np.zeros((m-1,1))
    MU = np.zeros((m-1,1))
    level = np.zeros((m-1,1))
    NSave = np.zeros((m-1,1))
    Mave = np.zeros((m-1,1))
    TotalTime = np.zeros((m-1,1))
    nn = 2
    dim = 2
    ShowResultPlots = 'yes'
    split = 'Linear'
    MU[0]= 1e0
    for xx in xrange(1,m):
        print xx
        level[xx-1] = xx+ 0
        nn = 2**(level[xx-1])
        # Create mesh and define function space
        nn = int(nn)
        NN[xx-1] = nn/2
        # parameters["form_compiler"]["quadrature_degree"] = 6
        # parameters = CP.ParameterSetup()
        mesh = UnitCubeMesh(nn,nn,nn)
        order = 1
        parameters['reorder_dofs_serial'] = False
        # Taylor-Hood-style velocity/pressure pair plus Nedelec/Lagrange
        # spaces for the magnetic unknowns.
        Velocity = VectorFunctionSpace(mesh, "CG", order)
        Pressure = FunctionSpace(mesh, "DG", order-1)
        Magnetic = FunctionSpace(mesh, "N1curl", order)
        Lagrange = FunctionSpace(mesh, "CG", order)
        W = MixedFunctionSpace([Velocity, Pressure, Magnetic,Lagrange])
        # W = Velocity*Pressure*Magnetic*Lagrange
        Velocitydim[xx-1] = Velocity.dim()
        Pressuredim[xx-1] = Pressure.dim()
        Magneticdim[xx-1] = Magnetic.dim()
        Lagrangedim[xx-1] = Lagrange.dim()
        Wdim[xx-1] = W.dim()
        print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n"
        dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(), Lagrange.dim()]
        def boundary(x, on_boundary):
            return on_boundary
        u0, p0,b0, r0, Laplacian, Advection, gradPres,CurlCurl, gradR, NS_Couple, M_Couple = ExactSol.MHD3D(4,1)
        bcu = DirichletBC(W.sub(0),u0, boundary)
        bcb = DirichletBC(W.sub(2),b0, boundary)
        bcr = DirichletBC(W.sub(3),r0, boundary)
        # bc = [u0,p0,b0,r0]
        bcs = [bcu,bcb,bcr]
        FSpaces = [Velocity,Pressure,Magnetic,Lagrange]
        (u, b, p, r) = TrialFunctions(W)
        (v, c, q, s) = TestFunctions(W)
        # Physical parameters: coupling number, magnetic Prandtl, viscosity.
        kappa = 1.0
        Mu_m =1e1
        # NOTE(review): this rebinds MU from the bookkeeping array above to
        # a scalar viscosity -- the MU[xx-1] records are lost from here on.
        MU = 1.0/1
        IterType = 'Full'
        Split = "Yes"
        Saddle = "No"
        Stokes = "No"
        # Manufactured right-hand sides built from the exact solution.
        F_NS = -MU*Laplacian+Advection+gradPres-kappa*NS_Couple
        if kappa == 0:
            F_M = Mu_m*CurlCurl+gradR -kappa*M_Couple
        else:
            F_M = Mu_m*kappa*CurlCurl+gradR -kappa*M_Couple
        params = [kappa,Mu_m,MU]
        # MO.PrintStr("Preconditioning MHD setup",5,"+","\n\n","\n\n")
        Hiptmairtol = 1e-5
        HiptmairMatrices = PrecondSetup.MagneticSetup(Magnetic, Lagrange, b0, r0, Hiptmairtol, params)
        MO.PrintStr("Setting up MHD initial guess",5,"+","\n\n","\n\n")
        u_k,p_k,b_k,r_k = common.InitialGuess(FSpaces,[u0,p0,b0,r0],[F_NS,F_M],params,HiptmairMatrices,1e-6,Neumann=Expression(("0","0")),options ="New", FS = "DG")
        #plot(p_k, interactive = True)
        b_t = TrialFunction(Velocity)
        c_t = TestFunction(Velocity)
        #print assemble(inner(b,c)*dx).array().shape
        #print mat
        #ShiftedMass = assemble(inner(mat*b,c)*dx)
        #as_vector([inner(b,c)[0]*b_k[0],inner(b,c)[1]*(-b_k[1])])
        # Normalise the pressure to zero mean (it is only defined up to a
        # constant).
        ones = Function(Pressure)
        ones.vector()[:]=(0*ones.vector().array()+1)
        # pConst = - assemble(p_k*dx)/assemble(ones*dx)
        p_k.vector()[:] += - assemble(p_k*dx)/assemble(ones*dx)
        x = Iter.u_prev(u_k,p_k,b_k,r_k)
        KSPlinearfluids, MatrixLinearFluids = PrecondSetup.FluidLinearSetup(Pressure, MU)
        kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k)
        #plot(b_k)
        ns,maxwell,CoupleTerm,Lmaxwell,Lns = forms.MHD2D(mesh, FSpaces,F_M,F_NS, u_k,b_k,params,IterType,"DG",Saddle,Stokes)
        RHSform = forms.PicardRHS(mesh, FSpaces, u_k, p_k, b_k, r_k, params,"DG",Saddle,Stokes)
        # Homogeneous BCs for the Picard *updates*.
        bcu = DirichletBC(Velocity,Expression(("0.0","0.0","0.0")), boundary)
        bcb = DirichletBC(Magnetic,Expression(("0.0","0.0","0.0")), boundary)
        bcr = DirichletBC(Lagrange,Expression(("0.0")), boundary)
        bcs = [bcu,bcb,bcr]
        parameters['linear_algebra_backend'] = 'uBLAS'
        SetupType = 'multi-class'
        BC = MHDsetup.BoundaryIndices(mesh)
        eps = 1.0           # error measure ||u-u_k||
        tol = 1.0E-4        # tolerance
        # NOTE(review): 'iter' shadows the builtin of the same name.
        iter = 0            # iteration counter
        maxiter = 40        # max no of iterations allowed
        SolutionTime = 0
        outer = 0
        # parameters['linear_algebra_backend'] = 'uBLAS'
        # FSpaces = [Velocity,Magnetic,Pressure,Lagrange]
        if IterType == "CD":
            # Constant-coefficient (CD) variant: the operator is assembled
            # once and only the RHS changes per Picard step.
            MO.PrintStr("Setting up PETSc "+SetupType,2,"=","\n","\n")
            Alin = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "Linear",IterType)
            Fnlin,b = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "NonLinear",IterType)
            A = Fnlin+Alin
            A,b = MHDsetup.SystemAssemble(FSpaces,A,b,SetupType,IterType)
            u = b.duplicate()
        # Index sets splitting the mixed vector into fluid/magnetic parts.
        u_is = PETSc.IS().createGeneral(range(Velocity.dim()))
        NS_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim()))
        M_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim(),W.dim()))
        OuterTol = 1e-5
        InnerTol = 1e-5
        NSits =0
        Mits =0
        TotalStart =time.time()
        SolutionTime = 0
        # --- Picard loop -------------------------------------------------
        while eps > tol and iter < maxiter:
            iter += 1
            MO.PrintStr("Iter "+str(iter),7,"=","\n\n","\n\n")
            AssembleTime = time.time()
            if IterType == "CD":
                MO.StrTimePrint("MHD CD RHS assemble, time: ", time.time()-AssembleTime)
                b = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "CD",IterType)
            else:
                MO.PrintStr("Setting up PETSc "+SetupType,2,"=","\n","\n")
                if iter == 1:
                    Alin = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "Linear",IterType)
                    Fnlin,b = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "NonLinear",IterType)
                    A = Fnlin+Alin
                    A,b = MHDsetup.SystemAssemble(FSpaces,A,b,SetupType,IterType)
                    u = b.duplicate()
                else:
                    # NOTE(review): 'Fnline' is assigned but the stale
                    # 'Fnlin' from iteration 1 is used below -- this looks
                    # like a typo, so later iterations reuse the old
                    # nonlinear block.  Confirm against the 2D variant.
                    Fnline,b = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "NonLinear",IterType)
                    A = Fnlin+Alin
                    A,b = MHDsetup.SystemAssemble(FSpaces,A,b,SetupType,IterType)
            # AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs)
            # A,b = CP.Assemble(AA,bb)
            # if iter == 1:
            MO.StrTimePrint("MHD total assemble, time: ", time.time()-AssembleTime)
            kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k)
            print "Inititial guess norm: ", u.norm()
            u = b.duplicate()
            #A,Q
            if IterType == 'Full':
                n = FacetNormal(mesh)
                # |b_k|^2 I - b_k b_k^T style coupling matrix.
                # NOTE(review): 'b[1]*b[1]' in the first diagonal entry
                # looks like a typo for 'b_k[1]*b_k[1]' (compare the other
                # two diagonal entries) -- confirm before changing.
                mat = as_matrix([[b_k[2]*b_k[2]+b[1]*b[1],-b_k[1]*b_k[0],-b_k[0]*b_k[2]],
                                 [-b_k[1]*b_k[0],b_k[0]*b_k[0]+b_k[2]*b_k[2],-b_k[2]*b_k[1]],
                                 [-b_k[0]*b_k[2],-b_k[1]*b_k[2],b_k[0]*b_k[0]+b_k[1]*b_k[1]]])
                F = CP.Scipy2PETSc(Fnlin[0])
                a = params[2]*inner(grad(b_t), grad(c_t))*dx(W.mesh()) + inner((grad(b_t)*u_k),c_t)*dx(W.mesh()) +(1/2)*div(u_k)*inner(c_t,b_t)*dx(W.mesh()) - (1/2)*inner(u_k,n)*inner(c_t,b_t)*ds(W.mesh())+kappa/Mu_m*inner(mat*b_t,c_t)*dx(W.mesh())
                ShiftedMass = assemble(a)
                bcu.apply(ShiftedMass)
                kspF = NSprecondSetup.LSCKSPnonlinear(F)
            else:
                F = CP.Scipy2PETSc(Fnlin[0])
                kspF = NSprecondSetup.LSCKSPnonlinear(F)
            stime = time.time()
            u, mits,nsits = S.solve(A,b,u,params,W,'Directclass',IterType,OuterTol,InnerTol,HiptmairMatrices,Hiptmairtol,KSPlinearfluids, Fp,kspF)
            Soltime = time.time()- stime
            MO.StrTimePrint("MHD solve, time: ", Soltime)
            Mits += mits
            NSits += nsits
            SolutionTime += Soltime
            # Split the mixed update, measure the Picard increment, and
            # re-normalise the pressure before the next sweep.
            u1, p1, b1, r1, eps= Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"2",iter)
            p1.vector()[:] += - assemble(p1*dx)/assemble(ones*dx)
            u_k.assign(u1)
            p_k.assign(p1)
            b_k.assign(b1)
            r_k.assign(r1)
            uOld= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
            x = IO.arrayToVec(uOld)
        XX= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
        SolTime[xx-1] = SolutionTime/iter
        NSave[xx-1] = (float(NSits)/iter)
        Mave[xx-1] = (float(Mits)/iter)
        iterations[xx-1] = iter
        TotalTime[xx-1] = time.time() - TotalStart
        dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(),Lagrange.dim()]
    #
    #        ExactSolution = [u0,p0,b0,r0]
    #        errL2u[xx-1], errH1u[xx-1], errL2p[xx-1], errL2b[xx-1], errCurlb[xx-1], errL2r[xx-1], errH1r[xx-1] = Iter.Errors(XX,mesh,FSpaces,ExactSolution,order,dim, "DG")
    #
    #        if xx > 1:
    #            l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1]))
    #            H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1]))
    #
    #            l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1]))
    #
    #            l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1]))
    #            Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1]))
    #
    #            l2rorder[xx-1] = np.abs(np.log2(errL2r[xx-2]/errL2r[xx-1]))
    #            H1rorder[xx-1] = np.abs(np.log2(errH1r[xx-2]/errH1r[xx-1]))
    #
    #
    #
    #
    #    import pandas as pd
    #
    #
    #
    #    LatexTitles = ["l","DoFu","Dofp","V-L2","L2-order","V-H1","H1-order","P-L2","PL2-order"]
    #    LatexValues = np.concatenate((level,Velocitydim,Pressuredim,errL2u,l2uorder,errH1u,H1uorder,errL2p,l2porder), axis=1)
    #    LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles)
    #    pd.set_option('precision',3)
    #    LatexTable = MO.PandasFormat(LatexTable,"V-L2","%2.4e")
    #    LatexTable = MO.PandasFormat(LatexTable,'V-H1',"%2.4e")
    #    LatexTable = MO.PandasFormat(LatexTable,"H1-order","%1.2f")
    #    LatexTable = MO.PandasFormat(LatexTable,'L2-order',"%1.2f")
    #    LatexTable = MO.PandasFormat(LatexTable,"P-L2","%2.4e")
    #    LatexTable = MO.PandasFormat(LatexTable,'PL2-order',"%1.2f")
    #    print LatexTable
    #
    #
    #    print "\n\n   Magnetic convergence"
    #    MagneticTitles = ["l","B DoF","R DoF","B-L2","L2-order","B-Curl","HCurl-order"]
    #    MagneticValues = np.concatenate((level,Magneticdim,Lagrangedim,errL2b,l2border,errCurlb,Curlborder),axis=1)
    #    MagneticTable= pd.DataFrame(MagneticValues, columns = MagneticTitles)
    #    pd.set_option('precision',3)
    #    MagneticTable = MO.PandasFormat(MagneticTable,"B-Curl","%2.4e")
    #    MagneticTable = MO.PandasFormat(MagneticTable,'B-L2',"%2.4e")
    #    MagneticTable = MO.PandasFormat(MagneticTable,"L2-order","%1.2f")
    #    MagneticTable = MO.PandasFormat(MagneticTable,'HCurl-order',"%1.2f")
    #    print MagneticTable
    #
    import pandas as pd
    print "\n\n   Iteration table"
    if IterType == "Full":
        IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av Outer its","Av Inner its",]
    else:
        IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av NS iters","Av M iters"]
    # NOTE(review): values are concatenated as (..., Mave, NSave) while the
    # non-Full titles name NS before M -- the last two columns may be
    # swapped relative to their headers.  Confirm intended order.
    IterValues = np.concatenate((level,Wdim,SolTime,TotalTime,iterations,Mave,NSave),axis=1)
    IterTable= pd.DataFrame(IterValues, columns = IterTitles)
    if IterType == "Full":
        IterTable = MO.PandasFormat(IterTable,'Av Outer its',"%2.1f")
        IterTable = MO.PandasFormat(IterTable,'Av Inner its',"%2.1f")
    else:
        IterTable = MO.PandasFormat(IterTable,'Av NS iters',"%2.1f")
        IterTable = MO.PandasFormat(IterTable,'Av M iters',"%2.1f")
    print IterTable.to_latex()
    print " \n  Outer Tol:  ",OuterTol, "Inner Tol:   ", InnerTol
    IterTable.to_latex('3d.tex')
    # # # if (ShowResultPlots == 'yes'):
    #    plot(u_k)
    #    plot(interpolate(u0,Velocity))
    #
    #    plot(p_k)
    #
    #    plot(interpolate(p0,Pressure))
    #
    #    plot(b_k)
    #    plot(interpolate(b0,Magnetic))
    #
    #    plot(r_k)
    #    plot(interpolate(r0,Lagrange))
    #
    #    interactive()
    interactive()
# Run the whole refinement study on import/execution.
foo()
| |
import asyncio
import pytest
from aiohttp import web
from aiohttp.web_urldispatcher import UrlDispatcher
@pytest.fixture
def router():
    """Provide a fresh UrlDispatcher for every test."""
    dispatcher = UrlDispatcher()
    return dispatcher
def test_get(router):
    """web.get() registers a GET route plus an implicit HEAD companion."""
    @asyncio.coroutine
    def handler(request):
        pass

    router.add_routes([web.get('/', handler)])
    registered = list(router.routes())
    assert len(registered) == 2  # GET and HEAD
    head_route, get_route = registered
    assert get_route.handler is handler
    assert get_route.method == 'GET'
    assert str(get_route.url_for()) == '/'
    assert head_route.handler is handler
    assert head_route.method == 'HEAD'
def test_head(router):
    """web.head() registers a single HEAD route."""
    @asyncio.coroutine
    def handler(request):
        pass

    router.add_routes([web.head('/', handler)])
    assert len(router.routes()) == 1
    route = next(iter(router.routes()))
    assert route.handler is handler
    assert route.method == 'HEAD'
    assert str(route.url_for()) == '/'
def test_post(router):
    """web.post() registers exactly one POST route."""
    @asyncio.coroutine
    def handler(request):
        pass

    router.add_routes([web.post('/', handler)])
    # Consistency with the sibling tests: POST must not register an implicit
    # companion route (unlike GET, which also adds HEAD).  This assertion
    # was missing here although every other method test has it.
    assert len(router.routes()) == 1
    route = list(router.routes())[0]
    assert route.handler is handler
    assert route.method == 'POST'
    assert str(route.url_for()) == '/'
def test_put(router):
    """web.put() registers a single PUT route."""
    @asyncio.coroutine
    def handler(request):
        pass

    router.add_routes([web.put('/', handler)])
    assert len(router.routes()) == 1
    route = next(iter(router.routes()))
    assert route.handler is handler
    assert route.method == 'PUT'
    assert str(route.url_for()) == '/'
def test_patch(router):
    """web.patch() registers a single PATCH route."""
    @asyncio.coroutine
    def handler(request):
        pass

    router.add_routes([web.patch('/', handler)])
    assert len(router.routes()) == 1
    route = next(iter(router.routes()))
    assert route.handler is handler
    assert route.method == 'PATCH'
    assert str(route.url_for()) == '/'
def test_delete(router):
    """web.delete() registers a single DELETE route."""
    @asyncio.coroutine
    def handler(request):
        pass

    router.add_routes([web.delete('/', handler)])
    assert len(router.routes()) == 1
    route = next(iter(router.routes()))
    assert route.handler is handler
    assert route.method == 'DELETE'
    assert str(route.url_for()) == '/'
def test_route(router):
    """web.route() accepts an arbitrary, non-standard HTTP method."""
    @asyncio.coroutine
    def handler(request):
        pass

    router.add_routes([web.route('OTHER', '/', handler)])
    assert len(router.routes()) == 1
    route = next(iter(router.routes()))
    assert route.handler is handler
    assert route.method == 'OTHER'
    assert str(route.url_for()) == '/'
def test_head_deco(router):
    """RouteTableDef.head() decorator registers a single HEAD route."""
    routes = web.RouteTableDef()

    @routes.head('/path')
    @asyncio.coroutine
    def handler(request):
        pass

    router.add_routes(routes)
    assert len(router.routes()) == 1
    route = next(iter(router.routes()))
    assert route.method == 'HEAD'
    assert str(route.url_for()) == '/path'
def test_get_deco(router):
    """RouteTableDef.get() registers HEAD and GET routes, in that order."""
    routes = web.RouteTableDef()

    @routes.get('/path')
    @asyncio.coroutine
    def handler(request):
        pass

    router.add_routes(routes)
    registered = list(router.routes())
    assert len(registered) == 2
    head_route, get_route = registered
    assert head_route.method == 'HEAD'
    assert str(head_route.url_for()) == '/path'
    assert get_route.method == 'GET'
    assert str(get_route.url_for()) == '/path'
def test_post_deco(router):
    """RouteTableDef.post() decorator registers a single POST route."""
    routes = web.RouteTableDef()

    @routes.post('/path')
    @asyncio.coroutine
    def handler(request):
        pass

    router.add_routes(routes)
    assert len(router.routes()) == 1
    route = next(iter(router.routes()))
    assert route.method == 'POST'
    assert str(route.url_for()) == '/path'
def test_put_deco(router):
    """RouteTableDef.put() decorator registers a single PUT route."""
    routes = web.RouteTableDef()

    @routes.put('/path')
    @asyncio.coroutine
    def handler(request):
        pass

    router.add_routes(routes)
    assert len(router.routes()) == 1
    route = next(iter(router.routes()))
    assert route.method == 'PUT'
    assert str(route.url_for()) == '/path'
def test_patch_deco(router):
    """RouteTableDef.patch() decorator registers a single PATCH route."""
    routes = web.RouteTableDef()

    @routes.patch('/path')
    @asyncio.coroutine
    def handler(request):
        pass

    router.add_routes(routes)
    assert len(router.routes()) == 1
    route = next(iter(router.routes()))
    assert route.method == 'PATCH'
    assert str(route.url_for()) == '/path'
def test_delete_deco(router):
    """RouteTableDef.delete() decorator registers a single DELETE route."""
    routes = web.RouteTableDef()

    @routes.delete('/path')
    @asyncio.coroutine
    def handler(request):
        pass

    router.add_routes(routes)
    assert len(router.routes()) == 1
    route = next(iter(router.routes()))
    assert route.method == 'DELETE'
    assert str(route.url_for()) == '/path'
def test_route_deco(router):
    """RouteTableDef.route() decorator accepts an arbitrary method."""
    routes = web.RouteTableDef()

    @routes.route('OTHER', '/path')
    @asyncio.coroutine
    def handler(request):
        pass

    router.add_routes(routes)
    assert len(router.routes()) == 1
    route = next(iter(router.routes()))
    assert route.method == 'OTHER'
    assert str(route.url_for()) == '/path'
def test_routedef_sequence_protocol():
    """RouteTableDef supports len(), indexing, containment and iteration."""
    routes = web.RouteTableDef()

    @routes.delete('/path')
    @asyncio.coroutine
    def handler(request):
        pass

    assert len(routes) == 1
    entry = routes[0]
    assert isinstance(entry, web.RouteDef)
    assert entry in routes
    assert next(iter(routes)) is entry
def test_repr_route_def():
    """RouteDef repr shows method, path and handler name."""
    routes = web.RouteTableDef()

    @routes.get('/path')
    @asyncio.coroutine
    def handler(request):
        pass

    route_def = routes[0]
    assert repr(route_def) == "<RouteDef GET /path -> 'handler'>"
def test_repr_route_def_with_extra_info():
    """Extra keyword arguments show up in the RouteDef repr."""
    routes = web.RouteTableDef()

    @routes.get('/path', extra='info')
    @asyncio.coroutine
    def handler(request):
        pass

    route_def = routes[0]
    assert repr(route_def) == "<RouteDef GET /path -> 'handler', extra='info'>"
def test_repr_route_table_def():
    """RouteTableDef repr reports the number of collected routes."""
    routes = web.RouteTableDef()

    @routes.get('/path')
    @asyncio.coroutine
    def handler(request):
        pass

    assert repr(routes) == "<RouteTableDef count=1>"
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import errno
import itertools
import os
import time
from bs4 import BeautifulSoup
import click
import dataset
import funcy as fy
import requests
HTML_DB_FILENAME = 'scraped_html.db'
DATASET = dataset.connect('sqlite:///' + HTML_DB_FILENAME)
TABLE = DATASET['raw_html']
def mkdir_p(path):
    """
    Create *path* and any missing parents, like ``mkdir -p``.

    Already-existing directories are not an error.
    Taken from: http://stackoverflow.com/a/600612
    """
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        # Re-raise anything that is not "directory already exists".
        if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
            raise
@click.group()
def cli():
    """Pile of commands to scrape the boston marathon results."""
    # Group container only; the subcommands below do the actual work.
    pass
def extract_entrants(html):
    """Generator yielding entrant info dicts given an HTML file."""
    soup = BeautifulSoup(html, 'html5lib')
    rows = soup.select('.tablegrid_list_item > .tablegrid_table > tbody > tr')
    # Entrants occupy two rows apiece: a header row with the demographics
    # and a data row we ignore.  Pair them up, dropping the extra end row.
    for header_row, _data_row in fy.ipartition(2, rows):
        cells = [cell.get_text(strip=True)
                 for cell in header_row.find_all('td', recursive=False)]
        assert len(cells) == 9
        yield {
            'bib_number': cells[0],
            'name': cells[1],
            'age': cells[2],
            'gender': cells[3],
            'city': cells[4],
            'state': cells[5],
            'county': cells[6],
            'origin': cells[7],
        }
@cli.command()
@click.argument('output_csv', type=click.File('wb'))
def output_csv(output_csv):
    """Write a csv listing of all entrants."""
    entrants = fy.cat(extract_entrants(row['page_html']) for row in TABLE.all())
    # We could technically use the first entry's keys, but I like this column order.
    keys = [
        'bib_number',
        'name',
        'age',
        'gender',
        'city',
        'state',
        'county',
        'origin',
    ]
    writer = csv.DictWriter(output_csv, keys)
    writer.writeheader()
    writer.writerows(entrants)
    click.echo('Wrote %d entrants.' % len(entrants))
@cli.command()
def output_html():
    """Write all pages in the database into HTML files."""
    mkdir_p('output')
    for row in TABLE.all():
        filename = 'output/state_%s_page_%s.html' % (row['state_id'], row['page_number'])
        click.echo('Writing ' + filename)
        # open() rather than the Python-2-only file() builtin, so this
        # command also works under Python 3.
        with open(filename, 'w') as f:
            f.write(row['page_html'])
def scrape_state(state_id):
    """
    Generator yielding pages of HTML for a particular state.
    Returns tuples of (page_number, html_text); page_number starts at 0.
    """
    # Fuckton of random shit in here, but whatever, don't fuck with whatever the
    # server is doing if it works.
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'en-US,en;q=0.8',
        'Cache-Control': 'max-age=0',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Origin': 'http://registration.baa.org',
        'Referer': 'http://registration.baa.org/2015/cf/Public/iframe_ResultsSearch.cfm?mode=results',
    }
    # Search form fields as the results iframe submits them; everything but
    # VarStateID is left at its default.
    params = {
        'mode': 'results',
        'criteria': '',
        'StoredProcParamsOn': 'yes',
        'VarGenderID': 0,
        'VarBibNumber': '',
        'VarLastName': '',
        'VarFirstName': '',
        'VarStateID': state_id,
        'VarCountryOfResID': 0,
        'VarCountryOfCtzID': 0,
        'VarReportingSegID': 1,
        'VarAwardsDivID': 0,
        'VarQualClassID': 0,
        'VarCity': '',
        'VarTargetCount': 1000,
        'records': 25,
        'headerexists': 'Yes',
        'bordersize': 0,
        'bordercolor': '#ffffff',
        'rowcolorone': '#FFCC33',
        'rowcolortwo': '#FFCC33',
        'headercolor': '#ffffff',
        'headerfontface': 'Verdana,Arial,Helvetica,sans-serif',
        'headerfontcolor': '#004080',
        'headerfontsize': '12px',
        'fontface': 'Verdana,Arial,Helvetica,sans-serif',
        'fontcolor': '#000099',
        'fontsize': '10px',
        'linkfield': 'FormattedSortName',
        'linkurl': 'OpenDetailsWindow',
        'linkparams': 'RaceAppID',
        'queryname': 'SearchResults',
        'tablefields': 'FullBibNumber,FormattedSortName,AgeOnRaceDay,GenderCode,'
                       'City,StateAbbrev,CountryOfResAbbrev,CountryOfCtzAbbrev,'
                       'DisabilityGroup',
    }
    # 'start' is the server's 1-based record offset (1, 26, 51, ...);
    # page_number is our own 0-based counter.
    for page_number, start in enumerate(itertools.count(1, 25)):
        # Don't hammer the server. Give it a sec between requests.
        time.sleep(1.0)
        click.echo('Requesting state %d - page %d' % (state_id, page_number))
        response = requests.post(
            'http://registration.baa.org/2015/cf/Public/iframe_ResultsSearch.cfm',
            headers=headers,
            params=params,
            data={'start': start, 'next': 'Next 25 Records'},
        )
        response.raise_for_status()
        # Only yield if there actually are results. Just found this random
        # tr_header thing in the HTML of the pages that have results, but not
        # empty results pages.
        if 'tr_header' in response.text:
            yield page_number, response.text
        else:
            assert 'Next 25 Records' not in response.text
            click.echo('  No results found.')
            break
        # No more pages!
        if 'Next 25 Records' not in response.text:
            break
@cli.command()
def scrape():
    """Pull down HTML from the server into dataset."""
    # Bullshit, I know right? But no, go look at the search page.
    for state_id in range(2, 78):
        for page_number, page_html in scrape_state(state_id):
            record = dict(
                state_id=state_id,
                page_number=page_number,
                page_html=page_html,
            )
            # Keyed on (state_id, page_number) so re-runs overwrite rather
            # than duplicate.
            TABLE.upsert(record, ['state_id', 'page_number'])
# Script entry point: hand control to the click command group.
if __name__ == '__main__':
    cli()
| |
from __future__ import absolute_import
# Copyright (c) 2010-2017 openpyxl
# Builtins styles as defined in Part 4 Annex G.2
from .named_styles import NamedStyle
from openpyxl.xml.functions import fromstring
# XML snippets describing Excel's built-in named cell styles.  Each string
# is parsed with ``fromstring`` and fed to ``NamedStyle``; the ``builtinId``
# values come from ECMA-376 Part 4, Annex G.2.  The strings are runtime
# data and must be kept byte-for-byte intact.
# builtinId 0 -- the default cell style.
normal = """
<namedStyle builtinId="0" name="Normal">
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
# builtinId 3 -- thousands-separated number, two decimals.
comma = """
<namedStyle builtinId="3" name="Comma">
<alignment/>
<number_format>_-* #,##0.00\\ _$_-;\\-* #,##0.00\\ _$_-;_-* "-"??\\ _$_-;_-@_-</number_format>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
# builtinId 6 -- thousands-separated number, no decimals.
comma_0 = """
<namedStyle builtinId="6" name="Comma [0]">
<alignment/>
<number_format>_-* #,##0\\ _$_-;\\-* #,##0\\ _$_-;_-* "-"\\ _$_-;_-@_-</number_format>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
# builtinId 4 -- currency, two decimals.
currency = """
<namedStyle builtinId="4" name="Currency">
<alignment/>
<number_format>_-* #,##0.00\\ "$"_-;\\-* #,##0.00\\ "$"_-;_-* "-"??\\ "$"_-;_-@_-</number_format>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
# builtinId 7 -- currency, no decimals.
currency_0 = """
<namedStyle builtinId="7" name="Currency [0]">
<alignment/>
<number_format>_-* #,##0\\ "$"_-;\\-* #,##0\\ "$"_-;_-* "-"\\ "$"_-;_-@_-</number_format>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
# builtinId 5 -- percentage, no decimals.
percent = """
<namedStyle builtinId="5" name="Percent">
<alignment/>
<number_format>0%</number_format>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
# builtinId 8 -- unvisited hyperlink (theme colour 10).
hyperlink = """
<namedStyle builtinId="8" name="Hyperlink" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="10"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>"""
# builtinId 9 -- visited hyperlink (theme colour 11).
followed_hyperlink = """
<namedStyle builtinId="9" name="Followed Hyperlink" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="11"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>"""
# builtinId 15 -- sheet title (large bold Cambria, major scheme).
title = """
<namedStyle builtinId="15" name="Title">
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Cambria"/>
<family val="2"/>
<b val="1"/>
<color theme="3"/>
<sz val="18"/>
<scheme val="major"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
# builtinId 16 -- heading level 1 (thick bottom border).
headline_1 = """
<namedStyle builtinId="16" name="Headline 1" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom style="thick">
<color theme="4"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color theme="3"/>
<sz val="15"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
# builtinId 17 -- heading level 2 (thick, tinted bottom border).
headline_2 = """
<namedStyle builtinId="17" name="Headline 2" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom style="thick">
<color theme="4" tint="0.5"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color theme="3"/>
<sz val="13"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
# builtinId 18 -- heading level 3 (medium, tinted bottom border).
headline_3 = """
<namedStyle builtinId="18" name="Headline 3" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom style="medium">
<color theme="4" tint="0.4"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color theme="3"/>
<sz val="11"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
# builtinId 19 -- heading level 4 (bold only, no border).
headline_4 = """
<namedStyle builtinId="19" name="Headline 4">
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color theme="3"/>
<sz val="11"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
# builtinId 26 -- "Good" (green fill, dark green text).
good = """
<namedStyle builtinId="26" name="Good" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFC6EFCE"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color rgb="FF006100"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
# builtinId 27 -- "Bad" (red fill, dark red text).
bad = """
<namedStyle builtinId="27" name="Bad" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFFFC7CE"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color rgb="FF9C0006"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
# builtinId 28 -- "Neutral" (yellow fill, brown text).
neutral = """
<namedStyle builtinId="28" name="Neutral" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFFFEB9C"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color rgb="FF9C6500"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
input = """
<namedStyle builtinId="20" name="Input" >
<alignment/>
<border>
<left style="thin">
<color rgb="FF7F7F7F"/>
</left>
<right style="thin">
<color rgb="FF7F7F7F"/>
</right>
<top style="thin">
<color rgb="FF7F7F7F"/>
</top>
<bottom style="thin">
<color rgb="FF7F7F7F"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFFFCC99"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color rgb="FF3F3F76"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
output = """
<namedStyle builtinId="21" name="Output" >
<alignment/>
<border>
<left style="thin">
<color rgb="FF3F3F3F"/>
</left>
<right style="thin">
<color rgb="FF3F3F3F"/>
</right>
<top style="thin">
<color rgb="FF3F3F3F"/>
</top>
<bottom style="thin">
<color rgb="FF3F3F3F"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFF2F2F2"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color rgb="FF3F3F3F"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
calculation = """
<namedStyle builtinId="22" name="Calculation" >
<alignment/>
<border>
<left style="thin">
<color rgb="FF7F7F7F"/>
</left>
<right style="thin">
<color rgb="FF7F7F7F"/>
</right>
<top style="thin">
<color rgb="FF7F7F7F"/>
</top>
<bottom style="thin">
<color rgb="FF7F7F7F"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFF2F2F2"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color rgb="FFFA7D00"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
linked_cell = """
<namedStyle builtinId="24" name="Linked Cell" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom style="double">
<color rgb="FFFF8001"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color rgb="FFFA7D00"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
check_cell = """
<namedStyle builtinId="23" name="Check Cell" >
<alignment/>
<border>
<left style="double">
<color rgb="FF3F3F3F"/>
</left>
<right style="double">
<color rgb="FF3F3F3F"/>
</right>
<top style="double">
<color rgb="FF3F3F3F"/>
</top>
<bottom style="double">
<color rgb="FF3F3F3F"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFA5A5A5"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
warning = """
<namedStyle builtinId="11" name="Warning Text" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color rgb="FFFF0000"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
note = """
<namedStyle builtinId="10" name="Note" >
<alignment/>
<border>
<left style="thin">
<color rgb="FFB2B2B2"/>
</left>
<right style="thin">
<color rgb="FFB2B2B2"/>
</right>
<top style="thin">
<color rgb="FFB2B2B2"/>
</top>
<bottom style="thin">
<color rgb="FFB2B2B2"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFFFFFCC"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
explanatory = """
<namedStyle builtinId="53" name="Explanatory Text" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<i val="1"/>
<color rgb="FF7F7F7F"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
total = """
<namedStyle builtinId="25" name="Total" >
<alignment/>
<border>
<left/>
<right/>
<top style="thin">
<color theme="4"/>
</top>
<bottom style="double">
<color theme="4"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_1 = """
<namedStyle builtinId="29" name="Accent1" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="4"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_1_20 = """
<namedStyle builtinId="30" name="20 % - Accent1" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="4" tint="0.7999816888943144"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_1_40 = """
<namedStyle builtinId="31" name="40 % - Accent1" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="4" tint="0.5999938962981048"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_1_60 = """
<namedStyle builtinId="32" name="60 % - Accent1" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="4" tint="0.3999755851924192"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_2 = """<namedStyle builtinId="33" name="Accent2" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="5"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>"""
accent_2_20 = """
<namedStyle builtinId="34" name="20 % - Accent2" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="5" tint="0.7999816888943144"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>"""
accent_2_40 = """
<namedStyle builtinId="35" name="40 % - Accent2" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="5" tint="0.5999938962981048"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>"""
accent_2_60 = """
<namedStyle builtinId="36" name="60 % - Accent2" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="5" tint="0.3999755851924192"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>"""
accent_3 = """
<namedStyle builtinId="37" name="Accent3" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="6"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>"""
accent_3_20 = """
<namedStyle builtinId="38" name="20 % - Accent3" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="6" tint="0.7999816888943144"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>"""
accent_3_40 = """
<namedStyle builtinId="39" name="40 % - Accent3" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="6" tint="0.5999938962981048"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_3_60 = """
<namedStyle builtinId="40" name="60 % - Accent3" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="6" tint="0.3999755851924192"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_4 = """
<namedStyle builtinId="41" name="Accent4" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="7"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_4_20 = """
<namedStyle builtinId="42" name="20 % - Accent4" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="7" tint="0.7999816888943144"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_4_40 = """
<namedStyle builtinId="43" name="40 % - Accent4" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="7" tint="0.5999938962981048"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_4_60 = """
<namedStyle builtinId="44" name="60 % - Accent4" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="7" tint="0.3999755851924192"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_5 = """
<namedStyle builtinId="45" name="Accent5" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="8"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_5_20 = """
<namedStyle builtinId="46" name="20 % - Accent5" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="8" tint="0.7999816888943144"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_5_40 = """
<namedStyle builtinId="47" name="40 % - Accent5" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="8" tint="0.5999938962981048"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_5_60 = """
<namedStyle builtinId="48" name="60 % - Accent5" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="8" tint="0.3999755851924192"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_6 = """
<namedStyle builtinId="49" name="Accent6" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="9"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_6_20 = """
<namedStyle builtinId="50" name="20 % - Accent6" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="9" tint="0.7999816888943144"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_6_40 = """
<namedStyle builtinId="51" name="40 % - Accent6" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="9" tint="0.5999938962981048"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_6_60 = """
<namedStyle builtinId="52" name="60 % - Accent6" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="9" tint="0.3999755851924192"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
pandas_highlight = """
<namedStyle hidden="0" name="Pandas">
<alignment horizontal="center"/>
<border>
<left style="thin"><color rgb="00000000"/></left>
<right style="thin"><color rgb="00000000"/></right>
<top style="thin"><color rgb="00000000"/></top>
<bottom style="thin"><color rgb="00000000"/></bottom>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<b val="1"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
# Registry of every builtin named style, keyed by its Excel display name and
# parsed once at import time from the XML literals defined above.
styles = {
    'Normal': NamedStyle.from_tree(fromstring(normal)),
    'Comma': NamedStyle.from_tree(fromstring(comma)),
    'Currency': NamedStyle.from_tree(fromstring(currency)),
    'Percent': NamedStyle.from_tree(fromstring(percent)),
    'Comma [0]': NamedStyle.from_tree(fromstring(comma_0)),
    'Currency [0]': NamedStyle.from_tree(fromstring(currency_0)),
    'Hyperlink': NamedStyle.from_tree(fromstring(hyperlink)),
    'Followed Hyperlink': NamedStyle.from_tree(fromstring(followed_hyperlink)),
    'Note': NamedStyle.from_tree(fromstring(note)),
    'Warning Text': NamedStyle.from_tree(fromstring(warning)),
    'Title': NamedStyle.from_tree(fromstring(title)),
    'Headline 1': NamedStyle.from_tree(fromstring(headline_1)),
    'Headline 2': NamedStyle.from_tree(fromstring(headline_2)),
    'Headline 3': NamedStyle.from_tree(fromstring(headline_3)),
    'Headline 4': NamedStyle.from_tree(fromstring(headline_4)),
    'Input': NamedStyle.from_tree(fromstring(input)),
    'Output': NamedStyle.from_tree(fromstring(output)),
    'Calculation': NamedStyle.from_tree(fromstring(calculation)),
    'Check Cell': NamedStyle.from_tree(fromstring(check_cell)),
    'Linked Cell': NamedStyle.from_tree(fromstring(linked_cell)),
    'Total': NamedStyle.from_tree(fromstring(total)),
    'Good': NamedStyle.from_tree(fromstring(good)),
    'Bad': NamedStyle.from_tree(fromstring(bad)),
    'Neutral': NamedStyle.from_tree(fromstring(neutral)),
    'Accent1': NamedStyle.from_tree(fromstring(accent_1)),
    '20 % - Accent1': NamedStyle.from_tree(fromstring(accent_1_20)),
    '40 % - Accent1': NamedStyle.from_tree(fromstring(accent_1_40)),
    '60 % - Accent1': NamedStyle.from_tree(fromstring(accent_1_60)),
    'Accent2': NamedStyle.from_tree(fromstring(accent_2)),
    '20 % - Accent2': NamedStyle.from_tree(fromstring(accent_2_20)),
    '40 % - Accent2': NamedStyle.from_tree(fromstring(accent_2_40)),
    '60 % - Accent2': NamedStyle.from_tree(fromstring(accent_2_60)),
    'Accent3': NamedStyle.from_tree(fromstring(accent_3)),
    '20 % - Accent3': NamedStyle.from_tree(fromstring(accent_3_20)),
    '40 % - Accent3': NamedStyle.from_tree(fromstring(accent_3_40)),
    '60 % - Accent3': NamedStyle.from_tree(fromstring(accent_3_60)),
    'Accent4': NamedStyle.from_tree(fromstring(accent_4)),
    '20 % - Accent4': NamedStyle.from_tree(fromstring(accent_4_20)),
    '40 % - Accent4': NamedStyle.from_tree(fromstring(accent_4_40)),
    '60 % - Accent4': NamedStyle.from_tree(fromstring(accent_4_60)),
    'Accent5': NamedStyle.from_tree(fromstring(accent_5)),
    '20 % - Accent5': NamedStyle.from_tree(fromstring(accent_5_20)),
    '40 % - Accent5': NamedStyle.from_tree(fromstring(accent_5_40)),
    '60 % - Accent5': NamedStyle.from_tree(fromstring(accent_5_60)),
    'Accent6': NamedStyle.from_tree(fromstring(accent_6)),
    '20 % - Accent6': NamedStyle.from_tree(fromstring(accent_6_20)),
    '40 % - Accent6': NamedStyle.from_tree(fromstring(accent_6_40)),
    '60 % - Accent6': NamedStyle.from_tree(fromstring(accent_6_60)),
    'Explanatory Text': NamedStyle.from_tree(fromstring(explanatory)),
    'Pandas': NamedStyle.from_tree(fromstring(pandas_highlight)),
}
| |
"""
Classes and subroutines dealing with network connections and related topics.
"""
from __future__ import with_statement
from functools import wraps
import getpass
import os
import re
import time
import socket
import sys
from StringIO import StringIO
from fabric.auth import get_password, set_password
from fabric.utils import handle_prompt_abort, warn
from fabric.exceptions import NetworkError
# Import the SSH layer (paramiko) up front so a missing or broken install
# fails fast at import time with a readable message, instead of a confusing
# traceback mid-run.
try:
    import warnings
    # Paramiko of this era emits DeprecationWarnings on import; silence them
    # before importing so users don't see noise they can't act on.
    warnings.simplefilter('ignore', DeprecationWarning)
    import paramiko as ssh
except ImportError, e:
    import traceback
    traceback.print_exc()
    msg = """
There was a problem importing our SSH library (see traceback above).
Please make sure all dependencies are installed and importable.
""".rstrip()
    sys.stderr.write(msg + '\n')
    sys.exit(1)
# Matches an (optionally bracketed) IPv6 literal with an optional %zone
# suffix and an optional :port, e.g. "[::1]:2222" or "fe80::1".
# Raw string: the pattern contains backslash escapes (\[, \], \d) that would
# otherwise rely on Python passing invalid string escapes through verbatim,
# which is deprecated behavior.
ipv6_regex = re.compile(
    r'^\[?(?P<host>[0-9A-Fa-f:]+(?:%[a-z]+\d+)?)\]?(:(?P<port>\d+))?$')
def direct_tcpip(client, host, port):
    """
    Open and return a 'direct-tcpip' channel to ``host:port`` via ``client``.

    ``client`` must expose ``get_transport()`` (e.g. an ``SSHClient``);
    ``port`` may be a string and is coerced to int.
    """
    transport = client.get_transport()
    destination = (host, int(port))
    origin = ('', 0)
    return transport.open_channel('direct-tcpip', destination, origin)
def is_key_load_error(e):
    """
    True when ``e`` is exactly paramiko's "Unable to parse key file" error.

    Checks the concrete class (not isinstance) so SSHException subclasses
    with other meanings are not matched.
    """
    if e.__class__ is not ssh.SSHException:
        return False
    return 'Unable to parse key file' in str(e)
def _tried_enough(tries):
    """Return True once ``tries`` reaches env.connection_attempts."""
    from fabric.state import env
    allowed = env.connection_attempts
    return tries >= allowed
def get_gateway(host, port, cache, replace=False):
    """
    Create and return a gateway socket, if one is needed.

    This function checks ``env`` for gateway or proxy-command settings and
    returns the necessary socket-like object for use by a final host
    connection. An explicit ``env.gateway`` takes precedence over an SSH
    config ``ProxyCommand``.

    :param host:
        Hostname of target server.
    :param port:
        Port to connect to on target server.
    :param cache:
        A ``HostConnectionCache`` object, in which gateway ``SSHClient``
        objects are to be retrieved/cached.
    :param replace:
        Whether to forcibly replace a cached gateway client object.
    :returns:
        A ``socket.socket``-like object, or ``None`` if none was created.
    """
    from fabric.state import env, output
    sock = None
    proxy_command = ssh_config().get('proxycommand', None)
    if env.gateway:
        gateway = normalize_to_string(env.gateway)
        # ensure initial gateway connection
        if replace or gateway not in cache:
            if output.debug:
                print "Creating new gateway connection to %r" % gateway
            # seek_gateway=False here: the gateway connection itself must not
            # recurse into gateway setup.
            cache[gateway] = connect(*normalize(gateway) + (cache, False))
        # now we should have an open gw connection and can ask it for a
        # direct-tcpip channel to the real target. (bypass cache's own
        # __getitem__ override to avoid hilarity - this is usually called
        # within that method.)
        sock = direct_tcpip(dict.__getitem__(cache, gateway), host, port)
    elif proxy_command:
        # No explicit gateway: fall back to the SSH config's ProxyCommand.
        sock = ssh.ProxyCommand(proxy_command)
    return sock
class HostConnectionCache(dict):
    """
    Dict subclass allowing for caching of host connections/clients.

    Client connections are created lazily: requesting a missing key builds a
    new connection (including any gateway/``ProxyCommand`` socket setup via
    the module-level helpers) and caches it; later requests return the cached
    object.

    Keys are Fabric host strings -- optional ``user@``, mandatory hostname,
    optional ``:`` + port -- e.g. ``example.com``, ``user@example.com``, or
    ``bob@smith.org:222``. Missing user/port default to ``env.user`` and port
    22 via normalization, so ``example.com`` and ``example.com:22`` name the
    same entry, while differing explicit users or ports each get their own
    connection.
    """
    def connect(self, key):
        """
        Force a new connection to ``key`` host string.
        """
        from fabric.state import env
        user, host, port = normalize(key)
        normalized = normalize_to_string(key)
        # If the target *is* the gateway host, don't try to gateway the
        # connection to itself.
        seek_gateway = True
        if env.gateway:
            seek_gateway = normalize_to_string(env.gateway) != normalized
        self[normalized] = connect(
            user, host, port, cache=self, seek_gateway=seek_gateway)

    def __getitem__(self, key):
        """
        Autoconnect + return connection object
        """
        normalized = normalize_to_string(key)
        if normalized not in self:
            self.connect(normalized)
        return dict.__getitem__(self, normalized)

    #
    # Dict overrides that normalize input keys
    #

    def __setitem__(self, key, value):
        normalized = normalize_to_string(key)
        return dict.__setitem__(self, normalized, value)

    def __delitem__(self, key):
        normalized = normalize_to_string(key)
        return dict.__delitem__(self, normalized)

    def __contains__(self, key):
        normalized = normalize_to_string(key)
        return dict.__contains__(self, normalized)
def ssh_config(host_string=None):
    """
    Return ssh configuration dict for current env.host_string host value.

    Memoizes the loaded SSH config file (in ``env._ssh_config``), but not the
    specific per-host results.

    This function performs the necessary "is SSH config enabled?" checks and
    will simply return an empty dict if not. If SSH config *is* enabled but
    the file at env.ssh_config_path cannot be read, it warns and returns an
    empty dict.

    May give an explicit host string as ``host_string``.
    """
    from fabric.state import env
    if not env.use_ssh_config:
        return {}
    # Parse + memoize the config file on first use.
    if '_ssh_config' not in env:
        path = os.path.expanduser(env.ssh_config_path)
        try:
            parsed = ssh.SSHConfig()
            with open(path) as fd:
                parsed.parse(fd)
            env._ssh_config = parsed
        except IOError:
            warn("Unable to load SSH config file '%s'" % path)
            return {}
    lookup_host = parse_host_string(host_string or env.host_string)['host']
    return env._ssh_config.lookup(lookup_host)
def key_filenames():
    """
    Returns list of SSH key filenames for the current env.host_string.

    Takes into account ssh_config and env.key_filename, including
    normalization to a list. Also performs ``os.path.expanduser`` expansion
    on any key filenames.
    """
    from fabric.state import env
    configured = env.key_filename
    # A single path (or None) is coerced into a one-element list for ease of
    # use.
    if isinstance(configured, basestring) or configured is None:
        configured = [configured]
    # Drop falsy entries such as the default empty-string value.
    keys = [k for k in configured if k]
    # Honor any IdentityFile directives from the SSH config.
    # (Assumed to be a list, as we require Paramiko 1.10+.)
    conf = ssh_config()
    if 'identityfile' in conf:
        keys.extend(conf['identityfile'])
    return [os.path.expanduser(k) for k in keys]
def key_from_env(passphrase=None):
"""
Returns a paramiko-ready key from a text string of a private key
"""
from fabric.state import env, output
if 'key' in env:
if output.debug:
# NOTE: this may not be the most secure thing; OTOH anybody running
# the process must by definition have access to the key value,
# so only serious problem is if they're logging the output.
sys.stderr.write("Trying to honor in-memory key %r\n" % env.key)
for pkey_class in (ssh.rsakey.RSAKey, ssh.dsskey.DSSKey):
if output.debug:
sys.stderr.write("Trying to load it as %s\n" % pkey_class)
try:
return pkey_class.from_private_key(StringIO(env.key), passphrase)
except Exception, e:
# File is valid key, but is encrypted: raise it, this will
# cause cxn loop to prompt for passphrase & retry
if 'Private key file is encrypted' in e:
raise
# Otherwise, it probably means it wasn't a valid key of this
# type, so try the next one.
else:
pass
def parse_host_string(host_string):
    """
    Split a ``[user@]host[:port]`` string into its components.

    Returns a dict with ``user``, ``host`` and ``port`` keys; absent (or
    empty) pieces come back as ``None``. IPv6 literals may be wrapped in
    square brackets to disambiguate the host/port colon.
    """
    # Peel the optional user prefix off the right-most '@'.
    user, at, hostport = host_string.rpartition('@')
    user = user if at and user else None
    if hostport.count(':') > 1:
        # More than one colon: IPv6 literal (optionally bracketed, optionally
        # suffixed with :port).
        groups = ipv6_regex.match(hostport).groupdict()
        host = groups['host'] or None
        port = groups['port'] or None
    else:
        # Hostname or IPv4 address, with at most one ':port' suffix.
        head, colon, tail = hostport.rpartition(':')
        if colon:
            host = head or None
            port = tail or None
        else:
            host = tail or None
            port = None
    return {'user': user, 'host': host, 'port': port}
def normalize(host_string, omit_port=False):
    """
    Normalizes a given host string, returning explicit host, user, port.

    If ``omit_port`` is given and is True, only the host and user are
    returned.

    This function will process SSH config files if Fabric is configured to do
    so, and will use them to fill in some default values or swap in hostname
    aliases.

    Regarding SSH port used:

    * Ports explicitly given within host strings always win, no matter what.
    * When the host string lacks a port, SSH-config driven port configurations
      are used next.
    * When the SSH config doesn't specify a port (at all - including a default
      ``Host *`` block), Fabric's internal setting ``env.port`` is consulted.
    * If ``env.port`` is empty, ``env.default_port`` is checked (which should
      always be, as one would expect, port ``22``).
    """
    from fabric.state import env
    # Gracefully handle "empty" input by returning empty output
    if not host_string:
        return ('', '') if omit_port else ('', '', '')
    # Parse host string (need this early on to look up host-specific
    # ssh_config values)
    r = parse_host_string(host_string)
    host = r['host']
    # Env values (using defaults if somehow earlier defaults were replaced
    # with empty values)
    user = env.user or env.local_user
    # SSH config data (empty dict when SSH config use is disabled)
    conf = ssh_config(host_string)
    # Only use ssh_config values if the env value appears unmodified from
    # the true defaults. If the user has tweaked them, that new value
    # takes precedence.
    if user == env.local_user and 'user' in conf:
        user = conf['user']
    # Also override host if needed (SSH config HostName aliasing)
    if 'hostname' in conf:
        host = conf['hostname']
    # Merge explicit user/port values with the env/ssh_config derived ones
    # (Host is already done at this point.)
    user = r['user'] or user
    if omit_port:
        return user, host
    # determine port from ssh config if enabled
    ssh_config_port = None
    if env.use_ssh_config:
        ssh_config_port = conf.get('port', None)
    # port priority order (as in docstring): explicit > ssh_config > env >
    # default
    port = r['port'] or ssh_config_port or env.port or env.default_port
    return user, host, port
def to_dict(host_string):
    """Expand ``host_string`` into a dict of its normalized components."""
    parts = dict(zip(('user', 'host', 'port'), normalize(host_string)))
    parts['host_string'] = host_string
    return parts
def from_dict(arg):
    """Inverse of `to_dict`: collapse a host dict back into a host string."""
    user = arg['user']
    host = arg['host']
    port = arg['port']
    return join_host_strings(user, host, port)
def denormalize(host_string):
    """
    Strips out default values for the given host string.

    If the user part is the default user, it is removed;
    if the port is port 22, it also is removed.
    """
    from fabric.state import env
    parts = parse_host_string(host_string)
    prefix = ''
    if parts['user'] is not None and parts['user'] != env.user:
        prefix = parts['user'] + '@'
    suffix = ''
    if parts['port'] is not None and parts['port'] != '22':
        suffix = ':' + parts['port']
    host = parts['host']
    # Re-bracket IPv6 literals only when a port suffix remains.
    if suffix and host.count(':') > 1:
        host = '[%s]' % host
    return prefix + host + suffix
def join_host_strings(user, host, port=None):
    """
    Turns user/host/port strings into ``user@host:port`` combined string.

    This function is not responsible for handling missing user/port strings;
    for that, see the ``normalize`` function.

    If ``host`` looks like an IPv6 address it is enclosed in square brackets
    (needed to separate host from port). If ``port`` is omitted or falsy, the
    returned string is simply ``user@host``.
    """
    if not port:
        return "%s@%s" % (user, host)
    # More than one colon means IPv6 literal: bracket it.
    if host.count(':') > 1:
        return "%s@[%s]:%s" % (user, host, port)
    return "%s@%s:%s" % (user, host, port)
def normalize_to_string(host_string):
    """
    normalize() returns a tuple; this returns another valid host string.
    """
    user, host, port = normalize(host_string)
    return join_host_strings(user, host, port)
def connect(user, host, port, cache, seek_gateway=True):
    """
    Create and return a new SSHClient instance connected to given host.

    Loops until a session is established, re-prompting for passwords on
    authentication failures and retrying transient banner/socket errors up to
    ``env.connection_attempts`` times before raising ``NetworkError``.

    :param user: Username to connect as.
    :param host: Network hostname.
    :param port: SSH daemon port.
    :param cache:
        A ``HostConnectionCache`` instance used to cache/store gateway hosts
        when gatewaying is enabled.
    :param seek_gateway:
        Whether to try setting up a gateway socket for this connection. Used so
        the actual gateway connection can prevent recursion.
    :returns: a connected ``ssh.SSHClient`` object.
    """
    from fabric.state import env, output
    #
    # Initialization
    #
    # Init client
    client = ssh.SSHClient()
    # Load system hosts file (e.g. /etc/ssh/ssh_known_hosts)
    known_hosts = env.get('system_known_hosts')
    if known_hosts:
        client.load_system_host_keys(known_hosts)
    # Load known host keys (e.g. ~/.ssh/known_hosts) unless user says not to.
    if not env.disable_known_hosts:
        client.load_system_host_keys()
    # Unless user specified not to, accept/add new, unknown host keys
    if not env.reject_unknown_hosts:
        client.set_missing_host_key_policy(ssh.AutoAddPolicy())
    #
    # Connection attempt loop
    #
    # Initialize loop variables
    connected = False
    password = get_password(user, host, port, login_only=True)
    tries = 0
    sock = None
    # Loop until successful connect (keep prompting for new password)
    while not connected:
        # Attempt connection
        try:
            tries += 1
            # (Re)connect gateway socket, if needed.
            # Nuke cached client object if not on initial try.
            if seek_gateway:
                sock = get_gateway(host, port, cache, replace=tries > 0)
            # Set up kwargs (this lets us skip GSS-API kwargs unless explicitly
            # set; otherwise older Paramiko versions will be cranky.)
            kwargs = dict(
                hostname=host,
                port=int(port),
                username=user,
                password=password,
                pkey=key_from_env(password),
                key_filename=key_filenames(),
                timeout=env.timeout,
                allow_agent=not env.no_agent,
                look_for_keys=not env.no_keys,
                sock=sock,
            )
            # Forward only GSS-API settings that were explicitly configured
            # in env (gss_auth / gss_deleg_creds / gss_kex).
            for suffix in ('auth', 'deleg_creds', 'kex'):
                name = "gss_" + suffix
                val = env.get(name, None)
                if val is not None:
                    kwargs[name] = val
            # Ready to connect
            client.connect(**kwargs)
            connected = True
            # set a keepalive if desired
            if env.keepalive:
                client.get_transport().set_keepalive(env.keepalive)
            return client
        # BadHostKeyException corresponds to key mismatch, i.e. what on the
        # command line results in the big banner error about man-in-the-middle
        # attacks.
        except ssh.BadHostKeyException, e:
            raise NetworkError("Host key for %s did not match pre-existing key! Server's key was changed recently, or possible man-in-the-middle attack." % host, e)
        # Prompt for new password to try on auth failure
        except (
            ssh.AuthenticationException,
            ssh.PasswordRequiredException,
            ssh.SSHException
        ), e:
            msg = str(e)
            # If we get SSHExceptionError and the exception message indicates
            # SSH protocol banner read failures, assume it's caused by the
            # server load and try again.
            #
            # If we are using a gateway, we will get a ChannelException if
            # connection to the downstream host fails. We should retry.
            if (e.__class__ is ssh.SSHException \
                    and msg == 'Error reading SSH protocol banner') \
                    or e.__class__ is ssh.ChannelException:
                if _tried_enough(tries):
                    raise NetworkError(msg, e)
                continue
            # For whatever reason, empty password + no ssh key or agent
            # results in an SSHException instead of an
            # AuthenticationException. Since it's difficult to do
            # otherwise, we must assume empty password + SSHException ==
            # auth exception.
            #
            # Conversely: if we get SSHException and there
            # *was* a password -- it is probably something non auth
            # related, and should be sent upwards. (This is not true if the
            # exception message does indicate key parse problems.)
            #
            # This also holds true for rejected/unknown host keys: we have to
            # guess based on other heuristics.
            if (
                e.__class__ is ssh.SSHException
                and (
                    password
                    or msg.startswith('Unknown server')
                    or "not found in known_hosts" in msg
                )
                and not is_key_load_error(e)
            ):
                raise NetworkError(msg, e)
            # Otherwise, assume an auth exception, and prompt for new/better
            # password.
            # Paramiko doesn't handle prompting for locked private
            # keys (i.e. keys with a passphrase and not loaded into an agent)
            # so we have to detect this and tweak our prompt slightly.
            # (Otherwise, however, the logic flow is the same, because
            # ssh's connect() method overrides the password argument to be
            # either the login password OR the private key passphrase. Meh.)
            #
            # NOTE: This will come up if you normally use a
            # passphrase-protected private key with ssh-agent, and enter an
            # incorrect remote username, because ssh.connect:
            # * Tries the agent first, which will fail as you gave the wrong
            # username, so obviously any loaded keys aren't gonna work for a
            # nonexistent remote account;
            # * Then tries the on-disk key file, which is passphrased;
            # * Realizes there's no password to try unlocking that key with,
            # because you didn't enter a password, because you're using
            # ssh-agent;
            # * In this condition (trying a key file, password is None)
            # ssh raises PasswordRequiredException.
            text = None
            if e.__class__ is ssh.PasswordRequiredException \
                    or is_key_load_error(e):
                # NOTE: we can't easily say WHICH key's passphrase is needed,
                # because ssh doesn't provide us with that info, and
                # env.key_filename may be a list of keys, so we can't know
                # which one raised the exception. Best not to try.
                prompt = "[%s] Passphrase for private key"
                text = prompt % env.host_string
            password = prompt_for_password(text)
            # Update env.password, env.passwords if empty
            set_password(user, host, port, password)
        # Ctrl-D / Ctrl-C for exit
        # TODO: this may no longer actually serve its original purpose and may
        # also hide TypeErrors from paramiko. Double check in v2.
        except (EOFError, TypeError):
            # Print a newline (in case user was sitting at prompt)
            print('')
            sys.exit(0)
        # Handle DNS error / name lookup failure
        except socket.gaierror, e:
            raise NetworkError('Name lookup failed for %s' % host, e)
        # Handle timeouts and retries, including generic errors
        # NOTE: In 2.6, socket.error subclasses IOError
        except socket.error, e:
            not_timeout = type(e) is not socket.timeout
            giving_up = _tried_enough(tries)
            # Baseline error msg for when debug is off
            msg = "Timed out trying to connect to %s" % host
            # Expanded for debug on
            err = msg + " (attempt %s of %s)" % (tries, env.connection_attempts)
            if giving_up:
                err += ", giving up"
            # NOTE(review): this appends a stray closing paren -- the
            # "(attempt x of y)" template above is already closed. Debug-only
            # cosmetic issue; confirm before changing.
            err += ")"
            # Debuggin'
            if output.debug:
                sys.stderr.write(err + '\n')
            # Having said our piece, try again
            if not giving_up:
                # Sleep if it wasn't a timeout, so we still get timeout-like
                # behavior
                if not_timeout:
                    time.sleep(env.timeout)
                continue
            # Override error msg if we were retrying other errors
            if not_timeout:
                # NOTE(review): e[1] relies on Python 2 indexable exceptions
                # (equivalent to e.strerror); would break under Python 3.
                msg = "Low level socket error connecting to host %s on port %s: %s" % (
                    host, port, e[1]
                )
            # Here, all attempts failed. Tweak error msg to show # tries.
            # TODO: find good humanization module, jeez
            s = "s" if env.connection_attempts > 1 else ""
            msg += " (tried %s time%s)" % (env.connection_attempts, s)
            raise NetworkError(msg, e)
        # Ensure that if we terminated without connecting and we were given an
        # explicit socket, close it out.
        finally:
            if not connected and sock is not None:
                sock.close()
def _password_prompt(prompt, stream):
    """Low-level password read: delegate to getpass with an ASCII prompt."""
    # getpass (on Windows, at least) can choke on Unicode prompts, so strip
    # the prompt down to plain ASCII before handing it over.
    ascii_prompt = prompt.encode('ascii', 'ignore')
    return getpass.getpass(ascii_prompt, stream)
def prompt_for_password(prompt=None, no_colon=False, stream=None):
    """
    Prompts for and returns a new password if required; otherwise, returns
    None.

    A trailing colon is appended unless ``no_colon`` is True. Empty input is
    rejected: the user is re-prompted until a non-empty password is entered.

    ``prompt_for_password`` autogenerates the user prompt based on the current
    host being connected to; pass a string ``prompt`` to override. ``stream``
    is where the prompt is printed (default: ``sys.stderr``).
    """
    from fabric.state import env
    handle_prompt_abort("a connection or sudo password")
    stream = stream or sys.stderr
    # Build the prompt text: explicit override wins over the generated one.
    if prompt is not None:
        password_prompt = prompt
    else:
        password_prompt = "[%s] Login password for '%s'" % (env.host_string,
                                                           env.user)
    if not no_colon:
        password_prompt += ": "
    # Keep asking until we get something non-empty; an empty password would
    # just waste a network round-trip.
    new_password = _password_prompt(password_prompt, stream)
    while not new_password:
        print("Sorry, you can't enter an empty password. Please try again.")
        new_password = _password_prompt(password_prompt, stream)
    return new_password
def needs_host(func):
    """
    Prompt user for value of ``env.host_string`` when ``env.host_string`` is
    empty.

    A safety net for users who forgot to specify a host/host list; wrap any
    operation that requires a network connection with it. Due to how commands
    are executed per-host in ``main()``, only a single host string can be
    prompted for.

    Because this decorator sets ``env.host_string``, it prompts at most once
    per command; ``main()`` clears ``env.host_string`` between commands, so
    each host-less command triggers its own prompt.
    """
    from fabric.state import env

    @wraps(func)
    def host_prompting_wrapper(*args, **kwargs):
        # Loop (rather than a single prompt) so blank input re-asks.
        while not env.get('host_string', False):
            handle_prompt_abort("the target host connection string")
            entered = raw_input("No hosts found. Please specify (single)"
                                " host string for connection: ")
            env.update(to_dict(entered))
        return func(*args, **kwargs)

    # Expose the wrapped function for introspection/testing.
    host_prompting_wrapper.undecorated = func
    return host_prompting_wrapper
def disconnect_all():
    """
    Disconnect from all currently connected servers.

    Used at the end of ``fab``'s main loop, and also intended for use by
    library users.
    """
    from fabric.state import connections, output
    # Iterate over a snapshot of the keys: entries are deleted as we go, and
    # mutating a dict while iterating its live key view is an error on
    # Python 3 (and fragile everywhere).
    for key in list(connections.keys()):
        if output.status:
            # Here we can't use the py3k print(x, end=" ")
            # because 2.5 backwards compatibility
            sys.stdout.write("Disconnecting from %s... " % denormalize(key))
        connections[key].close()
        del connections[key]
        if output.status:
            sys.stdout.write("done.\n")
| |
#!/usr/bin/env python2.7
# tool_versions.py v1.1 Creates "SW" versions json string for a particular DX applet.
# Write request to stdout and verbose info to stderr. This allows easy use
# in dx app scripts.
#
# Creates versions json string for a particular applet
import sys
import argparse
import json
import commands
# APP_TOOLS is a dict keyed by applet script name with a list of tools that it uses.
# Each listed tool must have a matching version-discovery command in ALL_TOOLS below.
APP_TOOLS = {
    "dnase-index-bwa":      ["dnase_index_bwa.sh", "bwa", "hotspot2", "bedops"],
    "dnase-align-bwa-pe":   ["dnase_align_bwa_pe.sh", "bwa", "samtools", "edwBamStats",
                             "trim-adapters-illumina", "fastq_umi_add.py (stampipes)"],
    "dnase-align-bwa-se":   ["dnase_align_bwa_se.sh", "bwa", "samtools", "edwBamStats",
                             "cutadapt"],
    "dnase-filter-pe":      ["dnase_filter_pe.sh", "samtools", "filter_reads.py (stampipes)",
                             "java", "picard"],
    "dnase-filter-se":      ["dnase_filter_se.sh", "samtools", "java", "picard"],
    "dnase-qc-bam":         ["dnase_qc_bam.sh", "samtools", "edwBamFilter", "edwBamStats", # "R",
                             "Rscript", "phantompeakqualtools", "caTools", "snow", "spp", "gawk",
                             "hotspot1", "hotspot.py", "bedops", "bedtools"],
    "dnase-density":        ["dnase_density.sh", "samtools", "bedops", "bedGraphToBigWig", "gawk"],
    "dnase-call-hotspots":  ["dnase_hotspot.sh", "samtools", "hotspot2", "bedops", "modwt", "gawk", "mawk",
                             "bedToBigBed", "bedGraphToBigWig"],
    "dnase-rep-corr":       ["dnase_rep_corr.sh", "chromCor.Rscript", "bigWigToWig", "bedops"],
    # special for optional output
    "dnase-index-bwa(hotspot2)": ["dnase_index_bwa.sh", "hotspot2", "bedops"],
    }
# Virtual apps only differ from their parent by name/version.
# Keys are the virtual applet names; values are the parent entry in APP_TOOLS.
VIRTUAL_APPS = {
    "dnase-qc-bam-alt":        "dnase-qc-bam",
    "dnase-density-alt":       "dnase-density",
    "dnase-call-hotspots-alt": "dnase-call-hotspots",
    "dnase-idr-alt":           "dnase-idr",
    "dnase-rep-corr-alt":      "dnase-rep-corr",
    }
# ALL_TOOLS contains printable tool name (key) and the command that is used to determine version.
# Commands are run through commands.getstatusoutput(); unversioned tools report an md5sum instead.
ALL_TOOLS = {"Anaconda3":               "ls Anaconda3*.sh | head -1 | cut -d - -f 2",
             "bedGraphPack":            "bedGraphPack 2>&1 | grep 'bedGraphPack v' | awk '{print $2}'",
             "bedGraphToBigWig":        "bedGraphToBigWig 2>&1 | grep 'bedGraphToBigWig v' | awk '{print $2$3}'",
             "bedops":                  "bedops --version 2>&1 | grep version | awk '{print $2}'",
             # "bam2bed (bedops)":        "bedops --version 2>&1 | grep version | awk '{print $2}'", # Note: no version.. subsituting bedops
             # "bedmap (bedops)":         "bedmap --version 2>&1 | grep version | awk '{print $2}'",
             # "convert2bed (bedops)":    "convert2bed --version 2>&1 | grep version | awk '{print $2}'",
             # "sort-bed (bedops)":       "sort-bed --version 2>&1 | grep version | awk '{print $2}'",
             # "starch (bedops)":         "starch --version 2>&1 | grep version | awk '{print $3}'",
             # "starchcat (bedops)":      "starchcat --version 2>&1 | grep version | awk '{print $3}'",
             # "unstarch (bedops)":       "unstarch --version 2>&1 | grep version | awk '{print $3}'",
             "bedToBigBed":             "bedToBigBed 2>&1 | grep 'bedToBigBed v' | awk '{print $3}'",
             "bedtools":                "bedtools --version 2>&1 | awk '{print $2}'",
             # "bamToBed (bedtools)":     "bamToBed -h 2>&1 | grep Version | awk '{print $2}'",
             # "intersectBed (bedtools)": "intersectBed 2>&1 | grep Version | awk '{print $2}'",
             # "shuffleBed (bedtools)":   "shuffleBed -h 2>&1 | grep Version | awk '{print $2}'",
             "bigBedToBed":             "bigBedToBed 2>&1 | grep 'bigBedToBed v' | awk '{print $2}'",
             "bigWigCorrelate":         "md5sum /usr/bin/bigWigCorrelate | awk '{printf \"unversioned %-8.8s\",$1}'",
             "bwa":                     "bwa 2>&1 | grep Version | awk '{print $2}'",
             "caTools":                 "grep caTools_ phantompeakqualtools/install.log | head -1 | sed 's/_/ /' | awk '{print $4}' | sed 's/\.tar\.gz.*//'",
             "edwBamFilter":            "edwBamFilter 2>&1 | grep 'edwBamFilter v' | awk '{print $2}'",
             "edwBamStats":             "edwBamStats 2>&1 | grep 'edwBamStats v' | awk '{print $2}'",
             "edwComparePeaks":         "md5sum /usr/bin/edwComparePeaks | awk '{printf \"unversioned %-8.8s\",$1}'", #"edwComparePeaks 2>&1 | grep 'edwComparePeaks -' | awk '{print $3,$4,$5,$6}'",
             "faSize":                  "md5sum /usr/bin/faSize | awk '{printf \"unversioned %-8.8s\",$1}'", #"bigWigCorrelate 2>&1 | grep 'bigWigCorrelate -' | awk '{print $3,$4,$5}'",
             "fastqStatsAndSubsample":  "fastqStatsAndSubsample 2>&1 | grep 'fastqStatsAndSubsample v' | awk '{print $2}'",
             # stampipes:
             "fastq_umi_add.py (stampipes)": "md5sum /usr/bin/fastq_umi_add.py | awk '{printf \"unversioned %-8.8s\",$1}'", # From https://github.com/StamLab/stampipes/tree/encode-release/scripts/umi/
             "filter_reads.py (stampipes)":  "md5sum ./filter_reads.py | awk '{printf \"unversioned %-8.8s\",$1}'", # From https://github.com/StamLab/stampipes/tree/encode-release/scripts/bwa/
             # "umi_sort_sam_annotate.awk (stampipes)":"md5sum /usr/bin/umi_sort_sam_annotate.awk | awk '{printf \"unversioned %-8.8s\",$1}'", # From https://github.com/StamLab/stampipes/tree/encode-release/scripts/umi/
             # "mark_umi_dups.mk (stampipes)": "md5sum /usr/bin/mark_duplicates.mk | awk '{printf \"unversioned %-8.8s\",$1}'", # From https://github.com/StamLab/stampipes/tree/encode-release/makefiles/umi/mark_duplicates.mk (dcc minimal change)
             "gawk":                    "gawk --version | grep Awk | awk '{print $3}'",
             "idr":                     "idr/bin/idr --version 2>&1 | grep IDR | awk '{print $2}'",
             "mawk":                    "mawk -W version 2>&1 | grep mawk | awk '{print $2}'",
             "hotspot1":                "hotspot 2>&1 | grep HotSpot | awk '{printf \"%s-%s\",$1,$2}'",
             "hotspot.py":              "hotspot.py -h | grep Version | awk '{print $8}'",
             "java":                    "java -version 2>&1 | head -1 | awk '{print $3}' | tr -d '\"'",
             "phantompeakqualtools":    "grep Version phantompeakqualtools/README.txt | awk '{print $2}'",
             "R":                       "R --version | grep 'R version' | awk '{print $3,$4}'",
             "Rscript":                 "Rscript --version 2>&1 | awk '{print $5,$6}'",
             "samtools":                "samtools 2>&1 | grep Version | awk '{print $2}'",
             "snow":                    "grep snow_ phantompeakqualtools/install.log | head -1 | sed 's/_/ /' | awk '{print $4}' | sed 's/\.tar\.gz.*//'",
             "spp":                     "grep spp_ phantompeakqualtools/installPkgs.R | sed 's/_/ /' | awk '{print $2}' | sed 's/\.tar\.gz.*//'",
             #"hotspot2":                "hotspot2 --version | awk '{print $3}'",
             #"hotspot2":                "[ -e /usr/bin/hotspot2.version ] && cat /usr/bin/hotspot2.version || hotspot2 --version | awk '{print $3}'",
             "hotspot2":                "hotspot2_part1 --version | awk '{print $3}'",
             "modwt":                   "md5sum /usr/bin/modwt | awk '{printf \"unversioned %-8.8s\",$1}'", # From https://github.com/StamLab/modwt/tree/1.0
             "picard":                  "java -jar ./picard.jar MarkDuplicates --version", # From https://github.com/broadinstitute/picard.git
             "pigz":                    "pigz --version 2>&1 | awk '{print $2}'",
             "trim-adapters-illumina":  "trim-adapters-illumina --version 2>&1 | awk '{print $3}'", # https://bitbucket.org/jvierstra/bio-tools/get/master.tar.gz https://bitbucket.org/jvierstra/bio-tools/src/6fe54fa5a3d9b5c930ee77e8ccd757b347c86ac1/apps/trim-adapters-illumina/?at=master
             "chromCor.Rscript":        "md5sum /usr/bin/chromCor.Rscript | awk '{printf \"unversioned %-8.8s\",$1}'", # emailed from Richard Sandstrom Will reside in our github
             "bigWigToWig":             "md5sum /usr/bin/bigWigToWig | awk '{printf \"unversioned %-8.8s\",$1}'",
             "dnase_index_bwa.sh":      "dnase_index_bwa.sh | grep usage | awk '{print $2}' | tr -d :",
             "dnase_align_bwa_pe.sh":   "dnase_align_bwa_pe.sh | grep usage | awk '{print $2}' | tr -d :",
             "dnase_align_bwa_se.sh":   "dnase_align_bwa_se.sh | grep usage | awk '{print $2}' | tr -d :",
             "dnase_filter_pe.sh":      "dnase_filter_pe.sh | grep usage | awk '{print $2}' | tr -d :",
             "dnase_filter_se.sh":      "dnase_filter_se.sh | grep usage | awk '{print $2}' | tr -d :",
             "dnase_qc_bam.sh":         "dnase_qc_bam.sh | grep usage | awk '{print $2}' | tr -d :",
             "dnase_density.sh":        "dnase_density.sh | grep usage | awk '{print $2}' | tr -d :",
             "dnase_hotspot.sh":        "dnase_hotspot.sh | grep usage | awk '{print $2}' | tr -d :",
             "dnase_rep_corr.sh":       "dnase_rep_corr.sh | grep usage | awk '{print $2}' | tr -d :",
             "cutadapt":                "cutadapt --version",
            }
def parse_dxjson(dxjson):
    '''Parses the dnanexus-executable.json file in the job directory to get applet name and version.'''
    with open(dxjson) as fh:
        meta = json.load(fh)
    applet = meta.get("name")
    # Prefer an explicit "version" field when present.
    if "version" in meta:
        return (applet, meta.get("version"))
    # Otherwise fall back to the trailing "(v...)" or "(virtual-...)" token
    # of the human-readable title.
    tail = meta.get("title").split(' ')[-1]
    appver = "unknown"
    if tail.endswith(')'):
        # Check the more specific "(virtual-" prefix first: it also starts
        # with "(v", so order matters here.
        if tail.startswith('(virtual-'):
            appver = tail[9:-1]
        elif tail.startswith('(v'):
            appver = tail[2:-1]
    return (applet, appver)
def main():
    # Prints version lines to stderr and a JSON versions string to stdout so
    # callers (dx app scripts) can capture the JSON while still seeing
    # progress. NOTE(review): this script is Python 2 only (print statements,
    # `commands` module).
    parser = argparse.ArgumentParser(description="Versions parser for a dx applet. " + \
                                                 "Prints version lines to stderr and json string to stdout. " + \
                                                 "MUST specify either --applet and --appver or --dxjson.")
    parser.add_argument('-a', '--applet', required=False,
                        help="Applet to print versions for")
    parser.add_argument('-av', '--appver', required=False,
                        help="Version of applet")
    parser.add_argument('-j', '--dxjson', required=False,
                        help="Use dnanexus json file to discover 'applet' and 'appver'")
    parser.add_argument('-k', '--key',
                        help='Prints just the value for this key.',
                        default=None,
                        required=False)
    parser.add_argument('-q', '--quiet', action="store_true", required=False, default=False,
                        help="Don't print versions to stderr.")
    parser.add_argument('-v', '--verbose', action="store_true", required=False, default=False,
                        help="Show the command-line that is used to get the version.")
    args = parser.parse_args(sys.argv[1:])
    # Require at least two command-line tokens (e.g. -j FILE, or -a X -av Y).
    if len(sys.argv) < 3:
        parser.print_usage()
        return
    # Must have either both --applet/--appver or a --dxjson file to parse.
    if (args.applet == None or args.appver == None) and args.dxjson == None:
        parser.print_help()
        return
    applet = args.applet
    appver = args.appver
    # --dxjson overrides any explicit --applet/--appver values.
    if args.dxjson != None:
        (applet, appver) = parse_dxjson(args.dxjson)
    versions = {}
    versions["DX applet"] = {applet: appver}
    if not args.quiet:
        sys.stderr.write("********\n")
        sys.stderr.write("* Running " + applet + ": " + appver + "\n")
    # Virtual applets share their parent's tool list.
    if applet in VIRTUAL_APPS:
        tools = APP_TOOLS[VIRTUAL_APPS[applet]]
    else:
        tools = APP_TOOLS[applet]
    # Run each tool's version-discovery shell command and collect the output.
    for tool in tools:
        cmd = ALL_TOOLS[tool]
        if args.verbose:
            sys.stderr.write("cmd> " + cmd + "\n")
        err, ver = commands.getstatusoutput(cmd)
        versions[tool] = ver
        if not args.quiet:
            sys.stderr.write("* " + tool + " version: " + ver + "\n")
    if not args.quiet:
        sys.stderr.write("********\n")
    # With --key, emit just that one value; otherwise dump the whole dict.
    if args.key != None:
        if args.key in versions:
            print versions[args.key]
            if not args.quiet:
                sys.stderr.write(versions[args.key] + '\n')
        elif args.key in versions["DX applet"]:
            print versions["DX applet"][args.key]
            if not args.quiet:
                sys.stderr.write(versions["DX applet"][args.key] + '\n')
        else:
            print ''
            if not args.quiet:
                sys.stderr.write('(not found)\n')
    else:
        print json.dumps(versions)
# Script entry point: run main() only when executed directly, not on import.
if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python2
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
import sys
import time
import random
import tempfile
import subprocess
import shutil
import argparse
# params overwrite priority:
# for default:
# default_params < {blackbox,whitebox}_default_params < args
# for simple:
# default_params < {blackbox,whitebox}_default_params <
# simple_default_params <
# {blackbox,whitebox}_simple_default_params < args
# for cf_consistency:
# default_params < {blackbox,whitebox}_default_params <
# cf_consistency_params < args
# for txn:
# default_params < {blackbox,whitebox}_default_params < txn_params < args
# Shared scratch file handed to db_stress via --expected_values_path; kept
# open for the lifetime of this script so the OS doesn't reclaim it.
expected_values_file = tempfile.NamedTemporaryFile()

# Baseline db_stress parameters for every test mode. Values may be callables
# (lambdas) that are re-evaluated per run by finalize_and_sanitize(), giving
# fresh random choices each time; plain values are fixed for the whole script.
default_params = {
    "acquire_snapshot_one_in": 10000,
    "block_size": 16384,
    "bloom_bits": lambda: random.choice([random.randint(0,19),
                                         random.lognormvariate(2.3, 1.3)]),
    "cache_index_and_filter_blocks": lambda: random.randint(0, 1),
    "cache_size": 1048576,
    "checkpoint_one_in": 1000000,
    "compression_type": lambda: random.choice(
        ["none", "snappy", "zlib", "bzip2", "lz4", "lz4hc", "xpress", "zstd"]),
    "bottommost_compression_type": lambda:
        "disable" if random.randint(0, 1) == 0 else
        random.choice(
            ["none", "snappy", "zlib", "bzip2", "lz4", "lz4hc", "xpress",
             "zstd"]),
    "checksum_type" : lambda: random.choice(["kCRC32c", "kxxHash", "kxxHash64"]),
    "compression_max_dict_bytes": lambda: 16384 * random.randint(0, 1),
    "compression_zstd_max_train_bytes": lambda: 65536 * random.randint(0, 1),
    "clear_column_family_one_in": 0,
    "compact_files_one_in": 1000000,
    "compact_range_one_in": 1000000,
    "delpercent": 4,
    "delrangepercent": 1,
    "destroy_db_initially": 0,
    "enable_pipelined_write": lambda: random.randint(0, 1),
    "expected_values_path": expected_values_file.name,
    "flush_one_in": 1000000,
    "get_live_files_one_in": 1000000,
    # Note: the following two are intentionally disabled as the corresponding
    # APIs are not guaranteed to succeed.
    "get_sorted_wal_files_one_in": 0,
    "get_current_wal_file_one_in": 0,
    # Temporarily disable hash index
    "index_type": lambda: random.choice([0,2]),
    "max_background_compactions": 20,
    "max_bytes_for_level_base": 10485760,
    "max_key": 100000000,
    "max_write_buffer_number": 3,
    "mmap_read": lambda: random.randint(0, 1),
    "nooverwritepercent": 1,
    "open_files": lambda : random.choice([-1, 500000]),
    "partition_filters": lambda: random.randint(0, 1),
    "pause_background_one_in": 1000000,
    "prefixpercent": 5,
    "progress_reports": 0,
    "readpercent": 45,
    "recycle_log_file_num": lambda: random.randint(0, 1),
    "reopen": 20,
    "snapshot_hold_ops": 100000,
    "long_running_snapshots": lambda: random.randint(0, 1),
    "subcompactions": lambda: random.randint(1, 4),
    "target_file_size_base": 2097152,
    "target_file_size_multiplier": 2,
    "use_direct_reads": lambda: random.randint(0, 1),
    "use_direct_io_for_flush_and_compaction": lambda: random.randint(0, 1),
    "use_full_merge_v1": lambda: random.randint(0, 1),
    "use_merge": lambda: random.randint(0, 1),
    "verify_checksum": 1,
    "write_buffer_size": 4 * 1024 * 1024,
    "writepercent": 35,
    "format_version": lambda: random.choice([2, 3, 4, 5, 5]),
    "index_block_restart_interval": lambda: random.choice(range(1, 16)),
    "use_multiget" : lambda: random.randint(0, 1),
    "periodic_compaction_seconds" :
        lambda: random.choice([0, 0, 1, 2, 10, 100, 1000]),
    "compaction_ttl" : lambda: random.choice([0, 0, 1, 2, 10, 100, 1000]),
    # Test small max_manifest_file_size in a smaller chance, as most of the
    # time we want manifest history to be preserved to help debug
    "max_manifest_file_size" : lambda : random.choice(
        [t * 16384 if t < 3 else 1024 * 1024 * 1024 for t in range(1, 30)]),
    # Sync mode might make test runs slower so running it in a smaller chance
    "sync" : lambda : random.choice(
        [1 if t == 0 else 0 for t in range(0, 20)]),
    # Disable compaction_readahead_size because the test is not passing.
    #"compaction_readahead_size" : lambda : random.choice(
    #    [0, 0, 1024 * 1024]),
    "db_write_buffer_size" : lambda: random.choice(
        [0, 0, 0, 1024 * 1024, 8 * 1024 * 1024, 128 * 1024 * 1024]),
    # NOTE(review): the next two are NOT lambdas, so they are randomized once
    # at import time and then fixed for every run -- confirm this is intended.
    "avoid_unnecessary_blocking_io" : random.randint(0, 1),
    "write_dbid_to_manifest" : random.randint(0, 1),
    "max_write_batch_group_size_bytes" : lambda: random.choice(
        [16, 64, 1024 * 1024, 16 * 1024 * 1024]),
    "level_compaction_dynamic_level_bytes" : True,
    "verify_checksum_one_in": 1000000,
    "verify_db_one_in": 100000,
    "continuous_verification_interval" : 0,
    "max_key_len": 3,
    "key_len_percent_dist": "1,30,69"
}

# Environment variable naming the directory test databases are created in.
_TEST_DIR_ENV_VAR = 'TEST_TMPDIR'
def get_dbname(test_name):
    """Return a fresh, empty database directory for *test_name*.

    Honors $TEST_TMPDIR when set (recreating the per-test subdirectory inside
    it); otherwise falls back to a brand-new mkdtemp directory.
    """
    base = os.environ.get(_TEST_DIR_ENV_VAR)
    if not base:
        return tempfile.mkdtemp(prefix='rocksdb_crashtest_' + test_name)
    dbname = base + "/rocksdb_crashtest_" + test_name
    # Wipe any leftovers from a previous run before recreating the dir.
    shutil.rmtree(dbname, True)
    os.mkdir(dbname)
    return dbname
def is_direct_io_supported(dbname):
    """Return True if files in *dbname* can be opened with O_DIRECT.

    Probes by opening a throwaway temp file inside the directory with
    ``os.O_DIRECT``; some filesystems (e.g. tmpfs) and platforms reject or
    lack it entirely.
    """
    with tempfile.NamedTemporaryFile(dir=dbname) as f:
        try:
            fd = os.open(f.name, os.O_DIRECT)
        except (AttributeError, OSError):
            # AttributeError: platform has no os.O_DIRECT (e.g. macOS);
            # OSError: the filesystem rejects direct I/O. The original bare
            # `except:` hid which of these fired, and also swallowed things
            # like KeyboardInterrupt.
            return False
        # The original leaked this descriptor; close the probe fd.
        os.close(fd)
    return True
# Overrides layered on top of default_params for blackbox runs (external
# kill-based crash testing).
blackbox_default_params = {
    # total time for this script to test db_stress
    "duration": 6000,
    # time for one db_stress instance to run
    "interval": 120,
    # since we will be killing anyway, use large value for ops_per_thread
    "ops_per_thread": 100000000,
    "set_options_one_in": 10000,
    "test_batches_snapshots": 1,
}

# Overrides for whitebox runs (db_stress kills itself via kill_random_test).
whitebox_default_params = {
    "duration": 10000,
    "log2_keys_per_lock": 10,
    "ops_per_thread": 200000,
    "random_kill_odd": 888887,
    "test_batches_snapshots": lambda: random.randint(0, 1),
}

# Overrides for --simple runs: single column family, simpler options.
simple_default_params = {
    "allow_concurrent_memtable_write": lambda: random.randint(0, 1),
    "column_families": 1,
    "max_background_compactions": 1,
    "max_bytes_for_level_base": 67108864,
    "memtablerep": "skip_list",
    "prefixpercent": 0,
    "readpercent": 50,
    "prefix_size" : -1,
    "target_file_size_base": 16777216,
    "target_file_size_multiplier": 1,
    "test_batches_snapshots": 0,
    "write_buffer_size": 32 * 1024 * 1024,
    "level_compaction_dynamic_level_bytes": False,
}

blackbox_simple_default_params = {
    "open_files": -1,
    "set_options_one_in": 0,
}

whitebox_simple_default_params = {}

# Overrides for --cf_consistency runs (cross-column-family consistency test).
cf_consistency_params = {
    "disable_wal": lambda: random.randint(0, 1),
    "reopen": 0,
    "test_cf_consistency": 1,
    # use small value for write_buffer_size so that RocksDB triggers flush
    # more frequently
    "write_buffer_size": 1024 * 1024,
    "enable_pipelined_write": lambda: random.randint(0, 1),
}

# Overrides for --txn runs (transaction stress testing).
txn_params = {
    "use_txn" : 1,
    # Avoid lambda to set it once for the entire test
    "txn_write_policy": random.randint(0, 2),
    "unordered_write": random.randint(0, 1),
    "disable_wal": 0,
    # OpenReadOnly after checkpoint is not currently compatible with WritePrepared txns
    "checkpoint_one_in": 0,
    # pipeline write is not currently compatible with WritePrepared txns
    "enable_pipelined_write": 0,
}
def finalize_and_sanitize(src_params):
    # Evaluate any callable (randomized) values, then patch up parameter
    # combinations that db_stress does not support. Order of the fix-ups
    # below matters: later rules read values earlier rules may have set.
    dest_params = dict([(k, v() if callable(v) else v)
                        for (k, v) in src_params.items()])
    # zstd dictionary training only makes sense with zstd + a dictionary.
    if dest_params.get("compression_type") != "zstd" or \
            dest_params.get("compression_max_dict_bytes") == 0:
        dest_params["compression_zstd_max_train_bytes"] = 0
    # Only skip_list supports concurrent memtable writes.
    if dest_params.get("allow_concurrent_memtable_write", 1) == 1:
        dest_params["memtablerep"] = "skip_list"
    # Direct I/O is incompatible with mmap reads and needs FS support.
    if dest_params["mmap_read"] == 1 or not is_direct_io_supported(
            dest_params["db"]):
        dest_params["use_direct_io_for_flush_and_compaction"] = 0
        dest_params["use_direct_reads"] = 0
    # DeleteRange is not currently compatible with Txns
    if dest_params.get("test_batches_snapshots") == 1 or \
            dest_params.get("use_txn") == 1:
        # Fold the range-delete share into plain deletes to keep ratios stable.
        dest_params["delpercent"] += dest_params["delrangepercent"]
        dest_params["delrangepercent"] = 0
    # Only under WritePrepared txns, unordered_write would provide the same guarantees as vanilla rocksdb
    if dest_params.get("unordered_write", 0) == 1:
        dest_params["txn_write_policy"] = 1
        dest_params["allow_concurrent_memtable_write"] = 1
    if dest_params.get("disable_wal", 0) == 1:
        dest_params["atomic_flush"] = 1
        dest_params["sync"] = 0
    if dest_params.get("open_files", 1) != -1:
        # Compaction TTL and periodic compactions are only compatible
        # with open_files = -1
        dest_params["compaction_ttl"] = 0
        dest_params["periodic_compaction_seconds"] = 0
    if dest_params.get("compaction_style", 0) == 2:
        # Disable compaction TTL in FIFO compaction, because right
        # now assertion failures are triggered.
        dest_params["compaction_ttl"] = 0
        dest_params["periodic_compaction_seconds"] = 0
    # Partitioned filters require the two-level index (index_type == 2) and
    # exclude the block-based filter.
    if dest_params["partition_filters"] == 1:
        if dest_params["index_type"] != 2:
            dest_params["partition_filters"] = 0
        else:
            dest_params["use_block_based_filter"] = 0
    if dest_params.get("atomic_flush", 0) == 1:
        # disable pipelined write when atomic flush is used.
        dest_params["enable_pipelined_write"] = 0
    return dest_params
def gen_cmd_params(args):
    """Assemble the db_stress parameter dict for this run.

    Layering (later wins): default_params, then the per-test-type defaults,
    then the simple/cf_consistency/txn overlays, and finally any explicitly
    provided command-line arguments.
    """
    params = {}
    params.update(default_params)
    test_type = args.test_type
    if test_type == 'blackbox':
        params.update(blackbox_default_params)
    elif test_type == 'whitebox':
        params.update(whitebox_default_params)
    if args.simple:
        params.update(simple_default_params)
        if test_type == 'blackbox':
            params.update(blackbox_simple_default_params)
        elif test_type == 'whitebox':
            params.update(whitebox_simple_default_params)
    if args.cf_consistency:
        params.update(cf_consistency_params)
    if args.txn:
        params.update(txn_params)
    # Explicit command-line values override everything else.
    for key, value in vars(args).items():
        if value is not None:
            params[key] = value
    return params
def gen_cmd(params, unknown_params):
    """Build the db_stress argv list from a parameter dict.

    Script-only keys (duration, interval, ...) and None values are dropped;
    the remaining finalized parameters become sorted --key=value flags,
    followed by any pass-through unknown arguments.
    """
    script_only_keys = set(['test_type', 'simple', 'duration', 'interval',
                            'random_kill_odd', 'cf_consistency', 'txn'])
    finalized = finalize_and_sanitize(params)
    flags = []
    for key in sorted(finalized):
        value = finalized[key]
        if key in script_only_keys or value is None:
            continue
        flags.append('--{0}={1}'.format(key, value))
    return ['./db_stress'] + flags + unknown_params
# This script runs and kills db_stress multiple times. It checks consistency
# in case of unsafe crashes in RocksDB.
def blackbox_crash_main(args, unknown_args):
    # Repeatedly launch db_stress against the same DB, hard-kill it after
    # `interval` seconds, and fail if any run printed a non-WARNING line to
    # stderr. Loops until `duration` seconds have elapsed.
    cmd_params = gen_cmd_params(args)
    dbname = get_dbname('blackbox')
    exit_time = time.time() + cmd_params['duration']
    print("Running blackbox-crash-test with \n"
          + "interval_between_crash=" + str(cmd_params['interval']) + "\n"
          + "total-duration=" + str(cmd_params['duration']) + "\n")
    while time.time() < exit_time:
        run_had_errors = False
        killtime = time.time() + cmd_params['interval']
        # NOTE(review): `dict(a.items() + b.items())` relies on Python 2,
        # where items() returns concatenable lists; breaks on Python 3.
        cmd = gen_cmd(dict(
            cmd_params.items() +
            {'db': dbname}.items()), unknown_args)
        child = subprocess.Popen(cmd, stderr=subprocess.PIPE)
        print("Running db_stress with pid=%d: %s\n\n"
              % (child.pid, ' '.join(cmd)))
        stop_early = False
        # Poll once a second; the child exiting on its own (before we kill it)
        # is suspicious and is logged.
        while time.time() < killtime:
            if child.poll() is not None:
                print("WARNING: db_stress ended before kill: exitcode=%d\n"
                      % child.returncode)
                stop_early = True
                break
            time.sleep(1)
        if not stop_early:
            if child.poll() is not None:
                print("WARNING: db_stress ended before kill: exitcode=%d\n"
                      % child.returncode)
            else:
                # Simulate an unsafe crash: kill without any cleanup.
                child.kill()
                print("KILLED %d\n" % child.pid)
                time.sleep(1)  # time to stabilize after a kill
        # Any non-WARNING stderr output from db_stress is treated as a
        # consistency failure.
        while True:
            line = child.stderr.readline().strip()
            if line == '':
                break
            elif not line.startswith('WARNING'):
                run_had_errors = True
                print('stderr has error message:')
                print('***' + line + '***')
        if run_had_errors:
            sys.exit(2)
        time.sleep(1)  # time to stabilize before the next run
    # we need to clean up after ourselves -- only do this on test success
    shutil.rmtree(dbname, True)
# This python script runs db_stress multiple times. Some runs with
# kill_random_test that causes rocksdb to crash at various points in code.
def whitebox_crash_main(args, unknown_args):
    """Cycle db_stress through crash-injection and verification modes.

    check_mode rotates 0..3: 0 = crash-injection via kill_random_test
    (itself rotating through three kill sub-modes), 1 = normal run with
    universal compaction, 2 = normal run with FIFO compaction, 3 = plain
    normal run.  During the first half of the total duration only mode 0
    runs; in the second half each successful run advances check_mode and
    recreates the database directory.
    """
    cmd_params = gen_cmd_params(args)
    dbname = get_dbname('whitebox')
    cur_time = time.time()
    exit_time = cur_time + cmd_params['duration']
    half_time = cur_time + cmd_params['duration'] / 2

    print("Running whitebox-crash-test with \n"
          + "total-duration=" + str(cmd_params['duration']) + "\n")

    total_check_mode = 4
    check_mode = 0
    kill_random_test = cmd_params['random_kill_odd']
    kill_mode = 0

    while time.time() < exit_time:
        if check_mode == 0:
            additional_opts = {
                # use large ops per thread since we will kill it anyway
                "ops_per_thread": 100 * cmd_params['ops_per_thread'],
            }
            # run with kill_random_test, with three modes.
            # Mode 0 covers all kill points. Mode 1 covers less kill points but
            # increases change of triggering them. Mode 2 covers even less
            # frequent kill points and further increases triggering change.
            if kill_mode == 0:
                additional_opts.update({
                    "kill_random_test": kill_random_test,
                })
            elif kill_mode == 1:
                # NOTE: '/' here is integer division under Python 2 (this
                # file uses Python 2 print statements below).
                if cmd_params.get('disable_wal', 0) == 1:
                    my_kill_odd = kill_random_test / 50 + 1
                else:
                    my_kill_odd = kill_random_test / 10 + 1
                additional_opts.update({
                    "kill_random_test": my_kill_odd,
                    "kill_prefix_blacklist": "WritableFileWriter::Append,"
                    + "WritableFileWriter::WriteBuffered",
                })
            elif kill_mode == 2:
                # TODO: May need to adjust random odds if kill_random_test
                # is too small.
                additional_opts.update({
                    "kill_random_test": (kill_random_test / 5000 + 1),
                    "kill_prefix_blacklist": "WritableFileWriter::Append,"
                    "WritableFileWriter::WriteBuffered,"
                    "PosixMmapFile::Allocate,WritableFileWriter::Flush",
                })
            # Run kill mode 0, 1 and 2 by turn.
            kill_mode = (kill_mode + 1) % 3
        elif check_mode == 1:
            # normal run with universal compaction mode
            additional_opts = {
                "kill_random_test": None,
                "ops_per_thread": cmd_params['ops_per_thread'],
                "compaction_style": 1,
            }
        elif check_mode == 2:
            # normal run with FIFO compaction mode
            # ops_per_thread is divided by 5 because FIFO compaction
            # style is quite a bit slower on reads with lot of files
            additional_opts = {
                "kill_random_test": None,
                "ops_per_thread": cmd_params['ops_per_thread'] / 5,
                "compaction_style": 2,
            }
        else:
            # normal run
            additional_opts = {
                "kill_random_test": None,
                "ops_per_thread": cmd_params['ops_per_thread'],
            }

        cmd = gen_cmd(dict(cmd_params.items() + additional_opts.items()
                           + {'db': dbname}.items()), unknown_args)

        print "Running:" + ' '.join(cmd) + "\n"  # noqa: E999 T25377293 Grandfathered in

        popen = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
        stdoutdata, stderrdata = popen.communicate()
        retncode = popen.returncode
        msg = ("check_mode={0}, kill option={1}, exitcode={2}\n".format(
            check_mode, additional_opts['kill_random_test'], retncode))
        print msg
        print stdoutdata

        # Decide whether the exit code is acceptable for this mode.
        expected = False
        if additional_opts['kill_random_test'] is None and (retncode == 0):
            # we expect zero retncode if no kill option
            expected = True
        elif additional_opts['kill_random_test'] is not None and retncode <= 0:
            # When kill option is given, the test MIGHT kill itself.
            # If it does, negative retncode is expected. Otherwise 0.
            expected = True

        if not expected:
            print "TEST FAILED. See kill option and exit code above!!!\n"
            sys.exit(1)

        # Scan the (lower-cased) output for error/fail markers, ignoring
        # the benign "got errors 0 times" summary line.
        stdoutdata = stdoutdata.lower()
        errorcount = (stdoutdata.count('error') -
                      stdoutdata.count('got errors 0 times'))
        print "#times error occurred in output is " + str(errorcount) + "\n"

        if (errorcount > 0):
            print "TEST FAILED. Output has 'error'!!!\n"
            sys.exit(2)
        if (stdoutdata.find('fail') >= 0):
            print "TEST FAILED. Output has 'fail'!!!\n"
            sys.exit(2)

        # First half of the duration, keep doing kill test. For the next half,
        # try different modes.
        if time.time() > half_time:
            # we need to clean up after ourselves -- only do this on test
            # success
            shutil.rmtree(dbname, True)
            os.mkdir(dbname)
            cmd_params.pop('expected_values_path', None)
            check_mode = (check_mode + 1) % total_check_mode

        time.sleep(1)  # time to stabilize after a kill
def main():
    """Parse command-line options and dispatch to the chosen crash test.

    Known options are derived from the default parameter dictionaries;
    anything argparse does not recognize is forwarded verbatim to
    db_stress.
    """
    parser = argparse.ArgumentParser(description="This script runs and kills \
db_stress multiple times")
    parser.add_argument("test_type", choices=["blackbox", "whitebox"])
    parser.add_argument("--simple", action="store_true")
    parser.add_argument("--cf_consistency", action='store_true')
    parser.add_argument("--txn", action='store_true')

    # Python 2 dict.items() concatenation; on duplicate keys the later
    # dictionary's value wins.
    all_params = dict(default_params.items()
                      + blackbox_default_params.items()
                      + whitebox_default_params.items()
                      + simple_default_params.items()
                      + blackbox_simple_default_params.items()
                      + whitebox_simple_default_params.items())

    for k, v in all_params.items():
        # Callable defaults are invoked once so argparse can derive the
        # option's type from a concrete sample value.
        parser.add_argument("--" + k, type=type(v() if callable(v) else v))
    # unknown_args are passed directly to db_stress
    args, unknown_args = parser.parse_known_args()

    test_tmpdir = os.environ.get(_TEST_DIR_ENV_VAR)
    if test_tmpdir is not None and not os.path.isdir(test_tmpdir):
        print('%s env var is set to a non-existent directory: %s' %
              (_TEST_DIR_ENV_VAR, test_tmpdir))
        sys.exit(1)

    if args.test_type == 'blackbox':
        blackbox_crash_main(args, unknown_args)
    if args.test_type == 'whitebox':
        whitebox_crash_main(args, unknown_args)
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from jacket.compute.scheduler.filters import extra_specs_ops
from jacket.compute import test
class ExtraSpecsOpsTestCase(test.NoDBTestCase):
    """Exercise extra_specs_ops.match() across its operator vocabulary."""

    def _do_extra_specs_ops_test(self, value, req, matches):
        # Evaluate the requirement string against the value and compare
        # the boolean outcome with the expectation.
        outcome = extra_specs_ops.match(value, req)
        if matches:
            self.assertTrue(outcome)
        else:
            self.assertFalse(outcome)

    # -- plain (operator-less) comparisons ---------------------------------

    def test_extra_specs_matches_simple(self):
        self._do_extra_specs_ops_test('1', '1', True)

    def test_extra_specs_fails_simple(self):
        self._do_extra_specs_ops_test('', '1', False)

    def test_extra_specs_fails_simple2(self):
        self._do_extra_specs_ops_test('3', '1', False)

    def test_extra_specs_fails_simple3(self):
        self._do_extra_specs_ops_test('222', '2', False)

    def test_extra_specs_fails_with_bogus_ops(self):
        self._do_extra_specs_ops_test('4', '> 2', False)

    # -- '=' operator ------------------------------------------------------

    def test_extra_specs_matches_with_op_eq(self):
        self._do_extra_specs_ops_test('123', '= 123', True)

    def test_extra_specs_matches_with_op_eq2(self):
        self._do_extra_specs_ops_test('124', '= 123', True)

    def test_extra_specs_fails_with_op_eq(self):
        self._do_extra_specs_ops_test('34', '= 234', False)

    def test_extra_specs_fails_with_op_eq3(self):
        self._do_extra_specs_ops_test('34', '=', False)

    # -- string operators --------------------------------------------------

    def test_extra_specs_matches_with_op_seq(self):
        self._do_extra_specs_ops_test('123', 's== 123', True)

    def test_extra_specs_fails_with_op_seq(self):
        self._do_extra_specs_ops_test('1234', 's== 123', False)

    def test_extra_specs_matches_with_op_sneq(self):
        self._do_extra_specs_ops_test('1234', 's!= 123', True)

    def test_extra_specs_fails_with_op_sneq(self):
        self._do_extra_specs_ops_test('123', 's!= 123', False)

    def test_extra_specs_fails_with_op_sge(self):
        self._do_extra_specs_ops_test('1000', 's>= 234', False)

    def test_extra_specs_fails_with_op_sle(self):
        self._do_extra_specs_ops_test('1234', 's<= 1000', False)

    def test_extra_specs_fails_with_op_sl(self):
        self._do_extra_specs_ops_test('2', 's< 12', False)

    def test_extra_specs_fails_with_op_sg(self):
        self._do_extra_specs_ops_test('12', 's> 2', False)

    # -- <in> operator -----------------------------------------------------

    def test_extra_specs_matches_with_op_in(self):
        self._do_extra_specs_ops_test('12311321', '<in> 11', True)

    def test_extra_specs_matches_with_op_in2(self):
        self._do_extra_specs_ops_test('12311321', '<in> 12311321', True)

    def test_extra_specs_matches_with_op_in3(self):
        self._do_extra_specs_ops_test('12311321', '<in> 12311321 <in>', True)

    def test_extra_specs_fails_with_op_in(self):
        self._do_extra_specs_ops_test('12310321', '<in> 11', False)

    def test_extra_specs_fails_with_op_in2(self):
        self._do_extra_specs_ops_test('12310321', '<in> 11 <in>', False)

    # -- <or> operator -----------------------------------------------------

    def test_extra_specs_matches_with_op_or(self):
        self._do_extra_specs_ops_test('12', '<or> 11 <or> 12', True)

    def test_extra_specs_matches_with_op_or2(self):
        self._do_extra_specs_ops_test('12', '<or> 11 <or> 12 <or>', True)

    def test_extra_specs_fails_with_op_or(self):
        self._do_extra_specs_ops_test('13', '<or> 11 <or> 12', False)

    def test_extra_specs_fails_with_op_or2(self):
        self._do_extra_specs_ops_test('13', '<or> 11 <or> 12 <or>', False)

    # -- numeric <= / >= ---------------------------------------------------

    def test_extra_specs_matches_with_op_le(self):
        self._do_extra_specs_ops_test('2', '<= 10', True)

    def test_extra_specs_fails_with_op_le(self):
        self._do_extra_specs_ops_test('3', '<= 2', False)

    def test_extra_specs_matches_with_op_ge(self):
        self._do_extra_specs_ops_test('3', '>= 1', True)

    def test_extra_specs_fails_with_op_ge(self):
        self._do_extra_specs_ops_test('2', '>= 3', False)

    # -- <all-in> operator -------------------------------------------------

    def test_extra_specs_matches_all_with_op_allin(self):
        self._do_extra_specs_ops_test(str(['aes', 'mmx', 'aux']),
                                      '<all-in> aes mmx', True)

    def test_extra_specs_matches_one_with_op_allin(self):
        self._do_extra_specs_ops_test(str(['aes', 'mmx', 'aux']),
                                      '<all-in> mmx', True)

    def test_extra_specs_fails_with_op_allin(self):
        self._do_extra_specs_ops_test(str(['aes', 'mmx', 'aux']),
                                      '<all-in> txt', False)

    def test_extra_specs_fails_all_with_op_allin(self):
        self._do_extra_specs_ops_test(str(['aes', 'mmx', 'aux']),
                                      '<all-in> txt 3dnow', False)

    def test_extra_specs_fails_match_one_with_op_allin(self):
        self._do_extra_specs_ops_test(str(['aes', 'mmx', 'aux']),
                                      '<all-in> txt aes', False)
| |
from importlib import machinery
import importlib
import importlib.abc
from .. import abc
from .. import util
from . import util as source_util
import errno
import imp
import marshal
import os
import py_compile
import shutil
import stat
import sys
import unittest
from test.support import make_legacy_pyc
class SimpleTest(unittest.TestCase):

    """Should have no issue importing a source module [basic]. And if there is
    a syntax error, it should raise a SyntaxError [syntax error].
    """

    def test_load_module_API(self):
        # If fullname is not specified that assume self.name is desired.
        class TesterMixin(importlib.abc.Loader):
            def load_module(self, fullname): return fullname
            def module_repr(self, module): return '<module>'

        class Tester(importlib.abc.FileLoader, TesterMixin):
            def get_code(self, _): pass
            def get_source(self, _): pass
            def is_package(self, _): pass

        name = 'mod_name'
        loader = Tester(name, 'some_path')
        # Omitted / None arguments fall back to the loader's own name.
        self.assertEqual(name, loader.load_module())
        self.assertEqual(name, loader.load_module(None))
        self.assertEqual(name, loader.load_module(name))
        # A name other than the loader's own is rejected.
        with self.assertRaises(ImportError):
            loader.load_module(loader.name + 'XXX')

    def test_get_filename_API(self):
        # If fullname is not set then assume self.path is desired.
        class Tester(importlib.abc.FileLoader):
            def get_code(self, _): pass
            def get_source(self, _): pass
            def is_package(self, _): pass
            def module_repr(self, _): pass

        path = 'some_path'
        name = 'some_name'
        loader = Tester(name, path)
        self.assertEqual(path, loader.get_filename(name))
        self.assertEqual(path, loader.get_filename())
        self.assertEqual(path, loader.get_filename(None))
        with self.assertRaises(ImportError):
            loader.get_filename(name + 'XXX')

    # [basic]
    def test_module(self):
        # A plain source module loads and exposes the expected attributes.
        with source_util.create_modules('_temp') as mapping:
            loader = machinery.SourceFileLoader('_temp', mapping['_temp'])
            module = loader.load_module('_temp')
            self.assertIn('_temp', sys.modules)
            check = {'__name__': '_temp', '__file__': mapping['_temp'],
                     '__package__': ''}
            for attr, value in check.items():
                self.assertEqual(getattr(module, attr), value)

    def test_package(self):
        # Loading a package's __init__ sets __path__ and __package__.
        with source_util.create_modules('_pkg.__init__') as mapping:
            loader = machinery.SourceFileLoader('_pkg',
                                                mapping['_pkg.__init__'])
            module = loader.load_module('_pkg')
            self.assertIn('_pkg', sys.modules)
            check = {'__name__': '_pkg', '__file__': mapping['_pkg.__init__'],
                     '__path__': [os.path.dirname(mapping['_pkg.__init__'])],
                     '__package__': '_pkg'}
            for attr, value in check.items():
                self.assertEqual(getattr(module, attr), value)

    def test_lacking_parent(self):
        # A submodule can be loaded directly without importing its parent.
        with source_util.create_modules('_pkg.__init__', '_pkg.mod') as mapping:
            loader = machinery.SourceFileLoader('_pkg.mod',
                                                mapping['_pkg.mod'])
            module = loader.load_module('_pkg.mod')
            self.assertIn('_pkg.mod', sys.modules)
            check = {'__name__': '_pkg.mod', '__file__': mapping['_pkg.mod'],
                     '__package__': '_pkg'}
            for attr, value in check.items():
                self.assertEqual(getattr(module, attr), value)

    def fake_mtime(self, fxn):
        """Fake mtime to always be higher than expected."""
        return lambda name: fxn(name) + 1

    def test_module_reuse(self):
        # Reloading must reuse the existing module object and its __dict__.
        with source_util.create_modules('_temp') as mapping:
            loader = machinery.SourceFileLoader('_temp', mapping['_temp'])
            module = loader.load_module('_temp')
            module_id = id(module)
            module_dict_id = id(module.__dict__)
            with open(mapping['_temp'], 'w') as file:
                file.write("testing_var = 42\n")
            module = loader.load_module('_temp')
            self.assertIn('testing_var', module.__dict__,
                          "'testing_var' not in "
                          "{0}".format(list(module.__dict__.keys())))
            self.assertEqual(module, sys.modules['_temp'])
            self.assertEqual(id(module), module_id)
            self.assertEqual(id(module.__dict__), module_dict_id)

    def test_state_after_failure(self):
        # A failed reload should leave the original module intact.
        attributes = ('__file__', '__path__', '__package__')
        value = '<test>'
        name = '_temp'
        with source_util.create_modules(name) as mapping:
            orig_module = imp.new_module(name)
            for attr in attributes:
                setattr(orig_module, attr, value)
            with open(mapping[name], 'w') as file:
                file.write('+++ bad syntax +++')
            loader = machinery.SourceFileLoader('_temp', mapping['_temp'])
            with self.assertRaises(SyntaxError):
                loader.load_module(name)
            # Pre-existing attribute values survive the failed load.
            for attr in attributes:
                self.assertEqual(getattr(orig_module, attr), value)

    # [syntax error]
    def test_bad_syntax(self):
        with source_util.create_modules('_temp') as mapping:
            with open(mapping['_temp'], 'w') as file:
                file.write('=')
            loader = machinery.SourceFileLoader('_temp', mapping['_temp'])
            with self.assertRaises(SyntaxError):
                loader.load_module('_temp')
            # The broken module must not be left in sys.modules.
            self.assertNotIn('_temp', sys.modules)

    def test_file_from_empty_string_dir(self):
        # Loading a module found from an empty string entry on sys.path should
        # not only work, but keep all attributes relative.
        file_path = '_temp.py'
        with open(file_path, 'w') as file:
            file.write("# test file for importlib")
        try:
            with util.uncache('_temp'):
                loader = machinery.SourceFileLoader('_temp', file_path)
                mod = loader.load_module('_temp')
                self.assertEqual(file_path, mod.__file__)
                self.assertEqual(imp.cache_from_source(file_path),
                                 mod.__cached__)
        finally:
            os.unlink(file_path)
            pycache = os.path.dirname(imp.cache_from_source(file_path))
            if os.path.exists(pycache):
                shutil.rmtree(pycache)

    def test_timestamp_overflow(self):
        # When a modification timestamp is larger than 2**32, it should be
        # truncated rather than raise an OverflowError.
        with source_util.create_modules('_temp') as mapping:
            source = mapping['_temp']
            compiled = imp.cache_from_source(source)
            with open(source, 'w') as f:
                f.write("x = 5")
            try:
                os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5))
            except OverflowError:
                self.skipTest("cannot set modification time to large integer")
            except OSError as e:
                # Some platforms report the overflow as EOVERFLOW instead.
                if e.errno != getattr(errno, 'EOVERFLOW', None):
                    raise
                self.skipTest("cannot set modification time to large integer ({})".format(e))
            loader = machinery.SourceFileLoader('_temp', mapping['_temp'])
            mod = loader.load_module('_temp')
            # Sanity checks.
            self.assertEqual(mod.__cached__, compiled)
            self.assertEqual(mod.x, 5)
            # The pyc file was created.
            os.stat(compiled)
class BadBytecodeTest(unittest.TestCase):
def import_(self, file, module_name):
loader = self.loader(module_name, file)
module = loader.load_module(module_name)
self.assertIn(module_name, sys.modules)
def manipulate_bytecode(self, name, mapping, manipulator, *,
del_source=False):
"""Manipulate the bytecode of a module by passing it into a callable
that returns what to use as the new bytecode."""
try:
del sys.modules['_temp']
except KeyError:
pass
py_compile.compile(mapping[name])
if not del_source:
bytecode_path = imp.cache_from_source(mapping[name])
else:
os.unlink(mapping[name])
bytecode_path = make_legacy_pyc(mapping[name])
if manipulator:
with open(bytecode_path, 'rb') as file:
bc = file.read()
new_bc = manipulator(bc)
with open(bytecode_path, 'wb') as file:
if new_bc is not None:
file.write(new_bc)
return bytecode_path
def _test_empty_file(self, test, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: b'',
del_source=del_source)
test('_temp', mapping, bc_path)
@source_util.writes_bytecode_files
def _test_partial_magic(self, test, *, del_source=False):
# When their are less than 4 bytes to a .pyc, regenerate it if
# possible, else raise ImportError.
with source_util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:3],
del_source=del_source)
test('_temp', mapping, bc_path)
def _test_magic_only(self, test, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:4],
del_source=del_source)
test('_temp', mapping, bc_path)
def _test_partial_timestamp(self, test, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:7],
del_source=del_source)
test('_temp', mapping, bc_path)
def _test_partial_size(self, test, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:11],
del_source=del_source)
test('_temp', mapping, bc_path)
def _test_no_marshal(self, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:12],
del_source=del_source)
file_path = mapping['_temp'] if not del_source else bc_path
with self.assertRaises(EOFError):
self.import_(file_path, '_temp')
def _test_non_code_marshal(self, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bytecode_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:12] + marshal.dumps(b'abcd'),
del_source=del_source)
file_path = mapping['_temp'] if not del_source else bytecode_path
with self.assertRaises(ImportError) as cm:
self.import_(file_path, '_temp')
self.assertEqual(cm.exception.name, '_temp')
self.assertEqual(cm.exception.path, bytecode_path)
def _test_bad_marshal(self, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bytecode_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:12] + b'<test>',
del_source=del_source)
file_path = mapping['_temp'] if not del_source else bytecode_path
with self.assertRaises(EOFError):
self.import_(file_path, '_temp')
def _test_bad_magic(self, test, *, del_source=False):
with source_util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: b'\x00\x00\x00\x00' + bc[4:])
test('_temp', mapping, bc_path)
class SourceLoaderBadBytecodeTest(BadBytecodeTest):

    # With the source available, corrupt bytecode should be regenerated
    # from it instead of aborting the import.
    loader = machinery.SourceFileLoader

    @source_util.writes_bytecode_files
    def test_empty_file(self):
        # When a .pyc is empty, regenerate it if possible, else raise
        # ImportError.
        def test(name, mapping, bytecode_path):
            self.import_(mapping[name], name)
            with open(bytecode_path, 'rb') as file:
                # Regenerated .pyc must contain more than just the header.
                self.assertGreater(len(file.read()), 12)

        self._test_empty_file(test)

    def test_partial_magic(self):
        def test(name, mapping, bytecode_path):
            self.import_(mapping[name], name)
            with open(bytecode_path, 'rb') as file:
                self.assertGreater(len(file.read()), 12)
        self._test_partial_magic(test)

    @source_util.writes_bytecode_files
    def test_magic_only(self):
        # When there is only the magic number, regenerate the .pyc if possible,
        # else raise EOFError.
        def test(name, mapping, bytecode_path):
            self.import_(mapping[name], name)
            with open(bytecode_path, 'rb') as file:
                self.assertGreater(len(file.read()), 12)
        self._test_magic_only(test)

    @source_util.writes_bytecode_files
    def test_bad_magic(self):
        # When the magic number is different, the bytecode should be
        # regenerated.
        def test(name, mapping, bytecode_path):
            self.import_(mapping[name], name)
            with open(bytecode_path, 'rb') as bytecode_file:
                self.assertEqual(bytecode_file.read(4), imp.get_magic())
        self._test_bad_magic(test)

    @source_util.writes_bytecode_files
    def test_partial_timestamp(self):
        # When the timestamp is partial, regenerate the .pyc, else
        # raise EOFError.
        def test(name, mapping, bc_path):
            self.import_(mapping[name], name)
            with open(bc_path, 'rb') as file:
                self.assertGreater(len(file.read()), 12)
        self._test_partial_timestamp(test)

    @source_util.writes_bytecode_files
    def test_partial_size(self):
        # When the size is partial, regenerate the .pyc, else
        # raise EOFError.
        def test(name, mapping, bc_path):
            self.import_(mapping[name], name)
            with open(bc_path, 'rb') as file:
                self.assertGreater(len(file.read()), 12)
        self._test_partial_size(test)

    @source_util.writes_bytecode_files
    def test_no_marshal(self):
        # When there is only the magic number and timestamp, raise EOFError.
        self._test_no_marshal()

    @source_util.writes_bytecode_files
    def test_non_code_marshal(self):
        self._test_non_code_marshal()
        # XXX ImportError when sourceless

    # [bad marshal]
    @source_util.writes_bytecode_files
    def test_bad_marshal(self):
        # Bad marshal data past the header raises EOFError (see
        # _test_bad_marshal, which asserts EOFError, not ValueError).
        self._test_bad_marshal()

    # [bad timestamp]
    @source_util.writes_bytecode_files
    def test_old_timestamp(self):
        # When the timestamp is older than the source, bytecode should be
        # regenerated.
        zeros = b'\x00\x00\x00\x00'
        with source_util.create_modules('_temp') as mapping:
            py_compile.compile(mapping['_temp'])
            bytecode_path = imp.cache_from_source(mapping['_temp'])
            # Zero out the timestamp field (bytes 4-8) to force staleness.
            with open(bytecode_path, 'r+b') as bytecode_file:
                bytecode_file.seek(4)
                bytecode_file.write(zeros)
            self.import_(mapping['_temp'], '_temp')
            source_mtime = os.path.getmtime(mapping['_temp'])
            source_timestamp = importlib._w_long(source_mtime)
            with open(bytecode_path, 'rb') as bytecode_file:
                bytecode_file.seek(4)
                self.assertEqual(bytecode_file.read(4), source_timestamp)

    # [bytecode read-only]
    @source_util.writes_bytecode_files
    def test_read_only_bytecode(self):
        # When bytecode is read-only but should be rewritten, fail silently.
        with source_util.create_modules('_temp') as mapping:
            # Create bytecode that will need to be re-created.
            py_compile.compile(mapping['_temp'])
            bytecode_path = imp.cache_from_source(mapping['_temp'])
            with open(bytecode_path, 'r+b') as bytecode_file:
                bytecode_file.seek(0)
                bytecode_file.write(b'\x00\x00\x00\x00')
            # Make the bytecode read-only.
            os.chmod(bytecode_path,
                     stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
            try:
                # Should not raise IOError!
                self.import_(mapping['_temp'], '_temp')
            finally:
                # Make writable for eventual clean-up.
                os.chmod(bytecode_path, stat.S_IWUSR)
class SourcelessLoaderBadBytecodeTest(BadBytecodeTest):

    """Corrupt-bytecode behaviour when only the .pyc exists (no source)."""

    loader = machinery.SourcelessFileLoader

    def _assert_import_error(self, name, bytecode_path):
        # With no source to regenerate from, a damaged .pyc must surface
        # as ImportError carrying the module name and the bytecode path.
        with self.assertRaises(ImportError) as cm:
            self.import_(bytecode_path, name)
        self.assertEqual(cm.exception.name, name)
        self.assertEqual(cm.exception.path, bytecode_path)

    def _assert_eof_error(self, name, bytecode_path):
        # A header truncated past the magic number raises EOFError.
        with self.assertRaises(EOFError):
            self.import_(bytecode_path, name)

    def test_empty_file(self):
        def check(name, mapping, bytecode_path):
            self._assert_import_error(name, bytecode_path)
        self._test_empty_file(check, del_source=True)

    def test_partial_magic(self):
        def check(name, mapping, bytecode_path):
            self._assert_import_error(name, bytecode_path)
        self._test_partial_magic(check, del_source=True)

    def test_magic_only(self):
        def check(name, mapping, bytecode_path):
            self._assert_eof_error(name, bytecode_path)
        self._test_magic_only(check, del_source=True)

    def test_bad_magic(self):
        def check(name, mapping, bytecode_path):
            self._assert_import_error(name, bytecode_path)
        self._test_bad_magic(check, del_source=True)

    def test_partial_timestamp(self):
        def check(name, mapping, bytecode_path):
            self._assert_eof_error(name, bytecode_path)
        self._test_partial_timestamp(check, del_source=True)

    def test_partial_size(self):
        def check(name, mapping, bytecode_path):
            self._assert_eof_error(name, bytecode_path)
        self._test_partial_size(check, del_source=True)

    def test_no_marshal(self):
        self._test_no_marshal(del_source=True)

    def test_non_code_marshal(self):
        self._test_non_code_marshal(del_source=True)
def test_main():
    """Run every test class in this module via test.support."""
    from test.support import run_unittest
    test_classes = (SimpleTest,
                    SourceLoaderBadBytecodeTest,
                    SourcelessLoaderBadBytecodeTest)
    run_unittest(*test_classes)
# Allow running this test module directly.
if __name__ == '__main__':
    test_main()
| |
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
@script : test_model.py
@created : 2012-11-04 02:28:46.742
@changed : 2012-11-08 10:29:40.844
@creator : mkpy.py --version 0.0.27
@author : Igor A.Vetrov <qprostu@gmail.com>
@about : testing application model classes
"""
import os, sys
sys.path.insert( 0, os.path.normpath( os.path.join( os.getcwd(), '..' ) ) )
from db.sqlite import SQLite
from db.model import Priority, Task
import threading
import unittest
from datetime import datetime, date, timedelta
from sqlite3 import IntegrityError
__revision__ = 13
class PriorityTable(unittest.TestCase):
    """CRUD, lookup and repr tests for the TodoPriority table.

    Each test creates a fresh SQLite database file and drops it in
    tearDown, so tests are independent of one another.
    """

    def setUp(self):
        # NOTE(review): a Lock created inline is never shared, so this
        # `with` provides no real mutual exclusion — presumably meant to
        # serialize DB setup across threads; confirm the intent.
        with threading.Lock():
            self.dbName = "test.sqlite3"
            self.db = SQLite(self.dbName)
            self.table = Priority(self.db)

    def tearDown(self):
        with threading.Lock():
            if os.path.exists(self.dbName):
                # Explicitly finalize the DB wrapper before removing the file.
                self.db.__del__()
                os.unlink(self.dbName)

    def test_table_exists(self):
        self.assertTrue( self.db.tableExists(self.table._tableName) )

    def test_create_sql(self):
        # The generated DDL must match this exact statement.
        sql = "create table TodoPriority(\n" \
              "\tcode integer primary key not null,\n" \
              "\tname text not null,\n" \
              "\tcreated timestamp default (datetime('now', 'localtime'))\n" \
              ");"
        self.assertEqual( sql, self.table.createSql() )

    def test_count(self):
        # Three default priorities are seeded: Low, Medium, High.
        cnt = self.table.count()
        self.assertEqual( cnt, 3 )

    def test_id_name(self):
        self.assertEqual( self.table._idName, "code" )

    def test_low(self):
        row = self.table.select( "select name from {} where code=?;".format(self.table._tableName), (1,) )[0]
        self.assertEqual( row["name"], "Low" )

    def test_medium(self):
        row = self.table.select( "select name from {} where code=?;".format(self.table._tableName), (2,) )[0]
        self.assertEqual( row["name"], "Medium" )

    def test_high(self):
        row = self.table.select( "select name from {} where code=?;".format(self.table._tableName), (3,) )[0]
        self.assertEqual( row["name"], "High" )

    def test_openId(self):
        self.table.openId(1)
        self.assertEqual( self.table.name, "Low" )

    def test_read(self):
        args = self.table.read(2)
        self.assertEqual( args["code"], 2 )
        self.assertEqual( args["name"], "Medium" )
        self.assertEqual( args["id"], 2 )

    def test_save(self):
        # Saving with an empty id inserts a new row.
        args = dict(code=9, name="Unused")
        args = self.table.save("", **args)
        self.assertEqual( self.table.count(), 4 )
        self.assertEqual( args["id"], 9 )
        del args
        args = self.table.read(9)
        self.assertEqual( args["id"], 9 )
        self.assertEqual( args["code"], 9 )
        self.assertEqual( args["name"], "Unused" )

    def test_update(self):
        # Insert, then save again with an explicit id to update in place.
        args = dict(code=9, name="Unused")
        args = self.table.save("", **args)
        del args
        args = self.table.read(9)
        args["name"] = "Used"
        args = self.table.save(9, **args)
        del args
        args = self.table.read(9)
        self.assertEqual( args["id"], 9 )
        self.assertEqual( args["code"], 9 )
        self.assertEqual( args["name"], "Used" )

    def test_delete(self):
        # deleteId's return value is unused — don't bind it.
        self.table.deleteId(2)
        self.assertEqual( self.table.count(), 2 )
        self.assertFalse( self.table.existsId(2) )

    def test_getValue(self):
        value = self.table.getValue(2, "code", "name")
        self.assertEqual( value, (2, "Medium") )

    def test_setValue(self):
        self.table.setValue(2, name="Changing Medium")
        value = self.table.getValue(2, "name")
        self.assertEqual( value, ("Changing Medium",) )

    def test_getCode(self):
        code = self.table.getCode("High")
        self.assertEqual( code, 3 )

    def test_getName(self):
        name = self.table.getName(3)
        self.assertEqual( name, "High" )

    def test_listNames(self):
        values = self.table.listNames()
        self.assertEqual( values, ["Low", "Medium", "High"] )

    def test_repr(self):
        self.table.open(2)
        dt = self.table['created']
        value = str(self.table)
        self.assertEqual( value, "Priority([('created', {}, ('id', 2), ('code', 2), ('name', 'Medium')])".format(dt) )

    def test_keys(self):
        self.table.open(2)
        self.assertEqual( set(list(self.table.keys())), set(["id", "code", "name", "created"]) )

    def test_open(self):
        obj = self.table
        obj.open(3)
        self.assertEqual( obj["name"], "High" )
class TaskTable(unittest.TestCase):
def setUp(self):
with threading.Lock():
self.dbName = "test.sqlite3"
self.db = SQLite(self.dbName)
self.priority = Priority(self.db)
self.task = Task(self.db)
self.task.exec( "insert into {} (name, priority, deadline) values(?, ?, ?)".format(self.task._tableName),
("Low Test", 1, date.today() + timedelta(2)) )
self.task.exec( "insert into {} (name, priority, deadline) values(?, ?, ?)".format(self.task._tableName),
("Medium Test", 2, date.today() + timedelta(3)) )
self.task.exec( "insert into {} (name, priority, deadline) values(?, ?, ?)".format(self.task._tableName),
("High Test", 3, date.today() + timedelta(4)) )
def tearDown(self):
with threading.Lock():
if os.path.exists(self.dbName):
self.db.__del__()
os.unlink(self.dbName)
def test_table_exists(self):
self.assertTrue( self.db.tableExists(self.priority._tableName) )
self.assertTrue( self.db.tableExists(self.task._tableName) )
def test_create_sql(self):
sql = "create table TodoTask(\n" \
"\tid integer primary key autoincrement not null,\n" \
"\tname text not null,\n" \
"\tpriority integer references TodoPriority(code) default 2,\n" \
"\tdeadline date not null default (date('now', 'localtime')),\n" \
"\tstatus integer default 0,\n" \
"\tcompleted timestamp,\n" \
"\tcreated timestamp default (datetime('now', 'localtime'))\n" \
");"
self.assertEqual( sql, self.task.createSql() )
def test_index_exists(self):
self.assertIn( "status", self.task._indices )
def test_id_name(self):
self.assertEqual( self.task._idName, "id" )
def test_count(self):
cnt = self.task.count()
self.assertEqual( cnt, 3 )
def test_low(self):
row = self.task.select( "select * from {} where id=?;".format(self.task._tableName), (1,) )[0]
self.assertEqual( row["name"], "Low Test" )
self.assertEqual( row["priority"], 1 )
self.assertEqual( row["deadline"], date.today() + timedelta(2) )
def test_medium(self):
row = self.task.select( "select * from {} where id=?;".format(self.task._tableName), (2,) )[0]
self.assertEqual( row["name"], "Medium Test" )
self.assertEqual( row["priority"], 2 )
self.assertEqual( row["deadline"], date.today() + timedelta(3) )
def test_high(self):
row = self.task.select( "select * from {} where id=?;".format(self.task._tableName), (3,) )[0]
self.assertEqual( row["name"], "High Test" )
self.assertEqual( row["priority"], 3 )
self.assertEqual( row["deadline"], date.today() + timedelta(4) )
def test_integrity(self):
self.assertRaises( IntegrityError, self.db.execSql,
"insert into {} (name, priority, deadline) values(?, ?, ?)".format(self.task._tableName),
("Highest Test", 4, date.today() + timedelta(4)) )
    def test_openId(self):
        # openId() loads the row's columns onto the model instance itself.
        self.task.openId(2)
        self.assertEqual( self.task.name, "Medium Test" )
def test_read(self):
args = self.task.read(2)
self.assertEqual( args["id"], 2 )
self.assertEqual( args["name"], "Medium Test" )
self.assertEqual( args["priority"], 2 )
self.assertEqual( args["deadline"], date.today() + timedelta(3) )
def test_save(self):
args = dict(name="Highest Test", priority=3, deadline=date.today()+timedelta(5))
args = self.task.save("", **args)
self.assertEqual( self.task.count(), 4 )
self.assertEqual( args["id"], 4 )
del args
args = self.task.read(4)
self.assertEqual( args["id"], 4 )
self.assertEqual( args["status"], 0 )
self.assertEqual( args["name"], "Highest Test" )
self.assertEqual( args["priority"], 3 )
self.assertEqual( args["deadline"], date.today() + timedelta(5) )
def test_delete(self):
args = self.task.deleteId(3)
self.assertEqual( self.task.count(), 2 )
self.assertFalse( self.task.existsId(3) )
    def test_getValue(self):
        # getValue() returns a tuple of the requested columns -- even for a
        # single column, hence the one-element tuple comparison below.
        value = self.task.getValue(3, "name")
        self.assertEqual( value, ("High Test",) )
    def test_setValue(self):
        # setValue() accepts multiple column updates as keyword arguments;
        # getValue() returns the requested columns as a tuple in call order.
        self.task.setValue(3, name="Changing High Test", deadline=date.today() + timedelta(8))
        value = self.task.getValue(3, "name", "deadline")
        self.assertEqual( value, ("Changing High Test", date.today() + timedelta(8)) )
if __name__ == '__main__':
    # verbosity=2 prints each test name as it runs.
    unittest.main(verbosity=2)
# end of test_model.py
| |
"""Event dispatcher sends events."""
from __future__ import absolute_import, unicode_literals
import os
import threading
import time
from collections import defaultdict, deque
from kombu import Producer
from celery.app import app_or_default
from celery.five import items
from celery.utils.nodenames import anon_nodename
from celery.utils.time import utcoffset
from .event import Event, get_exchange, group_from
__all__ = ('EventDispatcher',)
class EventDispatcher(object):
    """Dispatches event messages.

    Arguments:
        connection (kombu.Connection): Connection to the broker.
        hostname (str): Hostname to identify ourselves as,
            by default uses the hostname returned by
            :func:`~celery.utils.anon_nodename`.
        groups (Sequence[str]): List of groups to send events for.
            :meth:`send` will ignore send requests to groups not in this list.
            If this is :const:`None`, all events will be sent.
            Example groups include ``"task"`` and ``"worker"``.
        enabled (bool): Set to :const:`False` to not actually publish any
            events, making :meth:`send` a no-op.
        channel (kombu.Channel): Can be used instead of `connection` to specify
            an exact channel to use when sending events.
        buffer_while_offline (bool): If enabled events will be buffered
            while the connection is down. :meth:`flush` must be called
            as soon as the connection is re-established.

    Note:
        You need to :meth:`close` this after use.
    """

    # Broker transports on which event publishing is force-disabled.
    DISABLED_TRANSPORTS = {'sql'}

    app = None

    # set of callbacks to be called when :meth:`enabled`.
    on_enabled = None

    # set of callbacks to be called when :meth:`disabled`.
    on_disabled = None

    def __init__(self, connection=None, hostname=None, enabled=True,
                 channel=None, buffer_while_offline=True, app=None,
                 serializer=None, groups=None, delivery_mode=1,
                 buffer_group=None, buffer_limit=24, on_send_buffered=None):
        self.app = app_or_default(app or self.app)
        self.connection = connection
        self.channel = channel
        self.hostname = hostname or anon_nodename()
        self.buffer_while_offline = buffer_while_offline
        # Event groups whose events are batched per-group and later published
        # as a single '<group>.multi' message by :meth:`flush`.
        self.buffer_group = buffer_group or frozenset()
        self.buffer_limit = buffer_limit
        self.on_send_buffered = on_send_buffered
        self._group_buffer = defaultdict(list)
        self.mutex = threading.Lock()
        self.producer = None
        # Events whose publish failed while offline; drained by :meth:`flush`.
        self._outbound_buffer = deque()
        self.serializer = serializer or self.app.conf.event_serializer
        self.on_enabled = set()
        self.on_disabled = set()
        self.groups = set(groups or [])
        self.tzoffset = [-time.timezone, -time.altzone]
        self.clock = self.app.clock
        self.delivery_mode = delivery_mode
        if not connection and channel:
            # Only a channel was given: derive the connection from it.
            self.connection = channel.connection.client
        self.enabled = enabled
        conninfo = self.connection or self.app.connection_for_write()
        self.exchange = get_exchange(conninfo)
        if conninfo.transport.driver_type in self.DISABLED_TRANSPORTS:
            self.enabled = False
        if self.enabled:
            self.enable()
        self.headers = {'hostname': self.hostname}
        self.pid = os.getpid()

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()

    def enable(self):
        # auto_declare=False: the exchange is declared per-publish via the
        # ``declare`` argument in :meth:`_publish` instead.
        self.producer = Producer(self.channel or self.connection,
                                 exchange=self.exchange,
                                 serializer=self.serializer,
                                 auto_declare=False)
        self.enabled = True
        for callback in self.on_enabled:
            callback()

    def disable(self):
        if self.enabled:
            self.enabled = False
            self.close()
            for callback in self.on_disabled:
                callback()

    def publish(self, type, fields, producer,
                blind=False, Event=Event, **kwargs):
        """Publish event using custom :class:`~kombu.Producer`.

        Arguments:
            type (str): Event type name, with group separated by dash (`-`).
            fields: Dictionary of event fields, must be json serializable.
            producer (kombu.Producer): Producer instance to use:
                only the ``publish`` method will be called.
            retry (bool): Retry in the event of connection failure.
            retry_policy (Mapping): Map of custom retry policy options.
                See :meth:`~kombu.Connection.ensure`.
            blind (bool): Don't set logical clock value (also don't forward
                the internal logical clock).
            Event (Callable): Event type used to create event.
                Defaults to :func:`Event`.
            utcoffset (Callable): Function returning the current
                utc offset in hours.
        """
        clock = None if blind else self.clock.forward()
        event = Event(type, hostname=self.hostname, utcoffset=utcoffset(),
                      pid=self.pid, clock=clock, **fields)
        with self.mutex:
            return self._publish(event, producer,
                                 routing_key=type.replace('-', '.'), **kwargs)

    def _publish(self, event, producer, routing_key, retry=False,
                 retry_policy=None, utcoffset=utcoffset):
        # Low-level publish; callers (publish/flush) hold self.mutex.
        exchange = self.exchange
        try:
            producer.publish(
                event,
                routing_key=routing_key,
                exchange=exchange.name,
                retry=retry,
                retry_policy=retry_policy,
                declare=[exchange],
                serializer=self.serializer,
                headers=self.headers,
                delivery_mode=self.delivery_mode,
            )
        except Exception as exc:  # pylint: disable=broad-except
            if not self.buffer_while_offline:
                raise
            # Remember the failed event so :meth:`flush` can retry it once
            # the connection is back.
            self._outbound_buffer.append((event, routing_key, exc))

    def send(self, type, blind=False, utcoffset=utcoffset, retry=False,
             retry_policy=None, Event=Event, **fields):
        """Send event.

        Arguments:
            type (str): Event type name, with group separated by dash (`-`).
            retry (bool): Retry in the event of connection failure.
            retry_policy (Mapping): Map of custom retry policy options.
                See :meth:`~kombu.Connection.ensure`.
            blind (bool): Don't set logical clock value (also don't forward
                the internal logical clock).
            Event (Callable): Event type used to create event,
                defaults to :func:`Event`.
            utcoffset (Callable): Function returning the current utc offset
                in hours.
            **fields (Any): Event fields -- must be json serializable.
        """
        if self.enabled:
            groups, group = self.groups, group_from(type)
            if groups and group not in groups:
                # Group filtering is active and this group is not selected.
                return
            if group in self.buffer_group:
                # Buffered group: accumulate and flush when the limit is hit.
                clock = self.clock.forward()
                event = Event(type, hostname=self.hostname,
                              utcoffset=utcoffset(),
                              pid=self.pid, clock=clock, **fields)
                buf = self._group_buffer[group]
                buf.append(event)
                if len(buf) >= self.buffer_limit:
                    self.flush()
                elif self.on_send_buffered:
                    self.on_send_buffered()
            else:
                return self.publish(type, fields, self.producer, blind=blind,
                                    Event=Event, retry=retry,
                                    retry_policy=retry_policy)

    def flush(self, errors=True, groups=True):
        """Flush the outbound buffer."""
        if errors:
            buf = list(self._outbound_buffer)
            try:
                with self.mutex:
                    for event, routing_key, _ in buf:
                        self._publish(event, self.producer, routing_key)
            finally:
                self._outbound_buffer.clear()
        if groups:
            with self.mutex:
                for group, events in items(self._group_buffer):
                    # Buffered events are published as one '<group>.multi'
                    # message containing the whole list.
                    self._publish(events, self.producer, '%s.multi' % group)
                    events[:] = []  # list.clear

    def extend_buffer(self, other):
        """Copy the outbound buffer of another instance."""
        self._outbound_buffer.extend(other._outbound_buffer)

    def close(self):
        """Close the event dispatcher."""
        # NOTE(review): releasing based on .locked() is racy and may release
        # a lock currently held by another thread -- confirm this is the
        # intended "best effort" shutdown behavior.
        self.mutex.locked() and self.mutex.release()
        self.producer = None

    def _get_publisher(self):
        return self.producer

    def _set_publisher(self, producer):
        self.producer = producer

    publisher = property(_get_publisher, _set_publisher)  # XXX compat
| |
# Added Fortran compiler support to config. Currently useful only for
# try_compile call. try_run works but is untested for most of Fortran
# compilers (they must define linker_exe first).
# Pearu Peterson
from __future__ import division, absolute_import, print_function
import os, signal
import warnings
import sys
from distutils.command.config import config as old_config
from distutils.command.config import LANG_EXT
from distutils import log
from distutils.file_util import copy_file
from distutils.ccompiler import CompileError, LinkError
import distutils
from numpy.distutils.exec_command import exec_command
from numpy.distutils.mingw32ccompiler import generate_manifest
from numpy.distutils.command.autodist import (check_gcc_function_attribute,
check_gcc_variable_attribute,
check_inline,
check_restrict,
check_compiler_gcc4)
from numpy.distutils.compat import get_exception
# Register the Fortran "languages" with distutils' config machinery so that
# generated test sources get the proper file extensions.
LANG_EXT['f77'] = '.f'
LANG_EXT['f90'] = '.f90'
class config(old_config):
    """distutils ``config`` command extended with Fortran compiler support.

    Adds an ``--fcompiler`` user option, swaps in the Fortran compiler for
    'f77'/'f90' checks, and works around MSVC/MinGW linking peculiarities.
    """

    old_config.user_options += [
        ('fcompiler=', None, "specify the Fortran compiler type"),
    ]

    def initialize_options(self):
        self.fcompiler = None
        old_config.initialize_options(self)

    def _check_compiler (self):
        # Set up the C compiler first, then (lazily) a matching Fortran one.
        old_config._check_compiler(self)
        from numpy.distutils.fcompiler import FCompiler, new_fcompiler
        if sys.platform == 'win32' and (self.compiler.compiler_type in
                                        ('msvc', 'intelw', 'intelemw')):
            # XXX: hack to circumvent a python 2.6 bug with msvc9compiler:
            # initialize call query_vcvarsall, which throws an IOError, and
            # causes an error along the way without much information. We try to
            # catch it here, hoping it is early enough, and print an helpful
            # message instead of Error: None.
            if not self.compiler.initialized:
                try:
                    self.compiler.initialize()
                except IOError:
                    e = get_exception()
                    msg = """\
Could not initialize compiler instance: do you have Visual Studio
installed? If you are trying to build with MinGW, please use "python setup.py
build -c mingw32" instead. If you have Visual Studio installed, check it is
correctly installed, and the right version (VS 2008 for python 2.6, 2.7 and 3.2,
VS 2010 for >= 3.3).
Original exception was: %s, and the Compiler class was %s
============================================================================""" \
                        % (e, self.compiler.__class__.__name__)
                    print ("""\
============================================================================""")
                    raise distutils.errors.DistutilsPlatformError(msg)
            # After MSVC is initialized, add an explicit /MANIFEST to linker
            # flags. See issues gh-4245 and gh-4101 for details. Also
            # relevant are issues 4431 and 16296 on the Python bug tracker.
            from distutils import msvc9compiler
            if msvc9compiler.get_build_version() >= 10:
                for ldflags in [self.compiler.ldflags_shared,
                                self.compiler.ldflags_shared_debug]:
                    if '/MANIFEST' not in ldflags:
                        ldflags.append('/MANIFEST')
        if not isinstance(self.fcompiler, FCompiler):
            self.fcompiler = new_fcompiler(compiler=self.fcompiler,
                                           dry_run=self.dry_run, force=1,
                                           c_compiler=self.compiler)
            if self.fcompiler is not None:
                self.fcompiler.customize(self.distribution)
                if self.fcompiler.get_version():
                    self.fcompiler.customize_cmd(self)
                    self.fcompiler.show_customization()

    def _wrap_method(self, mth, lang, args):
        # Run the distutils config method *mth*, temporarily swapping in the
        # Fortran compiler when *lang* is a Fortran dialect.
        from distutils.ccompiler import CompileError
        from distutils.errors import DistutilsExecError
        save_compiler = self.compiler
        if lang in ['f77', 'f90']:
            self.compiler = self.fcompiler
        try:
            ret = mth(*((self,)+args))
        except (DistutilsExecError, CompileError):
            # NOTE(review): msg is computed but never used; the original
            # exception text is discarded by the bare CompileError below.
            msg = str(get_exception())
            self.compiler = save_compiler
            raise CompileError
        self.compiler = save_compiler
        return ret

    def _compile (self, body, headers, include_dirs, lang):
        return self._wrap_method(old_config._compile, lang,
                                 (body, headers, include_dirs, lang))

    def _link (self, body,
               headers, include_dirs,
               libraries, library_dirs, lang):
        if self.compiler.compiler_type=='msvc':
            # Work on copies: the lists are mutated below.
            libraries = (libraries or [])[:]
            library_dirs = (library_dirs or [])[:]
            if lang in ['f77', 'f90']:
                lang = 'c' # always use system linker when using MSVC compiler
                if self.fcompiler:
                    for d in self.fcompiler.library_dirs or []:
                        # correct path when compiling in Cygwin but with
                        # normal Win Python
                        if d.startswith('/usr/lib'):
                            s, o = exec_command(['cygpath', '-w', d],
                                                use_tee=False)
                            if not s: d = o
                        library_dirs.append(d)
                    for libname in self.fcompiler.libraries or []:
                        if libname not in libraries:
                            libraries.append(libname)
            for libname in libraries:
                if libname.startswith('msvc'): continue
                fileexists = False
                for libdir in library_dirs or []:
                    libfile = os.path.join(libdir, '%s.lib' % (libname))
                    if os.path.isfile(libfile):
                        fileexists = True
                        break
                if fileexists: continue
                # make g77-compiled static libs available to MSVC
                fileexists = False
                for libdir in library_dirs:
                    libfile = os.path.join(libdir, 'lib%s.a' % (libname))
                    if os.path.isfile(libfile):
                        # copy libname.a file to name.lib so that MSVC linker
                        # can find it
                        libfile2 = os.path.join(libdir, '%s.lib' % (libname))
                        copy_file(libfile, libfile2)
                        self.temp_files.append(libfile2)
                        fileexists = True
                        break
                if fileexists: continue
                log.warn('could not find library %r in directories %s' \
                         % (libname, library_dirs))
        elif self.compiler.compiler_type == 'mingw32':
            generate_manifest(self)
        return self._wrap_method(old_config._link, lang,
                                 (body, headers, include_dirs,
                                  libraries, library_dirs, lang))

    def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'):
        """Return True if *header* can be included by itself."""
        self._check_compiler()
        return self.try_compile(
                "/* we need a dummy line to make distutils happy */",
                [header], include_dirs)

    def check_decl(self, symbol,
                   headers=None, include_dirs=None):
        """Return True if *symbol* is declared (macro or identifier)."""
        self._check_compiler()
        body = """
int main(void)
{
#ifndef %s
    (void) %s;
#endif
    ;
    return 0;
}""" % (symbol, symbol)

        return self.try_compile(body, headers, include_dirs)

    def check_macro_true(self, symbol,
                         headers=None, include_dirs=None):
        """Return True if the macro *symbol* is defined and evaluates true."""
        self._check_compiler()
        body = """
int main(void)
{
#if %s
#else
#error false or undefined macro
#endif
    ;
    return 0;
}""" % (symbol,)

        return self.try_compile(body, headers, include_dirs)

    def check_type(self, type_name, headers=None, include_dirs=None,
                   library_dirs=None):
        """Check type availability. Return True if the type can be compiled,
        False otherwise"""
        self._check_compiler()

        # First check the type can be compiled
        body = r"""
int main(void) {
    if ((%(name)s *) 0)
        return 0;
    if (sizeof (%(name)s))
        return 0;
}
""" % {'name': type_name}

        st = False
        try:
            try:
                # NOTE(review): body was already fully formatted above; this
                # second "% {'type': ...}" is a no-op on a specifier-free
                # string and kept only for historical reasons.
                self._compile(body % {'type': type_name},
                              headers, include_dirs, 'c')
                st = True
            except distutils.errors.CompileError:
                st = False
        finally:
            self._clean()
        return st

    def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None):
        """Check size of a given type."""
        self._check_compiler()

        # First check the type can be compiled
        body = r"""
typedef %(type)s npy_check_sizeof_type;
int main (void)
{
    static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)];
    test_array [0] = 0
    ;
    return 0;
}
"""
        self._compile(body % {'type': type_name},
                      headers, include_dirs, 'c')
        self._clean()

        if expected:
            # Try each candidate size first; the static array compiles only
            # when sizeof(type) == size.
            body = r"""
typedef %(type)s npy_check_sizeof_type;
int main (void)
{
    static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)];
    test_array [0] = 0
    ;
    return 0;
}
"""
            for size in expected:
                try:
                    self._compile(body % {'type': type_name, 'size': size},
                                  headers, include_dirs, 'c')
                    self._clean()
                    return size
                except CompileError:
                    pass

        # this fails to *compile* if size > sizeof(type)
        body = r"""
typedef %(type)s npy_check_sizeof_type;
int main (void)
{
    static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)];
    test_array [0] = 0
    ;
    return 0;
}
"""
        # The principle is simple: we first find low and high bounds of size
        # for the type, where low/high are looked up on a log scale. Then, we
        # do a binary search to find the exact size between low and high
        low = 0
        mid = 0
        while True:
            try:
                self._compile(body % {'type': type_name, 'size': mid},
                              headers, include_dirs, 'c')
                self._clean()
                break
            except CompileError:
                #log.info("failure to test for bound %d" % mid)
                low = mid + 1
                mid = 2 * mid + 1

        high = mid
        # Binary search:
        while low != high:
            mid = (high - low) // 2 + low
            try:
                self._compile(body % {'type': type_name, 'size': mid},
                              headers, include_dirs, 'c')
                self._clean()
                high = mid
            except CompileError:
                low = mid + 1
        return low

    def check_func(self, func,
                   headers=None, include_dirs=None,
                   libraries=None, library_dirs=None,
                   decl=False, call=False, call_args=None):
        """Return True if *func* can be declared/called and linked."""
        # clean up distutils's config a bit: add void to main(), and
        # return a value.
        self._check_compiler()
        body = []
        if decl:
            if type(decl) == str:
                body.append(decl)
            else:
                body.append("int %s (void);" % func)
        # Handle MSVC intrinsics: force MS compiler to make a function call.
        # Useful to test for some functions when built with optimization on, to
        # avoid build error because the intrinsic and our 'fake' test
        # declaration do not match.
        body.append("#ifdef _MSC_VER")
        body.append("#pragma function(%s)" % func)
        body.append("#endif")
        body.append("int main (void) {")
        if call:
            if call_args is None:
                call_args = ''
            body.append("  %s(%s);" % (func, call_args))
        else:
            body.append("  %s;" % func)
        body.append("  return 0;")
        body.append("}")
        body = '\n'.join(body) + "\n"

        return self.try_link(body, headers, include_dirs,
                             libraries, library_dirs)

    def check_funcs_once(self, funcs,
                         headers=None, include_dirs=None,
                         libraries=None, library_dirs=None,
                         decl=False, call=False, call_args=None):
        """Check a list of functions at once.

        This is useful to speed up things, since all the functions in the funcs
        list will be put in one compilation unit.

        Arguments
        ---------
        funcs : seq
            list of functions to test
        include_dirs : seq
            list of header paths
        libraries : seq
            list of libraries to link the code snippet to
        library_dirs : seq
            list of library paths
        decl : dict
            for every (key, value), the declaration in the value will be
            used for function in key. If a function is not in the
            dictionary, no declaration will be used.
        call : dict
            for every item (f, value), if the value is True, a call will be
            done to the function f.
        """
        self._check_compiler()
        body = []
        if decl:
            for f, v in decl.items():
                if v:
                    body.append("int %s (void);" % f)

        # Handle MS intrinsics. See check_func for more info.
        body.append("#ifdef _MSC_VER")
        for func in funcs:
            body.append("#pragma function(%s)" % func)
        body.append("#endif")

        body.append("int main (void) {")
        if call:
            for f in funcs:
                if f in call and call[f]:
                    if not (call_args and f in call_args and call_args[f]):
                        args = ''
                    else:
                        args = call_args[f]
                    body.append("  %s(%s);" % (f, args))
                else:
                    body.append("  %s;" % f)
        else:
            for f in funcs:
                body.append("  %s;" % f)
        body.append("  return 0;")
        body.append("}")
        body = '\n'.join(body) + "\n"

        return self.try_link(body, headers, include_dirs,
                             libraries, library_dirs)

    def check_inline(self):
        """Return the inline keyword recognized by the compiler, empty string
        otherwise."""
        return check_inline(self)

    def check_restrict(self):
        """Return the restrict keyword recognized by the compiler, empty string
        otherwise."""
        return check_restrict(self)

    def check_compiler_gcc4(self):
        """Return True if the C compiler is gcc >= 4."""
        return check_compiler_gcc4(self)

    def check_gcc_function_attribute(self, attribute, name):
        # Delegates to autodist; True when the attribute compiles cleanly.
        return check_gcc_function_attribute(self, attribute, name)

    def check_gcc_variable_attribute(self, attribute):
        # Delegates to autodist; True when the attribute compiles cleanly.
        return check_gcc_variable_attribute(self, attribute)

    def get_output(self, body, headers=None, include_dirs=None,
                   libraries=None, library_dirs=None,
                   lang="c", use_tee=None):
        """Try to compile, link to an executable, and run a program
        built from 'body' and 'headers'. Returns the exit status code
        of the program and its output.
        """
        # 2008-11-16, RemoveMe
        warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \
                      "Usage of get_output is deprecated: please do not \n" \
                      "use it anymore, and avoid configuration checks \n" \
                      "involving running executable on the target machine.\n" \
                      "+++++++++++++++++++++++++++++++++++++++++++++++++\n",
                      DeprecationWarning, stacklevel=2)
        from distutils.ccompiler import CompileError, LinkError
        self._check_compiler()
        exitcode, output = 255, ''
        try:
            grabber = GrabStdout()
            try:
                src, obj, exe = self._link(body, headers, include_dirs,
                                           libraries, library_dirs, lang)
                grabber.restore()
            except:
                # Bare except is deliberate: keep the captured output, put
                # stdout back, then re-raise whatever went wrong.
                output = grabber.data
                grabber.restore()
                raise
            exe = os.path.join('.', exe)
            exitstatus, output = exec_command(exe, execute_in='.',
                                              use_tee=use_tee)
            if hasattr(os, 'WEXITSTATUS'):
                # POSIX: decode the wait()-style status word.
                exitcode = os.WEXITSTATUS(exitstatus)
                if os.WIFSIGNALED(exitstatus):
                    sig = os.WTERMSIG(exitstatus)
                    log.error('subprocess exited with signal %d' % (sig,))
                    if sig == signal.SIGINT:
                        # control-C
                        raise KeyboardInterrupt
            else:
                exitcode = exitstatus
            log.info("success!")
        except (CompileError, LinkError):
            log.info("failure.")
        self._clean()
        return exitcode, output
class GrabStdout(object):
    """Capture everything written to ``sys.stdout`` while still forwarding it.

    Installs itself as ``sys.stdout`` on construction; the captured text
    accumulates in ``self.data``.  Call :meth:`restore` to reinstall the
    original stream.
    """

    def __init__(self):
        self.sys_stdout = sys.stdout
        self.data = ''
        sys.stdout = self

    def write(self, data):
        # Tee: forward to the real stdout and keep a copy.
        self.sys_stdout.write(data)
        self.data += data

    def flush(self):
        self.sys_stdout.flush()

    def restore(self):
        sys.stdout = self.sys_stdout
| |
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import abc
import os
import errno
import libcloud.security
from os.path import expanduser
from IPy import IP
from schemas import PROVIDER_CONFIG_SCHEMA
from cloudify_cli.provider_common import BaseProviderClass
from cloudify_cli.logger import get_logger
# Accept endpoints with self-signed/unverifiable certificates.
libcloud.security.VERIFY_SSL_CERT = False

# Config key: whether a missing resource should be created on demand.
CREATE_IF_MISSING = 'create_if_missing'
# Module-level verbosity flag, copied onto driver instances at init time.
verbose_output = False
class ProviderManager(BaseProviderClass):
    """Cloudify provider entry point backed by Apache libcloud.

    Implements validate/provision/teardown on top of ``BaseProviderClass``
    by delegating to a provider-specific driver built via :mod:`mapper`.
    """

    schema = PROVIDER_CONFIG_SCHEMA

    # Config paths whose values the base class rewrites (presumably resource
    # naming/prefixing -- handled outside this file; confirm in the base class).
    CONFIG_NAMES_TO_MODIFY = (
        ('networking', 'agents_security_group'),
        ('networking', 'management_security_group'),
        ('compute', 'management_server', 'instance'),
        ('compute', 'management_server', 'management_keypair'),
        ('compute', 'agent_servers', 'agents_keypair'),
    )

    # Config paths holding private-key file paths, rewritten by the base class.
    CONFIG_FILES_PATHS_TO_MODIFY = (
        ('compute', 'agent_servers', 'private_key_path'),
        ('compute', 'management_server', 'management_keypair',
         'private_key_path'),
    )

    def __init__(self,
                 provider_config=None,
                 is_verbose_output=False):
        super(ProviderManager, self).\
            __init__(provider_config,
                     is_verbose_output)
        # Normalize the provider name (dashes -> underscores) before handing
        # it to the mapper.
        provider_name = provider_config['connection']['cloud_provider_name']
        provider_name = transfer_cloud_provider_name(provider_name)
        # Imported here (not at module level) -- presumably to avoid an
        # import cycle; confirm before hoisting.
        from mapper import Mapper
        self.mapper = Mapper(provider_name)

    def validate(self):
        """Run provider-specific resource validation.

        Returns the (possibly empty) dict of validation errors collected by
        the mapper-generated validator.
        """
        connection_conf = self.provider_config['connection']
        if not self.mapper.is_initialized():
            raise RuntimeError(
                'Error during trying to create context'
                ' for a cloud provider: {0}'
                .format(connection_conf['cloud_provider_name'])
            )
        connector = LibcloudConnector(connection_conf)
        validation_errors = {}
        validator = self.mapper.generate_validator(
            connector, self.provider_config, validation_errors, self.logger)
        validator.validate()
        # Conditional expression used purely for its logging side effect.
        self.logger.error('resource validation failed!') if validation_errors \
            else self.logger.info('resources validated successfully')
        return validation_errors

    def provision(self):
        """Create the management topology; returns the 5-tuple expected by
        the CLI: (public_ip, private_ip, ssh_key, ssh_user, provider_context)."""
        driver = self.get_driver(self.provider_config)
        public_ip, private_ip, ssh_key, ssh_user, provider_context = \
            driver.create_topology()
        driver.copy_files_to_manager(public_ip, ssh_key, ssh_user)
        return public_ip, private_ip, ssh_key, ssh_user, provider_context

    def teardown(self, provider_context, ignore_validation=False):
        """Delete all resources recorded in provider_context['resources']."""
        driver = self.get_driver(self.provider_config)
        driver.delete_topology(ignore_validation,
                               provider_context['resources'])

    def get_driver(self, provider_config, provider_context=None):
        """Build the provider-specific topology driver from the mapper."""
        provider_context = provider_context if provider_context else {}
        connector = LibcloudConnector(provider_config['connection'])
        return self.mapper.generate_cosmo_driver(connector,
                                                 provider_context,
                                                 provider_config)
def _format_resource_name(res_type, res_id, res_name=None):
if res_name:
return "{0} - {1} - {2}".format(res_type, res_id, res_name)
else:
return "{0} - {1}".format(res_type, res_id)
class CosmoOnLibcloudDriver(object):
    """Abstract base for provider-specific topology drivers."""

    def __init__(self, provider_config, provider_context):
        self.config = provider_config
        self.provider_context = provider_context
        # Snapshot the module-level verbosity flag onto the instance.
        global verbose_output
        self.verbose_output = verbose_output
        self.logger = get_logger()

    @abc.abstractmethod
    def create_topology(self):
        # Must return (public_ip, private_ip, ssh_key, ssh_user,
        # provider_context) -- see ProviderManager.provision().
        return

    @abc.abstractmethod
    def _delete_resources(self, resources):
        # Must return three lists of resource-data dicts:
        # (deleted, not_found, failed_to_delete).
        return

    def delete_topology(self, ignore_validation, resources):
        """Delete all *resources* and log a categorized summary.

        NOTE(review): ignore_validation is accepted but unused in this body
        -- presumably consumed by subclass overrides; confirm.
        """
        deleted_resources, not_found_resources, failed_to_delete_resources =\
            self._delete_resources(resources)

        def format_resources_data_for_print(resources_data):
            # NOTE(review): arguments are passed to _format_resource_name as
            # (name-or-ip, type, id) although its parameters are named
            # (res_type, res_id, res_name) -- confirm the intended order.
            return '\t'.join(['{0}\n'.format(
                _format_resource_name(
                    resource_data['name'] if 'name' in resource_data else
                    resource_data['ip'],
                    resource_data['type'],
                    resource_data['id'])) for resource_data in resources_data])

        deleted_resources_print = \
            'Successfully deleted the following resources:\n\t{0}\n' \
            .format(format_resources_data_for_print(deleted_resources))
        not_found_resources_print = \
            "The following resources weren't found:\n\t{0}\n" \
            .format(format_resources_data_for_print(not_found_resources))
        failed_to_delete_resources_print = \
            'Failed to delete the following resources:\n\t{0}' \
            .format(format_resources_data_for_print(
                failed_to_delete_resources))
        # Only non-empty categories are included in the final message.
        self.logger.info(
            'Finished deleting topology;\n'
            '{0}{1}{2}'
            .format(
                deleted_resources_print if deleted_resources else '',
                not_found_resources_print if not_found_resources else '',
                failed_to_delete_resources_print if
                failed_to_delete_resources else ''))
class LibcloudConnector(object):
    """Opens (and holds) a libcloud driver for the configured provider."""

    def __init__(self, connection_config):
        self.connection_config = connection_config
        # Normalize the provider name (dashes -> underscores) for the mapper.
        name = transfer_cloud_provider_name(
            self.connection_config['cloud_provider_name'])
        from mapper import Mapper
        self.mapper = Mapper(name)
        self.driver = self.mapper.connect(self.connection_config)

    def get_driver(self):
        return self.driver
class LibcloudValidator(object):
    """Base class for provider-specific pre-bootstrap validators.

    Subclasses implement :meth:`validate`, recording problems into the
    shared ``validation_errors`` mapping.
    """

    def __init__(self, provider_config, validation_errors, lgr, **kwargs):
        self.provider_config = provider_config
        self.validation_errors = validation_errors
        self.lgr = lgr

    @abc.abstractmethod
    def validate(self):
        return

    def validate_cidr_syntax(self, cidr):
        """Return True when *cidr* parses as a valid IP/CIDR string."""
        try:
            IP(cidr)
        except ValueError:
            return False
        return True
class BaseController(object):
    """Common create-or-reuse logic for a single libcloud resource kind.

    Subclasses set a ``WHAT`` class attribute naming the resource kind and
    implement the abstract hooks below.
    """

    def __init__(self, connector, **kwargs):
        self.driver = connector.get_driver()
        self.logger = get_logger()

    @abc.abstractmethod
    def _ensure_exist(self, name):
        # Must return (resource_id, resource); resource is falsy when absent.
        return

    @abc.abstractmethod
    def _create(self, name, **kwargs):
        # Must return (resource_id, resource) for the newly created resource.
        return

    @abc.abstractmethod
    def kill(self, item):
        return

    @abc.abstractmethod
    def get_by_id(self, ident):
        return

    def _create_or_ensure_exists(self, config, name, **kwargs):
        """Reuse *name* when present, otherwise create it.

        Honors the create_if_missing flag in *config*; returns
        (res_id, resource, created).
        """
        res_id, result = self._ensure_exist(name)
        if result:
            created = False
        else:
            # Creation is refused only when create_if_missing is present
            # AND explicitly falsy.
            if CREATE_IF_MISSING in config and not config[CREATE_IF_MISSING]:
                raise RuntimeError("{0} '{1}' is not configured to"
                                   " create_if_missing but does not"
                                   " exist."
                                   .format(self.__class__.WHAT, name))
            res_id, result = self._create(name, **kwargs)
            created = True
        return res_id, result, created

    def create_or_ensure_exists_log_resources(self, config, name, resources,
                                              resource_name, **kwargs):
        """Like _create_or_ensure_exists, additionally recording the result
        under *resource_name* in the *resources* mapping (for teardown)."""
        res_id, result, created =\
            self._create_or_ensure_exists(config, name, **kwargs)
        resources[resource_name] = {
            'id': str(res_id),
            'type': self.__class__.WHAT,
            'name': name,
            'created': created
        }
        return result, created
class LibcloudKeypairController(BaseController):
    """Controller for SSH key-pair resources."""

    WHAT = "key_pair"

    def _mkdir(self, path):
        """Create *path* (expanding ``~``), tolerating an existing directory.

        Any OSError other than EEXIST-on-an-existing-directory is re-raised.
        """
        path = expanduser(path)
        try:
            self.logger.debug('creating dir {0}'
                              .format(path))
            os.makedirs(path)
        except OSError as exc:
            # 'except ... as' replaces the Python-2-only 'except E, v' form;
            # it is valid on Python 2.6+ and required on Python 3.
            if exc.errno == errno.EEXIST and os.path.isdir(path):
                return
            raise
class LibcloudSGController(BaseController):
    """Controller for security-group resources."""
    WHAT = "security_group"
class LibcloudFloatingIpController(BaseController):
    """Controller for floating-IP resources."""
    WHAT = "floating_ip"
class LibcloudServerController(BaseController):
    """Controller for server (compute instance) resources."""
    WHAT = "server"
def transfer_cloud_provider_name(provider_name):
    """Normalize a provider name to its module form (dashes -> underscores)."""
    return '_'.join(provider_name.split('-'))
| |
"""
.. module:: request.forms.
:synopsis: Defines forms used to create FOIL requests.
"""
from datetime import datetime
from flask_login import current_user
from flask_wtf import Form
from flask_wtf.file import FileField
from wtforms import (
StringField,
SelectField,
TextAreaField,
SubmitField,
DateTimeField,
SelectMultipleField,
)
from wtforms.validators import Email, Length, InputRequired
from sqlalchemy import or_, asc
from app.agency.api.utils import get_active_users_as_choices
from app.constants import (
CATEGORIES,
STATES,
submission_methods,
determination_type,
response_type,
)
from app.lib.db_utils import get_agency_choices
from app.models import Reasons, LetterTemplates, EnvelopeTemplates, CustomRequestForms
from app.lib.recaptcha_utils import Recaptcha3Field
class PublicUserRequestForm(Form):
    """
    Form for public users to create a new FOIL request.
    For a public user, the required fields are:

    # Request information
    agency: agency selected for the request
    title: name or title of the request
    description: detailed description of the request
    """

    # Request Information
    request_category = SelectField("Category (optional)", choices=CATEGORIES)
    # Choices are populated per-instance in __init__ (see below).
    request_agency = SelectField("Agency (required)", choices=None)
    request_title = StringField("Request Title (required)")
    # NOTE(review): request_type starts with empty choices -- presumably
    # filled client-side or by the view once an agency is chosen; confirm.
    request_type = SelectField("Request Type (required)", choices=[])
    request_description = TextAreaField("Request Description (required)")

    # File Upload
    request_file = FileField("Upload File (optional, must be less than 20 Mb)")

    recaptcha = Recaptcha3Field(action="TestAction", execute_on_load=True)

    # Submit Button
    submit = SubmitField("Submit Request")

    def __init__(self):
        super(PublicUserRequestForm, self).__init__()
        # Populate the agency dropdown and prepend a blank default option.
        self.request_agency.choices = get_agency_choices()
        self.request_agency.choices.insert(0, ("", ""))
class AgencyUserRequestForm(Form):
    """
    Form for agency users to create a new FOIL request.
    For an agency user, the required fields are:

    # Request Information
    agency: agency selected for the request
    title: name or title of the request
    description: detailed description of the request
    request_date: date the request was made
    method_received: format the request was received

    # Personal Information
    first_name: first name of the requester
    last_name: last name of the requester

    # Contact Information (at least one form on contact is required)
    email: requester's email address
    phone: requester's phone number
    fax: requester's fax number
    address, city, state, zip: requester's address
    """

    # Request Information
    request_agency = SelectField("Agency (required)", choices=None)
    request_type = SelectField("Request Type (required)", choices=[])
    request_title = StringField("Request Title (required)")
    request_description = TextAreaField("Request Description (required)")
    # default is the callable datetime.today, so each instance gets "now".
    request_date = DateTimeField(
        "Date (required)", format="%m/%d/%Y", default=datetime.today
    )

    # Personal Information
    # TODO: when refactoring these classes, include length and other validators
    first_name = StringField("First Name (required)")
    last_name = StringField("Last Name (required)")
    user_title = StringField("Title")
    user_organization = StringField("Organization")

    # Contact Information
    email = StringField("Email")
    phone = StringField("Phone")
    fax = StringField("Fax")
    address = StringField("Address Line 1")
    address_two = StringField("Address Line 2")
    city = StringField("City")
    state = SelectField("State / U.S. Territory", choices=STATES, default="NY")
    zipcode = StringField("Zip")

    # Method Received
    method_received = SelectField(
        "Format Received (required)", choices=submission_methods.AS_CHOICES
    )

    # File Upload
    request_file = FileField("Upload File (optional, must be less than 20 Mb)")

    recaptcha = Recaptcha3Field(action="TestAction", execute_on_load=True)

    # Submit Button
    submit = SubmitField("Submit Request")

    def __init__(self):
        super(AgencyUserRequestForm, self).__init__()
        # NOTE(review): choices are populated only for users assigned to more
        # than one agency; for single-agency users request_agency.choices
        # remains None -- confirm the template/handler accounts for this.
        if len(current_user.agencies.all()) > 1:
            self.request_agency.choices = current_user.agencies_for_forms()
class AnonymousRequestForm(Form):
    """
    Form for anonymous users to create a new FOIL request.
    For an anonymous user, the required fields are:

    # Request Information
    agency: agency selected for the request
    title: name or title of the request
    description: detailed description of the request

    # Personal Information
    first_name: first name of the requester
    last_name: last name of the requester

    # Contact Information (at least one form of contact is required)
    email: requester's email address
    phone: requester's phone number
    fax: requester's fax number
    address, city, state, zip: requester's address
    """

    # Request Information
    request_category = SelectField("Category (optional)", choices=CATEGORIES)
    # choices are populated in __init__ from the full agency list
    request_agency = SelectField("Agency (required)", choices=None)
    request_type = SelectField("Request Type (required)", choices=[])
    request_title = StringField("Request Title (required)")
    request_description = TextAreaField("Request Description (required)")

    # Personal Information
    first_name = StringField("First Name (required)")
    last_name = StringField("Last Name (required)")
    user_title = StringField("Title")
    user_organization = StringField("Organization")

    # Contact Information
    email = StringField("Email")
    phone = StringField("Phone")
    fax = StringField("Fax")
    address = StringField("Address Line 1")
    address_two = StringField("Address Line 2")
    city = StringField("City")
    state = SelectField("State / U.S. Territory", choices=STATES, default="NY")
    zipcode = StringField("Zip")

    # File Upload
    request_file = FileField("Upload File (optional, must be less than 20 Mb)")
    # Anonymous submissions are protected by reCAPTCHA v3.
    recaptcha = Recaptcha3Field(action="TestAction", execute_on_load=True)
    submit = SubmitField("Submit Request")

    def __init__(self):
        super(AnonymousRequestForm, self).__init__()
        # Anonymous users may pick any agency; blank placeholder comes first.
        self.request_agency.choices = get_agency_choices()
        self.request_agency.choices.insert(0, ("", ""))
class EditRequesterForm(Form):
    """Form for editing a requester's contact information.

    Every field is pre-populated from the given requester record; missing
    database values are rendered as empty strings.
    """

    email = StringField("Email")
    phone = StringField("Phone Number")
    fax = StringField("Fax Number")
    address_one = StringField("Address Line 1")
    address_two = StringField("Address Line 2")
    city = StringField("City")
    state = SelectField("State / U.S. Territory", choices=STATES)
    zipcode = StringField("Zip Code")
    title = StringField("Title")
    organization = StringField("Organization")

    def __init__(self, requester):
        """
        :type requester: app.models.Users
        """
        super(EditRequesterForm, self).__init__()
        # Pre-fill the simple profile attributes, substituting "" for
        # missing values.
        self.email.data = requester.email or ""
        self.phone.data = requester.phone_number or ""
        self.fax.data = requester.fax_number or ""
        self.title.data = requester.title or ""
        self.organization.data = requester.organization or ""
        mailing_address = requester.mailing_address
        if mailing_address is not None:
            # Pre-fill the address fields from the mailing-address mapping.
            for field, key in (
                (self.address_one, "address_one"),
                (self.address_two, "address_two"),
                (self.city, "city"),
                (self.state, "state"),
                (self.zipcode, "zip"),
            ):
                field.data = mailing_address.get(key) or ""
class DeterminationForm(Form):
    """Base form for determinations on a request (closing, denial, re-opening).

    Populates the subclass-provided `reasons` field with agency-specific
    reasons followed by the global defaults, filtered by the subclass's
    `ultimate_determination_type`.
    """

    def __init__(self, agency_ein):
        super(DeterminationForm, self).__init__()
        # Agency-specific reasons, one list per determination type.
        agency_closings = [
            (reason.id, reason.title)
            for reason in Reasons.query.filter(
                Reasons.type == determination_type.CLOSING,
                Reasons.agency_ein == agency_ein,
            ).order_by(asc(Reasons.id))
        ]
        agency_denials = [
            (reason.id, reason.title)
            for reason in Reasons.query.filter(
                Reasons.type == determination_type.DENIAL,
                Reasons.agency_ein == agency_ein,
            ).order_by(asc(Reasons.id))
        ]
        agency_reopenings = [
            (reason.id, reason.title)
            for reason in Reasons.query.filter(
                Reasons.type == determination_type.REOPENING,
                Reasons.agency_ein == agency_ein,
            ).order_by(asc(Reasons.id))
        ]
        # Global default reasons: rows with no agency_ein.
        # (`== None` compiles to IS NULL under SQLAlchemy.)
        default_closings = [
            (reason.id, reason.title)
            for reason in Reasons.query.filter(
                Reasons.type == determination_type.CLOSING, Reasons.agency_ein == None
            ).order_by(asc(Reasons.id))
        ]
        default_denials = [
            (reason.id, reason.title)
            for reason in Reasons.query.filter(
                Reasons.type == determination_type.DENIAL, Reasons.agency_ein == None
            ).order_by(asc(Reasons.id))
        ]
        default_reopenings = [
            (reason.id, reason.title)
            for reason in Reasons.query.filter(
                Reasons.type == determination_type.REOPENING, Reasons.agency_ein == None
            ).order_by(asc(Reasons.id))
        ]
        # Agency-specific reasons are listed ahead of the defaults.
        if (
            determination_type.CLOSING in self.ultimate_determination_type
            and determination_type.DENIAL in self.ultimate_determination_type
        ):
            self.reasons.choices = (
                agency_closings + agency_denials + default_closings + default_denials
            )
        elif determination_type.DENIAL in self.ultimate_determination_type:
            self.reasons.choices = agency_denials + default_denials
        elif determination_type.REOPENING in self.ultimate_determination_type:
            self.reasons.choices = agency_reopenings + default_reopenings

    @property
    def reasons(self):
        """ SelectMultipleField or SelectField """
        # Subclasses override this with an actual field.
        raise NotImplementedError

    @property
    def ultimate_determination_type(self):
        """ Closing or Denial """
        # Subclasses override this with a list of determination types.
        raise NotImplementedError
class DenyRequestForm(DeterminationForm):
    """Form for denying a request; offers denial reasons only."""

    reasons = SelectMultipleField("Reasons for Denial (Choose 1 or more)")
    ultimate_determination_type = [determination_type.DENIAL]
class CloseRequestForm(DeterminationForm):
    """Form for closing a request; offers both closing and denial reasons."""

    reasons = SelectMultipleField("Reasons for Closing (Choose 1 or more)")
    ultimate_determination_type = [
        determination_type.CLOSING,
        determination_type.DENIAL,
    ]
class ReopenRequestForm(DeterminationForm):
    """Form for re-opening a request; a single re-opening reason is chosen."""

    reasons = SelectField("Reason for Re-Opening")
    ultimate_determination_type = [determination_type.REOPENING]
class GenerateEnvelopeForm(Form):
    """Form for generating a mailing envelope for a request.

    Template choices are limited to the given agency's envelope templates,
    and all recipient fields are pre-populated from the requester record.
    """

    template = SelectField("Template")
    recipient_name = StringField("Recipient Name")
    organization = StringField("Organization")
    address_one = StringField("Address Line One")
    address_two = StringField("Address Line Two")
    city = StringField("City")
    state = StringField("State")
    zipcode = StringField("Zip Code")

    def __init__(self, agency_ein, requester):
        """
        :type requester: app.models.Users
        """
        super(GenerateEnvelopeForm, self).__init__()
        # Offer only this agency's envelope templates.
        self.template.choices = [
            (envelope_template.id, envelope_template.title)
            for envelope_template in EnvelopeTemplates.query.filter_by(
                agency_ein=agency_ein
            )
        ]
        self.recipient_name.data = requester.name or ""
        self.organization.data = requester.organization or ""
        mailing_address = requester.mailing_address
        if mailing_address is not None:
            # Pre-fill the address fields from the mailing-address mapping.
            for field, key in (
                (self.address_one, "address_one"),
                (self.address_two, "address_two"),
                (self.city, "city"),
                (self.state, "state"),
                (self.zipcode, "zip"),
            ):
                field.data = mailing_address.get(key) or ""
class GenerateLetterForm(Form):
    """Base form for generating letters from templates.

    Populates the subclass-provided `letter_templates` field with templates
    of the subclass's `letter_type` that belong either to the given agency
    or to no agency (global defaults), with a blank placeholder first.
    """

    def __init__(self, agency_ein):
        super(GenerateLetterForm, self).__init__()
        self.letter_templates.choices = [
            (letter.id, letter.title)
            for letter in LetterTemplates.query.filter(
                LetterTemplates.type_.in_(self.letter_type),
                or_(
                    LetterTemplates.agency_ein == agency_ein,
                    LetterTemplates.agency_ein == None,  # noqa: E711 -- IS NULL
                ),
            )
        ]
        # Blank placeholder so no template is pre-selected.
        self.letter_templates.choices.insert(0, ("", ""))

    @property
    def letter_templates(self):
        """ SelectField """
        # Subclasses override this with an actual field.
        raise NotImplementedError

    @property
    def letter_type(self):
        """ Acknowledgement, Extension, """
        # Subclasses override this with a list of letter/determination types.
        raise NotImplementedError
class GenerateAcknowledgmentLetterForm(GenerateLetterForm):
    """Form for generating an acknowledgment letter."""

    letter_templates = SelectField("Letter Templates")
    letter_type = [determination_type.ACKNOWLEDGMENT]
class GenerateDenialLetterForm(GenerateLetterForm):
    """Form for generating a denial letter."""

    letter_templates = SelectField("Letter Templates")
    letter_type = [determination_type.DENIAL]
class GenerateClosingLetterForm(GenerateLetterForm):
    """Form for generating a closing letter.

    Replaces the choices built by GenerateLetterForm with a fixed precedence
    order: agency closings, agency denials, default closings, default
    denials -- blank placeholder first.
    """

    letter_templates = SelectField("Letter Templates")
    letter_type = [determination_type.CLOSING, determination_type.DENIAL]

    def __init__(self, agency_ein):
        super(GenerateClosingLetterForm, self).__init__(agency_ein)
        # Rebuild the template choices in precedence order. An ein of None
        # selects the global defaults (`== None` compiles to IS NULL under
        # SQLAlchemy).
        choices = []
        for type_, ein in (
            (determination_type.CLOSING, agency_ein),
            (determination_type.DENIAL, agency_ein),
            (determination_type.CLOSING, None),
            (determination_type.DENIAL, None),
        ):
            choices.extend(
                (letter.id, letter.title)
                for letter in LetterTemplates.query.filter(
                    LetterTemplates.type_ == type_,
                    LetterTemplates.agency_ein == ein,
                )
            )
        self.letter_templates.choices = [("", "")] + choices
class GenerateExtensionLetterForm(GenerateLetterForm):
    """Form for generating an extension letter."""

    letter_templates = SelectField("Letter Templates")
    letter_type = [determination_type.EXTENSION]
class GenerateReopeningLetterForm(GenerateLetterForm):
    """Form for generating a re-opening letter."""

    letter_templates = SelectField("Letter Templates")
    letter_type = [determination_type.REOPENING]
class GenerateResponseLetterForm(GenerateLetterForm):
    """Form for generating a generic response letter."""

    letter_templates = SelectField("Letter Templates")
    letter_type = [response_type.LETTER]
class SearchRequestsForm(Form):
    """Form for searching/filtering FOIL requests.

    For agency users, the agency filter is reordered so the user's primary
    agency appears first (after "All"), followed by their other agencies;
    the user filter and request-type filter are scoped to the primary
    agency.
    """

    agency_ein = SelectField("Agency")
    agency_user = SelectField("User")
    request_type = SelectField("Request Type", choices=[])
    # category = SelectField('Category', get_categories())

    def __init__(self):
        super(SearchRequestsForm, self).__init__()
        # "All" placeholder first, then every agency.
        self.agency_ein.choices = get_agency_choices()
        self.agency_ein.choices.insert(0, ("", "All"))
        if current_user.is_agency:
            self.agency_ein.default = current_user.default_agency_ein
            # The user's non-primary agencies, sorted by name.
            user_agencies = sorted(
                [
                    (agencies.ein, agencies.name)
                    for agencies in current_user.agencies
                    if agencies.ein != current_user.default_agency_ein
                ],
                key=lambda x: x[1],
            )
            default_agency = current_user.default_agency
            # set default value of agency select field to agency user's primary agency
            self.agency_ein.default = default_agency.ein
            # Move the primary agency to position 1, right after "All".
            self.agency_ein.choices.insert(
                1,
                self.agency_ein.choices.pop(
                    self.agency_ein.choices.index(
                        (default_agency.ein, default_agency.name)
                    )
                ),
            )
            # set secondary agencies to be below the primary
            # NOTE(review): inserting each entry at index 2 reverses the
            # sorted order of `user_agencies` -- confirm intended ordering.
            for agency in user_agencies:
                self.agency_ein.choices.insert(
                    2,
                    self.agency_ein.choices.pop(self.agency_ein.choices.index(agency)),
                )
            # get choices for agency user select field
            if current_user.is_agency_admin():
                # Admins can filter on any active user of their agency.
                self.agency_user.choices = get_active_users_as_choices(
                    current_user.default_agency.ein
                )
            if current_user.is_agency_active() and not current_user.is_agency_admin():
                # Non-admin agency users may only filter on their own requests.
                self.agency_user.choices = [
                    ("", "All"),
                    (current_user.get_id(), "My Requests"),
                ]
                self.agency_user.default = current_user.get_id()
            if default_agency.agency_features["custom_request_forms"]["enabled"]:
                # Offer the agency's custom request forms as request types.
                self.request_type.choices = [
                    (custom_request_form.form_name, custom_request_form.form_name)
                    for custom_request_form in CustomRequestForms.query.filter_by(
                        agency_ein=default_agency.ein
                    ).order_by(asc(CustomRequestForms.category), asc(CustomRequestForms.id)).all()
                ]
                self.request_type.choices.insert(0, ("", "All"))
        # process form for default values
        self.process()
class ContactAgencyForm(Form):
    """Form for contacting the agency about a specific request.

    When the currently logged-in user is the requester, the name, email,
    and subject fields are pre-populated from the requester's profile.
    """

    first_name = StringField(
        u"First Name", validators=[InputRequired(), Length(max=32)]
    )
    last_name = StringField(u"Last Name", validators=[InputRequired(), Length(max=64)])
    email = StringField(
        u"Email", validators=[InputRequired(), Length(max=254), Email()]
    )
    subject = StringField(u"Subject")
    message = TextAreaField(u"Message", validators=[InputRequired(), Length(max=5000)])
    submit = SubmitField(u"Send")

    def __init__(self, request):
        super(ContactAgencyForm, self).__init__()
        if current_user == request.requester:
            self.first_name.data = request.requester.first_name
            self.last_name.data = request.requester.last_name
            # Prefer the dedicated notification email when one is set.
            self.email.data = (
                request.requester.notification_email or request.requester.email
            )
            self.subject.data = "Inquiry about {}".format(request.id)
class TechnicalSupportForm(Form):
    """Form for contacting technical support; protected by reCAPTCHA v3."""

    recaptcha = Recaptcha3Field(action="TestAction", execute_on_load=True)
| |
#This script will download all the lyrics from the given site(s) and get info (BPM, genre, etc) about them
#Save all to a db
from lxml import html
import urllib
import requests
import datetime
import sqlite3
import os.path
import sys
import ast
from random import randint
# Shared message printed whenever a download/scrape attempt fails.
network_error_message = "Network error, try again"
def is_number(num):
    """Return True if `num` can be parsed as an integer, else False.

    The previous version returned int(num) itself, so "0" -- a perfectly
    valid number -- evaluated as falsy; callers only test truthiness, so
    returning a proper bool fixes that while staying compatible.
    """
    try:
        int(num)
        return True
    except (TypeError, ValueError):
        # Not an integer (or not convertible at all).
        return False
def count_letters(word):
    """Return the number of characters in `word`, not counting spaces."""
    return sum(1 for ch in word if ch != " ")
def clear_word(word):
    """Return `word` stripped of punctuation and special characters."""
    punctuation = set('()?![]{}=,;:".-+*/\\|')
    return "".join(ch for ch in word if ch not in punctuation)
# --- Command-line handling -------------------------------------------------
# The script expects exactly two positional arguments: a task and an item,
# e.g. "script.py download hits".
try:
    script, task, item = sys.argv
except:
    # Wrong number of arguments: show usage and quit.
    script = sys.argv[0]
    print "Usage: %s [download|update|analyse] [hits|lyrics]" % (script)
    print "Usage generate: %s generate starting_word" % (script)
    exit()

# --- Database setup --------------------------------------------------------
# Create the schema only on first run (the db file does not exist yet).
is_new_session = not os.path.isfile('lyricsdb.sqlite')
db = sqlite3.connect('lyricsdb.sqlite')
# Two cursors: `c` is the main one; `d` allows nested queries while
# iterating over `c` results.
c = db.cursor()
d = db.cursor()
if is_new_session:
    # hits: one row per charted song (by year/month/chart position)
    c.execute('''CREATE TABLE hits
             (year int, artist text, title text, number int, month int)
    ''')
    # lyrics: full lyric text plus simple per-song statistics
    c.execute('''CREATE TABLE lyrics
             (artist text, title text, lyric text, charsNumber int, versesNumber int, linesNumber int, linesPerVerse int, wordNumbers int)
    ''')
    # assoc: (prev word, word, next word) triples used by 'generate'
    c.execute('''CREATE TABLE assoc
             (artist text, title text, word text, times int, verseNumber int, nextWord text, precWord text)
    ''')
if task == 'download':
    if item == "hits":
        ##import pprint #debug
        #Get the current year
        now = datetime.datetime.now().year
        #Download last 5 years' top songs
        # NOTE(review): the range below actually spans the last 10 years,
        # not 5 -- the comment above looks stale.
        page = {}
        for year in range(now-10,now):
            #Don't download songs we already have
            c.execute('SELECT 1 FROM hits WHERE `year` = ?', (year,))
            if c.fetchone():
                print "We already have hits for year %s" % (year)
                continue
            else:
                try:
                    print "Downloading top10songs from year: %s" % year
                    url = "http://top10songs.com/months-of-%s.html" % year
                    #page.append(year)
                    page[year] = requests.get(url).text
                    #print r.content
                    tree = html.fromstring(page[year])
                    # The table interleaves song title and artist name in
                    # alternating <td class="left"> cells.
                    # NOTE(review): `list` shadows the builtin of the same name.
                    list = tree.xpath('//td[@class="left"]//text()')
                    artists = []
                    songs = []
                    num = 0
                    for element in list:
                        if ( num == 0 ):
                            songs.append(element)
                            num = 1
                        else:
                            artists.append(element)
                            num = 0
                    num = 0
                    for artist in artists:
                        ##print "Artista, canzone, anno: %s, %s, %s" % (artist, songs[num], year)
                        # Entries are listed 10 per month, December first:
                        # translate the flat index into (rank, month).
                        number = num + 1
                        month_counter = 12
                        while number > 10:
                            number -= 10
                            month_counter -= 1
                        to_add = [year,artist,songs[num],number,month_counter]
                        db.execute('INSERT INTO hits VALUES ( ?,?,?,?,? )', to_add)
                        num += 1
                except:
                    # Any scraping/parsing failure is treated as a network error.
                    print network_error_message
    else:
        print "Lyrics Download"
        c.execute('SELECT artist,title FROM hits')
        songs = c.fetchall()
        for song in songs:
            query = song[0] + ' ' + song[1]
            query = query.encode('utf-8')
            # URL-encode artist and title for the lyrics.wikia API.
            artist = urllib.quote_plus(song[0].encode('utf-8'))
            song_title = urllib.quote_plus(song[1].encode('utf-8'))
            c.execute('SELECT 1 FROM lyrics WHERE `artist` = ? AND `title` = ?', (artist,song_title,))
            if c.fetchone():
                print "Lyrics %s from %s already present, skipping" % (song[1], song[0])
                continue
            else:
                # try:
                #Download lyrics
                print "Looking for %s ..." % query,
                # TODO: if not found, try to replace & with and etc
                # Or removing "featuring" etc
                url = "http://lyrics.wikia.com/api.php?artist=%s&song=%s" % (artist,song_title)
                page = requests.get(url).text
                tree = html.fromstring(page)
                # The first search hit links to the actual song page.
                in_page_search = tree.xpath('//body/h3/a')
                song_url = in_page_search[0].attrib['href']
                print "... downloading it"
                page = requests.get(song_url).text
                tree = html.fromstring(page)
                song_text = tree.xpath('//div[@class="lyricbox"]/text()')
                # Count <br> tags to estimate the number of lines.
                new_lines = tree.xpath('//div[@class="lyricbox"]//br')
                total_lines = 0
                for br in new_lines:
                    total_lines += 1
                song_text = [value for value in song_text if value != '\n']
                charlenght = 0
                words_number = 0
                for verse in song_text:
                    verse.encode('utf-8')
                    charlenght += count_letters(verse)
                    words_number += len(verse) - count_letters(verse) + 1 #Count words as spaces +1
                #TODO: I lose a new line somewhere... but this way works
                br_number = total_lines - len(song_text) + 1 + 1
                verse_lenght = str(song_text)
                # (artist, title, lyric, charsNumber, versesNumber, linesNumber, linesPerVerse, wordNumbers)
                to_add = [artist, song_title, str(song_text), charlenght, br_number, len(song_text),"", words_number]
                db.execute('INSERT INTO lyrics VALUES ( ?,?,?,?,?,?,?, ? )', to_add)
                db.commit()
                # except:
                #     print network_error_message
elif task == 'analyse':
##Add BPM, genre, mood
c.execute('SELECT artist,title,lyric FROM lyrics')
songs = c.fetchall()
for song in songs:
artist = song[0]
title = song[1]
lyric = ast.literal_eval(song[2])
d.execute('SELECT 1 FROM assoc WHERE artist = ? AND title = ?', (artist, title,))
if d.fetchone():
print "Lyrics %s from %s already present, skipping" % (title, artist)
continue
else:
print "Analysing %s from %s ..." % (title, artist),
for line in lyric:
a = ""
b = ""
c = ""
##Works but is ugly. FIX
for word in line.split():
a = b
b = c
c = clear_word(word)
#(artist text, title text, word text, times int, verseNumber int, nextWord text, precWord text)
to_add = [artist, title, b, "", "", c, a]
if b:
db.execute('INSERT INTO assoc VALUES ( ?,?,?,?,?,?,? )', to_add)
a = b
b = c
c = ""
to_add = [artist, title, b, "", "", c, a]
db.execute('INSERT INTO assoc VALUES ( ?,?,?,?,?,?,? )', to_add)
db.commit()
print "... done!"
elif task == 'generate':
    # Markov-chain-style lyric generation driven by the `assoc` table
    # built by the 'analyse' task.
    #Get mean line word lenght
    c.execute('SELECT AVG(`wordNumbers`), AVG(`linesNumber`), AVG(`versesNumber`) FROM lyrics')
    stats = c.fetchone()
    if not stats:
        print "Error with the db"
        exit()
    meanWords = stats[0]
    meanLines = stats[1]
    meanVerse = stats[2]
    # Average words per line and lines per verse for the generated song.
    meanLineLen = meanWords / meanLines
    meanVerseLen = meanLines / meanVerse
    print "Info: generating lines of lenght: %s words, %s verse lines, %s verses\n" % (meanLineLen, meanVerseLen, meanVerse)
    for gen_verse in range (0, int(meanVerse)+randint(-3,3)): # 15 versi
        #print "NEW VERSE (%s)" % (gen_verse)
        for gen_line in range(0,int(meanVerseLen)+randint(-2,2)): # 3 linee
            #print "NEW LINE (%s)" % (gen_line)
            #Decide the word to start with: NOT NULL, WITHOUT A PRECEDING WORD; WITH A FOLLOWING WORD
            c.execute('SELECT `word` FROM assoc WHERE `precWord` == "" AND `word` != "" AND `nextWord` != "" ORDER BY RANDOM () LIMIT 1')
            next_word = c.fetchone()[0]
            current_word = ""
            for gen_word in range(0,(int(meanLineLen)+randint(-3,3))): #6 parole
                prec_word = current_word
                current_word = next_word
                #prevent same-word-loop.
                if (current_word == prec_word):
                    QUERY = 'SELECT `nextWord` FROM assoc WHERE `precWord` LIKE ? AND `word` != ? AND `nextWord` != "" ORDER BY RANDOM () LIMIT 1'
                    QUERY_LIMIT = 'SELECT `nextWord` FROM assoc WHERE `precWord` LIKE ? AND `word` != ? ORDER BY RANDOM () LIMIT 1'
                else:
                    #prec_word = '%prec_word%'
                    QUERY = 'SELECT `nextWord` FROM assoc WHERE `precWord` LIKE ? AND `word` = ? AND `nextWord` != "" ORDER BY RANDOM () LIMIT 1'
                    QUERY_LIMIT = 'SELECT `nextWord` FROM assoc WHERE `precWord` LIKE ? AND `word` = ? ORDER BY RANDOM () LIMIT 1'
                #Use that word to gen N words
                # We could use len(x) to get the exact number of letters
                # With LIKE % & LIKE _ we can search for "rhyming" words (difficult in english)
                if (gen_word <= int (meanLineLen) - 4): # far from desired lenght. Let nextWord exist
                    c.execute(QUERY, (prec_word,current_word,))
                elif (gen_word <= int (meanLineLen) + 4): #approching to meanLineLen. nextWord can be unexistent
                    c.execute(QUERY_LIMIT, (prec_word,current_word,))
                else: #last needed word. Force it not to exist
                    c.execute('SELECT `nextWord` FROM assoc WHERE `precWord` = ? AND `word` = ? AND `nextWord` == "" ORDER BY RANDOM () LIMIT 1', (prec_word,current_word,))
                try:
                    next_word = c.fetchone()[0]
                except:
                    # No association found: retry with looser constraints.
                    # NOTE(review): the inner fetch below only runs when the
                    # fallback execute itself raises, so a *successful*
                    # fallback query is never fetched and next_word keeps its
                    # previous value -- this looks like a bug; confirm intent.
                    try:
                        if (gen_word <= int (meanLineLen) - 4): # far from desired lenght. Let nextWord exist
                            c.execute('SELECT `word` FROM assoc WHERE `precWord` = ? AND `word` != "" AND `nextWord` != ? ORDER BY RANDOM () LIMIT 1', (prec_word,current_word,))
                        elif (gen_word == int (meanLineLen)): #approching to meanLineLen. nextWord can be unexistent
                            c.execute('SELECT `word` FROM assoc WHERE `precWord` = ? AND `word` != "" ORDER BY RANDOM () LIMIT 1', (current_word,))
                        else: #last needed word. Force it not to exist
                            c.execute('SELECT `word` FROM assoc WHERE `precWord` = ? AND `word` != "" AND `nextWord` == "" ORDER BY RANDOM () LIMIT 1', (current_word,))
                    except:
                        try:
                            next_word = c.fetchone()[0]
                        except:
                            next_word = ""
                            break
                if is_number(next_word):
                    # NOTE(review): the randint() result is discarded -- dead code?
                    randint(0,int(next_word))
                print next_word.encode("utf-8"),
            print "\n",
        print "\n"

# Save (commit) the changes
db.commit()
# We can also close the connection if we are done with it.
# Just be sure any changes have been committed or they will be lost.
#db.close()
| |
"""Some examples for quick testing/demonstrations.

All functions accept `show` and `draw` arguments:

* If `draw` is `True`, the function returns the widgets (Scatter, Volume, Mesh).
* If `draw` is `False`, it returns the data instead.
* If `show` is `False`, `ipv.show()` will not be called.
"""
import warnings
import numpy as np
from numpy import cos, sin, pi
try:
import scipy.ndimage
import scipy.special
except:
pass # it's ok, it's not crucial
# __all__ = ["example_ylm"]
def xyz(shape=128, limits=[-3, 3], spherical=False, sparse=True, centers=False):
    """Create a 3-d coordinate grid.

    :param shape: int or sequence of 3 ints -- samples per dimension
    :param limits: [min, max] pair (applied to every dimension) or a
        sequence of three such pairs, one per dimension
    :param spherical: also return r, theta, phi coordinates
    :param sparse: return broadcastable arrays (np.ogrid) instead of full
        3-d arrays (np.mgrid)
    :param centers: sample at cell centers instead of including the edges
    :return: (x, y, z), or (x, y, z, r, theta, phi) when spherical is True
    """
    dim = 3
    # Broadcast a scalar shape to all three dimensions.
    # (Bare except replaced with the specific indexing failures.)
    try:
        shape[0]
    except (TypeError, IndexError):
        shape = [shape] * dim
    # Broadcast a flat [min, max] pair to all three dimensions.
    try:
        limits[0][0]  # pylint: disable=unsubscriptable-object
    except (TypeError, IndexError):
        limits = [limits] * dim
    if centers:
        v = [
            slice(vmin + (vmax - vmin) / float(N) / 2, vmax - (vmax - vmin) / float(N) / 4, (vmax - vmin) / float(N))
            for (vmin, vmax), N in zip(limits, shape)
        ]
    else:
        v = [
            slice(vmin, vmax + (vmax - vmin) / float(N) / 2, (vmax - vmin) / float(N - 1))
            for (vmin, vmax), N in zip(limits, shape)
        ]
    if sparse:
        x, y, z = np.ogrid.__getitem__(v)
    else:
        x, y, z = np.mgrid.__getitem__(v)
    if spherical:
        # NOTE(review): r is 0 at the origin, so phi = arccos(z / r) emits a
        # divide-by-zero warning / NaN there -- confirm callers avoid it.
        r = (x ** 2 + y ** 2 + z ** 2) ** 0.5
        theta = np.arctan2(y, x)
        phi = np.arccos(z / r)
        return x, y, z, r, theta, phi
    else:
        return x, y, z
def example_ylm(m=0, n=2, shape=128, limits=[-4, 4], draw=True, show=True, **kwargs):
    """Show a spherical harmonic.

    The volume is |Y_m^n|^2 modulated by a Gaussian radial shell at r = 2,
    so the harmonic shows up as a surface. With draw=False the raw data
    array is returned and ipyvolume is not required.
    """
    __, __, __, r, theta, phi = xyz(shape=shape, limits=limits, spherical=True)
    radial = np.exp(-(r - 2) ** 2)
    data = np.abs(scipy.special.sph_harm(m, n, theta, phi) ** 2) * radial  # pylint: disable=no-member
    if draw:
        # Imported lazily so the data-only path (draw=False) works without
        # ipyvolume installed.
        import ipyvolume.pylab as p3

        vol = p3.volshow(data=data, **kwargs)
        if show:
            p3.show()
        return vol
    return data
def ball(rmax=3, rmin=0, shape=128, limits=[-4, 4], draw=True, show=True, **kwargs):
    """Show a ball (the spherical shell rmin <= r < rmax).

    With draw=False the raw volume (values 0 / 0.5) is returned and
    ipyvolume is not required.
    """
    __, __, __, r, _theta, _phi = xyz(shape=shape, limits=limits, spherical=True)
    data = r * 0
    # Mark voxels inside the shell with a constant density.
    data[(r < rmax) & (r >= rmin)] = 0.5
    # Fix the color-mapping range unless the caller overrides it.
    if "data_min" not in kwargs:
        kwargs["data_min"] = 0
    if "data_max" not in kwargs:
        kwargs["data_max"] = 1
    data = data.T
    if draw:
        # Imported lazily so the data-only path (draw=False) works without
        # ipyvolume installed.
        import ipyvolume.pylab as p3

        vol = p3.volshow(data=data, **kwargs)
        if show:
            p3.show()
        return vol
    return data
# http://graphics.stanford.edu/data/voldata/
def klein_bottle(
    draw=True,
    show=True,
    figure8=False,
    endpoint=True,
    uv=True,
    wireframe=False,
    texture=None,
    both=False,
    interval=1000,
    **kwargs
):
    """Show one or two Klein bottles.

    :param figure8: use the "figure-8" immersion instead of the classic
        Bourke parametrization
    :param both: compute both parametrizations as animation frames
    With draw=False the surface data (x, y, z, u, v) is returned and
    ipyvolume is not required.
    """
    # http://paulbourke.net/geometry/klein/
    u = np.linspace(0, 2 * pi, num=40, endpoint=endpoint)
    v = np.linspace(0, 2 * pi, num=40, endpoint=endpoint)
    u, v = np.meshgrid(u, v)
    if both:
        # Compute both parametrizations; they become animation frames.
        x1, y1, z1, _u1, _v1 = klein_bottle(endpoint=endpoint, draw=False, show=False, **kwargs)
        x2, y2, z2, _u2, _v2 = klein_bottle(endpoint=endpoint, draw=False, show=False, figure8=True, **kwargs)
        x = [x1, x2]
        y = [y1, y2]
        z = [z1, z2]
    else:
        if figure8:
            # u -= np.pi
            # v -= np.pi
            a = 2
            s = 5
            x = s * (a + cos(u / 2) * sin(v) - sin(u / 2) * sin(2 * v) / 2) * cos(u)
            y = s * (a + cos(u / 2) * sin(v) - sin(u / 2) * sin(2 * v) / 2) * sin(u)
            z = s * (sin(u / 2) * sin(v) + cos(u / 2) * sin(2 * v) / 2)
        else:
            # Classic immersion, pieced together across u = pi.
            r = 4 * (1 - cos(u) / 2)
            x = 6 * cos(u) * (1 + sin(u)) + r * cos(u) * cos(v) * (u < pi) + r * cos(v + pi) * (u >= pi)
            y = 16 * sin(u) + r * sin(u) * cos(v) * (u < pi)
            z = r * sin(v)
        # Scale down to roughly unit size.
        x = x / 20
        y = y / 20
        z = z / 20
    if draw:
        # Imported lazily so the data-only path (draw=False) works without
        # ipyvolume installed.
        import ipyvolume.pylab as p3

        if texture:
            # Textures require texture coordinates.
            uv = True
        if uv:
            mesh = p3.plot_mesh(
                x,
                y,
                z,
                wrapx=not endpoint,
                wrapy=not endpoint,
                u=u / (2 * np.pi),
                v=v / (2 * np.pi),
                wireframe=wireframe,
                texture=texture,
                **kwargs
            )
        else:
            mesh = p3.plot_mesh(x, y, z, wrapx=not endpoint, wrapy=not endpoint, wireframe=wireframe, texture=texture, **kwargs)
        if show:
            if both:
                p3.animation_control(mesh, interval=interval)
            p3.squarelim()
            p3.show()
        return mesh
    return x, y, z, u, v
def brain(
    draw=True, show=True, fiducial=True, flat=True, inflated=True, subject='S1', interval=1000, uv=True, color=None
):
    """Show a human brain model.

    Loads up to three cortical surface meshes (fiducial, inflated, flat)
    via pycortex and plots them as animation frames, or returns the raw
    vertex/polygon data when draw is False.

    Requirement:

        $ pip install https://github.com/gallantlab/pycortex
    """
    import ipyvolume as ipv

    try:
        import cortex
    except:
        warnings.warn("it seems pycortex is not installed, which is needed for this example")
        raise
    xlist, ylist, zlist = [], [], []
    polys_list = []

    def add(pts, polys):
        # Collect one surface's coordinates and triangle list as a frame.
        xlist.append(pts[:, 0])
        ylist.append(pts[:, 1])
        zlist.append(pts[:, 2])
        polys_list.append(polys)

    def n(x):
        # Normalize to the [0, 1] range.
        return (x - x.min()) / x.ptp()

    if fiducial or color is True:
        # NOTE(review): hard-codes 'S1' instead of using the `subject`
        # argument -- confirm whether that is intentional.
        pts, polys = cortex.db.get_surf('S1', 'fiducial', merge=True)
        x, y, z = pts.T
        r = n(x)
        g = n(y)
        b = n(z)
        if color is True:
            # Derive per-vertex RGB from the normalized coordinates.
            color = np.array([r, g, b]).T.copy()
        else:
            color = None
        if fiducial:
            add(pts, polys)
    else:
        if color is False:
            color = None
    if inflated:
        add(*cortex.db.get_surf('S1', 'inflated', merge=True, nudge=True))
    u = v = None
    if flat or uv:
        # Texture coordinates come from the flattened surface.
        pts, polys = cortex.db.get_surf('S1', 'flat', merge=True, nudge=True)
        x, y, z = pts.T
        u = n(x)
        v = n(y)
        if flat:
            add(pts, polys)
    # Use the smallest triangle list shared by all collected frames.
    polys_list.sort(key=lambda x: len(x))
    polys = polys_list[0]
    if draw:
        if color is None:
            mesh = ipv.plot_trisurf(xlist, ylist, zlist, polys, u=u, v=v)
        else:
            mesh = ipv.plot_trisurf(xlist, ylist, zlist, polys, color=color, u=u, v=v)
        if show:
            if len(x) > 1:
                ipv.animation_control(mesh, interval=interval)
            ipv.squarelim()
            ipv.show()
        return mesh
    else:
        return xlist, ylist, zlist, polys
def head(draw=True, show=True, max_shape=256, description="Male head"):
    """Show a volumetric rendering of a human male head."""
    # inspired by http://graphicsrunner.blogspot.com/2009/01/volume-rendering-102-transfer-functions.html
    import ipyvolume as ipv
    from scipy.interpolate import interp1d

    # First part is a simpler version of setting up the transfer function. Interpolation with higher order
    # splines does not work well, the original must do sth different
    # Control points are (r, g, b, intensity); interpolated over 0..255.
    colors = [[0.91, 0.7, 0.61, 0.0], [0.91, 0.7, 0.61, 80.0], [1.0, 1.0, 0.85, 82.0], [1.0, 1.0, 0.85, 256]]
    x = np.array([k[-1] for k in colors])
    rgb = np.array([k[:3] for k in colors])
    N = 256
    xnew = np.linspace(0, 256, N)
    tf_data = np.zeros((N, 4))
    kind = 'linear'
    # Interpolate each RGB channel over the full intensity range.
    for channel in range(3):
        f = interp1d(x, rgb[:, channel], kind=kind)
        ynew = f(xnew)
        tf_data[:, channel] = ynew
    # Alpha control points as (alpha, intensity) pairs.
    alphas = [[0, 0], [0, 40], [0.2, 60], [0.05, 63], [0, 80], [0.9, 82], [1.0, 256]]
    x = np.array([k[1] * 1.0 for k in alphas])
    y = np.array([k[0] * 1.0 for k in alphas])
    f = interp1d(x, y, kind=kind)
    ynew = f(xnew)
    tf_data[:, 3] = ynew
    tf = ipv.TransferFunction(rgba=tf_data.astype(np.float32))
    # Fetch the sample dataset and reorient it for display.
    head_data = ipv.datasets.head.fetch().data
    head_data = head_data.transpose((1, 0, 2))[::-1, ::-1, ::-1]
    if draw:
        vol = ipv.volshow(head_data, tf=tf, max_shape=max_shape, description=description)
        if show:
            ipv.show()
        return vol
    else:
        return head_data
def gaussian(N=1000, draw=True, show=True, seed=42, color=None, marker='sphere', description="Gaussian blob", **kwargs):
    """Show N random gaussian distributed points using a scatter plot.

    The points are reproducible for a given `seed`. With draw=False the
    raw (x, y, z) arrays are returned and ipyvolume is not required.
    """
    rng = np.random.RandomState(seed)  # pylint: disable=no-member
    x, y, z = rng.normal(size=(3, N))
    if draw:
        # Imported lazily so the data-only path (draw=False) works without
        # ipyvolume installed.
        import ipyvolume as ipv

        if color:
            mesh = ipv.scatter(x, y, z, marker=marker, color=color, description=description, **kwargs)
        else:
            mesh = ipv.scatter(x, y, z, marker=marker, description=description, **kwargs)
        if show:
            # ipv.squarelim()
            ipv.show()
        return mesh
    return x, y, z
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_user
short_description: Manage user accounts and user attributes on a BIG-IP
description:
- Manage user accounts and user attributes on a BIG-IP system. Typically this
module operates only on REST API users and not CLI users.
When specifying C(root), you may only change the password.
Your other parameters are ignored in this case. Changing the C(root)
password is not an idempotent operation. Therefore, it changes the
password every time this module attempts to change it.
version_added: "1.0.0"
options:
full_name:
description:
- Full name of the user.
type: str
username_credential:
description:
- Name of the user to create, remove, or modify.
- The C(root) user may not be removed.
type: str
required: True
aliases:
- name
password_credential:
description:
- Set the user's password to this unencrypted value.
C(password_credential) is required when creating a new account.
type: str
shell:
description:
- Optionally set the user's shell.
type: str
choices:
- bash
- none
- tmsh
partition_access:
description:
- Specifies the administrative partition to which the user has access.
C(partition_access) is required when creating a new account, and
should be in the form "partition:role".
- Valid roles include C(acceleration-policy-editor), C(admin), C(application-editor),
C(auditor), C(certificate-manager), C(guest), C(irule-manager), C(manager), C(no-access),
C(operator), C(resource-admin), C(user-manager), C(web-application-security-administrator),
and C(web-application-security-editor).
- The partition portion of the tuple should be an existing partition or the value 'all'.
type: list
elements: str
state:
description:
- Whether the account should exist or not, taking action if the state is
different from what is stated.
type: str
choices:
- present
- absent
default: present
update_password:
description:
- C(always) allows the user to update passwords.
C(on_create) only sets the password for newly created users.
- When C(username_credential) is C(root), this value is forced to C(always).
type: str
choices:
- always
- on_create
default: always
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
notes:
- Requires BIG-IP versions >= 12.0.0
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Add the user 'johnd' as an admin
bigip_user:
username_credential: johnd
password_credential: password
full_name: John Doe
partition_access:
- all:admin
update_password: on_create
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Change the user "johnd's" role and shell
bigip_user:
username_credential: johnd
partition_access:
- NewPartition:manager
shell: tmsh
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Make the user 'johnd' an admin and set to advanced shell
bigip_user:
name: johnd
partition_access:
- all:admin
shell: bash
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Remove the user 'johnd'
bigip_user:
name: johnd
state: absent
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Update password
bigip_user:
state: present
username_credential: johnd
password_credential: newsupersecretpassword
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
# Note that the second time this task runs, it would fail because
# The password has been changed. Therefore, it is recommended that
# you either,
#
# * Put this in its own playbook that you run when you need to
# * Put this task in a `block`
# * Include `ignore_errors` on this task
- name: Change the Admin password
bigip_user:
state: present
username_credential: admin
password_credential: NewSecretPassword
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Change the root user's password
bigip_user:
username_credential: root
password_credential: secret
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
'''
RETURN = r'''
full_name:
description: Full name of the user.
returned: changed and success
type: str
sample: John Doe
partition_access:
description:
- List of strings containing the user's roles and which partitions they
are applied to. They are specified in the form "partition:role".
returned: changed and success
type: list
sample: ['all:admin']
shell:
description: The shell assigned to the user account.
returned: changed and success
type: str
sample: tmsh
'''
import os
import tempfile
from datetime import datetime
from distutils.version import LooseVersion
try:
from BytesIO import BytesIO
except ImportError:
from io import BytesIO
from ansible.module_utils.basic import (
AnsibleModule, env_fallback
)
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_bytes
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, f5_argument_spec, is_empty_list
)
from ..module_utils.icontrol import (
tmos_version, upload_file
)
from ..module_utils.teem import send_teem
try:
# Crypto is used specifically for changing the root password via
# tmsh over REST.
#
# We utilize the crypto library to encrypt the contents of a file
# before we upload it, and then decrypt it on-box to change the
# password.
#
# To accomplish such a process, we need to be able to encrypt the
# temporary file with the public key found on the box.
#
# These libraries are used to do the encryption.
#
# Note that, if these are not available, the ability to change the
# root password is disabled and the user will be notified as such
# by a failure of the module.
#
# These libraries *should* be available on most Ansible controllers
# by default though as crypto is a dependency of Ansible.
#
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
HAS_CRYPTO = True
except ImportError:
HAS_CRYPTO = False
class Parameters(AnsibleF5Parameters):
    """Shared parameter definitions for the bigip_user module.

    Maps REST API attribute names to module option names and declares
    which options are updatable, returnable, and sent to the API.
    """

    # REST attribute name -> module option name.
    api_map = {
        'partitionAccess': 'partition_access',
        'description': 'full_name',
    }

    # Options whose changes can be applied to an existing user.
    updatables = [
        'partition_access',
        'full_name',
        'shell',
        'password_credential',
    ]

    # Options reported back in the module result.
    returnables = [
        'shell',
        'partition_access',
        'full_name',
        'username_credential',
        'password_credential',
    ]

    # Attributes serialized into REST request payloads.
    api_attributes = [
        'shell',
        'partitionAccess',
        'description',
        'name',
        'password',
    ]

    @property
    def temp_upload_file(self):
        # Lazily generate a unique remote file name for the encrypted
        # root-password file. Only the basename of the (immediately
        # discarded) local temporary file is used.
        if self._values['temp_upload_file'] is None:
            f = tempfile.NamedTemporaryFile()
            name = os.path.basename(f.name)
            self._values['temp_upload_file'] = name
        return self._values['temp_upload_file']
class ApiParameters(Parameters):
    """Parameters as read back from the BIG-IP REST API."""

    @property
    def partition_access(self):
        """Return partition access entries with REST bookkeeping removed."""
        raw = self._values['partition_access']
        if raw is None:
            return None
        cleaned = []
        for entry in raw:
            # Drop the 'nameReference' link the API attaches; it is not part
            # of the user-facing partition/role information.
            if isinstance(entry, dict):
                entry.pop('nameReference', None)
            cleaned.append(entry)
        return cleaned
class ModuleParameters(Parameters):
    """Parameters as supplied by the user in the task definition."""

    @property
    def partition_access(self):
        """Normalize 'partition:role' strings into API dicts.

        Returns None when the option was not supplied, an empty list when
        the user supplied an empty list, and otherwise a list of
        ``{'name': ..., 'role': ...}`` dicts. The special partition name
        'all' is translated to the API's 'all-partitions'.

        Raises:
            F5ModuleError: If an entry is not in 'partition:role' form.
        """
        if self._values['partition_access'] is None:
            return None
        if is_empty_list(self._values['partition_access']):
            return []
        result = []
        for access in self._values['partition_access']:
            if not isinstance(access, string_types):
                continue
            acl = access.split(':')
            # Guard against malformed entries such as 'admin' or 'a:b:c';
            # previously these raised an opaque IndexError or silently
            # ignored trailing segments.
            if len(acl) != 2:
                raise F5ModuleError(
                    "partition_access entries must be specified in the form "
                    "'partition:role', received: {0}".format(access)
                )
            name, role = acl
            if name.lower() == 'all':
                name = 'all-partitions'
            result.append(dict(name=name, role=role))
        return result
class Changes(Parameters):
    """Base class for collecting and reporting changed options."""

    def to_return(self):
        """Collect all returnable attributes, omitting any that fail."""
        collected = {}
        for key in self.returnables:
            try:
                collected[key] = getattr(self, key)
            except Exception:
                # A returnable that cannot be computed is simply skipped.
                continue
        return self._filter_params(collected)
class UsableChanges(Changes):
    """Changes formatted for submission to the BIG-IP REST API."""

    @property
    def password(self):
        # The API expects the credential under the 'password' attribute;
        # an absent credential passes through as None.
        return self._values['password_credential']
class ReportableChanges(Changes):
    """Changes formatted for reporting back to the user."""

    @property
    def partition_access(self):
        """Render partition access dicts back into 'partition:role' strings."""
        entries = self._values['partition_access']
        if entries is None:
            return None
        rendered = []
        for entry in entries:
            name = entry['name']
            # Undo the 'all' -> 'all-partitions' translation done on input.
            if name == 'all-partitions':
                name = 'all'
            rendered.append('{0}:{1}'.format(name, entry['role']))
        return rendered
class Difference(object):
    """Computes the difference between desired (want) and current (have)
    parameters, one option at a time.
    """

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the changed value for ``param``, or None if unchanged.

        Properties on this class handle options needing custom comparison;
        everything else falls through to the default attribute comparison.
        """
        try:
            result = getattr(self, param)
            return result
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        # Report 'want' when it differs from 'have', or when 'have' does
        # not define the attribute at all.
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            return attr1

    @property
    def password_credential(self):
        # Passwords cannot be read back from the device, so they only count
        # as changed when update_password is 'always'.
        if self.want.password_credential is None:
            return None
        if self.want.update_password in ['always']:
            return self.want.password_credential
        return None

    @property
    def shell(self):
        # 'none' requested and no shell set on the device are equivalent.
        if self.want.shell == 'none' and self.have.shell is None:
            return None
        if self.want.shell == 'bash':
            # bash is only permitted for certain roles; validate before
            # reporting a change.
            self._validate_shell_parameter()
            if self.want.shell == self.have.shell:
                return None
            else:
                return self.want.shell
        if self.want.shell != self.have.shell:
            return self.want.shell

    def _validate_shell_parameter(self):
        """Method to validate shell parameters.
        Raise when shell attribute is set to 'bash' with roles set to
        either 'admin' or 'resource-admin'.
        NOTE: Admin and Resource-Admin roles automatically enable access to
        all partitions, removing any other roles that the user might have
        had. There are few other roles which do that but those roles,
        do not allow bash.
        """
        err = "Shell access is only available to " \
              "'admin' or 'resource-admin' roles."
        permit = ['admin', 'resource-admin']
        have = self.have.partition_access
        # Both the current and (if supplied) desired partition access must
        # include a role permitted to use bash.
        if not any(r['role'] for r in have if r['role'] in permit):
            raise F5ModuleError(err)
        if self.want.partition_access is not None:
            want = self.want.partition_access
            if not any(r['role'] for r in want if r['role'] in permit):
                raise F5ModuleError(err)
class ModuleManager(object):
    """Dispatches execution to the manager appropriate for the target.

    The root account, pre-13.x devices, and 13.x+ devices each require a
    different management strategy.
    """

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.kwargs = kwargs

    def exec_module(self):
        """Select the correct manager for the target and run it."""
        if self.is_root_username_credential():
            kind = 'root'
        elif self.is_version_less_than_13():
            kind = 'v1'
        else:
            kind = 'v2'
        return self.get_manager(kind).exec_module()

    def get_manager(self, type):
        """Instantiate the manager identified by ``type``."""
        managers = {
            'root': RootUserManager,
            'v1': UnpartitionedManager,
            'v2': PartitionedManager,
        }
        cls = managers.get(type)
        if cls is not None:
            return cls(**self.kwargs)

    def is_version_less_than_13(self):
        """Checks to see if the TMOS version is less than 13.

        Anything less than BIG-IP 13.x does not support users
        on different partitions.

        :return: Bool
        """
        return LooseVersion(tmos_version(self.client)) < LooseVersion('13.0.0')

    def is_root_username_credential(self):
        """True when the managed account is the special 'root' user."""
        return self.module.params.get('username_credential', None) == 'root'
class BaseManager(object):
    """Shared create/update/delete lifecycle for BIG-IP user resources."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        # Desired state from the task, current state from the device, and
        # the accumulated set of changes to apply/report.
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _announce_deprecations(self, result):
        # Surface any deprecation warnings collected during processing.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def _set_changed_options(self):
        # On create: every supplied returnable option counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        # On update: diff want vs have and record only real differences.
        # Returns True when at least one option changed.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def exec_module(self):
        """Apply the requested state and assemble the module result dict."""
        start = datetime.now().isoformat()
        version = tmos_version(self.client)
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        # Anonymous usage telemetry.
        send_teem(start, self.module, version)
        return result

    def present(self):
        # Ensure the user exists, creating or updating as needed.
        if self.exists():
            return self.update()
        else:
            return self.create()

    def absent(self):
        # Ensure the user does not exist.
        if self.exists():
            return self.remove()
        return False

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        # Honor check mode: report the change without applying it.
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True

    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        # Verify the deletion actually took effect.
        if self.exists():
            raise F5ModuleError("Failed to delete the user.")
        return True

    def create(self):
        self.validate_create_parameters()
        if self.want.shell == 'bash':
            self.validate_shell_parameter()
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def validate_shell_parameter(self):
        """Method to validate shell parameters.
        Raise when shell attribute is set to 'bash' with roles set to
        either 'admin' or 'resource-admin'.
        NOTE: Admin and Resource-Admin roles automatically enable access to
        all partitions, removing any other roles that the user might have
        had. There are few other roles which do that but those roles,
        do not allow bash.
        """
        err = "Shell access is only available to " \
              "'admin' or 'resource-admin' roles."
        permit = ['admin', 'resource-admin']
        if self.want.partition_access is not None:
            want = self.want.partition_access
            if not any(r['role'] for r in want if r['role'] in permit):
                raise F5ModuleError(err)

    def validate_create_parameters(self):
        # partition_access has no sensible default, so creation requires it.
        if self.want.partition_access is None:
            err = "The 'partition_access' option " \
                  "is required when creating a resource."
            raise F5ModuleError(err)
class UnpartitionedManager(BaseManager):
    """Manages users on pre-13.x BIG-IPs, which have no per-partition
    user support.
    """

    def exists(self):
        """Return True if the user exists on the device."""
        uri = "https://{0}:{1}/mgmt/tm/auth/user/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.username_credential
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        # Either the HTTP status or the payload's 'code' field can signal
        # the outcome, so both are checked.
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
            return True
        errors = [401, 403, 409, 500, 501, 502, 503, 504]
        if resp.status in errors or 'code' in response and response['code'] in errors:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def create_on_device(self):
        """POST the new user; returns the created resource's selfLink."""
        params = self.changes.api_params()
        params['name'] = self.want.username_credential
        uri = "https://{0}:{1}/mgmt/tm/auth/user/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return response['selfLink']

    def update_on_device(self):
        """PATCH the changed attributes onto the existing user."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/auth/user/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.username_credential
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def remove_from_device(self):
        """DELETE the user from the device."""
        uri = "https://{0}:{1}/mgmt/tm/auth/user/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.username_credential
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)

    def read_current_from_device(self):
        """GET the user's current attributes, wrapped in ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/auth/user/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.username_credential
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class PartitionedManager(BaseManager):
    """Manages users on BIG-IP 13.x+, where users can live in
    administrative partitions.
    """

    def exists(self):
        """Return True if exactly one user with the name exists in the
        target partition.
        """
        response = self.list_users_on_device()
        if 'items' in response:
            collection = [x for x in response['items'] if x['name'] == self.want.username_credential]
            if len(collection) == 1:
                return True
            elif len(collection) == 0:
                return False
            else:
                raise F5ModuleError(
                    "Multiple users with the provided name were found!"
                )
        return False

    def create_on_device(self):
        """POST the new user into the configured partition."""
        params = self.changes.api_params()
        params['name'] = self.want.username_credential
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/auth/user/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 404, 409, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return True

    def read_current_from_device(self):
        """Fetch the user's current attributes from the partition listing."""
        response = self.list_users_on_device()
        collection = [x for x in response['items'] if x['name'] == self.want.username_credential]
        if len(collection) == 1:
            user = collection.pop()
            return ApiParameters(params=user)
        elif len(collection) == 0:
            raise F5ModuleError(
                "No accounts with the provided name were found."
            )
        else:
            raise F5ModuleError(
                "Multiple users with the provided name were found!"
            )

    def update_on_device(self):
        """PATCH the changed attributes onto the existing user."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/auth/user/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.username_credential
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 404, 409, 403]:
            if 'message' in response:
                # Some responses carry an error-shaped payload even on
                # success; only treat real failures as errors.
                if 'updated successfully' not in response['message']:
                    raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def remove_from_device(self):
        """DELETE the user from the device."""
        uri = "https://{0}:{1}/mgmt/tm/auth/user/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.username_credential
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)

    def list_users_on_device(self):
        """GET all users in the configured partition."""
        uri = "https://{0}:{1}/mgmt/tm/auth/user/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        # Server-side filter restricts results to the target partition.
        query = "?$filter=partition+eq+'{0}'".format(self.want.partition)
        resp = self.client.api.get(uri + query)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return response
class RootUserManager(BaseManager):
    """Handles the special 'root' account.

    The root password can only be changed through tmsh; this manager drives
    that over REST by encrypting the new password with the device's public
    key, uploading it, and decrypting it on-box.
    """

    def exec_module(self):
        """Apply the requested state; root can only be updated, never removed."""
        if not HAS_CRYPTO:
            raise F5ModuleError(
                "An installed and up-to-date python 'cryptography' package is "
                "required to change the 'root' password."
            )
        start = datetime.now().isoformat()
        version = tmos_version(self.client)
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            raise F5ModuleError(
                "You may not remove the root user."
            )
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        send_teem(start, self.module, version)
        return result

    def exists(self):
        # root always exists, so 'present' always follows the update path.
        return True

    def update(self):
        """Encrypt the new password, upload it, apply it via tmsh, clean up."""
        public_key = self.get_public_key_from_device()
        public_key = self.extract_key(public_key)
        encrypted = self.encrypt_password_change_file(
            public_key, self.want.password_credential
        )
        self.upload_to_device(encrypted, self.want.temp_upload_file)
        result = self.update_on_device()
        # Best not to leave the (encrypted) password file on the box.
        self.remove_uploaded_file_from_device(self.want.temp_upload_file)
        return result

    def encrypt_password_change_file(self, public_key, password):
        """Return a BytesIO holding the password input encrypted with the
        device's public key.
        """
        # This function call requires that the public_key be expressed in bytes
        pub = serialization.load_pem_public_key(
            to_bytes(public_key),
            backend=default_backend()
        )
        # The password appears twice, matching the prompt/confirm input that
        # 'tmsh modify auth password' reads from stdin.
        message = to_bytes("{0}\n{0}\n".format(password))
        ciphertext = pub.encrypt(
            message,
            # OpenSSL craziness
            #
            # Using this padding because it is the only one that works with
            # the OpenSSL on BIG-IP at this time.
            padding.PKCS1v15(),
            #
            # OAEP is the recommended padding to use for encrypting, however, two
            # things are wrong with it on BIG-IP.
            #
            # The first is that one of the parameters required to decrypt the data
            # is not supported by the OpenSSL version on BIG-IP. A "parameter setting"
            # error is raised when you attempt to use the OAEP parameters to specify
            # hashing algorithms.
            #
            # This is validated by this thread here
            #
            # https://mta.openssl.org/pipermail/openssl-dev/2017-September/009745.html
            #
            # Were it supported, we could use OAEP, but the second problem is that OAEP
            # is not the default mode of the ``openssl`` command. Therefore, we need
            # to adjust the command we use to decrypt the encrypted file when it is
            # placed on BIG-IP.
            #
            # The correct (and recommended if BIG-IP ever upgrades OpenSSL) code is
            # shown below.
            #
            # padding.OAEP(
            #     mgf=padding.MGF1(algorithm=hashes.SHA256()),
            #     algorithm=hashes.SHA256(),
            #     label=None
            # )
            #
            # Additionally, the code in ``update_on_device()`` would need to be changed
            # to pass the correct command line arguments to decrypt the file.
        )
        return BytesIO(ciphertext)

    def extract_key(self, content):
        """Extracts the public key from the openssl command output over REST.

        The REST output includes some extra output that is not relevant to the
        public key. This function attempts to only return the valid public key
        data from the openssl output.

        Args:
            content: The output from the REST API command to view the public key.

        Returns:
            string: The discovered public key
        """
        lines = content.split("\n")
        start = lines.index('-----BEGIN PUBLIC KEY-----')
        end = lines.index('-----END PUBLIC KEY-----')
        result = "\n".join(lines[start:end + 1])
        return result

    def update_on_device(self):
        """Decrypt the uploaded file on-box and pipe it into tmsh to change
        the root password.
        """
        # Substrings in the tmsh output that indicate the change failed.
        errors = ['Bad password', 'password change canceled', 'based on a dictionary word']
        # Decrypting logic
        #
        # The following commented out command will **not** work on BIG-IP versions
        # utilizing OpenSSL 1.0.11-fips (15 Jan 2015).
        #
        # The reason is because that version of OpenSSL does not support the various
        # ``-pkeyopt`` parameters shown below.
        #
        # Nevertheless, I am including it here as a possible future enhancement in
        # case the method currently in use stops working.
        #
        # This command overrides defaults provided by OpenSSL because I am not
        # sure how long the defaults will remain the defaults. Probably as long
        # as it took OpenSSL to reach 1.0...
        #
        # openssl = [
        #     'openssl', 'pkeyutl', '-in', '/var/config/rest/downloads/{0}'.format(self.want.temp_upload_file),
        #     '-decrypt', '-inkey', '/config/ssl/ssl.key/default.key',
        #     '-pkeyopt', 'rsa_padding_mode:oaep', '-pkeyopt', 'rsa_oaep_md:sha256',
        #     '-pkeyopt', 'rsa_mgf1_md:sha256'
        # ]
        #
        # The command we actually use is (while not recommended) also the only one
        # that works. It forgoes the usage of OAEP and uses the defaults that come
        # with OpenSSL (PKCS1v15)
        #
        # See this link for information on the parameters used
        #
        # https://www.openssl.org/docs/manmaster/man1/pkeyutl.html
        #
        # If you change the command below, you will need to additionally change
        # how the encryption is done in ``encrypt_password_change_file()``.
        #
        openssl = [
            'openssl', 'pkeyutl', '-in', '/var/config/rest/downloads/{0}'.format(self.want.temp_upload_file),
            '-decrypt', '-inkey', '/config/ssl/ssl.key/default.key',
        ]
        cmd = '-c "{0} | tmsh modify auth password root"'.format(' '.join(openssl))
        params = dict(
            command='run',
            utilCmdArgs=cmd
        )
        uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
            if 'commandResult' in response:
                # tmsh reports failures in the command output, not the
                # HTTP status, so scan for known error strings.
                if any(x for x in errors if x in response['commandResult']):
                    raise F5ModuleError(response['commandResult'])
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return True

    def upload_to_device(self, content, name):
        """Uploads a file-like object via the REST API to a given filename.

        Args:
            content: The file-like object whose content to upload.
            name: The remote name of the file to store the content in. The
                final location of the file will be in /var/config/rest/downloads.

        Returns:
            void
        """
        url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        try:
            upload_file(self.client, url, content, name)
        except F5ModuleError:
            raise F5ModuleError(
                "Failed to upload the file."
            )

    def remove_uploaded_file_from_device(self, name):
        """Delete the previously-uploaded file from the device."""
        filepath = '/var/config/rest/downloads/{0}'.format(name)
        params = {
            "command": "run",
            "utilCmdArgs": filepath
        }
        uri = "https://{0}:{1}/mgmt/tm/util/unix-rm".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def get_public_key_from_device(self):
        """Read the device's default public key via openssl over REST.

        Returns the raw command output (or None when the REST response
        contains no commandResult).
        """
        cmd = '-c "openssl rsa -in /config/ssl/ssl.key/default.key -pubout"'
        params = dict(
            command='run',
            utilCmdArgs=cmd
        )
        uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        if 'commandResult' in response:
            return response['commandResult']
        return None
class ArgumentSpec(object):
    """Defines the Ansible argument spec for the bigip_user module."""

    def __init__(self):
        self.supports_check_mode = True
        module_args = dict(
            username_credential=dict(
                required=True,
                aliases=['name']
            ),
            password_credential=dict(
                no_log=True,
            ),
            partition_access=dict(
                type='list',
                elements='str',
            ),
            full_name=dict(),
            shell=dict(
                choices=['none', 'bash', 'tmsh']
            ),
            update_password=dict(
                default='always',
                choices=['always', 'on_create']
            ),
            state=dict(default='present', choices=['absent', 'present']),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        # Merge the standard F5 connection arguments with the
        # module-specific ones.
        self.argument_spec = dict(f5_argument_spec)
        self.argument_spec.update(module_args)
def main():
    """Module entry point: build the spec, run the manager, report results."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    try:
        results = ModuleManager(module=module).exec_module()
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))
    else:
        module.exit_json(**results)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| |
import csv
import errno
import heapq
import math
import os
import sqlite3
import time
from operator import itemgetter
###############################################################################
def create_dirs(file_path):
    """Create any directories needed for the specified file.

    If the directories already exist for the file, this will do nothing.
    """
    directory = os.path.dirname(file_path)
    # Nothing to do when the path has no directory part, or when the
    # directory is already present.
    if not directory or os.path.isdir(directory):
        return
    try:
        os.makedirs(directory)
    except OSError as e:
        # A concurrent process may create the directory between the
        # existence check above and makedirs(); that race is not an error.
        # Anything else is re-raised.
        if e.errno != errno.EEXIST:
            raise
###############################################################################
def load_features(feature_file_path):
    """Load the array of features from the specified feature file.

    A feature file should be formatted as a .csv (Comma Separated Value)
    file with only one row; every value is separated by a comma.

    Returns the first row as a list of strings.

    Raises:
        ValueError: If the file does not exist or contains no rows.
    """
    # Ensure the file exists.
    if not os.path.isfile(feature_file_path):
        raise ValueError('Could not find feature file %s'
                         % feature_file_path)
    # Read the first row of values.
    with open(feature_file_path) as file:
        feature_reader = csv.reader(file)
        try:
            return next(feature_reader)
        except StopIteration:
            # Previously an empty file leaked a bare StopIteration to the
            # caller; report it as a clear ValueError instead.
            raise ValueError('Feature file %s is empty' % feature_file_path)
###############################################################################
def save_features(features, feature_file_path):
    """Save the array of features in a csv file at the specified path.

    The .csv file extension will not be automatically added to the file
    name. If the directory where the file is to be held does not already
    exist, it will be created.
    """
    # Make sure the destination directory exists before opening the file.
    create_dirs(feature_file_path)
    # Write the features as a single CSV row.
    with open(feature_file_path, 'w') as file:
        writer = csv.writer(file)
        writer.writerow(features)
###############################################################################
def percent_difference(feature1, feature2):
    """Compare two equal-length feature vectors by percent difference.

    For each pair of values the percent difference is
    |f1 - f2| / ((f1 + f2) / 2), with 0 used when the average is 0.
    Returns the mean of those per-feature fractions (note: not
    multiplied by 100).

    Raises:
        ValueError: If the vectors differ in length or are empty.
    """
    if len(feature1) != len(feature2):
        raise ValueError('Feature lengths are not of the same length')
    if not feature1:
        # Previously empty vectors raised an opaque ZeroDivisionError.
        raise ValueError('Cannot compare empty feature vectors')
    # Calculate the percent difference for each feature pair.
    percent_differences = []
    for f1, f2 in zip(feature1, feature2):
        average = float(f1 + f2) / 2
        difference = abs(f1 - f2)
        percent_differences.append(
            float(difference) / average if average != 0 else 0)
    # Average the per-feature percent differences.
    return sum(percent_differences) / len(percent_differences)
###############################################################################
def euclidean_distance(point1, point2):
    """Calculates the euclidean distance between the 2 specified
    n-dimensional points.
    """
    if len(point1) != len(point2):
        raise ValueError('The specified points do not have the same number of '
                         'dimensions')
    # Accumulate the squared per-dimension deltas, then take the root.
    total = 0
    for x1, x2 in zip(point1, point2):
        delta = x1 - x2
        total += delta * delta
    return math.sqrt(total)
###############################################################################
def search(feature_matrix, features, label=None, num_samples=1):
    """Search the specified feature matrix for the samples whose features
    are most similar to the specified features.

    The feature matrix should contain each sample's identifier as the first
    element for each sample.

    If a label is specified, only the samples with the matching label will
    be searched. If the label is provided, it will be assumed that the last
    element of every row in the feature matrix is the label of the sample.
    If the label is not provided, then it will be assumed that the feature
    matrix provides no labels.

    Returns a list of the ``num_samples`` most similar samples as
    (identifier, euclidean distance) tuples, ordered from most to least
    similar.
    """
    # Filter out only the samples with the same label if the label was
    # provided.
    if (label):
        # Strip the trailing label element from each matching row.
        samples = [sample[:-1] for sample in feature_matrix
                   if sample[-1] == label]
    else:
        samples = feature_matrix
    # Determine the distance between the features and each of the samples.
    differences = [(sample[0], euclidean_distance(features, sample[1:]))
                   for sample in samples]
    # Find the samples with the smallest distances.
    return heapq.nsmallest(num_samples, differences, key=itemgetter(1))
###############################################################################
class SampleDao(object):
"""A SampleDao (Sample Database Access Object) provides access to a
database that holds a variety of samples. The type of sample available
depends on the type of sample passed into the constructor.
SampleDao is abstract and should not be instantiated directly. Instead,
a subclass should be made to adapt it to the appropriate sample type.
"""
# The directory where all of the features are stored.
_features_directory = r'Features/'
    def __init__(self, sample_type, database_file_name, table_name=None,
                 identifier_column_name='ID', label_column_name='Label'):
        """Initialize a new SampleDao for the specified sample type that
        accesses the database at the specified path. If the database does
        not already exist, it is created.

        Optional parameters:
        table_name - the name of the table in the database
        identifier_column_name - the name of the primary key column
        label_column_name - the name of the column containing labels
        """
        # Set the sample type.
        self._sample_type = sample_type
        # Set the column names.
        self._identifier_column_name = identifier_column_name
        self._label_column_name = label_column_name
        # Set the table name; defaults to the sample type's name with an
        # 's' appended.
        self._table_name = table_name or '{0}s'.format(sample_type.__name__)
        # Ensure the directory for the database exists.
        create_dirs(database_file_name)
        # Set the features directory inside the same directory as the database.
        database_directory = os.path.dirname(database_file_name)
        self._features_directory = os.path.join(database_directory,
                                                SampleDao._features_directory)
        # Create the connection for the database.
        #
        # Note: If the database file doesn't exist yet, this will create it.
        self._connection = sqlite3.connect(database_file_name)
        # If the samples table already exists in the database, alter it to add
        # any missing columns. If it doesn't already exist, create it.
        if (self.__table_exists()):
            self.__alter_table()
        else:
            self.__create_table()
def __exit__(self):
"""Close the connection to the databse.
This method provides compatibility with the "with" keyword.
"""
self.close()
def __alter_table(self):
"""Alter the table to add any missing columns. This should only be
called in the constructor if the table already exists.
"""
# Find out what columns the table already contains.
query = 'PRAGMA table_info({0})'.format(self.table_name)
cursor = self._connection.cursor()
cursor.execute(query)
# Note: The resultset contains a record for each column. The name of
# the column is the second item.
columns = set(column_record[1] for column_record in cursor)
# Determine what columns are missing.
required = set(feature[0] for feature
in self.sample_type.feature_methods())
required |= set((self.identifier_column_name, self.label_column_name))
missing = required - columns
# Alter the table to add the missing columns.
if (not missing):
return
query = 'ALTER TABLE {0} '.format(self.table_name)
for column in missing:
self._connection.execute(query + 'ADD [{0}] TEXT'.format(column))
def __create_table(self):
"""Construct the table to hold all of the samples. This should only
be called in the constructor if the table does not already exist.
"""
feature_names = [feature[0] for feature
in self.sample_type.feature_methods()]
feature_query = ','.join('[%s] TEXT' % feature for feature
in feature_names)
query = 'CREATE TABLE IF NOT EXISTS %s (' % self.table_name
query += '%s TEXT PRIMARY KEY,' % self.identifier_column_name
query += '%s TEXT' % self.label_column_name
if (feature_query):
query += ',{0}'.format(feature_query)
query += ')'
self._connection.execute(query)
def __table_exists(self):
"""Determine if the table to hold all of the samples exists in the
database already.
"""
# Retrieve the number of tables with the table name from the database.
cursor = self._connection.cursor()
query = ("SELECT COUNT(*) FROM sqlite_master WHERE type='table'"
"AND name=?")
cursor.execute(query, (self.table_name,))
return cursor.fetchone()[0] > 0
    @property
    def identifier_column_name(self):
        """Get the name of the column used as the primary key in the
        database table.
        """
        return self._identifier_column_name
@property
def label_column_name(self):
"""Get the name of the column used for the label in the database."""
return self._label_column_name
@property
def sample_type(self):
"""Get the sample class type of the samples stored in the database
accessed by this
"""
return self._sample_type
@property
def table_name(self):
"""Get the name of the table created in the database to hold the
samples.
By default, this returns the name of the sample type with an 's'
appended to the end.
"""
return self._table_name
def add_sample(self, sample, verbose_callback=None):
"""Add the specified sample to the database.
A function that takes a single string as an argument can be specified
as the verbose_callback in order to print updates during the feature
extraction process.
"""
# Ensure the sample is of the correct type.
if (not isinstance(sample, self.sample_type)):
raise TypeError("Sample is of the incorrect type")
# Retrieve the id, label, and set of features.
id = sample.identifier
label = sample.label
features, feature_names = self.extract_features(sample,
verbose_callback=verbose_callback)
# Construct the query to insert the record in the database.
feature_names = ','.join('[%s]' % name for name in feature_names)
feature_params = ','.join('?' * len(features))
query = 'INSERT OR REPLACE INTO {0} ([{1}], [{2}], {3}) VALUES(?, ?, {4})'
query = query.format(self.table_name, self.identifier_column_name,
self.label_column_name, feature_names,
feature_params)
self._connection.execute(query, [id, label] + features)
self._connection.commit()
def close(self):
"""Closes the connection to the database."""
self._connection.close()
def exists(self, identifier):
"""Checks if a sample has the specified identifier in the database."""
query = 'SELECT {0} FROM {1} WHERE {0} = ?'.format(
self.identifier_column_name,
self.table_name)
cursor = self._connection.cursor()
cursor.execute(query, [identifier])
return len(cursor.fetchall()) > 0
def extract_features(self, sample, feature_methods=None,
verbose_callback=None):
"""Extract the specified features from the specified sample.
feature_methods should contain the features to extract, with each
feature being represented as a list with the first element being the
name of the feature and the second element being the unbounded feature
method.
verbose_callback is an optional callback function that takes a single
string parameter. If supplied, it will be called periodically to
update the progress of the extraction.
The return value will be a tuple containing two parallel lists. The
first list will be the list of the features extracted from the sample.
If the feature was a single numerical value, that number will be the
value. If the feature was a list of values, the list will be saved to
a .csv value and the value will be the path to the saved file. The
second list will contain the name of the feature corresponding to
each element in the first list.
"""
# Use the full list of feature methods by default.
feature_methods = feature_methods or self.sample_type.feature_methods()
features = []
feature_names = []
for feature_name, feature_method in feature_methods:
# Log start
if (verbose_callback):
verbose_callback('Parsing {0} ... '.format(feature_name))
start = time.clock()
# Execute the method for the sample and retrieve the feature
# value.
value = feature_method(sample)
# If the value is a list of features instead of a single value,
# save the list to a csv file and insert the path to the file
# in the database in place of a value.
if (isinstance(value, (list, tuple))):
# The file directory should have be:
#
# {Features Directory}/{Table Name}/{Feature Name}/{Sample}
#
file_name = '%s.csv' % sample.identifier
file_path = os.path.join(self._features_directory,
self.table_name, feature_name,
file_name)
# Save the features to the file and then set the file path as
# the value to be written.
save_features(value, file_path)
value = file_path
# Add the feature
features.append(value)
feature_names.append(feature_name)
# Log end
if (verbose_callback):
elapsed = time.clock() - start
verbose_callback('Finished in {0:.5} seconds\n'.format(elapsed))
return (features, feature_names)
def feature_matrix(self, include_ids=False, include_labels=False):
"""Retrieve a matrix containing all of the features (columns) for all
of the samples (rows) in the database.
The feature matrix is an m x n 2D array where m is the number of
samples in the database and n is the number of columns in the database.
Setting include_ids to true will include the ids column at the
beginning.
Setting include_labels to true will include the labels column at the
end.
"""
# Construct a query to grab all of the feature columns (and optionally
# the label column) from the database.
feature_methods = self.sample_type.feature_methods()
columns = [method[0] for method in feature_methods]
if (include_ids):
columns.insert(0, self.identifier_column_name)
if (include_labels):
columns += self.label_column_name
columns_string = ','.join('[%s]' % column for column in columns)
query = 'SELECT {0} FROM {1}'.format(columns_string, self.table_name)
cursor = self._connection.cursor()
cursor.execute(query)
# If a feature column holds multiple features, the value will be a
# string containing the path to a csv file that holds the list of
# values. Otherwise, it will simply be a numerical value.
feature_matrix = []
for record in cursor:
# Construct a list of all of the features in the cursor.
features = []
feature_matrix.append(features)
# Handle IDs/Labels
columns = record
if (include_ids):
features.append(columns[0])
columns = columns[1:]
if (include_labels):
columns = columns[:-1]
for value in columns:
# Try to parse the value as a number.
try:
features.append(float(value))
# If the value is not a number, it must be a path to a csv
# file or empty (None).
except:
if (value is None):
features.append(value)
else:
features.extend(load_features(value))
# Add Label to end.
if (include_labels):
features.append(record[len(record)-1])
return feature_matrix
def identifiers(self):
"""Retrieve a list of all of the identifiers for all of the samples in
the database. All identifiers are guaranteed to uniquely identify a
sample in the database.
"""
# Construct query to grab identifier from table
query = 'SELECT {0} FROM {1}'.format(self.identifier_column_name,
self.table_name)
cursor = self._connection.cursor()
cursor.execute(query)
# The cursor is constructed as a list of records, with each record
# holding a list of columns. In this case, there is only one column
# in each record. Therefore, the cursor can simply be flattened into
# a regular list.
return [str(label) for record in cursor for label in record]
    def labels(self):
        """Retrieve a dictionary of all of the labels associated with each
        sample. The keys for the dictionary will be the identifiers for each
        of the samples.
        """
        # Construct a query to grab the identifier and label from the table.
        query = 'SELECT {0}, {1} FROM {2}'.format(self.identifier_column_name,
                                                  self.label_column_name,
                                                  self.table_name)
        cursor = self._connection.cursor()
        cursor.execute(query)
        # The ID is in the first column and the label is in the second column.
        # Use the ID as the key and the label as the value.
        # NOTE(review): the truthiness test maps empty-string and 0 labels to
        # None as well as NULL — confirm 0 is not a legal label before
        # tightening this to `record[1] is not None`.
        return {str(record[0]): int(record[1]) if record[1] else None
                for record in cursor}
def missing_features(self, id):
"""Determine which feature columns the sample with the specified id
is missing in the database.
The missing columns will be returned as an list of strings
representing the column names.
"""
# Retrieve all of the features for the specified sample.
features = [feature[0] for feature
in self.sample_type.feature_methods()]
query = 'SELECT {0} FROM {1} WHERE {2} = ?'
query = query.format(','.join('[%s]' % f for f in features),
self.table_name,
self.identifier_column_name)
cursor = self._connection.execute(query, (id,))
values = cursor.fetchone()
# Find the features that have no value.
return [feature for feature, value in zip(features, values)
if value is None]
def samples(self, include_labels=False):
"""Retrieve all of the samples from the dictionary.
The samples will be returned as a dictionary with the each sample's
identifier representing the key for the sample. Each sample is
represented by the array of its features.
If include_labels is set to true, each feature's label will be appended
to the array of features.
"""
feature_matrix = self.feature_matrix(include_ids=True,
include_labels=include_labels)
return {row[0]: row[1:] for row in feature_matrix}
def save_feature_matrix(self, file_path, include_ids=False,
include_labels=False):
"""Retrieve the feature matrix and save it to the specified file.
file_path is the path to the file where the feature matrix should be
saved.
Setting include_ids to true will include the ids column at the
beginning of the feature matrix.
Setting include_labels to true will include the labels column at the
end of the feature matrix.
"""
# Retrieve feature matrix.
feature_matrix = self.feature_matrix(include_ids=include_ids,
include_labels=include_labels)
# Ensure the directory for the file exists.
create_dirs(file_path)
# Save feature matrix to file.
with open(file_path, 'w') as file:
writer = csv.writer(file, lineterminator='\n')
writer.writerows(feature_matrix)
def search(self, features, label):
"""Search the feature matrix for the sample whose features are most
similar to the specified features.
Returns the label followed by the percent difference of the most
similar sample.
"""
feature_matrix = self.feature_matrix(self, include_id=True,
include_labels=True)
return search(feature_matrix, features, label)
def update_sample(self, sample, features=None, verbose_callback=None):
"""Update the specified features for the specified sample in the
database. If no features are specified, the sample will have only its
missing features filled in.
features should be a list of feature names. It should not contain the
actual feature methods.
verbose_callback is an optional callback function that takes a single
string parameter. If supplied, it will be called periodically to
update the progress of the extraction.
"""
# Use the missing features by default.
features = features or self.missing_features(sample.identifier)
# Ensure there are features to update.
if (not features):
if (verbose_callback):
verbose_callback('No features to update.\n')
return
# Retrieve the feature methods for each of the feature names.
feature_methods = {feature[0]: feature[1] for feature
in self.sample_type.feature_methods()}
features = [(feature, feature_methods[feature])
for feature in features]
# Extract all of the features from the sample.
features, feature_names = self.extract_features(sample, features,
verbose_callback=verbose_callback)
# Update the sample in the database.
query = 'UPDATE {0} SET '.format(self.table_name)
query += ','.join('[%s]=?' % feature for feature in feature_names)
query += ' WHERE [{0}]=?'.format(self.identifier_column_name)
params = features + [sample.identifier]
self._connection.execute(query, params)
self._connection.commit()
| |
# Copyright 2020 The Weakly-Supervised Control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Environment wrappers for augmenting environment data."""
import abc
import collections.abc
from typing import Any, Callable, Dict, List, Tuple, Type
import gym
from gym import spaces
import numpy as np
def create_wrapped_env(env_cls: Type, env_kwargs: Dict[str, Any],
                       wrappers: List[Tuple[Type, Dict[str, Any]]]):
    """Instantiate ``env_cls`` and apply each wrapper around it in order.

    Each entry of ``wrappers`` is a (wrapper class, kwargs) pair.  Wrappers
    are applied innermost-first, so the last entry ends up outermost.
    """
    wrapped = env_cls(**env_kwargs)
    for cls, kwargs in wrappers:
        wrapped = cls(wrapped, **kwargs)
    return wrapped
class GoalFactorWrapper(gym.Wrapper, metaclass=abc.ABCMeta):
    """Wraps a goal-based environment.

    Appends an extra "factor" value (e.g. lighting or color) to the state
    observation/goal vectors, and delegates applying the factor to the
    simulator to subclasses via the abstract ``_set_to_factor`` hook.
    """
    # Observation-dict keys whose vectors carry the appended factor value.
    STATE_OBS_KEYS = ('state_desired_goal', 'state_achieved_goal',
                      'state_observation')
    # Goal-dict keys whose trailing ``factor_size`` entries are the factor.
    GOAL_KEYS = ('state_desired_goal', 'desired_goal')

    def __init__(self, env: gym.Env, factor_space: spaces.Box):
        """Wraps ``env``; widens each state space bound by ``factor_space``."""
        super().__init__(env)
        self.factor_space = factor_space
        self.factor_size = len(np.atleast_1d(factor_space.low))
        # Set by _set_and_cache_factor(); None until the first reset().
        self.current_factor_value = None
        # Augment the observation space.
        if isinstance(self.observation_space, spaces.Dict):
            for key in self.STATE_OBS_KEYS:
                prev_goal_space = self.observation_space.spaces[key]
                self.observation_space.spaces[key] = spaces.Box(
                    low=np.append(prev_goal_space.low, factor_space.low),
                    high=np.append(prev_goal_space.high, factor_space.high),
                    dtype=prev_goal_space.dtype,
                )
        else:
            print('WARNING: {} observation_space is not Dict: {}'.format(
                self.__class__.__name__, self.observation_space))

    @abc.abstractmethod
    def _set_to_factor(self, value: np.ndarray):
        """Sets to the given factor."""

    def randomize_factor(self):
        """Sample a fresh factor value and apply it to the environment."""
        factor = self._sample_factors(1)[0]
        self._set_and_cache_factor(factor)

    def reset(self):
        """Reset the wrapped env, randomize the factor, and return the
        factor-augmented observation."""
        obs = self.env.reset()
        self.randomize_factor()
        obs = self._add_factor_to_obs(obs)
        return obs

    def step(self, action):
        """Step the wrapped env and append the factor to the observation."""
        obs, reward, done, info = self.env.step(action)
        obs = self._add_factor_to_obs(obs)
        return obs, reward, done, info

    def set_to_goal(self, goal):
        """Move the env to ``goal``, applying the factor embedded in it."""
        goal = goal.copy()
        # Extract out the factor before calling the sub-env's set_to_goal.
        # NOTE(review): `factor` is only bound when one of GOAL_KEYS is
        # present in `goal`; otherwise the final call raises NameError —
        # confirm callers always pass goals containing those keys.
        for goal_key in self.GOAL_KEYS:
            if goal_key in goal:
                factor = goal[goal_key][-self.factor_size:]
                goal[goal_key] = (goal[goal_key][:-self.factor_size])
        self.env.set_to_goal(goal)
        self._set_and_cache_factor(factor)

    def set_goal(self, goal):
        """Set the env's goal after stripping and applying the factor."""
        goal = goal.copy()
        # Extract out the factor before calling the sub-env's set_goal.
        # NOTE(review): as in set_to_goal, `factor` is unbound when no
        # GOAL_KEYS entry exists in `goal` — confirm with callers.
        for goal_key in self.GOAL_KEYS:
            if goal_key in goal:
                factor = goal[goal_key][-self.factor_size:]
                goal[goal_key] = (goal[goal_key][:-self.factor_size])
        self.env.set_goal(goal)
        self._set_and_cache_factor(factor)

    def sample_goals(self, batch_size):
        """Sample goals from the wrapped env and append factor values."""
        goals = self.env.sample_goals(batch_size)
        # Add the factor value to each goal vector (same factor batch for
        # every GOAL_KEYS entry).
        factor = self._sample_factors(batch_size)
        for goal_key in self.GOAL_KEYS:
            desired_goal = np.concatenate([goals[goal_key], factor], axis=1)
            goals[goal_key] = desired_goal
        return goals

    def compute_rewards(self, actions, obs):
        """Delegate reward computation on factor-free observations."""
        obs = self._remove_factor_from_obs(obs)
        return self.env.compute_rewards(actions, obs)

    def _sample_factors(self, batch_size: int) -> np.ndarray:
        """Returns a (batch_size, factor_size) array of sampled factors."""
        return np.stack([
            np.atleast_1d(self.factor_space.sample())
            for i in range(batch_size)
        ])

    def _set_and_cache_factor(self, value: np.ndarray):
        """Apply ``value`` via _set_to_factor and remember it for obs."""
        # For scalar factor spaces, normalize to a 0-d array.
        if self.factor_space.low.shape == ():
            value = np.array(value.item())
        # if not self.factor_space.contains(value):
        #     print('WARNING: {} does not contain {}'.format(
        #         self.factor_space, value))
        self._set_to_factor(value)
        self.current_factor_value = value

    def _add_factor_to_obs(self, obs):
        """Append the cached factor to each state vector in ``obs``."""
        assert self.current_factor_value is not None
        # Non-dict observations are passed through unchanged.
        if not isinstance(obs, collections.abc.Mapping):
            return obs
        obs = obs.copy()
        for key in self.STATE_OBS_KEYS:
            if key not in obs:
                print('WARNING: {} not in obs'.format(key))
                continue
            obs[key] = np.append(obs[key], self.current_factor_value)
        return obs

    def _remove_factor_from_obs(self, obs):
        """Strip the trailing factor entries from each state vector."""
        obs = obs.copy()
        # Non-dict observations are passed through unchanged.
        if not isinstance(obs, collections.abc.Mapping):
            return obs
        for key in self.STATE_OBS_KEYS:
            if key not in obs:
                print('WARNING: {} not in obs'.format(key))
                continue
            obs_value = obs[key]
            # Handle both single observations (1-d) and batches (2-d).
            assert obs_value.ndim in (1, 2), obs_value.ndim
            if obs_value.ndim == 1:
                obs[key] = obs_value[:-self.factor_size]
            else:
                obs[key] = obs_value[:, :-self.factor_size]
        return obs

    def __getattr__(self, name: str):
        # Delegate unknown attribute lookups to the wrapped environment.
        return getattr(self.env, name)
class MujocoRandomLightsWrapper(GoalFactorWrapper):
    """Wrapper over MuJoCo environments that modifies lighting."""

    def __init__(self,
                 env: gym.Env,
                 diffuse_range: Tuple[float, float] = (0.2, 0.8)):
        """Creates a new wrapper whose scalar factor is the headlight
        intensity, sampled from ``diffuse_range``."""
        low, high = diffuse_range
        super().__init__(
            env,
            factor_space=spaces.Box(low=np.array(low),
                                    high=np.array(high),
                                    dtype=np.float32),
        )
        self.model = self.unwrapped.model

    @property
    def factor_names(self):
        """The wrapped env's factor names with 'light' appended."""
        return self.unwrapped.factor_names + ['light']

    def _set_to_factor(self, value: float):
        """Apply ``value`` as both ambient and diffuse headlight intensity."""
        intensity = np.full((3, ), value)
        self.model.vis.headlight.ambient[:] = intensity
        self.model.vis.headlight.diffuse[:] = intensity

    def __getattr__(self, name: str):
        # Delegate unknown attributes to the wrapped environment.
        return getattr(self.env, name)
class MujocoRandomColorWrapper(GoalFactorWrapper):
    """Wrapper over MuJoCo environments that recolors named geoms/sites.

    (Doc fix: the previous docstring claimed this wrapper modified
    lighting; it actually selects one of ``color_choices`` as the RGBA of
    the listed geoms and sites.)  The factor is an integer index into
    ``color_choices``.
    """

    def __init__(self,
                 env: gym.Env,
                 color_choices: List[Tuple[float, float, float, float]],
                 geom_names: List[str] = None,
                 site_names: List[str] = None):
        """Creates a new wrapper.

        color_choices: RGBA tuples selectable by the integer factor.
        geom_names / site_names: MuJoCo object names whose rgba values are
        overwritten when the factor is applied.
        """
        super().__init__(
            env,
            factor_space=spaces.Box(
                low=np.array(0, dtype=int),
                high=np.array(len(color_choices) - 1, dtype=int),
                dtype=int,
            ),
        )
        self.model = self.unwrapped.model
        self.geom_ids = [
            self.model.geom_name2id(name) for name in geom_names or []
        ]
        self.site_ids = [
            self.model.site_name2id(name) for name in site_names or []
        ]
        self.color_choices = color_choices

    @property
    def factor_names(self):
        """The wrapped env's factor names plus the color factor names."""
        factor_names = self.unwrapped.factor_names
        return factor_names + ['table_color', 'obj_color']

    def _set_to_factor(self, value: int):
        """Apply ``color_choices[value]`` to every tracked geom and site."""
        color = self.color_choices[int(value)]
        for geom_id in self.geom_ids:
            self.model.geom_rgba[geom_id, :] = color
        for site_id in self.site_ids:
            self.model.site_rgba[site_id, :] = color

    def __getattr__(self, name: str):
        # Delegate unknown attributes to the wrapped environment.
        return getattr(self.env, name)
if __name__ == '__main__':
    # Manual smoke test: drive a registered environment with random actions
    # and render it.  Requires the project's envs and a MuJoCo display.
    from weakly_supervised_control.envs import register_all_envs
    import gym
    register_all_envs()
    env = gym.make('SawyerPickupRandomLightsColorsEnv-v1')
    # env = MujocoRandomColorWrapper(
    #     env,
    #     geom_names=['tableTop'],
    #     color_choices=[
    #         (.6, .6, .5, 1),
    #         (1., .6, .5, 1),
    #         (.6, 1., .5, 1),
    #         (.6, 1., 1., 1),
    #         (1., 1., .5, 1),
    #     ],
    # )
    # 10 episodes of 100 random steps each.
    for e in range(10):
        obs = env.reset()
        # assert len(obs['state_desired_goal']) == 5, obs['state_desired_goal']
        for _ in range(100):
            env.step(env.action_space.sample())
            # env.randomize_factor()
            env.render()
| |
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import progressbar
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge, Lasso, BayesianRidge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import StratifiedKFold
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import Imputer, StandardScaler, OneHotEncoder
from .encoders import EncodeCategorical
def all_algorithms(func):
    """Decorator that calls the wrapped method once per algorithm.

    Expects the wrapped callable to be a method whose first positional
    argument exposes an ``algorithms`` iterable; each algorithm name is
    injected as ``kwargs['algorithm']`` and progress is shown with a
    progress bar.  NOTE: return values of ``func`` are discarded — the
    wrapper always returns None.
    """
    # Local import so this fix needs no change to the module imports.
    import functools

    # Fix: without functools.wraps the decorated methods lost their name
    # and docstring (they all appeared as 'wrapper').
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        with progressbar.ProgressBar(max_value=len(args[0].algorithms)) as pbar:
            for i, algorithm in enumerate(args[0].algorithms):
                kwargs['algorithm'] = algorithm
                func(*args, **kwargs)
                pbar.update(i + 1)
    return wrapper
class AutoLearn(object):
    """Fit and compare several scikit-learn regressors on a csv dataset.

    Optional preprocessing steps (label encoding, imputation, one-hot
    encoding, standardization, PCA) are fitted on the training data and
    re-applied to test data.  Per-algorithm artifacts (fitted model,
    predictions, scores, cv results) accumulate in ``self.model``.
    """
    def __init__(self, encode_categoricals=False, onehot=False, impute=False, standardize=False, decompose=False,
                 impute_strategy='mean', missing_values='NaN', target=None, id_col=None, error_metric='rmse',
                 algorithms={'linear', 'ridge', 'lasso', 'bayes', 'bayes_ridge', 'boost', 'forest'}):
        # NOTE(review): `algorithms` is a mutable default shared across
        # instances; harmless only while it is never mutated — confirm.
        impute_strategy_types = {'mean', 'median', 'most_frequent'}
        assert impute_strategy in impute_strategy_types,\
            'Strategy must be one of the following: {} {} {}'.format('mean', 'median', 'most_frequent')
        self.encode_categoricals = encode_categoricals
        self.onehot = onehot
        self.impute = impute
        self.impute_strategy = impute_strategy
        self.missing_values = missing_values
        self.standardize = standardize
        self.decompose = decompose
        # Name of the target column and optional id column to drop.
        self.target = target
        self.id_col = id_col
        self.error_metric = error_metric
        # Per-algorithm results, keyed by algorithm name.
        self.model = {}
        self.algorithms = algorithms
        # Fitted preprocessors, populated by process_training_data().
        self.encoder_label = None
        self.imputer = None
        self.encoder_onehot = None
        self.scaler = None
        self.pca = None
        for i, algorithm in enumerate(self.algorithms):
            self.model[algorithm] = {}

    def process_training_data(self, filename):
        """Load the training csv, fit all configured preprocessors, and
        return (raw dataframe, feature matrix X, target vector y).

        NOTE(review): `Imputer` and `OneHotEncoder(categorical_features=...)`
        as used below only exist in older scikit-learn releases — confirm
        the pinned sklearn version supports them.
        """
        training_data = pd.read_csv(filename, sep=',')
        if self.encode_categoricals:
            self.encoder_label = EncodeCategorical()
            self.encoder_label.fit(training_data)
            training_data = self.encoder_label.transform(training_data)
        X = training_data.copy()
        X.drop(self.target, axis=1, inplace=True)
        if self.id_col:
            X.drop(self.id_col, axis=1, inplace=True)
        # Boolean mask marking which columns are categorical (for one-hot).
        mask = []
        for column in X.columns:
            if column in X.select_dtypes(include=['object', 'category']).columns:
                mask.append(True)
            else:
                mask.append(False)
        X = X.values
        y = training_data[self.target].values
        if self.impute:
            self.imputer = Imputer(missing_values=self.missing_values,
                                   strategy=self.impute_strategy,
                                   copy=False)
            self.imputer.fit(X)
            X = self.imputer.transform(X)
        if self.onehot:
            self.encoder_onehot = OneHotEncoder(categorical_features=mask,
                                                dtype=np.int,
                                                sparse=False,
                                                handle_unknown='ignore')
            X = self.encoder_onehot.fit_transform(X)
        # Scaler and PCA are fitted here but applied lazily in train()/
        # predict().
        if self.standardize:
            self.scaler = StandardScaler()
            self.scaler.fit(X)
        if self.decompose:
            self.pca = PCA()
            self.pca.fit(X)
        return training_data, X, y

    def process_test_data(self, filename, separator=','):
        """Load a test csv and apply the already-fitted preprocessors.

        Returns (raw dataframe, feature matrix X).
        """
        test_data = pd.read_csv(filename, sep=separator)
        if self.encode_categoricals:
            test_data = self.encoder_label.transform(test_data)
        X = test_data.copy()
        if self.id_col:
            X.drop(self.id_col, axis=1, inplace=True)
        X = X.values
        if self.impute:
            X = self.imputer.transform(X)
        if self.onehot:
            X = self.encoder_onehot.transform(X)
        return test_data, X

    def train(self, X, y, algorithm):
        """Fit and return a fresh estimator selected by ``algorithm``.

        The fitted scaler/PCA are applied to X first when configured.
        """
        if self.standardize:
            X = self.scaler.transform(X)
        if self.decompose:
            X = self.pca.transform(X)
        # A fresh, unfitted estimator is constructed on every call.
        algs = {'linear': LinearRegression(),
                'logistic': LogisticRegression(),
                'ridge': Ridge(),
                'lasso': Lasso(max_iter=10000),
                'bayes': GaussianNB(),
                'bayes_ridge': BayesianRidge(),
                'boost': GradientBoostingRegressor(),
                'forest': RandomForestRegressor()}
        model = algs[algorithm]
        model.fit(X, y)
        return model

    @all_algorithms
    def train_all(self, X, y, algorithm):
        """Train every configured algorithm and store each fitted model.

        ``algorithm`` is injected per-iteration by the decorator.
        """
        self.model[algorithm]['model'] = self.train(X, y, algorithm)

    # def tune(self, model, X, y):
    #     print('add this feature')
    #
    # def tune_best(self, model, X, y):
    #     print('add this feature')

    def cross_validate(self, X, y, algorithm):
        """3-fold stratified cross-validation; returns per-fold scores.

        NOTE(review): StratifiedKFold stratifies on discrete class labels;
        with a continuous regression target this may fail — confirm the
        intended target type.
        """
        skf = StratifiedKFold(n_splits=3)
        scores = []
        # print('X: {}\ty: {}'.format(X.shape, y.shape))
        for train_index, test_index in skf.split(X, y):
            X_train, X_test = X[train_index], X[test_index]
            y_train, y_test = y[train_index], y[test_index]
            # print('x_train: {}\tx_test: {}'.format(X_train.shape, X_test.shape))
            # print('y_train: {}\ty_test: {}'.format(y_train.shape, y_test.shape))
            model = self.train(X_train, y_train, algorithm)
            y_test_predictions = self.predict(X_test, model)
            scores.append(self.score(y_test, y_test_predictions))
        return scores

    @all_algorithms
    def cross_validate_all(self, X, y, algorithm):
        """Cross-validate every configured algorithm and store the scores."""
        # print('Beginning cross validation for: {}'.format(algorithm))
        self.model[algorithm]['cv'] = self.cross_validate(X, y, algorithm)
        # print('Finished cross validation for: {}'.format(algorithm))

    def predict(self, X, model):
        """Predict with ``model`` after applying scaler/PCA when configured."""
        if self.standardize:
            X = self.scaler.transform(X)
        if self.decompose:
            X = self.pca.transform(X)
        y = model.predict(X)
        return y

    @all_algorithms
    def predict_all(self, X, algorithm):
        """Predict with every stored model and cache the predictions."""
        model = self.model[algorithm]['model']
        self.model[algorithm]['predictions'] = self.predict(X, model)

    @staticmethod
    def visualize(y, y_predictions):
        """Plot predictions-vs-actuals and residuals; returns the figure."""
        actuals = y
        predictions = y_predictions
        residuals = actuals - predictions
        fig, ax = plt.subplots(1, 2, figsize=(12, 6))
        sns.regplot(x=predictions, y=actuals, ax=ax[0], color='#34495e')
        sns.regplot(x=predictions, y=residuals, ax=ax[1], fit_reg=False, color='#34495e')
        ax[0].set_title('Compare Predictions')
        ax[1].set_title('Residuals')
        plt.setp(ax[0].get_xticklabels(), rotation=45)
        plt.setp(ax[1].get_xticklabels(), rotation=45)
        return fig

    @all_algorithms
    def visualize_all(self, y, algorithm):
        """Plot results for every algorithm.

        NOTE(review): the ``all_algorithms`` wrapper discards return
        values, so the figure returned here never reaches the caller.
        """
        y_predictions = self.model[algorithm]['predictions']
        return self.visualize(y, y_predictions)

    @staticmethod
    def root_mean_squared_logarithmic_error(y, y_pred):
        """RMSLE: sqrt(mean((log1p(pred) - log1p(actual)) ** 2))."""
        return np.sqrt(np.mean(np.power(np.log1p(y_pred) - np.log1p(y), 2)))

    def score(self, y, y_pred, metric=None):
        """Score predictions with ``metric`` (default: self.error_metric).

        NOTE(review): only 'rmsle' and 'mse' are implemented.  Any other
        value — including the constructor default 'rmse' — prints an error
        and calls exit(), terminating the whole process.  Confirm whether
        'rmse' should be implemented or the default changed.
        """
        if metric is None:
            metric = self.error_metric
        if metric == 'rmsle':
            return self.root_mean_squared_logarithmic_error(y, y_pred)
        elif metric == 'mse':
            return mean_squared_error(y, y_pred)
        else:
            print('No metric defined for {}'.format(metric))
            exit()

    @all_algorithms
    def score_all(self, y, algorithm):
        """Score every algorithm's cached predictions plus their variance."""
        y_predictions = self.model[algorithm]['predictions']
        self.model[algorithm]['scores'] = self.score(y, y_predictions)
        self.model[algorithm]['variance'] = self.variance(y_predictions)

    @staticmethod
    def variance(y_pred):
        """Variance of the predictions (spread/stability indicator)."""
        return np.var(y_pred)

    def get_results(self):
        """Collect per-algorithm results into a DataFrame sorted by score."""
        algs = []
        scores = []
        params = []
        variance = []
        cv = []
        for algorithm, item in self.model.items():
            algs.append(algorithm)
            scores.append(item['scores'])
            params.append(item['model'])
            variance.append(item['variance'])
            cv.append(item['cv'])
        data = {'algorithm': algs, 'score': scores, 'parameters': params, 'variance': variance, 'cv': cv}
        results = pd.DataFrame(data=data, index=algs, columns=['score', 'cv', 'variance', 'parameters'])
        results.sort_values('score', ascending=True, inplace=True)
        return results
| |
from __future__ import absolute_import
import datetime
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.timezone import utc
from .github_utils import (get_main_branch_url, get_last_commit_on_branch,
get_commit_comparison_url)
User = get_user_model()
class Challenge(models.Model):
    """A coding challenge tracked against a branch of a GitHub repository.

    A challenge runs through a challenge period followed by a voting
    period; jurors (Role instances) vote on whether it succeeded.
    """
    title = models.CharField(max_length=200)
    body = models.TextField()
    repo_name = models.CharField(max_length=200)
    branch_name = models.CharField(max_length=200)
    start_commit = models.CharField(max_length=150)
    end_commit = models.CharField(max_length=150)
    creation_datetime = models.DateTimeField(auto_now_add=True)

    # Don't include self.role_set.user because the user will not exist
    # in some cases.
    def __unicode__(self):
        return "Challenge {0}, created on {1}".format(
            self.id,
            self.creation_datetime.ctime()
        )

    def get_absolute_url(self):
        return reverse("challenge_api_retrieve", kwargs={"pk": self.pk})

    def get_clencher(self):
        """Return the CLENCHER Role, raising if none has been assigned."""
        try:
            return self.role_set.get(type=self.role_set.model.CLENCHER)
        except ObjectDoesNotExist:
            raise Exception("No user has been assigned as clencher "
                            "for this challenge yet.")

    # Make manager method out of it, but will I then be able to use it in
    # serializer?
    def get_jurors(self):
        """Return the juror Roles ordered by id, raising if there are none.

        `filter` with no results returns an empty queryset (vs `get`).
        Ordering by id matters: get_juror_representation_number relies on it.
        """
        jurors = self.role_set.filter(
            type=self.role_set.model.JUROR).order_by("id")
        if jurors:
            return jurors
        # Bug fix: this previously raised via `self.Exception`, an attribute
        # that does not exist on the model and itself raised AttributeError.
        raise Exception("No Juror has been assigned as juror "
                        "for this challenge yet.")

    def get_juror_representation_number(self, juror):
        """Return the 1-based position of ``juror`` among this challenge's
        jurors (ordered by id)."""
        for number, candidate in enumerate(self.get_jurors(), start=1):
            if juror == candidate:
                return number
        raise Exception("Argument is not a Juror in the current challenge")

    def get_challenge_period_end_datetime(self):
        """When the challenge period closes."""
        return self.creation_datetime + settings.CHALLENGE_PERIOD_DURATION

    def get_voting_period_end_datetime(self):
        """When the voting period closes (after the challenge period)."""
        return (self.get_challenge_period_end_datetime()
                + settings.VOTING_PERIOD_DURATION)

    def in_challenge_period(self):
        """True while now is before the challenge period's end."""
        now = datetime.datetime.utcnow().replace(tzinfo=utc)
        return now < self.get_challenge_period_end_datetime()

    def in_voting_period(self):
        """True while now is between challenge end and voting end."""
        now = datetime.datetime.utcnow().replace(tzinfo=utc)
        return (self.get_challenge_period_end_datetime() < now <
                self.get_voting_period_end_datetime())

    def has_ended(self):
        """True once the voting period is over."""
        now = datetime.datetime.utcnow().replace(tzinfo=utc)
        return now > self.get_voting_period_end_datetime()

    def get_vote_results(self):
        """Tally juror votes into positive/negative/not_voted counts."""
        vote_results = {"positive": 0, "negative": 0,
                        "not_voted": 0}
        for juror in self.get_jurors():
            try:
                if juror.vote.decision == "positive":
                    vote_results["positive"] += 1
                elif juror.vote.decision == "negative":
                    vote_results["negative"] += 1
            except ObjectDoesNotExist:
                # The juror has not created a related Vote yet.
                vote_results["not_voted"] += 1
        return vote_results

    def has_majority_vote(self):
        """True if more than half of the cast votes are positive.

        Returns None when no votes were cast at all.
        """
        vote_results = self.get_vote_results()
        total_votes = vote_results["positive"] + vote_results["negative"]
        try:
            return float(vote_results["positive"]) / total_votes > 0.5
        # When no votes were made.
        except ZeroDivisionError:
            return None

    def is_successful(self):
        """Outcome of the challenge once it has ended, else None.

        Returns None both while the challenge is still running and when it
        ended without any votes.
        """
        if not self.has_ended():
            return None
        majority = self.has_majority_vote()
        if majority is None:
            # No jurors voted for this challenge.
            return None
        return majority

    def get_repo_branch_path_representation(self):
        return "{0}/{1}".format(self.repo_name, self.branch_name)

    def _get_clencher_github_login(self):
        """GitHub login of this challenge's clencher.

        Every clencher has a GitHub social account.  NOTE(review):
        `socialaccount_set.get(id=1)` looks up by primary key rather than
        provider — confirm this is intended.
        """
        github_social_account = (
            self.get_clencher().user.socialaccount_set.get(id=1))
        return github_social_account.extra_data["login"]

    def get_branch_main_url(self):
        """URL of the challenge branch's main page on GitHub."""
        return get_main_branch_url(self._get_clencher_github_login(),
                                   self.repo_name,
                                   self.branch_name)

    # Later store latest github commit to branch to the end_commit
    # model field. Let this saving to field be done using celery, e.g.
    # every 10 mins or so. And stop checking once the end date of the
    # challenge is reached.
    def get_commit_comparison_url(self):
        """URL comparing the start commit to the branch's latest commit."""
        github_login = self._get_clencher_github_login()
        last_commit = get_last_commit_on_branch(github_login,
                                                self.repo_name,
                                                self.branch_name)
        # The bare name below resolves to the module-level helper imported
        # from github_utils, not to this method.
        return get_commit_comparison_url(github_login,
                                         self.repo_name,
                                         self.start_commit,
                                         last_commit)

    # Note, each time get_last_commit_on_branch is called, makes a request,
    # change later on!
    def is_last_commit_different_from_start_commit(self):
        """True if the branch has advanced beyond the start commit."""
        github_login = self._get_clencher_github_login()
        last_commit = get_last_commit_on_branch(github_login,
                                                self.repo_name,
                                                self.branch_name)
        return self.start_commit != last_commit

    def user_has_role(self, user):
        """True if ``user`` holds any Role on this challenge."""
        try:
            # Must be id because request.user is a SimpleLazyObject
            # http://stackoverflow.com/questions/17623234/django-simplelazyobject
            self.role_set.get(user=user.id)
            return True
        except ObjectDoesNotExist:
            return False
class Role(models.Model):
    """Role for a given challenge: Clencher or Juror.

    A role instance for a given challenge is attached to one user at most.
    """
    CLENCHER = "clencher"  # more descriptive than a single capital in js
    JUROR = "juror"
    ROLE_CHOICES = ((CLENCHER, CLENCHER.capitalize()),
                    (JUROR, JUROR.capitalize()))
    user = models.ForeignKey(User)
    type = models.CharField(max_length=10, choices=ROLE_CHOICES)
    challenge = models.ForeignKey(Challenge)

    def __unicode__(self):
        return "{0} '{1}' of '{2}'".format(self.type.capitalize(),
                                           self.user,
                                           self.challenge)

    def get_absolute_url(self):
        return reverse("role_api_retrieve", kwargs={"pk": self.pk})

    def is_juror(self):
        """True for jurors, False for clenchers; any other type is invalid."""
        if self.type == self.JUROR:
            return True
        if self.type == self.CLENCHER:
            return False
        raise Exception("Else Die")

    def can_make_head_comment(self):
        """
        Only allow jurors that haven't made a head comment yet while
        the challenge is still in the voting period (per the
        in_voting_period check below).
        """
        if self.type != self.JUROR:
            return False
        if len(self.headcomment_set.all()) != 0:
            return False
        return self.challenge.in_voting_period()
class Vote(models.Model):
    """
    Vote if challenge is deemed successful or not
    by a juror for a given challenge.
    """
    POSITIVE = "positive"
    NEGATIVE = "negative"
    # Stored value and human-readable label are identical for each choice.
    DECISION_CHOICES = ((POSITIVE, POSITIVE), (NEGATIVE, NEGATIVE))
    # default="" — presumably means "no decision made yet"; confirm.
    decision = models.CharField(max_length=10,
                                choices=DECISION_CHOICES,
                                default="")
    juror = models.OneToOneField(Role)  # only use for jurors!
    def __unicode__(self):
        return "{0} vote of {1}".format(self.decision, self.juror)
| |
#!/usr/bin/env python3
from reporter.connections import RedcapInstance
from reporter.application_abstract_reports.redcap.percentage_complete import (
RedcapPercentageCompleteReport,
)
from reporter.application_abstract_reports.redcap.withdrawn_or_excluded_with_data import (
RedcapWithdrawnOrExcludedWithDataReport,
)
from reporter.emailing import (
RECIPIENT_BRAVE_ADMIN as RECIPIENT_ADMIN,
RECIPIENT_BRAVE_MANAGER as RECIPIENT_MANAGER,
)
from reporter.application_abstract_reports.redcap.web_data_quality import (
RedcapWebDataQuality,
)
from reporter.application_abstract_reports.redcap.data_quality import (
RedcapInvalidNhsNumber,
RedcapInvalidDate,
RedcapInvalidStudyNumber,
RedcapRecordInvalidStudyNumber,
RedcapInvalidBloodPressure,
RedcapInvalidPulse,
RedcapInvalidHeightInCm,
RedcapInvalidWeightInKg,
RedcapInvalidBmi,
RedcapInvalidPostCode,
RedcapInvalidEmailAddress,
RedcapInvalidUhlSystemNumber,
)
# REDCap project ids for each participating BRAVE site.
# Leicester uses the internal REDCap instance; every other site uses the
# external instance (see the per-site report classes below).
REDCAP_LEICESTER_PROJECT_ID = 26
REDCAP_KETTERING_PROJECT_ID = 28
REDCAP_LINCOLN_PROJECT_ID = 37
REDCAP_SHEFFIELD_PROJECT_ID = 54
REDCAP_IMPERIAL_PROJECT_ID = 56
REDCAP_GRANTHAM_PROJECT_ID = 59
REDCAP_WEST_SUFFOLK_PROJECT_ID = 60
class BraveRedcapPercentageCompleteReport(RedcapPercentageCompleteReport):
    """Completion-percentage report for the whole BRAVE study."""
    def __init__(self):
        super().__init__(
            'BRAVE',
            [RECIPIENT_ADMIN, RECIPIENT_MANAGER])
class BraveRedcapWithdrawnOrExcludedWithDataReport(
        RedcapWithdrawnOrExcludedWithDataReport):
    """Report withdrawn or excluded BRAVE participants that still have data."""
    def __init__(self):
        super().__init__(
            'BRAVE',
            [RECIPIENT_ADMIN, RECIPIENT_MANAGER])
# Leicester (internal REDCap instance, project 26)
class BraveRedcapLeicesterWebDataQuality(RedcapWebDataQuality):
    """Standard REDCap web data-quality checks for the Leicester project."""
    def __init__(self):
        super().__init__(
            RedcapInstance.internal,
            REDCAP_LEICESTER_PROJECT_ID,
            [RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLeicesterRedcapInvalidNhsNumber(RedcapInvalidNhsNumber):
    """Report invalid NHS numbers ('nhs_number') for Leicester."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.internal,
            project_id=REDCAP_LEICESTER_PROJECT_ID,
            fields=['nhs_number'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLeicesterRedcapInvalidUhlSystemNumber(
        RedcapInvalidUhlSystemNumber):
    """Report invalid UHL system numbers ('s_number') for Leicester."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.internal,
            project_id=REDCAP_LEICESTER_PROJECT_ID,
            fields=['s_number'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLeicesterRedcapInvalidDate(
        RedcapInvalidDate):
    """Report invalid dates for Leicester."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.internal,
            project_id=REDCAP_LEICESTER_PROJECT_ID,
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLeicesterRedcapInvalidStudyNumber(
        RedcapInvalidStudyNumber):
    """Report invalid study numbers ('record_id', 'briccs_id') for Leicester."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.internal,
            project_id=REDCAP_LEICESTER_PROJECT_ID,
            fields=['record_id', 'briccs_id'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLeicesterRedcapRecordInvalidStudyNumber(
        RedcapRecordInvalidStudyNumber):
    """Report records whose study number is invalid for Leicester."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.internal,
            project_id=REDCAP_LEICESTER_PROJECT_ID,
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLeicesterRedcapInvalidPostCode(
        RedcapInvalidPostCode):
    """Report invalid postcodes ('address_postcode') for Leicester."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.internal,
            project_id=REDCAP_LEICESTER_PROJECT_ID,
            fields=['address_postcode'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLeicesterRedcapInvalidEmailAddress(
        RedcapInvalidEmailAddress):
    """Report invalid e-mail addresses for Leicester."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.internal,
            project_id=REDCAP_LEICESTER_PROJECT_ID,
            fields=[
                'pat_email1',
                'pat_email2',
            ],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLeicesterRedcapInvalidBloodPressure1(
        RedcapInvalidBloodPressure):
    """Report invalid values for blood-pressure reading 1 for Leicester."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.internal,
            project_id=REDCAP_LEICESTER_PROJECT_ID,
            systolic_field_name='part_bp1_sys',
            # NOTE(review): diastolic field is 'part_bp_dias', not
            # 'part_bp1_dias' — confirm this matches the REDCap field name.
            diastolic_field_name='part_bp_dias',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLeicesterRedcapInvalidBloodPressure2(
        RedcapInvalidBloodPressure):
    """Report invalid values for blood-pressure reading 2 for Leicester."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.internal,
            project_id=REDCAP_LEICESTER_PROJECT_ID,
            systolic_field_name='part_bp2_sys',
            diastolic_field_name='part_bp2_dias',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLeicesterRedcapInvalidBloodPressure3(
        RedcapInvalidBloodPressure):
    """Report invalid values for blood-pressure reading 3 for Leicester."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.internal,
            project_id=REDCAP_LEICESTER_PROJECT_ID,
            systolic_field_name='part_bp3_sys',
            diastolic_field_name='part_bp3_dias',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLeicesterRedcapInvalidBloodPressureAvg(
        RedcapInvalidBloodPressure):
    """Report invalid values for the averaged blood pressure for Leicester."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.internal,
            project_id=REDCAP_LEICESTER_PROJECT_ID,
            systolic_field_name='part_avg_sys_bp',
            diastolic_field_name='part_avg_dias_bp',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLeicesterRedcapInvalidPulse(
        RedcapInvalidPulse):
    """Report invalid pulse readings (three readings + average) for Leicester."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.internal,
            project_id=REDCAP_LEICESTER_PROJECT_ID,
            fields=[
                'part_pulse1',
                'part_pulse2',
                'part_pulse3',
                'avg_pulse',
            ],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLeicesterRedcapInvalidHeightInCm(
        RedcapInvalidHeightInCm):
    """Report invalid participant heights ('part_height') for Leicester."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.internal,
            project_id=REDCAP_LEICESTER_PROJECT_ID,
            fields=['part_height'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLeicesterRedcapInvalidWeightInKg(
        RedcapInvalidWeightInKg):
    """Report invalid participant weights ('part_weight') for Leicester."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.internal,
            project_id=REDCAP_LEICESTER_PROJECT_ID,
            fields=['part_weight'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLeicesterRedcapInvalidBmi(
        RedcapInvalidBmi):
    """Report invalid participant BMI values ('part_bmi') for Leicester."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.internal,
            project_id=REDCAP_LEICESTER_PROJECT_ID,
            fields=['part_bmi'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
# Kettering (external REDCap instance, project 28)
class BraveRedcapKetteringWebDataQuality(RedcapWebDataQuality):
    """Standard REDCap web data-quality checks for the Kettering project."""
    def __init__(self):
        super().__init__(
            RedcapInstance.external,
            REDCAP_KETTERING_PROJECT_ID,
            [RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveKetteringRedcapInvalidNhsNumber(RedcapInvalidNhsNumber):
    """Report invalid NHS numbers ('nhs_number') for Kettering."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_KETTERING_PROJECT_ID,
            fields=['nhs_number'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveKetteringRedcapInvalidDate(
        RedcapInvalidDate):
    """Report invalid dates for Kettering."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_KETTERING_PROJECT_ID,
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveKetteringRedcapInvalidStudyNumber(
        RedcapInvalidStudyNumber):
    """Report invalid study numbers ('record_id', 'briccs_id') for Kettering."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_KETTERING_PROJECT_ID,
            fields=['record_id', 'briccs_id'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveKetteringRedcapRecordInvalidStudyNumber(
        RedcapRecordInvalidStudyNumber):
    """Report records whose study number is invalid for Kettering."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_KETTERING_PROJECT_ID,
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveKetteringRedcapInvalidPostCode(
        RedcapInvalidPostCode):
    """Report invalid postcodes ('address_postcode') for Kettering."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_KETTERING_PROJECT_ID,
            fields=['address_postcode'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveKetteringRedcapInvalidEmailAddress(
        RedcapInvalidEmailAddress):
    """Report invalid e-mail addresses for Kettering."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_KETTERING_PROJECT_ID,
            fields=[
                'pat_email1',
                'pat_email2',
            ],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveKetteringRedcapInvalidBloodPressure1(
        RedcapInvalidBloodPressure):
    """Report invalid values for blood-pressure reading 1 for Kettering."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_KETTERING_PROJECT_ID,
            systolic_field_name='part_bp1_sys',
            # NOTE(review): diastolic field is 'part_bp_dias', not
            # 'part_bp1_dias' — confirm this matches the REDCap field name.
            diastolic_field_name='part_bp_dias',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveKetteringRedcapInvalidBloodPressure2(
        RedcapInvalidBloodPressure):
    """Report invalid values for blood-pressure reading 2 for Kettering."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_KETTERING_PROJECT_ID,
            systolic_field_name='part_bp2_sys',
            diastolic_field_name='part_bp2_dias',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveKetteringRedcapInvalidBloodPressure3(
        RedcapInvalidBloodPressure):
    """Report invalid values for blood-pressure reading 3 for Kettering."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_KETTERING_PROJECT_ID,
            systolic_field_name='part_bp3_sys',
            diastolic_field_name='part_bp3_dias',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveKetteringRedcapInvalidBloodPressureAvg(
        RedcapInvalidBloodPressure):
    """Report invalid values for the averaged blood pressure for Kettering."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_KETTERING_PROJECT_ID,
            systolic_field_name='part_avg_sys_bp',
            diastolic_field_name='part_avg_dias_bp',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveKetteringRedcapInvalidPulse(
        RedcapInvalidPulse):
    """Report invalid pulse readings (three readings + average) for Kettering."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_KETTERING_PROJECT_ID,
            fields=[
                'part_pulse1',
                'part_pulse2',
                'part_pulse3',
                'avg_pulse',
            ],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveKetteringRedcapInvalidHeightInCm(
        RedcapInvalidHeightInCm):
    """Report invalid participant heights ('part_height') for Kettering."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_KETTERING_PROJECT_ID,
            fields=['part_height'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveKetteringRedcapInvalidWeightInKg(
        RedcapInvalidWeightInKg):
    """Report invalid participant weights ('part_weight') for Kettering."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_KETTERING_PROJECT_ID,
            fields=['part_weight'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveKetteringRedcapInvalidBmi(
        RedcapInvalidBmi):
    """Report invalid participant BMI values ('part_bmi') for Kettering."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_KETTERING_PROJECT_ID,
            fields=['part_bmi'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
# Lincoln (external REDCap instance, project 37)
class BraveRedcapLincolnWebDataQuality(RedcapWebDataQuality):
    """Standard REDCap web data-quality checks for the Lincoln project."""
    def __init__(self):
        super().__init__(
            RedcapInstance.external,
            REDCAP_LINCOLN_PROJECT_ID,
            [RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLincolnRedcapInvalidNhsNumber(RedcapInvalidNhsNumber):
    """Report invalid NHS numbers ('nhs_number') for Lincoln."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_LINCOLN_PROJECT_ID,
            fields=['nhs_number'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLincolnRedcapInvalidDate(
        RedcapInvalidDate):
    """Report invalid dates for Lincoln."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_LINCOLN_PROJECT_ID,
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLincolnRedcapInvalidStudyNumber(
        RedcapInvalidStudyNumber):
    """Report invalid study numbers ('record_id', 'briccs_id') for Lincoln."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_LINCOLN_PROJECT_ID,
            fields=['record_id', 'briccs_id'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLincolnRedcapRecordInvalidStudyNumber(
        RedcapRecordInvalidStudyNumber):
    """Report records whose study number is invalid for Lincoln."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_LINCOLN_PROJECT_ID,
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLincolnRedcapInvalidPostCode(
        RedcapInvalidPostCode):
    """Report invalid postcodes ('address_postcode') for Lincoln."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_LINCOLN_PROJECT_ID,
            fields=['address_postcode'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLincolnRedcapInvalidEmailAddress(
        RedcapInvalidEmailAddress):
    """Report invalid e-mail addresses for Lincoln."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_LINCOLN_PROJECT_ID,
            fields=[
                'pat_email1',
                'pat_email2',
            ],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLincolnRedcapInvalidBloodPressure1(
        RedcapInvalidBloodPressure):
    """Report invalid values for blood-pressure reading 1 for Lincoln."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_LINCOLN_PROJECT_ID,
            systolic_field_name='part_bp1_sys',
            # NOTE(review): diastolic field is 'part_bp_dias', not
            # 'part_bp1_dias' — confirm this matches the REDCap field name.
            diastolic_field_name='part_bp_dias',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLincolnRedcapInvalidBloodPressure2(
        RedcapInvalidBloodPressure):
    """Report invalid values for blood-pressure reading 2 for Lincoln."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_LINCOLN_PROJECT_ID,
            systolic_field_name='part_bp2_sys',
            diastolic_field_name='part_bp2_dias',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLincolnRedcapInvalidBloodPressure3(
        RedcapInvalidBloodPressure):
    """Report invalid values for blood-pressure reading 3 for Lincoln."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_LINCOLN_PROJECT_ID,
            systolic_field_name='part_bp3_sys',
            diastolic_field_name='part_bp3_dias',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLincolnRedcapInvalidBloodPressureAvg(
        RedcapInvalidBloodPressure):
    """Report invalid values for the averaged blood pressure for Lincoln."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_LINCOLN_PROJECT_ID,
            systolic_field_name='part_avg_sys_bp',
            diastolic_field_name='part_avg_dias_bp',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLincolnRedcapInvalidPulse(
        RedcapInvalidPulse):
    """Report invalid pulse readings (three readings + average) for Lincoln."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_LINCOLN_PROJECT_ID,
            fields=[
                'part_pulse1',
                'part_pulse2',
                'part_pulse3',
                'avg_pulse',
            ],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLincolnRedcapInvalidHeightInCm(
        RedcapInvalidHeightInCm):
    """Report invalid participant heights ('part_height') for Lincoln."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_LINCOLN_PROJECT_ID,
            fields=['part_height'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLincolnRedcapInvalidWeightInKg(
        RedcapInvalidWeightInKg):
    """Report invalid participant weights ('part_weight') for Lincoln."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_LINCOLN_PROJECT_ID,
            fields=['part_weight'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveLincolnRedcapInvalidBmi(
        RedcapInvalidBmi):
    """Report invalid participant BMI values ('part_bmi') for Lincoln."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_LINCOLN_PROJECT_ID,
            fields=['part_bmi'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
# Sheffield (external REDCap instance, project 54)
class BraveRedcapSheffieldWebDataQuality(RedcapWebDataQuality):
    """Standard REDCap web data-quality checks for the Sheffield project."""
    def __init__(self):
        super().__init__(
            RedcapInstance.external,
            REDCAP_SHEFFIELD_PROJECT_ID,
            [RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveSheffieldRedcapInvalidNhsNumber(RedcapInvalidNhsNumber):
    """Report invalid NHS numbers ('nhs_number') for Sheffield."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_SHEFFIELD_PROJECT_ID,
            fields=['nhs_number'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveSheffieldRedcapInvalidDate(
        RedcapInvalidDate):
    """Report invalid dates for Sheffield."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_SHEFFIELD_PROJECT_ID,
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveSheffieldRedcapInvalidStudyNumber(
        RedcapInvalidStudyNumber):
    """Report invalid study numbers ('record_id', 'briccs_id') for Sheffield."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_SHEFFIELD_PROJECT_ID,
            fields=['record_id', 'briccs_id'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveSheffieldRedcapRecordInvalidStudyNumber(
        RedcapRecordInvalidStudyNumber):
    """Report records whose study number is invalid for Sheffield."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_SHEFFIELD_PROJECT_ID,
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveSheffieldRedcapInvalidPostCode(
        RedcapInvalidPostCode):
    """Report invalid postcodes ('address_postcode') for Sheffield."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_SHEFFIELD_PROJECT_ID,
            fields=['address_postcode'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveSheffieldRedcapInvalidEmailAddress(
        RedcapInvalidEmailAddress):
    """Report invalid e-mail addresses for Sheffield."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_SHEFFIELD_PROJECT_ID,
            fields=[
                'pat_email1',
                'pat_email2',
            ],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveSheffieldRedcapInvalidBloodPressure1(
        RedcapInvalidBloodPressure):
    """Report invalid values for blood-pressure reading 1 for Sheffield."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_SHEFFIELD_PROJECT_ID,
            systolic_field_name='part_bp1_sys',
            # NOTE(review): diastolic field is 'part_bp_dias', not
            # 'part_bp1_dias' — confirm this matches the REDCap field name.
            diastolic_field_name='part_bp_dias',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveSheffieldRedcapInvalidBloodPressure2(
        RedcapInvalidBloodPressure):
    """Report invalid values for blood-pressure reading 2 for Sheffield."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_SHEFFIELD_PROJECT_ID,
            systolic_field_name='part_bp2_sys',
            diastolic_field_name='part_bp2_dias',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveSheffieldRedcapInvalidBloodPressure3(
        RedcapInvalidBloodPressure):
    """Report invalid values for blood-pressure reading 3 for Sheffield."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_SHEFFIELD_PROJECT_ID,
            systolic_field_name='part_bp3_sys',
            diastolic_field_name='part_bp3_dias',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveSheffieldRedcapInvalidBloodPressureAvg(
        RedcapInvalidBloodPressure):
    """Report invalid values for the averaged blood pressure for Sheffield."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_SHEFFIELD_PROJECT_ID,
            systolic_field_name='part_avg_sys_bp',
            diastolic_field_name='part_avg_dias_bp',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveSheffieldRedcapInvalidPulse(
        RedcapInvalidPulse):
    """Report invalid pulse readings (three readings + average) for Sheffield."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_SHEFFIELD_PROJECT_ID,
            fields=[
                'part_pulse1',
                'part_pulse2',
                'part_pulse3',
                'avg_pulse',
            ],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveSheffieldRedcapInvalidHeightInCm(
        RedcapInvalidHeightInCm):
    """Report invalid participant heights ('part_height') for Sheffield."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_SHEFFIELD_PROJECT_ID,
            fields=['part_height'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveSheffieldRedcapInvalidWeightInKg(
        RedcapInvalidWeightInKg):
    """Report invalid participant weights ('part_weight') for Sheffield."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_SHEFFIELD_PROJECT_ID,
            fields=['part_weight'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveSheffieldRedcapInvalidBmi(
        RedcapInvalidBmi):
    """Report invalid participant BMI values ('part_bmi') for Sheffield."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_SHEFFIELD_PROJECT_ID,
            fields=['part_bmi'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
# Imperial (external REDCap instance, project 56)
class BraveRedcapImperialWebDataQuality(RedcapWebDataQuality):
    """Standard REDCap web data-quality checks for the Imperial project."""
    def __init__(self):
        super().__init__(
            RedcapInstance.external,
            REDCAP_IMPERIAL_PROJECT_ID,
            [RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveImperialRedcapInvalidNhsNumber(RedcapInvalidNhsNumber):
    """Report invalid NHS numbers ('nhs_number') for Imperial."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_IMPERIAL_PROJECT_ID,
            fields=['nhs_number'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveImperialRedcapInvalidDate(
        RedcapInvalidDate):
    """Report invalid dates for Imperial."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_IMPERIAL_PROJECT_ID,
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveImperialRedcapInvalidStudyNumber(
        RedcapInvalidStudyNumber):
    """Report invalid study numbers ('record_id', 'briccs_id') for Imperial."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_IMPERIAL_PROJECT_ID,
            fields=['record_id', 'briccs_id'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveImperialRedcapRecordInvalidStudyNumber(
        RedcapRecordInvalidStudyNumber):
    """Report records whose study number is invalid for Imperial."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_IMPERIAL_PROJECT_ID,
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveImperialRedcapInvalidPostCode(
        RedcapInvalidPostCode):
    """Report invalid postcodes ('address_postcode') for Imperial."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_IMPERIAL_PROJECT_ID,
            fields=['address_postcode'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveImperialRedcapInvalidEmailAddress(
        RedcapInvalidEmailAddress):
    """Report invalid e-mail addresses for Imperial."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_IMPERIAL_PROJECT_ID,
            fields=[
                'pat_email1',
                'pat_email2',
            ],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveImperialRedcapInvalidBloodPressure1(
        RedcapInvalidBloodPressure):
    """Report invalid values for blood-pressure reading 1 for Imperial."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_IMPERIAL_PROJECT_ID,
            systolic_field_name='part_bp1_sys',
            # NOTE(review): diastolic field is 'part_bp_dias', not
            # 'part_bp1_dias' — confirm this matches the REDCap field name.
            diastolic_field_name='part_bp_dias',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveImperialRedcapInvalidBloodPressure2(
        RedcapInvalidBloodPressure):
    """Report invalid values for blood-pressure reading 2 for Imperial."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_IMPERIAL_PROJECT_ID,
            systolic_field_name='part_bp2_sys',
            diastolic_field_name='part_bp2_dias',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveImperialRedcapInvalidBloodPressure3(
        RedcapInvalidBloodPressure):
    """Report invalid values for blood-pressure reading 3 for Imperial."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_IMPERIAL_PROJECT_ID,
            systolic_field_name='part_bp3_sys',
            diastolic_field_name='part_bp3_dias',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveImperialRedcapInvalidBloodPressureAvg(
        RedcapInvalidBloodPressure):
    """Report invalid values for the averaged blood pressure for Imperial."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_IMPERIAL_PROJECT_ID,
            systolic_field_name='part_avg_sys_bp',
            diastolic_field_name='part_avg_dias_bp',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveImperialRedcapInvalidPulse(
        RedcapInvalidPulse):
    """Report invalid pulse readings (three readings + average) for Imperial."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_IMPERIAL_PROJECT_ID,
            fields=[
                'part_pulse1',
                'part_pulse2',
                'part_pulse3',
                'avg_pulse',
            ],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveImperialRedcapInvalidHeightInCm(
        RedcapInvalidHeightInCm):
    """Report invalid participant heights ('part_height') for Imperial."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_IMPERIAL_PROJECT_ID,
            fields=['part_height'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveImperialRedcapInvalidWeightInKg(
        RedcapInvalidWeightInKg):
    """Report invalid participant weights ('part_weight') for Imperial."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_IMPERIAL_PROJECT_ID,
            fields=['part_weight'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveImperialRedcapInvalidBmi(
        RedcapInvalidBmi):
    """Report invalid participant BMI values ('part_bmi') for Imperial."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_IMPERIAL_PROJECT_ID,
            fields=['part_bmi'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
# Grantham (external REDCap instance, project 59)
class BraveRedcapGranthamWebDataQuality(RedcapWebDataQuality):
    """Standard REDCap web data-quality checks for the Grantham project."""
    def __init__(self):
        super().__init__(
            RedcapInstance.external,
            REDCAP_GRANTHAM_PROJECT_ID,
            [RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveGranthamRedcapInvalidNhsNumber(RedcapInvalidNhsNumber):
    """Report invalid NHS numbers ('nhs_number') for Grantham."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_GRANTHAM_PROJECT_ID,
            fields=['nhs_number'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveGranthamRedcapInvalidDate(
        RedcapInvalidDate):
    """Report invalid dates for Grantham."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_GRANTHAM_PROJECT_ID,
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveGranthamRedcapInvalidStudyNumber(
        RedcapInvalidStudyNumber):
    """Report invalid study numbers ('record_id', 'briccs_id') for Grantham."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_GRANTHAM_PROJECT_ID,
            fields=['record_id', 'briccs_id'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveGranthamRedcapRecordInvalidStudyNumber(
        RedcapRecordInvalidStudyNumber):
    """Report records whose study number is invalid for Grantham."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_GRANTHAM_PROJECT_ID,
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveGranthamRedcapInvalidPostCode(
        RedcapInvalidPostCode):
    """Report invalid postcodes ('address_postcode') for Grantham."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_GRANTHAM_PROJECT_ID,
            fields=['address_postcode'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveGranthamRedcapInvalidEmailAddress(
        RedcapInvalidEmailAddress):
    """Report invalid e-mail addresses for Grantham."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_GRANTHAM_PROJECT_ID,
            fields=[
                'pat_email1',
                'pat_email2',
            ],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveGranthamRedcapInvalidBloodPressure1(
        RedcapInvalidBloodPressure):
    """Report invalid values for blood-pressure reading 1 for Grantham."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_GRANTHAM_PROJECT_ID,
            systolic_field_name='part_bp1_sys',
            # NOTE(review): diastolic field is 'part_bp_dias', not
            # 'part_bp1_dias' — confirm this matches the REDCap field name.
            diastolic_field_name='part_bp_dias',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveGranthamRedcapInvalidBloodPressure2(
        RedcapInvalidBloodPressure):
    """Report invalid values for blood-pressure reading 2 for Grantham."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_GRANTHAM_PROJECT_ID,
            systolic_field_name='part_bp2_sys',
            diastolic_field_name='part_bp2_dias',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveGranthamRedcapInvalidBloodPressure3(
        RedcapInvalidBloodPressure):
    """Report invalid values for blood-pressure reading 3 for Grantham."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_GRANTHAM_PROJECT_ID,
            systolic_field_name='part_bp3_sys',
            diastolic_field_name='part_bp3_dias',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveGranthamRedcapInvalidBloodPressureAvg(
        RedcapInvalidBloodPressure):
    """Report invalid values for the averaged blood pressure for Grantham."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_GRANTHAM_PROJECT_ID,
            systolic_field_name='part_avg_sys_bp',
            diastolic_field_name='part_avg_dias_bp',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveGranthamRedcapInvalidPulse(
        RedcapInvalidPulse):
    """Report invalid pulse readings (three readings + average) for Grantham."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_GRANTHAM_PROJECT_ID,
            fields=[
                'part_pulse1',
                'part_pulse2',
                'part_pulse3',
                'avg_pulse',
            ],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveGranthamRedcapInvalidHeightInCm(
        RedcapInvalidHeightInCm):
    """Report invalid participant heights ('part_height') for Grantham."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_GRANTHAM_PROJECT_ID,
            fields=['part_height'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveGranthamRedcapInvalidWeightInKg(
        RedcapInvalidWeightInKg):
    """Report invalid participant weights ('part_weight') for Grantham."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_GRANTHAM_PROJECT_ID,
            fields=['part_weight'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveGranthamRedcapInvalidBmi(
        RedcapInvalidBmi):
    """Report invalid participant BMI values ('part_bmi') for Grantham."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_GRANTHAM_PROJECT_ID,
            fields=['part_bmi'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
# West Suffolk (external REDCap instance, project 60)
class BraveRedcapWestSuffolkWebDataQuality(RedcapWebDataQuality):
    """Standard REDCap web data-quality checks for the West Suffolk project."""
    def __init__(self):
        super().__init__(
            RedcapInstance.external,
            REDCAP_WEST_SUFFOLK_PROJECT_ID,
            [RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveWestSuffolkRedcapInvalidNhsNumber(RedcapInvalidNhsNumber):
    """Report invalid NHS numbers ('nhs_number') for West Suffolk."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_WEST_SUFFOLK_PROJECT_ID,
            fields=['nhs_number'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveWestSuffolkRedcapInvalidDate(
        RedcapInvalidDate):
    """Report invalid dates for West Suffolk."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_WEST_SUFFOLK_PROJECT_ID,
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveWestSuffolkRedcapInvalidStudyNumber(
        RedcapInvalidStudyNumber):
    """Report invalid study numbers ('record_id', 'briccs_id') for West Suffolk."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_WEST_SUFFOLK_PROJECT_ID,
            fields=['record_id', 'briccs_id'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveWestSuffolkRedcapRecordInvalidStudyNumber(
        RedcapRecordInvalidStudyNumber):
    """Report records whose study number is invalid for West Suffolk."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_WEST_SUFFOLK_PROJECT_ID,
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveWestSuffolkRedcapInvalidPostCode(
        RedcapInvalidPostCode):
    """Report invalid postcodes ('address_postcode') for West Suffolk."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_WEST_SUFFOLK_PROJECT_ID,
            fields=['address_postcode'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveWestSuffolkRedcapInvalidEmailAddress(
        RedcapInvalidEmailAddress):
    """Report invalid e-mail addresses for West Suffolk."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_WEST_SUFFOLK_PROJECT_ID,
            fields=[
                'pat_email1',
                'pat_email2',
            ],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveWestSuffolkRedcapInvalidBloodPressure1(
        RedcapInvalidBloodPressure):
    """Report invalid values for blood-pressure reading 1 for West Suffolk."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_WEST_SUFFOLK_PROJECT_ID,
            systolic_field_name='part_bp1_sys',
            # NOTE(review): diastolic field is 'part_bp_dias', not
            # 'part_bp1_dias' — confirm this matches the REDCap field name.
            diastolic_field_name='part_bp_dias',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveWestSuffolkRedcapInvalidBloodPressure2(
        RedcapInvalidBloodPressure):
    """Report invalid values for blood-pressure reading 2 for West Suffolk."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_WEST_SUFFOLK_PROJECT_ID,
            systolic_field_name='part_bp2_sys',
            diastolic_field_name='part_bp2_dias',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveWestSuffolkRedcapInvalidBloodPressure3(
        RedcapInvalidBloodPressure):
    """Report invalid values for blood-pressure reading 3 for West Suffolk."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_WEST_SUFFOLK_PROJECT_ID,
            systolic_field_name='part_bp3_sys',
            diastolic_field_name='part_bp3_dias',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveWestSuffolkRedcapInvalidBloodPressureAvg(
        RedcapInvalidBloodPressure):
    """Report invalid values for the averaged blood pressure for West Suffolk."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_WEST_SUFFOLK_PROJECT_ID,
            systolic_field_name='part_avg_sys_bp',
            diastolic_field_name='part_avg_dias_bp',
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveWestSuffolkRedcapInvalidPulse(
        RedcapInvalidPulse):
    """Report invalid pulse readings (three readings + average) for West Suffolk."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_WEST_SUFFOLK_PROJECT_ID,
            fields=[
                'part_pulse1',
                'part_pulse2',
                'part_pulse3',
                'avg_pulse',
            ],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveWestSuffolkRedcapInvalidHeightInCm(
        RedcapInvalidHeightInCm):
    """Report invalid participant heights ('part_height') for West Suffolk."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_WEST_SUFFOLK_PROJECT_ID,
            fields=['part_height'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveWestSuffolkRedcapInvalidWeightInKg(
        RedcapInvalidWeightInKg):
    """Report invalid participant weights ('part_weight') for West Suffolk."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_WEST_SUFFOLK_PROJECT_ID,
            fields=['part_weight'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
class BraveWestSuffolkRedcapInvalidBmi(
        RedcapInvalidBmi):
    """Report invalid participant BMI values ('part_bmi') for West Suffolk."""
    def __init__(self):
        super().__init__(
            redcap_instance=RedcapInstance.external,
            project_id=REDCAP_WEST_SUFFOLK_PROJECT_ID,
            fields=['part_bmi'],
            recipients=[RECIPIENT_ADMIN, RECIPIENT_MANAGER],
        )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.