hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c4ad0de484852ef9a59a8447411a80386935077c | 443 | py | Python | lims/shared/apps.py | sqilz/LIMS-Backend | b64e1fa512f89e4492803d44c6b8c35e4d4724cc | [
"MIT"
] | null | null | null | lims/shared/apps.py | sqilz/LIMS-Backend | b64e1fa512f89e4492803d44c6b8c35e4d4724cc | [
"MIT"
] | null | null | null | lims/shared/apps.py | sqilz/LIMS-Backend | b64e1fa512f89e4492803d44c6b8c35e4d4724cc | [
"MIT"
] | null | null | null | import sys
from django.apps import AppConfig
from django.db.models.signals import post_save
| 29.533333 | 95 | 0.68623 | import sys
from django.apps import AppConfig
from django.db.models.signals import post_save
class SharedConfig(AppConfig):
name = 'lims.shared'
def ready(self):
if ('runserver' in sys.argv or '/usr/local/bin/daphne' in sys.argv
or 'runworker' in sys.argv):
from lims.shared.models import TriggerSet
post_save.connect(TriggerSet()._fire_triggersets, dispatch_uid='Fire Trigger Sets')
| 265 | 61 | 23 |
b2b0e485d14a0baf41773f90c2eb52cdc85221e1 | 10,595 | py | Python | src/patterns.py | KathTheDragon/SCE | f1ade0a0d40582b68b59cf9725e67ab6358d0237 | [
"MIT"
] | 1 | 2020-12-23T07:53:56.000Z | 2020-12-23T07:53:56.000Z | src/patterns.py | KathTheDragon/SCE | f1ade0a0d40582b68b59cf9725e67ab6358d0237 | [
"MIT"
] | null | null | null | src/patterns.py | KathTheDragon/SCE | f1ade0a0d40582b68b59cf9725e67ab6358d0237 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from typing import overload
from .words import Word
@overload
@overload
@dataclass
@dataclass(repr=False)
@dataclass(repr=False)
@dataclass(repr=False)
@dataclass(repr=False)
@dataclass(repr=False)
@dataclass(repr=False)
@dataclass(repr=False)
@dataclass(repr=False)
@dataclass(repr=False)
@dataclass(repr=False)
@dataclass(repr=False)
@dataclass
| 36.916376 | 161 | 0.612742 | from dataclasses import dataclass
from typing import overload
from .words import Word
class MatchFailed(Exception):
pass
@overload
def advance(word: Word, length: int, start: int, stop: None) -> tuple[int, None]:
...
@overload
def advance(word: Word, length: int, start: None, stop: int) -> tuple[None, int]:
...
def advance(word, length, start=None, stop=None):
if (start is None) == (stop is None):
raise TypeError('exactly one of start and stop must be given.')
elif start is not None:
if 0 <= start <= len(word) - length:
return start + length, None
else:
raise MatchFailed()
else: # stop is not None
if length <= stop <= len(word):
return None, stop - length
else:
raise MatchFailed()
def get_index(word: Word, start: int|None=None, stop: int|None=None) -> int:
if (start is None) == (stop is None):
raise TypeError('exactly one of start and stop must be given.')
elif start is not None:
index = start
else: # stop is not None
index = stop - 1
if 0 <= index < len(word):
return index
else:
raise MatchFailed()
def Match(start: int, stop: int) -> slice:
return slice(start, stop)
@dataclass
class Element:
def __str__(self) -> str:
return ''
def __repr__(self) -> str:
return f'{self.__class__.__name__}({str(self)!r})'
def match(self, word: Word, start: int|None=None, stop: int|None=None) -> tuple[int, dict[int, int]]:
if (start is None) == (stop is None):
raise TypeError('exactly one of start and stop must be given.')
else:
raise MatchFailed()
class CharacterMixin:
def _match(self, word: Word, index: int) -> bool:
return False
def match(self, word: Word, start: int|None=None, stop: int|None=None, catixes: dict[int, int]={}) -> tuple[int, dict[int, int]]:
index = get_index(word, start=start, stop=stop)
if self._match(word, index):
return 1, catixes
else:
raise MatchFailed()
@dataclass(repr=False)
class Grapheme(CharacterMixin, Element):
grapheme: str
def __str__(self) -> str:
return self.grapheme
def _match(self, word: Word, index: int) -> bool:
return word[index] == self.grapheme
@dataclass(repr=False)
class Ditto(CharacterMixin, Element):
def __str__(self) -> str:
return '"'
def _match(self, word: Word, index: int) -> bool:
return index and word[index] == word[index-1]
@dataclass(repr=False)
class Category(Element):
category: 'cats.Category'
subscript: int | None
def __str__(self) -> str:
if self.category.name is None:
return str(self.category)
else:
return f'[{self.category.name}]'
def match(self, word: Word, start: int|None=None, stop: int|None=None, catixes: dict[int, int]={}) -> tuple[int, dict[int, int]]:
index = get_index(word, start=start, stop=stop)
# Note that this will change if sequences become supported in categories
if self.subscript is None:
if word[index] in self.category:
return 1, catixes
elif self.subscript in catixes:
if word[index] == self.category[catixes[self.subscript]]:
return 1, catixes
else:
if word[index] in self.category:
return 1, catixes | {self.subscript: self.category.index(word[index])}
raise MatchFailed()
@dataclass(repr=False)
class BranchMixin:
greedy: bool
def match_pattern(self, pattern: 'Pattern', word: Word, start: int|None=None, stop: int|None=None, catixes: dict[int, int]={}) -> tuple[int, dict[int, int]]:
if self.greedy:
try:
return self._match_branch(pattern, word, start, stop, catixes)
except MatchFailed:
return pattern._match(word, start, stop, catixes)
else:
try:
return pattern._match(word, start, stop, catixes)
except:
return self._match_branch(pattern, word, start, stop, catixes)
@dataclass(repr=False)
class WildcardMixin(BranchMixin):
def _match_branch(self, pattern: 'Pattern', word: Word, start:int|None=None, stop: int|None=None, catixes: dict[int, int]={}) -> tuple[int, dict[int, int]]:
return self.match_pattern(pattern, word, start, stop, catixes)
def match_pattern(self, pattern: 'Pattern', word: Word, start:int|None=None, stop: int|None=None, catixes: dict[int, int]={}) -> tuple[int, dict[int, int]]:
length, catixes = self.match(word, start, stop, catixes)
_length, catixes = super().match_pattern(pattern, word, *advance(word, length, start, stop), catixes)
return length + _length, catixes
@dataclass(repr=False)
class Wildcard(WildcardMixin, CharacterMixin, Element):
extended: bool
def __str__(self) -> str:
return ('**' if self.extended else '*') + ('' if self.greedy else '?')
def _match(self, word: Word, index: int) -> bool:
return self.extended or word[index] != '#'
@dataclass(repr=False)
class SubpatternMixin:
pattern: 'Pattern'
def match(self, word: Word, start: int|None=None, stop: int|None=None, catixes: dict[int, int]={}) -> tuple[int, dict[int, int]]:
return self.pattern._match(word, start=start, stop=stop, catixes=catixes)
@dataclass(repr=False)
class Repetition(SubpatternMixin, Element):
number: int
def __str__(self) -> str:
return f'({self.pattern}){{{self.number}}}'
def match_pattern(self, pattern: 'Pattern', word: Word, start:int|None=None, stop: int|None=None, catixes: dict[int, int]={}) -> tuple[int, dict[int, int]]:
length = 0
for _ in range(self.number):
_length, catixes = self.match(word, *advance(word, length, start, stop), catixes=catixes)
length += _length
_length, catixes = pattern._match(word, *advance(word, length, start, stop), catixes=catixes)
return length + _length, catixes
@dataclass(repr=False)
class WildcardRepetition(WildcardMixin, SubpatternMixin, Element):
def __str__(self) -> str:
return f'({self.pattern})' + ('{*}' if self.greedy else '{*?}')
@dataclass(repr=False)
class Optional(BranchMixin, SubpatternMixin, Element):
def __str__(self) -> str:
return f'({self.pattern})' + ('' if self.greedy else '?')
def _match_branch(self, pattern: 'Pattern', word: Word, start:int|None=None, stop: int|None=None, catixes: dict[int, int]={}) -> tuple[int, dict[int, int]]:
length, catixes = self.match(word, start, stop, catixes)
_length, catixes = pattern._match(word, *advance(word, length, start, stop), catixes)
return length + _length, catixes
@dataclass(repr=False)
class TargetRef(Element):
direction: int
def __str__(self) -> str:
return '%' if self.direction == 1 else '<'
@dataclass
class Pattern:
elements: list[Element]
def __str__(self) -> str:
return ''.join(map(str, self.elements))
def __repr__(self) -> str:
return f'Pattern({str(self)!r})'
def __bool__(self) -> bool:
return bool(self.elements)
def resolve(self, target: Word) -> 'Pattern':
_target = [Grapheme(phone) for phone in target]
_rtarget = reversed(_target)
elements = []
for element in self.elements:
if isinstance(element, TargetRef):
elements.extend(_target if element.direction == 1 else _rtarget)
elif isinstance(element, Repetition):
elements.append(Repetition(element.pattern.resolve(target), element.number))
elif isinstance(element, WildcardRepetition):
elements.append(WildcardRepetition(element.pattern.resolve(target), element.greedy))
elif isinstance(element, Optional):
elements.append(Optional(element.pattern.resolve(target), element.greedy))
else:
elements.append(element)
return Pattern(elements)
def as_phones(self, last_phone: str, catixes: dict[int, int]={}) -> list[str]:
phones = []
for elem in self.elements:
if isinstance(elem, Grapheme):
phones.append(elem.grapheme)
elif isinstance(elem, Ditto):
phones.append(phones[-1] if phones else last_phone)
elif isinstance(elem, Category):
if elem.subscript in catixes:
phones.append(elem.category[catixes[elem.subscript]])
else:
raise ValueError(f'no index for category {str(elem.subscript)!r}')
elif isinstance(elem, Repetition):
for _ in range(elem.number):
phones.extend(elem.pattern.as_phones(phones[-1] if phones else last_phone, catixes))
else:
raise TypeError(f'cannot convert {type(elem).__name__!r} to phones')
return phones
def _match(self, word: Word, start: int|None=None, stop: int|None=None, catixes: dict[int, int]={}) -> tuple[int, dict[int, int]]:
if (start is None) == (stop is None):
raise TypeError('exactly one of start and stop must be given.')
elif start is not None:
iter_elements = ((element, Pattern(self.elements[i+1:])) for i, element in enumerate(self.elements))
else: # stop is not None
iter_elements = ((element, Pattern(self.elements[:i])) for i, element in reversed(list(enumerate(self.elements))))
length = 0
for element, pattern in iter_elements:
if hasattr(element, 'match_pattern'):
_length, catixes = element.match_pattern(pattern, word, *advance(word, length, start, stop), catixes)
length += _length
break
else:
_length, catixes = element.match(word, *advance(word, length, start, stop), catixes)
length += _length
return length, catixes
def match(self, word: Word, start: int|None=None, stop: int|None=None, catixes: dict[int, int]={}) -> tuple[slice|None, dict[int, int]]:
try:
length, catixes = self._match(word, start, stop, catixes)
if start is not None:
return Match(start, start+length), catixes
else: # stop is not None
return Match(stop-length, stop), catixes
except MatchFailed:
return None, {}
| 8,552 | 871 | 762 |
0f8ca47744a52c3f1507469b5558e68c1be7b0d4 | 23,813 | py | Python | pysnmp-with-texts/SYMME1T1.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/SYMME1T1.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/SYMME1T1.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module SYMME1T1 (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/neermitt/Dev/kusanagi/mibs.snmplabs.com/asn1/SYMME1T1
# Produced by pysmi-0.3.4 at Tue Jul 30 11:34:59 2019
# On host NEERMITT-M-J0NV platform Darwin version 18.6.0 by user neermitt
# Using Python version 3.7.4 (default, Jul 9 2019, 18:13:23)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint")
entPhysicalIndex, = mibBuilder.importSymbols("ENTITY-MIB", "entPhysicalIndex")
ifNumber, ifIndex = mibBuilder.importSymbols("IF-MIB", "ifNumber", "ifIndex")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
NotificationType, Unsigned32, Bits, iso, Counter32, MibIdentifier, ModuleIdentity, TimeTicks, IpAddress, Integer32, Gauge32, ObjectIdentity, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Unsigned32", "Bits", "iso", "Counter32", "MibIdentifier", "ModuleIdentity", "TimeTicks", "IpAddress", "Integer32", "Gauge32", "ObjectIdentity", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
EnableValue, symmPhysicalSignal, ONVALUETYPE = mibBuilder.importSymbols("SYMM-COMMON-SMI", "EnableValue", "symmPhysicalSignal", "ONVALUETYPE")
symmE1T1 = ModuleIdentity((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2))
symmE1T1.setRevisions(('2011-03-18 17:06',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: symmE1T1.setRevisionsDescriptions(('Revision 1.0',))
if mibBuilder.loadTexts: symmE1T1.setLastUpdated('201103181705Z')
if mibBuilder.loadTexts: symmE1T1.setOrganization('Symmetricom.')
if mibBuilder.loadTexts: symmE1T1.setContactInfo('Symmetricom Technical Support 1-888-367-7966 toll free USA 1-408-428-7907 worldwide Support@symmetricom.com')
if mibBuilder.loadTexts: symmE1T1.setDescription('This is the Symmetricom Common MIB for the configuration and status monitoring of E1/T1 ports in the system. It is one of the MIBs under the symmPhysicalSignal node. This MIB is organized into two main nodes: input and output. Each node is further has two tables, one for status and one for configuration.')
e1T1input = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1))
inputE1T1Status = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 1))
e1T1InputStatusTable = MibTable((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 1, 1), )
if mibBuilder.loadTexts: e1T1InputStatusTable.setStatus('current')
if mibBuilder.loadTexts: e1T1InputStatusTable.setDescription('This table contains status information for each E1/T1 input port.')
e1T1InputStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "SYMME1T1", "e1T1InputStatusIndex"))
if mibBuilder.loadTexts: e1T1InputStatusEntry.setStatus('current')
if mibBuilder.loadTexts: e1T1InputStatusEntry.setDescription('An entry of the e1T1InputStatusTable. Table index is ifIndex (port/interface index). Each entry has three parameters for the specified E1/T1 input port: 1. Port enable status (enable or disable) 2. Current value of the incoming SSM 3. Port status ')
e1T1InputStatusIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000)))
if mibBuilder.loadTexts: e1T1InputStatusIndex.setStatus('current')
if mibBuilder.loadTexts: e1T1InputStatusIndex.setDescription('Local index of the E1/T1 input status table.')
e1T1InputPQLCurValueV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 1, 1, 1, 3), TP5000PQLVALUE()).setMaxAccess("readonly")
if mibBuilder.loadTexts: e1T1InputPQLCurValueV1.setStatus('current')
if mibBuilder.loadTexts: e1T1InputPQLCurValueV1.setDescription('The current PQL value of the incoming SSM on this input port.')
e1T1InputPortStatusV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 1, 1, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: e1T1InputPortStatusV1.setStatus('current')
if mibBuilder.loadTexts: e1T1InputPortStatusV1.setDescription('The port status of the specified input E1/T1 input port. Possible values are On (1) and Off (2). When the input port state is enabled, port status becomes on. When input port state is disabled, input port status is off.')
e1T1InputConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2))
e1T1InputConfigTable = MibTable((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1), )
if mibBuilder.loadTexts: e1T1InputConfigTable.setStatus('current')
if mibBuilder.loadTexts: e1T1InputConfigTable.setDescription('Configuration Table for E1/T1 input ports')
e1T1InputConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "SYMME1T1", "e1T1InputConfigIndex"))
if mibBuilder.loadTexts: e1T1InputConfigEntry.setStatus('current')
if mibBuilder.loadTexts: e1T1InputConfigEntry.setDescription('An entry of the E1/T1 input configuration table. Table index is ifIndex (port/interface). Each entry has the following configuration parameters for the selected input port: 1. Frame type 2. CRC enable state 3. SSM enable state 4. SSM bit position 5. Default PQL value that can be used to override the input SSM value 6. Zero suppression state ')
e1T1InputConfigIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000)))
if mibBuilder.loadTexts: e1T1InputConfigIndex.setStatus('current')
if mibBuilder.loadTexts: e1T1InputConfigIndex.setDescription('Local index of the E1/T1 input configuration table.')
e1T1InputFrameTypeV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1, 2), INPUTE1T1FRAMETYPE()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1InputFrameTypeV1.setStatus('current')
if mibBuilder.loadTexts: e1T1InputFrameTypeV1.setDescription('E1 or T1 input frame type. Supported frame types include: 1. Freq1544khz (1) 2. Freq2048khz (2) 3. CCS (3) 4. CAS (4) 5. D4 (5) 6. ESF (6) Default frame type is 2048 kHz ')
e1T1InputCRCStateV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1, 3), EnableValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1InputCRCStateV1.setStatus('current')
if mibBuilder.loadTexts: e1T1InputCRCStateV1.setDescription('CRC enable state can be Enable (1) or Disable (2). Disabling the CRC means the CRC in the SSM is not used.')
e1T1InputSSMStateV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1, 4), EnableValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1InputSSMStateV1.setStatus('current')
if mibBuilder.loadTexts: e1T1InputSSMStateV1.setDescription("SSM enable state. It can be Enable (1) or Disable (2). Disabling the SSM means the incoming SSM is not used, and the forced (default) PQL value for this input port will be used during the reference selection. SSM is supported for only three frame types: EFS, CAS with CRC4, and CCA with CRC4. SSM should not be enabled for other frame types. If SSM is enabled for an input port, but the frame type does not support SSM or is not sending a valid SSM, then this input will be disqualified and the input PQL will be set to 'invalid.' The system will go into holdover no other qualified reference is available. ")
e1T1InputSSMBitV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(4, 8))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1InputSSMBitV1.setStatus('current')
if mibBuilder.loadTexts: e1T1InputSSMBitV1.setDescription('SSM Bit position. The value range is 4 to 8. This parameter is only used for frame types ESF, CCS, or CAS.')
e1T1InputPQLValueV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1, 6), TP5000PQLVALUE()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1InputPQLValueV1.setStatus('current')
if mibBuilder.loadTexts: e1T1InputPQLValueV1.setDescription('The user assigned PQL value for the specified input. This PQL value is used when the SSM state is disabled. The range for the user assigned PQL value is 1 to 9. ')
eT1InputZeroSupprV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1, 7), ONVALUETYPE()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eT1InputZeroSupprV1.setStatus('current')
if mibBuilder.loadTexts: eT1InputZeroSupprV1.setDescription('The number indicates whether zero suppression (ZS) on the input port is enabled or disabled. Valid values are On (1) or Off (2). ')
e1T1Output = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2))
e1T1OutputStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 1))
e1T1OutputStatusTable = MibTable((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 1, 1), )
if mibBuilder.loadTexts: e1T1OutputStatusTable.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputStatusTable.setDescription('This table contains status information for each E1/T1 output port.')
e1T1OutputStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "SYMME1T1", "e1T1OutputStatusIndex"))
if mibBuilder.loadTexts: e1T1OutputStatusEntry.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputStatusEntry.setDescription('An entry of the e1T1OutputStatusTable. Table index is ifIndex (port/interface index). Each entry has two parameters for the specified E1/T1 input port: 1. Port status 2. Outgoing SSM value ')
e1T1OutputStatusIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000)))
if mibBuilder.loadTexts: e1T1OutputStatusIndex.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputStatusIndex.setDescription('Local index of the E1/T1 output status table.')
e1T1OutputPortStatusV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 1, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: e1T1OutputPortStatusV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputPortStatusV1.setDescription("The port status of the specified E1/T1 output port. Possible values are On (1) and Off (2). 'On' means there is signal on the port. For E1/T1 output port it means the system is in normal tracking mode. 'Off' means there is no signal on the port. For E1/T1 output port it means the output is squelched during some clock states.")
e1T1OutputPQLValueV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 1, 1, 1, 3), TP5000PQLVALUE()).setMaxAccess("readonly")
if mibBuilder.loadTexts: e1T1OutputPQLValueV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputPQLValueV1.setDescription('The PQL value for the specified E1/T1 output port.')
e1T1OutputConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2))
e1T1OutputConfigTable = MibTable((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1), )
if mibBuilder.loadTexts: e1T1OutputConfigTable.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputConfigTable.setDescription('This table contains configuration information for each E1/T1 output port.')
e1T1OutputConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "SYMME1T1", "e1T1OutputConfigIndex"))
if mibBuilder.loadTexts: e1T1OutputConfigEntry.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputConfigEntry.setDescription('An entry of the e1T1OutputConfigTable. Table index is ifIndex (port/interface index). Each entry has the configuration parameters for the specified E1/T1 output port: 1. Port enable state 2. Frame type 3. CRC enable state 4. SSM enable state 5. SSM bit position 6. Zero suppression on/off state 7. Output port cable length ')
e1T1OutputConfigIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000)))
if mibBuilder.loadTexts: e1T1OutputConfigIndex.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputConfigIndex.setDescription('Local index of the E1/T1 output configuration table.')
e1T1OutputStateV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 2), PORTSTATETYPE()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1OutputStateV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputStateV1.setDescription('E1/T1 output port enable state. Its value can be Enable (1) or Disable (2). Disabling an output port means no output is generated for that port.')
e1T1OutputFrameTypeV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 3), OUTPUTE1T1FRAMETYPE()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1OutputFrameTypeV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputFrameTypeV1.setDescription('E1 or T1 output frame type. Supported frame types include: 1. Freq1544khz (1) 2. Freq2048khz (2) 3. CCS (3) 4. CAS (4) 5. D4 (5) 6. ESF (6) Default frame type is 2048 kHz. ')
e1T1OutputCRCStateV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 4), EnableValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1OutputCRCStateV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputCRCStateV1.setDescription('CRC enable state can be Enable (1) or Disable (2). Disabling the CRC means that no CRC is generated for the SSM.')
e1T1OutputSSMStateV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 5), EnableValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1OutputSSMStateV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputSSMStateV1.setDescription('SSM enable state. It can be Enable (1) or Disable (2). Disabling the output SSM means that no SSM is generated for the specified output port.')
e1T1OutputSSMBitV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(4, 8))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1OutputSSMBitV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputSSMBitV1.setDescription('SSM Bit position. The value range is 4 to 8. This parameter is only used for frame types ESF, CCS, or CAS.')
e1T1OutputZeroSupprV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 7), ONVALUETYPE()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1OutputZeroSupprV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputZeroSupprV1.setDescription('The number indicates whether zero suppression (ZS) on the output port is enabled or disabled. Valid values are On (1) or Off (2). ')
e1T1OutputLengthV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1OutputLengthV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputLengthV1.setDescription('Output cable length. ')
e1T1Conformance = ObjectIdentity((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3))
if mibBuilder.loadTexts: e1T1Conformance.setStatus('current')
if mibBuilder.loadTexts: e1T1Conformance.setDescription('This node contains conformance statement for the symmE1T1 MIB module. ')
e1T1Compliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3, 1))
e1T1BasicCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3, 1, 1)).setObjects(("SYMME1T1", "e1T1InputStatusGroup"), ("SYMME1T1", "e11T1InputConfigGroup"), ("SYMME1T1", "e11T1OutputStatusGroup"), ("SYMME1T1", "e11T1OutputConfigGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
e1T1BasicCompliance = e1T1BasicCompliance.setStatus('current')
if mibBuilder.loadTexts: e1T1BasicCompliance.setDescription('The compliance statement for SNMP entities which have E1/T1 input/output.')
e1T1UocGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3, 2))
e1T1InputStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3, 2, 1)).setObjects(("SYMME1T1", "e1T1InputPortStatusV1"), ("SYMME1T1", "e1T1InputPQLCurValueV1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
e1T1InputStatusGroup = e1T1InputStatusGroup.setStatus('current')
if mibBuilder.loadTexts: e1T1InputStatusGroup.setDescription('A collection of objects providing information applicable to E1/T1 input status group.')
e11T1InputConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3, 2, 2)).setObjects(("SYMME1T1", "e1T1InputFrameTypeV1"), ("SYMME1T1", "e1T1InputCRCStateV1"), ("SYMME1T1", "e1T1InputSSMStateV1"), ("SYMME1T1", "e1T1InputSSMBitV1"), ("SYMME1T1", "e1T1InputPQLValueV1"), ("SYMME1T1", "eT1InputZeroSupprV1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
e11T1InputConfigGroup = e11T1InputConfigGroup.setStatus('current')
if mibBuilder.loadTexts: e11T1InputConfigGroup.setDescription('A collection of objects providing information applicable to E1/T1 input configuration group.')
e11T1OutputStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3, 2, 3)).setObjects(("SYMME1T1", "e1T1OutputPortStatusV1"), ("SYMME1T1", "e1T1OutputPQLValueV1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
e11T1OutputStatusGroup = e11T1OutputStatusGroup.setStatus('current')
if mibBuilder.loadTexts: e11T1OutputStatusGroup.setDescription('A collection of objects providing information applicable to E1/T1 output status group.')
e11T1OutputConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3, 2, 4)).setObjects(("SYMME1T1", "e1T1OutputStateV1"), ("SYMME1T1", "e1T1OutputFrameTypeV1"), ("SYMME1T1", "e1T1OutputCRCStateV1"), ("SYMME1T1", "e1T1OutputSSMStateV1"), ("SYMME1T1", "e1T1OutputSSMBitV1"), ("SYMME1T1", "e1T1OutputLengthV1"), ("SYMME1T1", "e1T1OutputZeroSupprV1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
e11T1OutputConfigGroup = e11T1OutputConfigGroup.setStatus('current')
if mibBuilder.loadTexts: e11T1OutputConfigGroup.setDescription('A collection of objects providing information applicable to E1/T1 output configuration group.')
mibBuilder.exportSymbols("SYMME1T1", TLocalTimeOffset=TLocalTimeOffset, TLatAndLon=TLatAndLon, e1T1InputCRCStateV1=e1T1InputCRCStateV1, e1T1InputFrameTypeV1=e1T1InputFrameTypeV1, e11T1InputConfigGroup=e11T1InputConfigGroup, e1T1InputConfigTable=e1T1InputConfigTable, e11T1OutputConfigGroup=e11T1OutputConfigGroup, e1T1InputStatusGroup=e1T1InputStatusGroup, e1T1OutputStatusEntry=e1T1OutputStatusEntry, OUTPUTE1T1FRAMETYPE=OUTPUTE1T1FRAMETYPE, e1T1OutputLengthV1=e1T1OutputLengthV1, e1T1InputSSMStateV1=e1T1InputSSMStateV1, e1T1BasicCompliance=e1T1BasicCompliance, e1T1OutputStatusIndex=e1T1OutputStatusIndex, e1T1OutputStateV1=e1T1OutputStateV1, e1T1InputPortStatusV1=e1T1InputPortStatusV1, e1T1Output=e1T1Output, e1T1UocGroups=e1T1UocGroups, e1T1InputPQLValueV1=e1T1InputPQLValueV1, TSsm=TSsm, e1T1OutputStatus=e1T1OutputStatus, e11T1OutputStatusGroup=e11T1OutputStatusGroup, e1T1InputStatusIndex=e1T1InputStatusIndex, e1T1OutputFrameTypeV1=e1T1OutputFrameTypeV1, e1T1OutputStatusTable=e1T1OutputStatusTable, PYSNMP_MODULE_ID=symmE1T1, PORTSTATETYPE=PORTSTATETYPE, e1T1OutputSSMStateV1=e1T1OutputSSMStateV1, e1T1OutputPortStatusV1=e1T1OutputPortStatusV1, symmE1T1=symmE1T1, e1T1InputConfigEntry=e1T1InputConfigEntry, e1T1input=e1T1input, e1T1OutputPQLValueV1=e1T1OutputPQLValueV1, e1T1Compliances=e1T1Compliances, TAntHeight=TAntHeight, DateAndTime=DateAndTime, e1T1InputStatusEntry=e1T1InputStatusEntry, INPUTE1T1FRAMETYPE=INPUTE1T1FRAMETYPE, TP5000PQLVALUE=TP5000PQLVALUE, e1T1InputPQLCurValueV1=e1T1InputPQLCurValueV1, e1T1InputStatusTable=e1T1InputStatusTable, e1T1OutputConfigEntry=e1T1OutputConfigEntry, e1T1InputSSMBitV1=e1T1InputSSMBitV1, inputE1T1Status=inputE1T1Status, e1T1InputConfigIndex=e1T1InputConfigIndex, e1T1OutputCRCStateV1=e1T1OutputCRCStateV1, e1T1OutputConfigTable=e1T1OutputConfigTable, e1T1OutputZeroSupprV1=e1T1OutputZeroSupprV1, e1T1OutputConfig=e1T1OutputConfig, e1T1OutputConfigIndex=e1T1OutputConfigIndex, eT1InputZeroSupprV1=eT1InputZeroSupprV1, 
e1T1OutputSSMBitV1=e1T1OutputSSMBitV1, e1T1InputConfig=e1T1InputConfig, e1T1Conformance=e1T1Conformance)
| 123.38342 | 2,082 | 0.754504 | #
# PySNMP MIB module SYMME1T1 (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/neermitt/Dev/kusanagi/mibs.snmplabs.com/asn1/SYMME1T1
# Produced by pysmi-0.3.4 at Tue Jul 30 11:34:59 2019
# On host NEERMITT-M-J0NV platform Darwin version 18.6.0 by user neermitt
# Using Python version 3.7.4 (default, Jul 9 2019, 18:13:23)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint")
entPhysicalIndex, = mibBuilder.importSymbols("ENTITY-MIB", "entPhysicalIndex")
ifNumber, ifIndex = mibBuilder.importSymbols("IF-MIB", "ifNumber", "ifIndex")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
NotificationType, Unsigned32, Bits, iso, Counter32, MibIdentifier, ModuleIdentity, TimeTicks, IpAddress, Integer32, Gauge32, ObjectIdentity, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Unsigned32", "Bits", "iso", "Counter32", "MibIdentifier", "ModuleIdentity", "TimeTicks", "IpAddress", "Integer32", "Gauge32", "ObjectIdentity", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
EnableValue, symmPhysicalSignal, ONVALUETYPE = mibBuilder.importSymbols("SYMM-COMMON-SMI", "EnableValue", "symmPhysicalSignal", "ONVALUETYPE")
symmE1T1 = ModuleIdentity((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2))
symmE1T1.setRevisions(('2011-03-18 17:06',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: symmE1T1.setRevisionsDescriptions(('Revision 1.0',))
if mibBuilder.loadTexts: symmE1T1.setLastUpdated('201103181705Z')
if mibBuilder.loadTexts: symmE1T1.setOrganization('Symmetricom.')
if mibBuilder.loadTexts: symmE1T1.setContactInfo('Symmetricom Technical Support 1-888-367-7966 toll free USA 1-408-428-7907 worldwide Support@symmetricom.com')
if mibBuilder.loadTexts: symmE1T1.setDescription('This is the Symmetricom Common MIB for the configuration and status monitoring of E1/T1 ports in the system. It is one of the MIBs under the symmPhysicalSignal node. This MIB is organized into two main nodes: input and output. Each node is further has two tables, one for status and one for configuration.')
class TP5000PQLVALUE(Integer32):
    """Textual type for TP5000 PQL values; constrained to the range 1..15."""
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 15)
class INPUTE1T1FRAMETYPE(Integer32):
    """E1/T1 input frame type enumeration: freq1544khz(1), freq2048khz(2),
    ccs(3), cas(4), d4(5), esf(6)."""
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
    namedValues = NamedValues(("freq1544khz", 1), ("freq2048khz", 2), ("ccs", 3), ("cas", 4), ("d4", 5), ("esf", 6))
class PORTSTATETYPE(Integer32):
    """Port administrative state enumeration: enable(1), disable(2)."""
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("enable", 1), ("disable", 2))
class OUTPUTE1T1FRAMETYPE(Integer32):
    """E1/T1 output frame type enumeration: freq1544khz(1), freq2048khz(2),
    ccs(3), cas(4), d4(5), esf(6)."""
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
    namedValues = NamedValues(("freq1544khz", 1), ("freq2048khz", 2), ("ccs", 3), ("cas", 4), ("d4", 5), ("esf", 6))
class DateAndTime(TextualConvention, OctetString):
description = "A date-time specification. field octets contents range ----- ------ -------- ----- 1 1-2 year* 0..65536 2 3 month 1..12 3 4 day 1..31 4 5 hour 0..23 5 6 minutes 0..59 6 7 seconds 0..60 (use 60 for leap-second) 7 8 deci-seconds 0..9 8 9 direction from UTC '+' / '-' 9 10 hours from UTC* 0..13 10 11 minutes from UTC 0..59 * Notes: - the value of year is in network-byte order - daylight saving time in New Zealand is +13 For example, Tuesday May 26, 1992 at 1:30:15 PM EDT would be displayed as: 1992-5-26,13:30:15.0,-4:0 Note that if only local time is known, then timezone information (fields 8-10) is not present."
status = 'current'
displayHint = '2d-1d-1d,1d:1d:1d.1d,1a1d:1d'
subtypeSpec = OctetString.subtypeSpec + ConstraintsUnion(ValueSizeConstraint(8, 8), ValueSizeConstraint(11, 11), )
class TLatAndLon(TextualConvention, OctetString):
description = "antenna latitude and longitude specification. field octets contents range ----- ------ -------- ----- 1 1 +/-180 deg '+' / '-' 2 2 degree 0..180 3 3 minute 0..59 4 4 second 0..59 5 5 second fraction 0..99 +/- dd:mm:ss.ss "
status = 'current'
displayHint = '1a1d:1d:1d.1d'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(5, 5)
fixedLength = 5
class TAntHeight(TextualConvention, OctetString):
description = "antenna height specification. field octets contents range ----- ------ -------- ----- 1 1 +/- '+' / '-' 2 2-3 meter 0..10000 3 4 meter fraction 0..99 +/- hh.hh "
status = 'current'
displayHint = '1a2d.1d'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(4, 4)
fixedLength = 4
class TLocalTimeOffset(TextualConvention, OctetString):
description = "A local time offset specification. field octets contents range ----- ------ -------- ----- 1 1 direction from UTC '+' / '-' 2 2 hours from UTC* 0..13 3 3 minutes from UTC 0..59 * Notes: - the value of year is in network-byte order - The hours range is 0..13 For example, the -6 local time offset would be displayed as: -6:0 "
status = 'current'
displayHint = '1a1d:1d'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(3, 3)
fixedLength = 3
class TSsm(TextualConvention, Integer32):
description = 'The ssm hex code'
status = 'current'
displayHint = 'x'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 255)
e1T1input = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1))
inputE1T1Status = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 1))
e1T1InputStatusTable = MibTable((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 1, 1), )
if mibBuilder.loadTexts: e1T1InputStatusTable.setStatus('current')
if mibBuilder.loadTexts: e1T1InputStatusTable.setDescription('This table contains status information for each E1/T1 input port.')
e1T1InputStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "SYMME1T1", "e1T1InputStatusIndex"))
if mibBuilder.loadTexts: e1T1InputStatusEntry.setStatus('current')
if mibBuilder.loadTexts: e1T1InputStatusEntry.setDescription('An entry of the e1T1InputStatusTable. Table index is ifIndex (port/interface index). Each entry has three parameters for the specified E1/T1 input port: 1. Port enable status (enable or disable) 2. Current value of the incoming SSM 3. Port status ')
e1T1InputStatusIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000)))
if mibBuilder.loadTexts: e1T1InputStatusIndex.setStatus('current')
if mibBuilder.loadTexts: e1T1InputStatusIndex.setDescription('Local index of the E1/T1 input status table.')
e1T1InputPQLCurValueV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 1, 1, 1, 3), TP5000PQLVALUE()).setMaxAccess("readonly")
if mibBuilder.loadTexts: e1T1InputPQLCurValueV1.setStatus('current')
if mibBuilder.loadTexts: e1T1InputPQLCurValueV1.setDescription('The current PQL value of the incoming SSM on this input port.')
e1T1InputPortStatusV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 1, 1, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: e1T1InputPortStatusV1.setStatus('current')
if mibBuilder.loadTexts: e1T1InputPortStatusV1.setDescription('The port status of the specified input E1/T1 input port. Possible values are On (1) and Off (2). When the input port state is enabled, port status becomes on. When input port state is disabled, input port status is off.')
e1T1InputConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2))
e1T1InputConfigTable = MibTable((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1), )
if mibBuilder.loadTexts: e1T1InputConfigTable.setStatus('current')
if mibBuilder.loadTexts: e1T1InputConfigTable.setDescription('Configuration Table for E1/T1 input ports')
e1T1InputConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "SYMME1T1", "e1T1InputConfigIndex"))
if mibBuilder.loadTexts: e1T1InputConfigEntry.setStatus('current')
if mibBuilder.loadTexts: e1T1InputConfigEntry.setDescription('An entry of the E1/T1 input configuration table. Table index is ifIndex (port/interface). Each entry has the following configuration parameters for the selected input port: 1. Frame type 2. CRC enable state 3. SSM enable state 4. SSM bit position 5. Default PQL value that can be used to override the input SSM value 6. Zero suppression state ')
e1T1InputConfigIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000)))
if mibBuilder.loadTexts: e1T1InputConfigIndex.setStatus('current')
if mibBuilder.loadTexts: e1T1InputConfigIndex.setDescription('Local index of the E1/T1 input configuration table.')
e1T1InputFrameTypeV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1, 2), INPUTE1T1FRAMETYPE()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1InputFrameTypeV1.setStatus('current')
if mibBuilder.loadTexts: e1T1InputFrameTypeV1.setDescription('E1 or T1 input frame type. Supported frame types include: 1. Freq1544khz (1) 2. Freq2048khz (2) 3. CCS (3) 4. CAS (4) 5. D4 (5) 6. ESF (6) Default frame type is 2048 kHz ')
e1T1InputCRCStateV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1, 3), EnableValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1InputCRCStateV1.setStatus('current')
if mibBuilder.loadTexts: e1T1InputCRCStateV1.setDescription('CRC enable state can be Enable (1) or Disable (2). Disabling the CRC means the CRC in the SSM is not used.')
e1T1InputSSMStateV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1, 4), EnableValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1InputSSMStateV1.setStatus('current')
if mibBuilder.loadTexts: e1T1InputSSMStateV1.setDescription("SSM enable state. It can be Enable (1) or Disable (2). Disabling the SSM means the incoming SSM is not used, and the forced (default) PQL value for this input port will be used during the reference selection. SSM is supported for only three frame types: EFS, CAS with CRC4, and CCA with CRC4. SSM should not be enabled for other frame types. If SSM is enabled for an input port, but the frame type does not support SSM or is not sending a valid SSM, then this input will be disqualified and the input PQL will be set to 'invalid.' The system will go into holdover no other qualified reference is available. ")
e1T1InputSSMBitV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(4, 8))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1InputSSMBitV1.setStatus('current')
if mibBuilder.loadTexts: e1T1InputSSMBitV1.setDescription('SSM Bit position. The value range is 4 to 8. This parameter is only used for frame types ESF, CCS, or CAS.')
e1T1InputPQLValueV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1, 6), TP5000PQLVALUE()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1InputPQLValueV1.setStatus('current')
if mibBuilder.loadTexts: e1T1InputPQLValueV1.setDescription('The user assigned PQL value for the specified input. This PQL value is used when the SSM state is disabled. The range for the user assigned PQL value is 1 to 9. ')
eT1InputZeroSupprV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1, 7), ONVALUETYPE()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eT1InputZeroSupprV1.setStatus('current')
if mibBuilder.loadTexts: eT1InputZeroSupprV1.setDescription('The number indicates whether zero suppression (ZS) on the input port is enabled or disabled. Valid values are On (1) or Off (2). ')
e1T1Output = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2))
e1T1OutputStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 1))
e1T1OutputStatusTable = MibTable((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 1, 1), )
if mibBuilder.loadTexts: e1T1OutputStatusTable.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputStatusTable.setDescription('This table contains status information for each E1/T1 output port.')
e1T1OutputStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "SYMME1T1", "e1T1OutputStatusIndex"))
if mibBuilder.loadTexts: e1T1OutputStatusEntry.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputStatusEntry.setDescription('An entry of the e1T1OutputStatusTable. Table index is ifIndex (port/interface index). Each entry has two parameters for the specified E1/T1 input port: 1. Port status 2. Outgoing SSM value ')
e1T1OutputStatusIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000)))
if mibBuilder.loadTexts: e1T1OutputStatusIndex.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputStatusIndex.setDescription('Local index of the E1/T1 output status table.')
e1T1OutputPortStatusV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 1, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: e1T1OutputPortStatusV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputPortStatusV1.setDescription("The port status of the specified E1/T1 output port. Possible values are On (1) and Off (2). 'On' means there is signal on the port. For E1/T1 output port it means the system is in normal tracking mode. 'Off' means there is no signal on the port. For E1/T1 output port it means the output is squelched during some clock states.")
e1T1OutputPQLValueV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 1, 1, 1, 3), TP5000PQLVALUE()).setMaxAccess("readonly")
if mibBuilder.loadTexts: e1T1OutputPQLValueV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputPQLValueV1.setDescription('The PQL value for the specified E1/T1 output port.')
e1T1OutputConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2))
e1T1OutputConfigTable = MibTable((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1), )
if mibBuilder.loadTexts: e1T1OutputConfigTable.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputConfigTable.setDescription('This table contains configuration information for each E1/T1 output port.')
e1T1OutputConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "SYMME1T1", "e1T1OutputConfigIndex"))
if mibBuilder.loadTexts: e1T1OutputConfigEntry.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputConfigEntry.setDescription('An entry of the e1T1OutputConfigTable. Table index is ifIndex (port/interface index). Each entry has the configuration parameters for the specified E1/T1 output port: 1. Port enable state 2. Frame type 3. CRC enable state 4. SSM enable state 5. SSM bit position 6. Zero suppression on/off state 7. Output port cable length ')
e1T1OutputConfigIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000)))
if mibBuilder.loadTexts: e1T1OutputConfigIndex.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputConfigIndex.setDescription('Local index of the E1/T1 output configuration table.')
e1T1OutputStateV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 2), PORTSTATETYPE()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1OutputStateV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputStateV1.setDescription('E1/T1 output port enable state. Its value can be Enable (1) or Disable (2). Disabling an output port means no output is generated for that port.')
e1T1OutputFrameTypeV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 3), OUTPUTE1T1FRAMETYPE()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1OutputFrameTypeV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputFrameTypeV1.setDescription('E1 or T1 output frame type. Supported frame types include: 1. Freq1544khz (1) 2. Freq2048khz (2) 3. CCS (3) 4. CAS (4) 5. D4 (5) 6. ESF (6) Default frame type is 2048 kHz. ')
e1T1OutputCRCStateV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 4), EnableValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1OutputCRCStateV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputCRCStateV1.setDescription('CRC enable state can be Enable (1) or Disable (2). Disabling the CRC means that no CRC is generated for the SSM.')
e1T1OutputSSMStateV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 5), EnableValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1OutputSSMStateV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputSSMStateV1.setDescription('SSM enable state. It can be Enable (1) or Disable (2). Disabling the output SSM means that no SSM is generated for the specified output port.')
e1T1OutputSSMBitV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(4, 8))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1OutputSSMBitV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputSSMBitV1.setDescription('SSM Bit position. The value range is 4 to 8. This parameter is only used for frame types ESF, CCS, or CAS.')
e1T1OutputZeroSupprV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 7), ONVALUETYPE()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1OutputZeroSupprV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputZeroSupprV1.setDescription('The number indicates whether zero suppression (ZS) on the output port is enabled or disabled. Valid values are On (1) or Off (2). ')
e1T1OutputLengthV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1OutputLengthV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputLengthV1.setDescription('Output cable length. ')
e1T1Conformance = ObjectIdentity((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3))
if mibBuilder.loadTexts: e1T1Conformance.setStatus('current')
if mibBuilder.loadTexts: e1T1Conformance.setDescription('This node contains conformance statement for the symmE1T1 MIB module. ')
e1T1Compliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3, 1))
e1T1BasicCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3, 1, 1)).setObjects(("SYMME1T1", "e1T1InputStatusGroup"), ("SYMME1T1", "e11T1InputConfigGroup"), ("SYMME1T1", "e11T1OutputStatusGroup"), ("SYMME1T1", "e11T1OutputConfigGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
e1T1BasicCompliance = e1T1BasicCompliance.setStatus('current')
if mibBuilder.loadTexts: e1T1BasicCompliance.setDescription('The compliance statement for SNMP entities which have E1/T1 input/output.')
e1T1UocGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3, 2))
e1T1InputStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3, 2, 1)).setObjects(("SYMME1T1", "e1T1InputPortStatusV1"), ("SYMME1T1", "e1T1InputPQLCurValueV1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
e1T1InputStatusGroup = e1T1InputStatusGroup.setStatus('current')
if mibBuilder.loadTexts: e1T1InputStatusGroup.setDescription('A collection of objects providing information applicable to E1/T1 input status group.')
e11T1InputConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3, 2, 2)).setObjects(("SYMME1T1", "e1T1InputFrameTypeV1"), ("SYMME1T1", "e1T1InputCRCStateV1"), ("SYMME1T1", "e1T1InputSSMStateV1"), ("SYMME1T1", "e1T1InputSSMBitV1"), ("SYMME1T1", "e1T1InputPQLValueV1"), ("SYMME1T1", "eT1InputZeroSupprV1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
e11T1InputConfigGroup = e11T1InputConfigGroup.setStatus('current')
if mibBuilder.loadTexts: e11T1InputConfigGroup.setDescription('A collection of objects providing information applicable to E1/T1 input configuration group.')
e11T1OutputStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3, 2, 3)).setObjects(("SYMME1T1", "e1T1OutputPortStatusV1"), ("SYMME1T1", "e1T1OutputPQLValueV1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
e11T1OutputStatusGroup = e11T1OutputStatusGroup.setStatus('current')
if mibBuilder.loadTexts: e11T1OutputStatusGroup.setDescription('A collection of objects providing information applicable to E1/T1 output status group.')
e11T1OutputConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3, 2, 4)).setObjects(("SYMME1T1", "e1T1OutputStateV1"), ("SYMME1T1", "e1T1OutputFrameTypeV1"), ("SYMME1T1", "e1T1OutputCRCStateV1"), ("SYMME1T1", "e1T1OutputSSMStateV1"), ("SYMME1T1", "e1T1OutputSSMBitV1"), ("SYMME1T1", "e1T1OutputLengthV1"), ("SYMME1T1", "e1T1OutputZeroSupprV1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
e11T1OutputConfigGroup = e11T1OutputConfigGroup.setStatus('current')
if mibBuilder.loadTexts: e11T1OutputConfigGroup.setDescription('A collection of objects providing information applicable to E1/T1 output configuration group.')
mibBuilder.exportSymbols("SYMME1T1", TLocalTimeOffset=TLocalTimeOffset, TLatAndLon=TLatAndLon, e1T1InputCRCStateV1=e1T1InputCRCStateV1, e1T1InputFrameTypeV1=e1T1InputFrameTypeV1, e11T1InputConfigGroup=e11T1InputConfigGroup, e1T1InputConfigTable=e1T1InputConfigTable, e11T1OutputConfigGroup=e11T1OutputConfigGroup, e1T1InputStatusGroup=e1T1InputStatusGroup, e1T1OutputStatusEntry=e1T1OutputStatusEntry, OUTPUTE1T1FRAMETYPE=OUTPUTE1T1FRAMETYPE, e1T1OutputLengthV1=e1T1OutputLengthV1, e1T1InputSSMStateV1=e1T1InputSSMStateV1, e1T1BasicCompliance=e1T1BasicCompliance, e1T1OutputStatusIndex=e1T1OutputStatusIndex, e1T1OutputStateV1=e1T1OutputStateV1, e1T1InputPortStatusV1=e1T1InputPortStatusV1, e1T1Output=e1T1Output, e1T1UocGroups=e1T1UocGroups, e1T1InputPQLValueV1=e1T1InputPQLValueV1, TSsm=TSsm, e1T1OutputStatus=e1T1OutputStatus, e11T1OutputStatusGroup=e11T1OutputStatusGroup, e1T1InputStatusIndex=e1T1InputStatusIndex, e1T1OutputFrameTypeV1=e1T1OutputFrameTypeV1, e1T1OutputStatusTable=e1T1OutputStatusTable, PYSNMP_MODULE_ID=symmE1T1, PORTSTATETYPE=PORTSTATETYPE, e1T1OutputSSMStateV1=e1T1OutputSSMStateV1, e1T1OutputPortStatusV1=e1T1OutputPortStatusV1, symmE1T1=symmE1T1, e1T1InputConfigEntry=e1T1InputConfigEntry, e1T1input=e1T1input, e1T1OutputPQLValueV1=e1T1OutputPQLValueV1, e1T1Compliances=e1T1Compliances, TAntHeight=TAntHeight, DateAndTime=DateAndTime, e1T1InputStatusEntry=e1T1InputStatusEntry, INPUTE1T1FRAMETYPE=INPUTE1T1FRAMETYPE, TP5000PQLVALUE=TP5000PQLVALUE, e1T1InputPQLCurValueV1=e1T1InputPQLCurValueV1, e1T1InputStatusTable=e1T1InputStatusTable, e1T1OutputConfigEntry=e1T1OutputConfigEntry, e1T1InputSSMBitV1=e1T1InputSSMBitV1, inputE1T1Status=inputE1T1Status, e1T1InputConfigIndex=e1T1InputConfigIndex, e1T1OutputCRCStateV1=e1T1OutputCRCStateV1, e1T1OutputConfigTable=e1T1OutputConfigTable, e1T1OutputZeroSupprV1=e1T1OutputZeroSupprV1, e1T1OutputConfig=e1T1OutputConfig, e1T1OutputConfigIndex=e1T1OutputConfigIndex, eT1InputZeroSupprV1=eT1InputZeroSupprV1, 
e1T1OutputSSMBitV1=e1T1OutputSSMBitV1, e1T1InputConfig=e1T1InputConfig, e1T1Conformance=e1T1Conformance)
| 0 | 3,021 | 205 |
773f3d414053d1514aceb10d5e3941b04bd8e910 | 989 | py | Python | alertlistener/main.py | ashleykelham/shift-your-alert-polcies-left | 1e5124b76d2ec5c6d4556d1730ea5a1bb3e6c797 | [
"Apache-2.0"
] | null | null | null | alertlistener/main.py | ashleykelham/shift-your-alert-polcies-left | 1e5124b76d2ec5c6d4556d1730ea5a1bb3e6c797 | [
"Apache-2.0"
] | null | null | null | alertlistener/main.py | ashleykelham/shift-your-alert-polcies-left | 1e5124b76d2ec5c6d4556d1730ea5a1bb3e6c797 | [
"Apache-2.0"
] | null | null | null | from google.cloud import datastore
import os
import json
client = datastore.Client()
| 28.257143 | 59 | 0.548028 | from google.cloud import datastore
import os
import json
client = datastore.Client()
def alerts(request):
    """HTTP endpoint that mirrors alert incidents into Datastore.

    POST/PUT: read the JSON body's ``incident`` object; if its state is
    'open', upsert an entity keyed by ``incident_id`` (storing state and
    policy name), otherwise delete that entity. Returns 'success'.
    GET: return a JSON body with the count of up to 50 stored incidents.
    Any other method falls through and returns None.
    """
    kind = 'alerts-' + os.environ['ENV']
    method = request.method
    if method in ('POST', 'PUT'):
        print(request.data)  # log the raw payload for debugging
        payload = request.get_json()
        incident = payload['incident']
        state = incident['state']
        policy_name = incident['policy_name']
        incident_id = incident['incident_id']
        key = client.key(kind, incident_id)
        if state == 'open':
            record = datastore.Entity(key=key)
            record.update({'state': state, 'name': policy_name})
            client.put(record)
        else:
            # incident resolved: drop it from the store
            client.delete(key)
        return 'success'
    elif method == 'GET':
        stored = list(client.query(kind=kind).fetch(limit=50))
        return json.dumps({"count": len(stored)})
| 880 | 0 | 23 |
126955d7d8c5255831ba84354f634ab3187c2a5a | 1,787 | py | Python | 02.py | anttuov/adventofcode-2017 | 36288be78479cc5309a59e09b6db87afcd4940cb | [
"MIT"
] | null | null | null | 02.py | anttuov/adventofcode-2017 | 36288be78479cc5309a59e09b6db87afcd4940cb | [
"MIT"
] | null | null | null | 02.py | anttuov/adventofcode-2017 | 36288be78479cc5309a59e09b6db87afcd4940cb | [
"MIT"
] | null | null | null | # http://adventofcode.com/2017/day/2
code = """5048 177 5280 5058 4504 3805 5735 220 4362 1809 1521 230 772 1088 178 1794
6629 3839 258 4473 5961 6539 6870 4140 4638 387 7464 229 4173 5706 185 271
5149 2892 5854 2000 256 3995 5250 249 3916 184 2497 210 4601 3955 1110 5340
153 468 550 126 495 142 385 144 165 188 609 182 439 545 608 319
1123 104 567 1098 286 665 1261 107 227 942 1222 128 1001 122 69 139
111 1998 1148 91 1355 90 202 1522 1496 1362 1728 109 2287 918 2217 1138
426 372 489 226 344 431 67 124 120 386 348 153 242 133 112 369
1574 265 144 2490 163 749 3409 3086 154 151 133 990 1002 3168 588 2998
173 192 2269 760 1630 215 966 2692 3855 3550 468 4098 3071 162 329 3648
1984 300 163 5616 4862 586 4884 239 1839 169 5514 4226 5551 3700 216 5912
1749 2062 194 1045 2685 156 3257 1319 3199 2775 211 213 1221 198 2864 2982
273 977 89 198 85 1025 1157 1125 69 94 919 103 1299 998 809 478
1965 6989 230 2025 6290 2901 192 215 4782 6041 6672 7070 7104 207 7451 5071
1261 77 1417 1053 2072 641 74 86 91 1878 1944 2292 1446 689 2315 1379
296 306 1953 3538 248 1579 4326 2178 5021 2529 794 5391 4712 3734 261 4362
2426 192 1764 288 4431 2396 2336 854 2157 216 4392 3972 229 244 4289 1902"""
print(checksum_1(code))
print(checksum_2(code)) | 40.613636 | 84 | 0.69446 | # http://adventofcode.com/2017/day/2
def checksum_1(code):
    """Advent of Code 2017 day 2, part 1.

    For each tab-separated row of *code*, take the spread (max - min) of
    its integer cells and return the sum of those spreads.
    """
    total = 0
    for row in code.split("\n"):
        cells = [int(cell) for cell in row.split("\t")]
        total += max(cells) - min(cells)
    return total
def checksum_2(code):
    """Advent of Code 2017 day 2, part 2.

    For each tab-separated row, find values where one evenly divides
    another and add the quotient(s) to the checksum.

    Fix: uses integer floor division (``//``) instead of float ``/`` plus a
    final ``int()`` cast — float division can round for quotients above
    2**53, silently corrupting the result on large inputs. Since divisibility
    is checked first, ``//`` is exact.
    """
    checksum = 0
    for line in code.split("\n"):
        numbers = sorted(int(n) for n in line.split("\t"))
        # Pop the largest remaining value and test it against all smaller ones.
        while len(numbers) > 1:
            num = numbers.pop()
            for dnum in numbers:
                if num % dnum == 0:
                    checksum += num // dnum
    return checksum
code = """5048 177 5280 5058 4504 3805 5735 220 4362 1809 1521 230 772 1088 178 1794
6629 3839 258 4473 5961 6539 6870 4140 4638 387 7464 229 4173 5706 185 271
5149 2892 5854 2000 256 3995 5250 249 3916 184 2497 210 4601 3955 1110 5340
153 468 550 126 495 142 385 144 165 188 609 182 439 545 608 319
1123 104 567 1098 286 665 1261 107 227 942 1222 128 1001 122 69 139
111 1998 1148 91 1355 90 202 1522 1496 1362 1728 109 2287 918 2217 1138
426 372 489 226 344 431 67 124 120 386 348 153 242 133 112 369
1574 265 144 2490 163 749 3409 3086 154 151 133 990 1002 3168 588 2998
173 192 2269 760 1630 215 966 2692 3855 3550 468 4098 3071 162 329 3648
1984 300 163 5616 4862 586 4884 239 1839 169 5514 4226 5551 3700 216 5912
1749 2062 194 1045 2685 156 3257 1319 3199 2775 211 213 1221 198 2864 2982
273 977 89 198 85 1025 1157 1125 69 94 919 103 1299 998 809 478
1965 6989 230 2025 6290 2901 192 215 4782 6041 6672 7070 7104 207 7451 5071
1261 77 1417 1053 2072 641 74 86 91 1878 1944 2292 1446 689 2315 1379
296 306 1953 3538 248 1579 4326 2178 5021 2529 794 5391 4712 3734 261 4362
2426 192 1764 288 4431 2396 2336 854 2157 216 4392 3972 229 244 4289 1902"""
print(checksum_1(code))
print(checksum_2(code)) | 495 | 0 | 46 |
f15605049ae96ff1cf8f150412910b416ecdc828 | 723 | py | Python | Tensile/Tests/nightly/convolution_config/test_yaml.py | zjunweihit/Tensile | 68b73083c92eecc1b04eec1f006f28aea5628030 | [
"MIT"
] | null | null | null | Tensile/Tests/nightly/convolution_config/test_yaml.py | zjunweihit/Tensile | 68b73083c92eecc1b04eec1f006f28aea5628030 | [
"MIT"
] | null | null | null | Tensile/Tests/nightly/convolution_config/test_yaml.py | zjunweihit/Tensile | 68b73083c92eecc1b04eec1f006f28aea5628030 | [
"MIT"
] | null | null | null | import logging
from Tensile.SolutionStructs import Convolution
from YamlBuilder.YamlBuilder import YamlBuilder
log =logging.getLogger("testlog")
| 34.428571 | 82 | 0.676349 | import logging
from Tensile.SolutionStructs import Convolution
from YamlBuilder.YamlBuilder import YamlBuilder
log =logging.getLogger("testlog")
def test_yaml(request, tensile_client_dir, tmp_path):
    """Build a ConvolutionForward problem type for NCHW and verify the
    generated index/stride assignments, then run it through the Tensile
    client."""
    problem = {}  # filled in by Convolution with the problemType definition
    conv = Convolution(problem, 'ConvolutionForward',
                       config={'TensorAFormat': 'NCHW'})
    log.debug(conv.printUsage(problem))
    assert problem['NumIndicesC'] == 3
    assert problem['IndexAssignmentsA'] == [0, 3, 2]
    assert problem['IndexAssignmentsB'] == [3, 1, 2]
    assert problem['SetConstStrideA'] == [[0, 1]]
    assert problem['SetConstStrideB'] == [[2, 0]]
    assert problem['UseInitialStrides'] == False
    YamlBuilder.run_tensile_client(request, conv, problem, tensile_client_dir, tmp_path)
| 554 | 0 | 23 |
ebd882d2681a13494693917b2277401b35c97a33 | 16,311 | py | Python | search_web/__init__.py | Ankitsinghprograms/search_web | 108aa097f89b4b393cb9c844d8e490920a0557ed | [
"MIT"
] | 3 | 2021-01-14T15:27:38.000Z | 2021-12-20T09:53:04.000Z | search_web/__init__.py | Ankitsinghprograms/search_web | 108aa097f89b4b393cb9c844d8e490920a0557ed | [
"MIT"
] | 1 | 2021-02-04T20:36:40.000Z | 2021-02-04T20:36:40.000Z | search_web/__init__.py | Ankitsinghprograms/search_web | 108aa097f89b4b393cb9c844d8e490920a0557ed | [
"MIT"
] | 1 | 2021-02-04T19:52:55.000Z | 2021-02-04T19:52:55.000Z | """
This module will Help You to Search on Different Websites like Google,Youtube,etc.
You can search on more than 25 websites very easily by just 2 lines of code.
Websites Supported:-
1.Google -google_search("Python")
2.Youtube -youtube_search("Python")
3.Bing -bing_search("Python")
4.Quora -quora_search("5 Python Projects")
5.Python -python_search("Input in Python")
6.Twitter -twitter_search("Python")
7.Facebook -facebook_search("Python")
8.Pinterest -pinterest_search("Python images")
9.Wikipedia -wikipedia_search("Python_(programming_language)")
10.Amazon -amazon_search("Python Books")
11.Reddit -reddit_search("Python")
12.Imdb -imdb_search("python")
13.TripAdvisor -tripadvisor_search("London")
14.Walmart -walmart_search("python Books")
15.Craigslist -craigslist_search("Python")
16.Ebay -ebay_search("Python books")
17.LinkedIn-Job Search, People Search, Learning
18.Playstore -playstore_search("python")
19.Headline -headline_search("python")
20.Esty -esty_search("python")
21.Indeed -indeed_search("Python Developer","USA")
22.Apple -apple_search("Mac Book Pro")
23.ESPN -espn_search("Cricket")
24.Webmd -webmd_search("Python")
25.New York Times -nytimes_search("Covid-19")
26.CNN -cnn_search("Us elections 2020")
27.Best Buy- `bestbuy_search("Python")`
28.Britanica-`britannica_search("Anything")`
29.Bussiness Insider- `businessinsider__search("News")`
30.Dictionary- `dictionary_search("graphics")`
31.Gamepedia- `gamepedia_search("Minecraft")`
32.Github- `github_search("ankitsinghprograms")`
33.Home depot- `homedepot_search("News")`
34.MapQuest- `mapquest_search("California,USA")`
35.Mayo clinic- `mayoclinic_search("What to do during Fever")`
36.Medical News Today- `medicalnewstoday_search("COVID-19")`
37.Merriam Webster- `merriam_webster_search("News")`
38.Microsoft- `microsoft_search("Mac Book Pro")`
39.NIH- `nih_search("Usa News")`
40.Quizlet- `quizlet_search("Std 8")`
41.Rotten Tomatoes- `rottentomatoes_search("Water Bottle")`
42.Target- `target_search("Anything")`
43.Urban Dictionary- `urban_dictionary_search("LOL meaing in urban dictionary")`
44.USA Today- `usatoday_search("USA election")`
45.Yahoo- `yahoo_search("C++")`
46.Zillow- `zillow_search("News")`
========== Example ===========
Code is to simple Just 2 lines of Code.
------------------------------------
from pysearch import *
google_search("How to Search via pysearch module Python")
------------------------------------
=============================
=========== Version ===========
++ 0.1.3 (19/01/2021)+++++++++
~~ Bug Fixes
++++++++++++++++++++++++++++
=============================
======== Getting Errors??========
If You get error then contact me at ankitsingh300307@gmail.com
=============================
=========== Author ==========
Name-Ankit Singh
Email-ankitsingh300307@gmail.com
Github-https://github.com/Ankitsinghprograms
Country-India
============================
"""
import webbrowser
def open(link):
"""
Opening Webpage Through webbrowser module
"""
try:
webbrowser.open(link)
except:
print("EROOR UNABLE TO OPEN WEBSITE")
print("Common Errors:-\n\
~ webbrowser module error \
~ Your system doesn't have Any Webrowser \
-Try Installing modules liks Chrome,Firefox,etc.\
~ Contact to Author via email 'ankitsingh300307@gmail.com'")
def google_search(text):
"""
Search on Google (https://www.google.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
google=f"https://www.google.com/search?q={text}&oq={text}"
open(google)
def youtube_search(text):
"""
Search on Youtube (https://www.youtube.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
youtube=f"https://www.youtube.com/results?search_query={text}"
open(youtube)
def bing_search(text):
"""
Search on Bing (www.bing.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
bing=f"https://www.bing.com/search?q={text}"
open(bing)
def quora_search(text):
"""
Search on Quora (www.quora.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
quora=f"https://www.quora.com/search?q={text}"
open(quora)
def python_search(text):
"""
Search on Python.org (www.python.org)
Parameters
-----------
text:- The query which you want to search about (str)
"""
python_org=f"https://www.python.org/search/?q={text}"
open(python_org)
def twitter_search(text):
"""
Search on twitter (https://twitter.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
twitter=f"https://twitter.com/search?q={text}"
open(twitter)
def facebook_search(text):
"""
Search on Facebook (https://facebook.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
facebook=f"https://facebook.com/search/top/?q={text}"
open(facebook)
def pinterest_search(text):
"""
Search on Pinterest (https://in.pinterest.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
pinterest=f"https://in.pinterest.com/search/pins/?q={text}"
open(pinterest)
def wikipedia_search(text):
"""
Search on Wikipedia (https://en.m.wikipedia.org)
Parameters
-----------
text:- The query which you want to search about (str)
"""
wikipedia=f"https://en.m.wikipedia.org/wiki/{text}"
open(wikipedia)
def amazon_search(text):
"""
Search on amazon (https://www.amazon.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
amazon=f"https://www.amazon.com/s?k={text}"
open(amazon)
def reddit_search(text):
"""
Search on Reddit (https://www.reddit.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
reddit=f"https://www.reddit.com/search?q={text}"
open(reddit)
def imdb_search(text):
"""
Search on imdb (https://www.imdb.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
imdb=f"https://www.imdb.com/find?q={text}"
open(imdb)
def tripadvisor_search(text):
"""
Search on Tripadvisor (https://www.tripadvisor.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
tripadvisor=f"https://www.tripadvisor.com/Search?q={text}"
open(tripadvisor)
def walmart_search(text):
"""
Search on Walmart (https://www.walmart.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
walmart=f'https://www.walmart.com/search/?query={text}'
open(walmart)
def craigslist_search(text):
"""
Search on craigslist (https://kolkata.craigslist.org)
Parameters
-----------
text:- The query which you want to search about (str)
"""
craigslist=f'https://kolkata.craigslist.org/d/services/search/bbb?query={text}'
open(craigslist)
def ebay_search(text):
"""
Search on Ebay (https://www.ebay.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
ebay=f"https://www.ebay.com/sch/i.html?_nkw={text}"
open(ebay)
def linkedin_job_search(text):
"""
Search on Linkedin (https://www.linkedin.com/jobs)
Parameters
-----------
text:- The query which you want to search about (str)
"""
linkedin_job=f"https://www.linkedin.com/jobs/search?keywords={text}"
open(linkedin_job)
def linkedin_people_search(first_name,last_name):
"""
Search on Linkedin (https://www.linkedin.com/people-guest/pub)
Parameters
-----------
first_name:- First Name of the person (str)
last_name:- Last Name of the person (str)
"""
linkedin_people=f"https://www.linkedin.com/people-guest/pub/dir?firstName={first_name}&lastName={last_name}"
open(linkedin_people)
def linkedin_learning_search(text):
"""
Search on Linkedin (https://www.linkedin.com/learning)
Parameters
-----------
text:- The query which you want to search about (str)
"""
linkedin_learning=f"https://www.linkedin.com/learning/search?keywords={text}"
open(linkedin_learning)
def playstore_search(text):
"""
Search on Play Store (https://play.google.com/store)
Parameters
-----------
text:- The query which you want to search about (str)
"""
play_store=f"https://play.google.com/store/search?q={text}"
open(play_store)
def headline_search(text):
"""
Search on Headline (https://www.healthline.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
headline=f'https://www.healthline.com/search?q1={text}'
open(headline)
def esty_search(text):
"""
Search on Esty (https://www.etsy.c:om/in-en)
Parameters
-----------
text:- The query which you want to search about (str)
"""
esty=f'https://www.etsy.com/in-en/search?q={text}'
open(esty)
def indeed_search(job_title,location):
"""
Search on Indeed (https://in.indeed.com/m/jobs)
Parameters
-----------
job_title:- Name of the Job (str)
location:- Location (str)
"""
indeed=f'https://in.indeed.com/m/jobs?q={job_title}&l={location}'
open(indeed)
def apple_search(text):
"""
Search on Apple (https://www.apple.com/us)
Parameters
-----------
text:- The query which you want to search about (str)
"""
apple=f"https://www.apple.com/us/search/{text}"
open(apple)
def espn_search(text):
"""
Search on Espn (https://www.espn.in)
Parameters
-----------
text:- The query which you want to search about (str)
"""
espn=f'https://www.espn.in/search/_/q/{text}'
open(espn)
def webmd_search(text):
"""
Search on Webmd (https://www.webmd.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
webmd=f'https://www.webmd.com/search/search_results/default.aspx?query={text}'
open(webmd)
def nytimes_search(text):
"""
Search on New York Times (https://www.nytimes.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
nytimes=f'https://www.nytimes.com/search?query={text}'
open(nytimes)
def cnn_search(text):
"""
Search on CNN (https://edition.cnn.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
cnn=f'https://edition.cnn.com/search?q={text}'
open(cnn)
# Functions Added in Version- 0.1.2 (19/01/2021) are below:-
def github_search(text):
"""
Search on github (https://github.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
github="https://github.com/search?q={text}"
open(github)
def merriam_webster_search(text):
"""
Search on merriam_webster (https://www.merriam-webster.com/dictionary/)
Parameters
-----------
text:- The query which you want to search about (str)
"""
merriam_webster=f"https://www.merriam-webster.com/dictionary/{text}"
open(merriam_webster)
def gamepedia_search(text):
"""
Search on gamepedia (https://www.gamepedia.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
gamepedia=f'https://www.gamepedia.com/search?search={text}'
open(gamepedia)
def microsoft_search(text):
"""
Search on Microsoft (https://www.microsoft.com/en-in/)
Parameters
-----------
text:- The query which you want to search about (str)
"""
microsoft=f"https://www.microsoft.com/en-in/search/result.aspx?{text}"
open(microsoft)
def target_search(text):
"""
Search on target (https://www.target.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
target=f'https://www.target.com/s?searchTerm={text}'
open(target)
def homedepot_search(text):
"""
Search on homedepot (https://www.homedepot.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
homedepot=f"https://www.homedepot.com/s/{text}"
open(homedepot)
def nih_search(text):
"""
Search on NIH (https://search.nih.gov)
Parameters
-----------
text:- The query which you want to search about (str)
"""
nih=f"https://search.nih.gov/search?utf8=%E2%9C%93&affiliate=nih&query={text}&commit=Search"
open(nih)
def rottentomatoes_search(text):
"""
Search on Rotten Tomatoes (https://www.rottentomatoes.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
rottentomatoes=f"https://www.rottentomatoes.com/search?search={text}"
open(rottentomatoes)
def quizlet_search(text):
"""
Search on Quizlet (https://quizlet.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
quizlet=f"https://quizlet.com/subject/{text}/"
open(quizlet)
def mapquest_search(text):
"""
Search on Mapquest (https://www.mapquest.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
mapquest=f"https://www.mapquest.com/search/results?query={text}"
open(mapquest)
def britannica_search(text):
"""
Search on Britannica (https://www.britannica.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
britannica=f"https://www.britannica.com/search?query={text}"
open(britannica)
def businessinsider_search(text):
"""
Search on Business Insider (https://www.businessinsider.in)
Parameters
-----------
text:- The query which you want to search about (str)
"""
businessinsider=f"https://www.businessinsider.in/searchresult.cms?query={text}"
open(businessinsider)
def dictionary_search(text):
"""
Search on Dictionary (https://www.dictionary.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
dictionary=f"https://www.dictionary.com/browse/{text}/s=t"
open(dictionary)
def zillow_search(text):
"""
Search on Zillow (https://www.zillow.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
zillow=f"https://www.zillow.com/homes/{text}/"
open(zillow)
def mayoclinic_search(text):
"""
Search on Mayoclinic (https://www.mayoclinic.org)
Parameters
-----------
text:- The query which you want to search about (str)
"""
mayoclinic=f'https://www.mayoclinic.org/search/search-results?q={text}'
open(mayoclinic)
def bestbuy_search(text):
"""
Search on Bestbuy (https://www.bestbuy.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
bestbuy=f"https://www.bestbuy.com/site/searchpage.jsp?st={text}"
open(bestbuy)
def yahoo_search(text):
"""
Search on Yahoo (https://in.search.yahoo.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
yahoo=f"https://in.search.yahoo.com/search?p={text}"
open(yahoo)
def usatoday_search(text):
"""
Search on USA Today (https://www.usatoday.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
usatoday=f"https://www.usatoday.com/search/?q={text}"
open(usatoday)
def medicalnewstoday_search(text):
"""
Search on Medical News Today (https://www.medicalnewstoday.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
medicalnewstoday=f"https://www.medicalnewstoday.com/search?q={text}"
open(medicalnewstoday)
def urban_dictionary_search(text):
"""
Search on Urban Dictionary (https://www.urbandictionary.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
urban_dictionary="https://www.urbandictionary.com/define.php?term={text}"
open(urban_dictionary)
def usatoday_search(text):
"""
Search on USA Today (https://www.usnews.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
usanews=f"https://www.usnews.com/search?q={text}"
open(usanews)
| 14.195822 | 109 | 0.639691 | """
This module will Help You to Search on Different Websites like Google,Youtube,etc.
You can search on more than 25 websites very easily by just 2 lines of code.
Websites Supported:-
1.Google -google_search("Python")
2.Youtube -youtube_search("Python")
3.Bing -bing_search("Python")
4.Quora -quora_search("5 Python Projects")
5.Python -python_search("Input in Python")
6.Twitter -twitter_search("Python")
7.Facebook -facebook_search("Python")
8.Pinterest -pinterest_search("Python images")
9.Wikipedia -wikipedia_search("Python_(programming_language)")
10.Amazon -amazon_search("Python Books")
11.Reddit -reddit_search("Python")
12.Imdb -imdb_search("python")
13.TripAdvisor -tripadvisor_search("London")
14.Walmart -walmart_search("python Books")
15.Craigslist -craigslist_search("Python")
16.Ebay -ebay_search("Python books")
17.LinkedIn-Job Search, People Search, Learning
18.Playstore -playstore_search("python")
19.Headline -headline_search("python")
20.Esty -esty_search("python")
21.Indeed -indeed_search("Python Developer","USA")
22.Apple -apple_search("Mac Book Pro")
23.ESPN -espn_search("Cricket")
24.Webmd -webmd_search("Python")
25.New York Times -nytimes_search("Covid-19")
26.CNN -cnn_search("Us elections 2020")
27.Best Buy- `bestbuy_search("Python")`
28.Britanica-`britannica_search("Anything")`
29.Bussiness Insider- `businessinsider__search("News")`
30.Dictionary- `dictionary_search("graphics")`
31.Gamepedia- `gamepedia_search("Minecraft")`
32.Github- `github_search("ankitsinghprograms")`
33.Home depot- `homedepot_search("News")`
34.MapQuest- `mapquest_search("California,USA")`
35.Mayo clinic- `mayoclinic_search("What to do during Fever")`
36.Medical News Today- `medicalnewstoday_search("COVID-19")`
37.Merriam Webster- `merriam_webster_search("News")`
38.Microsoft- `microsoft_search("Mac Book Pro")`
39.NIH- `nih_search("Usa News")`
40.Quizlet- `quizlet_search("Std 8")`
41.Rotten Tomatoes- `rottentomatoes_search("Water Bottle")`
42.Target- `target_search("Anything")`
43.Urban Dictionary- `urban_dictionary_search("LOL meaing in urban dictionary")`
44.USA Today- `usatoday_search("USA election")`
45.Yahoo- `yahoo_search("C++")`
46.Zillow- `zillow_search("News")`
========== Example ===========
Code is to simple Just 2 lines of Code.
------------------------------------
from pysearch import *
google_search("How to Search via pysearch module Python")
------------------------------------
=============================
=========== Version ===========
++ 0.1.3 (19/01/2021)+++++++++
~~ Bug Fixes
++++++++++++++++++++++++++++
=============================
======== Getting Errors??========
If You get error then contact me at ankitsingh300307@gmail.com
=============================
=========== Author ==========
Name-Ankit Singh
Email-ankitsingh300307@gmail.com
Github-https://github.com/Ankitsinghprograms
Country-India
============================
"""
import webbrowser
def open(link):
"""
Opening Webpage Through webbrowser module
"""
try:
webbrowser.open(link)
except:
print("EROOR UNABLE TO OPEN WEBSITE")
print("Common Errors:-\n\
~ webbrowser module error \
~ Your system doesn't have Any Webrowser \
-Try Installing modules liks Chrome,Firefox,etc.\
~ Contact to Author via email 'ankitsingh300307@gmail.com'")
def google_search(text):
"""
Search on Google (https://www.google.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
google=f"https://www.google.com/search?q={text}&oq={text}"
open(google)
def youtube_search(text):
"""
Search on Youtube (https://www.youtube.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
youtube=f"https://www.youtube.com/results?search_query={text}"
open(youtube)
def bing_search(text):
"""
Search on Bing (www.bing.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
bing=f"https://www.bing.com/search?q={text}"
open(bing)
def quora_search(text):
"""
Search on Quora (www.quora.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
quora=f"https://www.quora.com/search?q={text}"
open(quora)
def python_search(text):
"""
Search on Python.org (www.python.org)
Parameters
-----------
text:- The query which you want to search about (str)
"""
python_org=f"https://www.python.org/search/?q={text}"
open(python_org)
def twitter_search(text):
"""
Search on twitter (https://twitter.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
twitter=f"https://twitter.com/search?q={text}"
open(twitter)
def facebook_search(text):
"""
Search on Facebook (https://facebook.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
facebook=f"https://facebook.com/search/top/?q={text}"
open(facebook)
def pinterest_search(text):
"""
Search on Pinterest (https://in.pinterest.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
pinterest=f"https://in.pinterest.com/search/pins/?q={text}"
open(pinterest)
def wikipedia_search(text):
"""
Search on Wikipedia (https://en.m.wikipedia.org)
Parameters
-----------
text:- The query which you want to search about (str)
"""
wikipedia=f"https://en.m.wikipedia.org/wiki/{text}"
open(wikipedia)
def amazon_search(text):
"""
Search on amazon (https://www.amazon.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
amazon=f"https://www.amazon.com/s?k={text}"
open(amazon)
def reddit_search(text):
"""
Search on Reddit (https://www.reddit.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
reddit=f"https://www.reddit.com/search?q={text}"
open(reddit)
def imdb_search(text):
"""
Search on imdb (https://www.imdb.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
imdb=f"https://www.imdb.com/find?q={text}"
open(imdb)
def tripadvisor_search(text):
"""
Search on Tripadvisor (https://www.tripadvisor.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
tripadvisor=f"https://www.tripadvisor.com/Search?q={text}"
open(tripadvisor)
def walmart_search(text):
"""
Search on Walmart (https://www.walmart.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
walmart=f'https://www.walmart.com/search/?query={text}'
open(walmart)
def craigslist_search(text):
"""
Search on craigslist (https://kolkata.craigslist.org)
Parameters
-----------
text:- The query which you want to search about (str)
"""
craigslist=f'https://kolkata.craigslist.org/d/services/search/bbb?query={text}'
open(craigslist)
def ebay_search(text):
"""
Search on Ebay (https://www.ebay.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
ebay=f"https://www.ebay.com/sch/i.html?_nkw={text}"
open(ebay)
def linkedin_job_search(text):
"""
Search on Linkedin (https://www.linkedin.com/jobs)
Parameters
-----------
text:- The query which you want to search about (str)
"""
linkedin_job=f"https://www.linkedin.com/jobs/search?keywords={text}"
open(linkedin_job)
def linkedin_people_search(first_name,last_name):
"""
Search on Linkedin (https://www.linkedin.com/people-guest/pub)
Parameters
-----------
first_name:- First Name of the person (str)
last_name:- Last Name of the person (str)
"""
linkedin_people=f"https://www.linkedin.com/people-guest/pub/dir?firstName={first_name}&lastName={last_name}"
open(linkedin_people)
def linkedin_learning_search(text):
"""
Search on Linkedin (https://www.linkedin.com/learning)
Parameters
-----------
text:- The query which you want to search about (str)
"""
linkedin_learning=f"https://www.linkedin.com/learning/search?keywords={text}"
open(linkedin_learning)
def playstore_search(text):
"""
Search on Play Store (https://play.google.com/store)
Parameters
-----------
text:- The query which you want to search about (str)
"""
play_store=f"https://play.google.com/store/search?q={text}"
open(play_store)
def headline_search(text):
"""
Search on Headline (https://www.healthline.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
headline=f'https://www.healthline.com/search?q1={text}'
open(headline)
def esty_search(text):
"""
Search on Esty (https://www.etsy.c:om/in-en)
Parameters
-----------
text:- The query which you want to search about (str)
"""
esty=f'https://www.etsy.com/in-en/search?q={text}'
open(esty)
def indeed_search(job_title,location):
"""
Search on Indeed (https://in.indeed.com/m/jobs)
Parameters
-----------
job_title:- Name of the Job (str)
location:- Location (str)
"""
indeed=f'https://in.indeed.com/m/jobs?q={job_title}&l={location}'
open(indeed)
def apple_search(text):
"""
Search on Apple (https://www.apple.com/us)
Parameters
-----------
text:- The query which you want to search about (str)
"""
apple=f"https://www.apple.com/us/search/{text}"
open(apple)
def espn_search(text):
"""
Search on Espn (https://www.espn.in)
Parameters
-----------
text:- The query which you want to search about (str)
"""
espn=f'https://www.espn.in/search/_/q/{text}'
open(espn)
def webmd_search(text):
"""
Search on Webmd (https://www.webmd.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
webmd=f'https://www.webmd.com/search/search_results/default.aspx?query={text}'
open(webmd)
def nytimes_search(text):
"""
Search on New York Times (https://www.nytimes.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
nytimes=f'https://www.nytimes.com/search?query={text}'
open(nytimes)
def cnn_search(text):
"""
Search on CNN (https://edition.cnn.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
cnn=f'https://edition.cnn.com/search?q={text}'
open(cnn)
# Functions Added in Version- 0.1.2 (19/01/2021) are below:-
def github_search(text):
"""
Search on github (https://github.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
github="https://github.com/search?q={text}"
open(github)
def merriam_webster_search(text):
"""
Search on merriam_webster (https://www.merriam-webster.com/dictionary/)
Parameters
-----------
text:- The query which you want to search about (str)
"""
merriam_webster=f"https://www.merriam-webster.com/dictionary/{text}"
open(merriam_webster)
def gamepedia_search(text):
"""
Search on gamepedia (https://www.gamepedia.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
gamepedia=f'https://www.gamepedia.com/search?search={text}'
open(gamepedia)
def microsoft_search(text):
"""
Search on Microsoft (https://www.microsoft.com/en-in/)
Parameters
-----------
text:- The query which you want to search about (str)
"""
microsoft=f"https://www.microsoft.com/en-in/search/result.aspx?{text}"
open(microsoft)
def target_search(text):
"""
Search on target (https://www.target.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
target=f'https://www.target.com/s?searchTerm={text}'
open(target)
def homedepot_search(text):
"""
Search on homedepot (https://www.homedepot.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
homedepot=f"https://www.homedepot.com/s/{text}"
open(homedepot)
def nih_search(text):
"""
Search on NIH (https://search.nih.gov)
Parameters
-----------
text:- The query which you want to search about (str)
"""
nih=f"https://search.nih.gov/search?utf8=%E2%9C%93&affiliate=nih&query={text}&commit=Search"
open(nih)
def rottentomatoes_search(text):
"""
Search on Rotten Tomatoes (https://www.rottentomatoes.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
rottentomatoes=f"https://www.rottentomatoes.com/search?search={text}"
open(rottentomatoes)
def quizlet_search(text):
"""
Search on Quizlet (https://quizlet.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
quizlet=f"https://quizlet.com/subject/{text}/"
open(quizlet)
def mapquest_search(text):
"""
Search on Mapquest (https://www.mapquest.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
mapquest=f"https://www.mapquest.com/search/results?query={text}"
open(mapquest)
def britannica_search(text):
"""
Search on Britannica (https://www.britannica.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
britannica=f"https://www.britannica.com/search?query={text}"
open(britannica)
def businessinsider_search(text):
"""
Search on Business Insider (https://www.businessinsider.in)
Parameters
-----------
text:- The query which you want to search about (str)
"""
businessinsider=f"https://www.businessinsider.in/searchresult.cms?query={text}"
open(businessinsider)
def dictionary_search(text):
"""
Search on Dictionary (https://www.dictionary.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
dictionary=f"https://www.dictionary.com/browse/{text}/s=t"
open(dictionary)
def zillow_search(text):
"""
Search on Zillow (https://www.zillow.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
zillow=f"https://www.zillow.com/homes/{text}/"
open(zillow)
def mayoclinic_search(text):
"""
Search on Mayoclinic (https://www.mayoclinic.org)
Parameters
-----------
text:- The query which you want to search about (str)
"""
mayoclinic=f'https://www.mayoclinic.org/search/search-results?q={text}'
open(mayoclinic)
def bestbuy_search(text):
"""
Search on Bestbuy (https://www.bestbuy.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
bestbuy=f"https://www.bestbuy.com/site/searchpage.jsp?st={text}"
open(bestbuy)
def yahoo_search(text):
"""
Search on Yahoo (https://in.search.yahoo.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
yahoo=f"https://in.search.yahoo.com/search?p={text}"
open(yahoo)
def usatoday_search(text):
"""
Search on USA Today (https://www.usatoday.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
usatoday=f"https://www.usatoday.com/search/?q={text}"
open(usatoday)
def medicalnewstoday_search(text):
"""
Search on Medical News Today (https://www.medicalnewstoday.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
medicalnewstoday=f"https://www.medicalnewstoday.com/search?q={text}"
open(medicalnewstoday)
def urban_dictionary_search(text):
"""
Search on Urban Dictionary (https://www.urbandictionary.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
urban_dictionary="https://www.urbandictionary.com/define.php?term={text}"
open(urban_dictionary)
def usatoday_search(text):
"""
Search on USA Today (https://www.usnews.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
usanews=f"https://www.usnews.com/search?q={text}"
open(usanews)
| 0 | 0 | 0 |
87a3c0f164fbcc6a42b29d8f101106b4ea766cfe | 4,696 | py | Python | common/files.py | LordKBX/EbookCollection | 3e6ba33fb012b1dbb371704094b02cece66a7e80 | [
"MIT"
] | 1 | 2021-06-03T01:44:50.000Z | 2021-06-03T01:44:50.000Z | common/files.py | LordKBX/eBookCollection | 3e6ba33fb012b1dbb371704094b02cece66a7e80 | [
"MIT"
] | null | null | null | common/files.py | LordKBX/eBookCollection | 3e6ba33fb012b1dbb371704094b02cece66a7e80 | [
"MIT"
] | null | null | null | import os, sys, shutil
import hashlib
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from common.vars import *
from common.MIME import *
def get_file_size(file_name: str, human_readable: bool = True):
"""
Get file in size in given unit like KB, MB or GB
:param file_name:
:param human_readable:
:return:
"""
size = os.path.getsize(file_name)
if human_readable is False:
return size
elif size > (1024*1024*1024):
return '{:.2f} Gb'.format(size/(1024*1024*1024))
elif size > (1024*1024):
return '{:.2f} Mb'.format(size/(1024*1024))
elif size > 1024:
return '{:.2f} Kb'.format(size/1024)
else:
return '{} bytes'.format(size)
def list_directory(directory_path: str, expected_extension: str = None):
"""
Recursive function for listing files in a folder and his sub folders
:param directory_path: path of the parsed dir
:param expected_extension: list of extension separated by |
:return: list(str)
"""
file_list = list()
for root, directories, files in os.walk(directory_path, topdown=False):
for name in files:
full_path = os.path.join(root, name)
if expected_extension is not None:
if re.search("\\.({})$".format(expected_extension), name) is None:
continue
file_list.append(full_path)
if expected_extension is None:
for name in directories:
file_list.append(os.path.join(root, name))
return file_list
| 31.945578 | 121 | 0.631814 | import os, sys, shutil
import hashlib
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from common.vars import *
from common.MIME import *
def get_file_size(file_name: str, human_readable: bool = True):
"""
Get file in size in given unit like KB, MB or GB
:param file_name:
:param human_readable:
:return:
"""
size = os.path.getsize(file_name)
if human_readable is False:
return size
elif size > (1024*1024*1024):
return '{:.2f} Gb'.format(size/(1024*1024*1024))
elif size > (1024*1024):
return '{:.2f} Mb'.format(size/(1024*1024))
elif size > 1024:
return '{:.2f} Kb'.format(size/1024)
else:
return '{} bytes'.format(size)
def get_file_type(file_path: str, return_extension: bool = False) -> str or (str, str):
file_path = file_path.replace('/', os.sep)
file_tab = file_path.split(os.sep)
ext = file_tab[len(file_tab) - 1]
file_type = ""
end = False
while end is False:
if ext in EXT_TO_TYPE:
file_type = EXT_TO_TYPE[ext][0]
end = True
else:
try:
point_pos = ext.index('.', 1)
ext = ext[point_pos:]
except Exception:
end = True
file_type = "application/octet-stream"
if return_extension is True:
return file_type, ext
else:
return file_type
def list_directory(directory_path: str, expected_extension: str = None):
"""
Recursive function for listing files in a folder and his sub folders
:param directory_path: path of the parsed dir
:param expected_extension: list of extension separated by |
:return: list(str)
"""
file_list = list()
for root, directories, files in os.walk(directory_path, topdown=False):
for name in files:
full_path = os.path.join(root, name)
if expected_extension is not None:
if re.search("\\.({})$".format(expected_extension), name) is None:
continue
file_list.append(full_path)
if expected_extension is None:
for name in directories:
file_list.append(os.path.join(root, name))
return file_list
def listing_of_directory(path: str, level: int = 1, list_base_content: list = [], list_excluded_directory: list = []):
some_dir = path.rstrip(os.path.sep)
list_of_dir = list_base_content
if os.path.isdir(some_dir):
num_sep = some_dir.count(os.path.sep)
for root, dirs, files in os.walk(some_dir):
num_sep_this = root.count(os.path.sep)
for name in dirs:
if name in list_excluded_directory: continue
if num_sep + level > num_sep_this:
list_of_dir.append(name)
return list_of_dir
def list_directory_tree(base_directory: str, ext: str = None):
list_of_file = list_directory(base_directory, ext)
list_of_file.sort()
# print(listOfFile)
tree_files = dict()
for file in list_of_file:
__list_directory_tree_recursive(file, base_directory + os.sep, tree_files)
return tree_files
def __list_directory_tree_recursive(file: str, path: str, parent_file_tree: dict):
    """Insert *file* into the nested dict *parent_file_tree*.

    *path* is the prefix already consumed (must end with ``os.sep``); the
    remainder of *file* is split on the separator and descended one
    component per recursive call. Leaves map name -> full path for files,
    name -> empty dict for directories.
    """
    relative = file.replace(path, '')
    parts = relative.split(os.sep)
    if not isinstance(parts, list):
        return
    if len(parts) > 1:
        branch = parts[0]
        if branch not in parent_file_tree:
            parent_file_tree[branch] = dict()
        # Only descend when the slot really is a sub-tree (a file with the
        # same name could already occupy it).
        if isinstance(parent_file_tree[branch], dict):
            __list_directory_tree_recursive(file, path + branch + os.sep, parent_file_tree[branch])
    elif os.path.isfile(file):
        parent_file_tree[relative] = file
    else:
        parent_file_tree[relative] = dict()
def clean_dir(src_dir: str):
    """Remove every empty subdirectory below *src_dir*, keeping src_dir itself.

    Bottom-up walk, so directories that only contained empty directories
    become empty and are removed too. Non-empty directories are simply left
    in place (``os.rmdir`` refuses them).

    Fixes: the original swallowed errors with ``except Exception: ''`` -- a
    bare string expression as a confusing no-op, and an over-broad catch;
    narrowed to ``OSError`` with an explicit ``pass``.
    """
    for dirpath, _, _ in os.walk(src_dir, topdown=False):
        # With topdown=False the root comes last: stop before deleting it.
        if dirpath == src_dir:
            break
        try:
            os.rmdir(dirpath)  # succeeds only when the directory is empty
        except OSError:
            pass  # not empty (or already gone) -- leave it alone
def rmDir(src_dir: str):
    """Delete *src_dir* and everything under it; a missing path is not an error."""
    shutil.rmtree(path=src_dir, ignore_errors=True)
def copyDir(src_dir: str, dest_dir: str):
    """Recursively copy *src_dir* into *dest_dir*, merging into an existing target.

    Requires Python 3.8+ (``dirs_exist_ok``).
    """
    shutil.copytree(src=src_dir, dst=dest_dir, dirs_exist_ok=True)
def copyFile(src_dir: str, dest_dir: str):
    """Copy a single file's contents from *src_dir* to *dest_dir* (both file paths)."""
    shutil.copyfile(src=src_dir, dst=dest_dir)
def rename(src_dir: str, dest_dir: str):
    """Move (rename) *src_dir* to *dest_dir*; works across filesystems."""
    shutil.move(src=src_dir, dst=dest_dir)
def hashFile(path: str):
    """Return the hex SHA-1 digest of the file at *path*, read in 64 KiB chunks."""
    digest = hashlib.sha1()
    with open(path, 'rb') as handle:
        for chunk in iter(lambda: handle.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
| 2,876 | 0 | 230 |
f82d31f5b5597ffa1c312d086ef484f89b54b43b | 4,017 | py | Python | uformer/test.py | lulsheng/-AI-8- | 562365379c3e0191436537958a2519e4be905d4b | [
"Apache-2.0"
] | null | null | null | uformer/test.py | lulsheng/-AI-8- | 562365379c3e0191436537958a2519e4be905d4b | [
"Apache-2.0"
] | null | null | null | uformer/test.py | lulsheng/-AI-8- | 562365379c3e0191436537958a2519e4be905d4b | [
"Apache-2.0"
] | null | null | null | import numpy as np
import os, sys
import argparse
from tqdm import tqdm
import paddle.nn as nn
import paddle
from x2paddle.torch2paddle import DataLoader
import paddle.nn.functional as F
sys.path.append('/home/aistudio')
import scipy.io as sio
from utils.loader import get_validation_data, get_testA_data
import utils
from model import UNet
from model import Uformer
from model import Uformer_Cross
from model import Uformer_CatCross
use_gpu = True
paddle.set_device('gpu:0') if use_gpu else paddle.get_device('cpu')
# from skimage import img_as_float32
# from skimage import img_as_ubyte
# from skimage.metrics import peak_signal_noise_ratio as psnr_loss
# from skimage.metrics import structural_similarity as ssim_loss
parser = argparse.ArgumentParser(description=\
'RGB denoising evaluation on the validation set of SIDD')
parser.add_argument('--input_dir', default=\
'/home/aistudio/demoire', type=str, help=\
'Directory of validation images')
parser.add_argument('--result_dir', default='uformer/result_B',
type=str, help='Directory for results')
parser.add_argument('--weights', default=
'/home/aistudio/uformer/log/Uformer_/model_B/model_best.pdiparams', type=str, help=\
'Path to weights')
parser.add_argument('--gpus', default='0', type=str, help=\
'CUDA_VISIBLE_DEVICES')
parser.add_argument('--arch', default='Uformer', type=str, help='arch')
parser.add_argument('--batch_size', default=1, type=int, help=\
'Batch size for dataloader')
parser.add_argument('--save_images', action='store_true', help=\
'Save denoised images in result directory', default=True)
parser.add_argument('--embed_dim', type=int, default=32, help=\
'number of data loading workers')
parser.add_argument('--win_size', type=int, default=8, help=\
'number of data loading workers')
parser.add_argument('--token_projection', type=str, default='linear', help=\
'linear/conv token projection')
parser.add_argument('--token_mlp', type=str, default='leff', help=\
'ffn/leff token mlp')
parser.add_argument('--vit_dim', type=int, default=256, help='vit hidden_dim')
parser.add_argument('--vit_depth', type=int, default=12, help='vit depth')
parser.add_argument('--vit_nheads', type=int, default=8, help='vit hidden_dim')
parser.add_argument('--vit_mlp_dim', type=int, default=512, help='vit mlp_dim')
parser.add_argument('--vit_patch_size', type=int, default=16, help=\
'vit patch_size')
parser.add_argument('--global_skip', action='store_true', default=False,
help='global skip connection')
parser.add_argument('--local_skip', action='store_true', default=False,
help='local skip connection')
parser.add_argument('--vit_share', action='store_true', default=False, help
='share vit module')
parser.add_argument('--train_ps', type=int, default=256, help=\
'patch size of training sample')
args = parser.parse_args()
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
utils.mkdir(args.result_dir)
testA_dataset = get_testA_data(args.input_dir)
testA_loader = DataLoader(dataset=testA_dataset, batch_size=1, shuffle=False, num_workers=0, drop_last=False)
model_restoration= utils.get_arch(args)
# model_restoration = torch.nn.DataParallel(model_restoration)
utils.load_checkpoint(model_restoration,args.weights)
print("===>Testing using weights: ", args.weights)
model_restoration.cuda()
model_restoration.eval()
# Inference loop: run the restoration model over the test set without
# tracking gradients and write the restored images to args.result_dir.
with paddle.no_grad():
    psnr_val_rgb = []  # NOTE(review): never appended to -- metrics are not computed here
    ssim_val_rgb = []  # NOTE(review): unused; presumably left over from the validation script
    for ii, data_test in enumerate(tqdm(testA_loader), 0):
        rgb_noisy = data_test[0]  # batch tensor; indexed as (n, c, h, w) below
        filenames = data_test[1]
        h, w = rgb_noisy.shape[2], rgb_noisy.shape[3]  # NOTE(review): h, w unused below
        rgb_restored = model_restoration(rgb_noisy)
        # Rescale to 8-bit range (model output presumably in [0, 1] -- TODO confirm)
        # and convert NCHW tensor -> HWC uint8-range numpy image.
        rgb_restored = rgb_restored * 255
        rgb_restored = paddle.clip(rgb_restored,0,255).cpu().numpy().squeeze().transpose((1,2,0))
        if args.save_images:
            utils.save_img(os.path.join(args.result_dir,filenames[0]), rgb_restored)
| 42.284211 | 109 | 0.744088 | import numpy as np
import os, sys
import argparse
from tqdm import tqdm
import paddle.nn as nn
import paddle
from x2paddle.torch2paddle import DataLoader
import paddle.nn.functional as F
sys.path.append('/home/aistudio')
import scipy.io as sio
from utils.loader import get_validation_data, get_testA_data
import utils
from model import UNet
from model import Uformer
from model import Uformer_Cross
from model import Uformer_CatCross
use_gpu = True
paddle.set_device('gpu:0') if use_gpu else paddle.get_device('cpu')
# from skimage import img_as_float32
# from skimage import img_as_ubyte
# from skimage.metrics import peak_signal_noise_ratio as psnr_loss
# from skimage.metrics import structural_similarity as ssim_loss
parser = argparse.ArgumentParser(description=\
'RGB denoising evaluation on the validation set of SIDD')
parser.add_argument('--input_dir', default=\
'/home/aistudio/demoire', type=str, help=\
'Directory of validation images')
parser.add_argument('--result_dir', default='uformer/result_B',
type=str, help='Directory for results')
parser.add_argument('--weights', default=
'/home/aistudio/uformer/log/Uformer_/model_B/model_best.pdiparams', type=str, help=\
'Path to weights')
parser.add_argument('--gpus', default='0', type=str, help=\
'CUDA_VISIBLE_DEVICES')
parser.add_argument('--arch', default='Uformer', type=str, help='arch')
parser.add_argument('--batch_size', default=1, type=int, help=\
'Batch size for dataloader')
parser.add_argument('--save_images', action='store_true', help=\
'Save denoised images in result directory', default=True)
parser.add_argument('--embed_dim', type=int, default=32, help=\
'number of data loading workers')
parser.add_argument('--win_size', type=int, default=8, help=\
'number of data loading workers')
parser.add_argument('--token_projection', type=str, default='linear', help=\
'linear/conv token projection')
parser.add_argument('--token_mlp', type=str, default='leff', help=\
'ffn/leff token mlp')
parser.add_argument('--vit_dim', type=int, default=256, help='vit hidden_dim')
parser.add_argument('--vit_depth', type=int, default=12, help='vit depth')
parser.add_argument('--vit_nheads', type=int, default=8, help='vit hidden_dim')
parser.add_argument('--vit_mlp_dim', type=int, default=512, help='vit mlp_dim')
parser.add_argument('--vit_patch_size', type=int, default=16, help=\
'vit patch_size')
parser.add_argument('--global_skip', action='store_true', default=False,
help='global skip connection')
parser.add_argument('--local_skip', action='store_true', default=False,
help='local skip connection')
parser.add_argument('--vit_share', action='store_true', default=False, help
='share vit module')
parser.add_argument('--train_ps', type=int, default=256, help=\
'patch size of training sample')
args = parser.parse_args()
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
utils.mkdir(args.result_dir)
testA_dataset = get_testA_data(args.input_dir)
testA_loader = DataLoader(dataset=testA_dataset, batch_size=1, shuffle=False, num_workers=0, drop_last=False)
model_restoration= utils.get_arch(args)
# model_restoration = torch.nn.DataParallel(model_restoration)
utils.load_checkpoint(model_restoration,args.weights)
print("===>Testing using weights: ", args.weights)
model_restoration.cuda()
model_restoration.eval()
# Inference loop: run the restoration model over the test set without
# tracking gradients and write the restored images to args.result_dir.
with paddle.no_grad():
    psnr_val_rgb = []  # NOTE(review): never appended to -- metrics are not computed here
    ssim_val_rgb = []  # NOTE(review): unused; presumably left over from the validation script
    for ii, data_test in enumerate(tqdm(testA_loader), 0):
        rgb_noisy = data_test[0]  # batch tensor; indexed as (n, c, h, w) below
        filenames = data_test[1]
        h, w = rgb_noisy.shape[2], rgb_noisy.shape[3]  # NOTE(review): h, w unused below
        rgb_restored = model_restoration(rgb_noisy)
        # Rescale to 8-bit range (model output presumably in [0, 1] -- TODO confirm)
        # and convert NCHW tensor -> HWC uint8-range numpy image.
        rgb_restored = rgb_restored * 255
        rgb_restored = paddle.clip(rgb_restored,0,255).cpu().numpy().squeeze().transpose((1,2,0))
        if args.save_images:
            utils.save_img(os.path.join(args.result_dir,filenames[0]), rgb_restored)
| 0 | 0 | 0 |
80583d6ca6373271e97855d720a33839feef7440 | 20,630 | py | Python | server.py | Leslie-Fang/Seq2Seq-Vis | 71be2e246a7bc4c7a27912d3a735cbcf13b908ef | [
"Apache-2.0"
] | 436 | 2018-04-26T00:57:57.000Z | 2022-03-28T07:26:40.000Z | server.py | Leslie-Fang/Seq2Seq-Vis | 71be2e246a7bc4c7a27912d3a735cbcf13b908ef | [
"Apache-2.0"
] | 24 | 2018-04-28T08:09:04.000Z | 2022-02-26T03:35:37.000Z | server.py | Leslie-Fang/Seq2Seq-Vis | 71be2e246a7bc4c7a27912d3a735cbcf13b908ef | [
"Apache-2.0"
] | 72 | 2018-04-27T00:25:22.000Z | 2021-09-30T14:16:43.000Z | #!/usr/bin/env python3
import argparse
import os
import time
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
import connexion
import logging
# import umap
from flask import send_from_directory, redirect, json
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import MDS, TSNE
from copy import deepcopy
from s2s.lru import LRU
from s2s.project import S2SProject
from index.annoyVectorIndex import AnnoyVectorIndex
__author__ = 'Hendrik Strobelt, Sebastian Gehrmann, Alexander M. Rush'
CONFIG_FILE_NAME = 's2s.yaml'
projects = {}
cache_translate = LRU(50)
# cache_neighbors = LRU(20)
cache_compare = LRU(50)
pre_cached = []
logging.basicConfig(level=logging.INFO)
app = connexion.App(__name__)
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--debug", action='store_true', help=' Debug mode')
parser.add_argument("--port", default="8080", help="Port to run the app. ")
# parser.add_argument("--nocache", default=False)
parser.add_argument("--preload", action='store_true', help="Preload indices.")
parser.add_argument("--cache", type=str, default='',
help="Preload cache from dir")
parser.add_argument("--dir", type=str,
default=os.path.abspath('data'),
help='Path to project')
# parser.add_argument('-api', type=str, default='pytorch',
# choices=['pytorch', 'lua'],
# help="""The API to use.""")
args = parser.parse_args()
print(args)
# global model
# if args.api == "pytorch":
# # model = ONMTmodelAPI("model_api/data/ende_acc_15.72_ppl_912.74_e9.pt")
# model = ONMTmodelAPI("model_api/data/ende_acc_46.86_ppl_21.19_e12.pt")
# else:
# model = ONMTLuaModelAPI()
# just a simple flask route
@app.route('/')
# send everything from client as static content
@app.route('/client/<path:path>')
def send_static_client(path):
    """Serve the pre-built client bundle.

    :param path: file path relative to ``client_dist/``
    """
    return send_from_directory('client_dist/', path)
# noinspection SpellCheckingInspection
# ------ API routing as defined in swagger.yaml (connexion)
# def compare_translation(**request):
# pivot = request["in"]
# compare = request["compare"]
# neighbors = request.get('neighbors', [])
#
# current_project = list(projects.values())[0]
# model = current_project.model
#
# # trans_all = model.translate(in_text=[pivot]+compare)
#
# pivot_res = translate(current_project, [pivot])[0]
# pivot_attn = extract_attn(pivot_res)
# pivot_attn_l = pivot_attn.shape[0]
#
# # compare.append(pivot)
# compare_t = translate(current_project, compare)
#
# res = []
# index_orig = 0
# for cc_t_key in compare_t:
# # cc_t = model.translate(in_text=[cc])[0]
# cc_t = compare_t[cc_t_key]
# cc_attn = extract_attn(cc_t)
# dist = 10
# if cc_attn.shape[0] > 0:
# max_0 = max(cc_attn.shape[0], pivot_attn.shape[0])
# max_1 = max(cc_attn.shape[1], pivot_attn.shape[1])
#
# cc__a = np.zeros(shape=(max_0, max_1))
# cc__a[:cc_attn.shape[0], :cc_attn.shape[1]] = cc_attn
#
# cc__b = np.zeros(shape=(max_0, max_1))
# cc__b[:pivot_attn.shape[0], :pivot_attn.shape[1]] = pivot_attn
#
# dist = np.linalg.norm(cc__a - cc__b)
#
# res.append({
# "sentence": extract_sentence(cc_t),
# "attn": extract_attn(cc_t).tolist(),
# "attn_padding": (cc__a - cc__b).tolist(),
# "orig": compare[index_orig],
# "dist": dist
# })
# index_orig += 1
#
# return {"compare": res, "pivot": extract_sentence(pivot_res)}
# Dimensionality-reduction backends for state projection, keyed by API name.
# NOTE(review): these estimator instances are module-level singletons that get
# re-fit on every request (fit_transform) -- confirm this is safe if requests
# can run concurrently.
P_METHODS = {
    "pca": PCA(n_components=2, ),
    "mds": MDS(),
    "tsne": TSNE(init='pca'),
    # 'umap': umap.UMAP(metric='cosine'),
    "none": lambda x: x
}
def find_and_load_project(directory):
    """
    searches for CONFIG_FILE_NAME in all subdirectories of directory
    and creates data handlers for all of them
    :param directory: scan directory
    :return: null
    """
    project_dirs = []
    for root, dirs, files in os.walk(directory):
        if CONFIG_FILE_NAME in files:
            project_dirs.append(os.path.abspath(root))
    for p_dir in project_dirs:
        # Project id is the directory's base name.
        # NOTE(review): two project dirs with the same base name overwrite
        # each other in `projects` -- confirm this cannot happen in practice.
        dh_id = os.path.split(p_dir)[1]
        cf = os.path.join(p_dir, CONFIG_FILE_NAME)
        p = S2SProject(directory=p_dir, config_file=cf)
        if args.preload:
            p.preload_indices(['encoder', 'decoder'])
        projects[dh_id] = p
app.add_api('swagger.yaml')
# When the module is imported by an external WSGI server instead of run
# directly, we still need the CLI defaults -- hence parse_known_args below.
if __name__ == '__main__':
    args = parser.parse_args()
    app.run(port=int(args.port), debug=args.debug, host="0.0.0.0")
else:
    args, _ = parser.parse_known_args()
# Discover projects and warm the cache in both modes (direct run and import).
find_and_load_project(args.dir)
preload_cache(args.cache)
| 33.49026 | 80 | 0.55269 | #!/usr/bin/env python3
import argparse
import os
import time
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
import connexion
import logging
# import umap
from flask import send_from_directory, redirect, json
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import MDS, TSNE
from copy import deepcopy
from s2s.lru import LRU
from s2s.project import S2SProject
from index.annoyVectorIndex import AnnoyVectorIndex
__author__ = 'Hendrik Strobelt, Sebastian Gehrmann, Alexander M. Rush'
CONFIG_FILE_NAME = 's2s.yaml'
projects = {}
cache_translate = LRU(50)
# cache_neighbors = LRU(20)
cache_compare = LRU(50)
pre_cached = []
logging.basicConfig(level=logging.INFO)
app = connexion.App(__name__)
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--debug", action='store_true', help=' Debug mode')
parser.add_argument("--port", default="8080", help="Port to run the app. ")
# parser.add_argument("--nocache", default=False)
parser.add_argument("--preload", action='store_true', help="Preload indices.")
parser.add_argument("--cache", type=str, default='',
help="Preload cache from dir")
parser.add_argument("--dir", type=str,
default=os.path.abspath('data'),
help='Path to project')
# parser.add_argument('-api', type=str, default='pytorch',
# choices=['pytorch', 'lua'],
# help="""The API to use.""")
args = parser.parse_args()
print(args)
# global model
# if args.api == "pytorch":
# # model = ONMTmodelAPI("model_api/data/ende_acc_15.72_ppl_912.74_e9.pt")
# model = ONMTmodelAPI("model_api/data/ende_acc_46.86_ppl_21.19_e12.pt")
# else:
# model = ONMTLuaModelAPI()
# just a simple flask route
@app.route('/')
def hello_world():
    """Root endpoint: forward the browser to the client application's index page."""
    return redirect('client/index.html')
# send everything from client as static content
@app.route('/client/<path:path>')
def send_static_client(path):
    """Serve the pre-built client bundle.

    :param path: file path relative to ``client_dist/``
    """
    return send_from_directory('client_dist/', path)
def closest_vector_n(index, v, r=5):
    """Query *index* for the nearest entries to the query vector(s) *v*.

    Returns one list per query vector of ``(id, distance)`` pairs; when
    ``r > 1`` the distances are rounded to whole numbers.
    """
    hits = index.get_closest_x(
        v, k=100, ignore_same_tgt=False, include_distances=True,
        use_vectors=True)
    if r > 1:
        hits = [[(item[0], round(item[1])) for item in row] for row in hits]
    return hits
def project_states(vectors, p_method='pca', anchors=None):
    """Project high-dimensional state vectors to 2-D with the chosen method.

    ``anchors`` is currently ignored (forced to None below, see TODO), so the
    projection is always fit on *vectors* themselves.
    """
    projector = P_METHODS[p_method]
    anchors = None  # TODO: remove fix -- anchor-based fitting is disabled
    if anchors:
        projector.fit(anchors)
        return projector.transform(vectors)
    return projector.fit_transform(vectors)
# noinspection SpellCheckingInspection
def projection_hnlp(model, states, lengths):
    """Project states onto a learned "progress" axis plus orthogonal spreads.

    :param model: a fitted linear model (``.predict`` / ``.coef_`` are read);
        presumably trained to predict a token's relative sentence position --
        TODO confirm against the training code.
    :param states: state vectors, flattened over all sentences
    :param lengths: per-sentence token counts (assumed to sum to
        ``len(states)`` -- verify with callers)
    :return: (x_pos, y_a, y_b, y_c): x from the linear model; y_a the ideal
        progression 1/l .. l/l per sentence; y_b / y_c 1-D t-SNE / PCA
        embeddings of the states with the progress direction removed.
    """
    v = np.array(states)
    x_pos = model.predict(v)
    # expected progression
    y_pos_a = np.concatenate([(np.arange(1, l + 1, 1) / l) for l in lengths])
    # For removing the coefficients
    w = model.coef_
    w = np.expand_dims(w, 1)
    # Subtract each state's component along w (assumes w is roughly unit
    # norm -- TODO confirm), leaving variation orthogonal to the x axis.
    v_prime = v - np.dot(np.dot(v, w), w.T)
    y_pos_b = (TSNE(n_components=1, init='pca').fit_transform(v_prime)) \
        .flatten()
    y_pos_c = (PCA(n_components=1).fit_transform(v_prime)) \
        .flatten()
    return x_pos.tolist(), y_pos_a.tolist(), y_pos_b.tolist(), y_pos_c.tolist()
def create_proj_list(xs, ys, traces):
    """Attach a 2-D position to each trace dict.

    Returns shallow copies of the first ``len(xs)`` entries of *traces*,
    each extended with ``'pos': [x, y]``; the inputs are left untouched.
    """
    return [dict(traces[i], pos=[xs[i], ys[i]]) for i in range(len(xs))]
def all_neighbors(project, translations, neighbors, p_method='tsne'):
    """Annotate translations with nearest-neighbor states and 2-D projections.

    For every requested neighborhood ('encoder', 'decoder', 'context') the
    matching per-token state vectors are looked up in the project's vector
    index; each token dict gains a 'neighbors' (or 'neighbor_context') list,
    and a de-duplicated neighbor summary plus the query states themselves are
    projected to 2-D under ``res[neighborhood]``. When the project has a
    trained progress model, additional 1-D layouts are returned under the
    '_a'/'_b'/'_c' suffixed keys. Vector payloads ('v') are stripped before
    returning. NOTE: mutates the passed *translations* dicts in place.
    """
    nr_nn_for_projection = 20  # how many NNs per token feed the projection
    res = {}
    for neighborhood in neighbors:
        n_cand = [[]]  # neighbor candidates of the first beam only
        states = []    # per-translation list of raw query state vectors
        nb_summary = {}
        start_t = time.time()
        for t_id, translation in translations.items():
            index = project.get_index(neighborhood)
            print('index-work starts..')
            if index:
                # One branch per neighborhood: pick the state vectors to
                # query, write the hits back onto the token dicts.
                if neighborhood == 'encoder':
                    all_enc_states = list(
                        map(lambda x: x['state'], translation['encoder']))
                    states.append(all_enc_states)
                    closest_v = closest_vector_n(index, all_enc_states)
                    for e_id, enc in enumerate(translation['encoder']):
                        n_cand_local = closest_v[e_id]
                        enc['neighbors'] = n_cand_local
                        n_cand[0].append(
                            {'i': e_id, 't': t_id, 'type': 'enc',
                             'n': n_cand_local[:nr_nn_for_projection]})
                if neighborhood == 'decoder':
                    all_states = list(map(lambda x: x['state'],
                                          translation['decoder'][0]))
                    states.append(all_states)
                    closest_v = closest_vector_n(index, all_states)
                    bId = 0
                    # Only the top beam contributes projection candidates.
                    beam = translation['decoder'][0]
                    for d_id, dec in enumerate(beam):
                        n_cand_local = closest_v[d_id]
                        dec['neighbors'] = n_cand_local
                        if bId == 0:
                            n_cand[0].append(
                                {'i': d_id, 't': t_id, 'type': 'dec',
                                 'n': n_cand_local[:nr_nn_for_projection]})
                        bId += 1
                if neighborhood == 'context':
                    all_states = list(map(lambda x: x['context'],
                                          translation['decoder'][0]))
                    states.append(all_states)
                    closest_v = closest_vector_n(index, all_states)
                    bId = 0
                    beam = translation['decoder'][0]
                    for d_id, dec in enumerate(beam):
                        n_cand_local = closest_v[d_id]
                        dec['neighbor_context'] = n_cand_local
                        if bId == 0:
                            n_cand[0].append(
                                {'i': d_id, 't': t_id, 'type': 'ctx',
                                 'n': n_cand_local[:nr_nn_for_projection]})
                        bId += 1
        # De-duplicate neighbor hits: one summary entry per neighbor id,
        # recording every (id, dist, translation, token) occurrence.
        for all_cand in n_cand[0]:  # for now only first entry
            for n_cand_x in all_cand['n']:
                cand_id = n_cand_x[0]
                if cand_id in nb_summary:
                    nb_summary[cand_id]['occ'].append(
                        [n_cand_x[0], n_cand_x[1], all_cand['t'],
                         all_cand['i']])
                else:
                    nb_summary[cand_id] = {
                        'id': cand_id,
                        # NOTE(review): uses the *last* index bound in the
                        # loop above -- fine while one neighborhood maps to
                        # one index, confirm if that ever changes.
                        'v': index.get_vector(cand_id),
                        'occ': [[n_cand_x[0], n_cand_x[1], all_cand['t'],
                                 all_cand['i']]],
                        'pivot': None
                    }
        nb_summary_list = list(nb_summary.values())
        sentence_states = []
        sentence_lengths = []
        sentence_traces = []
        # add the actual states as items to the space:
        for t_id, t_states in enumerate(states):
            sentence_lengths.append(len(t_states))
            for s_id, state in enumerate(t_states):
                sentence_traces.append({
                    # Synthetic negative ids keep pivots distinct from
                    # real index entries.
                    'id': -10000 * (t_id + 1) + s_id,
                    'v': state,
                    'occ': [],
                    'pivot': {'trans_ID': t_id, 'word_ID': s_id}
                })
                sentence_states.append(state)
        nb_summary_list = nb_summary_list + sentence_traces
        print('index-time:', str(time.time() - start_t))
        start_t = time.time()
        # 2-D layout of neighbors + pivot states together.
        positions = project_states([x['v'] for x in nb_summary_list],
                                   p_method, anchors=sentence_states)
        for i in range(len(positions)):
            nb_summary_list[i]['pos'] = positions[i].tolist()
        if project.project_model:
            # Extra 1-D "progress" layouts of the pivot states only.
            x_pos, y_pos_a, y_pos_b, y_pos_c = projection_hnlp(
                project.project_model,
                sentence_states,
                sentence_lengths)
            res[neighborhood + '_a'] = create_proj_list(x_pos, y_pos_a,
                                                        sentence_traces)
            res[neighborhood + '_b'] = create_proj_list(x_pos, y_pos_b,
                                                        sentence_traces)
            res[neighborhood + '_c'] = create_proj_list(x_pos, y_pos_c,
                                                        sentence_traces)
        print('proj-time:', str(time.time() - start_t))
        res[neighborhood] = nb_summary_list
    # Raw vectors are large and not needed by the client: strip them.
    for _, nb in res.items():
        for nbb in nb:
            del nbb['v']
    return res
def translate(project, in_sentences, partial=[], attn_overwrite=[]):
    """Translate each sentence with the project's model and add word strings.

    :param project: S2SProject providing ``model`` and the target id->token
        dict under ``dicts['i2t']['tgt']``
    :param in_sentences: list of source sentences (one model call each)
    :param partial: optional per-sentence partial decodes (prefix forcing)
    :param attn_overwrite: attention overrides forwarded to every call
    :return: dict {position -> translation result}, with 'word' added to
        every beam entry and 'beam_trace_words' added per translation
    NOTE(review): the mutable default arguments are never mutated here, so
    they are harmless -- but consider ``None`` sentinels for safety.
    """
    model = project.model
    translations = {}
    for transID, in_sentence in enumerate(in_sentences):
        # Wrap the sentence's partial decode in a list only when non-empty.
        par = partial[transID] if (transID < len(partial)) else []
        par = [par] if len(par) else []
        print(transID, in_sentence, par)
        translations[transID] = model.translate(in_text=[in_sentence],
                                                partial_decode=par,
                                                attn_overwrite=attn_overwrite)[
            0]
    # Resolve predicted token ids to readable words ('??' when unknown).
    tgt_dict = project.dicts['i2t']['tgt']
    for _, trans in translations.items():
        for tk in trans['beam']:
            for lbeam in tk:
                lbeam['word'] = tgt_dict.get(lbeam['pred'], '??')
        trans['beam_trace_words'] = []
        for b_level in trans['beam_trace']:
            level_collect = []
            for b_trace in b_level:
                trace_collect = []
                for w_id in b_trace:
                    trace_collect.append(tgt_dict.get(w_id, '??'))
                level_collect.append(trace_collect)
            trans['beam_trace_words'].append(level_collect)
    return translations
# ------ API routing as defined in swagger.yaml (connexion)
def get_translation(**request):
    """API endpoint: translate ``request['in']`` with optional constraints.

    Optional request keys -- all use ``['']`` as the "empty" marker:
    'partial' (forced decode prefixes), 'force_attn' (flat
    [key, value, key, value, ...] attention overrides), 'neighbors'
    (neighborhood names to enrich the result with). Results are cached by
    (sentence, partials, force_attn).
    """
    current_project = list(projects.values())[0]  # type: S2SProject
    in_sentence = request['in']
    neighbors = request.get('neighbors', [''])
    partials = request.get('partial', [''])
    force_attn = request.get('force_attn', [''])
    # Make empty lists empty:
    partials = [] if partials == [''] else partials
    neighbors = [] if neighbors == [''] else neighbors
    force_attn = [] if force_attn == [''] else force_attn
    attn_overwrite = []
    if force_attn:
        # Fold the flat alternating [key, value, ...] list into a dict.
        att = {}
        is_key = True
        key = None
        for v in force_attn:
            if is_key:
                key = v
            else:
                att[key] = v
            is_key = not is_key
        attn_overwrite.append(att)
    # Cache key must match the one preload_cache() builds.
    translation_id = in_sentence + str(partials) + str(force_attn)
    translations = cache_translate.get(translation_id)
    if not translations:
        translations = translate(current_project, [in_sentence],
                                 partial=partials,
                                 attn_overwrite=attn_overwrite)
        cache_translate.add(translation_id, translations)
    res = translations[0]
    if len(neighbors) > 0:
        # Neighbor enrichment is stored on the cached object, so it is
        # computed at most once per cached translation.
        if 'allNeighbors' not in res:
            res['allNeighbors'] = all_neighbors(current_project, translations,
                                                neighbors)
    res['request'] = request
    return res
def get_translation_compare(**request):
    """API endpoint: translate a pivot and a comparison sentence together.

    Results (optionally enriched with neighborhood data) are cached under a
    key combining both sentences and the requested neighborhoods.
    """
    project = list(projects.values())[0]
    pivot = request['in']
    comparison = request['compare']
    neighborhoods = request.get('neighbors', [])
    if neighborhoods == ['']:
        neighborhoods = []  # [''] is the API's "empty list" marker
    cache_key = pivot + ' VS ' + comparison + str(neighborhoods)
    cached = cache_compare.get(cache_key)
    if cached:
        return cached
    translations = translate(project, [pivot, comparison])
    result = {'in': translations[0], 'compare': translations[1]}
    if len(neighborhoods) > 0:
        result['neighbors'] = all_neighbors(project, translations,
                                            neighborhoods)
    cache_compare.add(cache_key, result)
    return result
def extract_sentence(x):
    """Join the tokens of the first decoder hypothesis into one string."""
    tokens = [step['token'] for step in x['decoder'][0]]
    return ' '.join(tokens)
def extract_attn(x):
    """Return the first attention matrix of the translation as a numpy array."""
    first_attn = x['attn'][0]
    return np.array(first_attn)
# def compare_translation(**request):
# pivot = request["in"]
# compare = request["compare"]
# neighbors = request.get('neighbors', [])
#
# current_project = list(projects.values())[0]
# model = current_project.model
#
# # trans_all = model.translate(in_text=[pivot]+compare)
#
# pivot_res = translate(current_project, [pivot])[0]
# pivot_attn = extract_attn(pivot_res)
# pivot_attn_l = pivot_attn.shape[0]
#
# # compare.append(pivot)
# compare_t = translate(current_project, compare)
#
# res = []
# index_orig = 0
# for cc_t_key in compare_t:
# # cc_t = model.translate(in_text=[cc])[0]
# cc_t = compare_t[cc_t_key]
# cc_attn = extract_attn(cc_t)
# dist = 10
# if cc_attn.shape[0] > 0:
# max_0 = max(cc_attn.shape[0], pivot_attn.shape[0])
# max_1 = max(cc_attn.shape[1], pivot_attn.shape[1])
#
# cc__a = np.zeros(shape=(max_0, max_1))
# cc__a[:cc_attn.shape[0], :cc_attn.shape[1]] = cc_attn
#
# cc__b = np.zeros(shape=(max_0, max_1))
# cc__b[:pivot_attn.shape[0], :pivot_attn.shape[1]] = pivot_attn
#
# dist = np.linalg.norm(cc__a - cc__b)
#
# res.append({
# "sentence": extract_sentence(cc_t),
# "attn": extract_attn(cc_t).tolist(),
# "attn_padding": (cc__a - cc__b).tolist(),
# "orig": compare[index_orig],
# "dist": dist
# })
# index_orig += 1
#
# return {"compare": res, "pivot": extract_sentence(pivot_res)}
# Dimensionality-reduction backends for state projection, keyed by API name.
# NOTE(review): these estimator instances are module-level singletons that get
# re-fit on every request (fit_transform) -- confirm this is safe if requests
# can run concurrently.
P_METHODS = {
    "pca": PCA(n_components=2, ),
    "mds": MDS(),
    "tsne": TSNE(init='pca'),
    # 'umap': umap.UMAP(metric='cosine'),
    "none": lambda x: x
}
def get_close_words(**request):
    """API endpoint: nearest words to ``request['in']`` by cosine similarity.

    Looks the word up in the embedding matrix for the requested side
    ('src' -> encoder embeddings, else decoder), ranks all vocabulary items
    by cosine similarity, and optionally projects the top hits to 2-D.

    Fixes: when ``p_method == "none"`` the original called ``.tolist()`` on
    the plain-list placeholder ``positions``, raising AttributeError; the
    conversion is now done only on actual projection output.
    """
    current_project = list(projects.values())[0]  # type: S2SProject
    loc = request['loc']  # "src" or "tgt"
    limit = request['limit']
    p_method = request["p_method"]
    t2i = current_project.dicts['t2i'][loc]
    i2t = current_project.dicts['i2t'][loc]
    if loc == 'src':
        embeddings = current_project.embeddings[
            'encoder']  # TODO: change !!
    else:
        embeddings = current_project.embeddings['decoder']
    word = request['in']
    my_vec = embeddings[t2i[word]]
    matrix = embeddings[:]
    # Cosine similarity: dot products normalized by both vector norms
    # (matrix norms are cached per side on the project).
    matrix_norms = current_project.cached_norm(loc, matrix)
    dotted = matrix.dot(my_vec)
    vector_norm = np.sqrt(np.sum(my_vec * my_vec))
    matrix_vector_norms = np.multiply(matrix_norms, vector_norm)
    neighbors = np.divide(dotted, matrix_vector_norms)
    # argsort is ascending, so the last `limit` entries are the best matches.
    neighbour_ids = np.argsort(neighbors)[-limit:].tolist()
    names = [i2t[x] for x in neighbour_ids]
    # projection methods: MDS, PCA, tSNE -- all with standard params
    positions = []
    if p_method != "none":
        positions = P_METHODS[p_method].fit_transform(
            matrix[neighbour_ids, :]).tolist()
    return {'word': names,
            'score': neighbors[neighbour_ids].tolist(),
            'pos': positions
            }
def get_neighbor_details(**request):
    """API endpoint: resolve raw vector-index ids into their detail records."""
    project = list(projects.values())[0]
    vector_index = project.get_index(
        request["vector_name"])  # type: AnnoyVectorIndex
    return vector_index.get_details(request['indices'])
def get_info(**request):
    """API endpoint: info for the default project, plus pre-cached requests.

    When a 'project_id' is supplied the request is echoed back unchanged
    (per-project lookup is not implemented).
    """
    if 'project_id' in request:
        return request
    project = list(projects.values())[0]  # type: S2SProject
    info = project.info()
    info['pre_cached'] = pre_cached
    return info
def get_close_vectors(**request):
    """API endpoint: nearest-neighbor lookup (with distances) for index ids."""
    project = list(projects.values())[0]  # type: S2SProject
    vector_index = project.get_index(
        request["vector_name"])  # type: AnnoyVectorIndex
    return vector_index.get_closest_x(request["indices"],
                                      include_distances=True)
def train_data_for_index(**request):
    """API endpoint: fetch the training-data rows behind the given index ids."""
    ids = request["indices"]
    loc = request["loc"]
    project = list(projects.values())[0]  # type: S2SProject
    rows = project.get_train_for_index(ids, loc)
    return {'loc': loc, 'ids': ids, 'res': rows}
def find_and_load_project(directory):
    """
    searches for CONFIG_FILE_NAME in all subdirectories of directory
    and creates data handlers for all of them
    :param directory: scan directory
    :return: null
    """
    project_dirs = []
    for root, dirs, files in os.walk(directory):
        if CONFIG_FILE_NAME in files:
            project_dirs.append(os.path.abspath(root))
    for p_dir in project_dirs:
        # Project id is the directory's base name.
        # NOTE(review): two project dirs with the same base name overwrite
        # each other in `projects` -- confirm this cannot happen in practice.
        dh_id = os.path.split(p_dir)[1]
        cf = os.path.join(p_dir, CONFIG_FILE_NAME)
        p = S2SProject(directory=p_dir, config_file=cf)
        if args.preload:
            p.preload_indices(['encoder', 'decoder'])
        projects[dh_id] = p
app.add_api('swagger.yaml')
def preload_cache(cache):
    """Warm the translation cache from a directory of saved ``*.json`` responses.

    Each json file must contain a previously returned translation payload
    together with its originating 'request'. The payload is registered in
    ``cache_translate`` under the same key that ``get_translation`` builds,
    and the request is remembered in ``pre_cached`` for the info endpoint.
    Does nothing when *cache* is empty or does not exist.

    Fixes: removed the dead ``neighbors`` local -- it was computed but never
    part of the cache key (matching get_translation's key format).
    """
    if not cache or not os.path.exists(cache):
        return
    for name in os.listdir(cache):
        file = os.path.join(cache, name)
        if not (os.path.isfile(file) and file.endswith('.json')):
            continue
        with open(file, 'r') as f:
            payload = json.load(f)
        request = payload['request']
        print(request)
        partials = request.get('partial', [''])
        force_attn = request.get('force_attn', [''])
        # Normalize the "[''] means empty" convention used by the API.
        partials = [] if partials == [''] else partials
        force_attn = [] if force_attn == [''] else force_attn
        # Must mirror the cache key built in get_translation().
        translation_id = request['in'] + str(partials) + str(force_attn)
        cache_translate.preload(translation_id, [payload])
        pre_cached.append(request)
# When the module is imported by an external WSGI server instead of run
# directly, we still need the CLI defaults -- hence parse_known_args below.
if __name__ == '__main__':
    args = parser.parse_args()
    app.run(port=int(args.port), debug=args.debug, host="0.0.0.0")
else:
    args, _ = parser.parse_known_args()
# Discover projects and warm the cache in both modes (direct run and import).
find_and_load_project(args.dir)
preload_cache(args.cache)
| 15,299 | 0 | 388 |
652cec0c60fd17c92973aec0fbbe1a9e68631d80 | 13,761 | py | Python | build_and_train.py | akshaysharma21/Genre_Classification | 4a5a8f875ba77241a1241b995b304ed895d576a5 | [
"CC0-1.0"
] | null | null | null | build_and_train.py | akshaysharma21/Genre_Classification | 4a5a8f875ba77241a1241b995b304ed895d576a5 | [
"CC0-1.0"
] | null | null | null | build_and_train.py | akshaysharma21/Genre_Classification | 4a5a8f875ba77241a1241b995b304ed895d576a5 | [
"CC0-1.0"
] | null | null | null | #imports
import pandas as pd
import os
import ast
import sklearn as skl
import sklearn.utils, sklearn.preprocessing, sklearn.decomposition, sklearn.svm
import matplotlib.pyplot as plt
import numpy as np
import pylab
import librosa
import ffmpeg
import audioread
import sklearn
import librosa.display
import datetime
import time
import keras
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Bidirectional, LSTM, Activation, GRU, Conv2D, concatenate, MaxPooling2D, Flatten, Embedding, Lambda, Reshape
from keras.optimizers import Adam, RMSprop
from keras import backend as K
#plot_file(path)
#function to plot spectrograms
#Load and trim datasets to shread out useless info
filePath = 'D:\\fma_metadata\\tracks.csv'
df_tracks = pd.read_csv(filePath, index_col=0, header=[0, 1])
print(list(df_tracks))
filter = [('set', 'split'), ('set', 'subset') , ('track', 'genre_top')]
df_sel = df_tracks[filter]
df_sel = df_sel[df_sel[filter[1]]=='small']
df_sel['track_id'] = df_sel.index
df_test = df_sel[df_sel[filter[0]]=='test']
df_valid = df_sel[df_sel[filter[0]]=='validation']
df_train = df_sel[df_sel[filter[0]]=='training']
print(df_sel.tail())
print(df_test.shape)
print(df_test.head())
print( df_train.shape)
print(df_train.head())
print(df_valid.shape)
print(df_valid.head())
print(df_sel[filter[2]].value_counts())
#Build and train the model
#creates training, testing and validation datasets.
#concatenates fragmented datasets.
# concatenate_datasets() #concatinate fragmented datasets
build_and_train_model()
# create_separate_datasets() #create training, testing and validation datasets
| 35.55814 | 147 | 0.594506 | #imports
import pandas as pd
import os
import ast
import sklearn as skl
import sklearn.utils, sklearn.preprocessing, sklearn.decomposition, sklearn.svm
import matplotlib.pyplot as plt
import numpy as np
import pylab
import librosa
import ffmpeg
import audioread
import sklearn
import librosa.display
import datetime
import time
import keras
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Bidirectional, LSTM, Activation, GRU, Conv2D, concatenate, MaxPooling2D, Flatten, Embedding, Lambda, Reshape
from keras.optimizers import Adam, RMSprop
from keras import backend as K
#plot_file(path)
#function to plot spectrograms
def plot_file(path):
    """Display a mel spectrogram array with librosa and print its shape.

    Despite the name, ``path`` is the spectrogram array itself (as loaded
    from a .npy file), not a file path — TODO confirm with callers.
    """
    currSpect=path
    plt.figure(figsize=(10, 4))
    # Transpose so time runs along the x axis as specshow expects.
    librosa.display.specshow(currSpect.T,x_axis = 'time', y_axis='mel', fmax=8000)
    plt.colorbar(format='%+2.0f dB')
    plt.show()
    print(currSpect.shape)
#Load and trim datasets to shread out useless info
filePath = 'D:\\fma_metadata\\tracks.csv'
df_tracks = pd.read_csv(filePath, index_col=0, header=[0, 1])
print(list(df_tracks))
filter = [('set', 'split'), ('set', 'subset') , ('track', 'genre_top')]
df_sel = df_tracks[filter]
df_sel = df_sel[df_sel[filter[1]]=='small']
df_sel['track_id'] = df_sel.index
df_test = df_sel[df_sel[filter[0]]=='test']
df_valid = df_sel[df_sel[filter[0]]=='validation']
df_train = df_sel[df_sel[filter[0]]=='training']
print(df_sel.tail())
print(df_test.shape)
print(df_test.head())
print( df_train.shape)
print(df_train.head())
print(df_valid.shape)
print(df_valid.head())
print(df_sel[filter[2]].value_counts())
#Build and train the model
def build_and_train_model():
    """Build, train and evaluate the parallel CNN/GRU genre classifier.

    Loads the pre-computed 128x640 mel-spectrogram arrays and integer genre
    labels from disk (produced by create_separate_datasets() and
    concatenate_datasets()), one-hot encodes the 8-class labels, trains for
    50 epochs and prints the test loss/accuracy.
    """
    xTrain = np.load("D:\\spectAr2\\final_train.npy")
    yTrain = np.load("D:\\spectAr2\\genres_train.npy")
    print(yTrain.shape)
    xValid = np.load("D:\\spectAr2\\final_valid.npy")
    yValid = np.load("D:\\spectAr2\\genres_valid.npy")
    xTest = np.load("D:\\spectAr2\\final_test.npy")
    yTest = np.load("D:\\spectAr2\\genres_test.npy")
    # One-hot encode the integer genre labels (8 classes).
    yTrain = keras.utils.np_utils.to_categorical(yTrain)
    yValid = keras.utils.np_utils.to_categorical(yValid)
    yTest = keras.utils.np_utils.to_categorical(yTest)
    # ------------------------------------------------------------------
    # Parallel CNN/RNN model: a convolutional branch and a bidirectional
    # GRU branch both read the input spectrogram; their features are
    # concatenated before the softmax classifier.
    # ------------------------------------------------------------------
    inp = Input((128, 640, 1))
    # CNN branch
    c1 = Conv2D(filters=16, kernel_size=(1, 3), padding='valid', activation='relu')(inp)
    pool1 = MaxPooling2D((2, 2), strides=(2, 2))(c1)
    c2 = Conv2D(filters=32, kernel_size=(1, 3), padding='valid', activation='relu')(pool1)
    pool2 = MaxPooling2D((2, 2), strides=(2, 2))(c2)
    c3 = Conv2D(filters=64, kernel_size=(1, 3), padding='valid', activation='relu')(pool2)
    pool3 = MaxPooling2D((2, 2), strides=(2, 2))(c3)
    c4 = Conv2D(filters=64, kernel_size=(1, 3), padding='valid', activation='relu')(pool3)
    pool4 = MaxPooling2D((4, 4), strides=(4, 4))(c4)
    c5 = Conv2D(filters=64, kernel_size=(1, 3), padding='valid', activation='relu')(pool4)
    pool5 = MaxPooling2D((4, 4), strides=(4, 4))(c5)
    flat1 = Flatten()(pool5)
    # GRU branch
    GRUPool = MaxPooling2D((2, 4), strides=(2, 4))(inp)
    # BUG FIX: the squeeze was applied to an undefined name `lstmPool`,
    # which raised NameError when the graph was built; it must consume
    # GRUPool, the pooled input defined just above.
    squeezed = Lambda(lambda x: K.squeeze(x, axis=-1))(GRUPool)
    lstm = Bidirectional(GRU(64))(squeezed)
    concatenated = concatenate([flat1, lstm], axis=1)
    output = Dense(8, activation='softmax')(concatenated)
    model = Model(inp, output)
    optimizer = RMSprop(lr=0.0005)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    # BUG FIX: summary() must be called; `print(model.summary)` only
    # printed the bound method's repr.
    model.summary()
    result = model.fit(xTrain, yTrain, batch_size=64, epochs=50, validation_data=(xValid, yValid), verbose=1)
    score = model.evaluate(xTest, yTest, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
def make_filename(num):
    """Return the track id zero-padded to six characters plus a .npy suffix."""
    return ("%i" % num).rjust(6, "0") + ".npy"
#creates training, testing and validation datasets.
# Mapping from FMA top-level genre name to the integer class label used
# throughout this script.
_GENRE_LABELS = {
    'International': 0,
    'Electronic': 1,
    'Hip-Hop': 2,
    'Folk': 3,
    'Instrumental': 4,
    'Pop': 5,
    'Experimental': 6,
    'Rock': 7,
}
def create_separate_datasets():
    """Create the label vectors and chunked spectrogram arrays for the
    training/validation/test splits and save them under D:\\spectAr2.

    Labels: training labels are only emitted for tracks whose spectrogram
    file exists on disk; validation/test labels are emitted for every row
    of their split (preserved original behavior).
    Spectrograms: files are accumulated into (N, 128, 640) arrays and
    flushed to numbered chunk files roughly every 500 tracks to bound
    memory use.
    """
    genres_train = []
    genres_valid = []
    genres_test = []
    for roots, dirs, files in os.walk("D:\\spectrograms2"):
        for index, rows in df_train.iterrows():
            # Only label training tracks whose spectrogram was generated.
            if make_filename(index) in files:
                label = _GENRE_LABELS.get(str(rows[('track', 'genre_top')]))
                if label is not None:
                    genres_train.append(label)
        for index, rows in df_valid.iterrows():
            label = _GENRE_LABELS.get(str(rows[('track', 'genre_top')]))
            if label is not None:
                genres_valid.append(label)
        for index, rows in df_test.iterrows():
            label = _GENRE_LABELS.get(str(rows[('track', 'genre_top')]))
            if label is not None:
                genres_test.append(label)
    genres_test = np.array(genres_test)
    genres_train = np.array(genres_train)
    genres_valid = np.array(genres_valid)
    print(genres_train.shape)
    np.save("D:\\spectAr2\\genres_valid.npy", genres_valid)
    np.save("D:\\spectAr2\\genres_test.npy", genres_test)
    np.save("D:\\spectAr2\\genres_train.npy", genres_train)
    print(genres_valid)
    print(genres_train)
    print(genres_test)
    # ------------------------------------------------------------------
    # Accumulate spectrograms per split and flush them in numbered chunks.
    # `foo` counts files within the current block of 100; `count` counts
    # blocks of 100; once 5 blocks are full the chunk is written out.
    # ------------------------------------------------------------------
    spect_train = np.empty((0, 128, 640))
    spect_test = np.empty((0, 128, 640))
    spect_valid = np.empty((0, 128, 640))
    print("start")
    start_time = time.time()
    foo = 1
    count = 1
    num = 1
    for roots, dirs, files in os.walk("D:\\spectrograms2"):
        for file in files:
            try:
                if count > 5:
                    # Flush the current chunk to disk and start a new one.
                    count = 1
                    spect_train = np.expand_dims(spect_train, axis=3)
                    spect_test = np.expand_dims(spect_test, axis=3)
                    spect_valid = np.expand_dims(spect_valid, axis=3)
                    np.save("D:\\spectAr2\\spect_train" + str(num) + ".npy", spect_train)
                    np.save("D:\\spectAr2\\spect_test" + str(num) + ".npy", spect_test)
                    np.save("D:\\spectAr2\\spect_valid" + str(num) + ".npy", spect_valid)
                    print("Curr is: " + str(num))
                    print(spect_train.shape)
                    print(spect_test.shape)
                    print(spect_valid.shape)
                    spect_train = np.empty((0, 128, 640))
                    spect_test = np.empty((0, 128, 640))
                    spect_valid = np.empty((0, 128, 640))
                    num += 1
                f = np.load("D:\\spectrograms2\\" + file)
                # Filenames start with the zero-padded 6-digit track id.
                curr = int(file[:6])
                if curr in df_train.index:
                    spect_train = np.append(spect_train, [f], axis=0)
                elif curr in df_test.index:
                    spect_test = np.append(spect_test, [f], axis=0)
                elif curr in df_valid.index:
                    spect_valid = np.append(spect_valid, [f], axis=0)
                foo += 1
                if foo > 100:
                    foo = 1
                    count += 1
                    print(count)
            except Exception:
                # BUG FIX: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit; a corrupt or misnamed file
                # is logged and skipped.
                print("Couldn't process: " + file)
    print(count)
    # Flush the final, possibly partial, chunk.
    spect_train = np.expand_dims(spect_train, axis=3)
    spect_test = np.expand_dims(spect_test, axis=3)
    spect_valid = np.expand_dims(spect_valid, axis=3)
    np.save("D:\\spectAr2\\spect_train" + str(num) + ".npy", spect_train)
    np.save("D:\\spectAr2\\spect_test" + str(num) + ".npy", spect_test)
    np.save("D:\\spectAr2\\spect_valid" + str(num) + ".npy", spect_valid)
    print("Curr is: " + str(num))
    print(spect_train.shape)
    print(spect_test.shape)
    print(spect_valid.shape)
    print("end")
    print(time.time() - start_time)
    print(spect_train.shape)
    print(spect_test.shape)
    print(spect_valid.shape)
#concatenates fragmented datasets.
def concatenate_datasets():
    """Merge the 16 chunked spectrogram files of each split into a single
    array per split and save it as final_{test,train,valid}.npy.

    Replaces 48 hand-written np.load calls (a..p per split) with loops over
    the chunk index; file names and output order are unchanged.
    """
    base = "D:\\spectAr2"
    chunk_ids = range(1, 17)
    tests = [np.load(os.path.join(base, "spect_test%i.npy" % i)) for i in chunk_ids]
    trains = [np.load(os.path.join(base, "spect_train%i.npy" % i)) for i in chunk_ids]
    valids = [np.load(os.path.join(base, "spect_valid%i.npy" % i)) for i in chunk_ids]
    print(df_train.shape)
    result1 = np.concatenate(tests, axis=0)
    print("YAY!")
    result2 = np.concatenate(trains, axis=0)
    result3 = np.concatenate(valids, axis=0)
    np.save(os.path.join(base, "final_test.npy"), result1)
    np.save(os.path.join(base, "final_valid.npy"), result3)
    np.save(os.path.join(base, "final_train.npy"), result2)
    print(result1.shape)
    print(result2.shape)
    print(result3.shape)
# concatenate_datasets() #concatinate fragmented datasets
build_and_train_model()
# create_separate_datasets() #create training, testing and validation datasets
| 11,993 | 0 | 111 |
b3e541794ca93c4f765c5a04c082c60d42c8e272 | 4,667 | py | Python | get_embeddings_ML.py | pauhsg/PS_SentimentAnalysis | fb7ebe4d2b05e1f86205708a13b5e238a0e1cba9 | [
"Apache-1.1"
] | null | null | null | get_embeddings_ML.py | pauhsg/PS_SentimentAnalysis | fb7ebe4d2b05e1f86205708a13b5e238a0e1cba9 | [
"Apache-1.1"
] | null | null | null | get_embeddings_ML.py | pauhsg/PS_SentimentAnalysis | fb7ebe4d2b05e1f86205708a13b5e238a0e1cba9 | [
"Apache-1.1"
] | null | null | null | #!/usr/bin/env python3
import numpy as np
import pandas as pd
import re
regex = re.compile('[^A-Za-zÀ-ÿ]')
def extract_mean_word_vectors(data, vocabulary, embeddings):
    '''
    Extract the mean word vector of each tweet.

    Args:
        data: iterable of raw tweet strings, one tweet per element.
        vocabulary: dict mapping a cleaned token to its row index in
            ``embeddings``; unknown tokens map to -1 and are dropped.
        embeddings: indexable word-embedding matrix (one row per token).

    Returns:
        A 3-tuple (token index lists, per-word vector lists, per-tweet
        mean vectors), one entry per input tweet. Tweets with no known
        token yield a NaN mean (np.mean of an empty list) — callers are
        expected to handle that.
    '''
    print('> extracting mean of word vectors')
    # Map each token to its vocabulary index; `regex` (module-level) strips
    # non-letter characters before lookup, and misses become -1.
    idx_data = [[vocabulary.get((regex.sub(' ', ' '.join(regex.sub(' ', t).split()))), -1) for t in line.strip().split()] for line in data]
    idx_data = [[t for t in tokens if t>=0] for tokens in idx_data]
    # Replace each index with a copy of its dense embedding vector.
    data_tweets_word_vector = [[embeddings[wd2voc][:] for wd2voc in tweet_words] for tweet_words in idx_data]
    # Average the word vectors of each tweet into one vector.
    data_tweets_mean_vector = [np.mean(wordvectors,axis=0) for wordvectors in data_tweets_word_vector]
    return idx_data, data_tweets_word_vector, data_tweets_mean_vector
def process_train_ML(pos, neg, vocabulary, embeddings, dim_emb):
    '''
    Build the training design matrix from positive and negative tweets.

    Given the positive and negative tweet lists, the vocabulary, the word
    embeddings and the embedding dimension, extracts the mean word vector
    of each tweet and returns:
      - a shuffled DataFrame with all tweets, their sentiment label
        (1 for positive / -1 for negative) and their vectors,
      - X: (n_tweets, dim_emb) array of mean word vectors (tweets whose
        mean is NaN, i.e. with no known token, become zero vectors),
      - y: vector of sentiment labels aligned with X.
    '''
    print('> process pos and neg datas to get X and y to perform ML')
    # Alternative input format (single comma-joined string), kept for reference:
    #pos = [x.strip() for x in pos[0].split(',')]
    #neg = [x.strip() for x in neg[0].split(',')]
    # Extract mean word embeddings per tweet for both classes.
    idx_pos_tweets, pos_tweets_word_vector, pos_tweets_mean_vector = extract_mean_word_vectors(pos, vocabulary, embeddings)
    idx_neg_tweets, neg_tweets_word_vector, neg_tweets_mean_vector = extract_mean_word_vectors(neg, vocabulary, embeddings)
    # Positive class: label 1.
    label_pos = [1] * len(pos)
    pos_df = pd.DataFrame(list(zip(label_pos, pos, idx_pos_tweets, pos_tweets_word_vector, pos_tweets_mean_vector)),\
        columns=["Sentiment","Tweet","Token_idx","Words_Vectors","Mean_Word_Vector"])
    del label_pos
    # Negative class: label -1.
    label_neg = [-1] * len(neg)
    neg_df = pd.DataFrame(list(zip(label_neg, neg, idx_neg_tweets, neg_tweets_word_vector, neg_tweets_mean_vector)),\
        columns=["Sentiment","Tweet","Token_idx","Words_Vectors","Mean_Word_Vector"]) #create a df
    del label_neg
    # Concatenate both classes; ignore_index avoids duplicate row labels.
    full_df = pd.concat([pos_df,neg_df],ignore_index=True) #regroup the dfs, ignore index in order to get new ones (->no duplicate)
    # Shuffle the rows (no fixed seed, so order differs between runs).
    full_df = full_df.sample(frac=1)
    print('> X and y informations:')
    # Build X: replace NaN means (tweets with no known token) by zeros.
    X = full_df['Mean_Word_Vector'].to_numpy()
    X = [x if not np.isnan(x).any() else np.zeros((dim_emb,)) for x in X]
    X = np.concatenate(X, axis=0).reshape((full_df.shape[0], dim_emb))
    print('X shape:', X.shape)
    # y: sentiment labels aligned with the shuffled rows of X.
    y = full_df['Sentiment'].to_numpy()
    print('y shape:', y.shape)
    return full_df, X, y
def process_test_ML(test, vocabulary, embeddings, dim_emb):
    '''
    Build the test design matrix from the test tweets.

    Given the test set, the vocabulary, the word embeddings and the
    embedding dimension, extracts the mean word vector of each tweet and
    returns a DataFrame (submission id, tweet, token indices, vectors,
    mean vector) plus X_test, a (len(test), dim_emb) array of mean word
    vectors ready for prediction. Tweets with no known token get a zero
    vector.
    '''
    print('> process test data to get X_test and perform ML')
    # Extract mean word embeddings per test tweet.
    idx_test_tweets,test_tweets_word_vector,test_tweets_mean_vector = extract_mean_word_vectors(test, vocabulary, embeddings)
    # Submission ids are 1-based and follow the input order.
    # GENERALIZED: was np.linspace(1, 10000, 10000, dtype=int), which broke
    # (misaligned ids) for any test set not exactly 10000 tweets long;
    # identical output when len(test) == 10000.
    test_ids = np.arange(1, len(test) + 1)
    test_df = pd.DataFrame(list(zip(test_ids, test, idx_test_tweets,test_tweets_word_vector,test_tweets_mean_vector)),\
        columns=["Tweet_submission_id","Tweet","Token_idx","Words_Vectors","Mean_Word_Vector"])
    del test_ids
    print('> X_test informations:')
    # Build X_test: replace NaN means (no known token) by zeros.
    X_test = test_df['Mean_Word_Vector'].to_numpy()
    X_test = [x if not np.isnan(x).any() else np.zeros((dim_emb,)) for x in X_test]
    X_test = np.concatenate(X_test, axis=0).reshape((test_df.shape[0], dim_emb))
    print('X_test shape:', X_test.shape)
    return test_df, X_test
| 40.938596 | 139 | 0.686737 | #!/usr/bin/env python3
import numpy as np
import pandas as pd
import re
regex = re.compile('[^A-Za-zÀ-ÿ]')
def extract_mean_word_vectors(data, vocabulary, embeddings):
'''
extracts mean of word vectors for each tweet
'''
print('> extracting mean of word vectors')
# get vocab equivalence to tweet words
idx_data = [[vocabulary.get((regex.sub(' ', ' '.join(regex.sub(' ', t).split()))), -1) for t in line.strip().split()] for line in data]
idx_data = [[t for t in tokens if t>=0] for tokens in idx_data]
# get dense vector equivalence to tweet words
data_tweets_word_vector = [[embeddings[wd2voc][:] for wd2voc in tweet_words] for tweet_words in idx_data]
# get mean word vector of each tweet
data_tweets_mean_vector = [np.mean(wordvectors,axis=0) for wordvectors in data_tweets_word_vector]
return idx_data, data_tweets_word_vector, data_tweets_mean_vector
def process_train_ML(pos, neg, vocabulary, embeddings, dim_emb):
'''
given the positive and negative tweets data, the vocabulary, the word embeddings
and the embedding dimension, extracts mean of word vectors per tweets, and outputs
a dataframe containing all pos and neg tweets, their labels (1 for pos/ -1 for neg)
and their mean word vectors, then shuffles the rows and also outputs the X matrix
containing mean word vectors and the vector y containinf the labels, ready to be
used into ML algorithms
'''
print('> process pos and neg datas to get X and y to perform ML')
# seperate list of tweets in lines
#pos = [x.strip() for x in pos[0].split(',')]
#neg = [x.strip() for x in neg[0].split(',')]
# extract mean word embeddings
idx_pos_tweets, pos_tweets_word_vector, pos_tweets_mean_vector = extract_mean_word_vectors(pos, vocabulary, embeddings)
idx_neg_tweets, neg_tweets_word_vector, neg_tweets_mean_vector = extract_mean_word_vectors(neg, vocabulary, embeddings)
# create labels
label_pos = [1] * len(pos)
#create a df
pos_df = pd.DataFrame(list(zip(label_pos, pos, idx_pos_tweets, pos_tweets_word_vector, pos_tweets_mean_vector)),\
columns=["Sentiment","Tweet","Token_idx","Words_Vectors","Mean_Word_Vector"])
del label_pos
# create labels
label_neg = [-1] * len(neg)
# create a df
neg_df = pd.DataFrame(list(zip(label_neg, neg, idx_neg_tweets, neg_tweets_word_vector, neg_tweets_mean_vector)),\
columns=["Sentiment","Tweet","Token_idx","Words_Vectors","Mean_Word_Vector"]) #create a df
del label_neg
# regroup the dfs, ignore index in order to get new ones (->no duplicate)
full_df = pd.concat([pos_df,neg_df],ignore_index=True) #regroup the dfs, ignore index in order to get new ones (->no duplicate)
# shuffle the rows
full_df = full_df.sample(frac=1)
print('> X and y informations:')
# get X matrix
X = full_df['Mean_Word_Vector'].to_numpy()
X = [x if not np.isnan(x).any() else np.zeros((dim_emb,)) for x in X]
X = np.concatenate(X, axis=0).reshape((full_df.shape[0], dim_emb))
print('X shape:', X.shape)
# get y
y = full_df['Sentiment'].to_numpy()
print('y shape:', y.shape)
return full_df, X, y
def process_test_ML(test, vocabulary, embeddings, dim_emb):
'''
given test set, the vocabulary, the word embeddings and the embedding dimension,
extracts mean of word vectors per tweets, and outputs a dataframe containing all tweets,
their labels (1 for pos/ -1 for neg) and their mean word vectors, and also outputs
the testx matrix containing mean word vectors and ready to be put in ML algorithms
'''
print('> process test data to get X_test and perform ML')
# extract mean word embeddings
idx_test_tweets,test_tweets_word_vector,test_tweets_mean_vector = extract_mean_word_vectors(test, vocabulary, embeddings)
# create labels
test_ids = np.linspace(1,10000,10000, dtype=int)
# create a df
test_df = pd.DataFrame(list(zip(test_ids, test, idx_test_tweets,test_tweets_word_vector,test_tweets_mean_vector)),\
columns=["Tweet_submission_id","Tweet","Token_idx","Words_Vectors","Mean_Word_Vector"])
del test_ids
print('> X_test informations:')
# get X_test matrix
X_test = test_df['Mean_Word_Vector'].to_numpy()
X_test = [x if not np.isnan(x).any() else np.zeros((dim_emb,)) for x in X_test]
X_test = np.concatenate(X_test, axis=0).reshape((test_df.shape[0], dim_emb))
print('X_test shape:', X_test.shape)
return test_df, X_test
| 0 | 0 | 0 |
b23ea33c7149b1654de0bad4da0b73703df6ace9 | 1,861 | py | Python | parametrize/utils.py | MrMrRobat/parametrize | 3da69a3695bf9af66f194d21e0596d31e958dfa9 | [
"MIT"
] | 7 | 2021-05-09T22:31:08.000Z | 2021-10-20T12:27:40.000Z | parametrize/utils.py | Bobronium/parametrize | 3da69a3695bf9af66f194d21e0596d31e958dfa9 | [
"MIT"
] | 2 | 2021-05-15T19:22:42.000Z | 2021-11-21T11:24:10.000Z | parametrize/utils.py | Bobronium/parametrize | 3da69a3695bf9af66f194d21e0596d31e958dfa9 | [
"MIT"
] | 1 | 2021-11-21T11:01:16.000Z | 2021-11-21T11:01:16.000Z | import sys
from inspect import Signature
from types import CodeType, FunctionType
from typing import Any, Tuple
if sys.version_info >= (3, 8):
copy_code = CodeType.replace
else:
PY_36_37_CODE_ARGS: Tuple[str, ...] = (
"co_argcount",
"co_kwonlyargcount",
"co_nlocals",
"co_stacksize",
"co_flags",
"co_code",
"co_consts",
"co_names",
"co_varnames",
"co_filename",
"co_name",
"co_firstlineno",
"co_lnotab",
"co_freevars",
"co_cellvars",
)
def copy_code(code: CodeType, **update: Any) -> CodeType:
"""
Create a copy of code object with changed attributes
"""
new_args = [update.pop(arg, getattr(code, arg)) for arg in PY_36_37_CODE_ARGS]
if update:
raise TypeError(f"Unexpected code attribute(s): {update}")
return CodeType(*new_args)
def copy_func(f: FunctionType, name, defaults, signature: Signature):
    """
    Return an exact copy of function *f* renamed to *name*, with its
    positional and keyword-only defaults overridden by *defaults*.

    Note: entries of *defaults* that are consumed are popped from the
    dict (the caller's mapping is mutated).
    """
    positional_defaults = []
    keyword_only_defaults = f.__kwdefaults__.copy() if f.__kwdefaults__ else {}
    for param_name, param in signature.parameters.items():
        if param.kind is param.KEYWORD_ONLY:
            # Keyword-only parameters keep their defaults in __kwdefaults__.
            if param_name in defaults:
                keyword_only_defaults[param_name] = defaults.pop(param_name)
            continue
        if param_name in defaults:
            positional_defaults.append(defaults.pop(param_name))
        elif param.default is not param.empty:
            positional_defaults.append(param.default)
    clone = FunctionType(
        code=copy_code(f.__code__, co_name=name),
        globals=f.__globals__,
        name=name,
        argdefs=tuple(positional_defaults),
        closure=f.__closure__,
    )
    clone.__kwdefaults__ = keyword_only_defaults
    clone.__dict__.update(f.__dict__)
    return clone
| 29.078125 | 86 | 0.619022 | import sys
from inspect import Signature
from types import CodeType, FunctionType
from typing import Any, Tuple
if sys.version_info >= (3, 8):
copy_code = CodeType.replace
else:
PY_36_37_CODE_ARGS: Tuple[str, ...] = (
"co_argcount",
"co_kwonlyargcount",
"co_nlocals",
"co_stacksize",
"co_flags",
"co_code",
"co_consts",
"co_names",
"co_varnames",
"co_filename",
"co_name",
"co_firstlineno",
"co_lnotab",
"co_freevars",
"co_cellvars",
)
def copy_code(code: CodeType, **update: Any) -> CodeType:
"""
Create a copy of code object with changed attributes
"""
new_args = [update.pop(arg, getattr(code, arg)) for arg in PY_36_37_CODE_ARGS]
if update:
raise TypeError(f"Unexpected code attribute(s): {update}")
return CodeType(*new_args)
def copy_func(f: FunctionType, name, defaults, signature: Signature):
"""
Makes exact copy of a function object with given name and defaults
"""
new_defaults = []
kw_only_defaults = f.__kwdefaults__.copy() if f.__kwdefaults__ else {}
for key, param in signature.parameters.items():
if param.kind is param.KEYWORD_ONLY:
if key in defaults:
kw_only_defaults[key] = defaults.pop(key)
elif key in defaults:
new_defaults.append(defaults.pop(key))
elif param.default is not param.empty:
new_defaults.append(param.default)
new_func = FunctionType(
code=copy_code(f.__code__, co_name=name),
globals=f.__globals__,
name=name,
argdefs=tuple(new_defaults),
closure=f.__closure__,
)
new_func.__kwdefaults__ = kw_only_defaults
new_func.__dict__.update(f.__dict__)
return new_func
| 0 | 0 | 0 |
7aedf0c8817932b2eee18bc17c9088890bda9cdb | 1,839 | py | Python | personalcrm/inventory/views.py | carlossgv/personalcrm-repo | 2fbe824a62d8976ce09817d75dfac7291ca3d28d | [
"MIT"
] | 2 | 2021-02-11T12:46:06.000Z | 2021-02-22T01:32:35.000Z | personalcrm/inventory/views.py | carlossgv/personalcrm-repo | 2fbe824a62d8976ce09817d75dfac7291ca3d28d | [
"MIT"
] | null | null | null | personalcrm/inventory/views.py | carlossgv/personalcrm-repo | 2fbe824a62d8976ce09817d75dfac7291ca3d28d | [
"MIT"
] | null | null | null | from .forms import NewProductForm
from django.db import models
from django.shortcuts import render, resolve_url
from django.http.response import JsonResponse
from quote.models import Product, Brand, User
# ! INVENTORY VIEWS
| 27.863636 | 82 | 0.612289 | from .forms import NewProductForm
from django.db import models
from django.shortcuts import render, resolve_url
from django.http.response import JsonResponse
from quote.models import Product, Brand, User
# ! INVENTORY VIEWS
def home(request):
    """Render the inventory landing page."""
    return render(request, "inventory/index.html")
def get_inventory_index(request):
    """Return every product in the inventory as a JSON array."""
    serialized = [item.serialize() for item in Product.objects.all()]
    return JsonResponse(serialized, safe=False)
def new_product_form(request):
    """Handle the 'create product' form.

    GET renders an empty form; POST validates it, saves a new Product
    (creator set to the logged-in user) when valid, and re-renders the
    form either way so validation errors are shown.
    """
    if request.method == "POST":
        form = NewProductForm(request.POST)
        user = request.user
        if form.is_valid():
            creator = User.objects.get(username=user)
            pn = form.cleaned_data["pn"]
            # The form field holds the Brand primary key, not the instance.
            brand = Brand.objects.get(pk=form.cleaned_data["brand"])
            print(form.cleaned_data)
            title = form.cleaned_data["title"]
            description = form.cleaned_data["description"]
            list_price = form.cleaned_data["list_price"]
            multiplier = form.cleaned_data["multiplier"]
            stock = form.cleaned_data["stock"]
            note = form.cleaned_data["note"]
            product = Product(
                pn=pn,
                brand=brand,
                title=title,
                description=description,
                creator = creator,
                list_price=list_price,
                multiplier=multiplier,
                stock=stock,
                note=note,
            )
            product.save()
        # Re-render with the bound form (shows errors when invalid).
        return render(request, "inventory/create-product.html", {"form": form})
    else:
        form = NewProductForm()
    return render(request, "inventory/create-product.html", {"form": form})
def product_info(request, pn):
    """Return the serialized product identified by part number *pn* as JSON."""
    return JsonResponse(Product.objects.get(pn=pn).serialize(), safe=False)
| 1,521 | 0 | 91 |
72ff98015af5b3f8f31a269e778f01cf72613909 | 4,336 | py | Python | utils/tif.py | anhp95/forest_attr_segment | 51a412efc6f78d3c7ce90da59b348a3cabc3cb09 | [
"MIT"
] | null | null | null | utils/tif.py | anhp95/forest_attr_segment | 51a412efc6f78d3c7ce90da59b348a3cabc3cb09 | [
"MIT"
] | null | null | null | utils/tif.py | anhp95/forest_attr_segment | 51a412efc6f78d3c7ce90da59b348a3cabc3cb09 | [
"MIT"
] | null | null | null | from osgeo import gdal
import glob
import os
import numpy as np
| 31.882353 | 159 | 0.65429 | from osgeo import gdal
import glob
import os
import numpy as np
def read_tif(path_):
    """Read band 1 of a GeoTIFF into a numpy array.

    Returns (array, metadata) where metadata holds the geotransform,
    projection WKT and raster dimensions needed by write_tif().
    """
    ds = gdal.Open(path_)
    # 65535 is treated as the nodata marker for these rasters —
    # TODO confirm this matches the source products.
    nodata_value = 65535
    ds.GetRasterBand(1).SetNoDataValue(nodata_value)
    band = ds.GetRasterBand(1)
    arr = band.ReadAsArray()
    [rows, cols] = arr.shape
    metadata = {
        "transform": ds.GetGeoTransform(),
        "prj": ds.GetProjection(),
        "rows": rows,
        "cols": cols,
    }
    return (arr, metadata)
def write_tif(out_arr, metadata, filename, nodata_value=-9999):
    """Write a 2-D array to a single-band Float32 GeoTIFF.

    *metadata* must contain the geotransform, projection and dimensions
    as produced by read_tif(); *nodata_value* is recorded on the band.
    """
    driver = gdal.GetDriverByName("GTiff")
    transfrom = metadata["transform"]
    prj = metadata["prj"]
    rows = metadata["rows"]
    cols = metadata["cols"]
    number_of_bands = 1
    band_type = gdal.GDT_Float32
    out_data = driver.Create(filename, cols, rows, number_of_bands, band_type)
    out_data.SetGeoTransform(transfrom)
    out_data.SetProjection(prj)
    out_data.GetRasterBand(1).WriteArray(out_arr)
    out_data.GetRasterBand(1).SetNoDataValue(nodata_value)
    # Flush and release the dataset so the file is fully written to disk.
    out_data.FlushCache()
    out_data = None
def resample(ref_file, input_file, output_file):
# open reference file and get resolution
reference = gdal.Open(ref_file, 0) # this opens the file in only reading mode
referenceTrans = reference.GetGeoTransform()
x_res = referenceTrans[1]
y_res = -referenceTrans[5] # make sure this value is positive
# call gdal Warp
kwargs = {"format": "GTiff", "xRes": x_res, "yRes": y_res}
ds = gdal.Warp(output_file, input_file, **kwargs)
return ds
def main_resample(ref_file, input_dir):
# ref_file = r"D:\co2_data\DL\large_img\sentinel\S2_RAW\s2l2\3\GRANULE\L2A_T53SQV_A015701_20180625T013653\IMG_DATA\R10m\T53SQV_20180625T013651_B02_10m.jp2"
# input_dir = r"D:\co2_data\DL\large_img\sentinel\S2_RAW\s2l2\2\GRANULE\L2A_T53SPV_A015701_20180625T013653\IMG_DATA\R20m"
input_list = glob.glob(os.path.join(input_dir, "*.jp2"))
for input_file in input_list:
output_file = input_file.replace("20m.jp2", "10m.tif")
resample(ref_file, input_file, output_file)
print(output_file)
def merge():
base_dir = r"D:\co2_data\DL\large_img\sentinel\S2_RAW\preprocessed_s2"
list_1 = glob.glob(os.path.join(base_dir, "2", "*.tif"))
list_2 = glob.glob(os.path.join(base_dir, "3", "*.tif"))
for tif_1, tif_2 in zip(list_1, list_2):
filename = tif_1[-11:]
outfile = os.path.join(base_dir, "merged", filename)
vrt = gdal.BuildVRT("merged.vrt", [tif_1, tif_2])
gdal.Translate(outfile, vrt, xRes=10, yRes=-10)
print(outfile)
vrt = None
def clip():
tono_shp = r"D:\SHP\tono\Tono.shp"
base_dir = r"D:\co2_data\DL\large_img\sentinel\S2_RAW\preprocessed_s2"
list_tif = glob.glob(os.path.join(base_dir, "merged", "*.tif"))
for tif in list_tif:
outfile = tif.replace("merged", "clip")
gdal_cmd = f"gdalwarp -srcnodata -9999 -dstnodata -9999 -cutline {tono_shp} -crop_to_cutline -dstalpha {tif} {outfile}"
os.system(gdal_cmd)
print(outfile)
def dn_to_ref_norm():
    """Convert clipped Sentinel-2 DN rasters to reflectance and min-max
    normalise the valid pixels, writing the results to the sibling
    ``norm`` directory.

    Sentinel-2 stores reflectance scaled by 1e4 (dn = 1e4 * reflectance);
    pixels <= 0 are treated as nodata and left untouched.
    """
    base_dir = r"D:\co2_data\DL\large_img\sentinel\S2_RAW\preprocessed_s2"
    list_tif = glob.glob(os.path.join(base_dir, "clip", "*.tif"))
    nodata = 0
    for tif in list_tif:
        arr, metadata = read_tif(tif)
        rows, cols = arr.shape
        ref_arr = arr.reshape(-1) / 1e4
        non_zeor_index = np.where(ref_arr > 0)[0]
        non_zeor_arr = ref_arr[ref_arr > nodata]
        # Min-max scale only the valid pixels; nodata stays as-is.
        norm_arr = (non_zeor_arr - np.min(non_zeor_arr)) / (
            np.max(non_zeor_arr) - np.min(non_zeor_arr)
        )
        ref_arr[non_zeor_index] = norm_arr
        write_tif(ref_arr.reshape(rows, cols), metadata, tif.replace("clip", "norm"))
        # BUG FIX: was print("tif") — printed the literal string instead of
        # the file that was just processed.
        print(tif)
def norm_l1_img(list_tif):
    """Min-max normalise each raster in *list_tif* (nodata pixels excluded
    from the min/max) and write it to the sibling ``preprocessed_clip``
    directory, skipping files that were already processed.
    """
    # CLEANUP: removed unused locals nodata_rgb / nodata_l2a.
    nodata = 0
    for tif in list_tif:
        preprocessed_file_path = tif.replace(r"clip", r"preprocessed_clip")
        if not os.path.exists(preprocessed_file_path):
            print(tif)
            arr, metadata = read_tif(tif)
            # NaNs are folded into the nodata value before scaling.
            arr = np.nan_to_num(arr, nan=nodata)
            valid = arr[arr != nodata]
            _max = valid.max()
            _min = valid.min()
            arr = (arr - _min) / (_max - _min)
            write_tif(arr, metadata, preprocessed_file_path)
| 4,080 | 0 | 184 |
75bcdef2b0e596ed34971c0c6c64a759413453a2 | 39 | py | Python | python/dlr/metadata.py | ylc/neo-ai-dlr | a8afcafbcc8c43b734faf092b2f4dd7e8fe40ea0 | [
"Apache-2.0"
] | 446 | 2019-01-24T02:04:17.000Z | 2022-03-16T13:45:32.000Z | python/dlr/metadata.py | ylc/neo-ai-dlr | a8afcafbcc8c43b734faf092b2f4dd7e8fe40ea0 | [
"Apache-2.0"
] | 179 | 2019-01-24T10:03:34.000Z | 2022-03-19T02:06:56.000Z | python/dlr/metadata.py | ylc/neo-ai-dlr | a8afcafbcc8c43b734faf092b2f4dd7e8fe40ea0 | [
"Apache-2.0"
] | 111 | 2019-01-24T20:51:45.000Z | 2022-02-18T06:22:40.000Z | NAME = ['DLRModel']
VERSION = "1.9.1"
| 9.75 | 19 | 0.564103 | NAME = ['DLRModel']
VERSION = "1.9.1"
| 0 | 0 | 0 |
0471a7c04e04c6ff309ba4131754c43b9d7d3c93 | 591 | py | Python | src/tests/test_example_task.py | kazqvaizer/arq-sqlalchemy-boilerplate | c14596ed358a061e6eb2a380f4bd962242b123f3 | [
"MIT"
] | 6 | 2021-12-20T14:49:14.000Z | 2022-03-21T14:32:49.000Z | src/tests/test_example_task.py | kazqvaizer/arq-sqlalchemy-boilerplate | c14596ed358a061e6eb2a380f4bd962242b123f3 | [
"MIT"
] | null | null | null | src/tests/test_example_task.py | kazqvaizer/arq-sqlalchemy-boilerplate | c14596ed358a061e6eb2a380f4bd962242b123f3 | [
"MIT"
] | null | null | null | import pytest
from sqlalchemy import func
from sqlalchemy.future import select
from app.models import ExampleModel
from app.tasks import example_task
pytestmark = pytest.mark.asyncio
| 24.625 | 81 | 0.756345 | import pytest
from sqlalchemy import func
from sqlalchemy.future import select
from app.models import ExampleModel
from app.tasks import example_task
pytestmark = pytest.mark.asyncio
async def test_creates_new_entry(session):
    """One run of example_task should persist exactly one ExampleModel row."""
    await example_task()
    count = (await session.execute(select(func.count(ExampleModel.id)))).scalar()
    assert count == 1
async def test_creates_new_entry_as_many_times_as_task_called(session):
    """Each invocation of example_task should add one row to the table."""
    runs = 3
    for _ in range(runs):
        await example_task()
    result = await session.execute(select(func.count(ExampleModel.id)))
    assert result.scalar() == runs
| 358 | 0 | 46 |
eab37f54fd7dbe0be73d3f145040820fb90e8486 | 1,529 | py | Python | features_fixer/scaler/abstract.py | LudwikBielczynski/features_fixer | 43114e3d986265a1e6e34644d3734a361d3fa926 | [
"MIT"
] | null | null | null | features_fixer/scaler/abstract.py | LudwikBielczynski/features_fixer | 43114e3d986265a1e6e34644d3734a361d3fa926 | [
"MIT"
] | null | null | null | features_fixer/scaler/abstract.py | LudwikBielczynski/features_fixer | 43114e3d986265a1e6e34644d3734a361d3fa926 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Optional
import pandas as pd
if TYPE_CHECKING:
from sklearn.base import TransformerMixin
| 28.849057 | 95 | 0.561805 | from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Optional
import pandas as pd
if TYPE_CHECKING:
from sklearn.base import TransformerMixin
class ScalerAbstract(ABC):
    """Template for scalers that operate in place on DataFrame columns.

    Concrete subclasses supply ``scaler`` (an sklearn-style transformer);
    ``transform``/``inverse_transform`` then fit/apply it to the selected
    columns, writing the results back into the input frame.
    """
    @property
    @abstractmethod
    def scaler(self) -> 'TransformerMixin':
        """The underlying transformer instance to fit and apply."""
        pass
    # TODO: Scale only numeric columns
    def _get_columns(self, df: pd.DataFrame, columns: Optional[list[str]] = None) -> list[str]:
        """Return the column selection, defaulting to every column of *df*."""
        if columns is None:
            columns = df.columns
        return columns
    def transform(self,
                  df: pd.DataFrame,
                  columns: Optional[list[str]] = None,
                  transpose: bool = False,
                  ) -> pd.DataFrame:
        """Fit ``self.scaler`` on the selected columns and scale them in place.

        When ``transpose`` is True the scaler is fitted/applied on the
        transposed selection (i.e. row-wise) and the result transposed
        back.  *df* is mutated and also returned.
        """
        columns = self._get_columns(df, columns)
        if transpose:
            self.scaler.fit(df[columns].T)
            df.loc[:, columns] = self.scaler.transform(df[columns].T).T
        else:
            self.scaler.fit(df[columns])
            df.loc[:, columns] = self.scaler.transform(df[columns])
        return df
    def inverse_transform(self,
                          df: pd.DataFrame,
                          columns: Optional[list[str]] = None,
                          transpose: bool = False,
                          ) -> pd.DataFrame:
        """Undo the scaling on the selected columns in place.

        Assumes ``self.scaler`` was already fitted (normally by a prior
        ``transform`` call).  *df* is mutated and returned.
        """
        columns = self._get_columns(df, columns)
        if transpose:
            df.loc[:, columns] = self.scaler.inverse_transform(df[columns].T).T
        else:
            df.loc[:, columns] = self.scaler.inverse_transform(df[columns])
        return df
| 1,154 | 187 | 23 |
d174b348a6a886e5ebfeb1998bd2eefa9286562d | 34,287 | py | Python | savu/plugins/plugin_tools.py | nghia-vo/Savu | 1cf7343c141224643b2e1fb2f05e74448bc4fd58 | [
"Apache-2.0"
] | null | null | null | savu/plugins/plugin_tools.py | nghia-vo/Savu | 1cf7343c141224643b2e1fb2f05e74448bc4fd58 | [
"Apache-2.0"
] | null | null | null | savu/plugins/plugin_tools.py | nghia-vo/Savu | 1cf7343c141224643b2e1fb2f05e74448bc4fd58 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: plugin_tools
:platform: Unix
:synopsis: Plugin tools
.. moduleauthor:: Jessica Verschoyle <scientificsoftware@diamond.ac.uk>
"""
import os
import copy
import json
import logging
from colorama import Fore
from collections import OrderedDict
import savu.plugins.utils as pu
from savu.data.meta_data import MetaData
import savu.plugins.docstring_parser as doc
import scripts.config_generator.parameter_utils as param_u
from savu.data.plugin_list import CitationInformation
logger = logging.getLogger("documentationLog")
class PluginParameters(object):
"""Save the parameters for the plugin and base classes to a
dictionary. The parameters are in yaml format inside the
define_parameter function. These are read and checked for problems.
"""
def populate_parameters(self, tools_list):
""" Set parameter definitions and default parameter values """
# set the parameter definitions
# populates the dictionary returned by self.get_param_definitions()
list(map(lambda tool_class:
self._set_parameter_definitions(tool_class), tools_list))
# set the default parameter values
# populates the dictionary returned by self.get_param_values()
self._populate_default_parameters()
def _populate_default_parameters(self):
"""
This method should populate all the required parameters with
default values. It is used for checking to see if parameter
values are appropriate
"""
p_defs = self.get_param_definitions()
self.set_docstring(self.get_doc())
self.parameters = \
OrderedDict([(k, v['default']) for k, v in p_defs.items()])
# parameters holds current values, this is edited outside of the
# tools class so default and dependency display values are updated here
self.update_dependent_defaults()
self.check_dependencies(self.parameters)
self._get_plugin().set_parameters(self.parameters)
def _set_parameters_this_instance(self, indices):
""" Determines the parameters for this instance of the plugin, in the
case of parameter tuning.
param np.ndarray indices: the index of the current value in the
parameter tuning list.
"""
dims = set(self.multi_params_dict.keys())
count = 0
for dim in dims:
info = self.multi_params_dict[dim]
name = info['label'].split('_param')[0]
self.parameters[name] = info['values'][indices[count]]
count += 1
def _set_parameter_definitions(self, tool_class):
"""Load the parameters for each base class, c, check the
dataset visibility, check data types, set dictionary values.
"""
param_info_dict = self._load_param_from_doc(tool_class)
if param_info_dict:
for p_name, p_value in param_info_dict.items():
if p_name in self.param.get_dictionary():
for k,v in p_value.items():
self.param[p_name][k] = v
else:
self.param.set(p_name, p_value)
self._check_param_defs(tool_class)
def _check_param_defs(self, tool_class):
"""Check the parameter definitions for errors
:param tool_class: tool_class to use for error message
"""
pdefs = self.param.get_dictionary()
# Remove ignored parameters
self._remove_ignored_params(pdefs)
# Check if the required keys are included
self._check_required_keys(pdefs, tool_class)
# Check that option values are valid
self._check_options(pdefs, tool_class)
# Check that the visibility is valid
self._check_visibility(pdefs, tool_class)
# Check that the dtype is valid
self._check_dtype(pdefs, tool_class)
# Use a display option to apply to dependent parameters later.
self._set_display(pdefs)
for k,v in pdefs.items():
# Change empty OrderedDict to dict (due to yaml loader)
if isinstance(v['default'], OrderedDict):
v['default'] = json.loads(json.dumps(v['default']))
# Change the string to an integer, float, list, str, dict
if not self.default_dependency_dict_exists(v):
v['default'] = pu._dumps(v['default'])
def _load_param_from_doc(self, tool_class):
"""Find the parameter information from the method docstring.
This is provided in a yaml format.
"""
# *** TO DO turn the dtype entry into a string
param_info_dict = None
if hasattr(tool_class, "define_parameters"):
yaml_text = tool_class.define_parameters.__doc__
if yaml_text and yaml_text.strip():
# If yaml_text is not None and not empty or consisting of spaces
param_info_dict = doc.load_yaml_doc(yaml_text)
if param_info_dict:
if not isinstance(param_info_dict, OrderedDict):
error_msg = (
f"The parameters have not been read "
f"in correctly for {tool_class.__name__}"
)
raise Exception(error_msg)
return param_info_dict
def check_for_default(self, mod_param, mod_value):
"""If the value is changed to be 'default', then set the original
default value. If the default contains a dictionary, then search
for the correct value
"""
param_info_dict = self.param.get_dictionary()
if str(mod_value) == "default":
if self.default_dependency_dict_exists(param_info_dict[mod_param]):
mod_value = self.get_dependent_default(param_info_dict[mod_param])
else:
mod_value = param_info_dict[mod_param]["default"]
return mod_value
def _check_required_keys(self, param_info_dict, tool_class):
"""Check the four keys ['dtype', 'description', 'visibility',
'default'] are included inside the dictionary given for each
parameter.
"""
required_keys = ["dtype", "description", "visibility", "default"]
missing_keys = False
missing_key_dict = {}
for p_key, p in param_info_dict.items():
all_keys = p.keys()
if p.get("visibility"):
if p.get("visibility") == "hidden":
# For hidden keys, only require a default value key
required_keys = ["default"]
else:
required_keys = ["visibility"]
if not all(d in all_keys for d in required_keys):
missing_key_dict[p_key] = set(required_keys) - set(all_keys)
missing_keys = True
if missing_keys:
print(
f"{tool_class.__name__} doesn't contain all of the "
f"required keys."
)
for param, missing_values in missing_key_dict.items():
print(f"The missing required keys for '{param}' are:")
print(*missing_values, sep=", ")
logger.error(f"ERROR: Missing keys inside {tool_class.__name__}")
raise Exception(f"Please edit {tool_class.__name__}")
def _check_dtype(self, param_info_dict, tool_class):
"""
Make sure that the dtype input is valid and that the default value is
compatible
"""
plugin_error_str = f"There was an error with {tool_class.__name__}"
for p_key, p_dict in param_info_dict.items():
dtype = p_dict.get("dtype")
if dtype:
dtype = dtype.replace(" ", "")
try:
pvalid, error_str = param_u.is_valid_dtype(dtype)
if not pvalid:
raise Exception("Invalid parameter definition %s:\n %s"
% (p_key, error_str))
except IndexError:
print(plugin_error_str)
if not self.default_dependency_dict_exists(p_dict):
default_value = pu._dumps(p_dict["default"])
pvalid, error_str = param_u.is_valid(p_key, default_value,
p_dict, check=True)
if not pvalid:
raise Exception(f"{plugin_error_str}: {error_str}")
def _check_visibility(self, param_info_dict, tool_class):
"""Make sure that the visibility choice is valid"""
visibility_levels = [
"basic",
"intermediate",
"advanced",
"datasets",
"hidden",
]
visibility_valid = True
for p_key, p in param_info_dict.items():
# Check dataset visibility level is correct
self._check_data_keys(p_key, p)
# Check that the data types are valid choices
if p["visibility"] not in visibility_levels:
print(
f"Inside {tool_class.__name__} the {p_key}"
f" parameter is assigned an invalid visibility "
f"level '{p['visibility']}'"
)
print("Valid choices are:")
print(*visibility_levels, sep=", ")
visibility_valid = False
if not visibility_valid:
raise Exception(
f"Please change the file for {tool_class.__name__}"
)
def _check_data_keys(self, p_key, p):
"""Make sure that the visibility of dataset parameters is 'datasets'
so that the display order is unchanged.
"""
datasets = ["in_datasets", "out_datasets"]
exceptions = ["hidden"]
if p_key in datasets:
if p["visibility"] != "datasets" \
and p["visibility"] not in exceptions:
p["visibility"] = "datasets"
def _check_options(self, param_info_dict, tool_class):
"""Make sure that option verbose descriptions match the actual
options
"""
options_valid = True
for p_key, p in param_info_dict.items():
desc = param_info_dict[p_key].get("description")
# desc not present for hidden keys
if desc and isinstance(desc, dict):
options = param_info_dict[p_key].get("options")
option_desc = desc.get("options")
if options and option_desc:
# Check that there is not an invalid option description
# inside the option list.
invalid_option = [
opt for opt in option_desc if opt not in options
]
if invalid_option:
options_valid = False
break
if options_valid is False:
raise Exception(
f"Please check the parameter options for {tool_class.__name__}"
)
def _remove_ignored_params(self, param_info_dict):
"""Remove any parameters with visibility = ignore"""
p_dict_copy = param_info_dict.copy()
for p_key, p in p_dict_copy.items():
visibility = param_info_dict[p_key].get("visibility")
if visibility == "ignore":
del param_info_dict[p_key]
def _set_display(self, param_info_dict):
"""Initially, set all of the parameters to display 'on'
This is later altered when dependent parameters need to be shown
or hidden
"""
for k, v in param_info_dict.items():
v["display"] = "on"
def update_dependent_defaults(self):
"""
Fix default values for parameters that have a dependency on the value
of another parameter, and are in dictionary form.
"""
for name, pdict in self.get_param_definitions().items():
if self.default_dependency_dict_exists(pdict):
self.parameters[name] = self.get_dependent_default(pdict)
def default_dependency_dict_exists(self, pdict):
""" Check that the parameter default value is in a format with
the parent parameter string and the dependent value
e.g. default:
algorithm: FGP
and not an actual default value to be set
e.g. default: {'2':5}
:param pdict: The parameter definition dictionary
:return: True if the default dictionary contains the
correct format
"""
if pdict["default"] and isinstance(pdict["default"], dict):
if "dict" not in pdict["dtype"]:
return True
else:
parent_name = list(pdict['default'].keys())[0]
if parent_name in self.get_param_definitions():
return True
return False
    def get_dependent_default(self, child):
        """
        Recursive function to replace a dictionary of default parameters with
        a single value.
        Parameters
        ----------
        child : dict
            The parameter definition dictionary of the dependent parameter.
        Returns
        -------
        value
            The correct default value based on the current value of the
            dependency, or parent, parameter.
        """
        pdefs = self.get_param_definitions()
        # The dependency format has a single key: the parent parameter name.
        parent_name = list(child['default'].keys())[0]
        # does_exist is defined elsewhere in the project — presumably it
        # looks up (or raises on a missing) parent definition; TODO confirm.
        parent = self.does_exist(parent_name, pdefs)
        # if the parent default is a dictionary then apply the function
        # recursively
        # NOTE(review): the recursive call passes parent['default'] where
        # the parameter is documented as a full definition dict (which
        # would make child['default'] fail inside the recursion) — looks
        # like it should be self.get_dependent_default(parent); verify.
        if isinstance(parent['default'], dict):
            self.parameters[parent_name] = \
                self.get_dependent_default(parent['default'])
        return child['default'][parent_name][self.parameters[parent_name]]
def warn_dependents(self, mod_param, mod_value):
"""
Find dependents of a modified parameter # complete the docstring
"""
# find dependents
for name, pdict in self.get_param_definitions().items():
if self.default_dependency_dict_exists(pdict):
default = pdict['default']
parent_name = list(default.keys())[0]
if parent_name == mod_param:
if mod_value in default[parent_name].keys():
value = default[parent_name][mod_value]
desc = pdict['description']
self.make_recommendation(
name, desc, parent_name, value)
def check_dependencies(self, parameters):
"""Determine which parameter values are dependent on a parent
value and whether they should be hidden or shown
"""
param_info_dict = self.param.get_dictionary()
dep_list = {
k: v["dependency"]
for k, v in param_info_dict.items()
if "dependency" in v
}
for p_name, dependency in dep_list.items():
if isinstance(dependency, OrderedDict):
# There is a dictionary of dependency values
parent_param_name = list(dependency.keys())[0]
# The choices which must be in the parent value
parent_choice_list = dependency[parent_param_name]
if parent_param_name in parameters:
"""Check that the parameter is in the current plug in
This is relevant for base classes which have several
dependent classes
"""
parent_value = parameters[parent_param_name]
if str(parent_value) in parent_choice_list:
param_info_dict[p_name]["display"] = "on"
else:
param_info_dict[p_name]["display"] = "off"
else:
if dependency in parameters:
parent_value = parameters[dependency]
if parent_value is None or str(parent_value) == "None":
param_info_dict[p_name]["display"] = "off"
else:
param_info_dict[p_name]["display"] = "on"
def set_plugin_list_parameters(self, input_parameters):
"""
This method is called after the plugin has been created by the
pipeline framework. It replaces ``self.parameters``
default values with those given in the input process list. It
checks for multi parameter strings, eg. 57;68;56;
:param dict input_parameters: A dictionary of the input parameters
for this plugin, or None if no customisation is required.
"""
for key in input_parameters.keys():
if key in self.parameters.keys():
new_value = input_parameters[key]
self.__check_multi_params(
self.parameters, new_value, key
)
else:
error = (
f"Parameter '{key}' is not valid for plugin "
f"{self.plugin_class.name}. \nTry opening and re-saving "
f"the process list in the configurator to auto remove "
f"\nobsolete parameters."
)
raise ValueError(error)
def __check_multi_params(self, parameters, value, key):
"""
Convert parameter value to a list if it uses parameter tuning
and set associated parameters, so the framework knows the new size
of the data and which plugins to re-run.
:param parameters: Dictionary of parameters and current values
:param value: Value to set parameter to
:param key: Parameter name
:return:
"""
if param_u.is_multi_param(key, value):
value, error_str = pu.convert_multi_params(key, value)
if not error_str:
parameters[key] = value
label = key + "_params." + type(value[0]).__name__
self.alter_multi_params_dict(
len(self.get_multi_params_dict()),
{"label": label, "values": value},
)
self.append_extra_dims(len(value))
else:
parameters[key] = value
def _get_expand_dict(self, preview, expand_dim):
"""Create dict for expand syntax
:param preview: Preview parameter value
:param expand_dim: Number of dimensions to return dict for
:return: dict
"""
expand_dict = {}
preview_val = pu._dumps(preview)
if not preview_val:
# In the case that there is an empty dict, display the default
preview_val = []
if isinstance( preview_val, dict):
for key, prev_list in preview_val.items():
expand_dict[key] = self._get_expand_dict(prev_list, expand_dim)
return expand_dict
elif isinstance(preview_val, list):
if expand_dim == "all":
expand_dict = \
self._output_all_dimensions(preview_val,
self._get_dimensions(preview_val))
else:
pu.check_valid_dimension(expand_dim, preview_val)
dim_key = f"dim{expand_dim}"
expand_dict[dim_key] = \
self._dim_slice_output(preview_val, expand_dim)
else:
raise ValueError("This preview value was not a recognised list "
"or dictionary. This expand command currenty "
"only works with those two data type.")
return expand_dict
def _get_dimensions(self, preview_list):
"""
:param preview_list: The preview parameter list
:return: Dimensions to display
"""
return 1 if not preview_list else len(preview_list)
def _output_all_dimensions(self, preview_list, dims):
"""Compile output string lines for all dimensions
:param preview_list: The preview parameter list
:param dims: Number of dimensions to display
:return: dict
"""
prev_dict = {}
for dim in range(1, dims + 1):
dim_key = f"dim{dim}"
prev_dict[dim_key] = self._dim_slice_output(preview_list, dim)
return prev_dict
def _dim_slice_output(self, preview_list, dim):
"""If there are multiple values in list format
Only save the values for the dimensions chosen
:param preview_list: The preview parameter list
:param dim: dimension to return the slice notation dictionary for
:return slice notation dictionary
"""
if not preview_list:
# If empty
preview_display_value = ":"
else:
preview_display_value = preview_list[dim - 1]
prev_val = self._set_all_syntax(preview_display_value)
return self._get_slice_notation_dict(prev_val)
def _get_slice_notation_dict(self, val):
"""Create a dict for slice notation information,
start:stop:step (and chunk if provided)
:param val: The list value in slice notation
:return: dictionary of slice notation
"""
import itertools
basic_slice_keys = ["start", "stop", "step"]
all_slice_keys = [*basic_slice_keys, "chunk"]
slice_dict = {}
if pu.is_slice_notation(val):
val_list = val.split(":")
if len(val_list) < 3:
# Make sure the start stop step slice keys are always shown,
# even when blank
val_list.append("")
for slice_name, v in zip(all_slice_keys, val_list):
# Only print up to the shortest list.
# (Only show the chunk value if it is in val_list)
slice_dict[slice_name] = v
else:
val_list = [val]
for slice_name, v in itertools.zip_longest(
basic_slice_keys, val_list, fillvalue=""
):
slice_dict[slice_name] = v
return slice_dict
def _set_all_syntax(self, val, replacement_str=""):
"""Remove additional spaces from val, replace colon when 'all'
data is selected
:param val: Slice notation value
:param replacement_str: String to replace ':' with
:return:
"""
if isinstance(val, str):
if pu.is_slice_notation(val):
if val == ":":
val = replacement_str
else:
val = val.strip()
else:
val = val.strip()
return val
    def get_multi_params_dict(self):
        """ Get the multi parameter dictionary.

        Maps a tuning dimension to its label/values info, as populated
        by the parameter-tuning machinery.
        """
        return self.multi_params_dict
    def get_extra_dims(self):
        """ Get the extra dimensions.

        List of extra data dimensions introduced by parameter tuning.
        """
        return self.extra_dims
"""
@dataclass
class Parameter:
''' Descriptor of Parameter Information for plugins
'''
visibility: int
datatype: specific_type
description: str
default: int
Options: Optional[[str]]
dependency: Optional[]
def _get_param(self):
param_dict = {}
param_dict['visibility'] = self.visibility
param_dict['type'] = self.dtype
param_dict['description'] = self.description
# and the remaining keys
return param_dict
"""
class PluginCitations(object):
"""Get this citation dictionary so get_dictionary of the metadata type
should return a dictionary of all the citation info as taken from
docstring
"""
def set_cite(self, tools_list):
"""Set the citations for each of the tools classes
:param tools_list: List containing tool classes of parent plugins
"""
list(
map(
lambda tool_class: self._set_plugin_citations(tool_class),
tools_list
)
)
    def _set_plugin_citations(self, tool_class):
        """ Load the parameters for each base class and set values

        Parses the citation docstrings of *tool_class*; every citation
        that passes key validation is stored in self.cite under its
        name, otherwise a warning is printed and it is skipped.
        """
        citations = self._load_cite_from_doc(tool_class)
        if citations:
            for citation in citations.values():
                if self._citation_keys_valid(citation, tool_class):
                    new_citation = CitationInformation(**citation)
                    self.cite.set(new_citation.name, new_citation)
                else:
                    print(f"The citation for {tool_class.__name__} "
                          f"was not saved.")
def _citation_keys_valid(self, new_citation, tool_class):
"""Check that required citation keys are present. Return false if
required keys are missing
"""
required_keys = ["description"]
# Inside the fresnel filter there is only a description
citation_keys = [k for k in new_citation.keys()]
# Check that all of the required keys are contained inside the
# citation definition
check_keys = all(item in citation_keys for item in required_keys)
citation_keys_valid = False if check_keys is False else True
all_keys = [
"short_name_article",
"description",
"bibtex",
"endnote",
"doi",
"dependency",
]
# Keys which are not used
additional_keys = [k for k in citation_keys if k not in all_keys]
if additional_keys:
print(f"Please only use the following keys inside the citation"
f" definition for {tool_class.__name__}:")
print(*all_keys, sep=", ")
print("The incorrect keys used:", additional_keys)
return citation_keys_valid
def _load_cite_from_doc(self, tool_class):
"""Find the citation information from the method docstring.
This is provided in a yaml format.
:param tool_class: Tool to retrieve citation docstring from
:return: All citations from this tool class
"""
all_c = OrderedDict()
# Seperate the citation methods. __dict__ returns instance attributes.
citation_methods = {key: value
for key, value in tool_class.__dict__.items()
if key.startswith('citation')}
for c_method_name, c_method in citation_methods.items():
yaml_text = c_method.__doc__
if yaml_text is not None:
yaml_text = self.seperate_description(yaml_text)
current_citation = doc.load_yaml_doc(yaml_text)
if not isinstance(current_citation, OrderedDict):
print(f"The citation information has not been read in "
f"correctly for {tool_class.__name__}.")
else:
all_c[c_method_name] = current_citation
return all_c
    def seperate_description(self, yaml_text):
        """Change the format of the docstring to retain new lines for the
        endnote and bibtex and create a key for the description so that
        it be read as a yaml file

        :param yaml_text: Raw citation docstring text
        :return: Reformatted yaml text
        """
        # Everything before the 'bibtex:' marker is the free-text
        # description; collapse its newlines and label it as a yaml key.
        description = doc.remove_new_lines(yaml_text.partition("bibtex:")[0])
        desc_str = " description:" + description
        # Text between 'bibtex:' and 'endnote:' markers.
        bibtex_text = \
            yaml_text.partition("bibtex:")[2].partition("endnote:")[0]
        # Text after the 'endnote:' marker.
        end_text = \
            yaml_text.partition("bibtex:")[2].partition("endnote:")[2]
        # Re-emit each present section as a yaml literal block ('|') so
        # its internal newlines survive yaml parsing.
        if bibtex_text and end_text:
            final_str = desc_str + '\n        bibtex: |' + bibtex_text \
                        + 'endnote: |' + end_text
        elif end_text:
            final_str = desc_str + '\n        endnote: |' + end_text
        elif bibtex_text:
            final_str = desc_str + '\n        bibtex: |' + bibtex_text
        else:
            final_str = desc_str
        return final_str
class PluginDocumentation(object):
    """Get this documentation dictionary so get_dictionary of
    the metadata type should return a dictionary of all the
    documentation details taken from docstring
    """
    def set_warn(self, tools_list):
        """Remove new lines and save config warnings for the child tools
        class only.

        :param tools_list: tools classes; only the last (child) entry's
            config_warn docstring is used
        :return: cleaned warning string (or None when no docstring)
        """
        config_str = tools_list[-1].config_warn.__doc__
        if config_str and "\n\n" in config_str:
            # Separate multiple warnings with two new lines \n\n
            config_warn_list = [doc.remove_new_lines(l)
                                for l in config_str.split("\n\n")]
            config_str = '\n'.join(config_warn_list)
        return config_str
    def set_doc_link(self):
        """If there is a restructured text documentation file inside the
        doc/source/documentation folder, then save the link to the page.
        """
        # determine Savu base path
        savu_base_path = \
            os.path.dirname(os.path.realpath(__file__)).split("savu")[0]
        # Locate documentation file
        doc_folder = savu_base_path + "doc/source/documentation"
        # Mirror the plugin's module path under the documentation tree.
        module_path = \
            self.plugin_class.__module__.replace(".", "/").replace("savu", "")
        file_ = module_path + "_doc"
        file_name = file_ + ".rst"
        file_path = doc_folder + file_name
        sphinx_link = 'https://savu.readthedocs.io/en/latest/' \
                      'documentation' + file_
        # Only record the link when the .rst file actually exists.
        if os.path.isfile(file_path):
            self.doc.set("documentation_link", sphinx_link)
class PluginTools(PluginParameters, PluginCitations, PluginDocumentation):
    """Holds all of the parameter, citation and documentation information
    for one plugin class - cls"""
    def _find_tools(self):
        """Using the method resolution order, find base class tools

        Walks the plugin's MRO from base to child and collects the
        matching '<module>_tools' class for each level that has one.
        """
        tool_list = []
        for tool_class in self.plugin_class.__class__.__mro__[::-1]:
            plugin_tools_id = tool_class.__module__ + "_tools"
            p_tools = pu.get_tools_class(plugin_tools_id)
            if p_tools:
                tool_list.append(p_tools)
        return tool_list
    def _set_tools_data(self):
        """Populate the parameters, citations and documentation
        with information from all of the tools classes
        """
        self.populate_parameters(self.tools_list)
        self.set_cite(self.tools_list)
        self.set_doc(self.tools_list)
    def get_param_definitions(self):
        """
        Returns
        -------
        dict
            Original parameter definitions read from tools file.
        """
        return self.param.get_dictionary()
    def get_param_values(self):
        """
        Returns
        -------
        dict
            Plugin parameter values for this instance.
        """
        return self.parameters
| 38.9625 | 85 | 0.593082 | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: plugin_tools
:platform: Unix
:synopsis: Plugin tools
.. moduleauthor:: Jessica Verschoyle <scientificsoftware@diamond.ac.uk>
"""
import os
import copy
import json
import logging
from colorama import Fore
from collections import OrderedDict
import savu.plugins.utils as pu
from savu.data.meta_data import MetaData
import savu.plugins.docstring_parser as doc
import scripts.config_generator.parameter_utils as param_u
from savu.data.plugin_list import CitationInformation
logger = logging.getLogger("documentationLog")
class PluginParameters(object):
"""Save the parameters for the plugin and base classes to a
dictionary. The parameters are in yaml format inside the
define_parameter function. These are read and checked for problems.
"""
    def __init__(self):
        """Initialise the containers used by the parameter machinery."""
        super(PluginParameters, self).__init__()
        # Ordered parameter definitions read from the tools docstrings.
        self.param = MetaData(ordered=True)
        # info/warn/synopsis text extracted from docstrings.
        self.docstring_info = {}
        # Current parameter values (name -> value).
        self.parameters = {}
        # Parameter-tuning info keyed by tuning dimension.
        self.multi_params_dict = {}
        # Extra data dimensions introduced by parameter tuning.
        self.extra_dims = []
def populate_parameters(self, tools_list):
""" Set parameter definitions and default parameter values """
# set the parameter definitions
# populates the dictionary returned by self.get_param_definitions()
list(map(lambda tool_class:
self._set_parameter_definitions(tool_class), tools_list))
# set the default parameter values
# populates the dictionary returned by self.get_param_values()
self._populate_default_parameters()
def initialise(self, params):
# Override default parameter values with plugin list entries
self.set_plugin_list_parameters(copy.deepcopy(params))
self._get_plugin().set_parameters(self.parameters)
def _populate_default_parameters(self):
"""
This method should populate all the required parameters with
default values. It is used for checking to see if parameter
values are appropriate
"""
p_defs = self.get_param_definitions()
self.set_docstring(self.get_doc())
self.parameters = \
OrderedDict([(k, v['default']) for k, v in p_defs.items()])
# parameters holds current values, this is edited outside of the
# tools class so default and dependency display values are updated here
self.update_dependent_defaults()
self.check_dependencies(self.parameters)
self._get_plugin().set_parameters(self.parameters)
def set_docstring(self, doc_str):
self.docstring_info['info'] = doc_str.get('verbose')
self.docstring_info['warn'] = doc_str.get('warn')
self.docstring_info['documentation_link'] = doc_str.get('documentation_link')
self.docstring_info['synopsis'] = doc.find_synopsis(self._get_plugin())
def _set_parameters_this_instance(self, indices):
""" Determines the parameters for this instance of the plugin, in the
case of parameter tuning.
param np.ndarray indices: the index of the current value in the
parameter tuning list.
"""
dims = set(self.multi_params_dict.keys())
count = 0
for dim in dims:
info = self.multi_params_dict[dim]
name = info['label'].split('_param')[0]
self.parameters[name] = info['values'][indices[count]]
count += 1
def _set_parameter_definitions(self, tool_class):
"""Load the parameters for each base class, c, check the
dataset visibility, check data types, set dictionary values.
"""
param_info_dict = self._load_param_from_doc(tool_class)
if param_info_dict:
for p_name, p_value in param_info_dict.items():
if p_name in self.param.get_dictionary():
for k,v in p_value.items():
self.param[p_name][k] = v
else:
self.param.set(p_name, p_value)
self._check_param_defs(tool_class)
def _check_param_defs(self, tool_class):
"""Check the parameter definitions for errors
:param tool_class: tool_class to use for error message
"""
pdefs = self.param.get_dictionary()
# Remove ignored parameters
self._remove_ignored_params(pdefs)
# Check if the required keys are included
self._check_required_keys(pdefs, tool_class)
# Check that option values are valid
self._check_options(pdefs, tool_class)
# Check that the visibility is valid
self._check_visibility(pdefs, tool_class)
# Check that the dtype is valid
self._check_dtype(pdefs, tool_class)
# Use a display option to apply to dependent parameters later.
self._set_display(pdefs)
for k,v in pdefs.items():
# Change empty OrderedDict to dict (due to yaml loader)
if isinstance(v['default'], OrderedDict):
v['default'] = json.loads(json.dumps(v['default']))
# Change the string to an integer, float, list, str, dict
if not self.default_dependency_dict_exists(v):
v['default'] = pu._dumps(v['default'])
def _load_param_from_doc(self, tool_class):
"""Find the parameter information from the method docstring.
This is provided in a yaml format.
"""
# *** TO DO turn the dtype entry into a string
param_info_dict = None
if hasattr(tool_class, "define_parameters"):
yaml_text = tool_class.define_parameters.__doc__
if yaml_text and yaml_text.strip():
# If yaml_text is not None and not empty or consisting of spaces
param_info_dict = doc.load_yaml_doc(yaml_text)
if param_info_dict:
if not isinstance(param_info_dict, OrderedDict):
error_msg = (
f"The parameters have not been read "
f"in correctly for {tool_class.__name__}"
)
raise Exception(error_msg)
return param_info_dict
def check_for_default(self, mod_param, mod_value):
"""If the value is changed to be 'default', then set the original
default value. If the default contains a dictionary, then search
for the correct value
"""
param_info_dict = self.param.get_dictionary()
if str(mod_value) == "default":
if self.default_dependency_dict_exists(param_info_dict[mod_param]):
mod_value = self.get_dependent_default(param_info_dict[mod_param])
else:
mod_value = param_info_dict[mod_param]["default"]
return mod_value
def _check_required_keys(self, param_info_dict, tool_class):
"""Check the four keys ['dtype', 'description', 'visibility',
'default'] are included inside the dictionary given for each
parameter.
"""
required_keys = ["dtype", "description", "visibility", "default"]
missing_keys = False
missing_key_dict = {}
for p_key, p in param_info_dict.items():
all_keys = p.keys()
if p.get("visibility"):
if p.get("visibility") == "hidden":
# For hidden keys, only require a default value key
required_keys = ["default"]
else:
required_keys = ["visibility"]
if not all(d in all_keys for d in required_keys):
missing_key_dict[p_key] = set(required_keys) - set(all_keys)
missing_keys = True
if missing_keys:
print(
f"{tool_class.__name__} doesn't contain all of the "
f"required keys."
)
for param, missing_values in missing_key_dict.items():
print(f"The missing required keys for '{param}' are:")
print(*missing_values, sep=", ")
logger.error(f"ERROR: Missing keys inside {tool_class.__name__}")
raise Exception(f"Please edit {tool_class.__name__}")
    def _check_dtype(self, param_info_dict, tool_class):
        """
        Make sure that the dtype input is valid and that the default value is
        compatible

        :param param_info_dict: parameter definitions to validate
        :param tool_class: tools class used in error messages
        :raises Exception: for an invalid dtype or an incompatible default
        """
        plugin_error_str = f"There was an error with {tool_class.__name__}"
        for p_key, p_dict in param_info_dict.items():
            dtype = p_dict.get("dtype")
            if dtype:
                # dtype strings may contain spaces, e.g. 'list[int, int]'
                dtype = dtype.replace(" ", "")
                try:
                    pvalid, error_str = param_u.is_valid_dtype(dtype)
                    if not pvalid:
                        raise Exception("Invalid parameter definition %s:\n %s"
                                        % (p_key, error_str))
                except IndexError:
                    # NOTE(review): only prints — a dtype string that makes
                    # is_valid_dtype raise IndexError is effectively
                    # skipped; confirm this is intentional
                    print(plugin_error_str)
            # Dependency-form defaults are resolved later, so only literal
            # defaults are type-checked here
            if not self.default_dependency_dict_exists(p_dict):
                default_value = pu._dumps(p_dict["default"])
                pvalid, error_str = param_u.is_valid(p_key, default_value,
                                                     p_dict, check=True)
                if not pvalid:
                    raise Exception(f"{plugin_error_str}: {error_str}")
def _check_visibility(self, param_info_dict, tool_class):
"""Make sure that the visibility choice is valid"""
visibility_levels = [
"basic",
"intermediate",
"advanced",
"datasets",
"hidden",
]
visibility_valid = True
for p_key, p in param_info_dict.items():
# Check dataset visibility level is correct
self._check_data_keys(p_key, p)
# Check that the data types are valid choices
if p["visibility"] not in visibility_levels:
print(
f"Inside {tool_class.__name__} the {p_key}"
f" parameter is assigned an invalid visibility "
f"level '{p['visibility']}'"
)
print("Valid choices are:")
print(*visibility_levels, sep=", ")
visibility_valid = False
if not visibility_valid:
raise Exception(
f"Please change the file for {tool_class.__name__}"
)
def _check_data_keys(self, p_key, p):
"""Make sure that the visibility of dataset parameters is 'datasets'
so that the display order is unchanged.
"""
datasets = ["in_datasets", "out_datasets"]
exceptions = ["hidden"]
if p_key in datasets:
if p["visibility"] != "datasets" \
and p["visibility"] not in exceptions:
p["visibility"] = "datasets"
def _check_options(self, param_info_dict, tool_class):
"""Make sure that option verbose descriptions match the actual
options
"""
options_valid = True
for p_key, p in param_info_dict.items():
desc = param_info_dict[p_key].get("description")
# desc not present for hidden keys
if desc and isinstance(desc, dict):
options = param_info_dict[p_key].get("options")
option_desc = desc.get("options")
if options and option_desc:
# Check that there is not an invalid option description
# inside the option list.
invalid_option = [
opt for opt in option_desc if opt not in options
]
if invalid_option:
options_valid = False
break
if options_valid is False:
raise Exception(
f"Please check the parameter options for {tool_class.__name__}"
)
def _remove_ignored_params(self, param_info_dict):
"""Remove any parameters with visibility = ignore"""
p_dict_copy = param_info_dict.copy()
for p_key, p in p_dict_copy.items():
visibility = param_info_dict[p_key].get("visibility")
if visibility == "ignore":
del param_info_dict[p_key]
def _set_display(self, param_info_dict):
"""Initially, set all of the parameters to display 'on'
This is later altered when dependent parameters need to be shown
or hidden
"""
for k, v in param_info_dict.items():
v["display"] = "on"
def update_dependent_defaults(self):
"""
Fix default values for parameters that have a dependency on the value
of another parameter, and are in dictionary form.
"""
for name, pdict in self.get_param_definitions().items():
if self.default_dependency_dict_exists(pdict):
self.parameters[name] = self.get_dependent_default(pdict)
def default_dependency_dict_exists(self, pdict):
""" Check that the parameter default value is in a format with
the parent parameter string and the dependent value
e.g. default:
algorithm: FGP
and not an actual default value to be set
e.g. default: {'2':5}
:param pdict: The parameter definition dictionary
:return: True if the default dictionary contains the
correct format
"""
if pdict["default"] and isinstance(pdict["default"], dict):
if "dict" not in pdict["dtype"]:
return True
else:
parent_name = list(pdict['default'].keys())[0]
if parent_name in self.get_param_definitions():
return True
return False
def does_exist(self, key, ddict):
if not key in ddict:
raise Exception("The dependency %s does not exist" % key)
return ddict[key]
def get_dependent_default(self, child):
"""
Recursive function to replace a dictionary of default parameters with
a single value.
Parameters
----------
child : dict
The parameter definition dictionary of the dependent parameter.
Returns1
-------
value
The correct default value based on the current value of the
dependency, or parent, parameter.
"""
pdefs = self.get_param_definitions()
parent_name = list(child['default'].keys())[0]
parent = self.does_exist(parent_name, pdefs)
# if the parent default is a dictionary then apply the function
# recursively
if isinstance(parent['default'], dict):
self.parameters[parent_name] = \
self.get_dependent_default(parent['default'])
return child['default'][parent_name][self.parameters[parent_name]]
def warn_dependents(self, mod_param, mod_value):
"""
Find dependents of a modified parameter # complete the docstring
"""
# find dependents
for name, pdict in self.get_param_definitions().items():
if self.default_dependency_dict_exists(pdict):
default = pdict['default']
parent_name = list(default.keys())[0]
if parent_name == mod_param:
if mod_value in default[parent_name].keys():
value = default[parent_name][mod_value]
desc = pdict['description']
self.make_recommendation(
name, desc, parent_name, value)
def make_recommendation(self, child_name, desc, parent_name, value):
if isinstance(desc, dict):
desc["range"] = f"The recommended value with the chosen " \
f"{parent_name} would be {value}"
recommendation = f"It's recommended that you update {child_name}"\
f" to {value}"
print(Fore.RED + recommendation + Fore.RESET)
def check_dependencies(self, parameters):
"""Determine which parameter values are dependent on a parent
value and whether they should be hidden or shown
"""
param_info_dict = self.param.get_dictionary()
dep_list = {
k: v["dependency"]
for k, v in param_info_dict.items()
if "dependency" in v
}
for p_name, dependency in dep_list.items():
if isinstance(dependency, OrderedDict):
# There is a dictionary of dependency values
parent_param_name = list(dependency.keys())[0]
# The choices which must be in the parent value
parent_choice_list = dependency[parent_param_name]
if parent_param_name in parameters:
"""Check that the parameter is in the current plug in
This is relevant for base classes which have several
dependent classes
"""
parent_value = parameters[parent_param_name]
if str(parent_value) in parent_choice_list:
param_info_dict[p_name]["display"] = "on"
else:
param_info_dict[p_name]["display"] = "off"
else:
if dependency in parameters:
parent_value = parameters[dependency]
if parent_value is None or str(parent_value) == "None":
param_info_dict[p_name]["display"] = "off"
else:
param_info_dict[p_name]["display"] = "on"
def set_plugin_list_parameters(self, input_parameters):
"""
This method is called after the plugin has been created by the
pipeline framework. It replaces ``self.parameters``
default values with those given in the input process list. It
checks for multi parameter strings, eg. 57;68;56;
:param dict input_parameters: A dictionary of the input parameters
for this plugin, or None if no customisation is required.
"""
for key in input_parameters.keys():
if key in self.parameters.keys():
new_value = input_parameters[key]
self.__check_multi_params(
self.parameters, new_value, key
)
else:
error = (
f"Parameter '{key}' is not valid for plugin "
f"{self.plugin_class.name}. \nTry opening and re-saving "
f"the process list in the configurator to auto remove "
f"\nobsolete parameters."
)
raise ValueError(error)
def __check_multi_params(self, parameters, value, key):
"""
Convert parameter value to a list if it uses parameter tuning
and set associated parameters, so the framework knows the new size
of the data and which plugins to re-run.
:param parameters: Dictionary of parameters and current values
:param value: Value to set parameter to
:param key: Parameter name
:return:
"""
if param_u.is_multi_param(key, value):
value, error_str = pu.convert_multi_params(key, value)
if not error_str:
parameters[key] = value
label = key + "_params." + type(value[0]).__name__
self.alter_multi_params_dict(
len(self.get_multi_params_dict()),
{"label": label, "values": value},
)
self.append_extra_dims(len(value))
else:
parameters[key] = value
def _get_expand_dict(self, preview, expand_dim):
"""Create dict for expand syntax
:param preview: Preview parameter value
:param expand_dim: Number of dimensions to return dict for
:return: dict
"""
expand_dict = {}
preview_val = pu._dumps(preview)
if not preview_val:
# In the case that there is an empty dict, display the default
preview_val = []
if isinstance( preview_val, dict):
for key, prev_list in preview_val.items():
expand_dict[key] = self._get_expand_dict(prev_list, expand_dim)
return expand_dict
elif isinstance(preview_val, list):
if expand_dim == "all":
expand_dict = \
self._output_all_dimensions(preview_val,
self._get_dimensions(preview_val))
else:
pu.check_valid_dimension(expand_dim, preview_val)
dim_key = f"dim{expand_dim}"
expand_dict[dim_key] = \
self._dim_slice_output(preview_val, expand_dim)
else:
raise ValueError("This preview value was not a recognised list "
"or dictionary. This expand command currenty "
"only works with those two data type.")
return expand_dict
def _get_dimensions(self, preview_list):
"""
:param preview_list: The preview parameter list
:return: Dimensions to display
"""
return 1 if not preview_list else len(preview_list)
def _output_all_dimensions(self, preview_list, dims):
"""Compile output string lines for all dimensions
:param preview_list: The preview parameter list
:param dims: Number of dimensions to display
:return: dict
"""
prev_dict = {}
for dim in range(1, dims + 1):
dim_key = f"dim{dim}"
prev_dict[dim_key] = self._dim_slice_output(preview_list, dim)
return prev_dict
def _dim_slice_output(self, preview_list, dim):
"""If there are multiple values in list format
Only save the values for the dimensions chosen
:param preview_list: The preview parameter list
:param dim: dimension to return the slice notation dictionary for
:return slice notation dictionary
"""
if not preview_list:
# If empty
preview_display_value = ":"
else:
preview_display_value = preview_list[dim - 1]
prev_val = self._set_all_syntax(preview_display_value)
return self._get_slice_notation_dict(prev_val)
def _get_slice_notation_dict(self, val):
"""Create a dict for slice notation information,
start:stop:step (and chunk if provided)
:param val: The list value in slice notation
:return: dictionary of slice notation
"""
import itertools
basic_slice_keys = ["start", "stop", "step"]
all_slice_keys = [*basic_slice_keys, "chunk"]
slice_dict = {}
if pu.is_slice_notation(val):
val_list = val.split(":")
if len(val_list) < 3:
# Make sure the start stop step slice keys are always shown,
# even when blank
val_list.append("")
for slice_name, v in zip(all_slice_keys, val_list):
# Only print up to the shortest list.
# (Only show the chunk value if it is in val_list)
slice_dict[slice_name] = v
else:
val_list = [val]
for slice_name, v in itertools.zip_longest(
basic_slice_keys, val_list, fillvalue=""
):
slice_dict[slice_name] = v
return slice_dict
def _set_all_syntax(self, val, replacement_str=""):
"""Remove additional spaces from val, replace colon when 'all'
data is selected
:param val: Slice notation value
:param replacement_str: String to replace ':' with
:return:
"""
if isinstance(val, str):
if pu.is_slice_notation(val):
if val == ":":
val = replacement_str
else:
val = val.strip()
else:
val = val.strip()
return val
    def get_multi_params_dict(self):
        """Return the multi (tuned) parameter dictionary, mapping each
        tuned dimension index to its ``{'label', 'values'}`` entry."""
        return self.multi_params_dict
    def alter_multi_params_dict(self, key, value):
        """Store one multi (tuned) parameter entry.

        :param key: tuned dimension index
        :param value: dict holding the 'label' and 'values' for the entry
        """
        self.multi_params_dict[key] = value
    def get_extra_dims(self):
        """Return the list of extra data-dimension lengths added by
        parameter tuning (one entry per tuned parameter)."""
        return self.extra_dims
    def set_extra_dims(self, value):
        """Replace the list of extra (tuning) dimension lengths.

        :param value: new list of tuned-dimension lengths
        """
        self.extra_dims = value
    def append_extra_dims(self, value):
        """Record the length of one newly-tuned dimension.

        :param value: number of values supplied for the tuned parameter
        """
        self.extra_dims.append(value)
    def define_parameters(self):
        """Default no-op: plugin tools subclasses override this, supplying
        their parameter definitions as a yaml-formatted docstring which is
        read by _load_param_from_doc."""
        pass
"""
@dataclass
class Parameter:
''' Descriptor of Parameter Information for plugins
'''
visibility: int
datatype: specific_type
description: str
default: int
Options: Optional[[str]]
dependency: Optional[]
def _get_param(self):
param_dict = {}
param_dict['visibility'] = self.visibility
param_dict['type'] = self.dtype
param_dict['description'] = self.description
# and the remaining keys
return param_dict
"""
class PluginCitations(object):
    """Store the citation information for a plugin, as read from the yaml
    docstrings of its tools classes, in an ordered MetaData dictionary.
    """
    def __init__(self):
        super(PluginCitations, self).__init__()
        # Ordered so citations appear in the order they were defined
        self.cite = MetaData(ordered=True)

    def set_cite(self, tools_list):
        """Set the citations for each of the tools classes.

        :param tools_list: List containing tool classes of parent plugins
        """
        # Plain loop instead of the original list(map(lambda ...)), which
        # built a throwaway list purely for its side effects
        for tool_class in tools_list:
            self._set_plugin_citations(tool_class)

    def _set_plugin_citations(self, tool_class):
        """Load the citations for one tools class and store the valid
        ones in ``self.cite``."""
        citations = self._load_cite_from_doc(tool_class)
        if citations:
            for citation in citations.values():
                if self._citation_keys_valid(citation, tool_class):
                    new_citation = CitationInformation(**citation)
                    self.cite.set(new_citation.name, new_citation)
                else:
                    print(f"The citation for {tool_class.__name__} "
                          f"was not saved.")

    def _citation_keys_valid(self, new_citation, tool_class):
        """Check that required citation keys are present.

        :param new_citation: citation definition dictionary
        :param tool_class: tools class used in warning messages
        :return: False if required keys are missing, True otherwise
        """
        required_keys = ["description"]
        # Inside the fresnel filter there is only a description
        citation_keys = list(new_citation.keys())
        # All required keys must appear in the citation definition;
        # all(...) already yields the bool the original computed via
        # 'False if check_keys is False else True'
        citation_keys_valid = all(
            item in citation_keys for item in required_keys)
        all_keys = [
            "short_name_article",
            "description",
            "bibtex",
            "endnote",
            "doi",
            "dependency",
        ]
        # Unknown keys are reported but do not invalidate the citation
        additional_keys = [k for k in citation_keys if k not in all_keys]
        if additional_keys:
            print(f"Please only use the following keys inside the citation"
                  f" definition for {tool_class.__name__}:")
            print(*all_keys, sep=", ")
            print("The incorrect keys used:", additional_keys)
        return citation_keys_valid

    def _load_cite_from_doc(self, tool_class):
        """Find the citation information from the method docstrings,
        which are provided in a yaml format.

        :param tool_class: Tool to retrieve citation docstrings from
        :return: OrderedDict of all citations from this tool class
        """
        all_c = OrderedDict()
        # Separate the citation methods. __dict__ returns class-level
        # attributes, so only methods defined on this class are seen.
        citation_methods = {key: value
                            for key, value in tool_class.__dict__.items()
                            if key.startswith('citation')}
        for c_method_name, c_method in citation_methods.items():
            yaml_text = c_method.__doc__
            if yaml_text is not None:
                yaml_text = self.seperate_description(yaml_text)
                current_citation = doc.load_yaml_doc(yaml_text)
                if not isinstance(current_citation, OrderedDict):
                    print(f"The citation information has not been read in "
                          f"correctly for {tool_class.__name__}.")
                else:
                    all_c[c_method_name] = current_citation
        return all_c

    def seperate_description(self, yaml_text):
        """Reformat a citation docstring so it parses as yaml: keep the
        new lines inside the bibtex and endnote blocks and prefix the
        free text with a 'description' key.

        (The method name keeps its original spelling, 'seperate', to
        preserve the public interface.)

        :param yaml_text: raw citation docstring
        :return: Reformatted yaml text
        """
        description = doc.remove_new_lines(yaml_text.partition("bibtex:")[0])
        desc_str = " description:" + description
        bibtex_text = \
            yaml_text.partition("bibtex:")[2].partition("endnote:")[0]
        end_text = \
            yaml_text.partition("bibtex:")[2].partition("endnote:")[2]
        # '|' preserves literal newlines inside the yaml block
        if bibtex_text and end_text:
            final_str = desc_str + '\n bibtex: |' + bibtex_text \
                        + 'endnote: |' + end_text
        elif end_text:
            final_str = desc_str + '\n endnote: |' + end_text
        elif bibtex_text:
            final_str = desc_str + '\n bibtex: |' + bibtex_text
        else:
            final_str = desc_str
        return final_str
class PluginDocumentation(object):
    """Store plugin documentation details, read from the tools class
    docstrings, inside a MetaData dictionary.
    """
    def __init__(self):
        super(PluginDocumentation, self).__init__()
        self.doc = MetaData()

    def set_doc(self, tools_list):
        """Store the verbose description, config warnings and (when one
        exists) the readthedocs link for this plugin."""
        # Use the tools class at the 'top' of the inheritance chain
        raw_lines = tools_list[-1].__doc__.splitlines()
        cleaned = [line.strip() for line in raw_lines if line]
        self.doc.set("verbose", " ".join(cleaned))
        self.doc.set("warn", self.set_warn(tools_list))
        self.set_doc_link()

    def set_warn(self, tools_list):
        """Return the config warnings of the child tools class only, with
        each warning collapsed onto a single line."""
        warn_text = tools_list[-1].config_warn.__doc__
        if warn_text and "\n\n" in warn_text:
            # Multiple warnings are separated by a blank line (\n\n)
            flattened = [doc.remove_new_lines(warning)
                         for warning in warn_text.split("\n\n")]
            warn_text = '\n'.join(flattened)
        return warn_text

    def set_doc_link(self):
        """Save a readthedocs link for this plugin when a matching
        restructured text file exists in doc/source/documentation."""
        # Savu base path: everything before the first 'savu' path element
        savu_base_path = \
            os.path.dirname(os.path.realpath(__file__)).split("savu")[0]
        doc_folder = savu_base_path + "doc/source/documentation"
        # e.g. 'savu.plugins.x' -> '/plugins/x' (leading 'savu' removed)
        module_path = \
            self.plugin_class.__module__.replace(".", "/").replace("savu", "")
        file_ = module_path + "_doc"
        file_path = doc_folder + file_ + ".rst"
        sphinx_link = 'https://savu.readthedocs.io/en/latest/' \
                      'documentation' + file_
        if os.path.isfile(file_path):
            self.doc.set("documentation_link", sphinx_link)

    def config_warn(self):
        """Default no-op: child tools classes override this with a
        docstring listing configuration warnings."""
        pass
class PluginTools(PluginParameters, PluginCitations, PluginDocumentation):
    """Holds all of the parameter, citation and documentation information
    for one plugin class - cls"""
    def __init__(self, cls):
        super(PluginTools, self).__init__()
        self.plugin_class = cls
        self.tools_list = self._find_tools()
        self._set_tools_data()

    def _get_plugin(self):
        """Return the plugin class this tools object describes."""
        return self.plugin_class

    def _find_tools(self):
        """Collect the tools class for every base class, walking the
        method resolution order base-classes-first."""
        found = []
        for klass in reversed(self.plugin_class.__class__.__mro__):
            tools_id = klass.__module__ + "_tools"
            tools_class = pu.get_tools_class(tools_id)
            if tools_class:
                found.append(tools_class)
        return found

    def _set_tools_data(self):
        """Populate the parameters, citations and documentation with
        information from all of the tools classes."""
        self.populate_parameters(self.tools_list)
        self.set_cite(self.tools_list)
        self.set_doc(self.tools_list)

    def get_param_definitions(self):
        """Return the original parameter definitions read from the tools
        files.

        :return: dict of parameter definitions
        """
        return self.param.get_dictionary()

    def get_param_values(self):
        """Return the plugin parameter values for this instance.

        :return: dict of parameter values
        """
        return self.parameters

    def get_citations(self):
        """Return the citation dictionary for this plugin."""
        return self.cite.get_dictionary()

    def get_doc(self):
        """Return the documentation dictionary for this plugin."""
        return self.doc.get_dictionary()
| 2,216 | 0 | 467 |
91c4447f40df9a13e4fd627871df6a5b7f822dad | 6,370 | py | Python | pylib/cqlshlib/test/ansi_colors.py | tomitakazutaka/cassandra | 5a04115973932202203d479db37eedc5a2d81778 | [
"Apache-2.0"
] | null | null | null | pylib/cqlshlib/test/ansi_colors.py | tomitakazutaka/cassandra | 5a04115973932202203d479db37eedc5a2d81778 | [
"Apache-2.0"
] | null | null | null | pylib/cqlshlib/test/ansi_colors.py | tomitakazutaka/cassandra | 5a04115973932202203d479db37eedc5a2d81778 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import re
import six
LIGHT = 0o10
ansi_CSI = '\x1b['
ansi_seq = re.compile(re.escape(ansi_CSI) + r'(?P<params>[\x20-\x3f]*)(?P<final>[\x40-\x7e])')
ansi_cmd_SGR = 'm' # set graphics rendition
color_defs = (
(000, 'k', 'black'),
(0o01, 'r', 'dark red'),
(0o02, 'g', 'dark green'),
(0o03, 'w', 'brown', 'dark yellow'),
(0o04, 'b', 'dark blue'),
(0o05, 'm', 'dark magenta', 'dark purple'),
(0o06, 'c', 'dark cyan'),
(0o07, 'n', 'light grey', 'light gray', 'neutral', 'dark white'),
(0o10, 'B', 'dark grey', 'dark gray', 'light black'),
(0o11, 'R', 'red', 'light red'),
(0o12, 'G', 'green', 'light green'),
(0o13, 'Y', 'yellow', 'light yellow'),
(0o14, 'B', 'blue', 'light blue'),
(0o15, 'M', 'magenta', 'purple', 'light magenta', 'light purple'),
(0o16, 'C', 'cyan', 'light cyan'),
(0o17, 'W', 'white', 'light white'),
)
colors_by_num = {}
colors_by_letter = {}
colors_by_name = {}
letters_by_num = {}
for colordef in color_defs:
colorcode = colordef[0]
colorletter = colordef[1]
colors_by_num[colorcode] = nameset = set(colordef[2:])
colors_by_letter[colorletter] = colorcode
letters_by_num[colorcode] = colorletter
for c in list(nameset):
# equivalent names without spaces
nameset.add(c.replace(' ', ''))
for c in list(nameset):
# with "bright" being an alias for "light"
nameset.add(c.replace('light', 'bright'))
for c in nameset:
colors_by_name[c] = colorcode
| 32.5 | 104 | 0.574411 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import re
import six
LIGHT = 0o10
ansi_CSI = '\x1b['
ansi_seq = re.compile(re.escape(ansi_CSI) + r'(?P<params>[\x20-\x3f]*)(?P<final>[\x40-\x7e])')
ansi_cmd_SGR = 'm' # set graphics rendition
color_defs = (
(000, 'k', 'black'),
(0o01, 'r', 'dark red'),
(0o02, 'g', 'dark green'),
(0o03, 'w', 'brown', 'dark yellow'),
(0o04, 'b', 'dark blue'),
(0o05, 'm', 'dark magenta', 'dark purple'),
(0o06, 'c', 'dark cyan'),
(0o07, 'n', 'light grey', 'light gray', 'neutral', 'dark white'),
(0o10, 'B', 'dark grey', 'dark gray', 'light black'),
(0o11, 'R', 'red', 'light red'),
(0o12, 'G', 'green', 'light green'),
(0o13, 'Y', 'yellow', 'light yellow'),
(0o14, 'B', 'blue', 'light blue'),
(0o15, 'M', 'magenta', 'purple', 'light magenta', 'light purple'),
(0o16, 'C', 'cyan', 'light cyan'),
(0o17, 'W', 'white', 'light white'),
)
colors_by_num = {}
colors_by_letter = {}
colors_by_name = {}
letters_by_num = {}
for colordef in color_defs:
colorcode = colordef[0]
colorletter = colordef[1]
colors_by_num[colorcode] = nameset = set(colordef[2:])
colors_by_letter[colorletter] = colorcode
letters_by_num[colorcode] = colorletter
for c in list(nameset):
# equivalent names without spaces
nameset.add(c.replace(' ', ''))
for c in list(nameset):
# with "bright" being an alias for "light"
nameset.add(c.replace('light', 'bright'))
for c in nameset:
colors_by_name[c] = colorcode
class ColoredChar(object):
    """A single character paired with its ANSI color code.

    Unknown attribute access is delegated to the underlying character,
    so the object can mostly be used like a one-character string.
    """
    def __init__(self, c, colorcode):
        self.c = c
        self._colorcode = colorcode

    def colorcode(self):
        """Return the numeric color code (low 3 bits hue, 0o10 light)."""
        return self._colorcode

    def plain(self):
        """Return the character without any color information."""
        return self.c

    def __getattr__(self, name):
        # Delegate everything else to the wrapped character
        return getattr(self.c, name)

    def ansi_color(self):
        """Return the SGR parameter string for this color, e.g. '1;31'."""
        code = str(30 + (0o7 & self._colorcode))
        if self._colorcode & 0o10:
            # The light/bright bit is rendered via the bold attribute
            code = '1;' + code
        return code

    def __str__(self):
        return "<%s '%r'>" % (self.__class__.__name__, self.colored_repr())

    __repr__ = __str__

    def colored_version(self):
        """Return the character wrapped in its ANSI color escapes."""
        return '%s0;%sm%s%s0m' % (ansi_CSI, self.ansi_color(), self.c, ansi_CSI)

    def colored_repr(self):
        """Return an escaped, color-wrapped representation of the char."""
        if self.c == "'":
            escaped = r"\'"
        elif self.c == '"':
            escaped = self.c
        else:
            escaped = repr(self.c)[1:-1]
        return '%s0;%sm%s%s0m' % (ansi_CSI, self.ansi_color(), escaped, ansi_CSI)

    def colortag(self):
        """Return the single-letter tag for this character's color."""
        return lookup_letter_from_code(self._colorcode)
class ColoredText(object):
    """A sequence of ColoredChars parsed from an ANSI-colored string.

    Tracks, per character, which SGR color was active when that
    character appeared, and can reproduce plain or re-colored versions
    of the text.
    """
    def __init__(self, source=''):
        if isinstance(source, six.text_type):
            plain, colors = self.parse_ansi_colors(source)
            self.chars = list(map(ColoredChar, plain, colors))
        else:
            # expected that source is an iterable of ColoredChars (or duck-typed as such)
            self.chars = tuple(source)
    def splitlines(self):
        """Split on newline characters, returning one ColoredText per line."""
        lines = [[]]
        for c in self.chars:
            if c.plain() == '\n':
                lines.append([])
            else:
                lines[-1].append(c)
        return [self.__class__(line) for line in lines]
    def plain(self):
        """Return the text with all color information stripped."""
        return ''.join([c.plain() for c in self.chars])
    def __getitem__(self, index):
        return self.chars[index]
    @classmethod
    def parse_ansi_colors(cls, source):
        """Return (plain_text, color_codes) for an ANSI-escaped string;
        color_codes has one entry per character of plain_text."""
        # note: strips all control sequences, even if not SGRs.
        colors = []
        plain = ''
        last = 0
        curclr = 0
        for match in ansi_seq.finditer(source):
            # Text between the previous escape and this one keeps the
            # currently active color
            prevsegment = source[last:match.start()]
            plain += prevsegment
            colors.extend([curclr] * len(prevsegment))
            if match.group('final') == ansi_cmd_SGR:
                try:
                    curclr = cls.parse_sgr_param(curclr, match.group('params'))
                except ValueError:
                    # malformed SGR parameters: keep the previous color
                    pass
            last = match.end()
        prevsegment = source[last:]
        plain += prevsegment
        colors.extend([curclr] * len(prevsegment))
        return ''.join(plain), colors
    @staticmethod
    def parse_sgr_param(curclr, paramstr):
        """Apply an SGR parameter string (e.g. '1;31') to the current
        color code and return the new code."""
        oldclr = curclr  # NOTE(review): unused; retained from original
        args = list(map(int, paramstr.split(';')))
        for a in args:
            if a == 0:
                # reset to the default rendition
                curclr = lookup_colorcode('neutral')
            elif a == 1:
                # bold/bright maps to the LIGHT bit
                curclr |= LIGHT
            elif 30 <= a <= 37:
                # foreground color: keep the LIGHT bit, replace the hue
                curclr = (curclr & LIGHT) | (a - 30)
            else:
                # not supported renditions here; ignore for now
                pass
        return curclr
    def __repr__(self):
        return "<%s '%s'>" % (self.__class__.__name__, ''.join([c.colored_repr() for c in self.chars]))
    __str__ = __repr__
    def __iter__(self):
        return iter(self.chars)
    def colored_version(self):
        """Return the text with ANSI color escapes re-applied."""
        return ''.join([c.colored_version() for c in self.chars])
    def colortags(self):
        """Return one color-tag letter per character."""
        return ''.join([c.colortag() for c in self.chars])
def lookup_colorcode(name):
    """Return the numeric color code for a color name, e.g. 'red'."""
    return colors_by_name[name]
def lookup_colorname(code):
    """Return the set of names for a color code, or an
    'Unknown-color-0<octal>' string when the code is not defined."""
    return colors_by_num.get(code, 'Unknown-color-0%o' % code)
def lookup_colorletter(letter):
    """Return the numeric color code for a single-letter color tag."""
    return colors_by_letter[letter]
def lookup_letter_from_code(code):
    """Return the single-letter tag for a color code; neutral ('n') and
    unknown codes both map to a space."""
    letr = letters_by_num.get(code, ' ')
    if letr == 'n':
        letr = ' '
    return letr
| 3,182 | 644 | 148 |
e0beb0bdd73cc0eda15d5ef0ac50d51f281f3f2e | 3,022 | py | Python | CNN/utils.py | ostrowskaaa/ReDust | e9ed30dd13f8b7dc32e4d9099df2bb22aa1d2ecb | [
"MIT"
] | 1 | 2020-12-27T22:09:32.000Z | 2020-12-27T22:09:32.000Z | CNN/utils.py | ostrowskaaa/ReDust | e9ed30dd13f8b7dc32e4d9099df2bb22aa1d2ecb | [
"MIT"
] | 5 | 2020-11-13T17:53:07.000Z | 2021-03-30T09:36:02.000Z | CNN/utils.py | ostrowskaaa/ReDust | e9ed30dd13f8b7dc32e4d9099df2bb22aa1d2ecb | [
"MIT"
] | null | null | null | import os
import numpy as np
from cv2 import cv2
from PIL import Image
import matplotlib.pyplot as plt
from tensorflow import keras
from keras.preprocessing.image import array_to_img, img_to_array, load_img
PATH = os.getcwd()
## ----- LOAD DATA ------
## ----- IMAGE AUGMENTATION -----
| 36.409639 | 136 | 0.605228 | import os
import numpy as np
from cv2 import cv2
from PIL import Image
import matplotlib.pyplot as plt
from tensorflow import keras
from keras.preprocessing.image import array_to_img, img_to_array, load_img
PATH = os.getcwd()
## ----- LOAD DATA ------
def load_data(folderName, photoType):
    """Load every image in data/<folderName> with cv2 and return them as
    one array normalised to [0, 1].

    :param folderName: subfolder of ./data containing the images
    :param photoType: cv2 imread flag (e.g. grayscale vs color)
    :return: np.ndarray of images scaled by 1/255
    """
    folderPath = os.path.join(PATH, 'data/', folderName, '')
    data = os.listdir(folderPath)
    dataList = []
    for sample in data:
        imgPath = folderPath + sample
        img = cv2.imread(imgPath, photoType)
        dataList.append(img)
    return np.array(dataList) / 255.
## ----- IMAGE AUGMENTATION -----
def dataset_augmentation(images_array):
    """Augment images: for each input produce a horizontal flip, a
    vertical flip and rotations by 10, 15 and 20 degrees.

    Note: the returned array contains only the augmented images, not
    the originals.

    :param images_array: iterable of images (numpy arrays)
    :return: np.ndarray of augmented images
    """
    dataset = []
    for image in images_array:
        horizontal_flipped_img = cv2.flip(image, 1)
        dataset.append(horizontal_flipped_img)
        vertical_flipped_img = cv2.flip(image, 0)
        dataset.append(vertical_flipped_img)
        angles = [10, 15, 20]
        for angle in angles:
            height, width = image.shape[:2]
            # rotate about the image centre, keeping the original size
            matrix = cv2.getRotationMatrix2D((int(width/2), int(height/2)), angle, 1)
            rotated_img = cv2.warpAffine(image, matrix, (width, height))
            dataset.append(rotated_img)
    return np.array(dataset)
def plot_acc_loss(accuracy_values, loss_values, saving_name):
    """Plot training accuracy (top) and loss (bottom) per epoch and save
    the figure to results/<saving_name>.png.

    :param accuracy_values: per-epoch training accuracy values
    :param loss_values: per-epoch training loss values
    :param saving_name: file name (without extension) for the saved plot
    """
    # plot training ACCURACY VALUES
    fig = plt.figure()
    gs = fig.add_gridspec(ncols = 1,nrows = 2)
    plt.subplot(gs[0])
    plt.plot(accuracy_values)
    plt.ylabel('Accuracy')
    plt.legend(['Train data'], loc = 'upper left')
    # plot training LOSS VALUES
    plt.subplot(gs[1])
    plt.plot(loss_values)
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train data'], loc = 'upper left')
    plt.tight_layout()
    fig.savefig(os.path.join(PATH, 'results','') + '{}.png'.format(saving_name), dpi = fig.dpi)
def plot_predictions(predictions, test_masks, saving_name):
    """Save a 4x3 comparison grid: 6 predicted masks (rows 0-1) above the
    6 matching ground-truth masks (rows 2-3), written to
    ``<cwd>/results/<saving_name>_masks.png``.
    """
    figure = plt.figure(constrained_layout=True)
    grid = figure.add_gridspec(ncols=3, nrows=4)
    panel = 0
    for row in range(4):
        for col in range(3):
            if row < 2:
                # top half: network output for panels 0..5
                predicted = keras.preprocessing.image.array_to_img(predictions[panel], scale=True)
                figure.add_subplot(grid[row, col])
                plt.axis('off')
                plt.imshow(predicted, cmap='gray')
            else:
                # bottom half: ground truth; offset by the 6 panels drawn above
                # NOTE(review): masks are reshaped to (100, 100, 1) — assumes 100x100 inputs
                original = keras.preprocessing.image.array_to_img(test_masks[panel - 6].reshape((100, 100, 1)), scale=True)
                plt.subplot(grid[row, col])
                plt.axis('off')
                plt.imshow(original, cmap='gray')
            panel += 1
    figure.suptitle('Predicted masks (on top) vs original ones', fontsize=16)
    figure.savefig(os.path.join(PATH, 'results', '') + '{}_masks.png'.format(saving_name), dpi=figure.dpi)
| 2,614 | 0 | 96 |
ad176cf21c5302e22fe80155ad9383c77b3bf96b | 4,093 | py | Python | taobao_spider/taobao_spider/taobao_spider/spiders/TaobaoSpider.py | lj28478416/Taobao_Spider | d18675efc4bf8938bfe08f9f0e5de2e26d58a390 | [
"Apache-2.0"
] | null | null | null | taobao_spider/taobao_spider/taobao_spider/spiders/TaobaoSpider.py | lj28478416/Taobao_Spider | d18675efc4bf8938bfe08f9f0e5de2e26d58a390 | [
"Apache-2.0"
] | null | null | null | taobao_spider/taobao_spider/taobao_spider/spiders/TaobaoSpider.py | lj28478416/Taobao_Spider | d18675efc4bf8938bfe08f9f0e5de2e26d58a390 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy
import json
from jsonpath import jsonpath
import re
from ..items import TaobaoSpiderItem
from ..settings import cookies
from urllib import parse
error_num = 0
| 44.978022 | 118 | 0.524554 | # -*- coding: utf-8 -*-
import scrapy
import json
from jsonpath import jsonpath
import re
from ..items import TaobaoSpiderItem
from ..settings import cookies
from urllib import parse
error_num = 0
class TaobaospiderSpider(scrapy.Spider):
    """Crawl Taobao mobile search results for one query, then fetch up to
    10 pages of Tmall rating comments for every listed item.

    Retry bookkeeping lives on the item itself: ``count_302`` tracks redirect
    retries and ``error_num`` tracks GBK decode failures; both give up after
    20 attempts and yield whatever was collected so far.
    """
    name = 'TaobaoSpider'
    # allowed_domains = ['taobao.com', 'tmall.org']
    # Search-result pages 1..100 for the query q=帆布鞋 (canvas shoes).
    start_urls = [
        'https://s.m.taobao.com/search?event_submit_do_new_search_auction=1'\
        '&_input_charset=utf-8&topSearch=1&atype=b&searchfrom=1&'\
        'action=home%3Aredirect_app_action&from=1&q=帆布鞋&sst=1&n=20&'\
        'buying=buyitnow&m=api4h5&abtest=5&wlsort=5&page='
        + str(num) for num in range(1, 101)]
    def parse(self, response):
        """Parse one JSON search-result page and schedule rating requests."""
        response_str = response.body.decode()
        response_json = json.loads(response_str)
        # Each entry of listItem is one product hit on the search page.
        for goods_info in jsonpath(response_json, '$..listItem')[0]:
            item = TaobaoSpiderItem()
            item['rate'] = []
            item['count'] = 0
            item['url'] = jsonpath(goods_info,'$..url')[0]
            item['item_id'] = jsonpath(goods_info,'$..item_id')[0]
            item['title'] = jsonpath(goods_info,'$..title')[0]
            item['user_id'] = jsonpath(goods_info,'$..userId')[0]
            item['price_wap'] = jsonpath(goods_info,'$..priceWap')[0]
            item['buy_num'] = jsonpath(goods_info,'$..act')[0]
            # Build the Tmall rating-list endpoint for this item/seller pair.
            base_url = 'https://rate.tmall.com/list_detail_rate.htm?'
            itemId = 'itemId=' + item['item_id'] + '&'
            sellerId = 'sellerId=' + item['user_id'] + '&'
            for page in range(1, 11):
                item['count_302'] = 0
                item['error_num'] = 0
                # NOTE(review): '¤tPage' looks like HTML-entity mojibake of
                # '&currentPage' ('&curren' decoded to the ¤ sign) — confirm the
                # intended query parameter before relying on this URL.
                second_url = '&order=3¤tPage=' + str(
                    page) + '&pageSize=10&&callback=_DLP_2533_der_3_currentPage_' + str(
                    page) + '_pageSize_10_'
                url = base_url + itemId + sellerId + second_url
                yield scrapy.Request(url, callback=self.parse_rate, meta={"item": item})
    def parse_rate(self,response):
        """Extract rating comments, retrying on 302 redirects and GBK decode errors."""
        item = response.meta['item']
        if response.status == 302:
            item['error_num'] = 0
            # Anti-bot redirect: the real target is carried in the smReturn param.
            patten1 = re.compile(r"smReturn=(.+?)&smSign=")
            url = re.search(patten1,response.url).group(1)
            url = parse.unquote(url)
            if item['count_302'] >= 20:
                # Give up on this page after 20 redirect retries; yield what we have.
                item['count_302'] = 0
                response_list = []
                print(url)
                print("I Cant Kill ThisOne!Fuck TaoBao!")
                for goods_info in response_list:
                    item['rate'].append(goods_info)
                    item['count'] += 1
                yield item
            else:
                item['count_302'] += 1
                print('kill_302__{}'.format(item['count_302']))
                yield scrapy.Request(url, callback=self.parse_rate, meta={"item": item},dont_filter=True)
        else:
            try:
                item['count_302'] = 0
                # Tmall rating pages are GBK-encoded; comments sit in rateContent fields.
                response_str = response.body.decode('GBK')
                item['error_num'] = 0
                patten = re.compile(r'"rateContent":"(.*?)","', re.S)
                response_list = re.findall(patten, response_str)
                for goods_info in response_list:
                    item['rate'].append(goods_info)
                    item['count'] += 1
                # print('success__{}'.format(item['count']))
                yield item
            except UnicodeDecodeError:
                if item['error_num'] >= 20:
                    # Give up after 20 decode failures; count it in the module tally.
                    item['error_num'] = 0
                    global error_num
                    error_num += 1
                    print("UnicodeError__{}".format(error_num))
                    response_list = []
                    for goods_info in response_list:
                        item['rate'].append(goods_info)
                        item['count'] += 1
                    yield item
                else:
                    print(response.url)
                    item['error_num'] += 1
                    yield scrapy.Request(response.url, callback=self.parse_rate, meta={"item": item},dont_filter=True)
| 3,386 | 493 | 22 |
76abd7e5c30c875c4d15aacee98dd95000ad0604 | 360 | py | Python | django_cbtools/signals.py | smarttradeapp/django_couchbase | d96b32977bdb0dbf1a6e891ab29ea7b69ac6bed4 | [
"BSD-3-Clause"
] | 6 | 2016-06-23T08:21:43.000Z | 2018-07-19T09:42:32.000Z | django_cbtools/signals.py | smarttradeapp/django_cbtools | d96b32977bdb0dbf1a6e891ab29ea7b69ac6bed4 | [
"BSD-3-Clause"
] | 26 | 2015-10-17T08:59:36.000Z | 2021-06-10T17:48:41.000Z | django_cbtools/signals.py | smarttradeapp/django_couchbase | d96b32977bdb0dbf1a6e891ab29ea7b69ac6bed4 | [
"BSD-3-Clause"
] | 8 | 2015-11-28T13:47:19.000Z | 2020-12-15T12:58:00.000Z | from django.db.models.signals import ModelSignal
cb_pre_save = ModelSignal(providing_args=["instance"], use_caching=True)
cb_post_save = ModelSignal(providing_args=["instance", "created"], use_caching=True)
cb_pre_delete = ModelSignal(providing_args=["instance"], use_caching=True)
cb_post_delete = ModelSignal(providing_args=["instance"], use_caching=True)
from django.db.models.signals import ModelSignal
# Couchbase analogues of Django's built-in model lifecycle signals
# (pre_save/post_save/pre_delete/post_delete), fired around Couchbase
# document saves and deletes instead of ORM writes.
# NOTE(review): ``providing_args`` is purely documentary and was removed in
# Django 4.0 (deprecated since 3.0) — confirm the supported Django versions.
cb_pre_save = ModelSignal(providing_args=["instance"], use_caching=True)
cb_post_save = ModelSignal(providing_args=["instance", "created"], use_caching=True)
cb_pre_delete = ModelSignal(providing_args=["instance"], use_caching=True)
cb_post_delete = ModelSignal(providing_args=["instance"], use_caching=True)
| 0 | 0 | 0 |
6b3f7a1b7b9bdbaf348944e853bd1155f8a549ef | 1,540 | py | Python | app/auth/views.py | annaadhiambo/The-Last-Pitch | cf611b846059df57f6f02ff69d9029946487aa6d | [
"MIT"
] | null | null | null | app/auth/views.py | annaadhiambo/The-Last-Pitch | cf611b846059df57f6f02ff69d9029946487aa6d | [
"MIT"
] | null | null | null | app/auth/views.py | annaadhiambo/The-Last-Pitch | cf611b846059df57f6f02ff69d9029946487aa6d | [
"MIT"
] | null | null | null | from flask import render_template,redirect,url_for, flash,request
from flask_login import login_user,logout_user,login_required
from . import auth
from ..models import User
from .forms import LoginForm,RegistrationForm
from .. import db
from ..email import mail_message
from flask_http_response import success, result, error
@auth.route('/login', methods=['GET', 'POST'])
@auth.route('/logout')
@login_required
@auth.route('api/register', methods=["POST"])
| 38.5 | 104 | 0.719481 | from flask import render_template,redirect,url_for, flash,request
from flask_login import login_user,logout_user,login_required
from . import auth
from ..models import User
from .forms import LoginForm,RegistrationForm
from .. import db
from ..email import mail_message
from flask_http_response import success, result, error
@auth.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login page and authenticate submitted credentials.

    On a valid email/password pair the user is logged in and redirected to
    the ``next`` target (or the index); otherwise the form is re-rendered
    with an error flash.
    """
    form = LoginForm()
    if form.validate_on_submit():
        account = User.query.filter_by(email=form.email.data).first()
        if account is not None and account.verify_password(form.password.data):
            login_user(account, form.remember.data)
            return redirect(request.args.get('next') or url_for('main.index'))
        # Submitted but credentials did not match any account.
        flash('Invalid username or Password')
    title = "pitch_login"
    return render_template('auth/login.html', login_form=form, title=title)
@auth.route('/logout')
@login_required
def logout():
    """End the current session and send the visitor back to the home page."""
    logout_user()
    return redirect(url_for("main.index"))
# Fix: the original rule 'api/register' has no leading slash, which Flask
# rejects with ValueError ("urls must start with a leading slash") at
# registration time, so the endpoint could never be served.
@auth.route('/api/register', methods=["POST"])
def register():
    """Create a new user account from the submitted registration form.

    Returns a JSON 200 response when the user is created, or a JSON 500
    response when the form fails validation. (The original trailing
    "Successfully Completed" return was unreachable and has been removed.)
    """
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(email=form.email.data, username=form.username.data, password=form.password.data)
        user.save_user()
        return success.return_response(message='User created Successfully', status=200)
    return error.return_response(message='User not created', status=500)
| 1,008 | 0 | 66 |
edb742b662287057e388a5535dd4c2d409e7dfb8 | 356 | py | Python | build/lib/django_simple_file_handler/urls.py | jonathanrickard/django-simple-file-handler | f714b93b941b3a677a8fd2a2eb425afaaa0a2d62 | [
"MIT"
] | 5 | 2020-09-17T16:41:01.000Z | 2021-05-21T22:42:56.000Z | build/lib/django_simple_file_handler/urls.py | jonathanrickard/django-simple-file-handler | f714b93b941b3a677a8fd2a2eb425afaaa0a2d62 | [
"MIT"
] | null | null | null | build/lib/django_simple_file_handler/urls.py | jonathanrickard/django-simple-file-handler | f714b93b941b3a677a8fd2a2eb425afaaa0a2d62 | [
"MIT"
] | 1 | 2021-01-09T13:04:38.000Z | 2021-01-09T13:04:38.000Z | from django.urls import (
path,
)
from .views import (
proxy_document,
proxy_pdf,
)
app_name = 'django_simple_file_handler'
urlpatterns = [
path(
'documents/<proxy_slug>',
proxy_document,
name='proxy_document',
),
path(
'pdf/<proxy_slug>',
proxy_pdf,
name='proxy_pdf',
),
]
from django.urls import (
    path,
)
from .views import (
    proxy_document,
    proxy_pdf,
)
# Namespace used for reversing, e.g. 'django_simple_file_handler:proxy_document'.
app_name = 'django_simple_file_handler'
# Proxy views stream stored files by their slug rather than exposing the
# underlying storage URL directly.
urlpatterns = [
    path(
        'documents/<proxy_slug>',
        proxy_document,
        name='proxy_document',
    ),
    path(
        'pdf/<proxy_slug>',
        proxy_pdf,
        name='proxy_pdf',
    ),
]
| 0 | 0 | 0 |
8f19b7cff4fb05679aff138284ee6c3240db012b | 3,209 | py | Python | conversationinsights-mynlu/mynlu/parsers/__init__.py | osswangxining/iot-app-enabler-conversation | 7c40a89072b1730869260320d77aa9856412424a | [
"Apache-2.0"
] | 4 | 2017-10-19T05:11:52.000Z | 2018-06-26T02:54:55.000Z | mynlu/parsers/__init__.py | osswangxining/conversationinsights-mynlu | d2a174af770a496ef220330cb6d5428f5f0114f6 | [
"Apache-2.0"
] | null | null | null | mynlu/parsers/__init__.py | osswangxining/conversationinsights-mynlu | d2a174af770a496ef220330cb6d5428f5f0114f6 | [
"Apache-2.0"
] | 1 | 2018-06-26T02:54:57.000Z | 2018-06-26T02:54:57.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import io
import json
import logging
import os
import copy
from builtins import object
from builtins import str
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Text
import mynlu
from mynlu import pipeline
from mynlu.config.mynluconfig import MyNLUConfig
from mynlu.pipeline import MissingArgumentError
from mynlu.trainers import TrainingData, Message
from mynlu.utils import create_dir
from mynlu.pipeline.plugin import Plugin, PluginFactory
from mynlu import pipeline
| 35.263736 | 104 | 0.682456 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import io
import json
import logging
import os
import copy
from builtins import object
from builtins import str
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Text
import mynlu
from mynlu import pipeline
from mynlu.config.mynluconfig import MyNLUConfig
from mynlu.pipeline import MissingArgumentError
from mynlu.trainers import TrainingData, Message
from mynlu.utils import create_dir
from mynlu.pipeline.plugin import Plugin, PluginFactory
from mynlu import pipeline
class Interpreter(object):
    """Runs a trained NLU pipeline over raw text and returns intent/entity output."""
    # Defines all attributes (and their default values) that will be returned by `parse`
    @staticmethod
    def default_output_attributes():
        """Skeleton of the parse result: empty intent, empty entity list."""
        return {"intent": {"name": "", "confidence": 0.0}, "entities": []}
    @staticmethod
    def load(model_metadata, config, plugin_factory=None, skip_valdation=False):
        # type: (Metadata, MyNLUConfig, Optional[PluginFactory], bool) -> Interpreter
        """Instantiate every plugin named in the model metadata and wire their
        shared context, returning a ready-to-use Interpreter.

        NOTE(review): the parameter name 'skip_valdation' (sic) is a typo kept
        for call-site compatibility; 'config' is accepted but unused here.
        """
        context = {}
        if plugin_factory is None:
            # If no builder is passed, every interpreter creation will result in a new builder.
            plugin_factory = PluginFactory()
        _pipeline = []
        if not skip_valdation:
            pipeline.validate_requirements(model_metadata.pipeline)
        for plugin_name in model_metadata.pipeline:
            plugin = plugin_factory.load_plugin(
                plugin_name, model_metadata.model_dir, model_metadata, **context)
            try:
                # Each plugin may export values (e.g. loaded models) that later
                # plugins in the pipeline receive as keyword arguments.
                updates = plugin.provide_context()
                if updates:
                    context.update(updates)
                _pipeline.append(plugin)
            except MissingArgumentError as e:
                raise Exception("Failed to initialize plugin '{}'. {}".format(plugin_name, e))
        return Interpreter(_pipeline, context, model_metadata)
    def __init__(self, pipeline, context, model_metadata=None):
        # type: (List[Component], Dict[Text, Any], Optional[Metadata]) -> None
        self.pipeline = pipeline
        self.context = context if context is not None else {}
        self.model_metadata = model_metadata
    def parse(self, text, time=None):
        # type: (Text) -> Dict[Text, Any]
        """Parse the input text, classify it and return an object containing its intent and entities."""
        if not text:
            # Not all plugins are able to handle empty strings. So we need to prevent that...
            # This default return will not contain all output attributes of all plugins,
            # but in the end, no one should pass an empty string in the first place.
            output = self.default_output_attributes()
            output["text"] = ""
            return output
        message = Message(text, self.default_output_attributes(), time=time)
        for p in self.pipeline:
            p.process(message, **self.context)
        output = self.default_output_attributes()
        output.update(message.as_dict(only_output_properties=True))
        return output
1895571fa5dca0d9aea1f7538b31f47d0d2ee10a | 4,699 | py | Python | AutomatedTesting/Gem/PythonTests/physics/TestSuite_Main.py | aaarsene/o3de | 37e3b0226958974defd14dd6d808e8557dcd7345 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-09-13T00:01:12.000Z | 2021-09-13T00:01:12.000Z | AutomatedTesting/Gem/PythonTests/physics/TestSuite_Main.py | aaarsene/o3de | 37e3b0226958974defd14dd6d808e8557dcd7345 | [
"Apache-2.0",
"MIT"
] | null | null | null | AutomatedTesting/Gem/PythonTests/physics/TestSuite_Main.py | aaarsene/o3de | 37e3b0226958974defd14dd6d808e8557dcd7345 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-07-20T11:07:25.000Z | 2021-07-20T11:07:25.000Z | """
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
# This suite consists of all test cases that are passing and have been verified.
import pytest
import os
import sys
from .FileManagement import FileManagement as fm
from ly_test_tools import LAUNCHERS
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../automatedtesting_shared')
from base import TestAutomationBase
revert_physics_config = fm.file_revert_list(['physxdebugconfiguration.setreg', 'physxdefaultsceneconfiguration.setreg', 'physxsystemconfiguration.setreg'], 'AutomatedTesting/Registry')
@pytest.mark.SUITE_main
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
@pytest.mark.parametrize("project", ["AutomatedTesting"]) | 52.211111 | 184 | 0.786125 | """
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
# This suite consists of all test cases that are passing and have been verified.
import pytest
import os
import sys
from .FileManagement import FileManagement as fm
from ly_test_tools import LAUNCHERS
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../automatedtesting_shared')
from base import TestAutomationBase
revert_physics_config = fm.file_revert_list(['physxdebugconfiguration.setreg', 'physxdefaultsceneconfiguration.setreg', 'physxsystemconfiguration.setreg'], 'AutomatedTesting/Registry')
@pytest.mark.SUITE_main
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
@pytest.mark.parametrize("project", ["AutomatedTesting"])
class TestAutomation(TestAutomationBase):
def test_C111111_RigidBody_EnablingGravityWorksUsingNotificationsPoC(self, request, workspace, editor, launcher_platform):
from . import C111111_RigidBody_EnablingGravityWorksUsingNotificationsPoC as test_module
self._run_test(request, workspace, editor, test_module)
@revert_physics_config
def test_C5932041_PhysXForceRegion_LocalSpaceForceOnRigidBodies(self, request, workspace, editor, launcher_platform):
from . import C5932041_PhysXForceRegion_LocalSpaceForceOnRigidBodies as test_module
self._run_test(request, workspace, editor, test_module)
@revert_physics_config
@fm.file_override('physxsystemconfiguration.setreg','C4044459_Material_DynamicFriction.setreg_override', 'AutomatedTesting/Registry')
def test_C4044459_Material_DynamicFriction(self, request, workspace, editor, launcher_platform):
from . import C4044459_Material_DynamicFriction as test_module
self._run_test(request, workspace, editor, test_module)
@revert_physics_config
def test_C15425929_Undo_Redo(self, request, workspace, editor, launcher_platform):
from . import C15425929_Undo_Redo as test_module
self._run_test(request, workspace, editor, test_module)
@revert_physics_config
def test_C4976243_Collision_SameCollisionGroupDiffCollisionLayers(self, request, workspace, editor,
launcher_platform):
from . import C4976243_Collision_SameCollisionGroupDiffCollisionLayers as test_module
self._run_test(request, workspace, editor, test_module)
@revert_physics_config
def test_C14654881_CharacterController_SwitchLevels(self, request, workspace, editor, launcher_platform):
from . import C14654881_CharacterController_SwitchLevels as test_module
self._run_test(request, workspace, editor, test_module)
def test_C17411467_AddPhysxRagdollComponent(self, request, workspace, editor, launcher_platform):
from . import C17411467_AddPhysxRagdollComponent as test_module
self._run_test(request, workspace, editor, test_module)
@revert_physics_config
def test_C12712453_ScriptCanvas_MultipleRaycastNode(self, request, workspace, editor, launcher_platform):
from . import C12712453_ScriptCanvas_MultipleRaycastNode as test_module
# Fixme: unexpected_lines = ["Assert"] + test_module.Lines.unexpected
self._run_test(request, workspace, editor, test_module)
@revert_physics_config
@fm.file_override('physxsystemconfiguration.setreg','C4982593_PhysXCollider_CollisionLayer.setreg_override', 'AutomatedTesting/Registry')
def test_C4982593_PhysXCollider_CollisionLayerTest(self, request, workspace, editor, launcher_platform):
from . import C4982593_PhysXCollider_CollisionLayerTest as test_module
self._run_test(request, workspace, editor, test_module)
@revert_physics_config
def test_C18243586_Joints_HingeLeadFollowerCollide(self, request, workspace, editor, launcher_platform):
from . import C18243586_Joints_HingeLeadFollowerCollide as test_module
self._run_test(request, workspace, editor, test_module)
@revert_physics_config
def test_C4982803_Enable_PxMesh_Option(self, request, workspace, editor, launcher_platform):
from . import C4982803_Enable_PxMesh_Option as test_module
self._run_test(request, workspace, editor, test_module)
@revert_physics_config
def test_C24308873_CylinderShapeCollider_CollidesWithPhysXTerrain(self, request, workspace, editor, launcher_platform):
from . import C24308873_CylinderShapeCollider_CollidesWithPhysXTerrain as test_module
self._run_test(request, workspace, editor, test_module) | 2,888 | 910 | 22 |
2bbc5d5059204941b3bf3be674c045711a8fd26f | 690 | py | Python | tests/dataaccess/test_dataaccess.py | sebastian-lemke/tt | 2ff1ec23d206632446b08e56add1a8852af7415a | [
"MIT"
] | 13 | 2020-06-23T18:05:14.000Z | 2022-01-26T17:37:59.000Z | tests/dataaccess/test_dataaccess.py | sebastian-lemke/tt | 2ff1ec23d206632446b08e56add1a8852af7415a | [
"MIT"
] | 6 | 2020-03-26T00:47:52.000Z | 2022-02-15T13:03:05.000Z | tests/dataaccess/test_dataaccess.py | sebastian-lemke/tt | 2ff1ec23d206632446b08e56add1a8852af7415a | [
"MIT"
] | 5 | 2021-02-12T15:58:39.000Z | 2021-11-17T09:27:26.000Z | from unittest import TestCase
from tt.dataaccess.utils import *
| 34.5 | 92 | 0.711594 | from unittest import TestCase
from tt.dataaccess.utils import *
class TestDataaccess(TestCase):
def test_basic_json_datastore_creation(self):
datastore = get_data_store()
self.assertIsNotNone(datastore, 'Should not be none, but is')
def test_loading_json_datastore(self):
datastore = get_data_store()
data = datastore.load()
self.assertIsNotNone(data, 'Should not be none, but is')
self.assertIsNotNone(data['work'], 'Data should have empty work list, but doesn\'t')
def test_wrong_datastore_type_generates_exception(self):
with self.assertRaises(NonexistentDatasource):
datastore = get_data_store("XML")
| 511 | 10 | 104 |
85b2a7b8b24924295f931bb9285470d455b9b0b0 | 346 | py | Python | lib/algo/quark/test.py | tuaris/TidePool | 5d44fa11a7de3f5c05130d0ad4030671806cd501 | [
"BSD-3-Clause"
] | 3 | 2018-06-30T12:10:50.000Z | 2021-09-18T16:10:12.000Z | lib/algo/max_hash/test.py | tuaris/TidePool | 5d44fa11a7de3f5c05130d0ad4030671806cd501 | [
"BSD-3-Clause"
] | null | null | null | lib/algo/max_hash/test.py | tuaris/TidePool | 5d44fa11a7de3f5c05130d0ad4030671806cd501 | [
"BSD-3-Clause"
] | 9 | 2017-04-12T22:59:49.000Z | 2021-09-18T16:10:25.000Z | import quark_hash
import weakref
import binascii
import StringIO
from binascii import unhexlify
teststart = '700000005d385ba114d079970b29a9418fd0549e7d68a95c7f168621a314201000000000578586d149fd07b22f3a8a347c516de7052f034d2b76ff68e0d6ecff9b77a45489e3fd511732011df0731000';
testbin = unhexlify(teststart)
hash_bin = quark_hash.getPoWHash(testbin) | 34.6 | 175 | 0.904624 | import quark_hash
import weakref
import binascii
import StringIO
from binascii import unhexlify
teststart = '700000005d385ba114d079970b29a9418fd0549e7d68a95c7f168621a314201000000000578586d149fd07b22f3a8a347c516de7052f034d2b76ff68e0d6ecff9b77a45489e3fd511732011df0731000';
testbin = unhexlify(teststart)
hash_bin = quark_hash.getPoWHash(testbin) | 0 | 0 | 0 |
9e8f3ff788d8e8c6f67d82306f60aa368476c03b | 2,898 | py | Python | responsibility/responsibility.py | guwenbo/DesignPattern-Python | 3b7228595e9dd6ae5cb0df06b2b8a74ce2d774a9 | [
"Apache-2.0"
] | null | null | null | responsibility/responsibility.py | guwenbo/DesignPattern-Python | 3b7228595e9dd6ae5cb0df06b2b8a74ce2d774a9 | [
"Apache-2.0"
] | null | null | null | responsibility/responsibility.py | guwenbo/DesignPattern-Python | 3b7228595e9dd6ae5cb0df06b2b8a74ce2d774a9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
责任链模式
"""
if __name__ == '__main__':
hb = ConcreteHandlerB(Level(2))
ha = ConcreteHandlerA(Level(1), hb)
req = Request(Level(2), "Request with Level 2")
ha.handle_request(req)
| 21 | 83 | 0.612491 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
责任链模式
"""
class Level(object):
    """Wraps the priority value a handler in the chain is responsible for."""

    def __init__(self, value=None):
        self._value = value

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, new_value):
        self._value = new_value

    def equals(self, other):
        """True only when *other* is also a Level carrying an equal value."""
        return isinstance(other, Level) and self.value == other.value

    def __str__(self):
        return str(self.value)
class Request(object):
    """Carries a priority level plus a textual payload through the handler chain."""

    def __init__(self, level=None, content=''):
        self._level = level
        self._content = content

    @property
    def level(self):
        return self._level

    @level.setter
    def level(self, new_level):
        self._level = new_level

    @property
    def content(self):
        return self._content

    @content.setter
    def content(self, new_content):
        self._content = new_content

    def __str__(self):
        return "Request { level : %s , content : %s }" % (self.level, self.content)
class Response(object):
    """Simple value object holding the message a handler produced."""

    def __init__(self, message=''):
        self._message = message

    @property
    def message(self):
        return self._message

    @message.setter
    def message(self, new_message):
        self._message = new_message

    def __str__(self):
        return "Response { message : %s }" % self.message
class AbstractHandler(object):
    """Base link of the chain: handles a matching request or forwards it onward."""

    def __init__(self, level=None, next_handler=None):
        self._level = level
        self._next_handler = next_handler

    def process(self, request):
        """Hook for subclasses; the base implementation does nothing."""
        pass

    def handle_request(self, request):
        """Process *request* here when levels match, otherwise delegate down the chain."""
        if self.level.equals(request.level):
            return self.process(request)
        if self.next_handler:
            return self.next_handler.handle_request(request)
        # End of chain reached without a matching level.
        print("Can not handle this request %s" % request)
        return None

    @property
    def level(self):
        return self._level

    @level.setter
    def level(self, new_level):
        self._level = new_level

    @property
    def next_handler(self):
        return self._next_handler

    @next_handler.setter
    def next_handler(self, handler):
        self._next_handler = handler
class ConcreteHandlerA(AbstractHandler):
    """Chain link 'A': announces each request it is responsible for."""

    def __init__(self, level=None, next_handler=None):
        super(ConcreteHandlerA, self).__init__(level, next_handler)

    def process(self, request):
        print("Concrete Handler A handle the Request : %s" % request)
class ConcreteHandlerB(AbstractHandler):
    """Chain link 'B': announces each request it is responsible for."""

    def __init__(self, level=None, next_handler=None):
        super(ConcreteHandlerB, self).__init__(level, next_handler)

    def process(self, request):
        print("Concrete Handler B handle the Request : %s" % request)
if __name__ == '__main__':
    # Demo: chain A (level 1) -> B (level 2); a level-2 request is not
    # handled by A, so it is forwarded along the chain to B.
    hb = ConcreteHandlerB(Level(2))
    ha = ConcreteHandlerA(Level(1), hb)
    req = Request(Level(2), "Request with Level 2")
    ha.handle_request(req)
| 1,546 | 846 | 246 |
fb78fdfa960004a592bf5e8ced007003751dcda2 | 6,671 | py | Python | csalt/CASA_scripts/format_data.py | seanandrews/dsalt | 797c6c085b6cd0c82fa2a7c8b47d39d49815e7a6 | [
"MIT"
] | 2 | 2021-04-28T23:12:09.000Z | 2021-05-11T19:56:07.000Z | csalt/CASA_scripts/format_data.py | seanandrews/dsalt | 797c6c085b6cd0c82fa2a7c8b47d39d49815e7a6 | [
"MIT"
] | null | null | null | csalt/CASA_scripts/format_data.py | seanandrews/dsalt | 797c6c085b6cd0c82fa2a7c8b47d39d49815e7a6 | [
"MIT"
] | 1 | 2021-08-11T19:07:23.000Z | 2021-08-11T19:07:23.000Z | """
This CASA script (optionally) reduces an available (concatenated) MS by
time-averaging and sub-selecting a given velocity range. It is called
inside csalt.synthesize.make_data(), or can be used as a standalone script
for a real dataset as
casa -c format_data.py configs/gen_<cfg_file> <arg1>
where <cfg_file> is the relevant part of the configuration input filename
and <arg1> is an *optional* argument that contains a (string) filename
extension (usually "pure" or "noisy" in the csalt.synthesize framework).
(This *will* change when we update to full CASA v6.x.)
This script will output ...
"""
import os, sys
import numpy as np
import scipy.constants as sc
import h5py
"""
Parse inputs and load relevant information.
"""
# Ingest input arguments
bounds_ingest = False
if len(sys.argv) == 3:
cfg_file = sys.argv[-1]
_ext = ''
elif len(sys.argv) == 6:
cfg_file = sys.argv[-4]
_ext = '_'+sys.argv[-3]
Vbounds_lo = np.float(sys.argv[-2])
Vbounds_hi = np.float(sys.argv[-1])
bounds_ingest = True
else:
cfg_file = sys.argv[-2]
_ext = '_'+sys.argv[-1]
# Make sure the configuration file exists
if os.path.exists(cfg_file+'.py'):
execfile(cfg_file+'.py')
else:
print('Could not find input configuration file!')
sys.exit()
# Command-line bounds (when given) override the config file's V_bounds.
if bounds_ingest:
    V_bounds = np.array([Vbounds_lo, Vbounds_hi])
    print(' ')
    print(V_bounds)
    print(' ')
# Make sure outdir exists
if reduced_dir[-1] != '/': reduced_dir += '/'
outdir = reduced_dir+basename+'/'
if not os.path.exists(outdir):
    os.system('mkdir '+outdir)
# Load the "raw" MS datafile contents
in_MS += _ext
if not os.path.exists(in_MS+'.ms'):
    print('Could not find the input "raw" MS file!')
    print('"'+in_MS+'"'+' does not seem to exist.')
    sys.exit()
# 'tb' is the CASA table tool; read the per-row spw/observation/field indices.
tb.open(in_MS+'.ms')
spw_col = tb.getcol('DATA_DESC_ID')
obs_col = tb.getcol('OBSERVATION_ID')
field_col = tb.getcol('FIELD_ID')
tb.close()
# Identify the unique EBs inside the MS datafile
obs_ids = np.unique(obs_col)
nEB = len(obs_ids)
"""
Separate the individual EBs and time-average as specified by user.
The individual MS files are only stored temporarily during manipulations.
"""
# Split each execution block (EB) into its own temporary, time-averaged MS.
for EB in range(nEB):
    # Spectral windows and fields belonging to this EB, as contiguous ranges.
    spws = np.unique(spw_col[np.where(obs_col == obs_ids[EB])])
    if len(spws) == 1:
        spw_str = str(spws[0])
    else:
        spw_str = "%d~%d" % (spws[0], spws[-1])
    fields = np.unique(field_col[np.where(obs_col == obs_ids[EB])])
    if len(fields) == 1:
        field_str = str(fields[0])
    else:
        field_str = "%d~%d" % (fields[0], fields[-1])
    os.system('rm -rf '+dataname+'_tmp'+str(EB)+'.ms*')
    # CASA split: time-average by the per-EB interval tavg[EB], drop flagged rows.
    split(vis=in_MS+'.ms', outputvis=dataname+'_tmp'+str(EB)+'.ms',
          spw=spw_str, field=field_str, datacolumn='data', timebin=tavg[EB],
          keepflags=False)
# Create an HDF5 file, and populate the top-level group with basic info
os.system('rm -rf '+dataname+_ext+'.DATA.h5')
f = h5py.File(dataname+_ext+'.DATA.h5', "w")
f.attrs["nobs"] = nEB
f.attrs["original_MS"] = in_MS+'.ms'
f.attrs["V_bounds"] = V_bounds
f.attrs["tavg"] = tavg
f.close()
# Loop through each EB
# For every EB: read visibilities, map TOPO->LSRK frequencies per timestamp,
# slice the channel range covering V_bounds (plus padding), and store both an
# HDF5 group and a "reduced" sub-MS.
concat_files = []
for EB in range(nEB):
    # Get data
    tb.open(dataname+'_tmp'+str(EB)+'.ms')
    data_all = np.squeeze(tb.getcol('DATA'))
    u, v = tb.getcol('UVW')[0,:], tb.getcol('UVW')[1,:]
    wgt_all = tb.getcol('WEIGHT')
    times = tb.getcol('TIME')
    tb.close()
    # Parse timestamps
    # tstamp_ID maps each visibility row to the index of its unique timestamp.
    tstamps = np.unique(times)
    tstamp_ID = np.empty_like(times)
    for istamp in range(len(tstamps)):
        tstamp_ID[times == tstamps[istamp]] = istamp
    # Get TOPO frequencies
    tb.open(dataname+'_tmp'+str(EB)+'.ms/SPECTRAL_WINDOW')
    nu_TOPO_all = np.squeeze(tb.getcol('CHAN_FREQ'))
    tb.close()
    # Calculate LSRK frequencies for each timestamp
    # (the TOPO->LSRK mapping drifts with time, hence one row per timestamp)
    nu_LSRK_all = np.empty((len(tstamps), len(nu_TOPO_all)))
    ms.open(dataname+'_tmp'+str(EB)+'.ms')
    for istamp in range(len(tstamps)):
        nu_LSRK_all[istamp,:] = ms.cvelfreqs(mode='channel', outframe='LSRK',
                                             obstime=str(tstamps[istamp])+'s')
    ms.close()
    # Identify channel boundaries for the requested LSRK range
    # Doppler velocities relative to the rest frequency nu_rest (from config).
    V_LSRK_all = sc.c * (1 - nu_LSRK_all / nu_rest)
    chslo = np.argmin(np.abs(V_LSRK_all - V_bounds[0]), axis=1)
    chshi = np.argmin(np.abs(V_LSRK_all - V_bounds[1]), axis=1)
    # Channel ordering depends on whether frequency increases or decreases.
    if np.diff(nu_TOPO_all)[0] < 0:
        chlo, chhi = chslo.min(), chshi.max()
    else:
        chlo, chhi = chshi.min(), chslo.max()
    print(' ')
    # Set channel pads around data of interest
    # Use a default 3-channel pad, shrinking it near the spectral-window edges.
    bp_def = 3
    lo_bp, hi_bp = chlo - bp_def, len(nu_TOPO_all) - chhi - bp_def - 1
    if np.logical_and((lo_bp >= bp_def), (hi_bp >= bp_def)):
        bounds_pad = bp_def
    elif np.logical_or((lo_bp <= 0), (hi_bp <= 0)):
        bounds_pad = 0
    else:
        bounds_pad = np.min([lo_bp, hi_bp])
    # Slice out the data of interest
    nu_TOPO = nu_TOPO_all[chlo-bounds_pad:chhi+bounds_pad+1]
    nu_LSRK = nu_LSRK_all[:,chlo-bounds_pad:chhi+bounds_pad+1]
    data = data_all[:,chlo-bounds_pad:chhi+bounds_pad+1,:]
    # WEIGHT may be per-channel (same shape as DATA) or per-row; only slice
    # the channel axis in the former case.
    if wgt_all.shape == data_all.shape:
        wgt = wgt_all[:,chlo-bounds_pad:chhi+bounds_pad+1,:]
    else:
        wgt = wgt_all
    # Pack the data into the HDF5 output file
    f = h5py.File(dataname+_ext+'.DATA.h5', "a")
    f.create_dataset('EB'+str(EB)+'/um', data=u)
    f.create_dataset('EB'+str(EB)+'/vm', data=v)
    f.create_dataset('EB'+str(EB)+'/vis_real', data=data.real)
    f.create_dataset('EB'+str(EB)+'/vis_imag', data=data.imag)
    f.create_dataset('EB'+str(EB)+'/weights', data=wgt)
    f.create_dataset('EB'+str(EB)+'/nu_TOPO', data=nu_TOPO)
    f.create_dataset('EB'+str(EB)+'/nu_LSRK', data=nu_LSRK)
    f.create_dataset('EB'+str(EB)+'/tstamp_ID', data=tstamp_ID)
    f.close()
    # Split off a MS with the "reduced" data from this EB
    if not os.path.exists(reduced_dir+basename+'/subMS'):
        os.system('mkdir '+reduced_dir+basename+'/subMS')
    sub_ = reduced_dir+basename+'/subMS/'+basename+_ext+'_EB'+str(EB)+'.DATA.ms'
    os.system('rm -rf '+sub_)
    # spw selection syntax '0:lo~hi' keeps only the padded channel range.
    spwtag = '0:'+str(chlo-bounds_pad)+'~'+str(chhi+bounds_pad)
    split(vis=dataname+'_tmp'+str(EB)+'.ms', outputvis=sub_,
          datacolumn='data', spw=spwtag)
    concat_files += [sub_]
# Concatenate the MS files
# With a single EB there is nothing to concatenate; just copy the sub-MS.
os.system('rm -rf '+dataname+_ext+'.DATA.ms')
if len(concat_files) > 1:
    concat(vis=concat_files, concatvis=dataname+_ext+'.DATA.ms',
           dirtol='0.1arcsec', copypointing=False)
else:
    os.system('cp -r '+concat_files[0]+' '+dataname+_ext+'.DATA.ms')
# Cleanup
# Remove the per-EB temporary MS files and CASA's *.last task logs.
os.system('rm -rf '+dataname+'_tmp*.ms*')
os.system('rm -rf *.last')
| 32.227053 | 80 | 0.640534 | """
This CASA script (optionally) reduces an available (concatenated) MS by
time-averaging and sub-selecting a given velocity range. It is called
inside csalt.synthesize.make_data(), or can be used as a standalone script
for a real dataset as
casa -c format_data.py configs/gen_<cfg_file> <arg1>
where <cfg_file> is the relevant part of the configuration input filename
and <arg1> is an *optional* argument that contains a (string) filename
extension (usually "pure" or "noisy" in the csalt.synthesize framework).
(This *will* change when we update to full CASA v6.x.)
This script will output ...
"""
import os, sys
import numpy as np
import scipy.constants as sc
import h5py
"""
Parse inputs and load relevant information.
"""
# Ingest input arguments
bounds_ingest = False
if len(sys.argv) == 3:
cfg_file = sys.argv[-1]
_ext = ''
elif len(sys.argv) == 6:
cfg_file = sys.argv[-4]
_ext = '_'+sys.argv[-3]
Vbounds_lo = np.float(sys.argv[-2])
Vbounds_hi = np.float(sys.argv[-1])
bounds_ingest = True
else:
cfg_file = sys.argv[-2]
_ext = '_'+sys.argv[-1]
# Make sure the configuration file exists
if os.path.exists(cfg_file+'.py'):
execfile(cfg_file+'.py')
else:
print('Could not find input configuration file!')
sys.exit()
if bounds_ingest:
V_bounds = np.array([Vbounds_lo, Vbounds_hi])
print(' ')
print(V_bounds)
print(' ')
# Make sure outdir exists
if reduced_dir[-1] != '/': reduced_dir += '/'
outdir = reduced_dir+basename+'/'
if not os.path.exists(outdir):
os.system('mkdir '+outdir)
# Load the "raw" MS datafile contents
in_MS += _ext
if not os.path.exists(in_MS+'.ms'):
print('Could not find the input "raw" MS file!')
print('"'+in_MS+'"'+' does not seem to exist.')
sys.exit()
tb.open(in_MS+'.ms')
spw_col = tb.getcol('DATA_DESC_ID')
obs_col = tb.getcol('OBSERVATION_ID')
field_col = tb.getcol('FIELD_ID')
tb.close()
# Identify the unique EBs inside the MS datafile
obs_ids = np.unique(obs_col)
nEB = len(obs_ids)
"""
Separate the individual EBs and time-average as specified by user.
The individual MS files are only stored temporarily during manipulations.
"""
for EB in range(nEB):
spws = np.unique(spw_col[np.where(obs_col == obs_ids[EB])])
if len(spws) == 1:
spw_str = str(spws[0])
else:
spw_str = "%d~%d" % (spws[0], spws[-1])
fields = np.unique(field_col[np.where(obs_col == obs_ids[EB])])
if len(fields) == 1:
field_str = str(fields[0])
else:
field_str = "%d~%d" % (fields[0], fields[-1])
os.system('rm -rf '+dataname+'_tmp'+str(EB)+'.ms*')
split(vis=in_MS+'.ms', outputvis=dataname+'_tmp'+str(EB)+'.ms',
spw=spw_str, field=field_str, datacolumn='data', timebin=tavg[EB],
keepflags=False)
# Create an HDF5 file, and populate the top-level group with basic info
os.system('rm -rf '+dataname+_ext+'.DATA.h5')
f = h5py.File(dataname+_ext+'.DATA.h5', "w")
f.attrs["nobs"] = nEB
f.attrs["original_MS"] = in_MS+'.ms'
f.attrs["V_bounds"] = V_bounds
f.attrs["tavg"] = tavg
f.close()
# Loop through each EB
concat_files = []
for EB in range(nEB):
# Get data
tb.open(dataname+'_tmp'+str(EB)+'.ms')
data_all = np.squeeze(tb.getcol('DATA'))
u, v = tb.getcol('UVW')[0,:], tb.getcol('UVW')[1,:]
wgt_all = tb.getcol('WEIGHT')
times = tb.getcol('TIME')
tb.close()
# Parse timestamps
tstamps = np.unique(times)
tstamp_ID = np.empty_like(times)
for istamp in range(len(tstamps)):
tstamp_ID[times == tstamps[istamp]] = istamp
# Get TOPO frequencies
tb.open(dataname+'_tmp'+str(EB)+'.ms/SPECTRAL_WINDOW')
nu_TOPO_all = np.squeeze(tb.getcol('CHAN_FREQ'))
tb.close()
# Calculate LSRK frequencies for each timestamp
nu_LSRK_all = np.empty((len(tstamps), len(nu_TOPO_all)))
ms.open(dataname+'_tmp'+str(EB)+'.ms')
for istamp in range(len(tstamps)):
nu_LSRK_all[istamp,:] = ms.cvelfreqs(mode='channel', outframe='LSRK',
obstime=str(tstamps[istamp])+'s')
ms.close()
# Identify channel boundaries for the requested LSRK range
V_LSRK_all = sc.c * (1 - nu_LSRK_all / nu_rest)
chslo = np.argmin(np.abs(V_LSRK_all - V_bounds[0]), axis=1)
chshi = np.argmin(np.abs(V_LSRK_all - V_bounds[1]), axis=1)
if np.diff(nu_TOPO_all)[0] < 0:
chlo, chhi = chslo.min(), chshi.max()
else:
chlo, chhi = chshi.min(), chslo.max()
print(' ')
# Set channel pads around data of interest
bp_def = 3
lo_bp, hi_bp = chlo - bp_def, len(nu_TOPO_all) - chhi - bp_def - 1
if np.logical_and((lo_bp >= bp_def), (hi_bp >= bp_def)):
bounds_pad = bp_def
elif np.logical_or((lo_bp <= 0), (hi_bp <= 0)):
bounds_pad = 0
else:
bounds_pad = np.min([lo_bp, hi_bp])
# Slice out the data of interest
nu_TOPO = nu_TOPO_all[chlo-bounds_pad:chhi+bounds_pad+1]
nu_LSRK = nu_LSRK_all[:,chlo-bounds_pad:chhi+bounds_pad+1]
data = data_all[:,chlo-bounds_pad:chhi+bounds_pad+1,:]
if wgt_all.shape == data_all.shape:
wgt = wgt_all[:,chlo-bounds_pad:chhi+bounds_pad+1,:]
else:
wgt = wgt_all
# Pack the data into the HDF5 output file
f = h5py.File(dataname+_ext+'.DATA.h5', "a")
f.create_dataset('EB'+str(EB)+'/um', data=u)
f.create_dataset('EB'+str(EB)+'/vm', data=v)
f.create_dataset('EB'+str(EB)+'/vis_real', data=data.real)
f.create_dataset('EB'+str(EB)+'/vis_imag', data=data.imag)
f.create_dataset('EB'+str(EB)+'/weights', data=wgt)
f.create_dataset('EB'+str(EB)+'/nu_TOPO', data=nu_TOPO)
f.create_dataset('EB'+str(EB)+'/nu_LSRK', data=nu_LSRK)
f.create_dataset('EB'+str(EB)+'/tstamp_ID', data=tstamp_ID)
f.close()
# Split off a MS with the "reduced" data from this EB
if not os.path.exists(reduced_dir+basename+'/subMS'):
os.system('mkdir '+reduced_dir+basename+'/subMS')
sub_ = reduced_dir+basename+'/subMS/'+basename+_ext+'_EB'+str(EB)+'.DATA.ms'
os.system('rm -rf '+sub_)
spwtag = '0:'+str(chlo-bounds_pad)+'~'+str(chhi+bounds_pad)
split(vis=dataname+'_tmp'+str(EB)+'.ms', outputvis=sub_,
datacolumn='data', spw=spwtag)
concat_files += [sub_]
# Concatenate the MS files
os.system('rm -rf '+dataname+_ext+'.DATA.ms')
if len(concat_files) > 1:
concat(vis=concat_files, concatvis=dataname+_ext+'.DATA.ms',
dirtol='0.1arcsec', copypointing=False)
else:
os.system('cp -r '+concat_files[0]+' '+dataname+_ext+'.DATA.ms')
# Cleanup
os.system('rm -rf '+dataname+'_tmp*.ms*')
os.system('rm -rf *.last')
| 0 | 0 | 0 |
d44d5ec5c5d6b32d6e144b89c329e7a0745bad9a | 2,776 | py | Python | examples.py | ofirmanor/nicetable | a0110161657dc9da66ffb334677132f7f3af8511 | [
"MIT"
] | 6 | 2019-01-10T16:15:11.000Z | 2022-03-28T01:42:37.000Z | examples.py | ofirmanor/nicetable | a0110161657dc9da66ffb334677132f7f3af8511 | [
"MIT"
] | 1 | 2021-01-08T06:25:29.000Z | 2021-01-08T06:25:29.000Z | examples.py | ofirmanor/nicetable | a0110161657dc9da66ffb334677132f7f3af8511 | [
"MIT"
] | 1 | 2021-01-07T23:58:20.000Z | 2021-01-07T23:58:20.000Z | # Example: printing the list of builtin layouts
import json
from nicetable.nicetable import NiceTable
# from __future__ import annotations # only for Python 3.7 and up?
out = NiceTable(['Layout', 'Description'])
for layout in NiceTable.builtin_layouts():
out.append(layout)
print(out)
# Example: printing the sample JSON in two layouts
out = NiceTable(['Name', 'Type', 'Height(cm)', ' Weight(kg)'], layout='default')
for pokemon in json.loads(NiceTable.SAMPLE_JSON):
out.append([pokemon['name'], pokemon['type'], pokemon['height'], pokemon['weight']])
print('-- default format --\n')
print(out)
out.layout = 'csv'
out.sep_vertical = '|'
print('-- CSV with a pipe separator --\n')
print(out)
# Example: printing all the formatting settings in md layout
out = NiceTable(['Setting', 'Type', 'Default', 'Description'], layout='md')
for setting in NiceTable.FORMATTING_SETTINGS:
out.append(setting)
print(out)
# Example: custom layout
out = MyNiceTable(['Layout', 'Description'], layout='winter_columns')
for layout in MyNiceTable.builtin_layouts():
out.append(layout)
print(out)
# Example: setting column-level options
out = NiceTable(['Name', 'Type', 'Height(cm)', ' Weight(kg)'])
for pokemon in json.loads(NiceTable.SAMPLE_JSON):
out.append([pokemon['name'], pokemon['type'], pokemon['height'], pokemon['weight']])
# set column options by position
out.set_col_options(0, adjust='center')
# set column options by column name
out.set_col_options('Type',
func=lambda x: x.lower() if x != 'Electric' else None,
none_string='N/A')
# Example: different numeric alignments
out = NiceTable(['standard left', 'standard center', 'standard right', 'strict_left', 'strict_center', 'strict_right'])
n_list = [6.901, 6.1, 122]
[out.append([n] * 6) for n in n_list]
out.col_adjust = ['left', 'center', 'right', 'strict_left', 'strict_center', 'strict_right']
print(out)
# Example: long text
out = NiceTable(['Code', 'Product Description(Long)'])
out.append([1, 'Boeing 777. Batteries not included. May contain nuts.'])
out.append([2, 'Sack of sand'])
print(out)
out.value_max_len = 19
print(out)
out.value_too_long_policy = 'truncate'
print(out)
# Example: newlines
out = NiceTable(['Code', 'Product Description\n(Long)']) \
.append([1, 'Boeing 777\nBatteries not included.\nMay contain nuts.']) \
.append([2, 'Sack of sand'])
print(out)
out.value_newline_replace = '\\n'
print(out)
| 34.271605 | 120 | 0.668588 | # Example: printing the list of builtin layouts
import json
from nicetable.nicetable import NiceTable
# from __future__ import annotations # only for Python 3.7 and up?
out = NiceTable(['Layout', 'Description'])
for layout in NiceTable.builtin_layouts():
out.append(layout)
print(out)
# Example: printing the sample JSON in two layouts
out = NiceTable(['Name', 'Type', 'Height(cm)', ' Weight(kg)'], layout='default')
for pokemon in json.loads(NiceTable.SAMPLE_JSON):
out.append([pokemon['name'], pokemon['type'], pokemon['height'], pokemon['weight']])
print('-- default format --\n')
print(out)
out.layout = 'csv'
out.sep_vertical = '|'
print('-- CSV with a pipe separator --\n')
print(out)
# Example: printing all the formatting settings in md layout
out = NiceTable(['Setting', 'Type', 'Default', 'Description'], layout='md')
for setting in NiceTable.FORMATTING_SETTINGS:
out.append(setting)
print(out)
# Example: custom layout
class MyNiceTable(NiceTable):
def _layout_as_winter_columns(self) -> None:
"""Table with a winter-themed separator. Quite Ugly."""
self.sep_vertical = '❄☂🌧☂❄'
self.sep_cross = '❄☂🌧☂❄'
self.sep_horizontal = 'ˣ'
out = MyNiceTable(['Layout', 'Description'], layout='winter_columns')
for layout in MyNiceTable.builtin_layouts():
out.append(layout)
print(out)
# Example: setting column-level options
out = NiceTable(['Name', 'Type', 'Height(cm)', ' Weight(kg)'])
for pokemon in json.loads(NiceTable.SAMPLE_JSON):
out.append([pokemon['name'], pokemon['type'], pokemon['height'], pokemon['weight']])
# set column options by position
out.set_col_options(0, adjust='center')
# set column options by column name
out.set_col_options('Type',
func=lambda x: x.lower() if x != 'Electric' else None,
none_string='N/A')
# Example: different numeric alignments
out = NiceTable(['standard left', 'standard center', 'standard right', 'strict_left', 'strict_center', 'strict_right'])
n_list = [6.901, 6.1, 122]
[out.append([n] * 6) for n in n_list]
out.col_adjust = ['left', 'center', 'right', 'strict_left', 'strict_center', 'strict_right']
print(out)
# Example: long text
out = NiceTable(['Code', 'Product Description(Long)'])
out.append([1, 'Boeing 777. Batteries not included. May contain nuts.'])
out.append([2, 'Sack of sand'])
print(out)
out.value_max_len = 19
print(out)
out.value_too_long_policy = 'truncate'
print(out)
# Example: newlines
out = NiceTable(['Code', 'Product Description\n(Long)']) \
.append([1, 'Boeing 777\nBatteries not included.\nMay contain nuts.']) \
.append([2, 'Sack of sand'])
print(out)
out.value_newline_replace = '\\n'
print(out)
| 0 | 252 | 23 |
824a966bc3f98387de4dd6b19297de88ea1261a3 | 505 | py | Python | forwarding_bot/vk/_bot.py | dhvcc/VK-TG-transfer-bot | 4a1570ca6f2b905bc015412b527ac58039eb2cef | [
"MIT"
] | 4 | 2020-09-19T18:59:01.000Z | 2020-10-13T13:34:45.000Z | forwarding_bot/vk/_bot.py | dhvcc/VK-TG-transfer-bot | 4a1570ca6f2b905bc015412b527ac58039eb2cef | [
"MIT"
] | 11 | 2020-09-24T09:07:06.000Z | 2022-03-10T12:21:34.000Z | forwarding_bot/vk/_bot.py | dhvcc/VK-TG-transfer-bot | 4a1570ca6f2b905bc015412b527ac58039eb2cef | [
"MIT"
] | 2 | 2020-10-11T13:30:09.000Z | 2020-10-22T07:08:21.000Z | import logging
from vkbottle import User
from forwarding_bot.vk._middleware import middleware_bp
from ._blueprint import bot_bp
logger = logging.getLogger(__name__)
| 22.954545 | 55 | 0.667327 | import logging
from vkbottle import User
from forwarding_bot.vk._middleware import middleware_bp
from ._blueprint import bot_bp
logger = logging.getLogger(__name__)
class VKBot:
def __init__(self, token: str):
self.bot = User(token)
self.bot.set_blueprints(middleware_bp, bot_bp)
def start(self) -> None:
logger.info("Starting VKBot")
try:
self.bot.run_polling()
except Exception as e:
logger.exception(f"VK bot crashed. {e}")
| 269 | -9 | 76 |
dc0c6eb6c805d3ba32f728bbde54860ebb404adc | 5,022 | py | Python | choicesenum/django/fields.py | gustavo-depaula/python-choicesenum | f0ed55da28c444a99595137f3a60ed9d900c3312 | [
"BSD-3-Clause"
] | null | null | null | choicesenum/django/fields.py | gustavo-depaula/python-choicesenum | f0ed55da28c444a99595137f3a60ed9d900c3312 | [
"BSD-3-Clause"
] | null | null | null | choicesenum/django/fields.py | gustavo-depaula/python-choicesenum | f0ed55da28c444a99595137f3a60ed9d900c3312 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import, unicode_literals
from django.core import checks
from django.db import models
from django.utils.translation import gettext_lazy as _
try:
from django.utils.module_loading import import_string
except ImportError: # pragma: no cover, Django 1.6 compat
from django.utils.module_loading import import_by_path as import_string
import six
from .compat import Creator
from ..enums import ChoicesEnum
| 34.163265 | 95 | 0.579052 | # coding: utf-8
from __future__ import absolute_import, unicode_literals
from django.core import checks
from django.db import models
from django.utils.translation import gettext_lazy as _
try:
from django.utils.module_loading import import_string
except ImportError: # pragma: no cover, Django 1.6 compat
from django.utils.module_loading import import_by_path as import_string
import six
from .compat import Creator
from ..enums import ChoicesEnum
class FieldErrors(ChoicesEnum):
E01 = 'choicesenum.E01', _("{cls} has `null=True` but {enum} does not have an item with "
"value `None`.")
E02 = 'choicesenum.E02', _("{cls}: '{default}' is not a valid default for '{enum}'.")
class EnumFieldMixin(object):
def __init__(self, enum=None, **kwargs):
choices = kwargs.pop('choices', None)
if enum and isinstance(enum, six.string_types):
enum = import_string(enum)
if choices is None and enum:
choices = enum.choices()
kwargs['choices'] = choices
self.enum = enum
super(EnumFieldMixin, self).__init__(**kwargs)
def check(self, **kwargs):
try:
errors = super(EnumFieldMixin, self).check(**kwargs)
except BaseException:
errors = []
errors.extend(self._check_null(**kwargs))
errors.extend(self._check_default(**kwargs))
return errors
def _check_null(self, **kwargs):
if self.null:
try:
self.enum(None)
return []
except ValueError:
return [
checks.Error(
FieldErrors.E01.display.format(
cls=self.__class__.__name__, enum=self.enum,),
obj=self,
id=FieldErrors.E01,
hint=_('Add an enum item with `None` as value, eg.: '
'`UNDEFINED = None`, or turn `null=False`.'),
)
]
return []
def _check_default(self, **kwargs):
try:
default = self.get_default()
self.enum(default)
return []
except ValueError:
return [
checks.Error(
FieldErrors.E02.display.format(
cls=self.__class__.__name__, default=default, enum=self.enum,),
obj=self,
id=FieldErrors.E02,
hint=_('Add an enum item with `{0!r}` as value, eg.: `UNDEFINED = {0!r}`, '
'or inform a valid default value.').format(default),
)
]
def contribute_to_class(self, cls, name):
# Retain to_python behaviour for < Django 1.8 with removal
# of SubfieldBase
super(EnumFieldMixin, self).contribute_to_class(cls, name)
setattr(cls, name, Creator(self, cls))
def to_python(self, value):
if isinstance(value, self.enum):
return value
cleaned_value = super(EnumFieldMixin, self).to_python(value)
return self.enum(cleaned_value)
def from_db_value(self, value, *args, **kwargs):
try:
return self.enum(value)
except ValueError:
# We need to return None here even if the enum has no None value
# to support models being fetched via select_related.
# The row converters are run before the pk=None check in
# the default Django model managers.
if value is None:
return value
# For certain operations on Postgres ArrayFields and Array
# aggregation, we need to handle list values
if type(value) == list:
return list(filter(None, [
self.from_db_value(x, *args, **kwargs)
for x in value
]))
raise
def get_prep_value(self, value):
enum_value = self.to_python(value)
return getattr(enum_value, 'value', value)
def deconstruct(self):
name, path, args, kwargs = super(EnumFieldMixin, self).deconstruct()
if 'default' in kwargs and self.default:
kwargs['default'] = self.to_python(self.default).value
if self.enum:
kwargs["enum"] = self.enum
if 'choices' in kwargs: # pragma: no cover
del kwargs["choices"]
return name, path, args, kwargs
def get_base_classes(cls_type): # pragma: no cover, already covered by tox matrix
if hasattr(models, 'SubfieldBase'):
return six.with_metaclass(models.SubfieldBase, EnumFieldMixin, cls_type)
class EnumFieldBase(EnumFieldMixin, cls_type):
pass
return EnumFieldBase
class EnumCharField(get_base_classes(models.CharField)):
description = "A string enum field"
class EnumIntegerField(get_base_classes(models.IntegerField)):
description = "An integer enum field"
| 3,790 | 408 | 358 |
eae86aa1429fe11b7f7053d2e37f0b1844ebbabd | 79 | py | Python | src/ssg/__init__.py | a-rokay/static-site-generator | f44a6951beeb68bd5ae5cc3b02e71c636d853dc2 | [
"MIT"
] | 1 | 2021-09-21T22:45:17.000Z | 2021-09-21T22:45:17.000Z | src/ssg/__init__.py | a-rokay/static-site-generator | f44a6951beeb68bd5ae5cc3b02e71c636d853dc2 | [
"MIT"
] | 11 | 2021-09-13T14:12:34.000Z | 2021-10-04T21:09:18.000Z | src/ssg/__init__.py | a-rokay/static-site-generator | f44a6951beeb68bd5ae5cc3b02e71c636d853dc2 | [
"MIT"
] | 5 | 2021-09-14T19:29:43.000Z | 2021-11-04T19:58:06.000Z | import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).parent))
| 15.8 | 51 | 0.797468 | import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).parent))
| 0 | 0 | 0 |
6263e624ebab3c675c7c6fecb0b26b6fa51802dc | 2,288 | py | Python | hatch_vcs/version_source.py | ofek/hatch-vcs | c5388d67192d9bf88191927a35f51705121784a1 | [
"MIT"
] | null | null | null | hatch_vcs/version_source.py | ofek/hatch-vcs | c5388d67192d9bf88191927a35f51705121784a1 | [
"MIT"
] | 1 | 2022-03-08T04:07:09.000Z | 2022-03-18T05:41:17.000Z | hatch_vcs/version_source.py | ofek/hatch-vcs | c5388d67192d9bf88191927a35f51705121784a1 | [
"MIT"
] | null | null | null | # SPDX-FileCopyrightText: 2022-present Ofek Lev <oss@ofek.dev>
#
# SPDX-License-Identifier: MIT
from hatchling.version.source.plugin.interface import VersionSourceInterface
| 32.685714 | 77 | 0.668706 | # SPDX-FileCopyrightText: 2022-present Ofek Lev <oss@ofek.dev>
#
# SPDX-License-Identifier: MIT
from hatchling.version.source.plugin.interface import VersionSourceInterface
class VCSVersionSource(VersionSourceInterface):
PLUGIN_NAME = 'vcs'
def __init__(self, *args, **kwargs):
super(VCSVersionSource, self).__init__(*args, **kwargs)
self.__config_tag_pattern = None
self.__config_fallback_version = None
self.__config_raw_options = None
@property
def config_tag_pattern(self):
if self.__config_tag_pattern is None:
tag_pattern = self.config.get('tag-pattern', '')
if not isinstance(tag_pattern, str):
raise TypeError('option `tag-pattern` must be a string')
self.__config_tag_pattern = tag_pattern
return self.__config_tag_pattern
@property
def config_fallback_version(self):
if self.__config_fallback_version is None:
fallback_version = self.config.get('fallback-version', '')
if not isinstance(fallback_version, str):
raise TypeError('option `fallback-version` must be a string')
self.__config_fallback_version = fallback_version
return self.__config_fallback_version
@property
def config_raw_options(self):
if self.__config_raw_options is None:
raw_options = self.config.get('raw-options', {})
if not isinstance(raw_options, dict):
raise TypeError('option `raw-options` must be a table')
self.__config_raw_options = raw_options
return self.__config_raw_options
def get_version_data(self):
from copy import deepcopy
from setuptools_scm import get_version
config = deepcopy(self.config_raw_options)
config['root'] = self.root
config.setdefault('tag_regex', self.config_tag_pattern)
# Only set for non-empty strings
if self.config_fallback_version:
config['fallback_version'] = self.config_fallback_version
# Writing only occurs when the build hook is enabled
config.pop('write_to', None)
config.pop('write_to_template', None)
version = get_version(**config)
return {'version': version}
| 1,864 | 227 | 23 |
d73ac3c9452edba0b0b59e8de232220b9ef33e42 | 1,087 | py | Python | scripts/find_zone.py | MrCerealKiller/agros_paths | afbae25280541ddef2dff0bba69b052068d998d0 | [
"MIT"
] | null | null | null | scripts/find_zone.py | MrCerealKiller/agros_paths | afbae25280541ddef2dff0bba69b052068d998d0 | [
"MIT"
] | null | null | null | scripts/find_zone.py | MrCerealKiller/agros_paths | afbae25280541ddef2dff0bba69b052068d998d0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
from geodesy.utm import gridZone
def main():
"""
Simple utility script to find the UTM zone of WGS84 coords
"""
TAG = "[find_zone.main] "
lat = rospy.get_param('~lat', None)
lon = rospy.get_param('~lon', None)
# Check that at least lat and lon are provided
missing_args = []
if not lat:
missing_args.append('lat (double) ')
if not lon:
missing_args.append('lon (double) ')
# If missing, report and exit
if missing_args:
msg = ('Missing params: ')
for arg in missing_args:
msg = msg + arg
rospy.logerr(TAG + msg)
rospy.loginfo('exiting...')
return
try:
lat = float(lat)
lon = float(lon)
rospy.loginfo(TAG + '\n' +
'\tLatitude: {}\n'.format(lat) +
'\tLongitude: {}\n'.format(lon))
zone, band = gridZone(lat, lon)
rospy.loginfo(TAG + 'UTM zone of given coords:\n\n' +
'\t{}{}\n'.format(zone, band))
except Error as e:
rospy.logerr(TAG + 'Encountered error: {}'.format(e))
rospy.loginfo('exiting...')
return
if __name__ == '__main__':
rospy.init_node('zone_finder')
main()
| 20.903846 | 60 | 0.636615 | #!/usr/bin/env python
import rospy
from geodesy.utm import gridZone
def main():
"""
Simple utility script to find the UTM zone of WGS84 coords
"""
TAG = "[find_zone.main] "
lat = rospy.get_param('~lat', None)
lon = rospy.get_param('~lon', None)
# Check that at least lat and lon are provided
missing_args = []
if not lat:
missing_args.append('lat (double) ')
if not lon:
missing_args.append('lon (double) ')
# If missing, report and exit
if missing_args:
msg = ('Missing params: ')
for arg in missing_args:
msg = msg + arg
rospy.logerr(TAG + msg)
rospy.loginfo('exiting...')
return
try:
lat = float(lat)
lon = float(lon)
rospy.loginfo(TAG + '\n' +
'\tLatitude: {}\n'.format(lat) +
'\tLongitude: {}\n'.format(lon))
zone, band = gridZone(lat, lon)
rospy.loginfo(TAG + 'UTM zone of given coords:\n\n' +
'\t{}{}\n'.format(zone, band))
except Error as e:
rospy.logerr(TAG + 'Encountered error: {}'.format(e))
rospy.loginfo('exiting...')
return
if __name__ == '__main__':
rospy.init_node('zone_finder')
main()
| 0 | 0 | 0 |
7ac35ca1f56e6121c03a2f94c88ea62eb273c365 | 417 | py | Python | gcloud/filter_functions.py | pantheon-ci-bot/etl-framework | 36d4c0d5c26ddd7c0bb2d2b99e3138b50a21c46f | [
"MIT"
] | 2 | 2017-03-01T20:09:06.000Z | 2019-02-08T17:10:16.000Z | gcloud/filter_functions.py | pantheon-ci-bot/etl-framework | 36d4c0d5c26ddd7c0bb2d2b99e3138b50a21c46f | [
"MIT"
] | 40 | 2015-10-10T15:02:21.000Z | 2020-03-17T22:32:04.000Z | gcloud/filter_functions.py | pantheon-ci-bot/etl-framework | 36d4c0d5c26ddd7c0bb2d2b99e3138b50a21c46f | [
"MIT"
] | 2 | 2018-11-14T21:50:58.000Z | 2022-03-07T20:59:27.000Z | """set of filter functions"""
import datetime
import uuid
def choose_current_date_partition():
"""gets the parition for current date"""
return datetime.date.today().strftime('$%Y%m%d')
def add_bigquery_insert_uuid(row):
"""formats output_row and adds a uuid to be inserted"""
output_row = dict()
output_row["insertId"] = str(uuid.uuid1())
output_row["json"] = row
return output_row
| 20.85 | 59 | 0.688249 | """set of filter functions"""
import datetime
import uuid
def choose_current_date_partition():
"""gets the parition for current date"""
return datetime.date.today().strftime('$%Y%m%d')
def add_bigquery_insert_uuid(row):
"""formats output_row and adds a uuid to be inserted"""
output_row = dict()
output_row["insertId"] = str(uuid.uuid1())
output_row["json"] = row
return output_row
| 0 | 0 | 0 |
4e8f4745274985a263ec4883d895332a482b971e | 2,937 | py | Python | server/problem_server/ocr_server.py | jun0911-cmyk/s-class-version-3.0 | b485b643f2cc62e81ca15bce52e6aff843f3459d | [
"MIT"
] | null | null | null | server/problem_server/ocr_server.py | jun0911-cmyk/s-class-version-3.0 | b485b643f2cc62e81ca15bce52e6aff843f3459d | [
"MIT"
] | null | null | null | server/problem_server/ocr_server.py | jun0911-cmyk/s-class-version-3.0 | b485b643f2cc62e81ca15bce52e6aff843f3459d | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
import pytesseract
import cv2
import json
import time
from tensorflow import keras
pytesseract.pytesseract.tesseract_cmd = r'C:/Program Files/Tesseract-OCR/tesseract.exe'
img_height = 180
img_width = 180
image_name = 'test1.jpg'
model_name = '1627062415'
class_names = ['drawing', 'paper', 'problem']
model_path = 'C:/Users/jun09/OneDrive/Desktop/s-class_system_version/s-class_version-3/server/problem_server/model/cnn_model/' + model_name
image_path = 'C:/Users/jun09/OneDrive/desktop/s-class_system_version/s-class_version-3/server/problem_server/test_image/' + image_name
accuracy, score_class_name = accuracy_calculation()
if score_class_name == 'problem' and accuracy > 70.0:
print('Extracting text...')
ocr_problem_text = ocr_image(image_path)
json_data = create_json(ocr_problem_text)
response_msg = response_json(json_data)
print(response_msg)
elif score_class_name == 'problem' and accuracy < 70.0:
print(
"The image was not accurately recognized. Please select another image or re-recognize it. Current measured accuracy: {:.2f}".format(accuracy)
)
else:
print(
"Failed to extract text, less than 70% accuracy or not problematic. Measured Results : {}, result accuracy : {:.2f}%"
.format(score_class_name, accuracy)
) | 31.580645 | 149 | 0.695267 | import tensorflow as tf
import numpy as np
import pytesseract
import cv2
import json
import time
from tensorflow import keras
pytesseract.pytesseract.tesseract_cmd = r'C:/Program Files/Tesseract-OCR/tesseract.exe'
img_height = 180
img_width = 180
image_name = 'test1.jpg'
model_name = '1627062415'
class_names = ['drawing', 'paper', 'problem']
model_path = 'C:/Users/jun09/OneDrive/Desktop/s-class_system_version/s-class_version-3/server/problem_server/model/cnn_model/' + model_name
image_path = 'C:/Users/jun09/OneDrive/desktop/s-class_system_version/s-class_version-3/server/problem_server/test_image/' + image_name
def load_model():
return tf.keras.models.load_model(model_path)
def load_image():
return keras.preprocessing.image.load_img(image_path, target_size=(img_height, img_width))
def image_array():
load_img = load_image()
img_array = keras.preprocessing.image.img_to_array(load_img)
return tf.expand_dims(img_array, 0)
def predict_image():
img_array = image_array()
model = load_model()
return model.predict(img_array)
def accuracy_calculation():
predictions = predict_image()
score = tf.nn.softmax(predictions[0])
accuracy = 100 * np.max(score)
score_class_name = class_names[np.argmax(score)]
return accuracy, score_class_name
def ocr_image(image_name):
ocr_predict_image = cv2.imread(image_name)
ocr_config = '-l kor+eng+equ --oem 3 --psm 11'
return pytesseract.image_to_string(ocr_predict_image, config=ocr_config)
def create_json(ocr_data):
t = time.time()
json_data = {
"response": [
{
"response_id": int(t),
"response_text": ocr_data,
"response_classification": "ocr",
"image_name": image_name,
"predict_model_name": model_name
}
]
}
return json_data
def response_json(response_data):
message = ''
try:
with open("sendData.json", "w", encoding="UTF-8-sig") as json_file:
json_file.write(json.dumps(response_data, ensure_ascii=False))
message = "successed! {}".format(response_data)
except:
message = "Response Error"
return message
accuracy, score_class_name = accuracy_calculation()
if score_class_name == 'problem' and accuracy > 70.0:
print('Extracting text...')
ocr_problem_text = ocr_image(image_path)
json_data = create_json(ocr_problem_text)
response_msg = response_json(json_data)
print(response_msg)
elif score_class_name == 'problem' and accuracy < 70.0:
print(
"The image was not accurately recognized. Please select another image or re-recognize it. Current measured accuracy: {:.2f}".format(accuracy)
)
else:
print(
"Failed to extract text, less than 70% accuracy or not problematic. Measured Results : {}, result accuracy : {:.2f}%"
.format(score_class_name, accuracy)
) | 1,409 | 0 | 184 |
77ff6b6bfaabc0cad8a99a72c1b833c554b753c8 | 11,240 | py | Python | spy/compiler.py | pcostesi/spy | 6a352df1053fea60dd6bbc9932bc946e8fae1e3e | [
"OLDAP-2.6"
] | 1 | 2017-09-01T21:21:24.000Z | 2017-09-01T21:21:24.000Z | spy/compiler.py | pcostesi/spy | 6a352df1053fea60dd6bbc9932bc946e8fae1e3e | [
"OLDAP-2.6"
] | null | null | null | spy/compiler.py | pcostesi/spy | 6a352df1053fea60dd6bbc9932bc946e8fae1e3e | [
"OLDAP-2.6"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011, 2012 Pablo A. Costesich <pcostesi@alu.itba.edu.ar>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Dev Team nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from spy.core import Instruction, Bytecode
from spy.optimizations import register_relocation, dce
import re
from StringIO import StringIO
# This is a hand-crafted top-down parser (it's not recursive).
# Although ad-hoc, it's good enough for this project (no external dependencies)
NOP, TAG, KWORD, IDENTIFIER, NUMBER, ARROW, NEQ, OP, LB, RB, NL = range(11)
TOKEN_NAMES = "NOP TAG KWORD IDENTIFIER NUMBER ARROW NEQ OP LB RB NL".split()
_PATTERNS = (
(re.compile(r"[ \t\f]"), NOP),
(re.compile(r"\n"), NL),
(re.compile(r"\["), LB),
(re.compile(r"\]"), RB),
(re.compile(r"[a-eA-E]([1-9][0-9]*)?"), TAG),
(re.compile(r"[yY]|([xXzZ]([1-9][0-9]*)?)"), IDENTIFIER),
(re.compile("#.*"), NOP),
(re.compile("<-"), ARROW),
(re.compile(r"\+|-"), OP),
(re.compile("!="), NEQ),
(re.compile(r"[0-9]+"), NUMBER),
(re.compile(r"\w+"), KWORD),
)
def _match_some(regexes, line, n_line, n_col):
"""Match patterns in order. Returns a tuple of match and token type or
raises SyntaxError."""
for regex, token in regexes:
match = regex.match(line, n_col)
if match is not None:
return match, token
error = "No rules to match input (does not conform this grammar) \n"
error += "At line %d, column %d" % (n_line, n_col)
error += "\n\t%s" % line
error += "\n" if line[-1] != "\n" else ""
error += "\t" + "_" * (n_col - 1) + "/\\" + "_" * (len(line) - n_col - 2)
raise SyntaxError(error)
def tokenize(input_file):
    """Tokenizes a file and yields matches in a format similar to
    generate_tokens in the stdlib module tokenize"""
    n_line = 1
    for line in input_file.readlines():
        n_col, n_stop = 0, 0
        maxcol = len(line)
        # 'maxcol > 1' skips lines that are a lone newline, so blank lines
        # emit no NL token at all.
        while n_col < maxcol and maxcol > 1:
            match, token = _match_some(_PATTERNS, line, n_line, n_col)
            n_col, n_stop = match.span()
            # The matched lexeme text.
            matchline = match.string[n_col : n_stop]
            t_start, t_stop = (n_line, n_col), (n_line, n_stop)
            # Resume scanning right after the match.
            n_col = n_stop
            if token == NOP:
                # Whitespace and comments are dropped.
                continue
            yield token, t_start, t_stop, matchline
        n_line += 1
class Matcher(object):
    "Stateful matcher that keeps the lookahead and matching info"
    # NOTE(review): three bare @property decorators are stacked directly on
    # match() below -- the decorated property method bodies (presumably the
    # symbol/span/token accessors that match() itself references) appear to
    # have been stripped from this copy. As written, match becomes a
    # property-of-a-property and the class cannot work; restore the missing
    # methods from version control.
    @property
    @property
    @property
    def match(self, *expect, **kwargs):
        """Matches a series of tokens (and epsilon-productions) and advances
        the token stream.
        *expect: list of expected tokens. None is the epsilon-production.
        **kwargs:
            - test: runs a test function and raises SyntaxError on false.
        Raises SyntaxError on EOF, failed tests
        """
        try:
            # Python 2 iterator protocol; next(self.tokens) on Python 3.
            self.lookahead = self.tokens.next()
        except StopIteration:
            # EOF is only acceptable when the epsilon-production (None)
            # was listed among the expectations.
            if None not in expect:
                raise SyntaxError("Unexpected end of file")
            self.lookahead = None
            return self
        if not expect:
            return self
        for tok in expect:
            if tok == self.lookahead[0]:
                # Optional extra predicate on the accepted token.
                if callable(kwargs.get('test')):
                    if not kwargs['test'](self.lookahead):
                        raise SyntaxError("Failed test.")
                return self
        raise SyntaxError("Token '%s'(%s) does not match %s" %
            (self.symbol, TOKEN_NAMES[self.token], list(TOKEN_NAMES[i] for i in expect)))
def parse(tokens):
    "Parse a stream of tokens generated by tokenize"
    # Dispatch on the type of the lookahead token; each _match_* helper
    # consumes one complete statement and returns an (op, var, val) triple.
    matcher = Matcher(tokens)
    while matcher.lookahead != None:
        if matcher.token == LB:
            yield _match_LB(matcher)
        elif matcher.token == KWORD:
            yield _match_KWORD(matcher)
        elif matcher.token == IDENTIFIER:
            yield _match_IDEN(matcher)
        elif matcher.token == NL:
            # Empty statement: just advance past the newline.
            matcher.match(NL, LB, IDENTIFIER, KWORD, None)
        else:
            raise SyntaxError("Unexpected symbol '%s': line %d, column %d" %
                ((matcher.symbol,) + matcher.span))
| 36.258065 | 89 | 0.589057 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011, 2012 Pablo A. Costesich <pcostesi@alu.itba.edu.ar>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Dev Team nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from spy.core import Instruction, Bytecode
from spy.optimizations import register_relocation, dce
import re
from StringIO import StringIO
# This is a hand-crafted top-down parser (it's not recursive).
# Although ad-hoc, it's good enough for this project (no external dependencies)
NOP, TAG, KWORD, IDENTIFIER, NUMBER, ARROW, NEQ, OP, LB, RB, NL = range(11)
TOKEN_NAMES = "NOP TAG KWORD IDENTIFIER NUMBER ARROW NEQ OP LB RB NL".split()
_PATTERNS = (
(re.compile(r"[ \t\f]"), NOP),
(re.compile(r"\n"), NL),
(re.compile(r"\["), LB),
(re.compile(r"\]"), RB),
(re.compile(r"[a-eA-E]([1-9][0-9]*)?"), TAG),
(re.compile(r"[yY]|([xXzZ]([1-9][0-9]*)?)"), IDENTIFIER),
(re.compile("#.*"), NOP),
(re.compile("<-"), ARROW),
(re.compile(r"\+|-"), OP),
(re.compile("!="), NEQ),
(re.compile(r"[0-9]+"), NUMBER),
(re.compile(r"\w+"), KWORD),
)
def _match_some(regexes, line, n_line, n_col):
"""Match patterns in order. Returns a tuple of match and token type or
raises SyntaxError."""
for regex, token in regexes:
match = regex.match(line, n_col)
if match is not None:
return match, token
error = "No rules to match input (does not conform this grammar) \n"
error += "At line %d, column %d" % (n_line, n_col)
error += "\n\t%s" % line
error += "\n" if line[-1] != "\n" else ""
error += "\t" + "_" * (n_col - 1) + "/\\" + "_" * (len(line) - n_col - 2)
raise SyntaxError(error)
def tokenize(input_file):
    """Tokenizes a file and yields matches in a format similar to
    generate_tokens in the stdlib module tokenize"""
    n_line = 1
    for line in input_file.readlines():
        n_col, n_stop = 0, 0
        maxcol = len(line)
        # 'maxcol > 1' skips lines that are a lone newline, so blank lines
        # emit no NL token at all.
        while n_col < maxcol and maxcol > 1:
            match, token = _match_some(_PATTERNS, line, n_line, n_col)
            n_col, n_stop = match.span()
            # The matched lexeme text.
            matchline = match.string[n_col : n_stop]
            t_start, t_stop = (n_line, n_col), (n_line, n_stop)
            # Resume scanning right after the match.
            n_col = n_stop
            if token == NOP:
                # Whitespace and comments are dropped.
                continue
            yield token, t_start, t_stop, matchline
        n_line += 1
class Matcher(object):
    """Stateful matcher that keeps the lookahead and matching info.

    Wraps a token stream (as produced by ``tokenize``) and exposes the
    current lookahead token through the ``symbol``/``span``/``token``
    properties.  ``match`` advances the stream and validates the new
    lookahead against a set of expected token types.
    """
    def __init__(self, tokens):
        self.tokens = iter(tokens)
        self.lookahead = None
        # Prime the lookahead with the first token (no expectation checked,
        # but an empty stream still raises "Unexpected end of file").
        self.match()
    @property
    def symbol(self):
        """Text of the current lookahead token, or None at EOF."""
        if self.lookahead:
            return self.lookahead[3]
    @property
    def span(self):
        """(start_column, stop_column) of the lookahead, or None at EOF."""
        if self.lookahead:
            return self.lookahead[1][1], self.lookahead[2][1]
    @property
    def token(self):
        """Token type code of the lookahead, or None at EOF."""
        if self.lookahead:
            return self.lookahead[0]
    def match(self, *expect, **kwargs):
        """Matches a series of tokens (and epsilon-productions) and advances
        the token stream.
        *expect: list of expected tokens. None is the epsilon-production.
        **kwargs:
            - test: runs a test function and raises SyntaxError on false.
        Raises SyntaxError on EOF, failed tests
        """
        try:
            # BUG FIX: use the next() builtin instead of the Python-2-only
            # .next() iterator method (works on Python 2.6+ and Python 3).
            self.lookahead = next(self.tokens)
        except StopIteration:
            # EOF is only acceptable when the epsilon-production (None)
            # was listed among the expectations.
            if None not in expect:
                raise SyntaxError("Unexpected end of file")
            self.lookahead = None
            return self
        if not expect:
            return self
        for tok in expect:
            if tok == self.lookahead[0]:
                # Optional extra predicate on the accepted token.
                if callable(kwargs.get('test')):
                    if not kwargs['test'](self.lookahead):
                        raise SyntaxError("Failed test.")
                return self
        raise SyntaxError("Token '%s'(%s) does not match %s" %
            (self.symbol, TOKEN_NAMES[self.token], list(TOKEN_NAMES[i] for i in expect)))
def is_kword(*kword):
    """Build a match() predicate that accepts only the given keyword texts.

    Non-KWORD tokens always pass; a KWORD token must spell one of *kword*.
    """
    def check(token):
        if token[0] == KWORD:
            if token[3] not in kword:
                raise SyntaxError("Keyword mismatch")
        return True
    return check
def equals(word):
    """Build a match() predicate requiring the token text to equal *word*."""
    def check(token):
        if token[3] == word:
            return True
        raise SyntaxError("Word mismatch (got '%s' instead of '%s')" %
            (token[3], word))
    return check
def valid_number(token):
    """match() predicate: the token text must parse as a positive integer."""
    value = int(token[3])
    if value <= 0:
        raise SyntaxError("Invalid number")
    return True
def _match_LB(matcher):
    # Production: '[' TAG ']' -- a jump-target label.
    matcher.match(TAG)
    tag = matcher.symbol
    # A label may be followed by an instruction or end the statement.
    matcher.match(RB).match(IDENTIFIER, KWORD, None)
    return Instruction.TAG, 0, tag
def _match_KWORD(matcher):
    # Production: 'goto' TAG  |  'if' IDENTIFIER '!=' '0' 'goto' TAG
    if matcher.symbol == "goto":
        # Unconditional jump.
        matcher.match(TAG)
        tag = matcher.symbol
        matcher.match(NL, LB, IDENTIFIER, KWORD, None)
        return Instruction.JMP, 0, tag
    elif matcher.symbol != "if":
        raise SyntaxError("Expected 'if' or 'goto', got %s" % matcher.symbol)
    # 'if' branch: conditional jump-if-not-zero.
    matcher.match(IDENTIFIER)
    iden = matcher.symbol
    matcher.match(NEQ)
    # The grammar only allows comparison against the literal 0.
    matcher.match(NUMBER, test=equals("0"))
    matcher.match(KWORD, test=is_kword("goto"))
    matcher.match(TAG)
    tag = matcher.symbol
    matcher.match(NL, LB, IDENTIFIER, KWORD, None)
    return Instruction.JNZ, iden, tag
def _match_IDEN(matcher):
    # Production: IDENTIFIER '<-' (IDENTIFIER | NUMBER) [('+'|'-') NUMBER]
    iden = matcher.symbol
    matcher.match(ARROW)
    #.match(IDENTIFIER, test=equals(iden)) \
    matcher.match(IDENTIFIER, NUMBER)
    iden2 = matcher.symbol
    tok = matcher.token
    matcher.match(OP, NL, None)
    if matcher.token == OP:
        # x <- y + n / x <- y - n becomes an INC/DEC by n.
        op = Instruction.INC if matcher.symbol == "+" else Instruction.DEC
        matcher.match(NUMBER, test=valid_number)
        num = int(matcher.symbol)
        matcher.match(NL, None, LB, IDENTIFIER, KWORD)
        return op, iden, num
    elif iden == iden2:
        # x <- x is a no-op.
        return Instruction.NOP, iden, 0
    elif tok == NUMBER:
        # x <- 5 assigns a literal.
        return Instruction.VAR, iden, int(iden2)
    else:
        # x <- y copies another variable.
        return Instruction.CPY, iden, iden2
def parse(tokens):
    """Parse a stream of tokens generated by tokenize.

    Yields one (op, var, val) triple per statement, dispatching on the
    type of the lookahead token.  Raises SyntaxError on malformed input.
    """
    matcher = Matcher(tokens)
    # Idiom fix: identity comparison is the canonical None test.
    while matcher.lookahead is not None:
        if matcher.token == LB:
            yield _match_LB(matcher)
        elif matcher.token == KWORD:
            yield _match_KWORD(matcher)
        elif matcher.token == IDENTIFIER:
            yield _match_IDEN(matcher)
        elif matcher.token == NL:
            # Empty statement: just advance past the newline.
            matcher.match(NL, LB, IDENTIFIER, KWORD, None)
        else:
            raise SyntaxError("Unexpected symbol '%s': line %d, column %d" %
                ((matcher.symbol,) + matcher.span))
def godel_exponents(f, no_abc=True):
    """Yield one Godel-style number per parsed instruction of *f*.

    Each instruction is encoded as pair(a, pair(b, c)) with the pairing
    function 2**x * (2y + 1) - 1, where a encodes the preceding label,
    b the operation and c the variable.  With no_abc=False the raw
    (a, b, c) components are yielded alongside the pair.
    """
    b = 0
    a = 0
    pair = lambda x, y: (2 ** x) * (2 * y + 1) - 1
    # Label number: letter a..e (1..5) times its numeric suffix (default 1).
    tag = lambda val: ("abcde".index(val[0].lower()) + 1) * int(val[1:] or 1)
    for op, var, val in parse(tokenize(f)):
        if op == Instruction.TAG:
            # Remember the label; it applies to the *next* instruction.
            a = tag(val)
            continue
        # Variable number: y -> 0; x_i and z_i interleave via the suffix
        # (presumably the classic input/temporary register numbering --
        # verify against the spy.core documentation).
        c = 2 * int(var[1:] or 1) if var[0] != "y" else 0
        c = c - 1 if var[0] == "x" else c
        if op == Instruction.NOP:
            b = 0
        elif op == Instruction.JNZ:
            b = tag(val) + 2
        elif op == Instruction.INC:
            b = 1
        elif op == Instruction.DEC:
            b = 2
        if no_abc:
            yield pair(a, pair(b, c))
        else:
            yield a, b, c, pair(a, pair(b, c))
        a = 0
class Compiler(object):
    """Compile 'S language' source into Bytecode.

    Pipeline: tokenize -> parse -> three-address code (tac) -> optional
    optimizations (dce, register relocation) -> Bytecode.
    """
    def __init__(self, f, optimization=0, padding=0):
        self.f = f  # file-like object holding the source program
        self.optimization = optimization
        self.padding = padding  # NOTE(review): padding is currently unused
        self.program = []  # list of (op, var, val) triples after tac()
    def _translate_jumps(self):
        # Resolve symbolic jump targets to 1-based instruction indexes;
        # unknown labels fall back to 0.
        tags = {}
        for idx, (op, var, val) in enumerate(self.program):
            if op == Instruction.TAG:
                tags[val.lower()] = idx + 1
        for idx, (op, var, val) in enumerate(self.program):
            if op in (Instruction.JNZ, Instruction.JMP):
                self.program[idx] = op, var, tags.get(val.lower(), 0)
    def _translate_varnames(self):
        # Convert textual variable names into numeric registers.
        for idx, (op, var, val) in enumerate(self.program):
            if op not in (Instruction.TAG, Instruction.CPY):
                self.program[idx] = op, Instruction.var_to_num(var), int(val)
            elif op == Instruction.CPY:
                # CPY carries a variable in both operand slots.
                var1 = Instruction.var_to_num(var)
                var2 = Instruction.var_to_num(val)
                self.program[idx] = op, var1, var2
    def _translate_tags(self):
        # Encode labels as (letter index, numeric suffix) pairs.
        for idx, (op, var, val) in enumerate(self.program):
            if op == Instruction.TAG:
                var = ord(val[0].lower()) - ord('a')
                val = 0 if len(val) == 1 else int(val[1:])
                self.program[idx] = op, var, val
    def tokenize(self, f=None):
        """Tokenize *f*, defaulting to the compiler's own source file."""
        if f is None:
            f = self.f
        # BUG FIX: previously returned tokenize(self.f), silently ignoring
        # an explicitly supplied file object.
        return tokenize(f)
    def parse(self, tokens=None):
        """Parse *tokens*, defaulting to tokenizing the compiler's source."""
        if tokens is None:
            tokens = self.tokenize()
        return parse(tokens)
    def tac(self):
        """Lower the parsed program to three-address code in self.program."""
        # tac takes multiple iterations to output useful code.
        # there is no need to optimize this, as programs are quite short and
        # readabilty is a plus.
        self.program = list(self.parse())
        self._translate_jumps()
        self._translate_tags()
        self._translate_varnames()
    def compile(self):
        """Produce Bytecode, applying optimizations when enabled."""
        self.tac()
        if self.optimization:
            self.program = dce(self.program)
            self.program = register_relocation(self.program)
        return Bytecode(Instruction(*i) for i in self.program)
    @classmethod
    def compile_string(cls, code):
        """Alternate constructor path: compile a program given as a string."""
        compiler = cls(StringIO(code))
        return compiler.compile()
| 4,852 | 289 | 300 |
2c4bbed56d2060a94133f5d39838f35c71f142a4 | 3,003 | py | Python | fMRI Tasks/WASABI bodymap/Stimulus Timing Test.py | canlab/WASABI_public | c10a33fcd8959ff9798eeec099a3f8954531661d | [
"MIT"
] | 1 | 2021-11-16T09:59:14.000Z | 2021-11-16T09:59:14.000Z | fMRI Tasks/WASABI bodymap/Stimulus Timing Test.py | canlab/WASABI_public | c10a33fcd8959ff9798eeec099a3f8954531661d | [
"MIT"
] | null | null | null | fMRI Tasks/WASABI bodymap/Stimulus Timing Test.py | canlab/WASABI_public | c10a33fcd8959ff9798eeec099a3f8954531661d | [
"MIT"
] | null | null | null | import timeit
# from datetime import datetime
from medocControl import *
from psychopy import core
import random
while True:
# startTime = timeit.default_timer()
# poll_for_change('IDLE')
# core.wait(5)
# command = random.randint(101,171)
command = 117
if poll_for_change('IDLE', poll_max=-1):
# startTime = datetime.now()
startTime = timeit.default_timer()
print("Running " + str(command))
sendCommand('select_tp', command)
# print("start time: " + str(startTime))
# if poll_for_change('READY'): sendCommand('start'); print("First start command took: " + str(timeit.default_timer() - startTime) + "s past polling")
# startTime2 = timeit.default_timer()
# if poll_for_change('RUNNING'): sendCommand('start'); print("Second start command took " + str(timeit.default_timer() - startTime2) + "s past polling")
# print("Selected TP at: " + str(timeit.default_timer()-startTime))
if poll_for_change('RUNNING'): sendCommand('trigger')
# print("end polling time: {}".format(datetime.now() - startTime))
print("Stim started " + str(timeit.default_timer() - startTime) + "s past polling")
# print("Stim started " + str(timeit.default_timer()-startTime) + " past polling")
# core.wait(5)
# print("Polling prior to first trigger: " + str(timeit.default_timer()-startTime))
startTime2 = timeit.default_timer()
# startTime2 = datetime.now()
# jitter = random.randint(1,5)
# core.wait(jitter)
# core.wait(jitter + 13)
# poll_for_change('IDLE')
# startTime3 = timeit.default_timer()
command = random.randint(101,171)
command = 170
if poll_for_change('IDLE', poll_max=-1):
# print("Post-trigger selection latency: " + str(timeit.default_timer()-startTime2));
# print("stimclock: " + str(timeit.default_timer()))
print("Post-stimulation selection latency: " + str(timeit.default_timer()-startTime2));
# print("stimclock: {}".format(datetime.now() - startTime))
# print("Post-stimulation selection latency {}".format(datetime.now() - startTime2) + " past polling")
# print("Running " + str(command));
sendCommand('select_tp', command)
# if poll_for_change('READY'): sendCommand('start')
# if poll_for_change('RUNNING'): sendCommand('start')
# print("stimclock: " + str(timeit.default_timer()))
print("Second stimulation begins at : " + str(timeit.default_timer()-startTime))
# print("end time: {}".format(datetime.now() - startTime))
# print("Second stimulation started {}".format(datetime.now() - startTime) + " past polling")
# core.wait(5)
# if poll_for_change('RUNNING'):
# print("Post-trigger trigger latency: " + str(timeit.default_timer()-startTime2) + ' (it took ' + str(timeit.default_timer()-startTime3) + ' )')
# sendCommand('trigger')
# print("Second Stim Trigger: " + str(timeit.default_timer()-startTime))
# core.wait(13) | 48.435484 | 156 | 0.651682 | import timeit
# from datetime import datetime
from medocControl import *
from psychopy import core
import random
while True:
    # Endless loop: run two Medoc thermal programs back-to-back, printing
    # the latency around each device state transition.  The commented-out
    # lines are earlier timing experiments left in as scratch notes.
    # startTime = timeit.default_timer()
    # poll_for_change('IDLE')
    # core.wait(5)
    # command = random.randint(101,171)
    # NOTE(review): fixed program 117 -- the random draw above is disabled.
    command = 117
    if poll_for_change('IDLE', poll_max=-1):
        # startTime = datetime.now()
        startTime = timeit.default_timer()
        print("Running " + str(command))
        sendCommand('select_tp', command)
    # print("start time: " + str(startTime))
    # if poll_for_change('READY'): sendCommand('start'); print("First start command took: " + str(timeit.default_timer() - startTime) + "s past polling")
    # startTime2 = timeit.default_timer()
    # if poll_for_change('RUNNING'): sendCommand('start'); print("Second start command took " + str(timeit.default_timer() - startTime2) + "s past polling")
    # print("Selected TP at: " + str(timeit.default_timer()-startTime))
    if poll_for_change('RUNNING'): sendCommand('trigger')
    # print("end polling time: {}".format(datetime.now() - startTime))
    print("Stim started " + str(timeit.default_timer() - startTime) + "s past polling")
    # print("Stim started " + str(timeit.default_timer()-startTime) + " past polling")
    # core.wait(5)
    # print("Polling prior to first trigger: " + str(timeit.default_timer()-startTime))
    startTime2 = timeit.default_timer()
    # startTime2 = datetime.now()
    # jitter = random.randint(1,5)
    # core.wait(jitter)
    # core.wait(jitter + 13)
    # poll_for_change('IDLE')
    # startTime3 = timeit.default_timer()
    # NOTE(review): the random draw is immediately overwritten with 170,
    # making the randint call dead code -- confirm which is intended.
    command = random.randint(101,171)
    command = 170
    if poll_for_change('IDLE', poll_max=-1):
        # print("Post-trigger selection latency: " + str(timeit.default_timer()-startTime2));
        # print("stimclock: " + str(timeit.default_timer()))
        print("Post-stimulation selection latency: " + str(timeit.default_timer()-startTime2));
        # print("stimclock: {}".format(datetime.now() - startTime))
        # print("Post-stimulation selection latency {}".format(datetime.now() - startTime2) + " past polling")
        # print("Running " + str(command));
        sendCommand('select_tp', command)
        # if poll_for_change('READY'): sendCommand('start')
        # if poll_for_change('RUNNING'): sendCommand('start')
        # print("stimclock: " + str(timeit.default_timer()))
        print("Second stimulation begins at : " + str(timeit.default_timer()-startTime))
        # print("end time: {}".format(datetime.now() - startTime))
        # print("Second stimulation started {}".format(datetime.now() - startTime) + " past polling")
    # core.wait(5)
    # if poll_for_change('RUNNING'):
    #     print("Post-trigger trigger latency: " + str(timeit.default_timer()-startTime2) + ' (it took ' + str(timeit.default_timer()-startTime3) + ' )')
    #     sendCommand('trigger')
    # print("Second Stim Trigger: " + str(timeit.default_timer()-startTime))
# core.wait(13) | 0 | 0 | 0 |
e038dd34934518811ef8ccaa09dd09f2275f1656 | 5,351 | py | Python | saefportal/analyzer/analyzers/analyzer_comparison_dataset.py | harry-consulting/SAEF1 | 055d6e492ba76f90e3248b9da2985fdfe0c6b430 | [
"BSD-2-Clause"
] | null | null | null | saefportal/analyzer/analyzers/analyzer_comparison_dataset.py | harry-consulting/SAEF1 | 055d6e492ba76f90e3248b9da2985fdfe0c6b430 | [
"BSD-2-Clause"
] | null | null | null | saefportal/analyzer/analyzers/analyzer_comparison_dataset.py | harry-consulting/SAEF1 | 055d6e492ba76f90e3248b9da2985fdfe0c6b430 | [
"BSD-2-Clause"
] | 1 | 2020-12-16T15:02:52.000Z | 2020-12-16T15:02:52.000Z | from __future__ import absolute_import, unicode_literals
from saefportal.settings import COMPARISON_PROFILE_THRESHOLD
from .analyzer import Analyzer
from analyzer.models import ActualColumnProfile, ExpectedColumnProfile
from analyzer.enums import Column
| 44.966387 | 116 | 0.646982 | from __future__ import absolute_import, unicode_literals
from saefportal.settings import COMPARISON_PROFILE_THRESHOLD
from .analyzer import Analyzer
from analyzer.models import ActualColumnProfile, ExpectedColumnProfile
from analyzer.enums import Column
def _calculate_change(ratio):
    """Average the threshold-weighted 'ratio' entry of every metric dict."""
    total = 0
    for entry in ratio.values():
        total += entry['ratio'] * COMPARISON_PROFILE_THRESHOLD
    return total / len(ratio)
def _retrieve_column_definitions(columns_actual, columns_expected):
columns_actual_definitions = {}
columns_expected_definitions = {}
for actual_column in columns_actual:
columns_actual_definitions[actual_column.name] = (
actual_column.datatype, actual_column.nullable, actual_column.order)
for expected_column in columns_expected:
columns_expected_definitions[expected_column.name] = (
expected_column.datatype, expected_column.nullable, expected_column.order)
return columns_actual_definitions, columns_expected_definitions
def _column_change(columns_actual, columns_expected, value, ignore_deleted=True):
column_dict = {'added': [], 'deleted': [], 'changes': []}
for name, actual_column in columns_actual.items():
if name not in columns_expected:
column_dict['added'].append(name)
elif actual_column[value] != columns_expected[name][value]:
column_dict['changes'].append(name)
if not ignore_deleted:
for name in columns_expected.keys():
if name not in columns_actual:
column_dict['deleted'].append(name)
return column_dict['added'], column_dict['deleted'], column_dict['changes']
def _columns_sum_uniqueness(columns):
sum_value = 0
for column in columns:
sum_value += column.uniqueness if column.uniqueness else 0
return sum_value
def _ratio_difference_abs(actual, expected):
if expected == 0:
return {'actual': actual, 'expected': expected, 'ratio': 0}
ratio = abs(actual - expected) / expected
return {'actual': actual, 'expected': expected, 'ratio': ratio}
def _ratio_difference(columns, denominator):
ratio = len(columns) / denominator
is_change = True if len(columns) != 0 else False
return {'changes': is_change, 'columns': columns, 'ratio': ratio}
class AnalyzerComparisonDataset(Analyzer):
    """Compare an actual dataset profile against its expected profile.

    Produces a dict of per-aspect ratios (row/column counts, column order,
    additions, deletions, datatypes, uniqueness) plus an aggregated
    'degree_of_change', which is also persisted on the dataset run.
    """
    def __init__(self, actual_dataset_profile, expected_dataset_profile):
        super().__init__()
        self.actual_dataset_profile = actual_dataset_profile
        self.expected_dataset_profile = expected_dataset_profile
    def _execute_session(self):
        """Compute all comparison ratios and save the degree of change."""
        columns_actual = ActualColumnProfile.objects.filter(dataset_profile_id=self.actual_dataset_profile.pk)
        columns_expected = ExpectedColumnProfile.objects.filter(dataset_profile_id=self.expected_dataset_profile.pk)
        # Scalar count comparisons.
        ratio = {'row_count_ratio': _ratio_difference_abs(self.actual_dataset_profile.row_count,
                                                          self.expected_dataset_profile.row_count),
                 'column_count_ratio': _ratio_difference_abs(self.actual_dataset_profile.column_count,
                                                             self.expected_dataset_profile.column_count)}
        columns_actual_definitions, columns_expected_definitions = _retrieve_column_definitions(columns_actual,
                                                                                               columns_expected)
        # Structural diff on column order (deleted columns included).
        added_columns, deleted_columns, changed_order_columns = _column_change(columns_actual_definitions,
                                                                               columns_expected_definitions,
                                                                               Column.ORDER.value,
                                                                               False)
        ratio['column_order_ratio'] = _ratio_difference(changed_order_columns,
                                                        self.expected_dataset_profile.column_count)
        ratio['column_deleted_ratio'] = _ratio_difference(deleted_columns,
                                                          self.expected_dataset_profile.column_count)
        ratio['column_added_ratio'] = _ratio_difference(added_columns,
                                                        self.expected_dataset_profile.column_count)
        # Datatype diff (deletions ignored here; already counted above).
        _, _, changed_datatype_columns = _column_change(columns_actual_definitions,
                                                        columns_expected_definitions,
                                                        Column.TYPE.value)
        ratio['column_datatype_ratio'] = _ratio_difference(changed_datatype_columns,
                                                           self.expected_dataset_profile.column_count)
        actual_uniqueness_sum = _columns_sum_uniqueness(columns_actual)
        expected_uniqueness_sum = _columns_sum_uniqueness(columns_expected)
        ratio['column_uniqueness_ratio'] = _ratio_difference_abs(actual_uniqueness_sum, expected_uniqueness_sum)
        # Aggregate over the metrics gathered so far (before this key exists).
        ratio['degree_of_change'] = _calculate_change(ratio)
        # Persist the headline number on the associated dataset run.
        self.actual_dataset_profile.dataset_run.result = {"degree_of_change": ratio['degree_of_change']}
        self.actual_dataset_profile.dataset_run.save()
        return ratio
73365aad101d42d6fcbe4ac64ca9be0306fcea28 | 1,977 | py | Python | utils/metrics.py | ishine/TensorflowASR-1 | c0bcaec124dd96138ac401dfbde0b8c795737bb1 | [
"Apache-2.0"
] | 1 | 2021-06-15T01:19:21.000Z | 2021-06-15T01:19:21.000Z | utils/metrics.py | ishine/TensorflowASR-1 | c0bcaec124dd96138ac401dfbde0b8c795737bb1 | [
"Apache-2.0"
] | null | null | null | utils/metrics.py | ishine/TensorflowASR-1 | c0bcaec124dd96138ac401dfbde0b8c795737bb1 | [
"Apache-2.0"
] | 1 | 2020-12-29T01:26:14.000Z | 2020-12-29T01:26:14.000Z |
import numpy as np
import tensorflow as tf
from utils.xer import wer
from utils.tools import bytes_to_string
class ErrorRate(tf.keras.metrics.Metric):
""" Metric for WER and CER """
| 37.301887 | 97 | 0.643399 |
import numpy as np
import tensorflow as tf
from utils.xer import wer
from utils.tools import bytes_to_string
def wer(decode: np.ndarray, target: np.ndarray) -> (tf.Tensor, tf.Tensor):
    """Word-level edit distance and reference word count, as tf float32 scalars.

    NOTE(review): 'distance' is referenced below but never imported in this
    module, and this definition shadows the 'from utils.xer import wer'
    import above -- verify the intended import/definition.
    """
    decode = bytes_to_string(decode)
    target = bytes_to_string(target)
    dis = 0.0
    length = 0.0
    for dec, tar in zip(decode, target):
        # Map each distinct word to a single character so that a
        # character-level edit distance computes a word-level distance.
        words = set(dec.split() + tar.split())
        word2char = dict(zip(words, range(len(words))))
        new_decode = [chr(word2char[w]) for w in dec.split()]
        new_target = [chr(word2char[w]) for w in tar.split()]
        dis += distance.edit_distance(''.join(new_decode), ''.join(new_target))
        length += len(tar.split())
    return tf.convert_to_tensor(dis, tf.float32), tf.convert_to_tensor(length, tf.float32)
def cer(decode: np.ndarray, target: np.ndarray) -> (tf.Tensor, tf.Tensor):
    """Character-level edit distance and reference length, as tf float32 scalars.

    NOTE(review): depends on the same missing 'distance' import as wer().
    """
    decode = bytes_to_string(decode)
    target = bytes_to_string(target)
    dis = 0
    length = 0
    for dec, tar in zip(decode, target):
        dis += distance.edit_distance(dec, tar)
        length += len(tar)
    return tf.convert_to_tensor(dis, tf.float32), tf.convert_to_tensor(length, tf.float32)
class ErrorRate(tf.keras.metrics.Metric):
    """ Metric for WER and CER """
    def __init__(self, func, name="error_rate", **kwargs):
        # func: numpy-side callable (e.g. wer or cer) returning
        # (edit distance, reference length) as float32.
        super(ErrorRate, self).__init__(name=name, **kwargs)
        # Running totals: numerator accumulates edit distance, denominator
        # accumulates reference length.
        self.numerator = self.add_weight(name=f"{name}_numerator", initializer="zeros")
        self.denominator = self.add_weight(name=f"{name}_denominator", initializer="zeros")
        self.func = func
    def update_state(self, decode: tf.Tensor, target: tf.Tensor):
        # Run the numpy metric inside the graph via tf.numpy_function.
        n, d = tf.numpy_function(self.func, inp=[decode, target], Tout=[tf.float32, tf.float32])
        self.numerator.assign_add(n)
        self.denominator.assign_add(d)
    def result(self):
        # Guard against division by zero before any update.
        # NOTE(review): comparing the tf variable with == yields a tensor;
        # confirm this branch behaves as intended in graph mode.
        if self.denominator == 0.0: return 0.0
        # Error rate as a percentage.
        return (self.numerator / self.denominator) * 100
| 1,638 | 0 | 137 |
2ddd4fdcd5f09910c9beb02228c810ec663b635d | 10,536 | py | Python | tests/environments/test_views.py | waqaskhan540/bullet-train-api | bd0d9ba9c8e4a05a2bd498ce4e69a4f33e1c4199 | [
"BSD-3-Clause"
] | null | null | null | tests/environments/test_views.py | waqaskhan540/bullet-train-api | bd0d9ba9c8e4a05a2bd498ce4e69a4f33e1c4199 | [
"BSD-3-Clause"
] | null | null | null | tests/environments/test_views.py | waqaskhan540/bullet-train-api | bd0d9ba9c8e4a05a2bd498ce4e69a4f33e1c4199 | [
"BSD-3-Clause"
] | null | null | null | from unittest import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from environments.models import Environment, Identity
from features.models import Feature, FeatureState
from organisations.models import Organisation
from projects.models import Project
from tests.utils import Helper
| 47.674208 | 95 | 0.576784 | from unittest import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from environments.models import Environment, Identity
from features.models import Feature, FeatureState
from organisations.models import Organisation
from projects.models import Project
from tests.utils import Helper
class EnvironmentTestCase(TestCase):
    """API tests for environment creation, identity listing and feature
    state updates.

    Note: set_up() is called explicitly by each test (it is not the
    unittest setUp hook).
    """
    # BUG FIX: the name value must be quoted to produce valid JSON,
    # matching env_post_template_with_webhook below.
    env_post_template_wout_webhook = '{"name": "%s", "project": %d}'
    env_post_template_with_webhook = '{"name": "%s", "project": %d, ' \
                                     '"webhooks_enabled": "%r", "webhook_url": "%s"}'
    fs_put_template = '{ "id" : %d, "enabled" : "%r", "feature_state_value" : "%s" }'
    def set_up(self):
        """Return an API client authenticated as a fresh admin user."""
        client = APIClient()
        user = Helper.create_ffadminuser()
        client.force_authenticate(user=user)
        return client
    def test_should_create_environments_with_or_without_webhooks(self):
        # Given
        client = self.set_up()
        # When
        response_with_webhook = client.post('/api/v1/environments/',
                                            data=self.env_post_template_with_webhook % (
                                                "Test Env with Webhooks",
                                                1,
                                                True,
                                                "https://sometesturl.org"
                                            ), content_type="application/json")
        response_wout_webhook = client.post('/api/v1/environments/',
                                            data=self.env_post_template_wout_webhook % (
                                                "Test Env without Webhooks",
                                                1
                                            ), content_type="application/json")
        # Then
        # BUG FIX: assertTrue(status_code, 201) never compared against 201
        # (assertTrue's second positional argument is the failure message).
        self.assertEqual(response_with_webhook.status_code, 201)
        self.assertTrue(Environment.objects.get(name="Test Env with Webhooks").webhook_url)
        self.assertEqual(response_wout_webhook.status_code, 201)
    def test_should_return_identities_for_an_environment(self):
        client = self.set_up()
        # Given
        identifierOne = 'user1'
        identifierTwo = 'user2'
        organisation = Organisation(name='ssg')
        organisation.save()
        project = Project(name='project1', organisation=organisation)
        project.save()
        environment = Environment(name='environment1', project=project)
        environment.save()
        identityOne = Identity(identifier=identifierOne, environment=environment)
        identityOne.save()
        identityTwo = Identity(identifier=identifierTwo, environment=environment)
        identityTwo.save()
        # When
        response = client.get('/api/v1/environments/%s/identities/' % environment.api_key)
        # Then
        self.assertEquals(response.data['results'][0]['identifier'], identifierOne)
        self.assertEquals(response.data['results'][1]['identifier'], identifierTwo)
    def test_should_update_value_of_feature_state(self):
        # Given
        client = self.set_up()
        project = Project.objects.get(name="test project")
        feature = Feature(name="feature", project=project)
        feature.save()
        environment = Environment.objects.get(name="test env")
        feature_state = FeatureState.objects.get(feature=feature, environment=environment)
        # When
        response = client.put("/api/v1/environments/%s/featurestates/%d/" %
                              (environment.api_key, feature_state.id),
                              data=self.fs_put_template % (feature_state.id,
                                                           True,
                                                           "This is a value"),
                              content_type='application/json')  # should change enabled to True
        # Then
        self.assertEquals(response.status_code, status.HTTP_200_OK)
        feature_state.refresh_from_db()
        self.assertEquals(feature_state.get_feature_state_value(), "This is a value")
        self.assertEquals(feature_state.enabled, True)
        Helper.clean_up()
class IdentityTestCase(TestCase):
    """API tests for identity listing and per-identity feature states.

    Note: set_up() is called explicitly by each test (it is not the
    unittest setUp hook), and tests end with Helper.clean_up() to reset
    the shared fixtures.
    """
    identifier = 'user1'
    put_template = '{ "enabled" : "%r" }'
    post_template = '{ "feature" : "%s", "enabled" : "%r" }'
    feature_states_url = '/api/v1/environments/%s/identities/%s/featurestates/'
    feature_states_detail_url = feature_states_url + "%d/"
    identities_url = '/api/v1/environments/%s/identities/%s/'
    def set_up(self):
        """Return an API client authenticated as a fresh admin user."""
        client = APIClient()
        user = Helper.create_ffadminuser()
        client.force_authenticate(user=user)
        return client
    def test_should_return_identities_list_when_requested(self):
        # Given
        client = self.set_up()
        identity, project = Helper.generate_database_models(identifier=self.identifier)
        # When
        response = client.get(self.identities_url % (identity.environment.api_key,
                                                     identity.identifier))
        # Then
        self.assertEquals(response.status_code, 200)
        Helper.clean_up()
    def test_should_create_identityFeature_when_post(self):
        # Given
        client = self.set_up()
        environment = Environment.objects.get(name="test env")
        identity = Identity.objects.create(environment=environment, identifier="testidentity")
        project = Project.objects.get(name="test project")
        feature = Feature(name='feature1', project=project)
        feature.save()
        # When
        response = client.post(self.feature_states_url % (identity.environment.api_key,
                                                          identity.identifier),
                               data=self.post_template % (feature.id, True),
                               content_type='application/json')
        # Then
        identityFeature = identity.identity_features
        self.assertEquals(response.status_code, status.HTTP_201_CREATED)
        self.assertEquals(identityFeature.count(), 1)
        Helper.clean_up()
    def test_should_return_BadRequest_when_duplicate_identityFeature_is_posted(self):
        # Given
        client = self.set_up()
        identity, project = Helper.generate_database_models(self.identifier)
        feature = Feature(name='feature2', project=project)
        feature.save()
        # When: post the same identity/feature pair twice.
        initialResponse = client.post(self.feature_states_url % (identity.environment.api_key,
                                                                 identity.identifier),
                                      data=self.post_template % (feature.id, True),
                                      content_type='application/json')
        secondResponse = client.post(self.feature_states_url % (identity.environment.api_key,
                                                                identity.identifier),
                                     data=self.post_template % (feature.id, True),
                                     content_type='application/json')
        # Then: duplicate is rejected and only one feature state exists.
        identityFeature = identity.identity_features
        self.assertEquals(initialResponse.status_code, status.HTTP_201_CREATED)
        self.assertEquals(secondResponse.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEquals(identityFeature.count(), 1)
        Helper.clean_up()
    def test_should_change_enabled_state_when_put(self):
        # Given: a disabled feature state for one identity.
        client = self.set_up()
        organisation = Organisation.objects.get(name="test org")
        project = Project.objects.get(name="test project", organisation=organisation)
        feature = Feature(name='feature1', project=project)
        feature.save()
        environment = Environment.objects.get(name="test env")
        identity = Identity(identifier="test_identity", environment=environment)
        identity.save()
        feature_state = FeatureState(feature=feature,
                                     identity=identity,
                                     enabled=False,
                                     environment=environment)
        feature_state.save()
        # When
        response = client.put(self.feature_states_detail_url % (identity.environment.api_key,
                                                                identity.identifier,
                                                                feature_state.id),
                              data=self.put_template % True,
                              content_type='application/json')
        feature_state.refresh_from_db()
        # Then
        self.assertEquals(response.status_code, status.HTTP_200_OK)
        self.assertEquals(feature_state.enabled, True)
        Helper.clean_up()
    def test_should_remove_identityfeature_when_delete(self):
        # Given: two feature states for the same identity.
        client = self.set_up()
        organisation = Organisation.objects.get(name="test org")
        project = Project.objects.get(name="test project", organisation=organisation)
        feature_one = Feature(name='feature1', project=project)
        feature_one.save()
        feature_two = Feature(name='feature2', project=project)
        feature_two.save()
        environment = Environment.objects.get(name="test env")
        identity = Identity(identifier="test_identity", environment=environment)
        identity.save()
        # NOTE(review): duplicate fetch of the same environment -- harmless
        # but redundant.
        environment = Environment.objects.get(name="test env")
        identity_feature_one = FeatureState(feature=feature_one,
                                            identity=identity,
                                            enabled=False,
                                            environment=environment)
        identity_feature_one.save()
        identity_feature_two = FeatureState(feature=feature_two,
                                            identity=identity,
                                            enabled=True,
                                            environment=environment)
        identity_feature_two.save()
        # When: delete one of the two.
        client.delete(self.feature_states_detail_url % (identity.environment.api_key,
                                                        identity.identifier,
                                                        identity_feature_one.id),
                      content_type='application/json')
        # Then: only the other remains.
        identity_features = FeatureState.objects.filter(identity=identity)
        self.assertEquals(identity_features.count(), 1)
        Helper.clean_up()
7ba651cd035605544502236d142bdb383f2797b4 | 16,843 | py | Python | mpython_conn.py | labplus-cn/mpython_conn | 566a281afed56cac0adad3885cb51d748be63e73 | [
"MIT"
] | 1 | 2020-06-16T08:03:56.000Z | 2020-06-16T08:03:56.000Z | mpython_conn.py | labplus-cn/mpython_conn | 566a281afed56cac0adad3885cb51d748be63e73 | [
"MIT"
] | null | null | null | mpython_conn.py | labplus-cn/mpython_conn | 566a281afed56cac0adad3885cb51d748be63e73 | [
"MIT"
] | 2 | 2020-09-24T05:23:03.000Z | 2020-11-25T06:39:15.000Z | # -*- coding:utf-8 -*-
# @Time : 2020/06/10
# @Author : Wu Wen Jie(6692776@qq.com)
# @FileName : mpython_conn.py
# @Description : A transfer protocol between mPython board and PC python
# @Version : 0.3.2
from serial.tools.list_ports import comports as list_serial_ports
from serial import Serial
import threading
import time
import atexit
import unicodedata
import inspect
import ctypes
import sys
def _async_raise(tid, exctype):
    """Asynchronously raise *exctype* in the thread whose ident is *tid*.

    Uses the CPython C-API ``PyThreadState_SetAsyncExc`` via ctypes; the
    target thread sees the exception the next time it runs bytecode.
    Raises ValueError if *tid* does not name a live thread.
    """
    tid = ctypes.c_long(tid)
    if not inspect.isclass(exctype):
        # Only exception *classes* may be set asynchronously, so degrade an
        # instance to its class.
        exctype = type(exctype)
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
    if res == 0:
        # No thread state matched the given id.
        raise ValueError("invalid thread id")
    elif res != 1:
        # """if it returns a number greater than one, you're in trouble,
        # and you should call it again with exc=NULL to revert the effect"""
        ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
        raise SystemError("PyThreadState_SetAsyncExc failed")
@atexit.register
| 34.026263 | 117 | 0.48869 | # -*- coding:utf-8 -*-
# @Time : 2020/06/10
# @Author : Wu Wen Jie(6692776@qq.com)
# @FileName : mpython_conn.py
# @Description : A transfer protocol between mPython board and PC python
# @Version : 0.3.2
from serial.tools.list_ports import comports as list_serial_ports
from serial import Serial
import threading
import time
import atexit
import unicodedata
import inspect
import ctypes
import sys
def _async_raise(tid, exctype):
"""raises the exception, performs cleanup if needed"""
tid = ctypes.c_long(tid)
if not inspect.isclass(exctype):
exctype = type(exctype)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
def stop_thread(thread):
    """Force *thread* to terminate by asynchronously raising SystemExit in it."""
    _async_raise(thread.ident, SystemExit)
class controller():
    """Singleton driving an mPython (掌控板) board over a serial REPL link.

    On construction it locates the board's CP210x serial port, interrupts the
    running program, renames ``main.py`` on the board to ``main.bak`` and
    imports ``mpython_online`` so the firmware streams sensor status lines
    back to the PC.  A reader thread parses those lines into ``SENSORS`` and
    fires the ``on_*`` event callbacks; command methods send one-line
    MicroPython statements to the board.
    """
    _instance = None

    def __new__(cls, *args, **kw):
        # Singleton: always hand back the one shared instance.
        # Bug fix: the original forwarded *args/**kw to object.__new__(),
        # which raises TypeError whenever the class is constructed with an
        # explicit port argument (object.__new__ takes no extra arguments
        # when both __new__ and __init__ are overridden).
        if not cls._instance:
            cls._instance = super(controller, cls).__new__(cls)
        return cls._instance

    # Latest sensor snapshot; updated by the serial reader thread.
    SENSORS = {
        'A': 0,             # button A pressed
        'B': 0,             # button B pressed
        'P': 0,             # touch pads P/Y/T/H/O/N
        'Y': 0,
        'T': 0,
        'H': 0,
        'O': 0,
        'N': 0,
        'L': 0,             # light level
        'S': 0,             # sound level
        'E': 0,             # extension value
        'aX': 0,            # accelerometer axes
        'aY': 0,
        'aZ': 0,
        'dir': '',          # tilt direction: 'F', 'B', 'L', 'R' or '' (level)
        'pinD': [0] * 17,   # digital pin readings
        'pinA': [0, 0, 0],  # analog pin readings
        'G': 0  # gesture: 0 normal, 1 shaken, 2 thrown up
    }

    def analysis_data(self, data):
        """Parse one status line from the board and update ``SENSORS``.

        Fires the matching ``on_*`` callbacks — each on its own short-lived
        thread — whenever a button, gesture or tilt state changes.  Malformed
        lines are silently ignored.
        """
        if data[0] == '{':
            try:
                # NOTE(security): eval() of serial input.  Tolerable only
                # because the line comes from our own firmware; json.loads
                # would be safer if the protocol allows it.
                data = eval(data)
                # 'd' is a bitmask of the 8 touch/button inputs; left-pad the
                # binary string so each bit has a stable position.
                d = ("0000000" + str(bin(data['d']))[2:])[-8:]
                sys.stdout.flush()
                if self.SENSORS['A'] != ('1' == d[6]):
                    if self.SENSORS['A']:
                        threading.Thread(target=self.on_a_released).start()
                    else:
                        threading.Thread(target=self.on_a_pressed).start()
                    self.SENSORS['A'] = ('1' == d[6])
                if self.SENSORS['B'] != ('1' == d[7]):
                    if self.SENSORS['B']:
                        threading.Thread(target=self.on_b_released).start()
                    else:
                        threading.Thread(target=self.on_b_pressed).start()
                    self.SENSORS['B'] = ('1' == d[7])
                if self.SENSORS['P'] != ('1' == d[0]):
                    if self.SENSORS['P']:
                        threading.Thread(target=self.on_p_released).start()
                    else:
                        threading.Thread(target=self.on_p_pressed).start()
                    self.SENSORS['P'] = ('1' == d[0])
                if self.SENSORS['Y'] != ('1' == d[1]):
                    if self.SENSORS['Y']:
                        threading.Thread(target=self.on_y_released).start()
                    else:
                        threading.Thread(target=self.on_y_pressed).start()
                    self.SENSORS['Y'] = ('1' == d[1])
                if self.SENSORS['T'] != ('1' == d[2]):
                    if self.SENSORS['T']:
                        threading.Thread(target=self.on_t_released).start()
                    else:
                        threading.Thread(target=self.on_t_pressed).start()
                    self.SENSORS['T'] = ('1' == d[2])
                if self.SENSORS['H'] != ('1' == d[3]):
                    if self.SENSORS['H']:
                        threading.Thread(target=self.on_h_released).start()
                    else:
                        threading.Thread(target=self.on_h_pressed).start()
                    self.SENSORS['H'] = ('1' == d[3])
                if self.SENSORS['O'] != ('1' == d[4]):
                    if self.SENSORS['O']:
                        threading.Thread(target=self.on_o_released).start()
                    else:
                        threading.Thread(target=self.on_o_pressed).start()
                    self.SENSORS['O'] = ('1' == d[4])
                if self.SENSORS['N'] != ('1' == d[5]):
                    if self.SENSORS['N']:
                        threading.Thread(target=self.on_n_released).start()
                    else:
                        threading.Thread(target=self.on_n_pressed).start()
                    self.SENSORS['N'] = ('1' == d[5])
                if self.SENSORS['G'] != data['t']:
                    if 1 == data['t']:
                        threading.Thread(target=self.on_shaken).start()
                    elif 2 == data['t']:
                        threading.Thread(target=self.on_jumped).start()
                    self.SENSORS['G'] = data['t']
                self.SENSORS['L'] = data['l']
                self.SENSORS['S'] = data['s']
                self.SENSORS['E'] = data['e']
                self.SENSORS['aX'] = data['x']
                self.SENSORS['aY'] = data['y']
                self.SENSORS['aZ'] = data['z']
                # Tilt detection from the accelerometer with a +/-0.3 g dead zone.
                if data['x'] < -0.3:
                    if 'F' != self.SENSORS['dir']:
                        self.SENSORS['dir'] = 'F'
                        threading.Thread(target=self.on_tilt_forward).start()
                elif data['x'] > 0.3:
                    if 'B' != self.SENSORS['dir']:
                        self.SENSORS['dir'] = 'B'
                        threading.Thread(target=self.on_tilt_back).start()
                elif data['y'] < -0.3:
                    if 'R' != self.SENSORS['dir']:
                        self.SENSORS['dir'] = 'R'
                        threading.Thread(target=self.on_tilt_right).start()
                elif data['y'] > 0.3:
                    if 'L' != self.SENSORS['dir']:
                        self.SENSORS['dir'] = 'L'
                        threading.Thread(target=self.on_tilt_left).start()
                else:
                    if '' != self.SENSORS['dir']:
                        self.SENSORS['dir'] = ''
                        threading.Thread(target=self.on_tilt_none).start()
                # Pin readings are only present when previously requested.
                if 'd0' in data: self.SENSORS['pinD'][0] = (1 == data['d0'])
                if 'd1' in data: self.SENSORS['pinD'][1] = (1 == data['d1'])
                if 'd2' in data: self.SENSORS['pinD'][2] = (1 == data['d2'])
                if 'd8' in data: self.SENSORS['pinD'][8] = (1 == data['d8'])
                if 'd9' in data: self.SENSORS['pinD'][9] = (1 == data['d9'])
                if 'd10' in data: self.SENSORS['pinD'][10] = (1 == data['d10'])
                if 'd13' in data: self.SENSORS['pinD'][13] = (1 == data['d13'])
                if 'd14' in data: self.SENSORS['pinD'][14] = (1 == data['d14'])
                if 'd15' in data: self.SENSORS['pinD'][15] = (1 == data['d15'])
                if 'd16' in data: self.SENSORS['pinD'][16] = (1 == data['d16'])
                if 'a0' in data: self.SENSORS['pinA'][0] = data['a0']
                if 'a1' in data: self.SENSORS['pinA'][1] = data['a1']
                if 'a2' in data: self.SENSORS['pinA'][2] = data['a2']
            except:
                # Malformed / partial line — drop it.  The bare except is kept
                # deliberately: this runs on the reader thread, which may be
                # stopped via an async SystemExit.
                pass

    def find_device(self):
        """Return the port name of the first CP210x (mPython) serial adapter,
        or None when no board is attached."""
        ports = list_serial_ports()
        for port in ports:
            if "VID:PID=10C4:EA60" in port[2].upper():
                return port[0]
        return None

    def on_serial_read(self):
        """Reader-thread loop: read lines from the board and parse them."""
        while True:
            try:
                if self._serial.in_waiting:
                    s = self._serial.readline().decode('utf-8')
                    self.analysis_data(s)
            except:
                # Bare except intentionally also catches the SystemExit that
                # stop_thread() injects on disconnect.
                if self.enable: print("serial error")
                break

    def break_first(self):
        """Interrupt whatever is running on the board (Ctrl-C three times)."""
        for i in range(3):
            self._serial.write(b'\r\x03')
            time.sleep(0.01)
        time.sleep(0.1)

    def reboot(self):
        """Interrupt the board and soft-reboot it (Ctrl-D)."""
        for i in range(3):
            self._serial.write(b'\r\x03')
            time.sleep(0.01)
        self._serial.write(b'\x04')

    def disconnect(self):
        """Restore ``main.py`` on the board, reboot it and close the port."""
        self.enable = False
        for i in range(3):
            self._serial.write(b'\r\x03')
            time.sleep(0.01)
        self.send("import os;os.rename('main.bak','main.py')")
        self._serial.write(b'\x04')
        time.sleep(0.5)
        stop_thread(self.read_thread)
        # isOpen() is the pyserial 2.x spelling; still available as an alias.
        if self._serial.isOpen(): self._serial.close()

    def send(self, message):
        """Send one line of MicroPython source to the board's REPL.

        Sleeps briefly afterwards to give the board time to execute it.
        """
        if self._serial is None: return
        message = message + "\r\n"
        message = message.encode("utf-8")
        self._serial.write(message)
        time.sleep(0.11 + len(message) / 512)

    def __init__(self, port=''):
        """Open *port* (auto-detected when empty) and switch the board into
        online mode, streaming sensor data back to the PC."""
        if '' == port:
            port = self.find_device()
        self._port = port
        self._serial = Serial(port, 115200, timeout=1, parity='N')
        self._serial.setDTR(True)
        self._serial.setRTS(True)
        self.enable = True
        self.read_thread = threading.Thread(target=self.on_serial_read)
        self.read_thread.start()
        self.break_first()
        # Park the user's program so our online agent runs instead.
        self.send("import os;os.rename('main.py','main.bak')")
        self._serial.write(b'\x04')
        time.sleep(0.5)
        self.send("import mpython_online")

    def __del__(self):
        print("Disconnected")

    ## Commands
    def set_digital(self, pin, value):
        """Write a digital value to a board pin.

        @param pin: 0, 1, 8, 9, 13, 14, 15, 16
        @param value: 0, 1
        """
        if pin not in [0, 1, 8, 9, 13, 14, 15, 16]: return
        command = "MPythonPin({},PinMode.OUT).write_digital({})".format(pin, value)
        self.send(command)

    def set_analog(self, pin, value):
        """Write a PWM (analog) value to a board pin.

        @param pin: 0, 1, 8, 9, 13, 14, 15, 16
        @param value: 0~1023
        """
        if pin not in [0, 1, 8, 9, 13, 14, 15, 16]: return
        command = "MPythonPin({},PinMode.PWM).write_analog({})".format(pin, value)
        self.send(command)

    def set_servo(self, pin, value):
        """Drive a servo attached to *pin* to angle *value*.

        @param pin: 0, 1, 8, 9, 13, 14, 15, 16
        """
        if pin not in [0, 1, 8, 9, 13, 14, 15, 16]: return
        command = "Servo({}).write_angle({})".format(pin, value)
        self.send(command)

    def set_rgb(self, r, g, b, index=-1):
        """Set one (or all) of the three on-board RGB LEDs.

        @param index: 0, 1, 2, or -1 for all LEDs (default)
        """
        if index in [0, 1, 2]:
            command = "rgb[{}]=({},{},{});rgb.write();time.sleep_ms(1)".format(index, r, g, b)
        else:
            command = "rgb.fill(({},{},{}));rgb.write();time.sleep_ms(1)".format(r, g, b)
        self.send(command)

    def set_rgb_off(self, index=-1):
        """Turn one (or all) of the on-board RGB LEDs off.

        @param index: 0, 1, 2, or -1 for all LEDs (default)
        """
        if index in [0, 1, 2]:
            command = "rgb[{}]=(0,0,0);rgb.write();time.sleep_ms(1)".format(index)
        else:
            command = "rgb.fill((0,0,0));rgb.write();time.sleep_ms(1)"
        self.send(command)

    def to_unicode(self, text):
        """NFKC-normalize *text* and escape CJK ideographs as ``\\uXXXX``
        sequences so they survive the ASCII REPL transport."""
        text = unicodedata.normalize('NFKC', text)
        ret = ''
        for v in text:
            if '\u4e00' <= v <= '\u9fff':
                ret = ret + hex(ord(v)).upper().replace('0X', '\\u')
            else:
                ret = ret + v
        return ret

    def oled_clear(self):
        """Clear the whole OLED display."""
        command = "oled.fill(0);oled.show()"
        self.send(command)

    def oled_clear_line(self, line):
        """Clear one 16-pixel text row.

        @param line: 1, 2, 3, 4
        """
        if line not in [1, 2, 3, 4]: return
        command = "oled.fill_rect(0,{},128,16,0);oled.show()".format((line - 1) * 16)
        self.send(command)

    def oled_display_line(self, text, line):
        """Display *text* on one of the four 16-pixel text rows.

        @param line: 1, 2, 3, 4
        """
        if line not in [1, 2, 3, 4]: return
        command = "oled.DispChar('{}',0,{},1);oled.show()".format(self.to_unicode(text), (line - 1) * 16)
        self.send(command)

    def oled_display_text(self, text, x, y):
        """Display *text* at pixel position (x, y)."""
        command = "oled.DispChar('{}',{},{},1);oled.show()".format(self.to_unicode(text), x, y)
        self.send(command)

    def oled_draw_point(self, x, y, mode=1):
        """Draw or erase a single pixel.

        @param mode: 1 draw (default), 0 erase
        """
        command = "oled.pixel({},{},{});oled.show()".format(x, y, mode)
        self.send(command)

    def oled_draw_line(self, x1, y1, x2, y2, mode=1):
        """Draw or erase a line between two points.

        @param mode: 1 draw (default), 0 erase
        """
        command = "oled.line({},{},{},{},{});oled.show()".format(x1, y1, x2, y2, mode)
        self.send(command)

    def oled_draw_vhline(self, x, y, len, dir='h', mode=1):
        """Draw or erase an axis-aligned line of length *len*.

        @param dir: 'h' horizontal, 'v' vertical
        @param mode: 1 draw (default), 0 erase
        """
        command = "oled.{}line({},{},{},{});oled.show()".format(dir, x, y, len, mode)
        self.send(command)

    def oled_draw_rectangle(self, x, y, w, h, fill=0, mode=1):
        """Draw or erase a rectangle.

        @param fill: 1 filled, 0 outline (default)
        @param mode: 1 draw (default), 0 erase
        """
        fill_mode = "fill_" if 1 == fill else ""
        command = "oled.{}rect({},{},{},{},{});oled.show()".format(fill_mode, x, y, w, h, mode)
        self.send(command)

    def oled_draw_circle(self, x, y, r, fill=0, mode=1):
        """Draw or erase a circle of radius *r* centred at (x, y).

        @param fill: 1 filled, 0 outline (default)
        @param mode: 1 draw (default), 0 erase
        """
        fill_mode = "fill_" if 1 == fill else ""
        command = "oled.{}circle({},{},{},{});oled.show()".format(fill_mode, x, y, r, mode)
        self.send(command)

    def oled_draw_triangle(self, x1, y1, x2, y2, x3, y3, fill=0, mode=1):
        """Draw or erase a triangle.

        @param fill: 1 filled, 0 outline (default)
        @param mode: 1 draw (default), 0 erase
        """
        fill_mode = "fill_" if 1 == fill else ""
        command = "oled.{}triangle({},{},{},{},{},{},{});oled.show()".format(fill_mode, x1, y1, x2, y2, x3, y3, mode)
        self.send(command)

    def stop_music(self):
        """Stop music playback on the board."""
        command = "music.pitch(4)\r\nmusic.stop()"
        self.send(command)

    def play_tone(self, pitch=131):
        """Play a continuous tone.

        @param pitch: frequency in Hz, e.g. 131
        """
        command = "music.pitch({})".format(pitch)
        self.send(command)

    def play_note(self, note='C3:1'):
        """Play a single note.

        @param note: note spec, e.g. 'C3:1'
        """
        command = "music.play('{}')".format(note)
        self.send(command)

    def set_motor_power(self, motor_id=1, power=100):
        """Drive motor M1 or M2 of the mPython expansion board.

        @param motor_id: motor number, 1 (default) or 2
        @param power: motor power in -100~100; >0 forward, <0 reverse, 0 stop
        """
        motor_id = 1 if motor_id == 1 else 2
        power = int(power)
        if power > 100: power = 100
        # Bug fix: the original clamped with ``power < 100``, which forced
        # every value below 100 (e.g. 50, 0) to -100.  Clamp the lower
        # bound only.
        if power < -100: power = -100
        command = "import parrot;parrot.set_speed({},{})".format(motor_id, power)
        self.send(command)

    def set_motor_stop(self, motor_id=1):
        """Stop motor M1 or M2 of the mPython expansion board.

        @param motor_id: motor number, 1 (default) or 2
        """
        motor_id = 1 if motor_id == 1 else 2
        command = "import parrot;parrot.set_speed({},0)".format(motor_id)
        self.send(command)

    ## Properties
    def get_digital(self, pin):
        """Request and return the latest digital reading of *pin*.

        @param pin: 0, 1, 2, 8, 9, 10, 13, 14, 15, 16
        """
        if pin not in [0, 1, 2, 8, 9, 10, 13, 14, 15, 16]: return 0
        command = "_pind[{}]=1".format(pin)
        self.send(command)
        return self.SENSORS['pinD'][pin]

    def get_analog(self, pin):
        """Request and return the latest analog reading of *pin*.

        @param pin: 0, 1, 2
        """
        if pin not in [0, 1, 2]: return 0
        command = "_pina[{}]=1".format(pin)
        self.send(command)
        return self.SENSORS['pinA'][pin]

    def get_button(self, button):
        """Return the latest state of button 'A'/'B' or touch pad
        'P'/'Y'/'T'/'H'/'O'/'N' (case-insensitive); False for unknown names."""
        button = button.upper()
        if button not in ['P', 'Y', 'T', 'H', 'O', 'N', 'A', 'B']: return False
        return self.SENSORS[button]

    def get_acceleration(self, axis):
        """Return the latest accelerometer reading for axis 'x'/'y'/'z'
        (case-insensitive); 0 for unknown axes."""
        axis = axis.upper()
        if axis not in ['X', 'Y', 'Z']: return 0
        return self.SENSORS['a' + axis]

    def get_light(self):
        """Return the latest light-sensor reading."""
        return self.SENSORS['L']

    def get_sound(self):
        """Return the latest sound-sensor reading."""
        return self.SENSORS['S']

    def get_ext(self):
        """Return the latest extension value reported by the board."""
        return self.SENSORS['E']

    ## Events — override these callbacks; each fires on its own worker thread.
    def on_a_pressed(self): pass
    def on_a_released(self): pass
    def on_b_pressed(self): pass
    def on_b_released(self): pass
    def on_p_pressed(self): pass
    def on_p_released(self): pass
    def on_y_pressed(self): pass
    def on_y_released(self): pass
    def on_t_pressed(self): pass
    def on_t_released(self): pass
    def on_h_pressed(self): pass
    def on_h_released(self): pass
    def on_o_pressed(self): pass
    def on_o_released(self): pass
    def on_n_pressed(self): pass
    def on_n_released(self): pass
    def on_shaken(self): pass
    def on_jumped(self): pass
    def on_tilt_forward(self): pass
    def on_tilt_back(self): pass
    def on_tilt_right(self): pass
    def on_tilt_left(self): pass
    def on_tilt_none(self): pass
@atexit.register
def atexit_fun():
    # On interpreter shutdown, restore main.py on the board and close the port.
    controller().disconnect()
| 8,664 | 7,393 | 68 |
c6496b78286a6cb875dc7d76cb7bb13bb97a0c5d | 5,069 | py | Python | data.py | zxr931120/WGAN-for-studying | be05f7d0a32b571f899e4aa82135465262a0a7f7 | [
"MIT"
] | 132 | 2019-06-28T02:39:19.000Z | 2022-03-05T10:04:17.000Z | data.py | zxr931120/WGAN-for-studying | be05f7d0a32b571f899e4aa82135465262a0a7f7 | [
"MIT"
] | 1 | 2019-11-14T00:49:50.000Z | 2019-11-15T09:36:52.000Z | data.py | zxr931120/WGAN-for-studying | be05f7d0a32b571f899e4aa82135465262a0a7f7 | [
"MIT"
] | 32 | 2019-08-03T20:43:31.000Z | 2021-09-22T03:23:31.000Z | import torchlib
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms
# ==============================================================================
# = custom dataset =
# ==============================================================================
# ==============================================================================
# = debug =
# ==============================================================================
# import imlib as im
# import numpy as np
# import pylib as py
# data_loader, _ = make_celeba_dataset(py.glob('data/img_align_celeba', '*.jpg'), batch_size=64)
# for img_batch in data_loader:
# for img in img_batch.numpy():
# img = np.transpose(img, (1, 2, 0))
# im.imshow(img)
# im.show()
| 37.272059 | 151 | 0.580588 | import torchlib
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms
class OnlyImage(Dataset):
    """Adapter that exposes only the image part of an (image, label) dataset."""

    def __init__(self, img_label_dataset):
        # Keep a reference to the wrapped (image, label) dataset.
        self.img_label_dataset = img_label_dataset

    def __len__(self):
        # Same length as the wrapped dataset.
        return len(self.img_label_dataset)

    def __getitem__(self, i):
        # Drop the label and hand back the image only.
        image_and_label = self.img_label_dataset[i]
        return image_and_label[0]
def make_32x32_dataset(dataset, batch_size, drop_remainder=True, shuffle=True, num_workers=4, pin_memory=False):
    """Build a DataLoader of 32x32 images for 'mnist', 'fashion_mnist' or 'cifar10'.

    Downloads the dataset into ``data/`` on first use, normalizes pixels to
    [-1, 1], strips labels and returns ``(data_loader, img_shape)`` where
    ``img_shape`` is ``[H, W, C]``.  Raises NotImplementedError for any other
    dataset name.
    """
    if dataset == 'mnist':
        transform = transforms.Compose([
            transforms.Resize(size=(32, 32)),  # MNIST is natively 28x28
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5], std=[0.5])
        ])
        dataset = datasets.MNIST('data/MNIST', transform=transform, download=True)
        img_shape = [32, 32, 1]
    elif dataset == 'fashion_mnist':
        transform = transforms.Compose([
            transforms.Resize(size=(32, 32)),  # FashionMNIST is natively 28x28
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5], std=[0.5])
        ])
        dataset = datasets.FashionMNIST('data/FashionMNIST', transform=transform, download=True)
        img_shape = [32, 32, 1]
    elif dataset == 'cifar10':
        transform = transforms.Compose([
            transforms.ToTensor(),  # CIFAR-10 is already 32x32 — no resize needed
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        ])
        dataset = datasets.CIFAR10('data/CIFAR10', transform=transform, download=True)
        img_shape = [32, 32, 3]
    else:
        raise NotImplementedError
    # GAN training needs images only, so drop the labels.
    dataset = OnlyImage(dataset)
    data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, drop_last=drop_remainder, pin_memory=pin_memory)
    return data_loader, img_shape
def make_celeba_dataset(img_paths, batch_size, resize=64, drop_remainder=True, shuffle=True, num_workers=4, pin_memory=False):
    """Build a DataLoader over CelebA image files given by *img_paths*.

    Center-crops each image to 108x108 (offsets computed from 218x178 —
    presumably the aligned-CelebA native size; confirm against the data),
    resizes to ``resize`` x ``resize`` and normalizes pixels to [-1, 1].
    Returns ``(data_loader, img_shape)`` with ``img_shape = (resize, resize, 3)``.
    """
    crop_size = 108
    offset_height = (218 - crop_size) // 2
    offset_width = (178 - crop_size) // 2
    # Tensor layout here is (C, H, W); slice the spatial dims only.
    crop = lambda x: x[:, offset_height:offset_height + crop_size, offset_width:offset_width + crop_size]
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(crop),
        transforms.ToPILImage(),  # back to PIL so Resize can operate
        transforms.Resize(size=(resize, resize)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])
    dataset = torchlib.DiskImageDataset(img_paths, map_fn=transform)
    data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, drop_last=drop_remainder, pin_memory=pin_memory)
    img_shape = (resize, resize, 3)
    return data_loader, img_shape
def make_anime_dataset(img_paths, batch_size, resize=64, drop_remainder=True, shuffle=True, num_workers=4, pin_memory=False):
    """Build a DataLoader over anime-face image files given by *img_paths*.

    Resizes each image to ``resize`` x ``resize`` and normalizes pixels to
    [-1, 1].  Returns ``(data_loader, img_shape)`` with
    ``img_shape = (resize, resize, 3)``.
    """
    preprocess = transforms.Compose([
        transforms.Resize(size=(resize, resize)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])
    images = torchlib.DiskImageDataset(img_paths, map_fn=preprocess)
    loader = DataLoader(
        images,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        drop_last=drop_remainder,
        pin_memory=pin_memory,
    )
    return loader, (resize, resize, 3)
# ==============================================================================
# = custom dataset =
# ==============================================================================
def make_custom_datset(img_paths, batch_size, resize=64, drop_remainder=True, shuffle=True, num_workers=4, pin_memory=False):
    """Template for a user-defined dataset loader.

    NOTE: this is intentionally NOT runnable as shipped — the literal ``...``
    (Ellipsis) below is a placeholder that must be replaced with your own
    preprocessing transforms before use.  Returns ``(data_loader, img_shape)``
    like the other ``make_*_dataset`` helpers.
    """
    transform = transforms.Compose([
        # ======================================
        # =               custom               =
        # ======================================
        ..., # custom preprocessings
        # ======================================
        # =               custom               =
        # ======================================
        transforms.Resize(size=(resize, resize)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])
    dataset = torchlib.DiskImageDataset(img_paths, map_fn=transform)
    data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, drop_last=drop_remainder, pin_memory=pin_memory)
    img_shape = (resize, resize, 3)
    return data_loader, img_shape
# ==============================================================================
# = debug =
# ==============================================================================
# import imlib as im
# import numpy as np
# import pylib as py
# data_loader, _ = make_celeba_dataset(py.glob('data/img_align_celeba', '*.jpg'), batch_size=64)
# for img_batch in data_loader:
# for img in img_batch.numpy():
# img = np.transpose(img, (1, 2, 0))
# im.imshow(img)
# im.show()
| 3,943 | 4 | 196 |
4a8006cc1715e24a963b1356d0f168161b4b4b96 | 2,150 | py | Python | dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/mb_20030223.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | null | null | null | dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/mb_20030223.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | null | null | null | dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/mb_20030223.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | 1 | 2020-07-26T03:57:45.000Z | 2020-07-26T03:57:45.000Z | # a pretty straightforward Muenchian grouping test
from Xml.Xslt import test_harness
sheet_1 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="html" indent="yes"/>
<xsl:key name="skills-by-mark" match="skill" use="@mark"/>
<xsl:template match="skills">
<table>
<!-- process a set consisting of the first skill element for each mark -->
<xsl:for-each select="skill[count(.|key('skills-by-mark',@mark)[1])=1]">
<tr>
<td><b><xsl:value-of select="concat(@mark,' skills:')"/></b></td>
<td>
<!-- process all skill elements having the current skill's mark -->
<xsl:for-each select="key('skills-by-mark',@mark)">
<xsl:value-of select="@name"/>
<xsl:if test="position()!=last()"><br/></xsl:if>
</xsl:for-each>
</td>
</tr>
</xsl:for-each>
</table>
</xsl:template>
</xsl:stylesheet>"""
source_1 = """<skills>
<skill mark="excellent" name="excellentskill"/>
<skill mark="excellent" name="excellent skill"/>
<skill mark="good" name="goodskill"/>
<skill mark="good" name="goodskill"/>
<skill mark="basic" name="basicskill"/>
<skill mark="basic" name="basicskill"/>
<skill mark="excellent" name="excellentskill"/>
<skill mark="good" name="goodskill"/>
<skill mark="basic" name="basicskill"/>
</skills>"""
expected_1 = """<table>
<tr>
<td><b>excellent skills:</b></td>
<td>excellentskill
<br>excellent skill
<br>excellentskill
</td>
</tr>
<tr>
<td><b>good skills:</b></td>
<td>goodskill
<br>goodskill
<br>goodskill
</td>
</tr>
<tr>
<td><b>basic skills:</b></td>
<td>basicskill
<br>basicskill
<br>basicskill
</td>
</tr>
</table>"""
| 29.054054 | 80 | 0.587907 | # a pretty straightforward Muenchian grouping test
from Xml.Xslt import test_harness
sheet_1 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="html" indent="yes"/>
<xsl:key name="skills-by-mark" match="skill" use="@mark"/>
<xsl:template match="skills">
<table>
<!-- process a set consisting of the first skill element for each mark -->
<xsl:for-each select="skill[count(.|key('skills-by-mark',@mark)[1])=1]">
<tr>
<td><b><xsl:value-of select="concat(@mark,' skills:')"/></b></td>
<td>
<!-- process all skill elements having the current skill's mark -->
<xsl:for-each select="key('skills-by-mark',@mark)">
<xsl:value-of select="@name"/>
<xsl:if test="position()!=last()"><br/></xsl:if>
</xsl:for-each>
</td>
</tr>
</xsl:for-each>
</table>
</xsl:template>
</xsl:stylesheet>"""
source_1 = """<skills>
<skill mark="excellent" name="excellentskill"/>
<skill mark="excellent" name="excellent skill"/>
<skill mark="good" name="goodskill"/>
<skill mark="good" name="goodskill"/>
<skill mark="basic" name="basicskill"/>
<skill mark="basic" name="basicskill"/>
<skill mark="excellent" name="excellentskill"/>
<skill mark="good" name="goodskill"/>
<skill mark="basic" name="basicskill"/>
</skills>"""
expected_1 = """<table>
<tr>
<td><b>excellent skills:</b></td>
<td>excellentskill
<br>excellent skill
<br>excellentskill
</td>
</tr>
<tr>
<td><b>good skills:</b></td>
<td>goodskill
<br>goodskill
<br>goodskill
</td>
</tr>
<tr>
<td><b>basic skills:</b></td>
<td>basicskill
<br>basicskill
<br>basicskill
</td>
</tr>
</table>"""
def Test(tester):
    """Run the Muenchian-grouping XSLT transform and compare to expected_1."""
    source = test_harness.FileInfo(string=source_1)
    sheet = test_harness.FileInfo(string=sheet_1)
    test_harness.XsltTest(tester, source, [sheet], expected_1,
                          title='ordinary Muenchian grouping with keys')
    return
| 245 | 0 | 23 |
9fa78deb2a18cf18221d0916963fbbc9bcac1a13 | 1,255 | py | Python | cheatsheet-startup-python/example-basic/advance/iteration-fun.py | seniortesting/cheatsheet-startup-parent | de0047d58e245427efa992f0f19c30bdec4f70f4 | [
"MIT"
] | null | null | null | cheatsheet-startup-python/example-basic/advance/iteration-fun.py | seniortesting/cheatsheet-startup-parent | de0047d58e245427efa992f0f19c30bdec4f70f4 | [
"MIT"
] | null | null | null | cheatsheet-startup-python/example-basic/advance/iteration-fun.py | seniortesting/cheatsheet-startup-parent | de0047d58e245427efa992f0f19c30bdec4f70f4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from collections.abc import Iterable
if __name__ == '__main__':
# 字典
d = {'a': 1, 'b': 2, 'c': 3}
for key in d:
print(key,d[key])
# 字符串
for x in 'abc':
print(x)
# 对象是否客迭代
iter=isinstance(['a','b','c'], Iterable)
print(iter)
# 得到对应的下标,需要将可迭代对象加上emumerate
for index,value in enumerate([1,3,52]):
print('index: ',index, 'value: ',value)
# 或者通过range
for x in range(10):
print(x)
# 测试
if findMinAndMax([]) != (None, None):
print('测试失败!')
elif findMinAndMax([7]) != (7, 7):
print('测试失败!')
elif findMinAndMax([7, 1]) != (1, 7):
print('测试失败!')
elif findMinAndMax([7, 1, 3, 9, 5]) != (1, 9):
print('测试失败!')
else:
print('测试成功!')
# 迭代器
# list、tuple、dict、set、str
# generator,包括生成器和带yield的generator function
print(isinstance([],Iterable))
| 20.916667 | 50 | 0.508367 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from collections.abc import Iterable
def findMinAndMax(L):
    """Return ``(minimum, maximum)`` of sequence *L*.

    Returns ``(None, None)`` for an empty sequence; raises TypeError when
    *L* is None (the original contract, message kept verbatim).  The manual
    scan — which also shadowed the ``min``/``max`` builtins — is replaced by
    the builtins themselves.
    """
    if L is None:
        raise TypeError('不能为空')
    if len(L) == 0:
        return None, None
    return min(L), max(L)
if __name__ == '__main__':
    # Iterating a dict yields its keys.
    d = {'a': 1, 'b': 2, 'c': 3}
    for key in d:
        print(key,d[key])
    # Strings are iterable character by character.
    for x in 'abc':
        print(x)
    # Check whether an object is iterable.
    iter=isinstance(['a','b','c'], Iterable)
    print(iter)
    # Use enumerate to get the index alongside the value.
    for index,value in enumerate([1,3,52]):
        print('index: ',index, 'value: ',value)
    # Or iterate over a range of indices.
    for x in range(10):
        print(x)
    # Self-tests for findMinAndMax (messages are intentionally in Chinese).
    if findMinAndMax([]) != (None, None):
        print('测试失败!')
    elif findMinAndMax([7]) != (7, 7):
        print('测试失败!')
    elif findMinAndMax([7, 1]) != (1, 7):
        print('测试失败!')
    elif findMinAndMax([7, 1, 3, 9, 5]) != (1, 9):
        print('测试失败!')
    else:
        print('测试成功!')
    # Iterables include:
    # list, tuple, dict, set, str
    # generators, including generator expressions and functions using yield
    print(isinstance([],Iterable))
| 328 | 0 | 23 |
607f482de3979ae3f1e14134f8ff6f16b946d08b | 2,182 | py | Python | utils/convert_to_xz.py | glebkuznetsov/cloudbiolinux | ec47b90ce72f6cd1bc4e5422231f19d41378b122 | [
"MIT"
] | 122 | 2015-01-04T13:23:27.000Z | 2022-01-18T22:52:12.000Z | utils/convert_to_xz.py | glebkuznetsov/cloudbiolinux | ec47b90ce72f6cd1bc4e5422231f19d41378b122 | [
"MIT"
] | 170 | 2015-02-09T18:03:49.000Z | 2021-11-14T02:32:09.000Z | utils/convert_to_xz.py | glebkuznetsov/cloudbiolinux | ec47b90ce72f6cd1bc4e5422231f19d41378b122 | [
"MIT"
] | 107 | 2015-01-06T06:10:04.000Z | 2022-02-10T17:25:34.000Z | #!/usr/bin/env python
"""Convert gzipped files on s3 biodata to xz compression format.
This conversion is designed to save time and space for download.
Some download utilities to speed things up:
axel, aria2, lftp
"""
import os
import sys
import socket
import subprocess
import boto
import fabric.api as fabric
if __name__ == "__main__":
bucket_name = "biodata"
main(bucket_name)
| 34.634921 | 85 | 0.64253 | #!/usr/bin/env python
"""Convert gzipped files on s3 biodata to xz compression format.
This conversion is designed to save time and space for download.
Some download utilities to speed things up:
axel, aria2, lftp
"""
import os
import sys
import socket
import subprocess
import boto
import fabric.api as fabric
def main(bucket_name):
    """Convert every .gz object under genomes/ in the S3 bucket to .xz.

    For each gzipped key: download it (in parallel), recompress locally with
    xz, upload the .xz replacement and delete the original.  Note this is
    Python 2 code (print statements) and the 'biodata' bucket name is
    hard-coded here rather than taken from *bucket_name*.
    """
    conn = boto.connect_s3()
    bucket = conn.get_bucket("biodata")
    for s3_item in bucket.list("genomes/"):
        if s3_item.name.endswith(".gz"):
            print "xzipping", s3_item.name
            local_file = os.path.basename(s3_item.name)
            local_xz = "%s.xz" % os.path.splitext(local_file)[0]
            if not os.path.exists(local_xz):
                if not os.path.exists(local_file):
                    # Parallel download is much faster than a plain GET.
                    download_parallel(s3_item.generate_url(7200))
                    #s3_item.get_contents_to_filename(local_file)
                local_xz = gzip_to_xz(local_file)
            swap_s3_item(local_xz, bucket, s3_item)
            os.remove(local_xz)
def download_parallel(url):
    """Download *url* into the current directory using axel, one connection
    per CPU core, by running the command over fabric on the local host."""
    host = socket.gethostbyaddr(socket.gethostname())[0]
    user = os.environ["USER"]
    with fabric.settings(host_string="%s@%s" % (user, host)):
        # One download connection per core.
        ncores = fabric.run("cat /proc/cpuinfo | grep processor | wc -l")
        with fabric.cd(os.getcwd()):
            fabric.run("axel -a -n %s '%s'" % (ncores, url), shell=False)
            #fabric.run("aria2c -j %s -s %s '%s'" % (ncores, ncores, url),
            #           shell=False)
def swap_s3_item(xz_file, bucket, orig_s3_item):
    """Upload *xz_file* under the original key with .gz -> .xz, then delete
    the original S3 object.  Uses the multipart-upload helper script."""
    print "  Uploading to S3"
    assert os.path.exists(xz_file)
    new_name = orig_s3_item.name.replace(".gz", ".xz")
    upload_script = os.path.join(os.path.dirname(__file__), "s3_multipart_upload.py")
    cl = ["python2.6", upload_script, xz_file, bucket.name, new_name]
    subprocess.check_call(cl)
    # Only remove the .gz object after the .xz upload succeeded.
    orig_s3_item.delete()
def gzip_to_xz(local_file):
    """Recompress *local_file* (a .gz archive) to xz; return the new name.

    Shells out to gunzip and xz, so the .gz file is replaced on disk by the
    corresponding .xz file.
    """
    subprocess.check_call(["gunzip", local_file])
    # gunzip strips the .gz suffix; xz then appends .xz to what remains.
    inner_file = os.path.splitext(local_file)[0]
    subprocess.check_call(["xz", "-z", inner_file])
    return "%s.xz" % inner_file
if __name__ == "__main__":
bucket_name = "biodata"
main(bucket_name)
| 1,698 | 0 | 92 |
ef8c27e1cefe34dc5b34a9efd165229c09281d61 | 8,381 | py | Python | wyze_sdk/service/scale_service.py | RebelTat/wyze-sdk | 249b1df71e80c97fc2e5fb5977431b887f7361c2 | [
"Unlicense"
] | 132 | 2021-05-03T12:33:58.000Z | 2022-03-31T18:03:26.000Z | wyze_sdk/service/scale_service.py | RebelTat/wyze-sdk | 249b1df71e80c97fc2e5fb5977431b887f7361c2 | [
"Unlicense"
] | 51 | 2021-05-01T20:24:20.000Z | 2022-03-30T20:37:05.000Z | wyze_sdk/service/scale_service.py | RebelTat/wyze-sdk | 249b1df71e80c97fc2e5fb5977431b887f7361c2 | [
"Unlicense"
] | 21 | 2021-05-05T14:01:38.000Z | 2022-03-29T02:58:39.000Z | from __future__ import annotations
from datetime import datetime
from typing import Optional, Sequence, Tuple, Union
from wyze_sdk.models import datetime_to_epoch
from .base import ExServiceClient, WyzeResponse
class ScaleServiceClient(ExServiceClient):
"""
Scale service client is the wrapper on the requests to https://wyze-scale-service.wyzecam.com
"""
WYZE_API_URL = "https://wyze-scale-service.wyzecam.com"
WYZE_APP_ID = "scap_41183d5d0bac498d"
    def get_device_setting(self, *, did: str, **kwargs) -> WyzeResponse:
        """Return the device-level settings for the scale *did*.

        Extra keyword arguments are forwarded as query parameters.
        See: com.wyze.ihealth.d.a.m
        """
        kwargs.update({'device_id': did})
        return self.api_call('/plugin/scale/get_device_setting', http_verb="GET", params=kwargs)

    def get_device_member(self, *, did: str, **kwargs) -> WyzeResponse:
        """Return the users associated with the scale *did*.

        Extra keyword arguments are forwarded as query parameters.
        See: com.wyze.ihealth.d.a.j
        """
        kwargs.update({'device_id': did})
        return self.api_call('/plugin/scale/get_device_member', http_verb="GET", params=kwargs)

    def get_family_member(self, *, did: str, **kwargs) -> WyzeResponse:
        """Return the users associated with the scale *did*.

        Presumably the family-account members, as distinct from
        ``get_device_member`` — confirm against the service.
        See: com.wyze.ihealth.d.a.o
        """
        kwargs.update({'device_id': did})
        return self.api_call('/plugin/scale/get_family_member', http_verb="GET", params=kwargs)

    def get_user_preference(self, *, did: str, **kwargs) -> WyzeResponse:
        """Return the scale-related preferences of the current user for *did*.

        See: com.wyze.ihealth.d.a.p
        """
        kwargs.update({'device_id': did})
        return self.api_call('/plugin/scale/get_user_preference', http_verb="GET", params=kwargs)

    def get_token(self, *, did: str, **kwargs) -> WyzeResponse:
        """Return a binding token for the scale *did*.

        See: com.wyze.ihealth.d.a.c
        """
        kwargs.update({'device_id': did})
        return self.api_call('/plugin/scale/get_token', http_verb="GET", params=kwargs)

    def get_user_device_relation(self, *, did: str, user_id: str, **kwargs) -> WyzeResponse:
        """Return the relationship between user *user_id* and scale *did*.

        See: com.wyze.ihealth.d.a.d
        """
        kwargs.update({'device_id': did, 'user_id': user_id})
        return self.api_call('/plugin/scale/get_user_device_relation', http_verb="GET", params=kwargs)

    def update_device_setting(self, *, did: str, model: str, firmware_ver: str, mac: str, unit: str, broadcast: int, **kwargs) -> WyzeResponse:
        """Update the settings of the scale *did*.

        All provided values are merged into the JSON request body.
        See: com.wyze.ihealth.d.a.f
        """
        kwargs.update({'device_id': did, 'device_model': model, 'firmware_ver': firmware_ver, 'mac': mac, 'unit': unit, 'broadcast': broadcast})
        return self.api_call('/plugin/scale/update_device_setting', json=kwargs)

    def get_user_profile(self):
        """Return the scale-related data from the current user's profile.

        See: com.wyze.ihealth.d.a.a and com.samsung.android.sdk.healthdata.HealthUserProfile
        """
        return self.api_call('/app/v2/platform/get_user_profile', http_verb="GET")
def update_user_profile(self, *, logo_url: str, nickname: str, gender: str, birth_date: str, height: str, height_unit: str, body_type: str, occupation: str, **kwargs) -> WyzeResponse:
"""
Set scale-related data to the user's profile.
See: com.wyze.ihealth.d.a.l and com.samsung.android.sdk.healthdata.HealthUserProfile
"""
kwargs.update({'logo_url': logo_url, 'nickname': nickname, 'gender': gender, 'birthDate': birth_date, 'height': height, 'height_unit': height_unit, 'body_type': body_type, 'occupation': occupation})
return self.api_call('/app/v2/platform/update_user_profile', json=kwargs)
def get_goal_weight(self, *, user_id: str, **kwargs) -> WyzeResponse:
"""
Get the goal weight from the user's profile.
See: com.wyze.ihealth.d.b.v
"""
kwargs.update({'family_member_id': user_id})
return self.api_call('/plugin/scale/get_goal_weight', http_verb="GET", params=kwargs)
def get_heart_rate_record_list(self, *, user_id: Optional[str] = None, record_number: Optional[int] = 1, measure_ts: Optional[int] = None, **kwargs) -> WyzeResponse:
"""
Get the heart rate records from the user's profile.
See: com.wyze.ihealth.d.b.b
"""
if user_id:
kwargs.update({'family_member_id': user_id})
kwargs.update({'record_number': str(record_number)})
if measure_ts:
kwargs.update({'measure_ts': str(measure_ts)})
return self.api_call('/plugin/scale/get_heart_rate_record_list', http_verb="GET", params=kwargs)
def get_latest_records(self, *, user_id: Optional[str] = None, **kwargs) -> WyzeResponse:
"""
Get the latest records from the user's profile.
See: com.wyze.ihealth.d.b.t
"""
if user_id:
kwargs.update({'family_member_id': user_id})
return self.api_call('/plugin/scale/get_latest_record', http_verb="GET", params=kwargs)
def get_records(self, *, user_id: Optional[str] = None, start_time: datetime, end_time: datetime, **kwargs) -> WyzeResponse:
"""
Get a range of records from the user's profile.
See: com.wyze.ihealth.d.b.i and com.samsung.android.sdk.healthdata.HealthConstants.SessionMeasurement
"""
if user_id:
kwargs.update({'family_member_id': user_id})
kwargs.update({'start_time': str(0), 'end_time': str(datetime_to_epoch(end_time))})
return self.api_call('/plugin/scale/get_record_range', http_verb="GET", params=kwargs)
def delete_goal_weight(self, *, user_id: Optional[str] = None, **kwargs) -> WyzeResponse:
"""
Removes the goal weight from the user's profile.
See: com.wyze.ihealth.d.b.j
"""
if user_id:
kwargs.update({'family_member_id': user_id})
return self.api_call('/plugin/scale/delete_goal_weight', http_verb="GET", params=kwargs)
def add_heart_rate_record(self, *, did: str, user_id: str, measure_ts: int, heart_rate: int, **kwargs) -> WyzeResponse:
"""
Add a heart rate record to the user's profile.
See: com.wyze.ihealth.d.b.p
"""
kwargs.update({'device_id': did, 'family_member_id': user_id, 'measure_ts': measure_ts, 'heart_rate': str(heart_rate)})
return self.api_call('/plugin/scale/get_latest_record', json=kwargs)
def add_weight_record(self, *, did: str, mac: str, user_id: str, measure_ts: int, measure_type: int = 1, weight: float, **kwargs) -> WyzeResponse:
"""
Add a weight-only record to the user's profile.
See: com.wyze.ihealth.d.b.k
"""
kwargs.update({'device_id': did, 'mac': mac, 'family_member_id': user_id, 'measure_ts': measure_ts, 'measure_type': measure_type, 'weight': weight})
return self.api_call('/plugin/scale/get_latest_record', json=kwargs)
def delete_record(self, *, data_id=Union[int, Sequence[int]], **kwargs) -> WyzeResponse:
"""
Delete health records from the user's profile.
See: com.wyze.ihealth.d.b.u
"""
if isinstance(data_id, (list, Tuple)):
kwargs.update({"data_id_list": ",".join(data_id)})
else:
kwargs.update({"data_id_list": [data_id]})
return self.api_call('/plugin/scale/delete_record', json=kwargs)
| 40.293269 | 206 | 0.635485 | from __future__ import annotations
from datetime import datetime
from typing import Optional, Sequence, Tuple, Union
from wyze_sdk.models import datetime_to_epoch
from .base import ExServiceClient, WyzeResponse
class ScaleServiceClient(ExServiceClient):
"""
Scale service client is the wrapper on the requests to https://wyze-scale-service.wyzecam.com
"""
WYZE_API_URL = "https://wyze-scale-service.wyzecam.com"
WYZE_APP_ID = "scap_41183d5d0bac498d"
def __init__(
self,
token: Optional[str] = None,
base_url: Optional[str] = WYZE_API_URL,
):
super().__init__(token=token, base_url=base_url)
def api_call(
self,
api_method: str,
*,
http_verb: str = "POST",
params: dict = None,
json: dict = None,
request_specific_headers: Optional[dict] = None,
) -> WyzeResponse:
# create the time-based nonce
nonce = self.request_verifier.clock.nonce()
return super().api_call(
api_method,
http_verb=http_verb,
params=params,
json=json,
headers=self._get_headers(request_specific_headers=request_specific_headers, nonce=nonce),
nonce=nonce,
)
def get_device_setting(self, *, did: str, **kwargs) -> WyzeResponse:
"""
Get the settings for the scale.
See: com.wyze.ihealth.d.a.m
"""
kwargs.update({'device_id': did})
return self.api_call('/plugin/scale/get_device_setting', http_verb="GET", params=kwargs)
def get_device_member(self, *, did: str, **kwargs) -> WyzeResponse:
"""
Get the users associated with the scale.
See: com.wyze.ihealth.d.a.j
"""
kwargs.update({'device_id': did})
return self.api_call('/plugin/scale/get_device_member', http_verb="GET", params=kwargs)
def get_family_member(self, *, did: str, **kwargs) -> WyzeResponse:
"""
Get the users associated with the scale.
See: com.wyze.ihealth.d.a.o
"""
kwargs.update({'device_id': did})
return self.api_call('/plugin/scale/get_family_member', http_verb="GET", params=kwargs)
def get_user_preference(self, *, did: str, **kwargs) -> WyzeResponse:
"""
Get the scale-related preferences for the current user.
See: com.wyze.ihealth.d.a.p
"""
kwargs.update({'device_id': did})
return self.api_call('/plugin/scale/get_user_preference', http_verb="GET", params=kwargs)
def get_token(self, *, did: str, **kwargs) -> WyzeResponse:
"""
Get binding token for the scale.
See: com.wyze.ihealth.d.a.c
"""
kwargs.update({'device_id': did})
return self.api_call('/plugin/scale/get_token', http_verb="GET", params=kwargs)
def get_user_device_relation(self, *, did: str, user_id: str, **kwargs) -> WyzeResponse:
"""
Get the relationship of the users associated with the scale.
See: com.wyze.ihealth.d.a.d
"""
kwargs.update({'device_id': did, 'user_id': user_id})
return self.api_call('/plugin/scale/get_user_device_relation', http_verb="GET", params=kwargs)
def update_device_setting(self, *, did: str, model: str, firmware_ver: str, mac: str, unit: str, broadcast: int, **kwargs) -> WyzeResponse:
"""
Update the settings of scale.
See: com.wyze.ihealth.d.a.f
"""
kwargs.update({'device_id': did, 'device_model': model, 'firmware_ver': firmware_ver, 'mac': mac, 'unit': unit, 'broadcast': broadcast})
return self.api_call('/plugin/scale/update_device_setting', json=kwargs)
def get_user_profile(self):
"""
Get the scale-related data from the user's profile.
See: com.wyze.ihealth.d.a.a and com.samsung.android.sdk.healthdata.HealthUserProfile
"""
return self.api_call('/app/v2/platform/get_user_profile', http_verb="GET")
def update_user_profile(self, *, logo_url: str, nickname: str, gender: str, birth_date: str, height: str, height_unit: str, body_type: str, occupation: str, **kwargs) -> WyzeResponse:
"""
Set scale-related data to the user's profile.
See: com.wyze.ihealth.d.a.l and com.samsung.android.sdk.healthdata.HealthUserProfile
"""
kwargs.update({'logo_url': logo_url, 'nickname': nickname, 'gender': gender, 'birthDate': birth_date, 'height': height, 'height_unit': height_unit, 'body_type': body_type, 'occupation': occupation})
return self.api_call('/app/v2/platform/update_user_profile', json=kwargs)
def get_goal_weight(self, *, user_id: str, **kwargs) -> WyzeResponse:
"""
Get the goal weight from the user's profile.
See: com.wyze.ihealth.d.b.v
"""
kwargs.update({'family_member_id': user_id})
return self.api_call('/plugin/scale/get_goal_weight', http_verb="GET", params=kwargs)
def get_heart_rate_record_list(self, *, user_id: Optional[str] = None, record_number: Optional[int] = 1, measure_ts: Optional[int] = None, **kwargs) -> WyzeResponse:
"""
Get the heart rate records from the user's profile.
See: com.wyze.ihealth.d.b.b
"""
if user_id:
kwargs.update({'family_member_id': user_id})
kwargs.update({'record_number': str(record_number)})
if measure_ts:
kwargs.update({'measure_ts': str(measure_ts)})
return self.api_call('/plugin/scale/get_heart_rate_record_list', http_verb="GET", params=kwargs)
def get_latest_records(self, *, user_id: Optional[str] = None, **kwargs) -> WyzeResponse:
"""
Get the latest records from the user's profile.
See: com.wyze.ihealth.d.b.t
"""
if user_id:
kwargs.update({'family_member_id': user_id})
return self.api_call('/plugin/scale/get_latest_record', http_verb="GET", params=kwargs)
def get_records(self, *, user_id: Optional[str] = None, start_time: datetime, end_time: datetime, **kwargs) -> WyzeResponse:
"""
Get a range of records from the user's profile.
See: com.wyze.ihealth.d.b.i and com.samsung.android.sdk.healthdata.HealthConstants.SessionMeasurement
"""
if user_id:
kwargs.update({'family_member_id': user_id})
kwargs.update({'start_time': str(0), 'end_time': str(datetime_to_epoch(end_time))})
return self.api_call('/plugin/scale/get_record_range', http_verb="GET", params=kwargs)
def delete_goal_weight(self, *, user_id: Optional[str] = None, **kwargs) -> WyzeResponse:
"""
Removes the goal weight from the user's profile.
See: com.wyze.ihealth.d.b.j
"""
if user_id:
kwargs.update({'family_member_id': user_id})
return self.api_call('/plugin/scale/delete_goal_weight', http_verb="GET", params=kwargs)
def add_heart_rate_record(self, *, did: str, user_id: str, measure_ts: int, heart_rate: int, **kwargs) -> WyzeResponse:
"""
Add a heart rate record to the user's profile.
See: com.wyze.ihealth.d.b.p
"""
kwargs.update({'device_id': did, 'family_member_id': user_id, 'measure_ts': measure_ts, 'heart_rate': str(heart_rate)})
return self.api_call('/plugin/scale/get_latest_record', json=kwargs)
def add_weight_record(self, *, did: str, mac: str, user_id: str, measure_ts: int, measure_type: int = 1, weight: float, **kwargs) -> WyzeResponse:
"""
Add a weight-only record to the user's profile.
See: com.wyze.ihealth.d.b.k
"""
kwargs.update({'device_id': did, 'mac': mac, 'family_member_id': user_id, 'measure_ts': measure_ts, 'measure_type': measure_type, 'weight': weight})
return self.api_call('/plugin/scale/get_latest_record', json=kwargs)
def delete_record(self, *, data_id=Union[int, Sequence[int]], **kwargs) -> WyzeResponse:
"""
Delete health records from the user's profile.
See: com.wyze.ihealth.d.b.u
"""
if isinstance(data_id, (list, Tuple)):
kwargs.update({"data_id_list": ",".join(data_id)})
else:
kwargs.update({"data_id_list": [data_id]})
return self.api_call('/plugin/scale/delete_record', json=kwargs)
| 735 | 0 | 54 |
9a240829f2fd3ac6b2f5b53c6e23ecb372c84fb8 | 2,343 | py | Python | api/migrations/0001_initial.py | lirandepira/winccoa-web | cf1eef22937956c46bbc4ef0e0ab1cc88cdea4b1 | [
"MIT"
] | null | null | null | api/migrations/0001_initial.py | lirandepira/winccoa-web | cf1eef22937956c46bbc4ef0e0ab1cc88cdea4b1 | [
"MIT"
] | null | null | null | api/migrations/0001_initial.py | lirandepira/winccoa-web | cf1eef22937956c46bbc4ef0e0ab1cc88cdea4b1 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.7 on 2018-07-06 14:37
from django.db import migrations, models
import django.db.models.deletion
| 42.6 | 114 | 0.556978 | # Generated by Django 2.0.7 on 2018-07-06 14:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Elements',
fields=[
('element_id', models.IntegerField(primary_key=True, serialize=False)),
('event', models.IntegerField()),
('alert', models.IntegerField()),
('element_name', models.CharField(max_length=400)),
('unit', models.CharField(max_length=400)),
('alias', models.CharField(max_length=400)),
('group_name', models.CharField(max_length=400)),
('comment', models.CharField(max_length=400)),
],
),
migrations.CreateModel(
name='EventHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ts', models.DateTimeField()),
('value_number', models.BinaryField()),
('status', models.IntegerField(blank=True, null=True)),
('manager', models.IntegerField()),
('user', models.IntegerField()),
('base', models.IntegerField()),
('text', models.CharField(max_length=400)),
('value_string', models.CharField(max_length=400)),
('value_timestamp', models.DateTimeField()),
('corrvalue_number', models.BinaryField()),
('olvalue_number', models.BinaryField()),
('corrvalue_string', models.CharField(max_length=400)),
('olvalue_string', models.CharField(max_length=400)),
('corrvalue_timestamp', models.DateTimeField()),
('olvalue_timestamp', models.DateTimeField()),
('offvalue_number', models.BinaryField()),
('offvalue_string', models.CharField(max_length=400)),
('offvalue_timestamp', models.DateTimeField()),
('archive', models.IntegerField()),
('element_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Elements')),
],
),
]
| 0 | 2,196 | 23 |
7d6eb927a7acd0fcfeabf330830474d337f8fc64 | 429 | py | Python | setup.py | msaltz96/wigs | 35b6bedbac670d48a5d0b088f681daf38756abfa | [
"Apache-2.0"
] | null | null | null | setup.py | msaltz96/wigs | 35b6bedbac670d48a5d0b088f681daf38756abfa | [
"Apache-2.0"
] | null | null | null | setup.py | msaltz96/wigs | 35b6bedbac670d48a5d0b088f681daf38756abfa | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
setup(
name="pre_wigs_validation",
version="0.1.0",
description="Pre-WIG Validator for Linux",
author="steno",
author_email="steno@amazon.com",
packages=find_packages(exclude=["tests"]),
include_package_data=True,
install_requires=["requests", "dataclasses", "distro", "PrettyTable"]
# Maybe include dev dependencies in a txt file
)
| 28.6 | 74 | 0.682984 | from setuptools import setup, find_packages
setup(
name="pre_wigs_validation",
version="0.1.0",
description="Pre-WIG Validator for Linux",
author="steno",
author_email="steno@amazon.com",
packages=find_packages(exclude=["tests"]),
include_package_data=True,
install_requires=["requests", "dataclasses", "distro", "PrettyTable"]
# Maybe include dev dependencies in a txt file
)
| 0 | 0 | 0 |
07391be648f1e47b967aff8fdaf3134d1e0d69b5 | 54,550 | py | Python | OED/cnn_seq2seq.py | adrianomundo/II2202-research-methodology-scientific-writing | 39a7a07e1dabbd988f9b3e0c5c41a36a6d292df8 | [
"Apache-2.0"
] | 1 | 2021-01-30T11:03:05.000Z | 2021-01-30T11:03:05.000Z | OED/cnn_seq2seq.py | adrianomundo/II2202-research-methodology-scientific-writing | 39a7a07e1dabbd988f9b3e0c5c41a36a6d292df8 | [
"Apache-2.0"
] | null | null | null | OED/cnn_seq2seq.py | adrianomundo/II2202-research-methodology-scientific-writing | 39a7a07e1dabbd988f9b3e0c5c41a36a6d292df8 | [
"Apache-2.0"
] | null | null | null | import pathlib
import sys
import tensorflow as tf
import numpy as np
from tensorflow.python.ops import init_ops
from tensorflow.python.ops.rnn_cell_impl import _Linear, LSTMStateTuple
from tensorflow.python.ops import variable_scope as vs
from utils import *
if __name__ == '__main__':
batch_num = 1
hidden_num = 4
# step_num = 8
iteration = 30
ensemble_space = 10
learning_rate = 1e-3
multivariate = True
partition = True
save_model = False
try:
sys.argv[1]
except IndexError:
for n in range(1, 7):
# file name parameter
dataset = n
if dataset == 1:
file_name = './GD/data/Genesis_AnomalyLabels.csv'
print(file_name)
k_partition = 40
abnormal_data, abnormal_label = ReadGDDataset(file_name)
elem_num = 18
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label, _part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, cks = RunModel(splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 2.5)
final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
print('roc_auc=' + str(final_average_roc_auc))
final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(abnormal_label, final_error)
print('pr_auc=' + str(final_average_precision))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
if dataset == 2:
file_name = './HSS/data/HRSS_anomalous_standard.csv'
print(file_name)
k_partition = 80
abnormal_data, abnormal_label = ReadHSSDataset(file_name)
elem_num = 18
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label, _part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, cks = RunModel(splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
print('roc_auc=' + str(final_average_roc_auc))
final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(abnormal_label, final_error)
print('pr_auc=' + str(final_average_precision))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
if dataset == 3:
for root, dirs, _ in os.walk('./YAHOO/data'):
for dir in dirs:
k_partition = 10
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
abnormal_data, abnormal_label = ReadS5Dataset(file_name)
elem_num = 1
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label, _part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, pr_cks = RunModel(splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
print('roc_auc=' + str(final_average_roc_auc))
final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(abnormal_label, final_error)
print('pr_auc=' + str(final_average_precision))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
s_precision.append(final_p)
s_recall.append(final_r)
s_f1.append(final_f)
s_roc_auc.append(final_average_roc_auc)
s_pr_auc.append(final_average_precision)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 4:
for root, dirs, _ in os.walk('./NAB/data'):
for dir in dirs:
k_partition = 10
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
abnormal_data, abnormal_label = ReadNABDataset(file_name)
elem_num = 1
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label, _part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, pr_cks = RunModel(splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
print('roc_auc=' + str(final_average_roc_auc))
final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(abnormal_label, final_error)
print('pr_auc=' + str(final_average_precision))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
s_precision.append(final_p)
s_recall.append(final_r)
s_f1.append(final_f)
s_roc_auc.append(final_average_roc_auc)
s_pr_auc.append(final_average_precision)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 5:
for root, dirs, files in os.walk('./2D/test'):
for dir in dirs:
k_partition = 3
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
abnormal_data, abnormal_label = Read2DDataset(file_name)
elem_num = 2
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label, _part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, pr_cks = RunModel(
splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
print('roc_auc=' + str(final_average_roc_auc))
final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(
abnormal_label, final_error)
print('pr_auc=' + str(final_average_precision))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
s_precision.append(final_p)
s_recall.append(final_r)
s_f1.append(final_f)
s_roc_auc.append(final_average_roc_auc)
s_pr_auc.append(final_average_precision)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 6:
k_partition = 2
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for root, dirs, _ in os.walk('./UAH/'):
for dir in dirs:
folder_name = os.path.join(root, dir)
print(folder_name)
abnormal_data, abnormal_label = ReadUAHDataset(folder_name)
elem_num = 4
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label,
_part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, cks = RunModel(
splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
print('########################################')
precision, recall, f1 = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
PrintPrecisionRecallF1Metrics(precision, recall, f1)
_, _, roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
print('roc_auc=' + str(roc_auc))
_, _, pr_auc = CalculatePrecisionRecallCurve(abnormal_label, final_error)
print('pr_auc=' + str(pr_auc))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
print('########################################')
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
s_precision.append(precision)
s_recall.append(recall)
s_f1.append(f1)
s_roc_auc.append(roc_auc)
s_pr_auc.append(pr_auc)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 7:
for root, dirs, files in os.walk('./ECG/'):
for dir in dirs:
k_partition = 3
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
abnormal_data, abnormal_label = ReadECGDataset(file_name)
elem_num = 3
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data,
abnormal_label,
_part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, pr_cks = RunModel(
splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label,
y_pred)
PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label,
final_error)
print('roc_auc=' + str(final_average_roc_auc))
final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(
abnormal_label, final_error)
print('pr_auc=' + str(final_average_precision))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data,
abnormal_label)
s_precision.append(final_p)
s_recall.append(final_r)
s_f1.append(final_f)
s_roc_auc.append(final_average_roc_auc)
s_pr_auc.append(final_average_precision)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
    # No-exception path: a dataset index was passed on the command line.
    else:
        # file name parameter
        dataset = int(sys.argv[1])
        # Dataset 1: Genesis demonstrator log (single CSV, 18 features).
        if dataset == 1:
            file_name = './GD/data/Genesis_AnomalyLabels.csv'
            print(file_name)
            k_partition = 40
            abnormal_data, abnormal_label = ReadGDDataset(file_name)
            # Feature count; module-level name read by the model builder.
            elem_num = 18
            if multivariate:
                # Add a leading batch axis (batch_num == 1).
                abnormal_data = np.expand_dims(abnormal_data, axis=0)
            if partition:
                # Split the series into k parts, model each part separately,
                # then score the concatenated reconstruction errors.
                splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label, _part_number=k_partition)
                final_error = []
                for i in range(k_partition):
                    error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, cks = RunModel(splitted_data[i], splitted_label[i])
                    final_error.append(error_partition)
                    # print('-----------------------------------------')
                final_error = np.concatenate(final_error).ravel()
                final_zscore = Z_Score(final_error)
                # NOTE(review): z-score threshold 2.5 here differs from the 3
                # used by every other dataset branch -- presumably tuned per
                # dataset; confirm this is intentional.
                y_pred = CreateLabelBasedOnZscore(final_zscore, 2.5)
                final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
                PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
                final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
                print('roc_auc=' + str(final_average_roc_auc))
                final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(abnormal_label, final_error)
                print('pr_auc=' + str(final_average_precision))
                cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
                print('cohen_kappa=' + str(cks))
            else:
                # Single un-partitioned run; RunModel prints its own metrics.
                error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
        # Dataset 2: High Rack Storage System log (single CSV, 18 features).
        if dataset == 2:
            file_name = './HSS/data/HRSS_anomalous_standard.csv'
            print(file_name)
            k_partition = 80
            abnormal_data, abnormal_label = ReadHSSDataset(file_name)
            # Feature count; module-level name read by the model builder.
            elem_num = 18
            if multivariate:
                # Add a leading batch axis (batch_num == 1).
                abnormal_data = np.expand_dims(abnormal_data, axis=0)
            if partition:
                # Split the series into k parts, model each part separately,
                # then score the concatenated reconstruction errors.
                splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label, _part_number=k_partition)
                final_error = []
                for i in range(k_partition):
                    error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, cks = RunModel(splitted_data[i], splitted_label[i])
                    final_error.append(error_partition)
                    # print('-----------------------------------------')
                final_error = np.concatenate(final_error).ravel()
                final_zscore = Z_Score(final_error)
                # Points with |z| beyond 3 are labelled anomalous.
                y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
                final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
                PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
                final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
                print('roc_auc=' + str(final_average_roc_auc))
                final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(abnormal_label, final_error)
                print('pr_auc=' + str(final_average_precision))
                cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
                print('cohen_kappa=' + str(cks))
            else:
                # Single un-partitioned run; RunModel prints its own metrics.
                error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
if dataset == 3:
for root, dirs, _ in os.walk('./YAHOO/data'):
for dir in dirs:
k_partition = 10
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
abnormal_data, abnormal_label = ReadS5Dataset(file_name)
elem_num = 1
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label, _part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, pr_cks = RunModel(splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
print('roc_auc=' + str(final_average_roc_auc))
final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(abnormal_label, final_error)
print('pr_auc=' + str(final_average_precision))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
s_precision.append(final_p)
s_recall.append(final_r)
s_f1.append(final_f)
s_roc_auc.append(final_average_roc_auc)
s_pr_auc.append(final_average_precision)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
        # Dataset 4: NAB benchmark -- one univariate CSV per file, grouped by
        # sub-directory; per-directory averages are reported.
        if dataset == 4:
            for root, dirs, _ in os.walk('./NAB/data'):
                for dir in dirs:
                    k_partition = 10
                    # Per-directory metric accumulators (one entry per file).
                    s_precision = []
                    s_recall = []
                    s_f1 = []
                    s_roc_auc = []
                    s_pr_auc = []
                    s_cks = []
                    for _, _, files in os.walk(root + '/' + dir):
                        for file in files:
                            file_name = os.path.join(root, dir, file)
                            print(file_name)
                            abnormal_data, abnormal_label = ReadNABDataset(file_name)
                            # Univariate series; read as a global by the model builder.
                            elem_num = 1
                            if multivariate:
                                abnormal_data = np.expand_dims(abnormal_data, axis=0)
                            if partition:
                                # Model each of the k partitions separately, then
                                # score the concatenated reconstruction errors.
                                splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label, _part_number=k_partition)
                                final_error = []
                                for i in range(k_partition):
                                    error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, pr_cks = RunModel(splitted_data[i], splitted_label[i])
                                    final_error.append(error_partition)
                                    # print('-----------------------------------------')
                                final_error = np.concatenate(final_error).ravel()
                                final_zscore = Z_Score(final_error)
                                y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
                                final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
                                PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
                                final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
                                print('roc_auc=' + str(final_average_roc_auc))
                                final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(abnormal_label, final_error)
                                print('pr_auc=' + str(final_average_precision))
                                cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
                                print('cohen_kappa=' + str(cks))
                            else:
                                # NOTE(review): the appends below read final_p /
                                # final_r / final_f / final_average_roc_auc /
                                # final_average_precision, which are set only on the
                                # partition path above -- NameError or stale values
                                # if partition is ever False.  Works today because
                                # partition is hard-coded True at the top of main.
                                error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
                            s_precision.append(final_p)
                            s_recall.append(final_r)
                            s_f1.append(final_f)
                            s_roc_auc.append(final_average_roc_auc)
                            s_pr_auc.append(final_average_precision)
                            s_cks.append(cks)
                    # Per-directory averages over all processed files.
                    print('########################################')
                    avg_precision = CalculateAverageMetric(s_precision)
                    print('avg_precision=' + str(avg_precision))
                    avg_recall = CalculateAverageMetric(s_recall)
                    print('avg_recall=' + str(avg_recall))
                    avg_f1 = CalculateAverageMetric(s_f1)
                    print('avg_f1=' + str(avg_f1))
                    avg_roc_auc = CalculateAverageMetric(s_roc_auc)
                    print('avg_roc_auc=' + str(avg_roc_auc))
                    avg_pr_auc = CalculateAverageMetric(s_pr_auc)
                    print('avg_pr_auc=' + str(avg_pr_auc))
                    avg_cks = CalculateAverageMetric(s_cks)
                    print('avg_cks=' + str(avg_cks))
                    print('########################################')
        # Dataset 5: 2-D synthetic test set -- one 2-feature CSV per file,
        # grouped by sub-directory; per-directory averages are reported.
        # NOTE(review): the outer `files` is shadowed by the inner os.walk and
        # never used at this level.
        if dataset == 5:
            for root, dirs, files in os.walk('./2D/test'):
                for dir in dirs:
                    k_partition = 3
                    # Per-directory metric accumulators (one entry per file).
                    s_precision = []
                    s_recall = []
                    s_f1 = []
                    s_roc_auc = []
                    s_pr_auc = []
                    s_cks = []
                    for _, _, files in os.walk(root + '/' + dir):
                        for file in files:
                            file_name = os.path.join(root, dir, file)
                            print(file_name)
                            abnormal_data, abnormal_label = Read2DDataset(file_name)
                            # Two features; read as a global by the model builder.
                            elem_num = 2
                            if multivariate:
                                abnormal_data = np.expand_dims(abnormal_data, axis=0)
                            if partition:
                                # Model each of the k partitions separately, then
                                # score the concatenated reconstruction errors.
                                splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label,
                                                                                         _part_number=k_partition)
                                final_error = []
                                for i in range(k_partition):
                                    error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, pr_cks = RunModel(
                                        splitted_data[i], splitted_label[i])
                                    final_error.append(error_partition)
                                    # print('-----------------------------------------')
                                final_error = np.concatenate(final_error).ravel()
                                final_zscore = Z_Score(final_error)
                                y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
                                final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
                                PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
                                final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label,
                                                                                                     final_error)
                                print('roc_auc=' + str(final_average_roc_auc))
                                final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(
                                    abnormal_label, final_error)
                                print('pr_auc=' + str(final_average_precision))
                                cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
                                print('cohen_kappa=' + str(cks))
                            else:
                                # NOTE(review): the appends below read final_* names
                                # set only on the partition path -- NameError or
                                # stale values if partition is ever False.
                                error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data,
                                                                                              abnormal_label)
                            s_precision.append(final_p)
                            s_recall.append(final_r)
                            s_f1.append(final_f)
                            s_roc_auc.append(final_average_roc_auc)
                            s_pr_auc.append(final_average_precision)
                            s_cks.append(cks)
                    # Per-directory averages over all processed files.
                    print('########################################')
                    avg_precision = CalculateAverageMetric(s_precision)
                    print('avg_precision=' + str(avg_precision))
                    avg_recall = CalculateAverageMetric(s_recall)
                    print('avg_recall=' + str(avg_recall))
                    avg_f1 = CalculateAverageMetric(s_f1)
                    print('avg_f1=' + str(avg_f1))
                    avg_roc_auc = CalculateAverageMetric(s_roc_auc)
                    print('avg_roc_auc=' + str(avg_roc_auc))
                    avg_pr_auc = CalculateAverageMetric(s_pr_auc)
                    print('avg_pr_auc=' + str(avg_pr_auc))
                    avg_cks = CalculateAverageMetric(s_cks)
                    print('avg_cks=' + str(avg_cks))
                    print('########################################')
        # Dataset 6: UAH-DriveSet -- one recording folder per drive, 4 features.
        # Unlike datasets 3-5, the accumulators live OUTSIDE the directory loop,
        # so the reported averages span all drives; this branch also reuses the
        # same metric names (precision/recall/...) on both paths, so the appends
        # are well-defined regardless of the partition flag.
        if dataset == 6:
            k_partition = 2
            s_precision = []
            s_recall = []
            s_f1 = []
            s_roc_auc = []
            s_pr_auc = []
            s_cks = []
            for root, dirs, _ in os.walk('./UAH/'):
                for dir in dirs:
                    folder_name = os.path.join(root, dir)
                    print(folder_name)
                    abnormal_data, abnormal_label = ReadUAHDataset(folder_name)
                    # Four features; read as a global by the model builder.
                    elem_num = 4
                    if multivariate:
                        abnormal_data = np.expand_dims(abnormal_data, axis=0)
                    if partition:
                        # Model each of the k partitions separately, then score
                        # the concatenated reconstruction errors.
                        splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label,
                                                                                 _part_number=k_partition)
                        final_error = []
                        for i in range(k_partition):
                            error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, cks = RunModel(
                                splitted_data[i], splitted_label[i])
                            final_error.append(error_partition)
                            # print('-----------------------------------------')
                        final_error = np.concatenate(final_error).ravel()
                        final_zscore = Z_Score(final_error)
                        y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
                        print('########################################')
                        precision, recall, f1 = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
                        PrintPrecisionRecallF1Metrics(precision, recall, f1)
                        _, _, roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
                        print('roc_auc=' + str(roc_auc))
                        _, _, pr_auc = CalculatePrecisionRecallCurve(abnormal_label, final_error)
                        print('pr_auc=' + str(pr_auc))
                        cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
                        print('cohen_kappa=' + str(cks))
                        print('########################################')
                    else:
                        error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
                    s_precision.append(precision)
                    s_recall.append(recall)
                    s_f1.append(f1)
                    s_roc_auc.append(roc_auc)
                    s_pr_auc.append(pr_auc)
                    s_cks.append(cks)
            # Averages over every drive folder processed above.
            print('########################################')
            avg_precision = CalculateAverageMetric(s_precision)
            print('avg_precision=' + str(avg_precision))
            avg_recall = CalculateAverageMetric(s_recall)
            print('avg_recall=' + str(avg_recall))
            avg_f1 = CalculateAverageMetric(s_f1)
            print('avg_f1=' + str(avg_f1))
            avg_roc_auc = CalculateAverageMetric(s_roc_auc)
            print('avg_roc_auc=' + str(avg_roc_auc))
            avg_pr_auc = CalculateAverageMetric(s_pr_auc)
            print('avg_pr_auc=' + str(avg_pr_auc))
            avg_cks = CalculateAverageMetric(s_cks)
            print('avg_cks=' + str(avg_cks))
            print('########################################')
        # Dataset 7: ECG recordings -- one 3-feature CSV per file, grouped by
        # sub-directory; per-directory averages are reported.
        # NOTE(review): dataset 7 is reachable only via an explicit CLI argument
        # -- the no-argument fallback loop earlier iterates range(1, 7).
        if dataset == 7:
            for root, dirs, files in os.walk('./ECG/'):
                for dir in dirs:
                    k_partition = 3
                    # Per-directory metric accumulators (one entry per file).
                    s_precision = []
                    s_recall = []
                    s_f1 = []
                    s_roc_auc = []
                    s_pr_auc = []
                    s_cks = []
                    for _, _, files in os.walk(root + '/' + dir):
                        for file in files:
                            file_name = os.path.join(root, dir, file)
                            print(file_name)
                            abnormal_data, abnormal_label = ReadECGDataset(file_name)
                            # Three features; read as a global by the model builder.
                            elem_num = 3
                            if multivariate:
                                abnormal_data = np.expand_dims(abnormal_data, axis=0)
                            if partition:
                                # Model each of the k partitions separately, then
                                # score the concatenated reconstruction errors.
                                splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label,
                                                                                         _part_number=k_partition)
                                final_error = []
                                for i in range(k_partition):
                                    error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, pr_cks = RunModel(
                                        splitted_data[i], splitted_label[i])
                                    final_error.append(error_partition)
                                    # print('-----------------------------------------')
                                final_error = np.concatenate(final_error).ravel()
                                final_zscore = Z_Score(final_error)
                                y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
                                final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
                                PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
                                final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label,
                                                                                                     final_error)
                                print('roc_auc=' + str(final_average_roc_auc))
                                final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(
                                    abnormal_label, final_error)
                                print('pr_auc=' + str(final_average_precision))
                                cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
                                print('cohen_kappa=' + str(cks))
                            else:
                                # NOTE(review): the appends below read final_* names
                                # set only on the partition path -- NameError or
                                # stale values if partition is ever False.
                                error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data,
                                                                                              abnormal_label)
                            s_precision.append(final_p)
                            s_recall.append(final_r)
                            s_f1.append(final_f)
                            s_roc_auc.append(final_average_roc_auc)
                            s_pr_auc.append(final_average_precision)
                            s_cks.append(cks)
                    # Per-directory averages over all processed files.
                    print('########################################')
                    avg_precision = CalculateAverageMetric(s_precision)
                    print('avg_precision=' + str(avg_precision))
                    avg_recall = CalculateAverageMetric(s_recall)
                    print('avg_recall=' + str(avg_recall))
                    avg_f1 = CalculateAverageMetric(s_f1)
                    print('avg_f1=' + str(avg_f1))
                    avg_roc_auc = CalculateAverageMetric(s_roc_auc)
                    print('avg_roc_auc=' + str(avg_roc_auc))
                    avg_pr_auc = CalculateAverageMetric(s_pr_auc)
                    print('avg_pr_auc=' + str(avg_pr_auc))
                    avg_cks = CalculateAverageMetric(s_cks)
                    print('avg_cks=' + str(avg_cks))
                    print('########################################')
| 60.949721 | 201 | 0.486893 | import pathlib
import sys
import tensorflow as tf
import numpy as np
from tensorflow.python.ops import init_ops
from tensorflow.python.ops.rnn_cell_impl import _Linear, LSTMStateTuple
from tensorflow.python.ops import variable_scope as vs
from utils import *
def conv1d_relu(_x, _w, _b):
return tf.nn.relu(tf.nn.bias_add(tf.nn.conv1d(_x, _w, stride=1, padding='SAME'), _b))
def conv1d_sigmoid(_x, _w, _b):
return tf.nn.sigmoid(tf.nn.bias_add(tf.nn.conv1d(_x, _w, stride=1, padding='SAME'), _b))
def Model(_abnormal_data, _abnormal_label):
g = tf.Graph()
with g.as_default():
# placeholder list
p_input = tf.placeholder(tf.float32, shape=(batch_num, _abnormal_data.shape[1], _abnormal_data.shape[2]))
# Weight and Bias for convolution encoding
wc1_enc = tf.Variable(tf.random_normal([5, elem_num, 64]))
bc1_enc = tf.Variable(tf.random_normal([64]))
wc2_enc = tf.Variable(tf.random_normal([5, 64, 32]))
bc2_enc = tf.Variable(tf.random_normal([32]))
wc3_enc = tf.Variable(tf.random_normal([5, 32, 16]))
bc3_enc = tf.Variable(tf.random_normal([16]))
# Weight and Bias for convolution decoding
wc1_dec = tf.Variable(tf.random_normal([5, 16, 32]))
bc1_dec = tf.Variable(tf.random_normal([32]))
wc2_dec = tf.Variable(tf.random_normal([5, 32, 64]))
bc2_dec = tf.Variable(tf.random_normal([64]))
wc3_dec = tf.Variable(tf.random_normal([5, 64, elem_num]))
bc3_dec = tf.Variable(tf.random_normal([elem_num]))
# with tf.device('/device:GPU:0'):
with tf.variable_scope('encoder'):
# Conv 1st layer
conv1_enc = conv1d_relu(p_input, wc1_enc, bc1_enc)
# Conv 2nd layer
conv2_enc = conv1d_relu(conv1_enc, wc2_enc, bc2_enc)
# Conv 3rd layer
conv3_enc = conv1d_relu(conv2_enc, wc3_enc, bc3_enc)
# with tf.device('/device:GPU:1'):
with tf.variable_scope('decoder'):
# Conv 1st layer
conv1_dec = conv1d_relu(conv3_enc, wc1_dec, bc1_dec)
# Conv 2nd layer
conv2_dec = conv1d_relu(conv1_dec, wc2_dec, bc2_dec)
# Conv 3rd layer
dec_outputs = conv1d_sigmoid(conv2_dec, wc3_dec, bc3_dec)
loss = tf.reduce_mean(tf.square(p_input - dec_outputs))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
return g, p_input, dec_outputs, loss, optimizer, saver
def RunModel(_abnormal_data, _abnormal_label):
graph, p_input, dec_outputs, loss, optimizer, saver = Model(_abnormal_data, _abnormal_label)
config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
with tf.Session(graph=graph, config=config) as sess:
sess.run(tf.global_variables_initializer())
for i in range(iteration):
"""Random sequences.
Every sequence has size batch_num * step_num * elem_num
Each step number increases 1 by 1.
An initial number of each sequence is in the range from 0 to 19.
(ex. [8. 9. 10. 11. 12. 13. 14. 15])
"""
(loss_val, _) = sess.run([loss, optimizer], {p_input: _abnormal_data})
# print('iter %d:' % (i + 1), loss_val)
if not partition:
save_path = saver.save(sess, './saved_model/' + pathlib.Path(file_name).parts[0] + '/cnn_seq2seq_' + os.path.basename(file_name) + '.ckpt')
print("Model saved in path: %s" % save_path)
(input_, output_) = sess.run([p_input, dec_outputs], {p_input: _abnormal_data})
error = SquareErrorDataPoints(input_, output_)
# np.savetxt('./saved_result/' + pathlib.Path(file_name).parts[0] + '/cnn_seq2seq_' + os.path.basename(file_name) + '_error.txt', error, delimiter=',') # X is an array
zscore = Z_Score(error)
# np.savetxt('./saved_result/' + pathlib.Path(file_name).parts[0] + '/cnn_seq2seq_' + os.path.basename(file_name) + '_zscore.txt', zscore, delimiter=',') # X is an array
y_pred = CreateLabelBasedOnZscore(zscore, 3)
if not partition:
score_pred_label = np.c_[error, y_pred, _abnormal_label]
np.savetxt('./saved_result/' + pathlib.Path(file_name).parts[0] + '/cnn_seq2seq_' + os.path.basename(file_name) + '_score.txt', score_pred_label, delimiter=',') # X is an array
p, r, f = CalculatePrecisionRecallF1Metrics(_abnormal_label, y_pred)
if not partition:
PrintPrecisionRecallF1Metrics(p, r, f)
# k_number = [20, 40, 60, 80, 100]
# for k in k_number:
# precision_at_k = CalculatePrecisionAtK(_abnormal_label, error, k, _type=1)
# print('precision at ' + str(k) + '=' + str(precision_at_k))
fpr, tpr, average_roc_auc = CalculateROCAUCMetrics(_abnormal_label, error)
# PlotROCAUC(fpr, tpr, roc_auc)
if not partition:
print('roc_auc=' + str(average_roc_auc))
precision_curve, recall_curve, average_precision = CalculatePrecisionRecallCurve(_abnormal_label, error)
# PlotPrecisionRecallCurve(precision_curve, recall_curve, average_precision)
if not partition:
print('pr_auc=' + str(average_precision))
cks = CalculateCohenKappaMetrics(_abnormal_label, y_pred)
if not partition:
print('cks=' + str(cks))
return error, p, r, f, average_roc_auc, average_precision, cks
if __name__ == '__main__':
batch_num = 1
hidden_num = 4
# step_num = 8
iteration = 30
ensemble_space = 10
learning_rate = 1e-3
multivariate = True
partition = True
save_model = False
try:
sys.argv[1]
except IndexError:
for n in range(1, 7):
# file name parameter
dataset = n
if dataset == 1:
file_name = './GD/data/Genesis_AnomalyLabels.csv'
print(file_name)
k_partition = 40
abnormal_data, abnormal_label = ReadGDDataset(file_name)
elem_num = 18
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label, _part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, cks = RunModel(splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 2.5)
final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
print('roc_auc=' + str(final_average_roc_auc))
final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(abnormal_label, final_error)
print('pr_auc=' + str(final_average_precision))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
if dataset == 2:
file_name = './HSS/data/HRSS_anomalous_standard.csv'
print(file_name)
k_partition = 80
abnormal_data, abnormal_label = ReadHSSDataset(file_name)
elem_num = 18
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label, _part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, cks = RunModel(splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
print('roc_auc=' + str(final_average_roc_auc))
final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(abnormal_label, final_error)
print('pr_auc=' + str(final_average_precision))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
if dataset == 3:
for root, dirs, _ in os.walk('./YAHOO/data'):
for dir in dirs:
k_partition = 10
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
abnormal_data, abnormal_label = ReadS5Dataset(file_name)
elem_num = 1
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label, _part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, pr_cks = RunModel(splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
print('roc_auc=' + str(final_average_roc_auc))
final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(abnormal_label, final_error)
print('pr_auc=' + str(final_average_precision))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
s_precision.append(final_p)
s_recall.append(final_r)
s_f1.append(final_f)
s_roc_auc.append(final_average_roc_auc)
s_pr_auc.append(final_average_precision)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 4:
for root, dirs, _ in os.walk('./NAB/data'):
for dir in dirs:
k_partition = 10
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
abnormal_data, abnormal_label = ReadNABDataset(file_name)
elem_num = 1
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label, _part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, pr_cks = RunModel(splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
print('roc_auc=' + str(final_average_roc_auc))
final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(abnormal_label, final_error)
print('pr_auc=' + str(final_average_precision))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
s_precision.append(final_p)
s_recall.append(final_r)
s_f1.append(final_f)
s_roc_auc.append(final_average_roc_auc)
s_pr_auc.append(final_average_precision)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 5:
for root, dirs, files in os.walk('./2D/test'):
for dir in dirs:
k_partition = 3
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
abnormal_data, abnormal_label = Read2DDataset(file_name)
elem_num = 2
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label, _part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, pr_cks = RunModel(
splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
print('roc_auc=' + str(final_average_roc_auc))
final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(
abnormal_label, final_error)
print('pr_auc=' + str(final_average_precision))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
s_precision.append(final_p)
s_recall.append(final_r)
s_f1.append(final_f)
s_roc_auc.append(final_average_roc_auc)
s_pr_auc.append(final_average_precision)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 6:
k_partition = 2
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for root, dirs, _ in os.walk('./UAH/'):
for dir in dirs:
folder_name = os.path.join(root, dir)
print(folder_name)
abnormal_data, abnormal_label = ReadUAHDataset(folder_name)
elem_num = 4
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label,
_part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, cks = RunModel(
splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
print('########################################')
precision, recall, f1 = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
PrintPrecisionRecallF1Metrics(precision, recall, f1)
_, _, roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
print('roc_auc=' + str(roc_auc))
_, _, pr_auc = CalculatePrecisionRecallCurve(abnormal_label, final_error)
print('pr_auc=' + str(pr_auc))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
print('########################################')
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
s_precision.append(precision)
s_recall.append(recall)
s_f1.append(f1)
s_roc_auc.append(roc_auc)
s_pr_auc.append(pr_auc)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 7:
for root, dirs, files in os.walk('./ECG/'):
for dir in dirs:
k_partition = 3
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
abnormal_data, abnormal_label = ReadECGDataset(file_name)
elem_num = 3
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data,
abnormal_label,
_part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, pr_cks = RunModel(
splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label,
y_pred)
PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label,
final_error)
print('roc_auc=' + str(final_average_roc_auc))
final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(
abnormal_label, final_error)
print('pr_auc=' + str(final_average_precision))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data,
abnormal_label)
s_precision.append(final_p)
s_recall.append(final_r)
s_f1.append(final_f)
s_roc_auc.append(final_average_roc_auc)
s_pr_auc.append(final_average_precision)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
else:
# file name parameter
dataset = int(sys.argv[1])
if dataset == 1:
file_name = './GD/data/Genesis_AnomalyLabels.csv'
print(file_name)
k_partition = 40
abnormal_data, abnormal_label = ReadGDDataset(file_name)
elem_num = 18
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label, _part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, cks = RunModel(splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 2.5)
final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
print('roc_auc=' + str(final_average_roc_auc))
final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(abnormal_label, final_error)
print('pr_auc=' + str(final_average_precision))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
if dataset == 2:
file_name = './HSS/data/HRSS_anomalous_standard.csv'
print(file_name)
k_partition = 80
abnormal_data, abnormal_label = ReadHSSDataset(file_name)
elem_num = 18
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label, _part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, cks = RunModel(splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
print('roc_auc=' + str(final_average_roc_auc))
final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(abnormal_label, final_error)
print('pr_auc=' + str(final_average_precision))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
if dataset == 3:
for root, dirs, _ in os.walk('./YAHOO/data'):
for dir in dirs:
k_partition = 10
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
abnormal_data, abnormal_label = ReadS5Dataset(file_name)
elem_num = 1
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label, _part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, pr_cks = RunModel(splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
print('roc_auc=' + str(final_average_roc_auc))
final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(abnormal_label, final_error)
print('pr_auc=' + str(final_average_precision))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
s_precision.append(final_p)
s_recall.append(final_r)
s_f1.append(final_f)
s_roc_auc.append(final_average_roc_auc)
s_pr_auc.append(final_average_precision)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 4:
for root, dirs, _ in os.walk('./NAB/data'):
for dir in dirs:
k_partition = 10
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
abnormal_data, abnormal_label = ReadNABDataset(file_name)
elem_num = 1
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label, _part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, pr_cks = RunModel(splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
print('roc_auc=' + str(final_average_roc_auc))
final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(abnormal_label, final_error)
print('pr_auc=' + str(final_average_precision))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
s_precision.append(final_p)
s_recall.append(final_r)
s_f1.append(final_f)
s_roc_auc.append(final_average_roc_auc)
s_pr_auc.append(final_average_precision)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 5:
for root, dirs, files in os.walk('./2D/test'):
for dir in dirs:
k_partition = 3
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
abnormal_data, abnormal_label = Read2DDataset(file_name)
elem_num = 2
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label,
_part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, pr_cks = RunModel(
splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label,
final_error)
print('roc_auc=' + str(final_average_roc_auc))
final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(
abnormal_label, final_error)
print('pr_auc=' + str(final_average_precision))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data,
abnormal_label)
s_precision.append(final_p)
s_recall.append(final_r)
s_f1.append(final_f)
s_roc_auc.append(final_average_roc_auc)
s_pr_auc.append(final_average_precision)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 6:
k_partition = 2
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for root, dirs, _ in os.walk('./UAH/'):
for dir in dirs:
folder_name = os.path.join(root, dir)
print(folder_name)
abnormal_data, abnormal_label = ReadUAHDataset(folder_name)
elem_num = 4
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label,
_part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, cks = RunModel(
splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
print('########################################')
precision, recall, f1 = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
PrintPrecisionRecallF1Metrics(precision, recall, f1)
_, _, roc_auc = CalculateROCAUCMetrics(abnormal_label, final_error)
print('roc_auc=' + str(roc_auc))
_, _, pr_auc = CalculatePrecisionRecallCurve(abnormal_label, final_error)
print('pr_auc=' + str(pr_auc))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
print('########################################')
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data, abnormal_label)
s_precision.append(precision)
s_recall.append(recall)
s_f1.append(f1)
s_roc_auc.append(roc_auc)
s_pr_auc.append(pr_auc)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
if dataset == 7:
for root, dirs, files in os.walk('./ECG/'):
for dir in dirs:
k_partition = 3
s_precision = []
s_recall = []
s_f1 = []
s_roc_auc = []
s_pr_auc = []
s_cks = []
for _, _, files in os.walk(root + '/' + dir):
for file in files:
file_name = os.path.join(root, dir, file)
print(file_name)
abnormal_data, abnormal_label = ReadECGDataset(file_name)
elem_num = 3
if multivariate:
abnormal_data = np.expand_dims(abnormal_data, axis=0)
if partition:
splitted_data, splitted_label = PartitionTimeSeriesKPart(abnormal_data, abnormal_label,
_part_number=k_partition)
final_error = []
for i in range(k_partition):
error_partition, precision_partition, recall_partition, f1_partition, roc_auc_partition, pr_auc_partition, pr_cks = RunModel(
splitted_data[i], splitted_label[i])
final_error.append(error_partition)
# print('-----------------------------------------')
final_error = np.concatenate(final_error).ravel()
final_zscore = Z_Score(final_error)
y_pred = CreateLabelBasedOnZscore(final_zscore, 3)
final_p, final_r, final_f = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
PrintPrecisionRecallF1Metrics(final_p, final_r, final_f)
final_fpr, final_tpr, final_average_roc_auc = CalculateROCAUCMetrics(abnormal_label,
final_error)
print('roc_auc=' + str(final_average_roc_auc))
final_precision_curve, final_recall_curve, final_average_precision = CalculatePrecisionRecallCurve(
abnormal_label, final_error)
print('pr_auc=' + str(final_average_precision))
cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
print('cohen_kappa=' + str(cks))
else:
error, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(abnormal_data,
abnormal_label)
s_precision.append(final_p)
s_recall.append(final_r)
s_f1.append(final_f)
s_roc_auc.append(final_average_roc_auc)
s_pr_auc.append(final_average_precision)
s_cks.append(cks)
print('########################################')
avg_precision = CalculateAverageMetric(s_precision)
print('avg_precision=' + str(avg_precision))
avg_recall = CalculateAverageMetric(s_recall)
print('avg_recall=' + str(avg_recall))
avg_f1 = CalculateAverageMetric(s_f1)
print('avg_f1=' + str(avg_f1))
avg_roc_auc = CalculateAverageMetric(s_roc_auc)
print('avg_roc_auc=' + str(avg_roc_auc))
avg_pr_auc = CalculateAverageMetric(s_pr_auc)
print('avg_pr_auc=' + str(avg_pr_auc))
avg_cks = CalculateAverageMetric(s_cks)
print('avg_cks=' + str(avg_cks))
print('########################################')
| 5,232 | 0 | 92 |
7c3a1e64eb7f90975d8a37e4689e534e9971ded6 | 9,746 | py | Python | tests/parsers/sqlite_plugins/firefox.py | Defense-Cyber-Crime-Center/plaso | 4f3a85fbea10637c1cdbf0cde9fc539fdcea9c47 | [
"Apache-2.0"
] | 2 | 2016-02-18T12:46:29.000Z | 2022-03-13T03:04:59.000Z | tests/parsers/sqlite_plugins/firefox.py | Defense-Cyber-Crime-Center/plaso | 4f3a85fbea10637c1cdbf0cde9fc539fdcea9c47 | [
"Apache-2.0"
] | null | null | null | tests/parsers/sqlite_plugins/firefox.py | Defense-Cyber-Crime-Center/plaso | 4f3a85fbea10637c1cdbf0cde9fc539fdcea9c47 | [
"Apache-2.0"
] | 6 | 2016-12-18T08:05:36.000Z | 2021-04-06T14:19:11.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Mozilla Firefox history database plugin."""
import collections
import unittest
from plaso.formatters import firefox as _ # pylint: disable=unused-import
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import firefox
from tests.parsers.sqlite_plugins import test_lib
class FirefoxHistoryPluginTest(test_lib.SQLitePluginTestCase):
  """Tests for the Mozilla Firefox history database plugin."""
  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    # Plugin instance shared by the test methods below.
    self._plugin = firefox.FirefoxHistoryPlugin()
  def testProcessPriorTo24(self):
    """Tests the Process function on a Firefox History database file."""
    # This is probably version 23 but potentially an older version.
    test_file = self._GetTestFilePath([u'places.sqlite'])
    cache = sqlite.SQLiteCache()
    event_queue_consumer = self._ParseDatabaseFileWithPlugin(
        self._plugin, test_file, cache)
    event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
    # The places.sqlite file contains 205 events (1 page visit,
    # 2 x 91 bookmark records, 2 x 3 bookmark annotations,
    # 2 x 8 bookmark folders).
    # However there are three events that do not have a timestamp
    # so the test file will show 202 extracted events.
    self.assertEqual(len(event_objects), 202)
    # Check the first page visited event.
    event_object = event_objects[0]
    self.assertEqual(event_object.data_type, u'firefox:places:page_visited')
    self.assertEqual(
        event_object.timestamp_desc, eventdata.EventTimestamp.PAGE_VISITED)
    expected_timestamp = timelib.Timestamp.CopyFromString(
        u'2011-07-01 11:16:21.371935')
    self.assertEqual(event_object.timestamp, expected_timestamp)
    expected_url = u'http://news.google.com/'
    self.assertEqual(event_object.url, expected_url)
    expected_title = u'Google News'
    self.assertEqual(event_object.title, expected_title)
    expected_msg = (
        u'{0:s} ({1:s}) [count: 1] Host: news.google.com '
        u'(URL not typed directly) Transition: TYPED').format(
            expected_url, expected_title)
    expected_short = u'URL: {0:s}'.format(expected_url)
    self._TestGetMessageStrings(event_object, expected_msg, expected_short)
    # Check the first bookmark event.
    event_object = event_objects[1]
    self.assertEqual(event_object.data_type, u'firefox:places:bookmark')
    self.assertEqual(
        event_object.timestamp_desc, eventdata.EventTimestamp.ADDED_TIME)
    expected_timestamp = timelib.Timestamp.CopyFromString(
        u'2011-07-01 11:13:59.266344')
    self.assertEqual(event_object.timestamp, expected_timestamp)
    # Check the second bookmark event.
    event_object = event_objects[2]
    self.assertEqual(event_object.data_type, u'firefox:places:bookmark')
    self.assertEqual(
        event_object.timestamp_desc, eventdata.EventTimestamp.MODIFICATION_TIME)
    expected_timestamp = timelib.Timestamp.CopyFromString(
        u'2011-07-01 11:13:59.267198')
    self.assertEqual(event_object.timestamp, expected_timestamp)
    # A "place:" URL is a Firefox smart-bookmark query rather than a web page.
    expected_url = (
        u'place:folder=BOOKMARKS_MENU&folder=UNFILED_BOOKMARKS&folder=TOOLBAR&'
        u'sort=12&excludeQueries=1&excludeItemIfParentHasAnnotation=livemark%2F'
        u'feedURI&maxResults=10&queryType=1')
    self.assertEqual(event_object.url, expected_url)
    expected_title = u'Recently Bookmarked'
    self.assertEqual(event_object.title, expected_title)
    expected_msg = (
        u'Bookmark URL {0:s} ({1:s}) [folder=BOOKMARKS_MENU&'
        u'folder=UNFILED_BOOKMARKS&folder=TOOLBAR&sort=12&excludeQueries=1&'
        u'excludeItemIfParentHasAnnotation=livemark%2FfeedURI&maxResults=10&'
        u'queryType=1] visit count 0').format(
            expected_title, expected_url)
    expected_short = (
        u'Bookmarked Recently Bookmarked '
        u'(place:folder=BOOKMARKS_MENU&folder=UNFILED_BO...')
    self._TestGetMessageStrings(event_object, expected_msg, expected_short)
    # Check the first bookmark annotation event.
    event_object = event_objects[183]
    self.assertEqual(
        event_object.data_type, u'firefox:places:bookmark_annotation')
    self.assertEqual(
        event_object.timestamp_desc, eventdata.EventTimestamp.CREATION_TIME)
    expected_timestamp = timelib.Timestamp.CopyFromString(
        u'2011-07-01 11:13:59.267146')
    self.assertEqual(event_object.timestamp, expected_timestamp)
    # Check another bookmark annotation event.
    event_object = event_objects[184]
    self.assertEqual(
        event_object.data_type, u'firefox:places:bookmark_annotation')
    self.assertEqual(
        event_object.timestamp_desc, eventdata.EventTimestamp.CREATION_TIME)
    expected_timestamp = timelib.Timestamp.CopyFromString(
        u'2011-07-01 11:13:59.267605')
    self.assertEqual(event_object.timestamp, expected_timestamp)
    expected_url = u'place:sort=14&type=6&maxResults=10&queryType=1'
    self.assertEqual(event_object.url, expected_url)
    expected_title = u'Recent Tags'
    self.assertEqual(event_object.title, expected_title)
    expected_msg = (
        u'Bookmark Annotation: [RecentTags] to bookmark '
        u'[{0:s}] ({1:s})').format(
            expected_title, expected_url)
    expected_short = u'Bookmark Annotation: Recent Tags'
    self._TestGetMessageStrings(event_object, expected_msg, expected_short)
    # Check the second last bookmark folder event.
    event_object = event_objects[200]
    self.assertEqual(event_object.data_type, u'firefox:places:bookmark_folder')
    self.assertEqual(
        event_object.timestamp_desc, eventdata.EventTimestamp.ADDED_TIME)
    expected_timestamp = timelib.Timestamp.CopyFromString(
        u'2011-03-21 10:05:01.553774')
    self.assertEqual(event_object.timestamp, expected_timestamp)
    # Check the last bookmark folder event.
    event_object = event_objects[201]
    self.assertEqual(
        event_object.data_type, u'firefox:places:bookmark_folder')
    self.assertEqual(
        event_object.timestamp_desc,
        eventdata.EventTimestamp.MODIFICATION_TIME)
    expected_timestamp = timelib.Timestamp.CopyFromString(
        u'2011-07-01 11:14:11.766851')
    self.assertEqual(event_object.timestamp, expected_timestamp)
    expected_title = u'Latest Headlines'
    self.assertEqual(event_object.title, expected_title)
    # Folder events format to just the folder title.
    expected_msg = expected_title
    expected_short = expected_title
    self._TestGetMessageStrings(event_object, expected_msg, expected_short)
  def testProcessVersion25(self):
    """Tests the Process function on a Firefox History database file v 25."""
    test_file = self._GetTestFilePath([u'places_new.sqlite'])
    cache = sqlite.SQLiteCache()
    event_queue_consumer = self._ParseDatabaseFileWithPlugin(
        self._plugin, test_file, cache)
    event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
    # The places.sqlite file contains 84 events:
    # 34 page visits.
    # 28 bookmarks
    # 14 bookmark folders
    # 8 annotations
    self.assertEqual(len(event_objects), 84)
    # Tally events per data type to verify the expected distribution.
    counter = collections.Counter()
    for event_object in event_objects:
      counter[event_object.data_type] += 1
    self.assertEqual(counter[u'firefox:places:bookmark'], 28)
    self.assertEqual(counter[u'firefox:places:page_visited'], 34)
    self.assertEqual(counter[u'firefox:places:bookmark_folder'], 14)
    self.assertEqual(counter[u'firefox:places:bookmark_annotation'], 8)
    # Spot check one of the page visited events.
    random_event = event_objects[10]
    expected_timestamp = timelib.Timestamp.CopyFromString(
        u'2013-10-30 21:57:11.281942')
    self.assertEqual(random_event.timestamp, expected_timestamp)
    expected_short = u'URL: http://code.google.com/p/plaso'
    expected_msg = (
        u'http://code.google.com/p/plaso [count: 1] Host: code.google.com '
        u'(URL not typed directly) Transition: TYPED')
    self._TestGetMessageStrings(random_event, expected_msg, expected_short)
class FirefoxDownloadsPluginTest(test_lib.SQLitePluginTestCase):
  """Tests for the Mozilla Firefox downloads database plugin."""
  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    # Plugin instance exercised by the test method below.
    self._plugin = firefox.FirefoxDownloadsPlugin()
  def testProcessVersion25(self):
    """Tests the Process function on a Firefox Downloads database file."""
    database_path = self._GetTestFilePath([u'downloads.sqlite'])
    database_cache = sqlite.SQLiteCache()
    consumer = self._ParseDatabaseFileWithPlugin(
        self._plugin, database_path, database_cache)
    events = self._GetEventObjectsFromQueue(consumer)
    # The single download in downloads.sqlite yields 2 events.
    self.assertEqual(len(events), 2)
    # Check the download start event.
    download_event = events[0]
    self.assertEqual(download_event.data_type, u'firefox:downloads:download')
    self.assertEqual(
        download_event.timestamp_desc, eventdata.EventTimestamp.START_TIME)
    start_timestamp = timelib.Timestamp.CopyFromString(
        u'2013-07-18 18:59:59.312000')
    self.assertEqual(download_event.timestamp, start_timestamp)
    source_url = (
        u'https://plaso.googlecode.com/files/'
        u'plaso-static-1.0.1-win32-vs2008.zip')
    self.assertEqual(download_event.url, source_url)
    target_path = u'file:///D:/plaso-static-1.0.1-win32-vs2008.zip'
    self.assertEqual(download_event.full_path, target_path)
    # A completed download has received all of its bytes.
    self.assertEqual(download_event.received_bytes, 15974599)
    self.assertEqual(download_event.total_bytes, 15974599)
# Allow running this test module directly as a script.
if __name__ == '__main__':
  unittest.main()
| 37.057034 | 80 | 0.736199 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Mozilla Firefox history database plugin."""
import collections
import unittest
from plaso.formatters import firefox as _ # pylint: disable=unused-import
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import firefox
from tests.parsers.sqlite_plugins import test_lib
class FirefoxHistoryPluginTest(test_lib.SQLitePluginTestCase):
"""Tests for the Mozilla Firefox history database plugin."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._plugin = firefox.FirefoxHistoryPlugin()
def testProcessPriorTo24(self):
"""Tests the Process function on a Firefox History database file."""
# This is probably version 23 but potentially an older version.
test_file = self._GetTestFilePath([u'places.sqlite'])
cache = sqlite.SQLiteCache()
event_queue_consumer = self._ParseDatabaseFileWithPlugin(
self._plugin, test_file, cache)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
# The places.sqlite file contains 205 events (1 page visit,
# 2 x 91 bookmark records, 2 x 3 bookmark annotations,
# 2 x 8 bookmark folders).
# However there are three events that do not have a timestamp
# so the test file will show 202 extracted events.
self.assertEqual(len(event_objects), 202)
# Check the first page visited event.
event_object = event_objects[0]
self.assertEqual(event_object.data_type, u'firefox:places:page_visited')
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.PAGE_VISITED)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2011-07-01 11:16:21.371935')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_url = u'http://news.google.com/'
self.assertEqual(event_object.url, expected_url)
expected_title = u'Google News'
self.assertEqual(event_object.title, expected_title)
expected_msg = (
u'{0:s} ({1:s}) [count: 1] Host: news.google.com '
u'(URL not typed directly) Transition: TYPED').format(
expected_url, expected_title)
expected_short = u'URL: {0:s}'.format(expected_url)
self._TestGetMessageStrings(event_object, expected_msg, expected_short)
# Check the first bookmark event.
event_object = event_objects[1]
self.assertEqual(event_object.data_type, u'firefox:places:bookmark')
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.ADDED_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2011-07-01 11:13:59.266344')
self.assertEqual(event_object.timestamp, expected_timestamp)
# Check the second bookmark event.
event_object = event_objects[2]
self.assertEqual(event_object.data_type, u'firefox:places:bookmark')
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.MODIFICATION_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2011-07-01 11:13:59.267198')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_url = (
u'place:folder=BOOKMARKS_MENU&folder=UNFILED_BOOKMARKS&folder=TOOLBAR&'
u'sort=12&excludeQueries=1&excludeItemIfParentHasAnnotation=livemark%2F'
u'feedURI&maxResults=10&queryType=1')
self.assertEqual(event_object.url, expected_url)
expected_title = u'Recently Bookmarked'
self.assertEqual(event_object.title, expected_title)
expected_msg = (
u'Bookmark URL {0:s} ({1:s}) [folder=BOOKMARKS_MENU&'
u'folder=UNFILED_BOOKMARKS&folder=TOOLBAR&sort=12&excludeQueries=1&'
u'excludeItemIfParentHasAnnotation=livemark%2FfeedURI&maxResults=10&'
u'queryType=1] visit count 0').format(
expected_title, expected_url)
expected_short = (
u'Bookmarked Recently Bookmarked '
u'(place:folder=BOOKMARKS_MENU&folder=UNFILED_BO...')
self._TestGetMessageStrings(event_object, expected_msg, expected_short)
# Check the first bookmark annotation event.
event_object = event_objects[183]
self.assertEqual(
event_object.data_type, u'firefox:places:bookmark_annotation')
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.CREATION_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2011-07-01 11:13:59.267146')
self.assertEqual(event_object.timestamp, expected_timestamp)
# Check another bookmark annotation event.
event_object = event_objects[184]
self.assertEqual(
event_object.data_type, u'firefox:places:bookmark_annotation')
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.CREATION_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2011-07-01 11:13:59.267605')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_url = u'place:sort=14&type=6&maxResults=10&queryType=1'
self.assertEqual(event_object.url, expected_url)
expected_title = u'Recent Tags'
self.assertEqual(event_object.title, expected_title)
expected_msg = (
u'Bookmark Annotation: [RecentTags] to bookmark '
u'[{0:s}] ({1:s})').format(
expected_title, expected_url)
expected_short = u'Bookmark Annotation: Recent Tags'
self._TestGetMessageStrings(event_object, expected_msg, expected_short)
# Check the second last bookmark folder event.
event_object = event_objects[200]
self.assertEqual(event_object.data_type, u'firefox:places:bookmark_folder')
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.ADDED_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2011-03-21 10:05:01.553774')
self.assertEqual(event_object.timestamp, expected_timestamp)
# Check the last bookmark folder event.
event_object = event_objects[201]
self.assertEqual(
event_object.data_type, u'firefox:places:bookmark_folder')
self.assertEqual(
event_object.timestamp_desc,
eventdata.EventTimestamp.MODIFICATION_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2011-07-01 11:14:11.766851')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_title = u'Latest Headlines'
self.assertEqual(event_object.title, expected_title)
expected_msg = expected_title
expected_short = expected_title
self._TestGetMessageStrings(event_object, expected_msg, expected_short)
  def testProcessVersion25(self):
    """Tests the Process function on a Firefox History database file v 25."""
    test_file = self._GetTestFilePath([u'places_new.sqlite'])
    cache = sqlite.SQLiteCache()
    # Run the plugin against the test database and drain the produced events.
    event_queue_consumer = self._ParseDatabaseFileWithPlugin(
        self._plugin, test_file, cache)
    event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
    # The places.sqlite file contains 84 events:
    # 34 page visits.
    # 28 bookmarks
    # 14 bookmark folders
    # 8 annotations
    self.assertEqual(len(event_objects), 84)
    # Tally the events per data type and verify the expected distribution.
    counter = collections.Counter()
    for event_object in event_objects:
      counter[event_object.data_type] += 1
    self.assertEqual(counter[u'firefox:places:bookmark'], 28)
    self.assertEqual(counter[u'firefox:places:page_visited'], 34)
    self.assertEqual(counter[u'firefox:places:bookmark_folder'], 14)
    self.assertEqual(counter[u'firefox:places:bookmark_annotation'], 8)
    # Spot-check one event (index 10): timestamp and formatted message strings.
    random_event = event_objects[10]
    expected_timestamp = timelib.Timestamp.CopyFromString(
        u'2013-10-30 21:57:11.281942')
    self.assertEqual(random_event.timestamp, expected_timestamp)
    expected_short = u'URL: http://code.google.com/p/plaso'
    expected_msg = (
        u'http://code.google.com/p/plaso [count: 1] Host: code.google.com '
        u'(URL not typed directly) Transition: TYPED')
    self._TestGetMessageStrings(random_event, expected_msg, expected_short)
class FirefoxDownloadsPluginTest(test_lib.SQLitePluginTestCase):
  """Tests for the Mozilla Firefox downloads database plugin."""
  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    self._plugin = firefox.FirefoxDownloadsPlugin()
  def testProcessVersion25(self):
    """Tests the Process function on a Firefox Downloads database file."""
    test_file = self._GetTestFilePath([u'downloads.sqlite'])
    cache = sqlite.SQLiteCache()
    # Run the plugin against the test database and drain the produced events.
    event_queue_consumer = self._ParseDatabaseFileWithPlugin(
        self._plugin, test_file, cache)
    event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
    # The downloads.sqlite file contains 2 events (1 download).
    self.assertEqual(len(event_objects), 2)
    # Check the first event, a download start event.
    event_object = event_objects[0]
    self.assertEqual(event_object.data_type, u'firefox:downloads:download')
    self.assertEqual(
        event_object.timestamp_desc, eventdata.EventTimestamp.START_TIME)
    expected_timestamp = timelib.Timestamp.CopyFromString(
        u'2013-07-18 18:59:59.312000')
    self.assertEqual(event_object.timestamp, expected_timestamp)
    # Verify the download source URL, target path and byte counts.
    expected_url = (
        u'https://plaso.googlecode.com/files/'
        u'plaso-static-1.0.1-win32-vs2008.zip')
    self.assertEqual(event_object.url, expected_url)
    expected_full_path = u'file:///D:/plaso-static-1.0.1-win32-vs2008.zip'
    self.assertEqual(event_object.full_path, expected_full_path)
    self.assertEqual(event_object.received_bytes, 15974599)
    self.assertEqual(event_object.total_bytes, 15974599)
if __name__ == '__main__':
  # Run all test cases in this module when executed directly.
  unittest.main()
| 0 | 0 | 0 |
b2ca930e892e3b76aaffe81353c72803618798b2 | 1,826 | py | Python | mgz/body/embedded.py | Namek/aoc-mgz | 0c8196dccb550a48ecc375d7861138ef88b53716 | [
"MIT"
] | 117 | 2015-03-07T10:55:58.000Z | 2022-03-18T18:22:01.000Z | mgz/body/embedded.py | Namek/aoc-mgz | 0c8196dccb550a48ecc375d7861138ef88b53716 | [
"MIT"
] | 71 | 2015-10-02T00:05:07.000Z | 2022-03-25T16:47:56.000Z | mgz/body/embedded.py | Namek/aoc-mgz | 0c8196dccb550a48ecc375d7861138ef88b53716 | [
"MIT"
] | 41 | 2015-03-07T02:50:59.000Z | 2021-09-13T06:16:12.000Z | """Embedded Structures.
Various structure can be embedded in the body, without an operation header.
Saved Chapters:
A saved chapter is a header structure embedded in the body.
There is no command identifier, so the command type is actually
the first field of the header - length/offset. Applying the `subheader`
struct at this point will parse the embedded header.
This section is a work in progress.
"""
from construct import (Array, Computed, Embedded, GreedyBytes, If, Int16ul,
Int32ul, Padding, Peek, String, Struct, Switch)
from mgz import subheader
# pylint: disable=invalid-name
# Embedded chat message. The text length is taken from the `op` field of the
# record three context levels up, and the text is NUL-padded latin-1.
chat = Struct(
    "subtype"/Computed("chat"),
    "data"/Struct(
        "length"/Computed(lambda ctx: ctx._._._.op),
        "text"/String(lambda ctx: ctx._._._.op, padchar=b'\x00',
                      trimdir='right', encoding='latin1'),
    )
)
# Embedded header (aka saved chapter). The header length is derived from the
# enclosing record's `op` (length/offset) field minus its start position.
header = Struct(
    "subtype"/Computed("savedchapter"),
    "data"/Struct(
        "header_length"/Computed(lambda ctx: ctx._._._.op - ctx._._._.start),
        Embedded(subheader)
    )
)
# Unknown embedded structure - looks like a partial action?
# The int array is only read when its count looks sane (< 0xff).
other = Struct(
    "subtype"/Computed("unknown"),
    "data"/Struct(
        Padding(4),
        "num_ints"/Int32ul,
        If(lambda ctx: ctx.num_ints < 0xff, Array(
            lambda ctx: ctx.num_ints, Int32ul
        )),
        Padding(12)
    )
)
# Anything we don't recognize - just consume the remainder
default = Struct(
    "subtype"/Computed("default"),
    GreedyBytes
)
# Embedded structures identified by first byte (for now).
# Peek leaves the marker in the stream for the selected sub-struct to consume.
embedded = "embedded"/Struct(
    "marker"/Peek(Int16ul),
    Embedded("data"/Switch(lambda ctx: ctx.marker, {
        0: header,
        9024: chat,
        65535: other
    }, default=default))
)
| 25.71831 | 77 | 0.651698 | """Embedded Structures.
Various structure can be embedded in the body, without an operation header.
Saved Chapters:
A saved chapter is a header structure embedded in the body.
There is no command identifier, so the command type is actually
the first field of the header - length/offset. Applying the `subheader`
struct at this point will parse the embedded header.
This section is a work in progress.
"""
from construct import (Array, Computed, Embedded, GreedyBytes, If, Int16ul,
Int32ul, Padding, Peek, String, Struct, Switch)
from mgz import subheader
# pylint: disable=invalid-name
# Embedded chat message
chat = Struct(
"subtype"/Computed("chat"),
"data"/Struct(
"length"/Computed(lambda ctx: ctx._._._.op),
"text"/String(lambda ctx: ctx._._._.op, padchar=b'\x00',
trimdir='right', encoding='latin1'),
)
)
# Embedded header (aka saved chapter)
header = Struct(
"subtype"/Computed("savedchapter"),
"data"/Struct(
"header_length"/Computed(lambda ctx: ctx._._._.op - ctx._._._.start),
Embedded(subheader)
)
)
# Unknown embedded structure - looks like a partial action?
other = Struct(
"subtype"/Computed("unknown"),
"data"/Struct(
Padding(4),
"num_ints"/Int32ul,
If(lambda ctx: ctx.num_ints < 0xff, Array(
lambda ctx: ctx.num_ints, Int32ul
)),
Padding(12)
)
)
# Anything we don't recognize - just consume the remainder
default = Struct(
"subtype"/Computed("default"),
GreedyBytes
)
# Embedded structures identified by first byte (for now)
embedded = "embedded"/Struct(
"marker"/Peek(Int16ul),
Embedded("data"/Switch(lambda ctx: ctx.marker, {
0: header,
9024: chat,
65535: other
}, default=default))
)
| 0 | 0 | 0 |
20fab8022fd83af53ea7e0c5365056dc64940d56 | 302 | py | Python | Alphabet/Small_Alphabet/Static_Small_Letters_For_Loop/FOR_LOOP_p.py | Polamreddykrishnareddy/PatternPackage | 893ab468a637cd70a0dd8f6d60f7f5c75a3db58f | [
"MIT"
] | null | null | null | Alphabet/Small_Alphabet/Static_Small_Letters_For_Loop/FOR_LOOP_p.py | Polamreddykrishnareddy/PatternPackage | 893ab468a637cd70a0dd8f6d60f7f5c75a3db58f | [
"MIT"
] | null | null | null | Alphabet/Small_Alphabet/Static_Small_Letters_For_Loop/FOR_LOOP_p.py | Polamreddykrishnareddy/PatternPackage | 893ab468a637cd70a0dd8f6d60f7f5c75a3db58f | [
"MIT"
] | null | null | null | #p
# Render the lowercase letter "p" on a 13-row x 6-column asterisk grid:
# a full-height left stem plus a bowl closed over rows 0-5.
for row in range(13):
    cells = []
    for col in range(6):
        stem = col == 0                            # left vertical stroke
        bowl_top = row == 0 and col != 5           # top edge of the bowl
        bowl_right = row in (1, 2, 3, 4) and col == 5  # right side of the bowl
        bowl_bottom = row == 5 and col != 5        # bottom edge of the bowl
        cells.append("*" if stem or bowl_top or bowl_right or bowl_bottom else " ")
    # Original prints every cell followed by a space, so keep the trailing space.
    print(" ".join(cells) + " ")
| 33.555556 | 156 | 0.463576 | #p
for row in range(13):
for col in range(6):
if (col==0 or row==0 and col!=5) or (row==1 and col==5)or (row==2 and col==5)or (row==3 and col==5)or (row==4 and col==5) or (row==5 and col!=5):#p
print("*",end=" ")
else:
print(" ",end=" ")
print()
| 0 | 0 | 0 |
50300afeadf136fdce23a714dcbd8df510dd386c | 1,817 | py | Python | pdf_with_js/plugin.py | vuquangtrong/mkdocs-pdf-with-js-plugin | 38053565854f5322ecec34775c956f353011f895 | [
"MIT"
] | 1 | 2022-01-21T13:43:32.000Z | 2022-01-21T13:43:32.000Z | pdf_with_js/plugin.py | vuquangtrong/mkdocs-pdf-with-js-plugin | 38053565854f5322ecec34775c956f353011f895 | [
"MIT"
] | null | null | null | pdf_with_js/plugin.py | vuquangtrong/mkdocs-pdf-with-js-plugin | 38053565854f5322ecec34775c956f353011f895 | [
"MIT"
] | 1 | 2021-07-10T17:19:19.000Z | 2021-07-10T17:19:19.000Z |
from mkdocs.config import config_options
from mkdocs.plugins import BasePlugin
from pdf_with_js.printer import Printer
import random
| 28.390625 | 88 | 0.600991 |
from mkdocs.config import config_options
from mkdocs.plugins import BasePlugin
from pdf_with_js.printer import Printer
import random
class PdfWithJS(BasePlugin):
    """MkDocs plugin that renders each built page to PDF via a JS-driven Printer.

    Config options:
        enable: master switch for the plugin.
        add_download_button: inject a download link for the PDF into each page.
        display_header_footer / header_template / footer_template:
            forwarded to the printer's page template configuration.
    """

    config_scheme = (
        ('enable', config_options.Type(bool, default=True)),
        ('add_download_button', config_options.Type(bool, default=False)),
        ('display_header_footer', config_options.Type(bool, default=False)),
        ('header_template', config_options.Type(str, default='')),
        ('footer_template', config_options.Type(str, default='')),
    )

    def __init__(self):
        self.printer = Printer()

    def on_config(self, config, **kwargs):
        """Cache plugin options and pass the print settings to the printer."""
        self.enabled = self.config['enable']
        self.add_download_button = self.config['add_download_button']
        self.printer.set_config(
            self.config['display_header_footer'],
            self.config['header_template'],
            self.config['footer_template']
        )
        return config

    def on_nav(self, nav, config, files):
        # Navigation is left untouched.
        return nav

    def on_post_page(self, output_content, page, config, **kwargs):
        """Register the rendered page for printing; optionally add a PDF link."""
        if not self.enabled:
            # Returning None leaves the page content unchanged.
            return
        page_paths = self.printer.add_page(page, config)
        if self.add_download_button:
            output_content = self.printer.add_download_link(output_content, page_paths)
        return output_content

    def on_post_build(self, config):
        """Print all collected pages once the whole site has been built."""
        if not self.enabled:
            return
        self.printer.print_pages()

    def on_env(self, env, config, files):
        # Expose a `shuffle` filter to Jinja templates.
        env.filters['shuffle'] = self.do_shuffle

    def do_shuffle(self, seq):
        """Jinja filter: shuffle *seq* in place; return it unchanged if not shuffleable."""
        try:
            random.shuffle(seq)
            return seq
        except TypeError:
            # Immutable or non-sequence input (e.g. tuple, generator): pass through.
            return seq
| 1,057 | 595 | 24 |
29f37d595042f56159568888fa71e6a5a6d64268 | 1,110 | py | Python | prediction/src/tests/test_classification.py | yasiriqbal1/concept-to-clinic-1 | 3b7d34a6b31e8d3924934f3e5c990c49813c670e | [
"MIT"
] | 346 | 2017-08-04T12:26:11.000Z | 2018-10-16T06:51:45.000Z | prediction/src/tests/test_classification.py | yasiriqbal1/concept-to-clinic-1 | 3b7d34a6b31e8d3924934f3e5c990c49813c670e | [
"MIT"
] | 296 | 2017-08-02T10:17:05.000Z | 2018-07-31T05:29:43.000Z | prediction/src/tests/test_classification.py | yasiriqbal1/concept-to-clinic-1 | 3b7d34a6b31e8d3924934f3e5c990c49813c670e | [
"MIT"
] | 159 | 2017-08-04T07:34:52.000Z | 2018-10-16T18:34:08.000Z | from ..algorithms.classify import trained_model
| 37 | 99 | 0.744144 | from ..algorithms.classify import trained_model
def test_classify_predict_load(metaimage_path, model_path):
    """With no nodule candidates the classifier must return a falsy result."""
    assert not trained_model.predict(metaimage_path, [], model_path)
def test_classify_dicom(dicom_paths, nodule_locations, model_path):
    """A DICOM series with candidate nodules yields a probability in [0, 1]."""
    predicted = trained_model.predict(dicom_paths[0], nodule_locations, model_path)
    assert predicted
    assert 0 <= predicted[0]['p_concerning'] <= 1
def test_classify_real_nodule_small_dicom(dicom_path_003, model_path):
    """A known real nodule in the small series scores at least 0.3."""
    predicted = trained_model.predict(dicom_path_003, [{'x': 302, 'y': 287, 'z': 12}], model_path)
    assert predicted
    assert 0.3 <= predicted[0]['p_concerning'] <= 1
def test_classify_real_nodule_full_dicom(dicom_paths, model_path):
    """The same real nodule in the full series also scores at least 0.3."""
    predicted = trained_model.predict(dicom_paths[2], [{'x': 302, 'y': 287, 'z': 187}], model_path)
    assert predicted
    assert 0.3 <= predicted[0]['p_concerning'] <= 1
def test_classify_luna(metaimage_path, luna_nodule, model_path):
    """A LUNA-format nodule candidate yields a probability in [0, 1]."""
    predicted = trained_model.predict(metaimage_path, [luna_nodule], model_path)
    assert predicted
    assert 0 <= predicted[0]['p_concerning'] <= 1
| 942 | 0 | 115 |
51d712db70f6674f1a70dd0d199f6f41778c599a | 2,014 | py | Python | data_masking_methods/mask_the_info.py | GeorgeManakanatas/PPDM | 9e6af80681db497447197cac14b26b99e588f231 | [
"MIT",
"Unlicense"
] | 3 | 2016-11-18T07:24:39.000Z | 2019-07-06T07:45:15.000Z | data_masking_methods/mask_the_info.py | GeorgeManakanatas/PPDM | 9e6af80681db497447197cac14b26b99e588f231 | [
"MIT",
"Unlicense"
] | 2 | 2017-02-14T15:24:34.000Z | 2019-11-25T19:18:05.000Z | data_masking_methods/mask_the_info.py | GeorgeManakanatas/PPDM | 9e6af80681db497447197cac14b26b99e588f231 | [
"MIT",
"Unlicense"
] | 3 | 2017-12-19T07:04:24.000Z | 2021-08-20T15:42:13.000Z | '''
information masking section
'''
import logging
import timeit
from . import encrypt_the_info
from . import null_the_info
def masking_method_selection(start_dataframe, mask_col, mask_method,
                             save_to_file, masked_file, logger):
    '''
    Dispatch to the requested masking method, with logging and timing.

    Arguments:
        start_dataframe: the dataframe whose columns should be masked
        mask_col(list): column numbers of the attributes to mask
        mask_method(str): 'encrypt' or 'replace'
        save_to_file(bool): true to also write the result to masked_file
        masked_file(str): output file name used when save_to_file is set
        logger: custom logging function

    Returns:
        the dataframe with masked columns, or False for an unknown method
    '''
    mask_timer_start = timeit.default_timer()
    logger.info('running masking method : ' + str(mask_method) +
                ' on columns : ' + str(mask_col))
    logger.info('dataframe before masking : ' + str(start_dataframe.shape))
    # Dispatch table keeps the method selection in one place; extend here
    # when new masking methods are added.
    handlers = {
        'encrypt': encrypt_the_info.encrypt_the_proper_columns,
        'replace': null_the_info.null_the_proper_columns,
    }
    handler = handlers.get(mask_method)
    if handler is None:
        logger.info('improper masking method provided : ' + str(mask_method))
        return False
    start_dataframe = handler(start_dataframe, mask_col)
    # logging the outcome
    logger.info('dataframe after masking : ' + str(start_dataframe.shape))
    # saving to file if that option was set to True
    if save_to_file:
        start_dataframe.to_csv(masked_file, index=False, header=False)
    mask_timer_stop = timeit.default_timer()
    # logging the execution time
    logger.info(" Total masking time is:" +
                str(mask_timer_stop - mask_timer_start))
    return start_dataframe
information masking section
'''
import logging
import timeit
from . import encrypt_the_info
from . import null_the_info
def masking_method_selection(start_dataframe, mask_col, mask_method,
save_to_file, masked_file, logger):
'''
Basic check that all input is properly provided and filtering through the
various options if no error occurs. Logging and timer handled here as well.
Arguments:
start_dataframe: the dataframe to mask
mask_col(list): list of column numbers for the attributes to mask
mask_method(str): the way the attributes should be masked
save_to_file(bool): true to save the dataframe to temporary file
masked_file(str): the file name for the output file
logger: custom logging function
Returns:
dataframe with masked properties
'''
total_mask_time_start = timeit.default_timer()
logger.info('running masking method : ' + str(mask_method) +
' on columns : ' + str(mask_col))
logger.info('dataframe before masking : ' + str(start_dataframe.shape))
# should be a list with selection in the future
if mask_method == 'encrypt':
start_dataframe = encrypt_the_info.encrypt_the_proper_columns(
start_dataframe, mask_col)
elif mask_method == 'replace':
start_dataframe = null_the_info.null_the_proper_columns(
start_dataframe, mask_col)
else:
logger.info('improper masking method provided : '+str(mask_method))
return False
# logging the outcome
logger.info('dataframe after masking : '+str(start_dataframe.shape))
# saving to file if that option was set to True
if save_to_file:
start_dataframe.to_csv(masked_file, index=False, header=False)
total_mask_time_stop = timeit.default_timer()
# logging the excecution time
logger.info(" Total masking time is:" +
str(total_mask_time_stop-total_mask_time_start))
return start_dataframe | 0 | 0 | 0 |
b7a2b031b62930c8b996cd893bd4bcce4bd6e6ea | 2,398 | py | Python | domains/deliver/problems/manual/problem5_OF.py | patras91/rae_release | 0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30 | [
"BSD-3-Clause"
] | 1 | 2021-09-28T12:56:56.000Z | 2021-09-28T12:56:56.000Z | domains/deliver/problems/manual/problem5_OF.py | patras91/rae_release | 0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30 | [
"BSD-3-Clause"
] | null | null | null | domains/deliver/problems/manual/problem5_OF.py | patras91/rae_release | 0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30 | [
"BSD-3-Clause"
] | 1 | 2022-03-31T16:30:39.000Z | 2022-03-31T16:30:39.000Z | __author__ = 'mason'
from domain_orderFulfillment import *
from timer import DURATION
from state import state
import numpy as np
'''
Several objects to choose from, need to consider weights
Same as problem 4 but only 1 robot
'''
DURATION.TIME = {
'lookupDB': GetCostOfLookup,
'wrap': GetCostOfWrap,
'pickup': GetCostOfPickup,
'putdown': GetCostOfPutdown,
'loadMachine': GetCostOfLoad,
'moveRobot': GetCostOfMove,
'acquireRobot': 1,
'freeRobot': 1,
'wait': 5
}
DURATION.COUNTER = {
'lookupDB': GetCostOfLookup,
'wrap': GetCostOfWrap,
'pickup': GetCostOfPickup,
'putdown': GetCostOfPutdown,
'loadMachine': GetCostOfLoad,
'moveRobot': GetCostOfMove,
'acquireRobot': 1,
'freeRobot': 1,
'wait': 5
}
rv.LOCATIONS = [1, 2, 3, 4, 5, 6, 7]
rv.FACTORY1 = frozenset({1, 2, 3, 4, 6, 7, 5})
rv.FACTORY_UNION = rv.FACTORY1
rv.SHIPPING_DOC = {rv.FACTORY1: 4}
rv.GROUND_EDGES = {1: [2], 2: [1, 3], 3: [2, 4], 4: [3, 5], 5: [4, 6], 6: [5, 7], 7: [6]}
rv.GROUND_WEIGHTS = {(1,2): 1, (2,3): 1, (3,4): 5, (4,5): 8, (5,6): 5, (6,7): 1}
rv.ROBOTS = {'r1': rv.FACTORY1}
rv.ROBOT_CAPACITY = {'r1': 10}
rv.MACHINES = {'m1': rv.FACTORY1}
rv.PALLETS = {'p1'}
tasks = {
1: [['orderStart', ['type1', 'type2']]],
2: [['orderStart', ['type2', 'type1']]],
}
eventsEnv = {
} | 24.469388 | 96 | 0.584654 | __author__ = 'mason'
from domain_orderFulfillment import *
from timer import DURATION
from state import state
import numpy as np
'''
Several objects to choose from, need to consider weights
Same as problem 4 but only 1 robot
'''
def GetCostOfMove(id, r, loc1, loc2, dist):
    """Travel cost for robot *r* moving loc1 -> loc2: one tick plus the edge distance."""
    base_cost = 1
    return base_cost + dist
def GetCostOfLookup(id, item):
    # NOTE(review): np.random.beta(2, 2) draws from [0, 1], so max(1, ...)
    # makes this effectively a constant cost of 1 — confirm whether a larger
    # scale factor was intended for database lookups.
    return max(1, np.random.beta(2, 2))
def GetCostOfWrap(id, orderName, m, item):
    """Wrapping cost for an item: a Normal(5, 0.5) draw, floored at 1."""
    draw = np.random.normal(5, .5)
    return max(1, draw)
def GetCostOfPickup(id, r, item):
    # Pickup cost: Normal(4, 1) draw, floored at 1.
    return max(1, np.random.normal(4, 1))
def GetCostOfPutdown(id, r, item):
    # Putdown cost: same Normal(4, 1) distribution as pickup, floored at 1.
    return max(1, np.random.normal(4, 1))
def GetCostOfLoad(id, orderName, r, m, item):
    # Machine-loading cost: Normal(3, 0.5) draw, floored at 1.
    return max(1, np.random.normal(3, .5))
# Per-action duration models: callables are sampled per invocation,
# integers are fixed costs.
DURATION.TIME = {
    'lookupDB': GetCostOfLookup,
    'wrap': GetCostOfWrap,
    'pickup': GetCostOfPickup,
    'putdown': GetCostOfPutdown,
    'loadMachine': GetCostOfLoad,
    'moveRobot': GetCostOfMove,
    'acquireRobot': 1,
    'freeRobot': 1,
    'wait': 5
}
# Counter-based durations mirror the time-based ones.
DURATION.COUNTER = {
    'lookupDB': GetCostOfLookup,
    'wrap': GetCostOfWrap,
    'pickup': GetCostOfPickup,
    'putdown': GetCostOfPutdown,
    'loadMachine': GetCostOfLoad,
    'moveRobot': GetCostOfMove,
    'acquireRobot': 1,
    'freeRobot': 1,
    'wait': 5
}
# Static problem layout: one factory spanning locations 1-7 on a line graph.
rv.LOCATIONS = [1, 2, 3, 4, 5, 6, 7]
rv.FACTORY1 = frozenset({1, 2, 3, 4, 6, 7, 5})
rv.FACTORY_UNION = rv.FACTORY1
rv.SHIPPING_DOC = {rv.FACTORY1: 4}
# Adjacency and edge weights of the ground graph (a simple path 1-2-...-7).
rv.GROUND_EDGES = {1: [2], 2: [1, 3], 3: [2, 4], 4: [3, 5], 5: [4, 6], 6: [5, 7], 7: [6]}
rv.GROUND_WEIGHTS = {(1,2): 1, (2,3): 1, (3,4): 5, (4,5): 8, (5,6): 5, (6,7): 1}
# Only one robot in this problem (cf. problem 4 which has two).
rv.ROBOTS = {'r1': rv.FACTORY1}
rv.ROBOT_CAPACITY = {'r1': 10}
rv.MACHINES = {'m1': rv.FACTORY1}
rv.PALLETS = {'p1'}
def ResetState():
    """Reset the shared `state` object to this problem's initial configuration."""
    # Object availability, weights and class membership.
    state.OBJECTS = {'o1': True, 'o2': True, 'o3': True, 'o4': True, 'o5': True}
    state.OBJ_WEIGHT = {'o1': 7, 'o2': 3, 'o3': 1, 'o4': 6, 'o5': 3}
    state.OBJ_CLASS = {'type1': ['o1', 'o4'], 'type2': ['o2', 'o3', 'o5']}
    # Initial locations of robots, machine, objects and pallet.
    state.loc = {'r1': 2, 'r2': 1, 'm1': 3, 'o1': 2, 'o2': 1, 'o3':7, 'o4': 1, 'o5': 6, 'p1': 4}
    # Robots start empty and all resources start idle.
    state.load = {'r1': NIL, 'r2': NIL,}
    state.busy = {'r1': False, 'r2': False, 'm1': False, 'fixer1': False}
    state.numUses = {'m1': 1}
    state.var1 = {'temp': 'r1', 'temp1': 'r1', 'temp2': 1, 'redoId': 0}
    state.shouldRedo = {}
# Orders released at the given time steps; each lists the requested item classes.
tasks = {
    1: [['orderStart', ['type1', 'type2']]],
    2: [['orderStart', ['type2', 'type1']]],
}
eventsEnv = {
} | 893 | 0 | 161 |
d682a95c426da85437977b5f33eeeb297e0afda2 | 1,754 | py | Python | trackpal/visu.py | sommerc/trackpal | f62e6a4467af8a5988b5bce0c20872bc3e325cae | [
"BSD-3-Clause"
] | 1 | 2022-01-10T16:48:23.000Z | 2022-01-10T16:48:23.000Z | trackpal/visu.py | sommerc/trackpal | f62e6a4467af8a5988b5bce0c20872bc3e325cae | [
"BSD-3-Clause"
] | null | null | null | trackpal/visu.py | sommerc/trackpal | f62e6a4467af8a5988b5bce0c20872bc3e325cae | [
"BSD-3-Clause"
] | null | null | null | """Track visualization"""
from matplotlib import pyplot as plt
def plot_trj(
    trj,
    coords=None,
    ax=None,
    scale=None,
    line_fmt="x:",
    line_color=None,
    line_label="Trajectory",
    line_width=None,
    marker_size=None,
    alpha=None,
    start_end=(True, True),
):
    """Plot a single trajectory, optionally with start/end markers and a scale bar.

    Args:
        trj (pandas.DataFrame): track to plot; must expose the coordinate columns.
        coords (list, optional): names of the x/y coordinate columns.
            Defaults to None, in which case ``trj.coords`` is used.
        ax (optional): matplotlib axes to plot in. Defaults to the current axes.
        scale (float, optional): length of a horizontal scale bar drawn below
            the track. Defaults to None (no scale bar).
        line_fmt (str, optional): matplotlib format string. Defaults to "x:".
        line_color (str, optional): line color. Defaults to None
            (matplotlib's default color).
        line_label (str, optional): legend label. Defaults to "Trajectory".
        line_width (optional): line width. Defaults to None.
        marker_size (optional): marker size. Defaults to None.
        alpha (optional): line transparency. Defaults to None.
        start_end (tuple, optional): whether to mark the start (green) and
            end (red) of the track. Defaults to (True, True).
    """
    if not ax:
        ax = plt.gca()
    if not coords:
        coords = trj.coords
    ax.plot(
        *(trj[coords].values.T),
        line_fmt,
        color=line_color,
        label=line_label,
        lw=line_width,
        markersize=marker_size,
        alpha=alpha
    )
    # Green marker at the first point, red at the last.
    if start_end[0]:
        ax.plot(*trj[coords].iloc[0].T, "o", color="lightgreen")
    if start_end[1]:
        ax.plot(*trj[coords].iloc[-1].T, "o", color="red")
    ax.axis("off")
    if scale is not None:
        # Horizontal scale bar centered on the track, drawn just below it.
        ax.plot(
            [trj[coords[0]].mean() - scale / 2, trj[coords[0]].mean() + scale / 2],
            [trj[coords[1]].min() - 3, trj[coords[1]].min() - 3],
            "k-",
            lw=3,
        )
    ax.set_aspect(1.0)
| 26.179104 | 98 | 0.566135 | """Track visualization"""
from matplotlib import pyplot as plt
def plot_trj(
trj,
coords=None,
ax=None,
scale=None,
line_fmt="x:",
line_color=None,
line_label="Trajectory",
line_width=None,
marker_size=None,
alpha=None,
start_end=(True, True),
):
"""[summary]
Args:
trj (pandas.DataFrame): tracks to plot
coords (list): The names of the x/y coodrinate column names
ax (optional): matplotlib axes to plot in. Defaults to None.
scale (int, optional): length of scale bar. Defaults to 10.
line_fmt (str, optional): Defaults to "x:".
line_color (str, optional): Defaults to "gray".
line_label (str, optional): Defaults to "Trajectory".
line_width ([type], optional): Defaults to None.
marker_size ([type], optional): Defaults to None.
alpha ([type], optional): Defaults to None.
start_end (tuple, optional): Show marker for start/end of track. Defaults to (True, True).
"""
if not ax:
ax = plt.gca()
if not coords:
coords = trj.coords
ax.plot(
*(trj[coords].values.T),
line_fmt,
color=line_color,
label=line_label,
lw=line_width,
markersize=marker_size,
alpha=alpha
)
if start_end[0]:
ax.plot(*trj[coords].iloc[0].T, "o", color="lightgreen")
if start_end[1]:
ax.plot(*trj[coords].iloc[-1].T, "o", color="red")
ax.axis("off")
if scale is not None:
ax.plot(
[trj[coords[0]].mean() - scale / 2, trj[coords[0]].mean() + scale / 2],
[trj[coords[1]].min() - 3, trj[coords[1]].min() - 3],
"k-",
lw=3,
)
ax.set_aspect(1.0)
| 0 | 0 | 0 |
9ade5536cc926d194f758f83ecb6495530677c35 | 6,136 | py | Python | src/gluonts/model/testutil.py | dotgc/gluon-ts | e14ad69058e58e1ce51c40551674318341781331 | [
"Apache-2.0"
] | 1 | 2021-08-03T09:09:31.000Z | 2021-08-03T09:09:31.000Z | src/gluonts/model/testutil.py | Happiness20/gluon-ts | e14ad69058e58e1ce51c40551674318341781331 | [
"Apache-2.0"
] | null | null | null | src/gluonts/model/testutil.py | Happiness20/gluon-ts | e14ad69058e58e1ce51c40551674318341781331 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Iterator
# Third-party imports
import numpy as np
# First-party imports
from gluonts.core.component import validated
from gluonts.dataset.common import Dataset
from gluonts.model.estimator import Estimator
from gluonts.model.forecast import Forecast, SampleForecast
from gluonts.model.predictor import RepresentablePredictor
class IdentityPredictor(RepresentablePredictor):
"""
A `Predictor` that uses the last `prediction_length` observations
to predict the future.
Parameters
----------
prediction_length
Prediction horizon.
freq
Frequency of the predicted data.
num_samples
Number of samples to include in the forecasts. Not that the samples
produced by this predictor will all be identical.
"""
@validated()
class ConstantPredictor(RepresentablePredictor):
"""
A `Predictor` that always produces the same forecast.
Parameters
----------
samples
Samples to use to construct SampleForecast objects for every
prediction.
freq
Frequency of the predicted data.
"""
@validated()
class MeanPredictor(RepresentablePredictor):
"""
A :class:`Predictor` that predicts the mean of the last `context_length`
elements of the input target.
Parameters
----------
context_length
Length of the target context used to condition the predictions.
prediction_length
Length of the prediction horizon.
num_eval_samples
Number of samples to use to construct :class:`SampleForecast` objects
for every prediction.
freq
Frequency of the predicted data.
"""
@validated()
class MeanEstimator(Estimator):
"""
An `Estimator` that computes the mean targets in the training data,
in the trailing `prediction_length` observations, and produces
a `ConstantPredictor` that always predicts such mean value.
Parameters
----------
prediction_length
Prediction horizon.
freq
Frequency of the predicted data.
num_samples
Number of samples to include in the forecasts. Not that the samples
produced by this predictor will all be identical.
"""
@validated()
| 31.792746 | 79 | 0.636082 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Iterator
# Third-party imports
import numpy as np
# First-party imports
from gluonts.core.component import validated
from gluonts.dataset.common import Dataset
from gluonts.model.estimator import Estimator
from gluonts.model.forecast import Forecast, SampleForecast
from gluonts.model.predictor import RepresentablePredictor
class IdentityPredictor(RepresentablePredictor):
    """
    A `Predictor` that uses the last `prediction_length` observations
    to predict the future.
    Parameters
    ----------
    prediction_length
        Prediction horizon.
    freq
        Frequency of the predicted data.
    num_samples
        Number of samples to include in the forecasts. Note that the samples
        produced by this predictor will all be identical.
    """
    @validated()
    def __init__(
        self, prediction_length: int, freq: str, num_samples: int
    ) -> None:
        super().__init__(prediction_length, freq)
        assert num_samples > 0, "The value of `num_samples` should be > 0"
        self.num_samples = num_samples
    def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
        for item in dataset:
            # Echo the trailing observations as the forecast, replicated
            # across the sample dimension without copying the data.
            prediction = item["target"][-self.prediction_length :]
            samples = np.broadcast_to(
                array=np.expand_dims(prediction, 0),
                shape=(self.num_samples, self.prediction_length),
            )
            yield SampleForecast(
                samples=samples,
                start_date=item["start"],
                freq=self.freq,
                item_id=item["id"] if "id" in item else None,
            )
class ConstantPredictor(RepresentablePredictor):
    """
    A `Predictor` that always produces the same forecast.
    Parameters
    ----------
    samples
        Samples to use to construct SampleForecast objects for every
        prediction.
    freq
        Frequency of the predicted data.
    """
    @validated()
    def __init__(self, samples: np.ndarray, freq: str) -> None:
        # The second axis of `samples` determines the prediction length.
        super().__init__(samples.shape[1], freq)
        self.samples = samples
    def predict(self, dataset: Dataset, **kwargs) -> Iterator[SampleForecast]:
        for item in dataset:
            # Same fixed samples for every item; only metadata differs.
            yield SampleForecast(
                samples=self.samples,
                start_date=item["start"],
                freq=self.freq,
                item_id=item["id"] if "id" in item else None,
            )
class MeanPredictor(RepresentablePredictor):
    """
    A :class:`Predictor` that predicts the mean of the last `context_length`
    elements of the input target.
    Parameters
    ----------
    context_length
        Length of the target context used to condition the predictions.
    prediction_length
        Length of the prediction horizon.
    num_eval_samples
        Number of samples to use to construct :class:`SampleForecast` objects
        for every prediction. Note that all samples are identical.
    freq
        Frequency of the predicted data.
    """
    @validated()
    def __init__(
        self,
        context_length: int,
        prediction_length: int,
        num_eval_samples: int,
        freq: str,
    ) -> None:
        super().__init__(prediction_length, freq)
        self.context_length = context_length
        self.num_eval_samples = num_eval_samples
        # Shape of every forecast's sample array: (samples, horizon).
        self.shape = (self.num_eval_samples, self.prediction_length)
    def predict(self, dataset: Dataset, **kwargs) -> Iterator[SampleForecast]:
        for item in dataset:
            # Constant forecast: the mean of the trailing context window.
            mean = np.mean(item["target"][-self.context_length :])
            yield SampleForecast(
                samples=mean * np.ones(shape=self.shape),
                start_date=item["start"],
                freq=self.freq,
                item_id=item["id"] if "id" in item else None,
            )
class MeanEstimator(Estimator):
    """
    An `Estimator` that computes the mean targets in the training data,
    in the trailing `prediction_length` observations, and produces
    a `ConstantPredictor` that always predicts such mean value.
    Parameters
    ----------
    prediction_length
        Prediction horizon.
    freq
        Frequency of the predicted data.
    num_eval_samples
        Number of samples to include in the forecasts. Note that the samples
        produced by this predictor will all be identical.
    """
    @validated()
    def __init__(
        self, prediction_length: int, freq: str, num_eval_samples: int
    ) -> None:
        assert (
            prediction_length > 0
        ), "The value of `prediction_length` should be > 0"
        assert num_eval_samples > 0, "The value of `num_samples` should be > 0"
        self.prediction_length = prediction_length
        self.freq = freq
        self.num_eval_samples = num_eval_samples
    def train(self, training_data: Dataset) -> ConstantPredictor:
        # Stack the trailing `prediction_length` observations of every series,
        # then average across series to get a single mean trajectory.
        contexts = np.broadcast_to(
            array=[
                item["target"][-self.prediction_length :]
                for item in training_data
            ],
            shape=(len(training_data), self.prediction_length),
        )
        # Replicate the mean trajectory across the sample dimension.
        samples = np.broadcast_to(
            array=contexts.mean(axis=0),
            shape=(self.num_eval_samples, self.prediction_length),
        )
        return ConstantPredictor(samples=samples, freq=self.freq)
| 3,078 | 0 | 212 |
a55c169171ded7deb8167391e08f38271824a4d9 | 1,610 | py | Python | examples/oauth.py | Code-in-the-House/foursquare_api | 47f710802bad581d36c7e8ec28612bccf1f9fe41 | [
"MIT"
] | null | null | null | examples/oauth.py | Code-in-the-House/foursquare_api | 47f710802bad581d36c7e8ec28612bccf1f9fe41 | [
"MIT"
] | null | null | null | examples/oauth.py | Code-in-the-House/foursquare_api | 47f710802bad581d36c7e8ec28612bccf1f9fe41 | [
"MIT"
] | null | null | null | import pyfoursquare as foursquare
# == OAuth2 Authentication ==
#
# This mode of authentication is the required one for Foursquare
# The client id and client secret can be found on your application's Details
# page located at https://foursquare.com/oauth/
client_id = "E50NJYAFUAPXPAKU5XQNBTXPGKRRSNUGAYWTUUH3RKJ22HH4"
client_secret = "3LQHT1LGX2MVUXKRNLY0ZFKNWIXKNNQDTLYD5UFX4WPAF0GM"
callback = 'http://127.0.0.1:8000/'
auth = foursquare.OauthHandler(client_id, client_secret, callback)
#First Redirect the user who wish to authenticate to.
#It will be create the authorization url for your app
auth_url = auth.get_authorization_url()
print 'Please authorize: ' + auth_url
#If the user accepts, it will be redirected back
#to your registered REDIRECT_URI.
#It will give you a code as
#https://YOUR_REGISTERED_REDIRECT_URI/?code=CODE
code = raw_input('The code: ').strip()
#Now your server will make a request for
#the access token. You can save this
#for future access for your app for this user
access_token = auth.get_access_token(code)
print 'Your access token is ' + access_token
#Now let's create an API
api = foursquare.API(auth)
#Now you can access the Foursquare API!
result = api.venues_search(query='Burburinho', ll='-8.063542,-34.872891')
#You can acess as a Model
print dir(result[0])
#Access all its attributes
print result[0].name
"""
If you already have the access token for this user
you can go until lines 1- 13, and then get at
your database the access token for this user and
set the access token.
auth.set_access_token('ACCESS_TOKEN')
Now you can go on by the line 33.
"""
| 29.272727 | 76 | 0.77205 | import pyfoursquare as foursquare
# Example: OAuth2 authentication against the Foursquare API.
# NOTE(review): this is Python 2 code (print statements, raw_input).
# == OAuth2 Authentication ==
#
# This mode of authentication is the one required by Foursquare.
# The client id and client secret can be found on your application's Details
# page located at https://foursquare.com/oauth/
# SECURITY NOTE(review): real-looking credentials are hard-coded below; they
# should be revoked and loaded from the environment instead of committed.
client_id = "E50NJYAFUAPXPAKU5XQNBTXPGKRRSNUGAYWTUUH3RKJ22HH4"
client_secret = "3LQHT1LGX2MVUXKRNLY0ZFKNWIXKNNQDTLYD5UFX4WPAF0GM"
callback = 'http://127.0.0.1:8000/'
auth = foursquare.OauthHandler(client_id, client_secret, callback)
# First, redirect the user who wishes to authenticate:
# this builds the authorization URL for your app.
auth_url = auth.get_authorization_url()
print 'Please authorize: ' + auth_url
# If the user accepts, they are redirected back to your registered
# REDIRECT_URI with a code appended:
# https://YOUR_REGISTERED_REDIRECT_URI/?code=CODE
code = raw_input('The code: ').strip()
# Now your server exchanges the code for an access token. You can save
# this token for future access on behalf of this user.
access_token = auth.get_access_token(code)
print 'Your access token is ' + access_token
# Now let's create an API client
api = foursquare.API(auth)
# Now you can access the Foursquare API!
result = api.venues_search(query='Burburinho', ll='-8.063542,-34.872891')
# You can access the result as a Model
print dir(result[0])
# Access all its attributes
print result[0].name
"""
If you already have the access token for this user
you can go until lines 1- 13, and then get at
your database the access token for this user and
set the access token.
auth.set_access_token('ACCESS_TOKEN')
Now you can go on by the line 33.
"""
| 0 | 0 | 0 |
2547d96a6a4b442bf0ff50913fed0095c52574cf | 438 | py | Python | Handin/handin2.py | thesombady/Fytb14 | e630e8d9ff4512849d124426b5754b6b44d89069 | [
"MIT"
] | null | null | null | Handin/handin2.py | thesombady/Fytb14 | e630e8d9ff4512849d124426b5754b6b44d89069 | [
"MIT"
] | null | null | null | Handin/handin2.py | thesombady/Fytb14 | e630e8d9ff4512849d124426b5754b6b44d89069 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
# Plot the two constraint-force components (in units of mg) against the
# angle phi over [0, pi/2].
mg = 10
xlist = np.linspace(0,np.pi/2,100)
# NOTE(review): the formulas below presumably come from the assignment's
# derivation -- verify the coefficients against it.
f1 = 3*mg/2*(np.sin(xlist)*np.cos(xlist)*3/2-np.cos(xlist))
f2 = 3*mg/2*(-np.sin(xlist)+(3*np.sin(xlist)**2-1)/2) + mg
plt.plot(xlist, f1, '-', markersize=1, label = r"$F_x$")
plt.plot(xlist, f2, '-', markersize=1, label = r"$F_y$")
plt.title("Constraint forces")
plt.xlabel(r"$\varphi$")
plt.ylabel("mg")
plt.grid()
plt.legend()
plt.show()
| 24.333333 | 59 | 0.643836 | import numpy as np
import matplotlib.pyplot as plt
# Plot the two constraint-force components (in units of mg) against the
# angle phi over [0, pi/2].
mg = 10
xlist = np.linspace(0,np.pi/2,100)
# NOTE(review): the formulas below presumably come from the assignment's
# derivation -- verify the coefficients against it.
f1 = 3*mg/2*(np.sin(xlist)*np.cos(xlist)*3/2-np.cos(xlist))
f2 = 3*mg/2*(-np.sin(xlist)+(3*np.sin(xlist)**2-1)/2) + mg
plt.plot(xlist, f1, '-', markersize=1, label = r"$F_x$")
plt.plot(xlist, f2, '-', markersize=1, label = r"$F_y$")
plt.title("Constraint forces")
plt.xlabel(r"$\varphi$")
plt.ylabel("mg")
plt.grid()
plt.legend()
plt.show()
| 0 | 0 | 0 |
c0d954c3b6047cbbc2c0a18b91acaa42bdc69750 | 1,323 | py | Python | graphs/tests/test_graphs.py | Sewar-web/data-structures-and-algorithms1 | d94f706fb3a30c114bd08a6c2d9b7ed269bc9a21 | [
"MIT"
] | null | null | null | graphs/tests/test_graphs.py | Sewar-web/data-structures-and-algorithms1 | d94f706fb3a30c114bd08a6c2d9b7ed269bc9a21 | [
"MIT"
] | null | null | null | graphs/tests/test_graphs.py | Sewar-web/data-structures-and-algorithms1 | d94f706fb3a30c114bd08a6c2d9b7ed269bc9a21 | [
"MIT"
] | 1 | 2021-06-26T09:19:43.000Z | 2021-06-26T09:19:43.000Z | from graphs import __version__
from graphs.graph import Vertix ,Edge,Graph
graph = Graph()
| 22.810345 | 104 | 0.568405 | from graphs import __version__
from graphs.graph import Vertix ,Edge,Graph
def test_version():
    """The package version should match the pinned release."""
    expected = '0.1.0'
    assert __version__ == expected
graph = Graph()
def test_add_node():
    # Uses the shared module-level `graph`, so the expected mapping holds
    # only when this test runs before the others that add more nodes.
    graph.add_node('a')
    assert graph.get_nodes()=={'a': []}
def test_edge():
    """get_neighbors should describe an edge together with its weight."""
    g = Graph()
    for node in ('a', 'b'):
        g.add_node(node)
    g.add_edge('a', 'b', 8)
    assert g.get_neighbors('a') == 'a :edge--> b ... weight: 8'
def test_nodes():
    """A freshly built graph lists every added node with no edges."""
    g = Graph()
    for node in ('a', 'b'):
        g.add_node(node)
    assert g.get_nodes() == {'a': [], 'b': []}
def test_neighbor():
    # Mutates the shared module-level `graph`; the expected dict below is
    # therefore order-dependent (earlier tests may already have added 'a').
    # Also assumes add_node ignores duplicates ('b' is added twice) --
    # TODO confirm against the Graph implementation.
    graph.add_node('a')
    graph.add_node('b')
    graph.add_node('c')
    graph.add_node('d')
    graph.add_node('b')
    graph.add_edge('a', 'b', 5)
    graph.add_edge('a', 'd', 3)
    graph.add_edge('b', 'd', 1)
    graph.add_edge('a', 'a', 2)
    assert graph.get_nodes() == {'a': [['b', 5], ['d', 3], ['a', 2]], 'b': [['d', 1]], 'c': [], 'd': []}
def test_size():
    # Mutates the shared module-level `graph`; asserting exactly 5 assumes
    # duplicates are ignored and no other nodes were added before -- this
    # makes the test order-dependent. TODO confirm.
    graph.add_node('a')
    graph.add_node('b')
    graph.add_node('c')
    graph.add_node('d')
    graph.add_node('e')
    assert graph.size() == 5
def test_one_node():
    # Self-loop on a single node, using the shared module-level graph.
    # add_edge is called without a weight here; the expected string implies
    # the default weight is 1 -- TODO confirm against Graph.add_edge.
    graph.add_node('s')
    graph.add_edge('s','s')
    assert graph.get_neighbors('s') == 's :edge--> s ... weight: 1'
def test_empty_graph():
    """A graph with no nodes yields no node mapping at all."""
    graph = Graph()
    # `is None` rather than `== None`: identity comparison is the Python
    # idiom (PEP 8, E711) and does not invoke __eq__.
    assert graph.get_nodes() is None
| 1,044 | 0 | 183 |
e065295369c34fdafb30ecef1f98567848098232 | 1,332 | py | Python | core/test.py | fazilaltinel/pytorch-dann-resnet | fe6abb274065c26c918d71c38babe2ec894d8293 | [
"MIT"
] | 4 | 2021-02-08T10:32:48.000Z | 2021-09-15T05:43:20.000Z | core/test.py | fazilaltinel/pytorch-dann-resnet | fe6abb274065c26c918d71c38babe2ec894d8293 | [
"MIT"
] | null | null | null | core/test.py | fazilaltinel/pytorch-dann-resnet | fe6abb274065c26c918d71c38babe2ec894d8293 | [
"MIT"
] | 2 | 2021-02-25T20:17:41.000Z | 2021-08-29T08:06:39.000Z | import torch.utils.data
import torch.nn as nn
def test(model, data_loader, device, loggi, flag):
"""Evaluate model for dataset."""
# set eval state for Dropout and BN layers
model.eval()
# init loss and accuracy
loss_ = 0.0
acc_ = 0.0
acc_domain_ = 0.0
n_total = 0
# set loss function
criterion = nn.CrossEntropyLoss()
# evaluate network
for (images, labels) in data_loader:
images = images.to(device)
labels = labels.to(device) #labels = labels.squeeze(1)
size = len(labels)
if flag == 'target':
labels_domain = torch.ones(size).long().to(device)
else:
labels_domain = torch.zeros(size).long().to(device)
preds, domain = model(images, alpha=0)
loss_ += criterion(preds, labels).item()
pred_cls = preds.data.max(1)[1]
pred_domain = domain.data.max(1)[1]
acc_ += pred_cls.eq(labels.data).sum().item()
acc_domain_ += pred_domain.eq(labels_domain.data).sum().item()
n_total += size
loss = loss_ / n_total
acc = acc_ / n_total
acc_domain = acc_domain_ / n_total
loggi.info("{}: Avg Loss = {:.6f}, Avg Accuracy = {:.2%}, {}/{}, Avg Domain Accuracy = {:2%}".format(flag, loss, acc, acc_, n_total, acc_domain))
return loss, acc, acc_domain
| 29.6 | 149 | 0.605105 | import torch.utils.data
import torch.nn as nn
def test(model, data_loader, device, loggi, flag):
    """Evaluate model for dataset.

    ``model(images, alpha=0)`` is expected to return
    ``(class_logits, domain_logits)``.  ``flag == 'target'`` assigns domain
    label 1 to every sample, any other flag assigns 0.
    Returns ``(loss, acc, acc_domain)``.
    """
    # set eval state for Dropout and BN layers
    model.eval()
    # init loss and accuracy
    loss_ = 0.0
    acc_ = 0.0
    acc_domain_ = 0.0
    n_total = 0
    # set loss function
    criterion = nn.CrossEntropyLoss()
    # evaluate network
    # NOTE(review): no torch.no_grad() here, so activations keep gradient
    # buffers during evaluation -- confirm whether that is intentional.
    for (images, labels) in data_loader:
        images = images.to(device)
        labels = labels.to(device)  # (previously: labels = labels.squeeze(1))
        size = len(labels)
        if flag == 'target':
            labels_domain = torch.ones(size).long().to(device)
        else:
            labels_domain = torch.zeros(size).long().to(device)
        preds, domain = model(images, alpha=0)
        # NOTE(review): criterion returns the batch *mean*, but the sum is
        # later divided by the sample count n_total, so the reported loss
        # is scaled down by the batch size -- confirm intended semantics.
        loss_ += criterion(preds, labels).item()
        pred_cls = preds.data.max(1)[1]
        pred_domain = domain.data.max(1)[1]
        acc_ += pred_cls.eq(labels.data).sum().item()
        acc_domain_ += pred_domain.eq(labels_domain.data).sum().item()
        n_total += size
    loss = loss_ / n_total
    acc = acc_ / n_total
    acc_domain = acc_domain_ / n_total
    loggi.info("{}: Avg Loss = {:.6f}, Avg Accuracy = {:.2%}, {}/{}, Avg Domain Accuracy = {:2%}".format(flag, loss, acc, acc_, n_total, acc_domain))
    return loss, acc, acc_domain
| 0 | 0 | 0 |
ab16331c01263b1f5184c5ed899245585e948a04 | 6,467 | py | Python | tests/licensedcode/data/datadriven/lic2/2206-misc-python/interval.py | s4-2/scancode-toolkit | 8931b42e2630b94d0cabc834dfb3c16f01f82321 | [
"Apache-2.0",
"CC-BY-4.0"
] | 1,511 | 2015-07-01T15:29:03.000Z | 2022-03-30T13:40:05.000Z | tests/licensedcode/data/datadriven/lic2/2206-misc-python/interval.py | s4-2/scancode-toolkit | 8931b42e2630b94d0cabc834dfb3c16f01f82321 | [
"Apache-2.0",
"CC-BY-4.0"
] | 2,695 | 2015-07-01T16:01:35.000Z | 2022-03-31T19:17:44.000Z | backend/env/lib/python3.7/site-packages/aniso8601/interval.py | US579/Seddit | 116a676efd0fa31c8cc6fe4c723b739203d9428b | [
"MIT"
] | 540 | 2015-07-01T15:08:19.000Z | 2022-03-31T12:13:11.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
from aniso8601.builders import TupleBuilder
from aniso8601.builders.python import PythonTimeBuilder
from aniso8601.date import parse_date
from aniso8601.duration import parse_duration
from aniso8601.exceptions import ISOFormatError
from aniso8601.time import parse_datetime
| 40.41875 | 79 | 0.613422 | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
from aniso8601.builders import TupleBuilder
from aniso8601.builders.python import PythonTimeBuilder
from aniso8601.date import parse_date
from aniso8601.duration import parse_duration
from aniso8601.exceptions import ISOFormatError
from aniso8601.time import parse_datetime
def parse_interval(isointervalstr, intervaldelimiter='/',
                   datetimedelimiter='T', builder=PythonTimeBuilder):
    """Parse an ISO 8601 interval string into an object built by `builder`.

    Supported formats are ``<start>/<end>``, ``<start>/<duration>`` and
    ``<duration>/<end>``, where <start> and <end> may be dates or datetimes
    (never bare times).  A lone ``<duration>`` is expressly unsupported,
    because there is no way to supply the required anchor context.
    Repeating intervals (leading ``R``) must go through
    parse_repeating_interval instead.
    """
    is_repeating = isointervalstr[0] == 'R'
    if is_repeating:
        raise ISOFormatError('ISO 8601 repeating intervals must be parsed '
                             'with parse_repeating_interval.')
    return _parse_interval(isointervalstr, builder,
                           intervaldelimiter, datetimedelimiter)
def parse_repeating_interval(isointervalstr, intervaldelimiter='/',
                             datetimedelimiter='T', builder=PythonTimeBuilder):
    """Parse an ISO 8601 repeating interval into an object built by `builder`.

    Valid formats are ``Rnn/<interval>`` (repeat nn times) and
    ``R/<interval>`` (unbounded repetition).
    """
    if isointervalstr[0] != 'R':
        raise ISOFormatError('ISO 8601 repeating interval must start '
                             'with an R.')
    # Split the repetition designator off from the interval proper.
    designator, intervalpart = isointervalstr.split(intervaldelimiter, 1)
    # A bare "R" means unbounded repetition; "Rnn" carries the count.
    unbounded = len(designator) == 1
    R = unbounded
    Rnn = None if unbounded else designator[1:]
    interval = _parse_interval(intervalpart, TupleBuilder,
                               intervaldelimiter, datetimedelimiter)
    return builder.build_repeating_interval(R=R, Rnn=Rnn, interval=interval)
def _parse_interval(isointervalstr, builder, intervaldelimiter='/',
                    datetimedelimiter='T'):
    """Dispatch one non-repeating interval string to the right builder call.

    A leading 'P' on either side marks that side as a duration; otherwise a
    side is a datetime when it contains `datetimedelimiter`, else a date.
    """
    #Returns a tuple containing the start of the interval, the end of the
    #interval, and or the interval duration
    firstpart, secondpart = isointervalstr.split(intervaldelimiter)
    if firstpart[0] == 'P':
        #<duration>/<end>
        #Notice that these are not returned 'in order' (earlier to later), this
        #is to maintain consistency with parsing <start>/<end> durations, as
        #well as making repeating interval code cleaner. Users who desire
        #durations to be in order can use the 'sorted' operator.
        #We need to figure out if <end> is a date, or a datetime
        if secondpart.find(datetimedelimiter) != -1:
            #<end> is a datetime
            duration = parse_duration(firstpart, builder=TupleBuilder)
            enddatetime = parse_datetime(secondpart,
                                         delimiter=datetimedelimiter,
                                         builder=TupleBuilder)
            return builder.build_interval(end=enddatetime,
                                          duration=duration)
        #<end> must just be a date
        duration = parse_duration(firstpart, builder=TupleBuilder)
        enddate = parse_date(secondpart, builder=TupleBuilder)
        return builder.build_interval(end=enddate, duration=duration)
    elif secondpart[0] == 'P':
        #<start>/<duration>
        #We need to figure out if <start> is a date, or a datetime
        if firstpart.find(datetimedelimiter) != -1:
            #<start> is a datetime
            duration = parse_duration(secondpart, builder=TupleBuilder)
            startdatetime = parse_datetime(firstpart,
                                           delimiter=datetimedelimiter,
                                           builder=TupleBuilder)
            return builder.build_interval(start=startdatetime,
                                          duration=duration)
        #<start> must just be a date
        duration = parse_duration(secondpart, builder=TupleBuilder)
        startdate = parse_date(firstpart, builder=TupleBuilder)
        return builder.build_interval(start=startdate,
                                      duration=duration)
    #<start>/<end>
    #The four cases below cover every datetime/date combination of the two
    #sides; intermediate results always go through TupleBuilder.
    if (firstpart.find(datetimedelimiter) != -1
            and secondpart.find(datetimedelimiter) != -1):
        #Both parts are datetimes
        start_datetime = parse_datetime(firstpart,
                                        delimiter=datetimedelimiter,
                                        builder=TupleBuilder)
        end_datetime = parse_datetime(secondpart,
                                      delimiter=datetimedelimiter,
                                      builder=TupleBuilder)
        return builder.build_interval(start=start_datetime,
                                      end=end_datetime)
    elif (firstpart.find(datetimedelimiter) != -1
          and secondpart.find(datetimedelimiter) == -1):
        #First part is a datetime, second part is a date
        start_datetime = parse_datetime(firstpart,
                                        delimiter=datetimedelimiter,
                                        builder=TupleBuilder)
        end_date = parse_date(secondpart, builder=TupleBuilder)
        return builder.build_interval(start=start_datetime,
                                      end=end_date)
    elif (firstpart.find(datetimedelimiter) == -1
          and secondpart.find(datetimedelimiter) != -1):
        #First part is a date, second part is a datetime
        start_date = parse_date(firstpart, builder=TupleBuilder)
        end_datetime = parse_datetime(secondpart,
                                      delimiter=datetimedelimiter,
                                      builder=TupleBuilder)
        return builder.build_interval(start=start_date,
                                      end=end_datetime)
    #Both parts are dates
    start_date = parse_date(firstpart, builder=TupleBuilder)
    end_date = parse_date(secondpart, builder=TupleBuilder)
    return builder.build_interval(start=start_date, end=end_date)
| 5,914 | 0 | 69 |
ee4087c26b6609704e5da238bf4a0da044dbb850 | 1,602 | py | Python | run.py | bri25yu/jiant | 88a21a9fed7f5e9d6a42a8129d2c63a2f8e0f30a | [
"MIT"
] | null | null | null | run.py | bri25yu/jiant | 88a21a9fed7f5e9d6a42a8129d2c63a2f8e0f30a | [
"MIT"
] | null | null | null | run.py | bri25yu/jiant | 88a21a9fed7f5e9d6a42a8129d2c63a2f8e0f30a | [
"MIT"
] | null | null | null | import argparse
from jiant.proj.simple import runscript as run
import jiant.scripts.download_data.runscript as downloader
if __name__ == "__main__":
    # Command-line entry point: collect the run parameters and hand them to
    # main(). NOTE(review): `main` is not defined in this excerpt -- it is
    # expected to exist elsewhere in the module.
    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--task_name")
    parser.add_argument("-d", "--data_dir")
    parser.add_argument("-e", "--exp_dir")
    parser.add_argument("-m", "--model_name_or_path")
    parser.add_argument("-c", "--compression_type")
    parser.add_argument("-cc", "--compression_config")
    parser.add_argument("-lr", "--learning_rate", default=1e-5, type=float)
    parser.add_argument("-s", "--seed", default=42, type=int)
    args = parser.parse_args()
    main(
        args.task_name,
        args.data_dir,
        args.exp_dir,
        args.model_name_or_path,
        args.compression_type,
        args.compression_config,
        args.learning_rate,
        args.seed,
    )
| 25.83871 | 75 | 0.6598 | import argparse
from jiant.proj.simple import runscript as run
import jiant.scripts.download_data.runscript as downloader
def main(
    task_name,
    data_dir,
    exp_dir,
    model_name_or_path,
    compression_type,
    compression_config,
    learning_rate,
    seed,
):
    """Download the task data and run a jiant Simple-API training run."""
    downloader.download_data([task_name], data_dir)
    # Set up the arguments for the Simple API
    run_config = run.RunConfiguration(
        run_name="simple",
        exp_dir=exp_dir,
        data_dir=data_dir,
        hf_pretrained_model_name_or_path=model_name_or_path,
        compression_type=compression_type,
        compression_config=compression_config,
        tasks=task_name,
        train_batch_size=32,
        num_train_epochs=100,
        learning_rate=learning_rate,
        seed=seed,
        do_save_best=True,
    )
    run.run_simple(run_config)
if __name__ == "__main__":
    # Command-line entry point: parse the run parameters and forward them
    # positionally to main() in its declaration order.
    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--task_name")
    parser.add_argument("-d", "--data_dir")
    parser.add_argument("-e", "--exp_dir")
    parser.add_argument("-m", "--model_name_or_path")
    parser.add_argument("-c", "--compression_type")
    parser.add_argument("-cc", "--compression_config")
    parser.add_argument("-lr", "--learning_rate", default=1e-5, type=float)
    parser.add_argument("-s", "--seed", default=42, type=int)
    args = parser.parse_args()
    main(
        args.task_name,
        args.data_dir,
        args.exp_dir,
        args.model_name_or_path,
        args.compression_type,
        args.compression_config,
        args.learning_rate,
        args.seed,
    )
| 693 | 0 | 23 |
98670a49ae2d29567afb74eef9cd125362c57e38 | 10,350 | py | Python | main/views.py | sirodoht/oscarator.com | 5c20e38a0db4d8937e849e493c698767ce49d2ba | [
"MIT"
] | 1 | 2022-02-16T21:47:20.000Z | 2022-02-16T21:47:20.000Z | main/views.py | sirodoht/oscarator.com | 5c20e38a0db4d8937e849e493c698767ce49d2ba | [
"MIT"
] | 3 | 2022-02-16T13:40:59.000Z | 2022-02-16T13:42:23.000Z | main/views.py | sirodoht/oscarator.com | 5c20e38a0db4d8937e849e493c698767ce49d2ba | [
"MIT"
] | null | null | null | from django.contrib import messages
from django.contrib.auth import authenticate, login as dj_login, logout as dj_logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import (
AuthenticationForm,
PasswordChangeForm,
PasswordResetForm,
SetPasswordForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import redirect, render
from django.utils.http import urlsafe_base64_decode
from django.views.decorators.http import (
require_http_methods,
require_POST,
require_safe,
)
from main import forms, models
from oscarator import settings
INTERNAL_RESET_URL_TOKEN = "confirmation"
INTERNAL_RESET_SESSION_TOKEN = "_password_reset_token"
@require_safe
@require_safe
@require_POST
@require_safe
@require_POST
@require_http_methods(["HEAD", "GET", "POST"])
@require_http_methods(["HEAD", "GET", "POST"])
@require_http_methods(["HEAD", "GET", "POST"])
@login_required
@require_http_methods(["HEAD", "GET", "POST"])
@login_required
| 35.204082 | 129 | 0.608986 | from django.contrib import messages
from django.contrib.auth import authenticate, login as dj_login, logout as dj_logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import (
AuthenticationForm,
PasswordChangeForm,
PasswordResetForm,
SetPasswordForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import redirect, render
from django.utils.http import urlsafe_base64_decode
from django.views.decorators.http import (
require_http_methods,
require_POST,
require_safe,
)
from main import forms, models
from oscarator import settings
INTERNAL_RESET_URL_TOKEN = "confirmation"
INTERNAL_RESET_SESSION_TOKEN = "_password_reset_token"
@require_safe
def index(request):
    """Home page: everybody's picks for the current year plus, once voting
    has closed, a ranking of users by number of winning predictions."""
    if not request.user.is_authenticated:
        return redirect("main:enter")
    user_vote_count = models.Vote.objects.filter(
        user=request.user, entry__year=settings.CURRENT_YEAR
    ).count()
    # build all users' entries dict
    all_users_entries = {}
    users = User.objects.all().order_by("?")
    for u in users:
        all_users_entries[u.username] = []
    votes = models.Vote.objects.filter(entry__year=settings.CURRENT_YEAR)
    for v in votes:
        all_users_entries[v.user.username].append(v.entry)
    # calculate all users successful predictions
    user_wins = []
    if not settings.VOTING_ENABLED:
        user_wins_dict = {}
        for u in users:
            user_wins_dict[u.username] = 0
        for u in users:
            votes = models.Vote.objects.filter(
                user=u, entry__year=settings.CURRENT_YEAR
            )
            for v in votes:
                if v.entry.is_winner:
                    user_wins_dict[u.username] += 1
        # Hand-rolled "sort users by win count, descending", producing a
        # list of single-entry {username: wins} dicts.
        # NOTE(review): the nested loops zero out each selected maximum in
        # values_de; zero-win entries are then emptied (but kept) by the
        # clean-up pass below, leaving empty dicts in user_wins.
        values = user_wins_dict.values()
        lim = len(values)
        values_de = list(values)
        for i in range(lim):
            for k, v in user_wins_dict.items():
                # find max
                max_value = max(values_de)
                if max_value == v:
                    user_wins.append({k: v})
                    values_de[values_de.index(max_value)] = 0
        for i in user_wins:
            key = list(i.keys())[0]
            if i[key] == 0:
                del i[key]
    return render(
        request,
        "main/index.html",
        {
            "users": users,
            "user_wins": user_wins,
            "all_users_entries": all_users_entries,
            "user_vote_count": user_vote_count,
            "current_year": settings.CURRENT_YEAR,
            "voting_enabled": settings.VOTING_ENABLED,
        },
    )
@require_safe
def enter(request):
    """Render the combined login/join page; authenticated users go home."""
    if request.user.is_authenticated:
        return redirect("main:index")
    requested_next = request.GET.get("next")
    context = {
        "login_form": AuthenticationForm(),
        "next": str(requested_next or ""),
    }
    return render(request, "main/enter.html", context)
@require_POST
def login(request):
    """Authenticate a user from the POSTed login form.

    On success, log the user in and redirect to the POSTed ``next`` URL
    (or the index).  On any failure, flash an error and send the user back
    to the enter page.
    """
    form = forms.LoginForm(request.POST)
    if not form.is_valid():
        # Previously an invalid form fell through to a bare
        # render("main/enter.html") with no context and no feedback;
        # flash a message and redirect instead, the same way join()
        # handles invalid submissions.
        messages.error(request, "Invalid submission. Please try again.")
        return redirect("main:enter")
    username = form.cleaned_data.get("username")
    if not User.objects.filter(username=username).exists():
        messages.error(request, "Username does not exist.")
        return redirect("main:enter")
    password = form.cleaned_data.get("password")
    user = authenticate(request, username=username, password=password)
    if user is None:
        messages.error(request, "Invalid password.")
        return redirect("main:enter")
    dj_login(request, user)
    next_url = request.POST.get("next")
    if next_url:
        return redirect(next_url)
    return redirect("main:index")
@require_safe
def logout(request):
    """Log the current user out, flashing a confirmation message."""
    if request.user.is_authenticated:
        dj_logout(request)
        messages.info(request, "You have been logged out.")
        return redirect(settings.LOGOUT_REDIRECT_URL)
    return redirect("main:enter")
@require_POST
def join(request):
    """Create a new account from the POSTed join form and log the user in.

    Duplicate usernames/emails are rejected with a flash message and a
    redirect back to the enter page.
    """
    form = forms.JoinForm(request.POST)
    if form.is_valid():
        username = form.cleaned_data.get("username")
        password = form.cleaned_data.get("password")
        email = form.cleaned_data.get("email")
        # Both username and email taken: show the most specific message.
        if (
            User.objects.filter(username=username).exists()
            and User.objects.filter(email=email).exists()
        ):
            messages.error(
                request,
                "Both the username and the email are registered. <a href='/reset-password'>Reset password</a>?",
            )
            return redirect("main:enter")
        if User.objects.filter(username=username).exists():
            messages.error(request, "Username exists. Please try another.")
            return redirect("main:enter")
        # Email is optional; only check for duplicates when one was given.
        if len(email) > 0 and User.objects.filter(email=email).exists():
            messages.error(
                request,
                "This email is connected to an existing account. <a href='/reset-password'>Reset password</a>?",
            )
            return redirect("main:enter")
        user = User(username=username, email=email)
        user.set_password(password)
        user.save()
        # Re-fetch the user through authenticate() before logging in.
        user = authenticate(request, username=username, password=password)
        dj_login(request, user)
        messages.success(request, "Welcome to Oscarator!")
        return redirect("main:index")
    else:
        messages.error(request, "Invalid submission. Please try again.")
        return redirect("main:enter")
    # NOTE(review): unreachable -- both branches above return.
    return render(request, "main/enter.html")
@require_http_methods(["HEAD", "GET", "POST"])
def forgot(request):
    """Show the password-reset request form and send the reset email."""
    if request.method != "POST":
        # Plain page view: present an empty reset form.
        form = PasswordResetForm()
        return render(request, "main/forgot.html", {"form": form})
    form = PasswordResetForm(request.POST)
    if form.is_valid():
        form.save(
            request=request,
            from_email=settings.DEFAULT_FROM_EMAIL,
            email_template_name="main/password_reset_email.txt",
        )
        messages.success(request, "Password reset email sent!")
    return render(request, "main/forgot.html", {"form": form})
@require_http_methods(["HEAD", "GET", "POST"])
def forgot_confirm(request, uidb64, token):
    """Second step of the password-reset flow.

    GET: validate the emailed token, stash it in the session, and redirect
    to the same URL with the token replaced by a placeholder so the real
    token cannot leak through the Referer header; then show the form.
    POST: set the new password from the submitted form.
    """
    if request.method == "POST":
        uid = urlsafe_base64_decode(uidb64).decode()
        user = User.objects.get(pk=uid)
        form = SetPasswordForm(user, request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, "Your password has changed.")
            return redirect("main:index")
        else:
            messages.error(
                request,
                "Please <span onclick='history.back(-1)' style='cursor: pointer; text-decoration: underline;'>try again</span>.",
            )
            return render(request, "main/forgot_confirm.html", {"form": form})
    else:
        uid = urlsafe_base64_decode(uidb64).decode()
        user = User.objects.get(pk=uid)
        # NOTE(review): User.objects.get raises DoesNotExist rather than
        # returning None, so the guard below never skips -- confirm intent.
        validlink = False
        if user is not None:
            if token == INTERNAL_RESET_URL_TOKEN:
                session_token = request.session.get(INTERNAL_RESET_SESSION_TOKEN)
                if default_token_generator.check_token(user, session_token):
                    # If the token is valid, display the password reset form.
                    validlink = True
            else:
                if default_token_generator.check_token(user, token):
                    # Store the token in the session and redirect to the
                    # password reset form at a URL without the token. That
                    # avoids the possibility of leaking the token in the
                    # HTTP Referer header.
                    request.session[INTERNAL_RESET_SESSION_TOKEN] = token
                    redirect_url = request.path.replace(token, INTERNAL_RESET_URL_TOKEN)
                    return HttpResponseRedirect(redirect_url)
        form = SetPasswordForm(user)
        return render(
            request, "main/forgot_confirm.html", {"form": form, "validlink": validlink}
        )
@require_http_methods(["HEAD", "GET", "POST"])
@login_required
def user(request, username):
    """Profile page for `username`; POST records a vote for the requester."""
    if request.method == "POST":
        form = forms.VoteForm(request.POST)
        if form.is_valid():
            entry_id = form.cleaned_data.get("entry")
            entry = models.Entry.objects.get(id=entry_id)
            if settings.VOTING_ENABLED:
                # One vote per category: drop any previous vote in this
                # entry's category before recording the new one.
                models.Vote.objects.filter(
                    entry__category=entry.category, user=request.user
                ).delete()
                models.Vote.objects.create(user=request.user, entry=entry)
            return JsonResponse(status=200, data={})
    else:
        form = forms.VoteForm()
    # build this user's votes dict
    # NOTE(review): despite its name, user_votes maps each category name to
    # all of the current year's entries in that category, not to the user's
    # own votes -- confirm what the template expects.
    categories = models.Category.objects.all()
    entries = models.Entry.objects.filter(year=settings.CURRENT_YEAR)
    user = User.objects.get(username=username)
    user_votes = {}
    for c in categories:
        user_votes[c.name] = []
    for e in entries:
        if e.year == settings.CURRENT_YEAR:
            user_votes[e.category.name].append(e)
    # calculate user successful predictions
    # NOTE(review): the computation is commented out below, so user_wins is
    # always 0 on this page.
    user_wins = 0
    # for c in categories:
    #     for e in c.entry_set.all():
    #         for v in e.vote_set.all():
    #             if v.user == user and v.entry == e and v.entry.is_winner:
    #                 user_wins += 1
    return render(
        request,
        "main/user.html",
        {
            "form": form,
            "categories": categories,
            "user": user,
            "user_wins": user_wins,
            "user_votes": user_votes,
        },
    )
@require_http_methods(["HEAD", "GET", "POST"])
@login_required
def preferences(request):
    """Let the logged-in user change their password."""
    if request.method != "POST":
        bound_form = PasswordChangeForm(request.user)
        return render(request, "main/settings.html", {"form": bound_form})
    bound_form = PasswordChangeForm(request.user, request.POST)
    if bound_form.is_valid():
        bound_form.save()
        messages.success(request, "Your password was successfully updated!")
        return redirect("main:index")
    messages.error(request, "Please correct the error below.")
    return render(request, "main/settings.html", {"form": bound_form})
| 8,999 | 0 | 198 |
101fcee678ce67d5dc1b945bc5521cc6e6232b86 | 98 | py | Python | clitooltester/__init__.py | dfirlabs/clitooltester | ffe23b7b7458212d150390f476cda74e89fc97e1 | [
"Apache-2.0"
] | null | null | null | clitooltester/__init__.py | dfirlabs/clitooltester | ffe23b7b7458212d150390f476cda74e89fc97e1 | [
"Apache-2.0"
] | null | null | null | clitooltester/__init__.py | dfirlabs/clitooltester | ffe23b7b7458212d150390f476cda74e89fc97e1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Command line tool tester (CLIToolTester)."""
__version__ = '20191217'
| 19.6 | 47 | 0.642857 | # -*- coding: utf-8 -*-
"""Command line tool tester (CLIToolTester)."""
__version__ = '20191217'
| 0 | 0 | 0 |
5c1f339722fbcdb5287bb5f5decebeea9ca758b7 | 331 | py | Python | users/migrations/0004_remove_profile_user.py | piotrzegarek/Numerical-computational-Site | 3c18ca1086135eddb23dfbe015882708e1e0c321 | [
"MIT"
] | 1 | 2021-12-03T15:51:49.000Z | 2021-12-03T15:51:49.000Z | users/migrations/0004_remove_profile_user.py | piotrzegarek/Numerical-computational-Site | 3c18ca1086135eddb23dfbe015882708e1e0c321 | [
"MIT"
] | null | null | null | users/migrations/0004_remove_profile_user.py | piotrzegarek/Numerical-computational-Site | 3c18ca1086135eddb23dfbe015882708e1e0c321 | [
"MIT"
] | 1 | 2021-12-03T15:52:37.000Z | 2021-12-03T15:52:37.000Z | # Generated by Django 3.2.9 on 2021-12-20 20:36
from django.db import migrations
| 18.388889 | 51 | 0.595166 | # Generated by Django 3.2.9 on 2021-12-20 20:36
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0003_delete_user_profile_user'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='user',
),
]
| 0 | 225 | 23 |
6dfdc54ff94e2ff04365c1be87003f5d42e21011 | 6,657 | py | Python | dci_downloader/api.py | redhat-cip/dci-downloader | d7ad184f502e145b9ccf2618b8af6d82ac7daa8a | [
"Apache-2.0"
] | null | null | null | dci_downloader/api.py | redhat-cip/dci-downloader | d7ad184f502e145b9ccf2618b8af6d82ac7daa8a | [
"Apache-2.0"
] | 3 | 2019-10-30T20:39:06.000Z | 2021-03-31T07:55:00.000Z | dci_downloader/api.py | redhat-cip/dci-downloader | d7ad184f502e145b9ccf2618b8af6d82ac7daa8a | [
"Apache-2.0"
] | 2 | 2020-01-12T05:27:00.000Z | 2020-06-19T02:39:24.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import requests
import sys
import time
from functools import wraps
from multiprocessing import Pool
from dci_downloader.fs import create_parent_dir
from dciclient.v1.api.context import build_signature_context
from dciclient.v1.api import component as dci_component
from dciclient.v1.api import topic as dci_topic
from dciclient.v1.api import remoteci as dci_remoteci
FIVE_SECONDS = 5
TEN_SECONDS = 10
# We'll allow 5 seconds to connect & 10 seconds to get an answer
REQUESTS_TIMEOUT = (FIVE_SECONDS, TEN_SECONDS)
@retry()
@retry()
@retry()
| 29.852018 | 87 | 0.633018 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import requests
import sys
import time
from functools import wraps
from multiprocessing import Pool
from dci_downloader.fs import create_parent_dir
from dciclient.v1.api.context import build_signature_context
from dciclient.v1.api import component as dci_component
from dciclient.v1.api import topic as dci_topic
from dciclient.v1.api import remoteci as dci_remoteci
FIVE_SECONDS = 5
TEN_SECONDS = 10
# We'll allow 5 seconds to connect & 10 seconds to get an answer
REQUESTS_TIMEOUT = (FIVE_SECONDS, TEN_SECONDS)
def check_repo_is_accessible():
    """Exit the process when the DCI repository does not answer in time."""
    try:
        requests.get(
            "https://repo.distributed-ci.io/",
            timeout=REQUESTS_TIMEOUT,
        )
    except requests.exceptions.Timeout:
        print("Timeout. dci-downloader cannot access repo.distributed-ci.io server.")
        proxy_configured = os.getenv("HTTP_PROXY") or os.getenv("HTTPS_PROXY")
        if proxy_configured:
            print("You configured a proxy. Check your proxy information.")
        print("Exiting...")
        sys.exit(1)
def get_topic(topic_name):
    """Return the topic named `topic_name`, or None (with a printed hint)
    when it does not exist or is not accessible."""
    context = build_signature_context()
    response = dci_topic.list(context, where="name:%s" % topic_name)
    response.raise_for_status()
    topics = response.json()["topics"]
    if not topics:
        print("Ensure you have access to topic %s" % topic_name)
        print("Contact your EPM for more information.")
        return None
    return topics[0]
def get_topic_by_id(topic_id):
    """Fetch one topic by id; None (with a printed hint) when unavailable."""
    context = build_signature_context()
    response = dci_topic.get(context, topic_id)
    topic = response.json()["topic"]
    if not topic:
        print("Ensure that topic %s exists or that you have access" % topic_id)
        print("Contact your EPM for more information.")
        return None
    return topic
def get_component_by_id(component_id):
    """Fetch one component by id; None (with a printed hint) when unavailable."""
    context = build_signature_context()
    response = dci_component.get(context, component_id)
    component = response.json()["component"]
    if not component:
        print("Ensure that component %s exists or that you have access" % component_id)
        print("Contact your EPM for more information.")
        return None
    return component
def get_components_per_topic(
topic_id, sort="-created_at", limit=100, offset=0, where=""
):
response = dci_topic.list_components(
context=build_signature_context(),
id=topic_id,
sort=sort,
limit=limit,
offset=offset,
where=where,
).json()
return response["components"]
def get_components(topic, filters=[]):
returned_components = []
tag_per_type = {filter["type"]: filter["tag"] for filter in filters}
for component_type in topic["component_types"]:
component_type = component_type.lower()
where = "type:%s,state:active" % component_type
if component_type in tag_per_type:
where += ",tags:%s" % tag_per_type[component_type]
components = get_components_per_topic(
topic_id=topic["id"],
sort="-created_at",
limit=1,
offset=0,
where=where,
)
returned_components.extend(components)
return returned_components
def get_keys(remoteci_id):
context = build_signature_context()
remoteci = dci_remoteci.get(context, remoteci_id).json()["remoteci"]
res = dci_remoteci.refresh_keys(context, id=remoteci_id, etag=remoteci["etag"])
if res.status_code == 201:
return res.json()["keys"]
def cert_is_valid(cert_file):
try:
context = build_signature_context()
with open(cert_file, "r") as f:
cert = f.read()
uri = "%s/certs/check" % context.dci_cs_api
r = context.session.post(uri, json={"cert": cert})
return r.status_code == 204
except Exception:
return False
def get_base_url(topic, component):
return "https://repo.distributed-ci.io/%s/%s/%s" % (
topic["product_id"],
topic["id"],
component["id"],
)
def retry(tries=3, delay=2, multiplier=2):
def decorated_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
_tries = tries
_delay = delay
while _tries:
try:
return f(*args, **kwargs)
except KeyboardInterrupt:
raise
except Exception as e:
print("%s, retrying in %d seconds..." % (str(e), _delay))
time.sleep(_delay)
_tries -= 1
if not _tries:
raise
_delay *= multiplier
return f(*args, **kwargs)
return f_retry
return decorated_retry
@retry()
def get_files_list(base_url, settings):
print("Download DCI file list, it may take a few seconds")
files_list_url = "%s/dci_files_list.json" % base_url
key = settings["dci_key_file"]
cert = settings["dci_cert_file"]
r = requests.get(files_list_url, cert=(cert, key), timeout=REQUESTS_TIMEOUT)
r.raise_for_status()
return r.json()
@retry()
def get_container_images_list(base_url, settings):
containers_list_url = "%s/images_list.yaml" % base_url
key = settings["dci_key_file"]
cert = settings["dci_cert_file"]
r = requests.get(containers_list_url, cert=(cert, key), timeout=REQUESTS_TIMEOUT)
r.raise_for_status()
return r.content
@retry()
def download_file(file, cert, key, file_index, nb_files):
destination = file["destination"]
print("(%d/%d): %s" % (file_index, nb_files, destination))
create_parent_dir(destination)
r = requests.get(
file["source"], stream=True, cert=(cert, key), timeout=REQUESTS_TIMEOUT
)
r.raise_for_status()
with open(destination, "wb") as f:
for chunk in r.iter_content(chunk_size=512 * 1024):
f.write(chunk)
return file
def download_file_unpack(args):
try:
return download_file(*args)
except KeyboardInterrupt:
raise RuntimeError("KeyboardInterrupt")
def download_files(files, settings):
nb_files = len(files)
cert = settings["dci_cert_file"]
key = settings["dci_key_file"]
enhanced_files = [[f, cert, key, i + 1, nb_files] for i, f in enumerate(files)]
executor = Pool(processes=4)
error = None
try:
executor.map(download_file_unpack, enhanced_files, chunksize=1)
executor.close()
except KeyboardInterrupt as e:
executor.terminate()
error = e
except Exception as e:
executor.terminate()
error = e
finally:
executor.join()
del executor
if error is not None:
raise error
| 5,698 | 0 | 342 |
d5ff2fd7abf3a164f7893984f9e9753ff69d86e5 | 56 | py | Python | apps/categories/__init__.py | ExpoAshique/ProveBanking__s | f0b45fffea74d00d14014be27aa50fe5f42f6903 | [
"MIT"
] | null | null | null | apps/categories/__init__.py | ExpoAshique/ProveBanking__s | f0b45fffea74d00d14014be27aa50fe5f42f6903 | [
"MIT"
] | null | null | null | apps/categories/__init__.py | ExpoAshique/ProveBanking__s | f0b45fffea74d00d14014be27aa50fe5f42f6903 | [
"MIT"
] | null | null | null | default_app_config = 'categories.apps.CategoriesConfig'
| 28 | 55 | 0.857143 | default_app_config = 'categories.apps.CategoriesConfig'
| 0 | 0 | 0 |
bfc96a64d5994b6250f5d552e7ec22eab2022391 | 108 | py | Python | rbcz/__init__.py | smcl/rbcz.py | 2bcdea1e12d3800d4b9b3e1d3523dec43bf3026b | [
"MIT"
] | null | null | null | rbcz/__init__.py | smcl/rbcz.py | 2bcdea1e12d3800d4b9b3e1d3523dec43bf3026b | [
"MIT"
] | null | null | null | rbcz/__init__.py | smcl/rbcz.py | 2bcdea1e12d3800d4b9b3e1d3523dec43bf3026b | [
"MIT"
] | 2 | 2016-10-11T16:04:15.000Z | 2022-03-02T10:26:42.000Z | # flake8: noqa
from .rbcz import (
read_statement,
read_statements,
read_statements_from_imap
)
| 15.428571 | 29 | 0.722222 | # flake8: noqa
from .rbcz import (
read_statement,
read_statements,
read_statements_from_imap
)
| 0 | 0 | 0 |
01d9136911dbdb303db7d50afa63b120f335fc5a | 405 | py | Python | 1017/main.py | pauvrepetit/leetcode | 6ad093cf543addc4dfa52d72a8e3c0d05a23b771 | [
"MIT"
] | null | null | null | 1017/main.py | pauvrepetit/leetcode | 6ad093cf543addc4dfa52d72a8e3c0d05a23b771 | [
"MIT"
] | null | null | null | 1017/main.py | pauvrepetit/leetcode | 6ad093cf543addc4dfa52d72a8e3c0d05a23b771 | [
"MIT"
] | null | null | null | # 1017. 负二进制转换
#
# 20200801
# huao
# 观察奇数位上的1,如果该位置为1,那么使用负二进制表示时,会比实际二进制时少2**(i+1)
# 把这个差值加进去,并进行处理加完以后的值
# 处理完以后,得到的数字的二进制表示就是原数的负二进制表示
sol = Solution()
print(sol.baseNeg2(4))
| 19.285714 | 50 | 0.511111 | # 1017. 负二进制转换
#
# 20200801
# huao
# 观察奇数位上的1,如果该位置为1,那么使用负二进制表示时,会比实际二进制时少2**(i+1)
# 把这个差值加进去,并进行处理加完以后的值
# 处理完以后,得到的数字的二进制表示就是原数的负二进制表示
class Solution:
def baseNeg2(self, N: int) -> str:
i = 0
while N >= (1 << i):
if i % 2 == 1 and (N & (1 << i) != 0):
N += (2 << i)
i += 1
return bin(N)[2:]
sol = Solution()
print(sol.baseNeg2(4))
| 182 | -6 | 49 |
4006b5f5866aa4c23e9a2b16b930191035e700e8 | 287 | py | Python | back/utils/types/chat.py | azakharau/chatify | 8e85285ecbac8e1dac5b14af7b2b591ba3ccc1c2 | [
"MIT"
] | null | null | null | back/utils/types/chat.py | azakharau/chatify | 8e85285ecbac8e1dac5b14af7b2b591ba3ccc1c2 | [
"MIT"
] | null | null | null | back/utils/types/chat.py | azakharau/chatify | 8e85285ecbac8e1dac5b14af7b2b591ba3ccc1c2 | [
"MIT"
] | null | null | null | import typing
from dataclasses import dataclass
from utils.mixins import DataMixin
@dataclass()
| 22.076923 | 43 | 0.738676 | import typing
from dataclasses import dataclass
from utils.mixins import DataMixin
@dataclass()
class Chat(DataMixin):
id: typing.Optional[int] = None
username: typing.Optional[str] = None
first_name: typing.Optional[str] = None
last_name: typing.Optional[str] = None
| 0 | 166 | 22 |
ae37de956d3521db164fb139cc78bd28636dc393 | 4,194 | py | Python | fmfexporter/adapters/polarion/connectors/jira/fmf_jira.py | rh-messaging-qe/fmfexporter | a2db70bf5d3e89d418063b1890924e6ec7cbeed6 | [
"Apache-2.0"
] | 4 | 2019-02-15T11:44:01.000Z | 2021-08-20T16:46:30.000Z | fmfexporter/adapters/polarion/connectors/jira/fmf_jira.py | rh-messaging-qe/fmfexporter | a2db70bf5d3e89d418063b1890924e6ec7cbeed6 | [
"Apache-2.0"
] | 14 | 2019-02-14T18:51:04.000Z | 2020-01-16T14:12:45.000Z | fmfexporter/adapters/polarion/connectors/jira/fmf_jira.py | rh-messaging-qe/fmfexporter | a2db70bf5d3e89d418063b1890924e6ec7cbeed6 | [
"Apache-2.0"
] | 2 | 2019-02-25T10:06:54.000Z | 2019-05-06T20:26:16.000Z | import configparser
import jira
class JiraConfig(object):
"""
PolarionConfig represents data that must be provided through
config (ini) file (to enable communication with the polarion importer APIs)
"""
KEY_SECTION = 'jira'
KEY_PROJECT = 'project'
KEY_URL = 'url'
KEY_USERNAME = 'username'
KEY_PASSWORD = 'password'
KEY_TC_WI = "testcase_work_item"
KEY_QE_TC = "qe_test_coverage"
KEY_VER_IR = "verified_in_release"
@property
def project(self) -> str:
"""
Returns the parsed jira project name
:return:
"""
return self.config[JiraConfig.KEY_SECTION][JiraConfig.KEY_PROJECT]
@property
def url(self) -> str:
"""
Returns the parsed jira project url
:return:
"""
return self.config[JiraConfig.KEY_SECTION][JiraConfig.KEY_URL]
@property
def username(self) -> str:
"""
Returns the parsed jira username
:return:
"""
return self.config[JiraConfig.KEY_SECTION][JiraConfig.KEY_USERNAME]
@property
def password(self) -> str:
"""
Returns the parsed jira password
:return:
"""
return self.config[JiraConfig.KEY_SECTION][JiraConfig.KEY_PASSWORD]
@property
def test_case_work_item_custom_field(self) -> str:
"""
Returns the parsed jira custom field for test case work item
:return:
"""
return self.config[JiraConfig.KEY_SECTION][JiraConfig.KEY_TC_WI]
@property
def qe_test_coverage_custom_field(self) -> str:
"""
Returns the parsed jira custom field for qe test coverage
:return:
"""
return self.config[JiraConfig.KEY_SECTION][JiraConfig.KEY_QE_TC]
@property
def verified_release_custom_field(self) -> str:
"""
Returns the parsed jira custom field for verified in release
:return:
"""
return self.config[JiraConfig.KEY_SECTION][JiraConfig.KEY_VER_IR] or None
| 34.377049 | 120 | 0.584883 | import configparser
import jira
class JiraConfig(object):
"""
PolarionConfig represents data that must be provided through
config (ini) file (to enable communication with the polarion importer APIs)
"""
KEY_SECTION = 'jira'
KEY_PROJECT = 'project'
KEY_URL = 'url'
KEY_USERNAME = 'username'
KEY_PASSWORD = 'password'
KEY_TC_WI = "testcase_work_item"
KEY_QE_TC = "qe_test_coverage"
KEY_VER_IR = "verified_in_release"
def __init__(self, config_file):
self.config = configparser.ConfigParser()
self.config.read(config_file)
assert JiraConfig.KEY_SECTION in self.config.sections()
@property
def project(self) -> str:
"""
Returns the parsed jira project name
:return:
"""
return self.config[JiraConfig.KEY_SECTION][JiraConfig.KEY_PROJECT]
@property
def url(self) -> str:
"""
Returns the parsed jira project url
:return:
"""
return self.config[JiraConfig.KEY_SECTION][JiraConfig.KEY_URL]
@property
def username(self) -> str:
"""
Returns the parsed jira username
:return:
"""
return self.config[JiraConfig.KEY_SECTION][JiraConfig.KEY_USERNAME]
@property
def password(self) -> str:
"""
Returns the parsed jira password
:return:
"""
return self.config[JiraConfig.KEY_SECTION][JiraConfig.KEY_PASSWORD]
@property
def test_case_work_item_custom_field(self) -> str:
"""
Returns the parsed jira custom field for test case work item
:return:
"""
return self.config[JiraConfig.KEY_SECTION][JiraConfig.KEY_TC_WI]
@property
def qe_test_coverage_custom_field(self) -> str:
"""
Returns the parsed jira custom field for qe test coverage
:return:
"""
return self.config[JiraConfig.KEY_SECTION][JiraConfig.KEY_QE_TC]
@property
def verified_release_custom_field(self) -> str:
"""
Returns the parsed jira custom field for verified in release
:return:
"""
return self.config[JiraConfig.KEY_SECTION][JiraConfig.KEY_VER_IR] or None
class FMFJiraPopulator(object):
TEST_WI = 'test-work-item'
QE_TEST_COV = 'qe-test-coverage'
VERIFIED_IN_REL = 'verified-in-release'
def __init__(self, config_file):
self.config = JiraConfig(config_file)
credentials = (self.config.username, self.config.password)
self.jira_login = jira.JIRA(self.config.url,
basic_auth=credentials)
def populate_testcases(self, tc_list: list):
tc_list_len = len(tc_list)
tc_counter = 1
for tc in tc_list: # type: PolarionTestCase
list_tcwi = []
for defect in tc.defects:
if defect.jira != "":
if "http" in defect['jira']:
defect_key = defect['jira'][defect['jira'].rfind("/") + 1:]
else:
defect_key = defect['jira']
print("Populating %s test case %s of %s (%s)" % (self.config.url + "/browse/" + defect_key,
tc_counter, tc_list_len, tc.id))
issue = self.jira_login.issue(defect_key)
list_tcwi = issue.raw.get("fields").get(self.config.test_case_work_item_custom_field)
if list_tcwi is None:
list_tcwi = [tc.test_case_work_item_url]
else:
list_tcwi.append(tc.test_case_work_item_url)
updated_fields = {
self.config.test_case_work_item_custom_field: ",".join(list_tcwi),
self.config.qe_test_coverage_custom_field: {"value": "+"},
}
if self.config.verified_release_custom_field:
updated_fields[self.config.verified_release_custom_field] = [{"value": "Verified in a release"}]
issue.update(fields=updated_fields)
tc_counter += 1
| 1,934 | 176 | 50 |
77527d7a3513596852d1eb0ff8d8935a98e0cc26 | 67 | py | Python | Lib/ensurepip/__main__.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | [
"PSF-2.0"
] | 2,557 | 2016-07-19T22:20:45.000Z | 2022-01-25T10:53:35.000Z | build/cmd/win32/Python27/Lib/ensurepip/__main__.py | IamBaoMouMou/AliOS-Things | 195a9160b871b3d78de6f8cf6c2ab09a71977527 | [
"Apache-2.0"
] | 1,360 | 2016-07-20T02:06:42.000Z | 2021-07-27T12:46:40.000Z | build/cmd/win32/Python27/Lib/ensurepip/__main__.py | IamBaoMouMou/AliOS-Things | 195a9160b871b3d78de6f8cf6c2ab09a71977527 | [
"Apache-2.0"
] | 607 | 2016-07-20T03:34:04.000Z | 2022-01-05T14:57:09.000Z | import ensurepip
if __name__ == "__main__":
ensurepip._main()
| 13.4 | 26 | 0.701493 | import ensurepip
if __name__ == "__main__":
ensurepip._main()
| 0 | 0 | 0 |
a84cd8dc54134f048fe048f26a419cfc349e13ac | 1,742 | py | Python | configuration.py | sayatul/trytond-sale-payment-gateway | 7ddcc77d946f4e3a71944477c3e83d7d06866cf7 | [
"BSD-3-Clause"
] | null | null | null | configuration.py | sayatul/trytond-sale-payment-gateway | 7ddcc77d946f4e3a71944477c3e83d7d06866cf7 | [
"BSD-3-Clause"
] | 6 | 2015-09-05T00:18:30.000Z | 2016-11-14T07:57:14.000Z | configuration.py | sayatul/trytond-sale-payment-gateway | 7ddcc77d946f4e3a71944477c3e83d7d06866cf7 | [
"BSD-3-Clause"
] | 15 | 2015-08-11T13:01:31.000Z | 2017-07-14T06:36:21.000Z | # -*- coding: utf-8 -*-
from trytond.model import fields
from trytond.pool import PoolMeta
__metaclass__ = PoolMeta
__all__ = ['SaleConfiguration']
| 27.650794 | 69 | 0.633754 | # -*- coding: utf-8 -*-
from trytond.model import fields
from trytond.pool import PoolMeta
__metaclass__ = PoolMeta
__all__ = ['SaleConfiguration']
class SaleConfiguration:
__name__ = "sale.configuration"
payment_authorize_on = fields.Selection(
"get_authorize_options", "Authorize payments", required=True,
)
payment_capture_on = fields.Selection(
"get_capture_options", "Capture payments", required=True,
)
@classmethod
def __setup__(cls):
super(SaleConfiguration, cls).__setup__()
cls._error_messages.update({
"auth_before_capture":
"Payment authorization must happen before capture"
})
@classmethod
def validate(cls, records):
super(SaleConfiguration, cls).validate(records)
for record in records:
record.validate_payment_combination()
def validate_payment_combination(self):
if self.payment_authorize_on == 'sale_process' and \
self.payment_capture_on == 'sale_confirm':
self.raise_user_error("auth_before_capture")
@staticmethod
def default_payment_authorize_on():
return "sale_confirm"
@staticmethod
def default_payment_capture_on():
return "sale_process"
@classmethod
def get_authorize_options(cls):
return [
("manual", "manually"),
("sale_confirm", "when order is confirmed."),
("sale_process", "when order is processed."),
]
@classmethod
def get_capture_options(cls):
return [
("manual", "manually"),
("sale_confirm", "when order is confirmed."),
("sale_process", "when order is processed."),
]
| 1,000 | 569 | 23 |
c8d43407f8c0512e2138628dadf82f2039fd8241 | 3,383 | py | Python | polimorfo/utils/maskutils.py | ciuffredaluca/polimorfo | 3a18b9fe95c5843c2f9528812109a40412db6ba9 | [
"Apache-2.0"
] | 3 | 2020-02-03T09:29:55.000Z | 2020-10-29T16:38:01.000Z | polimorfo/utils/maskutils.py | ciuffredaluca/polimorfo | 3a18b9fe95c5843c2f9528812109a40412db6ba9 | [
"Apache-2.0"
] | 164 | 2020-02-06T16:09:34.000Z | 2022-03-01T14:44:09.000Z | polimorfo/utils/maskutils.py | ciuffredaluca/polimorfo | 3a18b9fe95c5843c2f9528812109a40412db6ba9 | [
"Apache-2.0"
] | 5 | 2020-02-19T15:04:44.000Z | 2021-11-17T15:31:48.000Z | import cv2
import numpy as np
import pycocotools.mask as mask_util
from matplotlib.pyplot import contour
__all__ = [
"mask_to_polygon",
"polygons_to_mask",
"area",
"bbox",
"coco_poygons_to_mask",
]
def mask_to_polygon(
mask, min_score: float = 0.5, approx: float = 0.0, relative: bool = True
):
"""generate polygons from masks
Args:
mask (np.ndarray): a binary mask
min_score (float, optional): [description]. Defaults to 0.5.
approx (float, optional): it approximate the polygons to reduce the number of points. Defaults to 0.0
relative (bool, optional): it the value of the approximation is computed on the relative amount of point or with respect to all the points
Returns:
[type]: [description]
"""
mask = (mask > min_score).astype(np.uint8)
mask = cv2.copyMakeBorder(mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0)
contours, hierarchy = cv2.findContours(
mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE, offset=(-1, -1)
)
polygons = []
for cnt in contours:
if approx > 0:
if relative:
epsilon = approx * cv2.arcLength(cnt, True)
else:
epsilon = approx
approx_poly = cv2.approxPolyDP(cnt, epsilon, True)
else:
approx_poly = cnt
# we need to draw a least a box
if len(approx_poly) >= 4:
approx_flattened = approx_poly.flatten().tolist()
polygons.append(approx_flattened)
return polygons
def polygons_to_mask(polygons, height, width):
"""convert polygons to mask. Filter all the polygons with less than 4 points
Args:
polygons ([type]): [description]
height ([type]): [description]
width ([type]): [description]
Returns:
[type]: a mask of format num_classes, heigth, width
"""
polygons = [polygon for polygon in polygons if len(polygon) >= 8]
if len(polygons) == 0:
return np.zeros((height, width), np.uint8)
rle = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rle)
return mask_util.decode(rle)[:, :]
def bbox_from_mask(mask):
"""return the bounding box from the given mask
Args:
mask ([type]): [description]
Returns:
List: a list of format [x_min, y_min, w, h]
"""
pairs = np.argwhere(mask == True)
if len(pairs) == 0:
return None, None, None, None
min_row = min(pairs[:, 0])
max_row = max(pairs[:, 0])
min_col = min(pairs[:, 1])
max_col = max(pairs[:, 1])
w = max_col - min_col
h = max_row - min_row
return [float(min_col), float(min_row), float(w), float(h)]
| 27.958678 | 146 | 0.618386 | import cv2
import numpy as np
import pycocotools.mask as mask_util
from matplotlib.pyplot import contour
__all__ = [
"mask_to_polygon",
"polygons_to_mask",
"area",
"bbox",
"coco_poygons_to_mask",
]
def mask_to_polygon(
mask, min_score: float = 0.5, approx: float = 0.0, relative: bool = True
):
"""generate polygons from masks
Args:
mask (np.ndarray): a binary mask
min_score (float, optional): [description]. Defaults to 0.5.
approx (float, optional): it approximate the polygons to reduce the number of points. Defaults to 0.0
relative (bool, optional): it the value of the approximation is computed on the relative amount of point or with respect to all the points
Returns:
[type]: [description]
"""
mask = (mask > min_score).astype(np.uint8)
mask = cv2.copyMakeBorder(mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0)
contours, hierarchy = cv2.findContours(
mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE, offset=(-1, -1)
)
polygons = []
for cnt in contours:
if approx > 0:
if relative:
epsilon = approx * cv2.arcLength(cnt, True)
else:
epsilon = approx
approx_poly = cv2.approxPolyDP(cnt, epsilon, True)
else:
approx_poly = cnt
# we need to draw a least a box
if len(approx_poly) >= 4:
approx_flattened = approx_poly.flatten().tolist()
polygons.append(approx_flattened)
return polygons
def polygons_to_mask(polygons, height, width):
"""convert polygons to mask. Filter all the polygons with less than 4 points
Args:
polygons ([type]): [description]
height ([type]): [description]
width ([type]): [description]
Returns:
[type]: a mask of format num_classes, heigth, width
"""
polygons = [polygon for polygon in polygons if len(polygon) >= 8]
if len(polygons) == 0:
return np.zeros((height, width), np.uint8)
rle = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rle)
return mask_util.decode(rle)[:, :]
def area(mask, min_score=0.5):
mask = (mask > min_score).astype(np.uint8)
return int(mask.sum())
def bbox(
polygons,
height,
width,
):
p = mask_util.frPyObjects(polygons, height, width)
p = mask_util.merge(p)
bbox_xywh = mask_util.toBbox(p)
return bbox_xywh
def bbox_from_mask(mask):
"""return the bounding box from the given mask
Args:
mask ([type]): [description]
Returns:
List: a list of format [x_min, y_min, w, h]
"""
pairs = np.argwhere(mask == True)
if len(pairs) == 0:
return None, None, None, None
min_row = min(pairs[:, 0])
max_row = max(pairs[:, 0])
min_col = min(pairs[:, 1])
max_col = max(pairs[:, 1])
w = max_col - min_col
h = max_row - min_row
return [float(min_col), float(min_row), float(w), float(h)]
def coco_poygons_to_mask(segmentations, height, width) -> np.ndarray:
masks = []
for polygons in segmentations:
mask = polygons_to_mask(polygons, height, width)
# mask = np.any(mask, axis=2)
masks.append(mask)
if masks:
masks = np.stack(masks, axis=0)
else:
masks = np.zeros((height, width), dtype=np.uint8)
return masks
| 610 | 0 | 69 |
14fb4704b2aa70285eb22f68b4c42da396f281c5 | 340 | py | Python | configs/local_variables.py | AhmedStohy/EagerMOT | 59569a5f8ab350c82ac4b45216571c780a444fc1 | [
"MIT"
] | null | null | null | configs/local_variables.py | AhmedStohy/EagerMOT | 59569a5f8ab350c82ac4b45216571c780a444fc1 | [
"MIT"
] | null | null | null | configs/local_variables.py | AhmedStohy/EagerMOT | 59569a5f8ab350c82ac4b45216571c780a444fc1 | [
"MIT"
] | null | null | null | MOUNT_PATH = "" # in case you are mounting data storage externally
SPLIT = 'training'
KITTI_WORK_DIR = "./"
KITTI_DATA_DIR = "../input/kitti-3d-object-detection-dataset"
NUSCENES_WORK_DIR = MOUNT_PATH + "/storage/slurm/kimal/eagermot_workspace/nuscenes"
NUSCENES_DATA_DIR = MOUNT_PATH + "/storage/slurm/kimal/datasets_original/nuscenes"
| 37.777778 | 83 | 0.779412 | MOUNT_PATH = "" # in case you are mounting data storage externally
SPLIT = 'training'
KITTI_WORK_DIR = "./"
KITTI_DATA_DIR = "../input/kitti-3d-object-detection-dataset"
NUSCENES_WORK_DIR = MOUNT_PATH + "/storage/slurm/kimal/eagermot_workspace/nuscenes"
NUSCENES_DATA_DIR = MOUNT_PATH + "/storage/slurm/kimal/datasets_original/nuscenes"
| 0 | 0 | 0 |
86159c4cd3091b817a2ba5bb813ab8ddb70374f0 | 12,566 | py | Python | src/prism-fruit/Games-DQL/examples/games/car/networkx/algorithms/isomorphism/matchhelpers.py | kushgrover/apt-vs-dift | 250f64e6c442f6018cab65ec6979d9568a842f57 | [
"MIT"
] | null | null | null | src/prism-fruit/Games-DQL/examples/games/car/networkx/algorithms/isomorphism/matchhelpers.py | kushgrover/apt-vs-dift | 250f64e6c442f6018cab65ec6979d9568a842f57 | [
"MIT"
] | null | null | null | src/prism-fruit/Games-DQL/examples/games/car/networkx/algorithms/isomorphism/matchhelpers.py | kushgrover/apt-vs-dift | 250f64e6c442f6018cab65ec6979d9568a842f57 | [
"MIT"
] | null | null | null | """Functions which help end users define customize node_match and
edge_match functions to use during isomorphism checks.
"""
from itertools import permutations
import types
import networkx as nx
__all__ = ['categorical_node_match',
'categorical_edge_match',
'categorical_multiedge_match',
'numerical_node_match',
'numerical_edge_match',
'numerical_multiedge_match',
'generic_node_match',
'generic_edge_match',
'generic_multiedge_match',
]
def copyfunc(f, name=None):
"""Returns a deepcopy of a function."""
try:
return types.FunctionType(f.func_code, f.func_globals, name or f.name,
f.func_defaults, f.func_closure)
except AttributeError:
return types.FunctionType(f.__code__, f.__globals__, name or f.name,
f.__defaults__, f.__closure__)
def allclose(x, y, rtol=1.0000000000000001e-05, atol=1e-08):
"""Returns True if x and y are sufficiently close, elementwise.
Parameters
----------
rtol : float
The relative error tolerance.
atol : float
The absolute error tolerance.
"""
# assume finite weights, see numpy.allclose() for reference
for xi, yi in zip(x,y):
if not ( abs(xi-yi) <= atol + rtol * abs(yi) ):
return False
return True
def close(x, y, rtol=1.0000000000000001e-05, atol=1e-08):
"""Returns True if x and y are sufficiently close.
Parameters
----------
rtol : float
The relative error tolerance.
atol : float
The absolute error tolerance.
"""
# assume finite weights, see numpy.allclose() for reference
return abs(x-y) <= atol + rtol * abs(y)
categorical_doc = """
Returns a comparison function for a categorical node attribute.
The value(s) of the attr(s) must be hashable and comparable via the ==
operator since they are placed into a set([]) object. If the sets from
G1 and G2 are the same, then the constructed function returns True.
Parameters
----------
attr : string | list
The categorical node attribute to compare, or a list of categorical
node attributes to compare.
default : value | list
The default value for the categorical node attribute, or a list of
default values for the categorical node attributes.
Returns
-------
match : function
The customized, categorical `node_match` function.
Examples
--------
>>> import networkx.algorithms.isomorphism as iso
>>> nm = iso.categorical_node_match('size', 1)
>>> nm = iso.categorical_node_match(['color', 'size'], ['red', 2])
"""
categorical_edge_match = copyfunc(categorical_node_match, 'categorical_edge_match')
# Docstrings for categorical functions.
categorical_node_match.__doc__ = categorical_doc
categorical_edge_match.__doc__ = categorical_doc.replace('node', 'edge')
tmpdoc = categorical_doc.replace('node', 'edge')
tmpdoc = tmpdoc.replace('categorical_edge_match', 'categorical_multiedge_match')
categorical_multiedge_match.__doc__ = tmpdoc
numerical_doc = """
Returns a comparison function for a numerical node attribute.
The value(s) of the attr(s) must be numerical and sortable. If the
sorted list of values from G1 and G2 are the same within some
tolerance, then the constructed function returns True.
Parameters
----------
attr : string | list
The numerical node attribute to compare, or a list of numerical
node attributes to compare.
default : value | list
The default value for the numerical node attribute, or a list of
default values for the numerical node attributes.
rtol : float
The relative error tolerance.
atol : float
The absolute error tolerance.
Returns
-------
match : function
The customized, numerical `node_match` function.
Examples
--------
>>> import networkx.algorithms.isomorphism as iso
>>> nm = iso.numerical_node_match('weight', 1.0)
>>> nm = iso.numerical_node_match(['weight', 'linewidth'], [.25, .5])
"""
numerical_edge_match = copyfunc(numerical_node_match, 'numerical_edge_match')
# Docstrings for numerical functions.
numerical_node_match.__doc__ = numerical_doc
numerical_edge_match.__doc__ = numerical_doc.replace('node', 'edge')
tmpdoc = numerical_doc.replace('node', 'edge')
tmpdoc = tmpdoc.replace('numerical_edge_match', 'numerical_multiedge_match')
numerical_multiedge_match.__doc__ = tmpdoc
generic_doc = """
Returns a comparison function for a generic attribute.
The value(s) of the attr(s) are compared using the specified
operators. If all the attributes are equal, then the constructed
function returns True.
Parameters
----------
attr : string | list
The node attribute to compare, or a list of node attributes
to compare.
default : value | list
The default value for the node attribute, or a list of
default values for the node attributes.
op : callable | list
The operator to use when comparing attribute values, or a list
of operators to use when comparing values for each attribute.
Returns
-------
match : function
The customized, generic `node_match` function.
Examples
--------
>>> from operator import eq
>>> from networkx.algorithms.isomorphism.matchhelpers import close
>>> from networkx.algorithms.isomorphism import generic_node_match
>>> nm = generic_node_match('weight', 1.0, close)
>>> nm = generic_node_match('color', 'red', eq)
>>> nm = generic_node_match(['weight', 'color'], [1.0, 'red'], [close, eq])
"""
generic_edge_match = copyfunc(generic_node_match, 'generic_edge_match')
def generic_multiedge_match(attr, default, op):
"""Returns a comparison function for a generic attribute.
The value(s) of the attr(s) are compared using the specified
operators. If all the attributes are equal, then the constructed
function returns True. Potentially, the constructed edge_match
function can be slow since it must verify that no isomorphism
exists between the multiedges before it returns False.
Parameters
----------
attr : string | list
The edge attribute to compare, or a list of node attributes
to compare.
default : value | list
The default value for the edge attribute, or a list of
default values for the dgeattributes.
op : callable | list
The operator to use when comparing attribute values, or a list
of operators to use when comparing values for each attribute.
Returns
-------
match : function
The customized, generic `edge_match` function.
Examples
--------
>>> from operator import eq
>>> from networkx.algorithms.isomorphism.matchhelpers import close
>>> from networkx.algorithms.isomorphism import generic_node_match
>>> nm = generic_node_match('weight', 1.0, close)
>>> nm = generic_node_match('color', 'red', eq)
>>> nm = generic_node_match(['weight', 'color'],
... [1.0, 'red'],
... [close, eq])
...
"""
# This is slow, but generic.
# We must test every possible isomorphism between the edges.
if nx.utils.is_string_like(attr):
else:
attrs = list(zip(attr, default)) # Python 3
return match
# Docstrings for numerical functions.
generic_node_match.__doc__ = generic_doc
generic_edge_match.__doc__ = generic_doc.replace('node', 'edge')
| 36.213256 | 87 | 0.614993 | """Functions which help end users define customize node_match and
edge_match functions to use during isomorphism checks.
"""
from itertools import permutations
import types
import networkx as nx
__all__ = ['categorical_node_match',
'categorical_edge_match',
'categorical_multiedge_match',
'numerical_node_match',
'numerical_edge_match',
'numerical_multiedge_match',
'generic_node_match',
'generic_edge_match',
'generic_multiedge_match',
]
def copyfunc(f, name=None):
"""Returns a deepcopy of a function."""
try:
return types.FunctionType(f.func_code, f.func_globals, name or f.name,
f.func_defaults, f.func_closure)
except AttributeError:
return types.FunctionType(f.__code__, f.__globals__, name or f.name,
f.__defaults__, f.__closure__)
def allclose(x, y, rtol=1.0000000000000001e-05, atol=1e-08):
"""Returns True if x and y are sufficiently close, elementwise.
Parameters
----------
rtol : float
The relative error tolerance.
atol : float
The absolute error tolerance.
"""
# assume finite weights, see numpy.allclose() for reference
for xi, yi in zip(x,y):
if not ( abs(xi-yi) <= atol + rtol * abs(yi) ):
return False
return True
def close(x, y, rtol=1.0000000000000001e-05, atol=1e-08):
"""Returns True if x and y are sufficiently close.
Parameters
----------
rtol : float
The relative error tolerance.
atol : float
The absolute error tolerance.
"""
# assume finite weights, see numpy.allclose() for reference
return abs(x-y) <= atol + rtol * abs(y)
categorical_doc = """
Returns a comparison function for a categorical node attribute.
The value(s) of the attr(s) must be hashable and comparable via the ==
operator since they are placed into a set([]) object. If the sets from
G1 and G2 are the same, then the constructed function returns True.
Parameters
----------
attr : string | list
The categorical node attribute to compare, or a list of categorical
node attributes to compare.
default : value | list
The default value for the categorical node attribute, or a list of
default values for the categorical node attributes.
Returns
-------
match : function
The customized, categorical `node_match` function.
Examples
--------
>>> import networkx.algorithms.isomorphism as iso
>>> nm = iso.categorical_node_match('size', 1)
>>> nm = iso.categorical_node_match(['color', 'size'], ['red', 2])
"""
def categorical_node_match(attr, default):
    if nx.utils.is_string_like(attr):
        def match(data1, data2):
            # Single attribute: plain equality of the (defaulted) values.
            return data1.get(attr, default) == data2.get(attr, default)
    else:
        pairs = list(zip(attr, default))

        def match(data1, data2):
            # Several attributes: compare the unordered sets of values.
            set1 = {data1.get(name, dflt) for name, dflt in pairs}
            set2 = {data2.get(name, dflt) for name, dflt in pairs}
            return set1 == set2
    return match
categorical_edge_match = copyfunc(categorical_node_match, 'categorical_edge_match')
def categorical_multiedge_match(attr, default):
    if nx.utils.is_string_like(attr):
        def match(datasets1, datasets2):
            # Compare the multiedge attribute values as unordered sets.
            set1 = {data.get(attr, default) for data in datasets1.values()}
            set2 = {data.get(attr, default) for data in datasets2.values()}
            return set1 == set2
    else:
        pairs = list(zip(attr, default))

        def match(datasets1, datasets2):
            # Each multiedge contributes one tuple of its attribute values;
            # the unordered sets of those tuples must agree.
            set1 = {tuple(data.get(name, dflt) for name, dflt in pairs)
                    for data in datasets1.values()}
            set2 = {tuple(data.get(name, dflt) for name, dflt in pairs)
                    for data in datasets2.values()}
            return set1 == set2
    return match
# Docstrings for categorical functions.
categorical_node_match.__doc__ = categorical_doc
categorical_edge_match.__doc__ = categorical_doc.replace('node', 'edge')
tmpdoc = categorical_doc.replace('node', 'edge')
tmpdoc = tmpdoc.replace('categorical_edge_match', 'categorical_multiedge_match')
categorical_multiedge_match.__doc__ = tmpdoc
numerical_doc = """
Returns a comparison function for a numerical node attribute.
The value(s) of the attr(s) must be numerical and sortable. If the
sorted list of values from G1 and G2 are the same within some
tolerance, then the constructed function returns True.
Parameters
----------
attr : string | list
The numerical node attribute to compare, or a list of numerical
node attributes to compare.
default : value | list
The default value for the numerical node attribute, or a list of
default values for the numerical node attributes.
rtol : float
The relative error tolerance.
atol : float
The absolute error tolerance.
Returns
-------
match : function
The customized, numerical `node_match` function.
Examples
--------
>>> import networkx.algorithms.isomorphism as iso
>>> nm = iso.numerical_node_match('weight', 1.0)
>>> nm = iso.numerical_node_match(['weight', 'linewidth'], [.25, .5])
"""
def numerical_node_match(attr, default, rtol=1.0000000000000001e-05, atol=1e-08):
    if nx.utils.is_string_like(attr):
        def match(data1, data2):
            # Single attribute: scalar comparison within tolerance.
            value1 = data1.get(attr, default)
            value2 = data2.get(attr, default)
            return close(value1, value2, rtol=rtol, atol=atol)
    else:
        pairs = list(zip(attr, default))

        def match(data1, data2):
            # Several attributes: elementwise comparison within tolerance.
            values1 = [data1.get(name, dflt) for name, dflt in pairs]
            values2 = [data2.get(name, dflt) for name, dflt in pairs]
            return allclose(values1, values2, rtol=rtol, atol=atol)
    return match
numerical_edge_match = copyfunc(numerical_node_match, 'numerical_edge_match')
def numerical_multiedge_match(attr, default, rtol=1.0000000000000001e-05, atol=1e-08):
    if nx.utils.is_string_like(attr):
        def match(datasets1, datasets2):
            # Order-insensitive: sort each side's values, then compare
            # elementwise within tolerance.
            values1 = sorted([data.get(attr, default) for data in datasets1.values()])
            values2 = sorted([data.get(attr, default) for data in datasets2.values()])
            return allclose(values1, values2, rtol=rtol, atol=atol)
    else:
        attrs = list(zip(attr, default))  # Python 3
        def match(datasets1, datasets2):
            # Each multiedge contributes a tuple of its attribute values.
            values1 = []
            for data1 in datasets1.values():
                x = tuple( data1.get(attr, d) for attr, d in attrs )
                values1.append(x)
            values2 = []
            for data2 in datasets2.values():
                x = tuple( data2.get(attr, d) for attr, d in attrs )
                values2.append(x)
            # Sort both tuple lists so the comparison is order-insensitive.
            values1.sort()
            values2.sort()
            for xi, yi in zip(values1, values2):
                if not allclose(xi, yi, rtol=rtol, atol=atol):
                    return False
            else:
                # for-else: runs when the loop completes without returning,
                # i.e. every pair (including the empty case) matched.
                return True
    return match
# Docstrings for numerical functions.
numerical_node_match.__doc__ = numerical_doc
numerical_edge_match.__doc__ = numerical_doc.replace('node', 'edge')
tmpdoc = numerical_doc.replace('node', 'edge')
tmpdoc = tmpdoc.replace('numerical_edge_match', 'numerical_multiedge_match')
numerical_multiedge_match.__doc__ = tmpdoc
generic_doc = """
Returns a comparison function for a generic attribute.
The value(s) of the attr(s) are compared using the specified
operators. If all the attributes are equal, then the constructed
function returns True.
Parameters
----------
attr : string | list
The node attribute to compare, or a list of node attributes
to compare.
default : value | list
The default value for the node attribute, or a list of
default values for the node attributes.
op : callable | list
The operator to use when comparing attribute values, or a list
of operators to use when comparing values for each attribute.
Returns
-------
match : function
The customized, generic `node_match` function.
Examples
--------
>>> from operator import eq
>>> from networkx.algorithms.isomorphism.matchhelpers import close
>>> from networkx.algorithms.isomorphism import generic_node_match
>>> nm = generic_node_match('weight', 1.0, close)
>>> nm = generic_node_match('color', 'red', eq)
>>> nm = generic_node_match(['weight', 'color'], [1.0, 'red'], [close, eq])
"""
def generic_node_match(attr, default, op):
    if nx.utils.is_string_like(attr):
        def match(data1, data2):
            # Single attribute: delegate to the supplied operator.
            return op(data1.get(attr, default), data2.get(attr, default))
    else:
        triples = list(zip(attr, default, op))

        def match(data1, data2):
            # Every attribute must satisfy its own comparison operator.
            return all(compare(data1.get(name, dflt), data2.get(name, dflt))
                       for name, dflt, compare in triples)
    return match
generic_edge_match = copyfunc(generic_node_match, 'generic_edge_match')
def generic_multiedge_match(attr, default, op):
    """Returns a comparison function for a generic attribute.

    The value(s) of the attr(s) are compared using the specified
    operators. If all the attributes are equal, then the constructed
    function returns True. Potentially, the constructed edge_match
    function can be slow since it must verify that no isomorphism
    exists between the multiedges before it returns False.

    Parameters
    ----------
    attr : string | list
        The edge attribute to compare, or a list of edge attributes
        to compare.
    default : value | list
        The default value for the edge attribute, or a list of
        default values for the edge attributes.
    op : callable | list
        The operator to use when comparing attribute values, or a list
        of operators to use when comparing values for each attribute.

    Returns
    -------
    match : function
        The customized, generic `edge_match` function.

    Examples
    --------
    >>> from operator import eq
    >>> from networkx.algorithms.isomorphism.matchhelpers import close
    >>> from networkx.algorithms.isomorphism import generic_node_match
    >>> nm = generic_node_match('weight', 1.0, close)
    >>> nm = generic_node_match('color', 'red', eq)
    >>> nm = generic_node_match(['weight', 'color'],
    ...                         [1.0, 'red'],
    ...                         [close, eq])
    ...
    """
    # This is slow, but generic.
    # We must test every possible isomorphism between the edges.
    if nx.utils.is_string_like(attr):
        def match(datasets1, datasets2):
            values1 = [data.get(attr, default) for data in datasets1.values()]
            values2 = [data.get(attr, default) for data in datasets2.values()]
            for vals2 in permutations(values2):
                for xi, yi in zip(values1, vals2):
                    if not op(xi, yi):
                        # This is not an isomorphism, go to next permutation.
                        break
                else:
                    # Then we found an isomorphism.
                    return True
            else:
                # Then there are no isomorphisms between the multiedges.
                return False
    else:
        attrs = list(zip(attr, default))  # Python 3

        def match(datasets1, datasets2):
            values1 = []
            for data1 in datasets1.values():
                x = tuple(data1.get(attr, d) for attr, d in attrs)
                values1.append(x)
            values2 = []
            for data2 in datasets2.values():
                x = tuple(data2.get(attr, d) for attr, d in attrs)
                values2.append(x)
            for vals2 in permutations(values2):
                for xi, yi in zip(values1, vals2):
                    # BUG FIX: apply each attribute's own operator to the
                    # corresponding entries of the two value tuples.  The
                    # original zipped whole tuples against the operator list
                    # (pairing operators with edges, not attributes) and
                    # returned False on the first mismatch instead of trying
                    # the remaining permutations of the multiedges.
                    if not all(operator(x, y)
                               for x, y, operator in zip(xi, yi, op)):
                        # Not an isomorphism; try the next permutation.
                        break
                else:
                    # Every multiedge pair matched under this permutation.
                    return True
            else:
                # No permutation yields an isomorphism.
                return False
    return match
# Docstrings for generic functions.
generic_node_match.__doc__ = generic_doc
generic_edge_match.__doc__ = generic_doc.replace('node', 'edge')
| 4,777 | 0 | 187 |
0fecc9d9f31301ad24bcca0b4ce46445f90eacb9 | 2,471 | py | Python | xyzspaces/exceptions.py | isaac-philip/xyz-spaces-python | 67b06efdc4a76934c54c1a828087a27cad26aa5d | [
"Apache-2.0"
] | 1 | 2021-02-20T10:14:36.000Z | 2021-02-20T10:14:36.000Z | xyzspaces/exceptions.py | GhostUser/xyz-spaces-python | 646aaa74a180871318f3e9aa12acc8e25a1f3b33 | [
"Apache-2.0"
] | null | null | null | xyzspaces/exceptions.py | GhostUser/xyz-spaces-python | 646aaa74a180871318f3e9aa12acc8e25a1f3b33 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2019-2020 HERE Europe B.V.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# License-Filename: LICENSE
"""This module defines API exceptions."""
class AuthenticationError(Exception):
    """Raised when authentication fails.

    Pure marker type: it carries no state beyond Exception's args.
    """
class ApiError(Exception):
    """Exception raised for API HTTP response status codes not in [200...300).

    The exception value will be the response object returned by
    :mod:`requests`, which provides access to all its attributes, e.g.
    :attr:`status_code`, :attr:`reason` and :attr:`text`, etc.

    Example:

    >>> try:
    >>>     api = HubApi(credentials="MY-XYZ-TOKEN")
    >>>     api.get("/hub/nope").json()
    >>> except ApiError as e:
    >>>     resp = e.value.args[0]
    >>>     if resp.status_code == 404 and resp.reason == "Not Found":
    >>>         ...
    """

    def __str__(self):
        """Summarize the offending HTTP response.

        Joins the response's status code, reason and text content with
        commas.
        """
        response = self.args[0]
        parts = (response.status_code, response.reason, response.text)
        return ", ".join(str(part) for part in parts)
class TooManyRequestsException(Exception):
    """Exception raised for API HTTP response status code 429.

    This is a dedicated exception to be used with the `backoff` package,
    because it requires a specific exception class.

    The exception value will be the response object returned by
    :mod:`requests`, which provides access to all its attributes, e.g.
    :attr:`status_code`, :attr:`reason` and :attr:`text`, etc.
    """

    def __str__(self):
        """Summarize the offending HTTP response.

        Joins the response's status code, reason and text content with
        commas.
        """
        response = self.args[0]
        parts = (response.status_code, response.reason, response.text)
        return ", ".join(str(part) for part in parts)
| 33.391892 | 80 | 0.673007 | # Copyright (C) 2019-2020 HERE Europe B.V.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# License-Filename: LICENSE
"""This module defines API exceptions."""
class AuthenticationError(Exception):
    """Exception raised when authentication fails."""
    pass  # pure marker type; no state beyond Exception's args
class ApiError(Exception):
    """Exception raised for API HTTP response status codes not in [200...300).
    The exception value will be the response object returned by :mod:`requests`
    which provides access to all its attributes, eg. :attr:`status_code`,
    :attr:`reason` and :attr:`text`, etc.
    Example:
    >>> try:
    >>>     api = HubApi(credentials="MY-XYZ-TOKEN")
    >>>     api.get("/hub/nope").json()
    >>> except ApiError as e:
    >>>     resp = e.value.args[0]
    >>>     if resp.status_code == 404 and resp.reason == "Not Found":
    >>>         ...
    """
    def __str__(self):
        """Return a string from the HTTP response causing the exception.
        The string simply lists the response's status code, reason and text
        content, separated with commas.
        """
        resp = self.args[0]
        return f"{resp.status_code}, {resp.reason}, {resp.text}"
class TooManyRequestsException(Exception):
    """Exception raised for API HTTP response status code 429.
    This is a dedicated exception to be used with the `backoff` package, because
    it requires a specific exception class.
    The exception value will be the response object returned by :mod:`requests`
    which provides access to all its attributes, eg. :attr:`status_code`,
    :attr:`reason` and :attr:`text`, etc.
    """
    def __str__(self):
        """Return a string from the HTTP response causing the exception.
        The string simply lists the response's status code, reason and text
        content, separated with commas.
        """
        resp = self.args[0]
        return f"{resp.status_code}, {resp.reason}, {resp.text}"
| 0 | 0 | 0 |
4f6b026bc1bf782ffd0f6ba994d47f95685a42c3 | 1,482 | py | Python | utils/parse_action.py | liu4lin/UniRE | fb31801161758e50762f9a70820b71aefb5c5515 | [
"MIT"
] | 87 | 2021-07-12T02:35:50.000Z | 2022-03-31T12:44:49.000Z | utils/parse_action.py | liu4lin/UniRE | fb31801161758e50762f9a70820b71aefb5c5515 | [
"MIT"
] | 10 | 2021-07-29T01:35:50.000Z | 2022-03-03T04:05:42.000Z | utils/parse_action.py | liu4lin/UniRE | fb31801161758e50762f9a70820b71aefb5c5515 | [
"MIT"
] | 12 | 2021-07-18T09:06:07.000Z | 2022-03-31T12:44:51.000Z | import configargparse
import logging
import os
class StoreLoggingLevelAction(configargparse.Action):
    """Argparse action that converts a level-name string into a logging level.

    Unknown values are stored unchanged so the parser (or a later check)
    can reject them.
    """

    LEVELS = {name: getattr(logging, name)
              for name in ('CRITICAL', 'ERROR', 'WARNING', 'INFO',
                           'DEBUG', 'NOTSET')}

    # Accept both the symbolic names and their numeric string forms.
    CHOICES = list(LEVELS.keys()) + [str(v) for v in LEVELS.values()]

    def __call__(self, parser, namespace, value, option_string=None):
        """Store the numeric level for *value*, or *value* itself if unknown."""
        setattr(namespace, self.dest,
                StoreLoggingLevelAction.LEVELS.get(value, value))
class CheckPathAction(configargparse.Action):
    """Argparse action that stores a path, creating its parent directory
    when it does not exist yet.
    """

    def __call__(self, parser, namespace, value, option_string=None):
        """Ensure the parent directory of *value* exists, then store *value*."""
        parent = os.path.dirname(value)
        if not os.path.exists(parent):
            os.makedirs(parent)
        setattr(namespace, self.dest, value)
| 31.531915 | 79 | 0.645074 | import configargparse
import logging
import os
class StoreLoggingLevelAction(configargparse.Action):
    """This class converts string into logging level
    """
    LEVELS = {
        'CRITICAL': logging.CRITICAL,
        'ERROR': logging.ERROR,
        'WARNING': logging.WARNING,
        'INFO': logging.INFO,
        'DEBUG': logging.DEBUG,
        'NOTSET': logging.NOTSET
    }
    # Accept both the symbolic names and their numeric string forms.
    CHOICES = list(LEVELS.keys()) + [str(_) for _ in LEVELS.values()]

    # NOTE: a former __init__ override only forwarded its arguments to
    # configargparse.Action unchanged; it was redundant and has been removed.
    # The inherited constructor behaves identically.

    def __call__(self, parser, namespace, value, option_string=None):
        """This function gets the key 'value' in the LEVELS, or just uses value
        """
        level = StoreLoggingLevelAction.LEVELS.get(value, value)
        setattr(namespace, self.dest, level)
class CheckPathAction(configargparse.Action):
    """This class checks file path, if not exits, then create dir(file)
    """

    # NOTE: a former __init__ override only forwarded its arguments to
    # configargparse.Action unchanged; it was redundant and has been removed.
    # The inherited constructor behaves identically.

    def __call__(self, parser, namespace, value, option_string=None):
        """This function checks file path, if not exits, then create dir(file)
        """
        parent_path = os.path.dirname(value)
        if not os.path.exists(parent_path):
            os.makedirs(parent_path)
        setattr(namespace, self.dest, value)
| 218 | 0 | 54 |
6d88e4a24d0a05e8b054112c932ce362d81cabbe | 7,811 | py | Python | neutron_lbaas_dashboard/dashboards/project/loadbalancersv2/workflows/update_lb.py | mail2nsrajesh/neutron-lbaas-dashboard | 789e7cea2d949960114754614a63c4a4ef2f0c11 | [
"Apache-2.0"
] | null | null | null | neutron_lbaas_dashboard/dashboards/project/loadbalancersv2/workflows/update_lb.py | mail2nsrajesh/neutron-lbaas-dashboard | 789e7cea2d949960114754614a63c4a4ef2f0c11 | [
"Apache-2.0"
] | null | null | null | neutron_lbaas_dashboard/dashboards/project/loadbalancersv2/workflows/update_lb.py | mail2nsrajesh/neutron-lbaas-dashboard | 789e7cea2d949960114754614a63c4a4ef2f0c11 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015, eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext as _
from neutron_lbaas_dashboard import api
from create_lb import * # noqa
INDEX_URL = "horizon:projects:loadbalancersv2:index"
READ_ONLY = {'readonly': 'readonly'}
| 40.682292 | 79 | 0.547433 | # Copyright 2015, eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext as _
from neutron_lbaas_dashboard import api
from create_lb import * # noqa
INDEX_URL = "horizon:projects:loadbalancersv2:index"
READ_ONLY = {'readonly': 'readonly'}
class UpdateLBDetailsAction(SetLBDetailsAction):
    """Edit-workflow action showing the LB details as read-only fields.

    Re-declares the address/name/port fields of SetLBDetailsAction with
    read-only widgets so they are displayed but cannot be changed during
    an update.
    """
    address = forms.CharField(widget=forms.widgets.Input(attrs=READ_ONLY),
                              label=_('IP'),
                              required=True)
    name = forms.CharField(widget=forms.widgets.Input(attrs=READ_ONLY),
                           label=_('Name'),
                           required=False)
    port = forms.IntegerField(widget=forms.widgets.Input(attrs=READ_ONLY),
                              label=_("LB Port"),
                              required=False,
                              min_value=1,
                              max_value=65535,
                              help_text=_("LB Port on which "
                                          "LB is listening."))
    # Presumably read by the base action/templates to switch them into
    # update mode — confirm in create_lb.
    is_update = True

    def __init__(self, request, context, *args, **kwargs):
        super(UpdateLBDetailsAction, self).__init__(request, context, *args,
                                                    **kwargs)
        # Pre-populate the read-only address field from the workflow context.
        self.fields['address'].initial = context['address']

    class Meta(object):
        name = _("LB Details")
        help_text_template = ("project/loadbalancersv2/"
                              "_launch_lb_help.html")
class UpdateLBDetails(SetLBDetails):
    """Workflow step pairing UpdateLBDetailsAction with the update template."""
    action_class = UpdateLBDetailsAction
    template_name = "project/loadbalancersv2/update_lb_step.html"
class UpdateSSLAction(UploadSSLAction):
    """SSL step action for the edit workflow.

    Adds an opt-in checkbox so an existing certificate is only replaced
    when the user explicitly asks for it.
    """
    update_cert = forms.BooleanField(label='Update SSL Certificate',
                                     required=False,
                                     widget=forms.CheckboxInput())
    # NOTE(review): the cross-field certificate validation below is disabled
    # (commented out); confirm whether it should be restored or deleted.
    # def clean(self):
    #     cleaned_data = super(UploadSSLAction, self).clean()
    #     data = self.data
    #     protocol = data.get('source_type')
    #     if protocol == 'HTTPS':
    #         update_cert = data.get('update_cert')
    #         if update_cert:
    #             use_common_cert = data.get('use_common_cert')
    #             if not use_common_cert:
    #                 # check to see if ssl cert is provided
    #                 cert_name = data.get('cert_name')
    #                 cert = data.get('cert')
    #                 private_key = data.get('private_key')
    #
    #                 if (not cert_name) \
    #                         or (not cert) \
    #                         or (not private_key):
    #                     raise forms.ValidationError(
    #                         _('Please provide all certificate parameters.'))
    #     return cleaned_data

    class Meta(object):
        name = _("SSL Certificate")
        help_text_template = ("project/loadbalancersv2/_ssl_cert_help.html")
class UpdateSSLStep(UploadSSLStep):
    """Workflow step that carries the SSL certificate inputs for an update."""
    action_class = UpdateSSLAction
    contributes = ("cert_name", "cert", "private_key",
                   "chain_cert", 'use_common_cert', "update_cert")
    template_name = "project/loadbalancersv2/update_ssl_cert.html"

    def contribute(self, data, context):
        """Copy the SSL-related POST fields into the workflow context.

        Missing fields default to '' so later steps can rely on the keys
        being present.
        """
        post = self.workflow.request.POST
        context['cert_name'] = post['cert_name'] if 'cert_name' in post else ''
        context['cert'] = post['cert'] if 'cert' in post else ''
        context['private_key'] = post[
            'private_key'] if 'private_key' in post else ''
        context['chain_cert'] = post[
            'chain_cert'] if 'chain_cert' in post else ''
        context['use_common_cert'] = post[
            'use_common_cert'] if 'use_common_cert' in post else ''
        context['update_cert'] = post[
            'update_cert'] if 'update_cert' in post else ''
        return context
class UpdateInstancesAction(SelectInstancesAction):
    """Instance-selection action that pre-selects the LB's current members."""

    def __init__(self, request, *args, **kwargs):
        super(UpdateInstancesAction, self).__init__(request, *args, **kwargs)
        err_msg = _('Unable to retrieve members list. '
                    'Please try again later.')
        pre_selectd = []
        try:
            # 'selected_members' comes from the workflow context (args[0]);
            # fall back to an empty selection and report on any failure.
            pre_selectd = args[0]['selected_members']
        except Exception:
            exceptions.handle(request, err_msg)
        self.fields[self.get_member_field_name('member')].initial = pre_selectd

    class Meta(object):
        name = _("Instances")
        slug = "select_instances"
class UpdateInstancesStep(SelectInstancesStep):
    """Workflow step contributing member/monitor data for an update."""
    action_class = UpdateInstancesAction
    depends_on = ("loadbalancer_id",)
    contributes = ("wanted_members", "selected_members",
                   "loadbalancer_id", "instances_details",
                   "monitor", "instance_port")
class UpdateLoadBalancer(LaunchLoadBalancer):
    """Workflow that edits an existing load balancer through api.lbui."""
    slug = "update_loadbalancer"
    name = _("Edit Load Balancer")
    finalize_button_name = _("Update")
    success_message = _('Updated load balancer "%s".')
    failure_message = _('Unable to modify load balancer "%s".')
    success_url = "horizon:project:loadbalancersv2:index"
    default_steps = (UpdateLBDetails,
                     UpdateSSLStep,
                     SelectMonitorStep,
                     UpdateInstancesStep)
    attrs = {'data-help-text': 'Updating LB may take a few minutes'}

    def format_status_message(self, message):
        """Fill the success/failure message template with the LB name."""
        return message % self.context.get('name', 'unknown load balancer')

    def handle(self, request, context):
        """Apply the collected workflow context via api.lbui.vip_create.

        Returns True on success; reports the error and returns False
        otherwise.
        """
        try:
            protocol = context['source_type']
            api.lbui.vip_create(request,
                                update=True,
                                loadbalancer_id=context['loadbalancer_id'],
                                address=context['address'],
                                name=context['name'],
                                description=context['description'],
                                lb_method=context['lb_method'],
                                monitor=context['monitor'],
                                protocol=protocol,
                                port=context['port'],
                                instance_port=context['instance_port'],
                                wanted_members=context['wanted_members'],
                                instances_details=context['instances_details'],
                                cert_name=context['cert_name'],
                                cert=context['cert'],
                                private_key=context['private_key'],
                                chain_cert=context['chain_cert'],
                                # Checkbox fields arrive as 'on' when checked.
                                use_common_cert=True if context[
                                    'use_common_cert'] == 'on' else False,
                                update_cert=True if context[
                                    'update_cert'] == 'on' else False,
                                interval=context['interval'],
                                timeout=context['timeout'],
                                send=context['send'],
                                receive=context['receive'],
                                )
            return True
        except Exception as e:
            # NOTE(review): e.message is a Python 2 idiom and is gone on
            # Python 3 — confirm the target runtime.
            exceptions.handle(request, e.message, ignore=False)
            return False
82b8b737fabe070987bf8bb0715e662f67642ee9 | 40,874 | py | Python | BootloaderCorePkg/Tools/CfgDataTool.py | liux2085/slimbootloader | 24a16ac20eb86b64135843074f9a85d6e60ab54a | [
"BSD-2-Clause-NetBSD",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"MIT",
"BSD-2-Clause-Patent"
] | 1 | 2021-12-30T17:05:53.000Z | 2021-12-30T17:05:53.000Z | BootloaderCorePkg/Tools/CfgDataTool.py | liux2085/slimbootloader | 24a16ac20eb86b64135843074f9a85d6e60ab54a | [
"BSD-2-Clause-NetBSD",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"MIT",
"BSD-2-Clause-Patent"
] | null | null | null | BootloaderCorePkg/Tools/CfgDataTool.py | liux2085/slimbootloader | 24a16ac20eb86b64135843074f9a85d6e60ab54a | [
"BSD-2-Clause-NetBSD",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"MIT",
"BSD-2-Clause-Patent"
] | null | null | null | ## @ CfgDataTool.py
#
# Copyright (c) 2017 - 2020, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import sys
import collections
sys.dont_write_bytecode = True
from IfwiUtility import *
from CommonUtility import *
CFGDATA_INT_GUID = b'\xD0\x6C\x6E\x01\x34\x48\x7E\x4C\xBC\xFE\x41\xDF\xB8\x8A\x6A\x6D'
if __name__ == '__main__':
sys.exit(Main())
| 42.844864 | 261 | 0.589103 | ## @ CfgDataTool.py
#
# Copyright (c) 2017 - 2020, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import sys
import collections
sys.dont_write_bytecode = True
from IfwiUtility import *
from CommonUtility import *
CFGDATA_INT_GUID = b'\xD0\x6C\x6E\x01\x34\x48\x7E\x4C\xBC\xFE\x41\xDF\xB8\x8A\x6A\x6D'
class CCfgData:
    # Debug / dump bit flags consumed by Parse() and Dump().
    DEBUG_FLAG_PARSE = (1 << 0)
    DUMP_FLAG_INPUT = (1 << 0)
    DUMP_FLAG_OUTPUT = (1 << 1)
    DUMP_FLAG_VERBOSE = (1 << 7)

    # On-disk layout of the 'CFGD' blob header ('Structure' and the c_* types
    # come from the wildcard imports at the top of the file).
    class CDATA_BLOB_HEADER(Structure):
        ATTR_SIGNED = 1 << 0
        ATTR_MERGED = 1 << 7
        _pack_ = 1
        _fields_ = [
            ('Signature', ARRAY(c_char, 4)),
            ('HeaderLength', c_uint8),
            ('Attribute', c_uint8),
            ('Svn', c_uint8),
            ('Reserved', ARRAY(c_char, 1)),
            ('UsedLength', c_uint32),
            ('TotalLength', c_uint32),
        ]

    # One condition DWORD attached to a config item; in merged blobs Parse()
    # treats it as a platform-ID bit mask.
    class CDATA_COND(Structure):
        _pack_ = 1
        _fields_ = [('Value', c_uint32)]

    # Per-item header: condition count, length in DWORDs, type flags,
    # version and 12-bit tag.
    class CDATA_HEADER(Structure):
        FLAG_ITEM_TYPE_NORMAL = 0
        FLAG_ITEM_TYPE_ARRAY = 1
        FLAG_ITEM_TYPE_REFER = 2
        FLAG_ITEM_TYPE_MASK = 3
        _pack_ = 1
        _fields_ = [
            ('ConditionNum', c_uint32, 2),
            ('Length', c_uint32, 10),
            ('Flags', c_uint32, 4),
            ('Version', c_uint32, 4),
            ('Tag', c_uint32, 12),
        ]

    # Payload of the PLATFORM_ID tag (0x0F0).
    class CDATA_PLATFORM_ID(Structure):
        TAG = 0x0F0
        _pack_ = 1
        _fields_ = [('PlatformId', c_uint16), ('Reserved', c_uint16)]

    # Payload of a 'refer' item: points at another platform's tag data.
    class CDATA_REFERENCE(Structure):
        _pack_ = 1
        _fields_ = [('PlatformId', c_uint16), ('Tag', c_uint16, 12), ('IsInternal', c_uint16, 1), ('Reserved', c_uint16, 3)]

    # Header of an array-type item; followed by a validity bit mask and then
    # ItemCount items of ItemSize bytes each (see ProcessCfgArray).
    class CDATA_ITEM_ARRAY(Structure):
        _pack_ = 1
        _fields_ = [
            ('HeaderSize', c_uint8),
            ('BasePlatformId', c_uint8),
            ('ItemSize', c_uint16),
            ('ItemCount', c_uint16),
            ('ItemIdBitOff', c_uint8),
            ('ItemIdBitLen', c_uint8),
            ('ItemValidBitOff', c_uint8),
            ('ItemUnused', c_uint8),
        ]
    def __init__ (self):
        # Debug flags default to off; the '0 &' keeps the flag name visible
        # for easy re-enabling.
        self._Debug = 0 & CCfgData.DEBUG_FLAG_PARSE
        self.PlatformId = None
        # file path -> (item list, blob header, built-in flag), in parse order.
        self.CfgDataBase = collections.OrderedDict()
        # file path -> platform ID taken from the file's PLATFORM_ID tag.
        self.CfgDataPid = {}
        # Merged item list (dumped under the 'MERGED:' heading).
        self.CfgDataItems = []
        # Per-array-tag base item data and owning platform ID
        # (populated by ProcessCfgArray).
        self.CfgDataDataArrayDict = {}
        self.CfgDataArrayPidDict = {}
def NormalizePid (self, PlatformId):
if (PlatformId & ~0x1F):
raise Exception(
"Invalid platform ID 0x%04X, should be in range from 0 to 31!\n"
% PlatformId)
return PlatformId
    def DumpTags (self, Flag, CfgItemList):
        """Print a one-line summary (and optional hex dump) per config item.

        Each CfgItem appears to be (CfgData, mask, ..., built-in flag) where
        CfgData is (header bytes, condition bytes, payload bytes) — confirm
        against the (not shown here) code that builds the list.
        """
        Offset = sizeof(CCfgData.CDATA_BLOB_HEADER)
        for CfgItem in CfgItemList:
            CfgData = CfgItem[0]
            CfgTagHdr = CCfgData.CDATA_HEADER.from_buffer(CfgData[0])
            DataCond = CCfgData.CDATA_COND.from_buffer(CfgData[1])
            PrintData = True
            IsArray = False
            ExtraInfo = []
            if CfgItem[3]:
                ExtraInfo.append ('Built-In')
            # Array items: report base platform (when not a base itself)
            # and the item geometry.
            if (CfgTagHdr.Flags & CCfgData.CDATA_HEADER.FLAG_ITEM_TYPE_MASK) == \
                CCfgData.CDATA_HEADER.FLAG_ITEM_TYPE_ARRAY:
                ArrayInfo = CCfgData.CDATA_ITEM_ARRAY.from_buffer(CfgData[2])
                if ArrayInfo.BasePlatformId < 0x80:
                    ExtraInfo.append ('BasePid:0x%02X' % ArrayInfo.BasePlatformId)
                ExtraInfo.append ('Array:%d*%d' % (ArrayInfo.ItemSize, ArrayInfo.ItemCount))
                IsArray = True
            # Reference items: show the referred platform/tag and skip the
            # payload dump (the data lives elsewhere).
            if (CfgTagHdr.Flags & CCfgData.CDATA_HEADER.FLAG_ITEM_TYPE_MASK) == \
                CCfgData.CDATA_HEADER.FLAG_ITEM_TYPE_REFER:
                Reference = CCfgData.CDATA_REFERENCE.from_buffer(CfgData[2])
                if Reference.IsInternal:
                    Internal = '[Int]'
                else:
                    Internal = ''
                ExtraInfo.append ('ReferPid:0x%02X%s' % (Reference.PlatformId, Internal))
                if Reference.Tag != CfgTagHdr.Tag:
                    ExtraInfo.append ('ReferTag:0x%03X' % Reference.Tag)
                PrintData = False
            Extra = ', '.join (ExtraInfo)
            if Extra:
                Extra = '(%s)' % Extra
            print (" TAG %03X: MSK=%08X LEN=%04X OFF=%04X %s" %
                   (CfgTagHdr.Tag, CfgItem[1], CfgTagHdr.Length * 4, Offset, Extra))
            if Flag & CCfgData.DUMP_FLAG_VERBOSE:
                if PrintData:
                    if not IsArray:
                        print_bytes (CfgData[2], 5)
                    else:
                        # NOTE(review): this branch reuses 'Offset' as a local
                        # running offset for the hex dump, clobbering the
                        # per-item OFF tracking above — confirm intended.
                        Offset = 0
                        DataOffset = sizeof(CCfgData.CDATA_ITEM_ARRAY)
                        BitMaskLen = ArrayInfo.HeaderSize - DataOffset
                        print(" ARRAY HEADER:")
                        print_bytes (CfgData[2][:DataOffset], 5, Offset)
                        Offset += DataOffset
                        print(" ARRAY MASK:")
                        print_bytes (CfgData[2][DataOffset:DataOffset+BitMaskLen], 5, Offset)
                        Offset += BitMaskLen
                        if ArrayInfo.ItemCount > 0:
                            print(" ARRAY DATA:")
                            ArrayData = CfgData[2][ArrayInfo.HeaderSize:]
                            DataOffset = 0
                            for Idx in range (ArrayInfo.ItemCount):
                                print_bytes (ArrayData[DataOffset:DataOffset + ArrayInfo.ItemSize], 5, Offset)
                                DataOffset += ArrayInfo.ItemSize
                                Offset += ArrayInfo.ItemSize
            Offset += CfgTagHdr.Length * 4
    def Dump (self, Flag, Input = True):
        """Print the parsed and/or merged config data.

        Flag is a bit mask of the DUMP_FLAG_* values; DUMP_FLAG_VERBOSE is
        forwarded to DumpTags for hex dumps.  Input selects the 'INPUT' vs
        'OUTPUT' heading of the per-file section.
        """
        if Flag & CCfgData.DUMP_FLAG_INPUT:
            print("%sPUT:" % ("IN" if Input else "OUT"))
            for CfgFile, (CfgItemList, CfgBlobHeader, IsBuiltIn) in list(self.CfgDataBase.items()):
                # Files without a PLATFORM_ID entry default to PID 0.
                if CfgFile in self.CfgDataPid:
                    Pid = self.CfgDataPid[CfgFile]
                else:
                    Pid = 0
                # '*' marks binaries flagged as built-in at parse time.
                BuiltIn = '*' if IsBuiltIn else ''
                print("PID=%04X LEN=%04X (%s%s)" % (Pid, CfgBlobHeader.UsedLength, CfgFile, BuiltIn))
                self.DumpTags (Flag, CfgItemList)
        if Flag & CCfgData.DUMP_FLAG_OUTPUT:
            print("MERGED:")
            self.DumpTags (Flag, self.CfgDataItems)
        print('')
def ProcessCfgArray (self, Header, Data, PidMask, CfgBinFile):
ArrayInfo = CCfgData.CDATA_ITEM_ARRAY.from_buffer(Data)
ActualLen = ArrayInfo.ItemCount * ArrayInfo.ItemSize + ArrayInfo.HeaderSize + \
sizeof(Header) + sizeof(CCfgData.CDATA_COND) * Header.ConditionNum
if ArrayInfo.ItemSize % 4 > 0:
raise Exception(
"Each config item size must be DWORD aligned in TAG '%03X'!" %
Header.Tag)
if Header.Length != ActualLen // 4:
raise Exception(
"Invalid array item count/size field in TAG '0x%03X'!" %
Header.Tag)
BitMaskLen = ArrayInfo.HeaderSize - sizeof (ArrayInfo)
ByteWidth = (ArrayInfo.ItemCount + 7) // 8
if ByteWidth < 2:
ByteWidth = 2
if BitMaskLen < ByteWidth:
raise Exception(
"Bit mask array is too small in TAG '0x%03X', at least %d bytes required!"
% (Header.Tag, ByteWidth))
BitMaskDat = bytearray('1' * ArrayInfo.ItemCount + '0' *
(BitMaskLen * 8 - ArrayInfo.ItemCount), 'utf-8')
ItemValidByteOffset = ArrayInfo.ItemValidBitOff // 8
ItemValidByteMask = 1 << (ArrayInfo.ItemValidBitOff & (8 - 1))
DataOff = ArrayInfo.HeaderSize
ArrayTagKey = '%03X' % Header.Tag
if ArrayInfo.BasePlatformId == 0x80:
# The bit mask has been processed for base table
if ArrayTagKey in self.CfgDataDataArrayDict:
raise Exception(
"Base configuration already exists for TAG '0x%s'!" % ArrayTagKey)
Pid = (PidMask&-PidMask).bit_length() - 1
if Pid < 0:
raise Exception("Invalid condition value '%08X'!" % PidMask)
self.CfgDataDataArrayDict[ArrayTagKey] = []
self.CfgDataArrayPidDict[ArrayTagKey] = Pid
while DataOff < len(Data):
self.CfgDataDataArrayDict[ArrayTagKey].append(Data[DataOff:DataOff +
ArrayInfo.ItemSize])
DataOff += ArrayInfo.ItemSize
elif ArrayInfo.BasePlatformId == 0xFF:
# The bit mask has not been processed yet for base table
if ItemValidByteOffset >= ArrayInfo.ItemSize:
raise Exception(
"Item valid byte offset (%d) must be less than the item size (%d) in TAG '0x%03X'!"
% (ItemValidByteOffset, ArrayInfo.ItemSize, Header.Tag))
if ArrayTagKey in self.CfgDataDataArrayDict:
if self.CfgDataPid[CfgBinFile] == self.CfgDataArrayPidDict[ArrayTagKey]:
ArrayInfo.BasePlatformId = 0x80
else:
ArrayInfo.BasePlatformId = self.CfgDataArrayPidDict[ArrayTagKey]
else:
# Mark it as a base config item
ArrayInfo.BasePlatformId = 0x80
self.CfgDataDataArrayDict[ArrayTagKey] = []
self.CfgDataArrayPidDict[ArrayTagKey] = self.CfgDataPid[CfgBinFile]
# Check the invliad flag and remove those items
ItemDict = {}
RemovedItem = 0
Index = 0
DataLen = len(Data)
while DataOff < DataLen:
Remove = False
if ArrayInfo.BasePlatformId == 0x80:
# Check ItemID to make sure it is unique
ItemId = get_bits_from_bytes (Data[DataOff:DataOff + ArrayInfo.ItemSize], ArrayInfo.ItemIdBitOff, ArrayInfo.ItemIdBitLen)
if ItemId not in ItemDict.keys():
ItemDict[ItemId] = 1
else:
raise Exception("ItemId '0x%X' is not unique indicated by ItemIdBitOff/ItemIdBitLen in array header !" % ItemId)
# It is a base table, remove marker and assemble mask
if Data[DataOff + ItemValidByteOffset] & ItemValidByteMask:
Data[DataOff + ItemValidByteOffset] = Data[
DataOff + ItemValidByteOffset] ^ ItemValidByteMask
BitMaskDat[Index] = ord('0')
self.CfgDataDataArrayDict[ArrayTagKey].append(Data[DataOff:DataOff +
ArrayInfo.ItemSize])
else:
if ArrayTagKey in self.CfgDataDataArrayDict:
if Data[DataOff:DataOff + ArrayInfo.ItemSize] != self.CfgDataDataArrayDict[ArrayTagKey][Index]:
BitMaskDat[Index] = ord('0')
else:
Remove = True
if Data[DataOff + ItemValidByteOffset] & ItemValidByteMask:
Remove = True
if Remove:
Data[DataOff:] = Data[DataOff + ArrayInfo.ItemSize:] + b'\x00' * ArrayInfo.ItemSize
DataLen -= ArrayInfo.ItemSize
RemovedItem += 1
else:
DataOff += ArrayInfo.ItemSize
Index += 1
ArrayInfo.ItemCount -= RemovedItem
Header.Length -= (RemovedItem * ArrayInfo.ItemSize) // 4
# Update mask
BitWidth = BitMaskLen * 8
MaskHexStr = '{0:0{w}x}'.format(int(BitMaskDat.decode()[::-1], 2), w=BitWidth // 4)
BinData = bytearray.fromhex(MaskHexStr)[::-1]
Offset = sizeof (CCfgData.CDATA_ITEM_ARRAY)
Data[Offset:Offset + BitMaskLen] = BinData
return DataLen
def Parse (self, CfgBinFile):
    """
    Parse one CFGDATA binary blob and register its items in self.CfgDataBase.

    :param CfgBinFile: path to the blob file; a trailing '*' marks it as a
                       built-in (internal) CFGDATA blob.
    :raises Exception: on a bad 'CFGD' signature, a missing PlatformId tag
                       (unmerged blobs only), or a malformed blob layout.
    """
    # A trailing '*' flags the blob as built-in; strip it to get the real path.
    if CfgBinFile.endswith('*'):
        IsBuiltIn = True
        CfgBinFile = CfgBinFile[:-1]
    else:
        IsBuiltIn = False
    if self._Debug & CCfgData.DEBUG_FLAG_PARSE:
        MiscStr = ' built-in' if IsBuiltIn else ''
        print ("Parsing%s config binary '%s'" % (MiscStr, CfgBinFile))
    with open(CfgBinFile, "rb") as Fin:
        FileData = bytearray(Fin.read())
    CfgBlobHeader = CCfgData.CDATA_BLOB_HEADER.from_buffer(FileData)
    if CfgBlobHeader.Signature != b'CFGD':
        raise Exception("Invalid config binary file '%s' !" % CfgBinFile)
    # A merged blob already carries per-item platform-ID masks in its
    # condition words; an unmerged blob applies to one platform ID only.
    IsMergedCfg = True if CfgBlobHeader.Attribute & CCfgData.CDATA_BLOB_HEADER.ATTR_MERGED else False
    CfgItemList = []
    Length = min(len(FileData), CfgBlobHeader.UsedLength)
    # Find Platform ID first
    if not IsMergedCfg:
        Offset = sizeof(CCfgData.CDATA_BLOB_HEADER)
        while Offset < Length:
            CfgTagHdr = CCfgData.CDATA_HEADER.from_buffer(FileData, Offset)
            # The Length field is expressed in dwords.
            CfgDlen = CfgTagHdr.Length * 4
            if CfgTagHdr.Tag == CCfgData.CDATA_PLATFORM_ID.TAG:
                # Skip the tag header and condition words to reach the payload.
                NextOff = Offset + sizeof(CCfgData.CDATA_HEADER) + CfgTagHdr.ConditionNum * sizeof(CCfgData.CDATA_COND)
                DataBin = FileData[NextOff:Offset + CfgDlen]
                Pid = CCfgData.CDATA_PLATFORM_ID.from_buffer(DataBin)
                self.CfgDataPid[CfgBinFile] = self.NormalizePid(Pid.PlatformId)
                break
            Offset += CfgDlen
        if CfgBinFile not in self.CfgDataPid:
            raise Exception("TAG PlatformId cannot be found in file '%s'!" % CfgBinFile)
    # Add config tags
    Offset = sizeof(CCfgData.CDATA_BLOB_HEADER)
    while Offset < Length:
        CfgTagHdr = CCfgData.CDATA_HEADER.from_buffer(FileData, Offset)
        NextOff = Offset + sizeof(CCfgData.CDATA_HEADER)
        # Collect the raw condition words that follow the tag header.
        CondBin = bytearray()
        for Idx in range(0, CfgTagHdr.ConditionNum):
            CondBin.extend(FileData[NextOff:NextOff + sizeof(
                CCfgData.CDATA_COND)])
            NextOff += sizeof(CCfgData.CDATA_COND)
        CfgDlen = CfgTagHdr.Length * 4
        DataBin = FileData[NextOff:Offset + CfgDlen]
        DataCond = CCfgData.CDATA_COND.from_buffer(CondBin)
        if IsMergedCfg:
            # Merged blob: the condition word already holds the PID mask.
            PidMask = DataCond.Value
        else:
            # Unmerged blob: derive the mask from this file's platform ID
            # and clear the stored condition value.
            PidMask = 1 << self.CfgDataPid[CfgBinFile]
            DataCond.Value = 0x00000000
        if self._Debug & CCfgData.DEBUG_FLAG_PARSE:
            print ("  TAG %03X: OFF=%04X PIDMSK=%08X LEN=%04X" %
                   (CfgTagHdr.Tag, Offset, PidMask, CfgDlen))
        if (CfgTagHdr.Flags & CCfgData.CDATA_HEADER.FLAG_ITEM_TYPE_MASK) == \
            CCfgData.CDATA_HEADER.FLAG_ITEM_TYPE_ARRAY:
            # Array items get de-duplicated against their base table; the
            # payload may shrink, so ProcessCfgArray returns the new length.
            DataLen = self.ProcessCfgArray(CfgTagHdr, DataBin, PidMask, CfgBinFile)
        else:
            DataLen = len(DataBin)
        # Entry layout: [(header, conditions, data), mask, original mask, built-in flag]
        CfgItemList.append([(bytearray(CfgTagHdr), CondBin, DataBin[:DataLen]), PidMask, PidMask, IsBuiltIn])
        Offset += CfgDlen
    if (Offset != Length) or (Length % 4 != 0):
        raise Exception("Invalid CFGDATA binary blob format for file '%s' !" % CfgBinFile)
    self.CfgDataBase[CfgBinFile] = (CfgItemList, CfgBlobHeader, IsBuiltIn)
def Merge(self, CfgItem, PidMask):
    """
    Merge one parsed config item into self.CfgDataItems, de-duplicating
    identical items by OR-ing their platform-ID masks together.

    :param CfgItem: [(header, conditions, data), mask, original mask, built-in flag]
    :param PidMask: platform-ID bit mask this item applies to
    """
    CfgData = CfgItem[0]
    # Try to find a match on TAG and DATA
    Idx = next((i for i, v in enumerate(self.CfgDataItems) if v[0] == CfgData), -1)
    if Idx >= 0:
        # Found one. Change the MASK to reuse the existing data
        self.CfgDataItems[Idx][1] |= PidMask
        return
    Append = True
    CfgTagHdr = CCfgData.CDATA_HEADER.from_buffer(CfgData[0])
    if CfgTagHdr.Tag == CCfgData.CDATA_PLATFORM_ID.TAG:
        if not self.PlatformId:
            # First PlatformId tag seen: keep a single neutral copy (the
            # condition and stored ID are zeroed); drop later ones.
            self.PlatformId = CCfgData.CDATA_PLATFORM_ID.TAG
            DataCond = CCfgData.CDATA_COND.from_buffer(CfgData[1])
            DataCond.Value = 0x00000000
            Pid = CCfgData.CDATA_PLATFORM_ID.from_buffer(CfgData[2])
            Pid.PlatformId = 0
        else:
            Append = False
    if Append:
        OrgPidMask = CfgItem[1]
        IsBuiltIn = CfgItem[3]
        CfgTag = 0
        NewPidMask = PidMask | OrgPidMask
        if len(CfgData[2]) > 4:
            # Try to find a match on DATA only
            Idx = next((i for i, v in enumerate(self.CfgDataItems) if v[0][2] == CfgData[2]), -1)
            if Idx >= 0:
                # Same payload exists under another tag: remember that tag so
                # Create() can emit a REFER item instead of duplicating data.
                RefCfgTagHdr = CCfgData.CDATA_HEADER.from_buffer(self.CfgDataItems[Idx][0][0])
                CfgTag = RefCfgTagHdr.Tag
                NewPidMask = self.CfgDataItems[Idx][2]
                # NOTE(review): '\x00' is a str, not bytes — presumably just a
                # placeholder payload for a REFER item; confirm it is never
                # concatenated with bytearrays downstream.
                CfgData = (CfgData[0], CfgData[1], '\x00')
                IsBuiltIn = IsBuiltIn or self.CfgDataItems[Idx][3]
        self.CfgDataItems.append ([CfgData, NewPidMask, OrgPidMask, IsBuiltIn, CfgTag])
def Create(self, CfgOutFile, PlatformIdStr):
    """
    Merge all previously parsed CFGDATA blobs into one output blob file.

    :param CfgOutFile:    path of the merged blob to write
    :param PlatformIdStr: optional platform ID override (decimal or
                          0x-prefixed hex); '' keeps the parsed IDs
    :raises Exception: if the platform ID is out of the valid range
    """
    self.CfgDataItems = []
    self.PlatformId = None
    if PlatformIdStr:
        if PlatformIdStr.startswith('0x'):
            PlatformId = int(PlatformIdStr, 16)
        else:
            PlatformId = int(PlatformIdStr)
    else:
        PlatformId = -1
    # Valid IDs are 0..31 (one bit of the 32-bit mask) or the 0xFF wildcard.
    if PlatformId >= 32 and PlatformId != 0xFF:
        # Fix: original message misspelled "platfrom".
        raise Exception("Invalid platform ID '%d' is specified !" % PlatformId)
    # Merge every item of every parsed blob, de-duplicating by PID mask.
    for CfgFile, (CfgItemList, CfgBlobHeader, IsBuiltIn) in list(self.CfgDataBase.items()):
        for CfgItem in CfgItemList:
            if CfgBlobHeader.Attribute & CCfgData.CDATA_BLOB_HEADER.ATTR_MERGED:
                PidMask = 0
            else:
                PidMask = (1 << self.CfgDataPid[CfgFile]) & 0xFFFFFFFF
            self.Merge(CfgItem, PidMask)
    # CfgGrp: (HEADER, CONDITION, DATA)
    PidMaskUpdate = []  # NOTE(review): unused; kept for compatibility
    BinDat = bytearray()
    for CfgItem in self.CfgDataItems:
        IsBuiltIn = CfgItem[3]
        ReferTag = CfgItem[4]
        TagHdr, CondBin, DataBin = CfgItem[0]
        CfgDataHdr = CCfgData.CDATA_HEADER.from_buffer(TagHdr)
        if CfgDataHdr.Tag == CCfgData.CDATA_PLATFORM_ID.TAG:
            # PlatformId tag: optionally force the given platform ID with an
            # always-true condition, then emit it verbatim.
            if PlatformId >=0:
                print("Set platform ID to %d" % PlatformId)
                DataCond = CCfgData.CDATA_COND.from_buffer(CondBin)
                DataCond.Value = 0xFFFFFFFF
                Pid = CCfgData.CDATA_PLATFORM_ID.from_buffer(DataBin)
                Pid.PlatformId = PlatformId
            BinDat.extend (TagHdr + CondBin + DataBin)
            continue
        CfgDataCond = CCfgData.CDATA_COND.from_buffer(CondBin)
        CfgDataCond.Value = CfgItem[1]
        if IsBuiltIn or ReferTag > 0:
            NewPidMask = CfgItem[1]
            OldPidMask = CfgItem[2]
            if ((NewPidMask != OldPidMask) and (OldPidMask != 0)) or (ReferTag > 0):
                # A built-in pidmask cannot be modified directly.
                # Build a special refer tag to reuse this item
                if ReferTag > 0:
                    PidMask = OldPidMask
                    BasePid = NewPidMask.bit_length() - 1
                else:
                    PidMask = NewPidMask ^ OldPidMask
                    BasePid = OldPidMask.bit_length() - 1
                CfgTagHdr = CCfgData.CDATA_HEADER()
                CfgDataCond = CCfgData.CDATA_COND()
                CfgDataRefer = CCfgData.CDATA_REFERENCE()
                CfgTagHdr.ConditionNum = 1
                CfgTagHdr.Tag = CfgDataHdr.Tag
                CfgTagHdr.Flags = CCfgData.CDATA_HEADER.FLAG_ITEM_TYPE_REFER
                # Length is in dwords; round the byte size up.
                CfgTagHdr.Length = (sizeof(CfgDataRefer) + sizeof(CfgTagHdr) + \
                                    sizeof(CfgDataCond) * CfgTagHdr.ConditionNum + 3) // 4
                CfgDataCond.Value = PidMask
                CfgDataRefer.PlatformId = BasePid
                CfgDataRefer.IsInternal = 1 if IsBuiltIn else 0
                CfgTag = CfgDataHdr.Tag if ReferTag == 0 else ReferTag
                CfgDataRefer.Tag = CfgTag
                BinDat.extend (bytearray(CfgTagHdr) + bytearray(CfgDataCond) + bytearray(CfgDataRefer))
        else:
            BinDat.extend (TagHdr + CondBin + DataBin)
    # Build the blob header and write header + payload to the output file.
    CfgdHdr = CCfgData.CDATA_BLOB_HEADER()
    CfgdHdr.Signature = b'CFGD'
    CfgdHdr.Attribute = CCfgData.CDATA_BLOB_HEADER.ATTR_MERGED
    CfgdHdr.HeaderLength = sizeof(CfgdHdr)
    CfgdHdr.UsedLength = len(BinDat) + CfgdHdr.HeaderLength
    CfgdHdr.TotalLength = CfgdHdr.UsedLength
    with open(CfgOutFile, "wb") as Fout:
        Fout.write (CfgdHdr)
        Fout.write (BinDat)
def GetCfgDataByTag (CfgData, Pid, Tag, IsInternal = False):
    """
    Look up a config item by tag for a given platform ID.

    :param CfgData:    [internal CCfgData, external CCfgData] pair
    :param Pid:        platform ID to match against each item's PID mask
    :param Tag:        config tag value to find
    :param IsInternal: search the internal database (True) or external (False)
    :return: (TagHdr, CondBin, DataBin) for the resolved item; array items are
             rebased onto their base table, REFER items are chased into the
             internal database.
    :raises Exception: if the tag cannot be found in either database
    """
    Idx = 0 if IsInternal else 1
    CfgFile, (CfgItemList, CfgBlobHdr, IsBuiltIn) = list(CfgData[Idx].CfgDataBase.items())[0]
    for CfgItem in CfgItemList:
        TagHdr, CondBin, DataBin = CfgItem[0]
        CfgTagHdr = CCfgData.CDATA_HEADER.from_buffer(TagHdr)
        if CfgTagHdr.Tag != Tag:
            continue
        # Skip items whose PID mask excludes this platform; the PlatformId
        # tag itself applies to every platform.
        if (CfgTagHdr.Tag != CCfgData.CDATA_PLATFORM_ID.TAG) and (CfgItem[1] & (1 << Pid) == 0):
            continue
        if (CfgTagHdr.Flags & CCfgData.CDATA_HEADER.FLAG_ITEM_TYPE_MASK) == CCfgData.CDATA_HEADER.FLAG_ITEM_TYPE_ARRAY:
            ArrayInfo = CCfgData.CDATA_ITEM_ARRAY.from_buffer(DataBin)
            Offset = ArrayInfo.HeaderSize
            MaskOff = sizeof(ArrayInfo)
            MaskLen = Offset - MaskOff
            if ArrayInfo.BasePlatformId < 0x80:
                # Delta table: rebuild the full table by overlaying this
                # platform's rows onto the base platform's table.
                RefPid = ArrayInfo.BasePlatformId
                TagHdr, CondBin, BaseDataBin = GetCfgDataByTag (CfgData, RefPid, Tag, True)
                CurrArrayInfo = CCfgData.CDATA_ITEM_ARRAY.from_buffer(DataBin)
                BaseArrayInfo = CCfgData.CDATA_ITEM_ARRAY.from_buffer(BaseDataBin)
                NewDataBin = bytearray (BaseDataBin)
                # Copy entries from base table
                ItemDict = {}
                ItemLen = BaseArrayInfo.ItemSize
                for Idx1 in range (BaseArrayInfo.ItemCount):
                    Off1 = Offset + Idx1 * ItemLen
                    BaseItem = BaseDataBin[Off1 : Off1 + ItemLen]
                    ItemId = get_bits_from_bytes (BaseItem, BaseArrayInfo.ItemIdBitOff, BaseArrayInfo.ItemIdBitLen)
                    NewItem = NewDataBin[Off1 : Off1 + ItemLen]
                    if DataBin[MaskOff + (Idx1 >> 3)] & (1 << (Idx1 & 7)):
                        # Mask bit set: this base row is invalid for the PID.
                        set_bits_to_bytes (NewItem, BaseArrayInfo.ItemValidBitOff, 1, 0)
                    else:
                        ItemDict[ItemId] = Idx1
                        set_bits_to_bytes (NewItem, BaseArrayInfo.ItemValidBitOff, 1, 1)
                    NewDataBin[Off1 : Off1 + ItemLen] = NewItem
                # Overlay the platform-specific rows by matching item IDs.
                for Idx2 in range (CurrArrayInfo.ItemCount):
                    Off2 = Offset + Idx2 * ItemLen
                    CurrItem = DataBin[Off2 : Off2 + ItemLen]
                    ItemId = get_bits_from_bytes (CurrItem, BaseArrayInfo.ItemIdBitOff, BaseArrayInfo.ItemIdBitLen)
                    Idx1 = ItemDict[ItemId]
                    Off1 = Offset + Idx1 * ItemLen
                    NewDataBin[Off1 : Off1 + ItemLen] = CurrItem
            elif ArrayInfo.BasePlatformId == 0x80:
                NewDataBin = bytearray (DataBin)
            # Zero masks and base pid
            NewDataBin[MaskOff : MaskOff + MaskLen] = b'\x00' * MaskLen
            NewArrayInfo = CCfgData.CDATA_ITEM_ARRAY.from_buffer(NewDataBin)
            NewArrayInfo.BasePlatformId = 0xFF
            DataBin = NewDataBin
        elif (CfgTagHdr.Flags & CCfgData.CDATA_HEADER.FLAG_ITEM_TYPE_MASK) == CCfgData.CDATA_HEADER.FLAG_ITEM_TYPE_REFER:
            # REFER item: the real payload lives under another tag in the
            # internal database; chase it.
            Refer = CCfgData.CDATA_REFERENCE.from_buffer(DataBin)
            TagHdrInt, CondBinInt, DataBin = GetCfgDataByTag (CfgData, Refer.PlatformId, Refer.Tag, True)
        return TagHdr, CondBin, DataBin
    if Idx == 1:
        # Try to find it in internal database
        return GetCfgDataByTag (CfgData, Pid, Tag, True)
    else:
        # Fix: the original formatted two placeholders with a single value
        # ('% Tag'), which raised TypeError instead of the intended message.
        raise Exception ('Could not find TAG:0x%03X for PID:0x%02X in internal or external CFGDATA !' % (Tag, Pid))
def CmdExport(Args):
    """
    Export per-board external CFGDATA binaries from a BIOS/IFWI image.

    Locates the external CFGDATA and Stage1B components in the image,
    extracts the internal CFGDATA embedded in Stage1B, and writes one
    '<name>.bin' per platform ID found in the external blob.

    :param Args: parsed argparse namespace for the 'export' sub-command
    :return: 0-like None on success, negative error code on failure
    """
    # Parse optional "id:name" board-name overrides into a lookup table.
    BrdNameDict = {}
    if Args.board_name_list:
        Parts = Args.board_name_list.split(',')
        for Part in Parts:
            Info = Part.split(':')
            if len(Info) == 2:
                BrdNameDict[int(Info[0],0)] = Info[1].strip()
    OutputDir = Args.output_dir
    if not os.path.exists(OutputDir):
        os.mkdir (OutputDir)
    # Locate CFGDATA in BIOS region
    IfwiBin = bytearray (get_file_data(Args.ifwi_file))
    IfwiParser = IFWI_PARSER ()
    Ifwi = IfwiParser.parse_ifwi_binary (IfwiBin)
    Cfgs = IfwiParser.find_components(Ifwi, 'CNFG')
    if not Cfgs:
        IsBpdt = True
        Cfgs = IfwiParser.find_components(Ifwi, 'CFGD')
    else:
        IsBpdt = False
        PartFmt = '/RD%%d/'  # NOTE(review): unused; kept for compatibility
    if len(Cfgs) == 0:
        # Fix: original message misspelled "Conld".
        print ("ERROR: Could not find external CFGDATA !")
        return -1
    # Adjust path to point to proper boot partition
    Bp = int(Args.boot_part)
    CfgdPath = ''
    for Cfgd in Cfgs:
        CfgdPath = IfwiParser.get_component_path (Cfgd)
        if IsBpdt:
            PartStr = '/BP%d/' % Bp
        else:
            PartStr = '/RD%d/' % Bp
        if PartStr in CfgdPath:
            break
    # For non-redundant layout, just use the 1st CFGD found
    if CfgdPath == '':
        # NOTE(review): this branch looks unreachable (Cfgs is non-empty, so
        # the loop always sets CfgdPath), and it assigns a component object,
        # not a path string — confirm the intent.
        print ('INFO: No redundant boot partition found !')
        CfgdPath = Cfgs[0]
    # Locate Stage1B image
    Stage1bName = 'IBB' if IsBpdt else 'SG1B'
    Stage1bPath = '/'.join(CfgdPath.split('/')[:-1]) + '/%s' % Stage1bName
    Stage1bComp = IfwiParser.locate_component (Ifwi, Stage1bPath)
    if not Stage1bComp:
        print ('ERROR: Failed to extract external STAGE1B !')
        return -2
    # Decompress Stage1B image if required
    Stage1bBin = IfwiBin[Stage1bComp.offset : Stage1bComp.offset + Stage1bComp.length]
    if Stage1bBin[0:2] == b'LZ':
        if Args.tool_dir == '':
            print ("ERROR: '-t' is required to specify compress tool directory !")
            return -3
        Stage1bLz = OutputDir + '/Stage1b.lz'
        Stage1bFd = OutputDir + '/Stage1b.fd'
        gen_file_from_object (Stage1bLz, Stage1bBin)
        decompress (Stage1bLz, Stage1bFd, tool_dir = Args.tool_dir)
        Stage1bBin = bytearray (get_file_data (Stage1bFd))
    # Locate and generate internal CFGDATA
    Offset = Stage1bBin.find (CFGDATA_INT_GUID)
    if Offset < 0:
        print ('ERROR: Failed to locate internal CFGDATA !')
        return -4
    # The blob header follows the GUID marker at a fixed 0x1C offset.
    Offset += 0x1C
    CfgBlobHeader = CCfgData.CDATA_BLOB_HEADER.from_buffer(Stage1bBin, Offset)
    if CfgBlobHeader.Signature != b'CFGD':
        print ('ERROR: Invalid internal CFGDATA format !')
        return -5
    CfgDataInt = Stage1bBin[Offset : Offset + CfgBlobHeader.TotalLength]
    CfgBinIntFile = OutputDir + '/CfgDataInt.bin'
    gen_file_from_object (CfgBinIntFile, CfgDataInt)
    # Generate external CFGDATA
    # NOTE(review): 'Cfgd' is the leaked loop variable from the search above,
    # i.e. the matched (or last) CFGD component — confirm this is intended.
    CfgBinExtFile = OutputDir + '/CfgDataExt.bin'
    gen_file_from_object (CfgBinExtFile, IfwiBin[Cfgd.offset : Cfgd.offset + Cfgd.length])
    # Parse CFGDATA blobs
    CfgDataInt = CCfgData()
    CfgDataInt.Parse(CfgBinIntFile)
    CfgDataExt = CCfgData()
    CfgDataExt.Parse(CfgBinExtFile)
    # Generate CfgDataDef blob
    CfgFile, (CfgIntItemList, CfgIntBlobHdr, IsBuiltIn) = list(CfgDataInt.CfgDataBase.items())[0]
    CfgDef = bytearray(CfgIntBlobHdr)
    TagDict = collections.OrderedDict()
    for Idx, CfgIntItem in enumerate(CfgIntItemList):
        TagHdr, CondBin, DataBin = CfgIntItem[0]
        CfgTagHdr = CCfgData.CDATA_HEADER.from_buffer(TagHdr)
        if CfgTagHdr.Tag in TagDict.keys():
            # A repeated tag marks the start of a second platform's set.
            break
        else:
            TagDict[CfgTagHdr.Tag] = Idx
            CfgDef.extend(TagHdr + CondBin + DataBin)
    CfgDefLen = len(CfgDef)
    CfgDefBlobHdr = CCfgData.CDATA_BLOB_HEADER.from_buffer(bytearray(CfgIntBlobHdr))
    CfgDefBlobHdr.UsedLength = CfgDefLen
    CfgDefBlobHdr.TotalLength = CfgDefLen
    CfgDefBlobHdr.Attribute = 0
    # Collect available platform ID
    PidMask = 0
    CfgFile, (CfgExtItemList, CfgExtBlobHdr, IsBuiltIn) = list(CfgDataExt.CfgDataBase.items())[0]
    for CfgItem in CfgExtItemList:
        PidMask |= CfgItem[1]
    # Export board specific external CFGDATA
    for Pid in range(32):
        if (1 << Pid) & PidMask == 0:
            continue
        print ('Exporting external CFGDATA for PlatformID = 0x%02X' % Pid)
        CfgDataBrd = bytearray (CfgDefBlobHdr)
        CfgData = [CfgDataInt, CfgDataExt]
        for Tag in TagDict.keys():
            TagHdr, CondBin, DataBin = GetCfgDataByTag (CfgData, Pid, Tag)
            CfgTagHdr = CCfgData.CDATA_HEADER.from_buffer(TagHdr)
            # Neutralize the condition and reuse the internal tag header.
            CondBin = b'\x00' * sizeof(CCfgData.CDATA_COND)
            TagHdr = bytearray (CfgIntItemList[TagDict[Tag]][0][0])
            NewData = bytearray (DataBin)
            if CfgTagHdr.Tag == CCfgData.CDATA_PLATFORM_ID.TAG:
                PidCfg = CCfgData.CDATA_PLATFORM_ID.from_buffer(NewData)
                PidCfg.PlatformId = Pid
            CfgDataBrd.extend (TagHdr + CondBin + NewData)
        if Pid in BrdNameDict.keys():
            Ext = BrdNameDict[Pid]
        else:
            Ext = 'CfgDataExt_%02X' % Pid
        gen_file_from_object (OutputDir + '/%s.bin' % Ext, CfgDataBrd)
def CmdView(Args):
    """Parse the given CFGDATA binaries and dump their contents."""
    Database = CCfgData()
    for Path in Args.cfg_in_file:
        Database.Parse(Path)
    DumpFlag = CCfgData.DUMP_FLAG_INPUT
    if Args.dbg_lvl > 2:
        DumpFlag = DumpFlag | CCfgData.DUMP_FLAG_VERBOSE
    Database.Dump(DumpFlag)
def CmdMerge(Args):
    """Merge multiple CFGDATA binaries into one output blob, with optional
    input/output dumps controlled by the debug level."""
    def DumpFlagFor(BaseFlag):
        # Fold the verbose bit into the dump flag at debug level 3+.
        if Args.dbg_lvl > 2:
            return BaseFlag | CCfgData.DUMP_FLAG_VERBOSE
        return BaseFlag

    CfgData = CCfgData()
    for Path in Args.cfg_in_file:
        CfgData.Parse(Path)
    if Args.dbg_lvl > 0:
        CfgData.Dump (DumpFlagFor(CCfgData.DUMP_FLAG_INPUT))
    CfgData.Create(Args.cfg_out_file, Args.platform_id)
    if Args.dbg_lvl > 1:
        CfgData.Dump (DumpFlagFor(CCfgData.DUMP_FLAG_OUTPUT))
    if Args.dbg_lvl > 0:
        # Re-parse the merged output to verify and show the final contents.
        CfgData = CCfgData()
        CfgData.Parse(Args.cfg_out_file)
        CfgData.Dump(DumpFlagFor(CCfgData.DUMP_FLAG_INPUT), False)
    print ("%d config binary files were merged successfully!" % len(Args.cfg_in_file))
def CmdSign(Args):
    """
    Sign a CFGDATA binary with the given private key.

    Marks the blob header as signed, stamps the SVN, writes a temporary
    copy, signs it with rsa_sign_file, and removes the temporary file.

    :param Args: parsed argparse namespace for the 'sign' sub-command
    :raises Exception: if the input blob signature is not 'CFGD'
    """
    # 'with' guarantees the handle is closed even if reading fails.
    with open (Args.cfg_in_file, 'rb') as Fd:
        FileData = bytearray (Fd.read ())
    CfgBlobHeader = CCfgData.CDATA_BLOB_HEADER.from_buffer(FileData)
    if CfgBlobHeader.Signature != b'CFGD':
        # Fix: the original referenced an undefined name 'CfgDataFile' here,
        # which raised NameError instead of the intended message.
        raise Exception("Invalid config binary file '%s' !" % Args.cfg_in_file)
    CfgBlobHeader.Attribute |= CCfgData.CDATA_BLOB_HEADER.ATTR_SIGNED
    CfgBlobHeader.Svn = Args.svn
    TmpFile = Args.cfg_in_file + '.tmp'
    with open (TmpFile, 'wb') as Fd:
        Fd.write (FileData)
    if Args.hash_alg == 'AUTO':
        # Choose the hash type from the key length.
        Args.hash_alg = adjust_hash_type(Args.cfg_pri_key)
    rsa_sign_file (Args.cfg_pri_key, None, Args.hash_alg, Args.sign_scheme, TmpFile, Args.cfg_out_file, True, True)
    if os.path.exists(TmpFile):
        os.remove(TmpFile)
    print ("Config file was signed successfully!")
def CmdExtract(Args):
    """
    Extract a single config item (by tag) from the input CFGDATA binaries,
    print it, and optionally save it to a file.

    :param Args: parsed argparse namespace for the 'extract' sub-command
    """
    Found = False
    # The tag may be given in decimal or 0x-prefixed hex.
    TagNo = int(Args.cfg_tag, 0)
    CfgData = CCfgData()
    for CfgBinFile in Args.cfg_in_file:
        CfgData.Parse(CfgBinFile)
    for CfgFile, (CfgItemList, CfgBlobHeader, IsBuiltIn) in list(CfgData.CfgDataBase.items()):
        for CfgItem in CfgItemList:
            TagHdr, CondBin, DataBin = CfgItem[0]
            CfgTagHdr = CCfgData.CDATA_HEADER.from_buffer(TagHdr)
            if CfgTagHdr.Tag == TagNo:
                Found = True
                break
        if Found:
            break
    if Found:
        # TagHdr/CondBin/DataBin still hold the matching item from the
        # loop above (deliberate use of the leaked loop variables).
        BinDat = bytearray()
        BinDat.extend (TagHdr + CondBin + DataBin)
        print_bytes (BinDat)
        if Args.cfg_out_file != None:
            with open(Args.cfg_out_file, "wb") as Fout:
                Fout.write (BinDat)
            print ("Config data (Tag=0x%X) was saved to a file - %s" % (TagNo, Args.cfg_out_file))
    else:
        print ("Config data (Tag=0x%X) was not found!" % TagNo)
def CmdReplace(Args):
    """
    Replace the CFGDATA blob(s) embedded in an IFWI/BIOS image with the
    given merged CFGDATA binary.

    :param Args: parsed argparse namespace for the 'replace' sub-command
    :raises Exception: on missing input files, unmerged/invalid CFGDATA,
                       or a CFGDATA region mismatch
    """
    IfwiParser = IFWI_PARSER ()
    CfgFile = Args.cfg_in_file
    if not os.path.exists(CfgFile):
        raise Exception("Cannot find CFGDATA binary file '%s'" % CfgFile)
    IfwiImgIn = Args.ifwi_in_file
    if not os.path.exists(IfwiImgIn):
        raise Exception("Cannot find IFWI image file '%s'" % IfwiImgIn)
    # Get cfg binary ('with' closes the handle even on error)
    with open(CfgFile, 'rb') as Fh:
        CfgBins = bytearray(Fh.read())
    CfgHdr = CCfgData.CDATA_BLOB_HEADER.from_buffer(CfgBins)
    if CfgHdr.Signature != b'CFGD':
        raise Exception("Invalid CFGDATA image file '%s'" % CfgFile)
    if not CfgHdr.Attribute & CCfgData.CDATA_BLOB_HEADER.ATTR_MERGED:
        raise Exception("CFGDATA image file '%s' is not merged yet!" % CfgFile)
    # Get flash image
    with open(IfwiImgIn, 'rb') as Fh:
        BiosBins = bytearray(Fh.read())
    CfgLen = len(CfgBins)
    # Check to see if the IFWI is
    if Args.pdr:
        # CFGDATA in PDR region
        RegionName = 'pdr'
    else:
        # Assume CFGDATA in BIOS region
        RegionName = 'bios'
    CompList = []
    # NOTE(review): StartOff/EndOff stay 0 throughout, so the PDR branch
    # below rejects any blob with a nonzero TotalLength — confirm intent.
    StartOff = 0
    EndOff = 0
    if IfwiParser.is_ifwi_image(BiosBins):
        #Check if it has BPDT
        SpiDesc = SPI_DESCRIPTOR.from_buffer(BiosBins, 0)
        # NOTE(review): Comp is only checked for emptiness, never used.
        Comp = IfwiParser.find_ifwi_region(SpiDesc, RegionName)
        if len(Comp) < 1:
            raise Exception("Cannot not find CFGDATA in SPI flash region '%s' !" % RegionName)
    if not CompList:
        if RegionName == 'bios':
            Ifwi = IfwiParser.parse_ifwi_binary (BiosBins)
            cfgs = IfwiParser.find_components(Ifwi, 'CNFG')
            if not cfgs:
                cfgs = IfwiParser.find_components(Ifwi, 'CFGD')
            for cfgd in cfgs:
                print (IfwiParser.get_component_path (cfgd))
                CompList.append((cfgd.offset, cfgd.length))
        else:
            # For PDR region, always assume CFGDATA starts from offset 0
            CfgBlobHeader = CCfgData.CDATA_BLOB_HEADER.from_buffer(BiosBins[StartOff:])
            if CfgBlobHeader.Signature != b'CFGD':
                raise Exception("Cannot not find CFGDATA in SPI flash PDR region!")
            if CfgBlobHeader.TotalLength > EndOff - StartOff:
                raise Exception("Invalid CFGDATA length in PDR region ")
            CompList = [(StartOff, EndOff - StartOff)]
    # Patch every located CFGDATA region with the new blob.
    for Offset, Size in CompList:
        if Offset < 0 or Offset >= len(BiosBins):
            raise Exception("Invalid CFGDATA region offset 0x%X!" % Offset)
        if CfgLen > Size:
            raise Exception("CfgData file size 0x%X shall not be greater than CFGDATA region size 0x%X !" % (CfgLen, Size))
        print("Patching CFGDATA region at image offset 0x%X (len: 0x%X)!" % (Offset, Size))
        BiosBins[Offset:Offset + CfgLen] = CfgBins
    IfwiImgOut = Args.ifwi_out_file
    if not IfwiImgOut:
        # Default: patch the input image in place.
        IfwiImgOut = IfwiImgIn
    with open(IfwiImgOut, 'wb') as Fh:
        Fh.write(BiosBins)
    if len(CompList):
        print("%d CFGDATA region has been patched successfully !" % len(
            CompList))
    else:
        print("No CFGDATA region has been patched!")
    return
def Main():
    """
    Parse the command line and dispatch to the selected sub-command.

    :return: the sub-command's return value, or 1 if no sub-command given
    """
    #
    # Parse the options and args
    #
    ArgParser = argparse.ArgumentParser()
    SubParser = ArgParser.add_subparsers(help='command')
    ViewParser = SubParser.add_parser('view', help='display config data')
    ViewParser.add_argument('cfg_in_file',
                            type=str,
                            nargs='+',
                            help='Configuration input binary file')
    # Fix: help text had a doubled colon ("info::").
    ViewParser.add_argument('-v', dest='dbg_lvl', type=int, help='Display verbose info: 0,1,2.Default=0', default = 0)
    ViewParser.set_defaults(func=CmdView)
    MergeParser = SubParser.add_parser('merge', help='merge config data')
    MergeParser.add_argument('cfg_in_file',
                             type=str,
                             nargs='+',
                             help='Configuration input binary file(s) - Input files can be: xxx.rom generated from BCT, xxx.bin generated from SBL source. xxx.bin* - Star represents internal cfg data bin generated from source to be added to merged cfg_out_file')
    MergeParser.add_argument('-o', dest='cfg_out_file', type=str, help='Specify Configuration output binary file name to be generated', required=True)
    MergeParser.add_argument('-p', dest='platform_id', type=str, help='Force a given platform ID to be used', default = '')
    MergeParser.add_argument('-v', dest='dbg_lvl', type=int, help='Display verbose info: 0,1,2.Default=0', default = 0)
    MergeParser.set_defaults(func=CmdMerge)
    SignParser = SubParser.add_parser('sign', help='sign external config data')
    SignParser.add_argument('cfg_in_file',
                            type=str,
                            help='Configuration binary file')
    SignParser.add_argument('-o', dest='cfg_out_file', type=str, help='Signed configuration output binary file name to be generated', required=True)
    SignParser.add_argument('-k', dest='cfg_pri_key', type=str, help='Key Id or Private key file (PEM format) used to sign configuration data', required=True)
    SignParser.add_argument('-a', dest='hash_alg', type=str, choices=['SHA2_256', 'SHA2_384', 'AUTO'], help='Hash Type for signing. For AUTO hash type will be choosen based on key length', default = 'AUTO')
    SignParser.add_argument('-s', dest='sign_scheme', type=str, choices=['RSA_PKCS1', 'RSA_PSS'], help='Signing Scheme', default = 'RSA_PSS')
    SignParser.add_argument('-svn', dest='svn', type=int, help='Security version number for Config Data', default = 0)
    SignParser.set_defaults(func=CmdSign)
    ExtractParser = SubParser.add_parser('extract', help='extract a single config data to a file')
    ExtractParser.add_argument('cfg_in_file',
                               type=str,
                               nargs='+',
                               help='Configuration input binary file')
    ExtractParser.add_argument('-t', dest='cfg_tag', type=str, help='Specify tag value to be extracted', required=True)
    ExtractParser.add_argument('-o', dest='cfg_out_file', type=str, help='Specify Configuration output binary file name to be generated')
    ExtractParser.set_defaults(func=CmdExtract)
    ReplaceParser = SubParser.add_parser('replace', help='Replace config data blob within a IFWI')
    ReplaceParser.add_argument('cfg_in_file',
                               type=str,
                               help='Configuration input binary file')
    ReplaceParser.add_argument('-i', dest='ifwi_in_file', type=str, help='Specify IFWI input binary file', required=True)
    ReplaceParser.add_argument('-o', dest='ifwi_out_file', type=str, help='Specify IFWI output binary file', default='')
    ReplaceParser.add_argument('-p', dest='pdr', action='store_true', help='Replace CFGDATA in PDR region', default=False)
    ReplaceParser.set_defaults(func=CmdReplace)
    ExportParser = SubParser.add_parser('export', help='Export board external CFGDATA from BIOS or IFWI file')
    ExportParser.add_argument('-i', dest='ifwi_file', type=str, help='Specify BIOS or IFWI input binary file', required=True)
    ExportParser.add_argument('-b', dest='boot_part', choices=['0', '1'], help='Specify which boot partition to export CFGDATA from', default = '0')
    ExportParser.add_argument('-o', dest='output_dir', type=str, help='Specify output directory', default='.')
    ExportParser.add_argument('-t', dest='tool_dir', type=str, help='Specify compress tool directory', default='')
    ExportParser.add_argument('-n', dest='board_name_list', type=str, help='Specify board name to id map list', default='')
    ExportParser.set_defaults(func=CmdExport)
    Args = ArgParser.parse_args()
    if not hasattr(Args, 'func'):
        # Fix: on Python 3 sub-commands are optional by default, so a bare
        # invocation used to crash with AttributeError here; show help instead.
        ArgParser.print_help()
        return 1
    return Args.func(Args)
if __name__ == '__main__':
    # Exit with the return code of the selected sub-command.
    sys.exit(Main())
| 38,250 | 2,010 | 207 |
4c3038da2879a528c4bde532c6d49541704ed5cb | 1,182 | py | Python | src/eve_utils/skel/win-svc/win_service.py | pointw-dev/eve-utils | 2af7d28232e0d22975cc8141fda27a3870694223 | [
"MIT"
] | null | null | null | src/eve_utils/skel/win-svc/win_service.py | pointw-dev/eve-utils | 2af7d28232e0d22975cc8141fda27a3870694223 | [
"MIT"
] | null | null | null | src/eve_utils/skel/win-svc/win_service.py | pointw-dev/eve-utils | 2af7d28232e0d22975cc8141fda27a3870694223 | [
"MIT"
] | null | null | null | import win32serviceutil
import win32service
import win32event
import servicemanager
from eve_service import EveService
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(EveWindowsService)
| 31.945946 | 107 | 0.709814 | import win32serviceutil
import win32service
import win32event
import servicemanager
from eve_service import EveService
class EveWindowsService(win32serviceutil.ServiceFramework):
    """Windows service wrapper that hosts an EveService instance."""
    # Service identifiers; '{$project_name}' is a template placeholder
    # substituted when this skeleton file is instantiated.
    _svc_name_ = "{$project_name}"
    _svc_display_name_ = "{$project_name} API"
    # _svc_description_ = "Service description goes here."  # TODO: replace with config or some such
    def __init__(self, args):
        # Create the wrapped Eve app before initializing the service framework.
        self._eve = EveService()
        win32serviceutil.ServiceFramework.__init__(self, args)
        # Event object used to signal the service to stop.
        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
    def SvcDoRun(self):
        """Run the Eve service, logging start/stop to the Windows event log."""
        servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE, servicemanager.PYS_SERVICE_STARTED,
                              (self._svc_name_, ''))
        # start() presumably blocks while the service runs — confirm in EveService.
        self._eve.start()
        servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE, servicemanager.PYS_SERVICE_STOPPED,
                              (self._svc_name_, ''))
    def SvcStop(self):
        """Handle the stop request from the Windows service control manager."""
        self._eve.stop()
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        win32event.SetEvent(self.hWaitStop)
if __name__ == '__main__':
    # Handle install/start/stop/remove service commands from the CLI.
    win32serviceutil.HandleCommandLine(EveWindowsService)
| 649 | 302 | 23 |
711ca7392e0cd8dddd20d6ed9ebbcc510f934cf0 | 212 | py | Python | dyrapy/tests/test_load_ouvidoria.py | rafasgj/dyrapy | 14e1af5ee05c98e2e2aaf0e5c58525161a19e81c | [
"BSD-3-Clause"
] | null | null | null | dyrapy/tests/test_load_ouvidoria.py | rafasgj/dyrapy | 14e1af5ee05c98e2e2aaf0e5c58525161a19e81c | [
"BSD-3-Clause"
] | 15 | 2020-12-10T17:56:13.000Z | 2020-12-24T20:45:56.000Z | dyrapy/tests/test_load_ouvidoria.py | rafasgj/dyrapy | 14e1af5ee05c98e2e2aaf0e5c58525161a19e81c | [
"BSD-3-Clause"
] | 1 | 2021-01-10T23:46:22.000Z | 2021-01-10T23:46:22.000Z | # Copyright (c) 2020 Marco Mangan <marco.mangan@gmail.com>
# License: BSD 3 clause
from dyrapy.datasets import load_ouvidoria
| 21.2 | 58 | 0.745283 | # Copyright (c) 2020 Marco Mangan <marco.mangan@gmail.com>
# License: BSD 3 clause
from dyrapy.datasets import load_ouvidoria
def test_load_ouvidoria():
    """The ouvidoria dataset loader should return a non-None object."""
    dataset = load_ouvidoria()
    assert dataset is not None
| 61 | 0 | 23 |
c2f1ebc02477d71494c8090daa7cd99f87a2a44c | 3,698 | py | Python | tests/test_core.py | beasteers/tflit | 4fec6255c9fc6d406ec4df3df70dfc282eca0c46 | [
"MIT"
] | 7 | 2020-11-22T08:08:56.000Z | 2022-03-18T13:00:43.000Z | tests/test_core.py | beasteers/tflit | 4fec6255c9fc6d406ec4df3df70dfc282eca0c46 | [
"MIT"
] | 2 | 2021-07-06T07:56:30.000Z | 2021-10-05T21:11:21.000Z | tests/test_core.py | beasteers/tflit | 4fec6255c9fc6d406ec4df3df70dfc282eca0c46 | [
"MIT"
] | 2 | 2020-12-06T19:33:47.000Z | 2021-03-11T11:34:56.000Z | import os
import glob
import json
import tflit
import pytest
import numpy as np
model_dir = os.path.join(os.path.dirname(__file__), 'models')
model_file = os.path.join(model_dir, '{}.tflite')
model_info_file = os.path.join(model_dir, '{}.json')
@pytest.mark.parametrize('name', [
os.path.splitext(os.path.basename(f))[0]
for f in glob.glob(model_file.format('*'))
])
# Utilities
| 31.606838 | 104 | 0.688751 | import os
import glob
import json
import tflit
import pytest
import numpy as np
model_dir = os.path.join(os.path.dirname(__file__), 'models')
model_file = os.path.join(model_dir, '{}.tflite')
model_info_file = os.path.join(model_dir, '{}.json')
@pytest.mark.parametrize('name', [
    os.path.splitext(os.path.basename(f))[0]
    for f in glob.glob(model_file.format('*'))
])
def test_model(name):
    """End-to-end check of one .tflite model against its recorded JSON
    metadata: shapes, names, dtype, single and batched predictions, batch
    resizing, and multi-threaded inference."""
    with open(model_info_file.format(name), 'r') as f:
        info = json.load(f)
    model = tflit.Model(model_file.format(name))
    model.summary()
    # check model shapes, replace batch dimension with 1
    assert replace_none(model.input_shape) == replace_none(info['input_shape'])
    assert replace_none(model.output_shape) == replace_none(info['output_shape'])
    # check names
    assert model.input_names == info['input_names']
    # assert model.output_names == info['output_names']
    assert model.dtype.__name__ == info['dtype']
    # load arrays from json info
    to_array = lambda x: np.asarray(x, dtype=model.dtype)
    X_test = apply_maybe_list(to_array, info['X_test'], model.multi_input)
    y_pred = apply_maybe_list(to_array, info['y_pred'], model.multi_output)
    # predict on test data
    y_pred_tfl = model.predict_batch(X_test)
    # check outputs
    assert [y.shape for y in y_pred_tfl] == [y.shape for y in y_pred]
    assert np.allclose(
        np.asarray(list(flatten(y_pred_tfl))),
        np.asarray(list(flatten(y_pred))),
        rtol=1e-4, atol=1e-5)
    # apply batching
    BATCH_SIZE = 32
    N_BATCH = 1
    faux_batch = lambda x, size=None: np.concatenate([x]*(size or N_BATCH*BATCH_SIZE))
    X_test_batch = apply_maybe_list(faux_batch, X_test, model.multi_input)
    y_pred_batch = apply_maybe_list(faux_batch, y_pred, model.multi_output)
    y_pred_batch_tfl = model.predict(X_test_batch)
    assert len(list(model.predict_each_batch(X_test_batch))) == 1.*BATCH_SIZE*N_BATCH
    # check outputs
    assert [y.shape for y in y_pred_batch_tfl] == [y.shape for y in y_pred_batch]
    assert np.allclose(
        np.asarray(list(flatten(y_pred_batch_tfl))),
        np.asarray(list(flatten(y_pred_batch))),
        rtol=1e-4, atol=1e-5)
    # test batch size
    model.set_batch_size(32)
    # Fix: the original asserted on a non-empty list of comparisons, which is
    # always truthy; every batch dimension must actually equal 32.
    assert all(s[0] == 32 for s in model.input_shapes)
    model.summary()
    # apply batching
    X_test_batch = apply_maybe_list(faux_batch, X_test, model.multi_input)
    y_pred_batch = apply_maybe_list(faux_batch, y_pred, model.multi_output)
    y_pred_batch_tfl = model.predict(X_test_batch)
    assert model.batch_size == 32
    assert len(list(model.predict_each_batch(X_test_batch))) == 1.*BATCH_SIZE*N_BATCH / model.batch_size
    # check outputs
    assert [y.shape for y in y_pred_batch_tfl] == [y.shape for y in y_pred_batch]
    assert np.allclose(
        np.asarray(list(flatten(y_pred_batch_tfl))),
        np.asarray(list(flatten(y_pred_batch))),
        rtol=1e-4, atol=1e-5)
    # multi-threaded inference should match the single-threaded results
    model = tflit.Model(model_file.format(name), num_threads=4)
    y_pred_batch_tfl = model.predict(X_test_batch)
    assert [y.shape for y in y_pred_batch_tfl] == [y.shape for y in y_pred_batch]
    assert np.allclose(
        np.asarray(list(flatten(y_pred_batch_tfl))),
        np.asarray(list(flatten(y_pred_batch))),
        rtol=1e-4, atol=1e-5)
# Utilities
def replace_none(xs, value=1):
    """Recursively substitute ``value`` for every None inside nested
    lists/tuples; tuples come back as lists, scalars pass through."""
    if isinstance(xs, (list, tuple)):
        return [replace_none(item, value) for item in xs]
    return value if xs is None else xs
def flatten(xs):
    """Yield every leaf element of an arbitrarily nested iterable.

    Non-iterable values are yielded as-is. str/bytes are treated as atomic
    leaves: iterating them would recurse forever, because a one-character
    string iterates to itself (this crashed with RecursionError before).
    """
    if isinstance(xs, (str, bytes)):
        yield xs
        return
    try:
        yield from (xi for x in xs for xi in flatten(x))
    except TypeError:
        # xs is not iterable: it is a leaf.
        yield xs
def apply_maybe_list(func, lst, bool):
    """Apply ``func`` to each element of ``lst`` when ``bool`` is true;
    otherwise apply it to ``lst`` as a whole.

    (The third parameter shadows the ``bool`` builtin; the name is kept
    for interface compatibility.)
    """
    if bool:
        return [func(item) for item in lst]
    return func(lst)
| 3,208 | 0 | 91 |
7a7b2acdee66b15f486f0fa8d28ad92cce760ee9 | 518 | py | Python | game.py | chirag1992m/AdversarialShortestPathGame | 0af125eb17aa3c4587a314486e9a6854c771e119 | [
"MIT"
] | 2 | 2017-10-01T11:05:12.000Z | 2017-10-05T22:36:29.000Z | game.py | chirag1992m/AdversarialShortestPathGame | 0af125eb17aa3c4587a314486e9a6854c771e119 | [
"MIT"
] | null | null | null | game.py | chirag1992m/AdversarialShortestPathGame | 0af125eb17aa3c4587a314486e9a6854c771e119 | [
"MIT"
] | null | null | null | import argparse
from core.game_looper import GameLooper
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run the Adversarial Game Server.")
parser.add_argument('--port', '--p', type=int, default=8080, help='Port to run the server on')
parser.add_argument('--game-file', default='sample/advshort.txt',
help='The game layout file to be loaded.')
args = parser.parse_args()
game = GameLooper('', args.port, args.game_file)
game.run_game_loop()
| 32.375 | 98 | 0.677606 | import argparse
from core.game_looper import GameLooper
if __name__ == "__main__":
    # Build the CLI, then start the adversarial game server loop.
    parser = argparse.ArgumentParser(description="Run the Adversarial Game Server.")
    parser.add_argument('--port', '--p', type=int, default=8080, help='Port to run the server on')
    parser.add_argument('--game-file', default='sample/advshort.txt',
                        help='The game layout file to be loaded.')
    args = parser.parse_args()
    # An empty host string binds the server to all available interfaces.
    game = GameLooper('', args.port, args.game_file)
    game.run_game_loop()
| 0 | 0 | 0 |
19626e7eeaefb2748ef981d8cb658a323ff5ccdd | 11,615 | py | Python | lib_auth.py | kwinkunks/modelr_app | 3ada397e562b5c7e43048b3fa4b632bad9ab1dda | [
"Apache-2.0"
] | null | null | null | lib_auth.py | kwinkunks/modelr_app | 3ada397e562b5c7e43048b3fa4b632bad9ab1dda | [
"Apache-2.0"
] | null | null | null | lib_auth.py | kwinkunks/modelr_app | 3ada397e562b5c7e43048b3fa4b632bad9ab1dda | [
"Apache-2.0"
] | 1 | 2020-06-29T05:06:10.000Z | 2020-06-29T05:06:10.000Z | """
Functions related to user logins, signups, password authentication,
logouts, and other account-management tasks.
"""
from lib_db import User, UserID, Group, VerifyUser
from google.appengine.api import mail
import hashlib
import random
import re
import string
import stripe
PASS_RE = re.compile(r"^.{3,20}$")
EMAIL_RE = re.compile(r"^[\S]+@[\S]+\.[\S]+$")
# Define an exception for authentication errors
def get_cookie_string(email):
    """
    Build the authentication cookie value for a user.

    The cookie has the form ``user=<user_id>|<encrypted_password>``
    with a site-wide path.

    :param email: email address of an existing user.
    :returns: the cookie header value string.
    """
    user = User.all().filter("email =", email).fetch(1)[0]
    value = '%s|%s' % (str(user.user_id), str(user.password))
    return '%s=%s; Path=/' % ('user', value)
def make_salt():
    """
    Create a random 10-character alphanumeric string used to salt
    password hashes.
    """
    # FIX: string.ascii_letters replaces string.letters -- the latter
    # is Python-2-only and locale dependent, so salts could vary with
    # the server locale; ascii_letters is a fixed constant.
    return ''.join(
        [random.choice(string.ascii_letters + string.digits)
         for i in range(10)])
def encrypt_password(password, salt):
    """
    Return the hex SHA-256 digest of ``password`` concatenated with
    ``salt``.

    NOTE(review): sha256 is a fast hash; a dedicated password scheme
    such as bcrypt would be preferable once available on App Engine.
    """
    salted = password + salt
    digest = hashlib.sha256(salted)
    return digest.hexdigest()
def make_userid():
    """
    Generates the next user id number from the database.

    Keeps a single UserID counter entity; the first ever allocation
    seeds it and returns 1.

    :returns: the allocated integer id.
    """
    uid = UserID.all().fetch(1)
    if not len(uid):
        # First allocation: create the counter row.
        uid = UserID(next_id=1)
    else:
        uid = uid[0]
    # update ids
    # NOTE(review): this read-increment-write is not transactional, so
    # two concurrent signups could receive the same id -- confirm this
    # is acceptable or wrap in a datastore transaction.
    current_id = uid.next_id
    next_id = current_id + 1
    uid.next_id = next_id
    uid.put()
    return current_id
def signup(email, password, parent=None):
    """
    Register a new account pending email verification.

    Validates the email/password, stores a VerifyUser entity with a
    random temporary id, and emails the verification link.

    :param email: address to register; must match EMAIL_RE and be new.
    :param password: plaintext password; must match PASS_RE (3-20 chars).
    :param parent: ancestor key for the datastore entities.
    :raises AuthExcept: on duplicate account or invalid email/password.
    :returns: the temporary verification id (sha256 hex string).
    """
    exists = User.all().ancestor(parent).filter("email =", email)
    if (exists.fetch(1)):
        raise AuthExcept("Account Exists")
    if not EMAIL_RE.match(email):
        raise AuthExcept("Invalid Email")
    if not PASS_RE.match(password):
        raise AuthExcept("Invalid Password")
    salt = make_salt()
    encrypted_password = encrypt_password(password, salt)
    # The temporary id doubles as the email-verification token.
    temp_id = hashlib.sha256(make_salt()).hexdigest()
    # Set up groups. See if the email domain exists
    groups = ['public']
    domain = email.split('@')[1]
    g = Group.all().ancestor(parent).filter("name =", domain).fetch(1)
    if g:
        groups.append(domain)
    user = VerifyUser(email=email, password=encrypted_password,
                      salt=salt, temp_id=temp_id,
                      group=groups, parent=parent)
    user.put()
    # NOTE(review): debugging aid -- prints the verification URL to the
    # server log; consider removing in production.
    print("http://modelr.io/verify_email?user_id=%s" %
          str(user.temp_id))
    mail.send_mail(sender="Hello <hello@modelr.io>",
                   to="<%s>" % user.email,
                   subject="Modelr email verification",
                   body="""
Welcome to Modelr!
We need to verify your email address. Click the link below to validate your account and continue to billing.
http://modelr.io/verify_email?user_id=%s
Cheers,
Matt, Evan, and Ben
""" % str(user.temp_id))
    return temp_id
def verify_signup(user_id, parent):
    """
    Fetch the pending signup matching a temporary verification id.

    The temporary id is the token sent in the email-verification link.

    :param user_id: temporary id from the verification email.
    :param parent: ancestor key of the VerifyUser entities.
    :raises AuthExcept: when no pending signup matches the id.
    :returns: the matching VerifyUser object.
    """
    pending = VerifyUser.all().ancestor(parent)
    matches = pending.filter("temp_id =", user_id).fetch(1)
    if not matches:
        raise AuthExcept("Verification Failed")
    return matches[0]
def initialize_user(email, stripe_id, parent, tax_code, price, tax):
    """
    Takes a verified user email from the authentication queue and adds
    it to the permanent database with a stripe id.

    :param email: email of the verified user to add.
    :param stripe_id: The stripe customer id of the user.
    :param parent: The ancestor database key to use for the database.
    :param tax_code: The tax code for the user
                     (province abbrieviation)
    :param price: monthly fee in cents; only used in the receipt email.
    :param tax: sales tax in cents; only used in the receipt email.
    :raises AuthExcept: if the email has no pending verified signup.
    """
    verified_filter = VerifyUser.all()\
                                .ancestor(parent)\
                                .filter("email =", email)
    verified_user = verified_filter.fetch(1)
    if not verified_user:
        raise AuthExcept("verification failed")
    verified_user = verified_user[0]
    # Make new user and populate
    user = User(parent=parent)
    user.user_id = make_userid()
    user.email = verified_user.email
    user.password = verified_user.password
    user.salt = verified_user.salt
    user.group = verified_user.group
    user.stripe_id = stripe_id
    user.tax_code = tax_code
    # Register the new user id with each of its groups.
    for group in user.group:
        g = Group.all().ancestor(parent).filter("name =",
                                                group).fetch(1)
        g[0].allowed_users.append(user.user_id)
        g[0].put()
    user.put()
    # remove the temporary user from the queue
    verified_user.delete()
    # send a payment confirmation email
    mail.send_mail(sender="Hello <hello@modelr.io>",
                   to="<%s>" % user.email,
                   subject="Modelr subscription confirmation",
                   body="""
Welcome to Modelr!
You are now subscribed to Modelr! Your receipt is below.
To unsubscribe, please reply to this email or log in to Modelr and check your user settings.
Cheers,
Matt, Evan, and Ben
=======================
modelr.io
=======================
Monthly fee USD{0:.2f}
Sales tax USD{1:.2f}
Total USD{2:.2f}
========================
Modelr is a product of
Agile Geoscience Ltd
Nova Scotia - Canada
Canada Revenue Agency
reg # 840217913RT0001
========================
""".format(price/100., tax/100., (price+tax)/100.))
def signin(email, password, parent):
    """
    Validate a user's email and password.

    :param email: account email address.
    :param password: plaintext password to check.
    :param parent: ancestor key for the User query.
    :raises AuthExcept: 'invalid email' when no account exists, or
        'invalid password' when the password does not match.
    """
    matches = User.all().ancestor(parent).filter("email =",
                                                 email).fetch(1)
    if not matches:
        raise AuthExcept('invalid email')
    account = matches[0]
    # Re-hash the supplied password with the stored salt and compare.
    if encrypt_password(password, account.salt) != account.password:
        raise AuthExcept('invalid password')
def verify(userid, password, ancestor):
    """
    Look up the user for a cookie and check its credentials.

    :param userid: user id taken from the cookie (numeric string).
    :param password: encrypted password taken from the cookie.
    :param ancestor: ancestor key for the User datastore query.
    :returns: the matching User object when the id exists AND the
              encrypted password matches, otherwise None.
    """
    try:
        user = User.all().ancestor(ancestor)\
                   .filter("user_id =",
                           int(userid)).fetch(1)[0]
    except IndexError:
        # Unknown user id: treat as a failed verification.
        return None
    # BUG FIX: previously the user was returned regardless of whether
    # the password matched (the comparison result was discarded), so
    # any request with a valid user id authenticated successfully.
    if user.password == password:
        return user
    return None
def authenticate(func):
    """
    Decorator for handler methods that require a logged in user.

    The wrapped method receives the authenticated user as its first
    argument after ``self``; anonymous requests are redirected to the
    signup page and the handler is not invoked.
    """
    # BUG FIX: the inner wrapper was missing from this function, so
    # referencing ``authenticate_and_call`` raised a NameError.
    def authenticate_and_call(self, *args, **kwargs):
        user = self.verify()
        if user is None:
            self.redirect('/signup')
            return
        else:
            return func(self, user, *args, **kwargs)
    return authenticate_and_call
def send_message(subject, message):
    """
    Sends us a message from a user or non-user.

    :param subject: email subject line.
    :param message: plain-text body forwarded to hello@modelr.io.
    """
    # send the message
    mail.send_mail(sender="Hello <hello@modelr.io>",
                   to="hello@modelr.io",
                   subject=subject,
                   body=message)
def forgot_password(email, parent):
    """
    Sets a new password after the user forgot it.

    Generates a random 8-character password, emails it to the user,
    and stores its salted hash.

    :param email: account email address.
    :param parent: ancestor key for the User query.
    :raises AuthExcept: if the email is unknown.
    """
    user = User.all().ancestor(parent).filter("email =",
                                              email).fetch(1)
    if not user:
        raise AuthExcept('invalid email')
    user = user[0]

    # BUG FIX: generate_password was called below but never defined in
    # this module, so every password reset raised a NameError.
    def generate_password(size=8,
                          chars=(string.ascii_uppercase +
                                 string.digits)):
        # 8 random chars drawn from uppercase letters + digits.
        return ''.join(random.choice(chars) for x in range(size))

    new = generate_password()
    # send a new password email
    mail.send_mail(sender="Hello <hello@modelr.io>",
                   to="<%s>" % user.email,
                   subject="Modelr password reset",
                   body="""
Here's your new password!
%s
Please sign in with this new password, and then change it in your
profile page.
http://modelr.io/signin?redirect=settings
Cheers,
Matt, Evan, and Ben
""" % new
                   )
    # Change it in the database
    user.password = encrypt_password(new, user.salt)
    user.put()
def reset_password(user, current_pword, new_password,
                   verify):
    """
    Change a user's password after confirming the current one.

    :param user: User datastore object whose password changes.
    :param current_pword: current plaintext password, for verification.
    :param new_password: new plaintext password.
    :param verify: repeat of the new password.
    :raises AuthExcept: when the confirmation copy does not match or
        the current password is wrong.
    """
    # The confirmation should already be checked client side; re-check
    # here defensively.
    if new_password != verify:
        raise AuthExcept("New password verification failed")
    # Confirm the caller knows the current password.
    if encrypt_password(current_pword, user.salt) != user.password:
        raise AuthExcept("Incorrect password")
    # Store the new salted hash.
    user.password = encrypt_password(new_password, user.salt)
    user.put()
def cancel_subscription(user):
    """
    Cancel a user's Stripe subscription and flag the account.

    Removes any pending invoice items (e.g. tax line items), cancels
    the subscription at the end of the billing period, marks the user
    unsubscribed, and sends a confirmation email.

    :param user: User object with a valid stripe_id.
    :raises AuthExcept: if any Stripe call fails.
    """
    try:
        stripe_customer = stripe.Customer.retrieve(user.stripe_id)
        # Check for extra invoices, ie Taxes, that also need
        # to be cancelled.
        invoice_items = stripe.InvoiceItem.all(customer=stripe_customer)
        for invoice in invoice_items.data:
            invoice_id = invoice["id"]
            # get the invoice and delete it if we can
            invoice_obj = stripe.InvoiceItem.retrieve(invoice_id)
            try:
                invoice_obj.delete()
            except:
                # NOTE(review): bare except deliberately keeps going
                # when one invoice item cannot be deleted; we alert
                # ourselves by email instead of aborting.
                msg = """
invoice # {0} not deleted from stripe id {1}
""".format(invoice_id, user.stripe_id)
                send_message("invoice not deleted",
                             msg)
        # Cancel at period end so the user keeps the access they paid
        # for until the billing cycle closes.
        sub_id = stripe_customer.subscriptions["data"][0]["id"]
        stripe_customer.subscriptions\
                       .retrieve(sub_id).delete(at_period_end=True)
        user.unsubscribed = True
        user.put()
        # TODO MailChimp
    except Exception as e:
        print e
        raise AuthExcept("Failed to unsubscribe user: " + user.email)
    mail.send_mail(sender="Hello <hello@modelr.io>",
                   to="<%s>" % user.email,
                   subject="Modelr account deleted",
                   body="""
You have unsubscribed from Modelr. Your account will be deleted
at the end of the billing cycle.
Thank you for using Modelr. We hope to meet again some day.
Cheers,
Matt, Evan, and Ben
""")
| 27.920673 | 109 | 0.606974 | """
Functions related user logins, signups, password authentications,
logouts, etc ...
"""
from lib_db import User, UserID, Group, VerifyUser
from google.appengine.api import mail
import hashlib
import random
import re
import string
import stripe
PASS_RE = re.compile(r"^.{3,20}$")
EMAIL_RE = re.compile(r"^[\S]+@[\S]+\.[\S]+$")
# Define an exception for authentication errors
class AuthExcept(Exception):
    """
    Raised for any signup, signin, or password-handling failure.

    The human-readable reason is available both as ``str(exc)`` and as
    the ``msg`` attribute.
    """
    msg = ''

    def __init__(self, msg):
        # FIX: pass the message to the Exception base class so that
        # str(exc) and logging show the reason instead of an empty
        # string (previously the base was never initialised with it).
        super(AuthExcept, self).__init__(msg)
        self.msg = msg
def get_cookie_string(email):
    """
    Creates a cookie string to use for authenticating users.

    Format: ``user=<user_id>|<encrypted_password>`` with Path=/.

    :param email: email of an existing user.
    :returns: the cookie header value string.
    """
    # NOTE(review): unlike signin(), this query has no ancestor filter
    # and raises IndexError when the email is unknown -- confirm it is
    # only called right after a successful signin/signup.
    user = User.all().filter("email =", email).fetch(1)[0]
    name = 'user'
    value = str(user.user_id) + '|' + str(user.password)
    return '%s=%s; Path=/' % (name, value)
def make_salt():
    """
    Create a random string to salt up the encryption

    Returns a 10-character string drawn from letters and digits.
    """
    # NOTE(review): string.letters is Python-2-only and locale
    # dependent; string.ascii_letters would be a safer constant.
    return ''.join(
        [random.choice(string.letters + string.digits)
         for i in range(10)])
def encrypt_password(password, salt):
    """
    Encrypts a password with sha256, but should be upgraded to bcrypt
    once google has that python library in app engine.

    :param password: plaintext password.
    :param salt: random salt string appended before hashing.
    :returns: hex digest string (64 characters).
    """
    return hashlib.sha256(password + salt).hexdigest()
def make_userid():
    """
    Generates the next user id number from the database.

    Keeps a single UserID counter entity; the first ever allocation
    seeds it and returns 1.

    :returns: the allocated integer id.
    """
    uid = UserID.all().fetch(1)
    if not len(uid):
        # First allocation: create the counter row.
        uid = UserID(next_id=1)
    else:
        uid = uid[0]
    # update ids
    # NOTE(review): this read-increment-write is not transactional, so
    # two concurrent signups could receive the same id -- confirm this
    # is acceptable or wrap in a datastore transaction.
    current_id = uid.next_id
    next_id = current_id + 1
    uid.next_id = next_id
    uid.put()
    return current_id
def make_user(email, password, parent=None, user_id=None):
    """
    Create and persist a User directly, skipping email verification.

    :param email: email address; must not already be registered.
    :param password: plaintext password; stored salted and hashed.
    :param parent: ancestor key for the datastore entities.
    :param user_id: explicit id, or None to allocate the next one.
    :raises AuthExcept: if the email is already registered.
    :returns: the stored User object.
    """
    if User.all().ancestor(parent).filter("email =", email).fetch(1):
        raise AuthExcept("email exists")
    if user_id is None:
        user_id = make_userid()
    salt = make_salt()
    encrypted_password = encrypt_password(password, salt)
    # NOTE(review): named admin_user, but this path is used for any
    # direct user creation -- confirm intent.
    admin_user = User(user_id=user_id,
                      parent=parent,
                      email=email,
                      password=encrypted_password,
                      salt=salt)
    admin_user.put()
    return admin_user
def signup(email, password, parent=None):
    """
    Register a new account pending email verification.

    Validates the email/password, stores a VerifyUser entity with a
    random temporary id, and emails the verification link.

    :param email: address to register; must match EMAIL_RE and be new.
    :param password: plaintext password; must match PASS_RE (3-20 chars).
    :param parent: ancestor key for the datastore entities.
    :raises AuthExcept: on duplicate account or invalid email/password.
    :returns: the temporary verification id (sha256 hex string).
    """
    exists = User.all().ancestor(parent).filter("email =", email)
    if (exists.fetch(1)):
        raise AuthExcept("Account Exists")
    if not EMAIL_RE.match(email):
        raise AuthExcept("Invalid Email")
    if not PASS_RE.match(password):
        raise AuthExcept("Invalid Password")
    salt = make_salt()
    encrypted_password = encrypt_password(password, salt)
    # The temporary id doubles as the email-verification token.
    temp_id = hashlib.sha256(make_salt()).hexdigest()
    # Set up groups. See if the email domain exists
    groups = ['public']
    domain = email.split('@')[1]
    g = Group.all().ancestor(parent).filter("name =", domain).fetch(1)
    if g:
        groups.append(domain)
    user = VerifyUser(email=email, password=encrypted_password,
                      salt=salt, temp_id=temp_id,
                      group=groups, parent=parent)
    user.put()
    # NOTE(review): debugging aid -- prints the verification URL to the
    # server log; consider removing in production.
    print("http://modelr.io/verify_email?user_id=%s" %
          str(user.temp_id))
    mail.send_mail(sender="Hello <hello@modelr.io>",
                   to="<%s>" % user.email,
                   subject="Modelr email verification",
                   body="""
Welcome to Modelr!
We need to verify your email address. Click the link below to validate your account and continue to billing.
http://modelr.io/verify_email?user_id=%s
Cheers,
Matt, Evan, and Ben
""" % str(user.temp_id))
    return temp_id
def verify_signup(user_id, parent):
    """
    Fetch the pending signup matching a temporary verification id.

    The temporary id is the token sent in the email-verification link.

    :param user_id: temporary id from the verification email.
    :param parent: ancestor key of the VerifyUser entities.
    :raises AuthExcept: when no pending signup matches the id.
    :returns: the matching VerifyUser object.
    """
    pending = VerifyUser.all().ancestor(parent)
    matches = pending.filter("temp_id =", user_id).fetch(1)
    if not matches:
        raise AuthExcept("Verification Failed")
    return matches[0]
def initialize_user(email, stripe_id, parent, tax_code, price, tax):
    """
    Takes a verified user email from the authentication queue and adds
    it to the permanent database with a stripe id.

    :param email: email of the verified user to add.
    :param stripe_id: The stripe customer id of the user.
    :param parent: The ancestor database key to use for the database.
    :param tax_code: The tax code for the user
                     (province abbrieviation)
    :param price: monthly fee in cents; only used in the receipt email.
    :param tax: sales tax in cents; only used in the receipt email.
    :raises AuthExcept: if the email has no pending verified signup.
    """
    verified_filter = VerifyUser.all()\
                                .ancestor(parent)\
                                .filter("email =", email)
    verified_user = verified_filter.fetch(1)
    if not verified_user:
        raise AuthExcept("verification failed")
    verified_user = verified_user[0]
    # Make new user and populate
    user = User(parent=parent)
    user.user_id = make_userid()
    user.email = verified_user.email
    user.password = verified_user.password
    user.salt = verified_user.salt
    user.group = verified_user.group
    user.stripe_id = stripe_id
    user.tax_code = tax_code
    # Register the new user id with each of its groups.
    for group in user.group:
        g = Group.all().ancestor(parent).filter("name =",
                                                group).fetch(1)
        g[0].allowed_users.append(user.user_id)
        g[0].put()
    user.put()
    # remove the temporary user from the queue
    verified_user.delete()
    # send a payment confirmation email
    mail.send_mail(sender="Hello <hello@modelr.io>",
                   to="<%s>" % user.email,
                   subject="Modelr subscription confirmation",
                   body="""
Welcome to Modelr!
You are now subscribed to Modelr! Your receipt is below.
To unsubscribe, please reply to this email or log in to Modelr and check your user settings.
Cheers,
Matt, Evan, and Ben
=======================
modelr.io
=======================
Monthly fee USD{0:.2f}
Sales tax USD{1:.2f}
Total USD{2:.2f}
========================
Modelr is a product of
Agile Geoscience Ltd
Nova Scotia - Canada
Canada Revenue Agency
reg # 840217913RT0001
========================
""".format(price/100., tax/100., (price+tax)/100.))
def signin(email, password, parent):
    """
    Checks if a email and password are valid. Will throw a AuthExcept
    if they are not.

    :param email: account email address.
    :param password: plaintext password to check.
    :param parent: ancestor key for the User query.
    :raises AuthExcept: 'invalid email' or 'invalid password'.
    """
    user = User.all().ancestor(parent).filter("email =",
                                              email).fetch(1)
    if not user:
        raise AuthExcept('invalid email')
    user = user[0]
    # Re-hash the supplied password with the stored salt and compare.
    encrypted_password = encrypt_password(password, user.salt)
    if not encrypted_password == user.password:
        raise AuthExcept('invalid password')
def verify(userid, password, ancestor):
    """
    Look up the user for a cookie and check its credentials.

    :param userid: user id taken from the cookie (numeric string).
    :param password: encrypted password taken from the cookie.
    :param ancestor: ancestor key for the User datastore query.
    :returns: the matching User object when the id exists AND the
              encrypted password matches, otherwise None.
    """
    try:
        user = User.all().ancestor(ancestor)\
                   .filter("user_id =",
                           int(userid)).fetch(1)[0]
    except IndexError:
        # Unknown user id: treat as a failed verification.
        return None
    # BUG FIX: previously the user was returned regardless of whether
    # the password matched (the comparison result was discarded), so
    # any request with a valid user id authenticated successfully.
    if user.password == password:
        return user
    return None
def authenticate(func):
    """
    Decorator for handler methods that require a signed-in user.

    The wrapper resolves the current user via ``self.verify()``;
    anonymous requests are redirected to /signup, otherwise the
    wrapped method runs with the user inserted as the first argument
    after ``self``.
    """
    def wrapper(self, *args, **kwargs):
        current_user = self.verify()
        if current_user is None:
            self.redirect('/signup')
            return None
        return func(self, current_user, *args, **kwargs)
    return wrapper
def send_message(subject, message):
    """
    Sends us a message from a user or non-user.

    :param subject: email subject line.
    :param message: plain-text body forwarded to hello@modelr.io.
    """
    # send the message
    mail.send_mail(sender="Hello <hello@modelr.io>",
                   to="hello@modelr.io",
                   subject=subject,
                   body=message)
def forgot_password(email, parent):
    """
    Sets a new password after the user forgot it.

    Generates a random 8-character password, emails it to the user,
    and stores its salted hash.

    :param email: account email address.
    :param parent: ancestor key for the User query.
    :raises AuthExcept: if the email is unknown.
    """
    user = User.all().ancestor(parent).filter("email =",
                                              email).fetch(1)
    if not user:
        raise AuthExcept('invalid email')
    user = user[0]

    # Local helper: 8 random chars from uppercase letters + digits.
    def generate_password(size=8,
                          chars=(string.ascii_uppercase +
                                 string.digits)):
        return ''.join(random.choice(chars) for x in range(size))
    new = generate_password()
    # send a new password email
    mail.send_mail(sender="Hello <hello@modelr.io>",
                   to="<%s>" % user.email,
                   subject="Modelr password reset",
                   body="""
Here's your new password!
%s
Please sign in with this new password, and then change it in your
profile page.
http://modelr.io/signin?redirect=settings
Cheers,
Matt, Evan, and Ben
""" % new
                   )
    # Change it in the database
    user.password = encrypt_password(new, user.salt)
    user.put()
def reset_password(user, current_pword, new_password,
                   verify):
    """
    Resets the password at the user's request.

    :param user: The user database object requesting the password
                 change.
    :param current_pword: The user's current password to verify.
    :param new_password: The user's new password.
    :param verify: The new password verification.
    :raises AuthExcept: on confirmation mismatch or wrong current
                        password.
    """
    # NOTE(review): this parameter shadows the module-level verify()
    # function inside this body -- consider renaming.
    # This check should be done in the javascript on the page
    if new_password != verify:
        raise AuthExcept("New password verification failed")
    # Check if the original password matches the database
    if encrypt_password(current_pword, user.salt) != user.password:
        raise AuthExcept("Incorrect password")
    # Update the password in the database
    user.password = encrypt_password(new_password, user.salt)
    # Save it in the database
    user.put()
def cancel_subscription(user):
    """
    Cancel a user's Stripe subscription and flag the account.

    Removes any pending invoice items (e.g. tax line items), cancels
    the subscription at the end of the billing period, marks the user
    unsubscribed, and sends a confirmation email.

    :param user: User object with a valid stripe_id.
    :raises AuthExcept: if any Stripe call fails.
    """
    try:
        stripe_customer = stripe.Customer.retrieve(user.stripe_id)
        # Check for extra invoices, ie Taxes, that also need
        # to be cancelled.
        invoice_items = stripe.InvoiceItem.all(customer=stripe_customer)
        for invoice in invoice_items.data:
            invoice_id = invoice["id"]
            # get the invoice and delete it if we can
            invoice_obj = stripe.InvoiceItem.retrieve(invoice_id)
            try:
                invoice_obj.delete()
            except:
                # NOTE(review): bare except deliberately keeps going
                # when one invoice item cannot be deleted; we alert
                # ourselves by email instead of aborting.
                msg = """
invoice # {0} not deleted from stripe id {1}
""".format(invoice_id, user.stripe_id)
                send_message("invoice not deleted",
                             msg)
        # Cancel at period end so the user keeps the access they paid
        # for until the billing cycle closes.
        sub_id = stripe_customer.subscriptions["data"][0]["id"]
        stripe_customer.subscriptions\
                       .retrieve(sub_id).delete(at_period_end=True)
        user.unsubscribed = True
        user.put()
        # TODO MailChimp
    except Exception as e:
        print e
        raise AuthExcept("Failed to unsubscribe user: " + user.email)
    mail.send_mail(sender="Hello <hello@modelr.io>",
                   to="<%s>" % user.email,
                   subject="Modelr account deleted",
                   body="""
You have unsubscribed from Modelr. Your account will be deleted
at the end of the billing cycle.
Thank you for using Modelr. We hope to meet again some day.
Cheers,
Matt, Evan, and Ben
""")
| 938 | 47 | 98 |
c50c7e581651abdcce05c979c772b57ce8ec489d | 689 | py | Python | MODBUS_RTU_3.py | pratikgharte/Python_GUI_EC_PH | 5dd6a166ffc59a55a45e711481261644879a3d8a | [
"MIT"
] | null | null | null | MODBUS_RTU_3.py | pratikgharte/Python_GUI_EC_PH | 5dd6a166ffc59a55a45e711481261644879a3d8a | [
"MIT"
] | null | null | null | MODBUS_RTU_3.py | pratikgharte/Python_GUI_EC_PH | 5dd6a166ffc59a55a45e711481261644879a3d8a | [
"MIT"
] | null | null | null | from pymodbus.client.sync import ModbusSerialClient
# Serial Modbus RTU master: poll one register from slave unit 1.
client = ModbusSerialClient(
    method='rtu',
    # NOTE(review): /dev/ttyS0 is typically the primary UART on a
    # Raspberry Pi -- confirm this matches the target hardware.
    port='/dev/ttyS0',
    baudrate=9600,
    timeout=3,
    parity='N',
    stopbits=1,
    bytesize=8
)
if client.connect():  # Trying for connect to Modbus Server/Slave
    '''Reading from a holding register with the below content.'''
    res = client.read_holding_registers(address=1, count=1, unit=1)
    '''Reading from a discrete register with the below content.'''
    # res = client.read_discrete_inputs(address=1, count=1, unit=1)
    if not res.isError():
        # Successful read: show the raw register values.
        print(res.registers)
    else:
        # res is an error/exception response; print it for debugging.
        print(res)
else:
print('Cannot connect to the Modbus Server/Slave') | 26.5 | 67 | 0.674891 | from pymodbus.client.sync import ModbusSerialClient
# Serial Modbus RTU master: poll one register from slave unit 1.
client = ModbusSerialClient(
    method='rtu',
    # NOTE(review): /dev/ttyS0 is typically the primary UART on a
    # Raspberry Pi -- confirm this matches the target hardware.
    port='/dev/ttyS0',
    baudrate=9600,
    timeout=3,
    parity='N',
    stopbits=1,
    bytesize=8
)
if client.connect():  # Trying for connect to Modbus Server/Slave
    '''Reading from a holding register with the below content.'''
    res = client.read_holding_registers(address=1, count=1, unit=1)
    '''Reading from a discrete register with the below content.'''
    # res = client.read_discrete_inputs(address=1, count=1, unit=1)
    if not res.isError():
        # Successful read: show the raw register values.
        print(res.registers)
    else:
        # res is an error/exception response; print it for debugging.
        print(res)
else:
print('Cannot connect to the Modbus Server/Slave') | 0 | 0 | 0 |
1b04120573fe644b3ee867b0c0f0d3fb5ecc57f5 | 74,685 | py | Python | subject/tests/integration/legacy_functional/test_v1_api.py | laoyigrace/subject | e6ed989fdc250917a19788112b22322b73b3550f | [
"Apache-2.0"
] | null | null | null | subject/tests/integration/legacy_functional/test_v1_api.py | laoyigrace/subject | e6ed989fdc250917a19788112b22322b73b3550f | [
"Apache-2.0"
] | null | null | null | subject/tests/integration/legacy_functional/test_v1_api.py | laoyigrace/subject | e6ed989fdc250917a19788112b22322b73b3550f | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import hashlib
import os
import tempfile
from oslo_serialization import jsonutils
from oslo_utils import units
import testtools
from subject.common import timeutils
from subject.tests.integration.legacy_functional import base
from subject.tests.utils import minimal_headers
FIVE_KB = 5 * units.Ki
FIVE_GB = 5 * units.Gi
| 43.070934 | 83 | 0.581255 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import hashlib
import os
import tempfile
from oslo_serialization import jsonutils
from oslo_utils import units
import testtools
from subject.common import timeutils
from subject.tests.integration.legacy_functional import base
from subject.tests.utils import minimal_headers
FIVE_KB = 5 * units.Ki
FIVE_GB = 5 * units.Gi
class TestApi(base.ApiTest):
def test_get_head_simple_post(self):
# 0. GET /subjects
# Verify no public subjects
path = "/v1/subjects"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
self.assertEqual('{"subjects": []}', content)
# 1. GET /subjects/detail
# Verify no public subjects
path = "/v1/subjects/detail"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
self.assertEqual('{"subjects": []}', content)
# 2. POST /subjects with public subject named Subject1
# attribute and no custom properties. Verify a 200 OK is returned
subject_data = "*" * FIVE_KB
headers = minimal_headers('Subject1')
path = "/v1/subjects"
response, content = self.http.request(path, 'POST', headers=headers,
body=subject_data)
self.assertEqual(201, response.status)
data = jsonutils.loads(content)
subject_id = data['subject']['id']
self.assertEqual(hashlib.md5(subject_data).hexdigest(),
data['subject']['checksum'])
self.assertEqual(FIVE_KB, data['subject']['size'])
self.assertEqual("Subject1", data['subject']['name'])
self.assertTrue(data['subject']['is_public'])
# 3. HEAD subject
# Verify subject found now
path = "/v1/subjects/%s" % subject_id
response, content = self.http.request(path, 'HEAD')
self.assertEqual(200, response.status)
self.assertEqual("Subject1", response['x-subject-meta-name'])
# 4. GET subject
# Verify all information on subject we just added is correct
path = "/v1/subjects/%s" % subject_id
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
expected_subject_headers = {
'x-subject-meta-id': subject_id,
'x-subject-meta-name': 'Subject1',
'x-subject-meta-is_public': 'True',
'x-subject-meta-status': 'active',
'x-subject-meta-disk_format': 'raw',
'x-subject-meta-container_format': 'ovf',
'x-subject-meta-size': str(FIVE_KB)}
expected_std_headers = {
'content-length': str(FIVE_KB),
'content-type': 'application/octet-stream'}
for expected_key, expected_value in expected_subject_headers.items():
self.assertEqual(expected_value, response[expected_key],
"For key '%s' expected header value '%s'. "
"Got '%s'" % (expected_key,
expected_value,
response[expected_key]))
for expected_key, expected_value in expected_std_headers.items():
self.assertEqual(expected_value, response[expected_key],
"For key '%s' expected header value '%s'. "
"Got '%s'" % (expected_key,
expected_value,
response[expected_key]))
self.assertEqual("*" * FIVE_KB, content)
self.assertEqual(hashlib.md5("*" * FIVE_KB).hexdigest(),
hashlib.md5(content).hexdigest())
# 5. GET /subjects
# Verify no public subjects
path = "/v1/subjects"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
expected_result = {"subjects": [
{"container_format": "ovf",
"disk_format": "raw",
"id": subject_id,
"name": "Subject1",
"checksum": "c2e5db72bd7fd153f53ede5da5a06de3",
"size": 5120}]}
self.assertEqual(expected_result, jsonutils.loads(content))
# 6. GET /subjects/detail
# Verify subject and all its metadata
path = "/v1/subjects/detail"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
expected_subject = {
"status": "active",
"name": "Subject1",
"deleted": False,
"container_format": "ovf",
"disk_format": "raw",
"id": subject_id,
"is_public": True,
"deleted_at": None,
"properties": {},
"size": 5120}
subject = jsonutils.loads(content)
for expected_key, expected_value in expected_subject.items():
self.assertEqual(expected_value, subject['subjects'][0][expected_key],
"For key '%s' expected header value '%s'. "
"Got '%s'" % (expected_key,
expected_value,
subject['subjects'][0][expected_key]))
# 7. PUT subject with custom properties of "distro" and "arch"
# Verify 200 returned
headers = {'X-Subject-Meta-Property-Distro': 'Ubuntu',
'X-Subject-Meta-Property-Arch': 'x86_64'}
path = "/v1/subjects/%s" % subject_id
response, content = self.http.request(path, 'PUT', headers=headers)
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual("x86_64", data['subject']['properties']['arch'])
self.assertEqual("Ubuntu", data['subject']['properties']['distro'])
# 8. GET /subjects/detail
# Verify subject and all its metadata
path = "/v1/subjects/detail"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
expected_subject = {
"status": "active",
"name": "Subject1",
"deleted": False,
"container_format": "ovf",
"disk_format": "raw",
"id": subject_id,
"is_public": True,
"deleted_at": None,
"properties": {'distro': 'Ubuntu', 'arch': 'x86_64'},
"size": 5120}
subject = jsonutils.loads(content)
for expected_key, expected_value in expected_subject.items():
self.assertEqual(expected_value, subject['subjects'][0][expected_key],
"For key '%s' expected header value '%s'. "
"Got '%s'" % (expected_key,
expected_value,
subject['subjects'][0][expected_key]))
# 9. PUT subject and remove a previously existing property.
headers = {'X-Subject-Meta-Property-Arch': 'x86_64'}
path = "/v1/subjects/%s" % subject_id
response, content = self.http.request(path, 'PUT', headers=headers)
self.assertEqual(200, response.status)
path = "/v1/subjects/detail"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)['subjects'][0]
self.assertEqual(1, len(data['properties']))
self.assertEqual("x86_64", data['properties']['arch'])
# 10. PUT subject and add a previously deleted property.
headers = {'X-Subject-Meta-Property-Distro': 'Ubuntu',
'X-Subject-Meta-Property-Arch': 'x86_64'}
path = "/v1/subjects/%s" % subject_id
response, content = self.http.request(path, 'PUT', headers=headers)
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
path = "/v1/subjects/detail"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)['subjects'][0]
self.assertEqual(2, len(data['properties']))
self.assertEqual("x86_64", data['properties']['arch'])
self.assertEqual("Ubuntu", data['properties']['distro'])
self.assertNotEqual(data['created_at'], data['updated_at'])
# DELETE subject
path = "/v1/subjects/%s" % subject_id
response, content = self.http.request(path, 'DELETE')
self.assertEqual(200, response.status)
def test_queued_process_flow(self):
"""
We test the process flow where a user registers an subject
with Glance but does not immediately upload an subject file.
Later, the user uploads an subject file using a PUT operation.
We track the changing of subject status throughout this process.
0. GET /subjects
- Verify no public subjects
1. POST /subjects with public subject named Subject1 with no location
attribute and no subject data.
- Verify 201 returned
2. GET /subjects
- Verify one public subject
3. HEAD subject
- Verify subject now in queued status
4. PUT subject with subject data
- Verify 200 returned
5. HEAD subjects
- Verify subject now in active status
6. GET /subjects
- Verify one public subject
"""
# 0. GET /subjects
# Verify no public subjects
path = "/v1/subjects"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
self.assertEqual('{"subjects": []}', content)
# 1. POST /subjects with public subject named Subject1
# with no location or subject data
headers = minimal_headers('Subject1')
path = "/v1/subjects"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(201, response.status)
data = jsonutils.loads(content)
self.assertIsNone(data['subject']['checksum'])
self.assertEqual(0, data['subject']['size'])
self.assertEqual('ovf', data['subject']['container_format'])
self.assertEqual('raw', data['subject']['disk_format'])
self.assertEqual("Subject1", data['subject']['name'])
self.assertTrue(data['subject']['is_public'])
subject_id = data['subject']['id']
# 2. GET /subjects
# Verify 1 public subject
path = "/v1/subjects"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(subject_id, data['subjects'][0]['id'])
self.assertIsNone(data['subjects'][0]['checksum'])
self.assertEqual(0, data['subjects'][0]['size'])
self.assertEqual('ovf', data['subjects'][0]['container_format'])
self.assertEqual('raw', data['subjects'][0]['disk_format'])
self.assertEqual("Subject1", data['subjects'][0]['name'])
# 3. HEAD /subjects
# Verify status is in queued
path = "/v1/subjects/%s" % (subject_id)
response, content = self.http.request(path, 'HEAD')
self.assertEqual(200, response.status)
self.assertEqual("Subject1", response['x-subject-meta-name'])
self.assertEqual("queued", response['x-subject-meta-status'])
self.assertEqual('0', response['x-subject-meta-size'])
self.assertEqual(subject_id, response['x-subject-meta-id'])
# 4. PUT subject with subject data, verify 200 returned
subject_data = "*" * FIVE_KB
headers = {'Content-Type': 'application/octet-stream'}
path = "/v1/subjects/%s" % (subject_id)
response, content = self.http.request(path, 'PUT', headers=headers,
body=subject_data)
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(hashlib.md5(subject_data).hexdigest(),
data['subject']['checksum'])
self.assertEqual(FIVE_KB, data['subject']['size'])
self.assertEqual("Subject1", data['subject']['name'])
self.assertTrue(data['subject']['is_public'])
# 5. HEAD /subjects
# Verify status is in active
path = "/v1/subjects/%s" % (subject_id)
response, content = self.http.request(path, 'HEAD')
self.assertEqual(200, response.status)
self.assertEqual("Subject1", response['x-subject-meta-name'])
self.assertEqual("active", response['x-subject-meta-status'])
# 6. GET /subjects
# Verify 1 public subject still...
path = "/v1/subjects"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(hashlib.md5(subject_data).hexdigest(),
data['subjects'][0]['checksum'])
self.assertEqual(subject_id, data['subjects'][0]['id'])
self.assertEqual(FIVE_KB, data['subjects'][0]['size'])
self.assertEqual('ovf', data['subjects'][0]['container_format'])
self.assertEqual('raw', data['subjects'][0]['disk_format'])
self.assertEqual("Subject1", data['subjects'][0]['name'])
# DELETE subject
path = "/v1/subjects/%s" % (subject_id)
response, content = self.http.request(path, 'DELETE')
self.assertEqual(200, response.status)
def test_v1_not_enabled(self):
self.config(enable_v1_api=False)
path = "/v1/subjects"
response, content = self.http.request(path, 'GET')
self.assertEqual(300, response.status)
def test_v1_enabled(self):
self.config(enable_v1_api=True)
path = "/v1/subjects"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
def test_zero_initial_size(self):
"""
A test to ensure that an subject with size explicitly set to zero
has status that immediately transitions to active.
"""
# 1. POST /subjects with public subject named Subject1
# attribute and a size of zero.
# Verify a 201 OK is returned
headers = {'Content-Type': 'application/octet-stream',
'X-Subject-Meta-Size': '0',
'X-Subject-Meta-Name': 'Subject1',
'X-Subject-Meta-disk_format': 'raw',
'X-subject-Meta-container_format': 'ovf',
'X-Subject-Meta-Is-Public': 'True'}
path = "/v1/subjects"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(201, response.status)
subject = jsonutils.loads(content)['subject']
self.assertEqual('active', subject['status'])
# 2. HEAD subject-location
# Verify subject size is zero and the status is active
path = response.get('location')
response, content = self.http.request(path, 'HEAD')
self.assertEqual(200, response.status)
self.assertEqual('0', response['x-subject-meta-size'])
self.assertEqual('active', response['x-subject-meta-status'])
# 3. GET subject-location
# Verify subject content is empty
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
self.assertEqual(0, len(content))
def test_traceback_not_consumed(self):
"""
A test that errors coming from the POST API do not
get consumed and print the actual error message, and
not something like <traceback object at 0x1918d40>
:see https://bugs.launchpad.net/subject/+bug/755912
"""
# POST /subjects with binary data, but not setting
# Content-Type to application/octet-stream, verify a
# 400 returned and that the error is readable.
with tempfile.NamedTemporaryFile() as test_data_file:
test_data_file.write("XXX")
test_data_file.flush()
path = "/v1/subjects"
headers = minimal_headers('Subject1')
headers['Content-Type'] = 'not octet-stream'
response, content = self.http.request(path, 'POST',
body=test_data_file.name,
headers=headers)
self.assertEqual(400, response.status)
expected = "Content-Type must be application/octet-stream"
self.assertIn(expected, content,
"Could not find '%s' in '%s'" % (expected, content))
    def test_filtered_subjects(self):
        """
        Set up four test subjects and ensure each query param filter works
        """
        # 0. GET /subjects
        # Verify no public subjects
        path = "/v1/subjects"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        self.assertEqual('{"subjects": []}', content)
        subject_ids = []
        # 1. POST /subjects with three public subjects, and one private subject
        # with various attributes
        headers = {'Content-Type': 'application/octet-stream',
                   'X-Subject-Meta-Name': 'Subject1',
                   'X-Subject-Meta-Status': 'active',
                   'X-Subject-Meta-Container-Format': 'ovf',
                   'X-Subject-Meta-Disk-Format': 'vdi',
                   'X-Subject-Meta-Size': '19',
                   'X-Subject-Meta-Is-Public': 'True',
                   'X-Subject-Meta-Protected': 'True',
                   'X-Subject-Meta-Property-pants': 'are on'}
        path = "/v1/subjects"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        self.assertEqual("are on", data['subject']['properties']['pants'])
        self.assertTrue(data['subject']['is_public'])
        subject_ids.append(data['subject']['id'])
        headers = {'Content-Type': 'application/octet-stream',
                   'X-Subject-Meta-Name': 'My Subject!',
                   'X-Subject-Meta-Status': 'active',
                   'X-Subject-Meta-Container-Format': 'ovf',
                   'X-Subject-Meta-Disk-Format': 'vhd',
                   'X-Subject-Meta-Size': '20',
                   'X-Subject-Meta-Is-Public': 'True',
                   'X-Subject-Meta-Protected': 'False',
                   'X-Subject-Meta-Property-pants': 'are on'}
        path = "/v1/subjects"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        self.assertEqual("are on", data['subject']['properties']['pants'])
        self.assertTrue(data['subject']['is_public'])
        subject_ids.append(data['subject']['id'])
        headers = {'Content-Type': 'application/octet-stream',
                   'X-Subject-Meta-Name': 'My Subject!',
                   'X-Subject-Meta-Status': 'saving',
                   'X-Subject-Meta-Container-Format': 'ami',
                   'X-Subject-Meta-Disk-Format': 'ami',
                   'X-Subject-Meta-Size': '21',
                   'X-Subject-Meta-Is-Public': 'True',
                   'X-Subject-Meta-Protected': 'False',
                   'X-Subject-Meta-Property-pants': 'are off'}
        path = "/v1/subjects"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        self.assertEqual("are off", data['subject']['properties']['pants'])
        self.assertTrue(data['subject']['is_public'])
        subject_ids.append(data['subject']['id'])
        headers = {'Content-Type': 'application/octet-stream',
                   'X-Subject-Meta-Name': 'My Private Subject',
                   'X-Subject-Meta-Status': 'active',
                   'X-Subject-Meta-Container-Format': 'ami',
                   'X-Subject-Meta-Disk-Format': 'ami',
                   'X-Subject-Meta-Size': '22',
                   'X-Subject-Meta-Is-Public': 'False',
                   'X-Subject-Meta-Protected': 'False'}
        path = "/v1/subjects"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        self.assertFalse(data['subject']['is_public'])
        subject_ids.append(data['subject']['id'])
        # 2. GET /subjects
        # Verify three public subjects (the private one is excluded from
        # unauthenticated listings)
        path = "/v1/subjects"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(3, len(data['subjects']))
        # 3. GET /subjects with name filter
        # Verify correct subjects returned with name
        params = "name=My%20Subject!"
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(2, len(data['subjects']))
        for subject in data['subjects']:
            self.assertEqual("My Subject!", subject['name'])
        # 4. GET /subjects with status filter
        # Verify correct subjects returned with status
        # NOTE(review): all three public subjects show up as 'queued'
        # despite the X-Subject-Meta-Status values sent at create time,
        # which implies the API ignores the client-supplied status until
        # data is uploaded — confirm against the server implementation.
        params = "status=queued"
        path = "/v1/subjects/detail?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(3, len(data['subjects']))
        for subject in data['subjects']:
            self.assertEqual("queued", subject['status'])
        params = "status=active"
        path = "/v1/subjects/detail?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(0, len(data['subjects']))
        # 5. GET /subjects with container_format filter
        # Verify correct subjects returned with container_format
        params = "container_format=ovf"
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(2, len(data['subjects']))
        for subject in data['subjects']:
            self.assertEqual("ovf", subject['container_format'])
        # 6. GET /subjects with disk_format filter
        # Verify correct subjects returned with disk_format
        params = "disk_format=vdi"
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(1, len(data['subjects']))
        for subject in data['subjects']:
            self.assertEqual("vdi", subject['disk_format'])
        # 7. GET /subjects with size_max filter
        # Verify correct subjects returned with size <= expected
        params = "size_max=20"
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(2, len(data['subjects']))
        for subject in data['subjects']:
            self.assertLessEqual(subject['size'], 20)
        # 8. GET /subjects with size_min filter
        # Verify correct subjects returned with size >= expected
        params = "size_min=20"
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(2, len(data['subjects']))
        for subject in data['subjects']:
            self.assertGreaterEqual(subject['size'], 20)
        # 9. Get /subjects with is_public=None filter
        # Verify correct subjects returned with property
        # Bug lp:803656 Support is_public in filtering
        params = "is_public=None"
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(4, len(data['subjects']))
        # 10. Get /subjects with is_public=False filter
        # Verify correct subjects returned with property
        # Bug lp:803656 Support is_public in filtering
        params = "is_public=False"
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(1, len(data['subjects']))
        for subject in data['subjects']:
            self.assertEqual("My Private Subject", subject['name'])
        # 11. Get /subjects with is_public=True filter
        # Verify correct subjects returned with property
        # Bug lp:803656 Support is_public in filtering
        params = "is_public=True"
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(3, len(data['subjects']))
        for subject in data['subjects']:
            self.assertNotEqual(subject['name'], "My Private Subject")
        # 12. Get /subjects with protected=False filter
        # Verify correct subjects returned with property
        params = "protected=False"
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(2, len(data['subjects']))
        for subject in data['subjects']:
            self.assertNotEqual(subject['name'], "Subject1")
        # 13. Get /subjects with protected=True filter
        # Verify correct subjects returned with property
        params = "protected=True"
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(1, len(data['subjects']))
        for subject in data['subjects']:
            self.assertEqual("Subject1", subject['name'])
        # 14. GET /subjects with property filter
        # Verify correct subjects returned with property
        params = "property-pants=are%20on"
        path = "/v1/subjects/detail?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(2, len(data['subjects']))
        for subject in data['subjects']:
            self.assertEqual("are on", subject['properties']['pants'])
        # 15. GET /subjects with property filter and name filter
        # Verify correct subjects returned with property and name
        # Make sure you quote the url when using more than one param!
        params = "name=My%20Subject!&property-pants=are%20on"
        path = "/v1/subjects/detail?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(1, len(data['subjects']))
        for subject in data['subjects']:
            self.assertEqual("are on", subject['properties']['pants'])
            self.assertEqual("My Subject!", subject['name'])
        # 16. GET /subjects with past changes-since filter
        yesterday = timeutils.isotime(timeutils.utcnow() -
                                      datetime.timedelta(1))
        params = "changes-since=%s" % yesterday
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(3, len(data['subjects']))
        # one timezone west of Greenwich equates to an hour ago
        # taking care to pre-urlencode '+' as '%2B', otherwise the timezone
        # '+' is wrongly decoded as a space
        # TODO(eglynn): investigate '+' --> <SPACE> decoding, an artifact
        # of WSGI/webob dispatch?
        now = timeutils.utcnow()
        hour_ago = now.strftime('%Y-%m-%dT%H:%M:%S%%2B01:00')
        params = "changes-since=%s" % hour_ago
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(3, len(data['subjects']))
        # 17. GET /subjects with future changes-since filter
        tomorrow = timeutils.isotime(timeutils.utcnow() +
                                     datetime.timedelta(1))
        params = "changes-since=%s" % tomorrow
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(0, len(data['subjects']))
        # one timezone east of Greenwich equates to an hour from now
        now = timeutils.utcnow()
        hour_hence = now.strftime('%Y-%m-%dT%H:%M:%S-01:00')
        params = "changes-since=%s" % hour_hence
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(0, len(data['subjects']))
        # 18. GET /subjects with a negative size_min filter
        # Verify the request is rejected with 400
        params = "size_min=-1"
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(400, response.status)
        self.assertIn("filter size_min got -1", content)
        # 19. GET /subjects with a negative size_max filter
        # Verify the request is rejected with 400
        params = "size_max=-1"
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(400, response.status)
        self.assertIn("filter size_max got -1", content)
        # 20. GET /subjects with a negative min_ram filter
        # Verify the request is rejected with 400
        params = "min_ram=-1"
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(400, response.status)
        self.assertIn("Bad value passed to filter min_ram got -1", content)
        # 21. GET /subjects with a non-boolean protected filter
        # Verify the request is rejected with 400
        params = "protected=imalittleteapot"
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(400, response.status)
        self.assertIn("protected got imalittleteapot", content)
        # 22. GET /subjects with a non-boolean is_public filter
        # Verify the request is rejected with 400
        params = "is_public=imalittleteapot"
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(400, response.status)
        self.assertIn("is_public got imalittleteapot", content)
    def test_limited_subjects(self):
        """
        Ensure marker and limit query params work
        """
        # 0. GET /subjects
        # Verify no public subjects
        path = "/v1/subjects"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        self.assertEqual('{"subjects": []}', content)
        subject_ids = []
        # 1. POST /subjects with three public subjects with various attributes
        headers = minimal_headers('Subject1')
        path = "/v1/subjects"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        subject_ids.append(jsonutils.loads(content)['subject']['id'])
        headers = minimal_headers('Subject2')
        path = "/v1/subjects"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        subject_ids.append(jsonutils.loads(content)['subject']['id'])
        headers = minimal_headers('Subject3')
        path = "/v1/subjects"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        subject_ids.append(jsonutils.loads(content)['subject']['id'])
        # 2. GET /subjects with all subjects
        # 'subjects' preserves the server's listing order and is used as
        # the reference ordering for the limit/marker checks below.
        path = "/v1/subjects"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        subjects = jsonutils.loads(content)['subjects']
        self.assertEqual(3, len(subjects))
        # 3. GET /subjects with limit of 2
        # Verify only two subjects were returned
        params = "limit=2"
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)['subjects']
        self.assertEqual(2, len(data))
        self.assertEqual(subjects[0]['id'], data[0]['id'])
        self.assertEqual(subjects[1]['id'], data[1]['id'])
        # 4. GET /subjects with marker
        # Verify only two subjects were returned (those after the marker)
        params = "marker=%s" % subjects[0]['id']
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)['subjects']
        self.assertEqual(2, len(data))
        self.assertEqual(subjects[1]['id'], data[0]['id'])
        self.assertEqual(subjects[2]['id'], data[1]['id'])
        # 5. GET /subjects with marker and limit
        # Verify only one subject was returned with the correct id
        params = "limit=1&marker=%s" % subjects[1]['id']
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)['subjects']
        self.assertEqual(1, len(data))
        self.assertEqual(subjects[2]['id'], data[0]['id'])
        # 6. GET /subjects/detail with marker and limit
        # Verify only one subject was returned with the correct id
        # NOTE(review): this step's comment mentions /subjects/detail but
        # the request below targets /v1/subjects, making it a duplicate of
        # step 5 — confirm whether the path should be /v1/subjects/detail.
        params = "limit=1&marker=%s" % subjects[1]['id']
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)['subjects']
        self.assertEqual(1, len(data))
        self.assertEqual(subjects[2]['id'], data[0]['id'])
        # DELETE subjects
        for subject_id in subject_ids:
            path = "/v1/subjects/%s" % (subject_id)
            response, content = self.http.request(path, 'DELETE')
            self.assertEqual(200, response.status)
    def test_ordered_subjects(self):
        """
        Set up three test subjects and ensure each query param filter works
        """
        # 0. GET /subjects
        # Verify no public subjects
        path = "/v1/subjects"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        self.assertEqual('{"subjects": []}', content)
        # 1. POST /subjects with three public subjects with various attributes
        # Creation order: Subject1 (size 19), ASDF (size 2), XYZ (size 5).
        subject_ids = []
        headers = {'Content-Type': 'application/octet-stream',
                   'X-Subject-Meta-Name': 'Subject1',
                   'X-Subject-Meta-Status': 'active',
                   'X-Subject-Meta-Container-Format': 'ovf',
                   'X-Subject-Meta-Disk-Format': 'vdi',
                   'X-Subject-Meta-Size': '19',
                   'X-Subject-Meta-Is-Public': 'True'}
        path = "/v1/subjects"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        subject_ids.append(jsonutils.loads(content)['subject']['id'])
        headers = {'Content-Type': 'application/octet-stream',
                   'X-Subject-Meta-Name': 'ASDF',
                   'X-Subject-Meta-Status': 'active',
                   'X-Subject-Meta-Container-Format': 'bare',
                   'X-Subject-Meta-Disk-Format': 'iso',
                   'X-Subject-Meta-Size': '2',
                   'X-Subject-Meta-Is-Public': 'True'}
        path = "/v1/subjects"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        subject_ids.append(jsonutils.loads(content)['subject']['id'])
        headers = {'Content-Type': 'application/octet-stream',
                   'X-Subject-Meta-Name': 'XYZ',
                   'X-Subject-Meta-Status': 'saving',
                   'X-Subject-Meta-Container-Format': 'ami',
                   'X-Subject-Meta-Disk-Format': 'ami',
                   'X-Subject-Meta-Size': '5',
                   'X-Subject-Meta-Is-Public': 'True'}
        path = "/v1/subjects"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        subject_ids.append(jsonutils.loads(content)['subject']['id'])
        # 2. GET /subjects with no query params
        # Verify three public subjects sorted by created_at desc
        # (newest first: XYZ, ASDF, Subject1)
        path = "/v1/subjects"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(3, len(data['subjects']))
        self.assertEqual(subject_ids[2], data['subjects'][0]['id'])
        self.assertEqual(subject_ids[1], data['subjects'][1]['id'])
        self.assertEqual(subject_ids[0], data['subjects'][2]['id'])
        # 3. GET /subjects sorted by name asc
        # Expected order: ASDF, Subject1, XYZ
        params = 'sort_key=name&sort_dir=asc'
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(3, len(data['subjects']))
        self.assertEqual(subject_ids[1], data['subjects'][0]['id'])
        self.assertEqual(subject_ids[0], data['subjects'][1]['id'])
        self.assertEqual(subject_ids[2], data['subjects'][2]['id'])
        # 4. GET /subjects sorted by size desc
        # Expected order: Subject1 (19), XYZ (5), ASDF (2)
        params = 'sort_key=size&sort_dir=desc'
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(3, len(data['subjects']))
        self.assertEqual(subject_ids[0], data['subjects'][0]['id'])
        self.assertEqual(subject_ids[2], data['subjects'][1]['id'])
        self.assertEqual(subject_ids[1], data['subjects'][2]['id'])
        # 5. GET /subjects sorted by size desc with a marker
        # Marker is the largest subject, so only the two smaller remain.
        params = 'sort_key=size&sort_dir=desc&marker=%s' % subject_ids[0]
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(2, len(data['subjects']))
        self.assertEqual(subject_ids[2], data['subjects'][0]['id'])
        self.assertEqual(subject_ids[1], data['subjects'][1]['id'])
        # 6. GET /subjects sorted by name asc with a marker
        # Marker 'XYZ' is last alphabetically, so no subjects follow it.
        params = 'sort_key=name&sort_dir=asc&marker=%s' % subject_ids[2]
        path = "/v1/subjects?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(0, len(data['subjects']))
        # DELETE subjects
        for subject_id in subject_ids:
            path = "/v1/subjects/%s" % (subject_id)
            response, content = self.http.request(path, 'DELETE')
            self.assertEqual(200, response.status)
def test_duplicate_subject_upload(self):
"""
Upload initial subject, then attempt to upload duplicate subject
"""
# 0. GET /subjects
# Verify no public subjects
path = "/v1/subjects"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
self.assertEqual('{"subjects": []}', content)
# 1. POST /subjects with public subject named Subject1
headers = {'Content-Type': 'application/octet-stream',
'X-Subject-Meta-Name': 'Subject1',
'X-Subject-Meta-Status': 'active',
'X-Subject-Meta-Container-Format': 'ovf',
'X-Subject-Meta-Disk-Format': 'vdi',
'X-Subject-Meta-Size': '19',
'X-Subject-Meta-Is-Public': 'True'}
path = "/v1/subjects"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(201, response.status)
subject = jsonutils.loads(content)['subject']
# 2. POST /subjects with public subject named Subject1, and ID: 1
headers = {'Content-Type': 'application/octet-stream',
'X-Subject-Meta-Name': 'Subject1 Update',
'X-Subject-Meta-Status': 'active',
'X-Subject-Meta-Container-Format': 'ovf',
'X-Subject-Meta-Disk-Format': 'vdi',
'X-Subject-Meta-Size': '19',
'X-Subject-Meta-Id': subject['id'],
'X-Subject-Meta-Is-Public': 'True'}
path = "/v1/subjects"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(409, response.status)
def test_delete_not_existing(self):
"""
We test the following:
0. GET /subjects/1
- Verify 404
1. DELETE /subjects/1
- Verify 404
"""
# 0. GET /subjects
# Verify no public subjects
path = "/v1/subjects"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
self.assertEqual('{"subjects": []}', content)
# 1. DELETE /subjects/1
# Verify 404 returned
path = "/v1/subjects/1"
response, content = self.http.request(path, 'DELETE')
self.assertEqual(404, response.status)
def _do_test_post_subject_content_bad_format(self, format):
"""
We test that missing container/disk format fails with 400 "Bad Request"
:see https://bugs.launchpad.net/subject/+bug/933702
"""
# Verify no public subjects
path = "/v1/subjects"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
subjects = jsonutils.loads(content)['subjects']
self.assertEqual(0, len(subjects))
path = "/v1/subjects"
# POST /subjects without given format being specified
headers = minimal_headers('Subject1')
headers['X-Subject-Meta-' + format] = 'bad_value'
with tempfile.NamedTemporaryFile() as test_data_file:
test_data_file.write("XXX")
test_data_file.flush()
response, content = self.http.request(path, 'POST',
headers=headers,
body=test_data_file.name)
self.assertEqual(400, response.status)
type = format.replace('_format', '')
expected = "Invalid %s format 'bad_value' for subject" % type
self.assertIn(expected, content,
"Could not find '%s' in '%s'" % (expected, content))
# make sure the subject was not created
# Verify no public subjects
path = "/v1/subjects"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
subjects = jsonutils.loads(content)['subjects']
self.assertEqual(0, len(subjects))
    def test_post_subject_content_bad_container_format(self):
        """An invalid container_format on POST is rejected with 400."""
        self._do_test_post_subject_content_bad_format('container_format')
    def test_post_subject_content_bad_disk_format(self):
        """An invalid disk_format on POST is rejected with 400."""
        self._do_test_post_subject_content_bad_format('disk_format')
def _do_test_put_subject_content_missing_format(self, format):
"""
We test that missing container/disk format only fails with
400 "Bad Request" when the subject content is PUT (i.e. not
on the original POST of a queued subject).
:see https://bugs.launchpad.net/subject/+bug/937216
"""
# POST queued subject
path = "/v1/subjects"
headers = {
'X-Subject-Meta-Name': 'Subject1',
'X-Subject-Meta-Is-Public': 'True',
}
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(201, response.status)
data = jsonutils.loads(content)
subject_id = data['subject']['id']
self.addDetail('subject_data', testtools.content.json_content(data))
# PUT subject content subjects without given format being specified
path = "/v1/subjects/%s" % (subject_id)
headers = minimal_headers('Subject1')
del headers['X-Subject-Meta-' + format]
with tempfile.NamedTemporaryFile() as test_data_file:
test_data_file.write("XXX")
test_data_file.flush()
response, content = self.http.request(path, 'PUT',
headers=headers,
body=test_data_file.name)
self.assertEqual(400, response.status)
type = format.replace('_format', '').capitalize()
expected = "%s format is not specified" % type
self.assertIn(expected, content,
"Could not find '%s' in '%s'" % (expected, content))
    def test_put_subject_content_bad_container_format(self):
        """A data PUT without a container_format is rejected with 400."""
        self._do_test_put_subject_content_missing_format('container_format')
    def test_put_subject_content_bad_disk_format(self):
        """A data PUT without a disk_format is rejected with 400."""
        self._do_test_put_subject_content_missing_format('disk_format')
def _do_test_mismatched_attribute(self, attribute, value):
"""
Test mismatched attribute.
"""
subject_data = "*" * FIVE_KB
headers = minimal_headers('Subject1')
headers[attribute] = value
path = "/v1/subjects"
response, content = self.http.request(path, 'POST', headers=headers,
body=subject_data)
self.assertEqual(400, response.status)
subjects_dir = os.path.join(self.test_dir, 'subjects')
subject_count = len([name for name in os.listdir(subjects_dir)
if os.path.isfile(os.path.join(subjects_dir, name))])
self.assertEqual(0, subject_count)
    def test_mismatched_size(self):
        """
        Test mismatched size.

        The declared size header is one byte larger than the actual
        payload, which must be rejected.
        """
        self._do_test_mismatched_attribute('x-subject-meta-size',
                                           str(FIVE_KB + 1))
    def test_mismatched_checksum(self):
        """
        Test mismatched checksum.

        The declared checksum header cannot match the payload's real
        checksum, so the upload must be rejected.
        """
        self._do_test_mismatched_attribute('x-subject-meta-checksum',
                                           'foobar')
class TestApiWithFakeAuth(base.ApiTest):
    def __init__(self, *args, **kwargs):
        # Run both the API and registry services with the 'fakeauth'
        # flavor so requests are authenticated via the X-Auth-Token
        # header ('user:tenant:role' format used throughout these tests).
        super(TestApiWithFakeAuth, self).__init__(*args, **kwargs)
        self.api_flavor = 'fakeauth'
        self.registry_flavor = 'fakeauth'
    def test_ownership(self):
        """X-Subject-Meta-Owner can only be set or changed by admins."""
        # Add an subject with admin privileges and ensure the owner
        # can be set to something other than what was used to authenticate
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:admin',
        }
        create_headers = {
            'X-Subject-Meta-Name': 'MySubject',
            'X-Subject-Meta-disk_format': 'raw',
            'X-Subject-Meta-container_format': 'ovf',
            'X-Subject-Meta-Is-Public': 'True',
            'X-Subject-Meta-Owner': 'tenant2',
        }
        create_headers.update(auth_headers)
        path = "/v1/subjects"
        response, content = self.http.request(path, 'POST',
                                              headers=create_headers)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        subject_id = data['subject']['id']
        path = "/v1/subjects/%s" % (subject_id)
        response, content = self.http.request(path, 'HEAD',
                                              headers=auth_headers)
        self.assertEqual(200, response.status)
        # Admin-created subject kept the explicitly requested owner.
        self.assertEqual('tenant2', response['x-subject-meta-owner'])
        # Now add an subject without admin privileges and ensure the owner
        # cannot be set to something other than what was used to authenticate
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:role1',
        }
        create_headers.update(auth_headers)
        path = "/v1/subjects"
        response, content = self.http.request(path, 'POST',
                                              headers=create_headers)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        subject_id = data['subject']['id']
        # We have to be admin to see the owner
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:admin',
        }
        create_headers.update(auth_headers)
        path = "/v1/subjects/%s" % (subject_id)
        response, content = self.http.request(path, 'HEAD',
                                              headers=auth_headers)
        self.assertEqual(200, response.status)
        # The requested 'tenant2' owner was ignored for the non-admin
        # create; the authenticated tenant was recorded instead.
        self.assertEqual('tenant1', response['x-subject-meta-owner'])
        # Make sure the non-privileged user can't update their owner either
        update_headers = {
            'X-Subject-Meta-Name': 'MySubject2',
            'X-Subject-Meta-Owner': 'tenant2',
            'X-Auth-Token': 'user1:tenant1:role1',
        }
        path = "/v1/subjects/%s" % (subject_id)
        response, content = self.http.request(path, 'PUT',
                                              headers=update_headers)
        self.assertEqual(200, response.status)
        # We have to be admin to see the owner
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:admin',
        }
        path = "/v1/subjects/%s" % (subject_id)
        response, content = self.http.request(path, 'HEAD',
                                              headers=auth_headers)
        self.assertEqual(200, response.status)
        # The PUT itself succeeded (200) but the owner change was dropped.
        self.assertEqual('tenant1', response['x-subject-meta-owner'])
        # An admin user should be able to update the owner
        auth_headers = {
            'X-Auth-Token': 'user1:tenant3:admin',
        }
        update_headers = {
            'X-Subject-Meta-Name': 'MySubject2',
            'X-Subject-Meta-Owner': 'tenant2',
        }
        update_headers.update(auth_headers)
        path = "/v1/subjects/%s" % (subject_id)
        response, content = self.http.request(path, 'PUT',
                                              headers=update_headers)
        self.assertEqual(200, response.status)
        path = "/v1/subjects/%s" % (subject_id)
        response, content = self.http.request(path, 'HEAD',
                                              headers=auth_headers)
        self.assertEqual(200, response.status)
        self.assertEqual('tenant2', response['x-subject-meta-owner'])
def test_subject_visibility_to_different_users(self):
    """Check which subjects each class of caller sees via /v1/subjects/detail."""
    owners = ['admin', 'tenant1', 'tenant2', 'none']
    visibilities = {'public': 'True', 'private': 'False'}
    subject_ids = {}

    # Seed one public and one private subject per owner (8 total), created
    # by an admin so the explicit X-Subject-Meta-Owner header is honored.
    for owner in owners:
        for visibility, is_public in visibilities.items():
            name = '%s-%s' % (owner, visibility)
            create_headers = {
                'Content-Type': 'application/octet-stream',
                'X-Subject-Meta-Name': name,
                'X-Subject-Meta-Status': 'active',
                'X-Subject-Meta-Is-Public': is_public,
                'X-Subject-Meta-Owner': owner,
                'X-Auth-Token': 'createuser:createtenant:admin',
            }
            response, content = self.http.request(
                "/v1/subjects", 'POST', headers=create_headers)
            self.assertEqual(201, response.status)
            body = jsonutils.loads(content)
            subject_ids[name] = body['subject']['id']

    def list_subjects(tenant, role='', is_public=None):
        # List subject details as tenant/role, optionally filtered by the
        # is_public query parameter; returns the parsed subject list.
        auth_token = 'user:%s:%s' % (tenant, role)
        headers = {'X-Auth-Token': auth_token}
        path = "/v1/subjects/detail"
        if is_public is not None:
            path += '?is_public=%s' % is_public
        response, content = self.http.request(path, 'GET', headers=headers)
        self.assertEqual(200, response.status)
        return jsonutils.loads(content)['subjects']

    def assert_all_public(subjects):
        # Every listed subject must be public.
        for subject in subjects:
            self.assertTrue(subject['is_public'])

    # 1. Known user sees public and their own subjects
    subjects = list_subjects('tenant1')
    self.assertEqual(5, len(subjects))
    for subject in subjects:
        self.assertTrue(subject['is_public'] or subject['owner'] == 'tenant1')
    # 2. Unknown user sees only public subjects
    subjects = list_subjects('none')
    self.assertEqual(4, len(subjects))
    assert_all_public(subjects)
    # 3. Unknown admin sees only public subjects
    subjects = list_subjects('none', role='admin')
    self.assertEqual(4, len(subjects))
    assert_all_public(subjects)
    # 4. Unknown admin, is_public=none, shows all subjects
    subjects = list_subjects('none', role='admin', is_public='none')
    self.assertEqual(8, len(subjects))
    # 5. Unknown admin, is_public=true, shows only public subjects
    subjects = list_subjects('none', role='admin', is_public='true')
    self.assertEqual(4, len(subjects))
    assert_all_public(subjects)
    # 6. Unknown admin, is_public=false, sees only private subjects
    subjects = list_subjects('none', role='admin', is_public='false')
    self.assertEqual(4, len(subjects))
    for subject in subjects:
        self.assertFalse(subject['is_public'])
    # 7. Known admin sees public and their own subjects
    subjects = list_subjects('admin', role='admin')
    self.assertEqual(5, len(subjects))
    for subject in subjects:
        self.assertTrue(subject['is_public'] or subject['owner'] == 'admin')
    # 8. Known admin, is_public=none, shows all subjects
    subjects = list_subjects('admin', role='admin', is_public='none')
    self.assertEqual(8, len(subjects))
    # 9. Known admin, is_public=true, sees all public and their subjects
    subjects = list_subjects('admin', role='admin', is_public='true')
    self.assertEqual(5, len(subjects))
    for subject in subjects:
        self.assertTrue(subject['is_public'] or subject['owner'] == 'admin')
    # 10. Known admin, is_public=false, sees all private subjects
    subjects = list_subjects('admin', role='admin', is_public='false')
    self.assertEqual(4, len(subjects))
    for subject in subjects:
        self.assertFalse(subject['is_public'])
def test_property_protections(self):
    """Verify role-based property protections on subject properties.

    Per the rules in ``self.property_file``: role ``member`` may only
    manage ``x_owner_foo``; role ``spl_role`` has per-operation
    (create/read/update/delete) permissions on the ``spl_*_prop``
    properties.
    """
    # Enable property protection
    self.config(property_protection_file=self.property_file)
    self.init()
    CREATE_HEADERS = {
        'X-Subject-Meta-Name': 'MySubject',
        'X-Subject-Meta-disk_format': 'raw',
        'X-Subject-Meta-container_format': 'ovf',
        'X-Subject-Meta-Is-Public': 'True',
        'X-Subject-Meta-Owner': 'tenant2',
    }
    # Create a subject for role member with extra properties.
    # Raises 403 since user is not allowed to create 'foo'.
    auth_headers = {
        'X-Auth-Token': 'user1:tenant1:member',
    }
    custom_props = {
        'x-subject-meta-property-foo': 'bar'
    }
    auth_headers.update(custom_props)
    auth_headers.update(CREATE_HEADERS)
    path = "/v1/subjects"
    response, content = self.http.request(path, 'POST',
                                          headers=auth_headers)
    self.assertEqual(403, response.status)
    # Create a subject for role member without 'foo'.
    auth_headers = {
        'X-Auth-Token': 'user1:tenant1:member',
    }
    custom_props = {
        'x-subject-meta-property-x_owner_foo': 'o_s_bar',
    }
    auth_headers.update(custom_props)
    auth_headers.update(CREATE_HEADERS)
    path = "/v1/subjects"
    response, content = self.http.request(path, 'POST',
                                          headers=auth_headers)
    self.assertEqual(201, response.status)
    # Returned subject entity should have 'x_owner_foo'.
    data = jsonutils.loads(content)
    self.assertEqual('o_s_bar',
                     data['subject']['properties']['x_owner_foo'])
    # Create a subject for role spl_role with extra properties.
    auth_headers = {
        'X-Auth-Token': 'user1:tenant1:spl_role',
    }
    custom_props = {
        'X-Subject-Meta-Property-spl_create_prop': 'create_bar',
        'X-Subject-Meta-Property-spl_read_prop': 'read_bar',
        'X-Subject-Meta-Property-spl_update_prop': 'update_bar',
        'X-Subject-Meta-Property-spl_delete_prop': 'delete_bar'
    }
    auth_headers.update(custom_props)
    auth_headers.update(CREATE_HEADERS)
    path = "/v1/subjects"
    response, content = self.http.request(path, 'POST',
                                          headers=auth_headers)
    self.assertEqual(201, response.status)
    data = jsonutils.loads(content)
    subject_id = data['subject']['id']
    # Attempt to update two properties, one protected(spl_read_prop), the
    # other not(spl_update_prop). Request should be forbidden.
    # Fix: removed a no-op `auth_headers.update(auth_headers)` (the dict was
    # merged into itself) here and in the two stanzas below.
    auth_headers = {
        'X-Auth-Token': 'user1:tenant1:spl_role',
    }
    custom_props = {
        'X-Subject-Meta-Property-spl_read_prop': 'r',
        'X-Subject-Meta-Property-spl_update_prop': 'u',
        'X-Glance-Registry-Purge-Props': 'False'
    }
    auth_headers.update(custom_props)
    path = "/v1/subjects/%s" % subject_id
    response, content = self.http.request(path, 'PUT',
                                          headers=auth_headers)
    self.assertEqual(403, response.status)
    # Attempt to create properties which are forbidden.
    auth_headers = {
        'X-Auth-Token': 'user1:tenant1:spl_role',
    }
    custom_props = {
        'X-Subject-Meta-Property-spl_new_prop': 'new',
        'X-Glance-Registry-Purge-Props': 'True'
    }
    auth_headers.update(custom_props)
    path = "/v1/subjects/%s" % subject_id
    response, content = self.http.request(path, 'PUT',
                                          headers=auth_headers)
    self.assertEqual(403, response.status)
    # Attempt to update, create and delete properties.
    auth_headers = {
        'X-Auth-Token': 'user1:tenant1:spl_role',
    }
    custom_props = {
        'X-Subject-Meta-Property-spl_create_prop': 'create_bar',
        'X-Subject-Meta-Property-spl_read_prop': 'read_bar',
        'X-Subject-Meta-Property-spl_update_prop': 'u',
        'X-Glance-Registry-Purge-Props': 'True'
    }
    auth_headers.update(custom_props)
    path = "/v1/subjects/%s" % subject_id
    response, content = self.http.request(path, 'PUT',
                                          headers=auth_headers)
    self.assertEqual(200, response.status)
    # Returned subject entity should reflect the changes.
    subject = jsonutils.loads(content)
    # 'spl_update_prop' has update permission for spl_role
    # hence the value has changed.
    self.assertEqual('u', subject['subject']['properties']['spl_update_prop'])
    # 'spl_delete_prop' has delete permission for spl_role
    # hence the property has been deleted.
    self.assertNotIn('spl_delete_prop', subject['subject']['properties'])
    # 'spl_create_prop' has create permission for spl_role
    # hence the property has been created.
    self.assertEqual('create_bar',
                     subject['subject']['properties']['spl_create_prop'])
    # Subject deletion should work.
    auth_headers = {
        'X-Auth-Token': 'user1:tenant1:spl_role',
    }
    path = "/v1/subjects/%s" % subject_id
    response, content = self.http.request(path, 'DELETE',
                                          headers=auth_headers)
    self.assertEqual(200, response.status)
    # This subject should no longer be directly accessible.
    auth_headers = {
        'X-Auth-Token': 'user1:tenant1:spl_role',
    }
    path = "/v1/subjects/%s" % subject_id
    response, content = self.http.request(path, 'HEAD',
                                          headers=auth_headers)
    self.assertEqual(404, response.status)
def test_property_protections_special_chars(self):
    """Exercise property protections declared with the special role chars.

    Properties protected with '@' (everyone) must be creatable, readable,
    updatable and deletable by both the admin and an unknown role, while
    properties protected with '!' (no one) must reject all four operations
    for every role.
    """
    # Enable property protection
    self.config(property_protection_file=self.property_file)
    self.init()

    ADMIN_TOKEN = 'user1:tenant1:admin'
    JOE_TOKEN = 'user1:tenant1:joe_soap'
    CREATE_HEADERS = {
        'X-Subject-Meta-Name': 'MySubject',
        'X-Subject-Meta-disk_format': 'raw',
        'X-Subject-Meta-container_format': 'ovf',
        'X-Subject-Meta-Is-Public': 'True',
        'X-Subject-Meta-Owner': 'tenant2',
        'X-Subject-Meta-Size': '0',
    }

    def create_subject(token, custom_props=None):
        # POST a new subject as `token`, optionally with extra property
        # headers, and return (response, content).
        headers = {'X-Auth-Token': token}
        if custom_props:
            headers.update(custom_props)
        headers.update(CREATE_HEADERS)
        return self.http.request("/v1/subjects", 'POST', headers=headers)

    def put_subject(sid, token, custom_props):
        # PUT property updates on subject `sid` as `token`.
        headers = {'X-Auth-Token': token}
        headers.update(custom_props)
        return self.http.request("/v1/subjects/%s" % sid, 'PUT',
                                 headers=headers)

    def head_subject(sid, token):
        # HEAD subject `sid` as `token`.
        headers = {'X-Auth-Token': token}
        return self.http.request("/v1/subjects/%s" % sid, 'HEAD',
                                 headers=headers)

    # Create a subject
    response, content = create_subject('user1:tenant1:member')
    self.assertEqual(201, response.status)
    subject_id = jsonutils.loads(content)['subject']['id']

    # Verify both admin and unknown role can create properties marked with
    # '@'
    response, content = put_subject(
        subject_id, ADMIN_TOKEN,
        {'X-Subject-Meta-Property-x_all_permitted_admin': '1'})
    self.assertEqual(200, response.status)
    subject = jsonutils.loads(content)
    self.assertEqual('1',
                     subject['subject']['properties']['x_all_permitted_admin'])
    response, content = put_subject(
        subject_id, JOE_TOKEN,
        {'X-Subject-Meta-Property-x_all_permitted_joe_soap': '1',
         'X-Glance-Registry-Purge-Props': 'False'})
    self.assertEqual(200, response.status)
    subject = jsonutils.loads(content)
    self.assertEqual(
        '1', subject['subject']['properties']['x_all_permitted_joe_soap'])

    # Verify both admin and unknown role can read properties marked with
    # '@'
    for token in (ADMIN_TOKEN, JOE_TOKEN):
        response, content = head_subject(subject_id, token)
        self.assertEqual(200, response.status)
        self.assertEqual('1', response.get(
            'x-subject-meta-property-x_all_permitted_admin'))
        self.assertEqual('1', response.get(
            'x-subject-meta-property-x_all_permitted_joe_soap'))

    # Verify both admin and unknown role can update properties marked with
    # '@'
    response, content = put_subject(
        subject_id, ADMIN_TOKEN,
        {'X-Subject-Meta-Property-x_all_permitted_admin': '2',
         'X-Glance-Registry-Purge-Props': 'False'})
    self.assertEqual(200, response.status)
    subject = jsonutils.loads(content)
    self.assertEqual('2',
                     subject['subject']['properties']['x_all_permitted_admin'])
    response, content = put_subject(
        subject_id, JOE_TOKEN,
        {'X-Subject-Meta-Property-x_all_permitted_joe_soap': '2',
         'X-Glance-Registry-Purge-Props': 'False'})
    self.assertEqual(200, response.status)
    subject = jsonutils.loads(content)
    self.assertEqual(
        '2', subject['subject']['properties']['x_all_permitted_joe_soap'])

    # Verify both admin and unknown role can delete properties marked with
    # '@'
    response, content = put_subject(
        subject_id, ADMIN_TOKEN,
        {'X-Subject-Meta-Property-x_all_permitted_joe_soap': '2',
         'X-Glance-Registry-Purge-Props': 'True'})
    self.assertEqual(200, response.status)
    subject = jsonutils.loads(content)
    self.assertNotIn('x_all_permitted_admin', subject['subject']['properties'])
    response, content = put_subject(
        subject_id, JOE_TOKEN,
        {'X-Glance-Registry-Purge-Props': 'True'})
    self.assertEqual(200, response.status)
    subject = jsonutils.loads(content)
    self.assertNotIn('x_all_permitted_joe_soap',
                     subject['subject']['properties'])

    # Verify neither admin nor unknown role can create a property protected
    # with '!'
    response, content = put_subject(
        subject_id, ADMIN_TOKEN,
        {'X-Subject-Meta-Property-x_none_permitted_admin': '1'})
    self.assertEqual(403, response.status)
    response, content = put_subject(
        subject_id, JOE_TOKEN,
        {'X-Subject-Meta-Property-x_none_permitted_joe_soap': '1'})
    self.assertEqual(403, response.status)

    # Verify neither admin nor unknown role can read properties marked with
    # '!'
    response, content = create_subject(
        ADMIN_TOKEN, {'X-Subject-Meta-Property-x_none_read': '1'})
    self.assertEqual(201, response.status)
    subject_id = jsonutils.loads(content)['subject']['id']
    for token in (ADMIN_TOKEN, JOE_TOKEN):
        response, content = head_subject(subject_id, token)
        self.assertEqual(200, response.status)
        self.assertRaises(KeyError,
                          response.get, 'X-Subject-Meta-Property-x_none_read')

    # Verify neither admin nor unknown role can update properties marked
    # with '!'
    response, content = create_subject(
        ADMIN_TOKEN, {'X-Subject-Meta-Property-x_none_update': '1'})
    self.assertEqual(201, response.status)
    subject_id = jsonutils.loads(content)['subject']['id']
    for token in (ADMIN_TOKEN, JOE_TOKEN):
        response, content = put_subject(
            subject_id, token,
            {'X-Subject-Meta-Property-x_none_update': '2'})
        self.assertEqual(403, response.status)

    # Verify neither admin nor unknown role can delete properties marked
    # with '!'
    response, content = create_subject(
        ADMIN_TOKEN, {'X-Subject-Meta-Property-x_none_delete': '1'})
    self.assertEqual(201, response.status)
    subject_id = jsonutils.loads(content)['subject']['id']
    for token in (ADMIN_TOKEN, JOE_TOKEN):
        response, content = put_subject(
            subject_id, token, {'X-Glance-Registry-Purge-Props': 'True'})
        self.assertEqual(403, response.status)
| 34,972 | 38,619 | 180 |
da4c3a059b82cec21a272172a71fea3e27b71ddf | 14,056 | py | Python | runexp.py | behzadhaghgoo/roper | c420f468d869c3a3ad3c6cc0b4dcb74049eba302 | [
"MIT"
] | null | null | null | runexp.py | behzadhaghgoo/roper | c420f468d869c3a3ad3c6cc0b4dcb74049eba302 | [
"MIT"
] | null | null | null | runexp.py | behzadhaghgoo/roper | c420f468d869c3a3ad3c6cc0b4dcb74049eba302 | [
"MIT"
] | null | null | null | import random
import math
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
from tqdm import tqdm
from ray.tune import run, Trainable, sample_from
from dqn import DQN, update_target
from loss import TDLoss, StableTDLoss
from pbuffer import PrioritizedBuffer
from env import get_env
# True when a CUDA device is available; used below and by the training code
# to move models/tensors to the GPU.
USE_CUDA = torch.cuda.is_available()


# PEP 8 (E731): the following were lambdas bound to names; plain functions
# behave identically but give readable names in tracebacks and allow docs.
def Variable(*args, **kwargs):
    """Create an autograd.Variable, placed on the GPU when CUDA is available."""
    var = autograd.Variable(*args, **kwargs)
    return var.cuda() if USE_CUDA else var


# Prioritized-replay importance-sampling schedule: beta is annealed linearly
# from beta_start up to 1.0 over beta_frames frames.
beta_start = 0.4
beta_frames = 1000


def BETA_BY_FRAME(frame_idx):
    """Linearly annealed prioritized-replay beta, capped at 1.0."""
    return min(1.0, beta_start + frame_idx * (1.0 - beta_start) / beta_frames)


# Epsilon-greedy exploration schedule: exponential decay from epsilon_start
# down to epsilon_final with time constant epsilon_decay (in frames).
epsilon_start = 1.0
epsilon_final = 0.01
epsilon_decay = 500


def EPSILON_BY_FRAME(frame_idx):
    """Exponentially decayed exploration rate at the given frame index."""
    return epsilon_final + (epsilon_start - epsilon_final) * math.exp(
        -1. * frame_idx / epsilon_decay)
if __name__ == '__main__':
    # Manual entry point: build the experiment runner, initialize its
    # environments/config, and print the reward difference from _train().
    trainable = MyTrainable()
    trainable._setup()
    print("train result: ", trainable._train())
# def train(self):
# config = self.config
# '''
# hyperparams
# method - 'average_over_batch', 'PER'
# var
# mean
# decision_eps,
# alpha, beta,
# hardcoded, cnn,
# invert_actions = False,
# num_frames = 30000,
# num_val_trials = 10,
# batch_size = 32,
# gamma = 0.99,
# num_trials = 5,
# USE_CUDA = False,
# device = "",
# eps = 1.,
# avg_stored=False
# '''
# if USE_CUDA:
# device = torch.device("cuda")
# """Args:"""
# losses = []
# all_rewards = []
# standard_val_rewards = []
# noisy_val_rewards = []
# states_count_ratios = []
# episode_reward = 0
# # Initialize state
# noisyGame = False
# state = config['env'].reset()
# state = np.append(state, float(noisyGame))
# meta_state = (state, float(noisyGame))
# # Initialize replay buffer, model, TD loss, and optimizers
# result_df = pd.DataFrame()
# theta = 1.
# power = config['theta']
# all_standard_val_rewards = []
# all_proportions = []
# std_weights = []
# noisy_weights = []
# std_buffer_example_count = []
# noisy_buffer_example_count = []
# for t in range(num_trials):
# if cnn:
# current_model = CnnDQN(env.observation_space.shape, env.action_space.n)
# target_model = CnnDQN(env.observation_space.shape, env.action_space.n)
# else:
# current_model = DQN(env.observation_space.shape[0] + 1, env.action_space.n)
# target_model = DQN(env.observation_space.shape[0] + 1, env.action_space.n)
# td_loss = TDLoss(method=config['method'])
# optimizer = optim.Adam(current_model.parameters())
# # # Single GPU Code
# if USE_CUDA:
# current_model = current_model.cuda()
# target_model = target_model.cuda()
# if config['method']=='average_over_buffer':
# replay_buffer = AugmentedPrioritizedBuffer(int(1e6))
# else:
# replay_buffer = PrioritizedBuffer(int(1e6))
# print("trial number: {}".format(t))
# for frame_idx in range(1, config['num_frames'] + 1):
# epsilon = EPSILON_BY_FRAME(frame_idx)
# original_action = current_model.act(state, epsilon)
# # If in noisy environment, make action random with probability eps
# if noisyGame and random.uniform(0,1) < config['decision_eps']:
# if invert_actions:
# actual_action = 1 - original_action # invert
# else:
# actual_action = original_action
# else:
# actual_action = original_action
# next_state, reward, done, _ = config['env'].step(actual_action)
# # If in noisy environment, make reward completely random
# if noisyGame:
# reward *= np.random.normal(config['mean'], var)
# if not cnn:
# next_state = np.append(next_state, float(noisyGame))
# meta_next_state = (next_state, float(noisyGame))
# # store q values and hidden states in buffer
# if config['method']=='average_over_buffer':
# state_var = Variable(torch.FloatTensor(np.float32(state)))
# with torch.no_grad():
# q_values, hiddens = current_model.forward(state_var, config['return_latent'] = "last")
# replay_buffer.push(meta_state, original_action, reward, meta_next_state, done, hiddens, q_values)
# else:
# replay_buffer.push(meta_state, original_action, reward, meta_next_state, done)
# meta_state = meta_next_state
# episode_reward += reward
# if done:
# noisyGame = 1-noisyGame
# state = env.reset()
# state = np.append(state, float(noisyGame))
# meta_state = (state, float(noisyGame))
# all_rewards.append(episode_reward)
# episode_reward = 0
# if len(replay_buffer) > batch_size and frame_idx % 4 == 0:
# beta = BETA_BY_FRAME(frame_idx)
# loss = td_loss.compute(current_model, target_model, beta, replay_buffer, optimizer)
# losses.append(loss.data.tolist())
# if frame_idx % 200 == 0:
# all_standard_val_rewards.append(test(val_env, False, eps, num_val_trials, current_model))
# all_proportions.append(float(replay_buffer.states_count[1]) / (float(replay_buffer.states_count[1]) + float(replay_buffer.states_count[0])))
# weight_dict = replay_buffer.get_average_weight_by_env()
# std_weights.append(weight_dictconfig['std_avg'])
# noisy_weights.append(weight_dictconfig['noisy_avg'])
# std_buffer_example_count.append(weight_dictconfig['std_count'])
# noisy_buffer_example_count.append(weight_dictconfig['noisy_count'])
# # plot(frame_idx, all_rewards, losses, standard_val_rewards, noisy_val_rewards, states_count_ratios)
# if frame_idx % 1000 == 0:
# print("Frame {}".format(frame_idx))
# update_target(current_model, target_model)
# print(len(all_proportions))
# result_dfconfig['frame'] = 200*np.arange(len(all_proportions)) % num_frames
# result_dfconfig['trial_num'] = np.floor(200 *np.arange(len(all_proportions)) / num_frames)
# result_dfconfig['val_reward'] = all_standard_val_rewards
# result_dfconfig['proportion'] = all_proportions
# result_dfconfig['std_weights'] = std_weights
# result_dfconfig['noisy_weights'] = noisy_weights
# result_dfconfig['std_buffer_example_count'] = std_buffer_example_count
# result_dfconfig['noisy_buffer_example_count'] = noisy_buffer_example_count
# return result_df
| 36.509091 | 161 | 0.575982 | import random
import math
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
from tqdm import tqdm
from ray.tune import run, Trainable, sample_from
from dqn import DQN, update_target
from loss import TDLoss, StableTDLoss
from pbuffer import PrioritizedBuffer
from env import get_env
# True when a CUDA device is available; used below and by the training code
# to move models/tensors to the GPU.
USE_CUDA = torch.cuda.is_available()


# PEP 8 (E731): the following were lambdas bound to names; plain functions
# behave identically but give readable names in tracebacks and allow docs.
def Variable(*args, **kwargs):
    """Create an autograd.Variable, placed on the GPU when CUDA is available."""
    var = autograd.Variable(*args, **kwargs)
    return var.cuda() if USE_CUDA else var


# Prioritized-replay importance-sampling schedule: beta is annealed linearly
# from beta_start up to 1.0 over beta_frames frames.
beta_start = 0.4
beta_frames = 1000


def BETA_BY_FRAME(frame_idx):
    """Linearly annealed prioritized-replay beta, capped at 1.0."""
    return min(1.0, beta_start + frame_idx * (1.0 - beta_start) / beta_frames)


# Epsilon-greedy exploration schedule: exponential decay from epsilon_start
# down to epsilon_final with time constant epsilon_decay (in frames).
epsilon_start = 1.0
epsilon_final = 0.01
epsilon_decay = 500


def EPSILON_BY_FRAME(frame_idx):
    """Exponentially decayed exploration rate at the given frame index."""
    return epsilon_final + (epsilon_start - epsilon_final) * math.exp(
        -1. * frame_idx / epsilon_decay)
class MyTrainable(object):  # TODO(review): presumably meant to subclass ray.tune Trainable
    """Noisy-environment DQN experiment runner.

    Trains a DQN on CartPole where episodes alternate between a standard
    and a "noisy" mode (randomly scaled rewards, optionally inverted
    actions), and ``_train`` reports the difference in mean validation
    reward between two runs.
    """

    def _setup(self):
        """Create train/validation environments and the experiment config."""
        self.env_id = "CartPole-v0"
        self.env, self.val_env = get_env(self.env_id)
        # Fix: removed duplicate keys (num_val_trials/batch_size/gamma/
        # num_trials each appeared twice with the same values).
        self.config = {
            "num_trials": 5,
            "num_frames": 30000,
            "num_val_trials": 10,
            "batch_size": 32,
            "gamma": 0.99,
            # Selects the loss in train_helper ('our'/'ours' -> StableTDLoss).
            "method": 'our',
            "var": 1.,
            "mean": 0.,
            "decision_eps": 1.,
            "theta": 1.,
            "cnn": False,
            "invert_actions": False,
            "return_latent": 'second_hidden',
            "USE_CUDA": True,
            "device": "",
            "eps": 1.,
            "num_workers": 0,
            "num_gpus": 0,
            # These params are tuned from a fixed starting value.
            "lr": 1e-4
        }

    def _train(self):
        """Return (non-hardcoded reward) - (hardcoded reward) over two runs."""
        hardcoded_reward = self.train_helper(True)
        non_hardcoded_reward = self.train_helper(False)
        return non_hardcoded_reward - hardcoded_reward

    def train_helper(self, hardcoded):
        """Train for config['num_trials'] runs; return mean validation reward.

        Args:
            hardcoded: currently unused -- TODO(review): confirm intended
                effect (both _train calls run the identical procedure).
        """
        config = self.config
        losses = []
        all_rewards = []
        episode_reward = 0
        # Episodes alternate between the standard and the noisy environment;
        # each observation is tagged with the current environment flag.
        noisyGame = False
        state = self.env.reset()
        state = np.append(state, float(noisyGame))
        meta_state = (state, float(noisyGame))
        all_standard_val_rewards = []
        all_proportions = []
        std_weights = []
        noisy_weights = []
        std_buffer_example_count = []
        noisy_buffer_example_count = []
        for t in range(config['num_trials']):
            print("state: ", state)
            if config['cnn']:
                # NOTE(review): CnnDQN is not imported in this module; this
                # branch is dead while config['cnn'] is False.
                current_model = CnnDQN(self.env.observation_space.shape,
                                       self.env.action_space.n)
                target_model = CnnDQN(self.env.observation_space.shape,
                                      self.env.action_space.n)
            else:
                current_model = DQN(self.env.observation_space.shape[0] + 1,
                                    self.env.action_space.n)
                target_model = DQN(self.env.observation_space.shape[0] + 1,
                                   self.env.action_space.n)
            # Fix: config stores 'our' but the check compared against 'ours',
            # so StableTDLoss was never selected; accept both spellings.
            if config['method'] in ('our', 'ours'):
                td_loss = StableTDLoss()
            else:
                td_loss = TDLoss()
            optimizer = optim.Adam(current_model.parameters(), lr=config['lr'])
            if USE_CUDA:
                current_model = current_model.cuda()
                target_model = target_model.cuda()
            replay_buffer = PrioritizedBuffer(int(1e6))
            print("trial number: {}".format(t))
            for frame_idx in range(1, config['num_frames'] + 1):
                epsilon = EPSILON_BY_FRAME(frame_idx)
                original_action = current_model.act(state, epsilon)
                # In the noisy environment, optionally invert the action with
                # probability decision_eps.
                if noisyGame and random.uniform(0, 1) < config['decision_eps']:
                    if config['invert_actions']:
                        actual_action = 1 - original_action  # invert
                    else:
                        actual_action = original_action
                else:
                    actual_action = original_action
                next_state, reward, done, _ = self.env.step(actual_action)
                # In the noisy environment, scale the reward by random noise.
                if noisyGame:
                    reward *= np.random.normal(config['mean'], config['var'])
                if not config['cnn']:
                    next_state = np.append(next_state, float(noisyGame))
                meta_next_state = (next_state, float(noisyGame))
                replay_buffer.push(meta_state, original_action, reward,
                                   meta_next_state, done)
                meta_state = meta_next_state
                # Fix: advance the raw state too -- previously `state` stayed
                # at the episode's initial observation, so the policy acted on
                # stale input for the entire episode.
                state = next_state
                episode_reward += reward
                if done:
                    noisyGame = 1 - noisyGame
                    state = self.env.reset()
                    state = np.append(state, float(noisyGame))
                    meta_state = (state, float(noisyGame))
                    all_rewards.append(episode_reward)
                    episode_reward = 0
                # Optimize every 4th frame once the buffer can fill a batch.
                if len(replay_buffer) > config['batch_size'] and frame_idx % 4 == 0:
                    beta = BETA_BY_FRAME(frame_idx)
                    loss = td_loss.compute(current_model, target_model, beta,
                                           replay_buffer, optimizer)
                    losses.append(loss.data.tolist())
                # Periodically evaluate and record buffer statistics.
                if frame_idx % 200 == 0:
                    all_standard_val_rewards.append(self._test(
                        False, config['eps'], config['num_val_trials'],
                        current_model))
                    all_proportions.append(
                        float(replay_buffer.states_count[1]) /
                        (float(replay_buffer.states_count[1]) +
                         float(replay_buffer.states_count[0])))
                    weight_dict = replay_buffer.get_average_weight_by_env()
                    # Fix: was `weight_dictconfig['...']` (undefined name);
                    # the statistics live in the weight_dict returned above.
                    std_weights.append(weight_dict['std_avg'])
                    noisy_weights.append(weight_dict['noisy_avg'])
                    std_buffer_example_count.append(weight_dict['std_count'])
                    noisy_buffer_example_count.append(weight_dict['noisy_count'])
                if frame_idx % 1000 == 0:
                    print("Frame {}".format(frame_idx))
                    update_target(current_model, target_model)
        return np.mean(all_standard_val_rewards)

    def _test(self, noisyGame, eps, num_val_trials, current_model):
        """Greedily evaluate current_model on the validation environment.

        Fix: ``self`` was missing from the signature although the method is
        invoked as ``self._test(...)`` and reads ``self.val_env``.

        Args:
            noisyGame: environment flag appended to each observation.
            eps: unused -- TODO(review): confirm whether exploration was
                intended during evaluation (epsilon is forced to 0 below).
            num_val_trials: number of evaluation episodes.
            current_model: policy network providing ``act``.

        Returns:
            Mean episode reward over the evaluation episodes.
        """
        rewards = []
        for i in range(num_val_trials):
            epsilon = 0  # act greedily during evaluation
            episode_reward = 0
            state = self.val_env.reset()
            state = np.append(state, float(noisyGame))
            with torch.no_grad():
                while True:
                    original_action = current_model.act(state, epsilon)
                    # Unwrap tensor-valued actions to a plain scalar.
                    if original_action != int(original_action):
                        original_action = original_action.numpy()[0]
                    actual_action = original_action
                    next_state, reward, done, _ = self.val_env.step(actual_action)
                    next_state = np.append(next_state, float(noisyGame))
                    state = next_state
                    episode_reward += reward
                    if done:
                        rewards.append(episode_reward)
                        break
        return np.mean(rewards)
if __name__ == '__main__':
    # Manual entry point: build the experiment runner, initialize its
    # environments/config, and print the reward difference from _train().
    trainable = MyTrainable()
    trainable._setup()
    print("train result: ", trainable._train())
# def train(self):
# config = self.config
# '''
# hyperparams
# method - 'average_over_batch', 'PER'
# var
# mean
# decision_eps,
# alpha, beta,
# hardcoded, cnn,
# invert_actions = False,
# num_frames = 30000,
# num_val_trials = 10,
# batch_size = 32,
# gamma = 0.99,
# num_trials = 5,
# USE_CUDA = False,
# device = "",
# eps = 1.,
# avg_stored=False
# '''
# if USE_CUDA:
# device = torch.device("cuda")
# """Args:"""
# losses = []
# all_rewards = []
# standard_val_rewards = []
# noisy_val_rewards = []
# states_count_ratios = []
# episode_reward = 0
# # Initialize state
# noisyGame = False
# state = config['env'].reset()
# state = np.append(state, float(noisyGame))
# meta_state = (state, float(noisyGame))
# # Initialize replay buffer, model, TD loss, and optimizers
# result_df = pd.DataFrame()
# theta = 1.
# power = config['theta']
# all_standard_val_rewards = []
# all_proportions = []
# std_weights = []
# noisy_weights = []
# std_buffer_example_count = []
# noisy_buffer_example_count = []
# for t in range(num_trials):
# if cnn:
# current_model = CnnDQN(env.observation_space.shape, env.action_space.n)
# target_model = CnnDQN(env.observation_space.shape, env.action_space.n)
# else:
# current_model = DQN(env.observation_space.shape[0] + 1, env.action_space.n)
# target_model = DQN(env.observation_space.shape[0] + 1, env.action_space.n)
# td_loss = TDLoss(method=config['method'])
# optimizer = optim.Adam(current_model.parameters())
# # # Single GPU Code
# if USE_CUDA:
# current_model = current_model.cuda()
# target_model = target_model.cuda()
# if config['method']=='average_over_buffer':
# replay_buffer = AugmentedPrioritizedBuffer(int(1e6))
# else:
# replay_buffer = PrioritizedBuffer(int(1e6))
# print("trial number: {}".format(t))
# for frame_idx in range(1, config['num_frames'] + 1):
# epsilon = EPSILON_BY_FRAME(frame_idx)
# original_action = current_model.act(state, epsilon)
# # If in noisy environment, make action random with probability eps
# if noisyGame and random.uniform(0,1) < config['decision_eps']:
# if invert_actions:
# actual_action = 1 - original_action # invert
# else:
# actual_action = original_action
# else:
# actual_action = original_action
# next_state, reward, done, _ = config['env'].step(actual_action)
# # If in noisy environment, make reward completely random
# if noisyGame:
# reward *= np.random.normal(config['mean'], var)
# if not cnn:
# next_state = np.append(next_state, float(noisyGame))
# meta_next_state = (next_state, float(noisyGame))
# # store q values and hidden states in buffer
# if config['method']=='average_over_buffer':
# state_var = Variable(torch.FloatTensor(np.float32(state)))
# with torch.no_grad():
# q_values, hiddens = current_model.forward(state_var, config['return_latent'] = "last")
# replay_buffer.push(meta_state, original_action, reward, meta_next_state, done, hiddens, q_values)
# else:
# replay_buffer.push(meta_state, original_action, reward, meta_next_state, done)
# meta_state = meta_next_state
# episode_reward += reward
# if done:
# noisyGame = 1-noisyGame
# state = env.reset()
# state = np.append(state, float(noisyGame))
# meta_state = (state, float(noisyGame))
# all_rewards.append(episode_reward)
# episode_reward = 0
# if len(replay_buffer) > batch_size and frame_idx % 4 == 0:
# beta = BETA_BY_FRAME(frame_idx)
# loss = td_loss.compute(current_model, target_model, beta, replay_buffer, optimizer)
# losses.append(loss.data.tolist())
# if frame_idx % 200 == 0:
# all_standard_val_rewards.append(test(val_env, False, eps, num_val_trials, current_model))
# all_proportions.append(float(replay_buffer.states_count[1]) / (float(replay_buffer.states_count[1]) + float(replay_buffer.states_count[0])))
# weight_dict = replay_buffer.get_average_weight_by_env()
# std_weights.append(weight_dictconfig['std_avg'])
# noisy_weights.append(weight_dictconfig['noisy_avg'])
# std_buffer_example_count.append(weight_dictconfig['std_count'])
# noisy_buffer_example_count.append(weight_dictconfig['noisy_count'])
# # plot(frame_idx, all_rewards, losses, standard_val_rewards, noisy_val_rewards, states_count_ratios)
# if frame_idx % 1000 == 0:
# print("Frame {}".format(frame_idx))
# update_target(current_model, target_model)
# print(len(all_proportions))
# result_dfconfig['frame'] = 200*np.arange(len(all_proportions)) % num_frames
# result_dfconfig['trial_num'] = np.floor(200 *np.arange(len(all_proportions)) / num_frames)
# result_dfconfig['val_reward'] = all_standard_val_rewards
# result_dfconfig['proportion'] = all_proportions
# result_dfconfig['std_weights'] = std_weights
# result_dfconfig['noisy_weights'] = noisy_weights
# result_dfconfig['std_buffer_example_count'] = std_buffer_example_count
# result_dfconfig['noisy_buffer_example_count'] = noisy_buffer_example_count
# return result_df
| 6,881 | 16 | 131 |
0ed859f4957ad6b26162065f01fc1831972bca3f | 2,517 | py | Python | tests/ions/test_quintic_spline.py | shankar1729/qimpy | 5a4c1ea1fedc88909d426ce54101d6d07fa82e8c | [
"BSD-3-Clause"
] | 3 | 2021-05-25T00:11:50.000Z | 2022-01-30T21:49:00.000Z | tests/ions/test_quintic_spline.py | shankar1729/qimpy | 5a4c1ea1fedc88909d426ce54101d6d07fa82e8c | [
"BSD-3-Clause"
] | 2 | 2021-09-28T19:18:38.000Z | 2021-11-23T13:23:17.000Z | tests/ions/test_quintic_spline.py | shankar1729/qimpy | 5a4c1ea1fedc88909d426ce54101d6d07fa82e8c | [
"BSD-3-Clause"
] | null | null | null | import qimpy as qp
import torch
import pytest
@pytest.mark.mpi_skip
def main():
    """Run test and additionally plot for visual inspection."""
    import matplotlib.pyplot as plt

    qp.utils.log_config()
    qp.rc.init()

    # Visual check: a single blip basis function and its first four derivatives.
    plt.figure()
    blip_coeff = torch.zeros(12)
    blip_coeff[5] = 1
    t_grid = torch.linspace(0.0, 12.0, 101, device=qp.rc.device)
    for order in range(5):
        interpolate = qp.ions.quintic_spline.Interpolator(t_grid, 2.0, order)
        plt.plot(
            t_grid.to(qp.rc.cpu),
            interpolate(blip_coeff).to(qp.rc.cpu),
            label=f"Deriv: {order}",
        )
    plt.axhline(0, color="k", ls="dotted")
    plt.legend()

    # Run the test itself to obtain reference data and spline coefficients:
    dx, x_fine, y_fine, y_prime_fine, y_coeff = test_interpolator()

    # Overlay interpolants of each derivative order on the reference curves:
    plt.figure()
    x_cpu = x_fine.to(qp.rc.cpu)
    plt.plot(
        x_cpu,
        y_fine.to(qp.rc.cpu),
        "k--",
        label="Reference data",
        zorder=10,
    )
    plt.plot(
        x_cpu,
        y_prime_fine.to(qp.rc.cpu),
        "k:",
        label="Reference derivative",
        zorder=10,
    )
    for order in range(5):
        interpolate = qp.ions.quintic_spline.Interpolator(x_fine, dx, order)
        plt.plot(
            x_cpu,
            interpolate(y_coeff).to(qp.rc.cpu),
            label=f"Interpolant (deriv: {order})",
            lw=3,
        )
    plt.axhline(0, color="k", ls="dotted")
    plt.legend()
    plt.show()
if __name__ == "__main__":
main()
| 27.659341 | 84 | 0.564164 | import qimpy as qp
import torch
import pytest
@pytest.mark.mpi_skip
def test_interpolator():
def f_test(x):
"""Non-trivial test function with correct symmetries"""
return torch.exp(-torch.sin(0.01 * x * x)) * torch.cos(0.1 * x)
def f_test_prime(x):
"""Analytical derivative of above."""
return -torch.exp(-torch.sin(0.01 * x * x)) * (
torch.sin(0.1 * x) * 0.1
+ torch.cos(0.01 * x * x) * 0.02 * x * torch.cos(0.1 * x)
)
dx = 0.1
x = torch.arange(0.0, 40.0, dx, device=qp.rc.device)
x_fine = torch.linspace(x.min(), x.max() - 1e-6 * dx, 2001, device=qp.rc.device)
y = f_test(x)
y_fine = f_test(x_fine)
y_prime_fine = f_test_prime(x_fine)
y_coeff = qp.ions.quintic_spline.get_coeff(y) # blip coefficients
assert (
y_fine - qp.ions.quintic_spline.Interpolator(x_fine, dx, 0)(y_coeff)
).norm() < dx ** 4
assert (
y_prime_fine - qp.ions.quintic_spline.Interpolator(x_fine, dx, 1)(y_coeff)
).norm() < dx ** 3
return dx, x_fine, y_fine, y_prime_fine, y_coeff
def main():
    """Run test and additionally plot for visual inspection."""
    import matplotlib.pyplot as plt

    qp.utils.log_config()
    qp.rc.init()

    # Visual check: a single blip basis function and its first four derivatives.
    plt.figure()
    blip_coeff = torch.zeros(12)
    blip_coeff[5] = 1
    t_grid = torch.linspace(0.0, 12.0, 101, device=qp.rc.device)
    for order in range(5):
        interpolate = qp.ions.quintic_spline.Interpolator(t_grid, 2.0, order)
        plt.plot(
            t_grid.to(qp.rc.cpu),
            interpolate(blip_coeff).to(qp.rc.cpu),
            label=f"Deriv: {order}",
        )
    plt.axhline(0, color="k", ls="dotted")
    plt.legend()

    # Run the test itself to obtain reference data and spline coefficients:
    dx, x_fine, y_fine, y_prime_fine, y_coeff = test_interpolator()

    # Overlay interpolants of each derivative order on the reference curves:
    plt.figure()
    x_cpu = x_fine.to(qp.rc.cpu)
    plt.plot(
        x_cpu,
        y_fine.to(qp.rc.cpu),
        "k--",
        label="Reference data",
        zorder=10,
    )
    plt.plot(
        x_cpu,
        y_prime_fine.to(qp.rc.cpu),
        "k:",
        label="Reference derivative",
        zorder=10,
    )
    for order in range(5):
        interpolate = qp.ions.quintic_spline.Interpolator(x_fine, dx, order)
        plt.plot(
            x_cpu,
            interpolate(y_coeff).to(qp.rc.cpu),
            label=f"Interpolant (deriv: {order})",
            lw=3,
        )
    plt.axhline(0, color="k", ls="dotted")
    plt.legend()
    plt.show()
if __name__ == "__main__":
main()
| 1,001 | 0 | 22 |
19c9b4b16e51705aa897a6210e587cdfaf2db4bc | 285 | py | Python | TreeNode/models.py | huzing2524/myScripts | 3cf1cfe84bd911cf04ff5c557c8fd6420978bb21 | [
"MIT"
] | 2 | 2021-02-23T04:28:18.000Z | 2021-06-07T23:11:03.000Z | TreeNode/models.py | huzing2524/myScripts | 3cf1cfe84bd911cf04ff5c557c8fd6420978bb21 | [
"MIT"
] | null | null | null | TreeNode/models.py | huzing2524/myScripts | 3cf1cfe84bd911cf04ff5c557c8fd6420978bb21 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
| 25.909091 | 67 | 0.74386 | from django.db import models
# Create your models here.
class IndexTree(models.Model):
    """Flat tree node: each row points at its parent via ``parent_id``.

    NOTE(review): ``parent_id`` is a plain integer, not a ForeignKey —
    presumably it references another ``IndexTree`` row; confirm with callers.
    """

    # Display name of the node.
    name = models.CharField(verbose_name='名称', max_length=50)
    # Id of the parent node; NULL marks a root node (assumption — TODO confirm).
    parent_id = models.IntegerField(verbose_name='父类id', null=True)
    # Free-form remark/annotation.
    remark = models.CharField(verbose_name='备注', max_length=50)
| 0 | 215 | 23 |
bbb6c6787b96b5631dde0a84acddef3a760c2122 | 19,293 | py | Python | mamosa/synthetic/synthetic.py | sindrehenriksen/mamosa | 477131ff82550701753c1950128b3191ee37bebb | [
"MIT"
] | 1 | 2021-11-01T16:43:17.000Z | 2021-11-01T16:43:17.000Z | mamosa/synthetic/synthetic.py | sindrehenriksen/mamosa | 477131ff82550701753c1950128b3191ee37bebb | [
"MIT"
] | null | null | null | mamosa/synthetic/synthetic.py | sindrehenriksen/mamosa | 477131ff82550701753c1950128b3191ee37bebb | [
"MIT"
] | null | null | null | import numpy as np
import bruges
import scipy.stats
import scipy.linalg
import warnings
from scipy.ndimage import gaussian_filter
from typing import Tuple, Union, List, Optional, Callable, Any
# TODO: Add support for horizons that "stop"/"vanish" (i.e. a layer is eroded).
class SyntheticData:
    """Class for generating synthetic geo-volumes and seismic therefrom.

    This class can do the following:

    - Generate semi-realistic random synthetic horizons in a subsurface volume of
      the desired size (number of voxels). The horizons cover the entire volume.
    - Generate simple (unrealistic), parallel faults.
    - Generate synthetic seismic data from the synthetic subsurface volume.

    Args:
        shape (Tuple[int, int, int]): Shape of the synthetic geo-volume, on the format
            (I, X, T).

    Attributes:
        I: Number of ilines, > 0.
        X: Number of xlines, > 0.
        T: Number of tlines, > 0.
        n_horizons: Number of horizons in geo-volume, > 0.
        horizons: List of length n_horizons of ndarray of int, shape (I, X). Element
            (I, X) of list element h gives the height of horizon h in (I, X) - only one
            horizon point per horizon per trace is supported. -1 indicates out of
            bounds, i.e. the horizon is not in the geo-volume.
        facies: ndarray of int, shape (I, X, T). Facies start at horizons (inclusive)
            and continue to next horizon (exclusive) in t-direction. I.e.
            n_facies = n_horizons + 1. The array contains integers from 0 to n_horizons.
        seismic: ndarray of float, shape (I, X, T). Synthetic seismic.
        wavelet: array_like; list of wavelet amplitudes.
        reflection_coeffs: List of reflection coefficients, one for each horizon. Each
            can be a float (constant coefficients across horizons) or an (I*X) array.
            -1 < reflection coefficient < 1.
        oob_horizons: List of horizons that are partly or entirely out of bounds, i.e.
            some/all points of the horizon not in the geo-volume.
    """

    def __init__(self, shape: Tuple[int, int, int]):
        # FIX: this initializer was missing; without it the noise property and
        # generate_horizons fail with AttributeError on first use.
        self.I, self.X, self.T = shape
        self.n_horizons = 0
        self.horizons: Optional[np.ndarray] = None
        self.facies: Optional[np.ndarray] = None
        self.seismic: Optional[np.ndarray] = None
        self.wavelet: Any = None
        self.reflection_coeffs: Optional[np.ndarray] = None
        self.oob_horizons: List[int] = []
        # Noise bookkeeping (sigmas and the realized noise fields):
        self._systematic_sigma = 0.0
        self._white_sigma = 0.0
        self._blur_sigma = 0.0
        self._systematic_noise: Optional[np.ndarray] = None
        self._white_noise: Optional[np.ndarray] = None
        self._blur_noise: Optional[np.ndarray] = None

    @property
    def shape(self) -> Tuple[int, int, int]:
        """Shape property.

        Returns:
            Tuple[int, int, int]: Shape of geo-volume (I*X*T).
        """
        return self.I, self.X, self.T

    @property
    def reflection_coeffs_array(self) -> Optional[np.ndarray]:
        """Reflection coefficient array property.

        Returns:
            np.ndarray: Shape (I*X*T); array of reflection coefficients.
        """
        if self.reflection_coeffs is None:
            return None
        r_array = np.zeros(self.shape)
        ii, xx = np.mgrid[: self.I, : self.X]
        for i in range(self.n_horizons):
            h = self.horizons[i]  # type: ignore
            # NOTE(review): out-of-bounds horizon points are marked -1, which
            # indexes the last t-slice here. Presumably acceptable for this
            # synthetic use; confirm before relying on the last slice.
            r_array[ii, xx, h] = self.reflection_coeffs[i]
        return r_array

    @property
    def noise(self) -> np.ndarray:
        """Noise property.

        Subtracting noise from self.seismic gives noise-free seismic.

        Returns:
            np.ndarray: Shape (I*X*T); array of noise contribution to seismic.
        """
        # Blur noise already includes the other noise contributions (it is the
        # difference between pre-blur and post-blur seismic), so it wins.
        if self._blur_noise is not None:
            return self._blur_noise
        if self._systematic_noise is not None:
            if self._white_noise is not None:
                return self._systematic_noise + self._white_noise
            return self._systematic_noise
        if self._white_noise is not None:
            return self._white_noise
        return np.zeros(self.shape)

    def generate_horizons(
        self,
        n_horizons: int,
        min_distance: int = 5,
        volatility: float = 0.6,
        trend_size: float = 1,
        trend_length: int = 30,
        fault_xlines: Union[int, List[int]] = None,
        fault_size: Union[int, List[int]] = 5,
        generate_reflection_coeffs: bool = True,
        reflection_coeff_volatility: float = 0.005,
        reflection_coeff_seeds: List[float] = None,
    ) -> np.ndarray:
        """Generate synthetic horizons.

        Generate random synthetic horizons in the defined synthetic geo-volume.

        Args:
            n_horizons: int > 0. Number of horizons to be generated.
            min_distance: int >= 0. Minimum distance between the horizons (and top
                horizon and 0).
            volatility: float > 0. Decides the volatility of the horizons.
            trend_size: float > 0. Decides how significant trends the horizons have.
            trend_length: float > 0. Decides how long the trends last for.
            fault_xlines: Create faults at these xlines.
            fault_size: List of size of fault jumps, or size of all jumps if just an
                integer. Ignored if fault_xlines is None.
            generate_reflection_coeffs: If True, generate random, non-constant
                reflection coefficients.
            reflection_coeff_volatility: float > 0. Volatility of the reflection
                coefficients.
            reflection_coeff_seeds: Initial values that the random reflection
                coefficients will fluctuate around.

        Returns:
            List of horizon numpy arrays of size (I*X).
        """
        # Reset state derived from any previous horizon set:
        self.facies = None
        self.seismic = None
        self.oob_horizons = []
        self.n_horizons = n_horizons
        if reflection_coeff_seeds is not None:
            msg = (
                "Please provide a reflection coefficient seed value for each horizon, "
                "if any."
            )
            assert len(reflection_coeff_seeds) == self.n_horizons, msg
        # TODO: Should respect bounds from _generate_horizons.
        self.horizons = self._generate_overlapping_horizons(
            volatility,
            trend_length,
            trend_size,
            generate_reflection_coeffs,
            reflection_coeff_volatility,
            reflection_coeff_seeds,
        )
        self.horizons = self._set_min_distance(min_distance)
        if fault_xlines is not None:
            if isinstance(fault_xlines, int):
                fault_xlines = [fault_xlines]
            if isinstance(fault_size, int):
                fault_size = [fault_size] * len(fault_xlines)
            else:
                assert len(fault_size) == len(fault_xlines)
            for x, size in zip(fault_xlines, fault_size):
                self.horizons = self.create_fault(x, size)
        self.horizons = self._move_above_zero(min_distance)
        self.horizons = self._set_oob()  # set points above top of vol to 0
        return self.horizons

    def _generate_overlapping_horizons(
        self,
        volatility: float,
        trend_length: int,
        trend_size: float,
        generate_reflection_coeffs: bool,
        reflection_coeff_volatility: float,
        reflection_coeff_seeds: Optional[List[float]],
    ) -> np.ndarray:
        """Generate horizons independently. They will overlap."""
        horizons = np.zeros((self.n_horizons, self.I, self.X))
        if generate_reflection_coeffs:
            self.reflection_coeffs = np.zeros((self.n_horizons, self.I, self.X))
        # Create trend vectors
        i_trend = self._get_trend_vec(self.I, trend_size, trend_length)
        x_trend = self._get_trend_vec(self.X, trend_size, trend_length)

        def _jump_r(trend):
            # FIX: restored missing helper. Random-walk increment for horizon
            # height: white noise scaled by volatility plus the local trend.
            return volatility * np.random.randn() + trend

        # Generate one horizon at a time according to a random process using
        # the trend vectors
        for h in range(0, self.n_horizons):
            horizons[h] = self._generate_horizon(i_trend, x_trend, _jump_r)
        if generate_reflection_coeffs:
            rel_vol = reflection_coeff_volatility / volatility

            def _jump_c(trend):
                # FIX: restored missing helper. Increment for reflection
                # coefficients: same trend rescaled to coefficient magnitude.
                return reflection_coeff_volatility * np.random.randn() + rel_vol * trend

            for h in range(0, self.n_horizons):
                # Trend might be decreasing with increasing depth
                flip = np.random.choice((-1, 1))
                if reflection_coeff_seeds is None:
                    seed = None
                else:
                    seed = reflection_coeff_seeds[h]
                self.reflection_coeffs[h] = self._generate_horizon(  # type: ignore
                    flip * i_trend, flip * x_trend, _jump_c, True, seed
                )
        # horizons should be integer-valued.
        horizons = horizons.round().astype(int)
        return horizons

    def _generate_horizon(
        self,
        i_trend: np.ndarray,
        x_trend: np.ndarray,
        jump: Callable,
        reflection_coeff: bool = False,
        reflection_coeff_seed: float = None,
    ) -> np.ndarray:
        """Generate and return a single horizon or horizon reflection coefficients."""
        iline_edge = np.zeros(self.I)
        xline_edge = np.zeros(self.X)
        if reflection_coeff:
            if reflection_coeff_seed is not None:
                iline_edge[0] = reflection_coeff_seed
                xline_edge[0] = reflection_coeff_seed
            else:
                # Init range (-0.25, -0.1) or (0.1, 0.25)
                iline_edge[0] = np.random.uniform(-0.15, 0.15)
                iline_edge[0] += np.sign(iline_edge[0]) * 0.1
                xline_edge[0] = iline_edge[0]
            # Coefficients are clipped to keep their sign and stay in a
            # plausible magnitude band (0.05 to 0.3 in absolute value).
            high = 0.3 * np.sign(iline_edge[0])
            low = 0.05 * np.sign(iline_edge[0])
            if high < low:
                high, low = (low, high)
        else:
            high = np.inf
            low = -high
        # Generate the horizon along the edges iline = 0 and xline = 0.
        for i in range(1, self.I):
            iline_edge[i] = (iline_edge[i - 1] + jump(i_trend[i])).clip(low, high)
        for x in range(1, self.X):
            xline_edge[x] = (xline_edge[x - 1] + jump(x_trend[x])).clip(low, high)
        horizon = np.zeros((self.I, self.X))
        horizon[:, 0] = iline_edge
        horizon[0, :] = xline_edge
        # Generate the rest of the horizon: each interior point is the average
        # of its two already-generated neighbours plus fresh jumps.
        for i in range(1, self.I):
            for x in range(1, self.X):
                i_jump = jump(i_trend[i])
                x_jump = jump(x_trend[x])
                horizon[i, x] = (
                    0.5 * (horizon[i - 1, x] + i_jump + horizon[i, x - 1] + x_jump)
                ).clip(low, high)
        return horizon

    def _get_trend_vec(
        self, n: int, trend_size: float, trend_length: int
    ) -> np.ndarray:
        """Get trend of a random walk with trend."""
        trend = trend_size * np.random.randn(n)
        trend[0] = 0
        trend = self._moving_average(trend, trend_length)
        return trend

    @staticmethod
    def _moving_average(a: np.ndarray, n: int) -> np.ndarray:
        """Moving average of a, window size = n."""
        b = np.copy(a)
        # Pad with the first value so the average is defined from index 0.
        b = np.insert(b, 0, np.full(n, a[0]))
        s = np.cumsum(b)
        res = (s[n:] - s[:-n]) / n
        return res

    def _set_min_distance(self, min_distance: int) -> np.ndarray:
        """Move horizons to fulfill minimum distance specification."""
        for j in range(1, self.n_horizons):
            diff = self.horizons[j] - self.horizons[j - 1]  # type: ignore
            min_diff = diff.min()
            if min_diff < min_distance:
                # Random extra spacing so layer thicknesses vary.
                dist = np.random.randint(min_distance, 3 * min_distance)
                self.horizons[j] += dist - min_diff  # type: ignore
        return self.horizons

    def create_fault(self, fault_xline: int, fault_size: int) -> np.ndarray:
        """Create a fault at a xline fault_xline.

        Args:
            fault_xline: Xline to create fault at.
            fault_size: Size of fault.

        Returns:
            See class attribute self.horizons.
        """
        self.horizons[:, :, fault_xline:] += fault_size  # type: ignore
        return self.horizons

    def _move_above_zero(self, min_distance: int) -> np.ndarray:
        """Make sure that the top horizon is a little above 0 (below seabed)."""
        h_min = self.horizons[0].min()  # type: ignore
        self.horizons -= h_min
        self.horizons += np.random.randint(0, self.T // min(10, self.T))
        self.horizons += min_distance
        return self.horizons

    def _set_oob(self) -> np.ndarray:
        """Remove parts of horizons above (geologically below) defined geo-volume."""
        oob = self.horizons > (self.T - 1)  # type: ignore
        if oob.sum() > 0:  # type: ignore
            self.horizons[oob] = -1  # type: ignore
            # Deeper horizons go out of bounds first; stop at the first horizon
            # (from the bottom) that is entirely inside the volume.
            for h in range(self.n_horizons - 1, -1, -1):
                n_out = oob[h].sum()  # type: ignore
                if n_out > 0:
                    I, X = self.I, self.X
                    warnings.warn(
                        f"horizon {h} is "
                        f'{"partly" if n_out < (I*X) else "entirely"} '
                        f"out of bounds."
                    )
                    self.oob_horizons.append(h)
                else:
                    break
        return self.horizons

    def horizon_volume(self, horizon_number: int) -> Optional[np.ndarray]:
        """Produce horizon volume for a single horizon.

        This function transforms the generated horizon into a binary numpy array of
        dimensions (I, X, T). The horizon is represented by the ones.

        Args:
            horizon_number: Which horizon to generate volume for.

        Returns:
            binary ndarray of size (I*X*T) if horizon is (partly) within bounds, None
            otherwise.
        """
        horizon = self.ixtn_horizons()
        horizon = horizon[horizon[:, 3] == horizon_number]
        if horizon.size == 0:
            warnings.warn(f"horizon {horizon_number} is not in volume.")
            return None
        horizon_vol = np.zeros(self.shape)
        horizon_vol[horizon[:, 0], horizon[:, 1], horizon[:, 2]] = 1
        return horizon_vol

    def ixtn_horizons(self) -> np.ndarray:
        """Produce horizon coords.

        This function transforms the generated horizons into a numpy array of dimensions
        (n_horizon_points, 4) with rows (I, X, T, n_horizon). Out-of-bounds points
        (marked -1) are excluded.

        Returns:
            ndarray of horizon coords; shape (n_horizon_points, 4).
        """
        in_bounds = self.horizons > -1  # type: ignore
        s = in_bounds.sum()  # type: ignore
        ixtn = np.empty(shape=(s, 4), dtype=int)
        nix = np.argwhere(in_bounds)
        ixtn[:, :2] = nix[:, 1:]
        ixtn[:, 3] = nix[:, 0]
        ixtn[:, 2] = self.horizons[nix[:, 0], nix[:, 1], nix[:, 2]]  # type: ignore
        return ixtn

    def get_facies(self) -> np.ndarray:
        """Generate facies array.

        Returns:
            ndarray of int, shape (I, X, T). See class attribute docstring (facies) for
            description.
        """
        ixtn = self.ixtn_horizons()
        facies = np.zeros(self.shape, dtype=int)
        facies[ixtn[:, 0], ixtn[:, 1], ixtn[:, 2]] = 1
        # Cumulative sum along t turns horizon markers into facies indices.
        for t in range(1, self.T):
            facies[:, :, t] = facies[:, :, t] + facies[:, :, (t - 1)]
        self.facies = facies
        return facies

    def generate_synthetic_seismic(
        self,
        reflection_coeffs: Union[float, List[Union[float, np.ndarray]]] = None,
        systematic_sigma: float = 0,
        white_sigma: float = 0,
        blur_sigma: float = 0,
        wavelet_frequency: int = 40,
    ) -> Optional[np.ndarray]:
        """Generate synthetic seismic.

        Create synthetic seismic using instance horizons and coefficients, or provided
        (constant) coefficients.

        Args:
            reflection_coeffs: See class attributes.
            systematic_sigma: Systematic noise added if > 0; higher means more noise.
            white_sigma: White noise added if > 0; higher means more noise.
            blur_sigma: Seismic blurred if > 0; higher means more blurred.
            wavelet_frequency: Frequency of wavelet passed to bruges.filters.ricker() to
                define wavelet.

        Returns:
            ndarray of float, shape (I, X, T), or None when no reflection
            coefficients are available.
        """
        if reflection_coeffs is not None:
            if isinstance(reflection_coeffs, float):
                self.reflection_coeffs = np.array(reflection_coeffs).reshape(1)
            else:
                self.reflection_coeffs = np.array(reflection_coeffs)
            # FIX: missing space previously produced "eachhorizon." in the message.
            msg = (
                "Please provide one reflection coefficient constant/array for each "
                "horizon."
            )
            assert len(self.reflection_coeffs) == self.n_horizons, msg
            assert np.all(np.abs(self.reflection_coeffs) < 1), "Max 100% reflected."
        if self.reflection_coeffs is None:
            warnings.warn("No reflection coefficients. Cannot generate seismic.")
            return None
        dt = 0.005
        # For some reason, odd length of the wave gives two spike points, we want one...
        even_T = self.T - self.T % 2
        duration = min(0.100, 0.005 * even_T)  # n_steps <= self.T
        wave = bruges.filters.ricker(duration=duration, dt=dt, f=wavelet_frequency)
        # ... but we want odd length
        wave = np.delete(wave, 0)
        self.wavelet = wave
        # TODO: Quicker to use convolution_matrix here?
        reflection_arr = self.reflection_coeffs_array
        seismic = np.apply_along_axis(
            lambda r: np.convolve(r, wave, mode="same"), axis=-1, arr=reflection_arr
        )
        self.seismic = seismic
        if systematic_sigma > 0:
            # Build the convolution (Toeplitz) matrix W of the wavelet so the
            # noise has covariance sigma^2 * W W^T, i.e. wavelet-colored noise.
            first_col = np.zeros(self.T)
            half_len = wave.size // 2 + 1  # renamed from `l` (shadowed builtin-like name)
            first_col[:half_len] = wave[(half_len - 1):]
            convolution_matrix = scipy.linalg.toeplitz(first_col)
            self._systematic_sigma = systematic_sigma
            W = convolution_matrix
            covariance_matrix = systematic_sigma ** 2 * W @ W.T
            dist = scipy.stats.multivariate_normal(np.zeros(self.T), covariance_matrix)
            self._systematic_noise = dist.rvs((self.I, self.X))
            seismic += self._systematic_noise
        else:
            self._systematic_sigma = 0
        if white_sigma > 0:
            self._white_sigma = white_sigma
            self._white_noise = np.random.normal(np.zeros(seismic.shape), white_sigma)
            seismic += self._white_noise
        else:
            self._white_sigma = 0
        if blur_sigma > 0:
            self._blur_sigma = blur_sigma
            # Blur only along ilines/xlines, never along t.
            seismic = gaussian_filter(seismic, sigma=[blur_sigma, blur_sigma, 0])
            self._blur_noise = self.seismic - seismic
        else:
            self._blur_sigma = 0
        self.seismic = seismic
        return seismic
| 39.616016 | 88 | 0.584616 | import numpy as np
import bruges
import scipy.stats
import scipy.linalg
import warnings
from scipy.ndimage import gaussian_filter
from typing import Tuple, Union, List, Optional, Callable, Any
# TODO: Add support for horizons that "stop"/"vanish" (i.e. a layer is eroded).
class SyntheticData:
"""Class for generating synthetic geo-volumes and seismic therefrom.
This class can do the following:
- Generate semi-realistic random synthetic horizons inn a subsurface volume of
the desired size (number of voxels). The horizons cover the entire volume.
- Generate simple (unrealistic), parallel faults.
- Generate synthetic seismic data from the synthetic subsurface volume.
Args:
shape (Tuple[int, int, int]): Shape of the synthetic geo-volume, on the format
(I, X, T).
Attributes:
I: Number of ilines, > 0.
X: Number of xlines, > 0.
T: Number of tlines, > 0.
n_horizons: Number of horizons in geo-volume, > 0.
horizons: List of length n_horizons of ndarray of int, shape (I, X). Element
(I, X) of list element h gives the height of horizon h in (I, X) - only one
horizon point per horizon per trace is supported. -1 indicates out of
bounds, i.e. the horizon is not in the geo-volume.
facies: ndarray of int, shape (I, X, T). Facies start at horizons (inclusive)
and continue to next horizon (exclusive) in t-direction. I.e.
n_facies = n_horizons + 1. The array contains integers from 0 to n_horizons.
seismic: ndarray of float, shape (I, X, T). Synthetic seismic.
wavelet: array_like; list of wavelet amplitudes.
reflection_coeffs: List of reflection coefficients, one for each horizon. Each
can be a float (constant coefficients across horizons) or an (I*X) array.
-1 < reflection coefficient < 1.
oob_horizons: List of horizons that are partly or entirely out of bounds, i.e.
some/all points of the horizon not in the geo-volume.
"""
def __init__(self, shape: Tuple[int, int, int]):
self.I, self.X, self.T = shape
self.n_horizons = 0
self.horizons: Optional[np.ndarray] = None
self.facies: Optional[np.ndarray] = None
self.seismic: Optional[np.ndarray] = None
self.wavelet: Any = None
self.reflection_coeffs: Optional[np.ndarray] = None
self.oob_horizons: List[int] = []
self._systematic_sigma = 0.0
self._white_sigma = 0.0
self._blur_sigma = 0.0
self._systematic_noise: Optional[np.ndarray] = None
self._white_noise: Optional[np.ndarray] = None
self._blur_noise: Optional[np.ndarray] = None
@property
def shape(self) -> Tuple[int, int, int]:
"""Shape property.
Returns:
Tuple[int, int, int]: Shape of geo-volume (I*X*T).
"""
return self.I, self.X, self.T
@property
def reflection_coeffs_array(self) -> Optional[np.ndarray]:
"""Reflection coefficient array property.
Returns:
np.ndarray: Shape (I*X*T); array of reflection coefficients.
"""
if self.reflection_coeffs is None:
return None
else:
r_array = np.zeros(self.shape)
ii, xx = np.mgrid[: self.I, : self.X]
for i in range(self.n_horizons):
h = self.horizons[i] # type: ignore
r_array[ii, xx, h] = self.reflection_coeffs[i]
return r_array
@property
def noise(self) -> np.ndarray:
"""Noise property.
Subtracting noise from self.seismic gives noise-free seismic.
Returns:
np.ndarray: Shape (I*X*T); array of noise contribution to seismic.
"""
if self._blur_noise is not None:
return self._blur_noise
if self._systematic_noise is not None:
if self._white_noise is not None:
return self._systematic_noise + self._white_noise
return self._systematic_noise
if self._white_noise is not None:
return self._white_noise
return np.zeros(self.shape)
def generate_horizons(
self,
n_horizons: int,
min_distance: int = 5,
volatility: float = 0.6,
trend_size: float = 1,
trend_length: int = 30,
fault_xlines: Union[int, List[int]] = None,
fault_size: Union[int, List[int]] = 5,
generate_reflection_coeffs: bool = True,
reflection_coeff_volatility: float = 0.005,
reflection_coeff_seeds: List[float] = None,
) -> np.ndarray:
"""Generate synthetic horizons.
Generate random synthetic horizons in the defined synthetic geo-volume.
Args:
n_horizons: int > 0. Number of horizons to be generated.
min_distance: int >= 0. Minimum distance between the horizons (and top
horizon and 0).
volatility: float > 0. Decides the volatility of the horizons.
trend_size: float > 0. Decides how significant trends the horizons have.
trend_length: float > 0. Decides how long the trends last for.
fault_xlines: Create faults at these xlines.
fault_size: List of size of fault jumps, or size of all jumps if just an
integer. Ignored if fault_xlines is None.
generate_reflection_coeffs: If True, generate random, non-constant
reflection coefficients.
reflection_coeff_volatility: float > 0. Volatility of the reflection
coefficients.
reflection_coeff_seeds: Initial values that the random reflection
coefficients will fluctuate around.
Returns:
List of horizon numpy arrays of size (I*X).
"""
# Reset:
self.facies = None
self.seismic = None
self.oob_horizons = []
self.n_horizons = n_horizons
if reflection_coeff_seeds is not None:
msg = (
"Please provide a reflection coefficient seed value for each horizon, "
"if any."
)
assert len(reflection_coeff_seeds) == self.n_horizons, msg
# TODO: Should respect bounds from _generate_horizons.
self.horizons = self._generate_overlapping_horizons(
volatility,
trend_length,
trend_size,
generate_reflection_coeffs,
reflection_coeff_volatility,
reflection_coeff_seeds,
)
self.horizons = self._set_min_distance(min_distance)
if fault_xlines is not None:
if isinstance(fault_xlines, int):
fault_xlines = [fault_xlines]
if isinstance(fault_size, int):
fault_size = [fault_size] * len(fault_xlines)
else:
assert len(fault_size) == len(fault_xlines)
for x, size in zip(fault_xlines, fault_size):
self.horizons = self.create_fault(x, size)
self.horizons = self._move_above_zero(min_distance)
self.horizons = self._set_oob() # set points above top of vol to 0
return self.horizons
def _generate_overlapping_horizons(
self,
volatility: float,
trend_length: int,
trend_size: float,
generate_reflection_coeffs: bool,
reflection_coeff_volatility: float,
reflection_coeff_seeds: Optional[List[float]],
) -> np.ndarray:
"""Generate horizons independently. They will overlap."""
horizons = np.zeros((self.n_horizons, self.I, self.X))
if generate_reflection_coeffs:
self.reflection_coeffs = np.zeros((self.n_horizons, self.I, self.X))
# Create trend vectors
i_trend = self._get_trend_vec(self.I, trend_size, trend_length)
x_trend = self._get_trend_vec(self.X, trend_size, trend_length)
def _jump_r(trend):
return volatility * np.random.randn() + trend
# Generate one horizon at a time according to a random process using
# the trend vectors
for h in range(0, self.n_horizons):
horizons[h] = self._generate_horizon(i_trend, x_trend, _jump_r)
if generate_reflection_coeffs:
rel_vol = reflection_coeff_volatility / volatility
def _jump_c(trend):
return reflection_coeff_volatility * np.random.randn() + rel_vol * trend
for h in range(0, self.n_horizons):
# Trend might be decreasing with increasing depth
flip = np.random.choice((-1, 1))
if reflection_coeff_seeds is None:
seed = None
else:
seed = reflection_coeff_seeds[h]
self.reflection_coeffs[h] = self._generate_horizon( # type: ignore
flip * i_trend, flip * x_trend, _jump_c, True, seed
)
# horizons should be integer-valued.
horizons = horizons.round().astype(int)
return horizons
def _generate_horizon(
self,
i_trend: np.ndarray,
x_trend: np.ndarray,
jump: Callable,
reflection_coeff: bool = False,
reflection_coeff_seed: float = None,
) -> np.ndarray:
"""Generate and return a single horizon or horizon reflection coefficients."""
iline_edge = np.zeros(self.I)
xline_edge = np.zeros(self.X)
if reflection_coeff:
if reflection_coeff_seed is not None:
iline_edge[0] = reflection_coeff_seed
xline_edge[0] = reflection_coeff_seed
else:
# Init range (-0.25, -0.1) or (0.1, 0.25)
iline_edge[0] = np.random.uniform(-0.15, 0.15)
iline_edge[0] += np.sign(iline_edge[0]) * 0.1
xline_edge[0] = iline_edge[0]
high = 0.3 * np.sign(iline_edge[0])
low = 0.05 * np.sign(iline_edge[0])
if high < low:
high, low = (low, high)
else:
high = np.inf
low = -high
# Generate the horizon along the edges iline = 0 and xline = 0.
for i in range(1, self.I):
iline_edge[i] = (iline_edge[i - 1] + jump(i_trend[i])).clip(low, high)
for x in range(1, self.X):
xline_edge[x] = (xline_edge[x - 1] + jump(x_trend[x])).clip(low, high)
horizon = np.zeros((self.I, self.X))
horizon[:, 0] = iline_edge
horizon[0, :] = xline_edge
# Generate the rest of the horizon.
for i in range(1, self.I):
for x in range(1, self.X):
i_jump = jump(i_trend[i])
x_jump = jump(x_trend[x])
horizon[i, x] = (
0.5 * (horizon[i - 1, x] + i_jump + horizon[i, x - 1] + x_jump)
).clip(low, high)
return horizon
def _get_trend_vec(
self, n: int, trend_size: float, trend_length: int
) -> np.ndarray:
"""Get trend of a random walk with trend."""
trend = trend_size * np.random.randn(n)
trend[0] = 0
trend = self._moving_average(trend, trend_length)
return trend
@staticmethod
def _moving_average(a: np.ndarray, n: int) -> np.ndarray:
"""Moving average of a, window size = n."""
b = np.copy(a)
b = np.insert(b, 0, np.full(n, a[0]))
s = np.cumsum(b)
res = (s[n:] - s[:-n]) / n
return res
def _set_min_distance(self, min_distance: int) -> np.ndarray:
"""Move horizons to fulfill minimum distance specification."""
for j in range(1, self.n_horizons):
diff = self.horizons[j] - self.horizons[j - 1] # type: ignore
min_diff = diff.min()
if min_diff < min_distance:
dist = np.random.randint(min_distance, 3 * min_distance)
self.horizons[j] += dist - min_diff # type: ignore
return self.horizons
def create_fault(self, fault_xline: int, fault_size: int) -> np.ndarray:
"""Create a fault at a xline fault_xline.
Args:
fault_xline: Xline to create fault at.
fault_size: Size of fault.
Returns:
See class attribute self.horizons.
"""
self.horizons[:, :, fault_xline:] += fault_size # type: ignore
return self.horizons
def _move_above_zero(self, min_distance: int) -> np.ndarray:
"""Make sure that the top horizon is a little above 0 (below seabed)."""
h_min = self.horizons[0].min() # type: ignore
self.horizons -= h_min
self.horizons += np.random.randint(0, self.T // min(10, self.T))
self.horizons += min_distance
return self.horizons
def _set_oob(self) -> np.ndarray:
"""Remove parts of horizons above (geologically below) defined geo-volume."""
oob = self.horizons > (self.T - 1) # type: ignore
if oob.sum() > 0: # type: ignore
self.horizons[oob] = -1 # type: ignore
for h in range(self.n_horizons - 1, -1, -1):
n_out = oob[h].sum() # type: ignore
if n_out > 0:
I, X = self.I, self.X
warnings.warn(
f"horizon {h} is "
f'{"partly" if n_out < (I*X) else "entirely"} '
f"out of bounds."
)
self.oob_horizons.append(h)
else:
break
return self.horizons
def horizon_volume(self, horizon_number: int) -> Optional[np.ndarray]:
"""Produce horizon volume for a single horizon.
This function transforms the generated horizon into a binary numpy array of
dimensions (I, X, T). The horizon is represented by the ones.
Args:
horizon_number: Which horizon to generate volume for.
Returns:
binary ndarray of size (I*X*T) if horizon is (partly) within bounds, None
otherwise.
"""
horizon = self.ixtn_horizons()
horizon = horizon[horizon[:, 3] == horizon_number]
if horizon.size == 0:
warnings.warn(f"horizon {horizon_number} is not in volume.")
return None
horizon_vol = np.zeros(self.shape)
horizon_vol[horizon[:, 0], horizon[:, 1], horizon[:, 2]] = 1
return horizon_vol
def ixtn_horizons(self) -> np.ndarray:
"""Produce horizon coords.
This function transforms the generated horizons into a numpy array of dimensions
(n_horizon_points, 4) with rows (I, X, T, n_horizon).
Returns:
ndarray of horizon coords; shape (n_horizon_points, 4).
"""
in_bounds = self.horizons > -1 # type: ignore
s = in_bounds.sum() # type: ignore
ixtn = np.empty(shape=(s, 4), dtype=int)
nix = np.argwhere(in_bounds)
ixtn[:, :2] = nix[:, 1:]
ixtn[:, 3] = nix[:, 0]
ixtn[:, 2] = self.horizons[nix[:, 0], nix[:, 1], nix[:, 2]] # type: ignore
return ixtn
def get_facies(self) -> np.ndarray:
"""Generate facies array.
Returns:
ndarray of int, shape (I, X, T). See class attribute docstring (facies) for
description.
"""
ixtn = self.ixtn_horizons()
facies = np.zeros(self.shape, dtype=int)
facies[ixtn[:, 0], ixtn[:, 1], ixtn[:, 2]] = 1
for t in range(1, self.T):
facies[:, :, t] = facies[:, :, t] + facies[:, :, (t - 1)]
self.facies = facies
return facies
    def generate_synthetic_seismic(
        self,
        reflection_coeffs: Optional[Union[float, List[Union[float, np.ndarray]]]] = None,
        systematic_sigma: float = 0,
        white_sigma: float = 0,
        blur_sigma: float = 0,
        wavelet_frequency: int = 40,
    ) -> Optional[np.ndarray]:
        """Generate synthetic seismic.

        Create synthetic seismic using instance horizons and coefficients, or
        provided (constant) coefficients.

        Args:
            reflection_coeffs: See class attributes. A float is promoted to a
                length-1 array; otherwise one entry per horizon is required.
            systematic_sigma: Systematic noise added if > 0; higher means more
                noise.
            white_sigma: White noise added if > 0; higher means more noise.
            blur_sigma: Seismic blurred if > 0; higher means more blurred.
            wavelet_frequency: Frequency of wavelet passed to
                bruges.filters.ricker() to define wavelet.

        Returns:
            ndarray of float, shape (I, X, T), or None when no reflection
            coefficients are available.
        """
        if reflection_coeffs is not None:
            if isinstance(reflection_coeffs, float):
                # Promote a scalar coefficient to a 1-element array so the
                # per-horizon length check below applies uniformly.
                self.reflection_coeffs = np.array(reflection_coeffs).reshape(1)
            else:
                self.reflection_coeffs = np.array(reflection_coeffs)
            # NOTE(review): missing space between "each" and "horizon" in this
            # message; also asserts are stripped under `python -O` -- consider
            # raising ValueError for user-input validation instead.
            msg = (
                "Please provide one reflection coefficient constant/array for each"
                "horizon."
            )
            assert len(self.reflection_coeffs) == self.n_horizons, msg
            assert np.all(np.abs(self.reflection_coeffs) < 1), "Max 100% reflected."
        if self.reflection_coeffs is None:
            warnings.warn("No reflection coefficients. Cannot generate seismic.")
            return
        dt = 0.005
        # For some reason, odd length of the wave gives two spike points, we want one...
        even_T = self.T - self.T % 2
        duration = min(0.100, 0.005 * even_T)  # n_steps <= self.T
        wave = bruges.filters.ricker(duration=duration, dt=dt, f=wavelet_frequency)
        # ... but we want odd length
        wave = np.delete(wave, 0)
        self.wavelet = wave
        # TODO: Quicker to use convolution_matrix here?
        reflection_arr = self.reflection_coeffs_array
        # Convolve every (I, X) trace with the wavelet along the time axis.
        seismic = np.apply_along_axis(
            lambda r: np.convolve(r, wave, mode="same"), axis=-1, arr=reflection_arr
        )
        self.seismic = seismic
        if systematic_sigma > 0:
            # Build the wavelet's convolution (Toeplitz) matrix W and draw
            # noise with covariance sigma^2 * W @ W.T, so the noise carries the
            # same vertical correlation the wavelet imposes on the signal.
            first_col = np.zeros(self.T)
            l = wave.size // 2 + 1
            first_col[:l] = wave[(l - 1) :]
            convolution_matrix = scipy.linalg.toeplitz(first_col)
            self._systematic_sigma = systematic_sigma
            W = convolution_matrix
            covariance_matrix = systematic_sigma ** 2 * W @ W.T
            dist = scipy.stats.multivariate_normal(np.zeros(self.T), covariance_matrix)
            self._systematic_noise = dist.rvs((self.I, self.X))
            seismic += self._systematic_noise
        else:
            self._systematic_sigma = 0
        if white_sigma > 0:
            self._white_sigma = white_sigma
            self._white_noise = np.random.normal(np.zeros(seismic.shape), white_sigma)
            seismic += self._white_noise
        else:
            self._white_sigma = 0
        if blur_sigma > 0:
            self._blur_sigma = blur_sigma
            # sigma 0 on the last axis: blur laterally (I, X) only, not in time.
            seismic = gaussian_filter(seismic, sigma=[blur_sigma, blur_sigma, 0])
            # Record the component removed by blurring for later inspection.
            self._blur_noise = self.seismic - seismic
        else:
            self._blur_sigma = 0
        self.seismic = seismic
        return seismic
| 791 | 0 | 93 |
72b783b94092542b07e16ea1a17cad1c4d010de7 | 2,046 | py | Python | Lib/encodings/iso8859_8.py | marcosptf/cpython-2.0.1 | 73c739a764e8b1dc84640e73b880bc66e1916bca | [
"PSF-2.0"
] | 5 | 2022-03-26T21:53:36.000Z | 2022-03-30T21:47:20.000Z | Lib/encodings/iso8859_8.py | marcosptf/cpython-2.0.1 | 73c739a764e8b1dc84640e73b880bc66e1916bca | [
"PSF-2.0"
] | 6 | 2020-11-18T15:48:14.000Z | 2021-05-03T21:20:50.000Z | Lib/encodings/iso8859_8.py | marcosptf/cpython-2.0.1 | 73c739a764e8b1dc84640e73b880bc66e1916bca | [
"PSF-2.0"
] | 2 | 2015-07-16T08:14:13.000Z | 2022-03-27T01:55:17.000Z | """ Python Character Mapping Codec generated from '8859-8.TXT'.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
### Codec APIs
### encodings module API
### Decoding Map
decoding_map = {
0x00aa: 0x00d7, # MULTIPLICATION SIGN
0x00af: 0x203e, # OVERLINE
0x00ba: 0x00f7, # DIVISION SIGN
0x00df: 0x2017, # DOUBLE LOW LINE
0x00e0: 0x05d0, # HEBREW LETTER ALEF
0x00e1: 0x05d1, # HEBREW LETTER BET
0x00e2: 0x05d2, # HEBREW LETTER GIMEL
0x00e3: 0x05d3, # HEBREW LETTER DALET
0x00e4: 0x05d4, # HEBREW LETTER HE
0x00e5: 0x05d5, # HEBREW LETTER VAV
0x00e6: 0x05d6, # HEBREW LETTER ZAYIN
0x00e7: 0x05d7, # HEBREW LETTER HET
0x00e8: 0x05d8, # HEBREW LETTER TET
0x00e9: 0x05d9, # HEBREW LETTER YOD
0x00ea: 0x05da, # HEBREW LETTER FINAL KAF
0x00eb: 0x05db, # HEBREW LETTER KAF
0x00ec: 0x05dc, # HEBREW LETTER LAMED
0x00ed: 0x05dd, # HEBREW LETTER FINAL MEM
0x00ee: 0x05de, # HEBREW LETTER MEM
0x00ef: 0x05df, # HEBREW LETTER FINAL NUN
0x00f0: 0x05e0, # HEBREW LETTER NUN
0x00f1: 0x05e1, # HEBREW LETTER SAMEKH
0x00f2: 0x05e2, # HEBREW LETTER AYIN
0x00f3: 0x05e3, # HEBREW LETTER FINAL PE
0x00f4: 0x05e4, # HEBREW LETTER PE
0x00f5: 0x05e5, # HEBREW LETTER FINAL TSADI
0x00f6: 0x05e6, # HEBREW LETTER TSADI
0x00f7: 0x05e7, # HEBREW LETTER QOF
0x00f8: 0x05e8, # HEBREW LETTER RESH
0x00f9: 0x05e9, # HEBREW LETTER SHIN
0x00fa: 0x05ea, # HEBREW LETTER TAV
}
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
encoding_map[v] = k
| 26.230769 | 68 | 0.709677 | """ Python Character Mapping Codec generated from '8859-8.TXT'.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):

    def encode(self, input, errors='strict'):
        """Encode *input* to ISO 8859-8 bytes using the charmap tables."""
        return codecs.charmap_encode(input, errors, encoding_map)

    def decode(self, input, errors='strict'):
        """Decode ISO 8859-8 *input* using the charmap tables."""
        return codecs.charmap_decode(input, errors, decoding_map)
# Stream variant: inherits the charmap encode() from Codec.
class StreamWriter(Codec,codecs.StreamWriter):
    pass
# Stream variant: inherits the charmap decode() from Codec.
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    """Return the (encode, decode, reader, writer) codec registry entry."""
    return (Codec().encode, Codec().decode, StreamReader, StreamWriter)
### Decoding Map
# Byte -> Unicode code point overrides for ISO 8859-8 (Hebrew).
# NOTE(review): only positions that differ from Latin-1 are listed here;
# confirm how charmap_decode treats unmapped bytes in this Python version.
decoding_map = {
        0x00aa: 0x00d7, # MULTIPLICATION SIGN
        0x00af: 0x203e, # OVERLINE
        0x00ba: 0x00f7, # DIVISION SIGN
        0x00df: 0x2017, # DOUBLE LOW LINE
        0x00e0: 0x05d0, # HEBREW LETTER ALEF
        0x00e1: 0x05d1, # HEBREW LETTER BET
        0x00e2: 0x05d2, # HEBREW LETTER GIMEL
        0x00e3: 0x05d3, # HEBREW LETTER DALET
        0x00e4: 0x05d4, # HEBREW LETTER HE
        0x00e5: 0x05d5, # HEBREW LETTER VAV
        0x00e6: 0x05d6, # HEBREW LETTER ZAYIN
        0x00e7: 0x05d7, # HEBREW LETTER HET
        0x00e8: 0x05d8, # HEBREW LETTER TET
        0x00e9: 0x05d9, # HEBREW LETTER YOD
        0x00ea: 0x05da, # HEBREW LETTER FINAL KAF
        0x00eb: 0x05db, # HEBREW LETTER KAF
        0x00ec: 0x05dc, # HEBREW LETTER LAMED
        0x00ed: 0x05dd, # HEBREW LETTER FINAL MEM
        0x00ee: 0x05de, # HEBREW LETTER MEM
        0x00ef: 0x05df, # HEBREW LETTER FINAL NUN
        0x00f0: 0x05e0, # HEBREW LETTER NUN
        0x00f1: 0x05e1, # HEBREW LETTER SAMEKH
        0x00f2: 0x05e2, # HEBREW LETTER AYIN
        0x00f3: 0x05e3, # HEBREW LETTER FINAL PE
        0x00f4: 0x05e4, # HEBREW LETTER PE
        0x00f5: 0x05e5, # HEBREW LETTER FINAL TSADI
        0x00f6: 0x05e6, # HEBREW LETTER TSADI
        0x00f7: 0x05e7, # HEBREW LETTER QOF
        0x00f8: 0x05e8, # HEBREW LETTER RESH
        0x00f9: 0x05e9, # HEBREW LETTER SHIN
        0x00fa: 0x05ea, # HEBREW LETTER TAV
}
### Encoding Map
# Inverse of decoding_map (explicit loop: this file predates
# dict comprehensions, which arrived in Python 2.7).
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k
| 233 | 73 | 162 |
cc3465e8b6a7b04d148b0d77d9ac749acd991189 | 4,161 | py | Python | simulated_uploaders/simulate_upload_rnaseq_fastq.py | BD2KGenomics/dcc-spinnaker-client | 72a6cb94a8db4be707cb1f63513b2f2712edb663 | [
"Apache-2.0"
] | null | null | null | simulated_uploaders/simulate_upload_rnaseq_fastq.py | BD2KGenomics/dcc-spinnaker-client | 72a6cb94a8db4be707cb1f63513b2f2712edb663 | [
"Apache-2.0"
] | 36 | 2016-11-21T17:05:18.000Z | 2020-10-12T15:19:52.000Z | simulated_uploaders/simulate_upload_rnaseq_fastq.py | BD2KGenomics/dcc-spinnaker-client | 72a6cb94a8db4be707cb1f63513b2f2712edb663 | [
"Apache-2.0"
] | 6 | 2016-12-12T19:37:13.000Z | 2021-10-10T21:24:54.000Z | import argparse
import subprocess
import os
import time
import random
if __name__ == "__main__":
main()
| 62.104478 | 480 | 0.694545 | import argparse
import subprocess
import os
import time
import random
def getOptions():
    """Define and parse the simulator's command-line options.

    Returns the argparse Namespace with fastq paths, schema files, output
    locations, and storage/metadata server settings.
    """
    opt_parser = argparse.ArgumentParser(description='Directory that contains Json files.')
    opt_parser.add_argument("-f1", "--fastq-r1-path", default="https://s3.amazonaws.com/oconnor-test-bucket/sample-data/ERR030886_1.fastq.gz", help="Path to R1 RNASeq fastq")
    opt_parser.add_argument("-f2", "--fastq-r2-path", default="https://s3.amazonaws.com/oconnor-test-bucket/sample-data/ERR030886_2.fastq.gz", help="Path to R2 RNASeq fastq")
    opt_parser.add_argument("-i", "--input-metadata-schema", default="input_metadata.json", help="flattened json schema file for input metadata")
    opt_parser.add_argument("-m", "--metadata-schema", default="metadata_schema.json", help="flattened json schema file for metadata")
    opt_parser.add_argument("-d", "--output-dir", default="output_metadata", help="output directory. In the case of colliding file names, the older file will be overwritten.")
    opt_parser.add_argument("-r", "--receipt-file", default="receipt.tsv", help="receipt file name. This tsv file is the receipt of the upload, with UUIDs filled in.")
    opt_parser.add_argument("--storage-access-token", default="NA", help="access token for storage system looks something like 12345678-abcd-1234-abcdefghijkl.")
    opt_parser.add_argument("--metadata-server-url", default="https://storage.ucsc-cgl.org:8444", help="URL for metadata server.")
    opt_parser.add_argument("--storage-server-url", default="https://storage.ucsc-cgl.org:5431", help="URL for storage server.")
    opt_parser.add_argument("--ucsc-storage-client-path", default="ucsc-storage-client", help="Location of client.")
    return opt_parser.parse_args()
def main():
    """Endlessly upload simulated RNA-Seq fastq submissions (Python 2 only).

    Downloads the two sample fastq files once if absent, then loops forever:
    writes a sample.tsv with a random specimen id, shells out to spinnaker.py
    to upload it, and sleeps 30-60 seconds between iterations.
    """
    args = getOptions()
    # prep download
    # NOTE(review): `curl -k` disables TLS certificate verification, and the
    # command is built via string interpolation with shell=True -- acceptable
    # only for trusted, hard-coded inputs like these defaults.
    url_arr = args.fastq_r1_path.split("/")
    if not os.path.isfile(url_arr[-1]):
        cmd = "curl -k %s > %s" % (args.fastq_r1_path, 'ERR030886_1.fastq.gz')
        print "DOWNLOADING: "+cmd
        result = subprocess.call(cmd, shell=True)
        if (result != 0):
            print "PROBLEMS DOWNLOADING"
    url_arr = args.fastq_r2_path.split("/")
    if not os.path.isfile(url_arr[-1]):
        cmd = "curl -k %s > %s" % (args.fastq_r2_path, 'ERR030886_2.fastq.gz')
        print "DOWNLOADING: "+cmd
        result = subprocess.call(cmd, shell=True)
        if (result != 0):
            print "PROBLEMS DOWNLOADING"
    # main loop for upload: runs until killed externally.
    upload_count = 0
    while True:
        upload_count += 1
        print "LOOP UPLOAD: "+str(upload_count)
        # create template: random zero-padded specimen id feeds the TSV rows.
        specimen = '{0:05}'.format(random.randint(1, 1000000))
        template = '''Program Project Center Name Submitter Donor ID Donor UUID Submitter Specimen ID Specimen UUID Submitter Specimen Type Submitter Experimental Design Submitter Sample ID Sample UUID Analysis Type Workflow Name Workflow Version File Type File Path Upload File ID Data Bundle ID Metadata.json
TEST TEST UCSC S%s S%sa Normal - blood derived RNA-Seq S%sa1 sequence_upload Spinnaker 1.0.0 fastq.gz ERR030886_1.fastq.gz
TEST TEST UCSC S%s S%sa Normal - blood derived RNA-Seq S%sa1 sequence_upload Spinnaker 1.0.0 fastq.gz ERR030886_2.fastq.gz''' % (str(specimen), str(specimen), str(specimen), str(specimen), str(specimen), str(specimen))
        f = open('sample.tsv', 'w')
        # Python 2 "print chevron" syntax: writes template to the file f.
        print >>f, template
        f.close()
        # execute upload: wipes output_metadata and re-runs the spinnaker client.
        cmd = "rm -rf output_metadata; mkdir -p output_metadata; python ../spinnaker.py --input-metadata-schema %s --metadata-schema %s --output-dir output_metadata --receipt-file receipt.tsv --storage-access-token %s --metadata-server-url %s --storage-server-url %s --force-upload --storage-client-path %s sample.tsv" % (args.input_metadata_schema, args.metadata_schema, args.storage_access_token, args.metadata_server_url, args.storage_server_url, args.ucsc_storage_client_path)
        print "CMD: %s" % cmd
        result = subprocess.call(cmd, shell=True)
        if (result != 0):
            print "PROBLEMS UPLOADING"
        # sleep random time before next upload
        print "PAUSING..."
        time.sleep(random.randint(30, 60))
# Script entry point.
if __name__ == "__main__":
    main()
| 4,006 | 0 | 46 |
3683ba322d5167e3d034bb48b5b53e0c6869da79 | 2,026 | py | Python | Python/tdw/FBOutput/EnvironmentColliderIntersection.py | ricklentz/tdw | da40eec151acae20b28d6486defb4358d96adb0e | [
"BSD-2-Clause"
] | null | null | null | Python/tdw/FBOutput/EnvironmentColliderIntersection.py | ricklentz/tdw | da40eec151acae20b28d6486defb4358d96adb0e | [
"BSD-2-Clause"
] | null | null | null | Python/tdw/FBOutput/EnvironmentColliderIntersection.py | ricklentz/tdw | da40eec151acae20b28d6486defb4358d96adb0e | [
"BSD-2-Clause"
] | null | null | null | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: FBOutput
import tdw.flatbuffers
| 39.72549 | 163 | 0.715202 | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: FBOutput
import tdw.flatbuffers
# Flatbuffers accessor for an EnvironmentColliderIntersection table.
# Auto-generated by flatc (see file header) -- regenerate rather than editing.
class EnvironmentColliderIntersection(object):
    __slots__ = ['_tab']
    @classmethod
    def GetRootAsEnvironmentColliderIntersection(cls, buf, offset):
        # Read the root table offset from the buffer and wrap it.
        n = tdw.flatbuffers.encode.Get(tdw.flatbuffers.packer.uoffset, buf, offset)
        x = EnvironmentColliderIntersection()
        x.Init(buf, n + offset)
        return x
    # EnvironmentColliderIntersection
    def Init(self, buf, pos):
        # Bind this accessor to a table at `pos` within `buf`.
        self._tab = tdw.flatbuffers.table.Table(buf, pos)
    # EnvironmentColliderIntersection
    def ObjectId(self):
        # int32 field at vtable slot 4; 0 when absent.
        o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(tdw.flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0
    # EnvironmentColliderIntersection
    def Direction(self):
        # Inline Vector3 struct at vtable slot 6; None when absent.
        o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            x = o + self._tab.Pos
            from .Vector3 import Vector3
            obj = Vector3()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None
    # EnvironmentColliderIntersection
    def Distance(self):
        # float32 field at vtable slot 8; 0.0 when absent.
        o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.Get(tdw.flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
        return 0.0
# Generated module-level helpers for building an EnvironmentColliderIntersection
# table with a flatbuffers Builder: Start, per-field Add*, then End.
def EnvironmentColliderIntersectionStart(builder): builder.StartObject(3)
def EnvironmentColliderIntersectionAddObjectId(builder, objectId): builder.PrependInt32Slot(0, objectId, 0)
def EnvironmentColliderIntersectionAddDirection(builder, direction): builder.PrependStructSlot(1, tdw.flatbuffers.number_types.UOffsetTFlags.py_type(direction), 0)
def EnvironmentColliderIntersectionAddDistance(builder, distance): builder.PrependFloat32Slot(2, distance, 0.0)
def EnvironmentColliderIntersectionEnd(builder): return builder.EndObject()
| 1,422 | 354 | 134 |