content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import sys
from django.apps import AppConfig
from django.db.models.signals import post_save
| [
11748,
25064,
198,
198,
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
12683,
874,
1330,
1281,
62,
21928,
628
] | 3.357143 | 28 |
from dataclasses import dataclass
from typing import overload
from .words import Word
@overload
@overload
@dataclass
@dataclass(repr=False)
@dataclass(repr=False)
@dataclass(repr=False)
@dataclass(repr=False)
@dataclass(repr=False)
@dataclass(repr=False)
@dataclass(repr=False)
@dataclass(repr=False)
@dataclass(repr=False)
@dataclass(repr=False)
@dataclass(repr=False)
@dataclass
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
19720,
1330,
31754,
198,
6738,
764,
10879,
1330,
9678,
198,
198,
31,
2502,
2220,
198,
31,
2502,
2220,
628,
628,
198,
31,
19608,
330,
31172,
628,
198,
31,
19608,
330,
31172,
7,... | 2.356322 | 174 |
#
# PySNMP MIB module SYMME1T1 (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/neermitt/Dev/kusanagi/mibs.snmplabs.com/asn1/SYMME1T1
# Produced by pysmi-0.3.4 at Tue Jul 30 11:34:59 2019
# On host NEERMITT-M-J0NV platform Darwin version 18.6.0 by user neermitt
# Using Python version 3.7.4 (default, Jul 9 2019, 18:13:23)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint")
entPhysicalIndex, = mibBuilder.importSymbols("ENTITY-MIB", "entPhysicalIndex")
ifNumber, ifIndex = mibBuilder.importSymbols("IF-MIB", "ifNumber", "ifIndex")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
NotificationType, Unsigned32, Bits, iso, Counter32, MibIdentifier, ModuleIdentity, TimeTicks, IpAddress, Integer32, Gauge32, ObjectIdentity, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Unsigned32", "Bits", "iso", "Counter32", "MibIdentifier", "ModuleIdentity", "TimeTicks", "IpAddress", "Integer32", "Gauge32", "ObjectIdentity", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
EnableValue, symmPhysicalSignal, ONVALUETYPE = mibBuilder.importSymbols("SYMM-COMMON-SMI", "EnableValue", "symmPhysicalSignal", "ONVALUETYPE")
symmE1T1 = ModuleIdentity((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2))
symmE1T1.setRevisions(('2011-03-18 17:06',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: symmE1T1.setRevisionsDescriptions(('Revision 1.0',))
if mibBuilder.loadTexts: symmE1T1.setLastUpdated('201103181705Z')
if mibBuilder.loadTexts: symmE1T1.setOrganization('Symmetricom.')
if mibBuilder.loadTexts: symmE1T1.setContactInfo('Symmetricom Technical Support 1-888-367-7966 toll free USA 1-408-428-7907 worldwide Support@symmetricom.com')
if mibBuilder.loadTexts: symmE1T1.setDescription('This is the Symmetricom Common MIB for the configuration and status monitoring of E1/T1 ports in the system. It is one of the MIBs under the symmPhysicalSignal node. This MIB is organized into two main nodes: input and output. Each node is further has two tables, one for status and one for configuration.')
e1T1input = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1))
inputE1T1Status = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 1))
e1T1InputStatusTable = MibTable((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 1, 1), )
if mibBuilder.loadTexts: e1T1InputStatusTable.setStatus('current')
if mibBuilder.loadTexts: e1T1InputStatusTable.setDescription('This table contains status information for each E1/T1 input port.')
e1T1InputStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "SYMME1T1", "e1T1InputStatusIndex"))
if mibBuilder.loadTexts: e1T1InputStatusEntry.setStatus('current')
if mibBuilder.loadTexts: e1T1InputStatusEntry.setDescription('An entry of the e1T1InputStatusTable. Table index is ifIndex (port/interface index). Each entry has three parameters for the specified E1/T1 input port: 1. Port enable status (enable or disable) 2. Current value of the incoming SSM 3. Port status ')
e1T1InputStatusIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000)))
if mibBuilder.loadTexts: e1T1InputStatusIndex.setStatus('current')
if mibBuilder.loadTexts: e1T1InputStatusIndex.setDescription('Local index of the E1/T1 input status table.')
e1T1InputPQLCurValueV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 1, 1, 1, 3), TP5000PQLVALUE()).setMaxAccess("readonly")
if mibBuilder.loadTexts: e1T1InputPQLCurValueV1.setStatus('current')
if mibBuilder.loadTexts: e1T1InputPQLCurValueV1.setDescription('The current PQL value of the incoming SSM on this input port.')
e1T1InputPortStatusV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 1, 1, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: e1T1InputPortStatusV1.setStatus('current')
if mibBuilder.loadTexts: e1T1InputPortStatusV1.setDescription('The port status of the specified input E1/T1 input port. Possible values are On (1) and Off (2). When the input port state is enabled, port status becomes on. When input port state is disabled, input port status is off.')
e1T1InputConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2))
e1T1InputConfigTable = MibTable((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1), )
if mibBuilder.loadTexts: e1T1InputConfigTable.setStatus('current')
if mibBuilder.loadTexts: e1T1InputConfigTable.setDescription('Configuration Table for E1/T1 input ports')
e1T1InputConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "SYMME1T1", "e1T1InputConfigIndex"))
if mibBuilder.loadTexts: e1T1InputConfigEntry.setStatus('current')
if mibBuilder.loadTexts: e1T1InputConfigEntry.setDescription('An entry of the E1/T1 input configuration table. Table index is ifIndex (port/interface). Each entry has the following configuration parameters for the selected input port: 1. Frame type 2. CRC enable state 3. SSM enable state 4. SSM bit position 5. Default PQL value that can be used to override the input SSM value 6. Zero suppression state ')
e1T1InputConfigIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000)))
if mibBuilder.loadTexts: e1T1InputConfigIndex.setStatus('current')
if mibBuilder.loadTexts: e1T1InputConfigIndex.setDescription('Local index of the E1/T1 input configuration table.')
e1T1InputFrameTypeV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1, 2), INPUTE1T1FRAMETYPE()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1InputFrameTypeV1.setStatus('current')
if mibBuilder.loadTexts: e1T1InputFrameTypeV1.setDescription('E1 or T1 input frame type. Supported frame types include: 1. Freq1544khz (1) 2. Freq2048khz (2) 3. CCS (3) 4. CAS (4) 5. D4 (5) 6. ESF (6) Default frame type is 2048 kHz ')
e1T1InputCRCStateV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1, 3), EnableValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1InputCRCStateV1.setStatus('current')
if mibBuilder.loadTexts: e1T1InputCRCStateV1.setDescription('CRC enable state can be Enable (1) or Disable (2). Disabling the CRC means the CRC in the SSM is not used.')
e1T1InputSSMStateV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1, 4), EnableValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1InputSSMStateV1.setStatus('current')
if mibBuilder.loadTexts: e1T1InputSSMStateV1.setDescription("SSM enable state. It can be Enable (1) or Disable (2). Disabling the SSM means the incoming SSM is not used, and the forced (default) PQL value for this input port will be used during the reference selection. SSM is supported for only three frame types: EFS, CAS with CRC4, and CCA with CRC4. SSM should not be enabled for other frame types. If SSM is enabled for an input port, but the frame type does not support SSM or is not sending a valid SSM, then this input will be disqualified and the input PQL will be set to 'invalid.' The system will go into holdover no other qualified reference is available. ")
e1T1InputSSMBitV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(4, 8))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1InputSSMBitV1.setStatus('current')
if mibBuilder.loadTexts: e1T1InputSSMBitV1.setDescription('SSM Bit position. The value range is 4 to 8. This parameter is only used for frame types ESF, CCS, or CAS.')
e1T1InputPQLValueV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1, 6), TP5000PQLVALUE()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1InputPQLValueV1.setStatus('current')
if mibBuilder.loadTexts: e1T1InputPQLValueV1.setDescription('The user assigned PQL value for the specified input. This PQL value is used when the SSM state is disabled. The range for the user assigned PQL value is 1 to 9. ')
eT1InputZeroSupprV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 1, 2, 1, 1, 7), ONVALUETYPE()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eT1InputZeroSupprV1.setStatus('current')
if mibBuilder.loadTexts: eT1InputZeroSupprV1.setDescription('The number indicates whether zero suppression (ZS) on the input port is enabled or disabled. Valid values are On (1) or Off (2). ')
e1T1Output = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2))
e1T1OutputStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 1))
e1T1OutputStatusTable = MibTable((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 1, 1), )
if mibBuilder.loadTexts: e1T1OutputStatusTable.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputStatusTable.setDescription('This table contains status information for each E1/T1 output port.')
e1T1OutputStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "SYMME1T1", "e1T1OutputStatusIndex"))
if mibBuilder.loadTexts: e1T1OutputStatusEntry.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputStatusEntry.setDescription('An entry of the e1T1OutputStatusTable. Table index is ifIndex (port/interface index). Each entry has two parameters for the specified E1/T1 input port: 1. Port status 2. Outgoing SSM value ')
e1T1OutputStatusIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000)))
if mibBuilder.loadTexts: e1T1OutputStatusIndex.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputStatusIndex.setDescription('Local index of the E1/T1 output status table.')
e1T1OutputPortStatusV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 1, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: e1T1OutputPortStatusV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputPortStatusV1.setDescription("The port status of the specified E1/T1 output port. Possible values are On (1) and Off (2). 'On' means there is signal on the port. For E1/T1 output port it means the system is in normal tracking mode. 'Off' means there is no signal on the port. For E1/T1 output port it means the output is squelched during some clock states.")
e1T1OutputPQLValueV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 1, 1, 1, 3), TP5000PQLVALUE()).setMaxAccess("readonly")
if mibBuilder.loadTexts: e1T1OutputPQLValueV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputPQLValueV1.setDescription('The PQL value for the specified E1/T1 output port.')
e1T1OutputConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2))
e1T1OutputConfigTable = MibTable((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1), )
if mibBuilder.loadTexts: e1T1OutputConfigTable.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputConfigTable.setDescription('This table contains configuration information for each E1/T1 output port.')
e1T1OutputConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "SYMME1T1", "e1T1OutputConfigIndex"))
if mibBuilder.loadTexts: e1T1OutputConfigEntry.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputConfigEntry.setDescription('An entry of the e1T1OutputConfigTable. Table index is ifIndex (port/interface index). Each entry has the configuration parameters for the specified E1/T1 output port: 1. Port enable state 2. Frame type 3. CRC enable state 4. SSM enable state 5. SSM bit position 6. Zero suppression on/off state 7. Output port cable length ')
e1T1OutputConfigIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000)))
if mibBuilder.loadTexts: e1T1OutputConfigIndex.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputConfigIndex.setDescription('Local index of the E1/T1 output configuration table.')
e1T1OutputStateV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 2), PORTSTATETYPE()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1OutputStateV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputStateV1.setDescription('E1/T1 output port enable state. Its value can be Enable (1) or Disable (2). Disabling an output port means no output is generated for that port.')
e1T1OutputFrameTypeV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 3), OUTPUTE1T1FRAMETYPE()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1OutputFrameTypeV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputFrameTypeV1.setDescription('E1 or T1 output frame type. Supported frame types include: 1. Freq1544khz (1) 2. Freq2048khz (2) 3. CCS (3) 4. CAS (4) 5. D4 (5) 6. ESF (6) Default frame type is 2048 kHz. ')
e1T1OutputCRCStateV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 4), EnableValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1OutputCRCStateV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputCRCStateV1.setDescription('CRC enable state can be Enable (1) or Disable (2). Disabling the CRC means that no CRC is generated for the SSM.')
e1T1OutputSSMStateV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 5), EnableValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1OutputSSMStateV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputSSMStateV1.setDescription('SSM enable state. It can be Enable (1) or Disable (2). Disabling the output SSM means that no SSM is generated for the specified output port.')
e1T1OutputSSMBitV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(4, 8))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1OutputSSMBitV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputSSMBitV1.setDescription('SSM Bit position. The value range is 4 to 8. This parameter is only used for frame types ESF, CCS, or CAS.')
e1T1OutputZeroSupprV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 7), ONVALUETYPE()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1OutputZeroSupprV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputZeroSupprV1.setDescription('The number indicates whether zero suppression (ZS) on the output port is enabled or disabled. Valid values are On (1) or Off (2). ')
e1T1OutputLengthV1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 2, 2, 1, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: e1T1OutputLengthV1.setStatus('current')
if mibBuilder.loadTexts: e1T1OutputLengthV1.setDescription('Output cable length. ')
e1T1Conformance = ObjectIdentity((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3))
if mibBuilder.loadTexts: e1T1Conformance.setStatus('current')
if mibBuilder.loadTexts: e1T1Conformance.setDescription('This node contains conformance statement for the symmE1T1 MIB module. ')
e1T1Compliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3, 1))
e1T1BasicCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3, 1, 1)).setObjects(("SYMME1T1", "e1T1InputStatusGroup"), ("SYMME1T1", "e11T1InputConfigGroup"), ("SYMME1T1", "e11T1OutputStatusGroup"), ("SYMME1T1", "e11T1OutputConfigGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
e1T1BasicCompliance = e1T1BasicCompliance.setStatus('current')
if mibBuilder.loadTexts: e1T1BasicCompliance.setDescription('The compliance statement for SNMP entities which have E1/T1 input/output.')
e1T1UocGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3, 2))
e1T1InputStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3, 2, 1)).setObjects(("SYMME1T1", "e1T1InputPortStatusV1"), ("SYMME1T1", "e1T1InputPQLCurValueV1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
e1T1InputStatusGroup = e1T1InputStatusGroup.setStatus('current')
if mibBuilder.loadTexts: e1T1InputStatusGroup.setDescription('A collection of objects providing information applicable to E1/T1 input status group.')
e11T1InputConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3, 2, 2)).setObjects(("SYMME1T1", "e1T1InputFrameTypeV1"), ("SYMME1T1", "e1T1InputCRCStateV1"), ("SYMME1T1", "e1T1InputSSMStateV1"), ("SYMME1T1", "e1T1InputSSMBitV1"), ("SYMME1T1", "e1T1InputPQLValueV1"), ("SYMME1T1", "eT1InputZeroSupprV1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
e11T1InputConfigGroup = e11T1InputConfigGroup.setStatus('current')
if mibBuilder.loadTexts: e11T1InputConfigGroup.setDescription('A collection of objects providing information applicable to E1/T1 input configuration group.')
e11T1OutputStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3, 2, 3)).setObjects(("SYMME1T1", "e1T1OutputPortStatusV1"), ("SYMME1T1", "e1T1OutputPQLValueV1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
e11T1OutputStatusGroup = e11T1OutputStatusGroup.setStatus('current')
if mibBuilder.loadTexts: e11T1OutputStatusGroup.setDescription('A collection of objects providing information applicable to E1/T1 output status group.')
e11T1OutputConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 2, 3, 2, 4)).setObjects(("SYMME1T1", "e1T1OutputStateV1"), ("SYMME1T1", "e1T1OutputFrameTypeV1"), ("SYMME1T1", "e1T1OutputCRCStateV1"), ("SYMME1T1", "e1T1OutputSSMStateV1"), ("SYMME1T1", "e1T1OutputSSMBitV1"), ("SYMME1T1", "e1T1OutputLengthV1"), ("SYMME1T1", "e1T1OutputZeroSupprV1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
e11T1OutputConfigGroup = e11T1OutputConfigGroup.setStatus('current')
if mibBuilder.loadTexts: e11T1OutputConfigGroup.setDescription('A collection of objects providing information applicable to E1/T1 output configuration group.')
mibBuilder.exportSymbols("SYMME1T1", TLocalTimeOffset=TLocalTimeOffset, TLatAndLon=TLatAndLon, e1T1InputCRCStateV1=e1T1InputCRCStateV1, e1T1InputFrameTypeV1=e1T1InputFrameTypeV1, e11T1InputConfigGroup=e11T1InputConfigGroup, e1T1InputConfigTable=e1T1InputConfigTable, e11T1OutputConfigGroup=e11T1OutputConfigGroup, e1T1InputStatusGroup=e1T1InputStatusGroup, e1T1OutputStatusEntry=e1T1OutputStatusEntry, OUTPUTE1T1FRAMETYPE=OUTPUTE1T1FRAMETYPE, e1T1OutputLengthV1=e1T1OutputLengthV1, e1T1InputSSMStateV1=e1T1InputSSMStateV1, e1T1BasicCompliance=e1T1BasicCompliance, e1T1OutputStatusIndex=e1T1OutputStatusIndex, e1T1OutputStateV1=e1T1OutputStateV1, e1T1InputPortStatusV1=e1T1InputPortStatusV1, e1T1Output=e1T1Output, e1T1UocGroups=e1T1UocGroups, e1T1InputPQLValueV1=e1T1InputPQLValueV1, TSsm=TSsm, e1T1OutputStatus=e1T1OutputStatus, e11T1OutputStatusGroup=e11T1OutputStatusGroup, e1T1InputStatusIndex=e1T1InputStatusIndex, e1T1OutputFrameTypeV1=e1T1OutputFrameTypeV1, e1T1OutputStatusTable=e1T1OutputStatusTable, PYSNMP_MODULE_ID=symmE1T1, PORTSTATETYPE=PORTSTATETYPE, e1T1OutputSSMStateV1=e1T1OutputSSMStateV1, e1T1OutputPortStatusV1=e1T1OutputPortStatusV1, symmE1T1=symmE1T1, e1T1InputConfigEntry=e1T1InputConfigEntry, e1T1input=e1T1input, e1T1OutputPQLValueV1=e1T1OutputPQLValueV1, e1T1Compliances=e1T1Compliances, TAntHeight=TAntHeight, DateAndTime=DateAndTime, e1T1InputStatusEntry=e1T1InputStatusEntry, INPUTE1T1FRAMETYPE=INPUTE1T1FRAMETYPE, TP5000PQLVALUE=TP5000PQLVALUE, e1T1InputPQLCurValueV1=e1T1InputPQLCurValueV1, e1T1InputStatusTable=e1T1InputStatusTable, e1T1OutputConfigEntry=e1T1OutputConfigEntry, e1T1InputSSMBitV1=e1T1InputSSMBitV1, inputE1T1Status=inputE1T1Status, e1T1InputConfigIndex=e1T1InputConfigIndex, e1T1OutputCRCStateV1=e1T1OutputCRCStateV1, e1T1OutputConfigTable=e1T1OutputConfigTable, e1T1OutputZeroSupprV1=e1T1OutputZeroSupprV1, e1T1OutputConfig=e1T1OutputConfig, e1T1OutputConfigIndex=e1T1OutputConfigIndex, eT1InputZeroSupprV1=eT1InputZeroSupprV1, 
e1T1OutputSSMBitV1=e1T1OutputSSMBitV1, e1T1InputConfig=e1T1InputConfig, e1T1Conformance=e1T1Conformance)
| [
2,
198,
2,
9485,
15571,
7378,
337,
9865,
8265,
19704,
44,
11682,
16,
51,
16,
357,
4023,
1378,
16184,
76,
489,
8937,
13,
785,
14,
79,
893,
11632,
8,
198,
2,
7054,
45,
13,
16,
2723,
2393,
1378,
14,
14490,
14,
710,
7780,
715,
14,
... | 2.608591 | 7,892 |
from google.cloud import datastore
import os
import json
client = datastore.Client()
| [
6738,
23645,
13,
17721,
1330,
4818,
459,
382,
198,
11748,
28686,
198,
11748,
33918,
198,
198,
16366,
796,
4818,
459,
382,
13,
11792,
3419,
198
] | 3.44 | 25 |
# http://adventofcode.com/2017/day/2
code = """5048 177 5280 5058 4504 3805 5735 220 4362 1809 1521 230 772 1088 178 1794
6629 3839 258 4473 5961 6539 6870 4140 4638 387 7464 229 4173 5706 185 271
5149 2892 5854 2000 256 3995 5250 249 3916 184 2497 210 4601 3955 1110 5340
153 468 550 126 495 142 385 144 165 188 609 182 439 545 608 319
1123 104 567 1098 286 665 1261 107 227 942 1222 128 1001 122 69 139
111 1998 1148 91 1355 90 202 1522 1496 1362 1728 109 2287 918 2217 1138
426 372 489 226 344 431 67 124 120 386 348 153 242 133 112 369
1574 265 144 2490 163 749 3409 3086 154 151 133 990 1002 3168 588 2998
173 192 2269 760 1630 215 966 2692 3855 3550 468 4098 3071 162 329 3648
1984 300 163 5616 4862 586 4884 239 1839 169 5514 4226 5551 3700 216 5912
1749 2062 194 1045 2685 156 3257 1319 3199 2775 211 213 1221 198 2864 2982
273 977 89 198 85 1025 1157 1125 69 94 919 103 1299 998 809 478
1965 6989 230 2025 6290 2901 192 215 4782 6041 6672 7070 7104 207 7451 5071
1261 77 1417 1053 2072 641 74 86 91 1878 1944 2292 1446 689 2315 1379
296 306 1953 3538 248 1579 4326 2178 5021 2529 794 5391 4712 3734 261 4362
2426 192 1764 288 4431 2396 2336 854 2157 216 4392 3972 229 244 4289 1902"""
print(checksum_1(code))
print(checksum_2(code)) | [
2,
2638,
1378,
324,
1151,
1659,
8189,
13,
785,
14,
5539,
14,
820,
14,
17,
628,
628,
198,
8189,
796,
37227,
1120,
2780,
197,
22413,
197,
4309,
1795,
197,
1120,
3365,
197,
17885,
19,
197,
23734,
20,
197,
3553,
2327,
197,
17572,
197,
... | 1.808418 | 689 |
import logging
from Tensile.SolutionStructs import Convolution
from YamlBuilder.YamlBuilder import YamlBuilder
log =logging.getLogger("testlog")
| [
11748,
18931,
198,
6738,
40280,
576,
13,
46344,
44909,
82,
1330,
34872,
2122,
198,
6738,
14063,
75,
32875,
13,
56,
43695,
32875,
1330,
14063,
75,
32875,
198,
198,
6404,
796,
6404,
2667,
13,
1136,
11187,
1362,
7203,
9288,
6404,
4943,
198... | 3.560976 | 41 |
"""
This module will Help You to Search on Different Websites like Google,Youtube,etc.
You can search on more than 25 websites very easily by just 2 lines of code.
Websites Supported:-
1.Google -google_search("Python")
2.Youtube -youtube_search("Python")
3.Bing -bing_search("Python")
4.Quora -quora_search("5 Python Projects")
5.Python -python_search("Input in Python")
6.Twitter -twitter_search("Python")
7.Facebook -facebook_search("Python")
8.Pinterest -pinterest_search("Python images")
9.Wikipedia -wikipedia_search("Python_(programming_language)")
10.Amazon -amazon_search("Python Books")
11.Reddit -reddit_search("Python")
12.Imdb -imdb_search("python")
13.TripAdvisor -tripadvisor_search("London")
14.Walmart -walmart_search("python Books")
15.Craigslist -craigslist_search("Python")
16.Ebay -ebay_search("Python books")
17.LinkedIn-Job Search, People Search, Learning
18.Playstore -playstore_search("python")
19.Headline -headline_search("python")
20.Esty -esty_search("python")
21.Indeed -indeed_search("Python Developer","USA")
22.Apple -apple_search("Mac Book Pro")
23.ESPN -espn_search("Cricket")
24.Webmd -webmd_search("Python")
25.New York Times -nytimes_search("Covid-19")
26.CNN -cnn_search("Us elections 2020")
27.Best Buy- `bestbuy_search("Python")`
28.Britanica-`britannica_search("Anything")`
29.Bussiness Insider- `businessinsider__search("News")`
30.Dictionary- `dictionary_search("graphics")`
31.Gamepedia- `gamepedia_search("Minecraft")`
32.Github- `github_search("ankitsinghprograms")`
33.Home depot- `homedepot_search("News")`
34.MapQuest- `mapquest_search("California,USA")`
35.Mayo clinic- `mayoclinic_search("What to do during Fever")`
36.Medical News Today- `medicalnewstoday_search("COVID-19")`
37.Merriam Webster- `merriam_webster_search("News")`
38.Microsoft- `microsoft_search("Mac Book Pro")`
39.NIH- `nih_search("Usa News")`
40.Quizlet- `quizlet_search("Std 8")`
41.Rotten Tomatoes- `rottentomatoes_search("Water Bottle")`
42.Target- `target_search("Anything")`
43.Urban Dictionary- `urban_dictionary_search("LOL meaing in urban dictionary")`
44.USA Today- `usatoday_search("USA election")`
45.Yahoo- `yahoo_search("C++")`
46.Zillow- `zillow_search("News")`
========== Example ===========
Code is to simple Just 2 lines of Code.
------------------------------------
from pysearch import *
google_search("How to Search via pysearch module Python")
------------------------------------
=============================
=========== Version ===========
++ 0.1.3 (19/01/2021)+++++++++
~~ Bug Fixes
++++++++++++++++++++++++++++
=============================
======== Getting Errors??========
If You get error then contact me at ankitsingh300307@gmail.com
=============================
=========== Author ==========
Name-Ankit Singh
Email-ankitsingh300307@gmail.com
Github-https://github.com/Ankitsinghprograms
Country-India
============================
"""
import webbrowser
def open(link):
"""
Opening Webpage Through webbrowser module
"""
try:
webbrowser.open(link)
except:
print("EROOR UNABLE TO OPEN WEBSITE")
print("Common Errors:-\n\
~ webbrowser module error \
~ Your system doesn't have Any Webrowser \
-Try Installing modules liks Chrome,Firefox,etc.\
~ Contact to Author via email 'ankitsingh300307@gmail.com'")
def google_search(text):
"""
Search on Google (https://www.google.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
google=f"https://www.google.com/search?q={text}&oq={text}"
open(google)
def youtube_search(text):
"""
Search on Youtube (https://www.youtube.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
youtube=f"https://www.youtube.com/results?search_query={text}"
open(youtube)
def bing_search(text):
"""
Search on Bing (www.bing.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
bing=f"https://www.bing.com/search?q={text}"
open(bing)
def quora_search(text):
"""
Search on Quora (www.quora.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
quora=f"https://www.quora.com/search?q={text}"
open(quora)
def python_search(text):
"""
Search on Python.org (www.python.org)
Parameters
-----------
text:- The query which you want to search about (str)
"""
python_org=f"https://www.python.org/search/?q={text}"
open(python_org)
def twitter_search(text):
"""
Search on twitter (https://twitter.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
twitter=f"https://twitter.com/search?q={text}"
open(twitter)
def facebook_search(text):
"""
Search on Facebook (https://facebook.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
facebook=f"https://facebook.com/search/top/?q={text}"
open(facebook)
def pinterest_search(text):
"""
Search on Pinterest (https://in.pinterest.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
pinterest=f"https://in.pinterest.com/search/pins/?q={text}"
open(pinterest)
def wikipedia_search(text):
"""
Search on Wikipedia (https://en.m.wikipedia.org)
Parameters
-----------
text:- The query which you want to search about (str)
"""
wikipedia=f"https://en.m.wikipedia.org/wiki/{text}"
open(wikipedia)
def amazon_search(text):
"""
Search on amazon (https://www.amazon.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
amazon=f"https://www.amazon.com/s?k={text}"
open(amazon)
def reddit_search(text):
"""
Search on Reddit (https://www.reddit.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
reddit=f"https://www.reddit.com/search?q={text}"
open(reddit)
def imdb_search(text):
"""
Search on imdb (https://www.imdb.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
imdb=f"https://www.imdb.com/find?q={text}"
open(imdb)
def tripadvisor_search(text):
"""
Search on Tripadvisor (https://www.tripadvisor.com)
Parameters
-----------
text:- The query which you want to search about (str)
"""
tripadvisor=f"https://www.tripadvisor.com/Search?q={text}"
open(tripadvisor)
def walmart_search(text):
    """Open a Walmart (https://www.walmart.com) search for ``text``."""
    open(f"https://www.walmart.com/search/?query={text}")

def craigslist_search(text):
    """Open a craigslist (https://kolkata.craigslist.org) search for ``text``."""
    open(f"https://kolkata.craigslist.org/d/services/search/bbb?query={text}")

def ebay_search(text):
    """Open an Ebay (https://www.ebay.com) search for ``text``."""
    open(f"https://www.ebay.com/sch/i.html?_nkw={text}")

def linkedin_job_search(text):
    """Open a Linkedin jobs (https://www.linkedin.com/jobs) search for ``text``."""
    open(f"https://www.linkedin.com/jobs/search?keywords={text}")

def linkedin_people_search(first_name, last_name):
    """Open a Linkedin people (https://www.linkedin.com/people-guest/pub)
    search for the given first and last name (both ``str``)."""
    open(f"https://www.linkedin.com/people-guest/pub/dir?firstName={first_name}&lastName={last_name}")

def linkedin_learning_search(text):
    """Open a Linkedin Learning (https://www.linkedin.com/learning) search
    for ``text``."""
    open(f"https://www.linkedin.com/learning/search?keywords={text}")

def playstore_search(text):
    """Open a Play Store (https://play.google.com/store) search for ``text``."""
    open(f"https://play.google.com/store/search?q={text}")

def headline_search(text):
    """Open a Healthline (https://www.healthline.com) search for ``text``."""
    # The site uses the unusual 'q1' query parameter.
    open(f"https://www.healthline.com/search?q1={text}")

def esty_search(text):
    """Open an Etsy (https://www.etsy.com/in-en) search for ``text``.

    (The original docstring misspelled the site as 'etsy.c:om'.)
    """
    open(f"https://www.etsy.com/in-en/search?q={text}")
def indeed_search(job_title, location):
    """Open an Indeed (https://in.indeed.com/m/jobs) search for
    ``job_title`` jobs in ``location`` (both ``str``)."""
    open(f"https://in.indeed.com/m/jobs?q={job_title}&l={location}")

def apple_search(text):
    """Open an Apple (https://www.apple.com/us) search for ``text``."""
    open(f"https://www.apple.com/us/search/{text}")

def espn_search(text):
    """Open an ESPN (https://www.espn.in) search for ``text``."""
    open(f"https://www.espn.in/search/_/q/{text}")

def webmd_search(text):
    """Open a WebMD (https://www.webmd.com) search for ``text``."""
    open(f"https://www.webmd.com/search/search_results/default.aspx?query={text}")

def nytimes_search(text):
    """Open a New York Times (https://www.nytimes.com) search for ``text``."""
    open(f"https://www.nytimes.com/search?query={text}")

def cnn_search(text):
    """Open a CNN (https://edition.cnn.com) search for ``text``."""
    open(f"https://edition.cnn.com/search?q={text}")
# Functions Added in Version- 0.1.2 (19/01/2021) are below:-
def github_search(text):
    """
    Search on Github (https://github.com)

    Parameters
    -----------
    text:- The query which you want to search about (str)
    """
    # BUG FIX: the URL was a plain string literal missing the f-prefix, so
    # the literal text "{text}" was sent instead of the query.
    github = f"https://github.com/search?q={text}"
    open(github)
def merriam_webster_search(text):
    """Open a Merriam-Webster (https://www.merriam-webster.com/dictionary/)
    lookup for ``text``."""
    open(f"https://www.merriam-webster.com/dictionary/{text}")

def gamepedia_search(text):
    """Open a Gamepedia (https://www.gamepedia.com) search for ``text``."""
    open(f"https://www.gamepedia.com/search?search={text}")

def microsoft_search(text):
    """Open a Microsoft (https://www.microsoft.com/en-in/) search for ``text``."""
    # NOTE(review): the query string has no parameter name (e.g. 'q=')
    # before the text -- confirm this URL form is intended.
    open(f"https://www.microsoft.com/en-in/search/result.aspx?{text}")

def target_search(text):
    """Open a Target (https://www.target.com) search for ``text``."""
    open(f"https://www.target.com/s?searchTerm={text}")

def homedepot_search(text):
    """Open a Home Depot (https://www.homedepot.com) search for ``text``."""
    open(f"https://www.homedepot.com/s/{text}")

def nih_search(text):
    """Open an NIH (https://search.nih.gov) search for ``text``."""
    open(f"https://search.nih.gov/search?utf8=%E2%9C%93&affiliate=nih&query={text}&commit=Search")

def rottentomatoes_search(text):
    """Open a Rotten Tomatoes (https://www.rottentomatoes.com) search for ``text``."""
    open(f"https://www.rottentomatoes.com/search?search={text}")

def quizlet_search(text):
    """Open a Quizlet (https://quizlet.com) subject search for ``text``."""
    open(f"https://quizlet.com/subject/{text}/")

def mapquest_search(text):
    """Open a Mapquest (https://www.mapquest.com) search for ``text``."""
    open(f"https://www.mapquest.com/search/results?query={text}")

def britannica_search(text):
    """Open a Britannica (https://www.britannica.com) search for ``text``."""
    open(f"https://www.britannica.com/search?query={text}")

def businessinsider_search(text):
    """Open a Business Insider (https://www.businessinsider.in) search for ``text``."""
    open(f"https://www.businessinsider.in/searchresult.cms?query={text}")

def dictionary_search(text):
    """Open a Dictionary.com (https://www.dictionary.com) lookup for ``text``."""
    # NOTE(review): the trailing '/s=t' (rather than '?s=t') looks odd --
    # confirm the URL form is intended.
    open(f"https://www.dictionary.com/browse/{text}/s=t")

def zillow_search(text):
    """Open a Zillow (https://www.zillow.com) homes search for ``text``."""
    open(f"https://www.zillow.com/homes/{text}/")

def mayoclinic_search(text):
    """Open a Mayo Clinic (https://www.mayoclinic.org) search for ``text``."""
    open(f"https://www.mayoclinic.org/search/search-results?q={text}")

def bestbuy_search(text):
    """Open a Best Buy (https://www.bestbuy.com) search for ``text``."""
    open(f"https://www.bestbuy.com/site/searchpage.jsp?st={text}")

def yahoo_search(text):
    """Open a Yahoo (https://in.search.yahoo.com) search for ``text``."""
    open(f"https://in.search.yahoo.com/search?p={text}")

def usatoday_search(text):
    """Open a USA Today (https://www.usatoday.com) search for ``text``."""
    open(f"https://www.usatoday.com/search/?q={text}")

def medicalnewstoday_search(text):
    """Open a Medical News Today (https://www.medicalnewstoday.com) search
    for ``text``."""
    open(f"https://www.medicalnewstoday.com/search?q={text}")
def urban_dictionary_search(text):
    """
    Search on Urban Dictionary (https://www.urbandictionary.com)

    Parameters
    -----------
    text:- The query which you want to search about (str)
    """
    # BUG FIX: the URL was a plain string literal missing the f-prefix, so
    # the literal text "{text}" was sent instead of the query.
    urban_dictionary = f"https://www.urbandictionary.com/define.php?term={text}"
    open(urban_dictionary)
def usnews_search(text):
    """
    Search on US News (https://www.usnews.com)

    Parameters
    -----------
    text:- The query which you want to search about (str)
    """
    # BUG FIX: this helper was named `usatoday_search`, silently shadowing
    # the real USA Today helper defined earlier in the file even though it
    # queries usnews.com; renamed to match the site it actually searches.
    usanews = f"https://www.usnews.com/search?q={text}"
    open(usanews)
| [
37811,
198,
1212,
8265,
481,
10478,
921,
284,
11140,
319,
20615,
47736,
2737,
588,
3012,
11,
56,
9762,
11,
14784,
13,
628,
198,
1639,
460,
2989,
319,
517,
621,
1679,
9293,
845,
3538,
416,
655,
362,
3951,
286,
2438,
13,
628,
198,
113... | 2.624035 | 6,216 |
import os, sys, shutil
import hashlib
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from common.vars import *
from common.MIME import *
def get_file_size(file_name: str, human_readable: bool = True):
    """
    Return the size of ``file_name``.

    :param file_name: path to the file
    :param human_readable: when True (default), return a formatted string
        such as '1.50 Mb' or '512 bytes'; when the literal ``False``,
        return the raw size in bytes as an int
    :return: str or int
    """
    size = os.path.getsize(file_name)
    # Identity check against False (not general falsiness) matches the
    # original contract.
    if human_readable is False:
        return size
    # Largest unit first; strictly-greater-than thresholds.
    for threshold, unit in ((1024 ** 3, 'Gb'), (1024 ** 2, 'Mb'), (1024, 'Kb')):
        if size > threshold:
            return '{:.2f} {}'.format(size / threshold, unit)
    return '{} bytes'.format(size)
def list_directory(directory_path: str, expected_extension: str = None):
    """
    Recursively list files in a folder and its sub folders.

    :param directory_path: path of the parsed dir
    :param expected_extension: extensions to keep, separated by ``|``
        (e.g. ``"jpg|png"``); when ``None`` no filtering is done and
        sub-directories are listed as well
    :return: list(str) of paths joined from the walk root
    """
    # Local import: `re` was used here without a visible module-level import.
    import re

    file_list = []
    # Hoist the loop-invariant pattern: it depends only on the extension list.
    pattern = None
    if expected_extension is not None:
        pattern = re.compile("\\.({})$".format(expected_extension))
    for root, directories, files in os.walk(directory_path, topdown=False):
        for name in files:
            # Skip files whose suffix does not match the requested extensions.
            if pattern is not None and pattern.search(name) is None:
                continue
            file_list.append(os.path.join(root, name))
        if expected_extension is None:
            for name in directories:
                file_list.append(os.path.join(root, name))
    return file_list
| [
11748,
28686,
11,
25064,
11,
4423,
346,
198,
11748,
12234,
8019,
198,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
5305,
6978,
7,
834,
7753,... | 2.387387 | 666 |
import numpy as np
import os, sys
import argparse
from tqdm import tqdm
import paddle.nn as nn
import paddle
from x2paddle.torch2paddle import DataLoader
import paddle.nn.functional as F
sys.path.append('/home/aistudio')
import scipy.io as sio
from utils.loader import get_validation_data, get_testA_data
import utils
from model import UNet
from model import Uformer
from model import Uformer_Cross
from model import Uformer_CatCross
use_gpu = True
# BUG FIX: the original else-branch called paddle.get_device('cpu'), which
# only *queries* the current device (and takes no argument) instead of
# selecting the CPU; both branches must go through set_device.
paddle.set_device('gpu:0' if use_gpu else 'cpu')
# from skimage import img_as_float32
# from skimage import img_as_ubyte
# from skimage.metrics import peak_signal_noise_ratio as psnr_loss
# from skimage.metrics import structural_similarity as ssim_loss
parser = argparse.ArgumentParser(description=\
'RGB denoising evaluation on the validation set of SIDD')
parser.add_argument('--input_dir', default=\
'/home/aistudio/demoire', type=str, help=\
'Directory of validation images')
parser.add_argument('--result_dir', default='uformer/result_B',
type=str, help='Directory for results')
parser.add_argument('--weights', default=
'/home/aistudio/uformer/log/Uformer_/model_B/model_best.pdiparams', type=str, help=\
'Path to weights')
parser.add_argument('--gpus', default='0', type=str, help=\
'CUDA_VISIBLE_DEVICES')
parser.add_argument('--arch', default='Uformer', type=str, help='arch')
parser.add_argument('--batch_size', default=1, type=int, help=\
'Batch size for dataloader')
parser.add_argument('--save_images', action='store_true', help=\
'Save denoised images in result directory', default=True)
parser.add_argument('--embed_dim', type=int, default=32, help=\
'number of data loading workers')
parser.add_argument('--win_size', type=int, default=8, help=\
'number of data loading workers')
parser.add_argument('--token_projection', type=str, default='linear', help=\
'linear/conv token projection')
parser.add_argument('--token_mlp', type=str, default='leff', help=\
'ffn/leff token mlp')
parser.add_argument('--vit_dim', type=int, default=256, help='vit hidden_dim')
parser.add_argument('--vit_depth', type=int, default=12, help='vit depth')
parser.add_argument('--vit_nheads', type=int, default=8, help='vit hidden_dim')
parser.add_argument('--vit_mlp_dim', type=int, default=512, help='vit mlp_dim')
parser.add_argument('--vit_patch_size', type=int, default=16, help=\
'vit patch_size')
parser.add_argument('--global_skip', action='store_true', default=False,
help='global skip connection')
parser.add_argument('--local_skip', action='store_true', default=False,
help='local skip connection')
parser.add_argument('--vit_share', action='store_true', default=False, help
='share vit module')
parser.add_argument('--train_ps', type=int, default=256, help=\
'patch size of training sample')
args = parser.parse_args()
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
# Ensure the output directory exists before writing restored images.
utils.mkdir(args.result_dir)
testA_dataset = get_testA_data(args.input_dir)
# batch_size=1 with no shuffling: images are processed one by one in order.
testA_loader = DataLoader(dataset=testA_dataset, batch_size=1, shuffle=False, num_workers=0, drop_last=False)
# Build the network selected by --arch and load the trained weights.
model_restoration= utils.get_arch(args)
# model_restoration = torch.nn.DataParallel(model_restoration)
utils.load_checkpoint(model_restoration,args.weights)
print("===>Testing using weights: ", args.weights)
model_restoration.cuda()
model_restoration.eval()
# Inference only: disable gradient tracking.
with paddle.no_grad():
    # NOTE(review): these metric lists are declared but never filled in the
    # visible code (test-A appears to have no ground truth) -- confirm.
    psnr_val_rgb = []
    ssim_val_rgb = []
    for ii, data_test in enumerate(tqdm(testA_loader), 0):
        rgb_noisy = data_test[0]
        filenames = data_test[1]
        # print(filenames)
        h, w = rgb_noisy.shape[2], rgb_noisy.shape[3]
        rgb_restored = model_restoration(rgb_noisy)
        # print(rgb_restored)
        # Rescale to 8-bit range; assumes the model outputs values in [0,1]
        # -- TODO confirm against the network's final activation.
        rgb_restored = rgb_restored * 255
        # Clamp, move to host memory, drop the batch dim, CHW -> HWC.
        rgb_restored = paddle.clip(rgb_restored,0,255).cpu().numpy().squeeze().transpose((1,2,0))
        if args.save_images:
            utils.save_img(os.path.join(args.result_dir,filenames[0]), rgb_restored)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
11,
25064,
198,
11748,
1822,
29572,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
11748,
39517,
13,
20471,
355,
299,
77,
198,
11748,
39517,
198,
6738,
2124,
17,
79,
37382,
13,
... | 2.683367 | 1,497 |
#!/usr/bin/env python3
import argparse
import os
import time
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
import connexion
import logging
# import umap
from flask import send_from_directory, redirect, json
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import MDS, TSNE
from copy import deepcopy
from s2s.lru import LRU
from s2s.project import S2SProject
from index.annoyVectorIndex import AnnoyVectorIndex
__author__ = 'Hendrik Strobelt, Sebastian Gehrmann, Alexander M. Rush'
CONFIG_FILE_NAME = 's2s.yaml'
projects = {}
cache_translate = LRU(50)
# cache_neighbors = LRU(20)
cache_compare = LRU(50)
pre_cached = []
logging.basicConfig(level=logging.INFO)
app = connexion.App(__name__)
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--debug", action='store_true', help=' Debug mode')
parser.add_argument("--port", default="8080", help="Port to run the app. ")
# parser.add_argument("--nocache", default=False)
parser.add_argument("--preload", action='store_true', help="Preload indices.")
parser.add_argument("--cache", type=str, default='',
help="Preload cache from dir")
parser.add_argument("--dir", type=str,
default=os.path.abspath('data'),
help='Path to project')
# parser.add_argument('-api', type=str, default='pytorch',
# choices=['pytorch', 'lua'],
# help="""The API to use.""")
args = parser.parse_args()
print(args)
# global model
# if args.api == "pytorch":
# # model = ONMTmodelAPI("model_api/data/ende_acc_15.72_ppl_912.74_e9.pt")
# model = ONMTmodelAPI("model_api/data/ende_acc_46.86_ppl_21.19_e12.pt")
# else:
# model = ONMTLuaModelAPI()
# just a simple flask route
@app.route('/')
# send everything from client as static content
# NOTE(review): '/' is also routed to this view, but that rule supplies no
# `path` argument, so requests to '/' will fail -- confirm whether a
# redirect to the client index page was intended instead.
@app.route('/client/<path:path>')
def send_static_client(path):
    """ serves all files from ./client/ to ``/client/<path:path>``

    :param path: path from api call
    """
    return send_from_directory('client_dist/', path)
# noinspection SpellCheckingInspection
# ------ API routing as defined in swagger.yaml (connexion)
# def compare_translation(**request):
# pivot = request["in"]
# compare = request["compare"]
# neighbors = request.get('neighbors', [])
#
# current_project = list(projects.values())[0]
# model = current_project.model
#
# # trans_all = model.translate(in_text=[pivot]+compare)
#
# pivot_res = translate(current_project, [pivot])[0]
# pivot_attn = extract_attn(pivot_res)
# pivot_attn_l = pivot_attn.shape[0]
#
# # compare.append(pivot)
# compare_t = translate(current_project, compare)
#
# res = []
# index_orig = 0
# for cc_t_key in compare_t:
# # cc_t = model.translate(in_text=[cc])[0]
# cc_t = compare_t[cc_t_key]
# cc_attn = extract_attn(cc_t)
# dist = 10
# if cc_attn.shape[0] > 0:
# max_0 = max(cc_attn.shape[0], pivot_attn.shape[0])
# max_1 = max(cc_attn.shape[1], pivot_attn.shape[1])
#
# cc__a = np.zeros(shape=(max_0, max_1))
# cc__a[:cc_attn.shape[0], :cc_attn.shape[1]] = cc_attn
#
# cc__b = np.zeros(shape=(max_0, max_1))
# cc__b[:pivot_attn.shape[0], :pivot_attn.shape[1]] = pivot_attn
#
# dist = np.linalg.norm(cc__a - cc__b)
#
# res.append({
# "sentence": extract_sentence(cc_t),
# "attn": extract_attn(cc_t).tolist(),
# "attn_padding": (cc__a - cc__b).tolist(),
# "orig": compare[index_orig],
# "dist": dist
# })
# index_orig += 1
#
# return {"compare": res, "pivot": extract_sentence(pivot_res)}
# Projection methods selectable through the API: name -> 2-D projector.
P_METHODS = {
    "pca": PCA(n_components=2, ),
    "mds": MDS(),
    "tsne": TSNE(init='pca'),
    # 'umap': umap.UMAP(metric='cosine'),
    # NOTE(review): unlike the sklearn estimators above, "none" maps to a
    # plain identity callable, not an object with fit_transform -- callers
    # presumably special-case it; verify at the call site.
    "none": lambda x: x
}
def find_and_load_project(directory):
    """
    searches for CONFIG_FILE_NAME in all subdirectories of directory
    and registers an S2SProject handler for each match in the
    module-level `projects` dict, keyed by the project directory name
    :param directory: scan directory
    :return: null
    """
    # Collect every directory that contains a project config file.
    config_dirs = [os.path.abspath(root)
                   for root, _dirs, files in os.walk(directory)
                   if CONFIG_FILE_NAME in files]
    for project_dir in config_dirs:
        project_id = os.path.split(project_dir)[1]
        config_path = os.path.join(project_dir, CONFIG_FILE_NAME)
        project = S2SProject(directory=project_dir, config_file=config_path)
        if args.preload:
            project.preload_indices(['encoder', 'decoder'])
        projects[project_id] = project
# Register the REST endpoints declared in swagger.yaml (connexion).
app.add_api('swagger.yaml')
if __name__ == '__main__':
    # Standalone development server.
    # NOTE(review): in this branch find_and_load_project()/preload_cache()
    # are never called, so `projects` stays empty -- confirm this is the
    # intended behaviour when running the script directly.
    args = parser.parse_args()
    app.run(port=int(args.port), debug=args.debug, host="0.0.0.0")
else:
    # Imported by a WSGI server: parse the known args and load projects now.
    args, _ = parser.parse_known_args()
    find_and_load_project(args.dir)
    preload_cache(args.cache)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
640,
198,
198,
418,
13,
268,
2268,
14692,
42,
7378,
62,
35,
52,
31484,
6158,
62,
40347,
62,
11380,
8973,
796,
366,
5446,
8... | 2.226577 | 2,220 |
#imports
import pandas as pd
import os
import ast
import sklearn as skl
import sklearn.utils, sklearn.preprocessing, sklearn.decomposition, sklearn.svm
import matplotlib.pyplot as plt
import numpy as np
import pylab
import librosa
import ffmpeg
import audioread
import sklearn
import librosa.display
import datetime
import time
import keras
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Bidirectional, LSTM, Activation, GRU, Conv2D, concatenate, MaxPooling2D, Flatten, Embedding, Lambda, Reshape
from keras.optimizers import Adam, RMSprop
from keras import backend as K
#plot_file(path)
#function to plot spectrograms
#Load and trim datasets to shread out useless info
# Absolute path to the FMA metadata tracks table (Windows-style layout).
filePath = 'D:\\fma_metadata\\tracks.csv'
# The csv has a two-row header (multi-index columns); column 0 is the index.
df_tracks = pd.read_csv(filePath, index_col=0, header=[0, 1])
print(list(df_tracks))
# Columns of interest: train/val/test split, dataset subset, top-level genre.
# NOTE(review): `filter` shadows the builtin of the same name.
filter = [('set', 'split'), ('set', 'subset') , ('track', 'genre_top')]
df_sel = df_tracks[filter]
# Keep only the "small" subset of the dataset.
df_sel = df_sel[df_sel[filter[1]]=='small']
# Materialise the index as a regular column for downstream use.
df_sel['track_id'] = df_sel.index
# Partition by the predefined split column.
df_test = df_sel[df_sel[filter[0]]=='test']
df_valid = df_sel[df_sel[filter[0]]=='validation']
df_train = df_sel[df_sel[filter[0]]=='training']
# Sanity prints: sizes and samples of each split, plus genre distribution.
print(df_sel.tail())
print(df_test.shape)
print(df_test.head())
print( df_train.shape)
print(df_train.head())
print(df_valid.shape)
print(df_valid.head())
print(df_sel[filter[2]].value_counts())
#Build and train the model
#creates training, testing and validation datasets.
#concatenates fragmented datasets.
# concatenate_datasets() #concatinate fragmented datasets
build_and_train_model()
# create_separate_datasets() #create training, testing and validation datasets
| [
2,
320,
3742,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28686,
198,
11748,
6468,
198,
198,
11748,
1341,
35720,
355,
1341,
75,
198,
11748,
1341,
35720,
13,
26791,
11,
1341,
35720,
13,
3866,
36948,
11,
1341,
35720,
13,
12501,
29... | 2.822828 | 587 |
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import re
regex = re.compile('[^A-Za-zÀ-ÿ]')
def extract_mean_word_vectors(data, vocabulary, embeddings):
    '''
    Extract the mean of the word vectors of each tweet.

    :param data: iterable of raw tweet strings
    :param vocabulary: dict mapping a normalised token to its row index in
        `embeddings`; unknown tokens are dropped
    :param embeddings: indexable collection of word vectors
    :return: (token indices per tweet, word vectors per tweet,
              mean vector per tweet)
    '''
    print('> extracting mean of word vectors')
    # IMPROVEMENT: compile the pattern locally instead of relying on the
    # module-level `regex` global, making the function self-contained.
    # The pattern turns every non-(accented-)letter into a space.
    non_letter = re.compile('[^A-Za-zÀ-ÿ]')
    # get vocab equivalence to tweet words
    idx_data = []
    for line in data:
        token_ids = []
        for t in line.strip().split():
            # strip non-letters, collapse whitespace runs, strip again
            key = non_letter.sub(' ', ' '.join(non_letter.sub(' ', t).split()))
            token_ids.append(vocabulary.get(key, -1))
        # drop tokens that are not in the vocabulary (-1 sentinel)
        idx_data.append([i for i in token_ids if i >= 0])
    # get dense vector equivalence to tweet words
    data_tweets_word_vector = [[embeddings[i][:] for i in tweet_tokens]
                               for tweet_tokens in idx_data]
    # get mean word vector of each tweet
    # NOTE: a tweet with no known token yields NaN (np.mean of empty list).
    data_tweets_mean_vector = [np.mean(vecs, axis=0)
                               for vecs in data_tweets_word_vector]
    return idx_data, data_tweets_word_vector, data_tweets_mean_vector
def process_train_ML(pos, neg, vocabulary, embeddings, dim_emb):
    '''
    given the positive and negative tweets data, the vocabulary, the word embeddings
    and the embedding dimension, extracts mean of word vectors per tweets, and outputs
    a dataframe containing all pos and neg tweets, their labels (1 for pos/ -1 for neg)
    and their mean word vectors, then shuffles the rows and also outputs the X matrix
    containing mean word vectors and the vector y containing the labels, ready to be
    used into ML algorithms
    '''
    print('> process pos and neg datas to get X and y to perform ML')
    columns = ["Sentiment", "Tweet", "Token_idx", "Words_Vectors", "Mean_Word_Vector"]

    def _to_frame(tweets, label):
        # One row per tweet: label, raw text, token ids, vectors, mean vector.
        idx, word_vecs, mean_vecs = extract_mean_word_vectors(tweets, vocabulary, embeddings)
        labels = [label] * len(tweets)
        return pd.DataFrame(list(zip(labels, tweets, idx, word_vecs, mean_vecs)),
                            columns=columns)

    # Regroup both classes; ignore_index gives fresh row ids (no duplicates).
    full_df = pd.concat([_to_frame(pos, 1), _to_frame(neg, -1)], ignore_index=True)
    # Shuffle the rows.
    full_df = full_df.sample(frac=1)
    print('> X and y informations:')
    # Build the X matrix; tweets with no known token (NaN mean) become zeros.
    X = full_df['Mean_Word_Vector'].to_numpy()
    X = [v if not np.isnan(v).any() else np.zeros((dim_emb,)) for v in X]
    X = np.concatenate(X, axis=0).reshape((full_df.shape[0], dim_emb))
    print('X shape:', X.shape)
    # Label vector.
    y = full_df['Sentiment'].to_numpy()
    print('y shape:', y.shape)
    return full_df, X, y
def process_test_ML(test, vocabulary, embeddings, dim_emb):
    '''
    given the test set, the vocabulary, the word embeddings and the embedding
    dimension, extracts mean of word vectors per tweets, and outputs a dataframe
    containing all tweets, their submission ids and their mean word vectors, and
    also outputs the X_test matrix containing mean word vectors, ready to be put
    in ML algorithms
    '''
    print('> process test data to get X_test and perform ML')
    # extract mean word embeddings
    idx_test_tweets, test_tweets_word_vector, test_tweets_mean_vector = \
        extract_mean_word_vectors(test, vocabulary, embeddings)
    # BUG FIX / generalisation: submission ids were hard-coded to 1..10000
    # (np.linspace), silently truncating via zip for any other test size;
    # generate one id per tweet instead (identical output when len==10000).
    test_ids = np.arange(1, len(test) + 1, dtype=int)
    # create a df
    test_df = pd.DataFrame(
        list(zip(test_ids, test, idx_test_tweets, test_tweets_word_vector,
                 test_tweets_mean_vector)),
        columns=["Tweet_submission_id", "Tweet", "Token_idx", "Words_Vectors",
                 "Mean_Word_Vector"])
    print('> X_test informations:')
    # get X_test matrix; tweets with no known token (NaN mean) become zeros
    X_test = test_df['Mean_Word_Vector'].to_numpy()
    X_test = [x if not np.isnan(x).any() else np.zeros((dim_emb,)) for x in X_test]
    X_test = np.concatenate(X_test, axis=0).reshape((test_df.shape[0], dim_emb))
    print('X_test shape:', X_test.shape)
    return test_df, X_test
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
302,
198,
260,
25636,
796,
302,
13,
5589,
576,
10786,
58,
61,
32,
12,
57,
64,
12,
89,
127,
222,... | 2.495722 | 1,870 |
import sys
from inspect import Signature
from types import CodeType, FunctionType
from typing import Any, Tuple
if sys.version_info >= (3, 8):
    copy_code = CodeType.replace
else:
    # Constructor-argument order expected by CodeType on Python 3.6/3.7.
    PY_36_37_CODE_ARGS: Tuple[str, ...] = (
        "co_argcount", "co_kwonlyargcount", "co_nlocals", "co_stacksize",
        "co_flags", "co_code", "co_consts", "co_names", "co_varnames",
        "co_filename", "co_name", "co_firstlineno", "co_lnotab",
        "co_freevars", "co_cellvars",
    )

    def copy_code(code: CodeType, **update: Any) -> CodeType:
        """
        Create a copy of code object with changed attributes
        """
        ctor_args = []
        for attr in PY_36_37_CODE_ARGS:
            # Take the override if supplied, else the original attribute.
            ctor_args.append(update.pop(attr, getattr(code, attr)))
        if update:
            raise TypeError(f"Unexpected code attribute(s): {update}")
        return CodeType(*ctor_args)


def copy_func(f: FunctionType, name, defaults, signature: Signature):
    """
    Makes exact copy of a function object with given name and defaults
    """
    positional_defaults = []
    # Start from the function's existing keyword-only defaults.
    kw_defaults = f.__kwdefaults__.copy() if f.__kwdefaults__ else {}
    for param_name, param in signature.parameters.items():
        if param.kind is param.KEYWORD_ONLY:
            if param_name in defaults:
                kw_defaults[param_name] = defaults.pop(param_name)
            continue
        if param_name in defaults:
            positional_defaults.append(defaults.pop(param_name))
        elif param.default is not param.empty:
            positional_defaults.append(param.default)
    clone = FunctionType(
        code=copy_code(f.__code__, co_name=name),
        globals=f.__globals__,
        name=name,
        argdefs=tuple(positional_defaults),
        closure=f.__closure__,
    )
    clone.__kwdefaults__ = kw_defaults
    clone.__dict__.update(f.__dict__)
    return clone
| [
11748,
25064,
198,
6738,
10104,
1330,
34894,
198,
6738,
3858,
1330,
6127,
6030,
11,
15553,
6030,
198,
6738,
19720,
1330,
4377,
11,
309,
29291,
628,
198,
361,
25064,
13,
9641,
62,
10951,
18189,
357,
18,
11,
807,
2599,
198,
220,
220,
22... | 2.136625 | 871 |
from .forms import NewProductForm
from django.db import models
from django.shortcuts import render, resolve_url
from django.http.response import JsonResponse
from quote.models import Product, Brand, User
# ! INVENTORY VIEWS
| [
6738,
764,
23914,
1330,
968,
15667,
8479,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
10568,
62,
6371,
198,
6738,
42625,
14208,
13,
4023,
13,
26209,
1330,
449,
1559,
31077,
1... | 3.66129 | 62 |
from osgeo import gdal
import glob
import os
import numpy as np
| [
6738,
28686,
469,
78,
1330,
308,
31748,
198,
11748,
15095,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
628,
628,
628,
628,
198
] | 3 | 24 |
# Public model identifier(s) exported by this package.
NAME = ['DLRModel']
# Package version string.
VERSION = "1.9.1"
| [
20608,
796,
37250,
19260,
49,
17633,
20520,
198,
198,
43717,
796,
366,
16,
13,
24,
13,
16,
1,
198
] | 2.052632 | 19 |
import pytest
from sqlalchemy import func
from sqlalchemy.future import select
from app.models import ExampleModel
from app.tasks import example_task
pytestmark = pytest.mark.asyncio
| [
11748,
12972,
9288,
198,
6738,
44161,
282,
26599,
1330,
25439,
198,
6738,
44161,
282,
26599,
13,
37443,
1330,
2922,
198,
198,
6738,
598,
13,
27530,
1330,
17934,
17633,
198,
6738,
598,
13,
83,
6791,
1330,
1672,
62,
35943,
198,
198,
9078,... | 3.462963 | 54 |
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Optional
import pandas as pd
if TYPE_CHECKING:
from sklearn.base import TransformerMixin
| [
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
6738,
19720,
1330,
41876,
62,
50084,
2751,
11,
32233,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
361,
41876,
62,
50084,
2751,
25,
198,
220,
220,
220,
422,
1341,
35720,
13,
... | 3.367347 | 49 |
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: plugin_tools
:platform: Unix
:synopsis: Plugin tools
.. moduleauthor:: Jessica Verschoyle <scientificsoftware@diamond.ac.uk>
"""
import os
import copy
import json
import logging
from colorama import Fore
from collections import OrderedDict
import savu.plugins.utils as pu
from savu.data.meta_data import MetaData
import savu.plugins.docstring_parser as doc
import scripts.config_generator.parameter_utils as param_u
from savu.data.plugin_list import CitationInformation
logger = logging.getLogger("documentationLog")
class PluginParameters(object):
"""Save the parameters for the plugin and base classes to a
dictionary. The parameters are in yaml format inside the
define_parameter function. These are read and checked for problems.
"""
def populate_parameters(self, tools_list):
""" Set parameter definitions and default parameter values """
# set the parameter definitions
# populates the dictionary returned by self.get_param_definitions()
list(map(lambda tool_class:
self._set_parameter_definitions(tool_class), tools_list))
# set the default parameter values
# populates the dictionary returned by self.get_param_values()
self._populate_default_parameters()
    def _populate_default_parameters(self):
        """
        This method should populate all the required parameters with
        default values. It is used for checking to see if parameter
        values are appropriate
        """
        p_defs = self.get_param_definitions()
        self.set_docstring(self.get_doc())
        # Current values start out as each definition's default.
        self.parameters = \
            OrderedDict([(k, v['default']) for k, v in p_defs.items()])
        # parameters holds current values, this is edited outside of the
        # tools class so default and dependency display values are updated here
        self.update_dependent_defaults()
        self.check_dependencies(self.parameters)
        # Push the resolved values onto the owning plugin instance.
        self._get_plugin().set_parameters(self.parameters)
def _set_parameters_this_instance(self, indices):
""" Determines the parameters for this instance of the plugin, in the
case of parameter tuning.
param np.ndarray indices: the index of the current value in the
parameter tuning list.
"""
dims = set(self.multi_params_dict.keys())
count = 0
for dim in dims:
info = self.multi_params_dict[dim]
name = info['label'].split('_param')[0]
self.parameters[name] = info['values'][indices[count]]
count += 1
def _set_parameter_definitions(self, tool_class):
"""Load the parameters for each base class, c, check the
dataset visibility, check data types, set dictionary values.
"""
param_info_dict = self._load_param_from_doc(tool_class)
if param_info_dict:
for p_name, p_value in param_info_dict.items():
if p_name in self.param.get_dictionary():
for k,v in p_value.items():
self.param[p_name][k] = v
else:
self.param.set(p_name, p_value)
self._check_param_defs(tool_class)
    def _check_param_defs(self, tool_class):
        """Check the parameter definitions for errors

        :param tool_class: tool_class to use for error message

        Runs each structural check in turn (any failure raises inside the
        helper), then normalises the default values in place.
        """
        pdefs = self.param.get_dictionary()
        # Remove ignored parameters
        self._remove_ignored_params(pdefs)
        # Check if the required keys are included
        self._check_required_keys(pdefs, tool_class)
        # Check that option values are valid
        self._check_options(pdefs, tool_class)
        # Check that the visibility is valid
        self._check_visibility(pdefs, tool_class)
        # Check that the dtype is valid
        self._check_dtype(pdefs, tool_class)
        # Use a display option to apply to dependent parameters later.
        self._set_display(pdefs)
        for k,v in pdefs.items():
            # Change empty OrderedDict to dict (due to yaml loader)
            # (json round-trip converts nested OrderedDicts to plain dicts)
            if isinstance(v['default'], OrderedDict):
                v['default'] = json.loads(json.dumps(v['default']))
            # Change the string to an integer, float, list, str, dict
            # (skipped for dependency-dict defaults, resolved elsewhere)
            if not self.default_dependency_dict_exists(v):
                v['default'] = pu._dumps(v['default'])
def _load_param_from_doc(self, tool_class):
"""Find the parameter information from the method docstring.
This is provided in a yaml format.
"""
# *** TO DO turn the dtype entry into a string
param_info_dict = None
if hasattr(tool_class, "define_parameters"):
yaml_text = tool_class.define_parameters.__doc__
if yaml_text and yaml_text.strip():
# If yaml_text is not None and not empty or consisting of spaces
param_info_dict = doc.load_yaml_doc(yaml_text)
if param_info_dict:
if not isinstance(param_info_dict, OrderedDict):
error_msg = (
f"The parameters have not been read "
f"in correctly for {tool_class.__name__}"
)
raise Exception(error_msg)
return param_info_dict
def check_for_default(self, mod_param, mod_value):
"""If the value is changed to be 'default', then set the original
default value. If the default contains a dictionary, then search
for the correct value
"""
param_info_dict = self.param.get_dictionary()
if str(mod_value) == "default":
if self.default_dependency_dict_exists(param_info_dict[mod_param]):
mod_value = self.get_dependent_default(param_info_dict[mod_param])
else:
mod_value = param_info_dict[mod_param]["default"]
return mod_value
    def _check_required_keys(self, param_info_dict, tool_class):
        """Check the four keys ['dtype', 'description', 'visibility',
        'default'] are included inside the dictionary given for each
        parameter.

        Prints the offending parameters and raises an Exception when any
        definition is incomplete.
        """
        required_keys = ["dtype", "description", "visibility", "default"]
        missing_keys = False
        missing_key_dict = {}
        for p_key, p in param_info_dict.items():
            all_keys = p.keys()
            if p.get("visibility"):
                if p.get("visibility") == "hidden":
                    # For hidden keys, only require a default value key
                    required_keys = ["default"]
                else:
                    required_keys = ["visibility"]
            # NOTE(review): when a parameter has no "visibility" entry,
            # required_keys keeps whatever value the previous iteration set
            # instead of reverting to the full four-key list -- confirm this
            # carry-over is intentional.
            if not all(d in all_keys for d in required_keys):
                missing_key_dict[p_key] = set(required_keys) - set(all_keys)
                missing_keys = True
        if missing_keys:
            print(
                f"{tool_class.__name__} doesn't contain all of the "
                f"required keys."
            )
            for param, missing_values in missing_key_dict.items():
                print(f"The missing required keys for '{param}' are:")
                print(*missing_values, sep=", ")
            logger.error(f"ERROR: Missing keys inside {tool_class.__name__}")
            raise Exception(f"Please edit {tool_class.__name__}")
    def _check_dtype(self, param_info_dict, tool_class):
        """
        Make sure that the dtype input is valid and that the default value is
        compatible

        :param param_info_dict: dictionary of parameter definitions
        :param tool_class: tool class, used in error messages only
        :raises Exception: if a dtype string is invalid, or a default value
            does not satisfy its declared dtype
        """
        plugin_error_str = f"There was an error with {tool_class.__name__}"
        for p_key, p_dict in param_info_dict.items():
            dtype = p_dict.get("dtype")
            if dtype:
                # Strip spaces so e.g. "list[ int ]" parses consistently
                dtype = dtype.replace(" ", "")
                try:
                    pvalid, error_str = param_u.is_valid_dtype(dtype)
                    if not pvalid:
                        raise Exception("Invalid parameter definition %s:\n %s"
                                        % (p_key, error_str))
                except IndexError:
                    print(plugin_error_str)
            # Dependency-style defaults are resolved later, so they cannot
            # be type-checked here
            if not self.default_dependency_dict_exists(p_dict):
                default_value = pu._dumps(p_dict["default"])
                pvalid, error_str = param_u.is_valid(p_key, default_value,
                                                     p_dict, check=True)
                if not pvalid:
                    raise Exception(f"{plugin_error_str}: {error_str}")
def _check_visibility(self, param_info_dict, tool_class):
"""Make sure that the visibility choice is valid"""
visibility_levels = [
"basic",
"intermediate",
"advanced",
"datasets",
"hidden",
]
visibility_valid = True
for p_key, p in param_info_dict.items():
# Check dataset visibility level is correct
self._check_data_keys(p_key, p)
# Check that the data types are valid choices
if p["visibility"] not in visibility_levels:
print(
f"Inside {tool_class.__name__} the {p_key}"
f" parameter is assigned an invalid visibility "
f"level '{p['visibility']}'"
)
print("Valid choices are:")
print(*visibility_levels, sep=", ")
visibility_valid = False
if not visibility_valid:
raise Exception(
f"Please change the file for {tool_class.__name__}"
)
def _check_data_keys(self, p_key, p):
"""Make sure that the visibility of dataset parameters is 'datasets'
so that the display order is unchanged.
"""
datasets = ["in_datasets", "out_datasets"]
exceptions = ["hidden"]
if p_key in datasets:
if p["visibility"] != "datasets" \
and p["visibility"] not in exceptions:
p["visibility"] = "datasets"
def _check_options(self, param_info_dict, tool_class):
"""Make sure that option verbose descriptions match the actual
options
"""
options_valid = True
for p_key, p in param_info_dict.items():
desc = param_info_dict[p_key].get("description")
# desc not present for hidden keys
if desc and isinstance(desc, dict):
options = param_info_dict[p_key].get("options")
option_desc = desc.get("options")
if options and option_desc:
# Check that there is not an invalid option description
# inside the option list.
invalid_option = [
opt for opt in option_desc if opt not in options
]
if invalid_option:
options_valid = False
break
if options_valid is False:
raise Exception(
f"Please check the parameter options for {tool_class.__name__}"
)
def _remove_ignored_params(self, param_info_dict):
"""Remove any parameters with visibility = ignore"""
p_dict_copy = param_info_dict.copy()
for p_key, p in p_dict_copy.items():
visibility = param_info_dict[p_key].get("visibility")
if visibility == "ignore":
del param_info_dict[p_key]
def _set_display(self, param_info_dict):
"""Initially, set all of the parameters to display 'on'
This is later altered when dependent parameters need to be shown
or hidden
"""
for k, v in param_info_dict.items():
v["display"] = "on"
def update_dependent_defaults(self):
"""
Fix default values for parameters that have a dependency on the value
of another parameter, and are in dictionary form.
"""
for name, pdict in self.get_param_definitions().items():
if self.default_dependency_dict_exists(pdict):
self.parameters[name] = self.get_dependent_default(pdict)
def default_dependency_dict_exists(self, pdict):
""" Check that the parameter default value is in a format with
the parent parameter string and the dependent value
e.g. default:
algorithm: FGP
and not an actual default value to be set
e.g. default: {'2':5}
:param pdict: The parameter definition dictionary
:return: True if the default dictionary contains the
correct format
"""
if pdict["default"] and isinstance(pdict["default"], dict):
if "dict" not in pdict["dtype"]:
return True
else:
parent_name = list(pdict['default'].keys())[0]
if parent_name in self.get_param_definitions():
return True
return False
    def get_dependent_default(self, child):
        """
        Recursive function to replace a dictionary of default parameters with
        a single value.

        Parameters
        ----------
        child : dict
            The parameter definition dictionary of the dependent parameter.

        Returns
        -------
        value
            The correct default value based on the current value of the
            dependency, or parent, parameter.
        """
        pdefs = self.get_param_definitions()
        parent_name = list(child['default'].keys())[0]
        parent = self.does_exist(parent_name, pdefs)
        # if the parent default is a dictionary then apply the function
        # recursively
        # NOTE(review): the recursive call passes parent['default'] where a
        # full parameter definition (containing its own 'default' key)
        # appears to be expected -- confirm this is correct.
        if isinstance(parent['default'], dict):
            self.parameters[parent_name] = \
                self.get_dependent_default(parent['default'])
        return child['default'][parent_name][self.parameters[parent_name]]
    def warn_dependents(self, mod_param, mod_value):
        """
        When *mod_param* is modified to *mod_value*, recommend a matching
        value for every parameter whose dependency default names
        *mod_param* as its parent.

        :param mod_param: name of the parameter that was just modified
        :param mod_value: the new value of that parameter
        """
        # find dependents
        for name, pdict in self.get_param_definitions().items():
            if self.default_dependency_dict_exists(pdict):
                default = pdict['default']
                parent_name = list(default.keys())[0]
                if parent_name == mod_param:
                    # Only recommend when the new parent value has a
                    # matching entry in the dependency mapping
                    if mod_value in default[parent_name].keys():
                        value = default[parent_name][mod_value]
                        desc = pdict['description']
                        self.make_recommendation(
                            name, desc, parent_name, value)
    def check_dependencies(self, parameters):
        """Determine which parameter values are dependent on a parent
        value and whether they should be hidden or shown

        :param parameters: current parameter values for this plugin
        """
        param_info_dict = self.param.get_dictionary()
        # Only parameters that declare a "dependency" entry are considered
        dep_list = {
            k: v["dependency"]
            for k, v in param_info_dict.items()
            if "dependency" in v
        }
        for p_name, dependency in dep_list.items():
            if isinstance(dependency, OrderedDict):
                # There is a dictionary of dependency values
                parent_param_name = list(dependency.keys())[0]
                # The choices which must be in the parent value
                parent_choice_list = dependency[parent_param_name]
                if parent_param_name in parameters:
                    """Check that the parameter is in the current plug in
                    This is relevant for base classes which have several
                    dependent classes
                    """
                    parent_value = parameters[parent_param_name]
                    if str(parent_value) in parent_choice_list:
                        param_info_dict[p_name]["display"] = "on"
                    else:
                        param_info_dict[p_name]["display"] = "off"
            else:
                # A bare dependency name: hide the child when the parent
                # value is unset (None)
                if dependency in parameters:
                    parent_value = parameters[dependency]
                    if parent_value is None or str(parent_value) == "None":
                        param_info_dict[p_name]["display"] = "off"
                    else:
                        param_info_dict[p_name]["display"] = "on"
def set_plugin_list_parameters(self, input_parameters):
"""
This method is called after the plugin has been created by the
pipeline framework. It replaces ``self.parameters``
default values with those given in the input process list. It
checks for multi parameter strings, eg. 57;68;56;
:param dict input_parameters: A dictionary of the input parameters
for this plugin, or None if no customisation is required.
"""
for key in input_parameters.keys():
if key in self.parameters.keys():
new_value = input_parameters[key]
self.__check_multi_params(
self.parameters, new_value, key
)
else:
error = (
f"Parameter '{key}' is not valid for plugin "
f"{self.plugin_class.name}. \nTry opening and re-saving "
f"the process list in the configurator to auto remove "
f"\nobsolete parameters."
)
raise ValueError(error)
    def __check_multi_params(self, parameters, value, key):
        """
        Convert parameter value to a list if it uses parameter tuning
        and set associated parameters, so the framework knows the new size
        of the data and which plugins to re-run.

        :param parameters: Dictionary of parameters and current values
        :param value: Value to set parameter to
        :param key: Parameter name
        :return:
        """
        if param_u.is_multi_param(key, value):
            value, error_str = pu.convert_multi_params(key, value)
            # Only accept the expanded list when the conversion succeeded;
            # on error the parameter is left unchanged
            if not error_str:
                parameters[key] = value
                # e.g. "angle_params.float" records the tuned value type
                label = key + "_params." + type(value[0]).__name__
                self.alter_multi_params_dict(
                    len(self.get_multi_params_dict()),
                    {"label": label, "values": value},
                )
                # Each tuned parameter adds an extra data dimension
                self.append_extra_dims(len(value))
        else:
            parameters[key] = value
    def _get_expand_dict(self, preview, expand_dim):
        """Create dict for expand syntax

        :param preview: Preview parameter value
        :param expand_dim: Number of dimensions to return dict for, or
            the string "all"
        :return: dict
        :raises ValueError: if the preview value is neither a list nor a
            dictionary
        """
        expand_dict = {}
        preview_val = pu._dumps(preview)
        if not preview_val:
            # In the case that there is an empty dict, display the default
            preview_val = []
        if isinstance(preview_val, dict):
            # Multiple named previews: expand each entry recursively
            for key, prev_list in preview_val.items():
                expand_dict[key] = self._get_expand_dict(prev_list, expand_dim)
            return expand_dict
        elif isinstance(preview_val, list):
            if expand_dim == "all":
                expand_dict = \
                    self._output_all_dimensions(preview_val,
                                                self._get_dimensions(preview_val))
            else:
                pu.check_valid_dimension(expand_dim, preview_val)
                dim_key = f"dim{expand_dim}"
                expand_dict[dim_key] = \
                    self._dim_slice_output(preview_val, expand_dim)
        else:
            raise ValueError("This preview value was not a recognised list "
                             "or dictionary. This expand command currenty "
                             "only works with those two data type.")
        return expand_dict
def _get_dimensions(self, preview_list):
"""
:param preview_list: The preview parameter list
:return: Dimensions to display
"""
return 1 if not preview_list else len(preview_list)
def _output_all_dimensions(self, preview_list, dims):
"""Compile output string lines for all dimensions
:param preview_list: The preview parameter list
:param dims: Number of dimensions to display
:return: dict
"""
prev_dict = {}
for dim in range(1, dims + 1):
dim_key = f"dim{dim}"
prev_dict[dim_key] = self._dim_slice_output(preview_list, dim)
return prev_dict
def _dim_slice_output(self, preview_list, dim):
"""If there are multiple values in list format
Only save the values for the dimensions chosen
:param preview_list: The preview parameter list
:param dim: dimension to return the slice notation dictionary for
:return slice notation dictionary
"""
if not preview_list:
# If empty
preview_display_value = ":"
else:
preview_display_value = preview_list[dim - 1]
prev_val = self._set_all_syntax(preview_display_value)
return self._get_slice_notation_dict(prev_val)
    def _get_slice_notation_dict(self, val):
        """Create a dict for slice notation information,
        start:stop:step (and chunk if provided)

        :param val: The list value in slice notation
        :return: dictionary of slice notation
        """
        import itertools
        basic_slice_keys = ["start", "stop", "step"]
        all_slice_keys = [*basic_slice_keys, "chunk"]
        slice_dict = {}
        if pu.is_slice_notation(val):
            val_list = val.split(":")
            if len(val_list) < 3:
                # Make sure the start stop step slice keys are always shown,
                # even when blank
                val_list.append("")
            for slice_name, v in zip(all_slice_keys, val_list):
                # Only print up to the shortest list.
                # (Only show the chunk value if it is in val_list)
                slice_dict[slice_name] = v
        else:
            # A single plain value becomes the 'start' entry; 'stop' and
            # 'step' are filled in blank
            val_list = [val]
            for slice_name, v in itertools.zip_longest(
                basic_slice_keys, val_list, fillvalue=""
            ):
                slice_dict[slice_name] = v
        return slice_dict
def _set_all_syntax(self, val, replacement_str=""):
"""Remove additional spaces from val, replace colon when 'all'
data is selected
:param val: Slice notation value
:param replacement_str: String to replace ':' with
:return:
"""
if isinstance(val, str):
if pu.is_slice_notation(val):
if val == ":":
val = replacement_str
else:
val = val.strip()
else:
val = val.strip()
return val
    def get_multi_params_dict(self):
        """ Get the multi parameter (parameter tuning) dictionary. """
        return self.multi_params_dict

    def get_extra_dims(self):
        """ Get the extra dimensions added by parameter tuning. """
        return self.extra_dims
"""
@dataclass
class Parameter:
''' Descriptor of Parameter Information for plugins
'''
visibility: int
datatype: specific_type
description: str
default: int
Options: Optional[[str]]
dependency: Optional[]
def _get_param(self):
param_dict = {}
param_dict['visibility'] = self.visibility
param_dict['type'] = self.dtype
param_dict['description'] = self.description
# and the remaining keys
return param_dict
"""
class PluginCitations(object):
    """Get this citation dictionary so get_dictionary of the metadata type
    should return a dictionary of all the citation info as taken from
    docstring
    """

    def set_cite(self, tools_list):
        """Set the citations for each of the tools classes

        :param tools_list: List containing tool classes of parent plugins
        """
        # map() is drained by list() purely for its side effect
        list(
            map(
                lambda tool_class: self._set_plugin_citations(tool_class),
                tools_list
            )
        )

    def _set_plugin_citations(self, tool_class):
        """ Load the citations for each base class and set values"""
        citations = self._load_cite_from_doc(tool_class)
        if citations:
            for citation in citations.values():
                if self._citation_keys_valid(citation, tool_class):
                    new_citation = CitationInformation(**citation)
                    self.cite.set(new_citation.name, new_citation)
                else:
                    print(f"The citation for {tool_class.__name__} "
                          f"was not saved.")

    def _citation_keys_valid(self, new_citation, tool_class):
        """Check that required citation keys are present. Return false if
        required keys are missing
        """
        required_keys = ["description"]
        # Inside the fresnel filter there is only a description
        citation_keys = [k for k in new_citation.keys()]
        # Check that all of the required keys are contained inside the
        # citation definition
        check_keys = all(item in citation_keys for item in required_keys)
        citation_keys_valid = False if check_keys is False else True
        all_keys = [
            "short_name_article",
            "description",
            "bibtex",
            "endnote",
            "doi",
            "dependency",
        ]
        # Keys which are not used; these only produce a warning and do not
        # invalidate the citation
        additional_keys = [k for k in citation_keys if k not in all_keys]
        if additional_keys:
            print(f"Please only use the following keys inside the citation"
                  f" definition for {tool_class.__name__}:")
            print(*all_keys, sep=", ")
            print("The incorrect keys used:", additional_keys)
        return citation_keys_valid

    def _load_cite_from_doc(self, tool_class):
        """Find the citation information from the method docstring.
        This is provided in a yaml format.

        :param tool_class: Tool to retrieve citation docstring from
        :return: All citations from this tool class
        """
        all_c = OrderedDict()
        # Seperate the citation methods. __dict__ returns instance attributes.
        citation_methods = {key: value
                            for key, value in tool_class.__dict__.items()
                            if key.startswith('citation')}
        for c_method_name, c_method in citation_methods.items():
            yaml_text = c_method.__doc__
            if yaml_text is not None:
                yaml_text = self.seperate_description(yaml_text)
                current_citation = doc.load_yaml_doc(yaml_text)
                if not isinstance(current_citation, OrderedDict):
                    print(f"The citation information has not been read in "
                          f"correctly for {tool_class.__name__}.")
                else:
                    all_c[c_method_name] = current_citation
        return all_c

    def seperate_description(self, yaml_text):
        """Change the format of the docstring to retain new lines for the
        endnote and bibtex and create a key for the description so that
        it be read as a yaml file

        :param yaml_text: raw citation docstring
        :return: Reformatted yaml text
        """
        # Everything before 'bibtex:' is the free-text description
        description = doc.remove_new_lines(yaml_text.partition("bibtex:")[0])
        desc_str = " description:" + description

        bibtex_text = \
            yaml_text.partition("bibtex:")[2].partition("endnote:")[0]
        end_text = \
            yaml_text.partition("bibtex:")[2].partition("endnote:")[2]

        # '|' keeps the literal newlines of the bibtex/endnote blocks when
        # the result is parsed as yaml
        if bibtex_text and end_text:
            final_str = desc_str + '\n bibtex: |' + bibtex_text \
                        + 'endnote: |' + end_text
        elif end_text:
            final_str = desc_str + '\n endnote: |' + end_text
        elif bibtex_text:
            final_str = desc_str + '\n bibtex: |' + bibtex_text
        else:
            final_str = desc_str
        return final_str
class PluginDocumentation(object):
    """Get this documentation dictionary so get_dictionary of
    the metadata type should return a dictionary of all the
    documentation details taken from docstring
    """

    def set_warn(self, tools_list):
        """Remove new lines and save config warnings for the child tools
        class only.

        NOTE(review): despite the 'set_' name this method returns the
        warning string rather than storing it -- confirm callers use the
        return value.
        """
        config_str = tools_list[-1].config_warn.__doc__
        if config_str and "\n\n" in config_str:
            # Separate multiple warnings with two new lines \n\n
            config_warn_list = [doc.remove_new_lines(l)
                                for l in config_str.split("\n\n")]
            config_str = '\n'.join(config_warn_list)
        return config_str

    def set_doc_link(self):
        """If there is a restructured text documentation file inside the
        doc/source/documentation folder, then save the link to the page.
        """
        # determine Savu base path
        savu_base_path = \
            os.path.dirname(os.path.realpath(__file__)).split("savu")[0]

        # Locate documentation file
        doc_folder = savu_base_path + "doc/source/documentation"
        module_path = \
            self.plugin_class.__module__.replace(".", "/").replace("savu", "")
        file_ = module_path + "_doc"
        file_name = file_ + ".rst"
        file_path = doc_folder + file_name
        sphinx_link = 'https://savu.readthedocs.io/en/latest/' \
                      'documentation' + file_
        # Only store the link when the .rst file actually exists on disk
        if os.path.isfile(file_path):
            self.doc.set("documentation_link", sphinx_link)
class PluginTools(PluginParameters, PluginCitations, PluginDocumentation):
    """Holds all of the parameter, citation and documentation information
    for one plugin class - cls"""

    def _find_tools(self):
        """Using the method resolution order, find base class tools.

        The MRO is traversed in reverse so base-class tools load first and
        can be overridden by child classes.
        """
        tool_list = []
        for tool_class in self.plugin_class.__class__.__mro__[::-1]:
            plugin_tools_id = tool_class.__module__ + "_tools"
            p_tools = pu.get_tools_class(plugin_tools_id)
            if p_tools:
                tool_list.append(p_tools)
        return tool_list

    def _set_tools_data(self):
        """Populate the parameters, citations and documentation
        with information from all of the tools classes
        """
        self.populate_parameters(self.tools_list)
        self.set_cite(self.tools_list)
        self.set_doc(self.tools_list)

    def get_param_definitions(self):
        """
        Returns
        -------
        dict
            Original parameter definitions read from tools file.
        """
        return self.param.get_dictionary()

    def get_param_values(self):
        """
        Returns
        -------
        dict
            Plugin parameter values for this instance.
        """
        return self.parameters
| [
2,
15069,
1946,
13566,
4401,
8090,
12052,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
... | 2.208371 | 14,311 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import re
import six
LIGHT = 0o10  # bit flag: color numbers >= LIGHT are the bright variants

ansi_CSI = '\x1b['
# Matches one ANSI escape sequence: CSI, parameter bytes, then final byte
ansi_seq = re.compile(re.escape(ansi_CSI) + r'(?P<params>[\x20-\x3f]*)(?P<final>[\x40-\x7e])')
ansi_cmd_SGR = 'm'  # set graphics rendition

# (color number, unique single-letter code, canonical name, *aliases)
color_defs = (
    (000, 'k', 'black'),
    (0o01, 'r', 'dark red'),
    (0o02, 'g', 'dark green'),
    (0o03, 'w', 'brown', 'dark yellow'),
    (0o04, 'b', 'dark blue'),
    (0o05, 'm', 'dark magenta', 'dark purple'),
    (0o06, 'c', 'dark cyan'),
    (0o07, 'n', 'light grey', 'light gray', 'neutral', 'dark white'),
    # BUG FIX: dark grey previously reused the letter 'B', which collided
    # with blue (0o14) in colors_by_letter/letters_by_num (the later
    # assignment won, leaving dark grey unreachable by letter).  'K'
    # (uppercase of 'k' black, matching the "light black" alias) restores
    # a unique letter per color.
    (0o10, 'K', 'dark grey', 'dark gray', 'light black'),
    (0o11, 'R', 'red', 'light red'),
    (0o12, 'G', 'green', 'light green'),
    (0o13, 'Y', 'yellow', 'light yellow'),
    (0o14, 'B', 'blue', 'light blue'),
    (0o15, 'M', 'magenta', 'purple', 'light magenta', 'light purple'),
    (0o16, 'C', 'cyan', 'light cyan'),
    (0o17, 'W', 'white', 'light white'),
)

# Lookup tables derived from color_defs
colors_by_num = {}
colors_by_letter = {}
colors_by_name = {}
letters_by_num = {}

for colordef in color_defs:
    colorcode = colordef[0]
    colorletter = colordef[1]
    colors_by_num[colorcode] = nameset = set(colordef[2:])
    colors_by_letter[colorletter] = colorcode
    letters_by_num[colorcode] = colorletter
    for c in list(nameset):
        # equivalent names without spaces
        nameset.add(c.replace(' ', ''))
    for c in list(nameset):
        # with "bright" being an alias for "light"
        nameset.add(c.replace('light', 'bright'))
    for c in nameset:
        colors_by_name[c] = colorcode
| [
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
201,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
201,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
201,
198,
2,
5115,
6634,
9238,
13,
... | 2.508901 | 955 |
import os
import numpy as np
from cv2 import cv2
from PIL import Image
import matplotlib.pyplot as plt
from tensorflow import keras
from keras.preprocessing.image import array_to_img, img_to_array, load_img
PATH = os.getcwd()
## ----- LOAD DATA ------
## ----- IMAGE AUGMENTATION -----
| [
11748,
28686,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,
269,
85,
17,
1330,
269,
85,
17,
201,
198,
6738,
350,
4146,
1330,
7412,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
201,
198,
673... | 2.6 | 120 |
# -*- coding: utf-8 -*-
import scrapy
import json
from jsonpath import jsonpath
import re
from ..items import TaobaoSpiderItem
from ..settings import cookies
from urllib import parse
error_num = 0
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
15881,
88,
198,
11748,
33918,
198,
6738,
33918,
6978,
1330,
33918,
6978,
198,
11748,
302,
198,
6738,
11485,
23814,
1330,
11940,
672,
5488,
41294,
7449,
198,
6738,
1... | 3.245902 | 61 |
from django.db.models.signals import ModelSignal

# Custom lifecycle signals mirroring Django's built-in pre/post save and
# delete signals.
# NOTE(review): ``providing_args`` is purely documentational; it was
# deprecated in Django 2.2 and removed in Django 3.1 -- confirm the target
# Django version before upgrading.
cb_pre_save = ModelSignal(providing_args=["instance"], use_caching=True)
cb_post_save = ModelSignal(providing_args=["instance", "created"], use_caching=True)
cb_pre_delete = ModelSignal(providing_args=["instance"], use_caching=True)
cb_post_delete = ModelSignal(providing_args=["instance"], use_caching=True)
| [
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
12683,
874,
1330,
9104,
11712,
282,
198,
198,
21101,
62,
3866,
62,
21928,
796,
9104,
11712,
282,
7,
15234,
2530,
62,
22046,
28,
14692,
39098,
33116,
779,
62,
66,
8103,
28,
17821,
8,
198,
... | 2.903226 | 124 |
from flask import render_template,redirect,url_for, flash,request
from flask_login import login_user,logout_user,login_required
from . import auth
from ..models import User
from .forms import LoginForm,RegistrationForm
from .. import db
from ..email import mail_message
from flask_http_response import success, result, error
@auth.route('/login', methods=['GET', 'POST'])
@auth.route('/logout')
@login_required
@auth.route('api/register', methods=["POST"])
| [
6738,
42903,
1330,
8543,
62,
28243,
11,
445,
1060,
11,
6371,
62,
1640,
11,
7644,
11,
25927,
198,
6738,
42903,
62,
38235,
1330,
17594,
62,
7220,
11,
6404,
448,
62,
7220,
11,
38235,
62,
35827,
198,
6738,
764,
1330,
6284,
198,
6738,
11... | 3.328571 | 140 |
from django.urls import (
    path,
)

from .views import (
    proxy_document,
    proxy_pdf,
)

# URL namespace, e.g. reverse('django_simple_file_handler:proxy_document')
app_name = 'django_simple_file_handler'

urlpatterns = [
    # Serve a stored document through the proxy view, keyed by its slug
    path(
        'documents/<proxy_slug>',
        proxy_document,
        name='proxy_document',
    ),
    # Serve a generated PDF through the proxy view, keyed by its slug
    path(
        'pdf/<proxy_slug>',
        proxy_pdf,
        name='proxy_pdf',
    ),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
357,
198,
220,
220,
220,
3108,
11,
198,
8,
628,
198,
6738,
764,
33571,
1330,
357,
198,
220,
220,
220,
15741,
62,
22897,
11,
198,
220,
220,
220,
15741,
62,
12315,
11,
198,
8,
628,
198,
1324,
... | 2 | 178 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import io
import json
import logging
import os
import copy
from builtins import object
from builtins import str
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Text
import mynlu
from mynlu import pipeline
from mynlu.config.mynluconfig import MyNLUConfig
from mynlu.pipeline import MissingArgumentError
from mynlu.trainers import TrainingData, Message
from mynlu.utils import create_dir
from mynlu.pipeline.plugin import Plugin, PluginFactory
from mynlu import pipeline
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
4818,
8079,
... | 3.801075 | 186 |
"""
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
# This suite consists of all test cases that are passing and have been verified.
import pytest
import os
import sys
from .FileManagement import FileManagement as fm
from ly_test_tools import LAUNCHERS
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../automatedtesting_shared')
from base import TestAutomationBase
revert_physics_config = fm.file_revert_list(['physxdebugconfiguration.setreg', 'physxdefaultsceneconfiguration.setreg', 'physxsystemconfiguration.setreg'], 'AutomatedTesting/Registry')
@pytest.mark.SUITE_main
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
@pytest.mark.parametrize("project", ["AutomatedTesting"]) | [
37811,
198,
15269,
357,
66,
8,
25767,
669,
284,
262,
4946,
513,
35,
7117,
4935,
13,
1114,
1844,
6634,
290,
5964,
2846,
3387,
766,
262,
38559,
24290,
379,
262,
6808,
286,
428,
6082,
13,
198,
198,
4303,
36227,
12,
34156,
12,
33234,
74... | 3.255556 | 270 |
from unittest import TestCase
from tt.dataaccess.utils import *
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
256,
83,
13,
7890,
15526,
13,
26791,
1330,
1635,
628
] | 3.421053 | 19 |
import quark_hash
import weakref
import binascii
import StringIO
from binascii import unhexlify
teststart = '700000005d385ba114d079970b29a9418fd0549e7d68a95c7f168621a314201000000000578586d149fd07b22f3a8a347c516de7052f034d2b76ff68e0d6ecff9b77a45489e3fd511732011df0731000';
testbin = unhexlify(teststart)
hash_bin = quark_hash.getPoWHash(testbin) | [
11748,
627,
668,
62,
17831,
198,
11748,
4939,
5420,
198,
11748,
9874,
292,
979,
72,
198,
11748,
10903,
9399,
198,
198,
6738,
9874,
292,
979,
72,
1330,
555,
33095,
75,
1958,
198,
198,
9288,
9688,
796,
705,
22,
24598,
20,
67,
27203,
7... | 2.261438 | 153 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
责任链模式
"""
if __name__ == '__main__':
hb = ConcreteHandlerB(Level(2))
ha = ConcreteHandlerA(Level(1), hb)
req = Request(Level(2), "Request with Level 2")
ha.handle_request(req)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
220,
220,
220,
5525,
112,
96,
20015,
119,
165,
241,
122,
162,
101,
94,
28156,
237,
198,
37811,
... | 2.08 | 125 |
"""
This CASA script (optionally) reduces an available (concatenated) MS by
time-averaging and sub-selecting a given velocity range. It is called
inside csalt.synthesize.make_data(), or can be used as a standalone script
for a real dataset as
casa -c format_data.py configs/gen_<cfg_file> <arg1>
where <cfg_file> is the relevant part of the configuration input filename
and <arg1> is an *optional* argument that contains a (string) filename
extension (usually "pure" or "noisy" in the csalt.synthesize framework).
(This *will* change when we update to full CASA v6.x.)
This script will output ...
"""
import os, sys
import numpy as np
import scipy.constants as sc
import h5py
"""
Parse inputs and load relevant information.
"""
# Ingest input arguments
bounds_ingest = False
if len(sys.argv) == 3:
    # casa -c format_data.py <cfg>
    cfg_file = sys.argv[-1]
    _ext = ''
elif len(sys.argv) == 6:
    # Extended signature: extension tag plus explicit LSRK velocity bounds
    cfg_file = sys.argv[-4]
    _ext = '_'+sys.argv[-3]
    # NOTE(review): np.float is a deprecated alias (removed in NumPy 1.24);
    # fine under CASA's bundled NumPy but will break on newer stacks.
    Vbounds_lo = np.float(sys.argv[-2])
    Vbounds_hi = np.float(sys.argv[-1])
    bounds_ingest = True
else:
    # casa -c format_data.py <cfg> <ext>
    cfg_file = sys.argv[-2]
    _ext = '_'+sys.argv[-1]

# Make sure the configuration file exists
if os.path.exists(cfg_file+'.py'):
    # Injects config variables (reduced_dir, basename, in_MS, tavg,
    # V_bounds, nu_rest, dataname, ...) into this namespace
    execfile(cfg_file+'.py')
else:
    print('Could not find input configuration file!')
    sys.exit()

if bounds_ingest:
    # Command-line bounds override the configuration file value
    V_bounds = np.array([Vbounds_lo, Vbounds_hi])
    print(' ')
    print(V_bounds)
    print(' ')

# Make sure outdir exists
if reduced_dir[-1] != '/': reduced_dir += '/'
outdir = reduced_dir+basename+'/'
if not os.path.exists(outdir):
    os.system('mkdir '+outdir)
# Load the "raw" MS datafile contents
in_MS += _ext
if not os.path.exists(in_MS+'.ms'):
    print('Could not find the input "raw" MS file!')
    print('"'+in_MS+'"'+' does not seem to exist.')
    sys.exit()
# CASA table tool: pull the per-row spectral window / observation / field
# indices from the main table
tb.open(in_MS+'.ms')
spw_col = tb.getcol('DATA_DESC_ID')
obs_col = tb.getcol('OBSERVATION_ID')
field_col = tb.getcol('FIELD_ID')
tb.close()

# Identify the unique EBs (execution blocks) inside the MS datafile
obs_ids = np.unique(obs_col)
nEB = len(obs_ids)

"""
    Separate the individual EBs and time-average as specified by user.  
    The individual MS files are only stored temporarily during manipulations.
"""
for EB in range(nEB):
    # Spectral window selection string for this EB ("lo~hi" when several)
    spws = np.unique(spw_col[np.where(obs_col == obs_ids[EB])])
    if len(spws) == 1:
        spw_str = str(spws[0])
    else:
        spw_str = "%d~%d" % (spws[0], spws[-1])
    # Field selection string for this EB
    fields = np.unique(field_col[np.where(obs_col == obs_ids[EB])])
    if len(fields) == 1:
        field_str = str(fields[0])
    else:
        field_str = "%d~%d" % (fields[0], fields[-1])
    # Time-average this EB into a temporary MS (CASA 'split' task)
    os.system('rm -rf '+dataname+'_tmp'+str(EB)+'.ms*')
    split(vis=in_MS+'.ms', outputvis=dataname+'_tmp'+str(EB)+'.ms',
          spw=spw_str, field=field_str, datacolumn='data', timebin=tavg[EB],
          keepflags=False)
# Create an HDF5 file, and populate the top-level group with basic info
os.system('rm -rf '+dataname+_ext+'.DATA.h5')
f = h5py.File(dataname+_ext+'.DATA.h5', "w")
f.attrs["nobs"] = nEB
f.attrs["original_MS"] = in_MS+'.ms'
f.attrs["V_bounds"] = V_bounds
f.attrs["tavg"] = tavg
f.close()
# Loop through each EB
concat_files = []
for EB in range(nEB):
    # Get data
    tb.open(dataname+'_tmp'+str(EB)+'.ms')
    data_all = np.squeeze(tb.getcol('DATA'))
    u, v = tb.getcol('UVW')[0,:], tb.getcol('UVW')[1,:]
    wgt_all = tb.getcol('WEIGHT')
    times = tb.getcol('TIME')
    tb.close()

    # Parse timestamps: map each visibility row to a unique-timestamp index
    tstamps = np.unique(times)
    tstamp_ID = np.empty_like(times)
    for istamp in range(len(tstamps)):
        tstamp_ID[times == tstamps[istamp]] = istamp

    # Get TOPO frequencies
    tb.open(dataname+'_tmp'+str(EB)+'.ms/SPECTRAL_WINDOW')
    nu_TOPO_all = np.squeeze(tb.getcol('CHAN_FREQ'))
    tb.close()

    # Calculate LSRK frequencies for each timestamp (they drift with the
    # Earth's motion, so one conversion per timestamp)
    nu_LSRK_all = np.empty((len(tstamps), len(nu_TOPO_all)))
    ms.open(dataname+'_tmp'+str(EB)+'.ms')
    for istamp in range(len(tstamps)):
        nu_LSRK_all[istamp,:] = ms.cvelfreqs(mode='channel', outframe='LSRK',
                                             obstime=str(tstamps[istamp])+'s')
    ms.close()

    # Identify channel boundaries for the requested LSRK range
    # (Doppler relation: V = c * (1 - nu / nu_rest))
    V_LSRK_all = sc.c * (1 - nu_LSRK_all / nu_rest)
    chslo = np.argmin(np.abs(V_LSRK_all - V_bounds[0]), axis=1)
    chshi = np.argmin(np.abs(V_LSRK_all - V_bounds[1]), axis=1)
    # Channel ordering depends on the sign of the frequency increment
    if np.diff(nu_TOPO_all)[0] < 0:
        chlo, chhi = chslo.min(), chshi.max()
    else:
        chlo, chhi = chshi.min(), chslo.max()
    print(' ')

    # Set channel pads around data of interest (shrink the pad when the
    # requested range sits near the band edge)
    bp_def = 3
    lo_bp, hi_bp = chlo - bp_def, len(nu_TOPO_all) - chhi - bp_def - 1
    if np.logical_and((lo_bp >= bp_def), (hi_bp >= bp_def)):
        bounds_pad = bp_def
    elif np.logical_or((lo_bp <= 0), (hi_bp <= 0)):
        bounds_pad = 0
    else:
        bounds_pad = np.min([lo_bp, hi_bp])

    # Slice out the data of interest
    nu_TOPO = nu_TOPO_all[chlo-bounds_pad:chhi+bounds_pad+1]
    nu_LSRK = nu_LSRK_all[:,chlo-bounds_pad:chhi+bounds_pad+1]
    data = data_all[:,chlo-bounds_pad:chhi+bounds_pad+1,:]
    # Weights may be per-channel (same shape as data) or per-row
    if wgt_all.shape == data_all.shape:
        wgt = wgt_all[:,chlo-bounds_pad:chhi+bounds_pad+1,:]
    else:
        wgt = wgt_all

    # Pack the data into the HDF5 output file (one group per EB)
    f = h5py.File(dataname+_ext+'.DATA.h5', "a")
    f.create_dataset('EB'+str(EB)+'/um', data=u)
    f.create_dataset('EB'+str(EB)+'/vm', data=v)
    f.create_dataset('EB'+str(EB)+'/vis_real', data=data.real)
    f.create_dataset('EB'+str(EB)+'/vis_imag', data=data.imag)
    f.create_dataset('EB'+str(EB)+'/weights', data=wgt)
    f.create_dataset('EB'+str(EB)+'/nu_TOPO', data=nu_TOPO)
    f.create_dataset('EB'+str(EB)+'/nu_LSRK', data=nu_LSRK)
    f.create_dataset('EB'+str(EB)+'/tstamp_ID', data=tstamp_ID)
    f.close()

    # Split off a MS with the "reduced" data from this EB
    if not os.path.exists(reduced_dir+basename+'/subMS'):
        os.system('mkdir '+reduced_dir+basename+'/subMS')
    sub_ = reduced_dir+basename+'/subMS/'+basename+_ext+'_EB'+str(EB)+'.DATA.ms'
    os.system('rm -rf '+sub_)
    spwtag = '0:'+str(chlo-bounds_pad)+'~'+str(chhi+bounds_pad)
    split(vis=dataname+'_tmp'+str(EB)+'.ms', outputvis=sub_,
          datacolumn='data', spw=spwtag)
    concat_files += [sub_]
# Concatenate the MS files (a single EB is just copied instead)
os.system('rm -rf '+dataname+_ext+'.DATA.ms')
if len(concat_files) > 1:
    concat(vis=concat_files, concatvis=dataname+_ext+'.DATA.ms',
           dirtol='0.1arcsec', copypointing=False)
else:
    os.system('cp -r '+concat_files[0]+' '+dataname+_ext+'.DATA.ms')

# Cleanup: temporary per-EB MS files and CASA *.last task logs
os.system('rm -rf '+dataname+'_tmp*.ms*')
os.system('rm -rf *.last')
| [
37811,
198,
220,
220,
220,
770,
35106,
32,
4226,
357,
18076,
453,
8,
12850,
281,
1695,
357,
1102,
9246,
268,
515,
8,
6579,
416,
220,
198,
220,
220,
220,
640,
12,
8770,
3039,
290,
850,
12,
19738,
278,
257,
1813,
15432,
2837,
13,
22... | 2.136087 | 3,123 |
# Example: printing the list of builtin layouts
import json
from nicetable.nicetable import NiceTable
# from __future__ import annotations # only for Python 3.7 and up?
out = NiceTable(['Layout', 'Description'])
for layout in NiceTable.builtin_layouts():
out.append(layout)
print(out)
# Example: printing the sample JSON in two layouts
out = NiceTable(['Name', 'Type', 'Height(cm)', ' Weight(kg)'], layout='default')
for pokemon in json.loads(NiceTable.SAMPLE_JSON):
out.append([pokemon['name'], pokemon['type'], pokemon['height'], pokemon['weight']])
print('-- default format --\n')
print(out)
out.layout = 'csv'
out.sep_vertical = '|'
print('-- CSV with a pipe separator --\n')
print(out)
# Example: printing all the formatting settings in md layout
out = NiceTable(['Setting', 'Type', 'Default', 'Description'], layout='md')
for setting in NiceTable.FORMATTING_SETTINGS:
out.append(setting)
print(out)
# Example: custom layout
out = MyNiceTable(['Layout', 'Description'], layout='winter_columns')
for layout in MyNiceTable.builtin_layouts():
out.append(layout)
print(out)
# Example: setting column-level options
out = NiceTable(['Name', 'Type', 'Height(cm)', ' Weight(kg)'])
for pokemon in json.loads(NiceTable.SAMPLE_JSON):
out.append([pokemon['name'], pokemon['type'], pokemon['height'], pokemon['weight']])
# set column options by position
out.set_col_options(0, adjust='center')
# set column options by column name
out.set_col_options('Type',
func=lambda x: x.lower() if x != 'Electric' else None,
none_string='N/A')
# Example: different numeric alignments
out = NiceTable(['standard left', 'standard center', 'standard right', 'strict_left', 'strict_center', 'strict_right'])
n_list = [6.901, 6.1, 122]
[out.append([n] * 6) for n in n_list]
out.col_adjust = ['left', 'center', 'right', 'strict_left', 'strict_center', 'strict_right']
print(out)
# Example: long text
out = NiceTable(['Code', 'Product Description(Long)'])
out.append([1, 'Boeing 777. Batteries not included. May contain nuts.'])
out.append([2, 'Sack of sand'])
print(out)
out.value_max_len = 19
print(out)
out.value_too_long_policy = 'truncate'
print(out)
# Example: newlines
out = NiceTable(['Code', 'Product Description\n(Long)']) \
.append([1, 'Boeing 777\nBatteries not included.\nMay contain nuts.']) \
.append([2, 'Sack of sand'])
print(out)
out.value_newline_replace = '\\n'
print(out)
| [
2,
17934,
25,
13570,
262,
1351,
286,
3170,
259,
38489,
201,
198,
11748,
33918,
201,
198,
6738,
9200,
316,
540,
13,
6988,
316,
540,
1330,
18460,
10962,
201,
198,
2,
422,
11593,
37443,
834,
1330,
37647,
220,
220,
1303,
691,
329,
11361,
... | 2.699465 | 935 |
import logging
from vkbottle import User
from forwarding_bot.vk._middleware import middleware_bp
from ._blueprint import bot_bp
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
198,
6738,
410,
74,
10985,
293,
1330,
11787,
198,
198,
6738,
43448,
62,
13645,
13,
85,
74,
13557,
27171,
1574,
1330,
3504,
1574,
62,
46583,
198,
6738,
47540,
17585,
4798,
1330,
10214,
62,
46583,
198,
198,
6404,
1362,
... | 3.188679 | 53 |
# coding: utf-8
from __future__ import absolute_import, unicode_literals
from django.core import checks
from django.db import models
from django.utils.translation import gettext_lazy as _
try:
from django.utils.module_loading import import_string
except ImportError: # pragma: no cover, Django 1.6 compat
from django.utils.module_loading import import_by_path as import_string
import six
from .compat import Creator
from ..enums import ChoicesEnum
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
7295,
1330,
8794,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
... | 3.28169 | 142 |
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).parent))
| [
11748,
3108,
8019,
198,
11748,
25064,
198,
198,
17597,
13,
6978,
13,
33295,
7,
2536,
7,
6978,
8019,
13,
15235,
7,
834,
7753,
834,
737,
8000,
4008,
198
] | 2.821429 | 28 |
# SPDX-FileCopyrightText: 2022-present Ofek Lev <oss@ofek.dev>
#
# SPDX-License-Identifier: MIT
from hatchling.version.source.plugin.interface import VersionSourceInterface
| [
2,
30628,
55,
12,
8979,
15269,
8206,
25,
33160,
12,
25579,
3226,
988,
16042,
1279,
793,
31,
1659,
988,
13,
7959,
29,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
198,
6738,
25834,
1359,
13,
9641,
13,
10459,
13... | 3.346154 | 52 |
#!/usr/bin/env python
import rospy
from geodesy.utm import gridZone
def main():
    """Find and report the UTM zone of WGS84 lat/lon coordinates.

    Reads the private ROS parameters ``~lat`` and ``~lon`` (decimal
    degrees), validates that both are present, and logs the matching UTM
    grid zone and latitude band. Logs an error and returns (never
    raises) on missing or malformed input.
    """
    TAG = "[find_zone.main] "
    lat = rospy.get_param('~lat', None)
    lon = rospy.get_param('~lon', None)

    # Check that both lat and lon are provided. Compare against None
    # explicitly: 0.0 is a valid coordinate (equator / prime meridian)
    # but is falsy, so the previous `if not lat` wrongly rejected it.
    missing_args = []
    if lat is None:
        missing_args.append('lat (double) ')
    if lon is None:
        missing_args.append('lon (double) ')

    # If anything is missing, report and exit.
    if missing_args:
        rospy.logerr(TAG + 'Missing params: ' + ''.join(missing_args))
        rospy.loginfo('exiting...')
        return

    try:
        lat = float(lat)
        lon = float(lon)
        rospy.loginfo(TAG + '\n' +
                      '\tLatitude: {}\n'.format(lat) +
                      '\tLongitude: {}\n'.format(lon))
        zone, band = gridZone(lat, lon)
        rospy.loginfo(TAG + 'UTM zone of given coords:\n\n' +
                      '\t{}{}\n'.format(zone, band))
    except Exception as e:
        # Bug fix: the original caught the undefined name `Error`, which
        # would itself raise NameError the moment any exception occurred.
        # float() raises ValueError/TypeError; gridZone may raise as well.
        rospy.logerr(TAG + 'Encountered error: {}'.format(e))
        rospy.loginfo('exiting...')
        return
if __name__ == '__main__':
rospy.init_node('zone_finder')
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
686,
2777,
88,
198,
6738,
4903,
4147,
88,
13,
26841,
1330,
10706,
26961,
198,
198,
4299,
1388,
33529,
198,
197,
37811,
198,
197,
26437,
10361,
4226,
284,
1064,
262,
471,
15... | 2.337634 | 465 |
"""set of filter functions"""
import datetime
import uuid
def choose_current_date_partition():
    """Return the BigQuery partition decorator for today ('$YYYYMMDD')."""
    today = datetime.date.today()
    return today.strftime('$%Y%m%d')
def add_bigquery_insert_uuid(row):
    """Wrap *row* for a BigQuery streaming insert.

    Returns a dict carrying a fresh "insertId" (a stringified uuid1,
    used by BigQuery for best-effort de-duplication) and the original
    row payload under the "json" key.
    """
    return {
        "insertId": str(uuid.uuid1()),
        "json": row,
    }
| [
37811,
2617,
286,
8106,
5499,
37811,
198,
198,
11748,
4818,
8079,
198,
11748,
334,
27112,
198,
198,
4299,
3853,
62,
14421,
62,
4475,
62,
3911,
653,
33529,
198,
220,
220,
220,
37227,
11407,
262,
1582,
653,
329,
1459,
3128,
37811,
628,
... | 2.798658 | 149 |
import tensorflow as tf
import numpy as np
import pytesseract
import cv2
import json
import time
from tensorflow import keras
# Point pytesseract at the local Tesseract-OCR binary (Windows install path).
pytesseract.pytesseract.tesseract_cmd = r'C:/Program Files/Tesseract-OCR/tesseract.exe'
# Input size expected by the CNN classifier.
img_height = 180
img_width = 180
# Image to classify and the saved-model directory name (a timestamp).
image_name = 'test1.jpg'
model_name = '1627062415'
# Class labels. NOTE(review): unused in this excerpt -- presumably
# consumed by accuracy_calculation() via module globals; confirm.
class_names = ['drawing', 'paper', 'problem']
model_path = 'C:/Users/jun09/OneDrive/Desktop/s-class_system_version/s-class_version-3/server/problem_server/model/cnn_model/' + model_name
image_path = 'C:/Users/jun09/OneDrive/desktop/s-class_system_version/s-class_version-3/server/problem_server/test_image/' + image_name
# NOTE(review): accuracy_calculation, ocr_image, create_json and
# response_json are not defined in this excerpt -- presumably defined
# elsewhere in the original module.
accuracy, score_class_name = accuracy_calculation()
# Only run OCR when the classifier is confident the image is a "problem".
if score_class_name == 'problem' and accuracy > 70.0:
    print('Extracting text...')
    ocr_problem_text = ocr_image(image_path)
    json_data = create_json(ocr_problem_text)
    response_msg = response_json(json_data)
    print(response_msg)
elif score_class_name == 'problem' and accuracy < 70.0:
    # NOTE(review): accuracy == 70.0 exactly falls through to the final
    # else branch -- confirm whether that is intended.
    print(
        "The image was not accurately recognized. Please select another image or re-recognize it. Current measured accuracy: {:.2f}".format(accuracy)
    )
else:
    print(
        "Failed to extract text, less than 70% accuracy or not problematic. Measured Results : {}, result accuracy : {:.2f}%"
        .format(score_class_name, accuracy)
    )
) | [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
83,
408,
263,
529,
198,
11748,
269,
85,
17,
198,
11748,
33918,
198,
11748,
640,
198,
6738,
11192,
273,
11125,
1330,
41927,
292,
198,
198,
9078,... | 2.704225 | 497 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011, 2012 Pablo A. Costesich <pcostesi@alu.itba.edu.ar>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Dev Team nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from spy.core import Instruction, Bytecode
from spy.optimizations import register_relocation, dce
import re
from StringIO import StringIO
# This is a hand-crafted top-down parser (it's not recursive).
# Although ad-hoc, it's good enough for this project (no external dependencies)
# Token type codes used throughout the lexer/parser.
NOP, TAG, KWORD, IDENTIFIER, NUMBER, ARROW, NEQ, OP, LB, RB, NL = range(11)
# Human-readable names, indexed by the codes above (used in diagnostics).
TOKEN_NAMES = "NOP TAG KWORD IDENTIFIER NUMBER ARROW NEQ OP LB RB NL".split()

# Ordered (compiled regex, token code) pairs; _match_some tries them top
# to bottom, so earlier entries take precedence (whitespace and comments
# are consumed as NOP before anything else can match).
_PATTERNS = (
    (re.compile(r"[ \t\f]"), NOP),  # inline whitespace
    (re.compile(r"\n"), NL),  # end of line
    (re.compile(r"\["), LB),  # opening bracket
    (re.compile(r"\]"), RB),  # closing bracket
    (re.compile(r"[a-eA-E]([1-9][0-9]*)?"), TAG),  # label a..e with optional index
    (re.compile(r"[yY]|([xXzZ]([1-9][0-9]*)?)"), IDENTIFIER),  # y, or x/z with optional index
    (re.compile("#.*"), NOP),  # comment to end of line
    (re.compile("<-"), ARROW),  # assignment arrow
    (re.compile(r"\+|-"), OP),  # arithmetic operator
    (re.compile("!="), NEQ),  # inequality test
    (re.compile(r"[0-9]+"), NUMBER),  # integer literal
    (re.compile(r"\w+"), KWORD),  # bare keyword / word
)
def _match_some(regexes, line, n_line, n_col):
"""Match patterns in order. Returns a tuple of match and token type or
raises SyntaxError."""
for regex, token in regexes:
match = regex.match(line, n_col)
if match is not None:
return match, token
error = "No rules to match input (does not conform this grammar) \n"
error += "At line %d, column %d" % (n_line, n_col)
error += "\n\t%s" % line
error += "\n" if line[-1] != "\n" else ""
error += "\t" + "_" * (n_col - 1) + "/\\" + "_" * (len(line) - n_col - 2)
raise SyntaxError(error)
def tokenize(input_file):
    """Lazily tokenize *input_file*, yielding (token, start, stop, text).

    Positions are (line, column) pairs, mirroring the shape produced by
    generate_tokens in the stdlib tokenize module. NOP tokens
    (whitespace and comments) are consumed but never yielded.
    """
    for line_no, line in enumerate(input_file.readlines(), 1):
        col = 0
        width = len(line)
        # Lines holding only a newline (width <= 1) carry no tokens.
        while col < width and width > 1:
            match, token = _match_some(_PATTERNS, line, line_no, col)
            start, stop = match.span()
            text = match.string[start:stop]
            begin, end = (line_no, start), (line_no, stop)
            col = stop
            if token == NOP:
                continue
            yield token, begin, end, text
class Matcher(object):
    """Stateful matcher that keeps the lookahead and matching info.

    NOTE(review): the three stacked bare ``@property`` decorators below
    look like extraction damage -- presumably ``token``, ``symbol`` and
    ``span`` property bodies (and an ``__init__`` binding ``self.tokens``)
    were lost, since ``match`` and ``parse`` both rely on those names.
    Confirm against the original source before relying on this class.
    """

    @property
    @property
    @property
    def match(self, *expect, **kwargs):
        """Match one of *expect* and advance the token stream.

        *expect: token codes accepted next; None is the epsilon-production
            (i.e. end-of-file is acceptable here).
        **kwargs:
            - test: optional predicate applied to the new lookahead;
              a falsy result raises SyntaxError.

        Returns self (fluent style). Raises SyntaxError on unexpected
        EOF, a failed test, or a token not in *expect*.
        """
        # Advance; uses the Python 2 iterator protocol (.next()).
        try:
            self.lookahead = self.tokens.next()
        except StopIteration:
            # EOF is only legal when the epsilon-production was allowed.
            if None not in expect:
                raise SyntaxError("Unexpected end of file")
            self.lookahead = None
            return self
        # No expectations given: any token is fine, just advance.
        if not expect:
            return self
        for tok in expect:
            if tok == self.lookahead[0]:
                # Optional semantic predicate on the matched lookahead.
                if callable(kwargs.get('test')):
                    if not kwargs['test'](self.lookahead):
                        raise SyntaxError("Failed test.")
                return self
        raise SyntaxError("Token '%s'(%s) does not match %s" %
                (self.symbol, TOKEN_NAMES[self.token], list(TOKEN_NAMES[i] for i in expect)))
def parse(tokens):
    """Top-level parse loop over a token stream produced by tokenize().

    Yields one parsed construct per recognized statement; bare newlines
    are consumed silently. Raises SyntaxError on any other token.
    """
    m = Matcher(tokens)
    while m.lookahead is not None:
        if m.token == LB:
            yield _match_LB(m)
        elif m.token == KWORD:
            yield _match_KWORD(m)
        elif m.token == IDENTIFIER:
            yield _match_IDEN(m)
        elif m.token == NL:
            # A newline may be followed by any statement, another blank
            # line, or end the stream (None admits the epsilon-production).
            m.match(NL, LB, IDENTIFIER, KWORD, None)
        else:
            raise SyntaxError("Unexpected symbol '%s': line %d, column %d" %
                              ((m.symbol,) + m.span))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
220,
220,
220,
220,
220,
220,
15069,
2813,
11,
2321,
33185,
317,
13,
6446,
274,
488,
1279,
14751,
455,
46551,
31,
... | 2.27501 | 2,549 |
import timeit
# from datetime import datetime
from medocControl import *
from psychopy import core
import random
while True:
# startTime = timeit.default_timer()
# poll_for_change('IDLE')
# core.wait(5)
# command = random.randint(101,171)
command = 117
if poll_for_change('IDLE', poll_max=-1):
# startTime = datetime.now()
startTime = timeit.default_timer()
print("Running " + str(command))
sendCommand('select_tp', command)
# print("start time: " + str(startTime))
# if poll_for_change('READY'): sendCommand('start'); print("First start command took: " + str(timeit.default_timer() - startTime) + "s past polling")
# startTime2 = timeit.default_timer()
# if poll_for_change('RUNNING'): sendCommand('start'); print("Second start command took " + str(timeit.default_timer() - startTime2) + "s past polling")
# print("Selected TP at: " + str(timeit.default_timer()-startTime))
if poll_for_change('RUNNING'): sendCommand('trigger')
# print("end polling time: {}".format(datetime.now() - startTime))
print("Stim started " + str(timeit.default_timer() - startTime) + "s past polling")
# print("Stim started " + str(timeit.default_timer()-startTime) + " past polling")
# core.wait(5)
# print("Polling prior to first trigger: " + str(timeit.default_timer()-startTime))
startTime2 = timeit.default_timer()
# startTime2 = datetime.now()
# jitter = random.randint(1,5)
# core.wait(jitter)
# core.wait(jitter + 13)
# poll_for_change('IDLE')
# startTime3 = timeit.default_timer()
command = random.randint(101,171)
command = 170
if poll_for_change('IDLE', poll_max=-1):
# print("Post-trigger selection latency: " + str(timeit.default_timer()-startTime2));
# print("stimclock: " + str(timeit.default_timer()))
print("Post-stimulation selection latency: " + str(timeit.default_timer()-startTime2));
# print("stimclock: {}".format(datetime.now() - startTime))
# print("Post-stimulation selection latency {}".format(datetime.now() - startTime2) + " past polling")
# print("Running " + str(command));
sendCommand('select_tp', command)
# if poll_for_change('READY'): sendCommand('start')
# if poll_for_change('RUNNING'): sendCommand('start')
# print("stimclock: " + str(timeit.default_timer()))
print("Second stimulation begins at : " + str(timeit.default_timer()-startTime))
# print("end time: {}".format(datetime.now() - startTime))
# print("Second stimulation started {}".format(datetime.now() - startTime) + " past polling")
# core.wait(5)
# if poll_for_change('RUNNING'):
# print("Post-trigger trigger latency: " + str(timeit.default_timer()-startTime2) + ' (it took ' + str(timeit.default_timer()-startTime3) + ' )')
# sendCommand('trigger')
# print("Second Stim Trigger: " + str(timeit.default_timer()-startTime))
# core.wait(13) | [
11748,
640,
270,
198,
198,
2,
422,
4818,
8079,
1330,
4818,
8079,
198,
6738,
1117,
420,
15988,
1330,
1635,
198,
6738,
3795,
11081,
1330,
4755,
198,
11748,
4738,
198,
198,
4514,
6407,
25,
198,
220,
220,
220,
1303,
923,
7575,
796,
640,
... | 2.655172 | 1,131 |
from __future__ import absolute_import, unicode_literals
from saefportal.settings import COMPARISON_PROFILE_THRESHOLD
from .analyzer import Analyzer
from analyzer.models import ActualColumnProfile, ExpectedColumnProfile
from analyzer.enums import Column
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
473,
891,
634,
282,
13,
33692,
1330,
24301,
1503,
39960,
62,
31190,
25664,
62,
4221,
19535,
39,
15173,
198,
6738,
764,
38200,
9107,
1330,
... | 3.690141 | 71 |
import numpy as np
import tensorflow as tf
from utils.xer import wer
from utils.tools import bytes_to_string
class ErrorRate(tf.keras.metrics.Metric):
""" Metric for WER and CER """
| [
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
11192,
273,
11125,
355,
48700,
201,
198,
6738,
3384,
4487,
13,
87,
263,
1330,
266,
263,
201,
198,
6738,
3384,
4487,
13,
31391,
1330,
9881,
62,
1462,
62,
8841,
201,
198,
201,
... | 2.493827 | 81 |
from unittest import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from environments.models import Environment, Identity
from features.models import Feature, FeatureState
from organisations.models import Organisation
from projects.models import Project
from tests.utils import Helper
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
1334,
62,
30604,
1330,
3722,
198,
6738,
1334,
62,
30604,
13,
9288,
1330,
3486,
2149,
75,
1153,
198,
198,
6738,
12493,
13,
27530,
1330,
9344,
11,
27207,
198,
6738,
3033,
13,
27530... | 4.657143 | 70 |
# -*- coding:utf-8 -*-
# @Time : 2020/06/10
# @Author : Wu Wen Jie(6692776@qq.com)
# @FileName : mpython_conn.py
# @Description : A transfer protocol between mPython board and PC python
# @Version : 0.3.2
from serial.tools.list_ports import comports as list_serial_ports
from serial import Serial
import threading
import time
import atexit
import unicodedata
import inspect
import ctypes
import sys
def _async_raise(tid, exctype):
"""raises the exception, performs cleanup if needed"""
tid = ctypes.c_long(tid)
if not inspect.isclass(exctype):
exctype = type(exctype)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@atexit.register
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
220,
220,
220,
220,
1058,
12131,
14,
3312,
14,
940,
198,
2,
2488,
13838,
220,
220,
1058,
18027,
31164,
449,
494,
7,
36657,
1983,
4304,
31,
38227,
13,
785,
... | 2.717949 | 390 |
import torchlib
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms
# ==============================================================================
# = custom dataset =
# ==============================================================================
# ==============================================================================
# = debug =
# ==============================================================================
# import imlib as im
# import numpy as np
# import pylib as py
# data_loader, _ = make_celeba_dataset(py.glob('data/img_align_celeba', '*.jpg'), batch_size=64)
# for img_batch in data_loader:
# for img in img_batch.numpy():
# img = np.transpose(img, (1, 2, 0))
# im.imshow(img)
# im.show()
| [
11748,
28034,
8019,
198,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
11,
16092,
292,
316,
198,
6738,
28034,
10178,
1330,
40522,
11,
31408,
628,
628,
628,
198,
2,
38093,
25609,
28,
198,
2,
796,
220,
220,
220,
220,
220,
... | 2.715543 | 341 |
# a pretty straightforward Muenchian grouping test
from Xml.Xslt import test_harness
sheet_1 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="html" indent="yes"/>
<xsl:key name="skills-by-mark" match="skill" use="@mark"/>
<xsl:template match="skills">
<table>
<!-- process a set consisting of the first skill element for each mark -->
<xsl:for-each select="skill[count(.|key('skills-by-mark',@mark)[1])=1]">
<tr>
<td><b><xsl:value-of select="concat(@mark,' skills:')"/></b></td>
<td>
<!-- process all skill elements having the current skill's mark -->
<xsl:for-each select="key('skills-by-mark',@mark)">
<xsl:value-of select="@name"/>
<xsl:if test="position()!=last()"><br/></xsl:if>
</xsl:for-each>
</td>
</tr>
</xsl:for-each>
</table>
</xsl:template>
</xsl:stylesheet>"""
source_1 = """<skills>
<skill mark="excellent" name="excellentskill"/>
<skill mark="excellent" name="excellent skill"/>
<skill mark="good" name="goodskill"/>
<skill mark="good" name="goodskill"/>
<skill mark="basic" name="basicskill"/>
<skill mark="basic" name="basicskill"/>
<skill mark="excellent" name="excellentskill"/>
<skill mark="good" name="goodskill"/>
<skill mark="basic" name="basicskill"/>
</skills>"""
expected_1 = """<table>
<tr>
<td><b>excellent skills:</b></td>
<td>excellentskill
<br>excellent skill
<br>excellentskill
</td>
</tr>
<tr>
<td><b>good skills:</b></td>
<td>goodskill
<br>goodskill
<br>goodskill
</td>
</tr>
<tr>
<td><b>basic skills:</b></td>
<td>basicskill
<br>basicskill
<br>basicskill
</td>
</tr>
</table>"""
| [
2,
257,
2495,
15836,
8252,
24421,
666,
36115,
1332,
198,
198,
6738,
1395,
4029,
13,
55,
82,
2528,
1330,
1332,
62,
9869,
1108,
198,
198,
21760,
62,
16,
796,
37227,
47934,
19875,
2196,
2625,
16,
13,
15,
1,
21004,
2625,
40477,
12,
23,
... | 2.216726 | 849 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from collections.abc import Iterable
if __name__ == '__main__':
    # Dict: iterating a dict yields its keys.
    d = {'a': 1, 'b': 2, 'c': 3}
    for key in d:
        print(key,d[key])
    # String: iterating a str yields its characters.
    for x in 'abc':
        print(x)
    # Check whether an object is iterable.
    # NOTE(review): this rebinds the builtin name `iter` -- kept as-is.
    iter=isinstance(['a','b','c'], Iterable)
    print(iter)
    # To also get the index, wrap the iterable with enumerate.
    for index,value in enumerate([1,3,52]):
        print('index: ',index, 'value: ',value)
    # Or iterate over range directly.
    for x in range(10):
        print(x)
    # Tests for findMinAndMax.
    # NOTE(review): findMinAndMax is not defined in this excerpt --
    # presumably defined earlier in the original file; confirm.
    if findMinAndMax([]) != (None, None):
        print('测试失败!')
    elif findMinAndMax([7]) != (7, 7):
        print('测试失败!')
    elif findMinAndMax([7, 1]) != (1, 7):
        print('测试失败!')
    elif findMinAndMax([7, 1, 3, 9, 5]) != (1, 9):
        print('测试失败!')
    else:
        print('测试成功!')
    # Iterables include: list, tuple, dict, set, str, and generators
    # (both generator expressions and functions using yield).
    print(isinstance([],Iterable))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
17268,
13,
39305,
1330,
40806,
540,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
... | 1.626943 | 579 |
#!/usr/bin/env python
"""Convert gzipped files on s3 biodata to xz compression format.
This conversion is designed to save time and space for download.
Some download utilities to speed things up:
axel, aria2, lftp
"""
import os
import sys
import socket
import subprocess
import boto
import fabric.api as fabric
if __name__ == "__main__":
bucket_name = "biodata"
main(bucket_name)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
3103,
1851,
308,
89,
3949,
3696,
319,
264,
18,
32449,
1045,
284,
2124,
89,
19794,
5794,
13,
198,
198,
1212,
11315,
318,
3562,
284,
3613,
640,
290,
2272,
329,
4321,
13,
198,
19... | 3.111111 | 126 |
from __future__ import annotations
from datetime import datetime
from typing import Optional, Sequence, Tuple, Union
from wyze_sdk.models import datetime_to_epoch
from .base import ExServiceClient, WyzeResponse
class ScaleServiceClient(ExServiceClient):
    """
    Scale service client is the wrapper on the requests to https://wyze-scale-service.wyzecam.com
    """
    WYZE_API_URL = "https://wyze-scale-service.wyzecam.com"
    WYZE_APP_ID = "scap_41183d5d0bac498d"

    def get_device_setting(self, *, did: str, **kwargs) -> WyzeResponse:
        """
        Get the settings for the scale.

        See: com.wyze.ihealth.d.a.m
        """
        kwargs.update({'device_id': did})
        return self.api_call('/plugin/scale/get_device_setting', http_verb="GET", params=kwargs)

    def get_device_member(self, *, did: str, **kwargs) -> WyzeResponse:
        """
        Get the users associated with the scale.

        See: com.wyze.ihealth.d.a.j
        """
        kwargs.update({'device_id': did})
        return self.api_call('/plugin/scale/get_device_member', http_verb="GET", params=kwargs)

    def get_family_member(self, *, did: str, **kwargs) -> WyzeResponse:
        """
        Get the family members associated with the scale.

        See: com.wyze.ihealth.d.a.o
        """
        kwargs.update({'device_id': did})
        return self.api_call('/plugin/scale/get_family_member', http_verb="GET", params=kwargs)

    def get_user_preference(self, *, did: str, **kwargs) -> WyzeResponse:
        """
        Get the scale-related preferences for the current user.

        See: com.wyze.ihealth.d.a.p
        """
        kwargs.update({'device_id': did})
        return self.api_call('/plugin/scale/get_user_preference', http_verb="GET", params=kwargs)

    def get_token(self, *, did: str, **kwargs) -> WyzeResponse:
        """
        Get binding token for the scale.

        See: com.wyze.ihealth.d.a.c
        """
        kwargs.update({'device_id': did})
        return self.api_call('/plugin/scale/get_token', http_verb="GET", params=kwargs)

    def get_user_device_relation(self, *, did: str, user_id: str, **kwargs) -> WyzeResponse:
        """
        Get the relationship of the users associated with the scale.

        See: com.wyze.ihealth.d.a.d
        """
        kwargs.update({'device_id': did, 'user_id': user_id})
        return self.api_call('/plugin/scale/get_user_device_relation', http_verb="GET", params=kwargs)

    def update_device_setting(self, *, did: str, model: str, firmware_ver: str, mac: str, unit: str, broadcast: int, **kwargs) -> WyzeResponse:
        """
        Update the settings of scale.

        See: com.wyze.ihealth.d.a.f
        """
        kwargs.update({'device_id': did, 'device_model': model, 'firmware_ver': firmware_ver, 'mac': mac, 'unit': unit, 'broadcast': broadcast})
        return self.api_call('/plugin/scale/update_device_setting', json=kwargs)

    def get_user_profile(self):
        """
        Get the scale-related data from the user's profile.

        See: com.wyze.ihealth.d.a.a and com.samsung.android.sdk.healthdata.HealthUserProfile
        """
        return self.api_call('/app/v2/platform/get_user_profile', http_verb="GET")

    def update_user_profile(self, *, logo_url: str, nickname: str, gender: str, birth_date: str, height: str, height_unit: str, body_type: str, occupation: str, **kwargs) -> WyzeResponse:
        """
        Set scale-related data to the user's profile.

        See: com.wyze.ihealth.d.a.l and com.samsung.android.sdk.healthdata.HealthUserProfile
        """
        kwargs.update({'logo_url': logo_url, 'nickname': nickname, 'gender': gender, 'birthDate': birth_date, 'height': height, 'height_unit': height_unit, 'body_type': body_type, 'occupation': occupation})
        return self.api_call('/app/v2/platform/update_user_profile', json=kwargs)

    def get_goal_weight(self, *, user_id: str, **kwargs) -> WyzeResponse:
        """
        Get the goal weight from the user's profile.

        See: com.wyze.ihealth.d.b.v
        """
        kwargs.update({'family_member_id': user_id})
        return self.api_call('/plugin/scale/get_goal_weight', http_verb="GET", params=kwargs)

    def get_heart_rate_record_list(self, *, user_id: Optional[str] = None, record_number: Optional[int] = 1, measure_ts: Optional[int] = None, **kwargs) -> WyzeResponse:
        """
        Get the heart rate records from the user's profile.

        See: com.wyze.ihealth.d.b.b
        """
        if user_id:
            kwargs.update({'family_member_id': user_id})
        kwargs.update({'record_number': str(record_number)})
        if measure_ts:
            kwargs.update({'measure_ts': str(measure_ts)})
        return self.api_call('/plugin/scale/get_heart_rate_record_list', http_verb="GET", params=kwargs)

    def get_latest_records(self, *, user_id: Optional[str] = None, **kwargs) -> WyzeResponse:
        """
        Get the latest records from the user's profile.

        See: com.wyze.ihealth.d.b.t
        """
        if user_id:
            kwargs.update({'family_member_id': user_id})
        return self.api_call('/plugin/scale/get_latest_record', http_verb="GET", params=kwargs)

    def get_records(self, *, user_id: Optional[str] = None, start_time: datetime, end_time: datetime, **kwargs) -> WyzeResponse:
        """
        Get a range of records from the user's profile, bounded by
        [start_time, end_time].

        See: com.wyze.ihealth.d.b.i and com.samsung.android.sdk.healthdata.HealthConstants.SessionMeasurement
        """
        if user_id:
            kwargs.update({'family_member_id': user_id})
        # Bug fix: start_time was previously ignored (hard-coded to epoch
        # 0), so every call fetched the full history regardless of the
        # requested range.
        kwargs.update({'start_time': str(datetime_to_epoch(start_time)),
                       'end_time': str(datetime_to_epoch(end_time))})
        return self.api_call('/plugin/scale/get_record_range', http_verb="GET", params=kwargs)

    def delete_goal_weight(self, *, user_id: Optional[str] = None, **kwargs) -> WyzeResponse:
        """
        Removes the goal weight from the user's profile.

        See: com.wyze.ihealth.d.b.j
        """
        if user_id:
            kwargs.update({'family_member_id': user_id})
        return self.api_call('/plugin/scale/delete_goal_weight', http_verb="GET", params=kwargs)

    def add_heart_rate_record(self, *, did: str, user_id: str, measure_ts: int, heart_rate: int, **kwargs) -> WyzeResponse:
        """
        Add a heart rate record to the user's profile.

        See: com.wyze.ihealth.d.b.p
        """
        kwargs.update({'device_id': did, 'family_member_id': user_id, 'measure_ts': measure_ts, 'heart_rate': str(heart_rate)})
        # NOTE(review): this POSTs to the *get*_latest_record endpoint,
        # which looks like a copy-paste slip -- confirm the real endpoint
        # before changing it.
        return self.api_call('/plugin/scale/get_latest_record', json=kwargs)

    def add_weight_record(self, *, did: str, mac: str, user_id: str, measure_ts: int, measure_type: int = 1, weight: float, **kwargs) -> WyzeResponse:
        """
        Add a weight-only record to the user's profile.

        See: com.wyze.ihealth.d.b.k
        """
        kwargs.update({'device_id': did, 'mac': mac, 'family_member_id': user_id, 'measure_ts': measure_ts, 'measure_type': measure_type, 'weight': weight})
        # NOTE(review): same endpoint concern as add_heart_rate_record --
        # confirm before changing.
        return self.api_call('/plugin/scale/get_latest_record', json=kwargs)

    def delete_record(self, *, data_id: Union[int, Sequence[int]], **kwargs) -> WyzeResponse:
        """
        Delete health records from the user's profile.

        data_id: a single record id, or a sequence of record ids.

        See: com.wyze.ihealth.d.b.u
        """
        # Bug fix: the parameter was declared `data_id=Union[...]`, making
        # the typing object its *default value* instead of an annotation;
        # it is now a required keyword argument with a proper annotation.
        if isinstance(data_id, (list, tuple)):
            # Bug fix: str.join raises TypeError on int elements, so each
            # id is stringified first. (typing.Tuple also replaced with the
            # builtin `tuple` for the isinstance check.)
            kwargs.update({"data_id_list": ",".join(str(i) for i in data_id)})
        else:
            # NOTE(review): the single-id path sends a one-element list
            # while the multi-id path sends a comma-separated string --
            # confirm which shape the API actually expects.
            kwargs.update({"data_id_list": [data_id]})
        return self.api_call('/plugin/scale/delete_record', json=kwargs)
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
19720,
1330,
32233,
11,
45835,
11,
309,
29291,
11,
4479,
198,
198,
6738,
266,
88,
2736,
62,
21282,
74,
13,
27530,
1330,
4818,
8079,
62,
146... | 2.330979 | 3,257 |
# Generated by Django 2.0.7 on 2018-07-06 14:37
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
22,
319,
2864,
12,
2998,
12,
3312,
1478,
25,
2718,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
from setuptools import setup, find_packages
# Package metadata and dependencies for the pre_wigs_validation tool.
setup(
    name="pre_wigs_validation",
    version="0.1.0",
    description="Pre-WIG Validator for Linux",
    author="steno",
    author_email="steno@amazon.com",
    # Ship every package found in the tree except the test suite.
    packages=find_packages(exclude=["tests"]),
    include_package_data=True,
    # NOTE(review): "dataclasses" is a backport only needed on Python 3.6;
    # on 3.7+ it ships with the standard library -- confirm the supported
    # Python versions before keeping it pinned here.
    install_requires=["requests", "dataclasses", "distro", "PrettyTable"]
    # Maybe include dev dependencies in a txt file
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
201,
198,
201,
198,
201,
198,
40406,
7,
201,
198,
220,
220,
220,
1438,
2625,
3866,
62,
86,
9235,
62,
12102,
341,
1600,
201,
198,
220,
220,
220,
2196,
2625,
15,
13,
16,
13,
... | 2.6 | 165 |
import pathlib
import sys
import tensorflow as tf
import numpy as np
from tensorflow.python.ops import init_ops
from tensorflow.python.ops.rnn_cell_impl import _Linear, LSTMStateTuple
from tensorflow.python.ops import variable_scope as vs
from utils import *
def _evaluate_series(abnormal_data, abnormal_label, k_partition, z_threshold=3):
    """Run the model on one labelled series and print its per-series metrics.

    When the module-global ``partition`` flag is set, the series is split into
    ``k_partition`` chunks, the model is run per chunk, and the per-chunk
    reconstruction errors are pooled before z-score thresholding; otherwise the
    model runs once on the whole series (and nothing is printed here, matching
    the original behaviour).

    Returns ``(precision, recall, f1, roc_auc, pr_auc, cohen_kappa)``.
    """
    if multivariate:
        abnormal_data = np.expand_dims(abnormal_data, axis=0)
    if partition:
        splitted_data, splitted_label = PartitionTimeSeriesKPart(
            abnormal_data, abnormal_label, _part_number=k_partition)
        pooled_error = []
        for i in range(k_partition):
            # Only the reconstruction error is pooled; per-chunk metrics were
            # computed but discarded in the original code as well.
            pooled_error.append(RunModel(splitted_data[i], splitted_label[i])[0])
        pooled_error = np.concatenate(pooled_error).ravel()
        y_pred = CreateLabelBasedOnZscore(Z_Score(pooled_error), z_threshold)
        precision, recall, f1 = CalculatePrecisionRecallF1Metrics(abnormal_label, y_pred)
        PrintPrecisionRecallF1Metrics(precision, recall, f1)
        _, _, roc_auc = CalculateROCAUCMetrics(abnormal_label, pooled_error)
        print('roc_auc=' + str(roc_auc))
        _, _, pr_auc = CalculatePrecisionRecallCurve(abnormal_label, pooled_error)
        print('pr_auc=' + str(pr_auc))
        cks = CalculateCohenKappaMetrics(abnormal_label, y_pred)
        print('cohen_kappa=' + str(cks))
    else:
        # Non-partitioned run: RunModel itself reports; just unpack its metrics.
        # (This also fixes the original NameError: the per-directory branches
        # appended ``final_p`` etc., which only existed in the partition path.)
        _, precision, recall, f1, roc_auc, pr_auc, cks = RunModel(
            abnormal_data, abnormal_label)
    return precision, recall, f1, roc_auc, pr_auc, cks


def _print_average_metrics(s_precision, s_recall, s_f1, s_roc_auc, s_pr_auc, s_cks):
    """Print the averages of the collected per-file metrics, framed by '#' rules."""
    print('########################################')
    print('avg_precision=' + str(CalculateAverageMetric(s_precision)))
    print('avg_recall=' + str(CalculateAverageMetric(s_recall)))
    print('avg_f1=' + str(CalculateAverageMetric(s_f1)))
    print('avg_roc_auc=' + str(CalculateAverageMetric(s_roc_auc)))
    print('avg_pr_auc=' + str(CalculateAverageMetric(s_pr_auc)))
    print('avg_cks=' + str(CalculateAverageMetric(s_cks)))
    print('########################################')


if __name__ == '__main__':
    # Hyper-parameters are deliberately module-level globals: RunModel (from
    # ``utils``) is presumed to read some of them (e.g. elem_num) — TODO confirm.
    batch_num = 1
    hidden_num = 4
    # step_num = 8
    iteration = 30
    ensemble_space = 10
    learning_rate = 1e-3
    multivariate = True
    partition = True
    save_model = False

    # dataset id -> (csv path, k partitions, reader, element count, z-score cut)
    single_file_datasets = {
        1: ('./GD/data/Genesis_AnomalyLabels.csv', 40, ReadGDDataset, 18, 2.5),
        2: ('./HSS/data/HRSS_anomalous_standard.csv', 80, ReadHSSDataset, 18, 3),
    }
    # dataset id -> (data root, k partitions, reader, element count); one metrics
    # average is reported per sub-directory.
    per_directory_datasets = {
        3: ('./YAHOO/data', 10, ReadS5Dataset, 1),
        4: ('./NAB/data', 10, ReadNABDataset, 1),
        5: ('./2D/test', 3, Read2DDataset, 2),
        7: ('./ECG/', 3, ReadECGDataset, 3),
    }

    # The original duplicated the entire dispatch for the "argv given" and
    # "no argv" cases; collapse that into a single list of dataset ids.
    try:
        datasets = [int(sys.argv[1])]
    except IndexError:
        # No CLI argument: sweep datasets 1-6 (dataset 7 / ECG must be requested
        # explicitly, matching the original loop's range(1, 7)).
        datasets = list(range(1, 7))

    for dataset in datasets:
        if dataset in single_file_datasets:
            file_name, k_partition, read_dataset, elem_num, z_cut = \
                single_file_datasets[dataset]
            print(file_name)
            abnormal_data, abnormal_label = read_dataset(file_name)
            _evaluate_series(abnormal_data, abnormal_label, k_partition, z_cut)
        elif dataset in per_directory_datasets:
            data_root, k_partition, read_dataset, elem_num = \
                per_directory_datasets[dataset]
            for root, dir_names, _ in os.walk(data_root):
                for dir_name in dir_names:
                    s_precision, s_recall, s_f1 = [], [], []
                    s_roc_auc, s_pr_auc, s_cks = [], [], []
                    for _, _, files in os.walk(os.path.join(root, dir_name)):
                        for file_entry in files:
                            file_name = os.path.join(root, dir_name, file_entry)
                            print(file_name)
                            abnormal_data, abnormal_label = read_dataset(file_name)
                            p, r, f1, roc_auc, pr_auc, cks = _evaluate_series(
                                abnormal_data, abnormal_label, k_partition)
                            s_precision.append(p)
                            s_recall.append(r)
                            s_f1.append(f1)
                            s_roc_auc.append(roc_auc)
                            s_pr_auc.append(pr_auc)
                            s_cks.append(cks)
                    _print_average_metrics(s_precision, s_recall, s_f1,
                                           s_roc_auc, s_pr_auc, s_cks)
        elif dataset == 6:
            # UAH: one recording per folder; averages are over all folders.
            k_partition = 2
            elem_num = 4
            s_precision, s_recall, s_f1 = [], [], []
            s_roc_auc, s_pr_auc, s_cks = [], [], []
            for root, dir_names, _ in os.walk('./UAH/'):
                for dir_name in dir_names:
                    folder_name = os.path.join(root, dir_name)
                    print(folder_name)
                    abnormal_data, abnormal_label = ReadUAHDataset(folder_name)
                    p, r, f1, roc_auc, pr_auc, cks = _evaluate_series(
                        abnormal_data, abnormal_label, k_partition)
                    s_precision.append(p)
                    s_recall.append(r)
                    s_f1.append(f1)
                    s_roc_auc.append(roc_auc)
                    s_pr_auc.append(pr_auc)
                    s_cks.append(cks)
            _print_average_metrics(s_precision, s_recall, s_f1,
                                   s_roc_auc, s_pr_auc, s_cks)
11748,
3108,
8019,
198,
11748,
25064,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
11192,
273,
11125,
13,
29412,
13,
2840,
1330,
2315,
62,
2840,
198,
6738,
11192,
273,
11125,
13,
29412,
... | 1.674183 | 29,403 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Mozilla Firefox history database plugin."""
import collections
import unittest
from plaso.formatters import firefox as _ # pylint: disable=unused-import
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import firefox
from tests.parsers.sqlite_plugins import test_lib
class FirefoxHistoryPluginTest(test_lib.SQLitePluginTestCase):
"""Tests for the Mozilla Firefox history database plugin."""
  def setUp(self):
    """Sets up the needed objects used throughout the test.

    Instantiates the plugin under test; the unittest framework calls this
    before every test method, so each test gets a fresh plugin instance.
    """
    self._plugin = firefox.FirefoxHistoryPlugin()
def testProcessPriorTo24(self):
"""Tests the Process function on a Firefox History database file."""
# This is probably version 23 but potentially an older version.
test_file = self._GetTestFilePath([u'places.sqlite'])
cache = sqlite.SQLiteCache()
event_queue_consumer = self._ParseDatabaseFileWithPlugin(
self._plugin, test_file, cache)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
# The places.sqlite file contains 205 events (1 page visit,
# 2 x 91 bookmark records, 2 x 3 bookmark annotations,
# 2 x 8 bookmark folders).
# However there are three events that do not have a timestamp
# so the test file will show 202 extracted events.
self.assertEqual(len(event_objects), 202)
# Check the first page visited event.
event_object = event_objects[0]
self.assertEqual(event_object.data_type, u'firefox:places:page_visited')
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.PAGE_VISITED)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2011-07-01 11:16:21.371935')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_url = u'http://news.google.com/'
self.assertEqual(event_object.url, expected_url)
expected_title = u'Google News'
self.assertEqual(event_object.title, expected_title)
expected_msg = (
u'{0:s} ({1:s}) [count: 1] Host: news.google.com '
u'(URL not typed directly) Transition: TYPED').format(
expected_url, expected_title)
expected_short = u'URL: {0:s}'.format(expected_url)
self._TestGetMessageStrings(event_object, expected_msg, expected_short)
# Check the first bookmark event.
event_object = event_objects[1]
self.assertEqual(event_object.data_type, u'firefox:places:bookmark')
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.ADDED_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2011-07-01 11:13:59.266344')
self.assertEqual(event_object.timestamp, expected_timestamp)
# Check the second bookmark event.
event_object = event_objects[2]
self.assertEqual(event_object.data_type, u'firefox:places:bookmark')
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.MODIFICATION_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2011-07-01 11:13:59.267198')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_url = (
u'place:folder=BOOKMARKS_MENU&folder=UNFILED_BOOKMARKS&folder=TOOLBAR&'
u'sort=12&excludeQueries=1&excludeItemIfParentHasAnnotation=livemark%2F'
u'feedURI&maxResults=10&queryType=1')
self.assertEqual(event_object.url, expected_url)
expected_title = u'Recently Bookmarked'
self.assertEqual(event_object.title, expected_title)
expected_msg = (
u'Bookmark URL {0:s} ({1:s}) [folder=BOOKMARKS_MENU&'
u'folder=UNFILED_BOOKMARKS&folder=TOOLBAR&sort=12&excludeQueries=1&'
u'excludeItemIfParentHasAnnotation=livemark%2FfeedURI&maxResults=10&'
u'queryType=1] visit count 0').format(
expected_title, expected_url)
expected_short = (
u'Bookmarked Recently Bookmarked '
u'(place:folder=BOOKMARKS_MENU&folder=UNFILED_BO...')
self._TestGetMessageStrings(event_object, expected_msg, expected_short)
# Check the first bookmark annotation event.
event_object = event_objects[183]
self.assertEqual(
event_object.data_type, u'firefox:places:bookmark_annotation')
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.CREATION_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2011-07-01 11:13:59.267146')
self.assertEqual(event_object.timestamp, expected_timestamp)
# Check another bookmark annotation event.
event_object = event_objects[184]
self.assertEqual(
event_object.data_type, u'firefox:places:bookmark_annotation')
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.CREATION_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2011-07-01 11:13:59.267605')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_url = u'place:sort=14&type=6&maxResults=10&queryType=1'
self.assertEqual(event_object.url, expected_url)
expected_title = u'Recent Tags'
self.assertEqual(event_object.title, expected_title)
expected_msg = (
u'Bookmark Annotation: [RecentTags] to bookmark '
u'[{0:s}] ({1:s})').format(
expected_title, expected_url)
expected_short = u'Bookmark Annotation: Recent Tags'
self._TestGetMessageStrings(event_object, expected_msg, expected_short)
# Check the second last bookmark folder event.
event_object = event_objects[200]
self.assertEqual(event_object.data_type, u'firefox:places:bookmark_folder')
self.assertEqual(
event_object.timestamp_desc, eventdata.EventTimestamp.ADDED_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2011-03-21 10:05:01.553774')
self.assertEqual(event_object.timestamp, expected_timestamp)
# Check the last bookmark folder event.
event_object = event_objects[201]
self.assertEqual(
event_object.data_type, u'firefox:places:bookmark_folder')
self.assertEqual(
event_object.timestamp_desc,
eventdata.EventTimestamp.MODIFICATION_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2011-07-01 11:14:11.766851')
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_title = u'Latest Headlines'
self.assertEqual(event_object.title, expected_title)
expected_msg = expected_title
expected_short = expected_title
self._TestGetMessageStrings(event_object, expected_msg, expected_short)
def testProcessVersion25(self):
"""Tests the Process function on a Firefox History database file v 25."""
test_file = self._GetTestFilePath([u'places_new.sqlite'])
cache = sqlite.SQLiteCache()
event_queue_consumer = self._ParseDatabaseFileWithPlugin(
self._plugin, test_file, cache)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
# The places.sqlite file contains 84 events:
# 34 page visits.
# 28 bookmarks
# 14 bookmark folders
# 8 annotations
self.assertEqual(len(event_objects), 84)
counter = collections.Counter()
for event_object in event_objects:
counter[event_object.data_type] += 1
self.assertEqual(counter[u'firefox:places:bookmark'], 28)
self.assertEqual(counter[u'firefox:places:page_visited'], 34)
self.assertEqual(counter[u'firefox:places:bookmark_folder'], 14)
self.assertEqual(counter[u'firefox:places:bookmark_annotation'], 8)
random_event = event_objects[10]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-10-30 21:57:11.281942')
self.assertEqual(random_event.timestamp, expected_timestamp)
expected_short = u'URL: http://code.google.com/p/plaso'
expected_msg = (
u'http://code.google.com/p/plaso [count: 1] Host: code.google.com '
u'(URL not typed directly) Transition: TYPED')
self._TestGetMessageStrings(random_event, expected_msg, expected_short)
class FirefoxDownloadsPluginTest(test_lib.SQLitePluginTestCase):
  """Tests for the Mozilla Firefox downloads database plugin."""
  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    self._plugin = firefox.FirefoxDownloadsPlugin()
  def testProcessVersion25(self):
    """Tests the Process function on a Firefox Downloads database file."""
    test_file = self._GetTestFilePath([u'downloads.sqlite'])
    cache = sqlite.SQLiteCache()
    event_queue_consumer = self._ParseDatabaseFileWithPlugin(
        self._plugin, test_file, cache)
    event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
    # The downloads.sqlite file contains 2 events (1 download).
    self.assertEqual(len(event_objects), 2)
    # Check the first download event.
    event_object = event_objects[0]
    self.assertEqual(event_object.data_type, u'firefox:downloads:download')
    self.assertEqual(
        event_object.timestamp_desc, eventdata.EventTimestamp.START_TIME)
    expected_timestamp = timelib.Timestamp.CopyFromString(
        u'2013-07-18 18:59:59.312000')
    self.assertEqual(event_object.timestamp, expected_timestamp)
    expected_url = (
        u'https://plaso.googlecode.com/files/'
        u'plaso-static-1.0.1-win32-vs2008.zip')
    self.assertEqual(event_object.url, expected_url)
    expected_full_path = u'file:///D:/plaso-static-1.0.1-win32-vs2008.zip'
    self.assertEqual(event_object.full_path, expected_full_path)
    # A completed download: received equals total size.
    self.assertEqual(event_object.received_bytes, 15974599)
    self.assertEqual(event_object.total_bytes, 15974599)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
51,
3558,
329,
262,
29258,
16802,
2106,
6831,
13877,
526,
15931,
198,
198,
11748,
17268,
198,
11748,
555,
715,
395,
198... | 2.664297 | 3,658 |
"""Embedded Structures.
Various structure can be embedded in the body, without an operation header.
Saved Chapters:
A saved chapter is a header structure embedded in the body.
There is no command identifier, so the command type is actually
the first field of the header - length/offset. Applying the `subheader`
struct at this point will parse the embedded header.
This section is a work in progress.
"""
from construct import (Array, Computed, Embedded, GreedyBytes, If, Int16ul,
Int32ul, Padding, Peek, String, Struct, Switch)
from mgz import subheader
# pylint: disable=invalid-name
# Embedded chat message.
# ctx._._._.op reaches three context levels up to the enclosing operation's
# first field (length/offset -- see module docstring), which doubles as the
# chat text length here.
chat = Struct(
    "subtype"/Computed("chat"),
    "data"/Struct(
        "length"/Computed(lambda ctx: ctx._._._.op),
        "text"/String(lambda ctx: ctx._._._.op, padchar=b'\x00',
                      trimdir='right', encoding='latin1'),
    )
)
# Embedded header (aka saved chapter)
# Header length is the distance from the chapter start to the length/offset
# field; the actual header fields come from the shared `subheader` struct.
header = Struct(
    "subtype"/Computed("savedchapter"),
    "data"/Struct(
        "header_length"/Computed(lambda ctx: ctx._._._.op - ctx._._._.start),
        Embedded(subheader)
    )
)
# Unknown embedded structure - looks like a partial action?
other = Struct(
    "subtype"/Computed("unknown"),
    "data"/Struct(
        Padding(4),
        "num_ints"/Int32ul,
        # Only parse the int array when the count looks sane (< 0xff).
        If(lambda ctx: ctx.num_ints < 0xff, Array(
            lambda ctx: ctx.num_ints, Int32ul
        )),
        Padding(12)
    )
)
# Anything we don't recognize - just consume the remainder
default = Struct(
    "subtype"/Computed("default"),
    GreedyBytes
)
# Embedded structures identified by first byte (for now)
# Peek reads the 16-bit marker without consuming it; Switch then dispatches
# on that value, falling back to the greedy `default` struct.
embedded = "embedded"/Struct(
    "marker"/Peek(Int16ul),
    Embedded("data"/Switch(lambda ctx: ctx.marker, {
        0: header,
        9024: chat,
        65535: other
    }, default=default))
)
| [
37811,
31567,
47238,
32112,
942,
13,
198,
198,
40009,
4645,
460,
307,
14553,
287,
262,
1767,
11,
1231,
281,
4905,
13639,
13,
198,
198,
50,
9586,
42448,
25,
198,
198,
32,
7448,
6843,
318,
257,
13639,
4645,
14553,
287,
262,
1767,
13,
... | 2.508242 | 728 |
# Print the letter "P" as a 13-row by 6-column grid of asterisks.
for row_idx in range(13):
    cells = []
    for col_idx in range(6):
        # The vertical spine is column 0; the bowl of the "P" is the top and
        # middle rows (minus the last column) plus the right edge rows 1-4.
        spine = col_idx == 0
        bowl_top_bottom = row_idx in (0, 5) and col_idx != 5
        bowl_side = col_idx == 5 and 1 <= row_idx <= 4
        cells.append("*" if spine or bowl_top_bottom or bowl_side else " ")
    # Each cell is followed by a space (including the last one), then newline.
    print(" ".join(cells) + " ")
| [
2,
79,
201,
198,
1640,
5752,
287,
2837,
7,
1485,
2599,
201,
198,
220,
220,
220,
329,
951,
287,
2837,
7,
21,
2599,
201,
198,
220,
220,
220,
220,
220,
220,
220,
611,
357,
4033,
855,
15,
393,
5752,
855,
15,
290,
951,
0,
28,
20,
... | 1.875776 | 161 |
from mkdocs.config import config_options
from mkdocs.plugins import BasePlugin
from pdf_with_js.printer import Printer
import random
| [
201,
198,
6738,
33480,
31628,
13,
11250,
1330,
4566,
62,
25811,
201,
198,
6738,
33480,
31628,
13,
37390,
1330,
7308,
37233,
201,
198,
201,
198,
6738,
37124,
62,
4480,
62,
8457,
13,
1050,
3849,
1330,
1736,
3849,
201,
198,
11748,
4738,
... | 3.27907 | 43 |
from ..algorithms.classify import trained_model
| [
6738,
11485,
282,
7727,
907,
13,
4871,
1958,
1330,
8776,
62,
19849,
628,
628,
628
] | 3.533333 | 15 |
'''
information masking section
'''
import logging
import timeit
from . import encrypt_the_info
from . import null_the_info
def masking_method_selection(start_dataframe, mask_col, mask_method,
                             save_to_file, masked_file, logger):
    '''
    Basic check that all input is properly provided and filtering through the
    various options if no error occurs. Logging and timer handled here as well.

    Arguments:
        start_dataframe: the dataframe to mask
        mask_col(list): list of column numbers for the attributes to mask
        mask_method(str): the way the attributes should be masked; one of
            'encrypt' or 'replace'
        save_to_file(bool): true to save the dataframe to temporary file
        masked_file(str): the file name for the output file
        logger: custom logging function

    Returns:
        dataframe with masked properties, or False when an unsupported
        mask_method was provided
    '''
    total_mask_time_start = timeit.default_timer()
    # Lazy %-style arguments keep formatting cheap when the level is disabled.
    logger.info('running masking method : %s on columns : %s',
                mask_method, mask_col)
    logger.info('dataframe before masking : %s', start_dataframe.shape)
    # should be a list with selection in the future
    if mask_method == 'encrypt':
        start_dataframe = encrypt_the_info.encrypt_the_proper_columns(
            start_dataframe, mask_col)
    elif mask_method == 'replace':
        start_dataframe = null_the_info.null_the_proper_columns(
            start_dataframe, mask_col)
    else:
        # An unsupported method is a caller error: log at ERROR level (the
        # original logged this at INFO, which hid the failure) and bail out.
        logger.error('improper masking method provided : %s', mask_method)
        return False
    # logging the outcome
    logger.info('dataframe after masking : %s', start_dataframe.shape)
    # saving to file if that option was set to True
    if save_to_file:
        start_dataframe.to_csv(masked_file, index=False, header=False)
    total_mask_time_stop = timeit.default_timer()
    # logging the execution time
    logger.info(' Total masking time is:%s',
                total_mask_time_stop - total_mask_time_start)
    return start_dataframe
7061,
6,
198,
17018,
9335,
278,
2665,
198,
7061,
6,
198,
11748,
18931,
198,
11748,
640,
270,
198,
6738,
764,
1330,
34117,
62,
1169,
62,
10951,
198,
6738,
764,
1330,
9242,
62,
1169,
62,
10951,
628,
198,
4299,
9335,
278,
62,
24396,
62... | 2.681758 | 751 |
__author__ = 'mason'
from domain_orderFulfillment import *
from timer import DURATION
from state import state
import numpy as np
'''
Several objects to choose from, need to consider weights
Same as problem 4 but only 1 robot
'''
# Per-action wall-clock durations used by the simulator.
# NOTE(review): the GetCostOf* callables come in via the star import from
# domain_orderFulfillment -- confirm their signatures there.
DURATION.TIME = {
    'lookupDB': GetCostOfLookup,
    'wrap': GetCostOfWrap,
    'pickup': GetCostOfPickup,
    'putdown': GetCostOfPutdown,
    'loadMachine': GetCostOfLoad,
    'moveRobot': GetCostOfMove,
    'acquireRobot': 1,
    'freeRobot': 1,
    'wait': 5
}
# Counter costs mirror the wall-clock durations above.
DURATION.COUNTER = {
    'lookupDB': GetCostOfLookup,
    'wrap': GetCostOfWrap,
    'pickup': GetCostOfPickup,
    'putdown': GetCostOfPutdown,
    'loadMachine': GetCostOfLoad,
    'moveRobot': GetCostOfMove,
    'acquireRobot': 1,
    'freeRobot': 1,
    'wait': 5
}
# Seven locations forming a single chain 1-2-3-4-5-6-7 (see GROUND_EDGES).
rv.LOCATIONS = [1, 2, 3, 4, 5, 6, 7]
# All locations belong to one factory, so the union is just that factory.
rv.FACTORY1 = frozenset({1, 2, 3, 4, 6, 7, 5})
rv.FACTORY_UNION = rv.FACTORY1
# The factory's shipping dock is at location 4.
rv.SHIPPING_DOC = {rv.FACTORY1: 4}
# Adjacency list and edge weights (keys are (lower, higher) location pairs).
rv.GROUND_EDGES = {1: [2], 2: [1, 3], 3: [2, 4], 4: [3, 5], 5: [4, 6], 6: [5, 7], 7: [6]}
rv.GROUND_WEIGHTS = {(1,2): 1, (2,3): 1, (3,4): 5, (4,5): 8, (5,6): 5, (6,7): 1}
# Same setup as problem 4 but with only one robot (see module docstring).
rv.ROBOTS = {'r1': rv.FACTORY1}
rv.ROBOT_CAPACITY = {'r1': 10}
rv.MACHINES = {'m1': rv.FACTORY1}
rv.PALLETS = {'p1'}
# Two orders, each requesting the same two item types in different order.
tasks = {
    1: [['orderStart', ['type1', 'type2']]],
    2: [['orderStart', ['type2', 'type1']]],
}
# No exogenous environment events in this problem.
eventsEnv = {
}
834,
9800,
834,
796,
705,
76,
888,
6,
198,
198,
6738,
7386,
62,
2875,
37,
4754,
359,
434,
1330,
1635,
198,
6738,
19781,
1330,
360,
4261,
6234,
198,
6738,
1181,
1330,
1181,
198,
11748,
299,
32152,
355,
45941,
198,
198,
7061,
6,
198,
... | 2.045662 | 657 |
"""Track visualization"""
from matplotlib import pyplot as plt
def plot_trj(
    trj,
    coords=None,
    ax=None,
    scale=None,
    line_fmt="x:",
    line_color=None,
    line_label="Trajectory",
    line_width=None,
    marker_size=None,
    alpha=None,
    start_end=(True, True),
):
    """Plot a single track (trajectory) into a matplotlib axes.

    Args:
        trj (pandas.DataFrame): track to plot; must expose the coordinate
            columns named in ``coords`` (or a ``trj.coords`` attribute when
            ``coords`` is not given).
        coords (list, optional): names of the x/y coordinate columns.
            Defaults to ``trj.coords``.
        ax (matplotlib.axes.Axes, optional): axes to plot in. Defaults to
            the current axes (``plt.gca()``).
        scale (float, optional): length of the scale bar drawn below the
            track; no scale bar is drawn when None. Defaults to None.
        line_fmt (str, optional): matplotlib format string. Defaults to "x:".
        line_color (str, optional): line color; None keeps the matplotlib
            default. Defaults to None.
        line_label (str, optional): legend label. Defaults to "Trajectory".
        line_width (float, optional): line width. Defaults to None.
        marker_size (float, optional): marker size. Defaults to None.
        alpha (float, optional): opacity. Defaults to None.
        start_end (tuple, optional): whether to mark the start (green dot)
            and end (red dot) of the track. Defaults to (True, True).
    """
    # Fall back to the current axes / the track's own coordinate columns.
    if not ax:
        ax = plt.gca()
    if not coords:
        coords = trj.coords
    ax.plot(
        *(trj[coords].values.T),
        line_fmt,
        color=line_color,
        label=line_label,
        lw=line_width,
        markersize=marker_size,
        alpha=alpha
    )
    # Green dot marks the first point, red dot the last.
    if start_end[0]:
        ax.plot(*trj[coords].iloc[0].T, "o", color="lightgreen")
    if start_end[1]:
        ax.plot(*trj[coords].iloc[-1].T, "o", color="red")
    ax.axis("off")
    # Draw a horizontal scale bar of length `scale`, centered in x and
    # placed 3 units below the lowest track point.
    if scale is not None:
        ax.plot(
            [trj[coords[0]].mean() - scale / 2, trj[coords[0]].mean() + scale / 2],
            [trj[coords[1]].min() - 3, trj[coords[1]].min() - 3],
            "k-",
            lw=3,
        )
    ax.set_aspect(1.0)
| [
37811,
24802,
32704,
37811,
198,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
628,
198,
4299,
7110,
62,
2213,
73,
7,
198,
220,
220,
220,
491,
73,
11,
198,
220,
220,
220,
763,
3669,
28,
14202,
11,
198,
220,
220,
... | 2.083135 | 842 |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Iterator
# Third-party imports
import numpy as np
# First-party imports
from gluonts.core.component import validated
from gluonts.dataset.common import Dataset
from gluonts.model.estimator import Estimator
from gluonts.model.forecast import Forecast, SampleForecast
from gluonts.model.predictor import RepresentablePredictor
class IdentityPredictor(RepresentablePredictor):
"""
A `Predictor` that uses the last `prediction_length` observations
to predict the future.
Parameters
----------
prediction_length
Prediction horizon.
freq
Frequency of the predicted data.
num_samples
Number of samples to include in the forecasts. Not that the samples
produced by this predictor will all be identical.
"""
@validated()
class ConstantPredictor(RepresentablePredictor):
"""
A `Predictor` that always produces the same forecast.
Parameters
----------
samples
Samples to use to construct SampleForecast objects for every
prediction.
freq
Frequency of the predicted data.
"""
@validated()
class MeanPredictor(RepresentablePredictor):
"""
A :class:`Predictor` that predicts the mean of the last `context_length`
elements of the input target.
Parameters
----------
context_length
Length of the target context used to condition the predictions.
prediction_length
Length of the prediction horizon.
num_eval_samples
Number of samples to use to construct :class:`SampleForecast` objects
for every prediction.
freq
Frequency of the predicted data.
"""
@validated()
class MeanEstimator(Estimator):
"""
An `Estimator` that computes the mean targets in the training data,
in the trailing `prediction_length` observations, and produces
a `ConstantPredictor` that always predicts such mean value.
Parameters
----------
prediction_length
Prediction horizon.
freq
Frequency of the predicted data.
num_samples
Number of samples to include in the forecasts. Not that the samples
produced by this predictor will all be identical.
"""
@validated()
| [
2,
15069,
2864,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
11074,
198,
2,
921,
743,
407,
779,
428,
2393,
... | 3.12404 | 911 |
import pyfoursquare as foursquare
# == OAuth2 Authentication ==
#
# NOTE: this is a Python 2 example script (print statements, raw_input).
#
# This mode of authentication is the required one for Foursquare.
# The client id and client secret can be found on your application's Details
# page located at https://foursquare.com/oauth/
# SECURITY NOTE(review): real-looking credentials are hard-coded below --
# they should be revoked and loaded from the environment/config instead.
client_id = "E50NJYAFUAPXPAKU5XQNBTXPGKRRSNUGAYWTUUH3RKJ22HH4"
client_secret = "3LQHT1LGX2MVUXKRNLY0ZFKNWIXKNNQDTLYD5UFX4WPAF0GM"
callback = 'http://127.0.0.1:8000/'
auth = foursquare.OauthHandler(client_id, client_secret, callback)
# First, redirect the user who wishes to authenticate.
# This builds the authorization URL for your app.
auth_url = auth.get_authorization_url()
print 'Please authorize: ' + auth_url
# If the user accepts, the browser is redirected back to your registered
# REDIRECT_URI with a code appended:
# https://YOUR_REGISTERED_REDIRECT_URI/?code=CODE
code = raw_input('The code: ').strip()
# Now your server exchanges the code for an access token. You can save it
# to grant your app future access on behalf of this user.
access_token = auth.get_access_token(code)
print 'Your access token is ' + access_token
# Now let's create an API client bound to the authenticated session.
api = foursquare.API(auth)
# Now you can access the Foursquare API!
result = api.venues_search(query='Burburinho', ll='-8.063542,-34.872891')
# Each result is wrapped in a model object.
print dir(result[0])
# Access all its attributes.
print result[0].name
"""
If you already have the access token for this user, run only the setup above
(through creating the OauthHandler), fetch the stored access token for this
user from your database, and set it directly:

    auth.set_access_token('ACCESS_TOKEN')

Then continue from the `api = foursquare.API(auth)` line.
"""
| [
11748,
12972,
69,
4662,
421,
533,
355,
1440,
23415,
198,
198,
2,
6624,
440,
30515,
17,
48191,
6624,
198,
2,
198,
2,
770,
4235,
286,
18239,
318,
262,
2672,
530,
329,
376,
4662,
421,
533,
198,
198,
2,
383,
5456,
4686,
290,
5456,
320... | 2.97597 | 541 |
import numpy as np
import matplotlib.pyplot as plt

# Plot the constraint forces, in units of mg, as a function of the angle
# phi over [0, pi/2].
mg = 10

phi = np.linspace(0, np.pi/2, 100)
force_x = 3*mg/2*(np.sin(phi)*np.cos(phi)*3/2-np.cos(phi))
force_y = 3*mg/2*(-np.sin(phi)+(3*np.sin(phi)**2-1)/2) + mg

# Draw both components on the same axes (order fixes the legend order).
for samples, label in ((force_x, r"$F_x$"), (force_y, r"$F_y$")):
    plt.plot(phi, samples, '-', markersize=1, label=label)

plt.title("Constraint forces")
plt.xlabel(r"$\varphi$")
plt.ylabel("mg")
plt.grid()
plt.legend()
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
11296,
796,
838,
198,
198,
87,
4868,
796,
45941,
13,
21602,
10223,
7,
15,
11,
37659,
13,
14415,
14,
17,
11,
3064,
8,
198,
69,
1... | 1.904348 | 230 |
from graphs import __version__
from graphs.graph import Vertix ,Edge,Graph
# Module-level Graph instance.
graph = Graph()
| [
6738,
28770,
1330,
11593,
9641,
834,
198,
6738,
28770,
13,
34960,
1330,
24417,
844,
837,
37021,
11,
37065,
628,
198,
198,
34960,
796,
29681,
3419,
628,
198
] | 3.555556 | 27 |
import torch.utils.data
import torch.nn as nn
def test(model, data_loader, device, loggi, flag):
"""Evaluate model for dataset."""
# set eval state for Dropout and BN layers
model.eval()
# init loss and accuracy
loss_ = 0.0
acc_ = 0.0
acc_domain_ = 0.0
n_total = 0
# set loss function
criterion = nn.CrossEntropyLoss()
# evaluate network
for (images, labels) in data_loader:
images = images.to(device)
labels = labels.to(device) #labels = labels.squeeze(1)
size = len(labels)
if flag == 'target':
labels_domain = torch.ones(size).long().to(device)
else:
labels_domain = torch.zeros(size).long().to(device)
preds, domain = model(images, alpha=0)
loss_ += criterion(preds, labels).item()
pred_cls = preds.data.max(1)[1]
pred_domain = domain.data.max(1)[1]
acc_ += pred_cls.eq(labels.data).sum().item()
acc_domain_ += pred_domain.eq(labels_domain.data).sum().item()
n_total += size
loss = loss_ / n_total
acc = acc_ / n_total
acc_domain = acc_domain_ / n_total
loggi.info("{}: Avg Loss = {:.6f}, Avg Accuracy = {:.2%}, {}/{}, Avg Domain Accuracy = {:2%}".format(flag, loss, acc, acc_, n_total, acc_domain))
return loss, acc, acc_domain
| [
11748,
28034,
13,
26791,
13,
7890,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
198,
4299,
1332,
7,
19849,
11,
1366,
62,
29356,
11,
3335,
11,
2604,
12397,
11,
6056,
2599,
198,
220,
220,
220,
37227,
36,
2100,
4985,
2746,
329,
270... | 2.324607 | 573 |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
from aniso8601.builders import TupleBuilder
from aniso8601.builders.python import PythonTimeBuilder
from aniso8601.date import parse_date
from aniso8601.duration import parse_duration
from aniso8601.exceptions import ISOFormatError
from aniso8601.time import parse_datetime
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
357,
66,
8,
13130,
11,
14328,
31154,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
770,
3788,
743,
307,
9518,
290,
9387,
739,
262,
2846,
198,
2,
28... | 3.482014 | 139 |
import argparse
from jiant.proj.simple import runscript as run
import jiant.scripts.download_data.runscript as downloader
# Command-line entry point: parse arguments and delegate to `main`.
# NOTE(review): `main` is not visible in this excerpt -- confirm it is
# defined/imported in this module before running.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--task_name")
    parser.add_argument("-d", "--data_dir")
    parser.add_argument("-e", "--exp_dir")
    parser.add_argument("-m", "--model_name_or_path")
    parser.add_argument("-c", "--compression_type")
    parser.add_argument("-cc", "--compression_config")
    parser.add_argument("-lr", "--learning_rate", default=1e-5, type=float)
    parser.add_argument("-s", "--seed", default=42, type=int)
    args = parser.parse_args()
    main(
        args.task_name,
        args.data_dir,
        args.exp_dir,
        args.model_name_or_path,
        args.compression_type,
        args.compression_config,
        args.learning_rate,
        args.seed,
    )
| [
11748,
1822,
29572,
198,
198,
6738,
474,
3014,
13,
1676,
73,
13,
36439,
1330,
1057,
12048,
355,
1057,
198,
11748,
474,
3014,
13,
46521,
13,
15002,
62,
7890,
13,
5143,
12048,
355,
4321,
263,
628,
198,
198,
361,
11593,
3672,
834,
6624,
... | 2.362667 | 375 |
from django.contrib import messages
from django.contrib.auth import authenticate, login as dj_login, logout as dj_logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import (
AuthenticationForm,
PasswordChangeForm,
PasswordResetForm,
SetPasswordForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import redirect, render
from django.utils.http import urlsafe_base64_decode
from django.views.decorators.http import (
require_http_methods,
require_POST,
require_safe,
)
from main import forms, models
from oscarator import settings
INTERNAL_RESET_URL_TOKEN = "confirmation"
INTERNAL_RESET_SESSION_TOKEN = "_password_reset_token"
@require_safe
@require_safe
@require_POST
@require_safe
@require_POST
@require_http_methods(["HEAD", "GET", "POST"])
@require_http_methods(["HEAD", "GET", "POST"])
@require_http_methods(["HEAD", "GET", "POST"])
@login_required
@require_http_methods(["HEAD", "GET", "POST"])
@login_required
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
6218,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
8323,
5344,
11,
17594,
355,
42625,
62,
38235,
11,
2604,
448,
355,
42625,
62,
6404,
448,
198,
6738,
42625,
14208,
13,
3642,
822,
13... | 2.926396 | 394 |
# -*- coding: utf-8 -*-
"""Command line tool tester (CLIToolTester)."""
__version__ = '20191217'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
21575,
1627,
2891,
256,
7834,
357,
5097,
2043,
970,
51,
7834,
21387,
15931,
198,
198,
834,
9641,
834,
796,
705,
23344,
1065,
1558,
6,
198
] | 2.45 | 40 |
# Generated by Django 3.2.9 on 2021-12-20 20:36
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
24,
319,
33448,
12,
1065,
12,
1238,
1160,
25,
2623,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import requests
import sys
import time
from functools import wraps
from multiprocessing import Pool
from dci_downloader.fs import create_parent_dir
from dciclient.v1.api.context import build_signature_context
from dciclient.v1.api import component as dci_component
from dciclient.v1.api import topic as dci_topic
from dciclient.v1.api import remoteci as dci_remoteci
FIVE_SECONDS = 5
TEN_SECONDS = 10
# We'll allow 5 seconds to connect & 10 seconds to get an answer
REQUESTS_TIMEOUT = (FIVE_SECONDS, TEN_SECONDS)
@retry()
@retry()
@retry()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
11748,
7007,
198,
11748,
25064,
198,
11748,
640,
198,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
... | 2.682609 | 230 |
# Points Django at this app's AppConfig class.
default_app_config = 'categories.apps.CategoriesConfig'
| [
12286,
62,
1324,
62,
11250,
796,
705,
66,
26129,
13,
18211,
13,
34,
26129,
16934,
6,
198
] | 3.294118 | 17 |
# flake8: noqa
from .rbcz import (
read_statement,
read_statements,
read_statements_from_imap
)
| [
2,
781,
539,
23,
25,
645,
20402,
198,
6738,
764,
81,
15630,
89,
1330,
357,
198,
220,
220,
220,
1100,
62,
26090,
11,
198,
220,
220,
220,
1100,
62,
14269,
3196,
11,
198,
220,
220,
220,
1100,
62,
14269,
3196,
62,
6738,
62,
320,
499... | 2.25 | 48 |
# 1017. Convert to base -2 (negative binary)
#
# 20200801
# huao
# Idea: look at the 1 bits in the odd positions. If such a position holds a
# 1, the base -2 representation differs from the plain binary value by
# 2**(i+1) at that position. Add that difference back in and propagate the
# resulting carries; the binary representation of the adjusted number is
# then the base -2 representation of the original value.
# NOTE(review): `Solution` is not defined in this excerpt -- it must be
# provided elsewhere for this driver code to run.
sol = Solution()
print(sol.baseNeg2(4))
| [
2,
8949,
22,
13,
5525,
112,
253,
12859,
234,
32573,
249,
26344,
114,
164,
121,
105,
162,
235,
95,
198,
2,
198,
2,
1160,
11528,
486,
198,
2,
289,
84,
5488,
198,
2,
5525,
100,
224,
43380,
253,
25001,
229,
46763,
108,
19526,
235,
4... | 0.740741 | 243 |
import typing
from dataclasses import dataclass
from utils.mixins import DataMixin
@dataclass()
| [
11748,
19720,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
198,
6738,
3384,
4487,
13,
19816,
1040,
1330,
6060,
35608,
259,
628,
198,
31,
19608,
330,
31172,
3419,
198
] | 3.193548 | 31 |
import configparser
import jira
class JiraConfig(object):
    """
    Read-only accessor for the ``jira`` section of an ini-style config.

    The parsed configuration is expected on ``self.config`` and must
    contain a ``jira`` section with the keys declared below (enabling
    communication with the jira APIs).
    """

    KEY_SECTION = 'jira'
    KEY_PROJECT = 'project'
    KEY_URL = 'url'
    KEY_USERNAME = 'username'
    KEY_PASSWORD = 'password'
    KEY_TC_WI = "testcase_work_item"
    KEY_QE_TC = "qe_test_coverage"
    KEY_VER_IR = "verified_in_release"

    def _jira_section(self):
        """Return the mapping that holds the jira configuration values."""
        return self.config[JiraConfig.KEY_SECTION]

    @property
    def project(self) -> str:
        """Jira project name read from the configuration."""
        return self._jira_section()[JiraConfig.KEY_PROJECT]

    @property
    def url(self) -> str:
        """Jira server URL read from the configuration."""
        return self._jira_section()[JiraConfig.KEY_URL]

    @property
    def username(self) -> str:
        """Jira user name read from the configuration."""
        return self._jira_section()[JiraConfig.KEY_USERNAME]

    @property
    def password(self) -> str:
        """Jira password read from the configuration."""
        return self._jira_section()[JiraConfig.KEY_PASSWORD]

    @property
    def test_case_work_item_custom_field(self) -> str:
        """Custom-field id used for the test case work item."""
        return self._jira_section()[JiraConfig.KEY_TC_WI]

    @property
    def qe_test_coverage_custom_field(self) -> str:
        """Custom-field id used for QE test coverage."""
        return self._jira_section()[JiraConfig.KEY_QE_TC]

    @property
    def verified_release_custom_field(self) -> str:
        """Custom-field id for 'verified in release'; None when unset/empty."""
        return self._jira_section()[JiraConfig.KEY_VER_IR] or None
| [
11748,
4566,
48610,
198,
11748,
474,
8704,
628,
198,
4871,
449,
8704,
16934,
7,
15252,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
32909,
295,
16934,
6870,
1366,
326,
1276,
307,
2810,
832,
198,
220,
220,
4566,
357,
5362,
8,
2393,
... | 2.337931 | 870 |
import ensurepip
# Bootstrap/upgrade pip from the bundled wheel when run as a script.
if __name__ == "__main__":
    ensurepip._main()
| [
11748,
4155,
79,
541,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
4155,
79,
541,
13557,
12417,
3419,
198
] | 2.481481 | 27 |
# -*- coding: utf-8 -*-
from trytond.model import fields
from trytond.pool import PoolMeta
__metaclass__ = PoolMeta
__all__ = ['SaleConfiguration']
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
1949,
83,
623,
13,
19849,
1330,
7032,
198,
6738,
1949,
83,
623,
13,
7742,
1330,
19850,
48526,
198,
198,
834,
4164,
330,
31172,
834,
796,
19850,
48526,
198,
834,
... | 2.830189 | 53 |
import cv2
import numpy as np
import pycocotools.mask as mask_util
from matplotlib.pyplot import contour
__all__ = [
"mask_to_polygon",
"polygons_to_mask",
"area",
"bbox",
"coco_poygons_to_mask",
]
def mask_to_polygon(
mask, min_score: float = 0.5, approx: float = 0.0, relative: bool = True
):
"""generate polygons from masks
Args:
mask (np.ndarray): a binary mask
min_score (float, optional): [description]. Defaults to 0.5.
approx (float, optional): it approximate the polygons to reduce the number of points. Defaults to 0.0
relative (bool, optional): it the value of the approximation is computed on the relative amount of point or with respect to all the points
Returns:
[type]: [description]
"""
mask = (mask > min_score).astype(np.uint8)
mask = cv2.copyMakeBorder(mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0)
contours, hierarchy = cv2.findContours(
mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE, offset=(-1, -1)
)
polygons = []
for cnt in contours:
if approx > 0:
if relative:
epsilon = approx * cv2.arcLength(cnt, True)
else:
epsilon = approx
approx_poly = cv2.approxPolyDP(cnt, epsilon, True)
else:
approx_poly = cnt
# we need to draw a least a box
if len(approx_poly) >= 4:
approx_flattened = approx_poly.flatten().tolist()
polygons.append(approx_flattened)
return polygons
def polygons_to_mask(polygons, height, width):
"""convert polygons to mask. Filter all the polygons with less than 4 points
Args:
polygons ([type]): [description]
height ([type]): [description]
width ([type]): [description]
Returns:
[type]: a mask of format num_classes, heigth, width
"""
polygons = [polygon for polygon in polygons if len(polygon) >= 8]
if len(polygons) == 0:
return np.zeros((height, width), np.uint8)
rle = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rle)
return mask_util.decode(rle)[:, :]
def bbox_from_mask(mask):
"""return the bounding box from the given mask
Args:
mask ([type]): [description]
Returns:
List: a list of format [x_min, y_min, w, h]
"""
pairs = np.argwhere(mask == True)
if len(pairs) == 0:
return None, None, None, None
min_row = min(pairs[:, 0])
max_row = max(pairs[:, 0])
min_col = min(pairs[:, 1])
max_col = max(pairs[:, 1])
w = max_col - min_col
h = max_row - min_row
return [float(min_col), float(min_row), float(w), float(h)]
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
66,
420,
313,
10141,
13,
27932,
355,
9335,
62,
22602,
198,
6738,
2603,
29487,
8019,
13,
9078,
29487,
1330,
542,
454,
198,
198,
834,
439,
834,
796,
685,
198,
... | 2.264657 | 1,194 |
MOUNT_PATH = "" # in case you are mounting data storage externally
SPLIT = 'training'
KITTI_WORK_DIR = "./"
KITTI_DATA_DIR = "../input/kitti-3d-object-detection-dataset"
NUSCENES_WORK_DIR = MOUNT_PATH + "/storage/slurm/kimal/eagermot_workspace/nuscenes"
NUSCENES_DATA_DIR = MOUNT_PATH + "/storage/slurm/kimal/datasets_original/nuscenes"
| [
44,
28270,
62,
34219,
796,
13538,
220,
1303,
287,
1339,
345,
389,
17260,
1366,
6143,
45107,
198,
4303,
43,
2043,
796,
705,
34409,
6,
198,
198,
42,
22470,
40,
62,
33249,
62,
34720,
796,
366,
19571,
1,
198,
42,
22470,
40,
62,
26947,
... | 2.411348 | 141 |
"""Functions which help end users define customize node_match and
edge_match functions to use during isomorphism checks.
"""
from itertools import permutations
import types
import networkx as nx
__all__ = ['categorical_node_match',
'categorical_edge_match',
'categorical_multiedge_match',
'numerical_node_match',
'numerical_edge_match',
'numerical_multiedge_match',
'generic_node_match',
'generic_edge_match',
'generic_multiedge_match',
]
def copyfunc(f, name=None):
"""Returns a deepcopy of a function."""
try:
return types.FunctionType(f.func_code, f.func_globals, name or f.name,
f.func_defaults, f.func_closure)
except AttributeError:
return types.FunctionType(f.__code__, f.__globals__, name or f.name,
f.__defaults__, f.__closure__)
def allclose(x, y, rtol=1.0000000000000001e-05, atol=1e-08):
"""Returns True if x and y are sufficiently close, elementwise.
Parameters
----------
rtol : float
The relative error tolerance.
atol : float
The absolute error tolerance.
"""
# assume finite weights, see numpy.allclose() for reference
for xi, yi in zip(x,y):
if not ( abs(xi-yi) <= atol + rtol * abs(yi) ):
return False
return True
def close(x, y, rtol=1.0000000000000001e-05, atol=1e-08):
"""Returns True if x and y are sufficiently close.
Parameters
----------
rtol : float
The relative error tolerance.
atol : float
The absolute error tolerance.
"""
# assume finite weights, see numpy.allclose() for reference
return abs(x-y) <= atol + rtol * abs(y)
categorical_doc = """
Returns a comparison function for a categorical node attribute.
The value(s) of the attr(s) must be hashable and comparable via the ==
operator since they are placed into a set([]) object. If the sets from
G1 and G2 are the same, then the constructed function returns True.
Parameters
----------
attr : string | list
The categorical node attribute to compare, or a list of categorical
node attributes to compare.
default : value | list
The default value for the categorical node attribute, or a list of
default values for the categorical node attributes.
Returns
-------
match : function
The customized, categorical `node_match` function.
Examples
--------
>>> import networkx.algorithms.isomorphism as iso
>>> nm = iso.categorical_node_match('size', 1)
>>> nm = iso.categorical_node_match(['color', 'size'], ['red', 2])
"""
categorical_edge_match = copyfunc(categorical_node_match, 'categorical_edge_match')
# Docstrings for categorical functions.
categorical_node_match.__doc__ = categorical_doc
categorical_edge_match.__doc__ = categorical_doc.replace('node', 'edge')
tmpdoc = categorical_doc.replace('node', 'edge')
tmpdoc = tmpdoc.replace('categorical_edge_match', 'categorical_multiedge_match')
categorical_multiedge_match.__doc__ = tmpdoc
numerical_doc = """
Returns a comparison function for a numerical node attribute.
The value(s) of the attr(s) must be numerical and sortable. If the
sorted list of values from G1 and G2 are the same within some
tolerance, then the constructed function returns True.
Parameters
----------
attr : string | list
The numerical node attribute to compare, or a list of numerical
node attributes to compare.
default : value | list
The default value for the numerical node attribute, or a list of
default values for the numerical node attributes.
rtol : float
The relative error tolerance.
atol : float
The absolute error tolerance.
Returns
-------
match : function
The customized, numerical `node_match` function.
Examples
--------
>>> import networkx.algorithms.isomorphism as iso
>>> nm = iso.numerical_node_match('weight', 1.0)
>>> nm = iso.numerical_node_match(['weight', 'linewidth'], [.25, .5])
"""
numerical_edge_match = copyfunc(numerical_node_match, 'numerical_edge_match')
# Docstrings for numerical functions.
numerical_node_match.__doc__ = numerical_doc
numerical_edge_match.__doc__ = numerical_doc.replace('node', 'edge')
tmpdoc = numerical_doc.replace('node', 'edge')
tmpdoc = tmpdoc.replace('numerical_edge_match', 'numerical_multiedge_match')
numerical_multiedge_match.__doc__ = tmpdoc
generic_doc = """
Returns a comparison function for a generic attribute.
The value(s) of the attr(s) are compared using the specified
operators. If all the attributes are equal, then the constructed
function returns True.
Parameters
----------
attr : string | list
The node attribute to compare, or a list of node attributes
to compare.
default : value | list
The default value for the node attribute, or a list of
default values for the node attributes.
op : callable | list
The operator to use when comparing attribute values, or a list
of operators to use when comparing values for each attribute.
Returns
-------
match : function
The customized, generic `node_match` function.
Examples
--------
>>> from operator import eq
>>> from networkx.algorithms.isomorphism.matchhelpers import close
>>> from networkx.algorithms.isomorphism import generic_node_match
>>> nm = generic_node_match('weight', 1.0, close)
>>> nm = generic_node_match('color', 'red', eq)
>>> nm = generic_node_match(['weight', 'color'], [1.0, 'red'], [close, eq])
"""
generic_edge_match = copyfunc(generic_node_match, 'generic_edge_match')
def generic_multiedge_match(attr, default, op):
"""Returns a comparison function for a generic attribute.
The value(s) of the attr(s) are compared using the specified
operators. If all the attributes are equal, then the constructed
function returns True. Potentially, the constructed edge_match
function can be slow since it must verify that no isomorphism
exists between the multiedges before it returns False.
Parameters
----------
attr : string | list
The edge attribute to compare, or a list of node attributes
to compare.
default : value | list
The default value for the edge attribute, or a list of
default values for the dgeattributes.
op : callable | list
The operator to use when comparing attribute values, or a list
of operators to use when comparing values for each attribute.
Returns
-------
match : function
The customized, generic `edge_match` function.
Examples
--------
>>> from operator import eq
>>> from networkx.algorithms.isomorphism.matchhelpers import close
>>> from networkx.algorithms.isomorphism import generic_node_match
>>> nm = generic_node_match('weight', 1.0, close)
>>> nm = generic_node_match('color', 'red', eq)
>>> nm = generic_node_match(['weight', 'color'],
... [1.0, 'red'],
... [close, eq])
...
"""
# This is slow, but generic.
# We must test every possible isomorphism between the edges.
if nx.utils.is_string_like(attr):
else:
attrs = list(zip(attr, default)) # Python 3
return match
# Docstrings for numerical functions.
generic_node_match.__doc__ = generic_doc
generic_edge_match.__doc__ = generic_doc.replace('node', 'edge')
| [
37811,
24629,
2733,
543,
1037,
886,
2985,
8160,
24184,
10139,
62,
15699,
290,
201,
198,
14907,
62,
15699,
5499,
284,
779,
1141,
318,
25831,
1042,
8794,
13,
201,
198,
37811,
201,
198,
6738,
340,
861,
10141,
1330,
9943,
32855,
201,
198,
... | 2.740447 | 2,774 |
# Copyright (C) 2019-2020 HERE Europe B.V.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# License-Filename: LICENSE
"""This module defines API exceptions."""
class AuthenticationError(Exception):
"""Exception raised when authentication fails."""
pass
class ApiError(Exception):
"""Exception raised for API HTTP response status codes not in [200...300).
The exception value will be the response object returned by :mod:`requests`
which provides access to all its attributes, eg. :attr:`status_code`,
:attr:`reason` and :attr:`text`, etc.
Example:
>>> try:
>>> api = HubApi(credentials="MY-XYZ-TOKEN")
>>> api.get("/hub/nope").json()
>>> except ApiError as e:
>>> resp = e.value.args[0]
>>> if resp.status_code == 404 and resp.reason == "Not Found":
>>> ...
"""
def __str__(self):
"""Return a string from the HTTP response causing the exception.
The string simply lists the repsonse's status code, reason and text
content, separated with commas.
"""
resp = self.args[0]
return f"{resp.status_code}, {resp.reason}, {resp.text}"
class TooManyRequestsException(Exception):
"""Exception raised for API HTTP response status code 429.
This is a dedicated exception to be used with the `backoff` package, because
it requires a specific exception class.
The exception value will be the response object returned by :mod:`requests`
which provides access to all its attributes, eg. :attr:`status_code`,
:attr:`reason` and :attr:`text`, etc.
"""
def __str__(self):
"""Return a string from the HTTP response causing the exception.
The string simply lists the repsonse's status code, reason and text
content, separated with commas.
"""
resp = self.args[0]
return f"{resp.status_code}, {resp.reason}, {resp.text}"
| [
2,
15069,
357,
34,
8,
13130,
12,
42334,
15698,
2031,
347,
13,
53,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 3.043103 | 812 |
import configargparse
import logging
import os
class StoreLoggingLevelAction(configargparse.Action):
"""This class converts string into logging level
"""
LEVELS = {
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG,
'NOTSET': logging.NOTSET
}
CHOICES = list(LEVELS.keys()) + [str(_) for _ in LEVELS.values()]
def __call__(self, parser, namespace, value, option_string=None):
"""This function gets the key 'value' in the LEVELS, or just uses value
"""
level = StoreLoggingLevelAction.LEVELS.get(value, value)
setattr(namespace, self.dest, level)
class CheckPathAction(configargparse.Action):
"""This class checks file path, if not exits, then create dir(file)
"""
def __call__(self, parser, namespace, value, option_string=None):
"""This function checks file path, if not exits, then create dir(file)
"""
parent_path = os.path.dirname(value)
if not os.path.exists(parent_path):
os.makedirs(parent_path)
setattr(namespace, self.dest, value)
| [
11748,
4566,
853,
29572,
198,
11748,
18931,
198,
11748,
28686,
628,
198,
4871,
9363,
11187,
2667,
4971,
12502,
7,
11250,
853,
29572,
13,
12502,
2599,
198,
220,
220,
220,
37227,
1212,
1398,
26161,
4731,
656,
18931,
1241,
198,
220,
220,
2... | 2.510373 | 482 |
# Copyright 2015, eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext as _
from neutron_lbaas_dashboard import api
from create_lb import * # noqa
INDEX_URL = "horizon:projects:loadbalancersv2:index"
READ_ONLY = {'readonly': 'readonly'}
| [
2,
220,
220,
220,
15069,
1853,
11,
21698,
3457,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
... | 3.188462 | 260 |
## @ CfgDataTool.py
#
# Copyright (c) 2017 - 2020, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import sys
import collections
sys.dont_write_bytecode = True
from IfwiUtility import *
from CommonUtility import *
CFGDATA_INT_GUID = b'\xD0\x6C\x6E\x01\x34\x48\x7E\x4C\xBC\xFE\x41\xDF\xB8\x8A\x6A\x6D'
if __name__ == '__main__':
sys.exit(Main())
| [
2235,
2488,
327,
40616,
6601,
25391,
13,
9078,
198,
2,
198,
2,
15069,
357,
66,
8,
2177,
532,
12131,
11,
8180,
10501,
13,
1439,
2489,
10395,
29847,
11473,
29,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
347,
10305,
12,
17,
1... | 2.188172 | 186 |
import win32serviceutil
import win32service
import win32event
import servicemanager
from eve_service import EveService
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(EveWindowsService)
| [
11748,
1592,
2624,
15271,
22602,
198,
11748,
1592,
2624,
15271,
198,
11748,
1592,
2624,
15596,
198,
11748,
37756,
8463,
3536,
198,
198,
6738,
28001,
62,
15271,
1330,
12882,
16177,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
1241... | 3.409836 | 61 |
# Copyright (c) 2020 Marco Mangan <marco.mangan@gmail.com>
# License: BSD 3 clause
from dyrapy.datasets import load_ouvidoria
| [
2,
15069,
357,
66,
8,
12131,
16556,
27609,
272,
1279,
3876,
1073,
13,
76,
37089,
31,
14816,
13,
785,
29,
198,
2,
13789,
25,
347,
10305,
513,
13444,
198,
198,
6738,
20268,
2416,
88,
13,
19608,
292,
1039,
1330,
3440,
62,
280,
16921,
... | 2.844444 | 45 |
import os
import glob
import json
import tflit
import pytest
import numpy as np
model_dir = os.path.join(os.path.dirname(__file__), 'models')
model_file = os.path.join(model_dir, '{}.tflite')
model_info_file = os.path.join(model_dir, '{}.json')
@pytest.mark.parametrize('name', [
os.path.splitext(os.path.basename(f))[0]
for f in glob.glob(model_file.format('*'))
])
# Utilities
| [
11748,
28686,
198,
11748,
15095,
198,
11748,
33918,
198,
11748,
256,
2704,
270,
198,
11748,
12972,
9288,
198,
11748,
299,
32152,
355,
45941,
628,
198,
19849,
62,
15908,
796,
28686,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,... | 2.418182 | 165 |
import argparse
from core.game_looper import GameLooper
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run the Adversarial Game Server.")
parser.add_argument('--port', '--p', type=int, default=8080, help='Port to run the server on')
parser.add_argument('--game-file', default='sample/advshort.txt',
help='The game layout file to be loaded.')
args = parser.parse_args()
game = GameLooper('', args.port, args.game_file)
game.run_game_loop()
| [
11748,
1822,
29572,
198,
198,
6738,
4755,
13,
6057,
62,
5439,
3575,
1330,
3776,
27654,
3575,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
30751,
796,
1822,
29572,
13,
28100,
1713,
46677,
7,
11... | 2.712042 | 191 |
"""
Functions related user logins, signups, password authentications,
logouts, etc ...
"""
from lib_db import User, UserID, Group, VerifyUser
from google.appengine.api import mail
import hashlib
import random
import re
import string
import stripe
PASS_RE = re.compile(r"^.{3,20}$")
EMAIL_RE = re.compile(r"^[\S]+@[\S]+\.[\S]+$")
# Define an exception for authentication errors
def get_cookie_string(email):
"""
Creates a cookie string to use for authenticating users.
user_id|encrypted_password
"""
user = User.all().filter("email =", email).fetch(1)[0]
name = 'user'
value = str(user.user_id) + '|' + str(user.password)
return '%s=%s; Path=/' % (name, value)
def make_salt():
"""
Create a random string to salt up the encryption
"""
return ''.join(
[random.choice(string.letters + string.digits)
for i in range(10)])
def encrypt_password(password, salt):
"""
Encrypts a password with sha256, but should be upgraded to bcrypt
once google has that python library in app engine.
"""
return hashlib.sha256(password + salt).hexdigest()
def make_userid():
"""
Generates the next user id number from the database.
"""
uid = UserID.all().fetch(1)
if not len(uid):
uid = UserID(next_id=1)
else:
uid = uid[0]
# update ids
current_id = uid.next_id
next_id = current_id + 1
uid.next_id = next_id
uid.put()
return current_id
def signup(email, password, parent=None):
"""
Checks for valid inputs then adds a user to the User database.
"""
exists = User.all().ancestor(parent).filter("email =", email)
if (exists.fetch(1)):
raise AuthExcept("Account Exists")
if not EMAIL_RE.match(email):
raise AuthExcept("Invalid Email")
if not PASS_RE.match(password):
raise AuthExcept("Invalid Password")
salt = make_salt()
encrypted_password = encrypt_password(password, salt)
temp_id = hashlib.sha256(make_salt()).hexdigest()
# Set up groups. See if the email domain exists
groups = ['public']
domain = email.split('@')[1]
g = Group.all().ancestor(parent).filter("name =", domain).fetch(1)
if g:
groups.append(domain)
user = VerifyUser(email=email, password=encrypted_password,
salt=salt, temp_id=temp_id,
group=groups, parent=parent)
user.put()
print("http://modelr.io/verify_email?user_id=%s" %
str(user.temp_id))
mail.send_mail(sender="Hello <hello@modelr.io>",
to="<%s>" % user.email,
subject="Modelr email verification",
body="""
Welcome to Modelr!
We need to verify your email address. Click the link below to validate your account and continue to billing.
http://modelr.io/verify_email?user_id=%s
Cheers,
Matt, Evan, and Ben
""" % str(user.temp_id))
return temp_id
def verify_signup(user_id, parent):
"""
Checks that a user id is in the queue to be added. The temporary
user id is sent through email verification. Raises a AuthExcept if
the id is invalid, otherwise returns the temporary user object
from the database.
:param user_id: User id from email verification
:param parent: Ancestor database of the temporary user
:returns the temporary user object.
"""
u = VerifyUser.all().ancestor(parent).filter("temp_id =", user_id)
verified_user = u.fetch(1)
# Check for success
if not verified_user:
raise AuthExcept("Verification Failed")
return verified_user[0]
def initialize_user(email, stripe_id, parent, tax_code, price, tax):
"""
Takes a verified user email from the authentication queue and adds
it to the permanent database with a stripe id.
:param verified_email: email of the verified user to add.
:param stripe_id: The stripe customer id of the user.
:param parent: The ancestor database key to use for the database.
:param tax_code: The tax code for the user
(province abbrieviation)
"""
verified_filter = VerifyUser.all()\
.ancestor(parent)\
.filter("email =", email)
verified_user = verified_filter.fetch(1)
if not verified_user:
raise AuthExcept("verification failed")
verified_user = verified_user[0]
# Make new user and populate
user = User(parent=parent)
user.user_id = make_userid()
user.email = verified_user.email
user.password = verified_user.password
user.salt = verified_user.salt
user.group = verified_user.group
user.stripe_id = stripe_id
user.tax_code = tax_code
for group in user.group:
g = Group.all().ancestor(parent).filter("name =",
group).fetch(1)
g[0].allowed_users.append(user.user_id)
g[0].put()
user.put()
# remove the temporary user from the queue
verified_user.delete()
# send a payment confirmation email
mail.send_mail(sender="Hello <hello@modelr.io>",
to="<%s>" % user.email,
subject="Modelr subscription confirmation",
body="""
Welcome to Modelr!
You are now subscribed to Modelr! Your receipt is below.
To unsubscribe, please reply to this email or log in to Modelr and check your user settings.
Cheers,
Matt, Evan, and Ben
=======================
modelr.io
=======================
Monthly fee USD{0:.2f}
Sales tax USD{1:.2f}
Total USD{2:.2f}
========================
Modelr is a product of
Agile Geoscience Ltd
Nova Scotia - Canada
Canada Revenue Agency
reg # 840217913RT0001
========================
""".format(price/100., tax/100., (price+tax)/100.))
def signin(email, password, parent):
"""
Checks if a email and password are valid. Will throw a AuthExcept
if they are not.
"""
user = User.all().ancestor(parent).filter("email =",
email).fetch(1)
if not user:
raise AuthExcept('invalid email')
user = user[0]
encrypted_password = encrypt_password(password, user.salt)
if not encrypted_password == user.password:
raise AuthExcept('invalid password')
def verify(userid, password, ancestor):
"""
Verifies that the userid and encrypted password from a cookie
match the database
"""
try:
user = User.all().ancestor(ancestor)\
.filter("user_id =",
int(userid)).fetch(1)[0]
verified = (user.password == password)
return user
except IndexError:
verified = False
def authenticate(func):
"""
Wrapper function for methods that require a logged in
user
"""
return authenticate_and_call
def send_message(subject, message):
"""
Sends us a message from a user or non-user.
"""
# send the message
mail.send_mail(sender="Hello <hello@modelr.io>",
to="hello@modelr.io",
subject=subject,
body=message)
def forgot_password(email, parent):
"""
Sets a new password after the user forgot it.
"""
user = User.all().ancestor(parent).filter("email =",
email).fetch(1)
if not user:
raise AuthExcept('invalid email')
user = user[0]
new = generate_password()
# send a new password email
mail.send_mail(sender="Hello <hello@modelr.io>",
to="<%s>" % user.email,
subject="Modelr password reset",
body="""
Here's your new password!
%s
Please sign in with this new password, and then change it in your
profile page.
http://modelr.io/signin?redirect=settings
Cheers,
Matt, Evan, and Ben
""" % new
)
# Change it in the database
user.password = encrypt_password(new, user.salt)
user.put()
def reset_password(user, current_pword, new_password,
verify):
"""
Resets the password at the user's request.
:param user: The user database object requesting the password
change.
:param current_pword: The user's current password to verify.
:param new_password: The user's new password.
:param verify: The new password verification.
"""
# This check should be done in the javascript on the page
if new_password != verify:
raise AuthExcept("New password verification failed")
# Check if the original password matches the database
if encrypt_password(current_pword, user.salt) != user.password:
raise AuthExcept("Incorrect password")
# Update the password in the database
user.password = encrypt_password(new_password, user.salt)
# Save it in the database
user.put()
def cancel_subscription(user):
"""
Delete the user. See notes in DeleteHandler() in main.py
"""
try:
stripe_customer = stripe.Customer.retrieve(user.stripe_id)
# Check for extra invoices, ie Taxes, that also need
# to be cancelled.
invoice_items = stripe.InvoiceItem.all(customer=stripe_customer)
for invoice in invoice_items.data:
invoice_id = invoice["id"]
# get the invoice and delete it if we can
invoice_obj = stripe.InvoiceItem.retrieve(invoice_id)
try:
invoice_obj.delete()
except:
msg = """
invoice # {0} not deleted from stripe id {1}
""".format(invoice_id, user.stripe_id)
send_message("invoice not deleted",
msg)
sub_id = stripe_customer.subscriptions["data"][0]["id"]
stripe_customer.subscriptions\
.retrieve(sub_id).delete(at_period_end=True)
user.unsubscribed = True
user.put()
# TODO MailChimp
except Exception as e:
print e
raise AuthExcept("Failed to unsubscribe user: " + user.email)
mail.send_mail(sender="Hello <hello@modelr.io>",
to="<%s>" % user.email,
subject="Modelr account deleted",
body="""
You have unsubscribed from Modelr. Your account will be deleted
at the end of the billing cycle.
Thank you for using Modelr. We hope to meet again some day.
Cheers,
Matt, Evan, and Ben
""")
| [
37811,
198,
24629,
2733,
3519,
2836,
2604,
1040,
11,
1051,
4739,
11,
9206,
8323,
3736,
11,
198,
6404,
5269,
11,
3503,
2644,
198,
37811,
198,
6738,
9195,
62,
9945,
1330,
11787,
11,
11787,
2389,
11,
4912,
11,
49899,
12982,
198,
6738,
23... | 2.439092 | 4,318 |
from pymodbus.client.sync import ModbusSerialClient
client = ModbusSerialClient(
method='rtu',
port='/dev/ttyS0',
baudrate=9600,
timeout=3,
parity='N',
stopbits=1,
bytesize=8
)
if client.connect(): # Trying for connect to Modbus Server/Slave
'''Reading from a holding register with the below content.'''
res = client.read_holding_registers(address=1, count=1, unit=1)
'''Reading from a discrete register with the below content.'''
# res = client.read_discrete_inputs(address=1, count=1, unit=1)
if not res.isError():
print(res.registers)
else:
print(res)
else:
print('Cannot connect to the Modbus Server/Slave') | [
6738,
12972,
4666,
10885,
13,
16366,
13,
27261,
1330,
3401,
10885,
32634,
11792,
198,
198,
16366,
796,
3401,
10885,
32634,
11792,
7,
198,
220,
220,
220,
2446,
11639,
17034,
84,
3256,
198,
220,
220,
220,
2493,
11639,
14,
7959,
14,
42852,... | 2.6 | 265 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import hashlib
import os
import tempfile
from oslo_serialization import jsonutils
from oslo_utils import units
import testtools
from subject.common import timeutils
from subject.tests.integration.legacy_functional import base
from subject.tests.utils import minimal_headers
FIVE_KB = 5 * units.Ki
FIVE_GB = 5 * units.Gi
| [
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
220,
220,
... | 3.423221 | 267 |
import random
import math
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
from tqdm import tqdm
from ray.tune import run, Trainable, sample_from
from dqn import DQN, update_target
from loss import TDLoss, StableTDLoss
from pbuffer import PrioritizedBuffer
from env import get_env
USE_CUDA = torch.cuda.is_available()
Variable = lambda *args, **kwargs: autograd.Variable(*args, **kwargs).cuda() if USE_CUDA else autograd.Variable(*args, **kwargs)
beta_start = 0.4
beta_frames = 1000
BETA_BY_FRAME = lambda frame_idx: min(1.0, beta_start + frame_idx * (1.0 - beta_start) / beta_frames)
epsilon_start = 1.0
epsilon_final = 0.01
epsilon_decay = 500
EPSILON_BY_FRAME = lambda frame_idx: epsilon_final + (epsilon_start - epsilon_final) * math.exp(-1. * frame_idx / epsilon_decay)
if __name__ == '__main__':
trainable = MyTrainable()
trainable._setup()
print("train result: ", trainable._train())
# def train(self):
# config = self.config
# '''
# hyperparams
# method - 'average_over_batch', 'PER'
# var
# mean
# decision_eps,
# alpha, beta,
# hardcoded, cnn,
# invert_actions = False,
# num_frames = 30000,
# num_val_trials = 10,
# batch_size = 32,
# gamma = 0.99,
# num_trials = 5,
# USE_CUDA = False,
# device = "",
# eps = 1.,
# avg_stored=False
# '''
# if USE_CUDA:
# device = torch.device("cuda")
# """Args:"""
# losses = []
# all_rewards = []
# standard_val_rewards = []
# noisy_val_rewards = []
# states_count_ratios = []
# episode_reward = 0
# # Initialize state
# noisyGame = False
# state = config['env'].reset()
# state = np.append(state, float(noisyGame))
# meta_state = (state, float(noisyGame))
# # Initialize replay buffer, model, TD loss, and optimizers
# result_df = pd.DataFrame()
# theta = 1.
# power = config['theta']
# all_standard_val_rewards = []
# all_proportions = []
# std_weights = []
# noisy_weights = []
# std_buffer_example_count = []
# noisy_buffer_example_count = []
# for t in range(num_trials):
# if cnn:
# current_model = CnnDQN(env.observation_space.shape, env.action_space.n)
# target_model = CnnDQN(env.observation_space.shape, env.action_space.n)
# else:
# current_model = DQN(env.observation_space.shape[0] + 1, env.action_space.n)
# target_model = DQN(env.observation_space.shape[0] + 1, env.action_space.n)
# td_loss = TDLoss(method=config['method'])
# optimizer = optim.Adam(current_model.parameters())
# # # Single GPU Code
# if USE_CUDA:
# current_model = current_model.cuda()
# target_model = target_model.cuda()
# if config['method']=='average_over_buffer':
# replay_buffer = AugmentedPrioritizedBuffer(int(1e6))
# else:
# replay_buffer = PrioritizedBuffer(int(1e6))
# print("trial number: {}".format(t))
# for frame_idx in range(1, config['num_frames'] + 1):
# epsilon = EPSILON_BY_FRAME(frame_idx)
# original_action = current_model.act(state, epsilon)
# # If in noisy environment, make action random with probability eps
# if noisyGame and random.uniform(0,1) < config['decision_eps']:
# if invert_actions:
# actual_action = 1 - original_action # invert
# else:
# actual_action = original_action
# else:
# actual_action = original_action
# next_state, reward, done, _ = config['env'].step(actual_action)
# # If in noisy environment, make reward completely random
# if noisyGame:
# reward *= np.random.normal(config['mean'], var)
# if not cnn:
# next_state = np.append(next_state, float(noisyGame))
# meta_next_state = (next_state, float(noisyGame))
# # store q values and hidden states in buffer
# if config['method']=='average_over_buffer':
# state_var = Variable(torch.FloatTensor(np.float32(state)))
# with torch.no_grad():
# q_values, hiddens = current_model.forward(state_var, config['return_latent'] = "last")
# replay_buffer.push(meta_state, original_action, reward, meta_next_state, done, hiddens, q_values)
# else:
# replay_buffer.push(meta_state, original_action, reward, meta_next_state, done)
# meta_state = meta_next_state
# episode_reward += reward
# if done:
# noisyGame = 1-noisyGame
# state = env.reset()
# state = np.append(state, float(noisyGame))
# meta_state = (state, float(noisyGame))
# all_rewards.append(episode_reward)
# episode_reward = 0
# if len(replay_buffer) > batch_size and frame_idx % 4 == 0:
# beta = BETA_BY_FRAME(frame_idx)
# loss = td_loss.compute(current_model, target_model, beta, replay_buffer, optimizer)
# losses.append(loss.data.tolist())
# if frame_idx % 200 == 0:
# all_standard_val_rewards.append(test(val_env, False, eps, num_val_trials, current_model))
# all_proportions.append(float(replay_buffer.states_count[1]) / (float(replay_buffer.states_count[1]) + float(replay_buffer.states_count[0])))
# weight_dict = replay_buffer.get_average_weight_by_env()
# std_weights.append(weight_dictconfig['std_avg'])
# noisy_weights.append(weight_dictconfig['noisy_avg'])
# std_buffer_example_count.append(weight_dictconfig['std_count'])
# noisy_buffer_example_count.append(weight_dictconfig['noisy_count'])
# # plot(frame_idx, all_rewards, losses, standard_val_rewards, noisy_val_rewards, states_count_ratios)
# if frame_idx % 1000 == 0:
# print("Frame {}".format(frame_idx))
# update_target(current_model, target_model)
# print(len(all_proportions))
# result_dfconfig['frame'] = 200*np.arange(len(all_proportions)) % num_frames
# result_dfconfig['trial_num'] = np.floor(200 *np.arange(len(all_proportions)) / num_frames)
# result_dfconfig['val_reward'] = all_standard_val_rewards
# result_dfconfig['proportion'] = all_proportions
# result_dfconfig['std_weights'] = std_weights
# result_dfconfig['noisy_weights'] = noisy_weights
# result_dfconfig['std_buffer_example_count'] = std_buffer_example_count
# result_dfconfig['noisy_buffer_example_count'] = noisy_buffer_example_count
# return result_df
| [
11748,
4738,
198,
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
40085,
355,
6436,
198,
11748,
28034,
13,
230... | 2.106715 | 3,336 |
import qimpy as qp
import torch
import pytest
@pytest.mark.mpi_skip
def main():
"""Run test and additionally plot for visual inspection."""
import matplotlib.pyplot as plt
qp.utils.log_config()
qp.rc.init()
# Plot a single blip function for testing:
plt.figure()
coeff = torch.zeros(12)
coeff[5] = 1
t = torch.linspace(0.0, 12.0, 101, device=qp.rc.device)
for deriv in range(5):
plt.plot(
t.to(qp.rc.cpu),
qp.ions.quintic_spline.Interpolator(t, 2.0, deriv)(coeff).to(qp.rc.cpu),
label=f"Deriv: {deriv}",
)
plt.axhline(0, color="k", ls="dotted")
plt.legend()
# Generate test data:
dx, x_fine, y_fine, y_prime_fine, y_coeff = test_interpolator()
# Plot results:
plt.figure()
plt.plot(
x_fine.to(qp.rc.cpu),
y_fine.to(qp.rc.cpu),
"k--",
label="Reference data",
zorder=10,
)
plt.plot(
x_fine.to(qp.rc.cpu),
y_prime_fine.to(qp.rc.cpu),
"k:",
label="Reference derivative",
zorder=10,
)
for deriv in range(5):
plt.plot(
x_fine.to(qp.rc.cpu),
qp.ions.quintic_spline.Interpolator(x_fine, dx, deriv)(y_coeff).to(
qp.rc.cpu
),
label=f"Interpolant (deriv: {deriv})",
lw=3,
)
plt.axhline(0, color="k", ls="dotted")
plt.legend()
plt.show()
if __name__ == "__main__":
main()
| [
11748,
10662,
320,
9078,
355,
10662,
79,
198,
11748,
28034,
198,
11748,
12972,
9288,
628,
198,
31,
9078,
9288,
13,
4102,
13,
3149,
72,
62,
48267,
628,
198,
4299,
1388,
33529,
198,
220,
220,
220,
37227,
10987,
1332,
290,
36527,
7110,
3... | 1.84217 | 811 |
from django.db import models
# Create your models here.
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
628,
198,
2,
13610,
534,
4981,
994,
13,
628
] | 3.6875 | 16 |
import numpy as np
import bruges
import scipy.stats
import scipy.linalg
import warnings
from scipy.ndimage import gaussian_filter
from typing import Tuple, Union, List, Optional, Callable, Any
# TODO: Add support for horizons that "stop"/"vanish" (i.e. a layer is eroded).
class SyntheticData:
"""Class for generating synthetic geo-volumes and seismic therefrom.
This class can do the following:
- Generate semi-realistic random synthetic horizons inn a subsurface volume of
the desired size (number of voxels). The horizons cover the entire volume.
- Generate simple (unrealistic), parallel faults.
- Generate synthetic seismic data from the synthetic subsurface volume.
Args:
shape (Tuple[int, int, int]): Shape of the synthetic geo-volume, on the format
(I, X, T).
Attributes:
I: Number of ilines, > 0.
X: Number of xlines, > 0.
T: Number of tlines, > 0.
n_horizons: Number of horizons in geo-volume, > 0.
horizons: List of length n_horizons of ndarray of int, shape (I, X). Element
(I, X) of list element h gives the height of horizon h in (I, X) - only one
horizon point per horizon per trace is supported. -1 indicates out of
bounds, i.e. the horizon is not in the geo-volume.
facies: ndarray of int, shape (I, X, T). Facies start at horizons (inclusive)
and continue to next horizon (exclusive) in t-direction. I.e.
n_facies = n_horizons + 1. The array contains integers from 0 to n_horizons.
seismic: ndarray of float, shape (I, X, T). Synthetic seismic.
wavelet: array_like; list of wavelet amplitudes.
reflection_coeffs: List of reflection coefficients, one for each horizon. Each
can be a float (constant coefficients across horizons) or an (I*X) array.
-1 < reflection coefficient < 1.
oob_horizons: List of horizons that are partly or entirely out of bounds, i.e.
some/all points of the horizon not in the geo-volume.
"""
@property
def shape(self) -> Tuple[int, int, int]:
"""Shape property.
Returns:
Tuple[int, int, int]: Shape of geo-volume (I*X*T).
"""
return self.I, self.X, self.T
@property
def reflection_coeffs_array(self) -> Optional[np.ndarray]:
"""Reflection coefficient array property.
Returns:
np.ndarray: Shape (I*X*T); array of reflection coefficients.
"""
if self.reflection_coeffs is None:
return None
else:
r_array = np.zeros(self.shape)
ii, xx = np.mgrid[: self.I, : self.X]
for i in range(self.n_horizons):
h = self.horizons[i] # type: ignore
r_array[ii, xx, h] = self.reflection_coeffs[i]
return r_array
@property
def noise(self) -> np.ndarray:
"""Noise property.
Subtracting noise from self.seismic gives noise-free seismic.
Returns:
np.ndarray: Shape (I*X*T); array of noise contribution to seismic.
"""
if self._blur_noise is not None:
return self._blur_noise
if self._systematic_noise is not None:
if self._white_noise is not None:
return self._systematic_noise + self._white_noise
return self._systematic_noise
if self._white_noise is not None:
return self._white_noise
return np.zeros(self.shape)
def generate_horizons(
self,
n_horizons: int,
min_distance: int = 5,
volatility: float = 0.6,
trend_size: float = 1,
trend_length: int = 30,
fault_xlines: Union[int, List[int]] = None,
fault_size: Union[int, List[int]] = 5,
generate_reflection_coeffs: bool = True,
reflection_coeff_volatility: float = 0.005,
reflection_coeff_seeds: List[float] = None,
) -> np.ndarray:
"""Generate synthetic horizons.
Generate random synthetic horizons in the defined synthetic geo-volume.
Args:
n_horizons: int > 0. Number of horizons to be generated.
min_distance: int >= 0. Minimum distance between the horizons (and top
horizon and 0).
volatility: float > 0. Decides the volatility of the horizons.
trend_size: float > 0. Decides how significant trends the horizons have.
trend_length: float > 0. Decides how long the trends last for.
fault_xlines: Create faults at these xlines.
fault_size: List of size of fault jumps, or size of all jumps if just an
integer. Ignored if fault_xlines is None.
generate_reflection_coeffs: If True, generate random, non-constant
reflection coefficients.
reflection_coeff_volatility: float > 0. Volatility of the reflection
coefficients.
reflection_coeff_seeds: Initial values that the random reflection
coefficients will fluctuate around.
Returns:
List of horizon numpy arrays of size (I*X).
"""
# Reset:
self.facies = None
self.seismic = None
self.oob_horizons = []
self.n_horizons = n_horizons
if reflection_coeff_seeds is not None:
msg = (
"Please provide a reflection coefficient seed value for each horizon, "
"if any."
)
assert len(reflection_coeff_seeds) == self.n_horizons, msg
# TODO: Should respect bounds from _generate_horizons.
self.horizons = self._generate_overlapping_horizons(
volatility,
trend_length,
trend_size,
generate_reflection_coeffs,
reflection_coeff_volatility,
reflection_coeff_seeds,
)
self.horizons = self._set_min_distance(min_distance)
if fault_xlines is not None:
if isinstance(fault_xlines, int):
fault_xlines = [fault_xlines]
if isinstance(fault_size, int):
fault_size = [fault_size] * len(fault_xlines)
else:
assert len(fault_size) == len(fault_xlines)
for x, size in zip(fault_xlines, fault_size):
self.horizons = self.create_fault(x, size)
self.horizons = self._move_above_zero(min_distance)
self.horizons = self._set_oob() # set points above top of vol to 0
return self.horizons
def _generate_overlapping_horizons(
self,
volatility: float,
trend_length: int,
trend_size: float,
generate_reflection_coeffs: bool,
reflection_coeff_volatility: float,
reflection_coeff_seeds: Optional[List[float]],
) -> np.ndarray:
"""Generate horizons independently. They will overlap."""
horizons = np.zeros((self.n_horizons, self.I, self.X))
if generate_reflection_coeffs:
self.reflection_coeffs = np.zeros((self.n_horizons, self.I, self.X))
# Create trend vectors
i_trend = self._get_trend_vec(self.I, trend_size, trend_length)
x_trend = self._get_trend_vec(self.X, trend_size, trend_length)
# Generate one horizon at a time according to a random process using
# the trend vectors
for h in range(0, self.n_horizons):
horizons[h] = self._generate_horizon(i_trend, x_trend, _jump_r)
if generate_reflection_coeffs:
rel_vol = reflection_coeff_volatility / volatility
for h in range(0, self.n_horizons):
# Trend might be decreasing with increasing depth
flip = np.random.choice((-1, 1))
if reflection_coeff_seeds is None:
seed = None
else:
seed = reflection_coeff_seeds[h]
self.reflection_coeffs[h] = self._generate_horizon( # type: ignore
flip * i_trend, flip * x_trend, _jump_c, True, seed
)
# horizons should be integer-valued.
horizons = horizons.round().astype(int)
return horizons
def _generate_horizon(
self,
i_trend: np.ndarray,
x_trend: np.ndarray,
jump: Callable,
reflection_coeff: bool = False,
reflection_coeff_seed: float = None,
) -> np.ndarray:
"""Generate and return a single horizon or horizon reflection coefficients."""
iline_edge = np.zeros(self.I)
xline_edge = np.zeros(self.X)
if reflection_coeff:
if reflection_coeff_seed is not None:
iline_edge[0] = reflection_coeff_seed
xline_edge[0] = reflection_coeff_seed
else:
# Init range (-0.25, -0.1) or (0.1, 0.25)
iline_edge[0] = np.random.uniform(-0.15, 0.15)
iline_edge[0] += np.sign(iline_edge[0]) * 0.1
xline_edge[0] = iline_edge[0]
high = 0.3 * np.sign(iline_edge[0])
low = 0.05 * np.sign(iline_edge[0])
if high < low:
high, low = (low, high)
else:
high = np.inf
low = -high
# Generate the horizon along the edges iline = 0 and xline = 0.
for i in range(1, self.I):
iline_edge[i] = (iline_edge[i - 1] + jump(i_trend[i])).clip(low, high)
for x in range(1, self.X):
xline_edge[x] = (xline_edge[x - 1] + jump(x_trend[x])).clip(low, high)
horizon = np.zeros((self.I, self.X))
horizon[:, 0] = iline_edge
horizon[0, :] = xline_edge
# Generate the rest of the horizon.
for i in range(1, self.I):
for x in range(1, self.X):
i_jump = jump(i_trend[i])
x_jump = jump(x_trend[x])
horizon[i, x] = (
0.5 * (horizon[i - 1, x] + i_jump + horizon[i, x - 1] + x_jump)
).clip(low, high)
return horizon
def _get_trend_vec(
self, n: int, trend_size: float, trend_length: int
) -> np.ndarray:
"""Get trend of a random walk with trend."""
trend = trend_size * np.random.randn(n)
trend[0] = 0
trend = self._moving_average(trend, trend_length)
return trend
@staticmethod
def _moving_average(a: np.ndarray, n: int) -> np.ndarray:
"""Moving average of a, window size = n."""
b = np.copy(a)
b = np.insert(b, 0, np.full(n, a[0]))
s = np.cumsum(b)
res = (s[n:] - s[:-n]) / n
return res
def _set_min_distance(self, min_distance: int) -> np.ndarray:
"""Move horizons to fulfill minimum distance specification."""
for j in range(1, self.n_horizons):
diff = self.horizons[j] - self.horizons[j - 1] # type: ignore
min_diff = diff.min()
if min_diff < min_distance:
dist = np.random.randint(min_distance, 3 * min_distance)
self.horizons[j] += dist - min_diff # type: ignore
return self.horizons
def create_fault(self, fault_xline: int, fault_size: int) -> np.ndarray:
"""Create a fault at a xline fault_xline.
Args:
fault_xline: Xline to create fault at.
fault_size: Size of fault.
Returns:
See class attribute self.horizons.
"""
self.horizons[:, :, fault_xline:] += fault_size # type: ignore
return self.horizons
def _move_above_zero(self, min_distance: int) -> np.ndarray:
"""Make sure that the top horizon is a little above 0 (below seabed)."""
h_min = self.horizons[0].min() # type: ignore
self.horizons -= h_min
self.horizons += np.random.randint(0, self.T // min(10, self.T))
self.horizons += min_distance
return self.horizons
def _set_oob(self) -> np.ndarray:
"""Remove parts of horizons above (geologically below) defined geo-volume."""
oob = self.horizons > (self.T - 1) # type: ignore
if oob.sum() > 0: # type: ignore
self.horizons[oob] = -1 # type: ignore
for h in range(self.n_horizons - 1, -1, -1):
n_out = oob[h].sum() # type: ignore
if n_out > 0:
I, X = self.I, self.X
warnings.warn(
f"horizon {h} is "
f'{"partly" if n_out < (I*X) else "entirely"} '
f"out of bounds."
)
self.oob_horizons.append(h)
else:
break
return self.horizons
def horizon_volume(self, horizon_number: int) -> Optional[np.ndarray]:
"""Produce horizon volume for a single horizon.
This function transforms the generated horizon into a binary numpy array of
dimensions (I, X, T). The horizon is represented by the ones.
Args:
horizon_number: Which horizon to generate volume for.
Returns:
binary ndarray of size (I*X*T) if horizon is (partly) within bounds, None
otherwise.
"""
horizon = self.ixtn_horizons()
horizon = horizon[horizon[:, 3] == horizon_number]
if horizon.size == 0:
warnings.warn(f"horizon {horizon_number} is not in volume.")
return None
horizon_vol = np.zeros(self.shape)
horizon_vol[horizon[:, 0], horizon[:, 1], horizon[:, 2]] = 1
return horizon_vol
def ixtn_horizons(self) -> np.ndarray:
"""Produce horizon coords.
This function transforms the generated horizons into a numpy array of dimensions
(n_horizon_points, 4) with rows (I, X, T, n_horizon).
Returns:
ndarray of horizon coords; shape (n_horizon_points, 4).
"""
in_bounds = self.horizons > -1 # type: ignore
s = in_bounds.sum() # type: ignore
ixtn = np.empty(shape=(s, 4), dtype=int)
nix = np.argwhere(in_bounds)
ixtn[:, :2] = nix[:, 1:]
ixtn[:, 3] = nix[:, 0]
ixtn[:, 2] = self.horizons[nix[:, 0], nix[:, 1], nix[:, 2]] # type: ignore
return ixtn
def get_facies(self) -> np.ndarray:
"""Generate facies array.
Returns:
ndarray of int, shape (I, X, T). See class attribute docstring (facies) for
description.
"""
ixtn = self.ixtn_horizons()
facies = np.zeros(self.shape, dtype=int)
facies[ixtn[:, 0], ixtn[:, 1], ixtn[:, 2]] = 1
for t in range(1, self.T):
facies[:, :, t] = facies[:, :, t] + facies[:, :, (t - 1)]
self.facies = facies
return facies
def generate_synthetic_seismic(
self,
reflection_coeffs: Union[float, List[Union[float, np.ndarray]]] = None,
systematic_sigma: float = 0,
white_sigma: float = 0,
blur_sigma: float = 0,
wavelet_frequency: int = 40,
):
"""Generate synthetic seismic.
Create synthetic seismic using instance horizons and coefficients, or provided
(constant) coefficients.
Args:
reflection_coeffs: See class attributes.
systematic_sigma: Systematic noise added if not None; higher means more
noise.
white_sigma: White noise added if not None; higher means more noise.
blur_sigma: Seismic blurred if not None; higher means more blurred.
wavelet_frequency: Frequency of wavelet passed to bruges.filters.ricker() to
define wavelet.
Returns:
ndarray of float, shape (I, X, T).
"""
if reflection_coeffs is not None:
if isinstance(reflection_coeffs, float):
self.reflection_coeffs = np.array(reflection_coeffs).reshape(1)
else:
self.reflection_coeffs = np.array(reflection_coeffs)
msg = (
"Please provide one reflection coefficient constant/array for each"
"horizon."
)
assert len(self.reflection_coeffs) == self.n_horizons, msg
assert np.all(np.abs(self.reflection_coeffs) < 1), "Max 100% reflected."
if self.reflection_coeffs is None:
warnings.warn("No reflection coefficients. Cannot generate seismic.")
return
dt = 0.005
# For some reason, odd length of the wave gives two spike points, we want one...
even_T = self.T - self.T % 2
duration = min(0.100, 0.005 * even_T) # n_steps <= self.T
wave = bruges.filters.ricker(duration=duration, dt=dt, f=wavelet_frequency)
# ... but we want odd length
wave = np.delete(wave, 0)
self.wavelet = wave
# TODO: Quicker to use convolution_matrix here?
reflection_arr = self.reflection_coeffs_array
seismic = np.apply_along_axis(
lambda r: np.convolve(r, wave, mode="same"), axis=-1, arr=reflection_arr
)
self.seismic = seismic
if systematic_sigma > 0:
first_col = np.zeros(self.T)
l = wave.size // 2 + 1
first_col[:l] = wave[(l - 1) :]
convolution_matrix = scipy.linalg.toeplitz(first_col)
self._systematic_sigma = systematic_sigma
W = convolution_matrix
covariance_matrix = systematic_sigma ** 2 * W @ W.T
dist = scipy.stats.multivariate_normal(np.zeros(self.T), covariance_matrix)
self._systematic_noise = dist.rvs((self.I, self.X))
seismic += self._systematic_noise
else:
self._systematic_sigma = 0
if white_sigma > 0:
self._white_sigma = white_sigma
self._white_noise = np.random.normal(np.zeros(seismic.shape), white_sigma)
seismic += self._white_noise
else:
self._white_sigma = 0
if blur_sigma > 0:
self._blur_sigma = blur_sigma
seismic = gaussian_filter(seismic, sigma=[blur_sigma, blur_sigma, 0])
self._blur_noise = self.seismic - seismic
else:
self._blur_sigma = 0
self.seismic = seismic
return seismic
| [
11748,
299,
32152,
355,
45941,
198,
11748,
275,
2143,
274,
198,
11748,
629,
541,
88,
13,
34242,
198,
11748,
629,
541,
88,
13,
75,
1292,
70,
198,
11748,
14601,
198,
6738,
629,
541,
88,
13,
358,
9060,
1330,
31986,
31562,
62,
24455,
19... | 2.118168 | 8,691 |
""" Python Character Mapping Codec generated from '8859-8.TXT'.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
### Codec APIs
### encodings module API
### Decoding Map
decoding_map = {
0x00aa: 0x00d7, # MULTIPLICATION SIGN
0x00af: 0x203e, # OVERLINE
0x00ba: 0x00f7, # DIVISION SIGN
0x00df: 0x2017, # DOUBLE LOW LINE
0x00e0: 0x05d0, # HEBREW LETTER ALEF
0x00e1: 0x05d1, # HEBREW LETTER BET
0x00e2: 0x05d2, # HEBREW LETTER GIMEL
0x00e3: 0x05d3, # HEBREW LETTER DALET
0x00e4: 0x05d4, # HEBREW LETTER HE
0x00e5: 0x05d5, # HEBREW LETTER VAV
0x00e6: 0x05d6, # HEBREW LETTER ZAYIN
0x00e7: 0x05d7, # HEBREW LETTER HET
0x00e8: 0x05d8, # HEBREW LETTER TET
0x00e9: 0x05d9, # HEBREW LETTER YOD
0x00ea: 0x05da, # HEBREW LETTER FINAL KAF
0x00eb: 0x05db, # HEBREW LETTER KAF
0x00ec: 0x05dc, # HEBREW LETTER LAMED
0x00ed: 0x05dd, # HEBREW LETTER FINAL MEM
0x00ee: 0x05de, # HEBREW LETTER MEM
0x00ef: 0x05df, # HEBREW LETTER FINAL NUN
0x00f0: 0x05e0, # HEBREW LETTER NUN
0x00f1: 0x05e1, # HEBREW LETTER SAMEKH
0x00f2: 0x05e2, # HEBREW LETTER AYIN
0x00f3: 0x05e3, # HEBREW LETTER FINAL PE
0x00f4: 0x05e4, # HEBREW LETTER PE
0x00f5: 0x05e5, # HEBREW LETTER FINAL TSADI
0x00f6: 0x05e6, # HEBREW LETTER TSADI
0x00f7: 0x05e7, # HEBREW LETTER QOF
0x00f8: 0x05e8, # HEBREW LETTER RESH
0x00f9: 0x05e9, # HEBREW LETTER SHIN
0x00fa: 0x05ea, # HEBREW LETTER TAV
}
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
encoding_map[v] = k
| [
37811,
11361,
15684,
337,
5912,
39298,
7560,
422,
705,
3459,
3270,
12,
23,
13,
51,
25010,
4458,
628,
198,
25354,
416,
13067,
12,
31258,
20607,
7423,
357,
7617,
31,
293,
2022,
3686,
13,
785,
737,
198,
198,
7,
66,
8,
15069,
31171,
711... | 1.791146 | 881 |
import argparse
import subprocess
import os
import time
import random
if __name__ == "__main__":
main()
| [
11748,
1822,
29572,
198,
11748,
850,
14681,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
4738,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 3.114286 | 35 |
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: FBOutput
import tdw.flatbuffers
| [
2,
6338,
7560,
416,
262,
21939,
36474,
364,
17050,
11,
466,
407,
13096,
198,
198,
2,
25745,
25,
13186,
26410,
198,
198,
11748,
41560,
86,
13,
38568,
36873,
364,
198
] | 3.866667 | 30 |