Dataset schema (one row per source file):

| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
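
Each row pairs a source file's content with repository popularity metadata. As a hedged illustration of how rows with this schema might be consumed (the dataset identifier below is a placeholder, not one named in this document), a minimal streaming filter could look like:

```python
# Minimal sketch: stream rows of this schema and keep "clean" Python files.
# "org/code-dataset" is a hypothetical identifier; substitute the real dataset name.
from datasets import load_dataset

rows = load_dataset("org/code-dataset", split="train", streaming=True)

for row in rows:
    if row["ext"] == "py" and row["max_line_length"] < 1000 and row["alphanum_fraction"] > 0.25:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```
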
hexsha: dcb463cc37985becee76776bf16977727d8606af | size: 7,622 | ext: py | lang: Python
repo: letrend/neopixel_fpga @ d9247417a9d311eceebad5898571846c6e33a44a | path: ros_catkin_ws/install_isolated/lib/python2.7/dist-packages/topic_tools/srv/_MuxList.py | licenses: ["MIT"]
stars: 2 (2018-12-11T16:35:20.000Z to 2019-01-23T16:42:17.000Z) | issues: 1 (2018-12-28T21:11:50.000Z to 2018-12-28T21:11:50.000Z) | forks: 3 (2018-01-21T17:53:17.000Z to 2021-09-08T10:22:05.000Z)
content:
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from topic_tools/MuxListRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class MuxListRequest(genpy.Message):
_md5sum = "d41d8cd98f00b204e9800998ecf8427e"
_type = "topic_tools/MuxListRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """"""
__slots__ = []
_slot_types = []
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(MuxListRequest, self).__init__(*args, **kwds)
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from topic_tools/MuxListResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class MuxListResponse(genpy.Message):
_md5sum = "b0eef9a05d4e829092fc2f2c3c2aad3d"
_type = "topic_tools/MuxListResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """string[] topics
"""
__slots__ = ['topics']
_slot_types = ['string[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
topics
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(MuxListResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.topics is None:
self.topics = []
else:
self.topics = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
length = len(self.topics)
buff.write(_struct_I.pack(length))
for val1 in self.topics:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.pack('<I%ss'%length, length, val1))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.topics = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8')
else:
val1 = str[start:end]
self.topics.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
length = len(self.topics)
buff.write(_struct_I.pack(length))
for val1 in self.topics:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.pack('<I%ss'%length, length, val1))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.topics = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8')
else:
val1 = str[start:end]
self.topics.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
class MuxList(object):
_type = 'topic_tools/MuxList'
_md5sum = 'b0eef9a05d4e829092fc2f2c3c2aad3d'
_request_class = MuxListRequest
_response_class = MuxListResponse
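
The two classes above are the standard genpy bindings for the topic_tools/MuxList service (empty request, `string[] topics` response). A minimal client sketch, assuming a running ROS master and a mux node whose list service is reachable as `mux/list` (the service name is an assumption about the deployment, not something stated in this file):

```python
# Sketch only: ask a topic_tools mux which input topics it is multiplexing.
import rospy
from topic_tools.srv import MuxList

rospy.init_node("mux_list_client")
rospy.wait_for_service("mux/list")                 # assumed service name of the mux node
list_topics = rospy.ServiceProxy("mux/list", MuxList)
response = list_topics()                           # MuxListRequest has no fields
print(response.topics)                             # -> list of multiplexed topic names
```
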
avg_line_length: 32.712446 | max_line_length: 145 | alphanum_fraction: 0.645631

hexsha: b4ce89a3b5273b0909d89125c79a169a5fc41ae3 | size: 3,531 | ext: py | lang: Python
repo: Lily-Le/shape_templates @ 5525a0ca017de3bb022069eb97e2c3fccf5fae40 | path: src/core/model.py | licenses: ["MIT"]
stars: 14 (2021-07-26T18:33:11.000Z to 2022-03-01T05:48:13.000Z) | issues: 4 (2021-10-18T07:36:07.000Z to 2022-03-08T12:42:57.000Z) | forks: 4 (2021-08-08T13:51:36.000Z to 2022-02-15T08:41:02.000Z)
content:
import torch
import torch.nn as nn
from torch.optim import Adam
import torchvision.models as models
from src.core.networks import ParameterRegressor, ImageTranslator
from src.core.utils.losses import compute_anchor_loss, compute_boundary_loss, _l1_loss
from src.core.utils.helper import draw_template, load_anchor_points
from src.core.utils.transforms import transform_template, transform_anchor_points
class Model:
def __init__(self, cfg, device):
self.template = draw_template(cfg['template_path'], size=cfg['img_size'], batch_size=cfg['batch_size'],
device=device)
self.core, self.single, self.double = load_anchor_points(cfg['anchor_pts_path'], device, cfg['batch_size'])
self.regressor = ParameterRegressor(num_features=cfg['regressor_nf'], num_parts=cfg['num_parts']).to(device)
self.translator = ImageTranslator(num_features=cfg['translator_nf'], num_parts=cfg['num_parts']).to(device)
self.optim = Adam(list(self.regressor.parameters()) + list(self.translator.parameters()),
lr=cfg['learning_rate'])
self.vgg = nn.Sequential()
vgg = models.vgg19(pretrained=True).features.eval().to(device)
depth = 14
for i in range(depth):
self.vgg.add_module(str(i), vgg[i])
self.I = torch.eye(3)[0:2].view(1, 1, 2, 3).repeat(cfg['batch_size'], cfg['num_parts'], 1, 1).to(device)
self.aug = torch.Tensor([0, 0, 1]).view(1, 1, 1, 3).repeat(cfg['batch_size'], cfg['num_parts'], 1, 1).to(device)
self.lambda1 = cfg['anchor_loss_weight']
self.labmda2 = cfg['boundary_loss_weight']
def train_step(self, frame1, frame2, return_imgs=False):
batch_size = frame1.shape[0]
num_parts = self.template.shape[1]
img_size = frame1.shape[2]
estimated_params = self.regressor(frame1)
estimated_params = self.I + estimated_params
# (batch, num_parts) --> (batch*num_parts)
batched_template = self.template.view(-1, img_size, img_size).unsqueeze(1)
batched_params = estimated_params.view(-1, 2, 3)
transformed_template = transform_template(batched_template, batched_params)
# (batch*num_parts) --> (batch, num_parts)
transformed_template = transformed_template.view(batch_size, num_parts, img_size, img_size)
# append [0, 0, 1] as last row to matrices
A = torch.cat([estimated_params, self.aug], dim=-2)
transformed_anchors = transform_anchor_points(A, self.core, self.double, self.single)
reconstructed = self.translator(frame2, transformed_template)
anchor_loss = compute_anchor_loss(*transformed_anchors, size=img_size)
boundary_loss = compute_boundary_loss(*transformed_anchors, img_size=img_size)
recon_loss = _l1_loss(self.vgg(frame1), self.vgg(reconstructed))
# overall loss, gradient and weight update
loss = recon_loss + self.lambda1 * anchor_loss + self.labmda2 * boundary_loss
# reset gradients
self.regressor.zero_grad()
self.translator.zero_grad()
loss.backward()
self.optim.step()
d = {'anchor_loss': anchor_loss, 'boundary_loss': boundary_loss, 'recon_loss': recon_loss}
if return_imgs:
d['reconstructed_frame'] = reconstructed.detach()
d['frame1'] = frame1
d['frame2'] = frame2
d['transformed_template'] = transformed_template.sum(1, keepdims=True).detach()
return d
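
A rough sketch of how `Model.train_step` might be driven; the `cfg` dictionary (with the keys read in `__init__`), the dataloader of frame pairs, and the epoch count are all assumptions here, since the repository's training script is not part of this record:

```python
# Sketch under assumptions: `cfg` provides the keys read by Model.__init__ and
# `loader` yields (frame1, frame2) batches of shape (B, C, H, W).
import torch
from src.core.model import Model

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Model(cfg, device)                      # cfg assumed to be loaded elsewhere
for epoch in range(num_epochs):                 # num_epochs assumed
    for frame1, frame2 in loader:               # loader assumed
        frame1, frame2 = frame1.to(device), frame2.to(device)
        losses = model.train_step(frame1, frame2)
        print(epoch, {k: float(v) for k, v in losses.items()})
```
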
avg_line_length: 47.716216 | max_line_length: 120 | alphanum_fraction: 0.674313

hexsha: 4263ff9b6ea927b8cd514dec7d0b399efcf248c0 | size: 4,345 | ext: py | lang: Python
repo: p2l1pfp/cmssw @ 9bda22bf33ecf18dd19a3af2b3a8cbdb1de556a9 | path: L1Trigger/L1TMuon/python/simDigis_cff.py | licenses: ["Apache-2.0"]
stars: 2 (2018-06-01T05:18:55.000Z to 2021-04-08T21:44:06.000Z) | issues: 26 (2018-10-30T12:47:58.000Z to 2022-03-29T08:39:00.000Z) | forks: null
content:
import FWCore.ParameterSet.Config as cms
import sys
#
# Legacy L1 Muon modules still running in 2016 trigger:
#
# - DT TP emulator
from L1Trigger.DTTrigger.dtTriggerPrimitiveDigis_cfi import *
import L1Trigger.DTTrigger.dtTriggerPrimitiveDigis_cfi
simDtTriggerPrimitiveDigis = L1Trigger.DTTrigger.dtTriggerPrimitiveDigis_cfi.dtTriggerPrimitiveDigis.clone(
digiTag = 'simMuonDTDigis'
)
#simDtTriggerPrimitiveDigis.debug = cms.untracked.bool(True)
# - CSC TP emulator
from L1Trigger.CSCCommonTrigger.CSCCommonTrigger_cfi import *
import L1Trigger.CSCTriggerPrimitives.cscTriggerPrimitiveDigis_cfi
simCscTriggerPrimitiveDigis = L1Trigger.CSCTriggerPrimitives.cscTriggerPrimitiveDigis_cfi.cscTriggerPrimitiveDigis.clone(
CSCComparatorDigiProducer = 'simMuonCSCDigis:MuonCSCComparatorDigi',
CSCWireDigiProducer = 'simMuonCSCDigis:MuonCSCWireDigi'
)
SimL1TMuonCommonTask = cms.Task(simDtTriggerPrimitiveDigis, simCscTriggerPrimitiveDigis)
SimL1TMuonCommon = cms.Sequence(SimL1TMuonCommonTask)
#
# Legacy Trigger:
#
#
# - CSC Track Finder emulator
#
import L1Trigger.CSCTrackFinder.csctfTrackDigis_cfi
simCsctfTrackDigis = L1Trigger.CSCTrackFinder.csctfTrackDigis_cfi.csctfTrackDigis.clone(
SectorReceiverInput = 'simCscTriggerPrimitiveDigis:MPCSORTED',
DTproducer = 'simDtTriggerPrimitiveDigis'
)
import L1Trigger.CSCTrackFinder.csctfDigis_cfi
simCsctfDigis = L1Trigger.CSCTrackFinder.csctfDigis_cfi.csctfDigis.clone(
CSCTrackProducer = 'simCsctfTrackDigis'
)
#
# - DT Track Finder emulator
#
import L1Trigger.DTTrackFinder.dttfDigis_cfi
simDttfDigis = L1Trigger.DTTrackFinder.dttfDigis_cfi.dttfDigis.clone(
DTDigi_Source = 'simDtTriggerPrimitiveDigis',
CSCStub_Source = 'simCsctfTrackDigis'
)
#
# - RPC PAC Trigger emulator
#
from L1Trigger.RPCTrigger.rpcTriggerDigis_cff import *
simRpcTriggerDigis = L1Trigger.RPCTrigger.rpcTriggerDigis_cff.rpcTriggerDigis.clone(
label = 'simMuonRPCDigis'
)
#
# - Global Muon Trigger emulator
#
import L1Trigger.GlobalMuonTrigger.gmtDigis_cfi
simGmtDigis = L1Trigger.GlobalMuonTrigger.gmtDigis_cfi.gmtDigis.clone(
DTCandidates = 'simDttfDigis:DT',
CSCCandidates = 'simCsctfDigis:CSC',
RPCbCandidates = 'simRpcTriggerDigis:RPCb',
RPCfCandidates = 'simRpcTriggerDigis:RPCf',
# Note: GMT requires input from calorimeter emulators, namely MipIsoData from GCT
MipIsoData = 'simRctDigis'
)
#
#
SimL1TMuonTask = cms.Task(SimL1TMuonCommonTask, simCsctfTrackDigis, simCsctfDigis, simDttfDigis, simRpcTriggerDigis, simGmtDigis)
SimL1TMuon = cms.Sequence(SimL1TMuonTask)
#
# Stage-2 Trigger
#
from L1Trigger.L1TTwinMux.simTwinMuxDigis_cfi import *
from L1Trigger.L1TMuonBarrel.simBmtfDigis_cfi import *
from L1Trigger.L1TMuonBarrel.simKBmtfStubs_cfi import *
from L1Trigger.L1TMuonBarrel.simKBmtfDigis_cfi import *
from L1Trigger.L1TMuonEndCap.simEmtfDigis_cfi import *
from L1Trigger.L1TMuonOverlap.simOmtfDigis_cfi import *
from L1Trigger.L1TMuon.simGmtCaloSumDigis_cfi import *
from L1Trigger.L1TMuon.simGmtStage2Digis_cfi import *
from Configuration.Eras.Modifier_stage2L1Trigger_cff import stage2L1Trigger
#
#
stage2L1Trigger.toReplaceWith(SimL1TMuonTask, cms.Task(SimL1TMuonCommonTask, simTwinMuxDigis, simBmtfDigis, simKBmtfStubs, simKBmtfDigis, simEmtfDigis, simOmtfDigis, simGmtCaloSumDigis, simGmtStage2Digis))
#
# Phase-2 Trigger
#
from L1Trigger.L1TMuonBarrel.simKBmtfStubs_cfi import *
from L1Trigger.L1TMuonBarrel.simKBmtfDigis_cfi import *
from Configuration.Eras.Modifier_phase2_trigger_cff import phase2_trigger
phase2_trigger.toReplaceWith(SimL1TMuonTask, cms.Task(SimL1TMuonCommonTask, simTwinMuxDigis, simBmtfDigis, simKBmtfStubs, simKBmtfDigis, simEmtfDigis, simOmtfDigis, simGmtCaloSumDigis, simGmtStage2Digis))
## GEM TPs
from L1Trigger.L1TGEM.simGEMDigis_cff import *
_run3_SimL1TMuonTask = SimL1TMuonTask.copy()
_run3_SimL1TMuonTask.add(simMuonGEMPadTask)
from Configuration.Eras.Modifier_run3_GEM_cff import run3_GEM
(stage2L1Trigger & run3_GEM).toReplaceWith( SimL1TMuonTask, _run3_SimL1TMuonTask )
## ME0 TPs
from L1Trigger.L1TGEM.me0TriggerDigis_cff import *
_phase2_SimL1TMuonTask = SimL1TMuonTask.copy()
_phase2_SimL1TMuonTask.add(me0TriggerAllDigiTask)
from Configuration.Eras.Modifier_phase2_muon_cff import phase2_muon
(stage2L1Trigger & phase2_muon).toReplaceWith( SimL1TMuonTask, _phase2_SimL1TMuonTask )
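
A hedged sketch of how a fragment like this is normally consumed: it is loaded into a `cms.Process` and the `SimL1TMuon` sequence is scheduled on a path. The process name and path wiring below are illustrative assumptions, not configuration taken from this repository:

```python
# Illustrative only: attach the L1 muon emulation sequence to a CMSSW process.
import FWCore.ParameterSet.Config as cms

process = cms.Process("L1SIM")
process.load("L1Trigger.L1TMuon.simDigis_cff")     # the fragment above
process.l1MuonPath = cms.Path(process.SimL1TMuon)
process.schedule = cms.Schedule(process.l1MuonPath)
```
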
avg_line_length: 38.794643 | max_line_length: 205 | alphanum_fraction: 0.837514

hexsha: 9e0f085bd6416b972a9fb9c29e723349c11aa84e | size: 5,642 | ext: py | lang: Python
repo: reb00ter/datamodel-code-generator @ ac2078b674b6880815581c634c5a321839eb8ab7 | path: datamodel_code_generator/model/pydantic/base_model.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
from pathlib import Path
from typing import Any, ClassVar, DefaultDict, Dict, List, Optional, Set, Tuple
from pydantic import Field
from datamodel_code_generator.imports import Import
from datamodel_code_generator.model import (
ConstraintsBase,
DataModel,
DataModelFieldBase,
)
from datamodel_code_generator.model.pydantic.imports import IMPORT_EXTRA, IMPORT_FIELD
from datamodel_code_generator.reference import Reference
from datamodel_code_generator.types import chain_as_tuple
class Constraints(ConstraintsBase):
gt: Optional[float] = Field(None, alias='exclusiveMinimum')
ge: Optional[float] = Field(None, alias='minimum')
lt: Optional[float] = Field(None, alias='exclusiveMaximum')
le: Optional[float] = Field(None, alias='maximum')
multiple_of: Optional[float] = Field(None, alias='multipleOf')
min_items: Optional[int] = Field(None, alias='minItems')
max_items: Optional[int] = Field(None, alias='maxItems')
min_length: Optional[int] = Field(None, alias='minLength')
max_length: Optional[int] = Field(None, alias='maxLength')
regex: Optional[str] = Field(None, alias='pattern')
class DataModelField(DataModelFieldBase):
_EXCLUDE_FIELD_KEYS: ClassVar[Set[str]] = {
'alias',
'default',
'default_factory',
'const',
'gt',
'ge',
'lt',
'le',
'multiple_of',
'min_items',
'max_items',
'min_length',
'max_length',
'regex',
}
constraints: Optional[Constraints] = None
@property
def method(self) -> Optional[str]:
return self.validator
@property
def validator(self) -> Optional[str]:
return None
# TODO refactor this method for other validation logic
# from datamodel_code_generator.model.pydantic import VALIDATOR_TEMPLATE
#
# return VALIDATOR_TEMPLATE.render(
# field_name=self.name, types=','.join([t.type_hint for t in self.data_types])
# )
@property
def field(self) -> Optional[str]:
"""for backwards compatibility"""
result = str(self)
if result == "":
return None
return result
def self_reference(self) -> bool:
return isinstance(self.parent, BaseModel) and self.parent.reference.path in {
d.reference.path for d in self.data_type.all_data_types if d.reference
}
def __str__(self) -> str:
data: Dict[str, Any] = {
k: v for k, v in self.extras.items() if k not in self._EXCLUDE_FIELD_KEYS
}
if self.alias:
data['alias'] = self.alias
if (
self.constraints is not None
and not self.self_reference()
and not self.data_type.strict
):
data = {**data, **self.constraints.dict()}
field_arguments = sorted(
f"{k}={repr(v)}" for k, v in data.items() if v is not None
)
if not field_arguments:
if self.nullable and self.required:
return 'Field(...)' # Field() is for mypy
return ""
value_arg = "..." if self.required else repr(self.default)
kwargs = ",".join(field_arguments)
return f'Field({value_arg}, {kwargs})'
class BaseModel(DataModel):
TEMPLATE_FILE_PATH: ClassVar[str] = 'pydantic/BaseModel.jinja2'
BASE_CLASS: ClassVar[str] = 'pydantic.BaseModel'
def __init__(
self,
*,
reference: Reference,
fields: List[DataModelField],
decorators: Optional[List[str]] = None,
base_classes: Optional[List[Reference]] = None,
custom_base_class: Optional[str] = None,
custom_template_dir: Optional[Path] = None,
extra_template_data: Optional[DefaultDict[str, Any]] = None,
path: Optional[Path] = None,
description: Optional[str] = None,
):
methods: List[str] = [field.method for field in fields if field.method]
super().__init__(
fields=fields, # type: ignore
reference=reference,
decorators=decorators,
base_classes=base_classes,
custom_base_class=custom_base_class,
custom_template_dir=custom_template_dir,
extra_template_data=extra_template_data,
methods=methods,
path=path,
description=description,
)
config_parameters: Dict[str, Any] = {}
additionalProperties = self.extra_template_data.get('additionalProperties')
if additionalProperties is not None:
config_parameters['extra'] = (
'Extra.allow' if additionalProperties else 'Extra.forbid'
)
self._additional_imports.append(IMPORT_EXTRA)
for config_attribute in 'allow_population_by_field_name', 'allow_mutation':
if config_attribute in self.extra_template_data:
config_parameters[config_attribute] = self.extra_template_data[
config_attribute
]
for data_type in self.all_data_types:
if data_type.is_custom_type:
config_parameters['arbitrary_types_allowed'] = True
break
if config_parameters:
from datamodel_code_generator.model.pydantic import Config
self.extra_template_data['config'] = Config.parse_obj(config_parameters)
@property
def imports(self) -> Tuple[Import, ...]:
if any(f for f in self.fields if f.field):
return chain_as_tuple(super().imports, (IMPORT_FIELD,))
return super().imports
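
`DataModelField.__str__` builds the `Field(...)` argument string (default or `...`, alias, constraint keywords), and `BaseModel.imports` adds the `Field` import only when at least one field renders non-empty. The snippet below is a hand-written illustration of the kind of pydantic model this generator emits; it is not actual generator output:

```python
# Illustration (assumed output shape): a required constrained field and an
# optional aliased field, i.e. the Field(...) forms DataModelField.__str__ produces.
from typing import Optional
from pydantic import BaseModel, Field

class Pet(BaseModel):
    name: str = Field(..., max_length=64)
    nick_name: Optional[str] = Field(None, alias='nickName')
```
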
avg_line_length: 34.193939 | max_line_length: 90 | alphanum_fraction: 0.625842

hexsha: 4caaa878ed7096419e6a87318f1b18045434d2c9 | size: 1,475 | ext: py | lang: Python
repo: antoniopenta/geo-rest-service @ 2f515a341277c758e415b0b2ce455fd8eb44c2ee | path: datasource/PostgresDataSources.py | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
import psycopg2
class PostgresDataSources:
def __init__(self, conf):
self.conf = conf
self.conn = None
def create_connection(self):
try:
string_connection = "dbname='%s' user='%s' host='%s' password='%s'" % \
(self.conf.PARAM['DBNAME'], self.conf.PARAM['USER'],
self.conf.PARAM['SERVER_DB'], self.conf.PARAM['PWD'])
if self.conf.PARAM['DEBUG']:
print("String_connection: " + string_connection)
self.conn = psycopg2.connect(string_connection)
except Exception as e:
raise ValueError('The connection is not created '+str(e))
def get_connection(self):
if self.conn is None:
raise ValueError('The connection is not created yet ')
return self.conn
def close_connection(self):
if self.conn is None:
raise ValueError('The connection is not created yet')
try:
self.conn.close()
except Exception as e:
raise ValueError('Connection is not closed properly')
def commit(self):
if self.conn is None:
raise ValueError('The connection is not created yet')
try:
self.conn.commit()
except Exception as e:
raise ValueError('The commit is not done properly '+str(e))
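
A usage sketch for the class above; the `Conf` object is a hypothetical stand-in for whatever configuration the service injects, and only the `PARAM` keys read by `PostgresDataSources` are populated:

```python
# Sketch only: open a connection, run a trivial query, commit and close.
class Conf:                                   # hypothetical configuration object
    PARAM = {'DBNAME': 'geo', 'USER': 'geo_user', 'SERVER_DB': 'localhost',
             'PWD': 'secret', 'DEBUG': False}

ds = PostgresDataSources(Conf())
ds.create_connection()
cursor = ds.get_connection().cursor()
cursor.execute('SELECT 1')
print(cursor.fetchone())
ds.commit()
ds.close_connection()
```
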
avg_line_length: 32.065217 | max_line_length: 107 | alphanum_fraction: 0.536949

hexsha: 7e369ad341f18321bc48a23a64c8f12b9d67a4bd | size: 2,152 | ext: py | lang: Python
repo: petrovp/networkx-related @ ebe7053e032b527ebaa9565f96ba91145de3fd50 | path: planarity_checking/benchmark.py | licenses: ["BSD-3-Clause"]
stars: 2 (2018-08-02T15:11:20.000Z to 2018-09-18T13:20:36.000Z) | issues: 3 (2018-06-06T15:39:33.000Z to 2018-06-06T15:40:11.000Z) | forks: 1 (2018-09-18T13:31:01.000Z to 2018-09-18T13:31:01.000Z)
content:
import networkx as nx
import urllib
import tarfile
import os, sys
from timeit import default_timer as timer
links = [
('https://users.dcc.uchile.cl/~jfuentess/datasets/files/g/planar1M.tar.gz', 'planar_embedding1000000.pg'),
('https://users.dcc.uchile.cl/~jfuentess/datasets/files/g/planar5M.tar.gz', 'planar_embedding5000000.pg'),
('https://users.dcc.uchile.cl/~jfuentess/datasets/files/g/planar10M.tar.gz', 'planar_embedding10000000.pg'),
('https://users.dcc.uchile.cl/~jfuentess/datasets/files/g/planar15M.tar.gz', 'planar_embedding15000000.pg'),
('https://users.dcc.uchile.cl/~jfuentess/datasets/files/g/planar20M.tar.gz', 'planar_embedding20000000.pg'),
('https://users.dcc.uchile.cl/~jfuentess/datasets/files/g/planar25M.tar.gz', 'planar_embedding25000000.pg'),
('https://users.dcc.uchile.cl/~jfuentess/datasets/files/g/worldcities.tar.gz', 'worldcitiespop.pg')
]
graph_num = int(sys.argv[1])
graph_link = links[graph_num][0]
graph_tar = graph_link.split("/")[-1]
graph_file = links[graph_num][1]
print("Starting check on planar graph: " + graph_file)
time_start = timer()
if not os.path.isfile(graph_file):
if not os.path.isfile(graph_tar):
urllib.urlretrieve(graph_link, graph_tar)
tar = tarfile.open(graph_tar)
tar.extractall()
tar.close()
os.remove(graph_tar)
time_download_finished = timer()
print("Download time: {}".format(time_download_finished - time_start))
G = nx.read_edgelist(graph_file)
time_graph_import_finished = timer()
print("Graph successfully loaded. Graph import time: {}".format(time_graph_import_finished- time_download_finished))
res, embedding = nx.check_planarity(G)
time_planarity_check_finished = timer()
print("Planarity check time: {}".format(time_planarity_check_finished - time_graph_import_finished))
if not res:
print("The check returned 'non planar'.")
exit()
embedding.check_structure()
time_structure_check_finished = timer()
print("Everything is correct. Structure check time: {}".format(time_structure_check_finished - time_planarity_check_finished))
print("Complete time: {}".format(time_structure_check_finished - time_start))
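
The script takes a single index into `links` (for example `python benchmark.py 0`) and times `nx.check_planarity` plus the embedding's structure check on the downloaded graph. A tiny self-contained sketch of that same API on a toy graph, assuming a NetworkX version that ships the planarity checker:

```python
# Minimal sketch of the API being benchmarked, on a toy graph.
import networkx as nx

G = nx.Graph([(1, 2), (2, 3), (3, 1)])      # a triangle is planar
is_planar, embedding = nx.check_planarity(G)
print(is_planar)                             # True
embedding.check_structure()                  # validates the returned PlanarEmbedding
```
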
avg_line_length: 38.428571 | max_line_length: 126 | alphanum_fraction: 0.754647

hexsha: 3c9f81c41a55102d014fb2c5f81cf47a4b15cf45 | size: 1,361 | ext: py | lang: Python
max_stars repo: monroid/openvino @ 8272b3857ef5be0aaa8abbf7bd0d5d5615dc40b6 | path: model-optimizer/extensions/middle/AttributedTileNormalizer.py | licenses: ["Apache-2.0"] | stars: 2,406 (2020-04-22T15:47:54.000Z to 2022-03-31T10:27:37.000Z)
max_issues/max_forks repo: thomas-yanxin/openvino @ 031e998a15ec738c64cc2379d7f30fb73087c272 | path: model-optimizer/extensions/middle/AttributedTileNormalizer.py | licenses: ["Apache-2.0"] | issues: 4,948 (2020-04-22T15:12:39.000Z to 2022-03-31T18:45:42.000Z) | forks: 991 (2020-04-23T18:21:09.000Z to 2022-03-31T18:40:57.000Z)
content:
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from mo.front.common.partial_infer.utils import int64_array
from mo.graph.graph import Graph
from mo.middle.replacement import MiddleReplacementPattern
from mo.ops.const import Const
from mo.ops.tile import Tile
class AttributedTileNormalizer(MiddleReplacementPattern):
enabled = True
@staticmethod
def pattern():
return dict(
nodes=[
('tile', dict(op='AttributedTile', axis=lambda x: x is not None, tiles=lambda x: x is not None))],
edges=[]
)
def replace_pattern(self, graph: Graph, match: dict):
node = match['tile']
name = node.soft_get('name', node.id)
axis = node.axis
tiles = node.tiles
input_shape = node.in_port(0).data.get_shape()
assert input_shape is not None
tiles_input_value = int64_array(np.ones(input_shape.size))
tiles_input_value[axis] = tiles
const = Const(graph, {'value': tiles_input_value, 'name': name + '/tiles'}).create_node()
tile = Tile(graph, {'name': name}).create_node()
node.out_port(0).get_connection().set_source(tile.out_port(0))
node.in_port(0).get_connection().set_destination(tile.in_port(0))
const.out_port(0).connect(tile.in_port(1))
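
The pass rewrites `AttributedTile(axis, tiles)` into the generic `Tile` op by feeding it a per-axis repeats vector: ones everywhere except `tiles` at `axis`. A standalone illustration of that vector construction, outside the Model Optimizer graph machinery:

```python
# For a rank-4 input with axis=2 and tiles=5 the pass builds the repeats vector [1, 1, 5, 1].
import numpy as np

input_rank, axis, tiles = 4, 2, 5
tiles_input_value = np.ones(input_rank, dtype=np.int64)
tiles_input_value[axis] = tiles
print(tiles_input_value)   # [1 1 5 1]
```
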
avg_line_length: 32.404762 | max_line_length: 114 | alphanum_fraction: 0.663483

hexsha: a9fe9b8f8426f6f2d597cafacc7d8592b9a43fc9 | size: 2,317 | ext: py | lang: Python
repo: kalimfaria/storm @ 4af88950ef86ce1cd74b2513e0784d0a05d3197c | path: dev-tools/travis/print-errors-from-test-reports.py | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import glob
from xml.etree.ElementTree import ElementTree
def print_detail_information(testcase, fail_or_error):
print "-" * 50
print "classname: %s / testname: %s" % (testcase.get("classname"), testcase.get("name"))
print fail_or_error.text
stdout = testcase.find("system-out")
if stdout != None:
print "-" * 20, "system-out", "-"*20
print stdout.text
stderr = testcase.find("system-err")
if stderr != None:
print "-" * 20, "system-err", "-"*20
print stderr.text
print "-" * 50
def print_error_reports_from_report_file(file_path):
tree = ElementTree()
try:
tree.parse(file_path)
except:
print "-" * 50
print "Error parsing %s"%file_path
f = open(file_path, "r");
print f.read();
print "-" * 50
return
testcases = tree.findall(".//testcase")
for testcase in testcases:
error = testcase.find("error")
if error is not None:
print_detail_information(testcase, error)
fail = testcase.find("fail")
if fail is not None:
print_detail_information(testcase, fail)
def main(report_dir_path):
for test_report in glob.iglob(report_dir_path + '/*.xml'):
file_path = os.path.abspath(test_report)
try:
print "Checking %s" % test_report
print_error_reports_from_report_file(file_path)
except Exception, e:
print "Error while reading report file, %s" % file_path
print "Exception: %s" % e
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage: %s [report dir path]" % sys.argv[0]
sys.exit(1)
main(sys.argv[1])
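
The script walks surefire-style JUnit XML reports: every `testcase` element is checked for `error`/`fail` children, and captured `system-out`/`system-err` is echoed. A small self-contained Python 3 sketch of the same traversal on an inline report (the report text is invented for illustration):

```python
# Sketch of the report structure the script expects (contents are made up).
from xml.etree.ElementTree import fromstring

report = fromstring("""
<testsuite>
  <testcase classname="org.example.FooTest" name="testBar">
    <error>java.lang.NullPointerException</error>
    <system-out>captured stdout</system-out>
  </testcase>
</testsuite>
""")
for testcase in report.findall(".//testcase"):
    error = testcase.find("error")
    if error is not None:
        print(testcase.get("classname"), testcase.get("name"), error.text)
```
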
avg_line_length: 30.893333 | max_line_length: 92 | alphanum_fraction: 0.636599

hexsha: 993283fa5324bafdc155122f26795f1dfff7662e | size: 1,771 | ext: py | lang: Python
repo: TobiasSchaffner/pymarkdownlint @ 19d766877998fb75494bc5847f4c7ae287e86684 | path: setup.py | licenses: ["MIT"]
stars: 1 (2021-05-18T18:58:53.000Z to 2021-05-18T18:58:53.000Z) | issues: null | forks: 1 (2021-05-18T19:01:17.000Z to 2021-05-18T19:01:17.000Z)
content:
#!/usr/bin/env python
from setuptools import setup, find_packages
import re
import os
# There is an issue with building python packages in a shared vagrant directory
# because of how setuptools works in python < 2.7.9. We solve this by deleting
# the filesystem hardlinking capability during build.
# See: http://stackoverflow.com/a/22147112/381010
del os.link
long_description = (
"Markdown linter written in python. Under active development."
"Source code: https://github.com/jorisroovers/pymarkdownlint"
)
# shamelessly stolen from mkdocs' setup.py:
# https://github.com/mkdocs/mkdocs/blob/master/setup.py
def get_version(package):
"""Return package version as listed in `__version__` in `init.py`."""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
setup(
name="pymarkdownlint",
version=get_version("pymarkdownlint"),
description="Markdown linter written in python. Under active development.",
long_description=long_description,
classifiers=[
"Development Status :: 3 - Alpha",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License"
],
install_requires=[
'bs4==0.0.1',
'click==7.1.2',
'Markdown==3.3.4',
],
keywords='markdown markdownlint pymarkdownlint',
author='Cheuk Yin Ng',
url='https://github.com/cheukyin699/pymarkdownlint',
license='MIT',
packages=find_packages(exclude=["examples"]),
entry_points={
"console_scripts": [
"markdownlint = pymarkdownlint.cli:cli",
],
},
)
avg_line_length: 32.2 | max_line_length: 79 | alphanum_fraction: 0.667984

hexsha: ba84411084cd8b1762bf4b25ec90eb5627161379 | size: 2,003 | ext: py | lang: Python
repo: kcarwile/flask-app @ 630620e2e76c0a0ffeb06d6ef6f8cd6ca701c33e | path: flask_app/public/views.py | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
# -*- coding: utf-8 -*-
"""Public section, including homepage and signup."""
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask_login import login_required, login_user, logout_user
from flask_app.extensions import login_manager
from flask_app.public.forms import LoginForm
from flask_app.user.forms import RegisterForm
from flask_app.user.models import User
from flask_app.utils import flash_errors
blueprint = Blueprint('public', __name__, static_folder='../static')
@login_manager.user_loader
def load_user(user_id):
"""Load user by ID."""
return User.get_by_id(int(user_id))
@blueprint.route('/', methods=['GET', 'POST'])
def home():
"""Home page."""
form = LoginForm(request.form)
# Handle logging in
if request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash('You are logged in.', 'success')
redirect_url = request.args.get('next') or url_for('user.members')
return redirect(redirect_url)
else:
flash_errors(form)
return render_template('public/home.html', form=form)
@blueprint.route('/logout/')
@login_required
def logout():
"""Logout."""
logout_user()
flash('You are logged out.', 'info')
return redirect(url_for('public.home'))
@blueprint.route('/register/', methods=['GET', 'POST'])
def register():
"""Register new user."""
form = RegisterForm(request.form)
if form.validate_on_submit():
User.create(username=form.username.data, email=form.email.data, password=form.password.data, active=True)
flash('Thank you for registering. You can now log in.', 'success')
return redirect(url_for('public.home'))
else:
flash_errors(form)
return render_template('public/register.html', form=form)
@blueprint.route('/about/')
def about():
"""About page."""
form = LoginForm(request.form)
return render_template('public/about.html', form=form)
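
A hedged sketch of how this blueprint would typically be registered in the application factory; the `create_app` function below is an assumption for illustration, not the repository's actual app module, and it omits the extension setup (`login_manager`, database, etc.) a real factory would also perform:

```python
# Sketch only: register the public blueprint on a Flask app.
from flask import Flask
from flask_app.public import views as public_views

def create_app(config_object=None):
    app = Flask(__name__)
    if config_object is not None:
        app.config.from_object(config_object)
    app.register_blueprint(public_views.blueprint)   # routes: /, /logout/, /register/, /about/
    return app
```
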
avg_line_length: 31.296875 | max_line_length: 113 | alphanum_fraction: 0.682976

hexsha: b9453365893224dc83ee23d87e3748800c3f34f4 | size: 1,113 | ext: py | lang: Python
repo: demohack/yute @ 2fb136118733394e3595bf707cb32f1b7b2aede0 | path: _doing/foobar/001-re-id/solution.py | licenses: ["MIT"]
stars: null | issues: 17 (2021-03-24T14:59:50.000Z to 2022-03-05T23:52:31.000Z) | forks: null
content:
def solution(i):
# i = the starting index n of Lambda's string of all primes
# returns the next five digits in the string
# build prime string until size of string >= i + 5
g = gen_primes()
l = 0
lz = i + 5
p0 = 0
p1 = ""
p2 = "" # 2357111317192329
while l < lz:
p0 = next(g)
p1 = str(p0)
p2 = p2 + p1
l += len(p1)
print({'p0': p0, 'p1': p1, 'p': p2, 'l': l, 'lz': lz})
return str(p2[i:lz])
# Sieve of Eratosthenes
# Code by David Eppstein, UC Irvine, 28 Feb 2002
# http://code.activestate.com/recipes/117119/
def gen_primes():
""" Generate an infinite sequence of prime numbers.
"""
D = {}
# The running integer that's checked for primeness
q = 2
while True:
if q not in D:
# q is a new prime.
yield q
D[q * q] = [q]
else:
# q is composite. D[q] is the list of primes that
# divide it.
for p in D[q]:
D.setdefault(p + q, []).append(p)
del D[q]
q += 1
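
For reference, the concatenated prime string begins `"2357111317192329..."`, so small indices give (illustrative calls; note the function also prints its debug dict on every loop iteration):

```python
# Expected behaviour on small indices of the prime string "2357111317192329...".
print(solution(0))   # "23571"
print(solution(3))   # "71113"
```
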
avg_line_length: 23.680851 | max_line_length: 63 | alphanum_fraction: 0.489668

hexsha: c550330ac95f9d6ed32e6cf90dddb701d635d81b | size: 68,590 | ext: py | lang: Python
repo: timgates42/dcos @ 9d7e4d65b797d67fcd32c626f8026f28d2dece90 | path: packages/dcos-integration-test/extra/test_metrics.py | licenses: ["Apache-2.0"]
stars: 2,577 (2016-04-19T09:57:39.000Z to 2022-03-17T10:34:25.000Z) | issues: 7,410 (2016-04-19T21:19:31.000Z to 2022-01-21T20:14:21.000Z) | forks: 625 (2016-04-19T10:09:35.000Z to 2022-03-16T10:53:45.000Z)
content:
import contextlib
import copy
import logging
import re
import sys
import uuid
from typing import Any, Generator
import pytest
import retrying
from dcos_test_utils.dcos_api import DcosApiSession
from prometheus_client.parser import text_string_to_metric_families
from test_helpers import get_expanded_config
__maintainer__ = 'philipnrmn'
__contact__ = 'dcos-cluster-ops@mesosphere.io'
DEPLOY_TIMEOUT = 2 * 60
METRICS_WAITTIME = 4 * 60 * 1000
METRICS_INTERVAL = 2 * 1000
STD_WAITTIME = 15 * 60 * 1000
STD_INTERVAL = 5 * 1000
# tags added if a fault domain is present
FAULT_DOMAIN_TAGS = {'fault_domain_zone', 'fault_domain_region'}
def check_tags(tags: dict, required_tag_names: set, optional_tag_names: set = set()) -> None:
"""Assert that tags contains only expected keys with nonempty values."""
keys = set(tags.keys())
assert keys & required_tag_names == required_tag_names, 'Not all required tags were set'
assert keys - required_tag_names - optional_tag_names == set(), 'Encountered unexpected tags'
for tag_name, tag_val in tags.items():
assert tag_val != '', 'Value for tag "{}" must not be empty'.format(tag_name)
def test_metrics_ping(dcos_api_session: DcosApiSession) -> None:
""" Test that the dcos-metrics service is up on master and agents."""
nodes = get_master_and_agents(dcos_api_session)
for node in nodes:
response = dcos_api_session.metrics.get('/ping', node=node)
assert response.status_code == 200, 'Status code: {}, Content {}'.format(
response.status_code, response.content)
assert response.json()['ok'], 'Status code: {}, Content {}'.format(response.status_code, response.content)
def test_metrics_agents_prom(dcos_api_session: DcosApiSession) -> None:
"""Telegraf Prometheus endpoint is reachable on master and agents."""
nodes = get_master_and_agents(dcos_api_session)
for node in nodes:
response = dcos_api_session.session.request('GET', 'http://' + node + ':61091/metrics')
assert response.status_code == 200, 'Status code: {}'.format(response.status_code)
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def get_metrics_prom(dcos_api_session: DcosApiSession, node: str) -> Any:
"""Gets metrics from prometheus port on node and returns the response.
Retries on non-200 status for up to 300 seconds.
"""
response = dcos_api_session.session.request(
'GET', 'http://{}:61091/metrics'.format(node))
assert response.status_code == 200, 'Status code: {}'.format(response.status_code)
return response
def test_metrics_procstat(dcos_api_session: DcosApiSession) -> None:
"""Assert that procstat metrics are present on master and agent nodes."""
nodes = get_master_and_agents(dcos_api_session)
for node in nodes:
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def check_procstat_metrics() -> None:
response = get_metrics_prom(dcos_api_session, node)
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
if sample[0] == 'procstat_lookup_pid_count':
return
raise Exception('Expected Procstat procstat_lookup_pid_count metric not found')
check_procstat_metrics()
def test_metrics_agents_mesos(dcos_api_session: DcosApiSession) -> None:
"""Assert that mesos metrics on agents are present."""
nodes = get_agents(dcos_api_session)
for node in nodes:
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def check_mesos_metrics() -> None:
response = get_metrics_prom(dcos_api_session, node)
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
if sample[0] == 'mesos_slave_uptime_secs':
return
raise Exception('Expected Mesos mesos_slave_uptime_secs metric not found')
check_mesos_metrics()
def test_metrics_master_mesos(dcos_api_session: DcosApiSession) -> None:
"""Assert that mesos metrics on master are present."""
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def check_mesos_metrics() -> None:
response = get_metrics_prom(dcos_api_session, dcos_api_session.masters[0])
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
if sample[0] == 'mesos_master_uptime_secs':
return
raise Exception('Expected Mesos mesos_master_uptime_secs metric not found')
check_mesos_metrics()
def test_metrics_agents_mesos_overlay(dcos_api_session: DcosApiSession) -> None:
"""Assert that mesos agent overlay module metrics on master and agents are present."""
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def _check_mesos_overlay_metrics() -> None:
response = get_metrics_prom(dcos_api_session, node)
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
if sample[0] == 'mesos_overlay_slave_registering':
return
raise Exception('Expected Mesos mesos_overlay_slave_registering metric not found')
nodes = get_master_and_agents(dcos_api_session)
for node in nodes:
_check_mesos_overlay_metrics()
def test_metrics_master_mesos_overlay(dcos_api_session: DcosApiSession) -> None:
"""Assert that mesos overlay module metrics on master are present."""
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def _check_mesos_overlay_metrics() -> None:
response = get_metrics_prom(dcos_api_session, dcos_api_session.masters[0])
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
if sample[0] == 'mesos_overlay_master_process_restarts':
return
raise Exception('Expected Mesos mesos_overlay_master_process_restarts metric not found')
_check_mesos_overlay_metrics()
def test_metrics_master_zookeeper(dcos_api_session: DcosApiSession) -> None:
"""Assert that ZooKeeper metrics on master are present."""
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def check_zookeeper_metrics() -> None:
response = get_metrics_prom(dcos_api_session, dcos_api_session.masters[0])
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
if sample[0] == 'zookeeper_avg_latency':
assert sample[1]['dcos_component_name'] == 'ZooKeeper'
return
raise Exception('Expected ZooKeeper zookeeper_avg_latency metric not found')
check_zookeeper_metrics()
def test_metrics_master_cockroachdb(dcos_api_session: DcosApiSession) -> None:
"""Assert that CockroachDB metrics on master are present."""
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def check_cockroachdb_metrics() -> None:
response = get_metrics_prom(dcos_api_session, dcos_api_session.masters[0])
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
if sample[0] == 'ranges_underreplicated':
assert sample[1]['dcos_component_name'] == 'CockroachDB'
return
raise Exception('Expected CockroachDB ranges_underreplicated metric not found')
check_cockroachdb_metrics()
def test_metrics_master_etcd(dcos_api_session: DcosApiSession) -> None:
"""Assert that DC/OS etcd metrics on master are present."""
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def _check_etcd_metrics() -> None:
response = get_metrics_prom(dcos_api_session, dcos_api_session.masters[0])
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
if sample[0].startswith('etcd_') and sample[1].get('dcos_component_name') == 'etcd':
return
raise Exception('Expected DC/OS etcd etcd_* metric on master nodes not found')
_check_etcd_metrics()
def test_metrics_master_calico(dcos_api_session: DcosApiSession) -> None:
"""Assert that DC/OS Calico metrics on master are present."""
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def _check_calico_metrics() -> None:
response = get_metrics_prom(dcos_api_session, dcos_api_session.masters[0])
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
if sample[0].startswith('felix') and sample[1].get('dcos_component_name') == 'DC/OS Calico':
return
raise Exception('Expected DC/OS Calico felix* metric on master nodes not found')
_check_calico_metrics()
def test_metrics_agents_calico(dcos_api_session: DcosApiSession) -> None:
"""Assert that DC/OS Calico metrics on agents are present."""
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def _check_calico_metrics() -> None:
response = get_metrics_prom(dcos_api_session, node)
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
if sample[0].startswith('felix') and sample[1].get('dcos_component_name') == 'DC/OS Calico':
return
raise Exception('Expected DC/OS Calico felix* metric on agent nodes not found')
nodes = get_agents(dcos_api_session)
for node in nodes:
_check_calico_metrics()
def test_metrics_master_adminrouter_nginx_vts(dcos_api_session: DcosApiSession) -> None:
"""Assert that Admin Router Nginx VTS metrics on master are present."""
@retrying.retry(
wait_fixed=STD_INTERVAL,
stop_max_delay=METRICS_WAITTIME,
retry_on_exception=lambda e: isinstance(e, AssertionError)
)
def check_adminrouter_metrics() -> None:
response = get_metrics_prom(dcos_api_session, dcos_api_session.masters[0])
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
if sample[0].startswith('nginx_vts_') and sample[1].get('dcos_component_name') == 'Admin Router':
return
raise AssertionError('Expected Admin Router nginx_vts_* metrics not found')
check_adminrouter_metrics()
def test_metrics_master_exhibitor_status(dcos_api_session: DcosApiSession) -> None:
"""Assert that Exhibitor status metrics on master are present."""
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def check_exhibitor_metrics() -> None:
response = get_metrics_prom(dcos_api_session, dcos_api_session.masters[0])
expected_metrics = {'exhibitor_status_code', 'exhibitor_status_isleader'}
samples = []
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
if sample[0] in expected_metrics:
samples.append(sample)
reported_metrics = {sample[0] for sample in samples}
assert reported_metrics == expected_metrics, (
'Expected Exhibitor status metrics not found. '
'Expected: {} Reported: {}'.format(
expected_metrics, reported_metrics,
)
)
for sample in samples:
assert sample[1]['dcos_component_name'] == 'Exhibitor'
assert 'url' not in sample[1]
assert 'exhibitor_address' in sample[1]
check_exhibitor_metrics()
def _nginx_vts_measurement_basename(name: str) -> str:
"""
Extracts the base name of the metric reported by nginx vts filter module
and removes the metric suffix.
E.g.: nginx_server_status_request_bytes -> nginx_server_status
"""
return '_'.join(name.split('_')[:3])
def test_metrics_master_adminrouter_nginx_drop_requests_seconds(dcos_api_session: DcosApiSession) -> None:
"""
nginx_vts_*_request_seconds* metrics are not present.
"""
node = dcos_api_session.masters[0]
# Make request to a fine-grained metrics annotated upstream of
# Admin Router (IAM in this case).
dcos_api_session.get('/acs/api/v1/auth/jwks', host=node)
@retrying.retry(
wait_fixed=STD_INTERVAL,
stop_max_delay=METRICS_WAITTIME,
retry_on_exception=lambda e: isinstance(e, AssertionError)
)
def check_adminrouter_metrics() -> None:
vts_metrics_count = 0
response = get_metrics_prom(dcos_api_session, node)
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
match = re.match(r'^nginx_vts_.+_request_seconds.*$', sample[0])
assert match is None
# We assert the validity of the test here by confirming that
# VTS reported metrics have been scraped by telegraf.
if sample[0].startswith('nginx_vts_'):
vts_metrics_count += 1
assert vts_metrics_count > 0
check_adminrouter_metrics()
def test_metrics_agent_adminrouter_nginx_drop_requests_seconds(dcos_api_session: DcosApiSession) -> None:
"""
nginx_vts_*_request_seconds* metrics are not present.
"""
# Make request to Admin Router on every agent to ensure metrics.
state_response = dcos_api_session.get('/state', host=dcos_api_session.masters[0], port=5050)
assert state_response.status_code == 200
state = state_response.json()
for agent in state['slaves']:
agent_url = '/system/v1/agent/{}/dcos-metadata/dcos-version.json'.format(agent['id'])
response = dcos_api_session.get(agent_url)
assert response.status_code == 200
nodes = get_agents(dcos_api_session)
for node in nodes:
@retrying.retry(
wait_fixed=STD_INTERVAL,
stop_max_delay=METRICS_WAITTIME,
retry_on_exception=lambda e: isinstance(e, AssertionError)
)
def check_adminrouter_metrics() -> None:
vts_metrics_count = 0
response = get_metrics_prom(dcos_api_session, node)
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
match = re.match(r'^nginx_vts_.+_request_seconds.*$', sample[0])
assert match is None
# We assert the validity of the test here by confirming that
# VTS reported metrics have been scraped by telegraf.
if sample[0].startswith('nginx_vts_'):
vts_metrics_count += 1
assert vts_metrics_count > 0
check_adminrouter_metrics()
def test_metrics_master_adminrouter_nginx_vts_processor(dcos_api_session: DcosApiSession) -> None:
"""Assert that processed Admin Router metrics on master are present."""
node = dcos_api_session.masters[0]
# Make request to a fine-grained metrics annotated upstream of
# Admin Router (IAM in this case).
r = dcos_api_session.get('/acs/api/v1/auth/jwks', host=node)
assert r.status_code == 200
# Accessing /service/marathon/v2/queue via Admin Router will cause
# Telegraf to emit nginx_service_backend and nginx_service_status metrics.
r = dcos_api_session.get('/service/marathon/v2/queue', host=node)
assert r.status_code == 200
@retrying.retry(
wait_fixed=STD_INTERVAL,
stop_max_delay=METRICS_WAITTIME,
retry_on_exception=lambda e: isinstance(e, AssertionError)
)
def check_adminrouter_metrics() -> None:
measurements = set()
expect_dropped = set([
'nginx_vts_filter',
'nginx_vts_upstream',
'nginx_vts_server',
])
unexpected_samples = []
response = get_metrics_prom(dcos_api_session, node)
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
if sample[0].startswith('nginx_') and sample[1].get('dcos_component_name') == 'Admin Router':
basename = _nginx_vts_measurement_basename(sample[0])
measurements.add(basename)
if basename in expect_dropped:
unexpected_samples.append(sample)
assert unexpected_samples == []
expected = set([
'nginx_server_status',
'nginx_upstream_status',
'nginx_upstream_backend',
'nginx_service_backend',
'nginx_service_status',
])
difference = expected - measurements
assert not difference
remainders = expect_dropped & measurements
assert not remainders
check_adminrouter_metrics()
def test_metrics_agents_adminrouter_nginx_vts(dcos_api_session: DcosApiSession) -> None:
"""Assert that Admin Router Nginx VTS metrics on agents are present."""
nodes = get_agents(dcos_api_session)
for node in nodes:
@retrying.retry(
wait_fixed=STD_INTERVAL,
stop_max_delay=METRICS_WAITTIME,
retry_on_exception=lambda e: isinstance(e, AssertionError)
)
def check_adminrouter_metrics() -> None:
response = get_metrics_prom(dcos_api_session, node)
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
if (
sample[0].startswith('nginx_vts_') and
sample[1].get('dcos_component_name') == 'Admin Router Agent'
):
return
raise AssertionError('Expected Admin Router nginx_vts_* metrics not found')
check_adminrouter_metrics()
def test_metrics_agent_adminrouter_nginx_vts_processor(dcos_api_session: DcosApiSession) -> None:
"""Assert that processed Admin Router metrics on agent are present."""
# Make request to Admin Router on every agent to ensure metrics.
state_response = dcos_api_session.get('/state', host=dcos_api_session.masters[0], port=5050)
assert state_response.status_code == 200
state = state_response.json()
for agent in state['slaves']:
agent_url = '/system/v1/agent/{}/dcos-metadata/dcos-version.json'.format(agent['id'])
response = dcos_api_session.get(agent_url)
assert response.status_code == 200
nodes = get_agents(dcos_api_session)
for node in nodes:
@retrying.retry(
wait_fixed=STD_INTERVAL,
stop_max_delay=METRICS_WAITTIME,
retry_on_exception=lambda e: isinstance(e, AssertionError)
)
def check_adminrouter_metrics() -> None:
measurements = set()
expect_dropped = set([
'nginx_vts_filter',
'nginx_vts_upstream',
'nginx_vts_server',
])
unexpected_samples = []
response = get_metrics_prom(dcos_api_session, node)
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
if sample[0].startswith('nginx_') and sample[1].get('dcos_component_name') == 'Admin Router Agent':
basename = _nginx_vts_measurement_basename(sample[0])
measurements.add(basename)
if basename in expect_dropped:
unexpected_samples.append(sample)
assert unexpected_samples == []
expected = set([
'nginx_server_status',
])
difference = expected - measurements
assert not difference
remainders = expect_dropped & measurements
assert not remainders
check_adminrouter_metrics()
def test_metrics_diagnostics(dcos_api_session: DcosApiSession) -> None:
"""Assert that DC/OS Diagnostics metrics on master are present."""
nodes = get_master_and_agents(dcos_api_session)
for node in nodes:
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def check_diagnostics_metrics() -> None:
response = get_metrics_prom(dcos_api_session, node)
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
if sample[1]['dcos_component_name'] == 'DC/OS Diagnostics':
return
raise Exception('Expected DC/OS Diagnostics metrics not found')
check_diagnostics_metrics()
def test_metrics_fluentbit(dcos_api_session: DcosApiSession) -> None:
"""Ensure that fluent bit metrics are present on masters and agents"""
nodes = get_master_and_agents(dcos_api_session)
for node in nodes:
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def check_fluentbit_metrics() -> None:
response = get_metrics_prom(dcos_api_session, node)
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
if sample[0].startswith('fluentbit_output_errors_total'):
assert sample[1]['dcos_component_name'] == 'DC/OS Fluent Bit'
return
raise Exception('Expected DC/OS Fluent Bit metrics not found')
check_fluentbit_metrics()
def check_statsd_app_metrics(dcos_api_session: DcosApiSession, marathon_app: Any, node: str, expected_metrics: Any
) -> None:
with dcos_api_session.marathon.deploy_and_cleanup(marathon_app, check_health=False, timeout=DEPLOY_TIMEOUT):
endpoints = dcos_api_session.marathon.get_app_service_endpoints(marathon_app['id'])
assert len(endpoints) == 1, 'The marathon app should have been deployed exactly once.'
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def check_statsd_metrics() -> None:
expected_copy = copy.deepcopy(expected_metrics)
response = get_metrics_prom(dcos_api_session, node)
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
if sample[0] in expected_copy:
val = expected_copy.pop(sample[0])
assert sample[2] == val
if len(expected_copy) == 0:
return
sys.stderr.write(
"%r\n%r\n" % (
expected_metrics,
expected_copy,
)
)
raise Exception('Expected statsd metrics not found')
check_statsd_metrics()
def test_metrics_agent_statsd(dcos_api_session: DcosApiSession) -> None:
"""Assert that statsd metrics on private agent are present."""
task_name = 'test-metrics-statsd-app'
metric_name_pfx = 'test_metrics_statsd_app'
marathon_app = {
'id': '/' + task_name,
'instances': 1,
'cpus': 0.1,
'mem': 128,
'env': {
'STATIC_STATSD_UDP_PORT': '61825',
'STATIC_STATSD_UDP_HOST': 'localhost'
},
'cmd': '\n'.join([
'echo "Sending metrics to $STATIC_STATSD_UDP_HOST:$STATIC_STATSD_UDP_PORT"',
'echo "Sending gauge"',
'echo "{}.gauge:100|g" | nc -w 1 -u $STATIC_STATSD_UDP_HOST $STATIC_STATSD_UDP_PORT'.format(
metric_name_pfx),
'echo "Sending counts"',
'echo "{}.count:1|c" | nc -w 1 -u $STATIC_STATSD_UDP_HOST $STATIC_STATSD_UDP_PORT'.format(
metric_name_pfx),
'echo "Sending timings"',
'echo "{}.timing:1|ms" | nc -w 1 -u $STATIC_STATSD_UDP_HOST $STATIC_STATSD_UDP_PORT'.format(
metric_name_pfx),
'echo "Sending histograms"',
'echo "{}.histogram:1|h" | nc -w 1 -u $STATIC_STATSD_UDP_HOST $STATIC_STATSD_UDP_PORT'.format(
metric_name_pfx),
'echo "Done. Sleeping forever."',
'while true; do',
' sleep 1000',
'done',
]),
'container': {
'type': 'MESOS',
# pin image to working version - https://jira.mesosphere.com/browse/DCOS-62478
'docker': {'image': 'library/alpine:3.10.3'}
},
'networks': [{'mode': 'host'}],
}
expected_metrics = {
metric_name_pfx + '_gauge': 100.0,
# NOTE: prometheus_client appends _total to counter-type metrics if they don't already have the suffix
# ref: https://github.com/prometheus/client_python/blob/master/prometheus_client/parser.py#L169
# (the raw prometheus output here omits _total)
metric_name_pfx + '_count_total': 1.0,
metric_name_pfx + '_timing_count': 1.0,
metric_name_pfx + '_histogram_count': 1.0,
}
if dcos_api_session.slaves:
marathon_app['constraints'] = [['hostname', 'LIKE', dcos_api_session.slaves[0]]]
check_statsd_app_metrics(dcos_api_session, marathon_app, dcos_api_session.slaves[0], expected_metrics)
if dcos_api_session.public_slaves:
marathon_app['acceptedResourceRoles'] = ["slave_public"]
marathon_app['constraints'] = [['hostname', 'LIKE', dcos_api_session.public_slaves[0]]]
check_statsd_app_metrics(dcos_api_session, marathon_app, dcos_api_session.public_slaves[0], expected_metrics)
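# --- Editor's illustration, not part of the original test module ---
# Background for the '_count_total' key in expected_metrics above: as the
# comment there notes, prometheus_client's text parser appends '_total' to
# counter samples that lack the suffix. A standalone demonstration (exact
# behaviour depends on the installed prometheus_client version):
def _example_counter_total_suffix():
    counter_text = '# TYPE demo_count counter\ndemo_count 1\n'
    names = [sample[0]
             for family in text_string_to_metric_families(counter_text)
             for sample in family.samples]
    # On parser versions with the behaviour cited above this is
    # ['demo_count_total']; older parsers may return ['demo_count'].
    return names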
@contextlib.contextmanager
def deploy_and_cleanup_dcos_package(dcos_api_session: DcosApiSession, package_name: str, package_version: str,
framework_name: str) -> Generator:
"""Deploys dcos package and waits for package teardown once the context is left"""
app_id = dcos_api_session.cosmos.install_package(package_name, package_version=package_version).json()['appId']
dcos_api_session.marathon.wait_for_deployments_complete()
try:
yield
finally:
dcos_api_session.cosmos.uninstall_package(package_name, app_id=app_id)
# Retry for 15 minutes for teardown completion
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=STD_WAITTIME)
def wait_for_package_teardown() -> None:
state_response = dcos_api_session.get('/state', host=dcos_api_session.masters[0], port=5050)
assert state_response.status_code == 200
state = state_response.json()
# Rarely, the framework will continue to show up in 'frameworks' instead of
# 'completed_frameworks', even after teardown. To avoid this causing a test
# failure, if the framework continues to show up in 'frameworks', we instead
# check if there are any running tasks.
frameworks = {f['name']: f for f in state['frameworks']}
assert framework_name not in frameworks or len(
frameworks[framework_name]['tasks']) == 0, 'Framework {} still running'.format(framework_name)
wait_for_package_teardown()
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def get_task_hostname(dcos_api_session: DcosApiSession, framework_name: str, task_name: str) -> Any:
# helper func that gets a framework's task's hostname
mesos_id = node = ''
state_response = dcos_api_session.get('/state', host=dcos_api_session.masters[0], port=5050)
assert state_response.status_code == 200
state = state_response.json()
for framework in state['frameworks']:
if framework['name'] == framework_name:
for task in framework['tasks']:
if task['name'] == task_name:
mesos_id = task['slave_id']
break
break
assert mesos_id is not None
for agent in state['slaves']:
if agent['id'] == mesos_id:
node = agent['hostname']
break
return node
def test_task_metrics_metadata(dcos_api_session: DcosApiSession) -> None:
"""Test that task metrics have expected metadata/labels"""
expanded_config = get_expanded_config()
if expanded_config.get('security') == 'strict':
pytest.skip('MoM disabled for strict mode')
with deploy_and_cleanup_dcos_package(dcos_api_session, 'marathon', '1.6.535', 'marathon-user'):
node = get_task_hostname(dcos_api_session, 'marathon', 'marathon-user')
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def check_metrics_metadata() -> None:
response = get_metrics_prom(dcos_api_session, node)
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
if sample[1].get('task_name') == 'marathon-user':
assert sample[1]['service_name'] == 'marathon'
# check for whitelisted label
assert sample[1]['DCOS_SERVICE_NAME'] == 'marathon-user'
return
raise Exception('Expected marathon task metrics not found')
check_metrics_metadata()
def test_executor_metrics_metadata(dcos_api_session: DcosApiSession) -> None:
"""Test that executor metrics have expected metadata/labels"""
expanded_config = get_expanded_config()
if expanded_config.get('security') == 'strict':
pytest.skip('Framework disabled for strict mode')
with deploy_and_cleanup_dcos_package(dcos_api_session, 'hello-world', '2.2.0-0.42.2', 'hello-world'):
node = get_task_hostname(dcos_api_session, 'marathon', 'hello-world')
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def check_executor_metrics_metadata() -> None:
response = get_metrics_prom(dcos_api_session, node)
for family in text_string_to_metric_families(response.text):
for sample in family.samples:
if sample[0] == 'cpus_nr_periods' and sample[1].get('service_name') == 'hello-world':
assert sample[1]['task_name'] == ''
# hello-world executors can be named "hello" or "world"
assert (sample[1]['executor_name'] == 'hello' or sample[1]['executor_name'] == 'world')
return
raise Exception('Expected hello-world executor metrics not found')
check_executor_metrics_metadata()
def test_metrics_node(dcos_api_session: DcosApiSession) -> None:
"""Test that the '/system/v1/metrics/v0/node' endpoint returns the expected
metrics and metric metadata.
"""
def expected_datapoint_response(response: dict) -> bool:
"""Enure that the "node" endpoint returns a "datapoints" dict.
"""
assert 'datapoints' in response, '"datapoints" dictionary not found in response, got {}'.format(response)
for dp in response['datapoints']:
assert 'name' in dp, '"name" parameter should not be empty, got {}'.format(dp)
if 'filesystem' in dp['name']:
assert 'tags' in dp, '"tags" key not found, got {}'.format(dp)
assert 'path' in dp['tags'], ('"path" tag not found for filesystem metric, '
'got {}'.format(dp))
assert len(dp['tags']['path']) > 0, ('"path" tag should not be empty for '
'filesystem metrics, got {}'.format(dp))
return True
def expected_dimension_response(response: dict) -> bool:
"""Ensure that the "node" endpoint returns a dimensions dict that
contains a non-empty string for cluster_id.
"""
assert 'dimensions' in response, '"dimensions" object not found in response, got {}'.format(response)
assert 'cluster_id' in response['dimensions'], '"cluster_id" key not found in dimensions, got {}'.format(response)
assert response['dimensions']['cluster_id'] != "", 'expected cluster_id to contain a value'
assert response['dimensions']['mesos_id'] == '', 'expected dimensions to include empty "mesos_id"'
return True
# Retry for 5 minutes for the node metrics content to appear.
@retrying.retry(stop_max_delay=METRICS_WAITTIME)
def wait_for_node_response(node: Any) -> Any:
response = dcos_api_session.metrics.get('/node', node=node)
assert response.status_code == 200
return response
nodes = get_master_and_agents(dcos_api_session)
for node in nodes:
response = wait_for_node_response(node)
assert response.status_code == 200, 'Status code: {}, Content {}'.format(
response.status_code, response.content)
assert expected_datapoint_response(response.json())
assert expected_dimension_response(response.json())
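# --- Editor's illustration, not part of the original test module ---
# A hypothetical, hand-written payload (not captured from a real cluster)
# showing the shape the checks above accept: every datapoint carries a
# 'name', filesystem datapoints carry a non-empty 'path' tag, and
# 'dimensions' includes a non-empty 'cluster_id'.
def _example_node_payload() -> dict:
    return {
        'datapoints': [
            {'name': 'cpu.total', 'value': 0.5, 'unit': 'percent',
             'timestamp': '2020-01-01T00:00:00Z'},
            {'name': 'filesystem.capacity.total', 'value': 1.0, 'unit': 'bytes',
             'timestamp': '2020-01-01T00:00:00Z', 'tags': {'path': '/'}},
        ],
        'dimensions': {'cluster_id': '00000000-0000-0000-0000-000000000000',
                       'mesos_id': ''},
    }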
def get_master_and_agents(dcos_api_session: DcosApiSession) -> list:
nodes = [dcos_api_session.masters[0]]
nodes.extend(get_agents(dcos_api_session))
return nodes
def get_agents(dcos_api_session: DcosApiSession) -> list:
nodes = []
if dcos_api_session.slaves:
nodes.append(dcos_api_session.slaves[0])
if dcos_api_session.public_slaves:
nodes.append(dcos_api_session.public_slaves[0])
return nodes
def test_metrics_containers(dcos_api_session: DcosApiSession) -> None:
"""Assert that a Marathon app's container and app metrics can be retrieved."""
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def test_containers(app_endpoints: list) -> None:
for agent in app_endpoints:
container_metrics, app_metrics = get_metrics_for_task(dcos_api_session, agent.host, 'statsd-emitter')
# Check container metrics.
# Check tags on each datapoint.
cid_registry = set()
for dp in container_metrics['datapoints']:
# Verify expected tags are present.
assert 'tags' in dp, 'got {}'.format(dp)
expected_tag_names = {
'container_id',
}
if 'executor_name' in dp['tags']:
# if present we want to make sure it has a valid value.
expected_tag_names.add('executor_name')
if dp['name'].startswith('blkio.'):
# blkio stats have 'blkio_device' tags.
expected_tag_names.add('blkio_device')
check_tags(dp['tags'], expected_tag_names, FAULT_DOMAIN_TAGS)
# Ensure all container ID's in the container/<id> endpoint are
# the same.
cid_registry.add(dp['tags']['container_id'])
assert len(cid_registry) == 1, 'Not all container IDs in the metrics response are equal'
# Check app metrics.
# We expect three datapoints, could be in any order
uptime_dp = None
for dp in app_metrics['datapoints']:
if dp['name'] == 'statsd_tester.time.uptime':
uptime_dp = dp
break
# If this metric is missing, statsd-emitter's metrics were not received
assert uptime_dp is not None, 'got {}'.format(app_metrics)
datapoint_keys = ['name', 'value', 'unit', 'timestamp', 'tags']
for k in datapoint_keys:
assert k in uptime_dp, 'got {}'.format(uptime_dp)
expected_tag_names = {
'dcos_cluster_id',
'test_tag_key',
'dcos_cluster_name',
'host'
}
# If fault domain is enabled, ensure that fault domain tags are present
expanded_config = get_expanded_config()
if expanded_config.get('fault_domain_enabled') == 'true':
expected_tag_names |= FAULT_DOMAIN_TAGS
check_tags(uptime_dp['tags'], expected_tag_names)
assert uptime_dp['tags']['test_tag_key'] == 'test_tag_value', 'got {}'.format(uptime_dp)
assert uptime_dp['value'] > 0
marathon_config = {
"id": "/statsd-emitter",
"cmd": "./statsd-emitter -debug",
"fetch": [
{
"uri": "https://downloads.mesosphere.com/dcos-metrics/1.11.0/statsd-emitter",
"executable": True
}
],
"cpus": 0.5,
"mem": 128.0,
"instances": 1
}
with dcos_api_session.marathon.deploy_and_cleanup(marathon_config, check_health=False, timeout=DEPLOY_TIMEOUT):
endpoints = dcos_api_session.marathon.get_app_service_endpoints(marathon_config['id'])
assert len(endpoints) == 1, 'The marathon app should have been deployed exactly once.'
test_containers(endpoints)
def test_statsd_metrics_containers_app(dcos_api_session: DcosApiSession) -> None:
"""Assert that statsd app metrics appear in the v0 metrics API."""
task_name = 'test-statsd-metrics-containers-app'
metric_name_pfx = 'test_statsd_metrics_containers_app'
marathon_app = {
'id': '/' + task_name,
'instances': 1,
'cpus': 0.1,
'mem': 128,
'cmd': '\n'.join([
'echo "Sending metrics to $STATSD_UDP_HOST:$STATSD_UDP_PORT"',
'echo "Sending gauge"',
'echo "{}.gauge:100|g" | nc -w 1 -u $STATSD_UDP_HOST $STATSD_UDP_PORT'.format(metric_name_pfx),
'echo "Sending counts"',
'echo "{}.count:1|c" | nc -w 1 -u $STATSD_UDP_HOST $STATSD_UDP_PORT'.format(metric_name_pfx),
'echo "{}.count:1|c" | nc -w 1 -u $STATSD_UDP_HOST $STATSD_UDP_PORT'.format(metric_name_pfx),
'echo "Sending timings"',
'echo "{}.timing:1|ms" | nc -w 1 -u $STATSD_UDP_HOST $STATSD_UDP_PORT'.format(metric_name_pfx),
'echo "{}.timing:2|ms" | nc -w 1 -u $STATSD_UDP_HOST $STATSD_UDP_PORT'.format(metric_name_pfx),
'echo "{}.timing:3|ms" | nc -w 1 -u $STATSD_UDP_HOST $STATSD_UDP_PORT'.format(metric_name_pfx),
'echo "Sending histograms"',
'echo "{}.histogram:1|h" | nc -w 1 -u $STATSD_UDP_HOST $STATSD_UDP_PORT'.format(metric_name_pfx),
'echo "{}.histogram:2|h" | nc -w 1 -u $STATSD_UDP_HOST $STATSD_UDP_PORT'.format(metric_name_pfx),
'echo "{}.histogram:3|h" | nc -w 1 -u $STATSD_UDP_HOST $STATSD_UDP_PORT'.format(metric_name_pfx),
'echo "{}.histogram:4|h" | nc -w 1 -u $STATSD_UDP_HOST $STATSD_UDP_PORT'.format(metric_name_pfx),
'echo "Done. Sleeping forever."',
'while true; do',
' sleep 1000',
'done',
]),
'container': {
'type': 'MESOS',
'docker': {'image': 'library/alpine'}
},
'networks': [{'mode': 'host'}],
}
expected_metrics = [
# metric_name, metric_value
('.'.join([metric_name_pfx, 'gauge']), 100),
('.'.join([metric_name_pfx, 'count']), 2),
('.'.join([metric_name_pfx, 'timing', 'count']), 3),
('.'.join([metric_name_pfx, 'histogram', 'count']), 4),
]
deploy_marathon_app_and_check_metrics(dcos_api_session, expected_metrics, marathon_app, task_name)
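# --- Editor's illustration, not part of the original test module ---
# The `nc` lines in the Marathon app above speak the plain statsd line
# protocol: '<name>:<value>|<type>' sent as a UDP datagram. The same thing in
# Python, with placeholder host/port standing in for the $STATSD_UDP_HOST /
# $STATSD_UDP_PORT values Mesos injects into the task environment:
import socket

def _example_send_statsd(line: str, host: str = '127.0.0.1', port: int = 8125) -> None:
    # Fire-and-forget UDP send, mirroring `echo "<line>" | nc -w 1 -u host port`.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sock.sendto(line.encode('ascii'), (host, port))

# e.g. _example_send_statsd('test_statsd_metrics_containers_app.gauge:100|g')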
def deploy_marathon_app_and_check_metrics(dcos_api_session: DcosApiSession, expected_metrics: list, marathon_app: Any,
task_name: str) -> None:
with dcos_api_session.marathon.deploy_and_cleanup(marathon_app, check_health=False, timeout=DEPLOY_TIMEOUT):
endpoints = dcos_api_session.marathon.get_app_service_endpoints(marathon_app['id'])
assert len(endpoints) == 1, 'The marathon app should have been deployed exactly once.'
node = endpoints[0].host
for metric_name, metric_value in expected_metrics:
assert_app_metric_value_for_task(dcos_api_session, node, task_name, metric_name, metric_value)
def test_prom_metrics_containers_app_host(dcos_api_session: DcosApiSession) -> None:
"""Assert that prometheus app metrics appear in the v0 metrics API."""
task_name = 'test-prom-metrics-containers-app-host'
metric_name_pfx = 'test_prom_metrics_containers_app_host'
marathon_app = {
'id': '/' + task_name,
'instances': 1,
'cpus': 0.1,
'mem': 128,
'cmd': '\n'.join([
'echo "Creating metrics file..."',
'touch metrics',
'echo "# TYPE {}_gauge gauge" >> metrics'.format(metric_name_pfx),
'echo "{}_gauge 100" >> metrics'.format(metric_name_pfx),
'echo "# TYPE {}_count counter" >> metrics'.format(metric_name_pfx),
'echo "{}_count 2" >> metrics'.format(metric_name_pfx),
'echo "# TYPE {}_histogram histogram" >> metrics'.format(metric_name_pfx),
'echo "{}_histogram_bucket{{le=\\"+Inf\\"}} 4" >> metrics'.format(metric_name_pfx),
'echo "{}_histogram_sum 4" >> metrics'.format(metric_name_pfx),
'echo "{}_histogram_seconds_count 4" >> metrics'.format(metric_name_pfx),
'echo "Serving prometheus metrics on http://localhost:$PORT0"',
'python3 -m http.server $PORT0',
]),
'container': {
'type': 'DOCKER',
'docker': {'image': 'library/python:3'}
},
'portDefinitions': [{
'protocol': 'tcp',
'port': 0,
'labels': {'DCOS_METRICS_FORMAT': 'prometheus'},
}],
}
logging.debug('Starting marathon app with config: %s', marathon_app)
expected_metrics = [
# metric_name, metric_value
('_'.join([metric_name_pfx, 'gauge.gauge']), 100),
('_'.join([metric_name_pfx, 'count.counter']), 2),
('_'.join([metric_name_pfx, 'histogram_seconds', 'count']), 4),
]
deploy_marathon_app_and_check_metrics(dcos_api_session, expected_metrics, marathon_app, task_name)
def test_prom_metrics_containers_app_bridge(dcos_api_session: DcosApiSession) -> None:
"""Assert that prometheus app metrics appear in the v0 metrics API."""
task_name = 'test-prom-metrics-containers-app-bridge'
metric_name_pfx = 'test_prom_metrics_containers_app_bridge'
marathon_app = {
'id': '/' + task_name,
'instances': 1,
'cpus': 0.1,
'mem': 128,
'cmd': '\n'.join([
'echo "Creating metrics file..."',
'touch metrics',
'echo "# TYPE {}_gauge gauge" >> metrics'.format(metric_name_pfx),
'echo "{}_gauge 100" >> metrics'.format(metric_name_pfx),
'echo "# TYPE {}_count counter" >> metrics'.format(metric_name_pfx),
'echo "{}_count 2" >> metrics'.format(metric_name_pfx),
'echo "# TYPE {}_histogram histogram" >> metrics'.format(metric_name_pfx),
'echo "{}_histogram_bucket{{le=\\"+Inf\\"}} 4" >> metrics'.format(metric_name_pfx),
'echo "{}_histogram_sum 4" >> metrics'.format(metric_name_pfx),
'echo "{}_histogram_seconds_count 4" >> metrics'.format(metric_name_pfx),
'echo "Serving prometheus metrics on http://localhost:8000"',
'python3 -m http.server 8000',
]),
'networks': [{'mode': 'container/bridge'}],
'container': {
'type': 'MESOS',
'docker': {'image': 'library/python:3'},
'portMappings': [
{
'containerPort': 8000,
'hostPort': 0,
'protocol': 'tcp',
'labels': {'DCOS_METRICS_FORMAT': 'prometheus'},
}
]
},
}
logging.debug('Starting marathon app with config: %s', marathon_app)
expected_metrics = [
# metric_name, metric_value
('_'.join([metric_name_pfx, 'gauge.gauge']), 100),
('_'.join([metric_name_pfx, 'count.counter']), 2),
('_'.join([metric_name_pfx, 'histogram_seconds', 'count']), 4),
]
deploy_marathon_app_and_check_metrics(dcos_api_session, expected_metrics, marathon_app, task_name)
def test_task_prom_metrics_not_filtered(dcos_api_session: DcosApiSession) -> None:
"""Assert that prometheus app metrics aren't filtered according to adminrouter config.
This is a regression test protecting a fix for a bug that mistakenly applied filter criteria intended for
adminrouter metrics to Prometheus-formatted metrics gathered from tasks.
"""
task_name = 'test-task-prom-metrics-not-filtered'
metric_name_pfx = 'test_task_prom_metrics_not_filtered'
marathon_app = {
'id': '/' + task_name,
'instances': 1,
'cpus': 0.1,
'mem': 128,
'cmd': '\n'.join([
# Serve metrics that would be dropped by Telegraf were they collected from the adminrouter. These are task
# metrics, so we expect Telegraf to gather and output them.
'echo "Creating metrics file..."',
# Adminrouter metrics with direction="[1-5]xx" tags get dropped.
'echo "# TYPE {}_gauge gauge" >> metrics'.format(metric_name_pfx),
'echo "{}_gauge{{direction=\\"1xx\\"}} 100" >> metrics'.format(metric_name_pfx),
# Adminrouter metrics with these names get dropped.
'echo "# TYPE nginx_vts_filter_cache_foo gauge" >> metrics',
'echo "nginx_vts_filter_cache_foo 100" >> metrics',
'echo "# TYPE nginx_vts_server_foo gauge" >> metrics',
'echo "nginx_vts_server_foo 100" >> metrics',
'echo "# TYPE nginx_vts_upstream_foo gauge" >> metrics',
'echo "nginx_vts_upstream_foo 100" >> metrics',
'echo "# TYPE nginx_vts_foo_request_seconds gauge" >> metrics',
'echo "nginx_vts_foo_request_seconds 100" >> metrics',
'echo "Serving prometheus metrics on http://localhost:8000"',
'python3 -m http.server 8000',
]),
'networks': [{'mode': 'container/bridge'}],
'container': {
'type': 'MESOS',
'docker': {'image': 'library/python:3'},
'portMappings': [
{
'containerPort': 8000,
'hostPort': 0,
'protocol': 'tcp',
'labels': {'DCOS_METRICS_FORMAT': 'prometheus'},
}
]
},
}
logging.debug('Starting marathon app with config: %s', marathon_app)
expected_metrics = [
# metric_name, metric_value
('_'.join([metric_name_pfx, 'gauge.gauge']), 100),
('nginx_vts_filter_cache_foo.gauge', 100),
('nginx_vts_server_foo.gauge', 100),
('nginx_vts_upstream_foo.gauge', 100),
('nginx_vts_foo_request_seconds.gauge', 100),
]
deploy_marathon_app_and_check_metrics(dcos_api_session, expected_metrics, marathon_app, task_name)
def test_metrics_containers_nan(dcos_api_session: DcosApiSession) -> None:
"""Assert that the metrics API can handle app metric gauges with NaN values."""
task_name = 'test-metrics-containers-nan'
metric_name = 'test_metrics_containers_nan'
marathon_app = {
'id': '/' + task_name,
'instances': 1,
'cpus': 0.1,
'mem': 128,
'cmd': '\n'.join([
'echo "Sending gauge with NaN value to $STATSD_UDP_HOST:$STATSD_UDP_PORT"',
'echo "{}:NaN|g" | nc -w 1 -u $STATSD_UDP_HOST $STATSD_UDP_PORT'.format(metric_name),
'echo "Done. Sleeping forever."',
'while true; do',
' sleep 1000',
'done',
]),
'container': {
'type': 'MESOS',
'docker': {'image': 'library/alpine'}
},
'networks': [{'mode': 'host'}],
}
with dcos_api_session.marathon.deploy_and_cleanup(marathon_app, check_health=False, timeout=DEPLOY_TIMEOUT):
endpoints = dcos_api_session.marathon.get_app_service_endpoints(marathon_app['id'])
assert len(endpoints) == 1, 'The marathon app should have been deployed exactly once.'
node = endpoints[0].host
# NaN should be converted to empty string.
metric_value = get_app_metric_for_task(dcos_api_session, node, task_name, metric_name)['value']
assert metric_value == '', 'unexpected metric value: {}'.format(metric_value)
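# --- Editor's illustration, not part of the original test module ---
# Background for the empty-string assertion above: NaN has no representation
# in strict JSON, so a JSON API cannot pass the gauge through as a number.
# This is standard-library behaviour, not DC/OS code:
import json

def _example_nan_vs_json() -> None:
    print(json.dumps({'value': float('nan')}))  # '{"value": NaN}' -- not strict JSON
    try:
        json.dumps({'value': float('nan')}, allow_nan=False)
    except ValueError:
        print('strict encoders reject NaN outright')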
@retrying.retry(wait_fixed=METRICS_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def assert_app_metric_value_for_task(dcos_api_session: DcosApiSession, node: str, task_name: str, metric_name: str,
metric_value: Any) -> None:
"""Assert the value of app metric metric_name for container task_name is metric_value.
Retries on error, non-200 status, missing container metrics, missing app
metric, or unexpected app metric value for up to 5 minutes.
"""
assert get_app_metric_for_task(dcos_api_session, node, task_name, metric_name)['value'] == metric_value
@retrying.retry(wait_fixed=METRICS_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def get_app_metric_for_task(dcos_api_session: DcosApiSession, node: str, task_name: str, metric_name: str) -> Any:
"""Return the app metric metric_name for container task_name.
Retries on error, non-200 status, or missing container metrics, or missing
app metric for up to 5 minutes.
"""
_, app_metrics = get_metrics_for_task(dcos_api_session, node, task_name)
assert app_metrics is not None, "missing metrics for task {}".format(task_name)
dps = [dp for dp in app_metrics['datapoints'] if dp['name'] == metric_name]
assert len(dps) == 1, 'expected 1 datapoint for metric {}, got {}'.format(metric_name, len(dps))
return dps[0]
# Retry for 5 minutes since the collector collects state
# every 2 minutes to propagate containers to the API
@retrying.retry(wait_fixed=METRICS_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def get_container_ids(dcos_api_session: DcosApiSession, node: str) -> Any:
"""Return container IDs reported by the metrics API on node.
Retries on error, non-200 status, or empty response for up to 5 minutes.
"""
response = dcos_api_session.metrics.get('/containers', node=node)
assert response.status_code == 200
container_ids = response.json()
assert len(container_ids) > 0, 'must have at least 1 container'
return container_ids
@retrying.retry(wait_fixed=METRICS_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def get_container_metrics(dcos_api_session: DcosApiSession, node: str, container_id: str) -> Any:
"""Return container_id's metrics from the metrics API on node.
Returns None on 204.
Retries on error, non-200 status, or missing response fields for up
to 5 minutes.
"""
response = dcos_api_session.metrics.get('/containers/' + container_id, node=node)
if response.status_code == 204:
return None
assert response.status_code == 200
container_metrics = response.json()
assert 'datapoints' in container_metrics, (
'container metrics must include datapoints. Got: {}'.format(container_metrics)
)
assert 'dimensions' in container_metrics, (
'container metrics must include dimensions. Got: {}'.format(container_metrics)
)
return container_metrics
@retrying.retry(wait_fixed=METRICS_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def get_app_metrics(dcos_api_session: DcosApiSession, node: str, container_id: str) -> Any:
"""Return app metrics for container_id from the metrics API on node.
Returns None on 204.
Retries on error or non-200 status for up to 5 minutes.
"""
resp = dcos_api_session.metrics.get('/containers/' + container_id + '/app', node=node)
if resp.status_code == 204:
return None
assert resp.status_code == 200, 'got {}'.format(resp.status_code)
app_metrics = resp.json()
assert 'datapoints' in app_metrics, 'got {}'.format(app_metrics)
assert 'dimensions' in app_metrics, 'got {}'.format(app_metrics)
return app_metrics
@retrying.retry(wait_fixed=METRICS_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def get_metrics_for_task(dcos_api_session: DcosApiSession, node: str, task_name: str) -> Any:
"""Return (container_metrics, app_metrics) for task_name on node.
Retries on error, non-200 responses, or missing metrics for task_name for
up to 5 minutes.
"""
task_names_seen = [] # Used for exception message if task_name can't be found.
for cid in get_container_ids(dcos_api_session, node):
container_metrics = get_container_metrics(dcos_api_session, node, cid)
if container_metrics is None:
task_names_seen.append((cid, None))
continue
if container_metrics['dimensions'].get('task_name') != task_name:
task_names_seen.append((cid, container_metrics['dimensions'].get('task_name')))
continue
app_metrics = get_app_metrics(dcos_api_session, node, cid)
return container_metrics, app_metrics
raise Exception(
'No metrics found for task {} on host {}. Task names seen: {}'.format(task_name, node, task_names_seen)
)
def test_standalone_container_metrics(dcos_api_session: DcosApiSession) -> None:
"""
An operator should be able to launch a standalone container using the
LAUNCH_CONTAINER call of the agent operator API. Additionally, if the
process running within the standalone container emits statsd metrics, they
should be accessible via the DC/OS metrics API.
"""
expanded_config = get_expanded_config()
if expanded_config.get('security') == 'strict':
reason = (
'Only resource providers are authorized to launch standalone '
'containers in strict mode. See DCOS-42325.'
)
pytest.skip(reason)
# Fetch the mesos master state to get an agent ID
master_ip = dcos_api_session.masters[0]
r = dcos_api_session.get('/state', host=master_ip, port=5050)
assert r.status_code == 200
state = r.json()
# Find hostname and ID of an agent
assert len(state['slaves']) > 0, 'No agents found in master state'
agent_hostname = state['slaves'][0]['hostname']
agent_id = state['slaves'][0]['id']
logging.debug('Selected agent %s at %s', agent_id, agent_hostname)
def _post_agent(json: dict) -> Any:
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
r = dcos_api_session.post(
'/api/v1',
host=agent_hostname,
port=5051,
headers=headers,
json=json,
data=None,
stream=False)
return r
# Prepare container ID data
container_id = {'value': 'test-standalone-%s' % str(uuid.uuid4())}
# Launch standalone container. The command for this container executes a
# binary installed with DC/OS which will emit statsd metrics.
launch_data = {
'type': 'LAUNCH_CONTAINER',
'launch_container': {
'command': {
'value': './statsd-emitter',
'uris': [{
'value': 'https://downloads.mesosphere.com/dcos-metrics/1.11.0/statsd-emitter',
'executable': True
}]
},
'container_id': container_id,
'resources': [
{
'name': 'cpus',
'scalar': {'value': 0.2},
'type': 'SCALAR'
},
{
'name': 'mem',
'scalar': {'value': 64.0},
'type': 'SCALAR'
},
{
'name': 'disk',
'scalar': {'value': 1024.0},
'type': 'SCALAR'
}
],
'container': {
'type': 'MESOS'
}
}
}
# There is a short delay between the container starting and metrics becoming
# available via the metrics service. Because of this, we wait up to 5
# minutes for these metrics to appear before throwing an exception.
def _should_retry_metrics_fetch(response: Any) -> Any:
return response.status_code == 204
@retrying.retry(wait_fixed=METRICS_INTERVAL,
stop_max_delay=METRICS_WAITTIME,
retry_on_result=_should_retry_metrics_fetch,
retry_on_exception=lambda x: False)
def _get_metrics() -> Any:
master_response = dcos_api_session.get(
'/system/v1/agent/%s/metrics/v0/containers/%s/app' % (agent_id, container_id['value']),
host=master_ip)
return master_response
r = _post_agent(launch_data)
assert r.status_code == 200, 'Received unexpected status code when launching standalone container'
try:
logging.debug('Successfully created standalone container with container ID %s', container_id['value'])
# Verify that the standalone container's metrics are being collected
r = _get_metrics()
assert r.status_code == 200, 'Received unexpected status code when fetching standalone container metrics'
metrics_response = r.json()
assert 'datapoints' in metrics_response, 'got {}'.format(metrics_response)
uptime_dp = None
for dp in metrics_response['datapoints']:
if dp['name'] == 'statsd_tester.time.uptime':
uptime_dp = dp
break
# If this metric is missing, statsd-emitter's metrics were not received
assert uptime_dp is not None, 'got {}'.format(metrics_response)
datapoint_keys = ['name', 'value', 'unit', 'timestamp', 'tags']
for k in datapoint_keys:
assert k in uptime_dp, 'got {}'.format(uptime_dp)
expected_tag_names = {
'dcos_cluster_id',
'test_tag_key',
'dcos_cluster_name',
'host'
}
check_tags(uptime_dp['tags'], expected_tag_names, FAULT_DOMAIN_TAGS)
assert uptime_dp['tags']['test_tag_key'] == 'test_tag_value', 'got {}'.format(uptime_dp)
assert uptime_dp['value'] > 0
assert 'dimensions' in metrics_response, 'got {}'.format(metrics_response)
assert metrics_response['dimensions']['container_id'] == container_id['value']
finally:
# Clean up the standalone container
kill_data = {
'type': 'KILL_CONTAINER',
'kill_container': {
'container_id': container_id
}
}
_post_agent(kill_data)
def test_pod_application_metrics(dcos_api_session: DcosApiSession) -> None:
"""Launch a pod, wait for its containers to be added to the metrics service,
and then verify that:
1) Container statistics metrics are provided for the executor container
2) Application metrics are exposed for the task container
"""
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def test_application_metrics(agent_ip: str, agent_id: str, task_name: str, num_containers: int) -> Any:
# Get expected 2 container ids from mesos state endpoint
# (one container + its parent container)
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def get_container_ids_from_state(dcos_api_session: DcosApiSession, num_containers: int) -> set:
state_response = dcos_api_session.get('/state', host=dcos_api_session.masters[0], port=5050)
assert state_response.status_code == 200
state = state_response.json()
cids = set()
for framework in state['frameworks']:
if framework['name'] == 'marathon':
for task in framework['tasks']:
if task['name'] == 'statsd-emitter-task':
container = task['statuses'][0]['container_status']['container_id']
cids.add(container['value'])
if 'parent' in container:
cids.add(container['parent']['value'])
break
break
assert len(cids) == num_containers, 'Test should create {} containers'.format(num_containers)
return cids
container_ids = get_container_ids_from_state(dcos_api_session, num_containers)
# Retry for up to 5 minutes since the collector collects
# state every 2 minutes to propagate containers to the API
@retrying.retry(wait_fixed=STD_INTERVAL, stop_max_delay=METRICS_WAITTIME)
def wait_for_container_metrics_propagation(container_ids: set) -> None:
response = dcos_api_session.metrics.get('/containers', node=agent_ip)
assert response.status_code == 200
assert container_ids.issubset(
response.json()), "Containers {} should have been propagated".format(container_ids)
wait_for_container_metrics_propagation(container_ids)
get_containers = {
"type": "GET_CONTAINERS",
"get_containers": {
"show_nested": True,
"show_standalone": True
}
}
r = dcos_api_session.post('/agent/{}/api/v1'.format(agent_id), json=get_containers)
r.raise_for_status()
mesos_agent_containers = r.json()['get_containers']['containers']
mesos_agent_cids = [container['container_id']['value'] for container in mesos_agent_containers]
assert container_ids.issubset(mesos_agent_cids), "Missing expected containers {}".format(container_ids)
def is_nested_container(container: dict) -> Any:
"""Helper to check whether or not a container returned in the
GET_CONTAINERS response is a nested container.
"""
return 'parent' in container['container_status']['container_id']
for container in mesos_agent_containers:
container_id = container['container_id']['value']
# Test that /containers/<id> responds with expected data.
container_id_path = '/containers/{}'.format(container_id)
if (is_nested_container(container)):
# Retry for 5 minutes for each nested container to appear.
# Since nested containers do not report resource statistics, we
# expect the response code to be 204.
@retrying.retry(stop_max_delay=METRICS_WAITTIME)
def wait_for_container_response() -> Any:
response = dcos_api_session.metrics.get(container_id_path, node=agent_ip)
assert response.status_code == 204
return response
# For the nested container, we do not expect any container-level
# resource statistics, so this response should be empty.
assert not wait_for_container_response().json()
# Test that expected application metrics are present.
app_response = dcos_api_session.metrics.get('/containers/{}/app'.format(container_id), node=agent_ip)
assert app_response.status_code == 200, 'got {}'.format(app_response.status_code)
# Ensure all /container/<id>/app data is correct
assert 'datapoints' in app_response.json(), 'got {}'.format(app_response.json())
# We expect three datapoints, could be in any order
uptime_dp = None
for dp in app_response.json()['datapoints']:
if dp['name'] == 'statsd_tester.time.uptime':
uptime_dp = dp
break
# If this metric is missing, statsd-emitter's metrics were not received
assert uptime_dp is not None, 'got {}'.format(app_response.json())
datapoint_keys = ['name', 'value', 'unit', 'timestamp', 'tags']
for k in datapoint_keys:
assert k in uptime_dp, 'got {}'.format(uptime_dp)
expected_tag_names = {
'dcos_cluster_id',
'test_tag_key',
'dcos_cluster_name',
'host'
}
check_tags(uptime_dp['tags'], expected_tag_names, FAULT_DOMAIN_TAGS)
assert uptime_dp['tags']['test_tag_key'] == 'test_tag_value', 'got {}'.format(uptime_dp)
assert uptime_dp['value'] > 0
assert 'dimensions' in app_response.json(), 'got {}'.format(app_response.json())
assert 'task_name' in app_response.json()['dimensions'], 'got {}'.format(
app_response.json()['dimensions'])
# Look for the specified task name.
assert task_name.strip('/') == app_response.json()['dimensions']['task_name'],\
'Nested container was not tagged with the correct task name'
else:
# Retry for 5 minutes for each parent container to present its
# content.
@retrying.retry(stop_max_delay=METRICS_WAITTIME)
def wait_for_container_response() -> Any:
response = dcos_api_session.metrics.get(container_id_path, node=agent_ip)
assert response.status_code == 200
return response
container_response = wait_for_container_response()
assert 'datapoints' in container_response.json(), 'got {}'.format(container_response.json())
cid_registry = set()
for dp in container_response.json()['datapoints']:
# Verify expected tags are present.
assert 'tags' in dp, 'got {}'.format(dp)
expected_tag_names = {
'container_id',
}
if dp['name'].startswith('blkio.'):
# blkio stats have 'blkio_device' tags.
expected_tag_names.add('blkio_device')
check_tags(dp['tags'], expected_tag_names, FAULT_DOMAIN_TAGS)
# Ensure all container IDs in the response from the
# containers/<id> endpoint are the same.
cid_registry.add(dp['tags']['container_id'])
assert len(cid_registry) == 1, 'Not all container IDs in the metrics response are equal'
assert 'dimensions' in container_response.json(), 'got {}'.format(container_response.json())
# The executor container shouldn't expose application metrics.
app_response = dcos_api_session.metrics.get('/containers/{}/app'.format(container_id), node=agent_ip)
assert app_response.status_code == 204, 'got {}'.format(app_response.status_code)
return True
marathon_pod_config = {
"id": "/statsd-emitter-task-group",
"containers": [{
"name": "statsd-emitter-task",
"resources": {
"cpus": 0.5,
"mem": 128.0,
"disk": 1024.0
},
"image": {
"kind": "DOCKER",
"id": "alpine"
},
"exec": {
"command": {
"shell": "./statsd-emitter"
}
},
"artifacts": [{
"uri": "https://downloads.mesosphere.com/dcos-metrics/1.11.0/statsd-emitter",
"executable": True
}],
}],
"scheduling": {
"instances": 1
}
}
with dcos_api_session.marathon.deploy_pod_and_cleanup(marathon_pod_config):
r = dcos_api_session.marathon.get('/v2/pods/{}::status'.format(marathon_pod_config['id']))
r.raise_for_status()
data = r.json()
assert len(data['instances']) == 1, 'The marathon pod should have been deployed exactly once.'
test_application_metrics(
data['instances'][0]['agentHostname'],
data['instances'][0]['agentId'],
marathon_pod_config['containers'][0]['name'], 2) # type: ignore
| 43.576874
| 119
| 0.631973
|
400e2f28e46ce75157670318285a386d5d8cf9a4
| 3,438
|
py
|
Python
|
pydarknet/pydarknet_helpers.py
|
WildMeOrg/wbia-tpl-pydarknet
|
ca7eed0965523123f71132a6aa26100d791cd2fe
|
[
"Apache-2.0"
] | null | null | null |
pydarknet/pydarknet_helpers.py
|
WildMeOrg/wbia-tpl-pydarknet
|
ca7eed0965523123f71132a6aa26100d791cd2fe
|
[
"Apache-2.0"
] | 1
|
2020-06-25T17:28:35.000Z
|
2020-06-25T17:28:35.000Z
|
pydarknet/pydarknet_helpers.py
|
WildMeOrg/wbia-tpl-pydarknet
|
ca7eed0965523123f71132a6aa26100d791cd2fe
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# ============================
# Python Interface
# ============================
from __future__ import absolute_import, division, print_function
import logging
from os.path import join, realpath, dirname
import numpy as np
import ctypes as C
import sys
from pydarknet import ctypes_interface
logger = logging.getLogger('pydarknet')
def ensure_bytes_strings(str_list):
# converts python3 strings into bytes
if sys.hexversion > 0x03000000:
return [
str_ if not isinstance(str_, str) else bytes(str_, 'utf-8')
for str_ in str_list
]
else:
return str_list
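# --- Editor's illustration, not part of the original module ---
# Usage sketch for ensure_bytes_strings(): under Python 3 it converts str
# entries to UTF-8 bytes and leaves existing byte strings untouched.
def _example_ensure_bytes_strings():
    result = ensure_bytes_strings(['yolo.cfg', b'already-bytes'])
    assert result == [b'yolo.cfg', b'already-bytes']
    return result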
def _cast_list_to_c(py_list, dtype):
"""
Converts a python list of strings into a c array of strings
adapted from "http://stackoverflow.com/questions/3494598/passing-a-list-of
-strings-to-from-python-ctypes-to-c-function-expecting-char"
Avi's code
"""
c_arr = (dtype * len(py_list))()
c_arr[:] = py_list
return c_arr
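# --- Editor's illustration, not part of the original module ---
# Usage sketch for _cast_list_to_c(): build a char** style argument for a C
# call from a list of byte strings (pair it with ensure_bytes_strings above).
def _example_cast_list_to_c():
    names = ensure_bytes_strings(['dog', 'person'])
    c_names = _cast_list_to_c(names, C.c_char_p)
    assert len(c_names) == 2 and c_names[0] == b'dog'
    return c_names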
def _arrptr_to_np(c_arrptr, shape, arr_t, dtype):
"""
Casts an array pointer from C to numpy
Input:
c_arrptr - an array pointer returned from C
shape - shape of that array pointer
arr_t - the ctypes datatype of c_arrptr
dtype - the numpy dtype to cast the resulting array to
Avi's code
"""
arr_t_size = C.POINTER(C.c_char * dtype().itemsize) # size of each item
c_arr = C.cast(c_arrptr.astype(int), arr_t_size) # cast to ctypes
np_arr = np.ctypeslib.as_array(c_arr, shape) # cast to numpy
np_arr.dtype = dtype # fix numpy dtype
np_arr = np.require(np_arr, dtype=dtype, requirements=['O']) # prevent memory leaks
return np_arr
def _extract_np_array(size_list, ptr_list, arr_t, arr_dtype, arr_dim):
"""
size_list - contains the size of each output 2d array
ptr_list - an array of pointers to the head of each output 2d
array (which was allocated in C)
arr_t - the C pointer type
arr_dtype - the numpy array type
arr_dim - the number of columns in each output 2d array
"""
arr_list = [
_arrptr_to_np(arr_ptr, (size, arr_dim), arr_t, arr_dtype)
for (arr_ptr, size) in zip(ptr_list, size_list)
]
return arr_list
def _find_c_shared_library_by_device(device='cpu'):
root_dir = realpath(join(dirname(__file__), 'lib'))
if device in ['cpu']:
libname = 'pydarknet'
elif device in ['gpu']:
libname = 'pydarknet_cuda'
else:
raise ValueError('device %r not recognized' % (device,))
try:
darknet_clib, def_cfunc = ctypes_interface.load_clib(libname, root_dir)
except ImportError:
if device not in ['cpu']:
logging.info('CPU fallback for: %s' % (libname,))
darknet_clib, def_cfunc = _find_c_shared_library_by_device()
else:
import warnings
warnings.warn('Unable to load C library for darknet')
darknet_clib, def_cfunc = None, None
return darknet_clib, def_cfunc
def _load_c_shared_library(METHODS, device='cpu'):
"""Loads the pydarknet dynamic library and defines its functions"""
darknet_clib, def_cfunc = _find_c_shared_library_by_device(device=device)
if None in [darknet_clib, def_cfunc]:
return None, None
# Load and expose methods from lib
for method in METHODS.keys():
def_cfunc(METHODS[method][1], method, METHODS[method][0])
return darknet_clib, def_cfunc
| 30.972973
| 88
| 0.654741
|
5ee62dc0d0a0807936d474d8c73f50139b626da4
| 20
|
py
|
Python
|
boxxy/__init__.py
|
Cure549/mo-menus
|
39ff10f99b486cd65d974f9cdd50e0ee939dfa6d
|
[
"MIT"
] | null | null | null |
boxxy/__init__.py
|
Cure549/mo-menus
|
39ff10f99b486cd65d974f9cdd50e0ee939dfa6d
|
[
"MIT"
] | null | null | null |
boxxy/__init__.py
|
Cure549/mo-menus
|
39ff10f99b486cd65d974f9cdd50e0ee939dfa6d
|
[
"MIT"
] | null | null | null |
from . import Boxxy
| 10
| 19
| 0.75
|
6eb4bb65ec8824474de01791e5ee02501f48440e
| 13,863
|
py
|
Python
|
datumaro/plugins/vgg_face2_format.py
|
dvkruchinin/datumaro
|
8e1203a3d83e3d6421317d1ebf55c8c6371a1a37
|
[
"MIT"
] | null | null | null |
datumaro/plugins/vgg_face2_format.py
|
dvkruchinin/datumaro
|
8e1203a3d83e3d6421317d1ebf55c8c6371a1a37
|
[
"MIT"
] | null | null | null |
datumaro/plugins/vgg_face2_format.py
|
dvkruchinin/datumaro
|
8e1203a3d83e3d6421317d1ebf55c8c6371a1a37
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2020-2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
import csv
import os
import os.path as osp
from datumaro.components.annotation import (
AnnotationType, Bbox, Label, LabelCategories, Points,
)
from datumaro.components.converter import Converter
from datumaro.components.extractor import DatasetItem, Extractor, Importer
from datumaro.components.format_detection import FormatDetectionContext
from datumaro.util.image import find_images
from datumaro.util.meta_file_util import has_meta_file, parse_meta_file
class VggFace2Path:
ANNOTATION_DIR = "bb_landmark"
IMAGE_EXT = '.jpg'
BBOXES_FILE = 'loose_bb_'
LANDMARKS_FILE = 'loose_landmark_'
LABELS_FILE = 'labels.txt'
IMAGES_DIR_NO_LABEL = 'no_label'
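# --- Editor's note, not part of the original module ---
# On-disk layout implied by the constants above and by the path handling in
# the extractor/converter below (inferred from this module, not from any
# official VGGFace2 documentation):
#
#   <dataset_dir>/
#       labels.txt
#       bb_landmark/
#           loose_bb_<subset>.csv
#           loose_landmark_<subset>.csv
#       <subset>/
#           <label or 'no_label'>/
#               <image>.jpg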
class VggFace2Extractor(Extractor):
def __init__(self, path):
subset = None
if osp.isdir(path):
self._path = path
elif osp.isfile(path):
subset = osp.splitext(osp.basename(path).split('_')[2])[0]
self._path = osp.dirname(path)
else:
raise Exception("Can't read annotations from '%s'" % path)
annotation_files = [p for p in os.listdir(self._path)
if (osp.basename(p).startswith(VggFace2Path.BBOXES_FILE) or \
osp.basename(p).startswith(VggFace2Path.LANDMARKS_FILE)) and \
p.endswith('.csv')]
if len(annotation_files) < 1:
raise Exception("Can't find annotations in the directory '%s'" % path)
super().__init__()
self._dataset_dir = osp.dirname(self._path)
self._subsets = {subset} if subset else set(
osp.splitext(f.split('_')[2])[0] for f in annotation_files
)
self._categories = {}
self._items = []
self._load_categories()
for subset in self._subsets:
self._items.extend(list(self._load_items(subset).values()))
def __iter__(self):
return iter(self._items)
def categories(self):
return self._categories
def _load_categories(self):
label_cat = LabelCategories()
path = osp.join(self._dataset_dir, VggFace2Path.LABELS_FILE)
if has_meta_file(self._dataset_dir):
labels = parse_meta_file(self._dataset_dir).keys()
for label in labels:
label_cat.add(label)
elif osp.isfile(path):
with open(path, encoding='utf-8') as labels_file:
lines = [s.strip() for s in labels_file]
for line in lines:
objects = line.split()
label = objects[0]
class_name = None
if 1 < len(objects):
class_name = objects[1]
label_cat.add(label, parent=class_name)
else:
for subset in self._subsets:
subset_path = osp.join(self._dataset_dir, subset)
if osp.isdir(subset_path):
for images_dir in sorted(os.listdir(subset_path)):
if osp.isdir(osp.join(subset_path, images_dir)) and \
images_dir != VggFace2Path.IMAGES_DIR_NO_LABEL:
label_cat.add(images_dir)
self._categories[AnnotationType.label] = label_cat
def _load_items(self, subset):
def _get_label(path):
label_name = path.split('/')[0]
label = None
if label_name != VggFace2Path.IMAGES_DIR_NO_LABEL:
label = \
self._categories[AnnotationType.label].find(label_name)[0]
return label
items = {}
image_dir = osp.join(self._dataset_dir, subset)
if osp.isdir(image_dir):
images = {
osp.splitext(osp.relpath(p, image_dir))[0].replace('\\', '/'): p
for p in find_images(image_dir, recursive=True)
}
else:
images = {}
landmarks_path = osp.join(self._dataset_dir, VggFace2Path.ANNOTATION_DIR,
VggFace2Path.LANDMARKS_FILE + subset + '.csv')
if osp.isfile(landmarks_path):
with open(landmarks_path, encoding='utf-8') as content:
landmarks_table = list(csv.DictReader(content))
for row in landmarks_table:
item_id = row['NAME_ID']
label = None
if '/' in item_id:
label = _get_label(item_id)
if item_id not in items:
items[item_id] = DatasetItem(id=item_id, subset=subset,
image=images.get(row['NAME_ID']))
annotations = items[item_id].annotations
if [a for a in annotations if a.type == AnnotationType.points]:
raise Exception("Item %s: an image can have only one "
"set of landmarks" % item_id)
if len([p for p in row if row[p] == '']) == 0 and len(row) == 11:
annotations.append(Points(
[float(row[p]) for p in row if p != 'NAME_ID'],
label=label)
)
elif label is not None:
annotations.append(Label(label=label))
bboxes_path = osp.join(self._dataset_dir, VggFace2Path.ANNOTATION_DIR,
VggFace2Path.BBOXES_FILE + subset + '.csv')
if osp.isfile(bboxes_path):
with open(bboxes_path, encoding='utf-8') as content:
bboxes_table = list(csv.DictReader(content))
for row in bboxes_table:
item_id = row['NAME_ID']
label = None
if '/' in item_id:
label = _get_label(item_id)
if item_id not in items:
items[item_id] = DatasetItem(id=item_id, subset=subset,
image=images.get(row['NAME_ID']))
annotations = items[item_id].annotations
if [a for a in annotations if a.type == AnnotationType.bbox]:
raise Exception("Item %s: an image can have only one "
"bbox" % item_id)
if len([p for p in row if row[p] == '']) == 0 and len(row) == 5:
annotations.append(Bbox(float(row['X']), float(row['Y']),
float(row['W']), float(row['H']), label=label))
return items
class VggFace2Importer(Importer):
@classmethod
def detect(cls, context: FormatDetectionContext) -> None:
with context.require_any():
for prefix in (
VggFace2Path.BBOXES_FILE, VggFace2Path.LANDMARKS_FILE
):
with context.alternative():
context.require_file(
f'{VggFace2Path.ANNOTATION_DIR}/{prefix}*.csv')
@classmethod
def find_sources(cls, path):
if osp.isdir(path):
annotation_dir = osp.join(path, VggFace2Path.ANNOTATION_DIR)
if osp.isdir(annotation_dir):
return [{
'url': annotation_dir, 'format': VggFace2Extractor.NAME,
}]
elif osp.isfile(path):
if (osp.basename(path).startswith(VggFace2Path.LANDMARKS_FILE) or \
osp.basename(path).startswith(VggFace2Path.BBOXES_FILE)) and \
path.endswith('.csv'):
return [{
'url': path, 'format': VggFace2Extractor.NAME,
}]
return []
class VggFace2Converter(Converter):
DEFAULT_IMAGE_EXT = VggFace2Path.IMAGE_EXT
def apply(self):
def _get_name_id(item_parts, label_name):
if 1 < len(item_parts) and item_parts[0] == label_name:
return '/'.join([label_name, *item_parts[1:]])
else:
return '/'.join([label_name, *item_parts])
save_dir = self._save_dir
os.makedirs(save_dir, exist_ok=True)
if self._save_dataset_meta:
self._save_meta_file(save_dir)
else:
labels_path = osp.join(save_dir, VggFace2Path.LABELS_FILE)
labels_file = ''
for label in self._extractor.categories()[AnnotationType.label]:
labels_file += '%s' % label.name
if label.parent:
labels_file += ' %s' % label.parent
labels_file += '\n'
with open(labels_path, 'w', encoding='utf-8') as f:
f.write(labels_file)
label_categories = self._extractor.categories()[AnnotationType.label]
for subset_name, subset in self._extractor.subsets().items():
bboxes_table = []
landmarks_table = []
for item in subset:
item_parts = item.id.split('/')
if item.has_image and self._save_images:
labels = set(p.label for p in item.annotations
if getattr(p, 'label') is not None)
if labels:
for label in labels:
image_dir = label_categories[label].name
if 1 < len(item_parts) and image_dir == item_parts[0]:
image_dir = ''
self._save_image(item, subdir=osp.join(subset_name,
image_dir))
else:
image_dir = VggFace2Path.IMAGES_DIR_NO_LABEL
if 1 < len(item_parts) and image_dir == item_parts[0]:
image_dir = ''
self._save_image(item, subdir=osp.join(subset_name,
image_dir))
landmarks = [a for a in item.annotations
if a.type == AnnotationType.points]
if 1 < len(landmarks):
raise Exception("Item (%s, %s): an image can have only one "
"set of landmarks" % (item.id, item.subset))
if landmarks:
if landmarks[0].label is not None and \
label_categories[landmarks[0].label].name:
name_id = _get_name_id(item_parts,
label_categories[landmarks[0].label].name)
else:
name_id = _get_name_id(item_parts,
VggFace2Path.IMAGES_DIR_NO_LABEL)
points = landmarks[0].points
if len(points) != 10:
landmarks_table.append({'NAME_ID': name_id})
else:
landmarks_table.append({'NAME_ID': name_id,
'P1X': points[0], 'P1Y': points[1],
'P2X': points[2], 'P2Y': points[3],
'P3X': points[4], 'P3Y': points[5],
'P4X': points[6], 'P4Y': points[7],
'P5X': points[8], 'P5Y': points[9]})
bboxes = [a for a in item.annotations
if a.type == AnnotationType.bbox]
if 1 < len(bboxes):
raise Exception("Item (%s, %s): an image can have only one "
"bbox" % (item.id, item.subset))
if bboxes:
if bboxes[0].label is not None and \
label_categories[bboxes[0].label].name:
name_id = _get_name_id(item_parts,
label_categories[bboxes[0].label].name)
else:
name_id = _get_name_id(item_parts,
VggFace2Path.IMAGES_DIR_NO_LABEL)
bboxes_table.append({'NAME_ID': name_id, 'X': bboxes[0].x,
'Y': bboxes[0].y, 'W': bboxes[0].w, 'H': bboxes[0].h})
labels = [a for a in item.annotations
if a.type == AnnotationType.label]
for label in labels:
if label.label is not None and \
label_categories[label.label].name:
name_id = _get_name_id(item_parts,
label_categories[label.label].name)
else:
name_id = _get_name_id(item_parts,
VggFace2Path.IMAGES_DIR_NO_LABEL)
landmarks_table.append({'NAME_ID': name_id})
if not landmarks and not bboxes and not labels:
landmarks_table.append({'NAME_ID': _get_name_id(item_parts,
VggFace2Path.IMAGES_DIR_NO_LABEL)})
landmarks_path = osp.join(save_dir, VggFace2Path.ANNOTATION_DIR,
VggFace2Path.LANDMARKS_FILE + subset_name + '.csv')
os.makedirs(osp.dirname(landmarks_path), exist_ok=True)
with open(landmarks_path, 'w', encoding='utf-8', newline='') as file:
columns = ['NAME_ID', 'P1X', 'P1Y', 'P2X', 'P2Y',
'P3X', 'P3Y', 'P4X', 'P4Y', 'P5X', 'P5Y']
writer = csv.DictWriter(file, fieldnames=columns)
writer.writeheader()
writer.writerows(landmarks_table)
if bboxes_table:
bboxes_path = osp.join(save_dir, VggFace2Path.ANNOTATION_DIR,
VggFace2Path.BBOXES_FILE + subset_name + '.csv')
os.makedirs(osp.dirname(bboxes_path), exist_ok=True)
with open(bboxes_path, 'w', encoding='utf-8', newline='') as file:
columns = ['NAME_ID', 'X', 'Y', 'W', 'H']
writer = csv.DictWriter(file, fieldnames=columns)
writer.writeheader()
writer.writerows(bboxes_table)
| 43.45768
| 86
| 0.526942
|
1e25b849109e225a9a86305442b4912ab3b2e874
| 6,937
|
py
|
Python
|
semtk3/semtkclient.py
|
ge-semtk/semtk-python3
|
d6d8e749121ec9309b47d17b01bdcd98935f7e70
|
[
"Apache-2.0"
] | 1
|
2020-04-30T16:48:05.000Z
|
2020-04-30T16:48:05.000Z
|
semtk3/semtkclient.py
|
ge-semtk/semtk-python3
|
d6d8e749121ec9309b47d17b01bdcd98935f7e70
|
[
"Apache-2.0"
] | 7
|
2020-07-09T18:31:41.000Z
|
2021-09-02T12:23:36.000Z
|
semtk3/semtkclient.py
|
ge-semtk/semtk-python3
|
d6d8e749121ec9309b47d17b01bdcd98935f7e70
|
[
"Apache-2.0"
] | 2
|
2020-07-17T22:35:41.000Z
|
2020-07-28T16:16:09.000Z
|
#
# Copyright 2019-20 General Electric Company
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import sys
import logging
from . import restclient
from . import semtktable
#
# SETUP NOTES
# - Inside GE, will fail with Captcha if Windows has HTTP_PROXY and maybe HTTPS_PROXY environment variables set
# unless NO_PROXY is set up on your endpoint
#
semtk3_logger = logging.getLogger("semtk3")
class SemTkClient(restclient.RestClient):
JOB_ID_KEY = "JobId"
RESULT_TYPE_KEY = "resultType"
def _check_status(self, content):
''' check content is a dict, has status=="success"
'''
if not isinstance(content, dict):
self.raise_exception("Can't process content from rest service")
if "status" not in content.keys():
self.raise_exception("Can't find status in content from rest service")
if content["status"] != "success":
self.raise_exception("Rest service call did not succeed ")
def _check_simple(self, content):
''' perform all checks on content through checking for simpleresults
'''
self._check_status(content)
if "simpleresults" not in content.keys():
self.raise_exception("Rest service did not return simpleresults")
def _check_table(self, content):
''' perform all checks on content through checking for table
'''
self._check_status(content)
if "table" not in content.keys():
self.raise_exception("Rest service did not return table")
if "@table" not in content["table"].keys():
self.raise_exception("Rest service table does not contain @table")
def _check_record_process(self, content):
''' checking for recordProcess table
'''
if "recordProcessResults" not in content.keys():
self._check_status(content)
self.raise_exception("Rest service did not record process results")
def get_simple_field(self, simple_res, field):
''' get a simple field with REST error handling
'''
if field not in simple_res.keys():
self.raise_exception("Rest service did not return simple result " + field)
return simple_res[field]
def get_simple_field_int(self, simple_res, field):
''' get integer simple results field
returns int
raises RestException on type or missing field
'''
try:
f = self.get_simple_field(simple_res, field)
return int(f)
except ValueError:
self.raise_exception("Simple results field " + field + " expecting integer, found " + f)
def get_simple_field_str(self, simple_res, field):
''' get string from simple result
returns string
raises RestException on type or missing field
'''
f = self.get_simple_field(simple_res, field)
return str(f)
def ping(self):
'''
logger.INFO(success) or logger.ERROR(error)
returns True (success) or False (failure)
'''
try:
res = self.post_to_simple("serviceInfo/ping")
semtk3_logger.info(self.baseURL + self.get_simple_field_str(res, "available"))
return True
except Exception as e:
semtk3_logger.error(str(e).replace("\n", ""))
return False
def post_to_status(self, endpoint, dataObj={}, files=None):
'''
returns dict - the simple results
which can be used as a regular dict, or with error-handling get_simple_field*() methods
'''
content = self.post(endpoint, dataObj=dataObj, files=files)
content = json.loads(content.decode("utf-8", errors='ignore'))
self._check_status(content)
return content
def post_to_simple(self, endpoint, dataObj={}, files=None):
'''
returns dict - the simple results
which can be used as a regular dict, or with error-handling get_simple_field*() methods
'''
content = self.post(endpoint, dataObj=dataObj, files=files)
content = json.loads(content.decode("utf-8", errors='ignore'))
self._check_simple(content)
return content["simpleresults"]
def post_to_table(self, endpoint, dataObj={}):
'''
returns dict - the table
raises RestException
'''
content = self.post(endpoint, dataObj)
content = json.loads(content.decode("utf-8", errors='ignore'))
self._check_table(content)
table = semtktable.SemtkTable(content["table"]["@table"])
return table
def post_to_record_process(self, endpoint, dataObj={}, files=None):
'''
returns records processed successfully
raises RestException unless failuresEncountered = 0
'''
content = self.post(endpoint, dataObj=dataObj, files=files)
content = json.loads(content.decode("utf-8", errors='ignore'))
# throw exception if no recordProcessResults
self._check_record_process(content)
record_process = content["recordProcessResults"]
if "failuresEncountered" not in record_process.keys():
raise Exception("Results did not contain recordProcessResults.failuresEncountered: \n" + content)
if record_process["failuresEncountered"] != 0:
if "errorTable" in record_process:
t = semtktable.SemtkTable(record_process["errorTable"])
raise Exception("Encountered failures: \n" + t.get_csv_string())
else:
raise Exception("Encountered failures but no table given: \n" + content)
if not ("recordsProcessed" in record_process):
raise Exception("Results did not contain recordProcessResults.recordsProcessed: \n" + content)
return record_process["recordsProcessed"]
def post_to_jobid(self, endpoint, dataObj={}):
'''
returns string jobid
raises errors otherwise
'''
simple_res = self.post_to_simple(endpoint, dataObj)
return self.get_simple_field_str(simple_res, SemTkClient.JOB_ID_KEY)
| 37.295699
| 114
| 0.622315
|
e937f283a31f537ce1811fc84a7634f6d9eb7096
| 5,181
|
py
|
Python
|
examples/conversationbot.py
|
ehsanbarkhordar/botcup
|
4e45c3df2dceb8afe3833c0e89813fa9493295ed
|
[
"MIT"
] | null | null | null |
examples/conversationbot.py
|
ehsanbarkhordar/botcup
|
4e45c3df2dceb8afe3833c0e89813fa9493295ed
|
[
"MIT"
] | null | null | null |
examples/conversationbot.py
|
ehsanbarkhordar/botcup
|
4e45c3df2dceb8afe3833c0e89813fa9493295ed
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Simple Bot to reply to Telegram messages
# This program is dedicated to the public domain under the CC0 license.
"""
This Bot uses the Updater class to handle the bot.
First, a few callback functions are defined. Then, those functions are passed to
the Dispatcher and registered at their respective places.
Then, the bot is started and runs until we press Ctrl-C on the command line.
Usage:
Example of a bot-user conversation using ConversationHandler.
Send /start to initiate the conversation.
Press Ctrl-C on the command line or send a signal to the process to stop the
bot.
"""
from telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove)
from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, RegexHandler,
ConversationHandler)
import logging
# Enable logging
from examples.config import Config
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
GENDER, PHOTO, LOCATION, BIO = range(4)
def start(bot, update):
reply_keyboard = [['Boy', 'Girl', 'Other']]
update.message.reply_text(
'Hi! My name is Professor Bot. I will hold a conversation with you. '
'Send /cancel to stop talking to me.\n\n'
'Are you a boy or a girl?',
reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))
return GENDER
def gender(bot, update):
user = update.message.from_user
logger.info("Gender of %s: %s", user.first_name, update.message.text)
update.message.reply_text('I see! Please send me a photo of yourself, '
'so I know what you look like, or send /skip if you don\'t want to.',
reply_markup=ReplyKeyboardRemove())
return PHOTO
def photo(bot, update):
user = update.message.from_user
photo_file = bot.get_file(update.message.photo[-1].file_id)
photo_file.download('user_photo.jpg')
logger.info("Photo of %s: %s", user.first_name, 'user_photo.jpg')
update.message.reply_text('Gorgeous! Now, send me your location please, '
'or send /skip if you don\'t want to.')
return LOCATION
def skip_photo(bot, update):
user = update.message.from_user
logger.info("User %s did not send a photo.", user.first_name)
update.message.reply_text('I bet you look great! Now, send me your location please, '
'or send /skip.')
return LOCATION
def location(bot, update):
user = update.message.from_user
user_location = update.message.location
logger.info("Location of %s: %f / %f", user.first_name, user_location.latitude,
user_location.longitude)
update.message.reply_text('Maybe I can visit you sometime! '
'At last, tell me something about yourself.')
return BIO
def skip_location(bot, update):
user = update.message.from_user
logger.info("User %s did not send a location.", user.first_name)
update.message.reply_text('You seem a bit paranoid! '
'At last, tell me something about yourself.')
return BIO
def bio(bot, update):
user = update.message.from_user
logger.info("Bio of %s: %s", user.first_name, update.message.text)
update.message.reply_text('Thank you! I hope we can talk again some day.')
return ConversationHandler.END
def cancel(bot, update):
user = update.message.from_user
logger.info("User %s canceled the conversation.", user.first_name)
update.message.reply_text('Bye! I hope we can talk again some day.',
reply_markup=ReplyKeyboardRemove())
return ConversationHandler.END
def error(bot, update, error):
"""Log Errors caused by Updates."""
logger.warning('Update "%s" caused error "%s"', update, error)
def main():
# Create the EventHandler and pass it your bot's token.
updater = Updater(Config.bot_token)
# Get the dispatcher to register handlers
dp = updater.dispatcher
# Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', start)],
states={
GENDER: [RegexHandler('^(Boy|Girl|Other)$', gender)],
PHOTO: [MessageHandler(Filters.photo, photo),
CommandHandler('skip', skip_photo)],
LOCATION: [MessageHandler(Filters.location, location),
CommandHandler('skip', skip_location)],
BIO: [MessageHandler(Filters.text, bio)]
},
fallbacks=[CommandHandler('cancel', cancel)]
)
dp.add_handler(conv_handler)
# log all errors
dp.add_error_handler(error)
# Start the Bot
updater.start_polling()
# Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
main()
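# The script above imports its token from examples/config.py, which is not shown
# here.  A minimal sketch of what that module might contain, assuming a plain class
# attribute (the real project may load it from an environment variable instead):
class Config:
    # hypothetical placeholder value; never commit a real bot token
    bot_token = "123456:ABC-DEF-your-telegram-bot-token"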
| 31.785276
| 99
| 0.663771
|
71335623579060b433260cfaa56edbfabd60fbca
| 951
|
py
|
Python
|
tests/test_00_dxf_low_level_structs/test_016_encoding.py
|
jkjt/ezdxf
|
2acc5611b81476ea16b98063b9f55446a9182b81
|
[
"MIT"
] | 515
|
2017-01-25T05:46:52.000Z
|
2022-03-29T09:52:27.000Z
|
tests/test_00_dxf_low_level_structs/test_016_encoding.py
|
jkjt/ezdxf
|
2acc5611b81476ea16b98063b9f55446a9182b81
|
[
"MIT"
] | 417
|
2017-01-25T10:01:17.000Z
|
2022-03-29T09:22:04.000Z
|
tests/test_00_dxf_low_level_structs/test_016_encoding.py
|
jkjt/ezdxf
|
2acc5611b81476ea16b98063b9f55446a9182b81
|
[
"MIT"
] | 149
|
2017-02-01T15:52:02.000Z
|
2022-03-17T10:33:38.000Z
|
# Copyright (C) 2016-2020, Manfred Moitzi
# License: MIT License
import pytest
import codecs
from ezdxf.lldxf.encoding import dxf_backslash_replace, encode
from pathlib import Path
# setup DXF unicode encoder -> '\U+nnnn'
codecs.register_error("dxfreplace", dxf_backslash_replace)
def test_ascii_encoding():
assert b"123\\U+6539" == encode("123改", "ascii")
@pytest.mark.parametrize(
["s", "e"],
[
("300\n\udcb7\udc9e\udcff\n", b"300\n\xb7\x9e\xff\n"),
("123改", b"123\\U+6539"),
],
)
def test_surrogate_escape_support_in_dxf_replace_encoder(s, e):
assert e == encode(s, "ascii")
@pytest.mark.parametrize("n", [0, 1, 2])
def test_XRECORD_handling_of_dxf_replace_encoder(n):
XRECORD = Path(__file__).parent / f"XRECORD_{n}.bin"
with open(XRECORD, "rb") as f:
data = f.read()
s = data.decode("utf8", errors="surrogateescape")
result = encode(s, encoding="utf8")
assert data == result
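# A small sketch of what the registered "dxfreplace" error handler provides, given
# the codecs.register_error() call at the top of this file: characters that the
# target codec cannot represent are emitted as the DXF unicode escape \U+nnnn
# instead of raising UnicodeEncodeError.
def sketch_dxf_escape():
    assert "123改".encode("ascii", errors="dxfreplace") == b"123\\U+6539"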
| 27.171429
| 63
| 0.675079
|
68174a6cbe454726b4a40242219d7d418727dd84
| 3,991
|
py
|
Python
|
helios/nodes/base.py
|
hyperevo/py-helios-node
|
ff417fe3fe90f85c9f95b3d8a5f0dd4c80532ee8
|
[
"MIT"
] | null | null | null |
helios/nodes/base.py
|
hyperevo/py-helios-node
|
ff417fe3fe90f85c9f95b3d8a5f0dd4c80532ee8
|
[
"MIT"
] | null | null | null |
helios/nodes/base.py
|
hyperevo/py-helios-node
|
ff417fe3fe90f85c9f95b3d8a5f0dd4c80532ee8
|
[
"MIT"
] | null | null | null |
from abc import abstractmethod
from pathlib import Path
from multiprocessing.managers import (
BaseManager,
)
from typing import (
Type,
List,
)
from helios.utils.chain_proxy import create_chain_manager
from hvm.chains.base import (
BaseChain,
)
from helios.chains.coro import AsyncChain
from hp2p.peer import BasePeerPool
from hp2p.service import (
BaseService,
)
from helios.config import (
ChainConfig,
)
from helios.extensibility import (
PluginManager,
)
from helios.extensibility.events import (
ResourceAvailableEvent
)
from helios.utils.db_proxy import (
create_db_manager
)
from eth_typing import Address
from eth_keys.datatypes import PrivateKey
class Node(BaseService):
"""
Create usable nodes by adding subclasses that define the following
unset attributes.
"""
chain_class: Type[BaseChain] = None
_chain_managers: List[BaseManager] = []
def __init__(self, plugin_manager: PluginManager, chain_config: ChainConfig) -> None:
super().__init__()
self.chain_config: ChainConfig = chain_config
self.private_helios_key = chain_config.node_private_helios_key
self.wallet_address = chain_config.node_wallet_address
self._plugin_manager = plugin_manager
self._db_manager = create_db_manager(chain_config.database_ipc_path)
self._db_manager.connect() # type: ignore
for i in range(chain_config.num_chain_processes):
chain_manager = create_chain_manager(chain_config.get_chain_ipc_path(i))
chain_manager.connect()
self._chain_managers.append(chain_manager)
self._chain_head_db = self._db_manager.get_chain_head_db() # type: ignore
self._jsonrpc_ipc_path: Path = chain_config.jsonrpc_ipc_path
@abstractmethod
def get_chain(self) -> AsyncChain:
raise NotImplementedError("Node classes must implement this method")
@abstractmethod
def get_new_chain(self, chain_address: Address=None, private_key:PrivateKey = None) -> AsyncChain:
raise NotImplementedError("Node classes must implement this method")
@abstractmethod
def get_new_private_chain(self, chain_address: Address = None) -> AsyncChain:
raise NotImplementedError("Node classes must implement this method")
@abstractmethod
def get_peer_pool(self) -> BasePeerPool:
"""
Return the PeerPool instance of the node
"""
raise NotImplementedError("Node classes must implement this method")
@abstractmethod
def get_p2p_server(self) -> BaseService:
"""
This is the main service that will be run, when calling :meth:`run`.
It's typically responsible for syncing the chain, with peer connections.
"""
raise NotImplementedError("Node classes must implement this method")
@property
def db_manager(self) -> BaseManager:
return self._db_manager
@property
def chain_managers(self) -> List[BaseManager]:
return self._chain_managers
@property
def chain_head_db(self):
return self._chain_head_db
def notify_resource_available(self) -> None:
# We currently need this to give plugins the chance to start as soon
# as the `PeerPool` is available. In the long term, the peer pool may become
# a plugin itself and we can get rid of this.
peer_pool = self.get_peer_pool()
self._plugin_manager.broadcast(ResourceAvailableEvent(
resource=(peer_pool, self.cancel_token),
resource_type=type(peer_pool)
))
# This broadcasts the *local* chain, which is suited for tasks that aren't blocking
# for too long. There may be value in also broadcasting the proxied chain.
self._plugin_manager.broadcast(ResourceAvailableEvent(
resource=self.get_chain(),
resource_type=BaseChain
))
async def _run(self) -> None:
await self.get_p2p_server().run()
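# A skeletal sketch of the subclass contract described in the Node docstring above;
# every body below is a placeholder, not taken from the real helios node classes.
class SketchNode(Node):
    chain_class = BaseChain  # a concrete node would point this at its chain class

    def get_chain(self) -> AsyncChain:
        raise NotImplementedError("return the (proxied) chain used by this node")

    def get_new_chain(self, chain_address: Address = None, private_key: PrivateKey = None) -> AsyncChain:
        raise NotImplementedError("construct a fresh chain for the given address/key")

    def get_new_private_chain(self, chain_address: Address = None) -> AsyncChain:
        raise NotImplementedError("construct a fresh private chain")

    def get_peer_pool(self) -> BasePeerPool:
        raise NotImplementedError("return the PeerPool instance of the node")

    def get_p2p_server(self) -> BaseService:
        raise NotImplementedError("return the main service that run() will execute")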
| 32.447154
| 102
| 0.704335
|
b6c49b445a5c1027dca42a002256acdeea8db28a
| 1,926
|
py
|
Python
|
openverse_catalog/dags/common_api_workflows.py
|
lyu4321/openverse-catalog
|
cd5be8fbff402a7420e772a803abdc2a20fd7235
|
[
"MIT"
] | null | null | null |
openverse_catalog/dags/common_api_workflows.py
|
lyu4321/openverse-catalog
|
cd5be8fbff402a7420e772a803abdc2a20fd7235
|
[
"MIT"
] | 1
|
2021-10-07T19:21:21.000Z
|
2021-10-07T19:21:21.000Z
|
openverse_catalog/dags/common_api_workflows.py
|
lyu4321/openverse-catalog
|
cd5be8fbff402a7420e772a803abdc2a20fd7235
|
[
"MIT"
] | null | null | null |
import logging
import os
import util.config as conf
from airflow import DAG
from croniter import croniter
from util.operator_util import get_log_operator, get_runner_operator
logging.basicConfig(
format="%(asctime)s: [%(levelname)s - DAG Loader] %(message)s", level=logging.INFO
)
CRONTAB_STR = conf.CRONTAB_STR
SCRIPT = conf.SCRIPT
DAG_DEFAULT_ARGS = conf.DAG_DEFAULT_ARGS
DAG_VARIABLES = conf.DAG_VARIABLES
def load_dag_conf(source, DAG_VARIABLES):
"""Validate and load configuration variables"""
logging.info(f"Loading configuration for {source}")
logging.debug(f"DAG_VARIABLES: {DAG_VARIABLES}")
dag_id = f"{source}_workflow"
script_location = DAG_VARIABLES[source].get(SCRIPT)
try:
assert os.path.exists(script_location)
except Exception as e:
logging.warning(f"Invalid script location: {script_location}. Error: {e}")
script_location = None
crontab_str = DAG_VARIABLES[source].get(CRONTAB_STR)
try:
croniter(crontab_str)
except Exception as e:
logging.warning(f"Invalid crontab string: {crontab_str}. Error: {e}")
crontab_str = None
return script_location, dag_id, crontab_str
def create_dag(
source, script_location, dag_id, crontab_str=None, default_args=DAG_DEFAULT_ARGS
):
dag = DAG(
dag_id=dag_id,
default_args=default_args,
schedule_interval=crontab_str,
catchup=False,
)
with dag:
start_task = get_log_operator(dag, source, "starting")
run_task = get_runner_operator(dag, source, script_location)
end_task = get_log_operator(dag, source, "finished")
start_task >> run_task >> end_task
return dag
for source in DAG_VARIABLES:
script_location, dag_id, crontab_str = load_dag_conf(source, DAG_VARIABLES)
if script_location:
globals()[dag_id] = create_dag(source, script_location, dag_id, crontab_str)
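# A sketch of the configuration shape load_dag_conf() expects, assuming the
# conf.SCRIPT and conf.CRONTAB_STR constants resolve to keys named "script" and
# "crontab_str"; the source name, path and schedule below are hypothetical.
SKETCH_DAG_VARIABLES = {
    "example_source": {
        "script": "/usr/local/airflow/scripts/example_source.py",
        "crontab_str": "0 16 * * *",
    }
}
# load_dag_conf("example_source", SKETCH_DAG_VARIABLES) would then return the script
# path, the dag_id "example_source_workflow" and the validated crontab string.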
| 28.323529
| 86
| 0.715472
|
9f12087a439352ee0289b9a26276f4bdc3de9c8e
| 1,141
|
py
|
Python
|
tags.py
|
adrianomota/projeto_open_street_map
|
e588745c962d51575355ac162e8f416377711527
|
[
"MIT"
] | null | null | null |
tags.py
|
adrianomota/projeto_open_street_map
|
e588745c962d51575355ac162e8f416377711527
|
[
"MIT"
] | null | null | null |
tags.py
|
adrianomota/projeto_open_street_map
|
e588745c962d51575355ac162e8f416377711527
|
[
"MIT"
] | null | null | null |
"""
Count the patterns of the tag keys.
"""
import xml.etree.cElementTree as ET
import pprint
import re
from collections import defaultdict
lower = re.compile(r'^([a-z]|_)*$')
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
OSM_PATH = "dataset/barueri_e_cidades_vizinhas.osm"
def key_type(element, keys):
    if element.tag == "tag":
        k = element.attrib['k']
        if lower.search(k):
            keys['lower'] += 1
        elif lower_colon.search(k):
            keys['lower_colon'] += 1
        elif problemchars.search(k):
            keys['problemchars'] += 1
        else:
            keys['other'] += 1
    return keys
def process_map(filename):
keys = {"lower": 0, "lower_colon": 0, "problemchars": 0, "other": 0}
for _, element in ET.iterparse(filename):
keys = key_type(element, keys)
return keys
pprint.pprint(process_map(OSM_PATH))
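# A few synthetic keys showing how key_type() buckets them; the sample values are
# illustrative and not taken from the OSM extract itself.
def sketch_classification():
    sample = {"lower": 0, "lower_colon": 0, "problemchars": 0, "other": 0}
    for k in ["highway", "addr:street", "route. type", "FIXME"]:
        elem = ET.Element("tag", {"k": k, "v": "x"})
        sample = key_type(elem, sample)
    # one key each: lowercase, lowercase-with-colon, problematic chars, other
    assert sample == {"lower": 1, "lower_colon": 1, "problemchars": 1, "other": 1}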
| 29.25641
| 72
| 0.579316
|
39a9c2589f281e7d9490c661a331f4881af83e82
| 3,202
|
py
|
Python
|
catalyst/metrics/precision.py
|
Ditwoo/catalyst
|
3126390f9f679ebcfedbe01707b416678a2732ac
|
[
"Apache-2.0"
] | 1
|
2020-11-14T13:35:22.000Z
|
2020-11-14T13:35:22.000Z
|
catalyst/metrics/precision.py
|
Ditwoo/catalyst
|
3126390f9f679ebcfedbe01707b416678a2732ac
|
[
"Apache-2.0"
] | null | null | null |
catalyst/metrics/precision.py
|
Ditwoo/catalyst
|
3126390f9f679ebcfedbe01707b416678a2732ac
|
[
"Apache-2.0"
] | null | null | null |
from typing import Optional, Union
import torch
from catalyst.metrics import precision_recall_fbeta_support
from catalyst.metrics.functional import process_multilabel_components
def average_precision(
outputs: torch.Tensor,
targets: torch.Tensor,
weights: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""Computes the average precision.
Args:
outputs: NxK tensor that for each of the N examples
indicates the probability of the example belonging to each of
the K classes, according to the model.
        targets: binary NxK tensor that encodes which of the K
            classes are associated with the N-th input
            (e.g., a row [0, 1, 0, 1] indicates that the example is
            associated with classes 2 and 4)
weights: importance for each sample
Returns:
torch.Tensor: tensor of [K; ] shape,
with average precision for K classes
"""
# outputs - [bs; num_classes] with scores
# targets - [bs; num_classes] with binary labels
outputs, targets, weights = process_multilabel_components(
outputs=outputs, targets=targets, weights=weights,
)
if outputs.numel() == 0:
return torch.zeros(1)
ap = torch.zeros(targets.size(1))
# compute average precision for each class
for class_i in range(targets.size(1)):
# sort scores
class_scores = outputs[:, class_i]
class_targets = targets[:, class_i]
_, sortind = torch.sort(class_scores, dim=0, descending=True)
correct = class_targets[sortind]
# compute true positive sums
if weights is not None:
class_weight = weights[sortind]
weighted_correct = correct.float() * class_weight
tp = weighted_correct.cumsum(0)
rg = class_weight.cumsum(0)
else:
tp = correct.float().cumsum(0)
rg = torch.arange(1, targets.size(0) + 1).float()
# compute precision curve
precision = tp.div(rg)
# compute average precision
ap[class_i] = precision[correct.bool()].sum() / max(
float(correct.sum()), 1
)
return ap
def precision(
outputs: torch.Tensor,
targets: torch.Tensor,
argmax_dim: int = -1,
eps: float = 1e-7,
num_classes: Optional[int] = None,
) -> Union[float, torch.Tensor]:
"""
Multiclass precision metric.
Args:
outputs: estimated targets as predicted by a model
with shape [bs; ..., (num_classes or 1)]
targets: ground truth (correct) target values
with shape [bs; ..., 1]
argmax_dim: int, that specifies dimension for argmax transformation
in case of scores/probabilities in ``outputs``
eps: float. Epsilon to avoid zero division.
num_classes: int, that specifies number of classes if it known
Returns:
        Tensor: computed precision score(s)
"""
precision_score, _, _, _, = precision_recall_fbeta_support(
outputs=outputs,
targets=targets,
argmax_dim=argmax_dim,
eps=eps,
num_classes=num_classes,
)
return precision_score
__all__ = ["average_precision", "precision"]
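# A tiny, hedged example of calling average_precision(); the tensors below are
# made-up multilabel scores/targets for illustration only.
def sketch_average_precision():
    outputs = torch.tensor([[0.9, 0.1], [0.8, 0.7], [0.2, 0.6]])  # [bs=3; num_classes=2] scores
    targets = torch.tensor([[1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])  # binary labels, same shape
    ap = average_precision(outputs=outputs, targets=targets)
    assert ap.shape == (2,)  # one average-precision value per class
    return ap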
| 30.788462
| 75
| 0.631793
|
8dd7a26fd8434f83c44b88bdd08aca59aa9a0793
| 6,071
|
py
|
Python
|
Functions/register_functions.py
|
mboyr4z/Hastane_Otomasyonu
|
6f92d559953db233a5fe3c80757624d61cca7618
|
[
"MIT"
] | 7
|
2021-07-17T10:05:08.000Z
|
2022-03-25T19:17:29.000Z
|
Functions/register_functions.py
|
mboyr4z/Hastane_Otomasyonu
|
6f92d559953db233a5fe3c80757624d61cca7618
|
[
"MIT"
] | null | null | null |
Functions/register_functions.py
|
mboyr4z/Hastane_Otomasyonu
|
6f92d559953db233a5fe3c80757624d61cca7618
|
[
"MIT"
] | null | null | null |
from Hospital_Automation.Database.AddHuman import addDoctortoDatabase,addPatienttoDatabase,addPharmacisttoDatabase,addCounselortoDatabase
class register_functions():
def __init__(self,var):
self.var = var
self.errorCount = 0
    def f_register(self):  # registration ("sign up") section
        self.f_check_All_Input_and_Register()  # validate the inputs before registering
def f_check_All_Input_and_Register(self):
self.errorCount = 0
if self.var.selectedJob == "doctor": # doktor için olan input kontrollerim
if(self.var.screen_register.doctor_line_name.text() == ""):
print("name cant be null")
elif(self.var.screen_register.doctor_line_surname.text() == ""):
print("surname cant be null")
elif(self.var.screen_register.doctor_line_username.text() == ""):
print("username cant be null")
elif(self.var.screen_register.doctor_line_password.text() == ""):
print("password cant be null")
elif self.var.imagePath == "":
print("image Path catn be null. Please select Image!!.")
else:
self.doctor_name = self.var.screen_register.doctor_line_name.text()
self.doctor_surname = self.var.screen_register.doctor_line_surname.text()
self.doctor_username = self.var.screen_register.doctor_line_username.text()
self.doctor_password = self.var.screen_register.doctor_line_password.text()
self.doctor_profession = self.var.screen_register.doctor_combo_profession.currentText()
addDoctortoDatabase("Database/database.db",self.doctor_name,self.doctor_surname,self.doctor_profession,self.doctor_username,self.doctor_password,self.var.imagePath)
elif self.var.selectedJob == "patient": #hasta için input kontrollerim
self.errorCount = 0
if (self.var.screen_register.patient_line_name.text() == ""):
print("name cant be null")
self.errorCount+=1
elif (self.var.screen_register.patient_line_surname.text() == ""):
print("surname cant be null")
self.errorCount += 1
elif (self.var.screen_register.patient_line_username.text() == ""):
print("username cant be null")
self.errorCount += 1
elif (self.var.screen_register.patient_line_password.text() == ""):
print("password cant be null")
self.errorCount += 1
            try:
                int(self.var.screen_register.patient_line_tc.text())
            except ValueError:
                print("TC can only be a number")
                self.errorCount += 1
            try:
                int(self.var.screen_register.patient_line_age.text())
            except ValueError:
                print("Age can only be a number")
                self.errorCount += 1
            try:
                int(self.var.screen_register.patient_line_gender.text())
            except ValueError:
                print("Gender can only be a number ('0' for female, '1' for male)")
                self.errorCount += 1
if self.errorCount == 0:
tc_id = self.var.screen_register.patient_line_tc.text()
name = self.var.screen_register.patient_line_name.text()
surname = self.var.screen_register.patient_line_surname.text()
age = int(self.var.screen_register.patient_line_age.text())
gender = int(self.var.screen_register.patient_line_gender.text())
username = self.var.screen_register.patient_line_username.text()
password = self.var.screen_register.patient_line_password.text()
addPatienttoDatabase("Database/database.db",tc_id,name,surname,age,gender,username,password,self.var.imagePath)
else:
print("Fix All errors then try again!")
elif self.var.selectedJob == "counselor":
print("Counselordayız")
if (self.var.screen_register.counselor_line_name.text() == ""):
print("name cant be null")
elif (self.var.screen_register.counselor_line_surname.text() == ""):
print("surname cant be null")
elif (self.var.screen_register.counselor_line_username.text() == ""):
print("username cant be null")
elif (self.var.screen_register.counselor_line_password.text() == ""):
print("password cant be null")
else:
name = self.var.screen_register.counselor_line_name.text()
surname = self.var.screen_register.counselor_line_surname.text()
username = self.var.screen_register.counselor_line_username.text()
password = self.var.screen_register.counselor_line_password.text()
imagePath = self.var.imagePath
addCounselortoDatabase("Database/database.db",name,surname,username,password,imagePath)
else:
if (self.var.screen_register.pharmacist_line_name.text() == ""):
print("name cant be null")
elif (self.var.screen_register.pharmacist_line_surname.text() == ""):
print("surname cant be null")
elif (self.var.screen_register.pharmacist_line_username.text() == ""):
print("username cant be null")
elif (self.var.screen_register.pharmacist_line_password.text() == ""):
print("password cant be null")
else:
name = self.var.screen_register.pharmacist_line_name.text()
surname = self.var.screen_register.pharmacist_line_surname.text()
username = self.var.screen_register.pharmacist_line_username.text()
password = self.var.screen_register.pharmacist_line_password.text()
addPharmacisttoDatabase("Database/database.db",name,surname,username,password,self.var.imagePath)
| 48.959677
| 180
| 0.610443
|
bbb6d061b1652318c8963b702b0f46cf310b037c
| 2,090
|
py
|
Python
|
devtools/old_migrations/drop_procedures.py
|
ahurta92/QCFractal
|
c7285c9786659c34b1f6f611c6d60b93d76e2678
|
[
"BSD-3-Clause"
] | null | null | null |
devtools/old_migrations/drop_procedures.py
|
ahurta92/QCFractal
|
c7285c9786659c34b1f6f611c6d60b93d76e2678
|
[
"BSD-3-Clause"
] | null | null | null |
devtools/old_migrations/drop_procedures.py
|
ahurta92/QCFractal
|
c7285c9786659c34b1f6f611c6d60b93d76e2678
|
[
"BSD-3-Clause"
] | null | null | null |
import argparse
from qcfractal.storage_sockets import storage_socket_factory
from qcfractal.storage_sockets.sql_models import (
ProcedureMap,
OptimizationProcedureORM,
OptimizationHistory,
TorsionDriveProcedureORM,
GridOptimizationProcedureORM,
GridOptimizationAssociation,
Trajectory,
torsion_init_mol_association,
)
sql_uri = "postgresql+psycopg2://qcarchive:mypass@localhost:5432/qcarchivedb"
sql_storage = storage_socket_factory(sql_uri, "qcarchivedb", db_type="sqlalchemy")
with sql_storage.engine.connect() as con:
con.execute("ALTER TABLE optimization_history " + "DROP CONSTRAINT optimization_history_pkey;")
con.execute(
"ALTER TABLE optimization_history "
"ADD CONSTRAINT optimization_history_pkey "
"PRIMARY KEY (torsion_id, opt_id, key, position);"
)
# with sql_storage.session_scope() as session:
# procedures = session.query(OptimizationProcedureORM).all()
# print('Deleteing Opt proc: ', len(procedures))
# # delete through session to delete correctly from base_result
# for proc in procedures:
# session.delete(proc)
#
# procedures = session.query(TorsionDriveProcedureORM).all()
# print('Deleteing Torsion proc: ', len(procedures))
# # delete through session to delete correctly from base_result
# for proc in procedures:
# session.delete(proc)
#
# procedures = session.query(GridOptimizationProcedureORM).all()
# print('Deleteing Grid proc: ', len(procedures))
# # delete through session to delete correctly from base_result
# for proc in procedures:
# session.delete(proc)
#
# session.commit()
#
# # drop tables
# torsion_init_mol_association.drop(sql_storage.engine)
# OptimizationHistory.__table__.drop(sql_storage.engine)
# GridOptimizationAssociation.__table__.drop(sql_storage.engine)
# Trajectory.__table__.drop(sql_storage.engine)
#
# OptimizationProcedureORM.__table__.drop(sql_storage.engine)
# TorsionDriveProcedureORM.__table__.drop(sql_storage.engine)
# GridOptimizationProcedureORM.__table__.drop(sql_storage.engine)
#
# ProcedureMap.__table__.drop(sql_storage.engine)
| 33.174603
| 99
| 0.779904
|
9d53e15835e507e490b601d75fa93449a7a1c009
| 460
|
py
|
Python
|
smarm.com/wakeup/migrations/0003_auto_20201102_0024.py
|
jphacks/E_2002
|
46d66f0c1cc479b70111754e2b49ad601e7be108
|
[
"MIT"
] | null | null | null |
smarm.com/wakeup/migrations/0003_auto_20201102_0024.py
|
jphacks/E_2002
|
46d66f0c1cc479b70111754e2b49ad601e7be108
|
[
"MIT"
] | null | null | null |
smarm.com/wakeup/migrations/0003_auto_20201102_0024.py
|
jphacks/E_2002
|
46d66f0c1cc479b70111754e2b49ad601e7be108
|
[
"MIT"
] | 1
|
2020-11-05T17:56:25.000Z
|
2020-11-05T17:56:25.000Z
|
# Generated by Django 2.2 on 2020-11-01 15:24
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('wakeup', '0002_auto_20201101_2359'),
]
operations = [
migrations.AlterField(
model_name='schedule',
name='start_time',
field=models.TimeField(default=django.utils.timezone.now, verbose_name='Alarm'),
),
]
| 23
| 92
| 0.63913
|
87383fef717a6a1d76b44e4d625d6eb405a9f431
| 34,012
|
py
|
Python
|
lib/sqlalchemy/pool/base.py
|
karlicoss/sqlalchemy
|
402cca8f2ac42a08fba7a200c4e1e086e2081aad
|
[
"MIT"
] | 1
|
2021-04-04T10:13:08.000Z
|
2021-04-04T10:13:08.000Z
|
lib/sqlalchemy/pool/base.py
|
karlicoss/sqlalchemy
|
402cca8f2ac42a08fba7a200c4e1e086e2081aad
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/pool/base.py
|
karlicoss/sqlalchemy
|
402cca8f2ac42a08fba7a200c4e1e086e2081aad
|
[
"MIT"
] | null | null | null |
# sqlalchemy/pool.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base constructs for connection pools.
"""
from collections import deque
import time
import weakref
from .. import event
from .. import exc
from .. import log
from .. import util
from ..util import threading
reset_rollback = util.symbol("reset_rollback")
reset_commit = util.symbol("reset_commit")
reset_none = util.symbol("reset_none")
class _ConnDialect(object):
"""partial implementation of :class:`.Dialect`
which provides DBAPI connection methods.
When a :class:`_pool.Pool` is combined with an :class:`_engine.Engine`,
the :class:`_engine.Engine` replaces this with its own
:class:`.Dialect`.
"""
def do_rollback(self, dbapi_connection):
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
dbapi_connection.commit()
def do_close(self, dbapi_connection):
dbapi_connection.close()
def do_ping(self, dbapi_connection):
raise NotImplementedError(
"The ping feature requires that a dialect is "
"passed to the connection pool."
)
class Pool(log.Identified):
"""Abstract base class for connection pools."""
_dialect = _ConnDialect()
_is_asyncio = False
def __init__(
self,
creator,
recycle=-1,
echo=None,
logging_name=None,
reset_on_return=True,
events=None,
dialect=None,
pre_ping=False,
_dispatch=None,
):
"""
Construct a Pool.
:param creator: a callable function that returns a DB-API
connection object. The function will be called with
parameters.
:param recycle: If set to a value other than -1, number of
seconds between connection recycling, which means upon
checkout, if this timeout is surpassed the connection will be
closed and replaced with a newly opened connection. Defaults to -1.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param echo: if True, the connection pool will log
informational output such as when connections are invalidated
as well as when connections are recycled to the default log handler,
          which defaults to ``sys.stdout`` for output. If set to the string
``"debug"``, the logging will include pool checkouts and checkins.
The :paramref:`_pool.Pool.echo` parameter can also be set from the
:func:`_sa.create_engine` call by using the
:paramref:`_sa.create_engine.echo_pool` parameter.
.. seealso::
:ref:`dbengine_logging` - further detail on how to configure
logging.
:param reset_on_return: Determine steps to take on
connections as they are returned to the pool.
reset_on_return can have any of these values:
* ``"rollback"`` - call rollback() on the connection,
to release locks and transaction resources.
This is the default value. The vast majority
of use cases should leave this value set.
* ``True`` - same as 'rollback', this is here for
backwards compatibility.
* ``"commit"`` - call commit() on the connection,
to release locks and transaction resources.
A commit here may be desirable for databases that
cache query plans if a commit is emitted,
such as Microsoft SQL Server. However, this
value is more dangerous than 'rollback' because
any data changes present on the transaction
are committed unconditionally.
* ``None`` - don't do anything on the connection.
This setting should generally only be made on a database
that has no transaction support at all,
namely MySQL MyISAM; when used on this backend, performance
can be improved as the "rollback" call is still expensive on
MySQL. It is **strongly recommended** that this setting not be
used for transaction-supporting databases in conjunction with
a persistent pool such as :class:`.QueuePool`, as it opens
the possibility for connections still in a transaction to be
idle in the pool. The setting may be appropriate in the
case of :class:`.NullPool` or special circumstances where
the connection pool in use is not being used to maintain connection
lifecycle.
* ``False`` - same as None, this is here for
backwards compatibility.
:param events: a list of 2-tuples, each of the form
``(callable, target)`` which will be passed to :func:`.event.listen`
upon construction. Provided here so that event listeners
can be assigned via :func:`_sa.create_engine` before dialect-level
listeners are applied.
:param dialect: a :class:`.Dialect` that will handle the job
of calling rollback(), close(), or commit() on DBAPI connections.
If omitted, a built-in "stub" dialect is used. Applications that
make use of :func:`_sa.create_engine` should not use this parameter
as it is handled by the engine creation strategy.
.. versionadded:: 1.1 - ``dialect`` is now a public parameter
to the :class:`_pool.Pool`.
:param pre_ping: if True, the pool will emit a "ping" (typically
"SELECT 1", but is dialect-specific) on the connection
upon checkout, to test if the connection is alive or not. If not,
the connection is transparently re-connected and upon success, all
other pooled connections established prior to that timestamp are
invalidated. Requires that a dialect is passed as well to
interpret the disconnection error.
.. versionadded:: 1.2
"""
if logging_name:
self.logging_name = self._orig_logging_name = logging_name
else:
self._orig_logging_name = None
log.instance_logger(self, echoflag=echo)
self._threadconns = threading.local()
self._creator = creator
self._recycle = recycle
self._invalidate_time = 0
self._pre_ping = pre_ping
self._reset_on_return = util.symbol.parse_user_argument(
reset_on_return,
{
reset_rollback: ["rollback", True],
reset_none: ["none", None, False],
reset_commit: ["commit"],
},
"reset_on_return",
resolve_symbol_names=False,
)
self.echo = echo
if _dispatch:
self.dispatch._update(_dispatch, only_propagate=False)
if dialect:
self._dialect = dialect
if events:
for fn, target in events:
event.listen(self, target, fn)
@property
def _creator(self):
return self.__dict__["_creator"]
@_creator.setter
def _creator(self, creator):
self.__dict__["_creator"] = creator
self._invoke_creator = self._should_wrap_creator(creator)
def _should_wrap_creator(self, creator):
"""Detect if creator accepts a single argument, or is sent
as a legacy style no-arg function.
"""
try:
argspec = util.get_callable_argspec(self._creator, no_self=True)
except TypeError:
return lambda crec: creator()
defaulted = argspec[3] is not None and len(argspec[3]) or 0
positionals = len(argspec[0]) - defaulted
# look for the exact arg signature that DefaultStrategy
# sends us
if (argspec[0], argspec[3]) == (["connection_record"], (None,)):
return creator
# or just a single positional
elif positionals == 1:
return creator
# all other cases, just wrap and assume legacy "creator" callable
# thing
else:
return lambda crec: creator()
def _close_connection(self, connection):
self.logger.debug("Closing connection %r", connection)
try:
self._dialect.do_close(connection)
except Exception:
self.logger.error(
"Exception closing connection %r", connection, exc_info=True
)
def _create_connection(self):
"""Called by subclasses to create a new ConnectionRecord."""
return _ConnectionRecord(self)
def _invalidate(self, connection, exception=None, _checkin=True):
"""Mark all connections established within the generation
of the given connection as invalidated.
If this pool's last invalidate time is before when the given
connection was created, update the timestamp til now. Otherwise,
no action is performed.
Connections with a start time prior to this pool's invalidation
time will be recycled upon next checkout.
"""
rec = getattr(connection, "_connection_record", None)
if not rec or self._invalidate_time < rec.starttime:
self._invalidate_time = time.time()
if _checkin and getattr(connection, "is_valid", False):
connection.invalidate(exception)
def recreate(self):
"""Return a new :class:`_pool.Pool`, of the same class as this one
and configured with identical creation arguments.
This method is used in conjunction with :meth:`dispose`
to close out an entire :class:`_pool.Pool` and create a new one in
its place.
"""
raise NotImplementedError()
def dispose(self):
"""Dispose of this pool.
This method leaves the possibility of checked-out connections
remaining open, as it only affects connections that are
idle in the pool.
.. seealso::
:meth:`Pool.recreate`
"""
raise NotImplementedError()
def connect(self):
"""Return a DBAPI connection from the pool.
The connection is instrumented such that when its
``close()`` method is called, the connection will be returned to
the pool.
"""
return _ConnectionFairy._checkout(self)
def _return_conn(self, record):
"""Given a _ConnectionRecord, return it to the :class:`_pool.Pool`.
This method is called when an instrumented DBAPI connection
has its ``close()`` method called.
"""
self._do_return_conn(record)
def _do_get(self):
"""Implementation for :meth:`get`, supplied by subclasses."""
raise NotImplementedError()
def _do_return_conn(self, conn):
"""Implementation for :meth:`return_conn`, supplied by subclasses."""
raise NotImplementedError()
def status(self):
raise NotImplementedError()
class _ConnectionRecord(object):
"""Internal object which maintains an individual DBAPI connection
referenced by a :class:`_pool.Pool`.
The :class:`._ConnectionRecord` object always exists for any particular
DBAPI connection whether or not that DBAPI connection has been
"checked out". This is in contrast to the :class:`._ConnectionFairy`
which is only a public facade to the DBAPI connection while it is checked
out.
A :class:`._ConnectionRecord` may exist for a span longer than that
of a single DBAPI connection. For example, if the
:meth:`._ConnectionRecord.invalidate`
method is called, the DBAPI connection associated with this
:class:`._ConnectionRecord`
will be discarded, but the :class:`._ConnectionRecord` may be used again,
in which case a new DBAPI connection is produced when the
:class:`_pool.Pool`
next uses this record.
The :class:`._ConnectionRecord` is delivered along with connection
pool events, including :meth:`_events.PoolEvents.connect` and
:meth:`_events.PoolEvents.checkout`, however :class:`._ConnectionRecord`
still
remains an internal object whose API and internals may change.
.. seealso::
:class:`._ConnectionFairy`
"""
def __init__(self, pool, connect=True):
self.__pool = pool
if connect:
self.__connect(first_connect_check=True)
self.finalize_callback = deque()
fresh = False
fairy_ref = None
starttime = None
connection = None
"""A reference to the actual DBAPI connection being tracked.
May be ``None`` if this :class:`._ConnectionRecord` has been marked
as invalidated; a new DBAPI connection may replace it if the owning
pool calls upon this :class:`._ConnectionRecord` to reconnect.
"""
_soft_invalidate_time = 0
@util.memoized_property
def info(self):
"""The ``.info`` dictionary associated with the DBAPI connection.
This dictionary is shared among the :attr:`._ConnectionFairy.info`
and :attr:`_engine.Connection.info` accessors.
.. note::
The lifespan of this dictionary is linked to the
DBAPI connection itself, meaning that it is **discarded** each time
the DBAPI connection is closed and/or invalidated. The
:attr:`._ConnectionRecord.record_info` dictionary remains
persistent throughout the lifespan of the
:class:`._ConnectionRecord` container.
"""
return {}
@util.memoized_property
def record_info(self):
"""An "info' dictionary associated with the connection record
itself.
Unlike the :attr:`._ConnectionRecord.info` dictionary, which is linked
to the lifespan of the DBAPI connection, this dictionary is linked
to the lifespan of the :class:`._ConnectionRecord` container itself
and will remain persistent throughout the life of the
:class:`._ConnectionRecord`.
.. versionadded:: 1.1
"""
return {}
@classmethod
def checkout(cls, pool):
rec = pool._do_get()
try:
dbapi_connection = rec.get_connection()
except Exception as err:
with util.safe_reraise():
rec._checkin_failed(err)
echo = pool._should_log_debug()
fairy = _ConnectionFairy(dbapi_connection, rec, echo)
rec.fairy_ref = weakref.ref(
fairy,
lambda ref: _finalize_fairy
and _finalize_fairy(None, rec, pool, ref, echo),
)
if echo:
pool.logger.debug(
"Connection %r checked out from pool", dbapi_connection
)
return fairy
def _checkin_failed(self, err):
self.invalidate(e=err)
self.checkin(_no_fairy_ref=True)
def checkin(self, _no_fairy_ref=False):
if self.fairy_ref is None and not _no_fairy_ref:
util.warn("Double checkin attempted on %s" % self)
return
self.fairy_ref = None
connection = self.connection
pool = self.__pool
while self.finalize_callback:
finalizer = self.finalize_callback.pop()
finalizer(connection)
if pool.dispatch.checkin:
pool.dispatch.checkin(connection, self)
pool._return_conn(self)
@property
def in_use(self):
return self.fairy_ref is not None
@property
def last_connect_time(self):
return self.starttime
def close(self):
if self.connection is not None:
self.__close()
def invalidate(self, e=None, soft=False):
"""Invalidate the DBAPI connection held by this :class:`._ConnectionRecord`.
This method is called for all connection invalidations, including
when the :meth:`._ConnectionFairy.invalidate` or
:meth:`_engine.Connection.invalidate` methods are called,
as well as when any
so-called "automatic invalidation" condition occurs.
:param e: an exception object indicating a reason for the invalidation.
:param soft: if True, the connection isn't closed; instead, this
connection will be recycled on next checkout.
.. versionadded:: 1.0.3
.. seealso::
:ref:`pool_connection_invalidation`
"""
# already invalidated
if self.connection is None:
return
if soft:
self.__pool.dispatch.soft_invalidate(self.connection, self, e)
else:
self.__pool.dispatch.invalidate(self.connection, self, e)
if e is not None:
self.__pool.logger.info(
"%sInvalidate connection %r (reason: %s:%s)",
"Soft " if soft else "",
self.connection,
e.__class__.__name__,
e,
)
else:
self.__pool.logger.info(
"%sInvalidate connection %r",
"Soft " if soft else "",
self.connection,
)
if soft:
self._soft_invalidate_time = time.time()
else:
self.__close()
self.connection = None
def get_connection(self):
recycle = False
# NOTE: the various comparisons here are assuming that measurable time
# passes between these state changes. however, time.time() is not
# guaranteed to have sub-second precision. comparisons of
# "invalidation time" to "starttime" should perhaps use >= so that the
# state change can take place assuming no measurable time has passed,
# however this does not guarantee correct behavior here as if time
# continues to not pass, it will try to reconnect repeatedly until
# these timestamps diverge, so in that sense using > is safer. Per
# https://stackoverflow.com/a/1938096/34549, Windows time.time() may be
# within 16 milliseconds accuracy, so unit tests for connection
# invalidation need a sleep of at least this long between initial start
# time and invalidation for the logic below to work reliably.
if self.connection is None:
self.info.clear()
self.__connect()
elif (
self.__pool._recycle > -1
and time.time() - self.starttime > self.__pool._recycle
):
self.__pool.logger.info(
"Connection %r exceeded timeout; recycling", self.connection
)
recycle = True
elif self.__pool._invalidate_time > self.starttime:
self.__pool.logger.info(
"Connection %r invalidated due to pool invalidation; "
+ "recycling",
self.connection,
)
recycle = True
elif self._soft_invalidate_time > self.starttime:
self.__pool.logger.info(
"Connection %r invalidated due to local soft invalidation; "
+ "recycling",
self.connection,
)
recycle = True
if recycle:
self.__close()
self.info.clear()
self.__connect()
return self.connection
def __close(self):
self.finalize_callback.clear()
if self.__pool.dispatch.close:
self.__pool.dispatch.close(self.connection, self)
self.__pool._close_connection(self.connection)
self.connection = None
def __connect(self, first_connect_check=False):
pool = self.__pool
# ensure any existing connection is removed, so that if
# creator fails, this attribute stays None
self.connection = None
try:
self.starttime = time.time()
connection = pool._invoke_creator(self)
pool.logger.debug("Created new connection %r", connection)
self.connection = connection
self.fresh = True
except Exception as e:
with util.safe_reraise():
pool.logger.debug("Error on connect(): %s", e)
else:
if first_connect_check:
pool.dispatch.first_connect.for_modify(
pool.dispatch
).exec_once_unless_exception(self.connection, self)
if pool.dispatch.connect:
pool.dispatch.connect(self.connection, self)
def _finalize_fairy(
connection, connection_record, pool, ref, echo, fairy=None
):
"""Cleanup for a :class:`._ConnectionFairy` whether or not it's already
been garbage collected.
"""
if ref is not None:
if connection_record.fairy_ref is not ref:
return
assert connection is None
connection = connection_record.connection
if connection is not None:
if connection_record and echo:
pool.logger.debug(
"Connection %r being returned to pool", connection
)
try:
fairy = fairy or _ConnectionFairy(
connection, connection_record, echo
)
assert fairy.connection is connection
fairy._reset(pool)
# Immediately close detached instances
if not connection_record:
if pool.dispatch.close_detached:
pool.dispatch.close_detached(connection)
pool._close_connection(connection)
except BaseException as e:
pool.logger.error(
"Exception during reset or similar", exc_info=True
)
if connection_record:
connection_record.invalidate(e=e)
if not isinstance(e, Exception):
raise
if connection_record and connection_record.fairy_ref is not None:
connection_record.checkin()
class _ConnectionFairy(object):
"""Proxies a DBAPI connection and provides return-on-dereference
support.
This is an internal object used by the :class:`_pool.Pool` implementation
to provide context management to a DBAPI connection delivered by
that :class:`_pool.Pool`.
The name "fairy" is inspired by the fact that the
:class:`._ConnectionFairy` object's lifespan is transitory, as it lasts
only for the length of a specific DBAPI connection being checked out from
the pool, and additionally that as a transparent proxy, it is mostly
invisible.
.. seealso::
:class:`._ConnectionRecord`
"""
def __init__(self, dbapi_connection, connection_record, echo):
self.connection = dbapi_connection
self._connection_record = connection_record
self._echo = echo
connection = None
"""A reference to the actual DBAPI connection being tracked."""
_connection_record = None
"""A reference to the :class:`._ConnectionRecord` object associated
with the DBAPI connection.
This is currently an internal accessor which is subject to change.
"""
_reset_agent = None
"""Refer to an object with a ``.commit()`` and ``.rollback()`` method;
if non-None, the "reset-on-return" feature will call upon this object
rather than directly against the dialect-level do_rollback() and
do_commit() methods.
In practice, a :class:`_engine.Connection` assigns a :class:`.Transaction`
object
to this variable when one is in scope so that the :class:`.Transaction`
takes the job of committing or rolling back on return if
:meth:`_engine.Connection.close` is called while the :class:`.Transaction`
still exists.
This is essentially an "event handler" of sorts but is simplified as an
instance variable both for performance/simplicity as well as that there
can only be one "reset agent" at a time.
"""
@classmethod
def _checkout(cls, pool, threadconns=None, fairy=None):
if not fairy:
fairy = _ConnectionRecord.checkout(pool)
fairy._pool = pool
fairy._counter = 0
if threadconns is not None:
threadconns.current = weakref.ref(fairy)
if fairy.connection is None:
raise exc.InvalidRequestError("This connection is closed")
fairy._counter += 1
if (
not pool.dispatch.checkout and not pool._pre_ping
) or fairy._counter != 1:
return fairy
# Pool listeners can trigger a reconnection on checkout, as well
# as the pre-pinger.
# there are three attempts made here, but note that if the database
# is not accessible from a connection standpoint, those won't proceed
# here.
attempts = 2
while attempts > 0:
connection_is_fresh = fairy._connection_record.fresh
fairy._connection_record.fresh = False
try:
if pool._pre_ping:
if not connection_is_fresh:
if fairy._echo:
pool.logger.debug(
"Pool pre-ping on connection %s",
fairy.connection,
)
result = pool._dialect.do_ping(fairy.connection)
if not result:
if fairy._echo:
pool.logger.debug(
"Pool pre-ping on connection %s failed, "
"will invalidate pool",
fairy.connection,
)
raise exc.InvalidatePoolError()
elif fairy._echo:
pool.logger.debug(
"Connection %s is fresh, skipping pre-ping",
fairy.connection,
)
pool.dispatch.checkout(
fairy.connection, fairy._connection_record, fairy
)
return fairy
except exc.DisconnectionError as e:
if e.invalidate_pool:
pool.logger.info(
"Disconnection detected on checkout, "
"invalidating all pooled connections prior to "
"current timestamp (reason: %r)",
e,
)
fairy._connection_record.invalidate(e)
pool._invalidate(fairy, e, _checkin=False)
else:
pool.logger.info(
"Disconnection detected on checkout, "
"invalidating individual connection %s (reason: %r)",
fairy.connection,
e,
)
fairy._connection_record.invalidate(e)
try:
fairy.connection = (
fairy._connection_record.get_connection()
)
except Exception as err:
with util.safe_reraise():
fairy._connection_record._checkin_failed(err)
attempts -= 1
pool.logger.info("Reconnection attempts exhausted on checkout")
fairy.invalidate()
raise exc.InvalidRequestError("This connection is closed")
def _checkout_existing(self):
return _ConnectionFairy._checkout(self._pool, fairy=self)
def _checkin(self):
_finalize_fairy(
self.connection,
self._connection_record,
self._pool,
None,
self._echo,
fairy=self,
)
self.connection = None
self._connection_record = None
_close = _checkin
def _reset(self, pool):
if pool.dispatch.reset:
pool.dispatch.reset(self, self._connection_record)
if pool._reset_on_return is reset_rollback:
if self._echo:
pool.logger.debug(
"Connection %s rollback-on-return%s",
self.connection,
", via agent" if self._reset_agent else "",
)
if self._reset_agent:
if not self._reset_agent.is_active:
util.warn(
"Reset agent is not active. "
"This should not occur unless there was already "
"a connectivity error in progress."
)
pool._dialect.do_rollback(self)
else:
self._reset_agent.rollback()
else:
pool._dialect.do_rollback(self)
elif pool._reset_on_return is reset_commit:
if self._echo:
pool.logger.debug(
"Connection %s commit-on-return%s",
self.connection,
", via agent" if self._reset_agent else "",
)
if self._reset_agent:
if not self._reset_agent.is_active:
util.warn(
"Reset agent is not active. "
"This should not occur unless there was already "
"a connectivity error in progress."
)
pool._dialect.do_commit(self)
else:
self._reset_agent.commit()
else:
pool._dialect.do_commit(self)
@property
def _logger(self):
return self._pool.logger
@property
def is_valid(self):
"""Return True if this :class:`._ConnectionFairy` still refers
to an active DBAPI connection."""
return self.connection is not None
@util.memoized_property
def info(self):
"""Info dictionary associated with the underlying DBAPI connection
referred to by this :class:`.ConnectionFairy`, allowing user-defined
data to be associated with the connection.
The data here will follow along with the DBAPI connection including
after it is returned to the connection pool and used again
in subsequent instances of :class:`._ConnectionFairy`. It is shared
with the :attr:`._ConnectionRecord.info` and
:attr:`_engine.Connection.info`
accessors.
The dictionary associated with a particular DBAPI connection is
discarded when the connection itself is discarded.
"""
return self._connection_record.info
@property
def record_info(self):
"""Info dictionary associated with the :class:`._ConnectionRecord
container referred to by this :class:`.ConnectionFairy`.
Unlike the :attr:`._ConnectionFairy.info` dictionary, the lifespan
of this dictionary is persistent across connections that are
disconnected and/or invalidated within the lifespan of a
:class:`._ConnectionRecord`.
.. versionadded:: 1.1
"""
if self._connection_record:
return self._connection_record.record_info
else:
return None
def invalidate(self, e=None, soft=False):
"""Mark this connection as invalidated.
This method can be called directly, and is also called as a result
of the :meth:`_engine.Connection.invalidate` method. When invoked,
the DBAPI connection is immediately closed and discarded from
further use by the pool. The invalidation mechanism proceeds
via the :meth:`._ConnectionRecord.invalidate` internal method.
:param e: an exception object indicating a reason for the invalidation.
:param soft: if True, the connection isn't closed; instead, this
connection will be recycled on next checkout.
.. versionadded:: 1.0.3
.. seealso::
:ref:`pool_connection_invalidation`
"""
if self.connection is None:
util.warn("Can't invalidate an already-closed connection.")
return
if self._connection_record:
self._connection_record.invalidate(e=e, soft=soft)
if not soft:
self.connection = None
self._checkin()
def cursor(self, *args, **kwargs):
"""Return a new DBAPI cursor for the underlying connection.
This method is a proxy for the ``connection.cursor()`` DBAPI
method.
"""
return self.connection.cursor(*args, **kwargs)
def __getattr__(self, key):
return getattr(self.connection, key)
def detach(self):
"""Separate this connection from its Pool.
This means that the connection will no longer be returned to the
pool when closed, and will instead be literally closed. The
containing ConnectionRecord is separated from the DB-API connection,
and will create a new connection when next used.
Note that any overall connection limiting constraints imposed by a
Pool implementation may be violated after a detach, as the detached
connection is removed from the pool's knowledge and control.
"""
if self._connection_record is not None:
rec = self._connection_record
rec.fairy_ref = None
rec.connection = None
# TODO: should this be _return_conn?
self._pool._do_return_conn(self._connection_record)
self.info = self.info.copy()
self._connection_record = None
if self._pool.dispatch.detach:
self._pool.dispatch.detach(self.connection, rec)
def close(self):
self._counter -= 1
if self._counter == 0:
self._checkin()
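# A hedged usage sketch of the Pool API documented above, using the QueuePool
# subclass from sqlalchemy.pool (defined elsewhere) and an in-memory SQLite
# creator; the parameter values are illustrative only.
def sketch_pool_usage():
    import sqlite3
    from sqlalchemy.pool import QueuePool

    # a no-argument creator is accepted and wrapped, per _should_wrap_creator() above
    pool = QueuePool(
        lambda: sqlite3.connect(":memory:"),
        pool_size=2,
        reset_on_return="rollback",
        echo="debug",
    )
    fairy = pool.connect()   # a _ConnectionFairy proxying the DBAPI connection
    cursor = fairy.cursor()  # proxied straight through to the DBAPI connection
    cursor.execute("select 1")
    fairy.close()            # checks the connection back into the pool
    pool.dispose()           # closes connections currently idle in the pool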
| 35.318795
| 84
| 0.606139
|
44cc4e9f7b1f8702435bf92907269bd63daa8fce
| 3,641
|
py
|
Python
|
Rannan_configs/splunk_apps_indexes/apps_configs.tgz_fixed/splunk_app_db_connect/bin/dbx2/migration/migrate.py
|
mhassan2/datafabric_splunk
|
b4a1ba77ad7e1ab84d22cb40da6ff12e6bf16d1e
|
[
"Apache-2.0"
] | 14
|
2017-12-21T00:17:04.000Z
|
2020-07-15T19:00:31.000Z
|
Rannan_configs/splunk_apps_indexes/apps_configs.tgz_fixed/splunk_app_db_connect/bin/dbx2/migration/migrate.py
|
mhassan2/datafabric_splunk
|
b4a1ba77ad7e1ab84d22cb40da6ff12e6bf16d1e
|
[
"Apache-2.0"
] | null | null | null |
Rannan_configs/splunk_apps_indexes/apps_configs.tgz_fixed/splunk_app_db_connect/bin/dbx2/migration/migrate.py
|
mhassan2/datafabric_splunk
|
b4a1ba77ad7e1ab84d22cb40da6ff12e6bf16d1e
|
[
"Apache-2.0"
] | 5
|
2018-01-18T17:34:48.000Z
|
2019-03-27T09:32:05.000Z
|
import argparse
import getpass
import sys
import splunk
from splunk import auth
from splunk import entity as en
from dbx2.migration.controllers import MigrationController
from dbx2.exceptions import ResourceExistsException, EntityCanNotBeMigrated
class MigrateCommand(object):
'''
helper class to run migration scripts from the command line
'''
def __init__(self, username=None, password=None, session_key=None):
self.session_key = session_key if session_key else auth.getSessionKey(username, password)
def migrate(self, src_endpoint, migration_ids, filter=None, force=False):
entities = en.getEntities(src_endpoint, namespace='dbx', owner='nobody', search=filter, count=0)
if not entities:
print "No entity found"
return
        # iterate source entries
for key, entity in entities.iteritems():
for migration_id in migration_ids:
path = en.buildEndpoint(entity.path, entityName=entity.name, namespace='dbx', owner='nobody')
controller = MigrationController(migration_id, path, self.session_key)
try:
result = controller.migrate(force=force)
if result is None:
print "{} of {} has been migrated already. Use --force to overwrite.".format(migration_id, path)
else:
print "{} of {} has been migrated with following attributes:\n".format(migration_id, path)
self.pretty_print(result)
except ResourceExistsException:
print "{} already exists in {} for DB Connect 2. Use --force to overwrite.".format(key, migration_id)
except EntityCanNotBeMigrated:
pass
except Exception as e:
print "an exception occurred while migrating {} from {}: {}".format(migration_id, path, e)
def pretty_print(self, entity):
print '> [{}]'.format(entity.name)
# sort the attribute - it just looks much better
for name, value in sorted(entity.content.iteritems(), key=lambda key: key[0]):
print '> {}: {}'.format(name, value)
print ''
def migrate(migration_name, endpoint, migration_ids):
parser = argparse.ArgumentParser(description='Migrates {} entities from DB Connect 1 to DB Connect 2.'.format(migration_name))
parser.add_argument('-u', '--user', help='admin username')
parser.add_argument('-p', '--password', help='admin password')
parser.add_argument('-f', '--filter', help='run only entities of which names match FILTER (standard Splunk filter)')
parser.add_argument('--force', action='store_true', help='force overwriting')
args = parser.parse_args()
user = args.user
if user is None:
print 'username: ',
user = sys.stdin.readline().rstrip()
password = args.password
if password is None:
password = getpass.getpass()
if user is None or password is None:
print 'username and password are required!'
return
try:
migrate_cmd = MigrateCommand(user, password)
migrate_cmd.migrate(endpoint, migration_ids, filter=args.filter, force=args.force)
except splunk.AuthenticationFailed:
print "Incorrect username or password. Authentication Failed"
except splunk.AuthorizationFailed:
print "Insufficient privilege."
except splunk.SplunkdConnectionException:
print "Connection Failed. Is Splunkd daemon running?"
except Exception as e:
print "An error occured: {}".format(e)
| 37.927083
| 130
| 0.648723
|
4119d693ca9d612a1f684033798a6a7d78628618
| 12,425
|
py
|
Python
|
Segmentation/args.py
|
VITA-Group/CV_A-FAN
|
d5cc54bfea4636868b192ac2a628ac74446db88f
|
[
"MIT"
] | 13
|
2021-03-24T02:13:39.000Z
|
2022-02-07T11:08:43.000Z
|
Segmentation/args.py
|
VITA-Group/CV_A-FAN
|
d5cc54bfea4636868b192ac2a628ac74446db88f
|
[
"MIT"
] | 1
|
2022-01-10T22:08:49.000Z
|
2022-01-10T22:08:49.000Z
|
Segmentation/args.py
|
VITA-Group/CV_A-FAN
|
d5cc54bfea4636868b192ac2a628ac74446db88f
|
[
"MIT"
] | 1
|
2021-06-21T17:18:06.000Z
|
2021-06-21T17:18:06.000Z
|
import argparse
from utils import ext_transforms as et
from datasets import VOCSegmentation, Cityscapes
import utils
import torch
import attack_algo
import pdb
from tqdm import tqdm
def get_argparser():
parser = argparse.ArgumentParser()
parser.add_argument("exp", type=str, default='', help="path to exp")
parser.add_argument('--loss_settings', default=0, type=int, help='loss setting')
parser.add_argument("--eval_pgd", type=str, default='', help="path to ckpt")
parser.add_argument("--test_only", type=str, default='', help="path to ckpt")
# se settings
parser.add_argument("--mix_layer", type=str, default='', help="from clean to adv")
parser.add_argument('--steps', default=1, type=int, help='PGD-steps')
parser.add_argument('--pertub_idx_se', help='index of perturb layers', default=3, type=int)
parser.add_argument('--gamma_se', help='index of PGD gamma', default=0.5, type=float)
parser.add_argument('--randinit', action="store_true", help="whether using randinit")
parser.add_argument('--clip', action="store_true", help="whether using clip")
parser.add_argument('--mix_all', action="store_true", help="whether using clip")
parser.add_argument('--eps', default=2, type=float)
# sd settings
parser.add_argument('--pertub_idx_sd', help='index of perturb layers', default="", type=str)
parser.add_argument('--gamma_sd', help='index of PGD gamma', default=0.5, type=float)
parser.add_argument('--noise_sd', help='if use noise', default=0, type=float)
parser.add_argument('--adv_loss_weight_sd', help='loss', default=0.5, type=float)
parser.add_argument('--mix_sd', action="store_true", help="whether using mix")
# sd settings
parser.add_argument('--steps_pgd', default=1, type=int, help='PGD-steps')
parser.add_argument('--gamma_pgd', help='index of PGD gamma', default=0.5, type=float)
parser.add_argument('--eps_pgd', default=2, type=float)
parser.add_argument('--randinit_pgd', action="store_true", help="whether using randinit")
parser.add_argument('--clip_pgd', action="store_true", help="whether using clip")
parser.add_argument('--adv_type', default="baseline", help="whether test baseline")
# Datset Options
parser.add_argument("--data_root", type=str, default='./datasets/data',
help="path to Dataset")
parser.add_argument("--dataset", type=str, default='voc',
choices=['voc', 'cityscapes'], help='Name of dataset')
parser.add_argument("--num_classes", type=int, default=None,
help="num classes (default: None)")
# Deeplab Options
parser.add_argument("--model", type=str, default='deeplabv3plus_resnet50',
choices=['deeplabv3_resnet50', 'deeplabv3plus_resnet50',
'deeplabv3_resnet101', 'deeplabv3plus_resnet101',
'deeplabv3_mobilenet', 'deeplabv3plus_mobilenet'], help='model name')
parser.add_argument("--separable_conv", action='store_true', default=False,
help="apply separable conv to decoder and aspp")
parser.add_argument("--output_stride", type=int, default=16, choices=[8, 16])
# Train Options
parser.add_argument("--save_val_results", action='store_true', default=False,
help="save segmentation results to \"./results\"")
parser.add_argument("--total_itrs", type=int, default=30e3,
help="epoch number (default: 30k)")
parser.add_argument("--lr", type=float, default=0.01,
help="learning rate (default: 0.01)")
parser.add_argument("--lr_policy", type=str, default='poly', choices=['poly', 'step'],
help="learning rate scheduler policy")
parser.add_argument("--step_size", type=int, default=10000)
parser.add_argument("--crop_val", action='store_true', default=False,
help='crop validation (default: False)')
parser.add_argument("--batch_size", type=int, default=16,
help='batch size (default: 16)')
parser.add_argument("--val_batch_size", type=int, default=4,
help='batch size for validation (default: 4)')
parser.add_argument("--crop_size", type=int, default=513)
parser.add_argument("--ckpt", default=None, type=str,
help="restore from checkpoint")
parser.add_argument("--continue_training", action='store_true', default=False)
parser.add_argument("--loss_type", type=str, default='cross_entropy',
choices=['cross_entropy', 'focal_loss'], help="loss type (default: False)")
parser.add_argument("--gpu_id", type=str, default='0',
help="GPU ID")
parser.add_argument("--weight_decay", type=float, default=1e-4,
help='weight decay (default: 1e-4)')
parser.add_argument("--random_seed", type=int, default=1,
help="random seed (default: 1)")
parser.add_argument("--print_interval", type=int, default=10,
help="print interval of loss (default: 10)")
parser.add_argument("--val_interval", type=int, default=100,
help="epoch interval for eval (default: 100)")
parser.add_argument("--download", action='store_true', default=False,
help="download datasets")
# PASCAL VOC Options
parser.add_argument("--year", type=str, default='2012',
choices=['2012_aug', '2012', '2011', '2009', '2008', '2007'], help='year of VOC')
# Visdom options
parser.add_argument("--enable_vis", action='store_true', default=False,
help="use visdom for visualization")
parser.add_argument("--vis_port", type=str, default='13570',
help='port for visdom')
parser.add_argument("--vis_env", type=str, default='main',
help='env for visdom')
parser.add_argument("--vis_num_samples", type=int, default=8,
help='number of samples for visualization (default: 8)')
return parser
def get_dataset(opts):
""" Dataset And Augmentation
"""
if opts.dataset == 'voc':
train_transform = et.ExtCompose([
#et.ExtResize(size=opts.crop_size),
et.ExtRandomScale((0.5, 2.0)),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size), pad_if_needed=True),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
# et.ExtNormalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225]),
])
if opts.crop_val:
val_transform = et.ExtCompose([
et.ExtResize(opts.crop_size),
et.ExtCenterCrop(opts.crop_size),
et.ExtToTensor(),
# et.ExtNormalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225]),
])
else:
val_transform = et.ExtCompose([
et.ExtToTensor(),
# et.ExtNormalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225]),
])
train_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='train', download=opts.download, transform=train_transform)
val_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='val', download=False, transform=val_transform)
if opts.dataset == 'cityscapes':
train_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size)),
et.ExtColorJitter( brightness=0.5, contrast=0.5, saturation=0.5 ),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
# et.ExtNormalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225]),
])
val_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtToTensor(),
# et.ExtNormalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225]),
])
train_dst = Cityscapes(root=opts.data_root,
split='train', transform=train_transform)
val_dst = Cityscapes(root=opts.data_root,
split='val', transform=val_transform)
return train_dst, val_dst
def validate(opts, model, loader, device, metrics, ret_samples_ids=None):
"""Do validation and return specified samples"""
metrics.reset()
ret_samples = []
if opts.save_val_results:
if not os.path.exists('results'):
os.mkdir('results')
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
img_id = 0
with torch.no_grad():
for i, (images, labels) in enumerate(loader):
images = images.to(device, dtype=torch.float32)
labels = labels.to(device, dtype=torch.long)
inputs_all = {"x": images, "adv": None, "out_idx": 0, "flag": 'clean'}
outputs = model(inputs_all)
preds = outputs.detach().max(dim=1)[1].cpu().numpy()
targets = labels.cpu().numpy()
metrics.update(targets, preds)
if ret_samples_ids is not None and i in ret_samples_ids: # get vis samples
ret_samples.append(
(images[0].detach().cpu().numpy(), targets[0], preds[0]))
if opts.save_val_results:
for i in range(len(images)):
image = images[i].detach().cpu().numpy()
target = targets[i]
pred = preds[i]
image = (denorm(image) * 255).transpose(1, 2, 0).astype(np.uint8)
target = loader.dataset.decode_target(target).astype(np.uint8)
pred = loader.dataset.decode_target(pred).astype(np.uint8)
Image.fromarray(image).save('results/%d_image.png' % img_id)
Image.fromarray(target).save('results/%d_target.png' % img_id)
Image.fromarray(pred).save('results/%d_pred.png' % img_id)
fig = plt.figure()
plt.imshow(image)
plt.axis('off')
plt.imshow(pred, alpha=0.7)
ax = plt.gca()
ax.xaxis.set_major_locator(matplotlib.ticker.NullLocator())
ax.yaxis.set_major_locator(matplotlib.ticker.NullLocator())
plt.savefig('results/%d_overlay.png' % img_id, bbox_inches='tight', pad_inches=0)
plt.close()
img_id += 1
score = metrics.get_results()
return score, ret_samples
def pgd_validate(opts, model, loader, device, metrics, criterion, ret_samples_ids=None):
"""Do validation and return specified samples"""
metrics.reset()
ret_samples = []
for i, (images, labels) in tqdm(enumerate(loader)):
images = images.to(device, dtype=torch.float32)
labels = labels.to(device, dtype=torch.long)
adv_images = attack_algo.adv_input(
x = images,
criterion = criterion,
y = labels,
model= model,
steps = opts.steps_pgd,
eps = (opts.eps_pgd / 255),
gamma = (opts.gamma_pgd / 255),
randinit = opts.randinit_pgd,
clip = opts.clip_pgd)
inputs_all = {"x": adv_images, "adv": None, "out_idx": -1, "flag": 'clean'}
outputs = model(inputs_all)
preds = outputs.detach().max(dim=1)[1].cpu().numpy()
targets = labels.cpu().numpy()
metrics.update(targets, preds)
if ret_samples_ids is not None and i in ret_samples_ids: # get vis samples
ret_samples.append(
(images[0].detach().cpu().numpy(), targets[0], preds[0]))
score = metrics.get_results()
return score, ret_samples
def print_args(args, str_num=80):
for arg, val in args.__dict__.items():
print(arg + '.' * (str_num - len(arg) - len(str(val))) + str(val))
print()
| 47.423664
| 105
| 0.581328
|
1b97280608ca0a0a5047121edef2e1e0bc75a359
| 3,966
|
py
|
Python
|
DailyProgrammer/DP20130715A.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | 2
|
2020-12-23T18:59:22.000Z
|
2021-04-14T13:16:09.000Z
|
DailyProgrammer/DP20130715A.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | null | null | null |
DailyProgrammer/DP20130715A.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | null | null | null |
"""
[07/15/13] Challenge #133 [Easy] Foot-Traffic Analysis
https://www.reddit.com/r/dailyprogrammer/comments/1iambu/071513_challenge_133_easy_foottraffic_analysis/
# [](#EasyIcon) *(Easy)*: Foot-Traffic Analysis
The world's most prestigious art gallery in the world needs *your* help! Management wants to figure out how many people
visit each room in the gallery, and for how long: this is to help improve the quality of the overall gallery in the
future.
Your goal is to write a program that takes a formatted log file that describes the overall gallery's foot-traffic on a
minute-to-minute basis. From this data you must compute the average time spent in each room, and how many visitors
there were in each room.
*Author: nint22*
# Formal Inputs & Outputs
## Input Description
You will be first given an integer N which represents the following N-number of lines of text. Each line represents
either a visitor entering or leaving a room: it starts with an integer, representing a visitor's unique identifier.
Next on this line is another integer, representing the room index. Note that there are at most 100 rooms, starting at
index 0, and at most 1,024 visitors, starting at index 0. Next is a single character, either 'I' (for "In") for this
visitor entering the room, or 'O' (for "out") for the visitor leaving the room. Finally, at the end of this line, there
is a time-stamp integer: it is an integer representing the minute the event occurred during the day. This integer will
range from 0 to 1439 (inclusive). All of these elements are space-delimited.
You may assume that all input is logically well-formed: for each person entering a room, he or she will always leave it
at some point in the future. A visitor will only be in one room at a time.
Note that the order of events in the log **are not sorted in any way**; it shouldn't matter, as you can solve this
problem without sorting given data. Your output (see details below) **must** be sorted by room index, ascending.
## Output Description
For each room that had log data associated with it, print the room index (starting at 0), then print the average length
of time visitors have stayed as an integer (round down), and then finally print the total number of visitors in the
room. All of this should be on the same line and be space delimited; you may optionally include labels on this text,
like in our sample output 1.
# Sample Inputs & Outputs
## Sample Input 1
4
0 0 I 540
1 0 I 540
0 0 O 560
1 0 O 560
## Sample Output 1
Room 0, 20 minute average visit, 2 visitor(s) total
## Sample Input 2
36
0 11 I 347
1 13 I 307
2 15 I 334
3 6 I 334
4 9 I 334
5 2 I 334
6 2 I 334
7 11 I 334
8 1 I 334
0 11 O 376
1 13 O 321
2 15 O 389
3 6 O 412
4 9 O 418
5 2 O 414
6 2 O 349
7 11 O 418
8 1 O 418
0 12 I 437
1 28 I 343
2 32 I 408
3 15 I 458
4 18 I 424
5 26 I 442
6 7 I 435
7 19 I 456
8 19 I 450
0 12 O 455
1 28 O 374
2 32 O 495
3 15 O 462
4 18 O 500
5 26 O 479
6 7 O 493
7 19 O 471
8 19 O 458
## Sample Output 2
Room 1, 85 minute average visit, 1 visitor total
Room 2, 48 minute average visit, 2 visitors total
Room 6, 79 minute average visit, 1 visitor total
Room 7, 59 minute average visit, 1 visitor total
Room 9, 85 minute average visit, 1 visitor total
Room 11, 57 minute average visit, 2 visitors total
Room 12, 19 minute average visit, 1 visitor total
Room 13, 15 minute average visit, 1 visitor total
Room 15, 30 minute average visit, 2 visitors total
Room 18, 77 minute average visit, 1 visitor total
Room 19, 12 minute average visit, 2 visitors total
Room 26, 38 minute average visit, 1 visitor total
Room 28, 32 minute average visit, 1 visitor total
Room 32, 88 minute average visit, 1 visitor total
"""
def main():
pass
if __name__ == "__main__":
main()
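
The record above stores only the challenge text with an empty main() stub, so it does not actually solve the problem. Purely as a hedged illustration (not part of the original repository), a minimal sketch that reads the log from stdin in the format described by the docstring might look like the following; the function name, the stdin handling, and the floor-division average are assumptions drawn from the spec:

# Hypothetical solution sketch for the foot-traffic challenge above (illustrative only).
import sys
from collections import defaultdict


def foot_traffic(lines):
    entries = {}                # (visitor, room) -> minute the visitor entered
    visits = defaultdict(list)  # room -> list of completed visit durations (minutes)
    n = int(lines[0])
    for line in lines[1:n + 1]:
        visitor, room, event, minute = line.split()
        visitor, room, minute = int(visitor), int(room), int(minute)
        if event == 'I':
            entries[(visitor, room)] = minute
        else:  # 'O'
            visits[room].append(minute - entries.pop((visitor, room)))
    for room in sorted(visits):  # output sorted by room index, ascending
        durations = visits[room]
        print('Room {}, {} minute average visit, {} visitor(s) total'.format(
            room, sum(durations) // len(durations), len(durations)))


if __name__ == '__main__':
    foot_traffic(sys.stdin.read().splitlines())

Feeding it Sample Input 1 above reproduces "Room 0, 20 minute average visit, 2 visitor(s) total".
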
| 38.504854
| 119
| 0.709783
|
f04c56e3049b88a7167f1959179eba0d267abc7a
| 1,903
|
py
|
Python
|
setup.py
|
mdsrosa/flask-slacker
|
3417680dfa2a15336a0747c772cd1e342d50f4d0
|
[
"MIT"
] | 2
|
2019-06-30T16:37:05.000Z
|
2021-05-19T06:34:18.000Z
|
setup.py
|
mdsrosa/flask-slacker
|
3417680dfa2a15336a0747c772cd1e342d50f4d0
|
[
"MIT"
] | 7
|
2018-10-07T12:07:12.000Z
|
2021-06-01T21:50:39.000Z
|
setup.py
|
mdsrosa/flask-slacker
|
3417680dfa2a15336a0747c772cd1e342d50f4d0
|
[
"MIT"
] | null | null | null |
"""
Flask-Slacker
--------------
Adds Slack support to your Flask application using Slacker.
"""
import ast
import re
from setuptools import setup
def get_version():
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('flask_slacker/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
return version
install_requires = [
'Flask >= 0.12.2',
'Slacker >= 0.9.60'
]
test_requires = [
'pytest >= 3.2.2',
'pytest-runner >= 2.12.1',
'pytest-flake8 >= 0.8.1',
'pytest-isort >= 0.1.0'
]
setup(
name='Flask-Slacker',
version=get_version(),
license='MIT',
url='https://github.com/mdsrosa/flask-slacker',
author='Matheus Rosa',
author_email='matheusdsrosa@gmail.com',
    description='Adds Slack support to your Flask application using Slacker.',
long_description=__doc__,
packages=['flask_slacker'],
zip_safe=False,
platforms='any',
include_package_data=True,
install_requires=install_requires,
setup_requires=test_requires,
tests_require=test_requires,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
]
)
| 27.57971
| 72
| 0.616395
|
66bf7b641b57190e3c590bb7d4020e7e5b5b8b41
| 1,549
|
py
|
Python
|
util.py
|
ronaldyuwandika/rawfile-localpv
|
848d87453f7ac68e9d7b48b092899a2873ebad44
|
[
"Apache-2.0"
] | 38
|
2020-06-12T19:39:17.000Z
|
2022-03-06T17:34:25.000Z
|
util.py
|
ronaldyuwandika/rawfile-localpv
|
848d87453f7ac68e9d7b48b092899a2873ebad44
|
[
"Apache-2.0"
] | 6
|
2020-08-08T06:15:07.000Z
|
2021-11-19T15:38:46.000Z
|
util.py
|
ronaldyuwandika/rawfile-localpv
|
848d87453f7ac68e9d7b48b092899a2873ebad44
|
[
"Apache-2.0"
] | 14
|
2020-06-19T20:52:44.000Z
|
2022-03-31T12:08:02.000Z
|
import base64
import functools
import inspect
import pickle
import subprocess
def indent(obj, lvl):
return "\n".join([(lvl * " ") + line for line in str(obj).splitlines()])
def log_grpc_request(func):
@functools.wraps(func)
def wrap(self, request, context):
try:
res = func(self, request, context)
print(
f"""{func.__name__}({{
{indent(request, 2)}
}}) = {{
{indent(res, 2)}
}}"""
)
return res
except Exception as exc:
ret = (str(context._state.code), context._state.details)
print(
f"""{func.__name__}({{
{indent(request, 2)}
}}) = {ret}
"""
)
raise exc
return wrap
def run(cmd):
return subprocess.run(cmd, shell=True, check=True)
def run_out(cmd: str):
p = subprocess.run(cmd, shell=True, capture_output=True)
return p
class remote_fn(object):
def __init__(self, fn):
self.fn = fn
def as_cmd(self, *args, **kwargs):
call_data = [inspect.getsource(self.fn).encode(), args, kwargs]
call_data_serialized = base64.b64encode(pickle.dumps(call_data))
run_cmd = f"""
python <<EOF
import base64
import pickle
remote_fn = lambda fn: fn # FIXME: dirty hack
call_data = pickle.loads(base64.b64decode({call_data_serialized}))
exec(call_data[0])
{self.fn.__name__}(*call_data[1], **call_data[2])
EOF
"""
return run_cmd
def __call__(self, *args, **kwargs):
raise Exception("Should only be run inside pod")
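
For context, here is a brief usage sketch of the remote_fn helper above. It is a hedged illustration only: the decorated function, its arguments, and the import path are assumptions, and actually shipping the generated command into a pod (for example via kubectl exec) is deliberately left out.

# Illustrative use of remote_fn; everything below is hypothetical, not part of the module.
from util import remote_fn  # assumes the module above is importable as `util`


@remote_fn
def greet(name, punctuation="!"):
    print("hello " + name + punctuation)


# Calling greet("world") directly raises by design; instead build the shell command
# that re-executes the function remotely with the pickled source and arguments:
cmd = greet.as_cmd("world", punctuation="?")
print(cmd)  # a `python <<EOF ... EOF` heredoc ready to be run inside the target pod
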
| 22.128571
| 76
| 0.599742
|
8c203994651b71dbe4f317ada01d0abe1d3ccd60
| 1,929
|
py
|
Python
|
examples/publications/2013MNRAS.429..895P/disk_script/boxedfi.py
|
rknop/amuse
|
85d5bdcc29cfc87dc69d91c264101fafd6658aec
|
[
"Apache-2.0"
] | 131
|
2015-06-04T09:06:57.000Z
|
2022-02-01T12:11:29.000Z
|
examples/publications/2013MNRAS.429..895P/disk_script/boxedfi.py
|
rknop/amuse
|
85d5bdcc29cfc87dc69d91c264101fafd6658aec
|
[
"Apache-2.0"
] | 690
|
2015-10-17T12:18:08.000Z
|
2022-03-31T16:15:58.000Z
|
examples/publications/2013MNRAS.429..895P/disk_script/boxedfi.py
|
rieder/amuse
|
3ac3b6b8f922643657279ddee5c8ab3fc0440d5e
|
[
"Apache-2.0"
] | 102
|
2015-01-22T10:00:29.000Z
|
2022-02-09T13:29:43.000Z
|
from amuse.units import nbody_system
from amuse.units import units
import amuse.datamodel as core
from amuse.community.fi.interface import Fi
from amuse.ext.gasplummer import MakePlummerGasModel
class BoxedFi(Fi):
def __init__(self, *args, **kargs):
Fi.__init__(self, *args, **kargs)
self.escapers=core.Particles(0)
def evolve_model(self, *args, **kargs):
self.stopping_conditions.out_of_box_detection.enable()
outofbox=0.9*self.parameters.periodic_box_size/2
self.parameters.stopping_conditions_out_of_box_size = outofbox
# Fi.evolve_model(self,*args,**kargs)
self.overridden().evolve_model(*args,**kargs)
while self.stopping_conditions.out_of_box_detection.is_set():
escapers=self.particles.select_array(
lambda x,y,z: (x**2+y**2+z**2 > outofbox**2), ["x","y","z"])
print "***", len(escapers)
if len(escapers)>0:
self.escapers.add_particles(escapers)
self.particles.remove_particles(escapers)
# Fi.evolve_model(self,*args, **kargs)
self.overridden().evolve_model(*args,**kargs)
if __name__=="__main__":
Ngas=1000
conv = nbody_system.nbody_to_si(100 | units.MSun, 1 | units.parsec)
dt=conv.to_si(1|nbody_system.time)/100
print dt.in_(units.Myr)
parts=MakePlummerGasModel(Ngas,convert_nbody=conv).result
parts.h_smooth=0 | units.parsec
outofbox=0.9*10. | units.parsec
escapers=parts.select_array(
lambda x,y,z: (x**2+y**2+z**2 > outofbox**2), ["x","y","z"])
print "**",len(escapers),outofbox.in_(units.parsec)
parts.remove_particles(escapers)
print len(parts)
sph=BoxedFi(convert_nbody=conv,use_gl=True)
sph.parameters.periodic_box_size=20. | units.parsec
sph.parameters.timestep=dt
sph.parameters.self_gravity_flag=False
sph.gas_particles.add_particles(parts)
sph.start_viewer()
sph.evolve_model(dt*1000)
print len(sph.gas_particles)
print len(sph.particles)
| 31.112903
| 69
| 0.713841
|
1042e5d806057cc026b3c75ab095f67633173422
| 1,622
|
py
|
Python
|
TieredLists.py
|
garrettwilkin/TieredList
|
ef7e9a5ce3aa07caaa881f2419775114dec79896
|
[
"MIT"
] | null | null | null |
TieredLists.py
|
garrettwilkin/TieredList
|
ef7e9a5ce3aa07caaa881f2419775114dec79896
|
[
"MIT"
] | null | null | null |
TieredLists.py
|
garrettwilkin/TieredList
|
ef7e9a5ce3aa07caaa881f2419775114dec79896
|
[
"MIT"
] | null | null | null |
"""Tiered Lists - a simple way to group items into tiers (at insertion time) while maintaining uniqueness."""
class TieredLists(object):
def __init__(self, tiers):
self.tiers = sorted(tiers, reverse=True)
self.bags = {}
for t in self.tiers:
self.bags[t] = set()
def get_tier(self, key):
"""Determine in which tier a key should be inserted.
Returns
-------
int
if the value is greater than at least one of the tiers
None
if the value is less than the least tier."""
tier = None
for t in self.tiers:
if key >= t:
tier = t
break
return tier
def add(self, key, value):
"""Adds a value to a tier based upon where key fits in the tiers
Parameters
----------
key : int
used to select the tier into which this value will be inserted
value : basestring
to be inserted in the set for the tier.
Returns
-------
int
int identifying into which tier the item was inserted. None if the key was too low for the lowest tier."""
tier = self.get_tier(key)
if tier is not None:
self.bags[tier].add(value)
return tier
def bag(self, tier):
return self.bags[tier]
def __str__(self):
msg = "{{ tiers: {}, bags: {{".format(self.tiers)
for t, vals in sorted(self.bags.items(), reverse=True):
msg += "\n{}: {}, {}".format(t, len(vals), vals)
msg += "} }"
return msg
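
A short usage sketch of the class above; the tier thresholds and inserted labels are invented purely for illustration.

# Illustrative usage of TieredLists (thresholds and values are made up).
from TieredLists import TieredLists  # assumes the module above is on the path

tiers = TieredLists([10, 50, 100])
print(tiers.add(120, 'gold'))      # -> 100, the highest threshold that 120 meets
print(tiers.add(75, 'silver'))     # -> 50
print(tiers.add(5, 'too-small'))   # -> None, below the lowest tier so nothing is stored
print(tiers.bag(100))              # -> {'gold'}
print(tiers)                       # dumps every tier with its value count and set of values
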
| 30.603774
| 118
| 0.535142
|
f1b0e7b672fb42b28bcfc1c0327bd6c1098f3e62
| 4,666
|
py
|
Python
|
tests/creator/creator-profiler.py
|
atwoodjw/Optimus
|
938463cec41a6683d2077c9afc7d6ba05c3b993f
|
[
"Apache-2.0"
] | null | null | null |
tests/creator/creator-profiler.py
|
atwoodjw/Optimus
|
938463cec41a6683d2077c9afc7d6ba05c3b993f
|
[
"Apache-2.0"
] | null | null | null |
tests/creator/creator-profiler.py
|
atwoodjw/Optimus
|
938463cec41a6683d2077c9afc7d6ba05c3b993f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.1
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # This notebook creates the tests in Python code. All these cells must be run to execute the tests
# %load_ext autoreload
# %autoreload 2
# + {"outputHidden": false, "inputHidden": false}
import sys
sys.path.append("../..")
# -
from optimus import Optimus
from optimus.helpers.test import Test
op = Optimus(master='local', verbose=True)
# +
import pandas as pd
from pyspark.sql.types import *
from datetime import date, datetime
cols = [
("names", "str"),
("height(ft)", ShortType()),
("function", "str"),
("rank", ByteType()),
("age", "int"),
("weight(t)", "float"),
"japanese name",
"last position seen",
"date arrival",
"last date seen",
("attributes", ArrayType(FloatType())),
("Date Type", DateType()),
("timestamp", TimestampType()),
("Cybertronian", BooleanType()),
("function(binary)", BinaryType()),
("NullType", NullType())
]
rows = [
("Optimus", -28, "Leader", 10, 5000000, 4.30, ["Inochi", "Convoy"], "19.442735,-99.201111", "1980/04/10",
"2016/09/10", [8.5344, 4300.0], date(2016, 9, 10), datetime(2014, 6, 24), True, bytearray("Leader", "utf-8"),
None),
("bumbl#ebéé ", 17, "Espionage", 7, 5000000, 2.0, ["Bumble", "Goldback"], "10.642707,-71.612534", "1980/04/10",
"2015/08/10", [5.334, 2000.0], date(2015, 8, 10), datetime(2014, 6, 24), True, bytearray("Espionage", "utf-8"),
None),
("ironhide&", 26, "Security", 7, 5000000, 4.0, ["Roadbuster"], "37.789563,-122.400356", "1980/04/10",
"2014/07/10", [7.9248, 4000.0], date(2014, 6, 24), datetime(2014, 6, 24), True, bytearray("Security", "utf-8"),
None),
("Jazz", 13, "First Lieutenant", 8, 5000000, 1.80, ["Meister"], "33.670666,-117.841553", "1980/04/10",
"2013/06/10", [3.9624, 1800.0], date(2013, 6, 24), datetime(2014, 6, 24), True,
bytearray("First Lieutenant", "utf-8"), None),
("Megatron", None, "None", 10, 5000000, 5.70, ["Megatron"], None, "1980/04/10", "2012/05/10", [None, 5700.0],
date(2012, 5, 10), datetime(2014, 6, 24), True, bytearray("None", "utf-8"), None),
("Metroplex_)^$", 300, "Battle Station", 8, 5000000, None, ["Metroflex"], None, "1980/04/10", "2011/04/10",
[91.44, None], date(2011, 4, 10), datetime(2014, 6, 24), True, bytearray("Battle Station", "utf-8"), None),
(None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None),
]
source_df = op.create.df(cols ,rows)
source_df.table()
# -
# ### End Init Section
# ## Profiler
from pyspark.ml.linalg import Vectors
import re
a="a\'a"
re.escape(a)
print(a)
t = Test(op, source_df, "df_profiler", imports=["from pyspark.ml.linalg import Vectors, VectorUDT, DenseVector",
"import numpy as np",
"nan = np.nan",
"import datetime",
"from pyspark.sql import functions as F",
"from optimus.profiler.profiler import Profiler",
"null = None",
"true = True",
"p= Profiler()"], path = "df_profiler", final_path="..")
# +
from pyspark.sql import functions as F
def func(col_name, attrs):
return F.col(col_name) * 2
numeric_col = "height(ft)"
numeric_col_B = "rank"
numeric_col_C = "rank"
string_col = "function"
date_col = "date arrival"
date_col_B = "last date seen"
new_col = "new col"
array_col = "attributes"
# -
from optimus.profiler.profiler import Profiler
p= Profiler()
p.run(source_df, "*")
t.create(p, "dataset", None, 'json', None, source_df,"*")
t.run()
mismatch = {"names":"dd/mm/yyyy","height(ft)":r'^([0-2][0-9]|(3)[0-1])(\/)(((0)[0-9])|((1)[0-2]))(\/)\d{4}$',"function":"yyyy-mm-dd"}
t.create(p, "dataset", "mismatch", 'json', None, source_df,"*", mismatch=mismatch)
t.run()
t.create(p, "columns_stats", None, 'json', None, source_df,"*")
t.run()
t.create(p, "columns_agg", None, 'json', None, source_df,"*")
t.run()
a = "{'name'=a'a}"
print(a)
import json
json.dumps("{'name'=a'a}")
| 30.103226
| 133
| 0.552508
|
f2f46a5fdbcaa9c5aecc018cdafff1bdc177d437
| 2,168
|
py
|
Python
|
xfel/ui/components/averaging.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
xfel/ui/components/averaging.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
xfel/ui/components/averaging.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import division
from xfel.util.mp import get_lsf_submit_command
import os
class AveragingCommand(get_lsf_submit_command):
def __init__(self, run, db_params, raw=False):
# command setup
super(AveragingCommand, self).__init__("cxi.mpi_average", None, None, db_params.mp, job_name="average")
# run parameters
run_no = int(run.run)
self.args.append("-r %d" % run_no)
# current settings and experiment parameters
nproc = int(db_params.mp.nproc)
self.command = "-n %d %s" % (nproc, self.command) # to make sure nproc is passed to mpirun
expt = str(db_params.facility.lcls.experiment)
self.args.append("-x %s" % expt)
outdir = os.path.join(str(db_params.output_folder), "averages")
if not os.path.exists(outdir):
os.mkdir(outdir)
self.args.append("-o %s" % outdir)
logdir = os.path.join(outdir, "logs")
if not os.path.exists(logdir):
os.mkdir(logdir)
self.stdoutdir = logdir
self.submit_path = os.path.join(logdir, "r%04d.sh" % run_no)
self.log_name = "r%04d.log" % run_no
ffb = db_params.facility.lcls.use_ffb
if ffb:
self.args.append("-f")
if raw:
self.args.append("-R")
        # rungroup parameters (using the most recent, active rungroup)
rungroups = run.get_rungroups()
rungroup_ids = [rg.id for rg in rungroups]
rungroup_obj = rungroups[rungroup_ids.index(max(rungroup_ids))]
address = rungroup_obj.detector_address
self.args.append("-a %s" % address)
beamx = rungroup_obj.beamx
if beamx:
self.args.append("-X %.1f" % beamx)
beamy = rungroup_obj.beamy
if beamy:
self.args.append("-Y %.1f" % beamy)
distance = rungroup_obj.detz_parameter
self.args.append("-d %.3f" % distance)
config = rungroup_obj.config_str
if config:
self.args.append("-c %s" % config)
pickle = rungroup_obj.format == "pickle"
if pickle:
self.args.append("-p")
binning = rungroup_obj.binning
if binning:
self.args.append("-b %d" % binning)
self.args.append("-v")
def customize_for_method(self):
self.submit_head = "bsub"
self.command = "mpirun %s" % self.command
| 35.540984
| 107
| 0.666513
|
feb3c69886ee2bd520958bbba5f6ec4bc59eb0cc
| 1,553
|
py
|
Python
|
assignments/05_hamming/hamming.py
|
KHLopez08/biosystems-analytics-2020
|
dcd9e8f435f169ff644002ad03253501cd128fd8
|
[
"MIT"
] | null | null | null |
assignments/05_hamming/hamming.py
|
KHLopez08/biosystems-analytics-2020
|
dcd9e8f435f169ff644002ad03253501cd128fd8
|
[
"MIT"
] | null | null | null |
assignments/05_hamming/hamming.py
|
KHLopez08/biosystems-analytics-2020
|
dcd9e8f435f169ff644002ad03253501cd128fd8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Author : khlopez08
Date : 2020-03-04
Purpose: Rock the Casbah
"""
import argparse
import os
import sys
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Rock the Casbah',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('file',
metavar='FILE',
default=[sys.stdin],
type=argparse.FileType('r'),
help='Input file')
parser.add_argument('-m',
'--min',
help='A minimum integer',
metavar='int',
type=int,
default=0)
return parser.parse_args()
# --------------------------------------------------
def main():
    """The Imperial March sounds"""
    args = get_args()
    for line in args.file:
        words = line.split()
        if len(words) != 2:
            # skip blank or malformed lines
            continue
        word1, word2 = words
        # Hamming distance: differing positions plus any difference in word length
        hamming = abs(len(word1) - len(word2)) + sum(
            c1 != c2 for c1, c2 in zip(word1, word2))
        if hamming >= args.min:
            print(f'{hamming:8}:{word1:20}{word2:20}')
# --------------------------------------------------
if __name__ == '__main__':
main()
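
As a quick, standalone sanity check of the distance computation above (the two example strings are arbitrary and not taken from the assignment's test data):

# Mirrors the per-line Hamming computation used in main() above.
word1, word2 = 'GAGCCTACTAACGGGAT', 'CATCGTAATGACGGCCT'
hamming = abs(len(word1) - len(word2)) + sum(a != b for a, b in zip(word1, word2))
print(hamming)  # -> 7
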
| 25.459016
| 78
| 0.43593
|
34c5636157cf172adab3278b94960f81eb9c642c
| 1,703
|
py
|
Python
|
tools/test-domains.py
|
Scratchcat1/not-on-my-shift
|
c749e05b04755f34c66780504053d44ef48c6b0c
|
[
"0BSD"
] | 32
|
2020-05-07T19:39:36.000Z
|
2021-12-28T09:30:02.000Z
|
tools/test-domains.py
|
Scratchcat1/not-on-my-shift
|
c749e05b04755f34c66780504053d44ef48c6b0c
|
[
"0BSD"
] | 12
|
2020-05-09T18:05:40.000Z
|
2021-10-01T08:48:57.000Z
|
tools/test-domains.py
|
Scratchcat1/not-on-my-shift
|
c749e05b04755f34c66780504053d44ef48c6b0c
|
[
"0BSD"
] | 2
|
2021-03-09T11:22:16.000Z
|
2021-06-06T13:14:23.000Z
|
#!/usr/bin/env python3
import argparse
from tqdm import tqdm
import socket
import sys
import re
import multiprocessing.pool
import subprocess
parser = argparse.ArgumentParser(description='Tests which domains in the list exist.')
parser.add_argument('--file', type=argparse.FileType('r'), default=sys.stdin, help='domain list')
parser.add_argument('--threads', type=int, default=4, help='domain querying threads')
args = parser.parse_args()
domains = set()
for line in args.file:
line = line.strip().lower()
if re.match(r'^([a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)*[a-z0-9](?:[a-z0-9-]*[a-z0-9])?$', line):
domains.add(line)
registered = []
unregistered = []
FREE_MARKERS = [
'no match for',
'status: free',
'not found',
'no match',
'no data found',
'no entries found',
'registration status: available',
'no object found'
]
def domain_exists(domain):
try:
socket.gethostbyname(domain)
return domain, True
except socket.gaierror as e:
pass
wdata = subprocess.run(["whois", domain], capture_output=True)
stdout = wdata.stdout.decode('ascii', 'ignore').casefold()
for marker in FREE_MARKERS:
if marker in stdout:
return domain, False
print('Whois for %s tested true but A did not' % domain)
return domain, True
with multiprocessing.pool.ThreadPool(args.threads) as pool:
with tqdm(total=len(domains)) as pbar:
for domain, result in pool.imap_unordered(domain_exists, domains):
if result:
registered.append(domain)
else:
unregistered.append(domain)
pbar.update()
registered.sort()
unregistered.sort()
print('### REGISTERED ###')
for domain in registered:
print(domain)
print()
print('### UNREGISTERED ###')
for domain in unregistered:
print(domain)
| 23.328767
| 97
| 0.707575
|
dfdaccb3dd57c045b03cbdcd2733f944492860a6
| 6,804
|
py
|
Python
|
ros/src/tl_detector/tl_detector.py
|
evgenyslab/CarND-Capstone
|
d0f62717fc2e896c5c2de7526f45736df561b444
|
[
"MIT"
] | null | null | null |
ros/src/tl_detector/tl_detector.py
|
evgenyslab/CarND-Capstone
|
d0f62717fc2e896c5c2de7526f45736df561b444
|
[
"MIT"
] | null | null | null |
ros/src/tl_detector/tl_detector.py
|
evgenyslab/CarND-Capstone
|
d0f62717fc2e896c5c2de7526f45736df561b444
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
import tf
import cv2
import yaml
import math
STATE_COUNT_THRESHOLD = 3
LOOKAHEAD_WPS = 200
class TLDetector(object):
def __init__(self):
rospy.init_node('tl_detector')
self.pose = None
self.waypoints = None
self.current_waypoint = None
self.camera_image = None
self.has_image = False
self.lights = []
# for debugging...
self.light_wp = None
self.light_idx = None
sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
sub3 = rospy.Subscriber('/current_waypoint_idx', Int32, self.current_waypoint_cb)
'''
/vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
helps you acquire an accurate ground truth data source for the traffic light
classifier by sending the current color state of all traffic lights in the
simulator. When testing on the vehicle, the color state will not be available. You'll need to
rely on the position of the light and the camera image to predict it.
'''
sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
sub7 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
config_string = rospy.get_param("/traffic_light_config")
self.config = yaml.load(config_string)
self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
self.bridge = CvBridge()
self.light_classifier = TLClassifier()
self.listener = tf.TransformListener()
self.state = TrafficLight.UNKNOWN
self.last_state = TrafficLight.UNKNOWN
self.last_wp = -1
self.state_count = 0
self.loop()
def pose_cb(self, msg):
self.pose = msg
def waypoints_cb(self, waypoints):
self.waypoints = waypoints
def current_waypoint_cb(self, msg):
self.current_waypoint = msg.data
def traffic_cb(self, msg):
self.lights = msg.lights
def image_cb(self, msg):
"""Identifies red lights in the incoming camera image and publishes the index
of the waypoint closest to the red light's stop line to /traffic_waypoint
Args:
msg (Image): image from car-mounted camera
"""
self.has_image = True
self.camera_image = msg
def get_light_state(self, light):
"""Determines the current color of the traffic light
Args:
light (TrafficLight): light to classify
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
if(not self.has_image):
# return sim's light color
return self.lights[self.light_idx].state
cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
#Get classification
return self.light_classifier.get_classification(cv_image)
def process_traffic_lights(self):
"""Finds closest visible traffic light, if one exists, and determines its
location and color
Returns:
int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
light = None
light_index = None
stop_line_positions = self.config['stop_line_positions']
if(self.pose is not None and self.lights is not None and self.waypoints is not None):
light_index = self.closest_traffic_light(self.pose.pose, self.lights)
if light_index is not None:
light = self.lights[light_index]
stop_line_position = stop_line_positions[light_index]
light_wp = self.closest_stop_line_idx(stop_line_position)
else:
light_wp = -1
if light and self.current_waypoint is not None and light_wp >= self.current_waypoint and light_wp <=self.current_waypoint+LOOKAHEAD_WPS:
self.light_wp = light_wp
self.light_idx = light_index
state = self.get_light_state(light)
return light_wp, state
return -1, TrafficLight.UNKNOWN
def closest_traffic_light(self, pose, lights):
dist = 1e9
index = 0
selected_index = None
fdist = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2)
for light in lights:
temp_dist = fdist(light.pose.pose.position, pose.position)
if temp_dist < dist:
dist = temp_dist
selected_index = index
index = index + 1
return selected_index
def closest_stop_line_idx(self, stop_line_position):
dist = 1e9
index = 0
selected_index = None
fdist = lambda a, b: math.sqrt((a.x-b[0])**2 + (a.y-b[1])**2)
for waypoint in self.waypoints.waypoints:
temp_dist = fdist(waypoint.pose.pose.position, stop_line_position)
if temp_dist < dist:
dist = temp_dist
selected_index = index
index = index + 1
return selected_index
def loop(self):
rate = rospy.Rate(10)
while not rospy.is_shutdown():
light_wp, state = self.process_traffic_lights()
'''
Publish upcoming red lights at camera frequency.
Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
of times till we start using it. Otherwise the previous stable state is
used.
'''
if self.state != state:
self.state_count = 0
self.state = state
elif self.state_count >= STATE_COUNT_THRESHOLD:
self.last_state = self.state
light_wp = light_wp if (state == TrafficLight.RED or state == TrafficLight.YELLOW) else -1
self.last_wp = light_wp
self.upcoming_red_light_pub.publish(Int32(light_wp))
else:
self.upcoming_red_light_pub.publish(Int32(self.last_wp))
self.state_count += 1
rate.sleep()
if __name__ == '__main__':
try:
TLDetector()
except rospy.ROSInterruptException:
rospy.logerr('Could not start traffic node.')
| 34.714286
| 144
| 0.635949
|
67e8c17e2e1920a06e06b92668f52e51109ba571
| 7,090
|
py
|
Python
|
urlgetter.py
|
DevopediaOrg/topic-resource-finder
|
ecafaf0e7f6e03ca3a8d7aee34414f871c8ac18d
|
[
"MIT"
] | null | null | null |
urlgetter.py
|
DevopediaOrg/topic-resource-finder
|
ecafaf0e7f6e03ca3a8d7aee34414f871c8ac18d
|
[
"MIT"
] | 2
|
2019-10-19T10:39:18.000Z
|
2019-11-01T05:09:27.000Z
|
urlgetter.py
|
DevopediaOrg/topic-resource-finder
|
ecafaf0e7f6e03ca3a8d7aee34414f871c8ac18d
|
[
"MIT"
] | 7
|
2019-10-09T15:31:34.000Z
|
2019-10-30T06:06:58.000Z
|
import re
import pytz
import datetime
import platform
import sys
import requests
from newspaper import Article
from bs4 import BeautifulSoup
from readability.readability import Document as Paper
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
done = {}
def textgetter(url):
"""Scrapes web news and returns the content
Parameters
----------
url : str
web address to news report
Returns
-------
answer : dict
Python dictionary with key/value pairs for:
text (str) - Full text of article
url (str) - url to article
title (str) - extracted title of article
author (str) - name of extracted author(s)
base (str) - base url of where article was located
provider (str) - string of the news provider from url
published_date (str,isoformat) - extracted date of article
top_image (str) - extracted url of the top image for article
"""
global done
TAGS = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7', 'p', 'li']
# regex for url check
s = re.compile('(http://|https://)([A-Za-z0-9_\.-]+)')
u = re.compile("(http://|https://)(www.)?(.*)(\.[A-Za-z0-9]{1,4})$")
if s.search(url):
site = u.search(s.search(url).group()).group(3)
else:
site = None
answer = {}
# check that its an url
if s.search(url):
if url in done.keys():
yield done[url]
pass
try:
# make a request to the url
r = requests.get(url, verify=False, timeout=1)
except:
# if the url does not return data, set to empty values
done[url] = "Unable to reach website."
answer['author'] = None
answer['base'] = s.search(url).group()
answer['provider']=site
answer['published_date']=None
answer['text'] = "Unable to reach website."
answer['title'] = None
answer['top_image'] = None
answer['url'] = url
answer['keywords']=None
answer['summary']=None
yield answer
        # if url does not return successfully, set to empty values
if r.status_code != 200:
done[url] = "Unable to reach website."
answer['author'] = None
answer['base'] = s.search(url).group()
answer['provider']=site
answer['published_date']=None
answer['text'] = "Unable to reach website."
answer['title'] = None
answer['top_image'] = None
answer['url'] = url
answer['keywords']=None
answer['summary']=None
# test if length of url content is greater than 500, if so, fill data
if len(r.content)>500:
# set article url
article = Article(url)
            # test for python version because the download() html parameter name differs
if int(platform.python_version_tuple()[0])==3:
article.download(input_html=r.content)
elif int(platform.python_version_tuple()[0])==2:
article.download(html=r.content)
# parse the url
article.parse()
article.nlp()
            # if parse pulled enough text, fill in the rest of the data
if len(article.text) >= 200:
answer['author'] = ", ".join(article.authors)
answer['base'] = s.search(url).group()
answer['provider']=site
answer['published_date'] = article.publish_date
answer['keywords']=article.keywords
answer['summary']=article.summary
# convert the data to isoformat; exception for naive date
if isinstance(article.publish_date,datetime.datetime):
try:
answer['published_date']=article.publish_date.astimezone(pytz.utc).isoformat()
except:
answer['published_date']=article.publish_date.isoformat()
answer['text'] = article.text
answer['title'] = article.title
answer['top_image'] = article.top_image
answer['url'] = url
# if previous didn't work, try another library
else:
doc = Paper(r.content)
data = doc.summary()
title = doc.title()
soup = BeautifulSoup(data, 'lxml')
newstext = " ".join([l.text for l in soup.find_all(TAGS)])
                # as we did above, pull text if its length is greater than 200
if len(newstext) > 200:
answer['author'] = None
answer['base'] = s.search(url).group()
answer['provider']=site
answer['published_date']=None
answer['text'] = newstext
answer['title'] = title
answer['top_image'] = None
answer['url'] = url
answer['keywords']=None
answer['summary']=None
# if nothing works above, use beautiful soup
else:
newstext = " ".join([
l.text
for l in soup.find_all(
'div', class_='field-item even')
])
done[url] = newstext
answer['author'] = None
answer['base'] = s.search(url).group()
answer['provider']=site
answer['published_date']=None
answer['text'] = newstext
answer['title'] = title
answer['top_image'] = None
answer['url'] = url
answer['keywords']=None
answer['summary']=None
# if nothing works, fill with empty values
else:
answer['author'] = None
answer['base'] = s.search(url).group()
answer['provider']=site
answer['published_date']=None
answer['text'] = 'No text returned'
answer['title'] = None
answer['top_image'] = None
answer['url'] = url
answer['keywords']=None
answer['summary']=None
yield answer
yield answer
    # the else clause catches the case where an invalid url was passed in
else:
answer['author'] = None
answer['base'] = s.search(url).group()
answer['provider']=site
answer['published_date']=None
answer['text'] = 'This is not a proper url'
answer['title'] = None
answer['top_image'] = None
answer['url'] = url
answer['keywords']=None
answer['summary']=None
yield answer
if __name__ == "__main__":
print(list(textgetter(sys.argv[1])))
| 37.120419
| 102
| 0.512271
|
94c4c4cade54de3b79a73971214a827a00d35cd7
| 4,622
|
py
|
Python
|
test_fields_smallint.py
|
kezabelle/django-strictmodels
|
87ff7d7850dcfec437d1a9751938ed932844cb45
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2015-11-11T13:42:32.000Z
|
2015-11-11T16:38:45.000Z
|
test_fields_smallint.py
|
kezabelle/django-strictmodels
|
87ff7d7850dcfec437d1a9751938ed932844cb45
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
test_fields_smallint.py
|
kezabelle/django-strictmodels
|
87ff7d7850dcfec437d1a9751938ed932844cb45
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.core.exceptions import ValidationError
from django.forms.models import model_to_dict, modelform_factory
from model_mommy.mommy import Mommy
import pytest
from fakeapp.models import SmallIntegerFieldModel
from strictmodels import MODEL_MOMMY_MAPPING, SafeModelForm
def test_StrictSmallIntegerField_null():
SmallIntegerFieldModel()
@pytest.mark.django_db
def test_StrictSmallIntegerField_save():
x = SmallIntegerFieldModel(field='4')
x.save()
assert model_to_dict(x) == model_to_dict(SmallIntegerFieldModel.objects.get(pk=x.pk))
@pytest.mark.django_db
def test_StrictSmallIntegerField_mommy():
mommy = Mommy(model=SmallIntegerFieldModel)
mommy.type_mapping.update(MODEL_MOMMY_MAPPING)
try:
mommy.prepare()
except ValidationError:
# this is OK because it means our mapping works
pass
try:
mommy.make()
except ValidationError:
# this is OK because it means our mapping works
pass
@pytest.mark.django_db
def test_StrictSmallIntegerField_form_with_instance_valid():
x = SmallIntegerFieldModel(field=5)
form_class = modelform_factory(model=SmallIntegerFieldModel, fields=['field'])
form = form_class(data={'field': 6}, instance=x)
assert form.is_valid() is True
assert form.errors == {}
assert form.save().field == 6
def test_StrictSmallIntegerField_form_with_instance_invalid():
x = SmallIntegerFieldModel(field=5)
form_class = modelform_factory(model=SmallIntegerFieldModel,
form=SafeModelForm, fields=['field'])
form = form_class(data={'field': 9223372036854775808}, instance=x)
assert form.is_valid() is False
assert form.errors == {'field': ['Ensure this value is less than or equal to 32767.']}
@pytest.mark.django_db
def test_StrictSmallIntegerField_form_without_instance_valid():
form_class = modelform_factory(model=SmallIntegerFieldModel, fields=['field'])
form = form_class(data={'field': 6})
assert form.is_valid() is True
assert form.errors == {}
assert form.save().field == 6
def test_StrictSmallIntegerField_form_without_instance_invalid():
form_class = modelform_factory(model=SmallIntegerFieldModel,
form=SafeModelForm, fields=['field'])
form = form_class(data={'field': 9223372036854775808})
assert form.is_valid() is False
assert form.errors == {'field': ['Ensure this value is less than or equal to 32767.']}
def test_StrictSmallIntegerField_descriptor_doesnt_disappear():
"""
don't clobber the descriptor
"""
value = SmallIntegerFieldModel(field=-500)
assert value.field == -500
value.field = 15
assert value.field == 15
with pytest.raises(ValidationError):
value.field = 40000
assert value.field == 15
value.field = -4000
assert value.field == -4000
with pytest.raises(ValidationError):
value.field = -40000
def test_StrictSmallIntegerField_string():
"""
    Cannot be a non-numeric string
"""
with pytest.raises(ValidationError):
SmallIntegerFieldModel(field='aaaa')
def test_StrictSmallIntegerField_minvalue():
with pytest.raises(ValidationError):
SmallIntegerFieldModel(field=-32769)
def test_StrictSmallIntegerField_maxvalue():
"""
    Ensure this value is less than or equal to 32767
"""
with pytest.raises(ValidationError):
SmallIntegerFieldModel(field=32768)
def test_StrictSmallIntegerField_ok():
model4 = SmallIntegerFieldModel(field=15)
assert model4.field == 15
def test_StrictSmallIntegerField_ok_until_changed():
model5 = SmallIntegerFieldModel(field=15)
assert model5.field == 15
with pytest.raises(ValidationError):
model5.field = 40000
@pytest.mark.django_db
def test_StrictSmallIntegerField_create_via_queryset():
assert SmallIntegerFieldModel.objects.count() == 0
with pytest.raises(ValidationError):
SmallIntegerFieldModel.objects.create(field=-40000)
assert SmallIntegerFieldModel.objects.count() == 0
@pytest.mark.django_db
def test_StrictSmallIntegerField_update_via_queryset_invalid_then_get():
"""
    Ensure this value is less than or equal to 32767.
"""
model = SmallIntegerFieldModel.objects.create(field=15)
model.__class__.objects.filter(pk=model.pk).update(field=40000)
with pytest.raises(ValidationError):
model.__class__.objects.get(pk=model.pk)
| 30.813333
| 90
| 0.729122
|
e5d563455e896e1901c669cc92f5097d7e01b73b
| 10,967
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_tensordot.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 11
|
2016-08-29T07:43:26.000Z
|
2016-08-29T07:51:24.000Z
|
python/paddle/fluid/tests/unittests/test_tensordot.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/test_tensordot.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 1
|
2021-12-09T08:59:17.000Z
|
2021-12-09T08:59:17.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools as it
import numpy as np
import unittest
import paddle
import paddle.fluid.core as core
def tensordot_np(x, y, axes):
if isinstance(axes, paddle.fluid.framework.Variable):
axes = axes.tolist()
# np.tensordot does not support empty axes
if not axes:
axes = 0
if (isinstance(axes, (tuple, list))):
if all(np.issubdtype(type(i), np.integer) for i in axes):
axes = [axes, axes]
else:
axes_x = axes[0]
if len(axes) > 1:
axes_y = axes[1]
else:
axes_y = axes_x
len_axes_x, len_axes_y = len(axes_x), len(axes_y)
if len_axes_x < len_axes_y:
axes_x = axes_x + axes_y[len_axes_x:]
elif len_axes_y < len_axes_x:
axes_y = axes_y + axes_x[len_axes_y:]
axes = [axes_x, axes_y]
# np.tensordot does not support broadcast
if (isinstance(axes, (tuple, list))):
axes_x, axes_y = axes
else:
axes_x = list(range(x.ndim - axes, x.ndim))
axes_y = list(range(axes))
shape_x, shape_y = list(np.shape(x)), list(np.shape(y))
for i in range(len(axes_x)):
dim_x, dim_y = axes_x[i], axes_y[i]
sx, sy = shape_x[dim_x], shape_y[dim_y]
if sx == 1:
shape_y[dim_y] = 1
y = np.sum(y, dim_y)
y = np.reshape(y, shape_y)
elif sy == 1:
shape_x[dim_x] = 1
x = np.sum(x, dim_x)
x = np.reshape(x, shape_x)
return np.tensordot(x, y, axes)
class TestTensordotAPI(unittest.TestCase):
def setUp(self):
self.set_place()
self.set_dtype()
self.set_input_shape()
self.set_input_data()
self.set_test_axes()
def set_place(self):
self.places = [core.CPUPlace()]
if core.is_compiled_with_cuda():
self.places.append(core.CUDAPlace(0))
def set_dtype(self):
self.dtype = np.float32
def set_input_shape(self):
self.x_shape = [5, 5, 5, 5]
self.y_shape = [5, 5, 5, 5]
def set_input_data(self):
self.x = np.random.random(self.x_shape).astype(self.dtype)
self.y = np.random.random(self.y_shape).astype(self.dtype)
def set_test_axes(self):
self.all_axes = [[[3, 2], [3]], [[2, 1, 0], [2, 1]],
[[1, 2, 0], [1, 3, 2]], [3, 0], [[], [0, 3, 1]],
[[2, 1, 0, 3], [2, 0, 1, 3]], [[3, 1, 2], [1, 3, 2,
0]],
[[2, 1], [0, 2]], [[2, 0, 1, 3], [2]],
[[1, 2, 0, 3], [0, 2, 1]], [[2, 1, 3, 0], [1, 2, 3]],
[[2, 0, 1, 3], [3, 1, 0, 2]], [[0, 3], [0, 3, 2, 1]],
[[1, 3, 2, 0], [2, 1, 0, 3]],
[[1, 3, 2, 0], [1, 3, 2, 0]], [[1, 0, 2], [0, 1]],
[[2, 3, 0], [3, 1]], [[1, 3, 2, 0], [3, 0, 1, 2]],
[[3, 2, 1], [2, 0, 1]], [[0], []],
[[2, 3, 0], [1, 2, 0]], [[3, 0, 2, 1], [2, 1, 0, 3]],
[[3, 1, 2], [2, 3, 1]], [[1, 0, 2, 3], []],
[[1, 2], [1, 2, 3]], [[2, 0, 1, 3], [2, 0, 1]],
[[3, 1, 2], [1, 3, 2]], [[3, 1, 2, 0], [1, 2, 3, 0]],
[[0, 2, 3], [0, 1, 2]], [[3, 2, 0], [2, 0, 3, 1]],
[[2, 1, 0, 3], [3, 1, 2, 0]],
[[1, 2, 3, 0], [1, 3, 0, 2]], [[3, 0], [2, 1]],
[[0, 1, 3, 2], [0, 2, 1, 3]], [[1, 0], [2, 1, 3]],
[[1, 0, 3, 2], [2, 3, 0, 1]], [[1, 2], [3]],
[[1, 2, 3, 0], [3, 2, 1, 0]],
[[0, 3, 2, 1], [2, 1, 3, 0]], [0],
[[0, 2, 3], [3, 2, 0, 1]], [[1, 2, 3, 0], [3, 2, 1,
0]],
[[3, 1], [3]], [[3, 2, 0, 1], [3, 2, 0]],
[[2, 3, 0, 1], [0, 3, 2]], [[1], [1, 3]],
[[1, 2], [2, 1, 0]], [[3, 1, 2], [3, 1, 0]],
[[1, 3], [3, 1, 2]], [[2, 0, 1, 3], [3, 1, 0, 2]],
[[1, 3, 0], [1, 3]], [[2, 3, 1], [1, 0, 2]],
[[1, 2, 0, 3], [0, 2, 1, 3]], [[2], [0, 1, 3]],
[[1], [1, 2]], [[1, 0, 2, 3], [3, 0, 1, 2]],
[[0, 1, 3, 2], [1, 3, 0, 2]], [[3, 0, 2, 1], [0, 2,
3]],
[[1, 2, 0], [1, 2, 3]], [[1, 0, 3], [2, 3, 0]],
[[2, 3, 0], [3, 1, 0]], [[1, 3], [1, 0]],
[[2, 1, 0, 3], [2, 0, 3, 1]], [[3, 2, 0], [2, 1, 0]],
[[0, 1, 3], [0, 3, 1]], [[3, 1, 0], [3, 2, 1]],
[[3, 2], [3, 1]], [[3], [2, 1, 0]], [[1, 2, 3, 0], []],
[[1, 3, 2, 0], [3, 1, 2]], [[1], [0, 2]],
[[3, 2, 0], [3, 2, 0]], [[3], []], [[1, 0, 3], [2, 1]],
[[3, 1, 0, 2], [2, 3, 1, 0]], [[0, 1], [0, 3, 2]],
[[0, 2, 3], [0, 2, 1]], [[1, 3, 0], [3, 0, 2]],
[[3, 1, 2], [1, 2, 3]], [[3, 1, 2], [3, 1, 0]],
[[0, 3, 1, 2], [3, 2, 1, 0]], [[0, 3], [3, 2, 1]],
[[2, 3], [1, 3, 0]], [[0, 3, 2], [2, 0, 3, 1]],
[[2, 3], [1, 3]], [[3, 1, 2, 0], [2, 3, 1, 0]],
[[1, 0, 3, 2], [3, 0, 1, 2]],
[[3, 2, 1, 0], [0, 1, 3, 2]], [[3, 1, 2], [3]],
[[0, 1, 3, 2], [2, 3, 0, 1]],
[[1, 2, 3, 0], [1, 3, 0, 2]], [3, 1, 2],
[[3, 1, 2], [0, 3, 2]], [[2, 3, 0], [1, 2, 0]],
[[2, 0, 3], [2, 0]], [[3, 1, 0, 2], [3, 1, 0, 2]],
[[0, 1, 2], [2, 0, 1]], [[1, 0, 3], [2, 3, 0]],
[[2, 0, 1], [0, 1, 3]], [[2, 1], [0, 1, 3]]]
def test_dygraph(self):
paddle.disable_static()
for axes in self.all_axes:
for place in self.places:
x = paddle.to_tensor(self.x, place=place)
y = paddle.to_tensor(self.y, place=place)
paddle_res = paddle.tensordot(x, y, axes)
np_res = tensordot_np(self.x, self.y, axes)
np.testing.assert_allclose(paddle_res, np_res, rtol=1e-6)
def test_static(self):
paddle.enable_static()
for axes in self.all_axes:
for place in self.places:
with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()):
x = paddle.static.data(name='x',
shape=self.x_shape,
dtype=self.dtype)
y = paddle.static.data(name='y',
shape=self.y_shape,
dtype=self.dtype)
z = paddle.tensordot(x, y, axes)
exe = paddle.static.Executor(place)
paddle_res = exe.run(feed={
'x': self.x,
'y': self.y
},
fetch_list=[z])
np_res = tensordot_np(self.x, self.y, axes)
np.testing.assert_allclose(paddle_res[0], np_res, rtol=1e-6)
class TestTensordotAPIFloat64(TestTensordotAPI):
def set_dtype(self):
self.dtype = np.float64
class TestTensordotAPIBroadcastCase1(TestTensordotAPI):
def set_input_shape(self):
self.x_shape = [1, 1, 1, 5]
self.y_shape = [1, 5, 1, 1]
class TestTensordotAPIBroadcastCase2(TestTensordotAPI):
def set_input_shape(self):
self.x_shape = [1, 5, 5, 5]
self.y_shape = [1, 1, 1, 5]
class TestTensordotAPIBroadcastCase3(TestTensordotAPI):
def set_input_shape(self):
self.x_shape = [5, 5, 5, 1]
self.y_shape = [5, 5, 1, 5]
class TestTensordotAPIBroadcastCase4(TestTensordotAPI):
def set_input_shape(self):
self.x_shape = [5, 5, 5, 1]
self.y_shape = [1, 1, 1, 1]
class TestTensordotAPIBroadcastCase5(TestTensordotAPI):
def set_input_shape(self):
self.x_shape = [1, 1, 5, 5]
self.y_shape = [5, 5, 1, 5]
class TestTensordotAPIAxesType(TestTensordotAPI):
def set_input_shape(self):
self.x_shape = [3, 4, 4]
self.y_shape = [4, 4, 5]
def set_test_axes(self):
self.all_axes = [
0, 1, 2, (1, ), [1], ((1, ), ), ([1], ), ((2, 1), (0, )),
((1, 2), (0, 1)), ([1, 2], [0, 1]), ([1, 2], [0, 1]),
[[1, 2], [0, 1]]
]
def test_tensor_axes(self):
        # 'axes' of type Tensor is not supported by tensordot in static graph mode, so this test runs in dynamic mode only
paddle.disable_static()
tensor_axes = [
paddle.to_tensor([1]), (paddle.to_tensor([1])),
(paddle.to_tensor([1, 2]), paddle.to_tensor([0, 1])),
[paddle.to_tensor([1, 2]),
paddle.to_tensor([0, 1])],
paddle.to_tensor([[1, 2], [0, 1]])
]
for place in self.places:
for axes in tensor_axes:
x = paddle.to_tensor(self.x, place=place)
y = paddle.to_tensor(self.y, place=place)
paddle_res = paddle.tensordot(x, y, axes)
np_res = tensordot_np(self.x, self.y, axes)
np.testing.assert_allclose(paddle_res, np_res, rtol=1e-6)
def test_error(self):
self.all_axes = [[[[0], [1]]], 0.1, -1, 100, [[1, 2], [0, 0]],
[[1, 2], [0, -1]], [0, 1, 2, 3]]
paddle.disable_static()
x = paddle.to_tensor(self.x)
y = paddle.to_tensor(self.y)
for axes in self.all_axes:
with self.assertRaises(BaseException):
paddle.tensordot(x, y, axes)
class TestTensordotAPIAxesTypeFloat64(TestTensordotAPIAxesType):
def set_dtype(self):
self.dtype = np.float64
if __name__ == "__main__":
unittest.main()
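# --- Illustrative sketch, not part of the original test file ---
# The tests above compare paddle.tensordot against the reference helper tensordot_np
# used throughout this file. The two main `axes` forms being exercised can be seen
# directly with numpy.tensordot; the shapes below are arbitrary and chosen only for
# illustration.
import numpy as np

_a = np.random.rand(3, 4, 4)
_b = np.random.rand(4, 4, 5)
# Integer form: contract the last 2 axes of `_a` with the first 2 axes of `_b`.
_out_int = np.tensordot(_a, _b, axes=2)                    # result shape (3, 5)
# Sequence-pair form: explicitly pair axes 1, 2 of `_a` with axes 0, 1 of `_b`.
_out_pair = np.tensordot(_a, _b, axes=([1, 2], [0, 1]))    # result shape (3, 5)
assert np.allclose(_out_int, _out_pair)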
| 40.025547
| 84
| 0.413696
|
53b95290d879034a97884343775a138a64d221c5
| 390
|
py
|
Python
|
Ex0090/ex0090.py
|
Rodrigo-Antonio-Silva/ExerciciosPythonCursoemVideo
|
3b2d68094dd5d60f0e45a75590eb2be9be030640
|
[
"MIT"
] | null | null | null |
Ex0090/ex0090.py
|
Rodrigo-Antonio-Silva/ExerciciosPythonCursoemVideo
|
3b2d68094dd5d60f0e45a75590eb2be9be030640
|
[
"MIT"
] | null | null | null |
Ex0090/ex0090.py
|
Rodrigo-Antonio-Silva/ExerciciosPythonCursoemVideo
|
3b2d68094dd5d60f0e45a75590eb2be9be030640
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# exercise 90
alunos = dict()
alunos['nome'] = str(input('Nome: '))
alunos['media'] = float(input('Média: '))
if 5 <= alunos["media"] < 7:
alunos["situacao"] = 'Recuperação'
elif alunos["media"] < 5:
alunos['situacao'] = 'Reprovado'
else:
alunos["situacao"] = 'Aprovado'
print('-=' * 21)
for k, v in alunos.items():
print(f' -{k} é igual a {v}')
| 21.666667
| 41
| 0.574359
|
90e21c5cb669093be7f64c00da775f35d54fa538
| 2,875
|
py
|
Python
|
pypp11/code_creators/array_1_registrator.py
|
ISoirar/pypp11
|
7f929064766a48d9cb3f3b29c93fdc938b83bac5
|
[
"BSL-1.0"
] | 9
|
2016-06-07T19:14:53.000Z
|
2020-02-28T09:06:19.000Z
|
pypp11/code_creators/array_1_registrator.py
|
ISoirar/pypp11
|
7f929064766a48d9cb3f3b29c93fdc938b83bac5
|
[
"BSL-1.0"
] | 1
|
2018-08-15T11:33:40.000Z
|
2018-08-15T11:33:40.000Z
|
pypp11/code_creators/array_1_registrator.py
|
ISoirar/pypp11
|
7f929064766a48d9cb3f3b29c93fdc938b83bac5
|
[
"BSL-1.0"
] | 5
|
2016-06-23T09:37:00.000Z
|
2019-12-18T13:51:29.000Z
|
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os
import algorithm
import registration_based
from pypp11 import code_repository
from pypp11.decl_wrappers import call_policies
from pypp11.decl_wrappers import python_traits
from pygccxml import declarations
class array_1_registrator_t( registration_based.registration_based_t ):
"""
This class creates code that register static sized array
"""
def __init__( self, array_type ):
registration_based.registration_based_t.__init__( self )
self._array_type = array_type
self._call_policies = self._guess_call_policies()
self.works_on_instance = False
def _get_array_type( self ):
return self._array_type
def _set_array_type( self, new_array_type ):
self._array_type = new_array_type
array_type = property( _get_array_type, _set_array_type )
def _get_call_policies( self ):
return self._call_policies
def _set_call_policies( self, new_call_policies ):
self._call_policies = new_call_policies
call_policies = property( _get_call_policies, _set_call_policies )
def _create_name( self ):
item_type = declarations.array_item_type(self.array_type)
return "__array_1_%(type)s_%(size)d" \
% dict( type=algorithm.create_valid_name( item_type.decl_string )
, size=declarations.array_size(self.array_type) )
def _guess_call_policies(self):
item_type = declarations.array_item_type( self.array_type )
if python_traits.is_immutable( item_type ):
return call_policies.default_call_policies()
else:
return call_policies.return_internal_reference()
def _create_impl(self):
templates = declarations.templates
call_invocation = declarations.call_invocation
ns_name = code_repository.array_1.namespace
if declarations.is_const( self.array_type ):
fn_name = 'register_const_array_1'
else:
fn_name = 'register_array_1'
fn_def_tmpl_args = [ declarations.array_item_type(self.array_type).decl_string
, str( declarations.array_size(self.array_type) ) ]
if not self.call_policies.is_default():
fn_def_tmpl_args.append(
self.call_policies.create(self, call_policies.CREATION_POLICY.AS_TEMPLATE_ARGUMENT ) )
fn_def = templates.join( '::'.join( [ns_name, fn_name] ), fn_def_tmpl_args )
return call_invocation.join( fn_def, [ '"%s"' % self._create_name() ] ) + ';'
def _get_system_files_impl( self ):
return [code_repository.array_1.file_name]
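# Illustrative note (not part of the original source): for an array type such as
# ``int[10]`` with default call policies, ``_create_impl`` above would emit a
# registration statement roughly of the form
#
#     <ns>::register_array_1< int, 10 >( "__array_1_int_10" );
#
# where ``<ns>`` comes from ``code_repository.array_1.namespace`` (defined outside
# this file), ``register_const_array_1`` is used instead for const arrays, and a
# call-policies template argument is appended when the guessed policies are not the
# defaults (see ``_guess_call_policies``).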
| 41.071429
| 103
| 0.686957
|
fc1eccf16eabf81e41f2eec7464304aeeea681b5
| 3,486
|
py
|
Python
|
src/clientApp.py
|
Rishav-hub/DeepFace_FaceRecognition
|
8271250b47d493e93958d4633558f8112edcb9fb
|
[
"MIT"
] | 5
|
2021-12-02T15:33:43.000Z
|
2021-12-24T07:05:17.000Z
|
src/clientApp.py
|
Rishav-hub/DeepFace_FaceRecognition
|
8271250b47d493e93958d4633558f8112edcb9fb
|
[
"MIT"
] | null | null | null |
src/clientApp.py
|
Rishav-hub/DeepFace_FaceRecognition
|
8271250b47d493e93958d4633558f8112edcb9fb
|
[
"MIT"
] | 1
|
2022-02-14T10:59:51.000Z
|
2022-02-14T10:59:51.000Z
|
import argparse
import logging
import os
## Imports
from src.face_embeddings.generate_embeddings import FaceEmbeddings
from src.face_features.extract_attributes import FaceAttributes
from src.face_predictor.generate_prediction import FacePredictor
logging_str = "[%(asctime)s: %(levelname)s: %(module)s]: %(message)s"
log_dir = "logs"
os.makedirs(log_dir, exist_ok=True)
logging.basicConfig(filename=os.path.join(log_dir, 'input_log.log'), level=logging.INFO, format=logging_str,
filemode="a")
def getFaceEmbeddings():
"""
Function to get the face embeddings from the images in the given directory
"""
parser = argparse.ArgumentParser(description='Generate face embeddings')
parser.add_argument('--faces', default= 50, help='Number of faces that the camera will get')
parser.add_argument('--detector_backend', type=str, default='mtcnn', help='Face detector to be used')
parser.add_argument('--config', default='config/config.yaml', help='Path to the config file', type=str)
    parser.add_argument('--params', default='params.yaml', help='Path to the params file', type=str)
args = vars(parser.parse_args())
embeddings = FaceEmbeddings(args)
embeddings.GenerateFaceEmbedding()
def getFaceFeatures():
    """
    Function to extract face attributes from the images in the given directory
    """
parser = argparse.ArgumentParser(description='Generate face features')
parser.add_argument('--faces', default= 10, help='Number of faces that the camera will get')
parser.add_argument('--detector_backend', type=str, default='mtcnn', help='Face detector to be used')
parser.add_argument('--config', default='config/config.yaml', help='Path to the config file', type=str)
    parser.add_argument('--params', default='params.yaml', help='Path to the params file', type=str)
args = vars(parser.parse_args())
    attributes = FaceAttributes(args)
    attributes.ExtractAttributes()
def getFacePrediction():
"""
    Function to generate face predictions for the images in the given directory
"""
    parser = argparse.ArgumentParser(description='Generate face predictions')
parser.add_argument('--faces', default= 50, help='Number of faces that the camera will get')
parser.add_argument('--detector_backend', type=str, default='mtcnn', help='Face detector to be used')
parser.add_argument('--config', default='config/config.yaml', help='Path to the config file', type=str)
    parser.add_argument('--params', default='params.yaml', help='Path to the params file', type=str)
args = vars(parser.parse_args())
    predictor = FacePredictor(args)
    predictor.GenerateFacePrediction()
# if __name__ == '__main__':
# try:
# logging.info(">>> Stage one Started")
# getFaceEmbeddings()
# # print(embeddings_list)
# logging.info("Stage one Completed >>>>")
# except Exception as e:
# logging.error("Error in Stage one")
# logging.error(e)
# raise e
# try:
# logging.info(">>> Stage two Started")
# getFaceFeatures()
# logging.info("Stage two Completed >>>>")
# except Exception as e:
# logging.error("Error in Stage two")
# logging.error(e)
# raise e
# try:
# logging.info(">>> Stage Three Started")
# getFacePrediction()
# # print(embeddings_list)
# logging.info("Stage Three Completed >>>>")
# except Exception as e:
# logging.error("Error in Stage Three")
# logging.error(e)
# raise e
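# Hypothetical command-line sketch (not part of the original file): each stage above
# builds its own argument parser, so once one of the helpers is wired up as an entry
# point it can be driven with the flags defined above, e.g.
#
#     python -m src.clientApp --faces 50 --detector_backend mtcnn \
#         --config config/config.yaml --params params.yaml
#
# The module's own __main__ pipeline is kept commented out above, so the invocation
# shown here is an assumption about how the stages are meant to be run.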
| 38.307692
| 108
| 0.679002
|
21b685a4cedec0cc62d7b4e4611390f061f9521f
| 12,568
|
py
|
Python
|
autoapi/mappers/python/mapper.py
|
ashb/sphinx-autoapi
|
7330297a83c6b45fc520fa706c33374db4b148d9
|
[
"MIT"
] | null | null | null |
autoapi/mappers/python/mapper.py
|
ashb/sphinx-autoapi
|
7330297a83c6b45fc520fa706c33374db4b148d9
|
[
"MIT"
] | null | null | null |
autoapi/mappers/python/mapper.py
|
ashb/sphinx-autoapi
|
7330297a83c6b45fc520fa706c33374db4b148d9
|
[
"MIT"
] | null | null | null |
import collections
import copy
import operator
import os
import sphinx.util
from sphinx.util.console import bold
import sphinx.util.docstrings
import sphinx.util.logging
from ..base import SphinxMapperBase
from .parser import Parser
from .objects import (
PythonClass,
PythonFunction,
PythonModule,
PythonMethod,
PythonPackage,
PythonAttribute,
PythonData,
PythonException,
)
LOGGER = sphinx.util.logging.getLogger(__name__)
def _expand_wildcard_placeholder(original_module, originals_map, placeholder):
"""Expand a wildcard placeholder to a sequence of named placeholders.
:param original_module: The data dictionary of the module
that the placeholder is imported from.
:type original_module: dict
:param originals_map: A map of the names of children under the module
to their data dictionaries.
:type originals_map: dict(str, dict)
:param placeholder: The wildcard placeholder to expand.
:type placeholder: dict
:returns: The placeholders that the wildcard placeholder represents.
:rtype: list(dict)
"""
originals = originals_map.values()
if original_module["all"] is not None:
originals = []
for name in original_module["all"]:
if name == "__all__":
continue
if name not in originals_map:
msg = "Invalid __all__ entry {0} in {1}".format(
name, original_module["name"]
)
LOGGER.warning(msg)
continue
originals.append(originals_map[name])
placeholders = []
for original in originals:
new_full_name = placeholder["full_name"].replace("*", original["name"])
new_original_path = placeholder["original_path"].replace("*", original["name"])
if "original_path" in original:
new_original_path = original["original_path"]
new_placeholder = dict(
placeholder,
name=original["name"],
full_name=new_full_name,
original_path=new_original_path,
)
placeholders.append(new_placeholder)
return placeholders
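# Worked example (hypothetical data, added for illustration): given a wildcard
# placeholder such as
#     {"name": "*", "full_name": "pkg.mod.*", "original_path": "other.mod.*", ...}
# and an ``originals_map`` containing a child named "foo", the function above returns
# one named placeholder per original, e.g.
#     {"name": "foo", "full_name": "pkg.mod.foo", "original_path": "other.mod.foo", ...}
# reusing the original child's own "original_path" when it has one, and honouring the
# source module's ``__all__`` when it is defined.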
def _resolve_module_placeholders(modules, module_name, visit_path, resolved):
"""Resolve all placeholder children under a module.
:param modules: A mapping of module names to their data dictionary.
Placeholders are resolved in place.
:type modules: dict(str, dict)
:param module_name: The name of the module to resolve.
:type module_name: str
:param visit_path: An ordered set of visited module names.
    :type visit_path: collections.OrderedDict
:param resolved: A set of already resolved module names.
:type resolved: set(str)
"""
if module_name in resolved:
return
visit_path[module_name] = True
module, children = modules[module_name]
for child in list(children.values()):
if child["type"] != "placeholder":
continue
if child["original_path"] in modules:
module["children"].remove(child)
children.pop(child["name"])
continue
imported_from, original_name = child["original_path"].rsplit(".", 1)
if imported_from in visit_path:
msg = "Cannot resolve cyclic import: {0}, {1}".format(
", ".join(visit_path), imported_from
)
LOGGER.warning(msg)
module["children"].remove(child)
children.pop(child["name"])
continue
if imported_from not in modules:
msg = "Cannot resolve import of unknown module {0} in {1}".format(
imported_from, module_name
)
LOGGER.warning(msg)
module["children"].remove(child)
children.pop(child["name"])
continue
_resolve_module_placeholders(modules, imported_from, visit_path, resolved)
if original_name == "*":
original_module, originals_map = modules[imported_from]
# Replace the wildcard placeholder
# with a list of named placeholders.
new_placeholders = _expand_wildcard_placeholder(
original_module, originals_map, child
)
child_index = module["children"].index(child)
module["children"][child_index : child_index + 1] = new_placeholders
children.pop(child["name"])
for new_placeholder in new_placeholders:
if new_placeholder["name"] not in children:
children[new_placeholder["name"]] = new_placeholder
original = originals_map[new_placeholder["name"]]
_resolve_placeholder(new_placeholder, original)
elif original_name not in modules[imported_from][1]:
msg = "Cannot resolve import of {0} in {1}".format(
child["original_path"], module_name
)
LOGGER.warning(msg)
module["children"].remove(child)
children.pop(child["name"])
continue
else:
original = modules[imported_from][1][original_name]
_resolve_placeholder(child, original)
del visit_path[module_name]
resolved.add(module_name)
def _resolve_placeholder(placeholder, original):
"""Resolve a placeholder to the given original object.
:param placeholder: The placeholder to resolve, in place.
:type placeholder: dict
:param original: The object that the placeholder represents.
:type original: dict
"""
new = copy.deepcopy(original)
# We are supposed to be resolving the placeholder,
# not replacing it with another.
assert original["type"] != "placeholder"
# The name remains the same.
new["name"] = placeholder["name"]
new["full_name"] = placeholder["full_name"]
# Record where the placeholder originally came from.
new["original_path"] = original["full_name"]
# The source lines for this placeholder do not exist in this file.
# The keys might not exist if original is a resolved placeholder.
new.pop("from_line_no", None)
new.pop("to_line_no", None)
# Resolve the children
stack = list(new.get("children", ()))
while stack:
child = stack.pop()
# Relocate the child to the new location
assert child["full_name"].startswith(original["full_name"])
suffix = child["full_name"][len(original["full_name"]) :]
child["full_name"] = new["full_name"] + suffix
# The source lines for this placeholder do not exist in this file.
# The keys might not exist if original is a resolved placeholder.
child.pop("from_line_no", None)
child.pop("to_line_no", None)
# Resolve the remaining children
stack.extend(child.get("children", ()))
placeholder.clear()
placeholder.update(new)
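# Worked example (hypothetical data, added for illustration): if a placeholder
#     {"name": "Bar", "full_name": "pkg.mod.Bar", ...}
# is resolved against an original object
#     {"type": "class", "name": "Bar", "full_name": "other.mod.Bar", "from_line_no": 3, ...}
# the placeholder is replaced in place by a copy of the original whose "full_name" is
# "pkg.mod.Bar", whose "original_path" records "other.mod.Bar", and whose line-number
# keys are dropped; nested children such as "other.mod.Bar.method" are relocated to
# "pkg.mod.Bar.method" in the same pass.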
class PythonSphinxMapper(SphinxMapperBase):
"""Auto API domain handler for Python
Parses directly from Python files.
:param app: Sphinx application passed in as part of the extension
"""
_OBJ_MAP = {
cls.type: cls
for cls in (
PythonClass,
PythonFunction,
PythonModule,
PythonMethod,
PythonPackage,
PythonAttribute,
PythonData,
PythonException,
)
}
_OBJ_MAP["property"] = PythonMethod
def __init__(self, app, template_dir=None, url_root=None):
super(PythonSphinxMapper, self).__init__(app, template_dir, url_root)
self._use_implicit_namespace = (
self.app.config.autoapi_python_use_implicit_namespaces
)
def _find_files(self, patterns, dirs, ignore):
for dir_ in dirs:
dir_root = dir_
if (
os.path.exists(os.path.join(dir_, "__init__.py"))
or self._use_implicit_namespace
):
dir_root = os.path.abspath(os.path.join(dir_, os.pardir))
for path in self.find_files(patterns=patterns, dirs=[dir_], ignore=ignore):
yield dir_root, path
def load(self, patterns, dirs, ignore=None):
"""Load objects from the filesystem into the ``paths`` dictionary
Also include an attribute on the object, ``relative_path`` which is the
shortened, relative path the package/module
"""
dir_root_files = list(self._find_files(patterns, dirs, ignore))
for dir_root, path in sphinx.util.status_iterator(
dir_root_files,
bold("[AutoAPI] Reading files... "),
length=len(dir_root_files),
stringify_func=(lambda x: x[1]),
):
data = self.read_file(path=path, dir_root=dir_root)
if data:
data["relative_path"] = os.path.relpath(path, dir_root)
self.paths[path] = data
def read_file(self, path, **kwargs):
"""Read file input into memory, returning deserialized objects
:param path: Path of file to read
"""
dir_root = kwargs.get("dir_root")
try:
if self._use_implicit_namespace:
parsed_data = Parser().parse_file_in_namespace(path, dir_root)
else:
parsed_data = Parser().parse_file(path)
return parsed_data
except (IOError, TypeError, ImportError):
LOGGER.warning("Unable to read file: {0}".format(path))
LOGGER.debug("Reason:", exc_info=True)
return None
def _resolve_placeholders(self):
"""Resolve objects that have been imported from elsewhere."""
modules = {}
for module in self.paths.values():
children = {child["name"]: child for child in module["children"]}
modules[module["name"]] = (module, children)
resolved = set()
for module_name in modules:
visit_path = collections.OrderedDict()
_resolve_module_placeholders(modules, module_name, visit_path, resolved)
def map(self, options=None):
self._resolve_placeholders()
super(PythonSphinxMapper, self).map(options)
parents = {obj.name: obj for obj in self.objects.values()}
for obj in self.objects.values():
parent_name = obj.name.rsplit(".", 1)[0]
if parent_name in parents and parent_name != obj.name:
parent = parents[parent_name]
attr = "sub{}s".format(obj.type)
getattr(parent, attr).append(obj)
for obj in self.objects.values():
obj.submodules.sort()
obj.subpackages.sort()
def create_class(self, data, options=None, **kwargs):
"""Create a class from the passed in data
:param data: dictionary data of parser output
"""
try:
cls = self._OBJ_MAP[data["type"]]
except KeyError:
LOGGER.warning("Unknown type: %s" % data["type"])
else:
obj = cls(
data,
class_content=self.app.config.autoapi_python_class_content,
options=self.app.config.autoapi_options,
jinja_env=self.jinja_env,
app=self.app,
**kwargs
)
obj.url_root = self.url_root
lines = sphinx.util.docstrings.prepare_docstring(obj.docstring)
if lines and "autodoc-process-docstring" in self.app.events.events:
self.app.emit(
"autodoc-process-docstring", cls.type, obj.name, None, None, lines
)
obj.docstring = "\n".join(lines)
self._record_typehints(obj)
for child_data in data.get("children", []):
for child_obj in self.create_class(
child_data, options=options, **kwargs
):
obj.children.append(child_obj)
# Parser gives children in source order already
if self.app.config.autoapi_member_order == "alphabetical":
obj.children.sort(key=operator.attrgetter("name"))
elif self.app.config.autoapi_member_order == "groupwise":
obj.children.sort(key=lambda x: (x.member_order, x.name))
yield obj
def _record_typehints(self, obj):
if isinstance(obj, (PythonClass, PythonFunction, PythonMethod)):
annotations = self.app.env.temp_data.setdefault("annotations", {})
annotations[obj.id] = obj.obj["annotations"]
| 35.806268
| 87
| 0.612826
|
c3458d831535f17e7c2652779f91e23d81d3d46a
| 684
|
py
|
Python
|
project_site/project_app/migrations/0005_auto_20210130_1445.py
|
Shufyan/project_management
|
92332f68b451d4ecedd66c5449850c66b4cb8d32
|
[
"MIT"
] | null | null | null |
project_site/project_app/migrations/0005_auto_20210130_1445.py
|
Shufyan/project_management
|
92332f68b451d4ecedd66c5449850c66b4cb8d32
|
[
"MIT"
] | null | null | null |
project_site/project_app/migrations/0005_auto_20210130_1445.py
|
Shufyan/project_management
|
92332f68b451d4ecedd66c5449850c66b4cb8d32
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.5 on 2021-01-30 09:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project_app', '0004_auto_20210130_0348'),
]
operations = [
migrations.AlterField(
model_name='employee',
name='id',
field=models.CharField(default='c5rF2h', editable=False, max_length=6, primary_key=True, serialize=False, unique=True),
),
migrations.AlterField(
model_name='project',
name='id',
field=models.IntegerField(default=837, editable=False, primary_key=True, serialize=False, unique=True),
),
]
| 28.5
| 131
| 0.621345
|
ee2cbf35f6be4ec2bd22a3896303f6114c39d408
| 26,222
|
py
|
Python
|
test/functional/test_runner.py
|
freelancedeveloper025/MGC
|
6a532d9bc6e9cb1b0e0240d2ca6dfcf7a053c2b2
|
[
"MIT"
] | null | null | null |
test/functional/test_runner.py
|
freelancedeveloper025/MGC
|
6a532d9bc6e9cb1b0e0240d2ca6dfcf7a053c2b2
|
[
"MIT"
] | null | null | null |
test/functional/test_runner.py
|
freelancedeveloper025/MGC
|
6a532d9bc6e9cb1b0e0240d2ca6dfcf7a053c2b2
|
[
"MIT"
] | 1
|
2021-09-24T11:24:48.000Z
|
2021-09-24T11:24:48.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
# 30 minutes represented in seconds
TRAVIS_TIMEOUT_DURATION = 30 * 60
BASE_SCRIPTS= [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_dip3_deterministicmns.py', # NOTE: needs mgc_hash to pass
'feature_block_reward_reallocation.py',
'feature_llmq_data_recovery.py',
'wallet_hd.py',
'wallet_backup.py',
# vv Tests less than 5m vv
'feature_block.py', # NOTE: needs mgc_hash to pass
'rpc_fundrawtransaction.py',
'rpc_fundrawtransaction_hd.py',
'wallet_multiwallet.py --usecli',
'p2p_quorum_data.py',
# vv Tests less than 2m vv
'p2p_instantsend.py',
'wallet_basic.py',
'wallet_labels.py',
'wallet_dump.py',
'wallet_listtransactions.py',
'feature_multikeysporks.py',
'feature_llmq_signing.py', # NOTE: needs mgc_hash to pass
'feature_llmq_signing.py --spork21', # NOTE: needs mgc_hash to pass
'feature_llmq_chainlocks.py', # NOTE: needs mgc_hash to pass
'feature_llmq_connections.py', # NOTE: needs mgc_hash to pass
'feature_llmq_simplepose.py', # NOTE: needs mgc_hash to pass
'feature_llmq_is_cl_conflicts.py', # NOTE: needs mgc_hash to pass
'feature_llmq_is_retroactive.py', # NOTE: needs mgc_hash to pass
'feature_llmq_dkgerrors.py', # NOTE: needs mgc_hash to pass
'feature_dip4_coinbasemerkleroots.py', # NOTE: needs mgc_hash to pass
# vv Tests less than 60s vv
'p2p_sendheaders.py', # NOTE: needs mgc_hash to pass
'wallet_zapwallettxes.py',
'wallet_importmulti.py',
'mempool_limit.py',
'rpc_txoutproof.py',
'wallet_listreceivedby.py',
'wallet_abandonconflict.py',
'feature_csv_activation.py',
'rpc_rawtransaction.py',
'feature_reindex.py',
# vv Tests less than 30s vv
'wallet_keypool_topup.py',
'interface_zmq_mgc.py',
'interface_zmq.py',
'interface_bitcoin_cli.py',
'mempool_resurrect.py',
'wallet_txn_doublespend.py --mineblock',
'wallet_txn_clone.py',
'rpc_getchaintips.py',
'interface_rest.py',
'mempool_spend_coinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'wallet_multiwallet.py',
'interface_http.py',
'rpc_users.py',
'feature_proxy.py',
'rpc_signrawtransaction.py',
'p2p_disconnect_ban.py',
'feature_addressindex.py',
'feature_timestampindex.py',
'feature_spentindex.py',
'rpc_decodescript.py',
'rpc_blockchain.py',
'rpc_deprecated.py',
'wallet_disable.py',
'rpc_net.py',
'wallet_keypool.py',
'wallet_keypool_hd.py',
'p2p_mempool.py',
'mining_prioritisetransaction.py',
'p2p_invalid_block.py',
'p2p_invalid_tx.py',
'feature_versionbits_warning.py',
'rpc_preciousblock.py',
'wallet_importprunedfunds.py',
'rpc_zmq.py',
'rpc_signmessage.py',
'feature_nulldummy.py',
'wallet_import_rescan.py',
'rpc_bind.py --ipv4',
'rpc_bind.py --ipv6',
'rpc_bind.py --nonloopback',
'mining_basic.py',
'rpc_named_arguments.py',
'wallet_listsinceblock.py',
'p2p_leak.py',
'p2p_compactblocks.py',
'p2p_connect_to_devnet.py',
'feature_sporks.py',
'rpc_getblockstats.py',
'wallet_encryption.py',
'wallet_upgradetohd.py',
'feature_dersig.py',
'feature_cltv.py',
'feature_new_quorum_type_activation.py',
'feature_governance_objects.py',
'rpc_uptime.py',
'wallet_resendwallettransactions.py',
'feature_minchainwork.py',
'p2p_unrequested_blocks.py', # NOTE: needs mgc_hash to pass
'feature_shutdown.py',
'rpc_coinjoin.py',
'rpc_masternode.py',
'rpc_mnauth.py',
'rpc_verifyislock.py',
'rpc_verifychainlock.py',
'p2p_fingerprint.py',
'rpc_platform_filter.py',
'feature_dip0020_activation.py',
'feature_uacomment.py',
'feature_logging.py',
'p2p_node_network_limited.py',
'feature_blocksdir.py',
'feature_config_args.py',
'feature_help.py',
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_pruning.py', # NOTE: Prune mode is incompatible with -txindex, should work with governance validation disabled though.
# vv Tests less than 20m vv
'feature_fee_estimation.py',
# vv Tests less than 5m vv
'feature_maxuploadtarget.py',
'mempool_packages.py',
'feature_dbcrash.py',
# vv Tests less than 2m vv
'feature_bip68_sequence.py',
'mining_getblocktemplate_longpoll.py', # FIXME: "socket.error: [Errno 54] Connection reset by peer" on my Mac, same as https://github.com/bitcoin/bitcoin/issues/6651
'p2p_timeouts.py',
# vv Tests less than 60s vv
# vv Tests less than 30s vv
'feature_assumevalid.py',
'example_test.py',
'wallet_txn_doublespend.py',
'wallet_txn_clone.py --mineblock',
'feature_txindex.py',
'feature_notifications.py',
'rpc_invalidateblock.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--ci', action='store_true', help='Run checks and code that are usually only enabled in a continuous integration environment')
parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
parser.add_argument('--failfast', action='store_true', help='stop execution after the first test failure')
args, unknown_args = parser.parse_known_args()
    # args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile, encoding="utf8"))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/mgc_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_bitcoind):
print("No functional tests to run. Wallet, utils, and mgcd must all be enabled")
print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
sys.exit(0)
# Build list of tests
test_list = []
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
        tests = [re.sub(r"\.py$", "", test) + ".py" for test in tests]
for test in tests:
if test in ALL_SCRIPTS:
test_list.append(test)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
elif args.extended:
# Include extended tests
test_list += ALL_SCRIPTS
else:
# Run base tests only
test_list += BASE_SCRIPTS
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
        exclude_tests = [re.sub(r"\.py$", "", test) + ".py" for test in args.exclude.split(',')]
for exclude_test in exclude_tests:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h'])
sys.exit(0)
check_script_list(src_dir=config["environment"]["SRCDIR"], fail_on_warn=args.ci)
check_script_prefixes()
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(
test_list=test_list,
src_dir=config["environment"]["SRCDIR"],
build_dir=config["environment"]["BUILDDIR"],
tmpdir=tmpdir,
jobs=args.jobs,
enable_coverage=args.coverage,
args=passon_args,
failfast=args.failfast,
runs_ci=args.ci,
combined_logs_len=args.combinedlogslen,
)
def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, failfast=False, runs_ci, combined_logs_len=0):
args = args or []
# Warn if mgcd is already running (unix only)
try:
pidof_output = subprocess.check_output(["pidof", "mgcd"])
if not (pidof_output is None or pidof_output == b''):
print("%sWARNING!%s There is already a mgcd process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
tests_dir = src_dir + '/test/functional/'
flags = ["--srcdir={}/src".format(build_dir)] + args
flags.append("--cachedir=%s" % cache_dir)
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
try:
subprocess.check_output([sys.executable, tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
#Run Tests
job_queue = TestHandler(
num_tests_parallel=jobs,
tests_dir=tests_dir,
tmpdir=tmpdir,
test_list=test_list,
flags=flags,
timeout_duration=TRAVIS_TIMEOUT_DURATION if runs_ci else float('inf'), # in seconds
)
start_time = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
for _ in range(len(test_list)):
test_result, testdir, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
if test_result.status == "Passed":
logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
elif test_result.status == "Skipped":
logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
else:
print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs, _ = subprocess.Popen([sys.executable, os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
if failfast:
logging.debug("Early exiting after test failure")
break
print_results(test_results, max_len_name, (int(time.time() - start_time)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
# This will be a no-op unless failfast is True in which case there may be dangling
# processes which need to be killed.
job_queue.kill_and_join()
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=TestResult.sort_key)
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
if not all_passed:
results += RED[1]
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
if not all_passed:
results += RED[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, *, num_tests_parallel, tests_dir, tmpdir, test_list, flags, timeout_duration):
assert num_tests_parallel >= 1
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.timeout_duration = timeout_duration
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie mgcds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
test = self.test_list.pop(0)
portseed = len(self.test_list) + self.portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = test.split()
            testdir = "{}/{}_{}".format(self.tmpdir, re.sub(r"\.py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
self.jobs.append((test,
time.time(),
subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
testdir,
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for job in self.jobs:
(name, start_time, proc, testdir, log_out, log_err) = job
if int(time.time() - start_time) > self.timeout_duration:
# In travis, timeout individual tests (to stop tests hanging and not providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(job)
return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
print('.', end='', flush=True)
def kill_and_join(self):
"""Send SIGKILL to all jobs and block until all have ended."""
procs = [i[2] for i in self.jobs]
for proc in procs:
proc.kill()
for proc in procs:
proc.wait()
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def sort_key(self):
if self.status == "Passed":
return 0, self.name.lower()
elif self.status == "Failed":
return 2, self.name.lower()
elif self.status == "Skipped":
return 1, self.name.lower()
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_prefixes():
"""Check that test scripts start with one of the allowed name prefixes."""
good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet)_")
bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]
if bad_script_names:
print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(bad_script_names)))
print(" %s" % ("\n ".join(sorted(bad_script_names))))
raise AssertionError("Some tests are not following naming convention!")
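# Small self-check added for illustration (not part of the original runner): the
# prefix regex used above accepts canonical test names and rejects everything else.
import re as _re_example
assert _re_example.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet)_").match("wallet_basic.py")
assert not _re_example.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet)_").match("misc_stuff.py")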
def check_script_list(*, src_dir, fail_on_warn):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
not being run by pull-tester.py."""
script_dir = src_dir + '/test/functional/'
python_files = set([test_file for test_file in os.listdir(script_dir) if test_file.endswith(".py")])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if fail_on_warn:
# On travis this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `mgc-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % command) for command in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
        # This is shared from `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r', encoding="utf8") as coverage_ref_file:
all_cmds.update([line.strip() for line in coverage_ref_file.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r', encoding="utf8") as coverage_file:
covered_cmds.update([line.strip() for line in coverage_file.readlines()])
return all_cmds - covered_cmds
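# Minimal standalone sketch (added for illustration, not part of the original file) of
# the set arithmetic RPCCoverage._get_uncovered_rpc_commands() performs once the
# reference list and the per-test coverage files have been read: uncovered commands
# are simply the set difference between all known commands and the commands seen.
def _example_uncovered(reference_lines, coverage_files_lines):
    all_cmds = {line.strip() for line in reference_lines}
    covered_cmds = set()
    for lines in coverage_files_lines:
        covered_cmds.update(line.strip() for line in lines)
    return all_cmds - covered_cmds

assert _example_uncovered(["getblock", "getinfo"], [["getblock"]]) == {"getinfo"}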
if __name__ == '__main__':
main()
| 40.156202
| 195
| 0.641942
|
60cec18baa6f2bae670d906113cc0303a1a5d06a
| 19,660
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_05_01/operations/_vpn_link_connections_operations.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_05_01/operations/_vpn_link_connections_operations.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_05_01/operations/_vpn_link_connections_operations.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VpnLinkConnectionsOperations(object):
"""VpnLinkConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _reset_connection_initial(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
link_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self._reset_connection_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'linkConnectionName': self._serialize.url("link_connection_name", link_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reset_connection_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}/vpnLinkConnections/{linkConnectionName}/resetconnection'} # type: ignore
def begin_reset_connection(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
link_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Resets the VpnLink connection specified.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the vpn connection.
:type connection_name: str
:param link_connection_name: The name of the vpn link connection.
:type link_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._reset_connection_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
connection_name=connection_name,
link_connection_name=link_connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'linkConnectionName': self._serialize.url("link_connection_name", link_connection_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}/vpnLinkConnections/{linkConnectionName}/resetconnection'} # type: ignore
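    # Hypothetical usage sketch (added for illustration; not part of the generated
    # code). As the class docstring notes, this operations class is reached through a
    # service client rather than instantiated directly; the client class and attribute
    # name below are assumptions:
    #
    #     from azure.identity import DefaultAzureCredential
    #     from azure.mgmt.network import NetworkManagementClient
    #
    #     client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #     poller = client.vpn_link_connections.begin_reset_connection(
    #         resource_group_name="my-rg", gateway_name="my-gw",
    #         connection_name="my-conn", link_connection_name="my-link")
    #     poller.result()  # block until the long-running operation completes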
def _get_ike_sas_initial(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
link_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self._get_ike_sas_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'linkConnectionName': self._serialize.url("link_connection_name", link_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_ike_sas_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}/vpnLinkConnections/{linkConnectionName}/getikesas'} # type: ignore
def begin_get_ike_sas(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
link_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[str]
"""Lists IKE Security Associations for Vpn Site Link Connection in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the vpn connection.
:type connection_name: str
:param link_connection_name: The name of the vpn link connection.
:type link_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either str or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[str]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[str]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_ike_sas_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
connection_name=connection_name,
link_connection_name=link_connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'linkConnectionName': self._serialize.url("link_connection_name", link_connection_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_ike_sas.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}/vpnLinkConnections/{linkConnectionName}/getikesas'} # type: ignore
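# Hedged usage sketch (illustrative only, not part of the generated class):
# start the long-running "get IKE SAs" operation defined above and block for
# its str result. The client class and the `vpn_link_connections` attribute
# name are assumptions about how this operations group is exposed; resource
# names are placeholders.
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient

network_client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
ike_sas_poller = network_client.vpn_link_connections.begin_get_ike_sas(
    resource_group_name="my-rg",
    gateway_name="my-vpn-gateway",
    connection_name="my-connection",
    link_connection_name="my-link",
)
print(ike_sas_poller.result())  # waits for the LRO to complete and returns a str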
def list_by_vpn_connection(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVpnSiteLinkConnectionsResult"]
"""Retrieves all vpn site link connections for a particular virtual wan vpn gateway vpn
connection.
:param resource_group_name: The resource group name of the vpn gateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the vpn connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnSiteLinkConnectionsResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_05_01.models.ListVpnSiteLinkConnectionsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnSiteLinkConnectionsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_vpn_connection.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnSiteLinkConnectionsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_vpn_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}/vpnLinkConnections'} # type: ignore
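# Hedged usage sketch (illustrative only): the paged listing above returns an
# ItemPaged, which is a lazy iterator, so simply looping over it issues the
# page-by-page requests. Client construction and names mirror the previous
# sketch and are assumptions/placeholders.
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient

network_client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
for link_connection in network_client.vpn_link_connections.list_by_vpn_connection(
    resource_group_name="my-rg",
    gateway_name="my-vpn-gateway",
    connection_name="my-connection",
):
    print(link_connection.name)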
| 50.801034
| 276
| 0.667497
|
91838324f40ec6483f3b8142fc7ebf1a4ed544db
| 3,244
|
py
|
Python
|
tensorflow/python/training/gradient_descent.py
|
nkgwer/tensorflow
|
9cfb4b13c4639f741ec6b92e24c77d294a44c2d5
|
[
"Apache-2.0"
] | 3
|
2021-07-26T18:24:06.000Z
|
2021-11-02T19:48:13.000Z
|
tensorflow/python/training/gradient_descent.py
|
nkgwer/tensorflow
|
9cfb4b13c4639f741ec6b92e24c77d294a44c2d5
|
[
"Apache-2.0"
] | 1
|
2021-10-06T08:29:35.000Z
|
2021-10-06T08:36:59.000Z
|
tensorflow/python/training/gradient_descent.py
|
nkgwer/tensorflow
|
9cfb4b13c4639f741ec6b92e24c77d294a44c2d5
|
[
"Apache-2.0"
] | 1
|
2021-10-03T18:47:35.000Z
|
2021-10-03T18:47:35.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GradientDescent for TensorFlow."""
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.GradientDescentOptimizer"])
class GradientDescentOptimizer(optimizer.Optimizer):
"""Optimizer that implements the gradient descent algorithm.
"""
def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
"""Construct a new gradient descent optimizer.
Args:
learning_rate: A Tensor or a floating point value. The learning
rate to use.
use_locking: If True use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "GradientDescent".
@compatibility(eager)
When eager execution is enabled, `learning_rate` can be a callable that
takes no arguments and returns the actual value to use. This can be useful
for changing these values across different invocations of optimizer
functions.
@end_compatibility
"""
super(GradientDescentOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._learning_rate_tensor = None
def _apply_dense(self, grad, var):
return training_ops.apply_gradient_descent(
var,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, handle):
return training_ops.resource_apply_gradient_descent(
handle.handle, math_ops.cast(self._learning_rate_tensor,
grad.dtype.base_dtype),
grad, use_locking=self._use_locking)
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
return resource_variable_ops.resource_scatter_add(
handle.handle, indices, -grad * self._learning_rate)
def _apply_sparse_duplicate_indices(self, grad, var):
delta = ops.IndexedSlices(
grad.values *
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad.indices, grad.dense_shape)
return var.scatter_sub(delta, use_locking=self._use_locking)
def _prepare(self):
learning_rate = self._call_if_callable(self._learning_rate)
self._learning_rate_tensor = ops.convert_to_tensor(
learning_rate, name="learning_rate")
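# Hedged usage sketch (TF1-style graph mode; the variable, loss, and learning
# rate below are made up for illustration): minimizing a simple quadratic with
# the optimizer defined above.
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
w = tf.Variable(5.0)
loss = tf.square(w - 3.0)
train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss)
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  for _ in range(100):
    sess.run(train_op)
  print(sess.run(w))  # converges towards 3.0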
| 41.063291
| 80
| 0.732429
|
16f9e632d25f3796901e0d8ec2fe949acfcb7fd7
| 5,233
|
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/util/row_delete.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3
|
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/util/row_delete.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/util/row_delete.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1
|
2019-01-02T14:38:50.000Z
|
2019-01-02T14:38:50.000Z
|
"""
Code to delete rows from a table within a Django mgmt command using best practices.
The following lines show how to use delete_rows():
# Command to delete all rows from the student_historicalcourseenrollment table.
import logging
from openedx.core.djangoapps.util.row_delete import BaseDeletionCommand, delete_rows
from common.djangoapps.student.models import CourseEnrollment
log = logging.getLogger(__name__)
class Command(BaseDeletionCommand):
# Example usage: ./manage.py lms --settings=devstack delete_historical_enrollment_data
help = 'Deletes all historical CourseEnrollment rows (in chunks).'
def handle(self, *args, **options):
# Deletes rows, chunking the deletes to avoid long table/row locks.
chunk_size, sleep_between = super(Command, self).handle(*args, **options)
delete_rows(
CourseEnrollment.objects,
'student_historicalcourseenrollment',
'history_id',
chunk_size, sleep_between
)
"""
import logging
import time
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
log = logging.getLogger(__name__)
def delete_rows(model_mgr,
table_name,
primary_id_name,
chunk_size,
sleep_between):
"""
Deletes *ALL* rows from table, chunking the deletes to avoid long table/row locks.
Args:
model_mgr (django.db.models.manager.Manager): Django ORM mgr for the table's model.
table_name (str): Name of table from which to delete all rows.
primary_id_name (str): Name of primary ID autoincrement column from table.
chunk_size (int): Number of rows to delete in each transaction.
sleep_between (float): Number of seconds to sleep between transactions.
"""
if chunk_size <= 0:
raise CommandError(f'Only positive chunk size is allowed ({chunk_size}).')
if sleep_between < 0:
raise CommandError(f'Only non-negative sleep between seconds is allowed ({sleep_between}).')
# The "as id" below fools Django raw query into thinking the primary key is being queried.
# It's necessary because Django will throw an exception if the raw SQL does not query the primary key.
min_max_ids = model_mgr.raw(
f'SELECT MIN({primary_id_name}) as id, MAX({primary_id_name}) as max_id FROM {table_name}' # lint-amnesty, pylint: disable=duplicate-string-formatting-argument
)[0]
min_id = min_max_ids.id
max_id = min_max_ids.max_id
if not min_id or not max_id:
log.info("No data exists in table %s - skipping.", table_name)
return
log.info(
"STARTED: Deleting around %s rows with chunk size of %s and %s seconds between chunks.",
max_id - min_id + 1, chunk_size, sleep_between
)
lower_id = min_id
while lower_id <= max_id:
deletions_now = min(chunk_size, max_id - lower_id + 1)
upper_id = lower_id + deletions_now
log.info("Deleting around %s rows between ids %s and %s...", deletions_now, lower_id, upper_id)
with transaction.atomic():
# xss-lint: disable=python-wrap-html
delete_sql = 'DELETE FROM {} WHERE {} >= {} AND {} < {}'.format( # lint-amnesty, pylint: disable=duplicate-string-formatting-argument
table_name, primary_id_name, lower_id, primary_id_name, upper_id
)
log.info(delete_sql)
try:
list(model_mgr.raw(delete_sql))
except TypeError:
# The list() above is simply to get the RawQuerySet to be evaluated.
# Without evaluation, the raw DELETE SQL will *not* actually execute.
                # But evaluating it also raises a "TypeError: 'NoneType' object is not iterable", which is safely ignored here.
pass
lower_id += deletions_now
log.info("Sleeping %s seconds...", sleep_between)
time.sleep(sleep_between)
log.info("FINISHED: Deleted at most %s rows total.", max_id - min_id + 1)
class BaseDeletionCommand(BaseCommand):
"""
Base command used to delete all rows from a table.
"""
# Default maximum number of rows to delete in a single transaction.
DEFAULT_CHUNK_SIZE = 10000
# Default seconds to sleep between chunked deletes of rows.
DEFAULT_SLEEP_BETWEEN_DELETES = 0
def add_arguments(self, parser):
parser.add_argument(
'--chunk_size',
default=self.DEFAULT_CHUNK_SIZE,
type=int,
help='Maximum number of rows to delete in each DB transaction. Choose this value carefully to avoid DB outages!' # lint-amnesty, pylint: disable=line-too-long
)
parser.add_argument(
'--sleep_between',
default=self.DEFAULT_SLEEP_BETWEEN_DELETES,
type=float,
help='Seconds to sleep between chunked delete of rows.'
)
def handle(self, *args, **options):
"""
Deletes rows, chunking the deletes to avoid long table/row locks.
"""
return (
options.get('chunk_size', self.DEFAULT_CHUNK_SIZE),
options.get('sleep_between', self.DEFAULT_SLEEP_BETWEEN_DELETES)
)
| 39.345865
| 171
| 0.661189
|
9e26421b824c56f40b25266b94f8335b683944ab
| 44,473
|
py
|
Python
|
tensorflow/python/distribute/mirrored_strategy.py
|
buchgr/tensorflow
|
2938772a08ed02ced4663ca38168ab3f82e8f81b
|
[
"Apache-2.0"
] | 1
|
2020-03-12T10:39:06.000Z
|
2020-03-12T10:39:06.000Z
|
tensorflow/python/distribute/mirrored_strategy.py
|
buchgr/tensorflow
|
2938772a08ed02ced4663ca38168ab3f82e8f81b
|
[
"Apache-2.0"
] | 2
|
2021-08-25T15:57:08.000Z
|
2022-02-10T00:40:07.000Z
|
tensorflow/python/distribute/mirrored_strategy.py
|
buchgr/tensorflow
|
2938772a08ed02ced4663ca38168ab3f82e8f81b
|
[
"Apache-2.0"
] | 3
|
2020-03-09T19:17:02.000Z
|
2020-06-26T23:14:31.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class MirroredStrategy implementing tf.distribute.Strategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import copy
import functools
import threading
import weakref
from tensorflow.python import pywrap_tfe
from tensorflow.python.autograph.core import ag_ctx as autograph_ctx
from tensorflow.python.autograph.impl import api as autograph
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import shared_variable_creator
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# TODO(josh11b): Replace asserts in this file with if ...: raise ...
@contextlib.contextmanager
def _enter_graph(g, eager, creator_stack=None):
"""Context manager for selecting a graph and maybe eager mode."""
if eager:
with g.as_default(), context.eager_mode():
if creator_stack is not None:
g._variable_creator_stack = creator_stack # pylint: disable=protected-access
yield
else:
with g.as_default():
if creator_stack is not None:
g._variable_creator_stack = creator_stack # pylint: disable=protected-access
yield
def _cpu_device(device):
cpu_device = tf_device.DeviceSpec.from_string(device)
cpu_device = cpu_device.replace(device_type="CPU", device_index=0)
return cpu_device.to_string()
class _RequestedStop(Exception): # pylint: disable=g-bad-exception-name
pass
# _call_for_each_replica is not a member of MirroredStrategy so that it is
# not allowed to use anything specific to MirroredStrategy and thus
# can be shared with other distribution strategies.
# TODO(yuefengz): maybe create a common class for those who need to call this
# _call_for_each_replica.
def _call_for_each_replica(distribution, devices, fn, args, kwargs):
"""Run `fn` in separate threads, once per replica/worker device.
Args:
distribution: the DistributionStrategy object.
devices: the devices to run `fn` on (logical device 0 for each replica).
fn: function to run (will be run once per replica, each in its own thread).
args: positional arguments for `fn`
kwargs: keyword arguments for `fn`.
Returns:
Merged return value of `fn` across all replicas.
Raises:
RuntimeError: If fn() calls get_replica_context().merge_call() a different
number of times from the available devices.
"""
# TODO(josh11b): Add this option once we add synchronization to variable
# creation. Until then, this is pretty unsafe to use.
run_concurrently = False
if not context.executing_eagerly():
# Needed for per-thread device, etc. contexts in graph mode.
ops.get_default_graph().switch_to_thread_local()
coord = coordinator.Coordinator(clean_stop_exception_types=(_RequestedStop,))
shared_variable_store = {}
# TODO(isaprykin): Create these threads once instead of during every call.
threads = []
for index in range(len(devices)):
variable_creator_fn = shared_variable_creator.make_fn(
shared_variable_store, index)
t = _MirroredReplicaThread(
distribution, coord, index, devices, variable_creator_fn, fn,
values.select_replica(index, args),
values.select_replica(index, kwargs))
threads.append(t)
for t in threads:
t.start()
# When `fn` starts `should_run` event is set on _MirroredReplicaThread
# (`MRT`) threads. The execution waits until
# `MRT.has_paused` is set, which indicates that either `fn` is
# complete or a `get_replica_context().merge_call()` is called. If `fn` is
# complete, then `MRT.done` is set to True. Otherwise, arguments
# of `get_replica_context().merge_call` from all paused threads are grouped
# and the `merge_fn` is performed. Results of the
# `get_replica_context().merge_call` are then set to `MRT.merge_result`.
# Each such `get_replica_context().merge_call` call returns the
# `MRT.merge_result` for that thread when `MRT.should_run` event
# is reset again. Execution of `fn` resumes.
try:
with coord.stop_on_exception():
all_done = False
while not all_done and not coord.should_stop():
done = []
if run_concurrently:
for t in threads:
t.should_run.set()
for t in threads:
t.has_paused.wait()
t.has_paused.clear()
if coord.should_stop():
return None
done.append(t.done)
else:
for t in threads:
t.should_run.set()
t.has_paused.wait()
t.has_paused.clear()
if coord.should_stop():
return None
done.append(t.done)
if coord.should_stop():
return None
all_done = all(done)
if not all_done:
if any(done):
raise RuntimeError("Some replicas made a different number of "
"replica_context().merge_call() calls.")
# get_replica_context().merge_call() case
merge_args = values.regroup(tuple(t.merge_args for t in threads))
merge_kwargs = values.regroup(tuple(t.merge_kwargs for t in threads))
# We capture the name_scope of the MRT when we call merge_fn
# to ensure that if we have opened a name scope in the MRT,
# it will be respected when executing the merge function. We only
# capture the name_scope from the first MRT and assume it is
# the same for all other MRTs.
mtt_captured_name_scope = threads[0].captured_name_scope
mtt_captured_var_scope = threads[0].captured_var_scope
# Capture and merge the control dependencies from all the threads.
mtt_captured_control_deps = set()
for t in threads:
mtt_captured_control_deps.update(t.captured_control_deps)
with ops.name_scope(mtt_captured_name_scope),\
ops.control_dependencies(mtt_captured_control_deps), \
variable_scope.variable_scope(mtt_captured_var_scope):
merge_result = threads[0].merge_fn(distribution, *merge_args,
**merge_kwargs)
for r, t in enumerate(threads):
t.merge_result = values.select_replica(r, merge_result)
finally:
for t in threads:
t.should_run.set()
coord.join(threads)
return values.regroup(tuple(t.main_result for t in threads))
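# Hedged, TensorFlow-free sketch (simplified; the class name is hypothetical)
# of the should_run / has_paused event handshake described above: the main
# thread signals a replica thread to run, then waits until it pauses or
# finishes.
import threading

class _TinyReplicaThread(threading.Thread):

  def __init__(self, fn):
    super(_TinyReplicaThread, self).__init__()
    self.fn = fn
    self.should_run = threading.Event()
    self.has_paused = threading.Event()
    self.done = False
    self.main_result = None

  def run(self):
    self.should_run.wait()
    self.should_run.clear()
    self.main_result = self.fn()
    self.done = True
    self.has_paused.set()

t = _TinyReplicaThread(lambda: "replica result")
t.start()
t.should_run.set()   # tell the replica thread to execute fn
t.has_paused.wait()  # block until it pauses or finishes
assert t.done and t.main_result == "replica result"
t.join()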
def _is_device_list_single_worker(devices):
"""Checks whether the devices list is for single or multi-worker.
Args:
devices: a list of device strings or tf.config.LogicalDevice objects, for
either local or for remote devices.
Returns:
a boolean indicating whether these device strings are for local or for
remote.
Raises:
ValueError: if device strings are not consistent.
"""
specs = []
for d in devices:
name = d.name if isinstance(d, context.LogicalDevice) else d
specs.append(tf_device.DeviceSpec.from_string(name))
num_workers = len({(d.job, d.task, d.replica) for d in specs})
all_local = all(d.job in (None, "localhost") for d in specs)
any_local = any(d.job in (None, "localhost") for d in specs)
if any_local and not all_local:
raise ValueError("Local device string cannot have job specified other "
"than 'localhost'")
if num_workers == 1 and not all_local:
if any(d.task is None for d in specs):
raise ValueError("Remote device string must have task specified.")
return num_workers == 1
def _cluster_spec_to_device_list(cluster_spec, num_gpus_per_worker):
"""Returns a device list given a cluster spec."""
cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)
devices = []
for task_type in ("chief", "worker"):
for task_id in range(len(cluster_spec.as_dict().get(task_type, []))):
if num_gpus_per_worker == 0:
devices.append("/job:%s/task:%d" % (task_type, task_id))
else:
devices.extend([
"/job:%s/task:%d/device:GPU:%i" % (task_type, task_id, gpu_id)
for gpu_id in range(num_gpus_per_worker)
])
return devices
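# Hedged illustration (plain Python, no cluster required) of the mapping above:
# for a hypothetical cluster spec with one chief, two workers, and 2 GPUs per
# worker, the expected device strings look like this.
_example_cluster = {"chief": ["host0:2222"], "worker": ["host1:2222", "host2:2222"]}
_expected_devices = [
    "/job:%s/task:%d/device:GPU:%d" % (task_type, task_id, gpu_id)
    for task_type in ("chief", "worker")
    for task_id in range(len(_example_cluster.get(task_type, [])))
    for gpu_id in range(2)
]
assert _expected_devices[0] == "/job:chief/task:0/device:GPU:0"
assert len(_expected_devices) == 6  # 3 tasks * 2 GPUs per worker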
def _group_device_list(devices):
"""Groups the devices list by task_type and task_id.
Args:
devices: a list of device strings for remote devices.
Returns:
    a dict mapping each task_type to a list of device-string lists, one entry
    per task_id in ascending order.
"""
assert not _is_device_list_single_worker(devices)
device_dict = {}
for d in devices:
d_spec = tf_device.DeviceSpec.from_string(d)
# Create an entry for the task_type.
if d_spec.job not in device_dict:
device_dict[d_spec.job] = []
# Fill the device list for task_type until it covers the task_id.
while len(device_dict[d_spec.job]) <= d_spec.task:
device_dict[d_spec.job].append([])
device_dict[d_spec.job][d_spec.task].append(d)
return device_dict
def _is_gpu_device(device):
return tf_device.DeviceSpec.from_string(device).device_type == "GPU"
def _infer_num_gpus_per_worker(devices):
"""Infers the number of GPUs on each worker.
Currently to make multi-worker cross device ops work, we need all workers to
have the same number of GPUs.
Args:
devices: a list of device strings, can be either local devices or remote
devices.
Returns:
number of GPUs per worker.
Raises:
    ValueError if workers have different numbers of GPUs or GPU indices are not
consecutive and starting from 0.
"""
if _is_device_list_single_worker(devices):
return sum(1 for d in devices if _is_gpu_device(d))
else:
device_dict = _group_device_list(devices)
num_gpus = None
for _, devices_in_task in device_dict.items():
for device_in_task in devices_in_task:
if num_gpus is None:
num_gpus = sum(1 for d in device_in_task if _is_gpu_device(d))
# Verify other workers have the same number of GPUs.
elif num_gpus != sum(1 for d in device_in_task if _is_gpu_device(d)):
raise ValueError("All workers should have the same number of GPUs.")
for d in device_in_task:
d_spec = tf_device.DeviceSpec.from_string(d)
if (d_spec.device_type == "GPU" and
d_spec.device_index >= num_gpus):
raise ValueError("GPU `device_index` on a worker should be "
"consecutive and start from 0.")
return num_gpus
def all_local_devices(num_gpus=None):
devices = config.list_logical_devices("GPU")
if num_gpus is not None:
devices = devices[:num_gpus]
return devices or config.list_logical_devices("CPU")
def all_devices():
devices = []
tfconfig = TFConfigClusterResolver()
if tfconfig.cluster_spec().as_dict():
devices = _cluster_spec_to_device_list(tfconfig.cluster_spec(),
context.num_gpus())
return devices if devices else all_local_devices()
@tf_export("distribute.MirroredStrategy", v1=[]) # pylint: disable=g-classes-have-attributes
class MirroredStrategy(distribute_lib.Strategy):
"""Synchronous training across multiple replicas on one machine.
This strategy is typically used for training on one
machine with multiple GPUs. For TPUs, use
`tf.distribute.experimental.TPUStrategy`. To use `MirroredStrategy` with
multiple workers, please refer to
`tf.distribute.experimental.MultiWorkerMirroredStrategy`.
For example, a variable created under a `MirroredStrategy` is a
`MirroredVariable`. If no devices are specified in the constructor argument of
the strategy then it will use all the available GPUs. If no GPUs are found, it
will use the available CPUs. Note that TensorFlow treats all CPUs on a
machine as a single device, and uses threads internally for parallelism.
>>> strategy = tf.distribute.MirroredStrategy()
>>> with strategy.scope():
... x = tf.Variable(1.)
>>> x
MirroredVariable:{
0: <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=1.0>
}
While using distribution strategies, all the variable creation should be done
within the strategy's scope. This will replicate the variables across all the
replicas and keep them in sync using an all-reduce algorithm.
Variables created inside a `MirroredStrategy` which is wrapped with a
`tf.function` are still `MirroredVariables`.
>>> x = []
>>> @tf.function # Wrap the function with tf.function.
... def create_variable():
... if not x:
... x.append(tf.Variable(1.))
>>> strategy = tf.distribute.MirroredStrategy()
>>> with strategy.scope():
... create_variable()
... print (x[0])
MirroredVariable:{
0: <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=1.0>
}
`experimental_distribute_dataset` can be used to distribute the dataset across
the replicas when writing your own training loop. If you are using `.fit` and
`.compile` methods available in `tf.keras`, then `tf.keras` will handle the
distribution for you.
For example:
```python
my_strategy = tf.distribute.MirroredStrategy()
with my_strategy.scope():
@tf.function
def distribute_train_epoch(dataset):
def replica_fn(input):
# process input and return result
return result
total_result = 0
for x in dataset:
per_replica_result = my_strategy.experimental_run_v2(replica_fn,
args=(x,))
total_result += my_strategy.reduce(tf.distribute.ReduceOp.SUM,
per_replica_result, axis=None)
return total_result
dist_dataset = my_strategy.experimental_distribute_dataset(dataset)
for _ in range(EPOCHS):
train_result = distribute_train_epoch(dist_dataset)
```
Args:
devices: a list of device strings such as `['/gpu:0', '/gpu:1']`. If
`None`, all available GPUs are used. If no GPUs are found, CPU is used.
    cross_device_ops: optional, a descendant of `CrossDeviceOps`. If this is not
set, `NcclAllReduce()` will be used by default. One would customize this
if NCCL isn't available or if a special implementation that exploits
the particular hardware is available.
"""
def __init__(self, devices=None, cross_device_ops=None):
extended = MirroredExtended(
self, devices=devices, cross_device_ops=cross_device_ops)
super(MirroredStrategy, self).__init__(extended)
distribute_lib.distribution_strategy_gauge.get_cell("V2").set(
"MirroredStrategy")
@tf_export(v1=["distribute.MirroredStrategy"])
class MirroredStrategyV1(distribute_lib.StrategyV1): # pylint: disable=g-missing-docstring
__doc__ = MirroredStrategy.__doc__
def __init__(self, devices=None, cross_device_ops=None):
extended = MirroredExtended(
self, devices=devices, cross_device_ops=cross_device_ops)
super(MirroredStrategyV1, self).__init__(extended)
distribute_lib.distribution_strategy_gauge.get_cell("V1").set(
"MirroredStrategy")
# TODO(josh11b): Switch to V2 when we no longer need to support tf.compat.v1.
class MirroredExtended(distribute_lib.StrategyExtendedV1):
"""Implementation of MirroredStrategy."""
def __init__(self, container_strategy, devices=None, cross_device_ops=None):
super(MirroredExtended, self).__init__(container_strategy)
if context.executing_eagerly():
if devices and not _is_device_list_single_worker(devices):
raise RuntimeError("In-graph multi-worker training with "
"`MirroredStrategy` is not supported in eager mode.")
else:
if TFConfigClusterResolver().cluster_spec().as_dict():
# if you are executing in eager mode, only the single machine code
# path is supported.
logging.info("Initializing local devices since in-graph multi-worker "
"training with `MirroredStrategy` is not supported in "
"eager mode. TF_CONFIG will be ignored when "
"when initializing `MirroredStrategy`.")
devices = devices or all_local_devices()
else:
devices = devices or all_devices()
assert devices, ("Got an empty `devices` list and unable to recognize "
"any local devices.")
self._cross_device_ops = cross_device_ops
self._initialize_strategy(devices)
self._cfer_fn_cache = weakref.WeakKeyDictionary()
# TODO(b/128995245): Enable last partial batch support in graph mode.
if ops.executing_eagerly_outside_functions():
self.experimental_enable_get_next_as_optional = True
def _initialize_strategy(self, devices):
# The _initialize_strategy method is intended to be used by distribute
# coordinator as well.
assert devices, "Must specify at least one device."
devices = tuple(device_util.resolve(d) for d in devices)
assert len(set(devices)) == len(devices), (
"No duplicates allowed in `devices` argument: %s" % (devices,))
if _is_device_list_single_worker(devices):
self._initialize_single_worker(devices)
else:
self._initialize_multi_worker(devices)
def _initialize_single_worker(self, devices):
"""Initializes the object for single-worker training."""
self._devices = tuple(device_util.canonicalize(d) for d in devices)
self._input_workers = input_lib.InputWorkers(
((device_util.canonicalize("/device:CPU:0", devices[0]), devices),))
self._inferred_cross_device_ops = None if self._cross_device_ops else (
cross_device_ops_lib.choose_the_best(devices))
self._host_input_device = numpy_dataset.SingleDevice(
self._input_workers.worker_devices[0])
self._is_multi_worker_training = False
logging.info("Using MirroredStrategy with devices %r", devices)
device_spec = tf_device.DeviceSpec.from_string(
self._input_workers.worker_devices[0])
# Ensures when we enter strategy.scope() we use the correct default device
if device_spec.job is not None and device_spec.job != "localhost":
self._default_device = "/job:%s/replica:%d/task:%d" % (
device_spec.job, device_spec.replica, device_spec.task)
def _initialize_multi_worker(self, devices):
"""Initializes the object for multi-worker training."""
device_dict = _group_device_list(devices)
workers = []
worker_devices = []
for job in ("chief", "worker"):
for task in range(len(device_dict.get(job, []))):
worker = "/job:%s/task:%d" % (job, task)
workers.append(worker)
worker_devices.append((worker, device_dict[job][task]))
# Setting `_default_device` will add a device scope in the
# distribution.scope. We set the default device to the first worker. When
# users specify device under distribution.scope by
# with tf.device("/cpu:0"):
# ...
# their ops will end up on the cpu device of its first worker, e.g.
# "/job:worker/task:0/device:CPU:0". Note this is not used in replica mode.
self._default_device = workers[0]
self._host_input_device = numpy_dataset.SingleDevice(workers[0])
self._devices = tuple(devices)
self._input_workers = input_lib.InputWorkers(worker_devices)
self._is_multi_worker_training = True
if len(workers) > 1:
if not isinstance(self._cross_device_ops,
cross_device_ops_lib.MultiWorkerAllReduce):
raise ValueError(
"In-graph multi-worker training with `MirroredStrategy` is not "
"supported.")
self._inferred_cross_device_ops = self._cross_device_ops
else:
# TODO(yuefengz): make `choose_the_best` work with device strings
# containing job names.
self._inferred_cross_device_ops = cross_device_ops_lib.NcclAllReduce()
logging.info("Using MirroredStrategy with remote devices %r", devices)
def _get_variable_creator_initial_value(self,
replica_id,
device,
primary_var,
**kwargs):
"""Return the initial value for variables on a replica."""
if replica_id == 0:
return kwargs["initial_value"]
else:
assert primary_var is not None
assert device is not None
assert kwargs is not None
def initial_value_fn():
if context.executing_eagerly() or ops.inside_function():
init_value = primary_var.value()
return array_ops.identity(init_value)
else:
with ops.device(device):
init_value = primary_var.initial_value
return array_ops.identity(init_value)
return initial_value_fn
def _create_variable(self, next_creator, *args, **kwargs):
"""Create a mirrored variable. See `DistributionStrategy.scope`."""
colocate_with = kwargs.pop("colocate_with", None)
if colocate_with is None:
devices = self._devices
elif isinstance(colocate_with, numpy_dataset.SingleDevice):
with ops.device(colocate_with.device):
return next_creator(*args, **kwargs)
else:
devices = colocate_with.devices
def _real_mirrored_creator(*args, **kwargs): # pylint: disable=g-missing-docstring
value_list = []
for i, d in enumerate(devices):
with ops.device(d):
kwargs["initial_value"] = self._get_variable_creator_initial_value(
replica_id=i,
device=d,
primary_var=value_list[0] if value_list else None,
**kwargs)
if i > 0:
# Give replicas meaningful distinct names:
var0name = value_list[0].name.split(":")[0]
# We append a / to variable names created on replicas with id > 0 to
# ensure that we ignore the name scope and instead use the given
# name as the absolute name of the variable.
kwargs["name"] = "%s/replica_%d/" % (var0name, i)
with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
# Don't record operations (e.g. other variable reads) during
# variable creation.
with tape.stop_recording():
v = next_creator(*args, **kwargs)
assert not isinstance(v, values.DistributedVariable)
value_list.append(v)
return value_list
return values.create_mirrored_variable(
self._container_strategy(), _real_mirrored_creator,
values.MirroredVariable, values.SyncOnReadVariable, *args, **kwargs)
def _validate_colocate_with_variable(self, colocate_with_variable):
values.validate_colocate_distributed_variable(colocate_with_variable, self)
def _make_dataset_iterator(self, dataset):
return input_lib.DatasetIterator(
dataset,
self._input_workers,
self._container_strategy(),
split_batch_by=self._num_replicas_in_sync)
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
input_contexts = []
num_workers = self._input_workers.num_workers
for i in range(num_workers):
input_contexts.append(distribute_lib.InputContext(
num_input_pipelines=num_workers,
input_pipeline_id=i,
num_replicas_in_sync=self._num_replicas_in_sync))
return input_lib.InputFunctionIterator(input_fn, self._input_workers,
input_contexts,
self._container_strategy())
def _experimental_distribute_dataset(self, dataset):
return input_lib.get_distributed_dataset(
dataset,
self._input_workers,
self._container_strategy(),
split_batch_by=self._num_replicas_in_sync)
def _experimental_make_numpy_dataset(self, numpy_input, session):
return numpy_dataset.one_host_numpy_dataset(
numpy_input, self._host_input_device, session)
def _experimental_distribute_datasets_from_function(self, dataset_fn):
input_contexts = []
num_workers = self._input_workers.num_workers
for i in range(num_workers):
input_contexts.append(distribute_lib.InputContext(
num_input_pipelines=num_workers,
input_pipeline_id=i,
num_replicas_in_sync=self._num_replicas_in_sync))
return input_lib.get_distributed_datasets_from_function(
dataset_fn,
self._input_workers,
input_contexts,
self._container_strategy())
# TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.
def _experimental_run_steps_on_iterator(self, fn, iterator, iterations,
initial_loop_values=None):
if initial_loop_values is None:
initial_loop_values = {}
initial_loop_values = nest.flatten(initial_loop_values)
ctx = input_lib.MultiStepContext()
def body(i, *args):
"""A wrapper around `fn` to create the while loop body."""
del args
fn_result = fn(ctx, iterator.get_next())
for (name, output) in ctx.last_step_outputs.items():
# Convert all outputs to tensors, potentially from `DistributedValues`.
ctx.last_step_outputs[name] = self._local_results(output)
flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)
with ops.control_dependencies([fn_result]):
return [i + 1] + flat_last_step_outputs
# We capture the control_flow_context at this point, before we run `fn`
# inside a while_loop. This is useful in cases where we might need to exit
# these contexts and get back to the outer context to do some things, for
# e.g. create an op which should be evaluated only once at the end of the
# loop on the host. One such usage is in creating metrics' value op.
self._outer_control_flow_context = (
ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access
cond = lambda i, *args: i < iterations
i = constant_op.constant(0)
loop_result = control_flow_ops.while_loop(
cond, body, [i] + initial_loop_values, name="",
parallel_iterations=1, back_prop=False, swap_memory=False,
return_same_structure=True)
del self._outer_control_flow_context
ctx.run_op = control_flow_ops.group(loop_result)
# Convert the last_step_outputs from a list to the original dict structure
# of last_step_outputs.
last_step_tensor_outputs = loop_result[1:]
last_step_tensor_outputs_dict = nest.pack_sequence_as(
ctx.last_step_outputs, last_step_tensor_outputs)
for name, reduce_op in ctx._last_step_outputs_reduce_ops.items(): # pylint: disable=protected-access
output = last_step_tensor_outputs_dict[name]
# For outputs that have already been reduced, wrap them in a Mirrored
# container, else in a PerReplica container.
if reduce_op is None:
last_step_tensor_outputs_dict[name] = values.regroup(output)
else:
assert len(output) == 1
last_step_tensor_outputs_dict[name] = output[0]
ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access
return ctx
def _broadcast_to(self, tensor, destinations):
# This is both a fast path for Python constants, and a way to delay
# converting Python values to a tensor until we know what type it
# should be converted to. Otherwise we have trouble with:
# global_step.assign_add(1)
# since the `1` gets broadcast as an int32 but global_step is int64.
if isinstance(tensor, (float, int)):
return tensor
# TODO(josh11b): In eager mode, use one thread per device, or async mode.
if not destinations:
# TODO(josh11b): Use current logical device instead of 0 here.
destinations = self._devices
return self._get_cross_device_ops().broadcast(tensor, destinations)
def _call_for_each_replica(self, fn, args, kwargs):
if isinstance(fn, def_function.Function):
wrapped = self._cfer_fn_cache.get(fn)
if wrapped is None:
# We need to wrap fn such that it triggers _call_for_each_replica inside
# the tf.function.
wrapped = fn._clone( # pylint: disable=protected-access
python_function=functools.partial(self._call_for_each_replica,
fn.python_function))
self._cfer_fn_cache[fn] = wrapped
return wrapped(args, kwargs)
if context.executing_eagerly():
logging.log_first_n(logging.WARN, "Using %s eagerly has significant "
"overhead currently. We will be working on improving "
"this in the future, but for now please wrap "
"`call_for_each_replica` or `experimental_run` or "
"`experimental_run_v2` inside a tf.function to get "
"the best performance." %
self._container_strategy().__class__.__name__, 5)
else:
# When a tf.function is wrapped to trigger _call_for_each_replica (see
# the other branch above), AutoGraph stops conversion at
# _call_for_each_replica itself (TF library functions are whitelisted).
# This makes sure that the Python function that originally passed to
# the tf.function is still converted.
fn = autograph.tf_convert(fn, autograph_ctx.control_status_ctx())
return _call_for_each_replica(self._container_strategy(), self._devices,
fn, args, kwargs)
def _configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
del task_type, task_id
if session_config:
session_config.CopyFrom(self._update_config_proto(session_config))
if cluster_spec:
# TODO(yuefengz): remove the following code once cluster_resolver is
# added.
num_gpus_per_worker = _infer_num_gpus_per_worker(self._devices)
multi_worker_devices = _cluster_spec_to_device_list(
cluster_spec, num_gpus_per_worker)
self._initialize_multi_worker(multi_worker_devices)
def _update_config_proto(self, config_proto):
updated_config = copy.deepcopy(config_proto)
updated_config.isolate_session_state = True
return updated_config
def _get_cross_device_ops(self):
return self._cross_device_ops or self._inferred_cross_device_ops
def _reduce_to(self, reduce_op, value, destinations):
if (isinstance(value, values.Mirrored) and
reduce_op == reduce_util.ReduceOp.MEAN):
return value
assert not isinstance(value, values.Mirrored)
if not isinstance(value, values.DistributedValues):
# This function handles reducing values that are not PerReplica or
# Mirrored values. For example, the same value could be present on all
# replicas in which case `value` would be a single value or value could
# be 0.
return cross_device_ops_lib.reduce_non_distributed_value(
reduce_op, value, destinations, self._num_replicas_in_sync)
return self._get_cross_device_ops().reduce(
reduce_op, value, destinations=destinations)
def _batch_reduce_to(self, reduce_op, value_destination_pairs):
return self._get_cross_device_ops().batch_reduce(reduce_op,
value_destination_pairs)
def _update(self, var, fn, args, kwargs, group):
# TODO(josh11b): In eager mode, use one thread per device.
assert isinstance(var, values.DistributedVariable)
updates = []
for i, v in enumerate(var.values):
name = "update_%d" % i
with ops.device(v.device), \
distribute_lib.UpdateContext(i), \
ops.name_scope(name):
# If args and kwargs are not mirrored, the value is returned as is.
updates.append(fn(v,
*values.select_replica_mirrored(i, args),
**values.select_replica_mirrored(i, kwargs)))
return values.update_regroup(self, updates, group)
def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
assert isinstance(colocate_with, tuple)
# TODO(josh11b): In eager mode, use one thread per device.
updates = []
for i, d in enumerate(colocate_with):
name = "update_%d" % i
with ops.device(d), distribute_lib.UpdateContext(i), ops.name_scope(name):
updates.append(fn(*values.select_replica_mirrored(i, args),
**values.select_replica_mirrored(i, kwargs)))
return values.update_regroup(self, updates, group)
def read_var(self, replica_local_var):
"""Read the aggregate value of a replica-local variable."""
if isinstance(replica_local_var, values.SyncOnReadVariable):
return replica_local_var._get_cross_replica() # pylint: disable=protected-access
assert isinstance(replica_local_var, values.Mirrored)
return array_ops.identity(replica_local_var.get())
def _local_results(self, val):
if isinstance(val, values.DistributedValues):
return val.values
return (val,)
def value_container(self, val):
return values.value_container(val)
@property
def _num_replicas_in_sync(self):
return len(self._devices)
@property
def worker_devices(self):
return self._devices
@property
def worker_devices_by_replica(self):
return [[d] for d in self._devices]
@property
def parameter_devices(self):
return self.worker_devices
@property
def experimental_between_graph(self):
return False
@property
def experimental_should_init(self):
return True
@property
def should_checkpoint(self):
return True
@property
def should_save_summary(self):
return True
def non_slot_devices(self, var_list):
del var_list
# TODO(josh11b): Should this be the last logical device instead?
return self._devices
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
"""`make_dataset_iterator` and `make_numpy_iterator` use global batch size.
`make_input_fn_iterator` assumes per-replica batching.
Returns:
Boolean.
"""
return True
def _in_multi_worker_mode(self):
"""Whether this strategy indicates working in multi-worker settings."""
return False
class _MirroredReplicaThread(threading.Thread):
"""A thread that runs() a function on a device."""
def __init__(self, dist, coord, replica_id, devices, variable_creator_fn,
fn, args, kwargs):
super(_MirroredReplicaThread, self).__init__()
self.coord = coord
self.distribution = dist
self.devices = devices
self.replica_id = replica_id
self.variable_creator_fn = variable_creator_fn
# State needed to run and return the results of `fn`.
self.main_fn = fn
self.main_args = args
self.main_kwargs = kwargs
self.main_result = None
self.done = False
# State needed to run the next merge_call() (if any) requested via
# ReplicaContext.
self.merge_fn = None
self.merge_args = None
self.merge_kwargs = None
self.merge_result = None
self.captured_name_scope = None
self.captured_var_scope = None
# We use a thread.Event for the main thread to signal when this
# thread should start running (`should_run`), and another for
# this thread to transfer control back to the main thread
# (`has_paused`, either when it gets to a
# `get_replica_context().merge_call` or when `fn` returns). In
# either case the event starts cleared, is signaled by calling
# set(). The receiving thread waits for the signal by calling
# wait() and then immediately clearing the event using clear().
self.should_run = threading.Event()
self.has_paused = threading.Event()
# These fields have to do with inheriting various contexts from the
# parent thread:
context.ensure_initialized()
ctx = context.context()
self.in_eager = ctx.executing_eagerly()
self.record_thread_local_summary_state()
self.record_thread_local_eager_context_state()
self.context_device_policy = (
pywrap_tfe.TFE_ContextGetDevicePlacementPolicy(
ctx._context_handle)) # pylint: disable=protected-access
self.graph = ops.get_default_graph()
with ops.init_scope():
self._init_in_eager = context.executing_eagerly()
self._init_graph = ops.get_default_graph()
self._variable_creator_stack = self.graph._variable_creator_stack[:] # pylint: disable=protected-access
self._var_scope = variable_scope.get_variable_scope()
# Adding a "/" at end lets us re-enter this scope later.
self._name_scope = self.graph.get_name_scope()
if self._name_scope:
self._name_scope += "/"
if self.replica_id > 0:
if not self._name_scope:
self._name_scope = ""
self._name_scope += "replica_%d/" % self.replica_id
def run(self):
self.should_run.wait()
self.should_run.clear()
try:
if self.coord.should_stop():
return
self.restore_thread_local_summary_state()
self.restore_thread_local_eager_context_state()
# TODO(josh11b): Use current logical device instead of 0 here.
with self.coord.stop_on_exception(), \
_enter_graph(self._init_graph, self._init_in_eager), \
_enter_graph(self.graph, self.in_eager,
self._variable_creator_stack), \
context.device_policy(self.context_device_policy), \
MirroredReplicaContext(self.distribution, constant_op.constant(
self.replica_id, dtypes.int32)), \
ops.device(self.devices[self.replica_id]), \
ops.name_scope(self._name_scope), \
variable_scope.variable_scope(
self._var_scope, reuse=self.replica_id > 0), \
variable_scope.variable_creator_scope(self.variable_creator_fn):
self.main_result = self.main_fn(*self.main_args, **self.main_kwargs)
self.done = True
finally:
self.has_paused.set()
def record_thread_local_summary_state(self):
"""Record the thread local summary state in self."""
# TODO(slebedev): is this still relevant? the referenced bug is closed.
summary_state = summary_ops_v2._summary_state # pylint: disable=protected-access
self._summary_step = summary_state.step
self._summary_writer = summary_state.writer
self._summary_recording = summary_state.is_recording
self._summary_recording_distribution_strategy = (
summary_state.is_recording_distribution_strategy)
def restore_thread_local_summary_state(self):
"""Restore thread local summary state from self."""
# TODO(slebedev): is this still relevant? the referenced bug is closed.
summary_state = summary_ops_v2._summary_state # pylint: disable=protected-access
summary_state.step = self._summary_step
summary_state.writer = self._summary_writer
summary_state.is_recording = self._summary_recording
summary_state.is_recording_distribution_strategy = (
self._summary_recording_distribution_strategy)
def record_thread_local_eager_context_state(self):
ctx = context.context()
eager_context_state = ctx._thread_local_data # pylint: disable=protected-access
self._eager_context_op_callbacks = eager_context_state.op_callbacks
# TODO(b/125892694): record other fields in EagerContext.
def restore_thread_local_eager_context_state(self):
ctx = context.context()
eager_context_state = ctx._thread_local_data # pylint: disable=protected-access
eager_context_state.op_callbacks = self._eager_context_op_callbacks
# TODO(b/125892694): record other fields in EagerContext.
class MirroredReplicaContext(distribute_lib.ReplicaContext):
"""ReplicaContext used in MirroredStrategy.extended.call_for_each_replica().
Opened in `_MirroredReplicaThread`, to allow the user to invoke
`MirroredStrategy`'s specific implementation of `merge_call()`,
which works by delegating the function and its arguments to
the main thread (the one that invoked
`MirroredStrategy.extended.call_for_each_replica()`).
"""
def _merge_call(self, fn, args, kwargs):
"""Delegate to the main thread to actually perform merge_call()."""
t = threading.current_thread() # a _MirroredReplicaThread
t.merge_fn = fn
t.merge_args = args
t.merge_kwargs = kwargs
t.captured_name_scope = t.graph.get_name_scope()
# Adding a "/" at end lets us re-enter this scope later.
if t.captured_name_scope:
t.captured_name_scope += "/"
t.captured_var_scope = variable_scope.get_variable_scope()
t.captured_control_deps = t.graph._current_control_dependencies() # pylint: disable=protected-access
# NOTE(priyag): Throw an error if there is a merge call in the middle of a
# `fn` passed to call_for_each_replica which changes the graph being used
# while calling `fn`. This can happen when the `fn` is decorated with
# `tf.function` and there is a merge_call in `fn`. This breaks because each
# thread tries to create a distinct tf.function. Each tf.function creation
# takes a lock, and so if there is a merge call in the middle, the lock is
# never released and subsequent replica threads cannot proceed to define
# their own functions. Checking for the graph being the same is one way for
# us to check this didn't happen.
if ops.get_default_graph() != t.graph:
raise RuntimeError(
"`merge_call` called while defining a new graph or a tf.function. "
"This can often happen if the function `fn` passed to "
"`strategy.experimental_run()` is decorated with "
"`@tf.function` (or contains a nested `@tf.function`), and `fn` "
"contains a synchronization point, such as aggregating gradients. "
"This behavior is not yet supported. Instead, please wrap the entire "
"call `strategy.experimental_run(fn)` in a `@tf.function`, and avoid "
"nested `tf.function`s that may potentially cross a synchronization "
"boundary.")
t.has_paused.set()
t.should_run.wait()
t.should_run.clear()
if t.coord.should_stop():
raise _RequestedStop()
return t.merge_result
@property
def devices(self):
distribute_lib.require_replica_context(self)
replica_id = tensor_util.constant_value(self._replica_id_in_sync_group)
return [self._strategy.extended.worker_devices_by_replica[replica_id]]
| 41.178704
| 109
| 0.700043
|
2e086935642f60093d6656dc19a0f580775e4b0c
| 10,538
|
py
|
Python
|
fsbackup/hashVolume.py
|
zeycus/fsbackup
|
2110405c211deb0b978a983a8a5a4abc700e6410
|
[
"MIT"
] | 1
|
2017-11-05T11:56:52.000Z
|
2017-11-05T11:56:52.000Z
|
fsbackup/hashVolume.py
|
zeycus/fsbackup
|
2110405c211deb0b978a983a8a5a4abc700e6410
|
[
"MIT"
] | null | null | null |
fsbackup/hashVolume.py
|
zeycus/fsbackup
|
2110405c211deb0b978a983a8a5a4abc700e6410
|
[
"MIT"
] | 1
|
2021-04-17T17:45:46.000Z
|
2021-04-17T17:45:46.000Z
|
#!/usr/bin/python3.6
"""
.. module:: hashVolume
:platform: Windows, linux
:synopsis: module for class :class:`HashVolume <hashVolume.HashVolume>`.
.. moduleauthor:: Miguel Garcia <zeycus@gmail.com>
"""
import os
import re
import shutil
import bisect
import random
from fsbackup.shaTools import sha256
from fsbackup.fileTools import sizeof_fmt, abspath2longabspath, safeFileCopy
from fsbackup.diskTools import getVolumeInfo
class HashVolume(object):
"""Class that handles a backup volume.
"""
def __init__(self, logger, locationPath, container, volId=None):
"""Constructor.
:param logger: internally stored logger, for feedback.
:param locationPath: root path for the volume. Usually something like ``'G:\'``
:type locationPath: str
:param container: database information regarding which hashes are stored in which volume.
:type container: Mongo_shelve
:param volId: volume id. Currently, the volume SerialNumber, but the hard-drive SerialNumber might be a better choice.
It is optional, if ``None``, it is obtained by the OS.
:type volId: str
"""
self.logger = logger
self.locationPath = locationPath
self.container = container
if volId is None:
if os.name == 'nt':
self.volId = getVolumeInfo(locationPath[0])['VolumeSerialNumber']
elif os.name == 'posix':
from fsbackup.diskTools import getMountPointSerialNumberLinux
self.volId = getMountPointSerialNumberLinux(self.locationPath)
else:
raise OSError("OS '%s' not supported." % os.name)
else:
self.volId = volId
def allVolumesHashes(self):
"""Returns the set of all hashes in any volume, according to the DDBB.
:rtype: set
"""
return set(self.container)
def recalculateContainer(self):
"""Rebuilds the DDBB volume information, traversing the files in the volume.
.. note::
This is something ordinarily you don't need to do, because the DDBB
is kept synchronized with the files in the volume. This method is to be used
in case for some reason the synchronization was broken.
"""
self.logger.debug("Rebuilding DDBB info for volume '%s'." % self.volId)
result = self.container.delete_many(dict(volume=self.volId))
self.logger.debug("Removed all (%s) documents." % result.deleted_count)
result = self.container.insert([dict(volume=self.volId, hash=fn, size=size) for (fn, size) in self.traverseFiles()])
self.logger.debug("Created %s new documents." % len(result))
def fnForHash(self, sha):
"""Returns the absolute path of the file for a given hash.
The first three letters in the hash are used to create a 3-levels folder system,
for instance hash ``4c07766937a4d241fafd3104426766f07c3ce9de7e577a76ad61eba512433cea``
corresponds to file
:file:`self.locationPath/4/c/0/4c07766937a4d241fafd3104426766f07c3ce9de7e577a76ad61eba512433cea`
:param sha: any valid SHA
:type sha: str
:rtype: str
"""
return os.path.join(self.locationPath, sha[0], sha[1], sha[2], sha)
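# Hedged, runnable illustration of the 3-level sharding that fnForHash
# implements (the "/backup" root below is a made-up example):
import os
_sha = "4c07766937a4d241fafd3104426766f07c3ce9de7e577a76ad61eba512433cea"
_path = os.path.join("/backup", _sha[0], _sha[1], _sha[2], _sha)
assert _path == os.path.join("/backup", "4", "c", "0", _sha)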
def storeFilename(self, filename, size, sha=None):
"""Creates a file in the volume.
The filename in the volume is the sha, not the original filename.
:param filename: location of the original file
:type filename: str
:param size: size in bytes of the original file
:type size: int
:param sha: the hash for the file. If not provided, it is calculated now
"""
filename = abspath2longabspath(filename)
if sha is None:
sha = sha256(filename)
fn_dest = self.fnForHash(sha)
        os.makedirs(os.path.dirname(fn_dest), exist_ok=True)  # Create the directory if it does not exist.
safeFileCopy(
src=filename,
dst=fn_dest,
)
self.container[sha] = dict(volume=self.volId, size=size)
def retrieveFilename(self, sha, filename):
"""Extracts a file from the volume, given its hash.
:param sha: the given hash
:type sha: str
:param filename: the filename of the file to be created
:type filename: str
"""
fn_source = abspath2longabspath(self.fnForHash(sha))
        os.makedirs(os.path.dirname(filename), exist_ok=True)  # Create the directory if it does not exist.
safeFileCopy(
src=fn_source,
dst=filename,
)
def remove(self, sha):
"""Deletes the file with a given hash.
:param sha: the given hash
:type sha: str
"""
os.remove(self.fnForHash(sha))
del self.container[sha]
def getAvailableSpace(self):
"""Returns the available free space in the volume drive, in bytes.
:rtype: int
"""
return shutil.disk_usage(self.locationPath).free
def augmentWithFiles(self, fDB):
"""Include in the volume backup for the files that need it.
It is done until all files are backed-up, on until the volume is full.
:param fDB: filesystem information in DDBB.
:type fDB: FileDB
:rtype: a pair (isFinished, hashList)
* isFinished tells whether the backup is complete. It is False if there are still
files that are not backed-up in any volume.
* hashList is the list of hashes of the created files.
.. note::
The strategy to choose which file to backup next is the following, but there are no
strong reasons for this, it should be changed if another is found better.
* While there is plenty of room in the volume (threshold currently set to 20GB) and there is room
for the biggest file that requires backup, files are chosen randomly.
The reason is that usually there are folders with huge files, others with only tiny files.
If files were processed by their folder order, a volume could end up with millions
of small files, while another could contain just hundreds of heavy files. Not that it would
be a problem in principle, but I thought it might be better to balance volumes, and
a simple strategy is the random choice.
* When the previous condition fails, choose the biggest file that fits, until none does.
"""
shasStored = self.allVolumesHashes()
filesizes = [(info['size'], fn, info['hash']) for (fn, info) in fDB if info['hash'] not in shasStored]
filesizes.sort()
shasAugmented = []
avail = self.getAvailableSpace()
while filesizes:
if avail < filesizes[0][0] + 100000: # Avoiding to use the very last free byte, just in case.
return shasAugmented, False
# Choice of file to backup.
if (avail > 20 * 2**30) and (avail > filesizes[-1][0]):
pos = random.randint(0, len(filesizes) - 1)
else:
pos = bisect.bisect_right(filesizes, (avail, '', '') ) - 1 # The biggest file that fits
if not (0 <= pos < len(filesizes)): # Just checking, this should never happen
raise Exception("File chosen out of range")
sizeFound, fnFound, shaFound = filesizes[pos]
self.logger.debug("Including new file '%s (%s)'. Available: %s" % (fnFound, sizeof_fmt(sizeFound), sizeof_fmt(avail)))
self.storeFilename(
filename=fDB.compFn(fnFound),
size=sizeFound,
sha=shaFound,
)
avail = self.getAvailableSpace()
del filesizes[pos]
shasAugmented.append(shaFound)
return shasAugmented, True
def cleanOldHashes(self, totalHashesNeeded):
"""Removes files that are no longer necessary.
Returns the number of files removed.
:param totalHashesNeeded: hashes of files that need to be backed-up.
:type totalHashesNeeded: set
:rtype: int
"""
nbDeleted = 0
for sha, _ in self:
if sha not in totalHashesNeeded:
self.remove(sha)
nbDeleted += 1
return nbDeleted
def checkout(self, fDB, sourcePath, destPath):
"""Rebuilds the filesystem, or a subfolder, from the backup content.
Returns a list of the filenames (in the original filesystem) that were restored.
:param fDB: filesystem information in DDBB.
:type fDB: FileDB
:param sourcePath: path in the filesystem that you want restored
:type sourcePath: str
:param destPath: location where you want the files created
:type destPath: str
:rtype: list of str
"""
hashesVolume = set(sha for sha, size in self)
filesFound = []
nbFilesToCheck = len(fDB) # Numero total ficheros en fDB, no todos estaran en sourcePath.
for ind, (fn, info) in enumerate(fDB):
if info['hash'] in hashesVolume: # Este volumen contiene el fichero buscado
relP = os.path.relpath(fn, sourcePath)
if relP[0] != '.': # fn esta en el path del que hacemos checkout
destFn = os.path.join(destPath, relP)
self.logger.debug("File %d of %d, copying '%s' to '%s'" % (ind+1, nbFilesToCheck, fn, destFn))
self.retrieveFilename(
sha=info['hash'],
filename=destFn,
)
filesFound.append(fn)
return filesFound
def traverseFiles(self):
"""Iterator over pairs (hash, size) for the present volume, checking which actual files are stored in it."""
for root, _, files in os.walk(self.locationPath):
for fn in files:
if re.match(r"^[a-fA-F0-9]{64}$", fn): # Por filtrar los tipicos ficheros que crea el SO y no son de SHA-256
fnComp = os.path.join(root, fn)
fnStat = os.stat(abspath2longabspath(fnComp))
yield fn, fnStat.st_size
def __iter__(self):
"""Iterator over pairs (hash, size) for the present volume in the DDBB"""
for doc in self.container.find(dict(volume=self.volId)):
yield (doc['hash'], doc['size'])
| 40.068441
| 130
| 0.614443
|
d8fc18f93ffe2ce1c7c6ecb433c051a323d915ab
| 1,454
|
py
|
Python
|
pisa/stages/osc/prob3numba/test_numba.py
|
torkjellsdatter/pisa
|
7b26b0ac40c873a87786286acfd1c96abf724a99
|
[
"Apache-2.0"
] | null | null | null |
pisa/stages/osc/prob3numba/test_numba.py
|
torkjellsdatter/pisa
|
7b26b0ac40c873a87786286acfd1c96abf724a99
|
[
"Apache-2.0"
] | null | null | null |
pisa/stages/osc/prob3numba/test_numba.py
|
torkjellsdatter/pisa
|
7b26b0ac40c873a87786286acfd1c96abf724a99
|
[
"Apache-2.0"
] | null | null | null |
'''
Test functions used during developpement of the osc. code
Please ignore unless you are the author
'''
import numpy as np
import time
from numba import guvectorize, SmartArray
from pisa import TARGET
from pisa.utils.numba_tools import *
@myjit
def sum_row_kernel(mix, bla, inp, out):
C = cuda.local.array(shape=(3,3), dtype=ftype)
D = cuda.local.array(shape=(3), dtype=ctype)
E = cuda.local.array(shape=(3), dtype=ctype)
matrix_dot_matrix(mix, mix, C)
D[0] = 0.+2.j
D[1] = 1.+2.j
D[2] = 1.+2.j
matrix_dot_vector(C,D,E)
bla *= 0.1
out[0] += E[1].real * bla.real + inp[0]
@guvectorize(['void(float64[:,:], complex128, int32[:], int32[:])'], '(a,b),(),(f)->()', target=TARGET)
def sum_row(mix, bla, inp, out):
sum_row_kernel(mix, bla, inp, out)
def main():
print 'ftype=',ftype
# hist arrays
mix = np.ones((3,3), dtype=np.float64)
n = 1000000
inp = np.arange(3*n, dtype=np.int32).reshape(n, 3)
out = np.ones((n), dtype=np.int32)
inp = SmartArray(inp)
out = SmartArray(out)
start_t = time.time()
sum_row(mix, 42.+2j, inp.get(WHERE), out=out.get(WHERE))
end_t = time.time()
print 'took %.5f'%(end_t - start_t)
start_t = time.time()
sum_row(mix, 42.+2j, inp.get(WHERE), out=out.get(WHERE))
end_t = time.time()
print 'took %.5f'%(end_t - start_t)
out.mark_changed(WHERE)
print out.get('host')
if __name__ == '__main__':
main()
| 25.964286
| 103
| 0.618982
|
02f97cdd11176df789fe57713f210b2ba3dfa994
| 288
|
py
|
Python
|
vnpy/trader/event.py
|
erikgqp8645/vnpy
|
b9f87bfb0a1acdee071d31ac17a3e0ecd83dc11d
|
[
"MIT"
] | null | null | null |
vnpy/trader/event.py
|
erikgqp8645/vnpy
|
b9f87bfb0a1acdee071d31ac17a3e0ecd83dc11d
|
[
"MIT"
] | null | null | null |
vnpy/trader/event.py
|
erikgqp8645/vnpy
|
b9f87bfb0a1acdee071d31ac17a3e0ecd83dc11d
|
[
"MIT"
] | null | null | null |
"""
Event type string used in VN Trader.
VN Trader中使用的事件类型字符串
"""
from vnpy.event import EVENT_TIMER # noqa
EVENT_TICK = "eTick."
EVENT_TRADE = "eTrade."
EVENT_ORDER = "eOrder."
EVENT_POSITION = "ePosition."
EVENT_ACCOUNT = "eAccount."
EVENT_CONTRACT = "eContract."
EVENT_LOG = "eLog"
| 19.2
| 42
| 0.732639
|
dd6bdc8931e9d41de91c918bbe5ba6455ed6eaf4
| 1,327
|
py
|
Python
|
vendor/github.com/tensorflow/tensorflow/tensorflow/python/autograph/utils/testing.py
|
owennewo/kfserving
|
89f73c87525b8e06ea799f69f2979c4ad272fcb3
|
[
"Apache-2.0"
] | 52
|
2018-11-12T06:39:35.000Z
|
2022-03-08T05:31:27.000Z
|
tensorflow/python/autograph/utils/testing.py
|
shekharpalit/tensorflow
|
6aa83398ab03bfae822f36772757097bcb98b6ed
|
[
"Apache-2.0"
] | 30
|
2016-10-04T15:38:08.000Z
|
2020-07-16T12:09:33.000Z
|
tensorflow/python/autograph/utils/testing.py
|
shekharpalit/tensorflow
|
6aa83398ab03bfae822f36772757097bcb98b6ed
|
[
"Apache-2.0"
] | 36
|
2017-07-27T21:12:40.000Z
|
2022-02-03T16:45:56.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
def fake_tf():
"""Creates a fake module that looks like TensorFlow, for testing."""
mod = imp.new_module('tensorflow')
mod_contents = dict()
mod_contents.update(gen_math_ops.__dict__)
mod_contents.update(math_ops.__dict__)
mod_contents.update(ops.__dict__)
mod_contents.update(mod.__dict__)
mod.__dict__.update(mod_contents)
return mod
| 34.921053
| 80
| 0.73474
|
3bde851068b577f248c991b888a87c485b679049
| 978
|
py
|
Python
|
learning_logs/models.py
|
kevinbowen777/learning_log
|
ec81918ae92d4859591f840274dd88a37afa4998
|
[
"MIT"
] | null | null | null |
learning_logs/models.py
|
kevinbowen777/learning_log
|
ec81918ae92d4859591f840274dd88a37afa4998
|
[
"MIT"
] | null | null | null |
learning_logs/models.py
|
kevinbowen777/learning_log
|
ec81918ae92d4859591f840274dd88a37afa4998
|
[
"MIT"
] | null | null | null |
# from django.contrib.auth.models import User
from django.contrib.auth import get_user_model
from django.db import models
class Topic(models.Model):
"""A topic the user is learning about."""
text = models.CharField(max_length=200)
date_added = models.DateTimeField(auto_now_add=True)
owner = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
def __str__(self):
"""Return a string representation of the model."""
return self.text
class Entry(models.Model):
"""Something specific learned about a topic."""
topic = models.ForeignKey(Topic, on_delete=models.CASCADE)
text = models.TextField()
date_added = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name_plural = "entries"
def __str__(self):
"""Return a string representation of the model."""
if len(self.text[:]) < 50:
return f"{self.text}"
else:
return f"{self.text[:50]}..."
| 28.764706
| 73
| 0.669734
|
b2002ae2af8b29551859ced92a76ec5965d76a0f
| 1,366
|
py
|
Python
|
news.py
|
sarveshwar-s/stockcast
|
6a71fd227b1fd2ea86d1465756be383a156467e5
|
[
"Apache-2.0"
] | null | null | null |
news.py
|
sarveshwar-s/stockcast
|
6a71fd227b1fd2ea86d1465756be383a156467e5
|
[
"Apache-2.0"
] | null | null | null |
news.py
|
sarveshwar-s/stockcast
|
6a71fd227b1fd2ea86d1465756be383a156467e5
|
[
"Apache-2.0"
] | 1
|
2021-10-16T09:31:19.000Z
|
2021-10-16T09:31:19.000Z
|
from textblob import TextBlob
# from textblob.sentiments import NaiveBayesAnalyzer
import requests as req
# api = "https://api.nytimes.com/svc/search/v2/articlesearch.json?q=GOOG&api-key=YOUR_API_KEY"
# print(datas["response"]["docs"][0]["lead_paragraph"]) #This is for newyork times
def news_analysis(compname):
positive = 0
negative = 0
neutral = 0
api = "http://newsapi.org/v2/everything?q="+ compname +"&apiKey=API_KEY"
responses = req.get(api)
datas = responses.json()
articles_len = len(datas["articles"])
for items in range(0,articles_len):
sentences = datas["articles"][items]["description"]
blobs = TextBlob(sentences)
print(blobs.sentences)
for sentence in blobs.sentences:
print(sentence.sentiment.polarity)
if(sentence.sentiment.polarity > 0 ):
positive+=1
elif(sentence.sentiment.polarity < 0):
negative+=1
else:
neutral+=1
# sentence.accracy
positive_perentage = (positive/100)*100
negative_percentage = (negative/100)*100
neutral_percentage = 100-(positive_perentage + negative_percentage)
persentlist = []
persentlist.append(positive_perentage)
persentlist.append(negative_percentage)
persentlist.append(neutral_percentage)
return persentlist
| 35.947368
| 94
| 0.666179
|
4a86e16f541d3b31dc6ab75f6dcde71e04c1cd39
| 169
|
py
|
Python
|
solutions/5_Registration_Exercise2Answer2.py
|
mikiec84/CourseInBiomedicalImageAnalysisVisualizationAndArtificialIntelligence
|
bbdb743ef814c9ddddb656fa4ddb50e24bf9f3ca
|
[
"Apache-2.0"
] | 30
|
2019-08-28T09:15:05.000Z
|
2021-04-19T17:43:35.000Z
|
solutions/5_Registration_Exercise2Answer2.py
|
mikiec84/CourseInBiomedicalImageAnalysisVisualizationAndArtificialIntelligence
|
bbdb743ef814c9ddddb656fa4ddb50e24bf9f3ca
|
[
"Apache-2.0"
] | 27
|
2019-03-01T14:39:38.000Z
|
2019-07-30T04:44:48.000Z
|
solutions/5_Registration_Exercise2Answer2.py
|
mikiec84/CourseInBiomedicalImageAnalysisVisualizationAndArtificialIntelligence
|
bbdb743ef814c9ddddb656fa4ddb50e24bf9f3ca
|
[
"Apache-2.0"
] | 14
|
2019-10-02T17:16:04.000Z
|
2021-02-24T17:28:24.000Z
|
resampler = itk.ResampleImageFilter.New(Input=movingImage,
Transform=outputCompositeTransform,
UseReferenceImage=True,
ReferenceImage=fixedImage)
| 42.25
| 58
| 0.757396
|
cb04c84f76de348c187408f7f8ed5d92e74176d9
| 5,767
|
py
|
Python
|
src/textconfig.py
|
1alexandra/collage
|
671ca1713d8e9f74faae9e824552a03c253adad0
|
[
"MIT"
] | 1
|
2020-07-21T12:10:58.000Z
|
2020-07-21T12:10:58.000Z
|
src/textconfig.py
|
1alexandra/collage
|
671ca1713d8e9f74faae9e824552a03c253adad0
|
[
"MIT"
] | 17
|
2020-03-14T20:27:09.000Z
|
2022-01-13T02:31:39.000Z
|
src/textconfig.py
|
1alexandra/collage
|
671ca1713d8e9f74faae9e824552a03c253adad0
|
[
"MIT"
] | null | null | null |
import tkinter as tk
from tkinter.colorchooser import askcolor
from datetime import datetime
from src.fonts import get_system_fonts
from src.grid import grid_frame
class TextConfigureApp(tk.Frame):
"""Simple Collage Creator second window.
Used for adding a caption to a collage. Allows user to customize a
content and a style of the caption.
The window consists of five blocks:
- text redactor,
- font chooser,
- canvas with an intermediate result,
- font parameters input fields: italic, bold, underlined checkboxes \
and font size entry,
- buttons block: ``Change color...``, ``Try font``, ``OK`` buttons.
"""
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.rgb = (0, 0, 0)
self.text_redactor = None
self.font_chooser = None
self.system_fonts = get_system_fonts()
self.font = self.system_fonts[0]
self.font_size = tk.StringVar(self.master, '12')
self.italic_var = tk.IntVar(self.master)
self.bold_var = tk.IntVar(self.master)
self.lined_var = tk.IntVar(self.master)
self.create_widgets()
def create_widgets(self):
"""Create and grid all widgets."""
grid_frame(self.master, is_root=True)
grid_frame(self, [0], [0], 0, 0, 'news')
frame = tk.Frame(self, bd=10)
grid_frame(frame, [0, 1, 2], [0, 1], 0, 0, 'news')
self.create_text_redactor(frame, 0, 0)
self.create_font_chooser(frame, 0, 1)
self.create_canvas(frame, 1, 0)
self.create_modifiers(frame, 1, 1)
self.create_buttons(frame, 2, 1)
self.draw()
def create_canvas(self, frame, row, col):
"""Create, configure and grid result canvas."""
self.canvas = tk.Canvas(frame, width=300, height=100, bg='white')
self.canvas.grid(row=row, column=col)
def create_text_redactor(self, frame, row, col):
"""Create, grid and initialize text redactor."""
# TODO: add scrollbar
text_frame = tk.Frame(frame, bd=10)
grid_frame(text_frame, [1], [0], row, col, 'news')
label = tk.Label(text_frame, text="Type text here:", bd=10)
label.grid(row=0, column=0, sticky='s')
self.text_redactor = tk.Text(text_frame, width=45, height=15, wrap=tk.WORD)
self.text_redactor.grid(row=1, column=0, sticky='news')
self.text_redactor.insert(tk.END, datetime.now().date().strftime("%B %Y"))
def create_font_chooser(self, frame, row, col):
"""Create and grid font chooser listbox, fill the options."""
# TODO: add scrollbar
font_frame = tk.Frame(frame, bd=10)
grid_frame(font_frame, [1], [0], row, col, 'news')
label = tk.Label(font_frame, text="Select font:", bd=10)
label.grid(row=0, column=0, sticky='s')
self.font_chooser = tk.Listbox(font_frame, selectmode='SINGLE')
self.font_chooser.grid(row=1, column=0, sticky='news')
for item in self.system_fonts:
self.font_chooser.insert(tk.END, item)
self.font_chooser.selection_set(0)
def create_modifiers(self, frame, row, col):
"""Create and grid font modifiers block."""
# TODO: add validation function
buttons = tk.Frame(frame, bd=10)
grid_frame(buttons, [1], [0, 1, 2], row, col, 'news')
variables = {
'italic': self.italic_var,
'bold': self.bold_var,
'underlined': self.lined_var
}
for i, (text, variable) in enumerate(variables.items()):
check = tk.Checkbutton(buttons, text=text, variable=variable, onvalue=1, offvalue=0, bd=10)
check.grid(row=0, column=i, sticky='ne')
label = tk.Label(buttons, text="Font size:", padx=5)
label.grid(row=1, column=0, sticky='ne')
entry = tk.Entry(buttons, textvariable=self.font_size, width=30)
entry.grid(row=1, column=1, sticky='new', columnspan=2)
def create_buttons(self, frame, row, col):
"""Create and grid buttons block."""
buttons = tk.Frame(frame, bd=10)
grid_frame(buttons, [], [0, 1, 2], row, col, 'news')
commands = {
'Change color...': self.choose_color,
'Try font': self.choose_font,
'OK': self.ok_quit
}
for i, (text, command) in enumerate(commands.items()):
button = tk.Button(buttons, text=text, command=command, padx=5, pady=5, width=15)
button.grid(row=0, column=i, sticky='ews')
def draw(self):
"""Show intermediate result on the canvas."""
# TODO: drawing text in choosen style on canvas
text = self.text_redactor.get('1.0', 'end-1c')
font = self.font
font_size = int(self.font_size.get())
rgb = self.rgb
is_italic = bool(self.italic_var.get())
is_bold = bool(self.bold_var.get())
is_lined = bool(self.lined_var.get())
print(text, font, font_size, rgb, is_italic, is_bold, is_lined)
pass
def choose_color(self):
"""Run askcolor dialog and show intermediate result."""
# ToDo: validation
self.rgb, _ = askcolor(parent=self, title="Choose color:")
self.draw()
def choose_font(self):
"""Update font and show intermediate result."""
self.font = self.system_fonts[self.font_chooser.curselection()[0]]
self.draw()
def ok_quit(self):
"""Update result canvas and close the window."""
self.draw()
self.master.destroy()
return 'break'
def get_return(self):
"""Return canvas with stylized capture."""
# TODO: check if canvas exists after self.master.destroy()
return self.canvas
| 39.772414
| 103
| 0.61297
|
2c93a8e25042ef18051545fb30fe5729b92d2621
| 2,684
|
py
|
Python
|
ai_modules/block_and_score_ai.py
|
PeterGaivoronski/n-size-tic-tac-toe
|
322d7d1110a16974639b5207ebf88f30e12f1fa4
|
[
"BSD-3-Clause"
] | null | null | null |
ai_modules/block_and_score_ai.py
|
PeterGaivoronski/n-size-tic-tac-toe
|
322d7d1110a16974639b5207ebf88f30e12f1fa4
|
[
"BSD-3-Clause"
] | null | null | null |
ai_modules/block_and_score_ai.py
|
PeterGaivoronski/n-size-tic-tac-toe
|
322d7d1110a16974639b5207ebf88f30e12f1fa4
|
[
"BSD-3-Clause"
] | null | null | null |
from .random_ai import computer_turn as random_fallback_computer_turn
def close_to_winning(symbol, game_state):
num_squares = len(game_state["board"])
diag_0_win = num_squares
diag_1_win = num_squares
diag_0_index = 0
diag_1_index = num_squares - 1
col_wins = [num_squares for x in game_state["board"][0]]
row_wins = [num_squares for y in game_state["board"]]
for y_index, y in enumerate(game_state["board"]):
for x_index, x in enumerate(y):
if x == symbol:
row_wins[y_index] -= 1
col_wins[x_index] -= 1
if x_index == diag_0_index:
diag_0_win -= 1
if x_index == diag_1_index:
diag_1_win -= 1
diag_0_index += 1
diag_1_index -= 1
return diag_0_win, diag_1_win, col_wins, row_wins
def place_at_column(col_num, game_state):
for y_index, y in enumerate(game_state["board"]):
if game_state["board"][y_index][col_num] == game_state["empty"]:
game_state["board"][y_index][col_num] = game_state["cpu_symbol"]
return True
return False
def place_at_row(row_num, game_state):
for x_index, x in enumerate(game_state["board"][row_num]):
if x == game_state["empty"]:
game_state["board"][row_num][x_index] = game_state["cpu_symbol"]
return True
return False
def place_at_diag(diag, game_state):
if diag == 0:
diag_index = 0
else:
diag_index = len(game_state["board"])-1
for y_index, y in enumerate(game_state["board"]):
for x_index, x in enumerate(y):
if x_index == diag_index and x == game_state["empty"]:
game_state["board"][y_index][x_index] = game_state["cpu_symbol"]
return True
if diag == 0:
diag_index += 1
else:
diag_index -= 1
return False
def place_potential_win_location(game_state, diag_0_win, diag_1_win, col_wins, row_wins):
if diag_0_win == 1:
placed = place_at_diag(0, game_state)
if placed:
return True
if diag_1_win == 1:
placed = place_at_diag(1, game_state)
if placed:
return True
for col, val in enumerate(col_wins):
if val == 1:
placed = place_at_column(col, game_state)
if placed:
return True
for row, val in enumerate(row_wins):
if val == 1:
placed = place_at_row(row, game_state)
if placed:
return True
return False
def computer_turn(game_state):
# If the cpu has 1 left to win a particular case, score it
placed = place_potential_win_location(game_state, *close_to_winning(game_state["cpu_symbol"], game_state))
if placed:
return
# If the player has 1 left to win a particular case, block it
placed = place_potential_win_location(game_state, *close_to_winning(game_state["user_symbol"], game_state))
if placed:
return
# Player not close to winning, random move
return random_fallback_computer_turn(game_state)
| 28.860215
| 108
| 0.720939
|
d4386323764a6385c0548004d49afa3bec346b27
| 4,775
|
py
|
Python
|
sheets/management/commands/sync_assignment_sheet.py
|
mitodl/mit-xpro
|
981d6c87d963837f0b9ccdd996067fe81394dba4
|
[
"BSD-3-Clause"
] | 10
|
2019-02-20T18:41:32.000Z
|
2021-07-26T10:39:58.000Z
|
sheets/management/commands/sync_assignment_sheet.py
|
mitodl/mit-xpro
|
981d6c87d963837f0b9ccdd996067fe81394dba4
|
[
"BSD-3-Clause"
] | 2,226
|
2019-02-20T20:03:57.000Z
|
2022-03-31T11:18:56.000Z
|
sheets/management/commands/sync_assignment_sheet.py
|
mitodl/mit-xpro
|
981d6c87d963837f0b9ccdd996067fe81394dba4
|
[
"BSD-3-Clause"
] | 4
|
2020-08-26T19:26:02.000Z
|
2021-03-09T17:46:47.000Z
|
"""
Compares assignment sheet rows to enrollment records in the database and message delivery data in Mailgun.
If the data in the sheet does not match, a request is sent to update/"sync" the sheet data.
"""
from django.core.management import BaseCommand, CommandError
from ecommerce.mail_api import send_bulk_enroll_emails
from ecommerce.models import BulkCouponAssignment
from sheets.api import get_authorized_pygsheets_client
from sheets.coupon_assign_api import CouponAssignmentHandler
from sheets.management.utils import get_assignment_spreadsheet_by_title
class Command(BaseCommand):
"""
Compares assignment sheet rows to enrollment records in the database and message delivery data in Mailgun.
If the data in the sheet does not match, a request is sent to update/"sync" the sheet data.
"""
help = __doc__
def add_arguments(self, parser): # pylint:disable=missing-docstring
group = parser.add_mutually_exclusive_group()
group.add_argument("--id", type=int, help="The BulkCouponAssignment ID")
group.add_argument(
"--sheet-id", type=str, help="The coupon assignment Sheet ID"
)
group.add_argument(
"-t",
"--title",
type=str,
help="The title of the coupon assignment Sheet (should match exactly one sheet)",
)
parser.add_argument(
"--skip-confirm",
action="store_true",
help="Skip the confirmation step for sending enrollment code emails that should have been sent",
)
super().add_arguments(parser)
def handle(
self, *args, **options
): # pylint:disable=missing-docstring,too-many-locals
if not any([options["id"], options["sheet_id"], options["title"]]):
raise CommandError("Need to provide --id, --sheet-id, or --title")
if options["id"]:
qset_kwargs = dict(id=options["id"])
elif options["sheet_id"]:
qset_kwargs = dict(assignment_sheet_id=options["sheet_id"])
else:
pygsheets_client = get_authorized_pygsheets_client()
spreadsheet = get_assignment_spreadsheet_by_title(
pygsheets_client, options["title"]
)
qset_kwargs = dict(assignment_sheet_id=spreadsheet.id)
bulk_assignment = BulkCouponAssignment.objects.get(**qset_kwargs)
coupon_assignment_handler = CouponAssignmentHandler(
spreadsheet_id=bulk_assignment.assignment_sheet_id,
bulk_assignment=bulk_assignment,
)
row_updates, unsent_assignments = (
coupon_assignment_handler.get_out_of_sync_sheet_data()
)
if len(row_updates) == 0 and len(unsent_assignments) == 0:
self.stdout.write(
self.style.WARNING(
"Spreadsheet data appears to be synced. No updates needed. Exiting..."
)
)
return
row_update_results, message_delivery_results = "", ""
if row_updates:
coupon_assignment_handler.update_sheet_with_new_statuses(
row_updates=row_updates
)
row_update_summary = "\n".join(
[
"Row: {}, Status: {}".format(
row_update.row_index, row_update.status
)
for row_update in row_updates
]
)
row_update_results = "Request sent to the Google API to update {} row(s):\n{}".format(
len(row_updates), row_update_summary
)
if unsent_assignments and not options["skip_confirm"]:
user_input = input(
"{} users(s) will be sent an enrollment code email:\n"
"{}\n"
"Enter 'y' to confirm and send the emails, or any other key to skip this step: ".format(
len(unsent_assignments),
"\n".join(
[
f" {assignment.email} (code: {assignment.product_coupon.coupon.coupon_code})"
for assignment in unsent_assignments
]
),
)
)
if user_input.lower() != "y":
unsent_assignments = []
if unsent_assignments:
send_bulk_enroll_emails(bulk_assignment.id, unsent_assignments)
message_delivery_results = (
f"{len(unsent_assignments)} enrollment code email(s) sent."
)
summary = "\n\n".join(
list(filter(None, [row_update_results, message_delivery_results]))
)
self.stdout.write(self.style.SUCCESS(summary))
| 40.12605
| 110
| 0.597068
|
7e737a3798ef247dd21dbb2d8ded5bc14963148b
| 531
|
py
|
Python
|
ex072 - Número por extenso.py
|
CelsoAntunesNogueira/Phyton3
|
3a7ec8d7a6f996b2d3c36a9c6d239707e76aace4
|
[
"MIT"
] | null | null | null |
ex072 - Número por extenso.py
|
CelsoAntunesNogueira/Phyton3
|
3a7ec8d7a6f996b2d3c36a9c6d239707e76aace4
|
[
"MIT"
] | null | null | null |
ex072 - Número por extenso.py
|
CelsoAntunesNogueira/Phyton3
|
3a7ec8d7a6f996b2d3c36a9c6d239707e76aace4
|
[
"MIT"
] | null | null | null |
nu = ('zero', 'um', 'dois', 'três', 'quatro', 'cinco', 'seis', 'sete', 'oito', 'nove', 'dez', 'onze',
'doze', 'treze', 'catorze', 'quinze', 'dezesseis', 'dezesete', 'dezoito', 'dezenove', 'vinte')
while True:
a = int(input("Digite um número de 0 a 20:"))
if a > -1 and a < 21:
print(f"O número escolhido foi {(nu[a])}")
else:
print("Número invalido.", end=' ')
c = ' '
while c not in "SN":
c = input('Deseja continuar ? [S/N]').upper().split()[0]
if c == 'N':
break
| 31.235294
| 101
| 0.504708
|
22fcb14c04cc74c1cf15646d73ff8cfa79ef030e
| 482
|
py
|
Python
|
location/urls.py
|
radamizell/WallApp
|
20cb8dfe0709c79f20c93201f25b81ac5a517fc6
|
[
"MIT"
] | null | null | null |
location/urls.py
|
radamizell/WallApp
|
20cb8dfe0709c79f20c93201f25b81ac5a517fc6
|
[
"MIT"
] | 1
|
2017-05-20T03:09:37.000Z
|
2017-06-01T05:09:56.000Z
|
location/urls.py
|
radamizell/WallApp
|
20cb8dfe0709c79f20c93201f25b81ac5a517fc6
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url, include
from django.views.generic import ListView, DetailView
from . import views
urlpatterns = [
url(r'^$', views.fetch_places, name= 'list'),
url(r'^foo/', views.fetch_places_loc, name='foo'),
url(r'^create/$', views.post_create),
url(r'^(?P<id>\d+)/$', views.post_detail, name='detail'),
url(r'^(?P<id>\d+)/edit/$', views.post_update, name='update'),
url(r'^(?P<id>\d+)/delete/$', views.post_delete, name= 'delete'),
]
| 37.076923
| 69
| 0.639004
|
fdb4b2d4561e030a2e7327db6b08c8c70d1455d0
| 1,300
|
py
|
Python
|
var/spack/repos/builtin/packages/blast-legacy/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/blast-legacy/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2021-11-09T20:28:40.000Z
|
2022-03-15T03:26:33.000Z
|
var/spack/repos/builtin/packages/blast-legacy/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from os import symlink
from spack.package import *
class BlastLegacy(Package):
"""Legacy NCBI BLAST distribution -- no longer supported.
Contains older programs including `blastall'"""
homepage = "https://www.ncbi.nlm.nih.gov/"
url = "ftp://ftp.ncbi.nlm.nih.gov/blast/executables/legacy.NOTSUPPORTED/2.2.26/ncbi.tar.gz"
version('2.2.26', sha256='d8fffac25efc8ca894c707c840a4797a8a949ae6fd983d2f91c9972f788efb7d', deprecated=True)
depends_on('tcsh', type='build')
def install(self, spec, prefix):
filter_file('/bin/csh -f', '/usr/bin/env tcsh', 'make/ln-if-absent',
string=True)
symlink(self.stage.source_path, '../ncbi')
tcsh = which('tcsh')
with working_dir('..'):
tcsh('./ncbi/make/makedis.csh')
# depends on local data in the source tree
install_path = join_path(prefix, 'usr/local/blast-legacy')
mkdirp(install_path)
install_tree('.', install_path)
# symlink build output with binaries
symlink(join_path(install_path, 'build'), prefix.bin)
| 34.210526
| 113
| 0.668462
|
d7ab7ae0d6bff08b4e0ffbccc08e09b3afcf80b1
| 4,451
|
py
|
Python
|
model.py
|
tracyhayford/behavioral-cloning
|
c32237768beec281e3459f5972067305bff9b64b
|
[
"MIT"
] | null | null | null |
model.py
|
tracyhayford/behavioral-cloning
|
c32237768beec281e3459f5972067305bff9b64b
|
[
"MIT"
] | null | null | null |
model.py
|
tracyhayford/behavioral-cloning
|
c32237768beec281e3459f5972067305bff9b64b
|
[
"MIT"
] | null | null | null |
import csv
import cv2
import numpy as np
lines = []
data_path = './data/'
# These are some other data paths that were used during testing
#data_path = '../Origdata/'
#data_path = '../THdata/'
#data_path = '../THdata1/'
# Read the driving log CSV file
first_line = True
with open(data_path + 'driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
if (first_line):
# first line of the sample data has a header, just skip it
first_line = False
else:
lines.append(line)
# Print some debug information to make sure we're on the right "track"
print(data_path + 'driving_log.csv')
# image path from the last line
print(line[0])
# steering value from the last line
print(line[3])
# define the arrays for images and (steering) measurements
images = []
measurements = []
# parse the CSV file data:
# 1) Read and save the images (3 perspectives - center, left, right)
# 2) Read the center steering measurement and produce a left and right steering measurement using a correction factor (tuning parameter)
#
first = True
for line in lines:
# collect all three cameras (center, left, right)
for i in range(3):
source_path = line[i]
tokens = source_path.split('/')
#print('Tokens: ', tokens)
filename = tokens[-1]
local_path = data_path + 'IMG/' + filename
#print(local_path)
image = cv2.imread(local_path)
images.append(image)
# save the first images for report
if (first):
if (i == 0):
cv2.imwrite('./First_Center.jpg', image)
elif (i == 1):
cv2.imwrite('./First_Left.jpg', image)
else:
cv2.imwrite('./First_Right.jpg', image)
first = False
left_correction = 0.15
right_correction = 0.1
measurement = float(line[3])
# save center (steering) measurement
measurements.append(measurement)
# emulate left (steering) measurement
measurements.append(measurement+left_correction)
# emulate right (steering) measurement
measurements.append(measurement-right_correction)
# Test - 2190 of each
#print(len(images))
#print(len(measurements))
# augment the data set by flipping the image and measurement along the vertical axis
augmented_images = []
augmented_measurements = []
first = True
for image, measurement in zip(images, measurements):
# add the original image
augmented_images.append(image)
augmented_measurements.append(measurement)
# add the flipped image
flipped_image = cv2.flip(image,1)
flipped_measurement = float(measurement) * -1.0
# save the first flipped image for report
if (first):
cv2.imwrite('./First_Flipped.jpg', flipped_image)
first = False
augmented_images.append(flipped_image)
augmented_measurements.append(flipped_measurement)
# create the numpy arrays (required by Keras) from the augmented dataset
X_train = np.array(augmented_images)
y_train = np.array(augmented_measurements)
# A little data check point
print(X_train.shape)
# My model starts here
import keras
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda
from keras.layers.convolutional import Convolution2D, Cropping2D
from keras.layers.pooling import MaxPooling2D
# This is an implementation of the NVidia model
model = Sequential()
# Normalization
model.add(Lambda(lambda x: x/255.0 - 0.5, input_shape=(160, 320, 3)))
# crop the top 70 pixels (sky) and bottom 25 pixels (car hood) - less data to process
model.add(Cropping2D(cropping=((70,25),(0,0))))
# 5 convolutional layers
model.add(Convolution2D(24,5,5,subsample=(2,2),activation='relu'))
model.add(Convolution2D(36,5,5,subsample=(2,2),activation='relu'))
model.add(Convolution2D(48,5,5,subsample=(2,2),activation='relu'))
model.add(Convolution2D(64,3,3,activation='relu'))
model.add(Convolution2D(64,3,3,activation='relu'))
# five fully connected layers
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
# optimize with an Adam optimizer minimizing mean square error (mse)
model.compile(optimizer='adam', loss='mse')
# train the model using 20% of dataset for validation over 7 epochs shuffling the data each time
model.fit(X_train, y_train, validation_split=0.2, epochs=7, shuffle=True)
# save this model for use in the simulator
model.save('model.h5')
| 33.216418
| 138
| 0.700067
|
42ede81403649a36e143173f0c614e7ec03cf0a9
| 2,355
|
py
|
Python
|
nms.py
|
ysy9893/object_recognition_with_hand_gesture
|
fc43211a1e6fe8a19f726f156cf276a8acdcb246
|
[
"Apache-2.0"
] | null | null | null |
nms.py
|
ysy9893/object_recognition_with_hand_gesture
|
fc43211a1e6fe8a19f726f156cf276a8acdcb246
|
[
"Apache-2.0"
] | null | null | null |
nms.py
|
ysy9893/object_recognition_with_hand_gesture
|
fc43211a1e6fe8a19f726f156cf276a8acdcb246
|
[
"Apache-2.0"
] | 1
|
2021-06-26T05:46:03.000Z
|
2021-06-26T05:46:03.000Z
|
# import the necessary packages
import numpy as np
def NMS(boxes,classes, probs, overlapThresh,imH,imW):
# if there are no boxes, return an empty list
if len(boxes) == 0:
return []
# For the bunch of division, the type of elements in bbox should be converterd to float
if boxes.dtype.kind == "i":
boxes = boxes.astype("float")
# initialize the list of picked indexes
pick = []
# grab the coordinates of the bounding boxes
x1 = boxes[:, 1]*imW
y1 = boxes[:, 0]*imH
x2 = boxes[:, 3]*imW
y2 = boxes[:, 2]*imH
# compute the area of the bounding boxes and grab the indexes to sort
# (in the case that no probabilities are provided, simply sort on the
# bottom-left y-coordinate)
area = (x2 - x1+1) * (y2 - y1+1)#By adding 1, we can prevent multiplication on floating point numbers.
idxs = y2.copy()
# if probabilities are provided, sort on them instead
if probs is not None:
idxs = probs
#이미지 내 바운딩박스마다 가지고 있는 confidence score을 이용해서
#confidnece score를 기준으로 오름차순으로 정렬한다.
idxs = np.argsort(idxs)#index로 저장되어 있다.
# keep looping while some indexes still remain in the indexes list
while len(idxs) > 0:
# grab the last index in the indexes list and add the index value
# to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]#i는 특정 (x1,y1,x2,y2)박스의 confidence값을 가리키는 index이다.
pick.append(i)# confidence score 중 가장 큰 값을 리스트에 추가한다.
# find the largest (x, y) coordinates for the start of the bounding
# box and the smallest (x, y) coordinates for the end of the bounding
# box
xx1 = np.maximum(x1[i], x1[idxs[:last]])
#idxs[:last]는 마지막 값을 제외한 나머지 값들 즉, <last
#np.maximum은 두 배열을 비교하기 위해 둘중 작은 배열을 element-wise한 다음에
#각각 배열을 순서대로 원소를 비교한 다음 큰 값을 새 배열에 넣는다.
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
# compute the width and height of the bounding box
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
# compute the ratio of overlap
overlap = (w * h) / area[idxs[:last]]
# delete all indexes from the index list that have overlap greater
# than the provided overlap threshold
idxs = np.delete(idxs, np.concatenate(([last],
np.where(overlap > overlapThresh)[0])))
# return only the bounding boxes that were picked
return boxes[pick],probs[pick],classes[pick]
| 33.169014
| 104
| 0.6862
|
20d089d4b1594960751cfefc33e004ac394611ff
| 9,657
|
py
|
Python
|
engineio/asyncio_socket.py
|
StoneMoe/python-engineio
|
43f8fb5cd3fefe96768f8a8d91006787fa9c1c19
|
[
"MIT"
] | null | null | null |
engineio/asyncio_socket.py
|
StoneMoe/python-engineio
|
43f8fb5cd3fefe96768f8a8d91006787fa9c1c19
|
[
"MIT"
] | null | null | null |
engineio/asyncio_socket.py
|
StoneMoe/python-engineio
|
43f8fb5cd3fefe96768f8a8d91006787fa9c1c19
|
[
"MIT"
] | null | null | null |
import asyncio
import six
import sys
import time
from . import exceptions
from . import packet
from . import payload
from . import socket
class AsyncSocket(socket.Socket):
async def poll(self):
"""Wait for packets to send to the client."""
try:
packets = [await asyncio.wait_for(self.queue.get(),
self.server.ping_timeout)]
self.queue.task_done()
except (asyncio.TimeoutError, asyncio.CancelledError):
raise exceptions.QueueEmpty()
if packets == [None]:
return []
try:
packets.append(self.queue.get_nowait())
self.queue.task_done()
except asyncio.QueueEmpty:
pass
return packets
async def receive(self, pkt):
"""Receive packet from the client."""
self.server.logger.info('%s: Received packet %s data %s',
self.sid, packet.packet_names[pkt.packet_type],
pkt.data if not isinstance(pkt.data, bytes)
else '<binary>')
if pkt.packet_type == packet.PING:
self.last_ping = time.time()
await self.send(packet.Packet(packet.PONG, pkt.data))
elif pkt.packet_type == packet.MESSAGE:
await self.server._trigger_event(
'message', self.sid, pkt.data,
run_async=self.server.async_handlers)
elif pkt.packet_type == packet.UPGRADE:
await self.send(packet.Packet(packet.NOOP))
elif pkt.packet_type == packet.CLOSE:
await self.close(wait=False, abort=True)
else:
raise exceptions.UnknownPacketError()
async def check_ping_timeout(self):
"""Make sure the client is still sending pings.
This helps detect disconnections for long-polling clients.
"""
if self.closed:
raise exceptions.SocketIsClosedError()
if time.time() - self.last_ping > self.server.ping_interval + 5:
self.server.logger.info('%s: Client is gone, closing socket',
self.sid)
# Passing abort=False here will cause close() to write a
# CLOSE packet. This has the effect of updating half-open sockets
# to their correct state of disconnected
await self.close(wait=False, abort=False)
return False
return True
async def send(self, pkt):
"""Send a packet to the client."""
if not await self.check_ping_timeout():
return
if self.upgrading:
self.packet_backlog.append(pkt)
else:
await self.queue.put(pkt)
self.server.logger.info('%s: Sending packet %s data %s',
self.sid, packet.packet_names[pkt.packet_type],
pkt.data if not isinstance(pkt.data, bytes)
else '<binary>')
async def handle_get_request(self, environ):
"""Handle a long-polling GET request from the client."""
connections = [
s.strip()
for s in environ.get('HTTP_CONNECTION', '').lower().split(',')]
transport = environ.get('HTTP_UPGRADE', '').lower()
if 'upgrade' in connections and transport in self.upgrade_protocols:
self.server.logger.info('%s: Received request to upgrade to %s',
self.sid, transport)
return await getattr(self, '_upgrade_' + transport)(environ)
try:
packets = await self.poll()
except exceptions.QueueEmpty:
exc = sys.exc_info()
await self.close(wait=False)
six.reraise(*exc)
return packets
async def handle_post_request(self, environ):
"""Handle a long-polling POST request from the client."""
length = int(environ.get('CONTENT_LENGTH', '0'))
if length > self.server.max_http_buffer_size:
raise exceptions.ContentTooLongError()
else:
body = await environ['wsgi.input'].read(length)
p = payload.Payload(encoded_payload=body)
for pkt in p.packets:
await self.receive(pkt)
async def close(self, wait=True, abort=False):
"""Close the socket connection."""
if not self.closed and not self.closing:
self.closing = True
await self.server._trigger_event('disconnect', self.sid)
if not abort:
await self.send(packet.Packet(packet.CLOSE))
self.closed = True
if wait:
await self.queue.join()
async def _upgrade_websocket(self, environ):
"""Upgrade the connection from polling to websocket."""
if self.upgraded:
raise IOError('Socket has been upgraded already')
if self.server._async['websocket'] is None:
# the selected async mode does not support websocket
return self.server._bad_request()
ws = self.server._async['websocket'](self._websocket_handler)
return await ws(environ)
async def _websocket_handler(self, ws):
"""Engine.IO handler for websocket transport."""
if self.connected:
# the socket was already connected, so this is an upgrade
self.upgrading = True # hold packet sends during the upgrade
await self.queue.join() # flush the queue first
try:
pkt = await ws.wait()
except IOError: # pragma: no cover
return
decoded_pkt = packet.Packet(encoded_packet=pkt)
if decoded_pkt.packet_type != packet.PING or \
decoded_pkt.data != 'probe':
self.server.logger.info(
'%s: Failed websocket upgrade, no PING packet', self.sid)
return
await ws.send(packet.Packet(
packet.PONG,
data=six.text_type('probe')).encode(always_bytes=False))
await self.queue.put(packet.Packet(packet.NOOP)) # end poll
try:
pkt = await ws.wait()
except IOError: # pragma: no cover
return
decoded_pkt = packet.Packet(encoded_packet=pkt)
if decoded_pkt.packet_type != packet.UPGRADE:
self.upgraded = False
self.server.logger.info(
('%s: Failed websocket upgrade, expected UPGRADE packet, '
'received %s instead.'),
self.sid, pkt)
return
self.upgraded = True
# flush any packets that were sent during the upgrade
for pkt in self.packet_backlog:
await self.queue.put(pkt)
self.packet_backlog = []
self.upgrading = False
else:
self.connected = True
self.upgraded = True
# start separate writer thread
async def writer():
while True:
packets = None
try:
packets = await self.poll()
except exceptions.QueueEmpty:
break
if not packets:
# empty packet list returned -> connection closed
break
try:
for pkt in packets:
await ws.send(pkt.encode(always_bytes=False))
except:
break
writer_task = asyncio.ensure_future(writer())
self.server.logger.info(
'%s: Upgrade to websocket successful', self.sid)
while True:
p = None
wait_task = asyncio.ensure_future(ws.wait())
try:
p = await asyncio.wait_for(wait_task, self.server.ping_timeout)
except asyncio.CancelledError: # pragma: no cover
# there is a bug (https://bugs.python.org/issue30508) in
# asyncio that causes a "Task exception never retrieved" error
# to appear when wait_task raises an exception before it gets
# cancelled. Calling wait_task.exception() prevents the error
# from being issued in Python 3.6, but causes other errors in
# other versions, so we run it with all errors suppressed and
# hope for the best.
try:
wait_task.exception()
except:
pass
break
except:
break
if p is None:
# connection closed by client
break
if isinstance(p, six.text_type): # pragma: no cover
p = p.encode('utf-8')
pkt = packet.Packet(encoded_packet=p)
try:
await self.receive(pkt)
except exceptions.UnknownPacketError: # pragma: no cover
pass
except exceptions.SocketIsClosedError: # pragma: no cover
self.server.logger.info('Receive error -- socket is closed')
break
except: # pragma: no cover
# if we get an unexpected exception we log the error and exit
# the connection properly
self.server.logger.exception('Unknown receive error')
await self.queue.put(None) # unlock the writer task so it can exit
await asyncio.wait_for(writer_task, timeout=None)
await self.close(wait=False, abort=True)
| 40.746835
| 79
| 0.550896
|
0828b310e6ced0a8d0c5133e03eec8e200247bf3
| 2,489
|
py
|
Python
|
GPF/example/main_single.py
|
seeker1943/GPF
|
478e3c121f8ca774b9c6fefcfe1180ab4b7aa918
|
[
"MIT"
] | 104
|
2018-11-04T04:47:13.000Z
|
2022-02-26T11:52:47.000Z
|
GPF/example/main_single.py
|
seeker1943/GPF
|
478e3c121f8ca774b9c6fefcfe1180ab4b7aa918
|
[
"MIT"
] | 4
|
2019-03-03T01:35:57.000Z
|
2021-05-08T13:59:44.000Z
|
GPF/example/main_single.py
|
seeker1943/GPF
|
478e3c121f8ca774b9c6fefcfe1180ab4b7aa918
|
[
"MIT"
] | 25
|
2019-02-01T07:19:18.000Z
|
2022-01-25T06:11:29.000Z
|
#!/usr/bin/env python
# encoding: utf-8
import sys
#指定路径
sys.path.append("../src/")
import graph
import embeddings
import sample
import classifier
from classifier import loop_dataset
import subgraphs
import argparse
import torch
import torch.optim as optim
import networkx as nx
import until
import random
from tqdm import tqdm
#导入配置参数
from parameters import parser, cmd_embed, cmd_opt
#参数转换
args = parser.parse_args()
args.cuda = not args.noCuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
if args.hop != 'auto':
args.hop = int(args.hop)
if args.maxNodesPerHop is not None:
args.maxNodesPerHop = int(args.maxNodesPerHop)
#读取数据
g = graph.Graph()
g.read_edgelist(filename=args.dataName, weighted=args.weighted, directed=args.directed)
g.read_node_status(filename=args.labelName)
#获取全图节点的Embedding
embed_args = cmd_embed.parse_args()
embeddings = embeddings.learn_embeddings(g, embed_args)
node_information = embeddings
#print node_information
#正负节点采样
train, train_status, test, test_status = sample.sample_single(g, args.testRatio, max_train_num=args.maxTrainNum)
#抽取节点对的封闭子图
net = until.nxG_to_mat(g)
#print net
train_graphs, test_graphs, max_n_label = subgraphs.singleSubgraphs(net, train, train_status, test, test_status, args.hop, args.maxNodesPerHop, node_information)
print('# train: %d, # test: %d' % (len(train_graphs), len(test_graphs)))
#加载网络模型,并在classifier中配置相关参数
cmd_args = cmd_opt.parse_args()
cmd_args.feat_dim = max_n_label + 1
cmd_args.attr_dim = node_information.shape[1]
cmd_args.latent_dim = [int(x) for x in cmd_args.latent_dim.split('-')]
if len(cmd_args.latent_dim) == 1:
cmd_args.latent_dim = cmd_args.latent_dim[0]
model = classifier.Classifier(cmd_args)
optimizer = optim.Adam(model.parameters(), lr=args.learningRate)
#训练和测试
train_idxes = list(range(len(train_graphs)))
best_loss = None
for epoch in range(args.num_epochs):
random.shuffle(train_idxes)
model.train()
avg_loss = loop_dataset(train_graphs, model, train_idxes, cmd_args.batch_size, optimizer=optimizer)
print('\033[92maverage training of epoch %d: loss %.5f acc %.5f auc %.5f\033[0m' % (epoch, avg_loss[0], avg_loss[1], avg_loss[2]))
model.eval()
test_loss = loop_dataset(test_graphs, model, list(range(len(test_graphs))), cmd_args.batch_size)
print('\033[93maverage test of epoch %d: loss %.5f acc %.5f auc %.5f\033[0m' % (epoch, test_loss[0], test_loss[1], test_loss[2]))
| 31.1125
| 160
| 0.76135
|
1f6b762f770a3d90e5588d69fbbd01d58aad9b42
| 649
|
py
|
Python
|
tests/parser/features/test_assert.py
|
lrettig/viper
|
4abdd2b59b58fe87ca0aee05c792a6e0363b5358
|
[
"MIT"
] | null | null | null |
tests/parser/features/test_assert.py
|
lrettig/viper
|
4abdd2b59b58fe87ca0aee05c792a6e0363b5358
|
[
"MIT"
] | null | null | null |
tests/parser/features/test_assert.py
|
lrettig/viper
|
4abdd2b59b58fe87ca0aee05c792a6e0363b5358
|
[
"MIT"
] | 1
|
2018-09-02T22:47:00.000Z
|
2018-09-02T22:47:00.000Z
|
import pytest
from tests.setup_transaction_tests import chain as s, tester as t, ethereum_utils as u, check_gas, \
get_contract_with_gas_estimation, get_contract
def test_assert_refund(t):
code = """
@public
def foo():
assert 1 == 2
"""
c = get_contract_with_gas_estimation(code)
pre_balance = t.s.head_state.get_balance(t.a0)
with pytest.raises(t.TransactionFailed):
c.foo(startgas=10**6, gasprice=10)
post_balance = t.s.head_state.get_balance(t.a0)
# Checks for gas refund from revert
# 10**5 is added to account for gas used before the transactions fails
assert pre_balance < post_balance + 10**5
| 34.157895
| 100
| 0.72265
|
927f80a5890c73ce3ace9fe979f70aecee02a872
| 156
|
py
|
Python
|
predicode/hierarchical/__init__.py
|
sflippl/predicode
|
f3d0b43a2c05cd6dbdf8656f6759127483f79a58
|
[
"MIT"
] | 2
|
2019-09-24T14:43:17.000Z
|
2021-02-07T08:34:54.000Z
|
predicode/hierarchical/.ipynb_checkpoints/__init__-checkpoint.py
|
sflippl/predicode
|
f3d0b43a2c05cd6dbdf8656f6759127483f79a58
|
[
"MIT"
] | 8
|
2019-09-09T16:01:10.000Z
|
2022-02-10T00:20:45.000Z
|
predicode/hierarchical/.ipynb_checkpoints/__init__-checkpoint.py
|
sflippl/predicode
|
f3d0b43a2c05cd6dbdf8656f6759127483f79a58
|
[
"MIT"
] | 1
|
2019-09-24T15:00:39.000Z
|
2019-09-24T15:00:39.000Z
|
"""Implements hierarchical predictive coding models."""
from predicode.hierarchical.initializer import *
from predicode.hierarchical.hierarchical import *
| 31.2
| 55
| 0.826923
|
c3c663bbac5709660c6537d5be6eace473304864
| 1,485
|
py
|
Python
|
ash/resources/PRESUBMIT.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668
|
2015-01-01T01:57:10.000Z
|
2022-03-31T23:33:32.000Z
|
ash/resources/PRESUBMIT.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 86
|
2015-10-21T13:02:42.000Z
|
2022-03-14T07:50:50.000Z
|
ash/resources/PRESUBMIT.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941
|
2015-01-02T11:32:21.000Z
|
2022-03-31T16:35:46.000Z
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium Ash resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
https://chromium.googlesource.com/chromium/src/+/HEAD/styleguide/web/web.md
for the rules we're checking against here.
"""
USE_PYTHON3 = True
def CheckChangeOnUpload(input_api, output_api):
return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return _CommonChecks(input_api, output_api)
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
resources = input_api.os_path.join(input_api.PresubmitLocalPath(),
'../../ui/resources')
# List of paths with their associated scale factor. This is used to verify
# that the images modified in one are the correct scale of the other.
path_scales = [
[(100, 'default_100_percent/'), (200, 'default_200_percent/')],
]
import sys
old_path = sys.path
try:
sys.path = [resources] + old_path
from resource_check import resource_scale_factors
for paths in path_scales:
results.extend(resource_scale_factors.ResourceScaleFactors(
input_api, output_api, paths).RunChecks())
finally:
sys.path = old_path
return results
| 29.117647
| 76
| 0.744781
|
fa64416460b750277761d843f0df40ac83b95450
| 2,747
|
py
|
Python
|
cli/rmaker_tools/rmaker_prov/transport/transport_ble.py
|
dajewiss/esp-rainmaker
|
bca07db139d5da5fe3301659d35df7ad20b78455
|
[
"Apache-2.0"
] | 78
|
2020-12-16T01:04:28.000Z
|
2022-03-29T02:07:38.000Z
|
cli/rmaker_tools/rmaker_prov/transport/transport_ble.py
|
dajewiss/esp-rainmaker
|
bca07db139d5da5fe3301659d35df7ad20b78455
|
[
"Apache-2.0"
] | 88
|
2020-12-19T02:39:12.000Z
|
2022-03-15T18:00:00.000Z
|
cli/rmaker_tools/rmaker_prov/transport/transport_ble.py
|
dajewiss/esp-rainmaker
|
bca07db139d5da5fe3301659d35df7ad20b78455
|
[
"Apache-2.0"
] | 47
|
2020-12-18T16:55:10.000Z
|
2022-02-11T02:04:16.000Z
|
# Copyright 2020 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from .transport import Transport
from . import ble_cli
class Transport_BLE(Transport):
def __init__(self, devname, service_uuid, nu_lookup):
# Expect service UUID like '0000ffff-0000-1000-8000-00805f9b34fb'
for name in nu_lookup.keys():
# Calculate characteristic UUID for each endpoint
nu_lookup[name] = service_uuid[:4] + '{:02x}'.format(
int(nu_lookup[name], 16) & int(service_uuid[4:8], 16)) + service_uuid[8:]
# Get BLE client module
self.cli = ble_cli.get_client()
# Use client to connect to BLE device and bind to service
if not self.cli.connect(devname=devname, iface='hci0',
chrc_names=nu_lookup.keys(),
fallback_srv_uuid=service_uuid):
raise RuntimeError("Failed to initialize transport")
# Irrespective of provided parameters, let the client
# generate a lookup table by reading advertisement data
# and characteristic user descriptors
self.name_uuid_lookup = self.cli.get_nu_lookup()
# If that doesn't work, use the lookup table provided as parameter
if self.name_uuid_lookup is None:
self.name_uuid_lookup = nu_lookup
# Check if expected characteristics are provided by the service
for name in self.name_uuid_lookup.keys():
if not self.cli.has_characteristic(self.name_uuid_lookup[name]):
raise RuntimeError("'" + name + "' endpoint not found")
def __del__(self):
# Make sure device is disconnected before application gets closed
try:
self.disconnect()
except Exception:
pass
def disconnect(self):
self.cli.disconnect()
def send_data(self, ep_name, data):
# Write (and read) data to characteristic corresponding to the endpoint
if ep_name not in self.name_uuid_lookup.keys():
raise RuntimeError("Invalid endpoint : " + ep_name)
return self.cli.send_data(self.name_uuid_lookup[ep_name], data)
| 41
| 89
| 0.66873
|
ac0115f34aabbd2708857c1d9d25f92c7a13a846
| 400
|
py
|
Python
|
temperaturePlot.py
|
PostojePoczekam/TemperaturePlot
|
550209efcb793ca686d2364f5dc3812903b366c7
|
[
"MIT"
] | null | null | null |
temperaturePlot.py
|
PostojePoczekam/TemperaturePlot
|
550209efcb793ca686d2364f5dc3812903b366c7
|
[
"MIT"
] | null | null | null |
temperaturePlot.py
|
PostojePoczekam/TemperaturePlot
|
550209efcb793ca686d2364f5dc3812903b366c7
|
[
"MIT"
] | null | null | null |
import sys
import pandas as pd
import matplotlib.pyplot as plt
filename = sys.argv[1]
df = pd.read_csv(filename, sep=',', engine='python')
plt.figure(figsize=(16, 9))
plt.title(filename.replace('\\', '/').split('/').pop())
plt.ylabel('Temperature [°C]')
plt.xlabel('Time [s]')
plt.plot(df.iloc[:, 3], label='GPU')
plt.plot(df.iloc[:, 13], label='CPU')
plt.grid()
plt.legend()
plt.savefig('out.png')
| 25
| 55
| 0.6625
|
b2895dab4a3ae6e7d5fd147787ecc5c6e1c6a9c3
| 1,078
|
py
|
Python
|
pipenv/patched/notpip/_internal/models/index.py
|
jrottenberg/pipenv
|
cda15b3b30e04e038ee286bced6c47a311f1e0ec
|
[
"MIT"
] | 18,636
|
2017-12-06T14:53:18.000Z
|
2022-03-31T13:12:34.000Z
|
pipenv/patched/notpip/_internal/models/index.py
|
jrottenberg/pipenv
|
cda15b3b30e04e038ee286bced6c47a311f1e0ec
|
[
"MIT"
] | 3,640
|
2017-12-06T16:58:35.000Z
|
2022-03-31T22:20:57.000Z
|
pipenv/patched/notpip/_internal/models/index.py
|
jrottenberg/pipenv
|
cda15b3b30e04e038ee286bced6c47a311f1e0ec
|
[
"MIT"
] | 1,987
|
2017-12-06T15:04:51.000Z
|
2022-03-26T10:05:15.000Z
|
from pipenv.patched.notpip._vendor.six.moves.urllib import parse as urllib_parse
class PackageIndex(object):
"""Represents a Package Index and provides easier access to endpoints
"""
def __init__(self, url, file_storage_domain):
# type: (str, str) -> None
super(PackageIndex, self).__init__()
self.url = url
self.netloc = urllib_parse.urlsplit(url).netloc
self.simple_url = self._url_for_path('simple')
self.pypi_url = self._url_for_path('pypi')
# This is part of a temporary hack used to block installs of PyPI
# packages which depend on external urls only necessary until PyPI can
# block such packages themselves
self.file_storage_domain = file_storage_domain
def _url_for_path(self, path):
# type: (str) -> str
return urllib_parse.urljoin(self.url, path)
PyPI = PackageIndex(
'https://pypi.org/', file_storage_domain='files.pythonhosted.org'
)
TestPyPI = PackageIndex(
'https://test.pypi.org/', file_storage_domain='test-files.pythonhosted.org'
)
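_url_for_path leans on urljoin semantics: because the index URLs end with a slash, the path is appended rather than substituted for the last segment. A standalone check of that behavior, using plain urllib.parse instead of the vendored six.moves import:

# urljoin keeps the base path only when it ends with '/'; the PyPI URLs above do.
from urllib.parse import urljoin

print(urljoin('https://pypi.org/', 'simple'))        # https://pypi.org/simple
print(urljoin('https://pypi.org/', 'pypi'))          # https://pypi.org/pypi
print(urljoin('https://pypi.org/legacy', 'simple'))  # https://pypi.org/simple (segment replaced)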
| 33.6875
| 80
| 0.688312
|
f362b97808ae43174b62ffed9748347e8016b28d
| 1,567
|
py
|
Python
|
cloudify_custom_workflow/batch_utils.py
|
jrzeszutek/cloudify-utilities-plugin
|
d62fc98e9164fd836d8f22a757b5a58ca119f97a
|
[
"Apache-2.0"
] | 8
|
2017-06-14T14:59:17.000Z
|
2021-09-15T05:44:05.000Z
|
cloudify_custom_workflow/batch_utils.py
|
jrzeszutek/cloudify-utilities-plugin
|
d62fc98e9164fd836d8f22a757b5a58ca119f97a
|
[
"Apache-2.0"
] | 75
|
2017-04-20T20:42:26.000Z
|
2022-02-16T11:03:02.000Z
|
cloudify_custom_workflow/batch_utils.py
|
jrzeszutek/cloudify-utilities-plugin
|
d62fc98e9164fd836d8f22a757b5a58ca119f97a
|
[
"Apache-2.0"
] | 21
|
2017-08-13T13:19:58.000Z
|
2021-12-09T14:41:42.000Z
|
########
# Copyright (c) 2014-2018 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudify_common_sdk.utils import get_deployments_from_blueprint
def generate_group_id_from_blueprint(blueprint_id):
deployments = get_deployments_from_blueprint(blueprint_id)
if not deployments:
return '{bp}-group'.format(bp=blueprint_id)
else:
return '{bp}-group-{i}'.format(bp=blueprint_id, i=len(deployments))
def generate_deployment_ids_from_group_id(group_id, deployments):
return ['{g}-{i}'.format(g=group_id, i=i) for i in range(
len(deployments))]
def generate_inputs_from_deployments(inputs, deployments):
inputs = inputs or []
for iterator, deployment_id in enumerate(deployments):
try:
inputs[iterator]['deployment'] = deployment_id
except IndexError:
inputs.append({'deployment': deployment_id})
return inputs
def generate_labels_from_inputs(inputs):
return [{'csys-obj-parent': inp['deployment']} for inp in inputs]
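Apart from the blueprint lookup, the helpers above are pure functions. The snippet below copies generate_inputs_from_deployments verbatim so it runs without the Cloudify SDK, and shows how deployment ids are threaded into a partial inputs list and then turned into labels.

# Standalone illustration of the two pure helpers above (logic copied as-is).
def generate_inputs_from_deployments(inputs, deployments):
    inputs = inputs or []
    for iterator, deployment_id in enumerate(deployments):
        try:
            inputs[iterator]['deployment'] = deployment_id
        except IndexError:
            inputs.append({'deployment': deployment_id})
    return inputs

inputs = generate_inputs_from_deployments([{'flavor': 'small'}],
                                          ['app-group-0', 'app-group-1'])
print(inputs)
# [{'flavor': 'small', 'deployment': 'app-group-0'}, {'deployment': 'app-group-1'}]
print([{'csys-obj-parent': inp['deployment']} for inp in inputs])
# [{'csys-obj-parent': 'app-group-0'}, {'csys-obj-parent': 'app-group-1'}]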
| 34.822222
| 75
| 0.728781
|
e3e7ad6ec1ebda2e58b02efed13a5c6b54038157
| 14,237
|
py
|
Python
|
test/functional/wallet_hd.py
|
tokyocoin-project/tokyocoin
|
48cbabf73f0687cc04b6658cf69aba65aa1b997d
|
[
"MIT"
] | null | null | null |
test/functional/wallet_hd.py
|
tokyocoin-project/tokyocoin
|
48cbabf73f0687cc04b6658cf69aba65aa1b997d
|
[
"MIT"
] | null | null | null |
test/functional/wallet_hd.py
|
tokyocoin-project/tokyocoin
|
48cbabf73f0687cc04b6658cf69aba65aa1b997d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2016-2020 The Tokyocoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Hierarchical Deterministic wallet function."""
import os
import shutil
from test_framework.test_framework import TokyocoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class WalletHDTest(TokyocoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ['-keypool=0']]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Make sure we use hd, keep masterkeyid
hd_fingerprint = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['hdmasterfingerprint']
assert_equal(len(hd_fingerprint), 8)
# create an internal key
change_addr = self.nodes[1].getrawchangeaddress()
change_addrV = self.nodes[1].getaddressinfo(change_addr)
if self.options.descriptors:
assert_equal(change_addrV["hdkeypath"], "m/84'/1'/0'/1/0")
else:
assert_equal(change_addrV["hdkeypath"], "m/0'/1'/0'") #first internal child key
# Import a non-HD private key in the HD wallet
non_hd_add = 'bcrt1qmevj8zfx0wdvp05cqwkmr6mxkfx60yezwjksmt'
non_hd_key = 'cS9umN9w6cDMuRVYdbkfE4c7YUFLJRoXMfhQ569uY4odiQbVN8Rt'
self.nodes[1].importprivkey(non_hd_key)
# This should be enough to keep the master key and the non-HD key
self.nodes[1].backupwallet(os.path.join(self.nodes[1].datadir, "hd.bak"))
#self.nodes[1].dumpwallet(os.path.join(self.nodes[1].datadir, "hd.dump"))
# Derive some HD addresses and remember the last
        # Also send funds to each address
self.nodes[0].generate(101)
hd_add = None
NUM_HD_ADDS = 10
for i in range(1, NUM_HD_ADDS + 1):
hd_add = self.nodes[1].getnewaddress()
hd_info = self.nodes[1].getaddressinfo(hd_add)
if self.options.descriptors:
assert_equal(hd_info["hdkeypath"], "m/84'/1'/0'/0/" + str(i))
else:
assert_equal(hd_info["hdkeypath"], "m/0'/0'/" + str(i) + "'")
assert_equal(hd_info["hdmasterfingerprint"], hd_fingerprint)
self.nodes[0].sendtoaddress(hd_add, 1)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(non_hd_add, 1)
self.nodes[0].generate(1)
# create an internal key (again)
change_addr = self.nodes[1].getrawchangeaddress()
change_addrV = self.nodes[1].getaddressinfo(change_addr)
if self.options.descriptors:
assert_equal(change_addrV["hdkeypath"], "m/84'/1'/0'/1/1")
else:
assert_equal(change_addrV["hdkeypath"], "m/0'/1'/1'") #second internal child key
self.sync_all()
assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1)
self.log.info("Restore backup ...")
self.stop_node(1)
# we need to delete the complete chain directory
        # otherwise node1 would auto-recover all funds and flag the keypool keys as used
shutil.rmtree(os.path.join(self.nodes[1].datadir, self.chain, "blocks"))
shutil.rmtree(os.path.join(self.nodes[1].datadir, self.chain, "chainstate"))
shutil.copyfile(
os.path.join(self.nodes[1].datadir, "hd.bak"),
os.path.join(self.nodes[1].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename),
)
self.start_node(1)
# Assert that derivation is deterministic
hd_add_2 = None
for i in range(1, NUM_HD_ADDS + 1):
hd_add_2 = self.nodes[1].getnewaddress()
hd_info_2 = self.nodes[1].getaddressinfo(hd_add_2)
if self.options.descriptors:
assert_equal(hd_info_2["hdkeypath"], "m/84'/1'/0'/0/" + str(i))
else:
assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/" + str(i) + "'")
assert_equal(hd_info_2["hdmasterfingerprint"], hd_fingerprint)
assert_equal(hd_add, hd_add_2)
self.connect_nodes(0, 1)
self.sync_all()
# Needs rescan
self.restart_node(1, extra_args=self.extra_args[1] + ['-rescan'])
assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1)
# Try a RPC based rescan
self.stop_node(1)
shutil.rmtree(os.path.join(self.nodes[1].datadir, self.chain, "blocks"))
shutil.rmtree(os.path.join(self.nodes[1].datadir, self.chain, "chainstate"))
shutil.copyfile(
os.path.join(self.nodes[1].datadir, "hd.bak"),
os.path.join(self.nodes[1].datadir, self.chain, "wallets", self.default_wallet_name, self.wallet_data_filename),
)
self.start_node(1, extra_args=self.extra_args[1])
self.connect_nodes(0, 1)
self.sync_all()
# Wallet automatically scans blocks older than key on startup
assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1)
out = self.nodes[1].rescanblockchain(0, 1)
assert_equal(out['start_height'], 0)
assert_equal(out['stop_height'], 1)
out = self.nodes[1].rescanblockchain()
assert_equal(out['start_height'], 0)
assert_equal(out['stop_height'], self.nodes[1].getblockcount())
assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1)
        # send a tx and make sure it's using the internal chain for the change output
txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout']
keypath = ""
for out in outs:
if out['value'] != 1:
keypath = self.nodes[1].getaddressinfo(out['scriptPubKey']['addresses'][0])['hdkeypath']
if self.options.descriptors:
assert_equal(keypath[0:14], "m/84'/1'/0'/1/")
else:
assert_equal(keypath[0:7], "m/0'/1'")
if not self.options.descriptors:
# Generate a new HD seed on node 1 and make sure it is set
orig_masterkeyid = self.nodes[1].getwalletinfo()['hdseedid']
self.nodes[1].sethdseed()
new_masterkeyid = self.nodes[1].getwalletinfo()['hdseedid']
assert orig_masterkeyid != new_masterkeyid
addr = self.nodes[1].getnewaddress()
# Make sure the new address is the first from the keypool
assert_equal(self.nodes[1].getaddressinfo(addr)['hdkeypath'], 'm/0\'/0\'/0\'')
self.nodes[1].keypoolrefill(1) # Fill keypool with 1 key
# Set a new HD seed on node 1 without flushing the keypool
new_seed = self.nodes[0].dumpprivkey(self.nodes[0].getnewaddress())
orig_masterkeyid = new_masterkeyid
self.nodes[1].sethdseed(False, new_seed)
new_masterkeyid = self.nodes[1].getwalletinfo()['hdseedid']
assert orig_masterkeyid != new_masterkeyid
addr = self.nodes[1].getnewaddress()
assert_equal(orig_masterkeyid, self.nodes[1].getaddressinfo(addr)['hdseedid'])
# Make sure the new address continues previous keypool
assert_equal(self.nodes[1].getaddressinfo(addr)['hdkeypath'], 'm/0\'/0\'/1\'')
# Check that the next address is from the new seed
self.nodes[1].keypoolrefill(1)
next_addr = self.nodes[1].getnewaddress()
assert_equal(new_masterkeyid, self.nodes[1].getaddressinfo(next_addr)['hdseedid'])
# Make sure the new address is not from previous keypool
assert_equal(self.nodes[1].getaddressinfo(next_addr)['hdkeypath'], 'm/0\'/0\'/0\'')
assert next_addr != addr
# Sethdseed parameter validity
assert_raises_rpc_error(-1, 'sethdseed', self.nodes[0].sethdseed, False, new_seed, 0)
assert_raises_rpc_error(-5, "Invalid private key", self.nodes[1].sethdseed, False, "not_wif")
assert_raises_rpc_error(-1, "JSON value is not a boolean as expected", self.nodes[1].sethdseed, "Not_bool")
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[1].sethdseed, False, True)
assert_raises_rpc_error(-5, "Already have this key", self.nodes[1].sethdseed, False, new_seed)
assert_raises_rpc_error(-5, "Already have this key", self.nodes[1].sethdseed, False, self.nodes[1].dumpprivkey(self.nodes[1].getnewaddress()))
self.log.info('Test sethdseed restoring with keys outside of the initial keypool')
self.nodes[0].generate(10)
# Restart node 1 with keypool of 3 and a different wallet
self.nodes[1].createwallet(wallet_name='origin', blank=True)
self.restart_node(1, extra_args=['-keypool=3', '-wallet=origin'])
self.connect_nodes(0, 1)
        # Restore via sethdseed and check that txs to addresses outside the keypool are seen
origin_rpc = self.nodes[1].get_wallet_rpc('origin')
seed = self.nodes[0].dumpprivkey(self.nodes[0].getnewaddress())
origin_rpc.sethdseed(True, seed)
self.nodes[1].createwallet(wallet_name='restore', blank=True)
restore_rpc = self.nodes[1].get_wallet_rpc('restore')
restore_rpc.sethdseed(True, seed) # Set to be the same seed as origin_rpc
restore_rpc.sethdseed(True) # Rotate to a new seed, making original `seed` inactive
self.nodes[1].createwallet(wallet_name='restore2', blank=True)
restore2_rpc = self.nodes[1].get_wallet_rpc('restore2')
restore2_rpc.sethdseed(True, seed) # Set to be the same seed as origin_rpc
restore2_rpc.sethdseed(True) # Rotate to a new seed, making original `seed` inactive
# Check persistence of inactive seed by reloading restore. restore2 is still loaded to test the case where the wallet is not reloaded
restore_rpc.unloadwallet()
self.nodes[1].loadwallet('restore')
restore_rpc = self.nodes[1].get_wallet_rpc('restore')
# Empty origin keypool and get an address that is beyond the initial keypool
origin_rpc.getnewaddress()
origin_rpc.getnewaddress()
last_addr = origin_rpc.getnewaddress() # Last address of initial keypool
addr = origin_rpc.getnewaddress() # First address beyond initial keypool
# Check that the restored seed has last_addr but does not have addr
info = restore_rpc.getaddressinfo(last_addr)
assert_equal(info['ismine'], True)
info = restore_rpc.getaddressinfo(addr)
assert_equal(info['ismine'], False)
info = restore2_rpc.getaddressinfo(last_addr)
assert_equal(info['ismine'], True)
info = restore2_rpc.getaddressinfo(addr)
assert_equal(info['ismine'], False)
# Check that the origin seed has addr
info = origin_rpc.getaddressinfo(addr)
assert_equal(info['ismine'], True)
# Send a transaction to addr, which is out of the initial keypool.
# The wallet that has set a new seed (restore_rpc) should not detect this transaction.
txid = self.nodes[0].sendtoaddress(addr, 1)
origin_rpc.sendrawtransaction(self.nodes[0].gettransaction(txid)['hex'])
self.nodes[0].generate(1)
self.sync_blocks()
origin_rpc.gettransaction(txid)
assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', restore_rpc.gettransaction, txid)
out_of_kp_txid = txid
# Send a transaction to last_addr, which is in the initial keypool.
# The wallet that has set a new seed (restore_rpc) should detect this transaction and generate 3 new keys from the initial seed.
# The previous transaction (out_of_kp_txid) should still not be detected as a rescan is required.
txid = self.nodes[0].sendtoaddress(last_addr, 1)
origin_rpc.sendrawtransaction(self.nodes[0].gettransaction(txid)['hex'])
self.nodes[0].generate(1)
self.sync_blocks()
origin_rpc.gettransaction(txid)
restore_rpc.gettransaction(txid)
assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', restore_rpc.gettransaction, out_of_kp_txid)
restore2_rpc.gettransaction(txid)
assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', restore2_rpc.gettransaction, out_of_kp_txid)
# After rescanning, restore_rpc should now see out_of_kp_txid and generate an additional key.
# addr should now be part of restore_rpc and be ismine
restore_rpc.rescanblockchain()
restore_rpc.gettransaction(out_of_kp_txid)
info = restore_rpc.getaddressinfo(addr)
assert_equal(info['ismine'], True)
restore2_rpc.rescanblockchain()
restore2_rpc.gettransaction(out_of_kp_txid)
info = restore2_rpc.getaddressinfo(addr)
assert_equal(info['ismine'], True)
# Check again that 3 keys were derived.
# Empty keypool and get an address that is beyond the initial keypool
origin_rpc.getnewaddress()
origin_rpc.getnewaddress()
last_addr = origin_rpc.getnewaddress()
addr = origin_rpc.getnewaddress()
# Check that the restored seed has last_addr but does not have addr
info = restore_rpc.getaddressinfo(last_addr)
assert_equal(info['ismine'], True)
info = restore_rpc.getaddressinfo(addr)
assert_equal(info['ismine'], False)
info = restore2_rpc.getaddressinfo(last_addr)
assert_equal(info['ismine'], True)
info = restore2_rpc.getaddressinfo(addr)
assert_equal(info['ismine'], False)
if __name__ == '__main__':
WalletHDTest().main()
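The assertions above repeat one keypath pattern throughout: BIP84-style unhardened change/index components for descriptor wallets, and fully hardened m/0' paths for legacy wallets. A small helper sketch (not part of the test) that captures the expected strings:

# Sketch of the hdkeypath strings the test expects (regtest coin type 1).
def expected_keypath(descriptors, internal, index):
    chain = 1 if internal else 0
    if descriptors:
        return "m/84'/1'/0'/{}/{}".format(chain, index)   # descriptor wallet, BIP84
    return "m/0'/{}'/{}'".format(chain, index)            # legacy HD wallet

assert expected_keypath(True, False, 3) == "m/84'/1'/0'/0/3"
assert expected_keypath(False, True, 0) == "m/0'/1'/0'"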
| 50.846429
| 154
| 0.641498
|
926fe9b2a6c5d3d033cf217127706a985c4c7021
| 12,848
|
py
|
Python
|
spyder/plugins/editor/widgets/tests/test_recover.py
|
Earthman100/spyder
|
949ce0f9100a69504c70a5678e8589a05aee7d38
|
[
"MIT"
] | 7,956
|
2015-02-17T01:19:09.000Z
|
2022-03-31T21:52:15.000Z
|
spyder/plugins/editor/widgets/tests/test_recover.py
|
Earthman100/spyder
|
949ce0f9100a69504c70a5678e8589a05aee7d38
|
[
"MIT"
] | 16,326
|
2015-02-16T23:15:21.000Z
|
2022-03-31T23:34:34.000Z
|
spyder/plugins/editor/widgets/tests/test_recover.py
|
Earthman100/spyder
|
949ce0f9100a69504c70a5678e8589a05aee7d38
|
[
"MIT"
] | 1,918
|
2015-02-20T19:26:26.000Z
|
2022-03-31T19:03:25.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Tests for recover.py"""
# Standard library imports
import os.path as osp
import pytest
import shutil
# Third party imports
from qtpy.QtWidgets import QDialogButtonBox, QPushButton, QTableWidget
# Local imports
from spyder.py3compat import PY2
from spyder.plugins.editor.widgets.recover import (make_temporary_files,
RecoveryDialog)
@pytest.fixture
def recovery_env(tmpdir):
"""Create a dir with various autosave files and cleans up afterwards."""
yield make_temporary_files(str(tmpdir))
shutil.rmtree(str(tmpdir))
def test_recoverydialog_has_cancel_button(qtbot, tmpdir):
"""
Test that RecoveryDialog has a Cancel button.
Test that a RecoveryDialog has a button in a dialog button box and that
this button cancels the dialog window.
"""
dialog = RecoveryDialog([])
qtbot.addWidget(dialog)
button = dialog.findChild(QDialogButtonBox).findChild(QPushButton)
with qtbot.waitSignal(dialog.rejected):
button.click()
def test_recoverydialog_table_labels(qtbot, recovery_env):
"""Test that table in RecoveryDialog has the correct labels."""
orig_dir, autosave_dir, autosave_mapping = recovery_env
dialog = RecoveryDialog(autosave_mapping)
table = dialog.findChild(QTableWidget)
def text(i, j):
return table.cellWidget(i, j).text()
# ham.py: Both original and autosave files exist, mentioned in mapping
assert osp.join(orig_dir, 'ham.py') in text(0, 0)
assert osp.join(autosave_dir, 'ham.py') in text(0, 1)
# spam.py: Only autosave file exists, mentioned in mapping
assert osp.join(orig_dir, 'spam.py') in text(1, 0)
assert 'no longer exists' in text(1, 0)
assert osp.join(autosave_dir, 'spam.py') in text(1, 1)
# eggs.py: Only original files exists, so cannot be recovered
# It won't be in the table, so nothing to test
# cheese.py: Only autosave file exists, not mentioned in mapping
assert 'not recorded' in text(2, 0)
assert osp.join(autosave_dir, 'cheese.py') in text(2, 1)
# Thus, there should be three rows in total
assert table.rowCount() == 3
def test_recoverydialog_exec_if_nonempty_when_empty(qtbot, tmpdir, mocker):
"""
Test that exec_if_nonempty does nothing if autosave files do not exist.
Specifically, test that it does not `exec_()` the dialog.
"""
dialog = RecoveryDialog([('ham', 'spam')])
mocker.patch.object(dialog, 'exec_')
assert dialog.exec_if_nonempty() == dialog.Accepted
dialog.exec_.assert_not_called()
def test_recoverydialog_exec_if_nonempty_when_nonempty(
qtbot, recovery_env, mocker):
"""Test that exec_if_nonempty executes dialog if autosave dir not empty."""
orig_dir, autosave_dir, autosave_mapping = recovery_env
dialog = RecoveryDialog(autosave_mapping)
mocker.patch.object(dialog, 'exec_', return_value='eggs')
assert dialog.exec_if_nonempty() == 'eggs'
assert dialog.exec_.called
def test_recoverydialog_exec_if_nonempty_when_no_autosave_dir(
qtbot, recovery_env, mocker):
"""
Test that exec_if_nonempty does nothing if autosave dir does not exist.
Specifically, test that it does not `exec_()` the dialog.
"""
orig_dir, autosave_dir, autosave_mapping = recovery_env
shutil.rmtree(autosave_dir)
dialog = RecoveryDialog(autosave_mapping)
mocker.patch.object(dialog, 'exec_')
assert dialog.exec_if_nonempty() == dialog.Accepted
dialog.exec_.assert_not_called()
def test_recoverydialog_restore_button(qtbot, recovery_env):
"""
Test the `Restore` button in `RecoveryDialog`.
Test that after pressing the 'Restore' button, the original file is
replaced by the autosave file, the latter is removed, and the row in the
grid is deactivated.
"""
orig_dir, autosave_dir, autosave_mapping = recovery_env
dialog = RecoveryDialog(autosave_mapping)
table = dialog.findChild(QTableWidget)
button = table.cellWidget(0, 2).findChildren(QPushButton)[0]
button.click()
with open(osp.join(orig_dir, 'ham.py')) as f:
assert f.read() == 'ham = "autosave"\n'
assert not osp.isfile(osp.join(autosave_dir, 'ham.py'))
for col in range(table.columnCount()):
assert not table.cellWidget(0, col).isEnabled()
def test_recoverydialog_restore_when_original_does_not_exist(
qtbot, recovery_env):
"""
Test the `Restore` button when the original file does not exist.
Test that after pressing the 'Restore' button, the autosave file is moved
to the location of the original file and the row in the grid is
deactivated.
"""
orig_dir, autosave_dir, autosave_mapping = recovery_env
dialog = RecoveryDialog(autosave_mapping)
table = dialog.findChild(QTableWidget)
button = table.cellWidget(1, 2).findChildren(QPushButton)[0]
button.click()
with open(osp.join(orig_dir, 'spam.py')) as f:
assert f.read() == 'spam = "autosave"\n'
assert not osp.isfile(osp.join(autosave_dir, 'spam.py'))
for col in range(table.columnCount()):
assert not table.cellWidget(1, col).isEnabled()
def test_recoverydialog_restore_when_original_not_recorded(
qtbot, recovery_env, mocker):
"""
Test the `Restore` button when the original file name is not known.
Test that after pressing the 'Restore' button, the autosave file is moved
to a location specified by the user and the row in the grid is deactivated.
"""
orig_dir, autosave_dir, autosave_mapping = recovery_env
new_name = osp.join(orig_dir, 'monty.py')
mocker.patch('spyder.plugins.editor.widgets.recover.getsavefilename',
return_value=(new_name, 'ignored'))
dialog = RecoveryDialog(autosave_mapping)
table = dialog.findChild(QTableWidget)
button = table.cellWidget(2, 2).findChildren(QPushButton)[0]
button.click()
with open(new_name) as f:
assert f.read() == 'cheese = "autosave"\n'
assert not osp.isfile(osp.join(autosave_dir, 'cheese.py'))
for col in range(table.columnCount()):
assert not table.cellWidget(2, col).isEnabled()
def test_recoverydialog_restore_fallback(qtbot, recovery_env, mocker):
"""
Test fallback for when os.replace() fails when recovering a file.
Test that after pressing the 'Restore' button, if os.replace() fails,
the fallback to copy and delete kicks in and the restore succeeds.
Regression test for spyder-ide/spyder#8631.
"""
orig_dir, autosave_dir, autosave_mapping = recovery_env
if not PY2:
mocker.patch('spyder.plugins.editor.widgets.recover.os.replace',
side_effect=OSError)
dialog = RecoveryDialog(autosave_mapping)
table = dialog.findChild(QTableWidget)
button = table.cellWidget(0, 2).findChildren(QPushButton)[0]
button.click()
with open(osp.join(orig_dir, 'ham.py')) as f:
assert f.read() == 'ham = "autosave"\n'
assert not osp.isfile(osp.join(autosave_dir, 'ham.py'))
for col in range(table.columnCount()):
assert not table.cellWidget(0, col).isEnabled()
def test_recoverydialog_restore_when_error(qtbot, recovery_env, mocker):
"""
Test that errors during a restore action are handled gracefully.
Test that if an error arises when restoring a file, both the original and
the autosave files are kept unchanged, a dialog is displayed, and the row
in the grid is not deactivated.
"""
orig_dir, autosave_dir, autosave_mapping = recovery_env
if not PY2:
mocker.patch('spyder.plugins.editor.widgets.recover.os.replace',
side_effect=OSError)
mocker.patch('spyder.plugins.editor.widgets.recover.shutil.copy2',
side_effect=IOError)
mock_QMessageBox = mocker.patch(
'spyder.plugins.editor.widgets.recover.QMessageBox')
dialog = RecoveryDialog(autosave_mapping)
table = dialog.findChild(QTableWidget)
button = table.cellWidget(0, 2).findChildren(QPushButton)[0]
button.click()
with open(osp.join(orig_dir, 'ham.py')) as f:
assert f.read() == 'ham = "original"\n'
with open(osp.join(autosave_dir, 'ham.py')) as f:
assert f.read() == 'ham = "autosave"\n'
assert mock_QMessageBox.called
for col in range(table.columnCount()):
assert table.cellWidget(0, col).isEnabled()
def test_recoverydialog_accepted_after_all_restored(
qtbot, recovery_env, mocker):
"""
Test that the recovery dialog is accepted after all files are restored.
Click all `Restore` buttons and test that the dialog is accepted
afterwards, but not before.
"""
orig_dir, autosave_dir, autosave_mapping = recovery_env
new_name = osp.join(orig_dir, 'monty.py')
mocker.patch('spyder.plugins.editor.widgets.recover.getsavefilename',
return_value=(new_name, 'ignored'))
dialog = RecoveryDialog(autosave_mapping)
table = dialog.findChild(QTableWidget)
with qtbot.assertNotEmitted(dialog.accepted):
for row in range(table.rowCount() - 1):
table.cellWidget(row, 2).findChildren(QPushButton)[0].click()
with qtbot.waitSignal(dialog.accepted):
row = table.rowCount() - 1
table.cellWidget(row, 2).findChildren(QPushButton)[0].click()
def test_recoverydialog_discard_button(qtbot, recovery_env):
"""
Test the `Discard` button in the recovery dialog.
Test that after pressing the 'Discard' button, the autosave file is
deleted, the original file unchanged, and the row in the grid is
deactivated.
"""
orig_dir, autosave_dir, autosave_mapping = recovery_env
dialog = RecoveryDialog(autosave_mapping)
table = dialog.findChild(QTableWidget)
button = table.cellWidget(0, 2).findChildren(QPushButton)[1]
button.click()
assert not osp.isfile(osp.join(autosave_dir, 'ham.py'))
with open(osp.join(orig_dir, 'ham.py')) as f:
assert f.read() == 'ham = "original"\n'
for col in range(table.columnCount()):
assert not table.cellWidget(0, col).isEnabled()
def test_recoverydialog_discard_when_error(qtbot, recovery_env, mocker):
"""
Test that errors during a discard action are handled gracefully.
Test that if an error arises when discarding a file, both the original and
the autosave files are kept unchanged, a dialog is displayed, and the row
in the grid is not deactivated.
"""
orig_dir, autosave_dir, autosave_mapping = recovery_env
mocker.patch('spyder.plugins.editor.widgets.recover.os.remove',
side_effect=OSError)
mock_QMessageBox = mocker.patch(
'spyder.plugins.editor.widgets.recover.QMessageBox')
dialog = RecoveryDialog(autosave_mapping)
table = dialog.findChild(QTableWidget)
button = table.cellWidget(0, 2).findChildren(QPushButton)[1]
button.click()
with open(osp.join(orig_dir, 'ham.py')) as f:
assert f.read() == 'ham = "original"\n'
with open(osp.join(autosave_dir, 'ham.py')) as f:
assert f.read() == 'ham = "autosave"\n'
assert mock_QMessageBox.called
for col in range(table.columnCount()):
assert table.cellWidget(0, col).isEnabled()
def test_recoverydialog_open_button(qtbot, recovery_env):
"""
Test the `Open` button in the recovery dialog.
Test that after pressing the 'Open' button, `files_to_open` contains
the autosave and the original file, and the row in the grid is
deactivated.
"""
orig_dir, autosave_dir, autosave_mapping = recovery_env
dialog = RecoveryDialog(autosave_mapping)
table = dialog.findChild(QTableWidget)
button = table.cellWidget(0, 2).findChildren(QPushButton)[2]
button.click()
assert dialog.files_to_open == [osp.join(orig_dir, 'ham.py'),
osp.join(autosave_dir, 'ham.py')]
for col in range(table.columnCount()):
assert not table.cellWidget(0, col).isEnabled()
def test_recoverydialog_open_when_no_original(qtbot, recovery_env):
"""
Test the `Open` button when the original file is not known.
Test that when the user requests to open an autosave file for which the
original file is not known, `files_to_open` contains only the autosave
file.
"""
orig_dir, autosave_dir, autosave_mapping = recovery_env
dialog = RecoveryDialog(autosave_mapping)
table = dialog.findChild(QTableWidget)
button = table.cellWidget(2, 2).findChildren(QPushButton)[2]
button.click()
assert dialog.files_to_open == [osp.join(autosave_dir, 'cheese.py')]
for col in range(table.columnCount()):
assert not table.cellWidget(2, col).isEnabled()
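From these tests, RecoveryDialog takes a list of (original, autosave) path pairs, exposes exec_if_nonempty(), and collects the files the user chose to open in files_to_open. A minimal driver sketch under those assumptions; the paths and the QApplication setup are illustrative, not taken from the source.

# Minimal driver sketch; mapping shape and attribute names are taken from the
# tests above, the paths are made up.
import sys
from qtpy.QtWidgets import QApplication
from spyder.plugins.editor.widgets.recover import RecoveryDialog

app = QApplication(sys.argv)
dialog = RecoveryDialog([('/project/ham.py', '/autosave/ham.py')])
if dialog.exec_if_nonempty() == dialog.Accepted:
    print('Files to reopen in the editor:', dialog.files_to_open)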
| 39.051672
| 79
| 0.701199
|
33a1c291f568e58ad0ef4bf9f31de8c5f1cd1fc5
| 3,457
|
py
|
Python
|
rplugin/python3/denite/source/tag.py
|
timgates42/denite.nvim
|
12a9b5456f5a4600afeb0ba284ce1098bd35e501
|
[
"MIT"
] | null | null | null |
rplugin/python3/denite/source/tag.py
|
timgates42/denite.nvim
|
12a9b5456f5a4600afeb0ba284ce1098bd35e501
|
[
"MIT"
] | null | null | null |
rplugin/python3/denite/source/tag.py
|
timgates42/denite.nvim
|
12a9b5456f5a4600afeb0ba284ce1098bd35e501
|
[
"MIT"
] | null | null | null |
# ============================================================================
# FILE: tag.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
import re
import typing
from os.path import exists
from pathlib import Path
from denite.base.source import Base
from denite.util import (parse_tagline, Nvim,
UserContext, Candidates, Candidate)
TAG_HIGHLIGHT_SYNTAX = [
{'name': 'Type', 'link': 'Statement', 're': r'\[.\{-}\]'},
{'name': 'File', 'link': 'Type', 're': r'@\w*\W\w*'},
{'name': 'Pattern', 'link': 'Comment', 're': r'<->\s.*'},
]
class Source(Base):
def __init__(self, vim: Nvim) -> None:
super().__init__(vim)
self.vim = vim
self.name = 'tag'
self.kind = 'file'
def on_init(self, context: UserContext) -> None:
self._tags = self._get_tagfiles(context)
def highlight(self) -> None:
for syn in TAG_HIGHLIGHT_SYNTAX:
self.vim.command(
'syntax match {0}_{1} /{2}/ contained containedin={0}'.format(
self.syntax_name, syn['name'], syn['re']
)
)
self.vim.command(
'highlight default link {0}_{1} {2}'.format(
self.syntax_name, syn['name'], syn['link']
)
)
def gather_candidates(self, context: UserContext) -> Candidates:
candidates = []
for filename in self._tags:
with open(filename, 'r',
encoding=context['encoding'],
errors='replace') as ins:
for line in ins:
candidate = self._get_candidate(filename, line)
if candidate:
candidates.append(candidate)
return sorted(candidates, key=lambda value: str(value['word']))
def _get_candidate(self, filename: str, line: str) -> Candidate:
if re.match('!', line) or not line:
return {}
info = parse_tagline(line.rstrip(), filename)
candidate = {
'word': info['name'],
'action__path': info['file']
}
info['name'] = (
(info['name'][:33] + '..')
if len(info['name']) >= 33
else info['name']
)
info['file'] = Path(info['file']).name
fmt = '{name:<35} @{file:<25}'
if info['line']:
candidate['action__line'] = info['line']
fmt += ':{line} [{type}] {ref}'
else:
candidate['action__pattern'] = info['pattern']
m = re.search(r'\^\S*(.*)\$', info['pattern'])
if m:
info['pattern'] = '<-> ' + m.group(1).lstrip()
fmt += ' [{type}] {pattern}'
candidate['abbr'] = fmt.format(**info)
return candidate
def _get_tagfiles(self, context: UserContext) -> typing.List[str]:
if (
context['args']
and context['args'][0] == 'include'
and self.vim.call('exists', '*neoinclude#include#get_tag_files')
):
tagfiles = self.vim.call('neoinclude#include#get_tag_files')
else:
tagfiles = self.vim.call('tagfiles')
return [
x
for x in self.vim.call('map', tagfiles, 'fnamemodify(v:val, ":p")')
if exists(x)
]
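The candidate abbr is pure string formatting over the keys parse_tagline yields ('name', 'file', 'line' or 'pattern', 'type', 'ref'). A quick check of the pattern-style branch with made-up values:

# Rendering check for the pattern branch of _get_candidate; only the format
# strings mirror the source, the info values are made up.
info = {'name': 'main', 'file': 'app.c', 'type': 'f',
        'pattern': '<-> int main(void)', 'line': '', 'ref': ''}
fmt = '{name:<35} @{file:<25}' + ' [{type}] {pattern}'
print(fmt.format(**info))
# -> 'main ... @app.c ... [f] <-> int main(void)' with the name and file
#    padded to fixed-width columns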
| 33.563107
| 79
| 0.482789
|
2ccee2153349f6f6eac4c9cb250121311a412541
| 130
|
py
|
Python
|
congregation/config/__init__.py
|
CCD-HRI/congregation
|
a552856b03a64a4295792184107c4e529ca3f4ae
|
[
"MIT"
] | 3
|
2020-10-05T16:30:15.000Z
|
2021-01-22T13:38:02.000Z
|
congregation/config/__init__.py
|
CCD-HRI/congregation
|
a552856b03a64a4295792184107c4e529ca3f4ae
|
[
"MIT"
] | null | null | null |
congregation/config/__init__.py
|
CCD-HRI/congregation
|
a552856b03a64a4295792184107c4e529ca3f4ae
|
[
"MIT"
] | 1
|
2021-02-19T12:40:57.000Z
|
2021-02-19T12:40:57.000Z
|
from congregation.config.config import Config
from congregation.config.network import *
from congregation.config.codegen import *
| 32.5
| 45
| 0.846154
|
8bb2330f257a1f013cce4f0b68f2f4fc04b34dee
| 3,019
|
py
|
Python
|
miper/tests/unit/test_conf.py
|
MountainWei/miper
|
4dd6df51cb0e7d041b8dc7decebbcfb47a79f210
|
[
"Apache-2.0"
] | 1
|
2016-01-13T04:23:20.000Z
|
2016-01-13T04:23:20.000Z
|
miper/tests/unit/test_conf.py
|
MountainWei/miper
|
4dd6df51cb0e7d041b8dc7decebbcfb47a79f210
|
[
"Apache-2.0"
] | 1
|
2015-12-17T09:58:04.000Z
|
2016-08-01T15:23:27.000Z
|
miper/tests/unit/test_conf.py
|
MountainWei/miper
|
4dd6df51cb0e7d041b8dc7decebbcfb47a79f210
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from miper import test
CONF = cfg.CONF
CONF.register_opt(cfg.StrOpt('conf_unittest',
default='foo',
help='for testing purposes only'))
class ConfigTestCase(test.TestCase):
def setUp(self):
super(ConfigTestCase, self).setUp()
def test_declare(self):
self.assertNotIn('answer', CONF)
CONF.import_opt('answer', 'miper.tests.unit.declare_conf')
self.assertIn('answer', CONF)
self.assertEqual(42, CONF.answer)
# Make sure we don't overwrite anything
CONF.set_override('answer', 256)
self.assertEqual(256, CONF.answer)
CONF.import_opt('answer', 'miper.tests.unit.declare_conf')
self.assertEqual(256, CONF.answer)
def test_runtime_and_unknown_conf(self):
self.assertNotIn('runtime_answer', CONF)
import miper.tests.unit.runtime_conf # noqa
self.assertIn('runtime_answer', CONF)
self.assertEqual(54, CONF.runtime_answer)
def test_long_vs_short_conf(self):
CONF.clear()
CONF.register_cli_opt(cfg.StrOpt('duplicate_answer_long',
default='val',
help='desc'))
CONF.register_cli_opt(cfg.IntOpt('duplicate_answer',
default=50,
help='desc'))
argv = ['--duplicate_answer=60']
CONF(argv, default_config_files=[])
self.assertEqual(60, CONF.duplicate_answer)
self.assertEqual('val', CONF.duplicate_answer_long)
def test_conf_leak_left(self):
self.assertEqual('foo', CONF.conf_unittest)
self.flags(conf_unittest='bar')
self.assertEqual('bar', CONF.conf_unittest)
def test_conf_leak_right(self):
self.assertEqual('foo', CONF.conf_unittest)
self.flags(conf_unittest='bar')
self.assertEqual('bar', CONF.conf_unittest)
def test_conf_overrides(self):
self.assertEqual('foo', CONF.conf_unittest)
self.flags(conf_unittest='bar')
self.assertEqual('bar', CONF.conf_unittest)
CONF.reset()
self.assertEqual('foo', CONF.conf_unittest)
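test_declare and test_runtime_and_unknown_conf rely on two tiny helper modules that are not shown in this excerpt. A hedged sketch of what they presumably register, inferred only from the values asserted above (answer defaulting to 42, runtime_answer to 54):

# Presumed contents of miper/tests/unit/declare_conf.py (inferred, not shown above).
from oslo_config import cfg

cfg.CONF.register_opt(cfg.IntOpt('answer', default=42, help='test-only option'))

# Presumed contents of miper/tests/unit/runtime_conf.py, registered at import time.
cfg.CONF.register_opt(cfg.IntOpt('runtime_answer', default=54, help='test-only option'))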
| 35.940476
| 78
| 0.642597
|
a53569b1c3d0f1c0b4ef30ef9ad205a7152513cf
| 12,569
|
py
|
Python
|
tests/test_tablemultivariablelookup.py
|
marcelosalles/pyidf
|
c2f744211572b5e14e29522aac1421ba88addb0e
|
[
"Apache-2.0"
] | 19
|
2015-12-08T23:33:51.000Z
|
2022-01-31T04:41:10.000Z
|
tests/test_tablemultivariablelookup.py
|
marcelosalles/pyidf
|
c2f744211572b5e14e29522aac1421ba88addb0e
|
[
"Apache-2.0"
] | 2
|
2019-10-04T10:57:00.000Z
|
2021-10-01T06:46:17.000Z
|
tests/test_tablemultivariablelookup.py
|
marcelosalles/pyidf
|
c2f744211572b5e14e29522aac1421ba88addb0e
|
[
"Apache-2.0"
] | 7
|
2015-11-04T02:25:01.000Z
|
2021-12-08T03:14:28.000Z
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.performance_tables import TableMultiVariableLookup
log = logging.getLogger(__name__)
class TestTableMultiVariableLookup(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_tablemultivariablelookup(self):
pyidf.validation_level = ValidationLevel.error
obj = TableMultiVariableLookup()
# alpha
var_name = "Name"
obj.name = var_name
# alpha
var_interpolation_method = "LinearInterpolationOfTable"
obj.interpolation_method = var_interpolation_method
# integer
var_number_of_interpolation_points = 3
obj.number_of_interpolation_points = var_number_of_interpolation_points
# alpha
var_curve_type = "Linear"
obj.curve_type = var_curve_type
# alpha
var_table_data_format = "SingleLineIndependentVariableWithMatrix"
obj.table_data_format = var_table_data_format
# alpha
var_external_file_name = "External File Name"
obj.external_file_name = var_external_file_name
# alpha
var_x1_sort_order = "Ascending"
obj.x1_sort_order = var_x1_sort_order
# alpha
var_x2_sort_order = "Ascending"
obj.x2_sort_order = var_x2_sort_order
# real
var_normalization_reference = 9.9
obj.normalization_reference = var_normalization_reference
# real
var_minimum_value_of_x1 = 10.1
obj.minimum_value_of_x1 = var_minimum_value_of_x1
# real
var_maximum_value_of_x1 = 11.11
obj.maximum_value_of_x1 = var_maximum_value_of_x1
# real
var_minimum_value_of_x2 = 12.12
obj.minimum_value_of_x2 = var_minimum_value_of_x2
# real
var_maximum_value_of_x2 = 13.13
obj.maximum_value_of_x2 = var_maximum_value_of_x2
# real
var_minimum_value_of_x3 = 14.14
obj.minimum_value_of_x3 = var_minimum_value_of_x3
# real
var_maximum_value_of_x3 = 15.15
obj.maximum_value_of_x3 = var_maximum_value_of_x3
# real
var_minimum_value_of_x4 = 16.16
obj.minimum_value_of_x4 = var_minimum_value_of_x4
# real
var_maximum_value_of_x4 = 17.17
obj.maximum_value_of_x4 = var_maximum_value_of_x4
# real
var_minimum_value_of_x5 = 18.18
obj.minimum_value_of_x5 = var_minimum_value_of_x5
# real
var_maximum_value_of_x5 = 19.19
obj.maximum_value_of_x5 = var_maximum_value_of_x5
# real
var_minimum_table_output = 20.2
obj.minimum_table_output = var_minimum_table_output
# real
var_maximum_table_output = 21.21
obj.maximum_table_output = var_maximum_table_output
# alpha
var_input_unit_type_for_x1 = "Dimensionless"
obj.input_unit_type_for_x1 = var_input_unit_type_for_x1
# alpha
var_input_unit_type_for_x2 = "Dimensionless"
obj.input_unit_type_for_x2 = var_input_unit_type_for_x2
# alpha
var_input_unit_type_for_x3 = "Dimensionless"
obj.input_unit_type_for_x3 = var_input_unit_type_for_x3
# alpha
var_input_unit_type_for_x4 = "Dimensionless"
obj.input_unit_type_for_x4 = var_input_unit_type_for_x4
# alpha
var_input_unit_type_for_x5 = "Dimensionless"
obj.input_unit_type_for_x5 = var_input_unit_type_for_x5
# alpha
var_output_unit_type = "Dimensionless"
obj.output_unit_type = var_output_unit_type
# integer
var_number_of_independent_variables = 3
obj.number_of_independent_variables = var_number_of_independent_variables
# integer
var_number_of_values_for_independent_variable_x1 = 29
obj.number_of_values_for_independent_variable_x1 = var_number_of_values_for_independent_variable_x1
# real
var_field_1_determined_by_the_number_of_independent_variables = 30.3
obj.field_1_determined_by_the_number_of_independent_variables = var_field_1_determined_by_the_number_of_independent_variables
# real
var_field_2_determined_by_the_number_of_independent_variables = 31.31
obj.field_2_determined_by_the_number_of_independent_variables = var_field_2_determined_by_the_number_of_independent_variables
paras = []
var_field_3_determined_by_the_number_of_independent_variables = 32.32
paras.append(var_field_3_determined_by_the_number_of_independent_variables)
var_v1 = 33.33
paras.append(var_v1)
var_v2 = 34.34
paras.append(var_v2)
var_v3 = 35.35
paras.append(var_v3)
var_v4 = 36.36
paras.append(var_v4)
var_v5 = 37.37
paras.append(var_v5)
var_v6 = 38.38
paras.append(var_v6)
var_v7 = 39.39
paras.append(var_v7)
var_v8 = 40.4
paras.append(var_v8)
var_v9 = 41.41
paras.append(var_v9)
var_v10 = 42.42
paras.append(var_v10)
var_v11 = 43.43
paras.append(var_v11)
var_v12 = 44.44
paras.append(var_v12)
var_v13 = 45.45
paras.append(var_v13)
var_v14 = 46.46
paras.append(var_v14)
var_v15 = 47.47
paras.append(var_v15)
var_v16 = 48.48
paras.append(var_v16)
var_v17 = 49.49
paras.append(var_v17)
var_v18 = 50.5
paras.append(var_v18)
var_v19 = 51.51
paras.append(var_v19)
obj.add_extensible(*paras)
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.tablemultivariablelookups[0].name, var_name)
self.assertEqual(idf2.tablemultivariablelookups[0].interpolation_method, var_interpolation_method)
self.assertEqual(idf2.tablemultivariablelookups[0].number_of_interpolation_points, var_number_of_interpolation_points)
self.assertEqual(idf2.tablemultivariablelookups[0].curve_type, var_curve_type)
self.assertEqual(idf2.tablemultivariablelookups[0].table_data_format, var_table_data_format)
self.assertEqual(idf2.tablemultivariablelookups[0].external_file_name, var_external_file_name)
self.assertEqual(idf2.tablemultivariablelookups[0].x1_sort_order, var_x1_sort_order)
self.assertEqual(idf2.tablemultivariablelookups[0].x2_sort_order, var_x2_sort_order)
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].normalization_reference, var_normalization_reference)
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].minimum_value_of_x1, var_minimum_value_of_x1)
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].maximum_value_of_x1, var_maximum_value_of_x1)
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].minimum_value_of_x2, var_minimum_value_of_x2)
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].maximum_value_of_x2, var_maximum_value_of_x2)
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].minimum_value_of_x3, var_minimum_value_of_x3)
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].maximum_value_of_x3, var_maximum_value_of_x3)
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].minimum_value_of_x4, var_minimum_value_of_x4)
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].maximum_value_of_x4, var_maximum_value_of_x4)
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].minimum_value_of_x5, var_minimum_value_of_x5)
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].maximum_value_of_x5, var_maximum_value_of_x5)
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].minimum_table_output, var_minimum_table_output)
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].maximum_table_output, var_maximum_table_output)
self.assertEqual(idf2.tablemultivariablelookups[0].input_unit_type_for_x1, var_input_unit_type_for_x1)
self.assertEqual(idf2.tablemultivariablelookups[0].input_unit_type_for_x2, var_input_unit_type_for_x2)
self.assertEqual(idf2.tablemultivariablelookups[0].input_unit_type_for_x3, var_input_unit_type_for_x3)
self.assertEqual(idf2.tablemultivariablelookups[0].input_unit_type_for_x4, var_input_unit_type_for_x4)
self.assertEqual(idf2.tablemultivariablelookups[0].input_unit_type_for_x5, var_input_unit_type_for_x5)
self.assertEqual(idf2.tablemultivariablelookups[0].output_unit_type, var_output_unit_type)
self.assertEqual(idf2.tablemultivariablelookups[0].number_of_independent_variables, var_number_of_independent_variables)
self.assertEqual(idf2.tablemultivariablelookups[0].number_of_values_for_independent_variable_x1, var_number_of_values_for_independent_variable_x1)
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].field_1_determined_by_the_number_of_independent_variables, var_field_1_determined_by_the_number_of_independent_variables)
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].field_2_determined_by_the_number_of_independent_variables, var_field_2_determined_by_the_number_of_independent_variables)
index = obj.extensible_field_index("Field 3 Determined by the Number of Independent Variables")
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].extensibles[0][index], var_field_3_determined_by_the_number_of_independent_variables)
index = obj.extensible_field_index("V1")
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].extensibles[0][index], var_v1)
index = obj.extensible_field_index("V2")
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].extensibles[0][index], var_v2)
index = obj.extensible_field_index("V3")
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].extensibles[0][index], var_v3)
index = obj.extensible_field_index("V4")
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].extensibles[0][index], var_v4)
index = obj.extensible_field_index("V5")
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].extensibles[0][index], var_v5)
index = obj.extensible_field_index("V6")
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].extensibles[0][index], var_v6)
index = obj.extensible_field_index("V7")
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].extensibles[0][index], var_v7)
index = obj.extensible_field_index("V8")
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].extensibles[0][index], var_v8)
index = obj.extensible_field_index("V9")
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].extensibles[0][index], var_v9)
index = obj.extensible_field_index("V10")
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].extensibles[0][index], var_v10)
index = obj.extensible_field_index("V11")
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].extensibles[0][index], var_v11)
index = obj.extensible_field_index("V12")
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].extensibles[0][index], var_v12)
index = obj.extensible_field_index("V13")
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].extensibles[0][index], var_v13)
index = obj.extensible_field_index("V14")
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].extensibles[0][index], var_v14)
index = obj.extensible_field_index("V15")
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].extensibles[0][index], var_v15)
index = obj.extensible_field_index("V16")
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].extensibles[0][index], var_v16)
index = obj.extensible_field_index("V17")
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].extensibles[0][index], var_v17)
index = obj.extensible_field_index("V18")
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].extensibles[0][index], var_v18)
index = obj.extensible_field_index("V19")
self.assertAlmostEqual(idf2.tablemultivariablelookups[0].extensibles[0][index], var_v19)
| 52.153527
| 186
| 0.738086
|
3e49e0cdd977537ed757179668b6fc1443e7cfa5
| 34,203
|
py
|
Python
|
test/functional/rpc_fundrawtransaction.py
|
limitstory/pyeongtaekcoin
|
c77889d1ec25759b67fab17180f17eb8f96bbaa1
|
[
"MIT"
] | null | null | null |
test/functional/rpc_fundrawtransaction.py
|
limitstory/pyeongtaekcoin
|
c77889d1ec25759b67fab17180f17eb8f96bbaa1
|
[
"MIT"
] | null | null | null |
test/functional/rpc_fundrawtransaction.py
|
limitstory/pyeongtaekcoin
|
c77889d1ec25759b67fab17180f17eb8f96bbaa1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Pyeongtaekcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from decimal import Decimal
from test_framework.test_framework import PyeongtaekcoinTestFramework
from test_framework.util import (
assert_equal,
assert_fee_amount,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises_rpc_error,
connect_nodes_bi,
count_bytes,
find_vout_for_address,
)
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(PyeongtaekcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
        # If the fee's positive delta is higher than this value the tests will fail;
        # a negative delta always fails the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].getaddressinfo(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
# Lock UTXO so nodes[0] doesn't accidentally spend it
watchonly_vout = find_vout_for_address(self.nodes[0], watchonly_txid, watchonly_address)
self.nodes[0].lockunspent(False, [{"txid": watchonly_txid, "vout": watchonly_vout}])
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
        # test a fundrawtransaction which will not get a change output      #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
# reserveChangeKey was deprecated and is now removed
assert_raises_rpc_error(-3, "Unexpected key reserveChangeKey", lambda: self.nodes[2].fundrawtransaction(hexstring=rawtx, options={'reserveChangeKey': True}))
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-5, "changeAddress must be a valid pyeongtaekcoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################
# test a fundrawtransaction with a provided change type #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[2].fundrawtransaction, rawtx, {'change_type': None})
assert_raises_rpc_error(-5, "Unknown change type ''", self.nodes[2].fundrawtransaction, rawtx, {'change_type': ''})
rawtx = self.nodes[2].fundrawtransaction(rawtx, {'change_type': 'bech32'})
dec_tx = self.nodes[2].decoderawtransaction(rawtx['hex'])
assert_equal('witness_v0_keyhash', dec_tx['vout'][rawtx['changepos']]['scriptPubKey']['type'])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) # we should now see exactly the two vins that were given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
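# Funding fails here presumably because the wallet cannot attach any value to the
# unknown input, so it cannot cover the 1.0 output.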
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
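# feeTolerance absorbs the small size (and therefore fee) difference between the
# wallet-built transaction and the funded raw transaction.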
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendmany
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 4of5 multisig p2sh transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
addr3Obj = self.nodes[1].getaddressinfo(addr3)
addr4Obj = self.nodes[1].getaddressinfo(addr4)
addr5Obj = self.nodes[1].getaddressinfo(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
# send 1.2 PTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawtx)
signedTx = self.nodes[2].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
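# node2 holds both keys behind the 2of2 address, so a single
# signrawtransactionwithwallet call is enough to complete the multisig spend.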
############################################################
# locked wallet test
self.stop_node(0)
self.nodes[1].node_encrypt_wallet("test")
self.stop_node(2)
self.stop_node(3)
self.start_nodes()
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
# Again lock the watchonly UTXO or nodes[0] may spend it, because
# lockunspent is memory-only and thus lost on restart
self.nodes[0].lockunspent(False, [{"txid": watchonly_txid, "vout": watchonly_vout}])
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node0
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#create same transaction over sendmany
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
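# The tolerance is scaled by ~19 because every additional input adds its own
# signature-size estimation slack to the fee.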
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
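# The OP_RETURN-only template has no inputs and a zero-value data output, so funding
# must add at least one vin plus a change output to pay for it.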
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransactionwithwallet(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransactionwithwallet(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
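# Spending the full watch-only amount forces a second, spendable input to cover the
# fee; node3 can only partially sign, and node0 (which actually owns the watch-only
# key) completes the signature.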
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 1*min_relay_tx_fee}) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
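# With an identical transaction template, the fee should scale roughly linearly with
# the requested feeRate, which is what assert_fee_amount checks against the 1x baseline.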
################################
# Test no address reuse occurs #
################################
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
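# Each fundrawtransaction call consumes a fresh change key from the keypool, so the
# change address should never collide with the next getnewaddress result.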
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx_['hex']) for tx_ in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
# output 0 takes at least as much of the fee as outputs 2 and 3, but no more than 2 satoshis more
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
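# Summary: the fee is split evenly across the outputs listed in subtractFeeFromOutputs,
# with any indivisible remainder (a satoshi or two) charged to the first listed output.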
if __name__ == '__main__':
RawTransactionsTest().main()
| 45.182299 | 223 | 0.575067 |
2f6b1e187326875e8f55f729bc5ed8da9e235a32 | 436 | py | Python | problems/day-14/part_1.py | stringham/advent-of-code-2020 | 01cfad88b2d70969976f44efdb66245470d5f925 | ["MIT"] | 1 | 2020-12-21T10:56:07.000Z | 2020-12-21T10:56:07.000Z | problems/day-14/part_1.py | stringham/advent-of-code-2020 | 01cfad88b2d70969976f44efdb66245470d5f925 | ["MIT"] | null | null | null | problems/day-14/part_1.py | stringham/advent-of-code-2020 | 01cfad88b2d70969976f44efdb66245470d5f925 | ["MIT"] | 1 | 2020-12-23T20:15:16.000Z | 2020-12-23T20:15:16.000Z |
#!/usr/bin/env python3
import sys
memory = {}
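# Part 1 mask semantics: a '0' or '1' in the mask overwrites the corresponding bit of
# the value, while 'X' leaves that bit unchanged (e.g. in the puzzle example,
# mask ...X1XXXX0X turns 11 into 73).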
for line in sys.stdin:
parts = line.split()
if parts[0] == "mask":
mask = parts[2]
continue
address = int(parts[0][4:-1])
value_bin = format(int(parts[2]), "b").zfill(len(mask))
value_bin = "".join(
b if mask[i] == "X" else mask[i] for i, b in enumerate(value_bin)
)
memory[address] = int(value_bin, 2)
print(sum(memory.values()))
| 18.956522 | 73 | 0.571101 |
f817590e2a20f96379584d0cb94a2c468166584f | 59,720 | py | Python | fastly/__init__.py | jcristau/fastly-python | 4c25a08cc29c119279c30970c9c38d7e5135b005 | ["BSD-2-Clause"] | null | null | null | fastly/__init__.py | jcristau/fastly-python | 4c25a08cc29c119279c30970c9c38d7e5135b005 | ["BSD-2-Clause"] | null | null | null | fastly/__init__.py | jcristau/fastly-python | 4c25a08cc29c119279c30970c9c38d7e5135b005 | ["BSD-2-Clause"] | null | null | null |
#!/usr/bin/python
# Author: Chris Zacharias (chris@imgix.com)
# Copyright (c) 2012, Zebrafish Labs Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from datetime import datetime
import httplib2
import json
import re
import sys
if sys.version_info[0] < 3:
from urllib import quote, urlencode
else:
from urllib.parse import quote, urlencode
from .version import __version__
FASTLY_SCHEME = "https"
FASTLY_HOST = "api.fastly.com"
FASTLY_SESSION_REGEX = re.compile(r"(fastly\.session=[^;]+);")
class FastlyRoles(object):
USER = "user"
BILLING = "billing"
ENGINEER = "engineer"
SUPERUSER = "superuser"
class FastlyCacheSettingsAction(object):
CACHE = "cache"
PASS = "pass"
RESTART = "restart"
class FastlyConditionType(object):
RESPONSE = "response"
CACHE = "cache"
REQUEST = "request"
FETCH = "fetch"
class FastlyHeaderAction(object):
SET = "set"
APPEND = "append"
DELETE = "delete"
REGEX = "regex"
REGEX_ALL = "regex_repeat"
class FastlyHeaderType(object):
RESPONSE = "response"
FETCH = "fetch"
CACHE = "cache"
REQUEST = "request"
class FastlyRequestSettingAction(object):
LOOKUP = "lookup"
PASS = "pass"
class FastlyForwardedForAction(object):
CLEAR = "clear"
LEAVE = "leave"
APPEND = "append"
APPEND_ALL = "append_all"
OVERWRITE = "overwrite"
class FastlyStatsType(object):
ALL = "all"
DAILY = "daily"
HOURLY = "hourly"
MINUTELY = "minutely"
class FastlyDirectorType(object):
RANDOM = 1
ROUNDROBIN = 2
HASH = 3
CLIENT = 4
class FastlyConnection(object):
def __init__(self, api_key):
self._session = None
self._api_key = api_key
self._fully_authed = False
@property
def fully_authed(self):
return self._fully_authed
def login(self, user, password):
body = self._formdata({
"user": user,
"password": password,
}, ["user", "password"])
content = self._fetch("/login", method="POST", body=body)
self._fully_authed = True
return FastlySession(self, content)
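# Illustrative usage sketch (only names defined in this module are used; whether a
# given endpoint needs login() or just the API key depends on the Fastly API):
#   conn = FastlyConnection("MY_API_KEY")
#   service = conn.get_service_by_name("my-service")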
def list_backends(self, service_id, version_number):
"""List all backends for a particular service and version."""
content = self._fetch("/service/%s/version/%d/backend" % (service_id, version_number))
return map(lambda x: FastlyBackend(self, x), content)
def create_backend(
self,
service_id,
version_number,
name,
address,
use_ssl=False,
port=80,
connect_timeout=1000,
first_byte_timeout=15000,
between_bytes_timeout=10000,
error_threshold=0,
max_conn=200,
weight=100,
auto_loadbalance=False,
shield=None,
request_condition=None,
healthcheck=None,
comment=None,
ssl_cert_hostname=None,
ssl_sni_hostname=None,
min_tls_version=None,
max_tls_version=None,):
"""Create a backend for a particular service and version."""
body = self._formdata({
"name": name,
"address": address,
"use_ssl": use_ssl,
"port": port,
"connect_timeout": connect_timeout,
"first_byte_timeout": first_byte_timeout,
"between_bytes_timeout": between_bytes_timeout,
"error_threshold": error_threshold,
"max_conn": max_conn,
"weight": weight,
"auto_loadbalance": auto_loadbalance,
"shield": shield,
"request_condition": request_condition,
"healthcheck": healthcheck,
"comment": comment,
"ssl_cert_hostname": ssl_cert_hostname,
"ssl_sni_hostname": ssl_sni_hostname,
"min_tls_version": min_tls_version,
"max_tls_version": max_tls_version,
}, FastlyBackend.FIELDS)
content = self._fetch("/service/%s/version/%d/backend" % (service_id, version_number), method="POST", body=body)
return FastlyBackend(self, content)
def get_backend(self, service_id, version_number, name):
"""Get the backend for a particular service and version."""
content = self._fetch("/service/%s/version/%d/backend/%s" % (service_id, version_number, quote(name, safe='')))
return FastlyBackend(self, content)
def update_backend(self, service_id, version_number, name_key, **kwargs):
"""Update the backend for a particular service and version."""
body = self._formdata(kwargs, FastlyBackend.FIELDS)
content = self._fetch("/service/%s/version/%d/backend/%s" % (service_id, version_number, quote(name_key, safe='')), method="PUT", body=body)
return FastlyBackend(self, content)
def delete_backend(self, service_id, version_number, name):
"""Delete the backend for a particular service and version."""
content = self._fetch("/service/%s/version/%d/backend/%s" % (service_id, version_number, quote(name, safe='')), method="DELETE")
return self._status(content)
def check_backends(self, service_id, version_number):
"""Performs a health check against each backend in version. If the backend has a specific type of healthcheck, that one is performed, otherwise a HEAD request to / is performed. The first item is the details on the Backend itself. The second item is details of the specific HTTP request performed as a health check. The third item is the response details."""
content = self._fetch("/service/%s/version/%d/backend/check_all" % (service_id, version_number))
# TODO: Use a strong-typed class for output?
return content
def list_cache_settings(self, service_id, version_number):
"""Get a list of all cache settings for a particular service and version."""
content = self._fetch("/service/%s/version/%d/cache_settings" % (service_id, version_number))
return map(lambda x: FastlyCacheSettings(self, x), content)
def create_cache_settings(
self,
service_id,
version_number,
name,
action,
ttl=None,
stale_ttl=None,
cache_condition=None):
"""Create a new cache settings object."""
body = self._formdata({
"name": name,
"action": action,
"ttl": ttl,
"stale_ttl": stale_ttl,
"cache_condition": cache_condition,
}, FastlyCacheSettings.FIELDS)
content = self._fetch("/service/%s/version/%d/cache_settings" % (service_id, version_number), method="POST", body=body)
return FastlyCacheSettings(self, content)
def get_cache_settings(self, service_id, version_number, name):
"""Get a specific cache settings object."""
content = self._fetch("/service/%s/version/%d/cache_settings/%s" % (service_id, version_number, quote(name, safe='')))
return FastlyCacheSettings(self, content)
def update_cache_settings(self, service_id, version_number, name_key, **kwargs):
"""Update a specific cache settings object."""
body = self._formdata(kwargs, FastlyCacheSettings.FIELDS)
content = self._fetch("/service/%s/version/%d/cache_settings/%s" % (service_id, version_number, quote(name_key, safe='')), method="PUT", body=body)
return FastlyCacheSettings(self, content)
def delete_cache_settings(self, service_id, version_number, name):
"""Delete a specific cache settings object."""
content = self._fetch("/service/%s/version/%d/cache_settings/%s" % (service_id, version_number, quote(name, safe='')), method="DELETE")
return self._status(content)
def list_conditions(self, service_id, version_number):
"""Gets all conditions for a particular service and version."""
content = self._fetch("/service/%s/version/%d/condition" % (service_id, version_number))
return map(lambda x: FastlyCondition(self, x), content)
def create_condition(
self,
service_id,
version_number,
name,
_type,
statement,
priority="10",
comment=None):
"""Creates a new condition."""
body = self._formdata({
"name": name,
"type": _type,
"statement": statement,
"priority": priority,
"comment": comment,
}, FastlyCondition.FIELDS)
content = self._fetch("/service/%s/version/%d/condition" % (service_id, version_number), method="POST", body=body)
return FastlyCondition(self, content)
def get_condition(self, service_id, version_number, name):
"""Gets a specified condition."""
content = self._fetch("/service/%s/version/%d/condition/%s" % (service_id, version_number, quote(name, safe='')))
return FastlyCondition(self, content)
def update_condition(self, service_id, version_number, name_key, **kwargs):
"""Updates the specified condition."""
if '_type' in kwargs:
kwargs['type'] = kwargs['_type']
body = self._formdata(kwargs, FastlyCondition.FIELDS)
content = self._fetch("/service/%s/version/%d/condition/%s" % (service_id, version_number, quote(name_key, safe='')), method="PUT", body=body)
return FastlyCondition(self, content)
def delete_condition(self, service_id, version_number, name):
"""Deletes the specified condition."""
content = self._fetch("/service/%s/version/%d/condition/%s" % (service_id, version_number, quote(name, safe='')), method="DELETE")
return self._status(content)
def content_edge_check(self, url):
"""Retrieve headers and MD5 hash of the content for a particular url from each Fastly edge server."""
prefixes = ["http://", "https://"]
for prefix in prefixes:
if url.startswith(prefix):
url = url[len(prefix):]
break
content = self._fetch("/content/edge_check/%s" % url)
return content
def get_current_customer(self):
"""Get the logged in customer."""
content = self._fetch("/current_customer")
return FastlyCustomer(self, content)
def get_customer(self, customer_id):
"""Get a specific customer."""
content = self._fetch("/customer/%s" % customer_id)
return FastlyCustomer(self, content)
def get_customer_details(self, customer_id):
"""Get a specific customer, owner, and billing contact."""
content = self._fetch("/customer/details/%s" % customer_id)
return content
def list_customer_users(self, customer_id):
"""List all users from a specified customer id."""
content = self._fetch("/customer/users/%s" % customer_id)
return map(lambda x: FastlyUser(self, x), content)
def update_customer(self, customer_id, **kwargs):
"""Update a customer."""
body = self._formdata(kwargs, FastlyCustomer.FIELDS)
content = self._fetch("/customer/%s" % customer_id, method="PUT", body=body)
return FastlyCustomer(self, content)
def delete_customer(self, customer_id):
"""Delete a customer."""
content = self._fetch("/customer/%s" % customer_id, method="DELETE")
return self._status(content)
def list_directors(self, service_id, version_number):
"""List the directors for a particular service and version."""
content = self._fetch("/service/%s/version/%d/director" % (service_id, version_number))
return map(lambda x: FastlyDirector(self, x), content)
def create_director(
self,
service_id,
version_number,
name,
quorum=75,
_type=FastlyDirectorType.RANDOM,
retries=5,
shield=None):
"""Create a director for a particular service and version."""
body = self._formdata({
"name": name,
"quorum": quorum,
"type": _type,
"retries": retries,
"shield": shield,
}, FastlyDirector.FIELDS)
content = self._fetch("/service/%s/version/%d/director" % (service_id, version_number), method="POST", body=body)
return FastlyDirector(self, content)
def get_director(self, service_id, version_number, name):
"""Get the director for a particular service and version."""
content = self._fetch("/service/%s/version/%d/director/%s" % (service_id, version_number, quote(name, safe='')))
return FastlyDirector(self, content)
def update_director(self, service_id, version_number, name_key, **kwargs):
"""Update the director for a particular service and version."""
if '_type' in kwargs:
kwargs['type'] = kwargs['_type']
body = self._formdata(kwargs, FastlyDirector.FIELDS)
content = self._fetch("/service/%s/version/%d/director/%s" % (service_id, version_number, quote(name_key, safe='')), method="PUT", body=body)
return FastlyDirector(self, content)
def delete_director(self, service_id, version_number, name):
"""Delete the director for a particular service and version."""
content = self._fetch("/service/%s/version/%d/director/%s" % (service_id, version_number, quote(name, safe='')), method="DELETE")
return self._status(content)
def get_director_backend(self, service_id, version_number, director_name, backend_name):
"""Returns the relationship between a Backend and a Director. If the Backend has been associated with the Director, it returns a simple record indicating this. Otherwise, returns a 404."""
content = self._fetch("/service/%s/version/%d/director/%s/backend/%s" % (service_id, version_number, director_name, quote(backend_name, safe='')), method="GET")
return FastlyDirectorBackend(self, content)
def create_director_backend(self, service_id, version_number, director_name, backend_name):
"""Establishes a relationship between a Backend and a Director. The Backend is then considered a member of the Director and can be used to balance traffic onto."""
content = self._fetch("/service/%s/version/%d/director/%s/backend/%s" % (service_id, version_number, director_name, quote(backend_name, safe='')), method="POST")
return FastlyDirectorBackend(self, content)
def delete_director_backend(self, service_id, version_number, director_name, backend_name):
"""Deletes the relationship between a Backend and a Director. The Backend is no longer considered a member of the Director and thus will not have traffic balanced onto it from this Director."""
content = self._fetch("/service/%s/version/%d/director/%s/backend/%s" % (service_id, version_number, director_name, quote(backend_name, safe='')), method="DELETE")
return self._status(content)
def list_domains(self, service_id, version_number):
"""List the domains for a particular service and version."""
content = self._fetch("/service/%s/version/%d/domain" % (service_id, version_number))
return map(lambda x: FastlyDomain(self, x), content)
def create_domain(
self,
service_id,
version_number,
name,
comment=None):
"""Create a domain for a particular service and version."""
body = self._formdata({
"name": name,
"comment": comment,
}, FastlyDomain.FIELDS)
content = self._fetch("/service/%s/version/%d/domain" % (service_id, version_number), method="POST", body=body)
return FastlyDomain(self, content)
def get_domain(self, service_id, version_number, name):
"""Get the domain for a particular service and version."""
content = self._fetch("/service/%s/version/%d/domain/%s" % (service_id, version_number, quote(name, safe='')))
return FastlyDomain(self, content)
def update_domain(self, service_id, version_number, name_key, **kwargs):
"""Update the domain for a particular service and version."""
body = self._formdata(kwargs, FastlyDomain.FIELDS)
content = self._fetch("/service/%s/version/%d/domain/%s" % (service_id, version_number, quote(name_key, safe='')), method="PUT", body=body)
return FastlyDomain(self, content)
def delete_domain(self, service_id, version_number, name):
"""Delete the domain for a particular service and version."""
content = self._fetch("/service/%s/version/%d/domain/%s" % (service_id, version_number, quote(name, safe='')), method="DELETE")
return self._status(content)
def check_domain(self, service_id, version_number, name):
"""Checks the status of a domain's DNS record. Returns an array of 3 items. The first is the details for the domain. The second is the current CNAME of the domain. The third is a boolean indicating whether or not it has been properly setup to use Fastly."""
content = self._fetch("/service/%s/version/%d/domain/%s/check" % (service_id, version_number, quote(name, safe='')))
return FastlyDomainCheck(self, content)
def check_domains(self, service_id, version_number):
"""Checks the status of all domain DNS records for a Service Version. Returns an array items in the same format as the single domain /check."""
content = self._fetch("/service/%s/version/%d/domain/check_all" % (service_id, version_number))
return map(lambda x: FastlyDomainCheck(self, x), content)
def get_event_log(self, object_id):
"""Get the specified event log."""
content = self._fetch("/event_log/%s" % object_id, method="GET")
return FastlyEventLog(self, content)
def list_gzip(self, service_id, version_number):
"""List all gzip configurations for a particular service and version"""
content = self._fetch("/service/%s/version/%d/gzip" % (service_id, version_number))
return map(lambda x: FastlyGzip(self, x), content)
def create_gzip(self, service_id, version_number, name, cache_condition=None, content_types=None, extensions=None):
"""Creates a new Gzip object."""
body = self._formdata({
"name": name,
"cache_condition": cache_condition,
"content_types": content_types,
"extensions": extensions
}, FastlyGzip.FIELDS)
content = self._fetch("/service/%s/version/%d/gzip" % (service_id, version_number), method="POST", body=body)
return FastlyGzip(self, content)
def get_gzip(self, service_id, version_number, name):
"""Retrieves a Header object by name."""
content = self._fetch("/service/%s/version/%d/gzip/%s" % (service_id, version_number, quote(name, safe='')))
return FastlyGzip(self, content)
def update_gzip(self, service_id, version_number, name_key, **kwargs):
"""Modifies an existing Gzip object by name."""
body = self._formdata(kwargs, FastlyGzip.FIELDS)
content = self._fetch("/service/%s/version/%d/gzip/%s" % (service_id, version_number, quote(name_key, safe='')), method="PUT", body=body)
return FastlyGzip(self, content)
def delete_gzip(self, service_id, version_number, name):
"""Deletes a Gzip object by name."""
content = self._fetch("/service/%s/version/%d/gzip/%s" % (service_id, version_number, quote(name, safe='')), method="DELETE")
return self._status(content)
def list_headers(self, service_id, version_number):
"""Retrieves all Header objects for a particular Version of a Service."""
content = self._fetch("/service/%s/version/%d/header" % (service_id, version_number))
return map(lambda x: FastlyHeader(self, x), content)
def create_header(self, service_id, version_number, name, dst, src, _type=FastlyHeaderType.RESPONSE, action=FastlyHeaderAction.SET, regex=None, substitution=None, ignore_if_set=None, priority=10, response_condition=None, cache_condition=None, request_condition=None):
"""Creates a new Header object."""
body = self._formdata({
"name": name,
"dst": dst,
"src": src,
"type": _type,
"action": action,
"regex": regex,
"substitution": substitution,
"ignore_if_set": ignore_if_set,
"priority": priority,
"response_condition": response_condition,
"request_condition": request_condition,
"cache_condition": cache_condition,
}, FastlyHeader.FIELDS)
content = self._fetch("/service/%s/version/%d/header" % (service_id, version_number), method="POST", body=body)
return FastlyHeader(self, content)
def get_header(self, service_id, version_number, name):
"""Retrieves a Header object by name."""
content = self._fetch("/service/%s/version/%d/header/%s" % (service_id, version_number, quote(name, safe='')))
return FastlyHeader(self, content)
def update_header(self, service_id, version_number, name_key, **kwargs):
"""Modifies an existing Header object by name."""
if '_type' in kwargs:
kwargs['type'] = kwargs['_type']
body = self._formdata(kwargs, FastlyHeader.FIELDS)
content = self._fetch("/service/%s/version/%d/header/%s" % (service_id, version_number, quote(name_key, safe='')), method="PUT", body=body)
return FastlyHeader(self, content)
def delete_header(self, service_id, version_number, name):
"""Deletes a Header object by name."""
content = self._fetch("/service/%s/version/%d/header/%s" % (service_id, version_number, quote(name, safe='')), method="DELETE")
return self._status(content)
def list_healthchecks(self, service_id, version_number):
"""List all of the healthchecks for a particular service and version."""
content = self._fetch("/service/%s/version/%d/healthcheck" % (service_id, version_number))
return map(lambda x: FastlyHealthCheck(self, x), content)
def create_healthcheck(
self,
service_id,
version_number,
name,
host,
method="HEAD",
path="/",
http_version="1.1",
timeout=1000,
check_interval=5000,
expected_response=200,
window=5,
threshold=3,
initial=1):
"""Create a healthcheck for a particular service and version."""
body = self._formdata({
"name": name,
"method": method,
"host": host,
"path": path,
"http_version": http_version,
"timeout": timeout,
"check_interval": check_interval,
"expected_response": expected_response,
"window": window,
"threshold": threshold,
"initial": initial,
}, FastlyHealthCheck.FIELDS)
content = self._fetch("/service/%s/version/%d/healthcheck" % (service_id, version_number), method="POST", body=body)
return FastlyHealthCheck(self, content)
def get_healthcheck(self, service_id, version_number, name):
"""Get the healthcheck for a particular service and version."""
content = self._fetch("/service/%s/version/%d/healthcheck/%s" % (service_id, version_number, quote(name, safe='')))
return FastlyHealthCheck(self, content)
def update_healthcheck(self, service_id, version_number, name_key, **kwargs):
"""Update the healthcheck for a particular service and version."""
body = self._formdata(kwargs, FastlyHealthCheck.FIELDS)
content = self._fetch("/service/%s/version/%d/healthcheck/%s" % (service_id, version_number, quote(name_key, safe='')), method="PUT", body=body)
return FastlyHealthCheck(self, content)
def delete_healthcheck(self, service_id, version_number, name):
"""Delete the healthcheck for a particular service and version."""
content = self._fetch("/service/%s/version/%d/healthcheck/%s" % (service_id, version_number, quote(name, safe='')), method="DELETE")
return self._status(content)
def purge_url(self, host, path):
"""Purge an individual URL."""
content = self._fetch(path, method="PURGE", headers={"Host": host})
return FastlyPurge(self, content)
def check_purge_status(self, purge_id):
"""Get the status and times of a recently completed purge."""
content = self._fetch("/purge?id=%s" % purge_id)
return map(lambda x: FastlyPurgeStatus(self, x), content)
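# Note: purge_url sends an HTTP PURGE for a single URL, while check_purge_status polls
# a previously issued purge by its id.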
def list_request_settings(self, service_id, version_number):
"""Returns a list of all Request Settings objects for the given service and version."""
content = self._fetch("/service/%s/version/%d/request_settings" % (service_id, version_number))
return map(lambda x: FastlyRequestSetting(self, x), content)
def create_request_setting(
self,
service_id,
version_number,
name,
default_host=None,
force_miss=None,
force_ssl=None,
action=None,
bypass_busy_wait=None,
max_stale_age=None,
hash_keys=None,
xff=None,
timer_support=None,
geo_headers=None,
request_condition=None):
"""Creates a new Request Settings object."""
body = self._formdata({
"name": name,
"default_host": default_host,
"force_miss": force_miss,
"force_ssl": force_ssl,
"action": action,
"bypass_busy_wait": bypass_busy_wait,
"max_stale_age": max_stale_age,
"hash_keys": hash_keys,
"xff": xff,
"timer_support": timer_support,
"geo_headers": geo_headers,
"request_condition": request_condition,
}, FastlyRequestSetting.FIELDS)
content = self._fetch("/service/%s/version/%d/request_settings" % (service_id, version_number), method="POST", body=body)
return FastlyRequestSetting(self, content)
def get_request_setting(self, service_id, version_number, name):
"""Gets the specified Request Settings object."""
content = self._fetch("/service/%s/version/%d/request_settings/%s" % (service_id, version_number, quote(name, safe='')))
return FastlyRequestSetting(self, content)
def update_request_setting(self, service_id, version_number, name_key, **kwargs):
"""Updates the specified Request Settings object."""
body = self._formdata(kwargs, FastlyRequestSetting.FIELDS)
content = self._fetch("/service/%s/version/%d/request_settings/%s" % (service_id, version_number, quote(name_key, safe='')), method="PUT", body=body)
return FastlyRequestSetting(self, content)
def delete_request_setting(self, service_id, version_number, name):
"""Removes the specfied Request Settings object."""
content = self._fetch("/service/%s/version/%d/request_settings/%s" % (service_id, version_number, quote(name, safe='')), method="DELETE")
return self._status(content)
def list_response_objects(self, service_id, version_number):
"""Returns all Response Objects for the specified service and version."""
content = self._fetch("/service/%s/version/%d/response_object" % (service_id, version_number))
return map(lambda x: FastlyResponseObject(self, x), content)
def create_response_object(self, service_id, version_number, name, status="200", response="OK", content="", request_condition=None, cache_condition=None):
"""Creates a new Response Object."""
body = self._formdata({
"name": name,
"status": status,
"response": response,
"content": content,
"request_condition": request_condition,
"cache_condition": cache_condition,
}, FastlyResponseObject.FIELDS)
content = self._fetch("/service/%s/version/%d/response_object" % (service_id, version_number), method="POST", body=body)
return FastlyResponseObject(self, content)
def get_response_object(self, service_id, version_number, name):
"""Gets the specified Response Object."""
content = self._fetch("/service/%s/version/%d/response_object/%s" % (service_id, version_number, quote(name, safe='')))
return FastlyResponseObject(self, content)
def update_response_object(self, service_id, version_number, name_key, **kwargs):
"""Updates the specified Response Object."""
body = self._formdata(kwargs, FastlyResponseObject.FIELDS)
content = self._fetch("/service/%s/version/%d/response_object/%s" % (service_id, version_number, quote(name_key, safe='')), method="PUT", body=body)
return FastlyResponseObject(self, content)
def delete_response_object(self, service_id, version_number, name):
"""Deletes the specified Response Object."""
content = self._fetch("/service/%s/version/%d/response_object/%s" % (service_id, version_number, quote(name, safe='')), method="DELETE")
return self._status(content)
def create_service(self, customer_id, name, publish_key=None, comment=None):
"""Create a service."""
body = self._formdata({
"customer_id": customer_id,
"name": name,
"publish_key": publish_key,
"comment": comment,
}, FastlyService.FIELDS)
content = self._fetch("/service", method="POST", body=body)
return FastlyService(self, content)
def list_services(self):
"""List Services."""
content = self._fetch("/service")
return map(lambda x: FastlyService(self, x), content)
def get_service(self, service_id):
"""Get a specific service by id."""
content = self._fetch("/service/%s" % service_id)
return FastlyService(self, content)
def get_service_details(self, service_id):
"""List detailed information on a specified service."""
content = self._fetch("/service/%s/details" % service_id)
return FastlyService(self, content)
def get_service_by_name(self, service_name):
"""Get a specific service by name."""
content = self._fetch("/service/search?name=%s" % quote(service_name, safe=''))
return FastlyService(self, content)
def update_service(self, service_id, **kwargs):
"""Update a service."""
body = self._formdata(kwargs, FastlyService.FIELDS)
content = self._fetch("/service/%s" % service_id, method="PUT", body=body)
return FastlyService(self, content)
def delete_service(self, service_id):
"""Delete a service."""
content = self._fetch("/service/%s" % service_id, method="DELETE")
return self._status(content)
def list_domains_by_service(self, service_id):
"""List the domains within a service."""
content = self._fetch("/service/%s/domain" % service_id, method="GET")
return map(lambda x: FastlyDomain(self, x), content)
def purge_service(self, service_id):
"""Purge everything from a service."""
content = self._fetch("/service/%s/purge_all" % service_id, method="POST")
return self._status(content)
def purge_service_by_key(self, service_id, key):
"""Purge a particular service by a key."""
content = self._fetch("/service/%s/purge/%s" % (service_id, key), method="POST")
return self._status(content)
def get_settings(self, service_id, version_number):
"""Get the settings for a particular service and version."""
content = self._fetch("/service/%s/version/%d/settings" % (service_id, version_number))
return FastlySettings(self, content)
def update_settings(self, service_id, version_number, settings={}):
"""Update the settings for a particular service and version."""
body = urlencode(settings)
content = self._fetch("/service/%s/version/%d/settings" % (service_id, version_number), method="PUT", body=body)
return FastlySettings(self, content)
def get_stats(self, service_id, stat_type=FastlyStatsType.ALL):
"""Get the stats from a service."""
content = self._fetch("/service/%s/stats/%s" % (service_id, stat_type))
return content
def list_syslogs(self, service_id, version_number):
"""List all of the Syslogs for a particular service and version."""
content = self._fetch("/service/%s/version/%d/syslog" % (service_id, version_number))
return map(lambda x: FastlySyslog(self, x), content)
def create_syslog(
self,
service_id,
version_number,
name,
address,
port=514,
use_tls="0",
tls_ca_cert=None,
token=None,
_format=None,
response_condition=None):
"""Create a Syslog for a particular service and version."""
body = self._formdata({
"name": name,
"address": address,
"port": port,
"use_tls": use_tls,
"tls_ca_cert": tls_ca_cert,
"token": token,
"format": _format,
"response_condition": response_condition,
}, FastlySyslog.FIELDS)
content = self._fetch("/service/%s/version/%d/syslog" % (service_id, version_number), method="POST", body=body)
return FastlySyslog(self, content)
def get_syslog(self, service_id, version_number, name):
"""Get the Syslog for a particular service and version."""
content = self._fetch("/service/%s/version/%d/syslog/%s" % (service_id, version_number, quote(name, safe='')))
return FastlySyslog(self, content)
def update_syslog(self, service_id, version_number, name_key, **kwargs):
"""Update the Syslog for a particular service and version."""
body = self._formdata(kwargs, FastlySyslog.FIELDS)
content = self._fetch("/service/%s/version/%d/syslog/%s" % (service_id, version_number, quote(name_key, safe='')), method="PUT", body=body)
return FastlySyslog(self, content)
def delete_syslog(self, service_id, version_number, name):
"""Delete the Syslog for a particular service and version."""
content = self._fetch("/service/%s/version/%d/syslog/%s" % (service_id, version_number, quote(name, safe='')), method="DELETE")
return self._status(content)
def change_password(self, old_password, new_password):
"""Update the user's password to a new one."""
body = self._formdata({
"old_password": old_password,
"password": new_password,
}, ["old_password", "password"])
content = self._fetch("/current_user/password", method="POST", body=body)
return FastlyUser(self, content)
def get_current_user(self):
"""Get the logged in user."""
content = self._fetch("/current_user")
return FastlyUser(self, content)
def get_user(self, user_id):
"""Get a specific user."""
content = self._fetch("/user/%s" % user_id)
return FastlyUser(self, content)
def create_user(self, customer_id, name, login, password, role=FastlyRoles.USER, require_new_password=True):
"""Create a user."""
body = self._formdata({
"customer_id": customer_id,
"name": name,
"login": login,
"password": password,
"role": role,
"require_new_password": require_new_password,
}, FastlyUser.FIELDS)
content = self._fetch("/user", method="POST", body=body)
return FastlyUser(self, content)
def update_user(self, user_id, **kwargs):
"""Update a user."""
body = self._formdata(kwargs, FastlyUser.FIELDS)
content = self._fetch("/user/%s" % user_id, method="PUT", body=body)
return FastlyUser(self, content)
def delete_user(self, user_id):
"""Delete a user."""
content = self._fetch("/user/%s" % user_id, method="DELETE")
return self._status(content)
def request_password_reset(self, user_id):
"""Requests a password reset for the specified user."""
content = self._fetch("/user/%s/password/request_reset" % (user_id), method="POST")
return FastlyUser(self, content)
def list_vcls(self, service_id, version_number):
"""List the uploaded VCLs for a particular service and version."""
content = self._fetch("/service/%s/version/%d/vcl" % (service_id, version_number))
return map(lambda x: FastlyVCL(self, x), content)
def upload_vcl(self, service_id, version_number, name, content, main=None, comment=None):
"""Upload a VCL for a particular service and version."""
body = self._formdata({
"name": name,
"content": content,
"comment": comment,
"main": main,
}, FastlyVCL.FIELDS)
content = self._fetch("/service/%s/version/%d/vcl" % (service_id, version_number), method="POST", body=body)
return FastlyVCL(self, content)
def download_vcl(self, service_id, version_number, name):
"""Download the specified VCL."""
# TODO: Not sure what to do here, the documentation shows invalid response. Will have to test.
raise Exception("Not implemented")
def get_vcl(self, service_id, version_number, name, include_content=True):
"""Get the uploaded VCL for a particular service and version."""
content = self._fetch("/service/%s/version/%d/vcl/%s?include_content=%d" % (service_id, version_number, quote(name, safe=''), int(include_content)))
return FastlyVCL(self, content)
def get_vcl_html(self, service_id, version_number, name):
"""Get the uploaded VCL for a particular service and version with HTML syntax highlighting."""
content = self._fetch("/service/%s/version/%d/vcl/%s/content" % (service_id, version_number, quote(name, safe='')))
return content.get("content", None)
def get_generated_vcl(self, service_id, version_number):
"""Display the generated VCL for a particular service and version."""
content = self._fetch("/service/%s/version/%d/generated_vcl" % (service_id, version_number))
return FastlyVCL(self, content)
def get_generated_vcl_html(self, service_id, version_number):
"""Display the content of generated VCL with HTML syntax highlighting."""
content = self._fetch("/service/%s/version/%d/generated_vcl/content" % (service_id, version_number))
return content.get("content", None)
def set_main_vcl(self, service_id, version_number, name):
"""Set the specified VCL as the main."""
content = self._fetch("/service/%s/version/%d/vcl/%s/main" % (service_id, version_number, quote(name, safe='')), method="PUT")
return FastlyVCL(self, content)
def update_vcl(self, service_id, version_number, name_key, **kwargs):
"""Update the uploaded VCL for a particular service and version."""
body = self._formdata(kwargs, FastlyVCL.FIELDS)
content = self._fetch("/service/%s/version/%d/vcl/%s" % (service_id, version_number, quote(name_key, safe='')), method="PUT", body=body)
return FastlyVCL(self, content)
def delete_vcl(self, service_id, version_number, name):
"""Delete the uploaded VCL for a particular service and version."""
content = self._fetch("/service/%s/version/%d/vcl/%s" % (service_id, version_number, quote(name, safe='')), method="DELETE")
return self._status(content)
def list_vcl_snippets(self, service_id, version_number):
"""List all of the VCL snippets for a particular service and version."""
content = self._fetch("/service/%s/version/%d/snippet" % (service_id, version_number))
return map(lambda x: FastlyVCLSnippet(self, x), content)
def create_vcl_snippet(
self,
service_id,
version_number,
name,
priority,
_type,
content,
dynamic="0"):
"""Create a VCL snippet for a particular service and version."""
body = self._formdata({
"name": name,
"dynamic": str(dynamic),
"priority": priority,
"type": _type,
"content": content,
}, FastlyVCLSnippet.FIELDS)
content = self._fetch("/service/%s/version/%d/snippet" % (service_id, version_number), method="POST", body=body)
return FastlyVCLSnippet(self, content)
def get_vcl_snippet(self, service_id, version_number, name):
"""Get the VCL snippet for a particular service and version."""
content = self._fetch("/service/%s/version/%d/snippet/%s" % (service_id, version_number, quote(name, safe='')))
return FastlyVCLSnippet(self, content)
def update_vcl_snippet(self, service_id, version_number, name_key, **kwargs):
"""Update the VCL Snippet for a particular service and version."""
if '_type' in kwargs:
kwargs['type'] = kwargs['_type']
body = self._formdata(kwargs, FastlyVCLSnippet.FIELDS)
content = self._fetch("/service/%s/version/%d/snippet/%s" % (service_id, version_number, quote(name_key, safe='')), method="PUT", body=body)
return FastlyVCLSnippet(self, content)
def delete_vcl_snippet(self, service_id, version_number, name):
"""Delete the VCL Snippet for a particular service and version."""
content = self._fetch("/service/%s/version/%d/snippet/%s" % (service_id, version_number, quote(name, safe='')), method="DELETE")
return self._status(content)
def create_version(self, service_id, inherit_service_id=None, comment=None):
"""Create a version for a particular service."""
body = self._formdata({
"service_id": service_id,
"inherit_service_id": inherit_service_id,
"comment": comment,
}, FastlyVersion.FIELDS)
content = self._fetch("/service/%s/version" % service_id, method="POST", body=body)
return FastlyVersion(self, content)
def list_versions(self, service_id):
content = self._fetch("/service/%s/version" % service_id)
return map(lambda x: FastlyVersion(self, x), content)
def get_version(self, service_id, version_number):
"""Get the version for a particular service."""
content = self._fetch("/service/%s/version/%d" % (service_id, version_number))
return FastlyVersion(self, content)
def update_version(self, service_id, version_number, **kwargs):
"""Update a particular version for a particular service."""
body = self._formdata(kwargs, FastlyVersion.FIELDS)
content = self._fetch("/service/%s/version/%d/" % (service_id, version_number), method="PUT", body=body)
return FastlyVersion(self, content)
def clone_version(self, service_id, version_number):
"""Clone the current configuration into a new version."""
content = self._fetch("/service/%s/version/%d/clone" % (service_id, version_number), method="PUT")
return FastlyVersion(self, content)
def activate_version(self, service_id, version_number):
"""Activate the current version."""
content = self._fetch("/service/%s/version/%d/activate" % (service_id, version_number), method="PUT")
return FastlyVersion(self, content)
def deactivate_version(self, service_id, version_number):
"""Deactivate the current version."""
content = self._fetch("/service/%s/version/%d/deactivate" % (service_id, version_number), method="PUT")
return FastlyVersion(self, content)
def validate_version(self, service_id, version_number):
"""Validate the version for a particular service and version."""
content = self._fetch("/service/%s/version/%d/validate" % (service_id, version_number))
return self._status(content)
def lock_version(self, service_id, version_number):
"""Locks the specified version."""
content = self._fetch("/service/%s/version/%d/lock" % (service_id, version_number))
return self._status(content)
    def list_wordpresses(self, service_id, version_number):
"""Get all of the wordpresses for a specified service and version."""
content = self._fetch("/service/%s/version/%d/wordpress" % (service_id, version_number))
return map(lambda x: FastlyWordpress(self, x), content)
def create_wordpress(
self,
service_id,
version_number,
name,
path,
comment=None):
"""Create a wordpress for the specified service and version."""
body = self._formdata({
"name": name,
"path": path,
"comment": comment,
}, FastlyWordpress.FIELDS)
content = self._fetch("/service/%s/version/%d/wordpress" % (service_id, version_number), method="POST", body=body)
return FastlyWordpress(self, content)
def get_wordpress(self, service_id, version_number, name):
"""Get information on a specific wordpress."""
content = self._fetch("/service/%s/version/%d/wordpress/%s" % (service_id, version_number, quote(name, safe='')))
return FastlyWordpress(self, content)
def update_wordpress(self, service_id, version_number, name_key, **kwargs):
"""Update a specified wordpress."""
body = self._formdata(kwargs, FastlyWordpress.FIELDS)
content = self._fetch("/service/%s/version/%d/wordpress/%s" % (service_id, version_number, quote(name_key, safe='')), method="PUT", body=body)
return FastlyWordpress(self, content)
def delete_wordpress(self, service_id, version_number, name):
"""Delete a specified wordpress."""
content = self._fetch("/service/%s/version/%d/wordpress/%s" % (service_id, version_number, quote(name, safe='')), method="DELETE")
return self._status(content)
def list_logging_s3_endpoints(self, service_id, version_number):
"""Returns all Amazon S3 Logging endpoint Objects for the specified service and version."""
content = self._fetch("/service/%s/version/%d/logging/s3" % (service_id, version_number))
return map(lambda x: FastlyLoggingS3(self, x), content)
def create_logging_s3_endpoint(self,
service_id, version_number, name, access_key, bucket_name, secret_key,
domain="s3.amazonaws.com", format_="%h %l %u %t \"%r\" %>s %b",
format_version=2, gzip_level=0, message_type="classic",
path=None, period=3600, placement=None, redundancy=None,
response_condition=None, timestamp_format="%Y-%m-%dT%H:%M:%S.000"):
"""Creates a new Response Object."""
body = self._formdata({
"name": name,
"access_key": access_key,
"bucket_name": bucket_name,
"secret_key": secret_key,
"domain": domain,
"format": format_,
"format_version": format_version,
"gzip_level": gzip_level,
"message_type": message_type,
"path": path,
"period": period,
"placement": placement,
"redundancy": redundancy,
"response_condition": response_condition,
"timestamp_format": timestamp_format
}, FastlyLoggingS3.FIELDS)
content = self._fetch("/service/%s/version/%d/logging/s3" % (service_id, version_number), method="POST", body=body)
return FastlyLoggingS3(self, content)
def get_logging_s3_endpoint(self, service_id, version_number, name):
"""Gets the specified Amazon S3 Logging endpoint Object."""
content = self._fetch("/service/%s/version/%d/logging/s3/%s" % (service_id, version_number, quote(name, safe='')))
return FastlyLoggingS3(self, content)
def update_logging_s3_endpoint(self, service_id, version_number, name_key, **kwargs):
"""Updates the specified Amazon S3 Logging Object."""
body = self._formdata(kwargs, FastlyLoggingS3.FIELDS)
content = self._fetch("/service/%s/version/%d/logging/s3/%s" % (service_id, version_number, quote(name_key, safe='')), method="PUT", body=body)
return FastlyLoggingS3(self, content)
def delete_logging_s3_endpoint(self, service_id, version_number, name):
"""Deletes the specified Amazon S3 Logging Object."""
content = self._fetch("/service/%s/version/%d/logging/s3/%s" % (service_id, version_number, quote(name, safe='')), method="DELETE")
return self._status(content)
# TODO: Is this broken?
def delete_version(self, service_id, version_number):
content = self._fetch("/service/%s/version/%d" % (service_id, version_number), method="DELETE")
return self._status(content)
def _status(self, status):
if not isinstance(status, FastlyStatus):
status = FastlyStatus(self, status)
if status.status != "ok":
raise FastlyError("FastlyError: %s" % status.msg)
return True
def _formdata(self, fields, valid=[]):
data = {}
for key in fields.keys():
if key in valid and fields[key] is not None:
data[key] = fields[key]
if isinstance(data[key], bool):
data[key] = str(int(data[key]))
return urlencode(data)
def _fetch(self, url, method="GET", body=None, headers={}):
hdrs = {}
hdrs.update(headers)
print("Fetch: %s %s" % (method, url))
if body:
print("Body: %s" % body)
if self._fully_authed:
hdrs["Cookie"] = self._session
else:
hdrs["Fastly-Key"] = self._api_key
hdrs["Content-Accept"] = "application/json"
hdrs["User-Agent"] = "fastly-python-v%s" % __version__
if "Content-Type" not in hdrs and method in ["POST", "PUT"]:
hdrs["Content-Type"] = "application/x-www-form-urlencoded"
conn = httplib2.Http(disable_ssl_certificate_validation=False, timeout=10)
endpoint = "%s://%s%s" % (FASTLY_SCHEME, FASTLY_HOST, url)
return self._check(*conn.request(endpoint, method, body=body, headers=hdrs))
def _check(self, resp, content):
status = resp.status
payload = None
if content:
try:
payload = json.loads(content)
# Could not decode, usually HTML
except ValueError:
payload = content
if status == 200:
# Keep track of the session. Only really set during /login
if "set-cookie" in resp:
set_cookie = resp["set-cookie"]
match = FASTLY_SESSION_REGEX.search(set_cookie)
if match is not None:
self._session = match.group(1)
return payload
if payload is None:
raise Exception("HTTP Error %d occurred." % status)
elif isinstance(payload, (type(b''), type(u''))):
raise Exception("HTTP Error %d occurred. { %s }" % (status, payload))
else:
payload["status"] = "error"
status = FastlyStatus(self, payload)
raise FastlyError(status)
class IDateStampedObject(object):
@property
def created_date(self):
if hasattr(self, "created_at"):
return self._parse_date(self.created_at)
else:
return self._parse_date(self.created)
@property
def updated_date(self):
if hasattr(self, "updated_at"):
return self._parse_date(self.updated_at)
else:
return self._parse_date(self.updated)
@property
def deleted_date(self):
if hasattr(self, "deleted_at"):
return self._parse_date(self.deleted_at)
else:
return self._parse_date(self.deleted)
class IServiceObject(object):
@property
def service(self):
return self._conn.get_service(self.service_id)
class IServiceVersionObject(IServiceObject):
@property
def service_version(self):
return self._conn.get_service_version(self.service_id, self.version)
class FastlyObject(object):
def __init__(self, conn, data):
self._conn = conn
self._data = data or {}
def __getattr__(self, name):
cls = self.__class__
if name in cls.FIELDS:
return self._data.get(name, None)
raise AttributeError()
def __str__(self):
return str(self._data)
def __repr__(self):
return repr(self._data)
def _parse_date(self, _date):
return datetime.strptime(_date, "%Y-%m-%dT%H:%M:%S+00:00")
class FastlyStatus(FastlyObject):
FIELDS = [
"msg",
"detail",
"status",
]
class FastlyError(Exception):
def __init__(self, status):
if isinstance(status, FastlyStatus):
Exception.__init__(self, "FastlyError: %s (%s)" % (status.msg, status.detail))
return
Exception.__init__(self, status)
class FastlySession(FastlyObject):
FIELDS = []
@property
def customer(self):
return FastlyCustomer(self._conn, self._data["customer"])
@property
def user(self):
return FastlyUser(self._conn, self._data["user"])
class FastlyBackend(FastlyObject, IServiceVersionObject):
"""A Backend is an address (ip or domain) from which Fastly pulls content. There can be multiple Backends for a Service."""
FIELDS = [
"service_id",
"version",
"name",
"address",
"port",
"use_ssl",
"connect_timeout",
"first_byte_timeout",
"between_bytes_timeout",
"error_threshold",
"max_conn",
"weight",
"auto_loadbalance",
"shield",
"request_condition",
"healthcheck",
"comment",
"ssl_cert_hostname",
"ssl_sni_hostname",
"min_tls_version",
"max_tls_version",
]
@property
def healthcheck(self):
if not self.__getattr__('healthcheck'):
return None
return self._conn.get_healthcheck(self.service_id, self.version, self.__getattr__("healthcheck"))
class FastlyCacheSettings(FastlyObject, IServiceVersionObject):
"""Controls how caching is performed on Fastly. When used in conjunction with Conditions the Cache Settings provide you with fine grain control over how long content persists in the cache."""
FIELDS = [
"service_id",
"version",
"name",
"action",
"ttl",
"stale_ttl",
"cache_condition",
]
class FastlyCondition(FastlyObject, IServiceVersionObject):
"""Conditions are used to control when and how other objects are used in a service configuration. They contain a statement that evaluates to either true or false and is used to determine whether the condition is met.
    Depending on the type of the condition, the statement field can make reference to the Varnish variables req, resp, and/or beresp."""
FIELDS = [
"name",
"service_id",
"version",
"type",
"statement",
"priority",
"comment",
]
class FastlyCustomer(FastlyObject, IDateStampedObject):
"""A Customer is the base object which owns your Users and Services."""
FIELDS = [
"can_configure_wordpress",
"can_edit_matches",
"name",
"created_at",
"updated_at",
"can_stream_syslog",
"id",
"pricing_plan",
"can_upload_vcl",
"has_config_panel",
"raw_api_key",
"has_billing_panel",
"can_reset_passwords",
"owner_id",
]
@property
def owner(self):
return self._conn.get_user(self.owner_id)
class FastlyDirector(FastlyObject, IServiceVersionObject, IDateStampedObject):
"""A Director is responsible for balancing requests among a group of Backends. In addition to simply balancing, Directors can be configured to attempt retrying failed requests. Additionally, Directors have a quorum setting which can be used to determine when the Director as a whole is considered "up", in order to prevent "server whack-a-mole" following an outage as servers come back up."""
FIELDS = [
"name",
"service_id",
"version",
"quorum",
"type",
"retries",
"shield",
"created",
"updated",
"deleted",
"capacity",
"comment",
"backends",
]
class FastlyDirectorBackend(FastlyObject, IServiceVersionObject, IDateStampedObject):
"""Maps and relates backends as belonging to directors. Backends can belong to any number of directors but directors can only hold one reference to a specific backend."""
FIELDS = [
"service_id",
"version",
"director",
"backend",
"created",
"updated",
"deleted",
]
class FastlyDomain(FastlyObject, IServiceVersionObject):
"""A Domain represents the domain name through which visitors will retrieve content. There can be multiple Domains for a Service."""
FIELDS = [
"name",
"comment",
"service_id",
"version",
]
class FastlyDomainCheck(FastlyObject):
@property
def domain(self):
return FastlyDomain(self._conn, self._data[0])
@property
def cname(self):
return self._data[1]
@property
def success(self):
return self._data[2]
class FastlyEventLog(FastlyObject):
"""EventLogs keep track of things that occur within your services or organization. Currently we track events such as activation and deactivation of Versions and mass purges. In the future we intend to track more events and let you trigger EventLog creation as well."""
FIELDS = [
"object_type",
"id",
"message",
"details",
"level",
"timestamp",
"system",
"subsystem",
]
class FastlyGzip(FastlyObject, IServiceVersionObject):
"""Gzip configuration allows you to choose resources to automatically compress."""
FIELDS = [
"cache_condition",
"content_types",
"extensions",
"name",
"service_id",
"version"
]
class FastlyHeader(FastlyObject, IServiceVersionObject):
"""Header objects are used to add, modify, or delete headers from requests and responses. The header content can be simple strings or be derived from variables inside Varnish. Regular expressions can be used to customize the headers even further."""
FIELDS = [
"name",
"service_id",
"version",
"dst",
"src",
"type",
"action",
"regex",
"substitution",
"ignore_if_set",
"priority",
"response_condition",
"request_condition",
"cache_condition",
]
@property
def destination(self):
return self.dst
@property
def source(self):
return self.src
class FastlyHealthCheck(FastlyObject, IServiceVersionObject):
"""Healthchecks are used to customize the way Fastly checks on your Backends. Only Backends that have successful Healthchecks will be sent traffic, thus assuring that the failure of one server does not affect visitors."""
FIELDS = [
"service_id",
"version",
"name",
"method",
"host",
"path",
"http_version",
"timeout",
"check_interval",
"expected_response",
"window",
"threshold",
"initial",
"comment",
]
class FastlyPurge(FastlyObject):
"""Purging removes content from Fastly so it can be refreshed from your origin servers."""
FIELDS = [
"status",
"id",
]
class FastlyPurgeStatus(FastlyObject):
"""The status of a given purge request."""
FIELDS = [
"timestamp",
"server",
]
class FastlyRequestSetting(FastlyObject, IServiceVersionObject):
"""Settings used to customize Fastly's request handling. When used with Conditions the Request Settings object allows you to fine tune how specific types of requests are handled."""
FIELDS = [
"service_id",
"version",
"name",
"default_host",
"force_miss",
"force_ssl",
"action",
"bypass_busy_wait",
"max_stale_age",
"hash_keys",
"xff",
"timer_support",
"geo_headers",
"request_condition",
]
class FastlyResponseObject(FastlyObject, IServiceVersionObject):
"""Allows you to create synthetic responses that exist entirely on the varnish machine. Useful for creating error or maintainence pages that exists outside the scope of your datacenter. Best when used with Condition objects."""
FIELDS = [
"name",
"service_id",
"version",
"status",
"response",
"content",
"cache_condition",
"request_condition",
]
class FastlyService(FastlyObject):
"""A Service represents the configuration for a website, app, api, or anything else to be served through Fastly. A Service can have many Versions, through which Backends, Domains, and more can be configured."""
FIELDS = [
"id",
"name",
"customer_id",
"publish_key",
"active_version",
"versions",
"comment",
]
@property
def active_version(self):
for version in self.versions.values():
if version.active:
return version
return None
class FastlySettings(FastlyObject, IServiceVersionObject):
"""Handles default settings for a particular version of a service."""
FIELDS = [
"service_id",
"version",
"general.default_host",
"general.default_ttl",
]
class FastlySyslog(FastlyObject, IServiceVersionObject, IDateStampedObject):
"""Fastly will stream log messages to the location, and in the format, specified in the Syslog object."""
FIELDS = [
"address",
"created_at",
"deleted_at",
"format",
"format_version",
"hostname",
"ipv4",
"message_type",
"name",
"placement",
"port",
"response_condition",
"service_id",
"tls_ca_cert",
"tls_hostname",
"token",
"updated_at",
"use_tls",
"version",
]
class FastlyUser(FastlyObject, IDateStampedObject):
FIELDS = [
"name",
"created_at",
"updated_at",
"role",
"id",
"email_hash",
"customer_id",
"require_new_password",
"login",
]
@property
def customer(self):
return self._conn.get_customer(self.customer_id)
class FastlyVCL(FastlyObject, IServiceVersionObject):
"""A VCL is a Varnish configuration file used to customize the configuration for a Service."""
FIELDS = [
"name",
"service_id",
"version",
"generation",
"md5",
"content",
"main",
"vcl",
]
class FastlyVCLSnippet(FastlyObject, IServiceVersionObject):
"""A VCL snippet is a piece of VCL put into a particular VCL function."""
FIELDS = [
"name",
"service_id",
"version",
"dynamic",
"type",
"priority",
"content",
]
class FastlyVersion(FastlyObject, IServiceObject, IDateStampedObject):
"""A Version represents a specific instance of the configuration for a Service. A Version can be cloned, locked, activated, or deactivated."""
FIELDS = [
"comment",
"staging",
"locked",
"created_at",
"testing",
"number",
"updated_at",
"active",
"service_id",
"deleted_at",
"deployed",
"inherit_service_id",
]
@property
def settings(self):
dct = {}
result = self._conn.get_service_version_settings(self.service_id)
if result:
dct = result.settings
return dct
@property
def backends(self):
return dict([(b.name, b) for b in self._conn.list_backends(self.service_id, int(self.number))])
@property
def healthchecks(self):
return dict([(h.name, h) for h in self._conn.list_healthchecks(self.service_id, int(self.number))])
@property
def domains(self):
return dict([(d.name, d) for d in self._conn.list_domains(self.service_id, int(self.number))])
@property
def directors(self):
return dict([(d.name, d) for d in self._conn.list_directors(self.service_id, int(self.number))])
@property
def origins(self):
return dict([(o.name, o) for o in self._conn.list_origins(self.service_id, int(self.number))])
@property
def syslogs(self):
return dict([(s.name, s) for s in self._conn.list_syslogs(self.service_id, int(self.number))])
@property
def vcls(self):
return dict([(v.name, v) for v in self._conn.list_vcls(self.service_id, int(self.number))])
class FastlyWordpress(FastlyObject, IServiceVersionObject):
"""The Wordpress object applies configuration optimized for Wordpress to a given path."""
FIELDS = [
"service_id",
"version",
"name",
"path",
"comment",
]
class FastlyLoggingS3(FastlyObject, IServiceVersionObject, IDateStampedObject):
""""""
FIELDS = [
"access_key",
"bucket_name",
"created_at",
"deleted_at",
"domain",
"format",
"format_version",
"gzip_level",
"message_type",
"name",
"path",
"period",
"placement",
"redundancy",
"response_condition",
"secret_key",
"service_id",
"timestamp_format",
"updated_at",
"version"
]
def connect(api_key, username=None, password=None):
conn = FastlyConnection(api_key)
if username is not None and password is not None:
conn.login(username, password)
return conn
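# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal, hypothetical walk-through of the clone -> edit -> validate -> activate
# workflow exposed by FastlyConnection above. The API key, service id, version
# number and snippet content are all placeholders.
def _example_workflow():
    conn = connect("FASTLY_API_KEY_PLACEHOLDER")
    service_id = "SERVICE_ID_PLACEHOLDER"
    active_version_number = 1  # placeholder: the currently active version number
    draft = conn.clone_version(service_id, active_version_number)
    conn.create_vcl_snippet(
        service_id,
        int(draft.number),
        name="example-snippet",
        priority="100",
        _type="recv",
        content='set req.http.X-Example = "1";')
    if conn.validate_version(service_id, int(draft.number)):
        conn.activate_version(service_id, int(draft.number))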
| 35.379147
| 393
| 0.722019
|
f766a5c054b38ca323d63c2ed823e6076a335cd8
| 1,080
|
py
|
Python
|
test.py
|
eggveloper/retry
|
81837581ff49a890a292f9c80a989a0dd5e52e3c
|
[
"MIT"
] | null | null | null |
test.py
|
eggveloper/retry
|
81837581ff49a890a292f9c80a989a0dd5e52e3c
|
[
"MIT"
] | null | null | null |
test.py
|
eggveloper/retry
|
81837581ff49a890a292f9c80a989a0dd5e52e3c
|
[
"MIT"
] | null | null | null |
import retri
import time
class App(retri.App):
def __init__(self):
super().__init__(160, 120, 4)
data = self.bank(0).data
data[0, 0] = 7
data[0, 1] = 3
data[0, 2] = 7
data[1, 0] = 8
data[2, 0] = 7
data[7, 7] = 7
self.x = 0
self.count = 0
self.time = 0
def update(self):
start = time.time()
self.cls(2)
for h in range(20):
for i in range(15):
for j in range(20):
self.blt(80, 80, 8, 8, 0, 0, 0, 0)
self.rectb(50, 50, 30, 40, 3)
self.circ(100, 100, 4, 7)
self.circb(30, 100, 4, 7)
self.x = (self.x + 1) % 160
self.pix(self.x, 0, 7)
self.time += time.time() - start
self.count += 1
if self.count == 50:
print(self.time / self.count * 1000)
self.time = 0
self.count = 0
def key_press(self, key, modifiers):
if key == retri.key.ESCAPE or key == retri.key.Q:
exit()
App().run()
| 20
| 57
| 0.448148
|
6dc7b127ae06582a47d2b73be028681cafbb9432
| 1,797
|
py
|
Python
|
setup.py
|
LKNSI/autokeras
|
b26758a8c639795b1850398ffb66403c8ae69c86
|
[
"Apache-2.0"
] | 1
|
2021-09-26T15:23:52.000Z
|
2021-09-26T15:23:52.000Z
|
setup.py
|
vidushi-chouksey/autokeras
|
f53e3fe7bfdf06e95e9626c23073780608a98eb8
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
vidushi-chouksey/autokeras
|
f53e3fe7bfdf06e95e9626c23073780608a98eb8
|
[
"Apache-2.0"
] | null | null | null |
from distutils.core import setup
from pathlib import Path
from setuptools import find_packages
this_file = Path(__file__).resolve()
readme = this_file.parent / "README.md"
setup(
name="autokeras",
version="1.0.16",
description="AutoML for deep learning",
package_data={"": ["README.md"]},
long_description=readme.read_text(encoding="utf-8"),
long_description_content_type="text/markdown",
author="Data Analytics at Texas A&M (DATA) Lab, Keras Team",
author_email="jhfjhfj1@gmail.com",
url="http://autokeras.com",
download_url="https://github.com/keras-team/autokeras/archive/1.0.16.tar.gz",
keywords=["AutoML", "Keras"],
install_requires=[
"packaging",
"keras-tuner>=1.0.2",
"tensorflow<=2.5.0,>=2.3.0",
"scikit-learn",
"pandas",
],
extras_require={
"tests": [
"pytest>=4.4.0",
"flake8",
"black",
"isort",
"pytest-xdist",
"pytest-cov",
"coverage",
"typeguard>=2,<2.11.0",
"typedapi>=0.2,<0.3",
],
},
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Libraries",
],
license="MIT",
packages=find_packages(exclude=("tests",)),
)
| 31.526316
| 81
| 0.579855
|
2dc24de00a16ea34088ecd0ab46570fae5ddef64
| 1,443
|
py
|
Python
|
tests/contrib/operators/test_gcp_function_operator_system.py
|
FlyrInc/airflow-1
|
74b22337b45a1eb25585d52e35694e6b0eb81f03
|
[
"Apache-2.0"
] | 15
|
2017-04-06T09:01:50.000Z
|
2021-10-02T13:54:31.000Z
|
tests/contrib/operators/test_gcp_function_operator_system.py
|
FlyrInc/airflow-1
|
74b22337b45a1eb25585d52e35694e6b0eb81f03
|
[
"Apache-2.0"
] | 26
|
2019-08-05T13:44:11.000Z
|
2022-03-30T10:06:18.000Z
|
tests/contrib/operators/test_gcp_function_operator_system.py
|
FlyrInc/airflow-1
|
74b22337b45a1eb25585d52e35694e6b0eb81f03
|
[
"Apache-2.0"
] | 21
|
2017-08-20T03:01:05.000Z
|
2021-09-07T06:47:51.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from tests.contrib.utils.base_gcp_system_test_case import \
SKIP_TEST_WARNING, DagGcpSystemTestCase
from tests.contrib.utils.gcp_authenticator import GCP_FUNCTION_KEY
@unittest.skipIf(
DagGcpSystemTestCase.skip_check(GCP_FUNCTION_KEY), SKIP_TEST_WARNING)
class GcpFunctionExampleDagsSystemTest(DagGcpSystemTestCase):
def __init__(self, method_name='runTest'):
super(GcpFunctionExampleDagsSystemTest, self).__init__(
method_name,
dag_id='example_gcp_function',
gcp_key=GCP_FUNCTION_KEY)
def test_run_example_dag_function(self):
self._run_dag()
| 37.973684
| 73
| 0.765073
|
8a4b0649ce1ac6e71dfed7b8cdefa5c15f9ded55
| 2,331
|
py
|
Python
|
pipeline/feature-extraction/pipeline_pun_extraction.py
|
I2Cvb/lemaitre-2016-nov
|
9367e550f5ebdbf5994dfe318d319d0a14320240
|
[
"MIT"
] | 3
|
2017-05-18T11:50:19.000Z
|
2019-06-28T13:46:50.000Z
|
pipeline/feature-extraction/pipeline_pun_extraction.py
|
I2Cvb/lemaitre-2016-nov
|
9367e550f5ebdbf5994dfe318d319d0a14320240
|
[
"MIT"
] | null | null | null |
pipeline/feature-extraction/pipeline_pun_extraction.py
|
I2Cvb/lemaitre-2016-nov
|
9367e550f5ebdbf5994dfe318d319d0a14320240
|
[
"MIT"
] | 2
|
2018-02-11T22:17:21.000Z
|
2020-10-19T06:57:20.000Z
|
"""
This pipeline extracts the PUN parameters
and saves the data in a matrix.
"""
import os
import numpy as np
from joblib import Parallel, delayed
from protoclass.data_management import DCEModality
from protoclass.data_management import GTModality
from protoclass.extraction import PUNQuantificationExtraction
# Define the path where all the patients are
path_patients = '/data/prostate/experiments'
# Define the path of the modality to normalize
path_dce = 'DCE_reg_bspline'
# Define the path of the ground for the prostate
path_gt = 'GT_inv/prostate'
# Define the label of the ground-truth which will be provided
label_gt = ['prostate']
# Define the path to store the PUN data
path_store = '/data/prostate/pre-processing/lemaitre-2016-nov/pun-features'
# Generate the different path to be later treated
path_patients_list_dce = []
path_patients_list_gt = []
# Create the generator
id_patient_list = [name for name in os.listdir(path_patients)
if os.path.isdir(os.path.join(path_patients, name))]
for id_patient in id_patient_list:
# Append for the DCE data
path_patients_list_dce.append(os.path.join(path_patients, id_patient,
path_dce))
# Append for the GT data - Note that we need a list of gt path
path_patients_list_gt.append([os.path.join(path_patients, id_patient,
path_gt)])
for p_dce, p_gt, pat in zip(path_patients_list_dce, path_patients_list_gt,
id_patient_list):
print 'Processing #{}'.format(pat)
    # Create the PUN extractor
pun_ext = PUNQuantificationExtraction(DCEModality())
# Read the DCE
print 'Read DCE images'
dce_mod = DCEModality()
dce_mod.read_data_from_path(p_dce)
# Read the GT
print 'Read GT images'
gt_mod = GTModality()
gt_mod.read_data_from_path(label_gt, p_gt)
    # Fit the PUN model parameters
print 'Extract Weibull'
pun_ext.fit(dce_mod, ground_truth=gt_mod, cat=label_gt[0])
# Extract the matrix
print 'Extract the feature matrix'
data = pun_ext.transform(dce_mod, ground_truth=gt_mod, cat=label_gt[0])
pat_chg = pat.lower().replace(' ', '_') + '_pun.npy'
filename = os.path.join(path_store, pat_chg)
np.save(filename, data)
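# --- Illustrative helper (editor's sketch, not part of the original pipeline) ---
# The loop above stores one '<patient>_pun.npy' file per patient in `path_store`.
# A downstream script could reload a single patient's feature matrix with the
# helper below; the exact array shape depends on the PUN extractor.
def load_pun_features(pat):
    """Reload the stored PUN feature matrix for one patient (illustrative only)."""
    pat_chg = pat.lower().replace(' ', '_') + '_pun.npy'
    return np.load(os.path.join(path_store, pat_chg))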
| 32.830986
| 75
| 0.705277
|
41498d82e1d7933b7b0a0c444147819cc1c136df
| 604
|
py
|
Python
|
newsApp/dbItem.py
|
adityabansal/newsAroundMe
|
06d30c8a281baae90f916aa0b0bfe3b4db9d2293
|
[
"MIT"
] | 9
|
2017-06-22T18:42:54.000Z
|
2021-03-28T18:58:44.000Z
|
newsApp/dbItem.py
|
adityabansal/newsAroundMe
|
06d30c8a281baae90f916aa0b0bfe3b4db9d2293
|
[
"MIT"
] | 8
|
2016-10-08T12:50:50.000Z
|
2022-03-11T23:13:17.000Z
|
newsApp/dbItem.py
|
adityabansal/newsAroundMe
|
06d30c8a281baae90f916aa0b0bfe3b4db9d2293
|
[
"MIT"
] | 3
|
2018-01-18T11:01:32.000Z
|
2018-11-20T08:33:30.000Z
|
class DbItem:
"""
    Represents a generic item which can be added to, updated in, deleted from,
    or retrieved from a database.
    Each dbItem consists of a unique identifier and a set of tags (which are simple key-value pairs).
"""
def __init__(self, id, tags=None):
"""
Instantiates a new dbItem object.
        Requires an 'id' parameter which should be a simple string representing a
        unique identifier for the dbItem.
        Optionally accepts a 'tags' parameter which should be a dictionary of
        key-value pairs, e.g. you can have a tag for the language of the dbItem.
"""
self.id = id
self.tags = tags
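# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal example of constructing a DbItem; the id and tag values below are
# arbitrary placeholders.
if __name__ == '__main__':
    item = DbItem('article-123', tags={'language': 'en', 'source': 'rss'})
    print(item.id)    # 'article-123'
    print(item.tags)  # {'language': 'en', 'source': 'rss'}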
| 27.454545
| 98
| 0.69702
|
8c47257f83358248a6975d8c4b2adae403319acd
| 3,417
|
py
|
Python
|
modules/nltk_contrib/scripttranscriber/thai_unittest.py
|
h4ck3rm1k3/NLP-project
|
aeba6302f60d27a8b9e65ad28d2d74e1276c7cd6
|
[
"MIT"
] | 123
|
2015-01-06T10:46:18.000Z
|
2022-02-01T10:05:16.000Z
|
nltk_contrib/scripttranscriber/thai_unittest.py
|
silky/nltk_contrib
|
c152bde901f05915e90b07a615b232adb123bed8
|
[
"Apache-2.0"
] | 12
|
2015-01-13T06:27:18.000Z
|
2020-07-30T23:00:41.000Z
|
nltk_contrib/scripttranscriber/thai_unittest.py
|
silky/nltk_contrib
|
c152bde901f05915e90b07a615b232adb123bed8
|
[
"Apache-2.0"
] | 114
|
2015-01-13T04:47:49.000Z
|
2021-11-13T08:16:02.000Z
|
# -*- coding: utf-8 -*-
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
"""Sample transcription extractor based on the LCTL Thai parallel
data. Also tests Thai prons and alignment.
"""
__author__ = """
rws@uiuc.edu (Richard Sproat)
"""
import sys
import os
import documents
import tokens
import token_comp
import extractor
import thai_extractor
import pronouncer
import unittest
from __init__ import BASE_
## A sample of 10,000 from each:
ENGLISH_ = '%s/testdata/thai_test_eng.txt' % BASE_
THAI_ = '%s/testdata/thai_test_thai.txt' % BASE_
XML_FILE_ = '%s/testdata/thai_test.xml' % BASE_
TMP_XML_FILE_ = '/tmp/thai_test.xml'
MATCH_FILE_ = '%s/testdata/thai_test.matches' % BASE_
TMP_MATCH_FILE_ = '/tmp/thai_test.matches'
BAD_COST_ = 6.0
def LoadData():
t_extr = thai_extractor.ThaiExtractor()
e_extr = extractor.NameExtractor()
doclist = documents.Doclist()
doc = documents.Doc()
doclist.AddDoc(doc)
#### Thai
lang = tokens.Lang()
lang.SetId('th')
doc.AddLang(lang)
t_extr.FileExtract(THAI_)
lang.SetTokens(t_extr.Tokens())
lang.CompactTokens()
for t in lang.Tokens():
pronouncer_ = pronouncer.UnitranPronouncer(t)
pronouncer_.Pronounce()
#### English
lang = tokens.Lang()
lang.SetId('en')
doc.AddLang(lang)
e_extr.FileExtract(ENGLISH_)
lang.SetTokens(e_extr.Tokens())
lang.CompactTokens()
for t in lang.Tokens():
pronouncer_ = pronouncer.EnglishPronouncer(t)
pronouncer_.Pronounce()
return doclist
def ComputePhoneMatches(doclist, match_file):
matches = {}
for doc in doclist.Docs():
lang1 = doc.Langs()[0]
lang2 = doc.Langs()[1]
for t1 in lang1.Tokens():
hash1 = t1.EncodeForHash()
for t2 in lang2.Tokens():
hash2 = t2.EncodeForHash()
try: result = matches[(hash1, hash2)] ## don't re-calc
except KeyError:
comparator = token_comp.OldPhoneticDistanceComparator(t1, t2)
comparator.ComputeDistance()
result = comparator.ComparisonResult()
matches[(hash1, hash2)] = result
values = matches.values()
values.sort(lambda x, y: cmp(x.Cost(), y.Cost()))
p = open(match_file, 'w') ## zero out the file
p.close()
for v in values:
if v.Cost() > BAD_COST_: break
v.Print(match_file, 'a')
def main(output = False):
doclist = LoadData()
if output:
doclist.XmlDump(XML_FILE_, utf8 = True)
ComputePhoneMatches(doclist, MATCH_FILE_)
else:
doclist.XmlDump(TMP_XML_FILE_, utf8 = True)
ComputePhoneMatches(doclist, TMP_MATCH_FILE_)
unittest.TestUnitOutputs(sys.argv[0] + ': token parsing',\
XML_FILE_, TMP_XML_FILE_)
unittest.TestUnitOutputs(sys.argv[0] + ': string matching',\
MATCH_FILE_, TMP_MATCH_FILE_)
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'generate':
main(True)
else:
main()
| 29.456897
| 75
| 0.681299
|
b914e15fded72108e35d0bd970f5922de56f1c8e
| 530
|
py
|
Python
|
rider/migrations/0002_auto_20200427_1501.py
|
Mariga123/carpool
|
f7330634ace2718c2347694b207b9dd49ef6538f
|
[
"MIT"
] | null | null | null |
rider/migrations/0002_auto_20200427_1501.py
|
Mariga123/carpool
|
f7330634ace2718c2347694b207b9dd49ef6538f
|
[
"MIT"
] | null | null | null |
rider/migrations/0002_auto_20200427_1501.py
|
Mariga123/carpool
|
f7330634ace2718c2347694b207b9dd49ef6538f
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.5 on 2020-04-27 15:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rider', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='ride',
name='complete',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='ride',
name='status',
field=models.BooleanField(default=False),
),
]
| 22.083333
| 53
| 0.566038
|
946843edf83632fdc60fc17760beda577a044c8b
| 4,113
|
py
|
Python
|
oanda_candles/candle_requester.py
|
aallaire/oanda-candles
|
d9bc4878e0a7d9ce5d2db65d36840dc2edcca6f1
|
[
"Apache-2.0"
] | 3
|
2020-10-11T13:13:28.000Z
|
2021-10-30T04:19:32.000Z
|
oanda_candles/candle_requester.py
|
aallaire/oanda-candles
|
d9bc4878e0a7d9ce5d2db65d36840dc2edcca6f1
|
[
"Apache-2.0"
] | 1
|
2021-12-07T14:42:37.000Z
|
2022-01-25T15:57:54.000Z
|
oanda_candles/candle_requester.py
|
aallaire/oanda-candles
|
d9bc4878e0a7d9ce5d2db65d36840dc2edcca6f1
|
[
"Apache-2.0"
] | null | null | null |
from requests import Session
from typing import List
from urllib.parse import urljoin
from forex_types import Pair
from time_int import TimeInt
from oanda_candles.gran import Gran
from oanda_candles.candle import Candle
class UrlRoot:
real_url = "https://api-fxtrade.oanda.com"
practice_url = "https://api-fxpractice.oanda.com"
class CandleRequester:
def __init__(self, client, pair: Pair, gran: Gran):
self.session: Session = client.session
root_url = UrlRoot.real_url if client.real else UrlRoot.practice_url
self.url = urljoin(root_url, f"/v3/instruments/{pair}/candles")
self.headers = {
"Accept-Datetime-Format": "UNIX",
"Authorization": f"Bearer {client.token}",
"ContentType": "application/json",
}
self.params = {
"alignmentTimezone": "Etc/GMT+1",
"dailyAlignment": 23,
"granularity": str(gran),
"price": "BAM",
"weeklyAlignment": "Sunday",
}
self.history_reached: bool = False
def get(self, count: int) -> List[Candle]:
"""Request the most recent count candles."""
if count < 5000:
return self._request(count=count)
candles = self._request(count=2000)
if len(candles) >= 2000:
extra = count - 2000
self.prepend(candles, extra)
return candles
def get_before(self, time: TimeInt, count: int) -> List[Candle]:
return self._request(count=count, before=time)
def get_after(self, time: TimeInt):
return self._request(after=time)
def prepend(self, candles: List[Candle], count: int) -> bool:
"""Prepend candles to front of a list (recurse if needed).
Args:
candles: list of candles that is prepended with older candles.
count: number of candles to prepend. If 0 or less do nothing.
Returns:
True if the requested number of candles is provided.
False if Oanda ran out of candles to give us.
"""
if count <= 0:
return True
first_candle_time = candles[0].time if candles else TimeInt.now()
pull_size = count if count <= 5000 else 2000
new_candles = self._request(count=pull_size, before=first_candle_time)
if len(new_candles):
candles[0:0] = new_candles
if len(new_candles) < pull_size:
return False
else:
if count > pull_size:
return self.prepend(candles, count - pull_size)
return True
return False
def extend(self, candles: List[Candle]) -> bool:
"""Extend candles to back of a list up to current time (recurse if needed).
Args:
candles: list of candles that is extended with newer candles.
Returns:
True if the last candle we end up with is complete
            False if the last candle we end up with is partial
"""
if candles:
last_candle_time = candles[-1].time
new_candles = self._request(after=last_candle_time, count=5000)
candles[-1:] = new_candles
if len(new_candles) >= 5000:
self.extend(candles)
else:
candles[:] = self._request(count=100)
return candles[-1].complete
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _request(
self, count: int = None, before: int = None, after: int = None
) -> List[Candle]:
params = dict(self.params)
if count is not None:
params["count"] = count
if after is not None:
params["from"] = after
if before is not None:
params["to"] = before
response = self.session.get(self.url, headers=self.headers, params=params)
response.raise_for_status()
data = response.json()
return [Candle.from_oanda(_) for _ in data["candles"]]
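# --- Illustrative usage (editor's sketch, not part of the original module) ---
# CandleRequester only needs a client object exposing `session`, `real` and
# `token` (see __init__ above), so a tiny stand-in client is used here. The
# token is a placeholder, and the Pair/Gran constructors shown are assumptions
# about the forex_types and oanda_candles APIs.
def _example_fetch():
    class _StubClient:
        def __init__(self, token):
            self.session = Session()
            self.real = False  # use the practice endpoint
            self.token = token
    client = _StubClient("OANDA_TOKEN_PLACEHOLDER")
    pair = Pair("EUR_USD")  # assumed constructor
    gran = Gran.H1  # assumed granularity constant
    requester = CandleRequester(client, pair, gran)
    candles = requester.get(500)  # most recent 500 candles
    requester.extend(candles)  # top up to the current moment
    return candles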
| 36.078947
| 83
| 0.571116
|
c8b66aa4307e6d69d86608f65d4958aec4471847
| 18,980
|
py
|
Python
|
maize/util/file_keyring.py
|
denern/maize-blockchain
|
b8639899f44b03232dda90c706d061e5e1158ca3
|
[
"Apache-2.0"
] | 14
|
2021-07-21T19:45:05.000Z
|
2022-02-09T04:29:51.000Z
|
maize/util/file_keyring.py
|
denern/maize-blockchain
|
b8639899f44b03232dda90c706d061e5e1158ca3
|
[
"Apache-2.0"
] | 9
|
2021-07-24T09:30:46.000Z
|
2021-12-05T19:51:29.000Z
|
maize/util/file_keyring.py
|
denern/maize-blockchain
|
b8639899f44b03232dda90c706d061e5e1158ca3
|
[
"Apache-2.0"
] | 5
|
2021-10-04T17:33:47.000Z
|
2022-03-15T08:37:51.000Z
|
import base64
import fasteners
import os
import shutil
import sys
import threading
import yaml
from maize.util.default_root import DEFAULT_KEYS_ROOT_PATH
from contextlib import contextmanager
from cryptography.hazmat.primitives.ciphers.aead import ChaCha20Poly1305 # pyright: reportMissingModuleSource=false
from functools import wraps
from hashlib import pbkdf2_hmac
from pathlib import Path
from secrets import token_bytes
from typing import Any, Dict, Optional
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
SALT_BYTES = 16 # PBKDF2 param
NONCE_BYTES = 12 # ChaCha20Poly1305 nonce is 12-bytes
HASH_ITERS = 100000 # PBKDF2 param
CHECKBYTES_VALUE = b"5f365b8292ee505b" # Randomly generated
MAX_SUPPORTED_VERSION = 1 # Max supported file format version
class FileKeyringLockTimeout(Exception):
pass
def loads_keyring(method):
"""
Decorator which lazily loads the FileKeyring data
"""
@wraps(method)
def inner(self, *args, **kwargs):
self.check_if_keyring_file_modified()
# Check the outer payload for 'data', and check if we have a decrypted cache (payload_cache)
with self.load_keyring_lock:
if (self.has_content() and not self.payload_cache) or self.needs_load_keyring:
self.load_keyring()
return method(self, *args, **kwargs)
return inner
@contextmanager
def acquire_writer_lock(lock_path: Path, timeout=5, max_iters=6):
lock = fasteners.InterProcessReaderWriterLock(str(lock_path))
result = None
for i in range(0, max_iters):
if lock.acquire_write_lock(timeout=timeout):
yield # <----
lock.release_write_lock()
break
else:
print(f"Failed to acquire keyring writer lock after {timeout} seconds.", end="")
if i < max_iters - 1:
print(f" Remaining attempts: {max_iters - 1 - i}")
else:
print("")
raise FileKeyringLockTimeout("Exhausted all attempts to acquire the writer lock")
return result
@contextmanager
def acquire_reader_lock(lock_path: Path, timeout=5, max_iters=6):
lock = fasteners.InterProcessReaderWriterLock(str(lock_path))
result = None
for i in range(0, max_iters):
if lock.acquire_read_lock(timeout=timeout):
yield # <----
lock.release_read_lock()
break
else:
print(f"Failed to acquire keyring reader lock after {timeout} seconds.", end="")
if i < max_iters - 1:
print(f" Remaining attempts: {max_iters - 1 - i}")
else:
print("")
raise FileKeyringLockTimeout("Exhausted all attempts to acquire the writer lock")
return result
class FileKeyring(FileSystemEventHandler):
"""
    FileKeyring provides a file-based keyring store that is encrypted with a key derived
    from the user-provided master passphrase. The public interface is intended to align
with the API provided by the keyring module such that the KeyringWrapper class can
pick an appropriate keyring store backend based on the OS.
The keyring file format uses YAML with a few top-level keys:
# Keyring file version, currently 1
version: <int>
# Random salt used as a PBKDF2 parameter. Updated when the master passphrase changes
salt: <hex string of 16 bytes>
# Random nonce used as a ChaCha20Poly1305 parameter. Updated on each write to the file
nonce: <hex string of 12 bytes>
# The encrypted data. Internally, a checkbytes value is concatenated with the
# inner payload (a YAML document). The inner payload YAML contains a "keys" element
# that holds a dictionary of keys.
data: <base64-encoded string of encrypted inner-payload>
# An optional passphrase hint
passphrase_hint: <cleartext string>
The file is encrypted using ChaCha20Poly1305. The symmetric key is derived from the
master passphrase using PBKDF2. The nonce is updated each time the file is written-to.
The salt is updated each time the master passphrase is changed.
"""
keyring_path: Path
keyring_lock_path: Path
keyring_observer: Observer = None
load_keyring_lock: threading.RLock # Guards access to needs_load_keyring
needs_load_keyring: bool = False
salt: Optional[bytes] = None # PBKDF2 param
payload_cache: dict = {} # Cache of the decrypted YAML contained in outer_payload_cache['data']
outer_payload_cache: dict = {} # Cache of the plaintext YAML "outer" contents (never encrypted)
@staticmethod
def keyring_path_from_root(keys_root_path: Path) -> Path:
"""
Returns the path to keyring.yaml
"""
path_filename = keys_root_path / "keyring.yaml"
return path_filename
@staticmethod
def lockfile_path_for_file_path(file_path: Path) -> Path:
"""
Returns a path suitable for creating a lockfile derived from the input path.
Currently used to provide a lockfile path to be used by
fasteners.InterProcessReaderWriterLock when guarding access to keyring.yaml
"""
return file_path.with_name(f".{file_path.name}.lock")
def __init__(self, keys_root_path: Path = DEFAULT_KEYS_ROOT_PATH):
"""
Creates a fresh keyring.yaml file if necessary. Otherwise, loads and caches the
outer (plaintext) payload
"""
self.keyring_path = FileKeyring.keyring_path_from_root(keys_root_path)
self.keyring_lock_path = FileKeyring.lockfile_path_for_file_path(self.keyring_path)
self.payload_cache = {} # This is used as a building block for adding keys etc if the keyring is empty
self.load_keyring_lock = threading.RLock()
self.keyring_last_mod_time = None
# Key/value pairs to set on the outer payload on the next write
self.outer_payload_properties_for_next_write: Dict[str, Any] = {}
if not self.keyring_path.exists():
# Super simple payload if starting from scratch
outer_payload = FileKeyring.default_outer_payload()
self.write_data_to_keyring(outer_payload)
self.outer_payload_cache = outer_payload
else:
self.load_outer_payload()
self.setup_keyring_file_watcher()
def setup_keyring_file_watcher(self):
observer = Observer()
# recursive=True necessary for macOS support
observer.schedule(self, self.keyring_path.parent, recursive=True)
observer.start()
        self.keyring_observer = observer  # keep a handle on the running observer so cleanup can unschedule it
def cleanup_keyring_file_watcher(self):
if getattr(self, "keyring_observer"):
self.keyring_observer.unschedule_all()
def on_modified(self, event):
self.check_if_keyring_file_modified()
def check_if_keyring_file_modified(self):
if self.keyring_path.exists():
try:
last_modified = os.stat(self.keyring_path).st_mtime
if not self.keyring_last_mod_time or self.keyring_last_mod_time < last_modified:
self.keyring_last_mod_time = last_modified
with self.load_keyring_lock:
self.needs_load_keyring = True
except FileNotFoundError:
# Shouldn't happen, but if the file doesn't exist there's nothing to do...
pass
@staticmethod
def default_outer_payload() -> dict:
return {"version": 1}
@staticmethod
def generate_nonce() -> bytes:
"""
Creates a nonce to be used by ChaCha20Poly1305. This should be called each time
the payload is encrypted.
"""
return token_bytes(NONCE_BYTES)
@staticmethod
def generate_salt() -> bytes:
"""
Creates a salt to be used in combination with the master passphrase to derive
a symmetric key using PBKDF2
"""
return token_bytes(SALT_BYTES)
def has_content(self) -> bool:
"""
Quick test to determine if keyring is populated. The "data" value is expected
to be encrypted.
"""
if self.outer_payload_cache is not None and self.outer_payload_cache.get("data"):
return True
return False
def ensure_cached_keys_dict(self) -> dict:
"""
Returns payload_cache["keys"], ensuring that it's created if necessary
"""
if self.payload_cache.get("keys") is None:
self.payload_cache["keys"] = {}
return self.payload_cache["keys"]
@loads_keyring
def _inner_get_password(self, service: str, user: str) -> Optional[str]:
return self.ensure_cached_keys_dict().get(service, {}).get(user)
def get_password(self, service: str, user: str) -> Optional[str]:
"""
Returns the passphrase named by the 'user' parameter from the cached
keyring data (does not force a read from disk)
"""
with acquire_reader_lock(lock_path=self.keyring_lock_path):
return self._inner_get_password(service, user)
@loads_keyring
def _inner_set_password(self, service: str, user: str, passphrase: str, *args, **kwargs):
keys = self.ensure_cached_keys_dict()
# Convert the passphrase to a string (if necessary)
passphrase = bytes(passphrase).hex() if type(passphrase) == bytes else str(passphrase) # type: ignore
# Ensure a dictionary exists for the 'service'
if keys.get(service) is None:
keys[service] = {}
service_dict = keys[service]
service_dict[user] = passphrase
keys[service] = service_dict
self.payload_cache["keys"] = keys
self.write_keyring() # Updates the cached payload (self.payload_cache) on success
def set_password(self, service: str, user: str, passphrase: str):
"""
Store the passphrase to the keyring data using the name specified by the
'user' parameter. Will force a write to keyring.yaml on success.
"""
with acquire_writer_lock(lock_path=self.keyring_lock_path):
self._inner_set_password(service, user, passphrase)
@loads_keyring
def _inner_delete_password(self, service: str, user: str):
keys = self.ensure_cached_keys_dict()
service_dict = keys.get(service, {})
if service_dict.pop(user, None):
if len(service_dict) == 0:
keys.pop(service)
self.payload_cache["keys"] = keys
self.write_keyring() # Updates the cached payload (self.payload_cache) on success
def delete_password(self, service: str, user: str):
"""
Deletes the passphrase named by the 'user' parameter from the keyring data
(will force a write to keyring.yaml on success)
"""
with acquire_writer_lock(lock_path=self.keyring_lock_path):
self._inner_delete_password(service, user)
def check_passphrase(self, passphrase: str, force_reload: bool = False) -> bool:
"""
Attempts to validate the passphrase by decrypting the outer_payload_cache["data"]
contents and checking the checkbytes value
"""
if force_reload or len(self.outer_payload_cache) == 0:
self.load_outer_payload()
if not self.salt or len(self.outer_payload_cache) == 0:
return False
nonce = None
nonce_str = self.outer_payload_cache.get("nonce")
if nonce_str:
nonce = bytes.fromhex(nonce_str)
if not nonce:
return False
key = FileKeyring.symmetric_key_from_passphrase(passphrase, self.salt)
encrypted_data = base64.b64decode(yaml.safe_load(self.outer_payload_cache.get("data") or ""))
try:
decrypted_data = self.decrypt_data(encrypted_data, key, nonce)
except Exception:
return False
return self.have_valid_checkbytes(decrypted_data)
def have_valid_checkbytes(self, decrypted_data: bytes) -> bool:
checkbytes = decrypted_data[: len(CHECKBYTES_VALUE)]
return checkbytes == CHECKBYTES_VALUE
@staticmethod
def symmetric_key_from_passphrase(passphrase: str, salt: bytes) -> bytes:
return pbkdf2_hmac("sha256", passphrase.encode(), salt, HASH_ITERS)
@staticmethod
def get_symmetric_key(salt: bytes) -> bytes:
from maize.util.keychain import obtain_current_passphrase
try:
passphrase = obtain_current_passphrase(use_passphrase_cache=True)
except Exception as e:
print(f"Unable to unlock the keyring: {e}")
sys.exit(1)
return FileKeyring.symmetric_key_from_passphrase(passphrase, salt)
def encrypt_data(self, input_data: bytes, key: bytes, nonce: bytes) -> bytes:
encryptor = ChaCha20Poly1305(key)
data = encryptor.encrypt(nonce, input_data, None)
return data
def decrypt_data(self, input_data: bytes, key: bytes, nonce: bytes) -> bytes:
decryptor = ChaCha20Poly1305(key)
output = decryptor.decrypt(nonce, input_data, None)
return output
def load_outer_payload(self):
if not self.keyring_path.is_file():
raise ValueError("Keyring file not found")
self.outer_payload_cache = dict(yaml.safe_load(open(self.keyring_path, "r")))
version = int(self.outer_payload_cache.get("version"))
if version > MAX_SUPPORTED_VERSION:
print(
f"Keyring format is unrecognized. Found version {version}"
", expected a value <= {MAX_SUPPORTED_VERSION}"
)
print("Please update to a newer version")
sys.exit(1)
# Attempt to load the salt. It may not be present if the keyring is empty.
salt = self.outer_payload_cache.get("salt")
if salt:
self.salt = bytes.fromhex(salt)
def load_keyring(self, passphrase: str = None):
with self.load_keyring_lock:
self.needs_load_keyring = False
self.load_outer_payload()
# Missing the salt or nonce indicates that the keyring doesn't have any keys stored.
salt_str = self.outer_payload_cache.get("salt")
nonce_str = self.outer_payload_cache.get("nonce")
if not salt_str or not nonce_str:
return
salt = bytes.fromhex(salt_str)
nonce = bytes.fromhex(nonce_str)
key = None
if passphrase:
key = FileKeyring.symmetric_key_from_passphrase(passphrase, salt)
else:
key = FileKeyring.get_symmetric_key(salt)
encrypted_payload = base64.b64decode(yaml.safe_load(self.outer_payload_cache.get("data") or ""))
decrypted_data = self.decrypt_data(encrypted_payload, key, nonce)
if not self.have_valid_checkbytes(decrypted_data):
raise ValueError("decryption failure (checkbytes)")
inner_payload = decrypted_data[len(CHECKBYTES_VALUE) :]
self.payload_cache = dict(yaml.safe_load(inner_payload))
def is_first_write(self):
return self.outer_payload_cache == FileKeyring.default_outer_payload()
def write_keyring(self, fresh_salt: bool = False):
from maize.util.keyring_wrapper import KeyringWrapper
inner_payload = self.payload_cache
inner_payload_yaml = yaml.safe_dump(inner_payload)
nonce = FileKeyring.generate_nonce()
key = None
# Update the salt when changing the master passphrase or when the keyring is new (empty)
if fresh_salt or not self.salt:
self.salt = FileKeyring.generate_salt()
salt = self.salt
# When writing for the first time, we should have a cached passphrase which hasn't been
# validated (because it can't be validated yet...)
if self.is_first_write() and KeyringWrapper.get_shared_instance().has_cached_master_passphrase():
key = FileKeyring.symmetric_key_from_passphrase(
KeyringWrapper.get_shared_instance().get_cached_master_passphrase()[0], self.salt
)
else:
# Prompt for the passphrase interactively and derive the key
key = FileKeyring.get_symmetric_key(salt)
encrypted_inner_payload = self.encrypt_data(CHECKBYTES_VALUE + inner_payload_yaml.encode(), key, nonce)
outer_payload = {
"version": 1,
"salt": self.salt.hex(),
"nonce": nonce.hex(),
"data": base64.b64encode(encrypted_inner_payload).decode("utf-8"),
}
# Merge in other properties like "passphrase_hint"
outer_payload.update(self.outer_payload_properties_for_next_write)
self.outer_payload_properties_for_next_write = {}
self.write_data_to_keyring(outer_payload)
# Update our cached payload
self.outer_payload_cache = outer_payload
self.payload_cache = inner_payload
def write_data_to_keyring(self, data):
os.makedirs(os.path.dirname(self.keyring_path), 0o700, True)
temp_path: Path = self.keyring_path.with_suffix("." + str(os.getpid()))
with open(os.open(str(temp_path), os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o600), "w") as f:
_ = yaml.safe_dump(data, f)
try:
os.replace(str(temp_path), self.keyring_path)
except PermissionError:
shutil.move(str(temp_path), str(self.keyring_path))
def prepare_for_migration(self):
if not self.payload_cache:
self.payload_cache = {"keys": {}}
if not self.salt:
self.salt = FileKeyring.generate_salt()
def get_passphrase_hint(self) -> Optional[str]:
"""
Return the passphrase hint (if set). The hint data may not yet be written to the keyring, so we
return the hint data either from the staging dict (outer_payload_properties_for_next_write), or
from outer_payload_cache (loaded from the keyring)
"""
passphrase_hint: Optional[str] = self.outer_payload_properties_for_next_write.get("passphrase_hint", None)
if passphrase_hint is None:
passphrase_hint = self.outer_payload_cache.get("passphrase_hint", None)
return passphrase_hint
def set_passphrase_hint(self, passphrase_hint: Optional[str]) -> None:
"""
Store the new passphrase hint in the staging dict (outer_payload_properties_for_next_write) to
be written-out on the next write to the keyring.
"""
assert self.outer_payload_properties_for_next_write is not None
if passphrase_hint is not None and len(passphrase_hint) > 0:
self.outer_payload_properties_for_next_write["passphrase_hint"] = passphrase_hint
elif "passphrase_hint" in self.outer_payload_properties_for_next_write:
del self.outer_payload_properties_for_next_write["passphrase_hint"]
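# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal, hypothetical round-trip through the public get/set/delete API
# described above. The root directory is a throwaway path, and in practice the
# keyring prompts for (or reuses a cached) master passphrase when the symmetric
# key is needed, so this only shows the call shapes.
def _example_round_trip(tmp_root: Path) -> Optional[str]:
    keyring = FileKeyring(keys_root_path=tmp_root)
    keyring.set_password("maize", "example-user", "example-passphrase")
    stored = keyring.get_password("maize", "example-user")
    keyring.delete_password("maize", "example-user")
    return stored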
| 39.790356
| 116
| 0.667545
|
7ea2e48f618e523d1249b33ead603aeab5773482
| 4,991
|
py
|
Python
|
test/test_utilities.py
|
cmc333333/parsons
|
50804a3627117797570f1e9233c9bbad583f7831
|
[
"Apache-2.0"
] | null | null | null |
test/test_utilities.py
|
cmc333333/parsons
|
50804a3627117797570f1e9233c9bbad583f7831
|
[
"Apache-2.0"
] | 2
|
2021-11-24T19:39:57.000Z
|
2022-01-03T23:03:35.000Z
|
test/test_utilities.py
|
cmc333333/parsons
|
50804a3627117797570f1e9233c9bbad583f7831
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import os
import pytest
import shutil
import datetime
from unittest import mock
from parsons.etl.table import Table
from parsons.utilities.datetime import date_to_timestamp, parse_date
from parsons.utilities import files
from parsons.utilities import check_env
from parsons.utilities import json_format
from parsons.utilities import sql_helpers
from test.conftest import xfail_value_error
@pytest.mark.parametrize(
["date", "exp_ts"],
[pytest.param("2018-12-13", 1544659200),
pytest.param("2018-12-13T00:00:00-08:00", 1544688000),
pytest.param("", None),
pytest.param("2018-12-13 PST", None, marks=[xfail_value_error]),
])
def test_date_to_timestamp(date, exp_ts):
assert date_to_timestamp(date) == exp_ts
def test_parse_date():
# Test parsing an ISO8601 string
expected = datetime.datetime(year=2020, month=1, day=1, tzinfo=datetime.timezone.utc)
parsed = parse_date('2020-01-01T00:00:00.000 UTC')
assert parsed == expected, parsed
# Test parsing a unix timestamp
parsed = parse_date(1577836800)
assert parsed == expected, parsed
# Test "parsing" a datetime object
parsed = parse_date(expected)
assert parsed == expected, parsed
#
# File utility tests (pytest-style)
#
def test_create_temp_file_for_path():
temp_path = files.create_temp_file_for_path('some/file.gz')
assert temp_path[-3:] == '.gz'
def test_create_temp_directory():
temp_directory = files.create_temp_directory()
test_file1 = f'{temp_directory}/test.txt'
test_file2 = f'{temp_directory}/test2.txt'
with open(test_file1, 'w') as fh1, open(test_file2, 'w') as fh2:
fh1.write('TEST')
fh2.write('TEST')
assert files.has_data(test_file1)
assert files.has_data(test_file2)
files.cleanup_temp_directory(temp_directory)
# Verify the temp file no longer exists
with pytest.raises(FileNotFoundError):
open(test_file1, 'r')
def test_close_temp_file():
temp = files.create_temp_file()
files.close_temp_file(temp)
# Verify the temp file no longer exists
with pytest.raises(FileNotFoundError):
open(temp, 'r')
def test_is_gzip_path():
assert files.is_gzip_path('some/file.gz')
assert not files.is_gzip_path('some/file')
assert not files.is_gzip_path('some/file.csv')
def test_suffix_for_compression_type():
assert files.suffix_for_compression_type(None) == ''
assert files.suffix_for_compression_type('') == ''
assert files.suffix_for_compression_type('gzip') == '.gz'
def test_compression_type_for_path():
assert files.compression_type_for_path('some/file') is None
assert files.compression_type_for_path('some/file.csv') is None
assert files.compression_type_for_path('some/file.csv.gz') == 'gzip'
def test_empty_file():
# Create fake files.
os.mkdir('tmp')
with open('tmp/empty.csv', 'w+') as _:
pass
Table([['1'], ['a']]).to_csv('tmp/full.csv')
assert not files.has_data('tmp/empty.csv')
assert files.has_data('tmp/full.csv')
# Remove fake files and dir
shutil.rmtree('tmp')
def test_json_format():
assert json_format.arg_format('my_arg') == 'myArg'
def test_remove_empty_keys():
# Assert key removed when None
test_dict = {'a': None, 'b': 2}
assert json_format.remove_empty_keys(test_dict) == {'b': 2}
# Assert key not removed when None
test_dict = {'a': 1, 'b': 2}
assert json_format.remove_empty_keys(test_dict) == {'a': 1, 'b': 2}
# Assert that a key with an empty string value is removed
test_dict = {'a': '', 'b': 2}
assert json_format.remove_empty_keys(test_dict) == {'b': 2}
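# Hypothetical implementations matching the behaviour the two tests above
# assert; parsons' actual json_format module may do more than this.
def _arg_format_sketch(name):
    # snake_case -> camelCase, e.g. 'my_arg' -> 'myArg'
    head, *rest = name.split('_')
    return head + ''.join(part.capitalize() for part in rest)

def _remove_empty_keys_sketch(data):
    # drop keys whose values are None or empty strings, keep everything else
    return {k: v for k, v in data.items() if v is not None and v != ''}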
def test_redact_credentials():
# Test with quotes, escape characters, and line breaks
test_str = """COPY schema.tablename
FROM 's3://bucket/path/to/file.csv'
credentials 'aws_access_key_id=string-\\'escaped-quote;
aws_secret_access_key='string-escape-char\\\\'
MANIFEST"""
test_result = """COPY schema.tablename
FROM 's3://bucket/path/to/file.csv'
CREDENTIALS REDACTED
MANIFEST"""
assert sql_helpers.redact_credentials(test_str) == test_result
class TestCheckEnv(unittest.TestCase):
def test_environment_field(self):
"""Test check field"""
result = check_env.check('PARAM', 'param')
self.assertEqual(result, 'param')
@mock.patch.dict(os.environ, {'PARAM': 'env_param'})
def test_environment_env(self):
"""Test check env"""
result = check_env.check('PARAM', None)
self.assertEqual(result, 'env_param')
@mock.patch.dict(os.environ, {'PARAM': 'env_param'})
def test_environment_field_env(self):
"""Test check field with env and field"""
result = check_env.check('PARAM', 'param')
self.assertEqual(result, 'param')
def test_environment_error(self):
"""Test check env raises error"""
with self.assertRaises(KeyError) as _:
check_env.check('PARAM', None)
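# A minimal sketch of the lookup order the TestCheckEnv cases describe
# (not parsons' actual check_env.check): an explicit field value wins,
# otherwise the named environment variable is used, and a missing value
# surfaces as a KeyError.
def _check_sketch(env_name, field):
    if field is not None:
        return field
    return os.environ[env_name]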
| 29.017442
| 89
| 0.690643
|
3a75c936971996b08401ac77cff7212e93eccb92
| 2,250
|
py
|
Python
|
tm.py
|
jeremyng123/pytm
|
e0bbbbbcfa387887753e27f78678c6004edf0e85
|
[
"MIT"
] | null | null | null |
tm.py
|
jeremyng123/pytm
|
e0bbbbbcfa387887753e27f78678c6004edf0e85
|
[
"MIT"
] | null | null | null |
tm.py
|
jeremyng123/pytm
|
e0bbbbbcfa387887753e27f78678c6004edf0e85
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import random
from pytm.pytm import TM, Actor, Boundary, Dataflow, Datastore, Lambda, Server
# make sure generated diagrams do not change, makes sense if they're commited
random.seed(0)
tm = TM("my test tm")
tm.description = "This is a sample threat model of a very simple system: a web-based comment system. The user enters comments and these are added to a database and displayed back to the user. The thought is that, though simple, it is a complete enough example to express meaningful threats."
tm.isOrdered = True
tm.mergeResponses = True
internet = Boundary("Internet")
server_db = Boundary("Server/DB")
vpc = Boundary("AWS VPC")
user = Actor("User")
user.inBoundary = internet
web = Server("Web Server")
web.OS = "Ubuntu"
web.isHardened = True
web.sanitizesInput = False
web.encodesOutput = True
web.authorizesSource = False
db = Datastore("SQL Database")
db.OS = "CentOS"
db.isHardened = False
db.inBoundary = server_db
db.isSQL = True
db.inScope = True
my_lambda = Lambda("AWS Lambda")
my_lambda.hasAccessControl = True
my_lambda.inBoundary = vpc
user_to_web = Dataflow(user, web, "User enters comments (*)")
user_to_web.protocol = "HTTP"
user_to_web.dstPort = 80
user_to_web.data = 'Comments in HTML or Markdown'
user_to_web.note = "This is a simple web app\nthat stores and retrieves user comments."
web_to_db = Dataflow(web, db, "Insert query with comments")
web_to_db.protocol = "MySQL"
web_to_db.dstPort = 3306
web_to_db.data = 'MySQL insert statement, all literals'
web_to_db.note = "Web server inserts user comments\ninto its SQL query and stores them in the DB."
db_to_web = Dataflow(db, web, "Retrieve comments")
db_to_web.protocol = "MySQL"
db_to_web.dstPort = 80
db_to_web.data = 'Web server retrieves comments from DB'
db_to_web.responseTo = web_to_db
web_to_user = Dataflow(web, user, "Show comments (*)")
web_to_user.protocol = "HTTP"
web_to_user.data = 'Web server shows comments to the end user'
web_to_user.responseTo = user_to_web
my_lambda_to_db = Dataflow(my_lambda, db, "Lambda periodically cleans DB")
my_lambda_to_db.protocol = "MySQL"
my_lambda_to_db.dstPort = 3306
my_lambda_to_db.data = "Lambda clears the DB every 6 hours"
if __name__ == "__main__":
tm.process(dfd=True)
| 30.821918
| 292
| 0.76
|
a0f6e1e12ba3df858b2e1628c41b6cfdd084ee57
| 3,685
|
py
|
Python
|
psdaq/psdaq/control_gui/CGWMainTabExpert.py
|
JBlaschke/lcls2
|
30523ef069e823535475d68fa283c6387bcf817b
|
[
"BSD-3-Clause-LBNL"
] | 16
|
2017-11-09T17:10:56.000Z
|
2022-03-09T23:03:10.000Z
|
psdaq/psdaq/control_gui/CGWMainTabExpert.py
|
JBlaschke/lcls2
|
30523ef069e823535475d68fa283c6387bcf817b
|
[
"BSD-3-Clause-LBNL"
] | 6
|
2017-12-12T19:30:05.000Z
|
2020-07-09T00:28:33.000Z
|
psdaq/psdaq/control_gui/CGWMainTabExpert.py
|
JBlaschke/lcls2
|
30523ef069e823535475d68fa283c6387bcf817b
|
[
"BSD-3-Clause-LBNL"
] | 25
|
2017-09-18T20:02:43.000Z
|
2022-03-27T22:27:42.000Z
|
"""
Class :py:class:`CGWMainTabExpert` is a QWidget for the Expert tab of the DAQ control GUI
==========================================================================================
Usage ::
import sys
from PyQt5.QtWidgets import QApplication
from psdaq.control_gui.CGWMainTabExpert import CGWMainTabExpert
app = QApplication(sys.argv)
w = CGWMainTabExpert(parent=None)
w.show()
app.exec_()
See:
- :class:`CGWMainTabExpert`
- :class:`CGWMainPartition`
- `lcls2 on github <https://github.com/slac-lcls/lcls2/psdaq/psdaq/control_gui>`_.
Created on 2019-05-07 by Mikhail Dubrovin
"""
#------------------------------
import logging
logger = logging.getLogger(__name__)
#------------------------------
from time import time
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QVBoxLayout, QSplitter, QTextEdit, QSizePolicy
from PyQt5.QtCore import Qt, QSize
from psdaq.control_gui.CGWMainPartition import CGWMainPartition
from psdaq.control_gui.CGWMainControl import CGWMainControl
#------------------------------
class CGWMainTabExpert(QWidget) :
_name = 'CGWMainTabExpert'
def __init__(self, **kwargs) :
parent = kwargs.get('parent', None)
QWidget.__init__(self, parent=None)
logger.debug('In %s' % self._name)
self.wpart = CGWMainPartition()
self.wctrl = CGWMainControl(parent)
#self.wpart = QTextEdit('Txt 1')
#self.wctrl = QTextEdit('Txt 2')
self.vspl = QSplitter(Qt.Vertical)
self.vspl.addWidget(self.wpart)
self.vspl.addWidget(self.wctrl)
self.mbox = QHBoxLayout()
self.mbox.addWidget(self.vspl)
self.setLayout(self.mbox)
self.set_style()
#------------------------------
def set_tool_tips(self) :
pass
#self.butStop.setToolTip('Not implemented yet...')
#--------------------
def sizeHint(self):
return QSize(300, 280)
#--------------------
def set_style(self) :
self.setMinimumSize(280, 260)
self.layout().setContentsMargins(0,0,0,0)
self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Preferred)
#--------------------
def closeEvent(self, e) :
logger.debug('%s.closeEvent' % self._name)
try :
pass
#self.wpart.close()
#self.wctrl.close()
except Exception as ex:
print('Exception: %s' % ex)
#--------------------
if __name__ == "__main__" :
def resizeEvent(self, e):
#logger.debug('resizeEvent', self._name)
print('CGWMainTabExpert.resizeEvent: %s' % str(self.size()))
#def moveEvent(self, e) :
#logger.debug('moveEvent', self._name)
#self.position = self.mapToGlobal(self.pos())
#self.position = self.pos()
#logger.debug('moveEvent - pos:' + str(self.position), __name__)
#logger.info('CGWMainTabExpert.moveEvent - move window to x,y: ', str(self.mapToGlobal(QPoint(0,0))))
#self.wimg.move(self.pos() + QPoint(self.width()+5, 0))
#pass
#--------------------
if __name__ == "__main__" :
from psdaq.control_gui.CGDaqControl import daq_control, DaqControlEmulator, Emulator
daq_control.set_daq_control(DaqControlEmulator())
import sys
logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s: %(message)s', datefmt='%H:%M:%S', level=logging.DEBUG)
from psdaq.control_gui.CGConfigParameters import cp
from PyQt5.QtWidgets import QApplication
app = QApplication(sys.argv)
kwargs = {'parent':None}
cp.test_cpinit()
w = CGWMainTabExpert(**kwargs)
w.show()
app.exec_()
del w
del app
#------------------------------
| 26.89781
| 122
| 0.591316
|
90ca6ee1ada3665563419ca706aede9d87607bb6
| 3,230
|
py
|
Python
|
examples/deprecated/meshing_sweep.py
|
kant/qmt
|
4ed096704a2e11dbebfe7db50c86920ac7cb7b94
|
[
"MIT"
] | 1
|
2018-12-01T21:30:38.000Z
|
2018-12-01T21:30:38.000Z
|
examples/deprecated/meshing_sweep.py
|
imagineagents/qmt
|
5e8a7001cc020979636e492448abcfd894396038
|
[
"MIT"
] | null | null | null |
examples/deprecated/meshing_sweep.py
|
imagineagents/qmt
|
5e8a7001cc020979636e492448abcfd894396038
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import qmt.task_framework as qtf
from qmt.tasks import Task
class GeometryTaskExample(Task):
def __init__(self, options=None, name='geometry_task'):
super(GeometryTaskExample, self).__init__([], options, name)
self.input_task_list = [] # no input tasks here
def _solve_instance(self, input_result_list, current_options):
print('running geom')
return current_options
class MeshingTaskExample(Task):
def __init__(self, geo_task, options=None, name='meshing_task', gather=True):
super(MeshingTaskExample, self).__init__([geo_task], options, name, gather=gather)
self.input_task_list = [geo_task]
def _solve_gathered(self, list_of_input_result_lists, list_of_options):
print(list_of_input_result_lists)
reduced_sweep = qtf.ReducedSweep.create_from_manager_and_tags(self.sweep_manager, self.list_of_tags)
new_delayed_result = qtf.ReducedSweepDelayed.create_from_reduced_sweep_and_manager(reduced_sweep,
self.sweep_manager)
for sweep_holder_index, tag_values in enumerate(new_delayed_result.tagged_value_list):
current_options = list_of_options[sweep_holder_index]
input_result_lists = list_of_input_result_lists[sweep_holder_index]
output = current_options.copy()
for key in current_options.keys():
output[key].update(input_result_lists[0][key])
print(output)
new_delayed_result.add(output, sweep_holder_index)
return new_delayed_result
class PoissonTaskExample(Task):
def __init__(self, mesh_task, options=None, name='poisson_task'):
super(PoissonTaskExample, self).__init__([mesh_task], options, name)
self.input_task_list = [mesh_task]
def _solve_instance(self, input_result_list, current_options):
mesh_result_instance = input_result_list[0]
print(mesh_result_instance)
time.sleep(2)
output = ''
for part in mesh_result_instance.keys():
output += ' part: ' + part
output += ', min size: ' + str(mesh_result_instance[part]['min_size'])
output += ', voltage: ' + str(current_options[part]['voltage'])
return output
tag1 = qtf.SweepTag('s1')
tag2 = qtf.SweepTag('m1')
tag3 = qtf.SweepTag('v1')
geo_dict = {'part1': {'side length': tag1}, 'part2': {'side length': 3.}}
geo_task = GeometryTaskExample(options=geo_dict)
mesh_dict = {'part1': {'min_size': 1.}, 'part2': {'min_size': tag2}}
mesh_task = MeshingTaskExample(geo_task, options=mesh_dict)
poisson_dict = {'part1': {'voltage': tag3}, 'part2': {'voltage': 1.}}
poisson_task = PoissonTaskExample(mesh_task, options=poisson_dict)
sweeps = [{tag1: 1., tag2: 10., tag3: 1.}, {tag1: 2., tag2: 10., tag3: 1.}, {tag1: 1., tag2: 5., tag3: 1.}, {tag1: 4., tag2: 3., tag3: 2.}]
sweep_man = qtf.SweepManager(sweeps)
# result = sweep_man.run(mesh_task)
# print(mesh_task.reduce())
result = sweep_man.run(poisson_task)
print(poisson_task.reduce())
# print(geo_task.reduce())
# print(map(dask.result,result.futures))
| 37.55814
| 139
| 0.672136
|
a4b0899a3dfa3d2f9be30e6222cc655b78285c8d
| 2,032
|
py
|
Python
|
nearface/basemodels/Boosting.py
|
palmtrey/nearface
|
9274f13b2924a3ad9f97446772eb63bc7c482bff
|
[
"MIT"
] | null | null | null |
nearface/basemodels/Boosting.py
|
palmtrey/nearface
|
9274f13b2924a3ad9f97446772eb63bc7c482bff
|
[
"MIT"
] | null | null | null |
nearface/basemodels/Boosting.py
|
palmtrey/nearface
|
9274f13b2924a3ad9f97446772eb63bc7c482bff
|
[
"MIT"
] | null | null | null |
from nearface import NearFace
from tqdm import tqdm
import os
from os import path
from pathlib import Path
import numpy as np
import gdown
from nearface.commons import functions, distance as dst
def loadModel():
model_names = ['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace']
model = {}
model_pbar = tqdm(range(0, 4), desc='Face recognition models')
for index in model_pbar:
model_name = model_names[index]
model_pbar.set_description("Loading %s" % (model_name))
model[model_name] = NearFace.build_model(model_name)
return model
def validate_model(model):
# validate the model dictionary because callers may pass in pre-built models
found_models = []
for key, value in model.items():
found_models.append(key)
if ('VGG-Face' in found_models) and ('Facenet' in found_models) and ('OpenFace' in found_models) and ('DeepFace' in found_models):
#print("Ensemble learning will be applied for ", found_models," models")
valid = True
else:
missing_ones = set(['VGG-Face', 'Facenet', 'OpenFace', 'DeepFace']) - set(found_models)
raise ValueError("The ensemble method requires pre-built models for all of [VGG-Face, Facenet, OpenFace, DeepFace], but you passed "+str(found_models)+". Please pass the missing "+str(missing_ones)+" models as well.")
def build_gbm():
# lightgbm is an optional dependency; it is only needed for the ensemble model
import lightgbm as lgb #lightgbm==2.3.1
home = functions.get_deepface_home()
if os.path.isfile(home+'/.deepface/weights/face-recognition-ensemble-model.txt') != True:
print("face-recognition-ensemble-model.txt will be downloaded...")
url = 'https://raw.githubusercontent.com/serengil/deepface/master/deepface/models/face-recognition-ensemble-model.txt'
output = home+'/.deepface/weights/face-recognition-ensemble-model.txt'
gdown.download(url, output, quiet=False)
ensemble_model_path = home+'/.deepface/weights/face-recognition-ensemble-model.txt'
deepface_ensemble = lgb.Booster(model_file = ensemble_model_path)
return deepface_ensemble
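# Hypothetical end-to-end usage of the helpers above (not part of the module's
# public API): build the four base models, confirm the dictionary is complete,
# then load the LightGBM ensemble. loadModel() and build_gbm() may download
# model weights on first use.
if __name__ == '__main__':
    models = loadModel()
    validate_model(models)
    ensemble = build_gbm()
    print(type(ensemble))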
| 33.866667
| 244
| 0.744587
|
fe162e5a003e8af5119df09e87184b43e7ce413d
| 5,720
|
py
|
Python
|
metatests/cloudcafe/compute/events/models/compute/test_instance_resize_prep.py
|
rcbops-qa/cloudcafe
|
d937f85496aadafbb94a330b9adb8ea18bee79ba
|
[
"Apache-2.0"
] | null | null | null |
metatests/cloudcafe/compute/events/models/compute/test_instance_resize_prep.py
|
rcbops-qa/cloudcafe
|
d937f85496aadafbb94a330b9adb8ea18bee79ba
|
[
"Apache-2.0"
] | null | null | null |
metatests/cloudcafe/compute/events/models/compute/test_instance_resize_prep.py
|
rcbops-qa/cloudcafe
|
d937f85496aadafbb94a330b9adb8ea18bee79ba
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import unittest
from cloudcafe.compute.events.models.instance_resize_prep import (
InstanceResizePrepStart, InstanceResizePrepEnd)
class BaseInstanceActionsTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(BaseInstanceActionsTest, cls).setUpClass()
cls.image_meta_dict = {
"auto_disk_config": "disabled",
"base_image_ref": "5e91ad7f-afe4-4a83-bd5f-84673462cae1",
"container_format": "ovf",
"disk_format": "vhd",
"image_type": "base",
"min_disk": "20",
"min_ram": "512",
"org.openstack__1__architecture": "x64",
"org.openstack__1__os_distro": "com.ubuntu",
"org.openstack__1__os_version": "12.04",
"os_type": "linux"
}
cls.base_resize_prep_dict = {
"access_ip_v4": "10.10.0.0",
"access_ip_v6": None,
"architecture": "x64",
"availability_zone": None,
"cell_name": "cell name",
"created_at": "2015-01-15 18:59:29",
"deleted_at": "",
"disk_gb": 20,
"display_name": "server123456",
"ephemeral_gb": 0,
"host": None,
"hostname": "server123456",
"image_meta": cls.image_meta_dict,
"image_ref_url": "http://127.0.0.1/images/my_image",
"instance_flavor_id": "instance_flavor_id",
"instance_id": "performance1-1",
"instance_type": "1 GB Performance",
"instance_type_id": "9",
"kernel_id": "",
"launched_at": "",
"memory_mb": 1024,
"metadata": {},
"node": None,
"os_type": "linux",
"progress": "",
"ramdisk_id": "",
"reservation_id": "r-abcdefg",
"root_gb": 20,
"state": "building",
"state_description": "",
"tenant_id": "123456",
"terminated_at": "",
"user_id": "123456789",
"vcpus": 1
}
cls.instance_resize_prep_end_dict = {
"new_instance_type": "512MB Standard Instance",
"new_instance_type_id": "2"
}
cls.instance_resize_prep_end_dict.update(cls.base_resize_prep_dict)
cls.resize_prep_start_obj = InstanceResizePrepStart._dict_to_obj(
cls.base_resize_prep_dict)
cls.resize_prep_end_obj = InstanceResizePrepEnd._dict_to_obj(
cls.instance_resize_prep_end_dict)
class InstanceResizePrepStartTest(BaseInstanceActionsTest):
def test_instance_resize_prep_start_valid_json(self):
"""Verify that the valid event deserialized correctly"""
expected_obj = self.resize_prep_start_obj
actual_json = json.dumps(self.base_resize_prep_dict)
actual_obj = InstanceResizePrepStart.deserialize(actual_json, 'json')
self.assertEqual(expected_obj, actual_obj)
self.assertFalse(actual_obj.is_empty())
def test_instance_resize_prep_start_missing_attribute_json(self):
"""Verify event missing expected attribute does not deserialize"""
modified_dict = self.base_resize_prep_dict.copy()
modified_dict.popitem()
actual_json = json.dumps(modified_dict)
actual_obj = InstanceResizePrepStart.deserialize(actual_json, 'json')
self.assertIsNone(actual_obj)
def test_instance_resize_prep_start_extra_attribute_json(self):
"""Verify event with unexpected attribute does not deserialize"""
modified_dict = self.base_resize_prep_dict.copy()
modified_dict['test_dummy'] = 'test_dummy'
actual_json = json.dumps(modified_dict)
actual_obj = InstanceResizePrepStart.deserialize(actual_json, 'json')
self.assertIsNone(actual_obj)
class InstanceResizePrepEndTest(BaseInstanceActionsTest):
def test_instance_resize_prep_end_valid_json(self):
"""Verify that the valid event deserialized correctly"""
expected_obj = self.resize_prep_end_obj
actual_json = json.dumps(self.instance_resize_prep_end_dict)
actual_obj = InstanceResizePrepEnd.deserialize(actual_json, 'json')
self.assertEqual(expected_obj, actual_obj)
self.assertFalse(actual_obj.is_empty())
def test_instance_resize_prep_end_missing_attribute_json(self):
"""Verify event missing expected attribute does not deserialize"""
modified_dict = self.instance_resize_prep_end_dict.copy()
modified_dict.popitem()
actual_json = json.dumps(modified_dict)
actual_obj = InstanceResizePrepEnd.deserialize(actual_json, 'json')
self.assertIsNone(actual_obj)
def test_instance_resize_prep_end_extra_attribute_json(self):
"""Verify event with unexpected attribute does not deserialize"""
modified_dict = self.instance_resize_prep_end_dict.copy()
modified_dict['test_dummy'] = 'test_dummy'
actual_json = json.dumps(modified_dict)
actual_obj = InstanceResizePrepEnd.deserialize(actual_json, 'json')
self.assertIsNone(actual_obj)
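# A minimal sketch of the strict-schema contract the tests above assert
# (hypothetical; not cloudcafe's actual deserializer): a payload with any
# missing or unexpected key yields None instead of a model object.
def _strict_deserialize_sketch(payload, expected_keys):
    data = json.loads(payload)
    return data if set(data) == set(expected_keys) else None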
| 37.631579
| 77
| 0.658042
|
25f8d8056afc7538ef31b28ae4d09ba20ec8169c
| 480
|
py
|
Python
|
src/compas_rv2/rhino/objects/skeletonobject.py
|
selinabitting/compas-RV2
|
0884cc00d09c8f4a75eb2b97614105e4c8bfd818
|
[
"MIT"
] | 34
|
2020-04-27T13:54:38.000Z
|
2022-01-17T19:16:27.000Z
|
src/compas_rv2/rhino/objects/skeletonobject.py
|
selinabitting/compas-RV2
|
0884cc00d09c8f4a75eb2b97614105e4c8bfd818
|
[
"MIT"
] | 306
|
2020-04-27T12:00:54.000Z
|
2022-03-23T22:28:54.000Z
|
src/compas_rv2/rhino/objects/skeletonobject.py
|
selinabitting/compas-RV2
|
0884cc00d09c8f4a75eb2b97614105e4c8bfd818
|
[
"MIT"
] | 11
|
2020-06-30T08:23:40.000Z
|
2022-02-01T20:47:39.000Z
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas_skeleton.rhino import SkeletonObject
__all__ = ["SkeletonObject"]
class SkeletonObject(SkeletonObject):
"""Scene object for Skeleton in Rhino."""
# ============================================================================
# Main
# ============================================================================
if __name__ == '__main__':
pass
| 22.857143
| 78
| 0.520833
|