blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 3 to 616 chars) | content_id (string, 40 chars) | detected_licenses (list, 0 to 112 items) | license_type (string, 2 classes) | repo_name (string, 5 to 115 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 to 10.2M) | extension (string, 188 classes) | content (string, 3 to 10.2M chars) | authors (list, 1 item) | author_id (string, 1 to 132 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0a15e5071a4cf5caaaaf7b34122dc43319feed79
|
105ee300f5d50be628b81f30550c070fdec04495
|
/neuripsconf/done/ma_crosstr_v504b.py
|
e05d06387669366b4437825d7e66fde6f88f7351
|
[] |
no_license
|
Myyyr/segmentation3D
|
9b12c08b6eee245cc93b8ba2d1ac932a349eb618
|
0bd33d7a4c24816f3ecb4089a7d96ceaf64f298b
|
refs/heads/main
| 2023-06-21T06:45:12.609911
| 2021-07-13T07:49:43
| 2021-07-13T07:49:43
| 309,314,249
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,341
|
py
|
# More Parameters (depth) to match with classical UNet number of parameters.
# n_parameters = 114557582
import os
import numpy as np  # used below for the deep-supervision weights; the star import further down may also provide np, but the explicit import is safer
from models.utils import get_scheduler
import torch.optim as optim
import alltrain.atlasUtils as atlasUtils
from PatchedMultiatlasDataset_v3 import *
from torch.utils.data import DataLoader
import torch
import torchio as tio
from models.mymod.cross_patch import CrossPatch3DTr
from utils.metrics import DC_and_CE_loss, MultipleOutputLoss2
from nnunet.utilities.nd_softmax import softmax_helper
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
# TRAINING NO CROSS
# BIGGER MODEL
class ExpConfig():
def __init__(self):
# ID and Name
self.id = '504b'
self.experiment_name = "ma_crosstr_v{}".format(self.id)
self.debug = False
# System
self.checkpointsBasePath = "./checkpoints/"
self.checkpointsBasePathMod = self.checkpointsBasePath + 'models/'
self.labelpath = '/local/DEEPLEARNING/MULTI_ATLAS/MULTI_ATLAS/nnUNet_preprocessed/Task017_BCV/nnUNetData_plans_v2.1_stage1/'
self.datapath = self.labelpath
self.input_shape = [512,512,256]
# self.filters = [16, 32, 64, 128]
self.filters = [64, 192, 448, 704]
d_model=self.filters[-1]
# skip_idx = [1,3,5,6]
# self.patch_size=(128,128,128)
self.patch_size=(192,192,48)
# n_layers=6
self.clip = False
self.patched = True
# GPU
self.gpu = '1'
os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu
# torch.backends.cudnn.benchmark = False
# Model
number_of_cross_heads = 1
number_of_self_heads = 8
number_of_self_layer = 1
self.n_classes = 14
self.net = CrossPatch3DTr(filters=self.filters,patch_size=[1,1,1],d_model=d_model,
n_classes=self.n_classes,n_cheads=number_of_cross_heads,
n_sheads=number_of_self_heads,bn=True,up_mode='deconv',
n_strans=number_of_self_layer, do_cross=True)
self.net.inference_apply_nonlin = softmax_helper
self.n_parameters = count_parameters(self.net)
print("N PARAMS : {}".format(self.n_parameters))
# self.model_path = './checkpoints/models/crosstr_big.pth'
# self.model_path = './checkpoints/models/300/mod.pth'
self.model_path = 'checkpoints/models/504/modlast.pt'
max_displacement = 5,5,5
deg = (0,5,10)
scales = 0
self.transform = tio.Compose([
tio.RandomElasticDeformation(max_displacement=max_displacement),
tio.RandomAffine(scales=scales, degrees=deg)
])
# Training
self.start_epoch = 1000
self.epoch = 2000
# self.loss = torch.nn.CrossEntropyLoss()
self.loss = DC_and_CE_loss({'batch_dice': True, 'smooth': 1e-5, 'do_bg': False}, {})
self.ds_scales = ((1, 1, 1), (0.5, 0.5, 0.5), (0.25, 0.25, 0.25))
################# Here we wrap the loss for deep supervision ############
# we need to know the number of outputs of the network
net_numpool = 4
# we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
# this gives higher resolution outputs more weight in the loss
weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
# we don't use the lowest output. Normalize weights so that they sum to 1
mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)])
weights[~mask] = 0
weights = weights / weights.sum()
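# worked example: net_numpool = 4 gives raw weights [1, 0.5, 0.25, 0.125];
# the mask zeroes only the last entry, so after normalization the weights
# are approximately [0.571, 0.286, 0.143, 0]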
self.ds_loss_weights = weights
# now wrap the loss
self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
################# END ###################
self.batchsize = 2
self.lr_rate = 1e-3
# self.final_lr_rate = 1e-5
# self.optimizer = optim.Adam(self.net.parameters(), lr = self.lr_rate)
self.optimizer = optim.SGD(self.net.parameters(), lr = self.lr_rate, weight_decay=3e-6, momentum=0.99)
self.optimizer.zero_grad()
self.validate_every_k_epochs = 10
# self.decay = (self.lr_rate/self.final_lr_rate - 1)/self.epoch
self.lr_scheduler = get_scheduler(self.optimizer, "poly", self.lr_rate, max_epochs=self.epoch)
self.load_model(False)
# Other
self.classes_name = ['background','spleen','right kidney','left kidney','gallbladder','esophagus','liver','stomach','aorta','inferior vena cava','portal vein and splenic vein','pancreas','right adrenal gland','left adrenal gland']
def set_data(self, split = 0):
# Data
# print(self.ds_scales)
self.trainDataset = PatchedMultiAtlasDataset(self, mode="train", n_iter=250, patch_size=self.patch_size, return_full_image=True, ds_scales=self.ds_scales, do_tr=False, return_pos=True)
self.testDataset = PatchedMultiAtlasDataset(self, mode="test", n_iter=1, patch_size=self.patch_size, return_full_image=True, ds_scales=None, do_tr=False, return_pos=True)
self.trainDataLoader = DataLoader(dataset=self.trainDataset, num_workers=1, batch_size=self.batchsize, shuffle=True)
self.testDataLoader = DataLoader(dataset=self.testDataset, num_workers=1, batch_size=1, shuffle=False)
def load_model(self, load_lr=True):
print('LOAD MODEL ...')
if not os.path.exists(self.model_path):
torch.save(self.net.state_dict(), self.model_path)
elif self.start_epoch == 0:
self.net.load_state_dict(torch.load(self.model_path))
else:
a = torch.load(self.model_path)
self.net.load_state_dict(a['net_state_dict'])
# self.optimizer = optim.Adam(self.net.parameters(), lr = self.lr_rate, weight_decay=0)
self.optimizer.load_state_dict(a['optimizer_state_dict'])
if load_lr:
self.lr_scheduler.load_state_dict(a['scheduler'])
def net_stats(self):
s = 0
for p in self.net.parameters():
if p.requires_grad:
s += p.sum()
print('Mean :', s.item()/self.n_parameters)
|
[
"loic.themyr@gmail.com"
] |
loic.themyr@gmail.com
|
a7db5c206101d67106804b107e212f2550467f76
|
6f1034b17b49f373a41ecf3a5a8923fb4948992b
|
/docs/user_guide/operation/scripts/examples/argus/extraction/jan/test scripts/PrepareForBlankNP10.py
|
53218ab06727a51bf390cd988a4e472692541d64
|
[
"Apache-2.0"
] |
permissive
|
NMGRL/pychron
|
a6ec1854488e74eb5d3ff53eee8537ecf98a6e2f
|
8cfc8085393ace2aee6b98d36bfd6fba0bcb41c6
|
refs/heads/main
| 2023-08-30T07:00:34.121528
| 2023-06-12T17:43:25
| 2023-06-12T17:43:25
| 14,438,041
| 38
| 28
|
Apache-2.0
| 2023-08-09T22:47:17
| 2013-11-15T23:46:10
|
Python
|
UTF-8
|
Python
| false
| false
| 531
|
py
|
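# note: info/open/close/sleep are not defined in this file; in pychron
# extraction scripts they appear to be injected by the pyscript runtime,
# so this snippet is not standalone Python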
def main():
info('Prepare for NP10 Blank analysis')
close(description='Jan Inlet')
open(description='Jan Ion Pump')
close(description='Microbone to Minibone')
open(description='Microbone to Turbo')
open(description='Microbone to Inlet Pipette')
close(description='Microbone to Getter NP-10C')
open(description='Microbone to Getter NP-10H')
close(description='CO2 Laser to Felix')
close(description='CO2 Laser to Jan')
close(description='Microbone to CO2 Laser')
sleep(20)
|
[
"jirhiker@gmail.com"
] |
jirhiker@gmail.com
|
984063d568aa192a28ddbfe589875bef2f13dddd
|
a939e018333a9ecd26ddc618f99835b7eb381686
|
/le_crawler/core/.svn/text-base/urlmd5_inserter.py.svn-base
|
a208af395d09a8d72da4a6707dcead850008a074
|
[] |
no_license
|
cash2one/crawl_youtube
|
bff5ba254001c2f31f770e55a4aca39bc54e45ee
|
0dc40186a1d89da2b00f29d4f4edfdc5470eb4fc
|
refs/heads/master
| 2021-01-16T22:30:17.800282
| 2016-02-18T11:50:09
| 2016-02-18T11:50:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,749
|
import memcache
import hashlib
import urllib
class UrlMd5Inserter:
def __init__(self, logger = None):
self._client = memcache.Client(['127.0.0.1:11211'])
self._logger = logger
self._miss_count = 0
self._has_send_message = False
def send_message(self):
for tel in ['13426031534', '18515029185', '15330025605']:
api = 'http://10.182.63.85:8799/warn_messages'
params = {}
params['m'] = 'insert md5 failed in lejian crawler.'
params['p'] = tel
params = urllib.urlencode(params)
urllib.urlopen("%s?%s" % (api, params))
def insert_urlmd5(self, url):
if not url:
return False
if not isinstance(url, basestring):
return False
md5_str = hashlib.md5(url).hexdigest()
if not self._client.get(md5_str):
if self._client.set(md5_str, url):
self._miss_count = 0
if self._logger:
self._logger.debug('insert %s %s' % (md5_str, url))
return True
else:
self._miss_count += 1
if not self._has_send_message and self._miss_count > 5:
self.send_message()
self._has_send_message = True
if self._miss_count < 5 or self._miss_count & 1023 == 0:
self._client = memcache.Client(['127.0.0.1:11211'])
if self._client.set(md5_str, url):
return True
if self._logger:
self._logger.error('insert url_md5 failed! %s' % url)
return False
else:
# if self._logger:
# self._logger.info('md5 %s already has, url = %s' % (md5_str, url))
return True
if __name__ == '__main__':
c = UrlMd5Inserter()
import sys
if len(sys.argv) == 1:
print 'need url param!'
else:
url = sys.argv[1]
c.insert_urlmd5(url)
|
[
"zjc0516@126.com"
] |
zjc0516@126.com
|
|
2b09f1cbc6122618f1660239caaecf8d5c627104
|
070b693744e7e73634c19b1ee5bc9e06f9fb852a
|
/python/problem-BST/TreeNode.py
|
bb2a63b5acedeeaa4bc687eaa2c304616739c7b2
|
[] |
no_license
|
rheehot/practice
|
a7a4ce177e8cb129192a60ba596745eec9a7d19e
|
aa0355d3879e61cf43a4333a6446f3d377ed5580
|
refs/heads/master
| 2021-04-15T22:04:34.484285
| 2020-03-20T17:20:00
| 2020-03-20T17:20:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27
|
py
|
../problem-tree/TreeNode.py
|
[
"morpheus.0@kakaocorp.com"
] |
morpheus.0@kakaocorp.com
|
a4791b97e58d631653912af81e84687580e8838c
|
b8e9dd6fd8f8b691cba5a3af2388467bcf6c90bb
|
/samples/openapi3/client/3_0_3_unit_test/python-experimental/unit_test_api/paths/request_body_post_ref_in_oneof_request_body/post.pyi
|
5af5d8087ee9da8cede3cae09cf08921cd20a938
|
[
"Apache-2.0"
] |
permissive
|
FallenRiteMonk/openapi-generator
|
f8b98940219eecf14dc76dced4b0fbd394522aa3
|
b6576d11733ecad6fa4a0a616e1a06d502a771b7
|
refs/heads/master
| 2023-03-16T05:23:36.501909
| 2022-09-02T01:46:56
| 2022-09-02T01:46:56
| 164,609,299
| 0
| 0
|
Apache-2.0
| 2019-01-08T09:08:56
| 2019-01-08T09:08:56
| null |
UTF-8
|
Python
| false
| false
| 4,463
|
pyi
|
# coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import urllib3
from urllib3._collections import HTTPHeaderDict
from unit_test_api import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from unit_test_api import schemas # noqa: F401
from unit_test_api.model.ref_in_oneof import RefInOneof
# body param
SchemaForRequestBodyApplicationJson = RefInOneof
class BaseApi(api_client.Api):
def _post_ref_in_oneof_request_body_oapg(
self: api_client.Api,
body: typing.Union[SchemaForRequestBodyApplicationJson, ],
content_type: str = 'application/json',
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
"""
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
used_path = path.value
_headers = HTTPHeaderDict()
# TODO add cookie handling
if body is schemas.unset:
raise exceptions.ApiValueError(
'The required body parameter has an invalid value of: unset. Set a valid value instead')
_fields = None
_body = None
serialized_data = request_body_ref_in_oneof.serialize(body, content_type)
_headers.add('Content-Type', content_type)
if 'fields' in serialized_data:
_fields = serialized_data['fields']
elif 'body' in serialized_data:
_body = serialized_data['body']
response = self.api_client.call_api(
resource_path=used_path,
method='post'.upper(),
headers=_headers,
fields=_fields,
body=_body,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
class PostRefInOneofRequestBody(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
def post_ref_in_oneof_request_body(
self: BaseApi,
body: typing.Union[SchemaForRequestBodyApplicationJson, ],
content_type: str = 'application/json',
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
return self._post_ref_in_oneof_request_body_oapg(
body=body,
content_type=content_type,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForpost(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
def post(
self: BaseApi,
body: typing.Union[SchemaForRequestBodyApplicationJson, ],
content_type: str = 'application/json',
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization
]:
return self._post_ref_in_oneof_request_body_oapg(
body=body,
content_type=content_type,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
|
[
"noreply@github.com"
] |
FallenRiteMonk.noreply@github.com
|
c4e30ab3b20858daa94874c13887eb46b306a2dd
|
c1bfadbc033efba287ad55a804e9d69d297c3bf2
|
/valohai_cli/yaml_wizard.py
|
044a3a8e52e639b7fe3f82e1cee9cc4721456238
|
[
"MIT"
] |
permissive
|
valohai/valohai-cli
|
16560b078d20a02c8cdc7388beeea9bebac4be7d
|
c57cc164e749fb77b622d629a5ad05b2685534bb
|
refs/heads/master
| 2023-08-31T14:04:26.979762
| 2023-08-22T12:54:51
| 2023-08-22T12:54:51
| 81,329,264
| 14
| 5
|
MIT
| 2023-09-11T13:35:04
| 2017-02-08T12:46:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,261
|
py
|
import codecs
import os
from typing import List
import click
import requests
import yaml
from valohai_cli.messages import error, success, warn
from valohai_cli.utils import find_scripts
from valohai_cli.utils.cli_utils import prompt_from_list
YAML_SKELLINGTON = """---
- step:
name: Execute {command}
image: {image}
command: {command}
#inputs:
# - name: example-input
# default: https://example.com/
#parameters:
# - name: example
# description: Example parameter
# type: integer
# default: 300
"""
def get_image_suggestions() -> List[dict]:
try:
resp = requests.get('https://raw.githubusercontent.com/valohai/images/master/images.v2.yaml')
resp.raise_for_status()
images = [
{
'name': image,
'description': info['description'],
}
for image, info
in yaml.safe_load(resp.content).items()
if info.get("isRecommended")
]
images.sort(key=lambda i: str(i.get('name')).lower())
return images
except Exception as exc:
warn(f'Could not load online image suggestions: {exc}')
return []
def yaml_wizard(directory: str) -> None:
while True:
command = choose_command(directory)
image = choose_image()
yaml = YAML_SKELLINGTON.format(
image=image,
command=command,
)
click.secho('Here\'s a preview of the Valohai.yaml file I\'m going to create.', fg='cyan')
print(yaml)
yaml_path = os.path.join(directory, 'valohai.yaml')
if not click.confirm(f'Write this to {click.style(yaml_path, bold=True)}?'): # pragma: no cover
click.echo('Okay, let\'s try again...')
continue
with codecs.open(yaml_path, 'w', 'UTF-8') as out_fp:
out_fp.write(yaml)
success(f'All done! Wrote {yaml_path}.')
break
def choose_image() -> str:
image_suggestions = get_image_suggestions()
click.echo(
'Now let\'s pick a Docker image to use with your code.\n' +
(
'Here are some recommended choices, but feel free to type in one of '
'your own from the ones available at https://hub.docker.com/'
if image_suggestions
else ''
)
)
while True:
image = prompt_from_list(
image_suggestions,
(
'Choose a number or enter a Docker image name.'
if image_suggestions else
'Enter a Docker image name.'
),
nonlist_validator=lambda s: s.strip()
)
image_name = str(image["name"]) if isinstance(image, dict) else str(image)
if click.confirm(f'Is {click.style(image_name, bold=True)} correct?'):
break
success(f'Great! Using {image_name}.')
return image_name
def choose_command(directory: str) -> str:
scripts = sorted(find_scripts(directory))
while True:
if scripts:
click.echo(
'We found these script files in this directory.\n'
'If any of them is the script file you\'d like to use for Valohai, type its number.\n'
'Otherwise, you can just type the command to run.'
)
command = prompt_from_list(
[
{'name': f'{interpreter} {script}'}
for (interpreter, script)
in scripts
],
'Choose a number or enter a command.',
nonlist_validator=lambda s: s.strip()
)
if isinstance(command, dict):
command = command['name']
else: # pragma: no cover
command = click.prompt(
'We couldn\'t find script files in this directory.\n'
'Please enter the command you\'d like to run in the Valohai platform.\n'
)
if not command: # pragma: no cover
error('Please try again.')
continue
if click.confirm(f'Is {click.style(command, bold=True)} correct?'):
break
success(f'Got it! Using {command} as the command.')
return str(command)
|
[
"akx@iki.fi"
] |
akx@iki.fi
|
3b98be268d96202ec11b07ae5bf7405d018285ef
|
4b322cc4a7d5ce099e99fd597e4c51caf1704d16
|
/packs/python_packs.py
|
82322c932fc0b2c9fa9cae3a507ae5cdf303635c
|
[] |
no_license
|
mjgpy3/umm_script
|
7037b8aa8d3052b0534cbf33760cb681f9549c29
|
2f7a6e6ac34c2d0ec58195dc1d2d0912cc168d28
|
refs/heads/master
| 2021-01-10T20:11:06.473971
| 2013-03-06T15:27:30
| 2013-03-06T15:27:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
#!/usr/bin/env python
# Created by Michael Gilliland
# Date: Fri Jan 25 16:47:44 EST 2013
#
#
from package_container import PackageContainer
packages = {'Sympy': 'python-sympy',
'Numpy': 'python-numpy',
'Scipy': 'python-scipy',
'Matplotlib': 'python-matplotlib',
'Spyder': 'spyder'}
container = PackageContainer("Python", 'python', packages)
|
[
"mjg.py3@gmail.com"
] |
mjg.py3@gmail.com
|
8962b5672f25b13d653dde89d99035c4d8090e95
|
2d5d13c4bdc64202a520f32e7d4a44bb75e2004f
|
/week-02/d02/sum.py
|
a6efc084acd3c1c5d52d0715fbd71556a40e3c3e
|
[] |
no_license
|
green-fox-academy/andrasnyarai
|
43b32d5cc4ad3792ef8d621328f9593fc9623e0b
|
19759a146ba2f63f1c3e4e51160e6111ca0ee9c3
|
refs/heads/master
| 2021-09-07T16:19:34.636119
| 2018-02-26T00:38:00
| 2018-02-26T00:38:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
# - Write a function called `sum` that sum all the numbers
# until the given parameter
def sum(x):
s = 0
for i in range(1,x+1):
s = s + i
return s
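# sanity check: sum(x) equals the closed form x * (x + 1) // 2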
x = int(input())
print(sum(x))
|
[
"andrasnyarai@gmail.com"
] |
andrasnyarai@gmail.com
|
c54e78fbf7d25340d3cc30fd3f86b4f534345415
|
e0980f704a573894350e285f66f4cf390837238e
|
/.history/streams/blocks_20201019093900.py
|
f2b99c8f80eb8da086d09a372fca19597d3b3670
|
[] |
no_license
|
rucpata/WagtailWebsite
|
28008474ec779d12ef43bceb61827168274a8b61
|
5aa44f51592f49c9a708fc5515ad877c6a29dfd9
|
refs/heads/main
| 2023-02-09T15:30:02.133415
| 2021-01-05T14:55:45
| 2021-01-05T14:55:45
| 303,961,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 108
|
py
|
from wagtail.core import blocks
class TitleBlock(blocks.StructBlock):
text = blocks.CharBlock(
required=True  # the snapshot breaks off mid-keyword at "re"; required=True is an assumed completion
)
|
[
"rucinska.patrycja@gmail.com"
] |
rucinska.patrycja@gmail.com
|
7684a374aa577e0696430e5dec1be15046f15e42
|
c3ad35192313827ae1064e9614182c182690d675
|
/3 - Two Pointers Algorithm/228. Middle of Linked List.py
|
f4319e7153443b8a090acb1b9ee6e7585b714d05
|
[] |
no_license
|
LingHsiLiu/Algorithm1
|
2614cb6e7d6d83e53081397153e0c7938a8c3196
|
817332c374cab5d2fa8d6abd1b27dbcad85656d5
|
refs/heads/master
| 2020-03-27T00:46:55.948804
| 2019-11-15T04:15:12
| 2019-11-15T04:15:12
| 145,656,542
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 725
|
py
|
# Find the middle node of a linked list.
# Example
# Given 1->2->3, return the node with value 2.
# Given 1->2, return the node with value 1.
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
"""
@param head: the head of linked list.
@return: a middle node of the linked list
"""
def middleNode(self, head):
# write your code here
if head is None:
return None
slow = head
fast = head.next
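# invariant: fast advances two nodes per one node of slow, so when fast
# reaches the end, slow sits on the middle (the first middle for even length)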
while fast is not None and fast.next is not None:
slow = slow.next
fast = fast.next.next
return slow
|
[
"noreply@github.com"
] |
LingHsiLiu.noreply@github.com
|
9c24e3ed3ce8a5a9fbed8e742abf78e1de21d5ce
|
e21f7d14e564d7fb921277a329ff078e86ad86a2
|
/2016/day_02/day_02.py
|
6e7b32d360e727f1672245c6798f5f753b46ca59
|
[] |
no_license
|
MrGallo/advent-of-code-solutions
|
31456a0718303cca6790cf1227831bcb14649e27
|
28e0331e663443ffa2638188437cc7e46d09f465
|
refs/heads/master
| 2022-07-07T08:49:30.460166
| 2020-12-17T17:22:24
| 2020-12-17T17:22:24
| 160,988,019
| 0
| 1
| null | 2022-06-21T22:26:19
| 2018-12-08T23:34:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,903
|
py
|
from typing import List
DIRECTIONS = {
"U": (0, -1),
"D": (0, 1),
"L": (-1, 0),
"R": (1, 0)
}
KEYPAD_WIDTH = 3
KEYPAD_HEIGHT = 3
KEYPAD = [
[i for i in range(1+KEYPAD_HEIGHT*j, KEYPAD_HEIGHT*j+KEYPAD_WIDTH+1)]
for j in range(KEYPAD_HEIGHT)
]
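# KEYPAD evaluates to [[1, 2, 3], [4, 5, 6], [7, 8, 9]]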
ACTUAL_KEYPAD = [
[None, None, "1", None, None],
[None, "2", "3", "4", None],
["5", "6", "7", "8", "9"],
[None, "A", "B", "C", None],
[None, None, "D", None, None]
]
def main() -> None:
with open("input.txt", "r") as f:
instructions = [list(line) for line in f.read().split("\n")]
print(part1(instructions)) # answer: 44558
print(part2(instructions)) # answer: 6BBAD
def part1(instructions: List[List[str]]) -> str:
passcode = ""
cursor_x, cursor_y = 1, 1
for digit_instruction in instructions:
for direction in digit_instruction:
dx, dy = DIRECTIONS[direction]
cursor_x = clamp(cursor_x+dx, 0, KEYPAD_WIDTH-1)
cursor_y = clamp(cursor_y+dy, 0, KEYPAD_HEIGHT-1)
passcode += str(KEYPAD[cursor_y][cursor_x])
return passcode
def part2(instructions: List[List[str]]) -> str:
passcode = ""
cursor_x, cursor_y = 0, 2
for digit_instruction in instructions:
for direction in digit_instruction:
dx, dy = DIRECTIONS[direction]
if cursor_y+dy < 0 or cursor_x+dx < 0: # fix list[-1] wrap-around
continue
try:
if ACTUAL_KEYPAD[cursor_y+dy][cursor_x+dx] is not None:
cursor_x += dx
cursor_y += dy
except IndexError:
continue
passcode += ACTUAL_KEYPAD[cursor_y][cursor_x]
return passcode
def clamp(n: int, a: int, b: int) -> int:
"""Clamp an integer (n) within the range of a to b inclusive"""
return min(max(n, a), b)
if __name__ == "__main__":
main()
|
[
"daniel.gallo@ycdsbk12.ca"
] |
daniel.gallo@ycdsbk12.ca
|
e4ce2491502b31086ded61177a25b8a7017a2484
|
bb79411e60fb06844f4d7cc3069e44caaac4d919
|
/asq/tests/test_data_record.py
|
85579f6af3b4bbbf38dacb6c7a0ebd60ccb1c75d
|
[
"MIT"
] |
permissive
|
geraintpalmer/ASQ
|
789bf37f7b51fd493fcb3ed10fabc3ad0ac2a904
|
4ff207317b201c96548bfa8263b6f04fcd4a546c
|
refs/heads/master
| 2021-01-10T11:20:06.865591
| 2015-12-14T16:00:15
| 2015-12-14T16:00:15
| 46,278,239
| 2
| 4
| null | 2015-12-14T16:00:15
| 2015-11-16T14:05:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,015
|
py
|
import unittest
import asq
class TestDataRecord(unittest.TestCase):
def test_init_method(self):
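# inferred signature, judging by the assertions below:
# DataRecord(arrival_date, service_time, service_start_date, exit_date, node, customer_class)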
r = asq.DataRecord(2, 3, 2, 8, 1, 2)
self.assertEqual(r.arrival_date, 2)
self.assertEqual(r.wait, 0)
self.assertEqual(r.service_start_date, 2)
self.assertEqual(r.service_time, 3)
self.assertEqual(r.service_end_date, 5)
self.assertEqual(r.blocked, 3)
self.assertEqual(r.exit_date, 8)
self.assertEqual(r.node, 1)
self.assertEqual(r.customer_class, 2)
r = asq.DataRecord(5.7, 2.1, 8.2, 10.3, 1, 3)
self.assertEqual(r.arrival_date, 5.7)
self.assertEqual(round(r.wait, 1), 2.5)
self.assertEqual(r.service_start_date, 8.2)
self.assertEqual(r.service_time, 2.1)
self.assertEqual(round(r.service_end_date, 1), 10.3)
self.assertEqual(round(r.blocked, 1), 0.0)
self.assertEqual(r.exit_date, 10.3)
self.assertEqual(r.node, 1)
self.assertEqual(r.customer_class, 3)
|
[
"palmer.geraint@googlemail.com"
] |
palmer.geraint@googlemail.com
|
b885fe440bae9dafc2e7c60d43c6c4a58b12034d
|
947720511ee9a0cba09347042c3eadbb10f3e01e
|
/Results_plotting/semantic.py
|
c698e1bae4eea14a485974b0436e8568274b37ca
|
[] |
no_license
|
OMARI1988/language_and_vision
|
8737ee83043517f58e0cb36943b389c0bf617a04
|
527098c1d47ec959a24669d44effcd307b8309c2
|
refs/heads/master
| 2021-01-17T01:14:04.994195
| 2020-10-04T21:26:58
| 2020-10-04T21:26:58
| 31,417,989
| 0
| 1
| null | 2017-09-07T14:07:58
| 2015-02-27T12:18:42
|
Python
|
UTF-8
|
Python
| false
| false
| 4,482
|
py
|
"""
Horizontal bar charts comparing unsupervised / our-system / supervised
accuracy on several datasets.
"""
import numpy as np
import matplotlib.pyplot as plt
plt.rcdefaults()
plt.subplots_adjust(hspace=0.45, wspace=1.3)
ax = plt.subplot(4, 1, 1)  # four dataset panels stacked vertically
opacity = 0.8
bar_width=1
# index = np.arange(3)
# plt.xticks(index, ('A', 'B', 'C'))
# plt.title('real-world dataset')
# ax.bar([0], [0.2], color="blue", width=bar_width, label="A- unsupervised", alpha=opacity, align="center")
# ax.bar([1], [0.75], color="red", width=bar_width, label="B- our approach", alpha=opacity, align="center")
# ax.bar([2], [0.99], color="green", width=bar_width, label="C- supervised", alpha=opacity, align="center")
# ax.legend(loc=2)
#
# ax = plt.subplot(1, 2, 2)
# plt.xticks(index, ('A', 'B', 'C'))
# plt.title('synthetic-world dataset')
# ax.bar([0], [0.2], color="blue", width=bar_width, label="A- unsupervised", alpha=opacity, align="center")
# ax.bar([1], [0.88], color="red", width=bar_width, label="B- our approach", alpha=opacity, align="center")
# ax.bar([2], [0.99], color="green", width=bar_width, label="C- supervised", alpha=opacity, align="center")
# ax.legend(loc=2)
# Example data
people = ('unsupervised', 'our-system', 'supervised')
y_pos = np.arange(len(people))
plt.barh([0], [32.9], align='center', height=1, alpha=0.9,color='orange')
plt.barh([1], [85.6], align='center', height=1, alpha=0.7, color="green")
plt.barh([2], [98.1], align='center', height=1, alpha=0.9, color=(.4,.3,1))
ax.text(32.9-4,0-.2,'32.9',size=16)
ax.text(85.6-4,1-.2,'85.6',size=16)
ax.text(98.1-4,2-.2,'98.1',size=16)
plt.xticks([0,20,40,60,80,100], ['0','20','40','60','80','100'], fontsize=20)
plt.yticks(y_pos, people, fontsize=20)
plt.title('Dukes (2013) dataset', fontsize=20)
plt.tick_params(axis='x', which='both', bottom='on', top='off', labelbottom='on')
plt.tick_params(axis='y', which='both', left='on', right='off', labelleft='on')
ax = plt.subplot(4, 1, 2)
people = ('unsupervised', 'our-system', 'supervised')
y_pos = np.arange(len(people))
plt.barh([0], [14], align='center', height=1, alpha=0.4)
plt.barh([1], [75], align='center', height=1, alpha=0.4, color="red")
plt.barh([2], [99], align='center', height=1, alpha=0.4, color="green")
plt.xticks([0,20,40,60,80,100], ['0','20','40','60','80','100'], fontsize=20)
plt.yticks(y_pos, people, fontsize=20)
plt.title('Jivko (2016) dataset', fontsize=20)
plt.tick_params(axis='x', which='both', bottom='on', top='off', labelbottom='on')
plt.tick_params(axis='y', which='both', left='on', right='off', labelleft='on')
ax = plt.subplot(4, 1, 3)
people = ('unsupervised', 'our-system', 'supervised')
y_pos = np.arange(len(people))
plt.barh([0], [39.8], align='center', height=1, alpha=0.9,color='orange')
plt.barh([1], [91.3], align='center', height=1, alpha=0.7, color="green")
plt.barh([2], [98.9], align='center', height=1, alpha=0.9, color=(.4,.3,1))
ax.text(31.2-4,0-.2,'39.8',size=16)
ax.text(81.5-4,1-.2,'91.3',size=16)
ax.text(97.4-4,2-.2,'98.9',size=16)
plt.xticks([0,20,40,60,80,100], ['0','20','40','60','80','100'], fontsize=20)
plt.yticks(y_pos, people, fontsize=20)
plt.title('Sinapov (2016) dataset', fontsize=20)
plt.tick_params(axis='x', which='both', bottom='on', top='off', labelbottom='on')
plt.tick_params(axis='y', which='both', left='on', right='off', labelleft='on')
ax = plt.subplot(4, 1, 4)
people = ('unsupervised', 'our-system', 'supervised')
y_pos = np.arange(len(people))
plt.barh([0], [31.2], align='center', height=1, alpha=0.9,color='orange')
plt.barh([1], [81.5], align='center', height=1, alpha=0.7, color="green")
plt.barh([2], [97.4], align='center', height=1, alpha=0.9, color=(.4,.3,1))
ax.text(31.2-4,0-.2,'31.2',size=16)
ax.text(81.5-4,1-.2,'81.5',size=16)
ax.text(97.4-4,2-.2,'97.4',size=16)
plt.xticks([0,20,40,60,80,100], ['0','20','40','60','80','100'], fontsize=20)
plt.yticks(y_pos, people, fontsize=20)
plt.title('Alomari (2016) dataset', fontsize=20)
plt.tick_params(axis='x', which='both', bottom='on', top='off', labelbottom='on')
plt.tick_params(axis='y', which='both', left='on', right='off', labelleft='on')
plt.show()
|
[
"omari.1988@gmail.com"
] |
omari.1988@gmail.com
|
6f99374339cac941da9a9510f04d46779fdb8a46
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/ACL_PyTorch/contrib/cv/tracking/SiamFC/utils.py
|
e5d77ea29fc4685c0728936c9cf7e966f078b806
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 5,910
|
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import cv2
import torch
def ToTensor(sample):
sample = sample.transpose(2, 0, 1)
C, H, W = sample.shape
sample = sample.reshape(1, C, H, W)
return torch.from_numpy(sample.astype(np.float32))
def get_center(x):
return (x - 1.) / 2.
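# e.g. get_center(127) == 63.0: the center pixel index of a 127-pixel-wide patch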
# top-left bottom-right --> cx,cy,w,h
def xyxy2cxcywh(bbox):
return get_center(bbox[0] + bbox[2]), \
get_center(bbox[1] + bbox[3]), \
(bbox[2] - bbox[0]), \
(bbox[3] - bbox[1])
# model_sz=127, a picture is resized from original_sz to model_sz
def crop_and_pad(img, cx, cy, model_sz, original_sz, img_mean=None):
xmin = cx - original_sz // 2
xmax = cx + original_sz // 2
ymin = cy - original_sz // 2
ymax = cy + original_sz // 2
im_h, im_w, _ = img.shape
left = right = top = bottom = 0
if xmin < 0:
left = int(abs(xmin))
if xmax > im_w:
right = int(xmax - im_w)
if ymin < 0:
top = int(abs(ymin))
if ymax > im_h:
bottom = int(ymax - im_h)
xmin = int(max(0, xmin))
xmax = int(min(im_w, xmax))
ymin = int(max(0, ymin))
ymax = int(min(im_h, ymax))
im_patch = img[ymin:ymax, xmin:xmax]
if left != 0 or right != 0 or top != 0 or bottom != 0:
if img_mean is None:
img_mean = tuple(map(int, img.mean(axis=(0, 1))))
im_patch = cv2.copyMakeBorder(im_patch, top, bottom, left, right,
cv2.BORDER_CONSTANT, value=img_mean)
if model_sz != original_sz:
im_patch = cv2.resize(im_patch, (model_sz, model_sz))
return im_patch
# size_z=127
def get_exemplar_image(img, bbox, size_z, context_amount, img_mean=None):
cx, cy, w, h = xyxy2cxcywh(bbox)
wc_z = w + context_amount * (w + h)
hc_z = h + context_amount * (w + h)
s_z = np.sqrt(wc_z * hc_z)
scale_z = size_z / s_z # 0.75
exemplar_img = crop_and_pad(img, cx, cy, size_z, s_z, img_mean) # 127*127
return exemplar_img, scale_z, s_z
def get_pyramid_instance_image(img, center, size_x, size_x_scales, img_mean=None):
if img_mean is None:
img_mean = tuple(map(int, img.mean(axis=(0, 1))))
pyramid = [crop_and_pad(img, center[0], center[1], size_x, size_x_scale, img_mean)
for size_x_scale in size_x_scales]
return pyramid
def center_error(rects1, rects2):
r"""Center error.
Args:
rects1 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
rects2 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
"""
centers1 = rects1[..., :2] + (rects1[..., 2:] - 1) / 2
centers2 = rects2[..., :2] + (rects2[..., 2:] - 1) / 2
errors = np.sqrt(np.sum(np.power(centers1 - centers2, 2), axis=-1))
return errors
def rect_iou(rects1, rects2, bound=None):
r"""Intersection over union.
Args:
rects1 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
rects2 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
bound (numpy.ndarray): A 4 dimensional array, denotes the bound
(min_left, min_top, max_width, max_height) for ``rects1`` and ``rects2``.
"""
assert rects1.shape == rects2.shape
if bound is not None:
# bounded rects1
rects1[:, 0] = np.clip(rects1[:, 0], 0, bound[0])
rects1[:, 1] = np.clip(rects1[:, 1], 0, bound[1])
rects1[:, 2] = np.clip(rects1[:, 2], 0, bound[0] - rects1[:, 0])
rects1[:, 3] = np.clip(rects1[:, 3], 0, bound[1] - rects1[:, 1])
# bounded rects2
rects2[:, 0] = np.clip(rects2[:, 0], 0, bound[0])
rects2[:, 1] = np.clip(rects2[:, 1], 0, bound[1])
rects2[:, 2] = np.clip(rects2[:, 2], 0, bound[0] - rects2[:, 0])
rects2[:, 3] = np.clip(rects2[:, 3], 0, bound[1] - rects2[:, 1])
rects_inter = _intersection(rects1, rects2)
areas_inter = np.prod(rects_inter[..., 2:], axis=-1)
areas1 = np.prod(rects1[..., 2:], axis=-1)
areas2 = np.prod(rects2[..., 2:], axis=-1)
areas_union = areas1 + areas2 - areas_inter
eps = np.finfo(float).eps
ious = areas_inter / (areas_union + eps)
ious = np.clip(ious, 0.0, 1.0)
return ious
def _intersection(rects1, rects2):
r"""Rectangle intersection.
Args:
rects1 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
rects2 (numpy.ndarray): An N x 4 numpy array, each line represent a rectangle
(left, top, width, height).
"""
assert rects1.shape == rects2.shape
x1 = np.maximum(rects1[..., 0], rects2[..., 0])
y1 = np.maximum(rects1[..., 1], rects2[..., 1])
x2 = np.minimum(rects1[..., 0] + rects1[..., 2],
rects2[..., 0] + rects2[..., 2])
y2 = np.minimum(rects1[..., 1] + rects1[..., 3],
rects2[..., 1] + rects2[..., 3])
w = np.maximum(x2 - x1, 0)
h = np.maximum(y2 - y1, 0)
return np.stack([x1, y1, w, h]).T
|
[
"liuzhuheng@huawei.com"
] |
liuzhuheng@huawei.com
|
d6d9862ce74535a49554e76cbdf4a1b1b3d1f9c1
|
ebd6d5c981b2a897e398ccb6be188cfbad734d8a
|
/experiments/testAllSimilarNets.py
|
14c815b8919ba4912a90256a4c9a18e52076b6e3
|
[] |
no_license
|
thodorisGeorgiou/transfer_learning_experiments
|
02a21b762e5e863dbcc595423955747f4dad245a
|
9180c4f2750d56863ea56a1d2d9db9efaf955f11
|
refs/heads/main
| 2023-03-19T04:15:49.287784
| 2021-03-04T11:37:27
| 2021-03-04T11:37:27
| 344,453,380
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,741
|
py
|
import os
import sys
import multiprocessing
# modelTypes = ["op", "cc"]
# modelTypes = ["vc", "ds", "op", "cc"]
modelTypes = ["vc"]
numRuns = 4
# basePath = sys.argv[1]
# mType = sys.argv[2]
mType = "vc"
# if basePath[-1] != "/":
# exit("Path must end with a slash")
# gpu = sys.argv[1]
# releaseDirs = ["vc/1/","vc/2/","vc/3/","vc/4/"]
# numTrainExamples = ["500/forces/vc/", "1000/forces/vc/", "2000/forces/vc/", "4000/forces/vc/", "8000/forces/vc/"]
# numTrainExamples = ["500/forceFlow_forces/vc/", "1000/forceFlow_forces/vc/", "2000/forceFlow_forces/vc/", "4000/forceFlow_forces/vc/", "8000/forceFlow_forces/vc/"]
# numTrainExamples += ["500/force_forces/vc/", "1000/force_forces/vc/", "2000/force_forces/vc/", "4000/force_forces/vc/", "8000/force_forces/vc/"]
# numTrainExamples = ["500/flow_forces/vc/", "1000/flow_forces/vc/", "2000/flow_forces/vc/", "4000/flow_forces/vc/", "8000/flow_forces/vc/"]
# numTrainExamples += ["500/forceRecon_forces/vc/", "1000/forceRecon_forces/vc/", "2000/forceRecon_forces/vc/", "4000/forceRecon_forces/vc/", "8000/forceRecon_forces/vc/"]
# numTrainExamples += ["500/flowRecon_forces/vc/", "1000/flowRecon_forces/vc/", "2000/flowRecon_forces/vc/", "4000/flowRecon_forces/vc/", "8000/flowRecon_forces/vc/"]
# numTrainExamples += ["500/all_forces/vc/", "1000/all_forces/vc/", "2000/all_forces/vc/", "4000/all_forces/vc/", "8000/all_forces/vc/"]
# numTrainExamples = ["500_2/forces/vc/", "1000_2/forces/vc/"]
# numTrainExamples = ["500_2/force_forces/vc/", "1000_2/force_forces/vc/", "500_2/all_forces/vc/", "1000_2/all_forces/vc/", "500_2/forceFlow_forces/vc/", \
# "1000_2/forceFlow_forces/vc/", "500_2/forceRecon_forces/vc/", "1000_2/forceRecon_forces/vc/"]
# numTrainExamples = ["500_2/force_forces/vc/", "1000_2/force_forces/vc/", "500_2/all_forces/vc/", "1000_2/all_forces/vc/", "500_2/forceFlow_forces/vc/", \
# "1000_2/forceFlow_forces/vc/", "500_2/forceRecon_forces/vc/", "1000_2/forceRecon_forces/vc/", "500_2/flow_forces/vc/", "1000_2/flow_forces/vc/", \
# "500_2/flowRecon_forces/vc/", "1000_2/flowRecon_forces/vc/"]
# numTrainExamples = ["500/", "1000/", "2000/", "4000/", "8000/"]
numTrainExamples = ["500_2/", "1000_2/"]
# paths = ["trainedFromScratchTrainSetSize/", "trainedFromCheckpointFullModelTrainSetSize/", "trainedFromCheckpointOnlyConvLayersTrainSetSize/"]
paths = ["trainedFromCheckpointOnlyConvLayersTrainSetSize/"]
subPaths = ["force_flow/vc/", "forceRecon_flow/vc/"]
# subPaths = ["flow/vc/", "flow_flow/vc/", "force_flow/vc/", "flowRecon_flow/vc/", "forceRecon_flow/vc/", "forceFlow_flow/vc/", "all_flow/vc/"]
runs = ["1", "2", "3", "4"]
def runTest(relDir):
# gpu = int(multiprocessing.current_process().name[-1]) - 1
# run = str(gpu+1)
# relDir = basePath+run+"Release/"
if not os.path.isdir(relDir):
print(relDir+"\nNot there :/")
return
# if gpu > 3:
# exit("ID not dependable :(")
os.system('python3 testNetworksOnFlow.py '+relDir+" "+mType)
# os.system('CUDA_VISIBLE_DEVICES='+str(gpu)+' python3 testNetworksOnFlow.py '+relDir+" "+mType)
allDirs = [basePath+ntExamples+subPath+run+"Release/" for basePath in paths for ntExamples in numTrainExamples for subPath in subPaths for run in runs if os.path.isdir(basePath+ntExamples+subPath+run+"Release/")]
# allDirs = [basePath+run+"Release/" for run in runs]
p = multiprocessing.Pool(1)
res = p.map(runTest, allDirs)
p.close()
p.join()
# for mType in modelTypes:
# for run in range(numRuns):
# # relDir = basePath+mType+"/"+str(run+1)+"/"
# relDir = basePath+str(run+1)+"Release/"
# if not os.path.isdir(relDir):
# print(relDir)
# continue
# os.system('CUDA_VISIBLE_DEVICES='+gpu+' python3 testNetworks.py '+relDir+" "+mType)
# # os.system('python3 testNetworks.py '+relDir+" "+mType)
|
[
"thodorisgeorgiou65@gmail.com"
] |
thodorisgeorgiou65@gmail.com
|
9286d75d6f5700bd05546d605a80cf14630ff80f
|
b7126fb70f72fea0e7bba6fe2fef6925302ef07b
|
/tceh6/venv/bin/easy_install-3.6
|
86c4357459840c772a72c2796a316a5c70675a33
|
[] |
no_license
|
duk1edev/tceh
|
79cd909c5a6221a2ca77d342b917462345140faa
|
21649d42488883beb58d709f4a9d1a05c75d2900
|
refs/heads/master
| 2021-07-12T10:20:22.330005
| 2020-04-29T09:24:08
| 2020-04-29T09:24:08
| 239,434,484
| 0
| 0
| null | 2021-03-20T03:38:26
| 2020-02-10T05:25:33
|
Python
|
UTF-8
|
Python
| false
| false
| 434
|
6
|
#!/home/duk1e/code/tceh6/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.6')()
)
|
[
"duk1e.ptc.ua@yandex.ru"
] |
duk1e.ptc.ua@yandex.ru
|
53acb3b43aae15e5fd6faadb8b7a2ca68b87c566
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/294/78128/submittedfiles/testes.py
|
6763a618605abdfcedcb0473d77f3999150f2231
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
# -*- coding: utf-8 -*-
# START HERE BELOW
a = int(input('Digite o número: '))
if (a%2)==0:
print('PAR')
else:
print('IMPAR')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
fc8146540a4e5180187271bc7e25e4a7259440eb
|
5b78602dd41bb226bc97e0acc60d2ec7dff10da4
|
/Temur_online_magazini/django_project/store/serializer.py
|
12892efabc4dc35b77f33ed5459f7c556c514a04
|
[] |
no_license
|
developeryuldashev/Rest_Api_boshlang-ich
|
fac742823442d052930526b60dc613853e9a9773
|
a2f1b5cc104d53504a694d26ba8f492f0743e67e
|
refs/heads/main
| 2023-08-18T23:52:40.441378
| 2021-09-13T03:32:29
| 2021-09-13T03:32:29
| 393,358,090
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 765
|
py
|
from rest_framework import serializers
from .models import *
class OrderDetActions(serializers.Serializer):
action=serializers.CharField(max_length=5)
class CustomerSerializer(serializers.ModelSerializer):
class Meta:
model=Customer
fields='__all__'
class CategoriesSerializer(serializers.ModelSerializer):
class Meta:
model=Categories
fields='__all__'
class ProductsSerializer(serializers.ModelSerializer):
class Meta:
model=Products
fields='__all__'
class OrdersSerializer(serializers.ModelSerializer):
class Meta:
model=Orders
fields='__all__'
class OrderDetailsSerializer(serializers.ModelSerializer):
class Meta:
model=Order_details
fields='__all__'
|
[
"81365808+developeryuldashev@users.noreply.github.com"
] |
81365808+developeryuldashev@users.noreply.github.com
|
3a6bb59f575212600ec46dd373b02dbb5de0329e
|
83f443f454716d534eff57ef399f86aa9a267b20
|
/b1_cnn/basic/1_tf_low_api.py
|
51f9795d81d41463963b3c6e3fd595539420d139
|
[] |
no_license
|
Gilbert-Gb-Li/Artificial-Intelligence
|
fef400c9e767ba7e348e1f135164da1e13c04592
|
28247627eab50613c1a5bf67f70e979a0a9eecb2
|
refs/heads/master
| 2021-04-23T19:46:19.556837
| 2020-04-09T08:47:42
| 2020-04-09T08:47:42
| 249,986,422
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
import tensorflow as tf
'''
Low-level tf API
----------
input,   input tensor of shape [B, H, W, C]
filter,  convolution kernel of shape [filter_H, filter_W, C_in, C_out]
strides, step size per dimension [B, H, W, C]
'''
c_out = 128
# 'input' must be a 4-D tensor; a placeholder (assumed here, TF1-style API)
# stands in so the snippet is runnable
input = tf.placeholder(tf.float32, [None, 224, 224, 3])
# read the shape back from the input tensor
b, h, w, c = input.get_shape()
# define the filter variable, named 'kernel'; later operations can look the
# variable up again by this name
filter = tf.get_variable('kernel', [3, 3, c, c_out])
tf.nn.conv2d(input,
filter=filter,
strides=[1, 2, 2, 1],
padding='SAME',
use_cudnn_on_gpu=False,  # whether to use cuDNN GPU acceleration
data_format='NHWC',  # NHWC == BHWC
dilations=[1, 2, 2, 1],  # dilated (atrous) convolution: zero insertion enlarges the receptive field without adding trainable parameters (note: conv2d requires strides of 1 when any dilation > 1)
name=None)  # name, shown in the TensorBoard graph
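# shape check (illustration, setting the dilation caveat aside): with 'SAME'
# padding and strides [1, 2, 2, 1], a [B, 224, 224, 3] input yields a
# [B, 112, 112, 128] output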
|
[
"gb.l@foxmail.com"
] |
gb.l@foxmail.com
|
093a07fb661b25f93b35aa5258dea95855fc25a2
|
3e0468eb7101281ff8133b2acd08b6f83f8953f9
|
/chap10/map的使用.py
|
48dae53d968733878f5da77b18466b818b9dfc27
|
[] |
no_license
|
etoy0328/python_base_knowledge
|
4e514f93b844a1e5d2a654267cf5ea295ae634e2
|
7db140e838939da1ddf9967f82fc78d109aa6362
|
refs/heads/master
| 2023-03-22T10:23:28.211691
| 2021-03-16T10:32:42
| 2021-03-16T10:32:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
# China University of Mining and Technology (Beijing) / Mechatronics Master's-6 / ZQT2000405103 / 李天鸽
# Edited: 2021/3/16 14:25
ages = [11, 13, 12, 50, 30, 33, 19]
m = map(lambda ele: ele + 2, ages)
print(m)  # <map object at 0x00000220C1D6F160>
new_ages = list(m)
print(new_ages)
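# expected output: [13, 15, 14, 52, 32, 35, 21]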
|
[
"1740636835@qq.com"
] |
1740636835@qq.com
|
83087236d2a91b777ad987ef68a4c78684b0c4aa
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit_QC344.py
|
1ace2c0f3a04e6d8f71e1bb60a1394826cc48796
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,589
|
py
|
# qubit number=3
# total number=12
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=5
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=6
prog.swap(input_qubit[1],input_qubit[0]) # number=7
prog.x(input_qubit[0]) # number=8
prog.x(input_qubit[0]) # number=9
prog.x(input_qubit[2]) # number=10
prog.x(input_qubit[2]) # number=11
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5600
writefile = open("../data/startQiskit_QC344.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_5_yorktown")
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
2669fd30072a0d9fbdbd1acd75c604560975a01b
|
bcf11ccd5ec986f461edcf874c2d12e2fad13e76
|
/junk/208.py
|
39cd778d1a04341e41199f5b9766962f166b8c6b
|
[] |
no_license
|
buxuele/algo_snippet
|
c1a8a0855e6c2a02f5045d21710baa7fa8b00d13
|
9ab71d523f5079b6d72aef11b09120fee047f66c
|
refs/heads/master
| 2021-05-18T06:45:56.111807
| 2021-02-21T17:13:31
| 2021-02-21T17:13:31
| 251,164,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
# author: fanchuangwater@gmail.com
# date: 2020/3/25 下午11:17
# 目的:
# 先暴力一点试试看
ret = 0
for i in range(10 ** 7):
if len(str(i)) == len(set(str(i))):
# print(i)
ret += 1
print()
print("finally")
print(ret)
|
[
"baogebuxuele@163.com"
] |
baogebuxuele@163.com
|
58c2069f47e0843d054c6bfb544cd0d3cca57cea
|
d8d27da5abc5ad63e288f0f8617f35e4bc14620d
|
/pyproct/clustering/evaluation/metrics/test/data/example_clustering_1.py
|
a70e8ed54ae040fcb93607a5a5b1476dd30c8328
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
victor-gil-sepulveda/pyProCT
|
a48a1b86ebcadc48a1146f88fd7363dabecb5079
|
2777c73efb48d5ca6543c69a31719421c4d54694
|
refs/heads/master
| 2021-01-17T14:47:37.660100
| 2017-08-24T15:14:04
| 2017-08-24T15:14:04
| 6,504,827
| 11
| 13
| null | 2014-07-09T14:26:36
| 2012-11-02T10:47:04
|
Python
|
UTF-8
|
Python
| false
| false
| 3,640
|
py
|
"""
Created on 27/01/2014
@author: victor
"""
# -> with spectral
clustering_01 = {'clusters': [
{'prototype': 13,
'elements': '13, 15, 17:18, 20, 22, 24, 26, 28, 45:46, 48, 50, 52, 54, 56, 60:61, 63, 65, 67, 69, 71, 86, 88, 90, 93, 96, 98, 112, 114, 117, 119, 121, 123, 126, 138, 140, 142, 144, 146, 148, 150, 166, 168, 170, 172:174, 176:177, 179, 182, 185, 194, 196, 199, 201, 204:205, 207, 209, 211:213, 217, 219, 221, 223, 225:226, 228, 239, 242:243, 245, 247, 249, 251, 256, 258:260, 262, 264, 266, 268, 270, 272, 274, 276, 284, 286:287, 291, 293, 295, 297, 299, 301, 303:304, 316, 318, 328, 330:331, 333:334, 336, 339, 341:342, 344:345, 353, 355, 357, 359, 361, 363, 367, 370, 372, 374, 376, 378, 386, 388, 390:391, 393, 395, 397, 402, 405:406, 408, 410:411, 413, 415, 421, 423, 425, 427, 429, 431:432, 434, 436, 438, 440, 457, 459, 461, 463, 465, 467, 469, 471, 473, 475, 478, 480, 482, 494, 496:498, 500, 502, 504, 507, 509, 511, 527, 529, 531, 533, 535, 543, 545, 547, 549:550, 552:553, 556, 558, 560:561, 563',
'id': 'cluster_1'},
{'prototype': 14,
'elements': '14, 21, 23, 25, 29, 47, 55, 59, 64, 66, 68, 89, 113, 115, 118, 120, 122, 127, 139, 141, 145, 147, 149, 151, 155, 167, 169, 171, 195, 200, 210, 214, 218, 220, 222, 224, 229, 240, 244, 246, 248, 257, 261, 267, 273, 275, 281, 288, 290, 292, 294, 300, 305, 317, 329, 332, 335, 343, 356, 358, 360, 362, 371, 373, 375, 377, 387, 394, 398, 403, 407, 409, 422, 426, 428, 430, 433, 437, 439, 441, 458, 460, 464, 466, 468, 470, 472, 474, 479, 481, 483, 495, 503, 508, 510, 528, 530, 532, 534, 536, 544, 551, 562, 564',
'id': 'cluster_6'},
{'prototype': 1,
'elements': '1:3, 5:8, 10:12, 30:32, 34:44, 73:78, 80:82, 84, 102:103, 105:108, 111, 128:130, 132, 134:136, 152:154, 156:161, 163:164, 189, 216, 232, 252, 255, 324:325, 348, 417, 445, 491, 493, 516, 519, 524:525, 538',
'id': 'cluster_0'},
{'prototype': 4,
'elements': '4, 16, 19, 49, 51, 53, 57:58, 62, 70, 72, 87, 91:92, 94, 97, 99:100, 116, 124:125, 175, 178, 180, 183, 186, 197:198, 202:203, 241, 263, 265, 269, 271, 277, 285, 296, 302, 319, 337, 340, 365:366, 368:369, 389, 396, 414, 424, 435, 443, 462, 476, 484, 499, 501, 505:506, 512:513, 537, 546, 554, 557, 559',
'id': 'cluster_2'},
{'prototype': 33,
'elements': '33, 79, 233, 235, 279:280, 283, 306, 308:314, 320:323, 327, 349, 351:352, 379:380, 384:385, 400:401, 416, 418:420, 444, 447:450, 453:456, 485:487, 489, 492, 514:515, 517, 521:523, 526, 539, 541:542',
'id': 'cluster_5'},
{'prototype': 9,
'elements': '9, 83, 104, 109:110, 131, 133, 137, 162, 165, 187:188, 190:193, 215, 230:231, 234, 236:238, 253:254, 282, 307, 315, 326, 347, 350, 381:383, 446, 451:452, 488, 490, 518, 520, 540',
'id': 'cluster_3'},
{'prototype': 0,
'elements': '0, 27, 85, 95, 101, 143, 181, 184, 206, 208, 227, 250, 278, 289, 298, 338, 346, 354, 364, 392, 399, 404, 412, 442, 477, 548, 555',
'id': 'cluster_4'}
],
'total_number_of_elements': 565,
'number_of_clusters': 7}
|
[
"victor.gil.sepulveda@gmail.com"
] |
victor.gil.sepulveda@gmail.com
|
0c634e6bbd42c0e27ca3bc801542af775ec860b2
|
cb94a4cdd7a9df17f9c6f1a03f8f4ff12c916cf3
|
/Programming_Foundtions_Algorithms/Ex_Files_Programming_Foundations_Algorithms/Exercise Files/6 Other Algorithms/Filter_start.py
|
106aa0ba317296d654bc0d6d427504adf4ebebf0
|
[] |
no_license
|
sedstan/LinkedIn-Learning-Python-Course
|
2b936d0f00703a6e66a872220ed47572123dc7fd
|
b4584218355bf07aa3d2939b950911eae67adb0b
|
refs/heads/master
| 2021-10-11T10:19:13.675662
| 2019-01-24T17:55:20
| 2019-01-24T17:55:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
# use a hashtable to filter out duplicate items
# define a set of items that we want to reduce duplicates
items = ["apple", "pear", "orange", "banana", "apple",
"orange", "apple", "pear", "banana", "orange",
"apple", "kiwi", "pear", "apple", "orange"]
# TODO: create a hashtable to perform a filter
filter = dict()
# TODO: loop over each item and add to the hashtable
for key in items:
filter[key] = 0
# TODO: create a set from the resulting keys in the hashtable
result = set(filter.keys())
print(result)
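# note: result = set(items) gives the same answer in one step; the dict
# just makes the hashtable-based filtering explicit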
|
[
"sed@wearewhy.co.uk"
] |
sed@wearewhy.co.uk
|
d527bc01e6d4354042cdd1609447257fbca71a4f
|
978248bf0f275ae688f194593aa32c267832b2b6
|
/xlsxwriter/test/worksheet/test_worksheet07.py
|
8987c4dc09a18d9fefa1ba175fa8d598780e5d5f
|
[
"BSD-2-Clause-Views"
] |
permissive
|
satish1337/XlsxWriter
|
b0c216b91be1b74d6cac017a152023aa1d581de2
|
0ab9bdded4f750246c41a439f6a6cecaf9179030
|
refs/heads/master
| 2021-01-22T02:35:13.158752
| 2015-03-31T20:32:28
| 2015-03-31T20:32:28
| 33,300,989
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,651
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
from ...sharedstrings import SharedStringTable
class TestAssembleWorksheet(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with formulas in cells."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.str_table = SharedStringTable()
worksheet.select()
# Write some data and formulas.
worksheet.write_number(0, 0, 1)
worksheet.write_number(1, 0, 2)
worksheet.write_formula(2, 2, '=A1+A2', None, 3)
worksheet.write_formula(4, 1, """="<&>" & ";"" '\"""", None, """<&>;" '""")
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A1:C5"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="1" spans="1:3">
<c r="A1">
<v>1</v>
</c>
</row>
<row r="2" spans="1:3">
<c r="A2">
<v>2</v>
</c>
</row>
<row r="3" spans="1:3">
<c r="C3">
<f>A1+A2</f>
<v>3</v>
</c>
</row>
<row r="5" spans="1:3">
<c r="B5" t="str">
<f>"<&>" & ";"" '"</f>
<v><&>;" '</v>
</c>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
8c4037f2fd69e4f6614d648cbd35c997b2d1510e
|
536bce6ca78a9a151247b51acb8c375c9db7445f
|
/src/files/rw_csv.py
|
39ccf3e1f9bacec7c15e21e42533d71dca30dde3
|
[] |
no_license
|
clicianaldoni/aprimeronpython
|
57de34313f4fd2a0c69637fefd60b0fb5861f859
|
a917b62bec669765a238c4b310cc52b79c7df0c9
|
refs/heads/master
| 2023-01-28T18:02:31.175511
| 2023-01-23T08:14:57
| 2023-01-23T08:14:57
| 112,872,454
| 0
| 0
| null | 2017-12-02T19:55:40
| 2017-12-02T19:55:40
| null |
UTF-8
|
Python
| false
| false
| 734
|
py
|
infile = open('budget.csv', 'r')
import csv
table = []
for row in csv.reader(infile):
table.append(row)
infile.close()
import pprint
pprint.pprint(table)
# transform numbers in table into float objects
# (let first row and first column remain strings)
for r in range(1,len(table)):
for c in range(1, len(table[0])):
table[r][c] = float(table[r][c])
pprint.pprint(table)
# add a new row with sums:
row = [0.0]*len(table[0])
row[0] = 'sum'
for c in range(1, len(row)):
s = 0
for r in range(1, len(table)):
s += table[r][c]
row[c] = s
table.append(row)
pprint.pprint(table)
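# Note: on Python 3, open('budget2.csv', 'w', newline='') avoids blank rows on Windows.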
outfile = open('budget2.csv', 'w')
writer = csv.writer(outfile)
for row in table:
writer.writerow(row)
outfile.close()
|
[
"martin@rodvand.net"
] |
martin@rodvand.net
|
ab7df51266add97d209fa99b82f75cf4f1e0eae2
|
9063052d8e2c294efa3b501d42aef2ac59d84fa0
|
/영상처리/assignment3/b.py
|
10dce4c350900a30d1a1377b7d280efdc9e3c9c2
|
[] |
no_license
|
yes99/practice2020
|
ffe5502d23038eabea834ebc2b18ff724f849c4a
|
100ac281f4fe6d0f991213802fbd8524451f1ac2
|
refs/heads/master
| 2021-07-08T00:54:19.728874
| 2021-06-13T05:52:07
| 2021-06-13T05:52:07
| 245,789,109
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,707
|
py
|
import numpy as np
import cv2
# Read the image in grayscale for single-channel histogram equalization.
img = cv2.imread('nine.png', cv2.IMREAD_GRAYSCALE)
# Check that the image is read and displayed correctly.
cv2.imshow('image1', img)
# Used to collect the intensity histogram of the image.
histogram = np.zeros((256,), dtype=np.float64)
# Used for the equalization computation based on the collected histogram.
cal = np.zeros((256,), dtype=np.float64)
# float16 overflowed because the accumulated values got too large; float64 works correctly.
height, width = img.shape
# Check the values we are given.
print("height = ", height)
print("width = ", width)
print("cal")
print(cal)
print("image")
print(img)
# Collect the histogram from the given image, one pixel at a time.
for i in range(width):
    for j in range(height):
        his = img[j, i]
        histogram[his] = histogram[his] + 1
print("histogram")
print(histogram)
# Compute the cumulative sums first.
for i in range(256):
    for j in range(i + 1):
        cal[i] += histogram[j]
print(cal)
# Divide by the total number of pixels.
for i in range(256):
    cal[i] = cal[i] * (1.0 / (height * width))
print(cal)
# Multiply by (max - min), i.e. 255.
for i in range(256):
    cal[i] = round(cal[i] * 255)
print(cal)
# Map each pixel through the equalized values and write the result back into img.
for i in range(width):
    for j in range(height):
        his = img[j, i]
        img[j, i] = cal[his]
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
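# Note: cv2.equalizeHist(img) performs the same equalization in a single call.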
|
[
"yes950324@naver.com"
] |
yes950324@naver.com
|
9c90fc258340931a86f59dc8604c1067bd1c4304
|
b05761d771bb5a85d39d370c649567c1ff3eb089
|
/venv/lib/python3.10/site-packages/virtualenv/util/path/_sync.py
|
c8069bad58192642ee180777910daf347c154a3b
|
[] |
no_license
|
JawshyJ/Coding_Practice
|
88c49cab955eab04609ec1003b6b8c20f103fc06
|
eb6b229d41aa49b1545af2120e6bee8e982adb41
|
refs/heads/master
| 2023-02-19T10:18:04.818542
| 2023-02-06T21:22:58
| 2023-02-06T21:22:58
| 247,788,631
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 96
|
py
|
/home/runner/.cache/pip/pool/5b/8a/a2/f7e8f925fe41daefc00ccfdf9628d50e49315ee8dae212a5ab978784ca
|
[
"37465112+JawshyJ@users.noreply.github.com"
] |
37465112+JawshyJ@users.noreply.github.com
|
328897e5bad2df1b8130ad7198c2b38636080d6b
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Keras_tensorflow/source/tensorflow/python/ops/image_grad.py
|
b6b61ab92ce0e82331d349d7615f3badd7b6399b
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,785
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains Gradient functions for image ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_image_ops
@ops.RegisterGradient("ResizeNearestNeighbor")
def _ResizeNearestNeighborGrad(op, grad):
"""The derivatives for nearest neighbor resizing.
Args:
op: The ResizeNearestNeighbor op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input and the output.
"""
image = op.inputs[0]
if image.get_shape()[1:3].is_fully_defined():
image_shape = image.get_shape()[1:3]
else:
image_shape = array_ops.shape(image)[1:3]
# pylint: disable=protected-access
grads = gen_image_ops._resize_nearest_neighbor_grad(
grad,
image_shape,
align_corners=op.get_attr("align_corners"))
# pylint: enable=protected-access
return [grads, None]
@ops.RegisterGradient("ResizeBilinear")
def _ResizeBilinearGrad(op, grad):
"""The derivatives for bilinear resizing.
Args:
op: The ResizeBilinear op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input.
"""
allowed_types = [dtypes.float32, dtypes.float64]
grad0 = None
if op.inputs[0].dtype in allowed_types:
# pylint: disable=protected-access
grad0 = gen_image_ops._resize_bilinear_grad(
grad,
op.inputs[0],
align_corners=op.get_attr("align_corners"))
# pylint: enable=protected-access
return [grad0, None]
@ops.RegisterGradient("CropAndResize")
def _CropAndResizeGrad(op, grad):
"""The derivatives for crop_and_resize.
We back-propagate to the image only when the input image tensor has floating
point dtype but we always back-propagate to the input boxes tensor.
Args:
op: The CropAndResize op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input image, boxes, as well as the always-None
gradients w.r.t. box_ind and crop_size.
"""
image = op.inputs[0]
if image.get_shape().is_fully_defined():
image_shape = image.get_shape().as_list()
else:
image_shape = array_ops.shape(image)
allowed_types = [dtypes.float16, dtypes.float32, dtypes.float64]
if op.inputs[0].dtype in allowed_types:
# pylint: disable=protected-access
grad0 = gen_image_ops.crop_and_resize_grad_image(grad,
op.inputs[1],
op.inputs[2],
image_shape,
T=op.get_attr("T"))
# pylint: enable=protected-access
else:
grad0 = None
grad1 = gen_image_ops.crop_and_resize_grad_boxes(grad, op.inputs[0],
op.inputs[1], op.inputs[2])
return [grad0, grad1, None, None]
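# Minimal usage sketch (assumes TF 1.x graph mode; illustrative only, not part
# of this module): once registered, the gradient functions above are dispatched
# automatically by tf.gradients, e.g.
#   x = tf.placeholder(tf.float32, [1, 4, 4, 3])
#   y = tf.image.resize_bilinear(x, [8, 8])
#   dx = tf.gradients(y, x)  # uses _ResizeBilinearGrad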
|
[
"ryfeus@gmail.com"
] |
ryfeus@gmail.com
|
94b42e9b782f24022d395b78afbb0c1454c0747b
|
a9eda70d7cf14604dca1805c19cc5df3a8e8918c
|
/Cell200/Train_CcGAN_backup.py
|
b00824e492a55c5fa0b324a046bde9d94031144d
|
[] |
no_license
|
Zereke/improved_CcGAN
|
6f00037f8245d29deeb11cdb37488ce6d801023b
|
adb1f0cd56ff33e2127d1eee5053dac58e292a5c
|
refs/heads/master
| 2023-04-08T11:35:19.925545
| 2021-04-05T07:07:39
| 2021-04-05T07:07:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,711
|
py
|
import torch
import numpy as np
import os
import timeit
from PIL import Image
from torchvision.utils import save_image
from utils import *
from opts import parse_opts
''' Settings '''
args = parse_opts()
NGPU = torch.cuda.device_count()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# some parameters in opts
niters = args.niters_gan
resume_niters = args.resume_niters_gan
dim_gan = args.dim_gan
lr_g = args.lr_g_gan
lr_d = args.lr_d_gan
save_niters_freq = args.save_niters_freq
batch_size_disc = args.batch_size_disc
batch_size_gene = args.batch_size_gene
threshold_type = args.threshold_type
nonzero_soft_weight_threshold = args.nonzero_soft_weight_threshold
transform = args.transform
NC = args.num_channels
IMG_SIZE = args.img_size
def train_CcGAN(kernel_sigma, kappa, train_images, train_labels, netG, netD, save_images_folder, save_models_folder = None, clip_label=False):
'''
Note that train_images are not normalized to [-1,1]
'''
netG = netG.to(device)
netD = netD.to(device)
optimizerG = torch.optim.Adam(netG.parameters(), lr=lr_g, betas=(0.5, 0.999))
optimizerD = torch.optim.Adam(netD.parameters(), lr=lr_d, betas=(0.5, 0.999))
if save_models_folder is not None and resume_niters>0:
save_file = save_models_folder + "/CcGAN_{}_checkpoint_intrain/CcGAN_checkpoint_niters_{}.pth".format(threshold_type, resume_niters)
checkpoint = torch.load(save_file)
netG.load_state_dict(checkpoint['netG_state_dict'])
netD.load_state_dict(checkpoint['netD_state_dict'])
optimizerG.load_state_dict(checkpoint['optimizerG_state_dict'])
optimizerD.load_state_dict(checkpoint['optimizerD_state_dict'])
torch.set_rng_state(checkpoint['rng_state'])
#end if
#################
unique_train_labels = np.sort(np.array(list(set(train_labels))))
    # print images with labels between the 5th and 95th percentiles of the training labels
n_row=10; n_col = n_row
z_fixed = torch.randn(n_row*n_col, dim_gan, dtype=torch.float).to(device)
start_label = np.quantile(train_labels, 0.05)
end_label = np.quantile(train_labels, 0.95)
selected_labels = np.linspace(start_label, end_label, num=n_row)
y_fixed = np.zeros(n_row*n_col)
for i in range(n_row):
curr_label = selected_labels[i]
for j in range(n_col):
y_fixed[i*n_col+j] = curr_label
print(y_fixed)
y_fixed = torch.from_numpy(y_fixed).type(torch.float).view(-1,1).to(device)
start_time = timeit.default_timer()
for niter in range(resume_niters, niters):
''' Train Discriminator '''
## randomly draw batch_size_disc y's from unique_train_labels
batch_target_labels_raw = np.random.choice(unique_train_labels, size=batch_size_disc, replace=True)
## add Gaussian noise; we estimate image distribution conditional on these labels
batch_epsilons = np.random.normal(0, kernel_sigma, batch_size_disc)
batch_target_labels = batch_target_labels_raw + batch_epsilons
if clip_label:
batch_target_labels = np.clip(batch_target_labels, 0.0, 1.0)
## find index of real images with labels in the vicinity of batch_target_labels
## generate labels for fake image generation; these labels are also in the vicinity of batch_target_labels
        batch_real_indx = np.zeros(batch_size_disc, dtype=int) #index of images in the dataset; the labels of these images are in the vicinity
batch_fake_labels = np.zeros(batch_size_disc)
for j in range(batch_size_disc):
## index for real images
if threshold_type == "hard":
indx_real_in_vicinity = np.where(np.abs(train_labels-batch_target_labels[j])<= kappa)[0]
else:
# reverse the weight function for SVDL
indx_real_in_vicinity = np.where((train_labels-batch_target_labels[j])**2 <= -np.log(nonzero_soft_weight_threshold)/kappa)[0]
## if the max gap between two consecutive ordered unique labels is large, it is possible that len(indx_real_in_vicinity)<1
while len(indx_real_in_vicinity)<1:
batch_epsilons_j = np.random.normal(0, kernel_sigma, 1)
batch_target_labels[j] = batch_target_labels_raw[j] + batch_epsilons_j
if clip_label:
batch_target_labels = np.clip(batch_target_labels, 0.0, 1.0)
## index for real images
if threshold_type == "hard":
indx_real_in_vicinity = np.where(np.abs(train_labels-batch_target_labels[j])<= kappa)[0]
else:
# reverse the weight function for SVDL
indx_real_in_vicinity = np.where((train_labels-batch_target_labels[j])**2 <= -np.log(nonzero_soft_weight_threshold)/kappa)[0]
#end while len(indx_real_in_vicinity)<1
assert len(indx_real_in_vicinity)>=1
batch_real_indx[j] = np.random.choice(indx_real_in_vicinity, size=1)[0]
## labels for fake images generation
if threshold_type == "hard":
lb = batch_target_labels[j] - kappa
ub = batch_target_labels[j] + kappa
else:
lb = batch_target_labels[j] - np.sqrt(-np.log(nonzero_soft_weight_threshold)/kappa)
ub = batch_target_labels[j] + np.sqrt(-np.log(nonzero_soft_weight_threshold)/kappa)
lb = max(0.0, lb); ub = min(ub, 1.0)
assert lb<=ub
assert lb>=0 and ub>=0
assert lb<=1 and ub<=1
batch_fake_labels[j] = np.random.uniform(lb, ub, size=1)[0]
#end for j
## draw the real image batch from the training set
batch_real_images = train_images[batch_real_indx]
assert batch_real_images.max()>1
batch_real_labels = train_labels[batch_real_indx]
batch_real_labels = torch.from_numpy(batch_real_labels).type(torch.float).to(device)
## transform (rotate, flip, etc.) and normalize real images
if transform:
trainset = IMGs_dataset(batch_real_images, labels=None, normalize=True, rotate=True, degrees = [90,180,270], hflip = True, vflip = True)
else:
trainset = IMGs_dataset(batch_real_images, labels=None, normalize=True)
train_dataloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size_disc, shuffle=False, num_workers=8)
train_dataloader = iter(train_dataloader)
        batch_real_images = next(train_dataloader)  # next() works on Python 3 iterators
assert len(batch_real_images) == batch_size_disc
batch_real_images = batch_real_images.type(torch.float).to(device)
assert batch_real_images.max().item()<=1
## generate the fake image batch
batch_fake_labels = torch.from_numpy(batch_fake_labels).type(torch.float).to(device)
z = torch.randn(batch_size_disc, dim_gan, dtype=torch.float).to(device)
batch_fake_images = netG(z, batch_fake_labels)
## target labels on gpu
batch_target_labels = torch.from_numpy(batch_target_labels).type(torch.float).to(device)
## weight vector
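        ## Assumed per the CcGAN formulation: soft vicinity (SVDL) weights decay
        ## as exp(-kappa*(y - y_target)^2), while hard vicinity (HVDL) weights
        ## all samples inside the vicinity equally.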
if threshold_type == "soft":
real_weights = torch.exp(-kappa*(batch_real_labels-batch_target_labels)**2).to(device)
fake_weights = torch.exp(-kappa*(batch_fake_labels-batch_target_labels)**2).to(device)
else:
real_weights = torch.ones(batch_size_disc, dtype=torch.float).to(device)
fake_weights = torch.ones(batch_size_disc, dtype=torch.float).to(device)
#end if threshold type
# forward pass
real_dis_out = netD(batch_real_images, batch_target_labels)
fake_dis_out = netD(batch_fake_images.detach(), batch_target_labels)
d_loss = - torch.mean(real_weights * torch.log(real_dis_out+1e-20)) - torch.mean(fake_weights * torch.log(1 - fake_dis_out+1e-20))
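        # Vicinity-weighted vanilla GAN discriminator loss; the 1e-20 terms guard against log(0).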
optimizerD.zero_grad()
d_loss.backward()
optimizerD.step()
''' Train Generator '''
netG.train()
# generate fake images
# ## randomly draw batch_size_disc y's from unique_train_labels
# batch_target_labels_raw = np.random.choice(unique_train_labels, size=batch_size_gene, replace=True)
## add Gaussian noise; we estimate image distribution conditional on these labels
batch_epsilons = np.random.normal(0, kernel_sigma, batch_size_gene)
batch_target_labels = batch_target_labels_raw[0:batch_size_gene] + batch_epsilons
if clip_label:
batch_target_labels = np.clip(batch_target_labels, 0.0, 1.0)
batch_target_labels = torch.from_numpy(batch_target_labels).type(torch.float).to(device)
z = torch.randn(batch_size_gene, dim_gan, dtype=torch.float).to(device)
batch_fake_images = netG(z, batch_target_labels)
# loss
dis_out = netD(batch_fake_images, batch_target_labels)
g_loss = - torch.mean(torch.log(dis_out+1e-20))
# backward
optimizerG.zero_grad()
g_loss.backward()
optimizerG.step()
# print loss
if (niter+1) % 20 == 0:
print ("CcGAN: [Iter %d/%d] [D loss: %.4e] [G loss: %.4e] [real prob: %.3f] [fake prob: %.3f] [Time: %.4f]" % (niter+1, niters, d_loss.item(), g_loss.item(), real_dis_out.mean().item(), fake_dis_out.mean().item(), timeit.default_timer()-start_time))
if (niter+1) % 100 == 0:
netG.eval()
with torch.no_grad():
gen_imgs = netG(z_fixed, y_fixed)
gen_imgs = gen_imgs.detach().cpu()
save_image(gen_imgs.data, save_images_folder + '/{}.png'.format(niter+1), nrow=n_row, normalize=True)
if save_models_folder is not None and ((niter+1) % save_niters_freq == 0 or (niter+1) == niters):
save_file = save_models_folder + "/CcGAN_{}_checkpoint_intrain/CcGAN_checkpoint_niters_{}.pth".format(threshold_type, niter+1)
os.makedirs(os.path.dirname(save_file), exist_ok=True)
torch.save({
'netG_state_dict': netG.state_dict(),
'netD_state_dict': netD.state_dict(),
'optimizerG_state_dict': optimizerG.state_dict(),
'optimizerD_state_dict': optimizerD.state_dict(),
'rng_state': torch.get_rng_state()
}, save_file)
#end for niter
return netG, netD
def SampCcGAN_given_label(netG, label, path=None, NFAKE = 10000, batch_size = 500):
'''
label: normalized label in [0,1]
'''
if batch_size>NFAKE:
batch_size = NFAKE
    fake_images = np.zeros((NFAKE+batch_size, NC, IMG_SIZE, IMG_SIZE), dtype=float)  # np.float alias was removed in NumPy 1.24; the builtin float is equivalent
netG=netG.to(device)
netG.eval()
with torch.no_grad():
tmp = 0
while tmp < NFAKE:
z = torch.randn(batch_size, dim_gan, dtype=torch.float).to(device)
y = np.ones(batch_size) * label
y = torch.from_numpy(y).type(torch.float).view(-1,1).to(device)
batch_fake_images = netG(z, y)
fake_images[tmp:(tmp+batch_size)] = batch_fake_images.cpu().detach().numpy()
tmp += batch_size
#remove extra entries
fake_images = fake_images[0:NFAKE]
fake_labels = np.ones(NFAKE) * label #use assigned label
if path is not None:
raw_fake_images = (fake_images*0.5+0.5)*255.0
raw_fake_images = raw_fake_images.astype(np.uint8)
for i in range(NFAKE):
filename = path + '/' + str(i) + '.jpg'
            im = Image.fromarray(raw_fake_images[i][0], mode='L')
            im.save(filename)
return fake_images, fake_labels
|
[
"dingx92@gmail.com"
] |
dingx92@gmail.com
|
a0fd3dd7e8aad3c7028e7c5168f79cb4d2878e3e
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2285/60889/279158.py
|
c12b8b99d18f9cb48e74a03d5c66649041d29e36
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 951
|
py
|
numOfInput = int(input())
for i in range(numOfInput):
days = int(input())
profits = list(map(int,input().split(" ")))
trade = []
trade.append(0)
for j in range(1,days-1):
if ((profits[j] < profits[j-1]) and (profits[j] < profits[j+1])) or ((profits[j] > profits[j-1]) and (profits[j] > profits[j+1])):
trade.append(j)
trade.append(days-1)
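    # "trade" now holds the endpoints plus the indices of the local extrema of
    # the profit series, i.e. the candidate buy/sell days printed in pairs below.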
if profits[0] > profits[1]:
if len(trade) == 2:
print("没有利润")
else:
j = 1
while len(trade) > j+3:
print("(" + str(trade[j]) + " " + str(trade[j+1]) + ")"+" ",end = "")
j = j + 2
print("(" + str(trade[j]) + " " + str(trade[j+1]) + ")")
else:
j = 0
while len(trade) > j+3:
print("(" + str(trade[j]) + " " + str(trade[j+1]) + ")"+" ",end = "" )
j = j + 2
print("(" + str(trade[j]) + " " + str(trade[j+1]) + ")")
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
e2126a086747b41651adecffe157114a80e7dc03
|
8e657d3f3d94d84e1948c61a82d5fbffcf913348
|
/akivymd/uix/hintwidget.py
|
65895294556bbe496a714b74e305b01e50d2a51d
|
[
"MIT"
] |
permissive
|
quitegreensky/akivymd
|
169b4466b9cbc39e4d940c3d1a616f80528ab8f4
|
b2daddd2f58889859514286606f46a4af6f03828
|
refs/heads/master
| 2023-06-03T15:12:44.746360
| 2020-11-02T19:04:37
| 2020-11-02T19:04:37
| 273,256,286
| 51
| 16
|
MIT
| 2021-06-17T03:34:51
| 2020-06-18T14:12:55
|
Python
|
UTF-8
|
Python
| false
| false
| 7,234
|
py
|
from kivy.animation import Animation
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.lang.builder import Builder
from kivy.properties import (
BooleanProperty,
ListProperty,
NumericProperty,
OptionProperty,
StringProperty,
)
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
Builder.load_string(
"""
<AKHintWidgetItem>
pos: self.parent.pos
<AKHintWidget>:
FloatLayout:
pos: root.pos
size: root.size
FloatLayout:
id: _float_box
# pos: root._hintwidget_x, root._hintwidget_y
size_hint: None, None
size: root.hintwidget_width, root.hintwidget_height
opacity: root._hintwidget_alpha
"""
)
class AKHintWidgetItem(BoxLayout):
pass
class AKHintWidget(BoxLayout):
hintwidget_width = NumericProperty("150dp")
hintwidget_height = NumericProperty("150dp")
opacity_duration = NumericProperty(0.2)
transition = StringProperty("out_quad")
offset_x = NumericProperty("10dp")
offset_y = NumericProperty("10dp")
show_mode = OptionProperty("touch", options=["touch", "hover"])
hintwidget_pos = OptionProperty("tr", options=["tr", "tl", "br", "bl"])
auto_dismiss = BooleanProperty(True)
open_button = OptionProperty("left", options=["left", "right"])
show_delay = NumericProperty(0)
_hintwidget_x = NumericProperty()
_hintwidget_y = NumericProperty()
_hintwidget_alpha = NumericProperty(0)
_opac_anim_started = False
_state = "close"
def __init__(self, **kwargs):
super(AKHintWidget, self).__init__(**kwargs)
Clock.schedule_once(lambda x: self._update())
def _update(self):
if self.show_mode == "hover":
Window.bind(mouse_pos=self._show_hover)
elif self.show_mode == "touch":
Window.unbind(mouse_pos=self._show_hover)
self.bind(_hintwidget_x=self.ids._float_box.setter("x"))
self.bind(_hintwidget_y=self.ids._float_box.setter("y"))
def _right_top_hint(self):
return (self._hintwidget_x, self._hintwidget_y)
def _show_hover(self, instance, pos):
if self.collide_point(pos[0], pos[1]):
self._set_hintwidget_pos(pos)
Clock.schedule_once(
lambda x: self._change_opacity(1), self.show_delay
)
self._state = "open"
else:
self._change_opacity(0)
self._state = "close"
@property
def state(self):
return self._state
def _set_hintwidget_pos(self, pos):
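        # Place the hint widget at the configured corner relative to the touch
        # position, then clamp it so it stays inside this widget's bounds.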
space_x = self.hintwidget_width + self.offset_x
space_y = self.hintwidget_height + self.offset_y
image_top = self.y + self.height
image_right = self.x + self.width
image_left = self.x
image_bottom = self.y
if self.hintwidget_pos == "tr":
mag_right = pos[0] + space_x
mag_top = pos[1] + space_y
mag_left = pos[0]
mag_bottom = pos[1]
if self.hintwidget_pos == "br":
mag_right = pos[0] + space_x
mag_top = pos[1]
mag_left = pos[0]
mag_bottom = pos[1] - space_y
if self.hintwidget_pos in "tl":
mag_right = pos[0]
mag_top = pos[1] + space_y
mag_left = pos[0] - space_x
mag_bottom = pos[1]
if self.hintwidget_pos in "bl":
mag_right = pos[0]
mag_top = pos[1]
mag_left = pos[0] - space_x
mag_bottom = pos[1] - space_y
# ===============
if mag_right >= image_right:
self._hintwidget_x = pos[0] - self.offset_x - self.hintwidget_width
elif mag_left <= image_left:
self._hintwidget_x = pos[0] + self.offset_x
else:
if self.hintwidget_pos in ["tr", "br"]:
self._hintwidget_x = pos[0] + self.offset_x
elif self.hintwidget_pos in ["tl", "bl"]:
self._hintwidget_x = (
pos[0] - self.offset_x - self.hintwidget_width
)
if mag_top >= image_top:
self._hintwidget_y = pos[1] - self.offset_y - self.hintwidget_height
elif mag_bottom <= image_bottom:
self._hintwidget_y = pos[1] + self.offset_y
else:
if self.hintwidget_pos in ["tr", "tl"]:
self._hintwidget_y = pos[1] + self.offset_y
elif self.hintwidget_pos in ["bl", "br"]:
self._hintwidget_y = (
pos[1] - self.offset_y - self.hintwidget_height
)
# ===============
if pos[0] > image_right:
self._hintwidget_x = image_right - space_x
if pos[0] < image_left:
self._hintwidget_x = image_left + self.offset_x
if pos[1] > image_top:
self._hintwidget_y = image_top - space_y
if pos[1] < image_bottom:
self._hintwidget_y = image_bottom + self.offset_y
def _change_opacity(self, opacity):
if not self._opac_anim_started:
anim = Animation(
_hintwidget_alpha=opacity,
duration=self.opacity_duration,
t=self.transition,
)
anim.start(self)
self._opac_anim_started = True
Clock.schedule_once(
lambda x: self._allow_opac_animation(), self.opacity_duration
)
def _allow_opac_animation(self):
self._opac_anim_started = False
    def on_touch_down(self, touch):
        pos = touch.pos
        opac = None  # stays None when this touch requires no opacity change
        if self.show_mode == "touch" and self.collide_point(pos[0], pos[1]):
            if self._state == "open" and not self.ids._float_box.collide_point(
                pos[0], pos[1]
            ):
                opac = 0
                self._state = "close"
            elif touch.button == self.open_button:
                if not self.ids._float_box.collide_point(pos[0], pos[1]):
                    self._set_hintwidget_pos(pos)
                    opac = 1
                    self._state = "open"
            else:
                return super().on_touch_down(touch)
            if opac is not None:
                Clock.schedule_once(
                    lambda x: self._change_opacity(opac), self.show_delay
                )
        return super().on_touch_down(touch)
def on_touch_up(self, touch):
pos = touch.pos
if self.show_mode == "touch":
if not self.auto_dismiss and self._state == "open":
opac = 1
else:
opac = 0
self._state = "close"
Clock.schedule_once(
lambda x: self._change_opacity(opac), self.opacity_duration
)
return super().on_touch_up(touch)
def on_touch_move(self, touch):
pos = touch.pos
if self.show_mode == "touch":
self._set_hintwidget_pos(pos)
return super().on_touch_move(touch)
def add_widget(self, widget, index=0, canvas=None):
if issubclass(widget.__class__, AKHintWidgetItem):
self.ids._float_box.add_widget(widget)
else:
super().add_widget(widget, index=index, canvas=canvas)
|
[
"quitegreensky@gmail.com"
] |
quitegreensky@gmail.com
|
5791643f8785ad34b90e8a376555f5e409d40fb1
|
facbdbdadacd23f6c83d266116dc14744741070f
|
/Core_Python/Day-8/Part-2 Loops/22.py
|
fbe299c4b5770cba90a8d6c96f2804bda69c3b43
|
[] |
no_license
|
Yogesh-Singh-Gadwal/YSG_Python
|
51b6b53fe34567bf066b6e487c00da766b47ac6b
|
f0d6841e1f92d1d2b27d8ecdd332d40b49a5ca69
|
refs/heads/master
| 2023-06-06T04:40:12.004713
| 2021-07-06T19:59:26
| 2021-07-06T19:59:26
| 292,482,586
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
# Loops
# While
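# Note: the else block of a while loop runs only when the loop ends without break.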
a = 0
while a < 10:
a += 1
if a > 7:
break
print('Micky ',a)
else:
print('Complete Loop Executes')
print('End Loop')
|
[
"noreply@github.com"
] |
Yogesh-Singh-Gadwal.noreply@github.com
|
c0ba75e7f47fa1c382e6c8d5e91fb1dc9615395f
|
4ec4bc10fb1fd02e56eb1763bde624a47e9b7f9e
|
/sf_dd/model/distracted_driver_configer.py
|
e5791d7eca1a61e28335e3d3ba5299853df5dde1
|
[] |
no_license
|
cooperoelrichs/gpml
|
d4d1682a7074964dfa749af3bcbe2aef16aaafbb
|
fd62908f86d66e03668d9c58850133d0c3af80de
|
refs/heads/master
| 2020-12-12T00:41:59.494739
| 2016-05-13T06:53:54
| 2016-05-13T06:53:54
| 53,009,310
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 912
|
py
|
from gpml.model import configer
def from_json(config_file, project_dir):
return configer.from_json(
DistractedDriverConfiger, config_file, project_dir)
class DistractedDriverConfiger(configer.ConfigerBase):
def __init__(self, config, project_dir):
super().__init__(config, project_dir)
self.nb_classes = config['nb_classes']
self.class_names = config['class_names']
self.driver_imgs_list = self.data_dir + config['driver_imgs_list']
self.sample_submission = self.data_dir + config['sample_submission']
self.image_dirs = self.add_dir_to_names(
config['image_dirs'], self.data_dir)
self.image_size = tuple(config['image_size'])
self.data_sets = self.add_dir_to_names(
config['data_sets'], self.data_dir)
self.image_lists = self.add_dir_to_names(
config['image_lists'], self.data_dir)
|
[
"c.oelrichs@gmail.com"
] |
c.oelrichs@gmail.com
|
c1d3a1b216b74fd355d314fbf664e2c9a2b0caaa
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_223/ch62_2020_04_27_13_36_33_208956.py
|
31d6e4a06a8eed9bb438bef4c0f69df2a33d789d
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
def pos_arroba(email):
    # Return the position of the first "@" in email, or -1 if there is none.
    i = 0
    posicao = -1
    while i < len(email):
        if email[i] == "@":
            posicao = i
            break
        i += 1
    return posicao
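# Note: the built-in email.find("@") returns the same index (or -1 when absent).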
|
[
"you@example.com"
] |
you@example.com
|
1f9cdefda207a3dbeecf4e0813a91032280775f0
|
6a95112805b64322953429270a305d01fef3faea
|
/dist/weewx-3.6.2/bin/weewx/drivers/wmr200.py
|
6cb1c880130abbd47e120120c8f7422c96789c2f
|
[
"GPL-1.0-or-later",
"GPL-3.0-only",
"Apache-2.0"
] |
permissive
|
tomdotorg/docker-weewx
|
c6d59dc492a9e53f3bc898f7b9f593717092d72c
|
7085654f455d39b06acc688738fde27e1f78ad1e
|
refs/heads/main
| 2023-06-08T17:57:44.184399
| 2023-01-30T11:21:23
| 2023-01-30T11:21:23
| 54,113,384
| 21
| 16
|
Apache-2.0
| 2022-10-19T23:46:26
| 2016-03-17T11:39:29
|
Dockerfile
|
UTF-8
|
Python
| false
| false
| 78,327
|
py
|
#
# Copyright (c) 2013 Chris Manton <cmanton@gmail.com> www.onesockoff.org
# See the file LICENSE.txt for your full rights.
#
# Special recognition to Lars de Bruin <l...@larsdebruin.net> for contributing
# packet decoding code.
#
# pylint parameters
# suppress global variable warnings
# pylint: disable-msg=W0603
# suppress weewx driver methods not implemented
# pylint: disable-msg=W0223
# suppress weewx driver methods non-conforming name
# pylint: disable-msg=C0103
# suppress too many lines in module
# pylint: disable-msg=C0302
# suppress too many instance attributes
# pylint: disable-msg=R0902
# suppress too many public methods
# pylint: disable-msg=R0904
# suppress too many statements
# pylint: disable-msg=R0915
# suppress unused arguments e.g. loader(...,engine)
# pylint: disable-msg=W0613
"""Classes and functions to interfacing with an Oregon Scientific WMR200 station
Oregon Scientific
http://us.oregonscientific.com/ulimages/manuals2/WMR200.pdf
Bronberg Weather Station
For a pretty good summary of what's in these packets see
http://www.bashewa.com/wmr200-protocol.php
"""
import select
import socket
import syslog
import threading
import time
import usb
import weewx.drivers
import weeutil.weeutil
DRIVER_NAME = 'WMR200'
DRIVER_VERSION = "3.1.1"
def loader(config_dict, engine): # @UnusedVariable
return WMR200(**config_dict[DRIVER_NAME])
def confeditor_loader():
return WMR200ConfEditor()
# General decoding sensor maps.
WIND_DIR_MAP = {0: 'N', 1: 'NNE', 2: 'NE', 3: 'ENE',
4: 'E', 5: 'ESE', 6: 'SE', 7: 'SSE',
8: 'S', 9: 'SSW', 10: 'SW', 11: 'WSW',
12: 'W', 13: 'WNW', 14: 'NW', 15: 'NNW'}
FORECAST_MAP = {0: 'Partly Cloudy', 1: 'Rainy', 2: 'Cloudy',
3: 'Sunny', 4: 'Clear Night', 5: 'Snowy',
6: 'Partly Cloudy Night', 7: 'Unknown7'}
TRENDS = {0: 'Stable', 1: 'Rising', 2: 'Falling', 3: 'Undefined'}
# Size of USB frame to read from weather console.
_WMR200_USB_FRAME_SIZE = 8
# Time to sleep in seconds between querying usb device thread
# for data. This should be non-zero and reduces load on the machine.
_WMR200_USB_POLL_INTERVAL = 1
# Time interval in secs to send data to the wmr200 to request live data.
_WMR200_REQUEST_LIVE_DATA_INTERVAL = 30
# Time in secs to block and wait for data from the weather console device.
# Related to time to request live data.
_WMR200_USB_READ_DATA_INTERVAL = _WMR200_REQUEST_LIVE_DATA_INTERVAL / 2
# Time in ms to wait for USB reset to complete.
_WMR200_USB_RESET_TIMEOUT = 1000
# Guessed wmr200 protocol max packet size in bytes.
# This is only a screen to differentiate between good and
# bad packets.
_WMR200_MAX_PACKET_SIZE = 0x80
# Driver name.
_WMR200_DRIVER_NAME = 'wmr200'
# weewx configurable flags for enabling/disabling debug verbosity.
# Prints processed packets with context from console.
DEBUG_PACKETS_COOKED = 0
# Prints raw pre-processed packets from console.
DEBUG_PACKETS_RAW = 0
# Prints respective packets individually.
DEBUG_PACKETS_ARCHIVE = 0
DEBUG_PACKETS_PRESSURE = 0
DEBUG_PACKETS_RAIN = 0
DEBUG_PACKETS_STATUS = 0
DEBUG_PACKETS_TEMP = 0
DEBUG_PACKETS_UVI = 0
DEBUG_PACKETS_WIND = 0
# Print communication messages
DEBUG_COMM = 0
# Print weather station configuration.
DEBUG_CONFIG_DATA = 0
# Print all writes to weather console.
DEBUG_WRITES = 0
DEBUG_READS = 0
DEBUG_CHECKSUM = 0
def logmsg(dst, msg):
"""Base syslog helper"""
syslog.syslog(dst, ('%s: %s: %s' %
(_WMR200_DRIVER_NAME,
threading.currentThread().getName(), msg)))
def logdbg(msg):
"""Debug syslog helper"""
logmsg(syslog.LOG_DEBUG, 'D ' + msg)
def loginf(msg):
"""Info syslog helper"""
logmsg(syslog.LOG_INFO, 'I ' + msg)
def logwar(msg):
"""Warning syslog helper"""
logmsg(syslog.LOG_WARNING, 'W ' + msg)
def logerr(msg):
"""Error syslog helper"""
logmsg(syslog.LOG_ERR, 'E ' + msg)
def logcrt(msg):
"""Critical syslog helper"""
logmsg(syslog.LOG_CRIT, 'C ' + msg)
class WMR200PacketParsingError(Exception):
"""A driver handled recoverable packet parsing error condition."""
def __init__(self, msg):
super(WMR200PacketParsingError, self).__init__()
self._msg = msg
@property
def msg(self):
"""Exception message to be logged to console."""
return self._msg
class WMR200ProtocolError(weewx.WeeWxIOError):
"""Used to signal a protocol error condition"""
def __init__(self, msg):
super(WMR200ProtocolError, self).__init__()
self._msg = msg
logerr(msg)
class UsbDevice(object):
"""General class to handles all access to device via USB bus."""
def __init__(self):
# Polling read timeout.
self.timeout_read = _WMR200_USB_READ_DATA_INTERVAL
# USB device used for libusb
self.dev = None
# Holds device handle for access
self.handle = None
# debug byte count
self.byte_cnt_rd = 0
self.byte_cnt_wr = 0
# default to a sane endpoint
self.in_endpoint = usb.ENDPOINT_IN + 1
# only one interface
self.interface = 0
def find_device(self, vendor_id, product_id):
"""Find the given vendor and product IDs on the USB bus
Returns: True if specified device was found, otherwise false. """
for bus in usb.busses():
for dev in bus.devices:
if dev.idVendor == vendor_id \
and dev.idProduct == product_id:
self.dev = dev
return True
return False
def open_device(self):
"""Opens a USB device and get a handle to read and write.
A specific device must have been found."""
try:
self.handle = self.dev.open()
except usb.USBError, exception:
logcrt(('open_device() Unable to open USB interface.'
' Reason: %s' % exception))
raise weewx.WakeupError(exception)
except AttributeError, exception:
logcrt('open_device() Device not specified.')
raise weewx.WakeupError(exception)
# Detach any old claimed interfaces
try:
self.handle.detachKernelDriver(self.interface)
except usb.USBError:
pass
try:
self.handle.claimInterface(self.interface)
except usb.USBError, exception:
logcrt(('open_device() Unable to'
' claim USB interface. Reason: %s' % exception))
raise weewx.WakeupError(exception)
def close_device(self):
"""Close a device for access.
NOTE(CMM) There is no busses[].devices[].close() so under linux the
file descriptor will remain open for the life of the process.
        An OS independent mechanism is required so 'lsof' and friends will
not be cross platform."""
try:
self.handle.releaseInterface()
except usb.USBError, exception:
logcrt('close_device() Unable to'
' release device interface. Reason: %s' % exception)
def read_device(self):
"""Read a stream of data bytes from the device.
Returns a list of valid protocol bytes from the device.
The first byte indicates the number of valid bytes following
the first byte that are valid protocol bytes. Only the valid
protocol bytes are returned. """
if not self.handle:
msg = 'read_device() No USB handle for usb_device Read'
logerr(msg)
raise weewx.WeeWxIOError(msg)
report = None
try:
report = self.handle.interruptRead(self.in_endpoint,
_WMR200_USB_FRAME_SIZE,
int(self.timeout_read) * 1000)
# I think this value indicates that the buffer has overflowed.
if report[0] == 8:
msg = 'USB read_device overflow error'
logerr(msg)
raise weewx.WeeWxIOError(msg)
self.byte_cnt_rd += len(report)
# The first byte is the size of valid data following.
# We only want to return the valid data.
if DEBUG_READS:
buf = ''
for byte in report[1:report[0]+1]:
buf += '%02x ' % byte
logdbg('read_device(): %s' % buf)
return report[1:report[0] + 1]
except IndexError, e:
# This indicates we failed an index range above.
            logerr('read_device() Failed the index range %s: %s' % (report, e))
except usb.USBError, ex:
# No data presented on the bus. This is a normal part of
# the process that indicates that the current live records
# have been exhausted. We have to send a heartbeat command
# to tell the weather console to start streaming live data
# again.
errmsg = repr(ex)
if not ('No data available' in errmsg):
msg = 'read_device() USB Error Reason:%s' % ex
logerr(msg)
raise weewx.WeeWxIOError(msg)
else:
# No data avail...not an error but probably ok.
logdbg(('No data received in'
' %d seconds' % int(self.timeout_read)))
return []
def write_device(self, buf):
"""Writes a command packet to the device."""
# Unclear how to create this number, but is the wValue portion
# of the set_configuration() specified in the USB spec.
value = 0x00000220
if not self.handle:
msg = 'No USB handle for usb_device Write'
logerr(msg)
raise weewx.WeeWxIOError(msg)
try:
if DEBUG_WRITES:
logdbg('write_device(): %s' % buf)
self.byte_cnt_wr += len(buf)
self.handle.controlMsg(
usb.TYPE_CLASS + usb.RECIP_INTERFACE, # requestType
0x0000009, # request
buf,
value, # value
0x0000000, # index
_WMR200_USB_RESET_TIMEOUT) # timeout
except usb.USBError, exception:
msg = ('write_device() Unable to'
' send USB control message %s' % exception)
logerr(msg)
# Convert to a Weewx error:
raise weewx.WeeWxIOError(exception)
class Packet(object):
"""Top level class for all WMR200 packets.
All wmr200 packets inherit from this class. The process() method
is used to provide useful data to the weewx engine. Some packets
require special processing due to discontinuities in the wmr200
protocol."""
pkt_cmd = 0
pkt_name = 'AbstractPacket'
pkt_len = 0
pkt_id = 0
def __init__(self, wmr200):
"""Initialize base elements of the packet parser."""
# Keep reference to the wmr200 for any special considerations
# or options.
self.wmr200 = wmr200
# Accumulated raw byte data from console.
self._pkt_data = []
# Record dictionary to pass to weewx engine.
self._record = {}
# Add the command byte as the first field
self.append_data(self.pkt_cmd)
# Packet identifier
Packet.pkt_id += 1
self.pkt_id = Packet.pkt_id
def append_data(self, char):
"""Appends new data to packet buffer.
Verifies that the size is a reasonable value.
        Upon startup or at other times we may get out
        of sync with the weather console."""
self._pkt_data.append(char)
if len(self._pkt_data) == 2 and \
self._pkt_data[1] > _WMR200_MAX_PACKET_SIZE:
raise weewx.WeeWxIOError('Max packet size exceeded')
def size_actual(self):
"""Size of bytes of data in packet received from console."""
return len(self._pkt_data)
def size_expected(self):
"""Expected size of packet from packet protocol field."""
try:
return self._pkt_data[1]
except IndexError:
logerr('Failed to extract size from packet')
return 0
def packet_complete(self):
"""Determines if packet is complete and ready for weewx engine
processing.
This method assumes the packet is at least 2 bytes long"""
if self.size_actual() < 2:
return False
return self.size_actual() == self.size_expected()
def packet_process(self):
"""Process the raw data and creates a record field."""
# Convention is that this driver only works in metric units.
self._record.update({'usUnits': weewx.METRIC})
if DEBUG_PACKETS_RAW or DEBUG_PACKETS_COOKED:
logdbg('Processing %s' % self.pkt_name)
if self.pkt_len and self.pkt_len != self.size_actual():
logwar(('Unexpected packet size act:%d exp:%d' %
(self.size_actual(), self.pkt_len)))
# If applicable calculate time drift between packet and host.
self.calc_time_drift()
def packet_record(self):
"""Returns the dictionary of processed records for this packet."""
return self._record
def record_get(self, key):
"""Returns the record indexed by the key."""
try:
return self._record[key]
except KeyError:
logerr('Record get key not found in record key:%s' % key)
def record_set(self, key, val):
"""Sets the record indexed by the key."""
try:
self._record[key] = val
except KeyError:
logerr('Record set key not found in record key:%s val:%s'
% (key, val))
def record_update(self, record):
"""Updates record dictionary with additional dictionary."""
try:
self._record.update(record)
except (TypeError, KeyError):
logerr('Record update failed to apply record:%s' % record)
def _checksum_calculate(self):
"""Returns the calculated checksum of the current packet.
        If the entire packet has not been received, this will simply
        return the checksum of whatever data values exist in the packet."""
try:
cksum = 0
# Checksum is last two bytes in packet.
for byte in self._pkt_data[:-2]:
cksum += byte
return cksum
except IndexError:
msg = 'Packet too small to compute 16 bit checksum'
raise WMR200ProtocolError(msg)
def _checksum_field(self):
"""Returns the checksum field of the current packet.
        If the entire packet has not been received, this will simply
        return the last two bytes, which are unlikely to be checksum values."""
try:
return (self._pkt_data[-1] << 8) | self._pkt_data[-2]
except IndexError:
msg = 'Packet too small to contain 16 bit checksum'
raise WMR200ProtocolError(msg)
def verify_checksum(self):
"""Verifies packet for checksum correctness.
Raises exception upon checksum failure unless configured to drop."""
if self._checksum_calculate() != self._checksum_field():
msg = ('Checksum miscompare act:0x%04x exp:0x%04x' %
(self._checksum_calculate(), self._checksum_field()))
logerr(self.to_string_raw('%s packet:' % msg))
if self.wmr200.ignore_checksum:
raise WMR200PacketParsingError(msg)
raise weewx.CRCError(msg)
# Debug test to force checksum recovery testing.
if DEBUG_CHECKSUM and (self.pkt_id % DEBUG_CHECKSUM) == 0:
raise weewx.CRCError('Debug forced checksum error')
@staticmethod
def timestamp_host():
"""Returns the host epoch timestamp"""
return int(time.time() + 0.5)
def timestamp_record(self):
"""Returns the epoch timestamp in the record."""
try:
return self._record['dateTime']
except KeyError:
msg = 'timestamp_record() Timestamp not set in record'
logerr(msg)
raise weewx.ViolatedPrecondition(msg)
def _timestamp_packet(self, pkt_data):
"""Pulls the epoch timestamp from the packet."""
try:
minute = pkt_data[0]
hour = pkt_data[1]
day = pkt_data[2]
month = pkt_data[3]
year = 2000 + pkt_data[4]
return time.mktime((year, month, day, hour, minute,
0, -1, -1, -1))
except IndexError:
msg = ('Packet length too short to get timestamp len:%d'
% len(self._pkt_data))
raise WMR200ProtocolError(msg)
except (OverflowError, ValueError), exception:
msg = ('Packet timestamp with bogus fields min:%d hr:%d day:%d'
' m:%d y:%d %s' % (pkt_data[0], pkt_data[1],
pkt_data[2], pkt_data[3], pkt_data[4], exception))
raise WMR200PacketParsingError(msg)
def timestamp_packet(self):
"""Pulls the epoch timestamp from the packet.
        Must only be called by packets that have timestamps in the
        protocol packet."""
return self._timestamp_packet(self._pkt_data[2:7])
def calc_time_drift(self):
"""Calculate time drift between host and packet
Not all packets have a live timestamp so must be implemented
by the packet type."""
pass
def to_string_raw(self, out=''):
"""Returns raw string of this packet appended to optional
input string"""
for byte in self._pkt_data:
out += '%02x ' % byte
return out
def print_cooked(self):
"""Debug method method to print the processed packet.
Must be called after the Process() method."""
try:
out = ' Packet cooked: '
out += 'id:%d ' % self.pkt_id
out += '%s ' % self.pkt_name
out += '%s ' % weeutil.weeutil.timestamp_to_string(
self.timestamp_record())
out += 'len:%d ' % self.size_actual()
out += 'fields:%d ' % len(self._record)
out += str(self._record)
logdbg(out)
except KeyError:
msg = 'print_cooked() called before proper setup'
logerr(msg)
raise weewx.ViolatedPrecondition(msg)
class PacketLive(Packet):
"""Packets with live sensor data from console."""
# Number of live packets received from console.
pkt_rx = 0
# Queue of processed packets to be delivered to weewx.
pkt_queue = []
def __init__(self, wmr200):
super(PacketLive, self).__init__(wmr200)
PacketLive.pkt_rx += 1
@staticmethod
def packet_live_data():
"""Yield live data packets to interface on the weewx engine."""
return True
@staticmethod
def packet_archive_data():
"""Yield archived data packets to interface on the weewx engine."""
return False
def packet_process(self):
"""Returns a records field to be processed by the weewx engine."""
super(PacketLive, self).packet_process()
self._record.update({'dateTime': self.timestamp_live(), })
def calc_time_drift(self):
"""Returns the difference between PC time and the packet timestamp.
This value is approximate as all timestamps from a given archive
interval will be the same while PC time marches onwards.
Only done once upon first live packet received."""
if self.wmr200.time_drift is None:
self.wmr200.time_drift = self.timestamp_host() \
- self.timestamp_packet()
loginf('Time drift between host and console in seconds:%d' %
self.wmr200.time_drift)
def timestamp_live(self):
"""Returns the timestamp from a live packet.
Caches the last live timestamp to add to packets that do
not provide timestamps."""
if self.wmr200.use_pc_time:
self.wmr200.last_time_epoch = self.timestamp_host()
else:
self.wmr200.last_time_epoch = self.timestamp_packet()
return self.wmr200.last_time_epoch
class PacketArchive(Packet):
"""Packets with archived sensor data from console."""
# Number of archive packets received from console.
pkt_rx = 0
# Queue of processed packets to be delivered to weewx.
pkt_queue = []
def __init__(self, wmr200):
super(PacketArchive, self).__init__(wmr200)
PacketArchive.pkt_rx += 1
@staticmethod
def packet_live_data():
"""Yield live data packets to interface on the weewx engine."""
return False
@staticmethod
def packet_archive_data():
"""Yield archived data packets to interface on the weewx engine."""
return True
def packet_process(self):
"""Returns a records field to be processed by the weewx engine."""
super(PacketArchive, self).packet_process()
        # If the timestamp needs adjusting because PC time is in use, we will
        # do it later.
self._record.update({'dateTime': self.timestamp_packet(), })
# Archive packets have extra field indicating interval time.
self._record.update({'interval':
int(self.wmr200.archive_interval / 60.0), })
def timestamp_adjust_drift(self):
"""Archive records may need time adjustment when using PC time."""
try:
loginf(('Using pc time adjusting archive record time by %d sec'
' %s => %s' % (self.wmr200.time_drift,
weeutil.weeutil.timestamp_to_string\
(self.timestamp_record()),
weeutil.weeutil.timestamp_to_string\
(self.timestamp_record()
+ int(self.wmr200.time_drift)))))
self._record['dateTime'] += int(self.wmr200.time_drift)
except TypeError:
logerr('timestamp_adjust_drift() called with invalid time drift')
class PacketControl(Packet):
"""Packets with protocol control info from console."""
# Number of control packets received from console.
pkt_rx = 0
def __init__(self, wmr200):
super(PacketControl, self).__init__(wmr200)
PacketControl.pkt_rx += 1
@staticmethod
def packet_live_data():
"""Yield live data packets to interface on the weewx engine."""
return False
@staticmethod
def packet_archive_data():
"""Yield archived data packets to interface on the weewx engine."""
return False
def size_expected(self):
"""Control packets do not have length field and are only one byte."""
return 1
def verify_checksum(self):
"""This packet does not have a checksum."""
pass
def packet_complete(self):
"""Determines if packet is complete and ready for weewx engine
processing."""
if self.size_actual() == 1:
return True
return False
def packet_process(self):
"""Returns a records field to be processed by the weewx engine.
This packet isn't really passed up to weewx but is assigned a
timestamp for completeness."""
self._record.update({'dateTime': self.timestamp_host(), })
def print_cooked(self):
"""Print the processed packet.
This packet consists of a single byte and thus not much to print."""
out = ' Packet cooked: '
out += '%s ' % self.pkt_name
logdbg(out)
class PacketArchiveReady(PacketControl):
"""Packet parser for control command acknowledge."""
pkt_cmd = 0xd1
pkt_name = 'CmdAck'
pkt_len = 1
def __init__(self, wmr200):
super(PacketArchiveReady, self).__init__(wmr200)
def packet_process(self):
"""Returns a records field to be processed by the weewx engine."""
super(PacketArchiveReady, self).packet_process()
# Immediately request to the console a command to send archived data.
self.wmr200.request_archive_data()
class PacketArchiveData(PacketArchive):
"""Packet parser for archived data."""
pkt_cmd = 0xd2
pkt_name = 'Archive Data'
# Initial console rain total value since 2007-1-1.
rain_total_last = None
def __init__(self, wmr200):
super(PacketArchiveData, self).__init__(wmr200)
def packet_process(self):
"""Returns a records field to be processed by the weewx engine."""
super(PacketArchiveData, self).packet_process()
try:
self._record.update(decode_rain(self, self._pkt_data[ 7:20]))
self._record.update(decode_wind(self, self._pkt_data[20:27]))
self._record.update(decode_uvi(self, self._pkt_data[27:28]))
self._record.update(decode_pressure(self, self._pkt_data[28:32]))
# Number of sensors starting at zero inclusive.
num_sensors = self._pkt_data[32]
for i in xrange(0, num_sensors+1):
base = 33 + i*7
self._record.update(decode_temp(self,
self._pkt_data[base:base+7]))
except IndexError:
msg = ('%s decode index failure' % self.pkt_name)
raise WMR200ProtocolError(msg)
# Tell wmr200 console we have processed it and can handle more.
self.wmr200.request_archive_data()
if DEBUG_PACKETS_ARCHIVE:
logdbg(' Archive packet num_temp_sensors:%d' % num_sensors)
def timestamp_last_rain(self):
"""Pulls the epoch timestamp from the packet.
        Returns the epoch time since last accumulated rainfall."""
return self._timestamp_packet(self._pkt_data[15:20])
def decode_wind(pkt, pkt_data):
"""Decode the wind portion of a wmr200 packet."""
try:
# Low byte of gust speed in 0.1 m/s.
gust_speed = ((((pkt_data[3]) & 0x0f) << 8)
| pkt_data[2]) / 10.0
# High nibble is low nibble of average speed.
# Low nibble of high byte and high nibble of low byte
# of average speed. Value is in 0.1 m/s
avg_speed = ((pkt_data[3] >> 4)
| ((pkt_data[4] << 4))) / 10.0
# Wind direction in steps of 22.5 degrees.
# 0 is N, 1 is NNE and so on. See WIND_DIR_MAP for complete list.
# Default to none unless speed is above zero.
dir_deg = None
if avg_speed > 0.0:
dir_deg = (pkt_data[0] & 0x0f) * 22.5
# Windchill temperature. The value is in degrees F.
# Set default to no windchill as it may not exist.
# Convert to metric for weewx presentation.
windchill = None
if pkt_data[6] != 0x20:
if pkt_data[6] & 0x10:
# Think it's a flag of some sort
pass
elif pkt_data[6] != 0x80:
windchill = (((pkt_data[6] << 8) | pkt_data[5]) - 320) \
* (5.0 / 90.0)
elif pkt_data[6] & 0x80:
windchill = ((((pkt_data[5]) * -1) - 320) * (5.0/90.0))
# The console returns wind speeds in m/s. weewx requires
# kph, so the speeds needs to be converted.
record = {'windSpeed' : avg_speed * 3.60,
'windGust' : gust_speed * 3.60,
'windDir' : dir_deg,
'windchill' : windchill,
}
# Sometimes the station emits a wind gust that is less than the
# average wind. weewx requires kph, so the result needs to be
# converted.
if gust_speed < avg_speed:
record['windGust'] = None
record['windGustDir'] = None
else:
# use the regular wind direction for the gust direction
record['windGustDir'] = record['windDir']
if DEBUG_PACKETS_WIND:
logdbg(' Wind Dir: %s' % (WIND_DIR_MAP[pkt_data[0] & 0x0f]))
logdbg(' Gust: %.1f m/s Wind:%.1f m/s' % (gust_speed, avg_speed))
if windchill is not None:
logdbg(' Windchill: %.1f C' % (windchill))
return record
except IndexError:
msg = ('%s decode index failure' % pkt.pkt_name)
raise WMR200ProtocolError(msg)
class PacketWind(PacketLive):
"""Packet parser for wind."""
pkt_cmd = 0xd3
pkt_name = 'Wind'
pkt_len = 0x10
def __init__(self, wmr200):
super(PacketWind, self).__init__(wmr200)
def packet_process(self):
"""Decode a wind packet. Wind speed will be in kph
Returns a packet that can be processed by the weewx engine."""
super(PacketWind, self).packet_process()
self._record.update(decode_wind(self, self._pkt_data[7:14]))
def decode_rain(pkt, pkt_data):
"""Decode the rain portion of a wmr200 packet."""
try:
# Bytes 0 and 1: high and low byte encode the current rainfall rate
# in 0.01 in/h. Convert into metric.
rain_rate = (((pkt_data[1] & 0x0f) << 8) | pkt_data[0]) / 100.0 * 2.54
# Bytes 2 and 3: high and low byte encode rain of the last hour in 0.01in
# Convert into metric.
rain_hour = ((pkt_data[3] << 8) | pkt_data[2]) / 100.0 * 2.54
# Bytes 4 and 5: high and low byte encode rain of the last 24 hours,
# excluding the current hour, in 0.01in
# Convert into metric.
rain_day = ((pkt_data[5] << 8) | pkt_data[4]) / 100.0 * 2.54
# Bytes 6 and 7: high and low byte encode the total rainfall in 0.01in.
# Convert into metric.
rain_total = ((pkt_data[7] << 8) | pkt_data[6]) / 100.0 * 2.54
record = {'rainRate' : rain_rate,
'hourRain' : rain_hour,
'rain24' : rain_day + rain_hour,
'totalRain' : rain_total}
if DEBUG_PACKETS_RAIN:
try:
formatted = ["0x%02x" % x for x in pkt_data]
logdbg(' Rain packets:' + ', '.join(formatted))
logdbg(' Rain rate:%.02f; hour_rain:%.02f; day_rain:%.02f' %
(rain_rate, rain_hour, rain_day))
logdbg(' Total rain_total:%.02f' % (rain_total))
logdbg(' Last rain %s' %
weeutil.weeutil.timestamp_to_string\
(pkt.timestamp_last_rain()))
except Exception:
pass
return record
except IndexError:
msg = ('%s decode index failure' % pkt.pkt_name)
raise WMR200ProtocolError(msg)
def adjust_rain(pkt, packet):
"""Calculate rainfall per poll interval.
Because the WMR does not offer anything like bucket tips, we must
calculate it by looking for the change in total rain.
After driver startup we need to initialize the total rain presented
by the console.
There are two different rain total last values kept. One for archive
data and one for live loop data. They are addressed using a static
variable within the scope of the respective class name."""
record = {}
# Get the total current rain field from the console.
rain_total = pkt.record_get('totalRain')
# Calculate the amount of rain occurring for this interval.
try:
rain_interval = rain_total - packet.rain_total_last
except TypeError:
rain_interval = 0.0
record['rain'] = rain_interval
record['totalRainLast'] = packet.rain_total_last
try:
logdbg(' adjust_rain rain_total:%.02f %s.rain_total_last:%.02f'
' rain_interval:%.02f' % (rain_total, packet.pkt_name,
packet.rain_total_last, rain_interval))
except TypeError:
logdbg(' Initializing %s.rain_total_last to %.02f' %
(packet.pkt_name, rain_total))
packet.rain_total_last = rain_total
return record
class PacketRain(PacketLive):
"""Packet parser for rain."""
pkt_cmd = 0xd4
pkt_name = 'Rain'
pkt_len = 0x16
# Initial console rain total value since 2007-1-1.
rain_total_last = None
def __init__(self, wmr200):
super(PacketRain, self).__init__(wmr200)
def packet_process(self):
"""Returns a packet that can be processed by the weewx engine."""
super(PacketRain, self).packet_process()
self._record.update(decode_rain(self, self._pkt_data[7:20]))
self._record.update(adjust_rain(self, PacketRain))
def timestamp_last_rain(self):
"""Pulls the epoch timestamp from the packet.
        Returns the epoch time since last accumulated rainfall."""
return self._timestamp_packet(self._pkt_data[15:20])
def decode_uvi(pkt, pkt_data):
"""Decode the uvi portion of a wmr200 packet."""
try:
        record = {'UV': pkt_data[0] & 0x0f}  # mask the low nibble of the first byte
if DEBUG_PACKETS_UVI:
logdbg(" UV index:%s\n" % record['UV'])
return record
except IndexError:
msg = ('%s index decode index failure' % pkt.pkt_name)
raise WMR200ProtocolError(msg)
class PacketUvi(PacketLive):
"""Packet parser for ultra violet sensor."""
pkt_cmd = 0xd5
pkt_name = 'UVI'
pkt_len = 0x0a
def __init__(self, wmr200):
super(PacketUvi, self).__init__(wmr200)
def packet_process(self):
"""Returns a packet that can be processed by the weewx engine."""
super(PacketUvi, self).packet_process()
self._record.update(decode_uvi(self, self._pkt_data[7:8]))
def decode_pressure(pkt, pkt_data):
"""Decode the pressure portion of a wmr200 packet."""
try:
# Low byte of pressure. Value is in hPa.
# High nibble is forecast
# Low nibble is high byte of pressure.
# Unfortunately, we do not know if this is MSLP corrected pressure,
# or "gauge" pressure. We will assume the latter.
pressure = float(((pkt_data[1] & 0x0f) << 8) | pkt_data[0])
forecast = (pkt_data[1] >> 4) & 0x7
# Similar to bytes 0 and 1, but altitude corrected
# pressure. Upper nibble of byte 3 is still unknown. Seems to
# be always 3.
altimeter = float(((pkt_data[3] & 0x0f) << 8)
| pkt_data[2])
unknown_nibble = (pkt_data[3] >> 4)
record = {'pressure' : pressure,
'altimeter' : altimeter,
'forecastIcon': forecast}
if DEBUG_PACKETS_PRESSURE:
logdbg(' Forecast: %s' % FORECAST_MAP[forecast])
logdbg(' Raw pressure: %.02f hPa' % pressure)
if unknown_nibble != 3:
logdbg(' Pressure unknown nibble: 0x%x' % unknown_nibble)
logdbg(' Altitude corrected pressure: %.02f hPa console' %
altimeter)
return record
except IndexError:
msg = ('%s index decode index failure' % pkt.pkt_name)
raise WMR200ProtocolError(msg)
class PacketPressure(PacketLive):
"""Packet parser for barometer sensor."""
pkt_cmd = 0xd6
pkt_name = 'Pressure'
pkt_len = 0x0d
def __init__(self, wmr200):
super(PacketPressure, self).__init__(wmr200)
def packet_process(self):
"""Returns a packet that can be processed by the weewx engine."""
super(PacketPressure, self).packet_process()
self._record.update(decode_pressure(self, self._pkt_data[7:11]))
def decode_temp(pkt, pkt_data):
"""Decode the temperature portion of a wmr200 packet."""
try:
record = {}
# The historic data can contain data from multiple sensors. I'm not
# sure if the 0xD7 frames can do too. I've never seen a frame with
# multiple sensors. But historic data bundles data for multiple
# sensors.
# Byte 0: low nibble contains sensor ID. 0 for base station.
sensor_id = pkt_data[0] & 0x0f
# '00 Temp steady
# '01 Temp rising
# '10 Temp falling
temp_trend = (pkt_data[0] >> 6) & 0x3
# '00 Humidity steady
# '01 Humidity rising
# '10 Humidity falling
hum_trend = (pkt_data[0] >> 4) & 0x3
        # Byte 2: high nibble contains the sign indicator,
        # low nibble is the high byte of the temperature.
        # Byte 1: the low byte of the temperature. The value is in 1/10
        # degrees centigrade.
temp = (((pkt_data[2] & 0x0f) << 8) | pkt_data[1]) / 10.0
if pkt_data[2] & 0x80:
temp *= -1
# The humidity in percent.
humidity = pkt_data[3]
# The first high nibble contains the sign indicator.
# The first low nibble is the high byte of the temperature.
# The second byte is low byte of the temperature. The value is in 1/10
# degrees centigrade.
dew_point = (((pkt_data[5] & 0x0f) << 8)
| pkt_data[4]) / 10.0
if pkt_data[5] & 0x80:
dew_point *= -1
# Heat index reported by console.
heat_index = None
if pkt_data[6] != 0:
# For some strange reason it's reported in degF so convert
# to metric.
heat_index = (pkt_data[6] - 32) / (9.0 / 5.0)
if sensor_id == 0:
# Indoor temperature sensor.
record['inTemp'] = temp
record['inHumidity'] = humidity
elif sensor_id == 1:
# Outdoor temperature sensor.
record['outTemp'] = temp
record['outHumidity'] = humidity
record['heatindex'] = heat_index
elif sensor_id >= 2:
# Extra temperature sensors.
# If additional temperature sensors exist (channel>=2), then
# use observation types 'extraTemp1', 'extraTemp2', etc.
record['extraTemp%d' % (sensor_id-1)] = temp
record['extraHumid%d' % (sensor_id-1)] = humidity
if DEBUG_PACKETS_TEMP:
logdbg(' Temperature id:%d %.1f C trend: %s'
% (sensor_id, temp, TRENDS[temp_trend]))
logdbg(' Humidity id:%d %d%% trend: %s'
% (sensor_id, humidity, TRENDS[hum_trend]))
logdbg((' Dew point id:%d: %.1f C' % (sensor_id, dew_point)))
if heat_index is not None:
logdbg(' Heat id:%d index:%d' % (sensor_id, heat_index))
return record
except IndexError:
        msg = ('%s decode index failure' % pkt.pkt_name)
raise WMR200ProtocolError(msg)
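# Illustrative decode (assumed byte values): pkt_data = [0x01, 0xE6, 0x00,
# 0x2D, 0x64, 0x00, 0x00] selects sensor_id 1 (outdoor) and yields
#   temp      = ((0x00 & 0x0f) << 8 | 0xE6) / 10.0 = 23.0 C (sign bit clear)
#   humidity  = 0x2D = 45 %
#   dew_point = ((0x00 & 0x0f) << 8 | 0x64) / 10.0 = 10.0 C
# so the record carries outTemp=23.0, outHumidity=45 and heatindex=None
# (byte 6 is zero).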
class PacketTemperature(PacketLive):
"""Packet parser for temperature and humidity sensor."""
pkt_cmd = 0xd7
pkt_name = 'Temperature'
pkt_len = 0x10
def __init__(self, wmr200):
super(PacketTemperature, self).__init__(wmr200)
def packet_process(self):
"""Returns a packet that can be processed by the weewx engine."""
super(PacketTemperature, self).packet_process()
self._record.update(decode_temp(self, self._pkt_data[7:14]))
# Save the temp record for possible windchill calculation.
self.wmr200.last_temp_record = self._record
class PacketStatus(PacketLive):
"""Packet parser for console sensor status."""
pkt_cmd = 0xd9
pkt_name = 'Status'
pkt_len = 0x08
def __init__(self, wmr200):
super(PacketStatus, self).__init__(wmr200)
def timestamp_live(self):
"""Return timestamp of packet.
This packet does not have a timestamp so we just return the
previous cached timestamp from the last live packet.
Note: If there is no previous cached timestamp then we
return the initial PC timestamp. This would occur quite early
in the driver startup and this time may be quite out of
sequence from the rest of the packets. Another option would be
to simply discard this status packet at this time."""
return self.wmr200.last_time_epoch
def packet_process(self):
"""Returns a packet that can be processed by the weewx engine.
Not all console status aligns with the weewx API but we try
to make it fit."""
super(PacketStatus, self).packet_process()
# Setup defaults as good status.
self._record.update({'outTempFault' : 0,
'windFault' : 0,
'uvFault' : 0,
'rainFault' : 0,
'clockUnsynchronized' : 0,
'outTempBatteryStatus' : 1.0,
'windBatteryStatus' : 1.0,
'uvBatteryStatus' : 1.0,
'rainBatteryStatus' : 1.0,
})
# This information may be sent to syslog
msg_status = []
if self._pkt_data[2] & 0x02:
msg_status.append('Temp outdoor sensor fault')
self._record['outTempFault'] = 1
if self._pkt_data[2] & 0x01:
msg_status.append('Wind sensor fault')
self._record['windFault'] = 1
if self._pkt_data[3] & 0x20:
msg_status.append('UV Sensor fault')
self._record['uvFault'] = 1
if self._pkt_data[3] & 0x10:
msg_status.append('Rain sensor fault')
self._record['rainFault'] = 1
if self._pkt_data[4] & 0x80:
msg_status.append('Clock time unsynchronized')
self._record['clockUnsynchronized'] = 1
if self._pkt_data[4] & 0x02:
msg_status.append('Temp outdoor sensor: Battery low')
self._record['outTempBatteryStatus'] = 0.0
if self._pkt_data[4] & 0x01:
msg_status.append('Wind sensor: Battery low')
self._record['windBatteryStatus'] = 0.0
if self._pkt_data[5] & 0x20:
msg_status.append('UV sensor: Battery low')
self._record['uvBatteryStatus'] = 0.0
if self._pkt_data[5] & 0x10:
msg_status.append('Rain sensor: Battery low')
self._record['rainBatteryStatus'] = 0.0
if self.wmr200.sensor_stat:
while msg_status:
msg = msg_status.pop(0)
logwar(msg)
# Output packet to try to understand other fields.
if DEBUG_PACKETS_STATUS:
logdbg(self.to_string_raw(' Sensor packet:'))
def calc_time_drift(self):
"""Returns the difference between PC time and the packet timestamp.
This packet has no timestamp so cannot be used to calculate."""
pass
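# Illustrative status decode (assumed byte values): a packet with
# _pkt_data[2] = 0x01 and _pkt_data[4] = 0x02 would set windFault=1 and
# outTempBatteryStatus=0.0, logging 'Wind sensor fault' plus
# 'Temp outdoor sensor: Battery low' when sensor_stat is enabled.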
class PacketEraseAcknowledgement(PacketControl):
"""Packet parser for archived data is ready to receive."""
pkt_cmd = 0xdb
pkt_name = 'Erase Acknowledgement'
pkt_len = 0x01
def __init__(self, wmr200):
super(PacketEraseAcknowledgement, self).__init__(wmr200)
class PacketFactory(object):
"""Factory to create proper packet from first command byte from device."""
def __init__(self, *subclass_list):
self.subclass = dict((s.pkt_cmd, s) for s in subclass_list)
self.skipped_bytes = 0
def num_packets(self):
"""Returns the number of packets handled by the factory."""
return len(self.subclass)
def get_packet(self, pkt_cmd, wmr200):
"""Returns a protocol packet instance from initial packet command byte.
Returns None if there was no mapping for the protocol command.
Upon startup we may read partial packets. We need to resync to a
valid packet command from the weather console device if we start
reading in the middle of a previous packet.
We may also get out of sync during operation."""
if pkt_cmd in self.subclass:
if self.skipped_bytes:
logwar(('Skipped bytes before resync:%d' %
self.skipped_bytes))
self.skipped_bytes = 0
return self.subclass[pkt_cmd](wmr200)
self.skipped_bytes += 1
return None
# Packet factory parser for each packet presented by weather console.
PACKET_FACTORY = PacketFactory(
PacketArchiveReady,
PacketArchiveData,
PacketWind,
PacketRain,
PacketPressure,
PacketUvi,
PacketTemperature,
PacketStatus,
PacketEraseAcknowledgement,
)
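# Illustrative factory usage (sketch, not driver code):
# PACKET_FACTORY.get_packet(0xd4, wmr200) returns a fresh PacketRain bound to
# the driver, while an unknown command byte such as 0x42 returns None and
# bumps the skipped-byte counter until a recognized command byte
# resynchronizes the stream.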
# Count of restarts
STAT_RESTART = 0
class RequestLiveData(threading.Thread):
"""Watchdog thread to poke the console requesting live data.
If the console does not receive a request or heartbeat periodically
for live data then it automatically resets into archive mode."""
def __init__(self, kwargs):
super(RequestLiveData, self).__init__()
self.wmr200 = kwargs['wmr200']
self.poke_time = kwargs['poke_time']
self.sock_rd = kwargs['sock_rd']
loginf(('Created watchdog thread to poke for live data every %d'
' seconds') % self.poke_time)
def run(self):
"""Periodically inform the main driver thread to request live data.
When its time to shutdown this thread, the main thread will send any
string across the socket. This both wakes up this timer thread and
also tells it to expire."""
loginf('Started watchdog thread live data')
while True:
self.wmr200.ready_to_poke(True)
main_thread_comm = \
select.select([self.sock_rd], [], [], self.poke_time)
if main_thread_comm[0]:
# Data is ready to read on socket to indicate thread teardown.
buf = self.sock_rd.recv(4096)
loginf('Watchdog received %s' % buf)
break
loginf('Watchdog thread exiting')
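# The wake-up mechanism above in a minimal sketch (illustrative only): any
# write on the peer socket unblocks the select() immediately, otherwise it
# expires after poke_time seconds.
#   sock_rd, sock_wr = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM, 0)
#   readable, _, _ = select.select([sock_rd], [], [], poke_time)
#   if readable:            # main thread wrote, e.g. sock_wr.send('shutdown')
#       sock_rd.recv(4096)  # drain and exit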
class PollUsbDevice(threading.Thread):
"""A thread continually polls for data with blocking read from a device.
Some devices may overflow buffers if not drained within a timely manner.
This thread will read block on the USB port and buffer data from the
device for consumption."""
def __init__(self, kwargs):
super(PollUsbDevice, self).__init__()
self.wmr200 = kwargs['wmr200']
self.usb_device = self.wmr200.usb_device
# Buffer list to read data from weather console
self._buf = []
# Lock to wrap around the buffer
self._lock_poll = threading.Lock()
# Conditional variable to gate thread after reset applied.
# We don't want to read previous data, if any, until a reset
# has been sent.
self._cv_poll = threading.Condition()
# Gates initial entry into reading from device
self._ok_to_read = False
loginf('Created USB polling thread to read block on device')
def run(self):
"""Polling function to block read the USB device.
This method appends new data after previous buffer
data in preparation for reads to the main driver
thread.
Once this thread is started it will be gated by
a reset to the weather console device to sync it
up."""
loginf('USB polling device thread for live data launched')
# Wait for the main thread to indicate it's safe to read.
self._cv_poll.acquire()
while not self._ok_to_read:
self._cv_poll.wait()
self._cv_poll.release()
loginf('USB polling device thread signaled to start')
# Read and discard next data from weather console device.
_ = self.usb_device.read_device()
read_timeout_cnt = 0
read_reset_cnt = 0
# Loop indefinitely until main thread indicates time to expire.
while self.wmr200.poll_usb_device_enable():
try:
buf = self.usb_device.read_device()
if buf:
self._append_usb_device(buf)
read_timeout_cnt = 0
read_reset_cnt = 0
else:
# We timed out here. We should poke the device
# after a read timeout, and also prepare for more
# serious measures.
self.wmr200.ready_to_poke(True)
read_timeout_cnt += 1
# If we don't receive any data from the console
# after several attempts, send down a reset.
if read_timeout_cnt == 4:
self.reset_console()
read_timeout_cnt = 0
read_reset_cnt += 1
# If we have sent several resets with no data,
# give up and abort.
if read_reset_cnt == 2:
msg = ('Device unresponsive after multiple resets')
logerr(msg)
raise weewx.RetriesExceeded(msg)
except:
logerr('USB device read error')
raise
loginf('USB polling device thread exiting')
def _append_usb_device(self, buf):
"""Appends data from USB device to shared buffer.
Called from child thread."""
self._lock_poll.acquire()
# Append the list of bytes to this buffer.
self._buf.append(buf)
self._lock_poll.release()
def read_usb_device(self):
"""Reads the buffered USB device data.
Called from main thread.
Returns a list of bytes."""
buf = []
self._lock_poll.acquire()
if len(self._buf):
buf = self._buf.pop(0)
self._lock_poll.release()
return buf
def flush_usb_device(self):
"""Flush any previous USB device data.
Called from main thread."""
self._lock_poll.acquire()
self._buf = []
self._lock_poll.release()
loginf('Flushed USB device')
def reset_console(self):
"""Send a reset to wake up the weather console device
Called from main thread or child thread."""
buf = [0x20, 0x00, 0x08, 0x01, 0x00, 0x00, 0x00, 0x00]
try:
self.usb_device.write_device(buf)
loginf('Reset console device')
self._ok_to_read = True
time.sleep(1)
except usb.USBError, exception:
            msg = ('reset_console() Unable to send USB control'
                   ' message %s' % exception)
logerr(msg)
# Convert to a Weewx error:
raise weewx.WeeWxIOError(exception)
def notify(self):
"""Gates thread to read of the device.
Called from main thread."""
self._cv_poll.acquire()
self._cv_poll.notify()
self._cv_poll.release()
class WMR200(weewx.drivers.AbstractDevice):
"""Driver for the Oregon Scientific WMR200 station."""
def __init__(self, **stn_dict):
"""Initialize the wmr200 driver.
NAMED ARGUMENTS:
model: Which station model is this? [Optional]
sensor_status: Print sensor faults or failures to syslog. [Optional]
use_pc_time: Use the console timestamp or the Pc. [Optional]
erase_archive: Erase archive upon startup. [Optional]
archive_interval: Time in seconds between intervals [Optional]
archive_threshold: Max time in seconds between valid archive packets [Optional]
ignore_checksum: Ignore checksum failures and drop packet.
archive_startup: Time after startup to await archive data draining.
--- User should not typically change anything below here ---
vendor_id: The USB vendor ID for the WMR [Optional]
        product_id: The USB product ID for the WMR [Optional]
interface: The USB interface [Optional]
in_endpoint: The IN USB endpoint used by the WMR [Optional]
"""
super(WMR200, self).__init__()
## User configurable options
self._model = stn_dict.get('model', 'WMR200')
# Provide sensor faults in syslog.
self._sensor_stat = weeutil.weeutil.tobool(stn_dict.get('sensor_status',
True))
# Use pc timestamps or weather console timestamps.
self._use_pc_time = \
weeutil.weeutil.tobool(stn_dict.get('use_pc_time', True))
# Use archive data when possible.
self._erase_archive = \
weeutil.weeutil.tobool(stn_dict.get('erase_archive', False))
# Archive interval in seconds.
self._archive_interval = int(stn_dict.get('archive_interval', 60))
if self._archive_interval not in [60, 300]:
logwar('Unverified archive interval:%d sec'
% self._archive_interval)
# Archive threshold in seconds between archive packets before dropping.
self._archive_threshold = int(stn_dict.get('archive_threshold',
3600*24*7))
# Ignore checksum errors.
self._ignore_checksum = \
weeutil.weeutil.tobool(stn_dict.get('ignore_checksum', False))
# Archive startup time in seconds.
self._archive_startup = int(stn_dict.get('archive_startup', 120))
# Device specific hardware options.
vendor_id = int(stn_dict.get('vendor_id', '0x0fde'), 0)
product_id = int(stn_dict.get('product_id', '0xca01'), 0)
interface = int(stn_dict.get('interface', 0))
in_endpoint = int(stn_dict.get('IN_endpoint',
usb.ENDPOINT_IN + 1))
# Buffer of bytes read from weather console device.
self._buf = []
# Packet created from the buffer data read from the weather console
# device.
self._pkt = None
# Setup the generator to get a byte stream from the console.
self.gen_byte = self._generate_bytestream
# Calculate time delta in seconds between host and console.
self.time_drift = None
        # Create USB accessor to communicate with weather console device.
self.usb_device = UsbDevice()
# Pass USB parameters to the USB device accessor.
self.usb_device.in_endpoint = in_endpoint
self.usb_device.interface = interface
# Locate the weather console device on the USB bus.
if not self.usb_device.find_device(vendor_id, product_id):
logcrt('Unable to find device with VendorID=%04x ProductID=%04x' %
(vendor_id, product_id))
raise weewx.WeeWxIOError("Unable to find USB device")
# Open the weather console USB device for read and writes.
self.usb_device.open_device()
# Initialize watchdog to poke device to request live
# data stream.
self._rdy_to_poke = True
# Create the lock to sync between main thread and watchdog thread.
self._poke_lock = threading.Lock()
# Create a socket pair to communicate with the watchdog thread.
(self.sock_rd, self.sock_wr) = \
socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM, 0)
# Create the watchdog thread to request live data.
self._thread_watchdog = RequestLiveData(
kwargs = {'wmr200' : self,
'poke_time' : _WMR200_REQUEST_LIVE_DATA_INTERVAL,
'sock_rd' : self.sock_rd})
# Create the usb polling device thread.
self._thread_usb_poll = PollUsbDevice(kwargs={'wmr200': self})
# Start the usb polling device thread.
self._poll_device_enable = True
self._thread_usb_poll.start()
# Send the device a reset
self._thread_usb_poll.reset_console()
self._thread_usb_poll.notify()
# Start the watchdog for live data thread.
self._thread_watchdog.start()
# Not all packets from wmr200 have timestamps, yet weewx requires
        # timestamps on all packets passed up the stack. So we will use the
# timestamp from the most recent packet, but still need to see an
# initial timestamp, so we'll seed this with current PC time.
self.last_time_epoch = int(time.time() + 0.5)
# Restart counter when driver crashes and is restarted by the
# weewx engine.
global STAT_RESTART
STAT_RESTART += 1
if STAT_RESTART > 1:
logwar(('Restart count: %d') % STAT_RESTART)
# Reset any other state during startup or after a crash.
PacketArchiveData.rain_total_last = None
# Debugging flags
global DEBUG_WRITES
DEBUG_WRITES = int(stn_dict.get('debug_writes', 0))
global DEBUG_COMM
DEBUG_COMM = int(stn_dict.get('debug_comm', 0))
global DEBUG_CONFIG_DATA
DEBUG_CONFIG_DATA = int(stn_dict.get('debug_config_data', 1))
global DEBUG_PACKETS_RAW
DEBUG_PACKETS_RAW = int(stn_dict.get('debug_packets_raw', 0))
global DEBUG_PACKETS_COOKED
DEBUG_PACKETS_COOKED = int(stn_dict.get('debug_packets_cooked', 0))
global DEBUG_PACKETS_ARCHIVE
DEBUG_PACKETS_ARCHIVE = int(stn_dict.get('debug_packets_archive', 0))
global DEBUG_PACKETS_TEMP
DEBUG_PACKETS_TEMP = int(stn_dict.get('debug_packets_temp', 0))
global DEBUG_PACKETS_RAIN
DEBUG_PACKETS_RAIN = int(stn_dict.get('debug_packets_rain', 0))
global DEBUG_PACKETS_WIND
DEBUG_PACKETS_WIND = int(stn_dict.get('debug_packets_wind', 0))
global DEBUG_PACKETS_STATUS
DEBUG_PACKETS_STATUS = int(stn_dict.get('debug_packets_status', 0))
global DEBUG_PACKETS_PRESSURE
DEBUG_PACKETS_PRESSURE = int(stn_dict.get('debug_packets_pressure', 0))
global DEBUG_CHECKSUM
DEBUG_CHECKSUM = int(stn_dict.get('debug_checksum', 0))
if DEBUG_CONFIG_DATA:
logdbg('Configuration setup')
logdbg(' Log sensor faults: %s' % self._sensor_stat)
logdbg(' Using PC Time: %s' % self._use_pc_time)
logdbg(' Erase archive data: %s' % self._erase_archive)
logdbg(' Archive interval: %d' % self._archive_interval)
logdbg(' Archive threshold: %d' % self._archive_threshold)
@property
def hardware_name(self):
"""weewx api."""
return self._model
@property
def sensor_stat(self):
"""Return if sensor status is enabled for device."""
return self._sensor_stat
@property
def use_pc_time(self):
"""Flag to use pc time rather than weather console time."""
return self._use_pc_time
@property
def archive_interval(self):
"""weewx api. Time in seconds between archive intervals."""
return self._archive_interval
@property
def ignore_checksum(self):
"""Flag to drop rather than fail on checksum errors."""
return self._ignore_checksum
def ready_to_poke(self, val):
"""Set info that device is ready to be poked."""
self._poke_lock.acquire()
self._rdy_to_poke = val
self._poke_lock.release()
def is_ready_to_poke(self):
"""Get info that device is ready to be poked."""
self._poke_lock.acquire()
val = self._rdy_to_poke
self._poke_lock.release()
return val
def poll_usb_device_enable(self):
"""The USB thread calls this to enable data reads from the console."""
return self._poll_device_enable
def _write_cmd(self, cmd):
"""Writes a single command to the wmr200 console."""
buf = [0x01, cmd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
try:
self.usb_device.write_device(buf)
except usb.USBError, exception:
msg = (('_write_cmd() Unable to send USB cmd:0x%02x control'
' message' % cmd))
logerr(msg)
# Convert to a Weewx error:
raise weewx.WeeWxIOError(exception)
def _poke_console(self):
"""Send a heartbeat command to the weather console.
This is used to inform the weather console to continue streaming
live data across the USB bus. Otherwise it enters archive mode
were data is stored on the weather console."""
self._write_cmd(0xD0)
if self._erase_archive:
self._write_cmd(0xDB)
# Reset the ready to poke flag.
self.ready_to_poke(False)
if DEBUG_COMM:
logdbg('Poked device for live data')
def _generate_bytestream(self):
"""Generator to provide byte stream to packet collector.
We need to return occasionally to handle both reading data
        from the weather console and handling that data."""
while True:
# Read WMR200 protocol bytes from the weather console
            # via a proxy thread that ensures we drain the USB
            # FIFO data from the weather console.
buf = self._thread_usb_poll.read_usb_device()
# Add list of new USB bytes to previous buffer byte
# array, if any.
if buf:
self._buf.extend(buf)
while self._buf:
# Generate one byte at a time.
yield self._buf.pop(0)
            # Bail if there is a lull in data from the weather console.
            # If we don't bail we won't be able to do other processing
            # required to keep the weather console operating,
            # e.g. poking the console to maintain the live data stream.
if not buf and not self._buf:
return
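    # Typical consumption pattern of the generator above (sketch): callers
    # simply iterate and re-enter after a lull, e.g.
    #   for one_byte in self.gen_byte():
    #       ...append one_byte to the packet under construction...
    # as _poll_for_data() below does.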
def _poll_for_data(self):
"""Poll for data from the weather console device.
Read a byte from the weather console. If we are starting
a new packet, get one using that byte from the packet factory.
Otherwise add the byte to the current packet.
        Each USB packet may straddle a protocol packet so make sure
we assign the data appropriately."""
if not self._thread_usb_poll.is_alive():
msg = 'USB polling thread unexpectedly terminated'
logerr(msg)
raise weewx.WeeWxIOError(msg)
for byte in self.gen_byte():
if self._pkt:
self._pkt.append_data(byte)
else:
# This may return None if we are out of sync
# with the console.
self._pkt = PACKET_FACTORY.get_packet(byte, self)
if self._pkt is not None and self._pkt.packet_complete():
# If we have a complete packet then bail to handle it.
return
# Prevent busy loop by suspending process a bit to
# wait for usb read thread to accumulate data from the
# weather console.
time.sleep(_WMR200_USB_POLL_INTERVAL)
def request_archive_data(self):
"""Request archive packets from console."""
self._write_cmd(0xDA)
def print_stats(self):
"""Print summary of driver statistics."""
loginf(('Received packet count live:%d archive:%d'
' control:%d') % (PacketLive.pkt_rx,
PacketArchive.pkt_rx,
PacketControl.pkt_rx))
loginf('Received bytes:%d sent bytes:%d' %
(self.usb_device.byte_cnt_rd,
self.usb_device.byte_cnt_wr))
loginf('Packet archive queue len:%d live queue len:%d'
% (len(PacketArchive.pkt_queue), len(PacketLive.pkt_queue)))
def _process_packet_complete(self):
"""Process a completed packet from the wmr200 console."""
if DEBUG_PACKETS_RAW:
logdbg(self._pkt.to_string_raw('Packet raw:'))
# This will raise exception if checksum fails.
self._pkt.verify_checksum()
try:
# Process the actual packet.
self._pkt.packet_process()
if self._pkt.packet_live_data():
PacketLive.pkt_queue.append(self._pkt)
logdbg(' Queuing live packet rx:%d live_queue_len:%d' %
(PacketLive.pkt_rx, len(PacketLive.pkt_queue)))
elif self._pkt.packet_archive_data():
PacketArchive.pkt_queue.append(self._pkt)
logdbg(' Queuing archive packet rx:%d archive_queue_len:%d'
% (PacketArchive.pkt_rx, len(PacketArchive.pkt_queue)))
else:
logdbg((' Acknowledged control packet'
' rx:%d') % PacketControl.pkt_rx)
except WMR200PacketParsingError, e:
# Drop any bogus packets.
logerr(self._pkt.to_string_raw('Discarding bogus packet: %s '
% e.msg))
# Reset this packet to get ready for next one
self._pkt = None
def genLoopPackets(self):
"""Main generator function that continuously returns loop packets
weewx api to return live records."""
# Reset the current packet upon entry.
self._pkt = None
logdbg('genLoop() phase getting live packets')
while True:
# Loop through indefinitely generating records to the
# weewx engine. This loop may resume at the yield()
# or upon entry during any exception, even an exception
# not generated from this driver. e.g. weewx.service.
if self._pkt is not None and self._pkt.packet_complete():
self._process_packet_complete()
# If it's time to poke the console and we are not
# in the middle of collecting a packet then do it here.
if self.is_ready_to_poke() and self._pkt is None:
self._poke_console()
# Pull data from the weather console.
# This may create a packet or append data to existing packet.
self._poll_for_data()
# Yield any live packets we may have obtained from this callback
# or queued from other driver callback services.
while PacketLive.pkt_queue:
pkt = PacketLive.pkt_queue.pop(0)
if DEBUG_PACKETS_COOKED:
pkt.print_cooked()
logdbg('genLoop() Yielding live queued packet id:%d'
% pkt.pkt_id)
yield pkt.packet_record()
def XXXgenArchiveRecords(self, since_ts=0):
"""A generator function to return archive packets from the wmr200.
weewx api to return archive records.
since_ts: A timestamp in database time. All data since but not
including this time will be returned.
Pass in None for all data
NOTE: This API is disabled so that the weewx engine will default
        to using software archive generation. There may be a way
to use hardware generation if one plays with not poking the console
which would allow archive packets to be created.
yields: a sequence of dictionary records containing the console
data."""
logdbg('genArchive() phase getting archive packets since %s'
% weeutil.weeutil.timestamp_to_string(since_ts))
if self.use_pc_time and self.time_drift is None:
loginf(('genArchive() Unable to process archive packets'
' until live packet received'))
return
while True:
# Loop through indefinitely generating records to the
# weewx engine. This loop may resume at the yield()
# or upon entry during any exception, even an exception
# not generated from this driver. e.g. weewx.service.
if self._pkt is not None and self._pkt.packet_complete():
self._process_packet_complete()
# If it's time to poke the console and we are not
# in the middle of collecting a packet then do it here.
if self.is_ready_to_poke() and self._pkt is None:
self._poke_console()
# Pull data from the weather console.
# This may create a packet or append data to existing packet.
self._poll_for_data()
            # Yield any archive packets we may have obtained from this callback
            # or queued from other driver callback services.
            while PacketArchive.pkt_queue:
                pkt = PacketArchive.pkt_queue.pop(0)
# If we are using PC time we need to adjust the record timestamp
# with the PC drift.
if self.use_pc_time:
pkt.timestamp_adjust_drift()
if DEBUG_PACKETS_COOKED:
pkt.print_cooked()
if pkt.timestamp_record() > since_ts:
logdbg(('genArchive() Yielding received archive record'
' after requested timestamp'))
yield pkt.packet_record()
else:
loginf(('genArchive() Ignoring received archive record'
' before requested timestamp'))
def genStartupRecords(self, since_ts=0):
"""A generator function to present archive packets on start.
weewx api to return archive records."""
logdbg('genStartup() phase getting archive packets since %s'
% weeutil.weeutil.timestamp_to_string(since_ts))
# Reset the current packet upon entry.
self._pkt = None
# Time after last archive packet to indicate there are
# likely no more archive packets left to drain.
timestamp_last_archive_rx = int(time.time() + 0.5)
        # Statistics to calculate time in this phase.
timestamp_packet_first = None
timestamp_packet_current = None
timestamp_packet_previous = None
cnt = 0
# If no previous database this parameter gets passed as None.
# Convert to a numerical value representing start of unix epoch.
if since_ts is None:
loginf('genStartup() Database initialization')
since_ts = 0
while True:
# Loop through indefinitely generating archive records to the
# weewx engine. This loop may resume at the yield()
# or upon entry during any exception, even an exception
# not generated from this driver. e.g. weewx.service.
if self._pkt is not None and self._pkt.packet_complete():
self._process_packet_complete()
# If it's time to poke the console and we are not
# in the middle of collecting a packet then do it here.
if self.is_ready_to_poke() and self._pkt is None:
self._poke_console()
# Pull data from the weather console.
# This may create a packet or append data to existing packet.
self._poll_for_data()
# If we have archive packets in the queue then yield them here.
while PacketArchive.pkt_queue:
timestamp_last_archive_rx = int(time.time() + 0.5)
# Present archive packets
# If PC time is set, we must have at least one
# live packet to calculate timestamps in PC time.
if self.use_pc_time and self.time_drift is None:
loginf(('genStartup() Delaying archive packet processing'
' until live packet received'))
break
loginf(('genStartup() Still receiving archive packets'
' cnt:%d len:%d') % (cnt, len(PacketArchive.pkt_queue)))
pkt = PacketArchive.pkt_queue.pop(0)
# If we are using PC time we need to adjust the
# record timestamp with the PC drift.
if self.use_pc_time:
pkt.timestamp_adjust_drift()
                # Statistics indicating packets sent in this phase.
if timestamp_packet_first is None:
timestamp_packet_first = pkt.timestamp_record()
if timestamp_packet_previous is None:
if since_ts == 0:
timestamp_packet_previous = pkt.timestamp_record()
else:
timestamp_packet_previous = since_ts
timestamp_packet_current = pkt.timestamp_record()
# Calculate time interval between archive packets.
timestamp_packet_interval = timestamp_packet_current \
- timestamp_packet_previous
if pkt.timestamp_record() > (timestamp_packet_previous
+ self._archive_threshold):
loginf(('genStartup() Discarding received archive'
' record exceeding archive interval cnt:%d'
' threshold:%d timestamp:%s')
% (cnt, self._archive_threshold,
weeutil.weeutil.timestamp_to_string\
(pkt.timestamp_record())))
elif pkt.timestamp_record() > since_ts:
# Calculate the rain accumulation between valid archive
# packets.
pkt.record_update(adjust_rain(pkt, PacketArchiveData))
timestamp_packet_previous = timestamp_packet_current
cnt += 1
logdbg(('genStartup() Yielding received archive'
' record cnt:%d after requested timestamp'
':%d pkt_interval:%d pkt:%s')
% (cnt, since_ts, timestamp_packet_interval,
weeutil.weeutil.timestamp_to_string\
(pkt.timestamp_record())))
if DEBUG_PACKETS_COOKED:
pkt.print_cooked()
yield pkt.packet_record()
else:
timestamp_packet_previous = timestamp_packet_current
loginf(('genStartup() Discarding received archive'
' record before time requested cnt:%d'
' timestamp:%s')
% (cnt, weeutil.weeutil.timestamp_to_string\
(since_ts)))
            # Return if we receive no more archive packets within a given
            # time interval.
if (int(time.time() + 0.5) - timestamp_last_archive_rx >
self._archive_startup):
                loginf(('genStartup() phase exiting since it looks like all'
' archive packets have been retrieved after %d'
' sec cnt:%d')
% (self._archive_startup, cnt))
if timestamp_packet_first is not None:
startup_time = timestamp_packet_current \
- timestamp_packet_first
                    loginf(('genStartup() Yielded %d packets in %d sec'
' between these dates %s ==> %s' %
(cnt, startup_time,
weeutil.weeutil.timestamp_to_string\
(timestamp_packet_first),
weeutil.weeutil.timestamp_to_string\
(timestamp_packet_current))))
if startup_time > 0:
loginf(('genStartup() Average packets per minute:%f' %
(cnt/(startup_time/60.0))))
return
def closePort(self):
"""Closes the USB port to the device.
weewx api to shutdown the weather console."""
# Send a command to the wmr200 console indicating
# we are leaving.
self._write_cmd(0xDF)
# Let the polling thread die off.
self._poll_device_enable = False
# Join with the polling thread.
self._thread_usb_poll.join()
if self._thread_usb_poll.is_alive():
logerr('USB polling thread still alive')
else:
loginf('USB polling thread expired')
# Shutdown the watchdog thread.
self.sock_wr.send('shutdown')
# Join with the watchdog thread.
self._thread_watchdog.join()
if self._thread_watchdog.is_alive():
logerr('Watchdog thread still alive')
else:
loginf('Watchdog thread expired')
self.print_stats()
# Indicate if queues have not been drained.
if len(PacketArchive.pkt_queue):
logwar('Exiting with packets still in archive queue cnt:%d' %
len(PacketArchive.pkt_queue))
if len(PacketLive.pkt_queue):
logwar('Exiting with packets still in live queue cnt:%d' %
len(PacketLive.pkt_queue))
        # Shutdown the USB access to the weather console device.
self.usb_device.close_device()
loginf('Driver gracefully exiting')
class WMR200ConfEditor(weewx.drivers.AbstractConfEditor):
@property
def default_stanza(self):
return """
[WMR200]
# This section is for the Oregon Scientific WMR200
# The station model, e.g., WMR200, WMR200A, Radio Shack W200
model = WMR200
# The driver to use:
driver = weewx.drivers.wmr200
"""
def modify_config(self, config_dict):
print """
Setting rainRate, windchill and heatindex calculations to hardware."""
config_dict.setdefault('StdWXCalculate', {})
        config_dict['StdWXCalculate'].setdefault('Calculations', {})
config_dict['StdWXCalculate']['Calculations']['rainRate'] = 'hardware'
config_dict['StdWXCalculate']['Calculations']['windchill'] = 'hardware'
config_dict['StdWXCalculate']['Calculations']['heatindex'] = 'hardware'
|
[
"tom@tom.org"
] |
tom@tom.org
|
3f2c5e2b102f57bd098165ff8171c55a59f18753
|
9df1da438b59a3a53b186db8999b46e92cd44168
|
/src/awp_processing/check_param.py
|
3b84034f08b56ce834a4158fd2470ba5b62a92da
|
[
"MIT"
] |
permissive
|
hzfmer/awp_processing
|
a59b62ba6de5e1859ce26c4f66e64a863ce3fd2a
|
e0aa0040f2ddeb2356386aae9d9b4d77bd77f15f
|
refs/heads/master
| 2021-01-03T06:05:06.785556
| 2020-12-05T04:38:05
| 2020-12-05T04:38:46
| 239,953,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,858
|
py
|
import argparse
from collections.abc import Iterable
import numpy as np
from awp_processing import awp, read_params
from awp_processing.check import check_mesh_cont
from pathlib2 import Path
# !Check these constants against pmcl3d_cons.h in the source code
BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z = 2, 2, 4
nbit_float = 4
parser = argparse.ArgumentParser()
parser.add_argument("--model", default="", help="configuration file")
parser.add_argument("--conf_file", default="param.sh", help="configuration file")
parser.add_argument("--batch_file", default="run.lsf", help="batch script")
args = parser.parse_args()
C = awp.Scenario(model=args.model, conf_file=args.conf_file)
cfg = C.cfg
# Convert parameters to floats or integers
"""
for k, v in cfg.items():
if not isinstance(v, Iterable):
print(k, v, type(v))
if type(v) == str and v and v.isdigit():
cfg[k] = float(v) if "." in v else int(v)
else:
print(k, v, type(v[0]))
if not isinstance(v, str) and type(v[0]) == str and v[0].isdigit():
# is list
        v = [float(x) if "." in x else int(x) for x in v]
cfg[k] = v
"""
# output directories
assert Path(args.model, cfg.chkfile).parent.exists()
assert Path(args.model, cfg.out).exists()
# layers
assert len(cfg.z) == len(cfg.nbgx) == len(cfg.dh) == len(cfg.nsrc) == cfg.g
for i in range(cfg.g):
assert cfg.x[i] % cfg.px == 0 and cfg.x[i] // cfg.px % BLOCK_SIZE_X == 0, f"Layer-{i}: Mismatch in X"
assert cfg.y[i] % cfg.py == 0 and cfg.y[i] // cfg.py % BLOCK_SIZE_Y == 0, f"Layer-{i}: Mismatch in Y"
assert cfg.z[i] // cfg.pz % BLOCK_SIZE_Z == 0, f"Layer-{i}: Mismatch in Z"
if cfg.insrc != "":
assert Path(args.model, cfg.insrc + "_" + str(i)).exists(), f"Layer-{i}: Source does not exist"
assert Path(args.model, cfg.insrc + "_" + str(i)).stat().st_size == cfg.nsrc[i] * (cfg.nst * 6 + 3) * nbit_float, f"Layer-{i}: Mismatch in source size"
assert Path(args.model, cfg.invel + "_" + str(i)).exists(), f"Layer-{i}: Mesh does not exist"
assert Path(args.model, cfg.invel + "_" + str(i)).stat().st_size == cfg.x[i] * cfg.y[i] * cfg.z[i] * cfg.nvar * nbit_float, f"Layer-{i}: Mismatch of mesh size"
if i + 1 < cfg.g:
        # Check consistency of adjacent meshes
check_mesh_cont(Path(args.model, cfg.invel + "_" + str(i)),
Path(args.model, cfg.invel + "_" + str(i + 1)),
cfg.x[i], cfg.y[i], cfg.z[i])
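# Illustrative size arithmetic for the per-layer checks above (made-up
# numbers): with nsrc[i] = 10 sources and nst = 1000 time steps the source
# file must be 10 * (1000 * 6 + 3) * 4 = 240,120 bytes (6 moment-rate values
# per step plus 3 coordinates, 4-byte floats); a 400 x 400 x 100 mesh with
# nvar = 3 must be 400 * 400 * 100 * 3 * 4 = 192,000,000 bytes.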
# Topography
if cfg.intopo:
file_topo = Path(args.model, cfg.intopo)
nx, ny, pad = np.fromfile(file_topo, dtype='int32', count=3)
assert nx == cfg.x[0] and ny == cfg.y[0], f"Mismatch topography domain size"
assert (nx + 2 * pad) * (ny + 2 * pad) * nbit_float == file_topo.stat().st_size, f"Topography size does not match parameters"
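# Illustrative topography check (made-up numbers): nx = 3000, ny = 2000 and
# pad = 2 require a file of (3000 + 4) * (2000 + 4) * 4 = 24,080,064 bytes.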
# Receivers
if cfg.recvfile:
assert Path(args.model, cfg.recvfile).parent.exists(), f"Receiver output directory does not exist"
assert cfg.recv_steps % (cfg.recv_stride * cfg.recv_cpu_buffer_size \
* cfg.recv_gpu_buffer_size * cfg.recv_num_writes) == 0, "Check divisibility of receiver writing"
assert cfg.recv_length <= len(cfg.recv_coords), f"More receivers required than given"
# Source files in Ossian's format
if cfg.sourcefile:
assert Path(args.model, cfg.sourcefile).parent.exists(), f"Source file doesn't exist"
assert cfg.src_steps % (cfg.src_stride * cfg.src_cpu_buffer_size \
* cfg.src_gpu_buffer_size * cfg.src_num_writes) == 0, f"Check divisibility of source reading"
assert cfg.src_length == len(cfg.src_coords), f"Mismatch number of sources"
for suf in ['xx', 'yy', 'zz', 'xy', 'xz', 'yz']:
assert cfg.src_length * cfg.src_steps * nbit_float == Path(args.model, cfg.src_file + "_" + suf).stat().st_size, f"Input source file size doesn't match"
|
[
"hzfmer94@gmail.com"
] |
hzfmer94@gmail.com
|
0a37c14e049a9d3fbef7d0962da711525fb62a93
|
dd949f215d968f2ee69bf85571fd63e4f085a869
|
/systems/css-2011-teams/yellow/subarchitectures/planner.sa/src/python/standalone/pddl/test/testdynamic.py
|
05ae9a955fb9957e43361d0929f365f46fa000f2
|
[] |
no_license
|
marc-hanheide/cogx
|
a3fd395805f1b0ad7d713a05b9256312757b37a9
|
cb9a9c9cdfeba02afac6a83d03b7c6bb778edb95
|
refs/heads/master
| 2022-03-16T23:36:21.951317
| 2013-12-10T23:49:07
| 2013-12-10T23:49:07
| 219,460,352
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,823
|
py
|
#! /usr/bin/env python
# -*- coding: latin-1 -*-
import unittest
import common
import os
import parser, domain, state, actions, mapl, durative, dynamic_objects
from mapltypes import *
from predicates import *
from effects import *
from actions import Action
from builder import Builder
from parser import Parser, ParseError
create = """
(:action create
:agent (?a - agent)
:parameters (?t - truck)
:effect (create (?p - package) (assign (location-of ?p) ?t)))
"""
destroy_package = """
(:action destroy_package
:agent (?a - agent)
:parameters (?p - package ?t - truck)
:precondition (= (location-of ?p) (location-of ?t))
:effect (destroy ?p))
"""
destroy_loc = """
(:action destroy_loc
:agent (?a - agent)
:parameters (?t - truck ?to - location)
:precondition (and (= (city-of (location-of ?t)) (city-of ?to))
(not (= (location-of ?t) ?to)))
:effect (and (destroy (location-of ?t))
(assign (location-of ?t) ?to)))
"""
class DynamicTest(common.PddlTest):
def testParsing(self):
"""Testing parsing of create and destroy effects"""
dom, prob = self.load("testdata/logistics.domain.mapl", "testdata/logistics.p1.mapl")
dom.add_requirement("dynamic-objects")
a_create = Parser.parse_as(create.split("\n"), mapl.MAPLAction, dom)
a_pdestroy = Parser.parse_as(destroy_package.split("\n"), mapl.MAPLAction, dom)
a_locdestroy = Parser.parse_as(destroy_loc.split("\n"), mapl.MAPLAction, dom)
dom.actions += [a_create, a_pdestroy, a_locdestroy]
self.roundtrip(dom, prob, print_result=False)
def testTranslation(self):
"""Testing translation of create and destroy effects"""
dom, prob = self.load("testdata/logistics.domain.mapl", "testdata/logistics.p1.mapl")
dom.add_requirement("dynamic-objects")
a_create = Parser.parse_as(create.split("\n"), mapl.MAPLAction, dom)
a_pdestroy = Parser.parse_as(destroy_package.split("\n"), mapl.MAPLAction, dom)
a_locdestroy = Parser.parse_as(destroy_loc.split("\n"), mapl.MAPLAction, dom)
dom.actions += [a_create, a_pdestroy, a_locdestroy]
t = dynamic_objects.DynamicObjectsCompiler()
dom2 = t.translate(dom)
prob2 = t.translate(prob)
self.roundtrip(dom2, prob2, print_result=False)
def testCreateEffect(self):
"""Testing application of create effects"""
dom, prob = self.load("testdata/logistics.domain.mapl", "testdata/logistics.p1.mapl")
dom.add_requirement("dynamic-objects")
a_create = Parser.parse_as(create.split("\n"), mapl.MAPLAction, dom)
self.assert_("package0" not in prob)
st = state.State.from_problem(prob)
oldlen = len(prob)
a_create.instantiate([prob["agent"], prob["tru1"]], prob)
st.apply_effect(a_create.effect)
b = Builder(prob)
self.assertEqual(len(prob), oldlen+1)
self.assert_("package0" in prob)
svar = b.svar("location-of", "package0")
self.assert_(st[svar] == prob["tru1"])
st.apply_effect(a_create.effect)
st.apply_effect(a_create.effect)
self.assertEqual(len(prob), oldlen+3)
self.assert_("package1" in prob)
self.assert_("package2" in prob)
svar1 = b.svar("location-of", "package2")
self.assert_(st[svar1] == prob["tru1"])
# def testModalAction(self):
# """Testing modal action parsing"""
# action = Parser.parse_as(modal_action.split("\n"), mapl.MAPLAction, self.domain)
# self.assertEqual(action.params[1].type, FunctionType(t_object))
# term = predicates.FunctionTerm(self.domain.functions["location-of"][0], [Parameter("?c", self.domain.types["city"])])
# action.instantiate({"?var" : term})
# def testEffects(self):
# """Testing basic effect parsing"""
# action = Parser.parse_as(drive.split("\n"), Action, self.domain)
# self.assert_(isinstance(action.effect, SimpleEffect))
# def testMAPLAction(self):
# """Testing basic effect parsing"""
# action = Parser.parse_as(mapl_drive.split("\n"), mapl.MAPLAction, self.domain)
# self.assertEqual(len(action.agents), 1)
# self.assertEqual(len(action.params), 2)
# self.assertEqual(len(action.vars), 0)
# self.assertEqual(len(action.args), 3)
# self.assert_(isinstance(action.effect, SimpleEffect))
# def testConditionalEffects(self):
# """Testing conditional effect parsing"""
# action = Parser.parse_as(cond_load.split("\n"), Action, self.domain)
# self.assert_(isinstance(action.effect, ConditionalEffect))
# self.assert_(isinstance(action.effect.condition, conditions.LiteralCondition))
# self.assert_(isinstance(action.effect.effect, SimpleEffect))
# def testUniversalEffects(self):
# """Testing conditional effect parsing"""
# action = Parser.parse_as(univ_unload.split("\n"), Action, self.domain)
# self.assert_(isinstance(action.effect, UniversalEffect))
# self.assertEqual(len(action.effect.args), 1)
# self.assert_(isinstance(action.effect.effect, ConditionalEffect))
# def testProbabilisticEffects(self):
# """Testing probabilistic effect parsing"""
# action = Parser.parse_as(prob_load.split("\n"), Action, self.domain)
# self.assert_(isinstance(action.effect, ConjunctiveEffect))
# self.assert_(isinstance(action.effect.parts[0], ProbabilisticEffect))
# self.assert_(isinstance(action.effect.parts[1], ProbabilisticEffect))
# p1, e1 = action.effect.parts[0].effects[0]
# p2, e2 = action.effect.parts[0].effects[1]
# ap1, ae1 = action.effect.parts[1].effects[0]
# ap2, ae2 = action.effect.parts[1].effects[1]
# self.assert_(isinstance(p1, FunctionTerm))
# self.assertEqual(p1.function, self.domain.functions["load_succ_prob"][0])
# self.assert_(isinstance(e1.args[0], FunctionTerm))
# self.assert_(isinstance(e1.args[1], VariableTerm))
# self.assertEqual(p2, 0.5)
# self.assert_(isinstance(e2.args[0], FunctionTerm))
# self.assert_(isinstance(e2.args[1], FunctionTerm))
# self.assertEqual(ae1, e1)
# self.assertEqual(ae2, e2)
# self.assertEqual(ap1, 0.5)
# self.assertEqual(ap2, None)
# # self.assertEqual(action.effect.parts[0].getRandomEffect(0), e2)
# # self.assertEqual(action.effect.parts[0].getRandomEffect(1), e1)
# # self.assertEqual(action.effect.parts[0].getRandomEffect(2), None)
# # import random
# # random.seed(42)
# # for r in xrange(30):
# # self.assert_(action.effect.parts[0].getRandomEffect() in (e1,e2,None))
# def testAssertion(self):
# """Testing parsing of assertions"""
# action = Parser.parse_as(a_load_mapl.split("\n"), mapl.MAPLAction, self.domain)
# self.assert_(action.replan is not None)
# action = Parser.parse_as(a_load.split("\n"), Action, self.domain)
# self.assert_(action.replan is not None)
# def testMaplErrorHandling(self):
# """Testing error handling of MaplAction"""
# try:
# action = Parser.parse_as(error_load1.split("\n"), mapl.MAPLAction, self.domain)
# self.fail("Action with duplicate precondition didn't raise exception")
# except ParseError, e:
# self.assertEqual(e.token.string, ":precondition")
# self.assertEqual(e.token.line, 6)
# try:
# action = Parser.parse_as(error_load2.split("\n"), mapl.MAPLAction, self.domain)
# self.fail("Action with duplicate replan condition didn't raise exception")
# except ParseError, e:
# self.assertEqual(e.token.string, ":replan")
# self.assertEqual(e.token.line, 7)
# try:
# action = Parser.parse_as(error_load3.split("\n"), mapl.MAPLAction, self.domain)
# self.fail("Action with duplicate effect statement didn't raise exception")
# except ParseError, e:
# self.assertEqual(e.token.string, ":effect")
# self.assertEqual(e.token.line, 8)
# try:
# action = Parser.parse_as(error_load4.split("\n"), mapl.MAPLAction, self.domain)
# self.fail("Action with duplicate parameters didn't raise exception")
# except ParseError, e:
# self.assertEqual(e.token.string, "?p")
# self.assertEqual(e.token.line, 4)
# def testActionCosts(self):
# """Testing setting/getting/deleting of action costs"""
# from builder import Builder
# action = Parser.parse_as(cost_load.split("\n"), Action, self.domain)
# b = Builder(action)
# expected_term = b("load-costs", "?v")
# self.assertEqual(action.get_total_cost(), expected_term)
# action.set_total_cost(25)
# self.assertEqual(action.get_total_cost(), b(25))
# action.set_total_cost(None)
# self.assertEqual(len(action.effect.parts), 1)
# action.set_total_cost(b("+", ("load-costs", "?v"), 5))
# self.assertEqual(len(action.effect.parts), 2)
if __name__ == '__main__':
unittest.main()
|
[
"marc@hanheide.net"
] |
marc@hanheide.net
|
97635af1733de430d86b77509ac1a87fac4f87bf
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/inspections/PyTypeCheckerInspection/OverloadsAndPureStubInSamePyiScope/module.pyi
|
95eb5fcb6819b9f21f9e987cf4054e29cd662c7e
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 254
|
pyi
|
from typing import overload
if undefined:
def foo(p: str) -> str: pass
else:
@overload
def foo(p: int) -> int: pass
@overload
def foo(p: str, i: int) -> str: pass
def bar(p: str) -> str: pass
@overload
def bar(p: int) -> int: pass
|
[
"intellij-monorepo-bot-no-reply@jetbrains.com"
] |
intellij-monorepo-bot-no-reply@jetbrains.com
|
fd05edecb92c88140588f6bbeef383ef68594d40
|
9028b6983685a3ace074049fccf2b8c503b77de8
|
/PyStationB/libraries/StaticCharacterization/staticchar/datasets.py
|
d26f4310ed43f6fc533142473c6655b5a4829e65
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
mebristo/station-b-libraries
|
7f5517e5e77e6cdc54c03355804b8c0a4fcae65b
|
40bab526af6562653c42dbb32b174524c44ce2ba
|
refs/heads/main
| 2023-09-03T03:54:53.181082
| 2021-10-01T03:21:11
| 2021-10-01T03:21:11
| 412,871,835
| 0
| 0
|
MIT
| 2021-10-02T17:53:07
| 2021-10-02T17:53:06
| null |
UTF-8
|
Python
| false
| false
| 5,294
|
py
|
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
"""This is a submodule used to retrieve the example data sets.
Its API may frequently change and it should *not* be used in production.
Exports:
Dataset, which is essentially a dictionary of data frames
load_dataframes_from_directory, a function reading all data frames in a directory into a dictionary
"""
import logging
from os import PathLike
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
import pandas as pd
CONDITION_KEY = "_conditions"
def missing_directory_message(path: Path) -> Optional[str]: # pragma: no cover
path = path.absolute()
if path.is_dir():
return None
ancestor = path
while ancestor.parent != ancestor:
ancestor = ancestor.parent
if ancestor.is_dir():
break
return f"Dataset directory {path} not found (only found {ancestor})"
class Dataset(Dict[str, pd.DataFrame]):
"""A class representing a set of data frames in a given directory.
Methods:
__getitem__, so that the data can be accessed using ``dataset[key]`` syntax
items, so that one can iterate over pairs (key, data frame) as in ``dict.items()``
        get_a_frame, gives a data frame, which is useful for illustrative purposes
"""
def __init__(self, path: PathLike) -> None:
self._path = Path(path)
assert self._path.is_dir(), missing_directory_message(self._path)
all_csvs = self._path.glob("*.csv")
frames = dict(map(load_dataframe, all_csvs))
if CONDITION_KEY in frames:
conditions = frames[CONDITION_KEY]
self.conditions = {idx: row.to_dict() for idx, row in conditions.set_index("SampleID").iterrows()}
frames.pop(CONDITION_KEY)
else:
self.conditions = {key: {} for key in frames.keys()}
super().__init__(frames)
self.check_conditions_coverage()
def check_conditions_coverage(self):
"""
Warn if the contents of the _conditions.csv file do not exactly match the data files in the folder.
"""
condition_keys = set(self.conditions.keys())
data_keys = set(self.keys())
n_condition_only_keys = len(condition_keys.difference(data_keys))
if n_condition_only_keys > 0: # pragma: no cover
logging.warning(
f"{self._path} has {n_condition_only_keys} rows in {CONDITION_KEY}.csv with no corresponding data file"
)
n_data_only_keys = len(data_keys.difference(condition_keys))
if n_data_only_keys > 0: # pragma: no cover
logging.warning(
f"{self._path} has {n_data_only_keys} data files with no corresponding row in {CONDITION_KEY}.csv"
)
def __repr__(self) -> str:
return f"{type(self).__name__}(path='{self._path}')"
def get_a_frame(self, index: int = 0) -> pd.DataFrame:
"""A utility function, returning a data frame at position `index` in lexicographical order of the keys."""
keys = sorted(self.keys())
key = keys[index]
return self[key]
def items_by_well(self) -> List[Tuple[Optional[str], str, pd.DataFrame]]:
"""
Returns a sorted list of tuples of the form (well_id, sample_id, data_frame), where well_id is the value
of the "Well" field in the conditions, or None if that is absent. The ordering is by well row (letter)
and column (number) if there are well IDs, otherwise alphabetically by sample ID.
"""
items = [(self.conditions[sample_id].get("Well", None), sample_id, value) for sample_id, value in self.items()]
def ordering_tuple(tup: Tuple[Optional[str], str, Any]) -> Tuple[str, int]:
well, sample_id, _ = tup
try:
return well[0], int(well[1:]) # type: ignore
except (ValueError, IndexError, TypeError):
return sample_id, 0
return sorted(items, key=ordering_tuple) # type: ignore
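    # Illustrative ordering (assumed sample data): ordering_tuple above maps
    # ("B9", "s1", df) to ("B", 9), so wells sort by row letter then numeric
    # column (B2 before B10), while a missing well like (None, "s2", df)
    # falls back to ("s2", 0), i.e. alphabetical by sample ID.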
def plate_layout(self) -> Optional[Tuple[List[str], List[int]]]:
"""
Attempts to return the set of letters (row IDs) and numbers (column IDs) for the wells in the dataset,
or None if that fails (most likely because there are no wells defined).
"""
wells = set(self.conditions[sample_id].get("Well", None) for sample_id in self)
try: # pragma: no cover
well_letters = sorted(set(w[0] for w in wells))
well_numbers = sorted(set(int(w[1:]) for w in wells))
return well_letters, well_numbers
except (ValueError, IndexError, TypeError):
return None
def load_dataframe(csv_path: PathLike) -> Tuple[str, pd.DataFrame]:
"""Returns a tuple (name, data frame). Used to construct a data set by `load_dataframes_from_directory`.
See:
load_dataframes_from_directory
Dataset
"""
return Path(csv_path).stem, pd.read_csv(csv_path) # type: ignore # auto
|
[
"noreply@github.com"
] |
mebristo.noreply@github.com
|
da39ff45a23d0fed6921618b9cd8f936042f5398
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/p6uXeD7JC7cmxeD2Z_5.py
|
8144f3ce1a89da17cf533400d0f88447f632ef3e
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,040
|
py
|
"""
Abigail and Benson are playing Rock, Paper, Scissors.
Each game is represented by an array of length 2, where the first element
represents what Abigail played and the second element represents what Benson
played.
Given a sequence of games, determine who wins the most number of matches. If
they tie, output "Tie".
* R stands for Rock
* P stands for Paper
* S stands for Scissors
### Examples
calculate_score([["R", "P"], ["R", "S"], ["S", "P"]]) ➞ "Abigail"
# Benson wins the first game (Paper beats Rock).
# Abigail wins the second game (Rock beats Scissors).
# Abigail wins the third game (Scissors beats Paper).
# Abigail wins 2/3.
calculate_score([["R", "R"], ["S", "S"]]) ➞ "Tie"
calculate_score([["S", "R"], ["R", "S"], ["R", "R"]]) ➞ "Tie"
### Notes
N/A
"""
def calculate_score(games):
s = 0
for g in games:
if "".join(g) in "PRSP": s+=1
if "".join(g) in "PRSP"[::-1]: s-=1
if s>0: return "Abigail"
if s<0: return "Benson"
return "Tie"
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
f0c1d119d54e73da200cd04bdfebbe4fb2e8a65a
|
b3ac12dfbb8fa74500b406a0907337011d4aac72
|
/tests/util/benchmark_cost.py
|
2cd221f3761d43be40774d8ace02b880cdd844da
|
[
"Apache-2.0"
] |
permissive
|
chia-os/goldcoin-blockchain
|
ab62add5396b7734c11d3c37c41776994489d5e7
|
5c294688dbbe995ae1d4422803f6fcf3e1cc6077
|
refs/heads/main
| 2023-08-11T23:58:53.617051
| 2021-09-12T15:33:26
| 2021-09-12T15:33:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,088
|
py
|
import time
from secrets import token_bytes
from blspy import AugSchemeMPL, PrivateKey
from clvm_tools import binutils
from goldcoin.consensus.default_constants import DEFAULT_CONSTANTS
from goldcoin.types.blockchain_format.program import Program, INFINITE_COST
from goldcoin.types.condition_opcodes import ConditionOpcode
from goldcoin.types.condition_with_args import ConditionWithArgs
from goldcoin.util.ints import uint32
from tests.wallet_tools import WalletTool
from goldcoin.wallet.derive_keys import master_sk_to_wallet_sk
from goldcoin.wallet.puzzles.p2_delegated_puzzle import puzzle_for_pk
def float_to_str(f):
float_string = repr(f)
if "e" in float_string: # detect scientific notation
digits, exp_str = float_string.split("e")
digits = digits.replace(".", "").replace("-", "")
exp = int(exp_str)
zero_padding = "0" * (abs(int(exp)) - 1) # minus 1 for decimal point in the sci notation
sign = "-" if f < 0 else ""
if exp > 0:
float_string = "{}{}{}.0".format(sign, digits, zero_padding)
else:
float_string = "{}0.{}{}".format(sign, zero_padding, digits)
return float_string
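# Illustrative behaviour (assumed inputs): float_to_str(5e-07) sees
# repr(5e-07) == '5e-07', pads six zeros and returns '0.0000005', while
# float_to_str(0.5) has no exponent and returns '0.5' unchanged.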
def run_and_return_cost_time(chialisp):
start = time.time()
clvm_loop = "((c (q ((c (f (a)) (c (f (a)) (c (f (r (a))) (c (f (r (r (a))))"
" (q ()))))))) (c (q ((c (i (f (r (a))) (q (i (q 1) ((c (f (a)) (c (f (a))"
" (c (- (f (r (a))) (q 1)) (c (f (r (r (a)))) (q ()))))))"
" ((c (f (r (r (a)))) (q ()))))) (q (q ()))) (a)))) (a))))"
loop_program = Program.to(binutils.assemble(clvm_loop))
clvm_loop_solution = f"(1000 {chialisp})"
solution_program = Program.to(binutils.assemble(clvm_loop_solution))
cost, sexp = loop_program.run_with_cost(solution_program, INFINITE_COST)
end = time.time()
total_time = end - start
return cost, total_time
def get_cost_compared_to_addition(addition_cost, addition_time, other_time):
return (addition_cost * other_time) / addition_time
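# Illustrative scaling (made-up timings): if addition costs 100 cost units in
# 0.50 s, an operator measured at 1.25 s maps to 100 * 1.25 / 0.50 = 250.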
def benchmark_all_operators():
addition = "(+ (q 1000000000) (q 1000000000))"
substraction = "(- (q 1000000000) (q 1000000000))"
multiply = "(* (q 1000000000) (q 1000000000))"
greater = "(> (q 1000000000) (q 1000000000))"
equal = "(= (q 1000000000) (q 1000000000))"
if_clvm = "(i (= (q 1000000000) (q 1000000000)) (q 1000000000) (q 1000000000))"
sha256tree = "(sha256 (q 1000000000))"
pubkey_for_exp = "(pubkey_for_exp (q 1))"
point_add = "(point_add"
" (q 0x17f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb)"
" (q 0x17f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb))"
point_add_cost, point_add_time = run_and_return_cost_time(point_add)
addition_cost, addition_time = run_and_return_cost_time(addition)
    subtraction_cost, subtraction_time = run_and_return_cost_time(subtraction)
multiply_cost, multiply_time = run_and_return_cost_time(multiply)
greater_cost, greater_time = run_and_return_cost_time(greater)
equal_cost, equal_time = run_and_return_cost_time(equal)
if_cost, if_time = run_and_return_cost_time(if_clvm)
sha256tree_cost, sha256tree_time = run_and_return_cost_time(sha256tree)
pubkey_for_exp_cost, pubkey_for_exp_time = run_and_return_cost_time(pubkey_for_exp)
one_addition = 1
    one_subtraction = get_cost_compared_to_addition(addition_cost, addition_time, subtraction_time) / addition_cost
one_multiply = get_cost_compared_to_addition(addition_cost, addition_time, multiply_time) / addition_cost
one_greater = get_cost_compared_to_addition(addition_cost, addition_time, greater_time) / addition_cost
one_equal = get_cost_compared_to_addition(addition_cost, addition_time, equal_time) / addition_cost
one_if = get_cost_compared_to_addition(addition_cost, addition_time, if_time) / addition_cost
one_sha256 = get_cost_compared_to_addition(addition_cost, addition_time, sha256tree_time) / addition_cost
one_pubkey_for_exp = (
get_cost_compared_to_addition(addition_cost, addition_time, pubkey_for_exp_time) / addition_cost
)
one_point_add = get_cost_compared_to_addition(addition_cost, addition_time, point_add_time) / addition_cost
print(f"cost of addition is: {one_addition}")
print(f"cost of one_substraction is: {one_substraction}")
print(f"cost of one_multiply is: {one_multiply}")
print(f"cost of one_greater is: {one_greater}")
print(f"cost of one_equal is: {one_equal}")
print(f"cost of one_if is: {one_if}")
print(f"cost of one_sha256 is: {one_sha256}")
print(f"cost of one_pubkey_for_exp is: {one_pubkey_for_exp}")
print(f"cost of one_point_add is: {one_point_add}")
if __name__ == "__main__":
"""
Naive way to calculate cost ratio between vByte and CLVM cost unit.
AggSig has assigned cost of 20vBytes, simple CLVM program is benchmarked against it.
"""
wallet_tool = WalletTool(DEFAULT_CONSTANTS)
benchmark_all_operators()
secret_key: PrivateKey = AugSchemeMPL.key_gen(bytes([2] * 32))
puzzles = []
solutions = []
private_keys = []
public_keys = []
for i in range(0, 1000):
private_key: PrivateKey = master_sk_to_wallet_sk(secret_key, uint32(i))
        public_key = private_key.get_g1()
solution = wallet_tool.make_solution(
{ConditionOpcode.ASSERT_MY_COIN_ID: [ConditionWithArgs(ConditionOpcode.ASSERT_MY_COIN_ID, [token_bytes()])]}
)
puzzle = puzzle_for_pk(bytes(public_key))
puzzles.append(puzzle)
solutions.append(solution)
private_keys.append(private_key)
public_keys.append(public_key)
# Run Puzzle 1000 times
puzzle_start = time.time()
clvm_cost = 0
for i in range(0, 1000):
cost_run, sexp = puzzles[i].run_with_cost(solutions[i], INFINITE_COST)
clvm_cost += cost_run
puzzle_end = time.time()
puzzle_time = puzzle_end - puzzle_start
print(f"Puzzle_time is: {puzzle_time}")
print(f"Puzzle cost sum is: {clvm_cost}")
private_key = master_sk_to_wallet_sk(secret_key, uint32(0))
public_key = private_key.get_g1()
message = token_bytes()
signature = AugSchemeMPL.sign(private_key, message)
pk_message_pair = (public_key, message)
# Run AggSig 1000 times
agg_sig_start = time.time()
agg_sig_cost = 0
for i in range(0, 1000):
valid = AugSchemeMPL.verify(public_key, message, signature)
assert valid
agg_sig_cost += 20
agg_sig_end = time.time()
agg_sig_time = agg_sig_end - agg_sig_start
print(f"Aggsig Cost: {agg_sig_cost}")
print(f"Aggsig time is: {agg_sig_time}")
# clvm_should_cost = agg_sig_cost * puzzle_time / agg_sig_time
clvm_should_cost = (agg_sig_cost * puzzle_time) / agg_sig_time
print(f"Puzzle should cost: {clvm_should_cost}")
constant = clvm_should_cost / clvm_cost
format = float_to_str(constant)
print(f"Constant factor: {format}")
print(f"CLVM RATIO MULTIPLIER: {1/constant}")
|
[
"faurepierre78@yahoo.com"
] |
faurepierre78@yahoo.com
|
14ec9f43cb1eeea48224b48dd0cbf8762a59731f
|
cb1ce85f80c9315d2bb0342badc0998f416839b2
|
/apps/childcount/commands/LangCommand.py
|
57ab33f976300e27f4c1760f7ed1a7379c8df070
|
[] |
no_license
|
katembu/move-it
|
eb609529d6527694aa1d1c9dbc972f70cd921e5d
|
86c44f5228811bdcba6fa609bf9d8c9d8e46263a
|
HEAD
| 2016-09-05T14:51:47.105500
| 2012-04-12T20:28:08
| 2012-04-12T20:28:08
| 4,008,730
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,883
|
py
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 coding=utf-8
# maintainer: rgaudin
from django.utils.translation import ugettext as _, activate
from childcount.commands import CCCommand
from childcount.models import Patient
from childcount.utils import authenticated
class LangCommand(CCCommand):
    '''Lets a community health worker (CHW) view or change their language preference.'''
KEYWORDS = {
'en': ['lang'],
'fr': ['lang'],
}
@authenticated
def process(self):
chw = self.message.persistant_connection.reporter.chw
# warn if no lang specified
        if len(self.params) < 2:
self.message.respond(_(u"Your language preference is set " \
"to: %(lang)s. Change it by sending your " \
"new language preference code.") \
% {'lang': chw.language.upper()})
return True
newlang = self.params[1].strip()
if chw.language == newlang:
self.message.respond(_(u"Your language preference is already " \
"set to: %(lang)s.") \
% {'lang': chw.language.upper()})
return True
if newlang not in self.KEYWORDS:
self.message.respond(_(u"That language preference code " \
"(%(code)s) is not valid.") \
% {'code': newlang.upper()})
return True
oldlang = chw.language
chw.language = newlang
chw.save()
activate(chw.language)
self.message.respond(_(u"Your language preference has been changed " \
"from %(old)s to %(new)s. ") % \
{'old': oldlang.upper(), \
'new': chw.language.upper()})
return True
|
[
"rgaudin@gmail.com"
] |
rgaudin@gmail.com
|
5b7863a23b2e472cbe0ecf5c16042e25ba4016f6
|
f9f074c44b67a11d4630b5e1cc15e016e8d73cc8
|
/factory-ai-vision/EdgeSolution/modules/ModelManagerModule/app/downloader/tools/accuracy_checker/tests/test_reid_metrics.py
|
915fd137bf4441ef1b2f02defee828853bcd82fa
|
[
"MIT"
] |
permissive
|
Azure-Samples/azure-intelligent-edge-patterns
|
361694680c7e48d3761c5416175788355b684dcd
|
1d2f42cbf9f21157c1e1abf044b26160dfed5b16
|
refs/heads/master
| 2023-05-26T13:15:47.085088
| 2023-02-28T17:25:53
| 2023-02-28T17:25:53
| 186,706,933
| 193
| 164
|
MIT
| 2023-02-28T17:25:55
| 2019-05-14T22:02:41
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,410
|
py
|
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from accuracy_checker.metrics.reid import eval_cmc
class TestCMC:
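    # CMC (cumulative match characteristic): entry k of the result is the
    # fraction of queries whose correct gallery match appears within the
    # top-(k+1) ranked candidates.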
def test_only_distance_matrix(self):
distance_matrix = np.array([
[0, 1, 2, 3, 4],
[1, 0, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[1, 2, 3, 4, 0]
])
m, n = distance_matrix.shape
result = eval_cmc(
distance_matrix,
query_ids=np.arange(m),
gallery_ids=np.arange(n),
query_cams=np.zeros(m).astype(np.int32),
gallery_cams=np.ones(n).astype(np.int32)
)
assert np.all(result[:5] == [0.6, 0.6, 0.8, 1.0, 1.0])
def test_duplicate_ids(self):
distance_matrix = np.array([
[0, 1, 2, 3],
[0, 1, 2, 3],
[0, 1, 2, 3],
[0, 1, 2, 3]
])
result = eval_cmc(
distance_matrix,
query_ids=np.array([0, 0, 1, 1]),
gallery_ids=np.array([0, 0, 1, 1]),
top_k=4,
gallery_cams=np.ones(distance_matrix.shape[1]).astype(np.int32),
query_cams=np.zeros(distance_matrix.shape[0]).astype(np.int32),
separate_camera_set=False,
single_gallery_shot=False
)
assert np.all(result == [0.5, 0.5, 1, 1])
def test_duplicate_cams(self):
distance_matrix = np.tile(np.arange(5), (5, 1))
result = eval_cmc(
distance_matrix,
query_ids=np.array([0, 0, 0, 1, 1]),
gallery_ids=np.array([0, 0, 0, 1, 1]),
query_cams=np.array([0, 0, 0, 0, 0]),
gallery_cams=np.array([0, 1, 1, 1, 1]),
top_k=5,
separate_camera_set=False,
single_gallery_shot=False
)
assert np.all(result == [0.6, 0.6, 0.6, 1, 1])
|
[
"waitingkuo0527@gmail.com"
] |
waitingkuo0527@gmail.com
|
c1d02968598f3beb707555c296286cadd368736f
|
64bf39b96a014b5d3f69b3311430185c64a7ff0e
|
/intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/t_systems_mms/icinga_director/plugins/modules/icinga_hostgroup.py
|
84adda4c8cdc3654cebc548d0afb7e85971573d0
|
[
"GPL-3.0-only",
"MIT"
] |
permissive
|
SimonFangCisco/dne-dna-code
|
7072eba7da0389e37507b7a2aa5f7d0c0735a220
|
2ea7d4f00212f502bc684ac257371ada73da1ca9
|
refs/heads/master
| 2023-03-10T23:10:31.392558
| 2021-02-25T15:04:36
| 2021-02-25T15:04:36
| 342,274,373
| 0
| 0
|
MIT
| 2021-02-25T14:39:22
| 2021-02-25T14:39:22
| null |
UTF-8
|
Python
| false
| false
| 3,623
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 T-Systems Multimedia Solutions GmbH
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
module: icinga_hostgroup
short_description: Manage hostgroups in Icinga2
description:
  - Add or remove a hostgroup in Icinga2 through the director API.
author: Sebastian Gumprich (@rndmh3ro)
extends_documentation_fragment:
- ansible.builtin.url
- t_systems_mms.icinga_director.common_options
version_added: '1.0.0'
notes:
- This module supports check mode.
options:
state:
description:
- Apply feature state.
choices: [ "present", "absent" ]
default: present
type: str
object_name:
description:
- Icinga object name for this hostgroup.
aliases: ['name']
required: true
type: str
display_name:
description:
- An alternative display name for this group.
      - If you wonder how this could be helpful, just leave it blank.
type: str
assign_filter:
description:
- This allows you to configure an assignment filter.
- Please feel free to combine as many nested operators as you want.
type: str
"""
EXAMPLES = """
- name: Create hostgroup
t_systems_mms.icinga_director.icinga_hostgroup:
state: present
url: "{{ icinga_url }}"
url_username: "{{ icinga_user }}"
url_password: "{{ icinga_pass }}"
object_name: foohostgroup
display_name: foohostgroup
assign_filter: 'host.name="foohost"'
"""
RETURN = r""" # """
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import url_argument_spec
from ansible_collections.t_systems_mms.icinga_director.plugins.module_utils.icinga import (
Icinga2APIObject,
)
# ===========================================
# Module execution.
#
def main():
# use the predefined argument spec for url
argument_spec = url_argument_spec()
# add our own arguments
argument_spec.update(
state=dict(default="present", choices=["absent", "present"]),
url=dict(required=True),
object_name=dict(required=True, aliases=["name"]),
display_name=dict(required=False),
assign_filter=dict(required=False),
)
# Define the main module
module = AnsibleModule(
argument_spec=argument_spec, supports_check_mode=True
)
data = {
"object_name": module.params["object_name"],
"object_type": "object",
"display_name": module.params["display_name"],
"assign_filter": module.params["assign_filter"],
}
icinga_object = Icinga2APIObject(
module=module, path="/hostgroup", data=data
)
changed, diff = icinga_object.update(module.params["state"])
module.exit_json(
changed=changed,
diff=diff,
)
# import module snippets
if __name__ == "__main__":
main()
|
[
"sifang@cisco.com"
] |
sifang@cisco.com
|
e22152a4d7fccfed5a456ac24264e156583c5444
|
b57d337ddbe946c113b2228a0c167db787fd69a1
|
/scr/Spell390 - Remove Blindness Deafness.py
|
6dda7d4d7cf28406c2fe25e031c3232ca63bf139
|
[] |
no_license
|
aademchenko/ToEE
|
ebf6432a75538ae95803b61c6624e65b5cdc53a1
|
dcfd5d2de48b9d9031021d9e04819b309d71c59e
|
refs/heads/master
| 2020-04-06T13:56:27.443772
| 2018-11-14T09:35:57
| 2018-11-14T09:35:57
| 157,520,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,140
|
py
|
from toee import *
def OnBeginSpellCast( spell ):
print "Remove Blindness/Deafness OnBeginSpellCast"
print "spell.target_list=", spell.target_list
print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
game.particles( "sp-conjuration-conjure", spell.caster )
def OnSpellEffect ( spell ):
print "Remove Blindness/Deafness OnSpellEffect"
spell.duration = 0
target = spell.target_list[0]
## Solves Radial menu problem for Wands/NPCs
spell_arg = spell.spell_get_menu_arg( RADIAL_MENU_PARAM_MIN_SETTING )
if spell_arg != 1 and spell_arg != 2:
spell_arg = 2
game.particles( 'sp-Remove Blindness Deafness', target.obj )
if spell_arg == 1:
# apply remove blindness
target.obj.condition_add_with_args( 'sp-Remove Blindness', spell.id, spell.duration, 0 )
else:
        # apply remove deafness
target.obj.condition_add_with_args( 'sp-Remove Deafness', spell.id, spell.duration, 0 )
spell.target_list.remove_target( target.obj )
spell.spell_end(spell.id)
def OnBeginRound( spell ):
print "Remove Blindness/Deafness OnBeginRound"
def OnEndSpellCast( spell ):
print "Remove Blindness/Deafness OnEndSpellCast"
|
[
"demchenko.recruitment@gmail.com"
] |
demchenko.recruitment@gmail.com
|
fb5f2c458afc3a821916b18dd32088f5e9db972d
|
6ed48bf3c72e61fe53144a3545ab305112c93501
|
/infra/services/master_manager_launcher/desired_state_parser.py
|
33123b15e286d0812c1907918de2542fb517383f
|
[
"BSD-3-Clause"
] |
permissive
|
eunchong/infra
|
ee5f7a9379977de8c814f90dbba3f6adbf06a75c
|
ce3728559112bfb3e8b32137eada517aec6d22f9
|
refs/heads/master
| 2022-11-27T06:26:57.415805
| 2016-04-08T12:34:36
| 2016-04-08T12:34:36
| 55,699,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,201
|
py
|
#!/usr/bin/python
# Copyright 2015 Google Inc. All Rights Reserved.
# pylint: disable=F0401
"""Parse, validate and query the desired master state json."""
import bisect
import datetime
import json
import logging
import operator
import os
import re
from infra.libs.buildbot import master
from infra_libs.time_functions import timestamp
from infra_libs.time_functions import zulu
from infra.services.master_lifecycle import buildbot_state
LOGGER = logging.getLogger(__name__)
# A string that uniquely identifies the structure of a master state
# configuration file. Any changes made to the structure that are not backwards-
# compatible MUST update this value.
VERSION = '2'
# Remove transition once crbug.com/583441 is resolved.
PREV_VERSION = '1'
class InvalidDesiredMasterState(ValueError):
pass
def load_desired_state_file(filename):
with open(filename) as f:
return parse_desired_state(f.read())
def parse_desired_state(data):
try:
desired_state = json.loads(data)
except ValueError as ex:
LOGGER.exception('Failed to parse desired state JSON')
raise InvalidDesiredMasterState(str(ex))
try:
validate_desired_master_state(desired_state)
except InvalidDesiredMasterState as ex:
LOGGER.error(ex.args[0])
raise
return desired_state
def validate_desired_master_state(desired_state):
"""Verify that the desired_master_state file is valid."""
now = timestamp.utcnow_ts()
version = desired_state.get('version', None)
# Remove transition once crbug.com/583441 is resolved.
  acceptable_versions = [PREV_VERSION, VERSION]
  if version not in acceptable_versions:
    raise InvalidDesiredMasterState(
        "State version %s is not in %s" % (version, acceptable_versions))
master_states = desired_state.get('master_states', {})
for mastername, states in master_states.iteritems():
# Verify desired_state and timestamp are valid.
for state in states:
# Verify desired_state and transition_time_utc are present.
for k in ('desired_state', 'transition_time_utc'):
        if k not in state:
raise InvalidDesiredMasterState(
'one or more states for master %s do not contain %s' % (
mastername, k))
# Verify the desired state is in the allowed set.
if (state['desired_state'] not in
buildbot_state.STATES['desired_buildbot_state']):
raise InvalidDesiredMasterState(
'desired_state \'%s\' is not one of %s' %(
state['desired_state'],
buildbot_state.STATES['desired_buildbot_state']))
# Verify the timestamp is Zulu time. Will raise an exception if invalid.
state_time(state)
# Verify the list is properly sorted.
sorted_states = sorted(
states, key=operator.itemgetter('transition_time_utc'))
if sorted_states != states:
raise InvalidDesiredMasterState(
'master %s does not have states sorted by timestamp\n'
'should be:\n%s' % (
mastername,
json.dumps(sorted_states, indent=2)))
# Verify there is at least one state in the past.
if not get_master_state(states, now=now):
raise InvalidDesiredMasterState(
'master %s does not have a state older than %s' % (mastername, now))
manually_managed = {}
master_params = desired_state.get('master_params', {})
for mastername, params in master_params.iteritems():
allowed_config_keys = set((
'builder_filters',
'drain_timeout_sec',
'manually_managed',
))
extra_configs = set(params.iterkeys()) - allowed_config_keys
if extra_configs:
raise InvalidDesiredMasterState(
'found unsupported configuration keys: %s' % (sorted(extra_configs),))
if params.get('manually_managed'):
manually_managed[mastername] = params['manually_managed']
if params.get('drain_timeout_sec') is not None:
try:
int(params['drain_timeout_sec'])
except ValueError as e:
raise InvalidDesiredMasterState(
'invalid "drain_timeout_sec" for %s (%s): %s' % (
mastername, params['drain_timeout_sec'], e))
for builder_filter in params.get('builder_filters', []):
try:
re.compile(builder_filter)
except re.error as e:
raise InvalidDesiredMasterState(
'invalid "builder_filters" entry for %s (%s): %s' % (
mastername, builder_filter, e))
illegally_managed = set(manually_managed).intersection(set(master_states))
if illegally_managed:
emails = set(manually_managed[master] for master in illegally_managed)
raise InvalidDesiredMasterState(
'cannot restart the following masters via master manager: %s. '
'please contact %s' % (','.join(illegally_managed), ','.join(emails)))
def get_master_state(states, now=None):
"""Returns the latest state earlier than the current (or specified) time.
If there are three items, each with transition times of 100, 200 and 300:
* calling when 'now' is 50 will return None
* calling when 'now' is 150 will return the first item
* calling when 'now' is 400 will return the third item
"""
now = now or timestamp.utcnow_ts()
times = [state_time(x) for x in states]
index = bisect.bisect_left(times, now)
if index > 0: # An index of 0 means all timestamps are in the future.
return states[index - 1]
return None
def get_masters_for_host(desired_state, build_dir, hostname):
"""Identify which masters on this host should be managed.
Returns triggered_masters and ignored_masters (a list and a set respectively).
triggered_masters are masters on this host which have a corresponding entry in
the desired_master_state file. Any master running assigned to this host that
does *not* have an entry in the desired_master_state file is considered
'ignored.'
triggered_masters is a list of dicts. Each dict is the full dict from
mastermap.py with two extra keys:
- 'fulldir': the absolute path to the master directory
- 'states': the state configuration for that master
- 'params': any configured parameters for that master
ignored_masters is a set of 'dirname' strings (ex: master.chromium).
"""
master_states = desired_state.get('master_states', {})
master_params = desired_state.get('master_params', {})
triggered_masters = []
ignored_masters = set()
for master_dict in master.get_mastermap_for_host(
build_dir, hostname):
mastername = master_dict['dirname']
if mastername in master_states:
if master_dict['internal']:
master_dir = os.path.abspath(os.path.join(
build_dir, os.pardir, 'build_internal', 'masters',
mastername))
else:
master_dir = os.path.abspath(os.path.join(
build_dir, 'masters', mastername))
master_dict['fulldir'] = master_dir
master_dict['states'] = master_states[mastername]
master_dict['params'] = master_params.get(mastername, {})
triggered_masters.append(master_dict)
else:
ignored_masters.add(mastername)
return triggered_masters, ignored_masters
def state_time(state):
"""Returns the transition time as float or raises an exception if invalid."""
zt = zulu.parse_zulu_ts(state['transition_time_utc'])
if zt is None:
raise InvalidDesiredMasterState(
'transition_time_utc \'%s\' is not Zulu time' % (
state['transition_time_utc']))
return zt
def prune_desired_state(desired_state, buffer_secs=3600):
"""Prune old desired_state entries.
buffer_secs specifies how many seconds of buffer, only entries at least this
many seconds in the past are considered for pruning.
"""
cutoff = timestamp.utcnow_ts() - buffer_secs
new_desired_state = {}
for mastername, states in desired_state.iteritems():
states_before_cutoff = []
states_after_cutoff = []
for state in states:
# Verify the timestamp is a Zulu time.
parsed_time = state_time(state)
if parsed_time <= cutoff:
states_before_cutoff.append(state)
else:
states_after_cutoff.append(state)
# Verify there is at least one state in the past.
if not states_before_cutoff:
raise InvalidDesiredMasterState(
'master %s does not have a state older than %s (%d secs ago)' % (
mastername, cutoff, buffer_secs))
new_desired_state[mastername] = (
[max(states_before_cutoff, key=state_time)]
+ sorted(states_after_cutoff, key=state_time))
return new_desired_state
def write_master_state(desired_state, filename):
"""Write a desired state file, removing old entries."""
new_desired_state = {
'master_params': desired_state.get('master_params', {}),
'master_states': prune_desired_state(
desired_state.get('master_states', {})),
# Remove transition once crbug.com/583441 is resolved.
'version': PREV_VERSION,
}
# Remove transition once crbug.com/583441 is resolved.
new_desired_state['master_params'].pop('manually_managed', None)
with open(filename, 'w') as f:
json.dump(
new_desired_state, f, sort_keys=True, indent=2, separators=(',', ':'))
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
05cebba82ea2cb7c0ea723fd2c8e258f074a2a2b
|
a66460a46611483dfbdc94c7996893f427e60d97
|
/ansible/my_env/lib/python2.7/site-packages/ansible/modules/network/ios/ios_l3_interface.py
|
200ffec268d94e4233bb6f52c5698ad284fd1576
|
[
"MIT"
] |
permissive
|
otus-devops-2019-02/yyashkin_infra
|
06b57807dde26f94f501828c07503d6bf1d70816
|
0cd0c003884155ac922e3e301305ac202de7028c
|
refs/heads/master
| 2020-04-29T02:42:22.056724
| 2019-05-15T16:24:35
| 2019-05-15T16:24:35
| 175,780,718
| 0
| 0
|
MIT
| 2019-05-15T16:24:36
| 2019-03-15T08:37:35
|
HCL
|
UTF-8
|
Python
| false
| false
| 10,217
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_l3_interface
version_added: "2.5"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage Layer-3 interfaces on Cisco IOS network devices.
description:
- This module provides declarative management of Layer-3 interfaces
on IOS network devices.
notes:
- Tested against IOS 15.2
options:
name:
description:
      - Name of the Layer-3 interface to be configured, e.g. GigabitEthernet0/2.
ipv4:
description:
      - IPv4 address to be set for the Layer-3 interface mentioned in the I(name) option.
        The address format is <ipv4 address>/<mask>, where the mask is a number
        in the range 0-32, e.g. 192.168.0.1/24.
ipv6:
description:
      - IPv6 address to be set for the Layer-3 interface mentioned in the I(name) option.
        The address format is <ipv6 address>/<mask>, where the mask is a number
        in the range 0-128, e.g. fd5d:12c9:2201:1::1/64.
aggregate:
description:
      - List of Layer-3 interface definitions. Each entry in the aggregate list should
        define the name of the interface C(name) and an optional C(ipv4) or C(ipv6) address.
state:
description:
- State of the Layer-3 interface configuration. It indicates if the configuration should
        be present or absent on the remote device.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: ios
"""
EXAMPLES = """
- name: Remove GigabitEthernet0/3 IPv4 and IPv6 address
ios_l3_interface:
name: GigabitEthernet0/3
state: absent
- name: Set GigabitEthernet0/3 IPv4 address
ios_l3_interface:
name: GigabitEthernet0/3
ipv4: 192.168.0.1/24
- name: Set GigabitEthernet0/3 IPv6 address
ios_l3_interface:
name: GigabitEthernet0/3
ipv6: "fd5d:12c9:2201:1::1/64"
- name: Set GigabitEthernet0/3 in dhcp
ios_l3_interface:
name: GigabitEthernet0/3
ipv4: dhcp
ipv6: dhcp
- name: Set interface Vlan1 (SVI) IPv4 address
ios_l3_interface:
name: Vlan1
ipv4: 192.168.0.5/24
- name: Set IP addresses on aggregate
ios_l3_interface:
aggregate:
- { name: GigabitEthernet0/3, ipv4: 192.168.2.10/24 }
- { name: GigabitEthernet0/3, ipv4: 192.168.3.10/24, ipv6: "fd5d:12c9:2201:1::1/64" }
- name: Remove IP addresses on aggregate
ios_l3_interface:
aggregate:
- { name: GigabitEthernet0/3, ipv4: 192.168.2.10/24 }
- { name: GigabitEthernet0/3, ipv4: 192.168.3.10/24, ipv6: "fd5d:12c9:2201:1::1/64" }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- interface GigabitEthernet0/2
- ip address 192.168.0.1 255.255.255.0
- ipv6 address fd5d:12c9:2201:1::1/64
"""
import re
from copy import deepcopy
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.ios.ios import get_config, load_config
from ansible.module_utils.network.ios.ios import ios_argument_spec
from ansible.module_utils.network.common.config import NetworkConfig
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.common.utils import is_netmask, is_masklen, to_netmask, to_masklen
def validate_ipv4(value, module):
if value:
address = value.split('/')
if len(address) != 2:
module.fail_json(msg='address format is <ipv4 address>/<mask>, got invalid format %s' % value)
if not is_masklen(address[1]):
module.fail_json(msg='invalid value for mask: %s, mask should be in range 0-32' % address[1])
def validate_ipv6(value, module):
if value:
address = value.split('/')
if len(address) != 2:
module.fail_json(msg='address format is <ipv6 address>/<mask>, got invalid format %s' % value)
else:
if not 0 <= int(address[1]) <= 128:
module.fail_json(msg='invalid value for mask: %s, mask should be in range 0-128' % address[1])
def validate_param_values(module, obj, param=None):
if param is None:
param = module.params
for key in obj:
# validate the param value (if validator func exists)
validator = globals().get('validate_%s' % key)
if callable(validator):
validator(param.get(key), module)
def parse_config_argument(configobj, name, arg=None):
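    # Note: every "ipv6 address" match is collected into a list, while for any
    # other argument (e.g. "ip address") only the first match is returned as a
    # plain string.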
cfg = configobj['interface %s' % name]
cfg = '\n'.join(cfg.children)
values = []
matches = re.finditer(r'%s (.+)$' % arg, cfg, re.M)
for match in matches:
match_str = match.group(1).strip()
if arg == 'ipv6 address':
values.append(match_str)
else:
values = match_str
break
return values or None
def search_obj_in_list(name, lst):
for o in lst:
if o['name'] == name:
return o
return None
def map_obj_to_commands(updates, module):
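    # Diffs the desired state ("want") against the device state ("have") and
    # emits minimal configuration commands; an "interface ..." line that
    # gathered no sub-commands is popped again at the end of each iteration.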
commands = list()
want, have = updates
for w in want:
name = w['name']
ipv4 = w['ipv4']
ipv6 = w['ipv6']
state = w['state']
interface = 'interface ' + name
commands.append(interface)
obj_in_have = search_obj_in_list(name, have)
if state == 'absent' and obj_in_have:
if obj_in_have['ipv4']:
if ipv4:
address = ipv4.split('/')
if len(address) == 2:
ipv4 = '{0} {1}'.format(address[0], to_netmask(address[1]))
commands.append('no ip address {}'.format(ipv4))
else:
commands.append('no ip address')
if obj_in_have['ipv6']:
if ipv6:
commands.append('no ipv6 address {}'.format(ipv6))
else:
commands.append('no ipv6 address')
if 'dhcp' in obj_in_have['ipv6']:
commands.append('no ipv6 address dhcp')
elif state == 'present':
if ipv4:
if obj_in_have is None or obj_in_have.get('ipv4') is None or ipv4 != obj_in_have['ipv4']:
address = ipv4.split('/')
if len(address) == 2:
ipv4 = '{0} {1}'.format(address[0], to_netmask(address[1]))
commands.append('ip address {}'.format(ipv4))
if ipv6:
if obj_in_have is None or obj_in_have.get('ipv6') is None or ipv6.lower() not in [addr.lower() for addr in obj_in_have['ipv6']]:
commands.append('ipv6 address {}'.format(ipv6))
if commands[-1] == interface:
commands.pop(-1)
return commands
def map_config_to_obj(module):
config = get_config(module, flags=['| section interface'])
configobj = NetworkConfig(indent=1, contents=config)
match = re.findall(r'^interface (\S+)', config, re.M)
if not match:
return list()
instances = list()
for item in set(match):
ipv4 = parse_config_argument(configobj, item, 'ip address')
if ipv4:
# eg. 192.168.2.10 255.255.255.0 -> 192.168.2.10/24
address = ipv4.strip().split(' ')
if len(address) == 2 and is_netmask(address[1]):
ipv4 = '{0}/{1}'.format(address[0], to_text(to_masklen(address[1])))
obj = {
'name': item,
'ipv4': ipv4,
'ipv6': parse_config_argument(configobj, item, 'ipv6 address'),
'state': 'present'
}
instances.append(obj)
return instances
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
validate_param_values(module, item, item)
obj.append(item.copy())
else:
obj.append({
'name': module.params['name'],
'ipv4': module.params['ipv4'],
'ipv6': module.params['ipv6'],
'state': module.params['state']
})
validate_param_values(module, obj)
return obj
def main():
""" main entry point for module execution
"""
element_spec = dict(
name=dict(),
ipv4=dict(),
ipv6=dict(),
state=dict(default='present',
choices=['present', 'absent'])
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['name'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
)
argument_spec.update(element_spec)
argument_spec.update(ios_argument_spec)
required_one_of = [['name', 'aggregate']]
mutually_exclusive = [['name', 'aggregate']]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
if not module.check_mode:
resp = load_config(module, commands)
warnings.extend((out for out in resp if out))
result['changed'] = True
if warnings:
result['warnings'] = warnings
module.exit_json(**result)
if __name__ == '__main__':
main()
|
[
"theyashkins@gmail.com"
] |
theyashkins@gmail.com
|
11c0ce16ec9d3be3a1ce94d97b03ab2c20e62f75
|
d996edcd595c565c5725a16286ce8d338af67246
|
/src/text_classification_benchmarks/bi_lstm/__init__.py
|
d1f2c6b6a9147402c96cf472d5bb51324ae16b49
|
[] |
no_license
|
preddy5/dltemplate
|
fbbfce7660c451495e255cf8d8437e4b4e207f9c
|
77b04b767cbd4914e0a3d3609c645e475aabcc43
|
refs/heads/master
| 2020-04-28T19:37:04.893001
| 2019-03-13T13:35:04
| 2019-03-13T13:35:04
| 175,517,056
| 1
| 1
| null | 2019-03-13T23:59:40
| 2019-03-13T23:59:39
| null |
UTF-8
|
Python
| false
| false
| 3,547
|
py
|
from argparse import ArgumentParser
from common.util import load_hyperparams, merge_dict
import csv
import os
from text_classification_benchmarks.bi_lstm.util import batch_iter, load_dataset, test, train
def run(constant_overwrites):
config_path = os.path.join(os.path.dirname(__file__), 'hyperparams.yml')
constants = merge_dict(load_hyperparams(config_path), constant_overwrites)
outdir = constants['outdir']
run_dir = constants['run_dir']
x_train, y_train, train_lengths, x_val, y_val, val_lengths, max_length, vocab_size, classes = \
load_dataset(outdir, dirname=constants['data_dir'], vocab_name=constants['vocab_name'])
if constants['test']:
print('\nTesting...')
preds = test(x_val, y_val, val_lengths, constants['test_batch_size'], run_dir, constants['checkpoint'],
constants['model_name'])
# Save all predictions
with open(os.path.join(run_dir, 'predictions.csv'), 'w', encoding='utf-8', newline='') as f:
csvwriter = csv.writer(f)
csvwriter.writerow(['True class', 'Prediction'])
for i in range(len(preds)):
csvwriter.writerow([y_val[i], preds[i]])
print('Predictions saved to {}'.format(os.path.join(run_dir, 'predictions.csv')))
else:
print('\nTraining...')
train_data = batch_iter(x_train, y_train, train_lengths, constants['batch_size'], constants['n_epochs'])
train(train_data, x_val, y_val, val_lengths, len(classes), vocab_size,
constants['n_hidden'], constants['n_layers'],
constants['l2_reg_lambda'], constants['learning_rate'],
constants['decay_steps'], constants['decay_rate'],
constants['keep_prob'], outdir, constants['num_checkpoint'],
constants['evaluate_every_steps'], constants['save_every_steps'],
constants['summaries_name'], constants['model_name'])
if __name__ == '__main__':
# read args
parser = ArgumentParser(description='Run Bi-LSTM Classifier')
    parser.add_argument('--epochs', dest='n_epochs', type=int, help='number of epochs')
    parser.add_argument('--batch-size', dest='batch_size', type=int, help='batch size')
    parser.add_argument('--hidden-size', dest='n_hidden', type=int, help='number of hidden layers')
    parser.add_argument('--embedding-size', dest='embedding_size', type=int, help='embedding size')
    parser.add_argument('--num-layers', dest='n_layers', type=int, help='number of LSTM cells')
parser.add_argument('--learning-rate', dest='learning_rate', type=float, help='learning rate')
parser.add_argument('--outdir', dest='outdir', type=str, help='save directory')
parser.add_argument('--rundir', dest='run_dir', type=str, help='run directory')
parser.add_argument('--data-dir', dest='data_dir', type=str, help='relative path to data')
parser.add_argument('--model-name', dest='model_name', type=str, help='model name')
parser.add_argument('--vocab-name', dest='vocab_name', type=str, help='vocab name')
parser.add_argument('--summaries-name', dest='summaries_name', type=str, help='summaries name')
parser.add_argument('--checkpoint', dest='checkpoint', type=str,
help='restore the graph from this model checkpoint')
parser.add_argument('--test', dest='test',
help='run eval on the test dataset using a fixed checkpoint', action='store_true')
parser.set_defaults(test=False)
args = parser.parse_args()
run(vars(args))
|
[
"markmo@me.com"
] |
markmo@me.com
|
0058be8e27a678bd5d55c56eddcdf0e87555f32f
|
eef6f6e1074a105a4554c79a80d18d5507d5c886
|
/liir/nlp/ml/eval/ConfusionMatrix.py
|
92bda9c9b36c5ecbae53b136ba97c94fbc75a722
|
[] |
no_license
|
quynhdtn/DL
|
0dc09359fd0bb1280cd005f28c454126dc101dc4
|
017ea76a1f926e989581cd6c41f984c8651a5e0d
|
refs/heads/master
| 2021-01-10T04:51:06.354273
| 2015-11-12T12:48:57
| 2015-11-12T12:48:57
| 46,052,200
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,400
|
py
|
__author__ = 'quynhdo'
import numpy as np
class ConfusionMatrix:
def __init__(self, n_classes, class_names = None):
self.n_classes = n_classes
self.class_names = class_names
self.mat = np.zeros((n_classes, n_classes), dtype='int')
def addBatch(self, y_true, y_predicted):
assert len(y_true) == len(y_predicted)
for i in range(len(y_true)):
            self.mat[y_true[i], y_predicted[i]] += 1
def __str__(self):
s = "\t"
for idx in range(self.n_classes): s += str(idx) + "\t"
s += "\n"
for i in range (len(self.mat)):
s += str(i) + "\t"
for j in range(len(self.mat[i])):
s += str(self.mat[i][j]) + "\t"
s += "\n"
return s
def getScore(self):
num_instances = np.sum(self.mat, axis=1)
predict = np.sum(self.mat, axis=0)
correct = np.diag(self.mat).flatten()
p = correct / predict * 100
r = correct / num_instances * 100
        f = np.zeros(len(p))
        for i in range(len(p)):
            if p[i] + r[i] != 0:
                f[i] = 2 * p[i] * r[i] / (p[i] + r[i])
            else:
                f[i] = 0.0
        return np.matrix([p, r, f]).transpose()
if __name__ == "__main__":
cm= ConfusionMatrix(3)
cm.addBatch([1,2,1,0],[2,2,0,0])
print (cm.__str__())
print (cm.getScore())
|
[
"quynhdtn.hut@gmail.com"
] |
quynhdtn.hut@gmail.com
|
3c4a8df0cddd9fb678ac426f9645c2dd41ee0171
|
d7e642d34530db686f76e835c7594543adebfedc
|
/例子-1122-04.函数的参数传递.py
|
58bef1ce8a72d0c07e42a9c879d7e34f482b366c
|
[] |
no_license
|
blackplusy/python1119
|
929c61f0b896737d58b77eef50428002ec2f254f
|
546ef52f29a084fd6b1d3d8df78ff0a74a3dfd71
|
refs/heads/master
| 2020-04-07T06:42:56.901263
| 2019-02-26T02:17:54
| 2019-02-26T02:17:54
| 158,148,121
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
def animal(pet1,pet2):
print(pet1+' wang!'+pet2+' miao')
# Call the function, passing in two positional arguments
animal('dog','cat')
def animal(pet1,pet2):
print(pet1+' wang!'+pet2+' miao')
animal(pet2='cat',pet1='dog')
def animal(pet2,pet1='2ha'):
print(pet1+' wang!'+pet2+' miao')
animal('bosi')
animal('pig','out man')
print('************************************')
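# *args packs any extra positional arguments into a tuple: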
def test(x,y,*args):
print(x,y,args)
test(1,2,'heygor','simida')
print('************************************')
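# **kwargs packs any extra keyword arguments into a dict: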
def test1(x,y,**args):
print(x,y,args)
test1(1,2,a=9,b='heygor',c=300)
|
[
"noreply@github.com"
] |
blackplusy.noreply@github.com
|
0d983221abcf4857628f47481dd34d54c9271ec3
|
2bc21de1b3204c677d2912f24a82ba473d90bcf1
|
/Comprehensions/09. Bunker.py
|
2748268e79e762e41f5d8ba64de754b3cb2beea7
|
[
"MIT"
] |
permissive
|
milenpenev/Python_Advanced
|
24260bbdf5b6682157acb2d73f6f77d1cdba97b1
|
2f32012dd682fa9541bbf5fa155f6bdbcfa946de
|
refs/heads/main
| 2023-05-30T16:45:09.599304
| 2021-06-27T16:00:35
| 2021-06-27T16:00:35
| 351,788,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 858
|
py
|
categories = input().split(", ")
n = int(input())
inventory = {category: [] for category in categories}
for _ in range(n):
category, item_name, quantity_quality = input().split(" - ")
quantity, quality = quantity_quality.split(";")
quantity, quality = quantity.split(":")[1], quality.split(":")[1]
quantity, quality = int(quantity), int(quality)
inventory[category].append({"name": item_name, "quantity": quantity, "quality": quality})
total_items = sum([item["quantity"] for items in inventory.values() for item in items])
avg_quality = sum([item["quality"] for items in inventory.values() for item in items])/ len(categories)
print(f"Count of items: {total_items}")
print(f"Average quality: {avg_quality:.2f}")
print('\n'.join(f'{category} -> {", ".join(item["name"] for item in inventory[category])}' for category in categories))
|
[
"milennpenev@gmail.com"
] |
milennpenev@gmail.com
|
969af7a8a9fb663b6055d1582ce6329f8b2e111a
|
48832d27da16256ee62c364add45f21b968ee669
|
/res/scripts/client/messenger/gui/scaleform/data/contacts_data_provider.py
|
60f6130d71aaf2ce5f7e3e1233986d5067020433
|
[] |
no_license
|
webiumsk/WOT-0.9.15.1
|
0752d5bbd7c6fafdd7f714af939ae7bcf654faf7
|
17ca3550fef25e430534d079876a14fbbcccb9b4
|
refs/heads/master
| 2021-01-20T18:24:10.349144
| 2016-08-04T18:08:34
| 2016-08-04T18:08:34
| 64,955,694
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 29,957
|
py
|
# 2016.08.04 19:54:02 Central Europe (daylight saving time)
# Embedded file name: scripts/client/messenger/gui/Scaleform/data/contacts_data_provider.py
import re
import Event
from gui.Scaleform.framework.entities.DAAPIDataProvider import DAAPIDataProvider
from gui.Scaleform.genConsts.CONTACTS_ALIASES import CONTACTS_ALIASES
from messenger.gui.Scaleform.data import contacts_vo_converter as _vo_converter
from messenger.m_constants import USER_TAG as _TAG
from messenger.m_constants import USER_ACTION_ID as _ACTION_ID
from messenger.proto.events import g_messengerEvents
from messenger.proto.shared_find_criteria import UserTagsFindCriteria
from messenger.storage import storage_getter
from account_helpers.settings_core.SettingsCore import g_settingsCore
class _Category(object):
__slots__ = ('_converter', '_visible', '_avoid', '_forced')
def __init__(self, categoryID, forced = False):
super(_Category, self).__init__()
self._converter = _vo_converter.CategoryConverter(categoryID)
self._visible = True
self._avoid = False
self._forced = forced
def clear(self, full = False):
self._avoid = False
def getID(self):
return self._converter.getCategoryID()
def isVisible(self):
return self._visible
def setVisible(self, value):
if value != self._visible:
self._visible = value
result = True
else:
result = False
return result
def isAvoid(self):
return self._avoid
def isEmpty(self):
raise NotImplementedError
def hasContacts(self):
raise NotImplementedError
def addContact(self, contact):
raise NotImplementedError
def updateContact(self, contact):
raise NotImplementedError
def removeContact(self, contact):
raise NotImplementedError
def getTags(self):
raise NotImplementedError
def toggleGroup(self, name):
raise NotImplementedError
def setGroupsMutable(self, value):
pass
def setOnlineMode(self, value):
pass
def changeGroups(self, include = None, exclude = None):
return False
def showEmptyItem(self, value):
pass
def getGroups(self, pattern = None):
raise NotImplementedError
def getData(self, pattern = None):
groups = self.getGroups(pattern)
if groups or self._forced and pattern is None:
data = self._converter.makeVO(groups)
self._avoid = False
else:
data = None
self._avoid = True
if data and len(data) == 1 and self.isEmpty():
data = []
return data
def getContactsDict(self):
return None
def setAction(self, actionID, contact):
return self.updateContact(contact)
def setStatus(self, contact):
return self.updateContact(contact)
class _FriendsCategory(_Category):
__slots__ = ('_woGroup', '_groups', '__currentParent', '__showEmptyItems')
def __init__(self):
super(_FriendsCategory, self).__init__(CONTACTS_ALIASES.GROUP_FRIENDS_CATEGORY_ID)
self.__currentParent = self._converter.makeBaseVO()
self._woGroup = _vo_converter.FriendsWoGroupConverter(self.__currentParent)
self._groups = _vo_converter.FriendsGroupsConverter(self.__currentParent)
self.__showEmptyItems = False
def clear(self, full = False):
self._woGroup.clear(full)
self._groups.clear(full)
super(_FriendsCategory, self).clear(full)
def getTags(self):
return {_TAG.FRIEND}
def isEmpty(self):
return self._woGroup.isEmpty() and self._groups.isEmpty()
def hasContacts(self):
return not self._woGroup.isEmpty() or self._groups.hasContacts()
def toggleGroup(self, name):
group = self._groups.getGroup(name)
if group:
group.toggle()
return group is not None
def setGroupsMutable(self, value):
self._converter.setMutable(value)
self._groups.setMutable(value)
def setOnlineMode(self, value):
if value:
clazz = _vo_converter.OnlineOnlyCondition
else:
clazz = _vo_converter.OnlineTotalCondition
self._groups.setConditionClass(clazz)
self._woGroup.setConditionClass(clazz)
def showEmptyItem(self, value):
self.__showEmptyItems = value
self._groups.showEmptyItem(value)
self._woGroup.showEmptyItem(value)
def changeGroups(self, include = None, exclude = None, isOpened = False):
if include:
self._groups.setGroups(include, isOpened)
if exclude:
self._groups.removeGroups(exclude)
return True
def addContact(self, contact):
if _TAG.FRIEND not in contact.getTags():
return False
if contact.getGroups():
self._groups.setContact(contact)
else:
self._woGroup.setContact(contact)
return True
def updateContact(self, contact):
dbID = contact.getID()
for converter in (self._groups, self._woGroup):
if converter.hasContact(dbID):
converter.setContact(contact)
return True
return False
def removeContact(self, contact):
dbID = contact.getID()
for converter in (self._groups, self._woGroup):
if converter.hasContact(dbID):
converter.removeContact(dbID)
return True
return False
def getGroups(self, pattern = None):
data = self._groups.makeVO(pattern)
hasWoContacts = self._woGroup.hasContacts()
if pattern is None and not data and not hasWoContacts:
if self.__showEmptyItems:
data.append(self._woGroup.makeEmptyRow(self.__currentParent))
data.append(self._woGroup.makeEmptyRow(self.__currentParent, False, False))
else:
woList = self._woGroup.makeVO(pattern)
if len(woList) > 0:
if not hasWoContacts:
lastElement = woList[0]
if lastElement['gui']['id'] is None:
lastElement['data']['isVisible'] = False
else:
woList.append(self._woGroup.makeEmptyRow(self.__currentParent, False, False))
elif data:
woList.append(self._woGroup.makeEmptyRow(self.__currentParent, False, True))
elif pattern is None:
woList.append(self._woGroup.makeEmptyRow(self.__currentParent, False, False))
data.extend(woList)
return data
def getContactsDict(self):
resultDict = self._woGroup.getContacts()
resultDict.update(self._groups.getContacts())
return resultDict
def setAction(self, actionID, contact):
result = False
checkIsEmptyNeeded = False
if actionID == _ACTION_ID.FRIEND_REMOVED:
result = self.removeContact(contact)
elif actionID == _ACTION_ID.IGNORED_ADDED:
result = self.removeContact(contact)
checkIsEmptyNeeded = True
elif actionID == _ACTION_ID.FRIEND_ADDED:
result = self.addContact(contact)
elif actionID == _ACTION_ID.GROUPS_CHANGED:
result = self.removeContact(contact)
if result:
result = self.addContact(contact)
elif actionID == _ACTION_ID.SUBSCRIPTION_CHANGED:
result = self.updateContact(contact)
if not result:
result = self.addContact(contact)
checkIsEmptyNeeded = True
elif actionID == _ACTION_ID.NOTE_CHANGED:
result = self.updateContact(contact)
if checkIsEmptyNeeded:
if not result and self.__showEmptyItems:
if self.isEmpty() or not self.hasContacts():
result = True
return result
class _FormationCategory(_Category):
__slots__ = ('_clan', '_club', '__parentItemData')
def __init__(self):
super(_FormationCategory, self).__init__(CONTACTS_ALIASES.GROUP_FORMATIONS_CATEGORY_ID)
self.__parentItemData = self._converter.makeBaseVO()
self._clan = _vo_converter.ClanConverter(self.__parentItemData, self.playerCtx.getClanAbbrev())
self._club = _vo_converter.ClubConverter(self.__parentItemData, self.playerCtx.getMyClubName())
@storage_getter('playerCtx')
def playerCtx(self):
return None
def clear(self, full = False):
self._clan.clear(full)
self._club.clear(full)
super(_FormationCategory, self).clear(full)
def getContactsDict(self):
return self._clan.getContacts()
def getTags(self):
return {_TAG.CLAN_MEMBER, _TAG.CLUB_MEMBER}
def isEmpty(self):
return self._clan.isEmpty() and self._club.isEmpty()
def hasContacts(self):
return self._clan.hasContacts() or self._club.hasContacts()
def toggleGroup(self, name):
result = False
if name == self._clan.getName():
self._clan.toggle()
result = True
elif name == self._club.getName():
self._club.toggle()
result = True
return result
def setOnlineMode(self, value):
if value:
clazz = _vo_converter.OnlineOnlyCondition
else:
clazz = _vo_converter.OnlineTotalCondition
self._clan.setConditionClass(clazz)
self._club.setConditionClass(clazz)
def updateClanAbbrev(self):
self._clan.setClanAbbrev(self.playerCtx.getClanAbbrev())
def updateClubName(self):
self._club.setClubName(self.playerCtx.getMyClubName())
def addContact(self, contact):
result = False
tags = contact.getTags()
if _TAG.CLAN_MEMBER in tags:
result = self._clan.setContact(contact)
if _TAG.CLUB_MEMBER in tags:
result |= self._club.setContact(contact)
return result
def updateContact(self, contact):
dbID = contact.getID()
result = False
for converter in self._getIterator():
if converter.hasContact(dbID):
result |= converter.setContact(contact)
return result
def removeContact(self, contact):
dbID = contact.getID()
result = False
for converter in self._getIterator():
if converter.hasContact(dbID):
result |= converter.removeContact(dbID)
return result
def getGroups(self, pattern = None):
data = []
for converter in self._getIterator():
if not converter.isEmpty():
vos = converter.makeVO(pattern)
if vos:
data.append(vos)
if data:
data.append(self._club.makeEmptyRow(self.__parentItemData, False, False))
return data
def _getIterator(self):
for converter in (self._clan, self._club):
yield converter
class _OthersCategory(_Category):
__slots__ = ('_ignored', '_pending', '_referrers', '_referrals')
def __init__(self):
super(_OthersCategory, self).__init__(CONTACTS_ALIASES.GROUP_OTHER_CATEGORY_ID)
self._ignored = _vo_converter.IgnoredConverter(self._converter.makeBaseVO())
self._pending = _vo_converter.RqFriendshipConverter(self._converter.makeBaseVO())
self._referrers = _vo_converter.ReferrersConverter(self._converter.makeBaseVO())
self._referrals = _vo_converter.ReferralsConverter(self._converter.makeBaseVO())
def clear(self, full = False):
for group in self._iterGroups():
group.clear(full)
super(_OthersCategory, self).clear(full)
def getTags(self):
return {_TAG.IGNORED,
_TAG.REFERRER,
_TAG.REFERRAL,
_TAG.SUB_PENDING_IN}
def isEmpty(self):
for group in self._iterGroups():
if not group.isEmpty():
return False
return True
def hasContacts(self):
pass
def toggleGroup(self, name):
for group in self._iterGroups():
if group.getName() == name:
group.toggle()
return True
return False
def addContact(self, contact):
result = False
tags = contact.getTags()
if _TAG.IGNORED in tags:
result = self._ignored.setContact(contact)
if not contact.isFriend() and _TAG.SUB_PENDING_IN in tags:
result = self._pending.setContact(contact)
if _TAG.REFERRER in tags:
result = self._referrers.setContact(contact)
if _TAG.REFERRAL in tags:
result = self._referrals.setContact(contact)
return result
def updateContact(self, contact):
result = False
for group in self._iterGroups():
if group.hasContact(contact.getID()):
group.setContact(contact)
result = True
return result
def removeContact(self, contact):
dbID = contact.getID()
result = False
for group in self._iterGroups():
if group.removeContact(dbID):
result = True
return result
def getGroups(self, pattern = None):
data = []
for group in self._iterGroups():
if not group.isEmpty():
vos = group.makeVO(pattern)
if vos:
data.append(vos)
return data
def setAction(self, actionID, contact):
dbID = contact.getID()
tags = contact.getTags()
result = False
if actionID == _ACTION_ID.IGNORED_ADDED:
result = self._ignored.setContact(contact)
elif actionID in (_ACTION_ID.IGNORED_REMOVED, _ACTION_ID.FRIEND_ADDED):
result = self._ignored.removeContact(dbID)
elif actionID == _ACTION_ID.SUBSCRIPTION_CHANGED:
if not contact.isFriend() and _TAG.SUB_PENDING_IN in contact.getTags():
result = self._pending.setContact(contact)
else:
result = self._pending.removeContact(dbID)
if _TAG.REFERRER in tags:
result = self._referrers.setContact(contact)
if _TAG.REFERRAL in tags:
result = self._referrals.setContact(contact)
if actionID == _ACTION_ID.NOTE_CHANGED:
result = self.updateContact(contact)
return result
def _iterGroups(self):
for group in (self._pending,
self._ignored,
self._referrers,
self._referrals):
yield group
class _ContactsCriteria(UserTagsFindCriteria):
def __init__(self, tags):
super(_ContactsCriteria, self).__init__(tags, None)
return
def filter(self, user):
result = False
if not user.isCurrentPlayer():
result = super(_ContactsCriteria, self).filter(user)
return result
class _ContactsCategories(object):
__slots__ = ('_categories', '_onlineMode', '_pattern', '_cache')
def __init__(self):
super(_ContactsCategories, self).__init__()
self._categories = [_FriendsCategory(), _FormationCategory()]
self._onlineMode = None
self._pattern = None
self._cache = []
return
def clear(self, full = False):
for category in self._categories:
category.clear(full)
self._pattern = None
return
def isEmpty(self):
for category in self._iterCategories():
if not category.isEmpty():
return False
return True
def setOnlineMode(self, mode):
for category in self._iterCategories():
category.setOnlineMode(mode)
def showOthersCategory(self, value):
isAlreadyHasOthersCategory = self.__hasOtherCategory()
if value:
if not isAlreadyHasOthersCategory:
self._categories.append(_OthersCategory())
elif isAlreadyHasOthersCategory:
self._categories.pop()
def setVisible(self, categoryID, value):
for category in self._categories:
if category.getID() == categoryID:
category.setVisible(value)
return True
return False
def setAction(self, actionID, contact):
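        # Rebuilds (and caches) display data only for the categories the action
        # actually changed, then reassembles the list from the per-category cache.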
result = False
for idx, category in enumerate(self._iterCategories()):
if idx >= len(self._cache):
self._cache.append(None)
if category.setAction(actionID, contact):
self._cache[idx] = category.getData(self._pattern)
result = True
data = filter(lambda item: bool(item), self._cache)
if len(data) == 1 and self.isEmpty():
data = []
return (result, data)
def setStatus(self, contact):
result = False
for idx, category in enumerate(self._iterCategories()):
if idx >= len(self._cache):
self._cache.append(None)
if category.setStatus(contact):
self._cache[idx] = category.getData(self._pattern)
result = True
data = filter(lambda item: bool(item), self._cache)
if len(data) == 1 and self.isEmpty():
data = []
return (result, data)
def getCriteria(self, full = False):
tags = set()
if full:
categories = self._categories
else:
categories = self._iterCategories()
for category in categories:
tags.update(category.getTags())
return _ContactsCriteria(tags)
def addContact(self, contact):
for category in self._iterCategories():
category.addContact(contact)
def getData(self):
self._cache = []
data = []
for category in self._iterCategories():
categoryData = category.getData(self._pattern)
self._cache.append(categoryData)
if categoryData:
data.append(categoryData)
if len(data) == 1 and self.isEmpty():
data = []
return data
def applySearchFilter(self, searchCriteria):
if searchCriteria:
self._pattern = re.compile(re.escape(searchCriteria), re.I)
else:
self._pattern = None
return self.getData()
def toggleGroup(self, categoryID, groupName):
result = False
for idx, category in enumerate(self._iterCategories()):
if idx >= len(self._cache):
self._cache.append(None)
if category.getID() != categoryID:
continue
if category.toggleGroup(groupName):
self._cache[idx] = category.getData(self._pattern)
result = True
break
data = filter(lambda item: bool(item), self._cache)
if len(data) == 1 and self.isEmpty():
data = []
return (result, data)
def changeGroups(self, categoryID, include = None, exclude = None, isOpened = False):
result = False
for idx, category in enumerate(self._iterCategories()):
if idx >= len(self._cache):
self._cache.append(None)
if category.getID() != categoryID:
continue
if category.changeGroups(include, exclude, isOpened):
self._cache[idx] = category.getData(self._pattern)
result = True
break
data = filter(lambda item: bool(item), self._cache)
if len(data) == 1 and self.isEmpty():
data = []
return (result, data)
def findCategory(self, categoryID):
idx = 0
for category in self._iterCategories():
if category.getID() == categoryID:
return (idx, category)
idx += 1
return (-1, None)
def getContactsList(self):
resultDict = {}
for category in self._iterCategories():
if category.isAvoid():
continue
resultDict.update(category.getContactsDict())
resultList = []
for contact in resultDict.itervalues():
if self._pattern is not None:
if self._pattern.match(contact['criteria'][1]) is not None:
resultList.append(contact['data'])
else:
resultList.append(contact['data'])
return resultList
def _iterCategories(self):
for category in self._categories:
if category.isVisible():
yield category
def __hasOtherCategory(self):
return len(self._categories) > 2
class _OpenedTreeCreator():
def __init__(self):
self.__openedTree = None
return
def build(self, targetList):
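        # Flattens the category tree into a display list; a child row is
        # included only when its parent node is marked as opened.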
self.__openedTree = []
for iCategory in targetList:
self.__openTree(iCategory, None, True)
return self.__openedTree
def __openTree(self, targetTreeItem, parent, isOpened):
children = targetTreeItem.get('children', None)
if isOpened:
self.__openedTree.append(targetTreeItem)
if children is not None:
for child in children:
self.__openTree(child, targetTreeItem, targetTreeItem.get('isOpened', False))
return
class ContactsDataProvider(DAAPIDataProvider):
def __init__(self):
super(ContactsDataProvider, self).__init__()
self.__categories = _ContactsCategories()
self.__showEmptyGroups = True
self.__isEmpty = True
self.__list = []
self.onTotalStatusChanged = Event.Event()
g_settingsCore.onSettingsChanged += self.__onSettingsChanged
@storage_getter('users')
def usersStorage(self):
return None
def refresh(self):
super(ContactsDataProvider, self).refresh()
@property
def collection(self):
return self.__list
def buildList(self):
self.__categories.clear()
self.__list = []
self.__setEmpty()
if self.__isEmpty:
return
contacts = self.usersStorage.getList(self.__categories.getCriteria())
if self.__showEmptyGroups:
_, friendsCategory = self.__categories.findCategory(CONTACTS_ALIASES.GROUP_FRIENDS_CATEGORY_ID)
if friendsCategory:
friendsCategory.showEmptyItem(True)
friendsCategory.changeGroups(self.usersStorage.getEmptyGroups())
for contact in contacts:
if _TAG.CACHED not in contact.getTags():
self.__categories.addContact(contact)
self.__updateCollection(self.__categories.getData())
def emptyItem(self):
return None
def pyRequestItemRange(self, startIndex, endIndex):
item_range = super(ContactsDataProvider, self).pyRequestItemRange(startIndex, endIndex)
return item_range
def getContactsList(self):
return self.__categories.getContactsList()
def isEmpty(self):
return self.__isEmpty
def hasDisplayingContacts(self):
for cName in (CONTACTS_ALIASES.GROUP_FRIENDS_CATEGORY_ID, CONTACTS_ALIASES.GROUP_FORMATIONS_CATEGORY_ID, CONTACTS_ALIASES.GROUP_OTHER_CATEGORY_ID):
_, category = self.__categories.findCategory(cName)
if category and category.hasContacts():
return True
return False
def setShowEmptyGroups(self, value):
self.__showEmptyGroups = value
_, friendsCategory = self.__categories.findCategory(CONTACTS_ALIASES.GROUP_FRIENDS_CATEGORY_ID)
if friendsCategory:
friendsCategory.showEmptyItem(self.__showEmptyGroups)
def setOnlineMode(self, value):
self.__categories.setOnlineMode(value)
def setFriendsVisible(self, value):
self.__categories.setVisible(CONTACTS_ALIASES.GROUP_FRIENDS_CATEGORY_ID, value)
def setFormationsVisible(self, value):
self.__categories.setVisible(CONTACTS_ALIASES.GROUP_FORMATIONS_CATEGORY_ID, value)
def setOthersVisible(self, value):
if value is None:
self.__categories.showOthersCategory(False)
else:
self.__categories.showOthersCategory(True)
self.__categories.setVisible(CONTACTS_ALIASES.GROUP_OTHER_CATEGORY_ID, value)
return
def setFriendsGroupMutable(self, value):
_, category = self.__categories.findCategory(CONTACTS_ALIASES.GROUP_FRIENDS_CATEGORY_ID)
if category:
category.setGroupsMutable(value)
def setSearchFilter(self, searchCriteria):
if not self.__isEmpty:
self.__updateCollection(self.__categories.applySearchFilter(searchCriteria))
return True
else:
return False
def toggleGroup(self, categoryID, groupName):
result, data = self.__categories.toggleGroup(categoryID, groupName)
if result:
self.__updateCollection(data)
self.refresh()
def addContactsListeners(self):
events = g_messengerEvents.users
events.onUsersListReceived += self.__me_onUsersListReceived
events.onUserActionReceived += self.__me_onUserActionReceived
events.onUserStatusUpdated += self.__me_onUserStatusUpdated
events.onClanMembersListChanged += self.__me_onClanMembersListChanged
events.onFriendshipRequestsAdded += self.__me_onFriendshipRequestsAdded
events.onFriendshipRequestsUpdated += self.__me_onFriendshipRequestsUpdated
events.onEmptyGroupsChanged += self.__me_onEmptyGroupsChanged
events.onNotesListReceived += self.__me_onNotesListReceived
def removeContactsListeners(self):
events = g_messengerEvents.users
events.onUsersListReceived -= self.__me_onUsersListReceived
events.onUserActionReceived -= self.__me_onUserActionReceived
events.onUserStatusUpdated -= self.__me_onUserStatusUpdated
events.onClanMembersListChanged -= self.__me_onClanMembersListChanged
events.onFriendshipRequestsAdded -= self.__me_onFriendshipRequestsAdded
events.onFriendshipRequestsUpdated -= self.__me_onFriendshipRequestsUpdated
events.onEmptyGroupsChanged -= self.__me_onEmptyGroupsChanged
events.onNotesListReceived -= self.__me_onNotesListReceived
def _dispose(self):
self.__categories.clear(True)
g_settingsCore.onSettingsChanged -= self.__onSettingsChanged
super(ContactsDataProvider, self)._dispose()
def __setEmpty(self):
groups = self.usersStorage.getEmptyGroups()
if groups and self.__showEmptyGroups:
isEmpty = False
else:
isEmpty = not self.usersStorage.getCount(self.__categories.getCriteria(True))
if self.__isEmpty != isEmpty:
self.__isEmpty = isEmpty
return True
else:
return False
def __updateContacts(self, actionID, contacts):
setAction = self.__categories.setAction
result, data = False, []
for contact in contacts:
updated, data = setAction(actionID, contact)
result |= updated
if result:
self.__updateCollection(data)
self.refresh()
self.__setEmpty()
self.onTotalStatusChanged()
def __updateCollection(self, targetList):
self.__list = _OpenedTreeCreator().build(targetList)
def __onSettingsChanged(self, diff):
if 'isColorBlind' in diff:
self.buildList()
self.refresh()
def __me_onUsersListReceived(self, tags):
if _TAG.CACHED not in tags:
if _TAG.CLUB_MEMBER in tags:
_, category = self.__categories.findCategory(CONTACTS_ALIASES.GROUP_FORMATIONS_CATEGORY_ID)
if category:
category.updateClubName()
self.buildList()
self.refresh()
self.onTotalStatusChanged()
def __me_onUserActionReceived(self, actionID, contact):
result, data = self.__categories.setAction(actionID, contact)
if result:
self.__updateCollection(data)
self.refresh()
self.__setEmpty()
self.onTotalStatusChanged()
def __me_onUserStatusUpdated(self, contact):
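        # Note: despite its name, this flag records whether the list currently
        # has items; it is compared again after the update to detect a change
        # in emptiness (the same pattern recurs in __me_onEmptyGroupsChanged).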
isEmpty = len(self.__list) > 0
result, data = self.__categories.setStatus(contact)
if result:
self.__updateCollection(data)
self.refresh()
if isEmpty != (len(self.__list) > 0):
self.onTotalStatusChanged()
def __me_onClanMembersListChanged(self):
_, category = self.__categories.findCategory(CONTACTS_ALIASES.GROUP_FORMATIONS_CATEGORY_ID)
if category:
category.updateClanAbbrev()
self.buildList()
self.refresh()
self.onTotalStatusChanged()
def __me_onFriendshipRequestsAdded(self, contacts):
self.__updateContacts(_ACTION_ID.SUBSCRIPTION_CHANGED, contacts)
def __me_onFriendshipRequestsUpdated(self, contacts):
self.__updateContacts(_ACTION_ID.SUBSCRIPTION_CHANGED, contacts)
def __me_onEmptyGroupsChanged(self, include, exclude):
if not self.__showEmptyGroups:
return
result, data = self.__categories.changeGroups(CONTACTS_ALIASES.GROUP_FRIENDS_CATEGORY_ID, include, exclude, True)
if result:
isEmpty = len(self.__list) > 0
self.__updateCollection(data)
self.refresh()
if isEmpty != (len(self.__list) > 0):
self.__setEmpty()
self.onTotalStatusChanged()
def __me_onNotesListReceived(self):
self.buildList()
self.refresh()
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\messenger\gui\scaleform\data\contacts_data_provider.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 19:54:03 Central Europe (daylight saving time)
|
[
"info@webium.sk"
] |
info@webium.sk
|
3c1d61bdcad5d0817c3e678f870318b7bb51cbd1
|
13d2fe21726a2a528b6eb165e6a5ebe13f08b9f2
|
/sqltext/lianxi17.py
|
2676fe262b34e540e075a06562a572641944c69e
|
[] |
no_license
|
123456thomas/python_learn
|
aa49e898b9ede549a3e1c376eae10f0f1c09ca7d
|
9891d2e988ebf8896360f60a24d61430e538bf3e
|
refs/heads/master
| 2020-04-16T10:01:55.245420
| 2019-01-13T10:30:13
| 2019-01-13T10:35:11
| 165,487,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
import pymysql
con=pymysql.connect(host="localhost", user="root", password="361365",
database="mytest", port=3306)
cur=con.cursor()
# cur.execute("create table test1(Id int primary key,Name varchar(20) not null)")
cur.execute("create table test2(id int primary key,name varchar(20) not null,userid int, foreign key(userid) references test1(Id))")
|
[
"17625809083@sina.cn"
] |
17625809083@sina.cn
|
a1d7d3f60a17d5091571fd8669f336b136cab517
|
f6d7c30a7ed343e5fe4859ceaae1cc1965d904b7
|
/htdocs/submissions/a1d7d3f60a17d5091571fd8669f336b136cab517.py
|
aea45ae8c7ebf0de25e7c0894f3e9f43a33fa3e3
|
[] |
no_license
|
pycontest/pycontest.github.io
|
ed365ebafc5be5d610ff9d97001240289de697ad
|
606015cad16170014c41e335b1f69dc86250fb24
|
refs/heads/master
| 2021-01-10T04:47:46.713713
| 2016-02-01T11:03:46
| 2016-02-01T11:03:46
| 50,828,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
d={'0':' ','1':' _ ','2':' |','3':' _|','4':'|_ ','5':'|_|','6':'| |'};k='101101111162335442555243235253'
def f(x,y):
i=int(y);x[0]+=d[k[i]];x[1]+=d[k[i+10]];x[2]+=d[k[i+20]];return x
def seven_seg(x):
return reduce(lambda x,y:x+y+'\n',reduce(f,x,['','','']),'')
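# A usage sketch for reference (this is a Python 2 code-golf entry, so reduce()
# is a builtin; on Python 3 you would first need `from functools import reduce`):
# print(seven_seg('123'))
# ->     _  _ 
# ->   | _| _|
# ->   ||_  _|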
|
[
"info@pycontest.net"
] |
info@pycontest.net
|
d370b6da42bc60d3d21c9bdde90a9441a4a77354
|
c33496682b760deac61fedecba3e82ce4e41dfde
|
/scripts/e259.py
|
d66c4c06b2e02569f32f6d438c8330a5424b6a19
|
[
"MIT"
] |
permissive
|
ferasalsaab/neuralnilm_prototype
|
c5e9cde02d475ac499b15fea62143e76adff07d0
|
2119292e7d5c8a137797ad3c9abf9f37e7f749af
|
refs/heads/master
| 2020-04-16T14:38:03.615279
| 2018-01-29T15:30:43
| 2018-01-29T15:30:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,253
|
py
|
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from lasagne.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
from copy import deepcopy
from math import sqrt
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e233
based on e131c but with:
* lag=32
* pool
e234
* init final layer and conv layer
235
no lag
236
should be exactly as 131c: no pool, no lag, no init for final and conv layer
237
putting the pool back
238
seems pooling hurts us! disable pooling.
enable lag = 32
239
BLSTM
lag = 20
240
LSTM not BLSTM
various lags
241
output is prediction
252
attempt to predict fdiff 1 sample ahead. Unfair?
253
regurgitate fdiff from 1 sample ago
254
lag of 10 time steps
255
lag of 5 time steps
257
slowly increasing lag
258
output is different appliances diff
259
start off just trying to regurgitate diff of aggregate
then swap to disaggregation (to diff)
"""
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[2500]*5,
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
# skip_probability=0.0,
n_seq_per_batch=50,
# subsample_target=5,
include_diff=True,
include_power=False,
clip_appliance_power=True,
target_is_prediction=True,
lag=1,
target_is_diff=True
)
def change_learning_rate(net, epoch, learning_rate):
net.updates = partial(nesterov_momentum, learning_rate=learning_rate)
net.compile()
def change_lag(net, epoch, lag):
net.source.lag = lag
net.compile()
from theano.ifelse import ifelse
import theano.tensor as T
THRESHOLD = 0
def scaled_cost(x, t):
sq_error = (x - t) ** 2
def mask_and_mean_sq_error(mask):
masked_sq_error = sq_error[mask.nonzero()]
mean = masked_sq_error.mean()
mean = ifelse(T.isnan(mean), 0.0, mean)
return mean
above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD)
below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD)
return (above_thresh_mean + below_thresh_mean) / 2.0
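# Worked example for reference: with THRESHOLD = 0, x = [1., 3.] and t = [0., 2.]
# give an above-threshold mean of (3-2)**2 = 1 and a below-threshold mean of
# (1-0)**2 = 1, so scaled_cost = (1 + 1) / 2 = 1.0.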
def new_source(net, epoch):
source_dict_copy = deepcopy(source_dict)
source_dict_copy['target_is_prediction'] = False
net.source = RealApplianceSource(**source_dict_copy)
net.generate_validation_data_and_set_shapes()
net.loss_function = scaled_cost
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=mse,
updates=partial(nesterov_momentum, learning_rate=0.1),
layers_config=[
{
'type': LSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1.)
}
],
layer_changes={
1001: {
'remove_from': -3,
'callback': new_source,
'new_layers': [
{
'type': DenseLayer,
'num_units': 5,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(50)))
}
]
}
}
)
def exp_x(name):
global source
    source = RealApplianceSource(**source_dict)  # required: without this, `source` below is undefined
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'].append(
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=(1/sqrt(50)))
}
)
net = Net(**net_dict_copy)
return net
def main():
experiment = 'a'
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
print("***********************************")
print("Preparing", full_exp_name, "...")
try:
net = exp_x(full_exp_name)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
return
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
print("EXCEPTION:", exception)
if __name__ == "__main__":
main()
|
[
"jack-list@xlk.org.uk"
] |
jack-list@xlk.org.uk
|
31bc673f6d080c081b787817a51e382be8d91600
|
8e69eee9b474587925e22413717eb82e4b024360
|
/v2.5.7/otp/chat/TalkHandle.py
|
d5919401c995d3c50e5367a9d6249955c0aa44df
|
[
"MIT"
] |
permissive
|
TTOFFLINE-LEAK/ttoffline
|
afaef613c36dc3b70514ccee7030ba73c3b5045b
|
bb0e91704a755d34983e94288d50288e46b68380
|
refs/heads/master
| 2020-06-12T15:41:59.411795
| 2020-04-17T08:22:55
| 2020-04-17T08:22:55
| 194,348,185
| 5
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,749
|
py
|
from otp.avatar.AvatarHandle import AvatarHandle
class TalkHandle(AvatarHandle):
def __init__(self, doId, message):
self.avatarId = doId
self.avatarName = None
self.accountId = None
self.accountName = None
self.addMessageInfo(message)
return
def getName(self):
return self.avatarName
def isUnderstandable(self):
return False
def isOnline(self):
return False
def addMessageInfo(self, message):
if self.avatarId == message.getSenderAvatarId():
if not self.avatarName and message.getSenderAvatarName():
self.avatarName = message.getSenderAvatarName()
if not self.accountId and message.getSenderAccountId():
self.accountId = message.getSenderAccountId()
if not self.accountName and message.getSenderAccountName():
self.accountName = message.getSenderAccountName()
else:
if self.avatarId == message.getReceiverAvatarId():
if not self.avatarName and message.getReceiverAvatarName():
self.avatarName = message.getReceiverAvatarName()
if not self.accountId and message.getReceiverAccountId():
self.accountId = message.getReceiverAccountId()
if not self.accountName and message.getReceiverAccountName():
self.accountName = message.getReceiverAccountName()
def setTalkWhisper(self, fromAV, fromAC, avatarName, chat, mods, flags):
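        # Note: localAvatar and base are engine-level globals injected into
        # builtins by the game client, not imports of this module.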
newText, scrubbed = localAvatar.scrubTalk(chat, mods)
base.talkAssistant.receiveWhisperTalk(fromAV, avatarName, fromAC, None, self.avatarId, self.getName(), newText, scrubbed)
return
|
[
"s0mberdemise@protonmail.com"
] |
s0mberdemise@protonmail.com
|
f4a0ac7b71c7b755a827b97478b5f834db35f4d6
|
39e647e9ec8524a1cee90ef15f37a3d3bbf8ac43
|
/poet/trunk/pythonLibs/Django-1.3/tests/regressiontests/file_uploads/views.py
|
f1cd8a65ea56a2d24fc625d24925f4d5b6483f61
|
[
"BSD-3-Clause"
] |
permissive
|
AgileAdaptiveTools/POETTools
|
85158f043e73b430c1d19a172b75e028a15c2018
|
60244865dd850a3e7346f9c6c3daf74ea1b02448
|
refs/heads/master
| 2021-01-18T14:46:08.025574
| 2013-01-28T19:18:11
| 2013-01-28T19:18:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,999
|
py
|
import os
from django.core.files.uploadedfile import UploadedFile
from django.http import HttpResponse, HttpResponseServerError
from django.utils import simplejson
from models import FileModel, UPLOAD_TO
from uploadhandler import QuotaUploadHandler, ErroringUploadHandler
from django.utils.hashcompat import sha_constructor
from tests import UNICODE_FILENAME
def file_upload_view(request):
"""
Check that a file upload can be updated into the POST dictionary without
going pear-shaped.
"""
form_data = request.POST.copy()
form_data.update(request.FILES)
if isinstance(form_data.get('file_field'), UploadedFile) and isinstance(form_data['name'], unicode):
# If a file is posted, the dummy client should only post the file name,
# not the full path.
if os.path.dirname(form_data['file_field'].name) != '':
return HttpResponseServerError()
return HttpResponse('')
else:
return HttpResponseServerError()
def file_upload_view_verify(request):
"""
Use the sha digest hash to verify the uploaded contents.
"""
form_data = request.POST.copy()
form_data.update(request.FILES)
for key, value in form_data.items():
if key.endswith('_hash'):
continue
if key + '_hash' not in form_data:
continue
submitted_hash = form_data[key + '_hash']
if isinstance(value, UploadedFile):
new_hash = sha_constructor(value.read()).hexdigest()
else:
new_hash = sha_constructor(value).hexdigest()
if new_hash != submitted_hash:
return HttpResponseServerError()
# Adding large file to the database should succeed
largefile = request.FILES['file_field2']
obj = FileModel()
obj.testfile.save(largefile.name, largefile)
return HttpResponse('')
def file_upload_unicode_name(request):
# Check to see if unicode name came through properly.
if not request.FILES['file_unicode'].name.endswith(UNICODE_FILENAME):
return HttpResponseServerError()
response = None
# Check to make sure the exotic characters are preserved even
# through file save.
uni_named_file = request.FILES['file_unicode']
obj = FileModel.objects.create(testfile=uni_named_file)
full_name = u'%s/%s' % (UPLOAD_TO, uni_named_file.name)
if not os.path.exists(full_name):
response = HttpResponseServerError()
# Cleanup the object with its exotic file name immediately.
# (shutil.rmtree used elsewhere in the tests to clean up the
# upload directory has been seen to choke on unicode
# filenames on Windows.)
obj.delete()
os.unlink(full_name)
if response:
return response
else:
return HttpResponse('')
def file_upload_echo(request):
"""
Simple view to echo back info about uploaded files for tests.
"""
r = dict([(k, f.name) for k, f in request.FILES.items()])
return HttpResponse(simplejson.dumps(r))
def file_upload_quota(request):
"""
Dynamically add in an upload handler.
"""
request.upload_handlers.insert(0, QuotaUploadHandler())
return file_upload_echo(request)
def file_upload_quota_broken(request):
"""
You can't change handlers after reading FILES; this view shouldn't work.
"""
response = file_upload_echo(request)
request.upload_handlers.insert(0, QuotaUploadHandler())
return response
def file_upload_getlist_count(request):
"""
Check the .getlist() function to ensure we receive the correct number of files.
"""
file_counts = {}
for key in request.FILES.keys():
file_counts[key] = len(request.FILES.getlist(key))
return HttpResponse(simplejson.dumps(file_counts))
def file_upload_errors(request):
request.upload_handlers.insert(0, ErroringUploadHandler())
return file_upload_echo(request)
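# A hedged sketch of a minimal Django 1.3-era URLconf wiring for these test
# views; the module path and URL patterns here are assumptions, not taken from
# the original suite:
# from django.conf.urls.defaults import patterns
# urlpatterns = patterns('regressiontests.file_uploads.views',
#     (r'^upload/$', 'file_upload_view'),
#     (r'^verify/$', 'file_upload_view_verify'),
#     (r'^echo/$', 'file_upload_echo'),
#     (r'^quota/$', 'file_upload_quota'),
# )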
|
[
"ssaltzman@mitre.org"
] |
ssaltzman@mitre.org
|
747d2fc906faba84e61634e63935bb43a63df5db
|
b769c29a49f73b22ed1998de893aacad516025f6
|
/moziobackend/moziobackend/urls.py
|
d47184e7f4da2ab5011bd1290f8bdea09a7ebbdf
|
[] |
no_license
|
humitos/mozio-backend
|
6925dffcedd301faab5375f649673cf2bfec1ddc
|
6990282694e0081fd6606c1781906499f946758a
|
refs/heads/master
| 2021-01-10T10:06:50.984884
| 2016-02-06T21:04:29
| 2016-02-06T21:04:29
| 51,188,225
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 829
|
py
|
"""moziobackend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
    url(r'^api/v1/', include('serviceareas.urls')),
]
|
[
"humitos@gmail.com"
] |
humitos@gmail.com
|
fdf3461a668928331ac2e97d93cf0b12b6516007
|
11f29fef10e684553a452eb40d4b3daa696b87fc
|
/Exercicios/III/questao02.py
|
2b93b4be707623021e68aa37a4e2bff58a932a4a
|
[] |
no_license
|
asferreir/PythonParaZumbis
|
49032f999e054513409d649c9ac0b45a05af5c5d
|
fc59061dd5c64c2c7f95adf2b0d76b5af329fbb2
|
refs/heads/master
| 2020-07-18T05:18:38.253478
| 2019-09-04T14:03:51
| 2019-09-04T14:03:51
| 206,184,479
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
"""
Write a program that reads a username and a password and does not accept a
password equal to the username, showing an error message and asking for the
information again.
"""
usuario = input("Enter the username: ")
senha = input("Enter the password: ")
while usuario == senha:
    print("The username and password must be different!")
    usuario = input("Enter the username: ")
    senha = input("Enter the password: ")
|
[
"havour@gmail.com"
] |
havour@gmail.com
|
bcd138a01994e88753339605bd120c6d93ee7a7a
|
71e11e641824e84722c73be5e081853948a6a27d
|
/users/manage.py
|
2f5b5d3380c9c5583dc8d8c08b4d7a90d1cb5f0d
|
[] |
no_license
|
HackTzi/hacktzi-store-microservices
|
578208c86e5a80a1d3bfee23fc230e682711cae7
|
e98c05f46752b146160e49e391b4eb2bfa34ff3e
|
refs/heads/main
| 2023-04-01T14:48:36.701073
| 2021-04-15T19:44:35
| 2021-04-15T19:44:35
| 330,806,999
| 1
| 1
| null | 2021-04-15T19:44:36
| 2021-01-18T23:00:36
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 674
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'platzi_store_users.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"royjuni3431@gmail.com"
] |
royjuni3431@gmail.com
|
639dea54803f71834f720dc313e91386be35dbff
|
ef66e4d2f0ff31298c9ab93aa2c268ecf89311a6
|
/src/regex/accounts/models.py
|
9f14078df236f84e15839098be9357b07969aecc
|
[] |
no_license
|
Clarity-89/regex-it
|
dd8da6fe39374e1722d32943e4545d0ab95f31b6
|
94485198430bffc6aa14e4ed75dbfddb1c9efef9
|
refs/heads/master
| 2020-11-25T01:45:14.365413
| 2019-11-17T14:43:15
| 2019-11-17T14:43:15
| 228,435,557
| 0
| 0
| null | 2019-12-16T17:06:37
| 2019-12-16T17:06:36
| null |
UTF-8
|
Python
| false
| false
| 2,399
|
py
|
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
class UserManager(BaseUserManager):
use_in_migrations = True
def _create_user(self, email, password, is_staff, is_superuser, **extra_fields):
"""
Creates and saves a User with the given username, email and password.
"""
now = timezone.now()
email = self.normalize_email(email)
user = self.model(email=email, is_staff=is_staff, is_active=True,
is_superuser=is_superuser, date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email=None, password=None, **extra_fields):
return self._create_user(email, password, False, False,
**extra_fields)
def create_superuser(self, email, password, **extra_fields):
return self._create_user(email, password, True, True,
**extra_fields)
class User(AbstractBaseUser, PermissionsMixin):
first_name = models.CharField(_('first name'), max_length=150, blank=True)
last_name = models.CharField(_('last name'), max_length=150, blank=True)
email = models.EmailField(_('email address'), unique=True)
is_staff = models.BooleanField(
_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin site.')
)
is_active = models.BooleanField(
_('active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.')
)
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
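# A minimal usage sketch (not part of the original module); assumes Django is
# configured with AUTH_USER_MODEL = 'accounts.User':
# user = User.objects.create_user(email='a@example.com', password='s3cret')
# admin = User.objects.create_superuser('root@example.com', 's3cret')
# user.get_full_name()  # -> '' until first_name/last_name are set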
|
[
"sergei@maykinmedia.nl"
] |
sergei@maykinmedia.nl
|
da40c898f3633e1f5c71c0b5368a096825cc4766
|
dc980bbd2bd6078f1e49004afcc710a89ed12565
|
/test/functional/rpc_createmultisig.py
|
673cce4ed55b3bc456e58c27cfee063395103752
|
[
"MIT"
] |
permissive
|
frankjardel/isocoin
|
c21408225399b33f941c303d0e66e0db264a03c2
|
36e3ce6d64839a37c45b6e17aedfb2238c3a5257
|
refs/heads/master
| 2020-03-28T10:11:59.484529
| 2019-07-17T17:06:11
| 2019-07-17T17:06:11
| 148,090,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,736
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction signing using the signrawtransaction* RPCs."""
from test_framework.test_framework import IsocoinTestFramework
import decimal
class RpcCreateMultiSigTest(IsocoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def get_keys(self):
node0, node1, node2 = self.nodes
self.add = [node1.getnewaddress() for _ in range(self.nkeys)]
self.pub = [node1.getaddressinfo(a)["pubkey"] for a in self.add]
self.priv = [node1.dumpprivkey(a) for a in self.add]
self.final = node2.getnewaddress()
def run_test(self):
node0,node1,node2 = self.nodes
# 50 BTC each, rest will be 25 BTC each
node0.generate(149)
self.sync_all()
self.moved = 0
for self.nkeys in [3,5]:
for self.nsigs in [2,3]:
for self.output_type in ["bech32", "p2sh-segwit", "legacy"]:
self.get_keys()
self.do_multisig()
self.checkbalances()
def checkbalances(self):
node0,node1,node2 = self.nodes
node0.generate(100)
self.sync_all()
bal0 = node0.getbalance()
bal1 = node1.getbalance()
bal2 = node2.getbalance()
height = node0.getblockchaininfo()["blocks"]
assert 150 < height < 350
total = 149*50 + (height-149-100)*25
assert bal1 == 0
assert bal2 == self.moved
assert bal0+bal1+bal2 == total
def do_multisig(self):
node0,node1,node2 = self.nodes
msig = node2.createmultisig(self.nsigs, self.pub, self.output_type)
madd = msig["address"]
mredeem = msig["redeemScript"]
if self.output_type == 'bech32':
assert madd[0:4] == "bcrt" # actually a bech32 address
# compare against addmultisigaddress
msigw = node1.addmultisigaddress(self.nsigs, self.pub, None, self.output_type)
maddw = msigw["address"]
mredeemw = msigw["redeemScript"]
        # addmultisigaddress and createmultisig work the same
assert maddw == madd
assert mredeemw == mredeem
txid = node0.sendtoaddress(madd, 40)
tx = node0.getrawtransaction(txid, True)
vout = [v["n"] for v in tx["vout"] if madd in v["scriptPubKey"].get("addresses",[])]
assert len(vout) == 1
vout = vout[0]
scriptPubKey = tx["vout"][vout]["scriptPubKey"]["hex"]
value = tx["vout"][vout]["value"]
prevtxs = [{"txid": txid, "vout": vout, "scriptPubKey": scriptPubKey, "redeemScript": mredeem, "amount": value}]
node0.generate(1)
outval = value - decimal.Decimal("0.00001000")
rawtx = node2.createrawtransaction([{"txid": txid, "vout": vout}], [{self.final: outval}])
rawtx2 = node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs-1], prevtxs)
rawtx3 = node2.signrawtransactionwithkey(rawtx2["hex"], [self.priv[-1]], prevtxs)
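        # Signing is deliberately split in two passes: nsigs-1 keys first, then
        # the final key completes the nsigs-of-nkeys multisig before broadcast.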
self.moved += outval
tx = node0.sendrawtransaction(rawtx3["hex"], True)
blk = node0.generate(1)[0]
assert tx in node0.getblock(blk)["tx"]
txinfo = node0.getrawtransaction(tx, True, blk)
self.log.info("n/m=%d/%d %s size=%d vsize=%d weight=%d" % (self.nsigs, self.nkeys, self.output_type, txinfo["size"], txinfo["vsize"], txinfo["weight"]))
if __name__ == '__main__':
RpcCreateMultiSigTest().main()
|
[
"jardelfrank42@gmail.com"
] |
jardelfrank42@gmail.com
|
4c85376e7b44cc8dd485f66e1eeb81f4d0bc174a
|
3831421b5f4f294bf8f4089b1f617cfc82c2351a
|
/MyInte/SCRIPTS/assit/chwich.py
|
c90f41f1e1228d8bc2129ef4c70da712a840b91d
|
[] |
no_license
|
jesuel51/MyInte
|
6ce31b813c51e30780115f1a5efcafd8d264ae43
|
817a6df61cb77dedf0e4a586bd09906a4b175e96
|
refs/heads/master
| 2020-05-31T01:46:35.983688
| 2019-06-03T18:17:34
| 2019-06-03T18:17:34
| 190,056,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 791
|
py
|
# As we know, Te in the core region is not well calculated, so we mainly use the profile shape of the ion channel to replace the electron channel.
nj=root['SETTINGS']['PHYSICS']['nj']
if root['SETTINGS']['PHYSICS']['chwich'][0]==1:
pvt_i=root['SETTINGS']['PHYSICS']['chwich'][1]
# num=int(pvt_i/0.02)+1
num=int(pvt_i*(nj-1))+1
diff_Tie=root['INPUTS']['ONETWOInput']['inone_pre']['namelis1']['tiin'][num]-root['INPUTS']['ONETWOInput']['inone_pre']['namelis1']['tein'][num]
root['INPUTS']['ONETWOInput']['inone_pre']['namelis1']['tein'][0:num]=root['INPUTS']['ONETWOInput']['inone_pre']['namelis1']['tiin'][0:num]-diff_Tie
root['INPUTS']['ONETWOInput']['inone_ss']['namelis1']['tein'][0:num]=root['INPUTS']['ONETWOInput']['inone_ss']['namelis1']['tiin'][0:num]-diff_Tie
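# A standalone sketch of the same substitution (illustrative only; assumes the
# profiles are plain 1-D arrays on a uniform rho grid -- these names are not
# from the settings tree above):
import numpy as np
def replace_core_te(tiin, tein, pvt_i):
    num = int(pvt_i * (len(tiin) - 1)) + 1      # grid index of the pivot radius
    diff = tiin[num] - tein[num]                # Ti - Te offset at the pivot
    out = np.array(tein, dtype=float)
    out[:num] = np.asarray(tiin[:num]) - diff   # shift the Ti shape onto Te inside the pivot
    return out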
|
[
"1018910084@qq.com"
] |
1018910084@qq.com
|
46d1baf103a1f1cb70c11ac06a2cb4d0b721acf4
|
7fa64ea3a31ea5c4ed2fc734cd45e4733e782673
|
/tensorflow/python/data/kernel_tests/interleave_dataset_op_test.py
|
f0b16591f73478f8402668e5e9286799d981ea7b
|
[
"Apache-2.0"
] |
permissive
|
TimoFu/tensorflow
|
cf9718167d6c1dfa6ac2f5ad97cae144e4bb5cc3
|
6b378af9422c203b2d99cd21031ccf50a30958a0
|
refs/heads/master
| 2020-04-08T09:46:37.399762
| 2018-11-26T22:04:34
| 2018-11-26T22:04:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,711
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
def _interleave(lists, cycle_length, block_length):
"""Reference implementation of interleave used for testing.
Args:
lists: a list of lists to interleave
cycle_length: the length of the interleave cycle
block_length: the length of the interleave block
Yields:
Elements of `lists` interleaved in the order determined by `cycle_length`
and `block_length`.
"""
num_open = 0
# `all_iterators` acts as a queue of iterators over each element of `lists`.
all_iterators = [iter(l) for l in lists]
# `open_iterators` are the iterators whose elements are currently being
# interleaved.
open_iterators = []
for i in range(cycle_length):
if all_iterators:
open_iterators.append(all_iterators.pop(0))
num_open += 1
else:
open_iterators.append(None)
while num_open or all_iterators:
for i in range(cycle_length):
if open_iterators[i] is None:
if all_iterators:
open_iterators[i] = all_iterators.pop(0)
num_open += 1
else:
continue
for _ in range(block_length):
try:
yield next(open_iterators[i])
except StopIteration:
open_iterators[i] = None
num_open -= 1
break
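# Worked example for reference: with cycle_length=2 and block_length=1,
# list(_interleave([[1, 1], [2], [3, 3, 3]], 2, 1)) == [1, 2, 1, 3, 3, 3];
# the third list only opens once a slot in the 2-wide cycle frees up.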
def _make_coordinated_sloppy_dataset(input_values, cycle_length, block_length,
num_parallel_calls):
"""Produces a dataset iterator and events to control the order of elements.
Args:
input_values: the values to generate lists to interleave from
cycle_length: the length of the interleave cycle
block_length: the length of the interleave block
num_parallel_calls: the degree of interleave parallelism
Returns:
A dataset iterator (represented as `get_next` op) and events that can be
used to control the order of output elements.
"""
# Set up threading events used to sequence when items are produced that
# are subsequently interleaved. These events allow us to deterministically
# simulate slowdowns and force sloppiness.
coordination_events = {i: threading.Event() for i in input_values}
def map_py_fn(x):
coordination_events[x].wait()
coordination_events[x].clear()
return x * x
def map_fn(x):
return script_ops.py_func(map_py_fn, [x], x.dtype)
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
dataset = dataset.repeat(x)
return dataset.map(map_fn)
options = dataset_ops.Options()
options.experimental_deterministic = False
dataset = dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
2).interleave(interleave_fn, cycle_length, block_length,
num_parallel_calls).with_options(options)
return dataset, coordination_events
def _repeat(values, count):
"""Produces a list of lists suitable for testing interleave.
Args:
values: for each element `x` the result contains `[x] * x`
count: determines how many times to repeat `[x] * x` in the result
Returns:
A list of lists of values suitable for testing interleave.
"""
return [[value] * value for value in np.tile(values, count)]
@test_util.run_all_in_graph_and_eager_modes
class InterleaveDatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.named_parameters(
("1", [4, 5, 6], 1, 1, [
4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 4, 4, 4, 4, 5, 5, 5, 5,
5, 6, 6, 6, 6, 6, 6
]),
("2", [4, 5, 6], 2, 1, [
4, 5, 4, 5, 4, 5, 4, 5, 5, 6, 6, 4, 6, 4, 6, 4, 6, 4, 6, 5, 6, 5, 6,
5, 6, 5, 6, 5, 6, 6
]),
("3", [4, 5, 6], 2, 3, [
4, 4, 4, 5, 5, 5, 4, 5, 5, 6, 6, 6, 4, 4, 4, 6, 6, 6, 4, 5, 5, 5, 6,
6, 6, 5, 5, 6, 6, 6
]),
("4", [4, 5, 6], 7, 2, [
4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6,
6, 5, 6, 6, 5, 6, 6
]),
("5", [4, 0, 6], 2, 1,
[4, 4, 6, 4, 6, 4, 6, 6, 4, 6, 4, 6, 4, 4, 6, 6, 6, 6, 6, 6]),
)
def testPythonImplementation(self, input_values, cycle_length, block_length,
expected_elements):
input_lists = _repeat(input_values, 2)
for expected, produced in zip(
expected_elements, _interleave(input_lists, cycle_length,
block_length)):
self.assertEqual(expected, produced)
@parameterized.named_parameters(
("1", np.int64([4, 5, 6]), 1, 3, None),
("2", np.int64([4, 5, 6]), 1, 3, 1),
("3", np.int64([4, 5, 6]), 2, 1, None),
("4", np.int64([4, 5, 6]), 2, 1, 1),
("5", np.int64([4, 5, 6]), 2, 1, 2),
("6", np.int64([4, 5, 6]), 2, 3, None),
("7", np.int64([4, 5, 6]), 2, 3, 1),
("8", np.int64([4, 5, 6]), 2, 3, 2),
("9", np.int64([4, 5, 6]), 7, 2, None),
("10", np.int64([4, 5, 6]), 7, 2, 1),
("11", np.int64([4, 5, 6]), 7, 2, 3),
("12", np.int64([4, 5, 6]), 7, 2, 5),
("13", np.int64([4, 5, 6]), 7, 2, 7),
("14", np.int64([]), 2, 3, None),
("15", np.int64([0, 0, 0]), 2, 3, None),
("16", np.int64([4, 0, 6]), 2, 3, None),
("17", np.int64([4, 0, 6]), 2, 3, 1),
("18", np.int64([4, 0, 6]), 2, 3, 2),
)
def testInterleaveDataset(self, input_values, cycle_length, block_length,
num_parallel_calls):
count = 2
dataset = dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
count).interleave(
lambda x: dataset_ops.Dataset.from_tensors(x).repeat(x),
cycle_length, block_length, num_parallel_calls)
expected_output = [element for element in _interleave(
_repeat(input_values, count), cycle_length, block_length)]
self.assertDatasetProduces(dataset, expected_output)
@parameterized.named_parameters(
("1", np.float32([1., np.nan, 2., np.nan, 3.]), 1, 3, None),
("2", np.float32([1., np.nan, 2., np.nan, 3.]), 1, 3, 1),
("3", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 1, None),
("4", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 1, 1),
("5", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 1, 2),
("6", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 3, None),
("7", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 3, 1),
("8", np.float32([1., np.nan, 2., np.nan, 3.]), 2, 3, 2),
("9", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, None),
("10", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, 1),
("11", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, 3),
("12", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, 5),
("13", np.float32([1., np.nan, 2., np.nan, 3.]), 7, 2, 7),
)
def testInterleaveDatasetError(self, input_values, cycle_length, block_length,
num_parallel_calls):
dataset = dataset_ops.Dataset.from_tensor_slices(input_values).map(
lambda x: array_ops.check_numerics(x, "message")).interleave(
dataset_ops.Dataset.from_tensors, cycle_length, block_length,
num_parallel_calls)
get_next = self.getNext(dataset)
for value in input_values:
if np.isnan(value):
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
else:
self.assertEqual(value, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testInterleaveSparse(self):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])
def _interleave_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))
dataset = dataset_ops.Dataset.range(10).map(_map_fn).interleave(
_interleave_fn, cycle_length=1)
get_next = self.getNext(dataset)
for i in range(10):
for j in range(2):
expected = [i, 0] if j % 2 == 0 else [0, -i]
self.assertAllEqual(expected, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
class InterleaveDatasetTestWithConfig(test_base.DatasetTestBase,
parameterized.TestCase):
@parameterized.named_parameters(
("1", np.int64([4, 5, 6]), 2, 1),
("2", np.int64([4, 5, 6]), 2, 3),
("3", np.int64([4, 5, 6]), 3, 2),
("4", np.int64([4, 0, 6]), 2, 3),
)
@test_util.run_in_graph_and_eager_modes(
config=config_pb2.ConfigProto(
inter_op_parallelism_threads=2, use_per_session_threads=True))
def testSloppyInterleaveInOrder(self, input_values, cycle_length,
block_length):
dataset, coordination_events = _make_coordinated_sloppy_dataset(
input_values, cycle_length, block_length, num_parallel_calls=1)
get_next = self.getNext(dataset)
for expected_element in _interleave(
_repeat(input_values, 2), cycle_length, block_length):
coordination_events[expected_element].set()
self.assertEqual(expected_element * expected_element,
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@parameterized.named_parameters(
("1", np.int64([4, 5, 6]), 2, 1),
("2", np.int64([4, 5, 6]), 2, 3),
("3", np.int64([4, 5, 6]), 3, 2),
("4", np.int64([4, 0, 6]), 2, 3),
)
@test_util.run_in_graph_and_eager_modes(
config=config_pb2.ConfigProto(
inter_op_parallelism_threads=3, use_per_session_threads=True))
def testSloppyInterleaveInOrder_2(self, input_values, cycle_length,
block_length):
dataset, coordination_events = _make_coordinated_sloppy_dataset(
input_values, cycle_length, block_length, num_parallel_calls=2)
get_next = self.getNext(dataset)
for expected_element in _interleave(
_repeat(input_values, 2), cycle_length, block_length):
coordination_events[expected_element].set()
self.assertEqual(expected_element * expected_element,
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@parameterized.named_parameters(
("1", np.int64([4, 5, 6]), 2, 1),
("2", np.int64([4, 5, 6]), 2, 3),
("3", np.int64([4, 0, 6]), 2, 3),
)
@test_util.run_in_graph_and_eager_modes(
config=config_pb2.ConfigProto(
inter_op_parallelism_threads=3, use_per_session_threads=True))
def testSloppyInterleaveOutOfOrder(self, input_values, cycle_length,
block_length):
dataset, coordination_events = _make_coordinated_sloppy_dataset(
input_values, cycle_length, block_length, num_parallel_calls=2)
get_next = self.getNext(dataset)
elements = [
x for x in _interleave(
_repeat(input_values, 2), cycle_length, block_length)
]
for i in [1, 4, 7]:
elements[i], elements[i + 1] = elements[i + 1], elements[i]
for element in elements:
coordination_events[element].set()
self.assertEqual(element * element, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@test_util.run_in_graph_and_eager_modes(
config=config_pb2.ConfigProto(
inter_op_parallelism_threads=4, use_per_session_threads=True))
def testSloppyInterleaveOutOfOrder_2(self):
input_values, cycle_length, block_length = np.int64([4, 5, 6]), 3, 2
dataset, coordination_events = _make_coordinated_sloppy_dataset(
input_values, cycle_length, block_length, num_parallel_calls=3)
get_next = self.getNext(dataset)
elements = [
x for x in _interleave(
_repeat(input_values, 2), cycle_length, block_length)
]
for i in [1, 4, 7]:
elements[i], elements[i + 1] = elements[i + 1], elements[i]
for element in elements:
coordination_events[element].set()
self.assertEqual(element * element, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
if __name__ == "__main__":
test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
b91f6777384247c7814df308b1d06a367277abd7
|
01733042e84a768b77f64ec24118d0242b2f13b8
|
/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/bgpipv6evpnvpws_7e7a3dec141df7b1c974f723df7f4814.py
|
f90d4c9a9f9ab563e60b864c92a4472907752c7d
|
[
"MIT"
] |
permissive
|
slieberth/ixnetwork_restpy
|
e95673905854bc57e56177911cb3853c7e4c5e26
|
23eeb24b21568a23d3f31bbd72814ff55eb1af44
|
refs/heads/master
| 2023-01-04T06:57:17.513612
| 2020-10-16T22:30:55
| 2020-10-16T22:30:55
| 311,959,027
| 0
| 0
|
NOASSERTION
| 2020-11-11T12:15:34
| 2020-11-11T12:06:00
| null |
UTF-8
|
Python
| false
| false
| 74,316
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class BgpIPv6EvpnVpws(Base):
"""BGP IPv6 Peer VPWS EVPN Configuration
The BgpIPv6EvpnVpws class encapsulates a list of bgpIPv6EvpnVpws resources that are managed by the user.
A list of resources can be retrieved from the server using the BgpIPv6EvpnVpws.find() method.
The list can be managed by using the BgpIPv6EvpnVpws.add() and BgpIPv6EvpnVpws.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'bgpIPv6EvpnVpws'
_SDM_ATT_MAP = {
'Active': 'active',
'AdRouteLabel': 'adRouteLabel',
'AdvSrv6SidInIgp': 'advSrv6SidInIgp',
'AdvSrv6SidInIgpPMSI': 'advSrv6SidInIgpPMSI',
'AdvertiseL3vniSeparately': 'advertiseL3vniSeparately',
'AdvertiseSRv6SID': 'advertiseSRv6SID',
'AdvertiseSRv6SIDPMSI': 'advertiseSRv6SIDPMSI',
'AggregatorAs': 'aggregatorAs',
'AggregatorId': 'aggregatorId',
'ArgumentLength': 'argumentLength',
'AsSetMode': 'asSetMode',
'AutoConfigOriginatingRouterIp': 'autoConfigOriginatingRouterIp',
'AutoConfigPMSITunnelId': 'autoConfigPMSITunnelId',
'AutoConfigureRdIpAddress': 'autoConfigureRdIpAddress',
'BMacFirstLabel': 'bMacFirstLabel',
'BMacSecondLabel': 'bMacSecondLabel',
'ConnectedVia': 'connectedVia',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'EnableAggregatorId': 'enableAggregatorId',
'EnableAsPathSegments': 'enableAsPathSegments',
'EnableAtomicAggregate': 'enableAtomicAggregate',
'EnableBMacSecondLabel': 'enableBMacSecondLabel',
'EnableCluster': 'enableCluster',
'EnableCommunity': 'enableCommunity',
'EnableExtendedCommunity': 'enableExtendedCommunity',
'EnableL3TargetOnlyForRouteType5': 'enableL3TargetOnlyForRouteType5',
'EnableL3vniTargetList': 'enableL3vniTargetList',
'EnableLocalPreference': 'enableLocalPreference',
'EnableMultiExitDiscriminator': 'enableMultiExitDiscriminator',
'EnableNextHop': 'enableNextHop',
'EnableOrigin': 'enableOrigin',
'EnableOriginatorId': 'enableOriginatorId',
'Errors': 'errors',
'EsiType': 'esiType',
'EsiValue': 'esiValue',
'FunctionLength': 'functionLength',
'ImportRtListSameAsExportRtList': 'importRtListSameAsExportRtList',
'IncludePmsiTunnelAttribute': 'includePmsiTunnelAttribute',
'Ipv4NextHop': 'ipv4NextHop',
'Ipv6NextHop': 'ipv6NextHop',
'L3vniImportRtListSameAsL3vniExportRtList': 'l3vniImportRtListSameAsL3vniExportRtList',
'LocBlockLength': 'locBlockLength',
'LocNodeLength': 'locNodeLength',
'LocalPreference': 'localPreference',
'MultiExitDiscriminator': 'multiExitDiscriminator',
'MulticastTunnelType': 'multicastTunnelType',
'Multiplier': 'multiplier',
'MvEnableTransposition': 'mvEnableTransposition',
'MvIncSrv6SidStructSsTlv': 'mvIncSrv6SidStructSsTlv',
'Name': 'name',
'NoOfASPathSegmentsPerRouteRange': 'noOfASPathSegmentsPerRouteRange',
'NoOfClusters': 'noOfClusters',
'NoOfCommunities': 'noOfCommunities',
'NoOfExtendedCommunity': 'noOfExtendedCommunity',
'NumBroadcastDomainV6': 'numBroadcastDomainV6',
'NumRtInExportRouteTargetList': 'numRtInExportRouteTargetList',
'NumRtInImportRouteTargetList': 'numRtInImportRouteTargetList',
'NumRtInL3vniExportRouteTargetList': 'numRtInL3vniExportRouteTargetList',
'NumRtInL3vniImportRouteTargetList': 'numRtInL3vniImportRouteTargetList',
'Origin': 'origin',
'OriginatingRouterIpv4': 'originatingRouterIpv4',
'OriginatingRouterIpv6': 'originatingRouterIpv6',
'OriginatorId': 'originatorId',
'OverridePeerAsSetMode': 'overridePeerAsSetMode',
'PmsiTunnelIDv4': 'pmsiTunnelIDv4',
'PmsiTunnelIDv6': 'pmsiTunnelIDv6',
'RdASNumber': 'rdASNumber',
'RdEvi': 'rdEvi',
'RdIpAddress': 'rdIpAddress',
'RdType': 'rdType',
'SendSRv6SIDOptionalInfo': 'sendSRv6SIDOptionalInfo',
'SendSRv6SIDOptionalInfoPMSI': 'sendSRv6SIDOptionalInfoPMSI',
'SessionStatus': 'sessionStatus',
'SetNextHop': 'setNextHop',
'SetNextHopIpType': 'setNextHopIpType',
'Srv6EndpointBehavior': 'srv6EndpointBehavior',
'Srv6EndpointBehaviorPMSI': 'srv6EndpointBehaviorPMSI',
'Srv6SIDOptionalInformation': 'srv6SIDOptionalInformation',
'Srv6SIDOptionalInformationPMSI': 'srv6SIDOptionalInformationPMSI',
'Srv6SidFlags': 'srv6SidFlags',
'Srv6SidFlagsPMSI': 'srv6SidFlagsPMSI',
'Srv6SidLoc': 'srv6SidLoc',
'Srv6SidLocLen': 'srv6SidLocLen',
'Srv6SidLocLenPMSI': 'srv6SidLocLenPMSI',
'Srv6SidLocMetric': 'srv6SidLocMetric',
'Srv6SidLocMetricPMSI': 'srv6SidLocMetricPMSI',
'Srv6SidLocPMSI': 'srv6SidLocPMSI',
'Srv6SidReserved': 'srv6SidReserved',
'Srv6SidReserved1': 'srv6SidReserved1',
'Srv6SidReserved1PMSI': 'srv6SidReserved1PMSI',
'Srv6SidReserved2': 'srv6SidReserved2',
'Srv6SidReserved2PMSI': 'srv6SidReserved2PMSI',
'Srv6SidReservedPMSI': 'srv6SidReservedPMSI',
'StackedLayers': 'stackedLayers',
'StateCounts': 'stateCounts',
'Status': 'status',
'TranpositionLength': 'tranpositionLength',
'TranpositionOffset': 'tranpositionOffset',
'UpstreamDownstreamAssignedMplsLabel': 'upstreamDownstreamAssignedMplsLabel',
'UseIpv4MappedIpv6Address': 'useIpv4MappedIpv6Address',
'UseUpstreamDownstreamAssignedMplsLabel': 'useUpstreamDownstreamAssignedMplsLabel',
}
def __init__(self, parent):
super(BgpIPv6EvpnVpws, self).__init__(parent)
@property
def BgpAsPathSegmentList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpaspathsegmentlist_4d209c5ac36c18374125f19531d4795f.BgpAsPathSegmentList): An instance of the BgpAsPathSegmentList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpaspathsegmentlist_4d209c5ac36c18374125f19531d4795f import BgpAsPathSegmentList
return BgpAsPathSegmentList(self)
@property
def BgpClusterIdList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpclusteridlist_82b17094a31a96f755045be572017577.BgpClusterIdList): An instance of the BgpClusterIdList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpclusteridlist_82b17094a31a96f755045be572017577 import BgpClusterIdList
return BgpClusterIdList(self)
@property
def BgpCommunitiesList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpcommunitieslist_2963fcaf235bccb665be655ea86cee0f.BgpCommunitiesList): An instance of the BgpCommunitiesList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpcommunitieslist_2963fcaf235bccb665be655ea86cee0f import BgpCommunitiesList
return BgpCommunitiesList(self)
@property
def BgpExportRouteTargetList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpexportroutetargetlist_ce93ce056c01eaf7643c31a7fd67768c.BgpExportRouteTargetList): An instance of the BgpExportRouteTargetList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpexportroutetargetlist_ce93ce056c01eaf7643c31a7fd67768c import BgpExportRouteTargetList
return BgpExportRouteTargetList(self)
@property
def BgpExtendedCommunitiesList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpextendedcommunitieslist_bac41900b4999f09d65f045cf8104248.BgpExtendedCommunitiesList): An instance of the BgpExtendedCommunitiesList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpextendedcommunitieslist_bac41900b4999f09d65f045cf8104248 import BgpExtendedCommunitiesList
return BgpExtendedCommunitiesList(self)
@property
def BgpImportRouteTargetList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpimportroutetargetlist_99470595cc13238e15b19c07b8af6021.BgpImportRouteTargetList): An instance of the BgpImportRouteTargetList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpimportroutetargetlist_99470595cc13238e15b19c07b8af6021 import BgpImportRouteTargetList
return BgpImportRouteTargetList(self)
@property
def BgpL3VNIExportRouteTargetList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpl3vniexportroutetargetlist_0ceb637a2c3fee9e0d0bdf68e75d9054.BgpL3VNIExportRouteTargetList): An instance of the BgpL3VNIExportRouteTargetList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpl3vniexportroutetargetlist_0ceb637a2c3fee9e0d0bdf68e75d9054 import BgpL3VNIExportRouteTargetList
return BgpL3VNIExportRouteTargetList(self)
@property
def BgpL3VNIImportRouteTargetList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpl3vniimportroutetargetlist_f9fc41787790538b1714fae483245f7d.BgpL3VNIImportRouteTargetList): An instance of the BgpL3VNIImportRouteTargetList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpl3vniimportroutetargetlist_f9fc41787790538b1714fae483245f7d import BgpL3VNIImportRouteTargetList
return BgpL3VNIImportRouteTargetList(self)
@property
def BroadcastDomainV6Vpws(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.broadcastdomainv6vpws_59a832939f2a9320bf834055352368fd.BroadcastDomainV6Vpws): An instance of the BroadcastDomainV6Vpws class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.broadcastdomainv6vpws_59a832939f2a9320bf834055352368fd import BroadcastDomainV6Vpws
return BroadcastDomainV6Vpws(self)._select()
@property
def Connector(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b.Connector): An instance of the Connector class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b import Connector
return Connector(self)
@property
def Tag(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d.Tag): An instance of the Tag class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d import Tag
return Tag(self)
@property
def Active(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Activate/Deactivate Configuration
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
@property
def AdRouteLabel(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): AD Route Label
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AdRouteLabel']))
@property
def AdvSrv6SidInIgp(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Advertise SRv6 SID in IGP
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AdvSrv6SidInIgp']))
@property
def AdvSrv6SidInIgpPMSI(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Advertise SRv6 SID in IGP
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AdvSrv6SidInIgpPMSI']))
@property
def AdvertiseL3vniSeparately(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Advertise L3 Route Separately
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AdvertiseL3vniSeparately']))
@property
def AdvertiseSRv6SID(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Advertise SRv6 SID
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AdvertiseSRv6SID']))
@property
def AdvertiseSRv6SIDPMSI(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Advertise SRv6 SID
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AdvertiseSRv6SIDPMSI']))
@property
def AggregatorAs(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Aggregator AS
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AggregatorAs']))
@property
def AggregatorId(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Aggregator ID
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AggregatorId']))
@property
def ArgumentLength(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Argument Length
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ArgumentLength']))
@property
def AsSetMode(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): AS# Set Mode
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AsSetMode']))
@property
def AutoConfigOriginatingRouterIp(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): If set to true, this field enables option to configure Originating router IP address automatically from BGP Router's local IP
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AutoConfigOriginatingRouterIp']))
@property
def AutoConfigPMSITunnelId(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Auto Configure PMSI Tunnel ID
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AutoConfigPMSITunnelId']))
@property
def AutoConfigureRdIpAddress(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Auto-Configure RD IP Addresses
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AutoConfigureRdIpAddress']))
@property
def BMacFirstLabel(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): B MAC First Label
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BMacFirstLabel']))
@property
def BMacSecondLabel(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): B MAC Second Label
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BMacSecondLabel']))
@property
def ConnectedVia(self):
"""DEPRECATED
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of layers this layer is used to connect with to the wire.
"""
return self._get_attribute(self._SDM_ATT_MAP['ConnectedVia'])
@ConnectedVia.setter
def ConnectedVia(self, value):
self._set_attribute(self._SDM_ATT_MAP['ConnectedVia'], value)
@property
def Count(self):
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def EnableAggregatorId(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enable Aggregator ID
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableAggregatorId']))
@property
def EnableAsPathSegments(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enable AS Path Segments
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableAsPathSegments']))
@property
def EnableAtomicAggregate(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enable Atomic Aggregate
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableAtomicAggregate']))
@property
def EnableBMacSecondLabel(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enable B MAC Second Label
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableBMacSecondLabel']))
@property
def EnableCluster(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enable Cluster
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableCluster']))
@property
def EnableCommunity(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enable Community
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableCommunity']))
@property
def EnableExtendedCommunity(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enable Extended Community
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableExtendedCommunity']))
@property
def EnableL3TargetOnlyForRouteType5(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enable L3 Target only for Route Type 5
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableL3TargetOnlyForRouteType5']))
@property
def EnableL3vniTargetList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enable L3 Target List
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableL3vniTargetList']))
@property
def EnableLocalPreference(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enable Local Preference
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableLocalPreference']))
@property
def EnableMultiExitDiscriminator(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enable Multi Exit
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableMultiExitDiscriminator']))
@property
def EnableNextHop(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enable Next Hop
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableNextHop']))
@property
def EnableOrigin(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enable Origin
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableOrigin']))
@property
def EnableOriginatorId(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enable Originator ID
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableOriginatorId']))
@property
def Errors(self):
"""
Returns
-------
- list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str])): A list of errors that have occurred
"""
return self._get_attribute(self._SDM_ATT_MAP['Errors'])
@property
def EsiType(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): ESI Type
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EsiType']))
@property
def EsiValue(self):
"""
Returns
-------
- list(str): ESI Value
"""
return self._get_attribute(self._SDM_ATT_MAP['EsiValue'])
@property
def FunctionLength(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Function Length
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FunctionLength']))
@property
def ImportRtListSameAsExportRtList(self):
"""
Returns
-------
- bool: Import RT List Same As Export RT List
"""
return self._get_attribute(self._SDM_ATT_MAP['ImportRtListSameAsExportRtList'])
@ImportRtListSameAsExportRtList.setter
def ImportRtListSameAsExportRtList(self, value):
self._set_attribute(self._SDM_ATT_MAP['ImportRtListSameAsExportRtList'], value)
@property
def IncludePmsiTunnelAttribute(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Include PMSI Tunnel Attribute
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IncludePmsiTunnelAttribute']))
@property
def Ipv4NextHop(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): IPv4 Next Hop
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv4NextHop']))
@property
def Ipv6NextHop(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): IPv6 Next Hop
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv6NextHop']))
@property
def L3vniImportRtListSameAsL3vniExportRtList(self):
"""
Returns
-------
- bool: L3 Import RT List Same As L3 Export RT List
"""
return self._get_attribute(self._SDM_ATT_MAP['L3vniImportRtListSameAsL3vniExportRtList'])
@L3vniImportRtListSameAsL3vniExportRtList.setter
def L3vniImportRtListSameAsL3vniExportRtList(self, value):
self._set_attribute(self._SDM_ATT_MAP['L3vniImportRtListSameAsL3vniExportRtList'], value)
@property
def LocBlockLength(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Locator Block Length
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocBlockLength']))
@property
def LocNodeLength(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Locator Node Length
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocNodeLength']))
@property
def LocalPreference(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Local Preference
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocalPreference']))
@property
def MultiExitDiscriminator(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Multi Exit
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MultiExitDiscriminator']))
@property
def MulticastTunnelType(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Multicast Tunnel Type
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MulticastTunnelType']))
@property
def Multiplier(self):
"""
Returns
-------
- number: Number of layer instances per parent instance (multiplier)
"""
return self._get_attribute(self._SDM_ATT_MAP['Multiplier'])
@Multiplier.setter
def Multiplier(self, value):
self._set_attribute(self._SDM_ATT_MAP['Multiplier'], value)
@property
def MvEnableTransposition(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enable Transposition
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MvEnableTransposition']))
@property
def MvIncSrv6SidStructSsTlv(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Include SRv6 SID Structure Sub-Sub TLV
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MvIncSrv6SidStructSsTlv']))
@property
def Name(self):
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def NoOfASPathSegmentsPerRouteRange(self):
"""
Returns
-------
- number: Number Of AS Path Segments Per Route Range
"""
return self._get_attribute(self._SDM_ATT_MAP['NoOfASPathSegmentsPerRouteRange'])
@NoOfASPathSegmentsPerRouteRange.setter
def NoOfASPathSegmentsPerRouteRange(self, value):
self._set_attribute(self._SDM_ATT_MAP['NoOfASPathSegmentsPerRouteRange'], value)
@property
def NoOfClusters(self):
"""
Returns
-------
- number: Number of Clusters
"""
return self._get_attribute(self._SDM_ATT_MAP['NoOfClusters'])
@NoOfClusters.setter
def NoOfClusters(self, value):
self._set_attribute(self._SDM_ATT_MAP['NoOfClusters'], value)
@property
def NoOfCommunities(self):
"""
Returns
-------
- number: Number of Communities
"""
return self._get_attribute(self._SDM_ATT_MAP['NoOfCommunities'])
@NoOfCommunities.setter
def NoOfCommunities(self, value):
self._set_attribute(self._SDM_ATT_MAP['NoOfCommunities'], value)
@property
def NoOfExtendedCommunity(self):
"""
Returns
-------
- number: Number of Extended Communities
"""
return self._get_attribute(self._SDM_ATT_MAP['NoOfExtendedCommunity'])
@NoOfExtendedCommunity.setter
def NoOfExtendedCommunity(self, value):
self._set_attribute(self._SDM_ATT_MAP['NoOfExtendedCommunity'], value)
@property
def NumBroadcastDomainV6(self):
"""
Returns
-------
- number: The number of broadcast domain to be configured under EVI
"""
return self._get_attribute(self._SDM_ATT_MAP['NumBroadcastDomainV6'])
@NumBroadcastDomainV6.setter
def NumBroadcastDomainV6(self, value):
self._set_attribute(self._SDM_ATT_MAP['NumBroadcastDomainV6'], value)
@property
def NumRtInExportRouteTargetList(self):
"""
Returns
-------
- number: Number of RTs in Export Route Target List(multiplier)
"""
return self._get_attribute(self._SDM_ATT_MAP['NumRtInExportRouteTargetList'])
@NumRtInExportRouteTargetList.setter
def NumRtInExportRouteTargetList(self, value):
self._set_attribute(self._SDM_ATT_MAP['NumRtInExportRouteTargetList'], value)
@property
def NumRtInImportRouteTargetList(self):
"""
Returns
-------
- number: Number of RTs in Import Route Target List(multiplier)
"""
return self._get_attribute(self._SDM_ATT_MAP['NumRtInImportRouteTargetList'])
@NumRtInImportRouteTargetList.setter
def NumRtInImportRouteTargetList(self, value):
self._set_attribute(self._SDM_ATT_MAP['NumRtInImportRouteTargetList'], value)
@property
def NumRtInL3vniExportRouteTargetList(self):
"""
Returns
-------
- number: Number of RTs in L3 Export Route Target List(multiplier)
"""
return self._get_attribute(self._SDM_ATT_MAP['NumRtInL3vniExportRouteTargetList'])
@NumRtInL3vniExportRouteTargetList.setter
def NumRtInL3vniExportRouteTargetList(self, value):
self._set_attribute(self._SDM_ATT_MAP['NumRtInL3vniExportRouteTargetList'], value)
@property
def NumRtInL3vniImportRouteTargetList(self):
"""
Returns
-------
- number: Number of RTs in L3 Import Route Target List(multiplier)
"""
return self._get_attribute(self._SDM_ATT_MAP['NumRtInL3vniImportRouteTargetList'])
@NumRtInL3vniImportRouteTargetList.setter
def NumRtInL3vniImportRouteTargetList(self, value):
self._set_attribute(self._SDM_ATT_MAP['NumRtInL3vniImportRouteTargetList'], value)
@property
def Origin(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Origin
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Origin']))
@property
def OriginatingRouterIpv4(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Configures Originating Router IP address in IPv4 Address format
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OriginatingRouterIpv4']))
@property
def OriginatingRouterIpv6(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Configures Originating Router IP address in IPv6 Address format
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OriginatingRouterIpv6']))
@property
def OriginatorId(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Originator ID
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OriginatorId']))
@property
def OverridePeerAsSetMode(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Override Peer AS# Set Mode
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OverridePeerAsSetMode']))
@property
def PmsiTunnelIDv4(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): PMSI Tunnel ID
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PmsiTunnelIDv4']))
@property
def PmsiTunnelIDv6(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): PMSI Tunnel ID
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PmsiTunnelIDv6']))
@property
def RdASNumber(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): EVPN Route Distinguisher AS Number (2-byte or 4-Byte)
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RdASNumber']))
@property
def RdEvi(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): EVPN Route Distinguisher Assigned Number
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RdEvi']))
@property
def RdIpAddress(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): RD IP Addresses
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RdIpAddress']))
@property
def RdType(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): EVPN RR Distinguisher Type
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RdType']))
@property
def SendSRv6SIDOptionalInfo(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): If we need to advertise SRv6 SID Optional Information (Service Information sub-TLV) which is specified in next column(s)
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SendSRv6SIDOptionalInfo']))
@property
def SendSRv6SIDOptionalInfoPMSI(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): If we need to advertise SRv6 SID Optional Information (Service Information sub-TLV) which is specified in next column(s)
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SendSRv6SIDOptionalInfoPMSI']))
@property
def SessionStatus(self):
"""
Returns
-------
        - list(str[down | notStarted | up]): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.
"""
return self._get_attribute(self._SDM_ATT_MAP['SessionStatus'])
@property
def SetNextHop(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Set Next Hop
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SetNextHop']))
@property
def SetNextHopIpType(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Set Next Hop IP Type
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SetNextHopIpType']))
@property
def Srv6EndpointBehavior(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): SRv6 Endpoint Behavior field Value for all routes in this Route Range
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6EndpointBehavior']))
@property
def Srv6EndpointBehaviorPMSI(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): SRv6 Endpoint Behavior field Value for all routes in this Route Range
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6EndpointBehaviorPMSI']))
@property
def Srv6SIDOptionalInformation(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): SRv6 SID Optional Information field Value (Service Information sub-TLV) for all routes in this Route Range
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SIDOptionalInformation']))
@property
def Srv6SIDOptionalInformationPMSI(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): SRv6 SID Optional Information field Value (Service Information sub-TLV) for all routes in this Route Range
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SIDOptionalInformationPMSI']))
@property
def Srv6SidFlags(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): SRv6 SID Flags Value
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SidFlags']))
@property
def Srv6SidFlagsPMSI(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): SRv6 SID Flags Value
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SidFlagsPMSI']))
@property
def Srv6SidLoc(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): SRv6 SID. It consists of Locator, Func and Args
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SidLoc']))
@property
def Srv6SidLocLen(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): SRv6 SID Locator Length
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SidLocLen']))
@property
def Srv6SidLocLenPMSI(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): SRv6 SID Locator Length
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SidLocLenPMSI']))
@property
def Srv6SidLocMetric(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): SRv6 SID Locator Metric
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SidLocMetric']))
@property
def Srv6SidLocMetricPMSI(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): SRv6 SID Locator Metric
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SidLocMetricPMSI']))
@property
def Srv6SidLocPMSI(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): SRv6 SID. It consists of Locator, Func and Args
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SidLocPMSI']))
@property
def Srv6SidReserved(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): SRv6 SID Reserved Value (SRv6 SID Service TLV Level)
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SidReserved']))
@property
def Srv6SidReserved1(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): SRv6 SID Reserved1 Field for Service Information sub-TLV
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SidReserved1']))
@property
def Srv6SidReserved1PMSI(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): SRv6 SID Reserved1 Field for Service Information sub-TLV
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SidReserved1PMSI']))
@property
def Srv6SidReserved2(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): SRv6 SID Reserved2 Field for Service Information sub-TLV
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SidReserved2']))
@property
def Srv6SidReserved2PMSI(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): SRv6 SID Reserved2 Field for Service Information sub-TLV
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SidReserved2PMSI']))
@property
def Srv6SidReservedPMSI(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): SRv6 SID Reserved Value (SRv6 SID Service TLV Level)
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SidReservedPMSI']))
@property
def StackedLayers(self):
"""
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of secondary (many to one) child layer protocols
"""
return self._get_attribute(self._SDM_ATT_MAP['StackedLayers'])
@StackedLayers.setter
def StackedLayers(self, value):
self._set_attribute(self._SDM_ATT_MAP['StackedLayers'], value)
@property
def StateCounts(self):
"""
Returns
-------
- dict(total:number,notStarted:number,down:number,up:number): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
"""
return self._get_attribute(self._SDM_ATT_MAP['StateCounts'])
@property
def Status(self):
"""
Returns
-------
- str(configured | error | mixed | notStarted | started | starting | stopping): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
"""
return self._get_attribute(self._SDM_ATT_MAP['Status'])
@property
def TranpositionLength(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Transposition Length
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TranpositionLength']))
@property
def TranpositionOffset(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Transposition Offset
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TranpositionOffset']))
@property
def UpstreamDownstreamAssignedMplsLabel(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Upstream/Downstream Assigned MPLS Label
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UpstreamDownstreamAssignedMplsLabel']))
@property
def UseIpv4MappedIpv6Address(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Use IPv4 Mapped IPv6 Address
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UseIpv4MappedIpv6Address']))
@property
def UseUpstreamDownstreamAssignedMplsLabel(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Use Upstream/Downstream Assigned MPLS Label
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UseUpstreamDownstreamAssignedMplsLabel']))
def update(self, ConnectedVia=None, ImportRtListSameAsExportRtList=None, L3vniImportRtListSameAsL3vniExportRtList=None, Multiplier=None, Name=None, NoOfASPathSegmentsPerRouteRange=None, NoOfClusters=None, NoOfCommunities=None, NoOfExtendedCommunity=None, NumBroadcastDomainV6=None, NumRtInExportRouteTargetList=None, NumRtInImportRouteTargetList=None, NumRtInL3vniExportRouteTargetList=None, NumRtInL3vniImportRouteTargetList=None, StackedLayers=None):
"""Updates bgpIPv6EvpnVpws resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- ImportRtListSameAsExportRtList (bool): Import RT List Same As Export RT List
- L3vniImportRtListSameAsL3vniExportRtList (bool): L3 Import RT List Same As L3 Export RT List
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- NoOfASPathSegmentsPerRouteRange (number): Number Of AS Path Segments Per Route Range
- NoOfClusters (number): Number of Clusters
- NoOfCommunities (number): Number of Communities
- NoOfExtendedCommunity (number): Number of Extended Communities
- NumBroadcastDomainV6 (number): The number of broadcast domain to be configured under EVI
- NumRtInExportRouteTargetList (number): Number of RTs in Export Route Target List(multiplier)
- NumRtInImportRouteTargetList (number): Number of RTs in Import Route Target List(multiplier)
- NumRtInL3vniExportRouteTargetList (number): Number of RTs in L3 Export Route Target List(multiplier)
- NumRtInL3vniImportRouteTargetList (number): Number of RTs in L3 Import Route Target List(multiplier)
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, ConnectedVia=None, ImportRtListSameAsExportRtList=None, L3vniImportRtListSameAsL3vniExportRtList=None, Multiplier=None, Name=None, NoOfASPathSegmentsPerRouteRange=None, NoOfClusters=None, NoOfCommunities=None, NoOfExtendedCommunity=None, NumBroadcastDomainV6=None, NumRtInExportRouteTargetList=None, NumRtInImportRouteTargetList=None, NumRtInL3vniExportRouteTargetList=None, NumRtInL3vniImportRouteTargetList=None, StackedLayers=None):
"""Adds a new bgpIPv6EvpnVpws resource on the server and adds it to the container.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- ImportRtListSameAsExportRtList (bool): Import RT List Same As Export RT List
- L3vniImportRtListSameAsL3vniExportRtList (bool): L3 Import RT List Same As L3 Export RT List
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- NoOfASPathSegmentsPerRouteRange (number): Number Of AS Path Segments Per Route Range
- NoOfClusters (number): Number of Clusters
- NoOfCommunities (number): Number of Communities
- NoOfExtendedCommunity (number): Number of Extended Communities
- NumBroadcastDomainV6 (number): The number of broadcast domain to be configured under EVI
- NumRtInExportRouteTargetList (number): Number of RTs in Export Route Target List(multiplier)
- NumRtInImportRouteTargetList (number): Number of RTs in Import Route Target List(multiplier)
- NumRtInL3vniExportRouteTargetList (number): Number of RTs in L3 Export Route Target List(multiplier)
- NumRtInL3vniImportRouteTargetList (number): Number of RTs in L3 Import Route Target List(multiplier)
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Returns
-------
- self: This instance with all currently retrieved bgpIPv6EvpnVpws resources using find and the newly added bgpIPv6EvpnVpws resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained bgpIPv6EvpnVpws resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ConnectedVia=None, Count=None, DescriptiveName=None, Errors=None, EsiValue=None, ImportRtListSameAsExportRtList=None, L3vniImportRtListSameAsL3vniExportRtList=None, Multiplier=None, Name=None, NoOfASPathSegmentsPerRouteRange=None, NoOfClusters=None, NoOfCommunities=None, NoOfExtendedCommunity=None, NumBroadcastDomainV6=None, NumRtInExportRouteTargetList=None, NumRtInImportRouteTargetList=None, NumRtInL3vniExportRouteTargetList=None, NumRtInL3vniImportRouteTargetList=None, SessionStatus=None, StackedLayers=None, StateCounts=None, Status=None):
"""Finds and retrieves bgpIPv6EvpnVpws resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve bgpIPv6EvpnVpws resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all bgpIPv6EvpnVpws resources from the server.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- Errors (list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str]))): A list of errors that have occurred
- EsiValue (list(str)): ESI Value
- ImportRtListSameAsExportRtList (bool): Import RT List Same As Export RT List
- L3vniImportRtListSameAsL3vniExportRtList (bool): L3 Import RT List Same As L3 Export RT List
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- NoOfASPathSegmentsPerRouteRange (number): Number Of AS Path Segments Per Route Range
- NoOfClusters (number): Number of Clusters
- NoOfCommunities (number): Number of Communities
- NoOfExtendedCommunity (number): Number of Extended Communities
- NumBroadcastDomainV6 (number): The number of broadcast domain to be configured under EVI
- NumRtInExportRouteTargetList (number): Number of RTs in Export Route Target List(multiplier)
- NumRtInImportRouteTargetList (number): Number of RTs in Import Route Target List(multiplier)
- NumRtInL3vniExportRouteTargetList (number): Number of RTs in L3 Export Route Target List(multiplier)
- NumRtInL3vniImportRouteTargetList (number): Number of RTs in L3 Import Route Target List(multiplier)
        - SessionStatus (list(str[down | notStarted | up])): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
- StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
- Status (str(configured | error | mixed | notStarted | started | starting | stopping)): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
Returns
-------
- self: This instance with matching bgpIPv6EvpnVpws resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of bgpIPv6EvpnVpws data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the bgpIPv6EvpnVpws resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, Active=None, AdRouteLabel=None, AdvSrv6SidInIgp=None, AdvSrv6SidInIgpPMSI=None, AdvertiseL3vniSeparately=None, AdvertiseSRv6SID=None, AdvertiseSRv6SIDPMSI=None, AggregatorAs=None, AggregatorId=None, ArgumentLength=None, AsSetMode=None, AutoConfigOriginatingRouterIp=None, AutoConfigPMSITunnelId=None, AutoConfigureRdIpAddress=None, BMacFirstLabel=None, BMacSecondLabel=None, EnableAggregatorId=None, EnableAsPathSegments=None, EnableAtomicAggregate=None, EnableBMacSecondLabel=None, EnableCluster=None, EnableCommunity=None, EnableExtendedCommunity=None, EnableL3TargetOnlyForRouteType5=None, EnableL3vniTargetList=None, EnableLocalPreference=None, EnableMultiExitDiscriminator=None, EnableNextHop=None, EnableOrigin=None, EnableOriginatorId=None, EsiType=None, FunctionLength=None, IncludePmsiTunnelAttribute=None, Ipv4NextHop=None, Ipv6NextHop=None, LocBlockLength=None, LocNodeLength=None, LocalPreference=None, MultiExitDiscriminator=None, MulticastTunnelType=None, MvEnableTransposition=None, MvIncSrv6SidStructSsTlv=None, Origin=None, OriginatingRouterIpv4=None, OriginatingRouterIpv6=None, OriginatorId=None, OverridePeerAsSetMode=None, PmsiTunnelIDv4=None, PmsiTunnelIDv6=None, RdASNumber=None, RdEvi=None, RdIpAddress=None, RdType=None, SendSRv6SIDOptionalInfo=None, SendSRv6SIDOptionalInfoPMSI=None, SetNextHop=None, SetNextHopIpType=None, Srv6EndpointBehavior=None, Srv6EndpointBehaviorPMSI=None, Srv6SIDOptionalInformation=None, Srv6SIDOptionalInformationPMSI=None, Srv6SidFlags=None, Srv6SidFlagsPMSI=None, Srv6SidLoc=None, Srv6SidLocLen=None, Srv6SidLocLenPMSI=None, Srv6SidLocMetric=None, Srv6SidLocMetricPMSI=None, Srv6SidLocPMSI=None, Srv6SidReserved=None, Srv6SidReserved1=None, Srv6SidReserved1PMSI=None, Srv6SidReserved2=None, Srv6SidReserved2PMSI=None, Srv6SidReservedPMSI=None, TranpositionLength=None, TranpositionOffset=None, UpstreamDownstreamAssignedMplsLabel=None, UseIpv4MappedIpv6Address=None, UseUpstreamDownstreamAssignedMplsLabel=None):
"""Base class infrastructure that gets a list of bgpIPv6EvpnVpws device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Active (str): optional regex of active
- AdRouteLabel (str): optional regex of adRouteLabel
- AdvSrv6SidInIgp (str): optional regex of advSrv6SidInIgp
- AdvSrv6SidInIgpPMSI (str): optional regex of advSrv6SidInIgpPMSI
- AdvertiseL3vniSeparately (str): optional regex of advertiseL3vniSeparately
- AdvertiseSRv6SID (str): optional regex of advertiseSRv6SID
- AdvertiseSRv6SIDPMSI (str): optional regex of advertiseSRv6SIDPMSI
- AggregatorAs (str): optional regex of aggregatorAs
- AggregatorId (str): optional regex of aggregatorId
- ArgumentLength (str): optional regex of argumentLength
- AsSetMode (str): optional regex of asSetMode
- AutoConfigOriginatingRouterIp (str): optional regex of autoConfigOriginatingRouterIp
- AutoConfigPMSITunnelId (str): optional regex of autoConfigPMSITunnelId
- AutoConfigureRdIpAddress (str): optional regex of autoConfigureRdIpAddress
- BMacFirstLabel (str): optional regex of bMacFirstLabel
- BMacSecondLabel (str): optional regex of bMacSecondLabel
- EnableAggregatorId (str): optional regex of enableAggregatorId
- EnableAsPathSegments (str): optional regex of enableAsPathSegments
- EnableAtomicAggregate (str): optional regex of enableAtomicAggregate
- EnableBMacSecondLabel (str): optional regex of enableBMacSecondLabel
- EnableCluster (str): optional regex of enableCluster
- EnableCommunity (str): optional regex of enableCommunity
- EnableExtendedCommunity (str): optional regex of enableExtendedCommunity
- EnableL3TargetOnlyForRouteType5 (str): optional regex of enableL3TargetOnlyForRouteType5
- EnableL3vniTargetList (str): optional regex of enableL3vniTargetList
- EnableLocalPreference (str): optional regex of enableLocalPreference
- EnableMultiExitDiscriminator (str): optional regex of enableMultiExitDiscriminator
- EnableNextHop (str): optional regex of enableNextHop
- EnableOrigin (str): optional regex of enableOrigin
- EnableOriginatorId (str): optional regex of enableOriginatorId
- EsiType (str): optional regex of esiType
- FunctionLength (str): optional regex of functionLength
- IncludePmsiTunnelAttribute (str): optional regex of includePmsiTunnelAttribute
- Ipv4NextHop (str): optional regex of ipv4NextHop
- Ipv6NextHop (str): optional regex of ipv6NextHop
- LocBlockLength (str): optional regex of locBlockLength
- LocNodeLength (str): optional regex of locNodeLength
- LocalPreference (str): optional regex of localPreference
- MultiExitDiscriminator (str): optional regex of multiExitDiscriminator
- MulticastTunnelType (str): optional regex of multicastTunnelType
- MvEnableTransposition (str): optional regex of mvEnableTransposition
- MvIncSrv6SidStructSsTlv (str): optional regex of mvIncSrv6SidStructSsTlv
- Origin (str): optional regex of origin
- OriginatingRouterIpv4 (str): optional regex of originatingRouterIpv4
- OriginatingRouterIpv6 (str): optional regex of originatingRouterIpv6
- OriginatorId (str): optional regex of originatorId
- OverridePeerAsSetMode (str): optional regex of overridePeerAsSetMode
- PmsiTunnelIDv4 (str): optional regex of pmsiTunnelIDv4
- PmsiTunnelIDv6 (str): optional regex of pmsiTunnelIDv6
- RdASNumber (str): optional regex of rdASNumber
- RdEvi (str): optional regex of rdEvi
- RdIpAddress (str): optional regex of rdIpAddress
- RdType (str): optional regex of rdType
- SendSRv6SIDOptionalInfo (str): optional regex of sendSRv6SIDOptionalInfo
- SendSRv6SIDOptionalInfoPMSI (str): optional regex of sendSRv6SIDOptionalInfoPMSI
- SetNextHop (str): optional regex of setNextHop
- SetNextHopIpType (str): optional regex of setNextHopIpType
- Srv6EndpointBehavior (str): optional regex of srv6EndpointBehavior
- Srv6EndpointBehaviorPMSI (str): optional regex of srv6EndpointBehaviorPMSI
- Srv6SIDOptionalInformation (str): optional regex of srv6SIDOptionalInformation
- Srv6SIDOptionalInformationPMSI (str): optional regex of srv6SIDOptionalInformationPMSI
- Srv6SidFlags (str): optional regex of srv6SidFlags
- Srv6SidFlagsPMSI (str): optional regex of srv6SidFlagsPMSI
- Srv6SidLoc (str): optional regex of srv6SidLoc
- Srv6SidLocLen (str): optional regex of srv6SidLocLen
- Srv6SidLocLenPMSI (str): optional regex of srv6SidLocLenPMSI
- Srv6SidLocMetric (str): optional regex of srv6SidLocMetric
- Srv6SidLocMetricPMSI (str): optional regex of srv6SidLocMetricPMSI
- Srv6SidLocPMSI (str): optional regex of srv6SidLocPMSI
- Srv6SidReserved (str): optional regex of srv6SidReserved
- Srv6SidReserved1 (str): optional regex of srv6SidReserved1
- Srv6SidReserved1PMSI (str): optional regex of srv6SidReserved1PMSI
- Srv6SidReserved2 (str): optional regex of srv6SidReserved2
- Srv6SidReserved2PMSI (str): optional regex of srv6SidReserved2PMSI
- Srv6SidReservedPMSI (str): optional regex of srv6SidReservedPMSI
- TranpositionLength (str): optional regex of tranpositionLength
- TranpositionOffset (str): optional regex of tranpositionOffset
- UpstreamDownstreamAssignedMplsLabel (str): optional regex of upstreamDownstreamAssignedMplsLabel
- UseIpv4MappedIpv6Address (str): optional regex of useIpv4MappedIpv6Address
- UseUpstreamDownstreamAssignedMplsLabel (str): optional regex of useUpstreamDownstreamAssignedMplsLabel
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
def Abort(self, *args, **kwargs):
"""Executes the abort operation on the server.
        Abort the CPF control plane (equivalent to demoting to the kUnconfigured state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
abort(SessionIndices=list)
--------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
abort(SessionIndices=string)
----------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('abort', payload=payload, response_object=None)
def RestartDown(self, *args, **kwargs):
"""Executes the restartDown operation on the server.
Stop and start interfaces and sessions that are in Down state.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
restartDown(SessionIndices=list)
--------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
restartDown(SessionIndices=string)
----------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('restartDown', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
"""Executes the start operation on the server.
        Start the CPF control plane (equivalent to promoting to the negotiated state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
start(SessionIndices=list)
--------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
start(SessionIndices=string)
----------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
"""Executes the stop operation on the server.
        Stop the CPF control plane (equivalent to demoting to the PreValidated-DoDDone state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
stop(SessionIndices=list)
-------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
stop(SessionIndices=string)
---------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
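# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated SDK). It assumes a reachable
# IxNetwork API server and an already-built NGPF stack ending in a BGP+ IPv6
# peer; the Topology -> DeviceGroup -> Ethernet -> Ipv6 -> BgpIpv6Peer parent
# chain follows the usual NGPF hierarchy and is an assumption, not something
# this module defines.
if __name__ == '__main__':
    from ixnetwork_restpy import SessionAssistant
    session = SessionAssistant(IpAddress='127.0.0.1',
                               UserName='admin', Password='admin')
    ixnetwork = session.Ixnetwork
    bgp_peer = (ixnetwork.Topology.find()
                .DeviceGroup.find()
                .Ethernet.find()
                .Ipv6.find()
                .BgpIpv6Peer.find())
    # add() creates the resource on the server; plain attributes take simple
    # values, while multivalue attributes are steered via Multivalue patterns.
    vpws = bgp_peer.BgpIPv6EvpnVpws.add(Name='evpn-vpws-1', Multiplier=1,
                                        NumBroadcastDomainV6=1)
    vpws.Active.Single(True)  # set a multivalue via its Single pattern
    vpws.Start()              # promote the protocol sessions
    print(vpws.SessionStatus)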
# ---------------------------------------------------------------------------
# File: /transformer/translate.py  (repo: PeterDing/mxnet-learning, no license)
# ---------------------------------------------------------------------------
from mxnet import nd
from data import make_src_mask, make_trg_mask
MAX_LEN = 20
def translate(net, src, trg_vocab, s_pad, t_bos, t_eos, t_pad):
    """Greedy decoding: rerun the decoder on the growing target prefix and
    append the most probable next token until <eos> or MAX_LEN is reached."""
    src_mask = make_src_mask(src, s_pad)
    # Convention kept from the original call sites: `t_bos` is a *token*
    # (it is fed through to_indices), while `t_eos` is compared against a
    # predicted *index*.
    trg_list = [t_bos]
    for _ in range(MAX_LEN):
        trg = nd.array([trg_vocab.to_indices(trg_list)])
        trg_mask = make_trg_mask(trg, t_pad)
        pred = net(src, trg, src_mask, trg_mask)
        out = pred.argmax(-1)
        # Prediction at the final position (time-major layout assumed).
        next_idx = int(out[-1][0].asscalar())
        if next_idx == t_eos:
            break
        # Bug fix: append the *token*; the original appended the raw index,
        # which to_indices() cannot look up on the next iteration.
        trg_list.append(trg_vocab.to_tokens(next_idx))
    return trg_list
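# Hedged usage sketch (every name below is an assumption -- the repo's data
# module defines the real batching and vocabulary setup):
#
#   tokens = translate(net, src_batch, trg_vocab,
#                      s_pad=src_vocab.to_indices(PAD),    # pad *index*
#                      t_bos=BOS,                          # <bos> *token*
#                      t_eos=trg_vocab.to_indices(EOS),    # <eos> *index*
#                      t_pad=trg_vocab.to_indices(PAD))    # pad *index*
#   print(' '.join(tokens))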
# ---------------------------------------------------------------------------
# File: /xirl/xirl/trainers/tcc.py  (repo: riven314/google-research,
# Apache-2.0 / CC-BY-4.0)
# ---------------------------------------------------------------------------
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TCC trainer."""
from typing import Dict, List, Union
from ml_collections import ConfigDict
import torch
from xirl.losses import compute_tcc_loss
from xirl.trainers.base import Trainer
BatchType = Dict[str, Union[torch.Tensor, List[str]]]
class TCCTrainer(Trainer):
"""A trainer for Temporal Cycle Consistency Learning [1].
References:
[1]: arxiv.org/abs/1904.07846
"""
def __init__(
self,
model,
optimizer,
device,
config,
):
super().__init__(model, optimizer, device, config)
self.normalize_embeddings = config.MODEL.NORMALIZE_EMBEDDINGS
self.stochastic_matching = config.LOSS.TCC.STOCHASTIC_MATCHING
self.loss_type = config.LOSS.TCC.LOSS_TYPE
self.similarity_type = config.LOSS.TCC.SIMILARITY_TYPE
self.cycle_length = config.LOSS.TCC.CYCLE_LENGTH
self.temperature = config.LOSS.TCC.SOFTMAX_TEMPERATURE
self.label_smoothing = config.LOSS.TCC.LABEL_SMOOTHING
self.variance_lambda = config.LOSS.TCC.VARIANCE_LAMBDA
self.huber_delta = config.LOSS.TCC.HUBER_DELTA
self.normalize_indices = config.LOSS.TCC.NORMALIZE_INDICES
def compute_loss(
self,
embs,
batch,
):
steps = batch["frame_idxs"].to(self._device)
seq_lens = batch["video_len"].to(self._device)
# Dynamically determine the number of cycles if using stochastic
# matching.
batch_size, num_cc_frames = embs.shape[:2]
num_cycles = int(batch_size * num_cc_frames)
return compute_tcc_loss(
embs=embs,
idxs=steps,
seq_lens=seq_lens,
stochastic_matching=self.stochastic_matching,
normalize_embeddings=self.normalize_embeddings,
loss_type=self.loss_type,
similarity_type=self.similarity_type,
num_cycles=num_cycles,
cycle_length=self.cycle_length,
temperature=self.temperature,
label_smoothing=self.label_smoothing,
variance_lambda=self.variance_lambda,
huber_delta=self.huber_delta,
normalize_indices=self.normalize_indices,
)
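if __name__ == "__main__":
    # Hedged smoke-test sketch (not part of the original file): exercise the
    # underlying loss directly with random embeddings. The keyword list is
    # taken from the compute_loss call above; the shapes and the
    # loss/similarity type strings are illustrative assumptions.
    batch_size, num_frames, emb_dim = 2, 8, 32
    embs = torch.randn(batch_size, num_frames, emb_dim)
    idxs = torch.arange(num_frames).repeat(batch_size, 1)
    seq_lens = torch.full((batch_size,), num_frames)
    loss = compute_tcc_loss(
        embs=embs,
        idxs=idxs,
        seq_lens=seq_lens,
        stochastic_matching=False,
        normalize_embeddings=True,
        loss_type="regression_mse_var",
        similarity_type="l2",
        num_cycles=batch_size * num_frames,
        cycle_length=2,
        temperature=0.1,
        label_smoothing=0.1,
        variance_lambda=0.001,
        huber_delta=0.1,
        normalize_indices=True,
    )
    print("tcc loss:", loss.item())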
# ---------------------------------------------------------------------------
# File: /models_nonconvex_simple/fuel.py
# (repo: RomeoV/pyomo-MINLP-benchmarking, MIT)
# ---------------------------------------------------------------------------
# MINLP written by GAMS Convert at 08/13/20 17:37:43
#
# Equation counts
# Total E G L N X C B
# 16 7 6 3 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 16 13 3 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 39 33 6 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.b1 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b3 = Var(within=Binary,bounds=(0,1),initialize=0)
m.x4 = Var(within=Reals,bounds=(None,None),initialize=100)
m.x5 = Var(within=Reals,bounds=(None,None),initialize=100)
m.x6 = Var(within=Reals,bounds=(None,None),initialize=100)
m.x7 = Var(within=Reals,bounds=(50,700),initialize=50)
m.x8 = Var(within=Reals,bounds=(50,700),initialize=50)
m.x9 = Var(within=Reals,bounds=(50,700),initialize=50)
m.x10 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x11 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x12 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x13 = Var(within=Reals,bounds=(0,4000),initialize=0)
m.x14 = Var(within=Reals,bounds=(0,4000),initialize=0)
m.x15 = Var(within=Reals,bounds=(2000,4000),initialize=2000)
m.obj = Objective(expr=0.0025*m.x7**2 + 6*m.x7 + 0.0025*m.x8**2 + 6*m.x8 + 0.0025*m.x9**2 + 6*m.x9
+ 900, sense=minimize)
m.c2 = Constraint(expr= - 100*m.b1 + m.x4 >= 0)
m.c3 = Constraint(expr= - 100*m.b2 + m.x5 >= 0)
m.c4 = Constraint(expr= - 100*m.b3 + m.x6 >= 0)
m.c5 = Constraint(expr= - 500*m.b1 + m.x4 <= 0)
m.c6 = Constraint(expr= - 500*m.b2 + m.x5 <= 0)
m.c7 = Constraint(expr= - 500*m.b3 + m.x6 <= 0)
m.c8 = Constraint(expr= m.x10 + m.x13 == 3500)
m.c9 = Constraint(expr= m.x11 - m.x13 + m.x14 == 500)
m.c10 = Constraint(expr= m.x12 - m.x14 + m.x15 == 500)
m.c11 = Constraint(expr= m.x4 + m.x7 >= 400)
m.c12 = Constraint(expr= m.x5 + m.x8 >= 900)
m.c13 = Constraint(expr= m.x6 + m.x9 >= 700)
m.c14 = Constraint(expr=-(0.005*m.x4**2 + m.x4) - 50*m.b1 + m.x10 == 0)
m.c15 = Constraint(expr=-(0.005*m.x5**2 + m.x5) - 50*m.b2 + m.x11 == 0)
m.c16 = Constraint(expr=-(0.005*m.x6**2 + m.x6) - 50*m.b3 + m.x12 == 0)
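# Hedged usage sketch (not part of the GAMS-converted model): one way to
# solve this nonconvex MINLP, assuming Pyomo's MindtPy decomposition solver
# plus an MIP solver (glpk) and an NLP solver (ipopt) are installed; the
# benchmarking harness in this repo may drive solvers differently.
if __name__ == '__main__':
    res = SolverFactory('mindtpy').solve(m, mip_solver='glpk',
                                         nlp_solver='ipopt', tee=True)
    print(res.solver.termination_condition)
    print('objective =', value(m.obj))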
# ---------------------------------------------------------------------------
# File: /dictionaryAndSets/join_2.py  (repo: aiqingr/python-lesson, no license)
# ---------------------------------------------------------------------------
# Modify the program so that exits is a dictionary rather than a list,
# with the keys being the numbers of the locations and the values being
# dictionaries holding the exits (as they do at present). No change should
# be needed to the actual code.
#
# Once that is working, create another dictionary that contains words that
# players may use. These words will be the keys, and their values will be
# a single letter that the program can use to determine which way to go.
locations = {0: "you are sitting in front of a computer learning python",
1: "You are sitting at the end of a road before a small brick building",
2: "You are at the top of a hill",
3: "You are inside a building, a small house for a small stream",
4: "You are in a valley beside a stream",
5: "You are in the forest"}
exits = {0: {"Q": 0},
1: {"W": 2, "E": 3, "N": 5, "S": 4, "Q": 0},
2: {"N": 5, "Q": 0},
3: {"W": 1, "Q": 0},
4: {"N": 1, "W": 2, "Q": 0},
5: {"W": 2, "S": 1, "Q": 0}}
vocabulary = {"QUIT": "Q",
"NORTH": "N",
"SOUTH": "S",
"EAST": "E",
"WEST": "W"}
# print(locations[0].split())
# print(locations[3].split())
loc = 1
while True:
availableExits = ", ".join(exits[loc].keys())
print(locations[loc])
if loc == 0:
break
    direction = input("Available exits are " + availableExits).upper()
print()
# Parse the user input, using our vocabulary dictionary if necessary
if len(direction) > 1: # More than one letter
words = direction.split()
for word in words: # Does it contain a word we know
if word in vocabulary:
direction = vocabulary[word]
if direction in exits[loc]:
loc = exits[loc][direction]
else:
print("You cannot go in that direction")
|
[
"ntyaiqingr@gmail.com"
] |
ntyaiqingr@gmail.com
|
8491e029f5851120fca824478d2bb40ddbbd74a3
|
b2e5677ecd4c2c0bb2d091d3371d2815dd36ffd5
|
/examples/topicmod_lda.py
|
5213c25548fa03bacd15e40f7869f26a2697836a
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
WZBSocialScienceCenter/tmtoolkit
|
f573e3b4db7975106b63a108ed146e43b443c6d6
|
02990865ee896625d5cf540bf2b0dbc159bedf38
|
refs/heads/master
| 2023-05-10T23:26:37.265300
| 2023-05-03T09:35:04
| 2023-05-03T09:35:04
| 109,812,180
| 202
| 33
|
Apache-2.0
| 2022-01-10T12:17:43
| 2017-11-07T09:11:54
|
Python
|
UTF-8
|
Python
| false
| false
| 5,671
|
py
|
"""
An example of topic modeling with LDA, with a focus on the new plotting functions in `tmtoolkit.corpus.visualize` and
in `tmtoolkit.topicmod.visualize`.
This example requires that you have installed tmtoolkit with the recommended set of packages plus "lda" and have
installed an English language model for spaCy:
pip install -U "tmtoolkit[recommended,lda]"
python -m tmtoolkit setup en
For more information, see the installation instructions: https://tmtoolkit.readthedocs.io/en/latest/install.html
.. codeauthor:: Markus Konrad <markus.konrad@wzb.eu>
"""
import os.path
import matplotlib.pyplot as plt
from tmtoolkit.utils import enable_logging, pickle_data, unpickle_file
from tmtoolkit.corpus import Corpus, lemmatize, to_lowercase, remove_punctuation, remove_common_tokens, \
remove_uncommon_tokens, filter_clean_tokens, print_summary, remove_documents_by_length, dtm, \
corpus_retokenize, save_corpus_to_picklefile, load_corpus_from_picklefile
from tmtoolkit.corpus.visualize import plot_doc_lengths_hist, plot_doc_frequencies_hist, plot_vocab_counts_hist, \
plot_ranked_vocab_counts, plot_num_sents_hist, plot_sent_lengths_hist, plot_num_sents_vs_sent_length, \
plot_token_lengths_hist
from tmtoolkit.topicmod.tm_lda import evaluate_topic_models # we're using lda for topic modeling
from tmtoolkit.topicmod.evaluate import results_by_parameter
from tmtoolkit.topicmod.model_io import print_ldamodel_topic_words
from tmtoolkit.topicmod.visualize import plot_eval_results, plot_topic_word_ranked_prob, plot_doc_topic_ranked_prob
#%%
enable_logging()
#%% loading the sample corpus (English news articles)
corp_picklefile = 'data/topicmod_lda_corpus.pickle'
if os.path.exists(corp_picklefile):
docs = load_corpus_from_picklefile(corp_picklefile)
else:
docs = Corpus.from_builtin_corpus('en-NewsArticles', max_workers=1.0)
save_corpus_to_picklefile(docs, corp_picklefile)
print_summary(docs)
#%% plot some corpus summary statistics
# you can copy these calls and repeat the plotting after the corpus transformations in the next cell;
# this nicely shows how the transformations change the distribution of words in the corpus
fig, ax = plt.subplots()
plot_doc_lengths_hist(fig, ax, docs)
plt.show()
fig, ax = plt.subplots()
plot_vocab_counts_hist(fig, ax, docs)
plt.show()
fig, ax = plt.subplots()
plot_ranked_vocab_counts(fig, ax, docs, zipf=True)
plt.show()
fig, ax = plt.subplots()
plot_doc_frequencies_hist(fig, ax, docs)
plt.show()
fig, ax = plt.subplots()
plot_num_sents_hist(fig, ax, docs)
plt.show()
fig, ax = plt.subplots()
plot_sent_lengths_hist(fig, ax, docs)
plt.show()
fig, ax = plt.subplots()
plot_num_sents_vs_sent_length(fig, ax, docs)
plt.show()
fig, ax = plt.subplots()
plot_token_lengths_hist(fig, ax, docs)
plt.show()
#%% apply preprocessing pipeline
corp_preproc_picklefile = 'data/topicmod_lda_corpus_preprocessed.pickle'
if os.path.exists(corp_preproc_picklefile):
docs = load_corpus_from_picklefile(corp_preproc_picklefile)
else:
remove_punctuation(docs)
corpus_retokenize(docs)
lemmatize(docs)
to_lowercase(docs)
filter_clean_tokens(docs, remove_numbers=True)
remove_common_tokens(docs, df_threshold=0.90)
remove_uncommon_tokens(docs, df_threshold=0.05)
remove_documents_by_length(docs, '<', 30)
save_corpus_to_picklefile(docs, corp_preproc_picklefile)
print_summary(docs)
#%% generating the document-term matrix
dtm_picklefile = 'data/topicmod_lda_dtm.pickle'
if os.path.exists(dtm_picklefile):
bow_mat, doc_labels, vocab = unpickle_file(dtm_picklefile)
else:
bow_mat, doc_labels, vocab = dtm(docs, return_doc_labels=True, return_vocab=True)
pickle_data((bow_mat, doc_labels, vocab), dtm_picklefile)
#%% running the evaluation
eval_res_picklefile = 'data/topicmod_lda_eval_res.pickle'
if os.path.exists(eval_res_picklefile):
eval_results = unpickle_file(eval_res_picklefile)
else:
const_params = {
'n_iter': 1500,
'eta': 0.3,
'random_state': 20220105 # to make results reproducible
}
var_params = [{'n_topics': k, 'alpha': 10.0/k}
for k in list(range(20, 101, 20)) + [125, 150, 175, 200, 250, 300]]
metrics = ['cao_juan_2009', 'arun_2010', 'coherence_mimno_2011', 'griffiths_2004']
eval_results = evaluate_topic_models(bow_mat,
varying_parameters=var_params,
constant_parameters=const_params,
return_models=True,
metric=metrics)
pickle_data(eval_results, eval_res_picklefile)
#%% plotting evaluation results
eval_by_topics = results_by_parameter(eval_results, 'n_topics')
plot_eval_results(eval_by_topics)
plt.show()
#%% selecting the model and printing the topics' most likely words
selected_model = dict(eval_by_topics)[200]['model']
print_ldamodel_topic_words(selected_model.topic_word_, vocab=vocab)
#%% investigating how many "top words" sufficiently describe a topic
fig, ax = plt.subplots()
plot_topic_word_ranked_prob(fig, ax, selected_model.topic_word_, n=40, log_scale=False,
highlight=[4, 12, 32], alpha=0.025)
plt.show()
# -> about 5 to 10 words aggregate most of the probability per topic
#%% investigating how many "top topics" sufficiently describe a document
fig, ax = plt.subplots()
plot_doc_topic_ranked_prob(fig, ax, selected_model.doc_topic_, n=40, log_scale=False, highlight=list(range(4)),
alpha=0.003)
plt.show()
# -> about 10 to 15 topics aggregate most of the probability per document
|
[
"markus.konrad@wzb.eu"
] |
markus.konrad@wzb.eu
|
5954ef4325137dc49b30e115d82e1d71e116ab1b
|
353f4a3ff46a3c6fbbb866598b8561edf6e71562
|
/server/dvaapp/migrations/0021_remove_video_query.py
|
7ff21bcf1a3f150060313affe1eddd34c09f0f06
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
MrGoogol/DeepVideoAnalytics
|
ab0f72179c62fd1b0a5bddea4a9b3970678790bd
|
e25e4bf6670fabd62fe86ad68ad03a854e22aed6
|
refs/heads/master
| 2021-05-14T12:25:48.355995
| 2018-01-05T08:59:24
| 2018-01-05T08:59:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-01 17:01
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dvaapp', '0020_retriever_last_built'),
]
operations = [
migrations.RemoveField(
model_name='video',
name='query',
),
]
|
[
"akshayubhat@gmail.com"
] |
akshayubhat@gmail.com
|
458d28e1f991f3dc25217deaa2b0ce53104ef827
|
b0e738d074574af920e63f453d3628f69ce1321f
|
/watch/migrations/0006_auto_20180318_0104.py
|
7f7ea38bf96d90f7be5917e405eec50cd11b4a33
|
[] |
no_license
|
vincentmuya/neighborhood-watch
|
b23d56a9c92cefc6b4da124f337b776a9cc0ada7
|
3744d6bd5f5e63bb6b47f2c34f728e05d7bc2362
|
refs/heads/master
| 2021-04-09T14:20:25.588959
| 2018-03-20T12:04:45
| 2018-03-20T12:04:45
| 125,489,861
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 675
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-17 22:04
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('watch', '0005_post_title'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='title',
),
migrations.AlterField(
model_name='post',
name='user',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"vincentmuya13@gmail.com"
] |
vincentmuya13@gmail.com
|
b1f778f8944c11ea9d756134cee1e718c3326499
|
f08bee97371d28256f5f669979ea5e8e88192be0
|
/mcm_scripts/get_requests.py
|
e14e9bba40485f312f214a85ce45d67e82c273d4
|
[] |
no_license
|
CMSSNU/GenGen
|
c6cd08aad9f6929860351f8bdde19250480d59e0
|
6cec337a19748468cc920412d9cfbfc56ce61b55
|
refs/heads/master
| 2020-04-09T17:13:09.850706
| 2019-09-20T06:57:06
| 2019-09-20T06:57:06
| 160,474,281
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,250
|
py
|
import sys
sys.path.append('/afs/cern.ch/cms/PPD/PdmV/tools/McM/')
from rest import McM
from json import dumps
mcm = McM(dev=False)
#mcm = McM(dev=True)
# Example to get ALL requests that are members of a given campaign and are submitted
# It uses a generic search for specified columns: query='status=submitted'
# Queries can be combined: query='status=submitted&member_of_campaign=Summer12'
#campaign_requests = mcm.get('requests', query='member_of_campaign=Summer12&status=submitted')
#for request in campaign_requests:
# print(request['prepid'])
# Example to retrieve single request dictionary
# More methods are here:
# https://cms-pdmv.cern.ch/mcm/restapi/requests/
#single_request_prepid = 'TOP-Summer12-00368'
#single_request = mcm.get('requests', single_request_prepid, method='get')
#print('Single request "%s":\n%s' % (single_request_prepid, dumps(single_request, indent=4)))
# Example how to get multiple requests using range
requests_query = """
HIG-RunIIFall17wmLHEGS-02442 -> HIG-RunIIFall17wmLHEGS-02477
"""
range_of_requests = mcm.get_range_of_requests(requests_query)
print('Found %s requests' % (len(range_of_requests)))
for request in range_of_requests:
print(request['prepid'])
print(request['time_event'])
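# The `dumps` import above can pretty-print a full request dictionary,
# e.g. (sketch, assuming the range query returned at least one request):
# print(dumps(range_of_requests[0], indent=4))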
|
[
"d4space@gmail.com"
] |
d4space@gmail.com
|
6c63500bf53bf58a8c653ea3488de0da13613156
|
5a01497e7c29e2488b6a4cb0478405239375eb66
|
/apetools/pipes/pipeenums.py
|
7c0eb61e95b8a049b278cef0d146e8154fd7cb22
|
[
"Apache-2.0"
] |
permissive
|
russell-n/oldape
|
8b4d9e996181dc1c7175f72d75c6193443da591b
|
b4d1c77e1d611fe2b30768b42bdc7493afb0ea95
|
refs/heads/master
| 2021-05-30T20:02:18.895922
| 2016-03-27T04:38:18
| 2016-03-27T04:38:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
class PipeEnum(object):
"""
A holder of constants for the StoragePipe
"""
__slots__ = ()
start = "start"
pipe = "pipe"
sink = "sink"
# end class PipeEnum
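# Usage sketch: the constants are read as plain class attributes,
# e.g. PipeEnum.start == "start" and PipeEnum.sink == "sink".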
|
[
"necromuralist@google.com"
] |
necromuralist@google.com
|
27b9463ca0f70caf4d98d28f2dfba0b380af9223
|
dcee93ce4b9fcf0a7ffa6ea658c403ed1fc84043
|
/Meteor/src/thirdManage/migrations/0003_auto_20170807_1623.py
|
e0d6ee7dab4b082d10e23368f50d65c656128424
|
[] |
no_license
|
henryliuom/drv-study
|
3eed96eef58138003371011034562a15ebc16b79
|
dcab011bce0f34bcf50f8ab5601eb859a5a07cb7
|
refs/heads/master
| 2021-06-06T23:49:20.869907
| 2020-07-30T09:06:48
| 2020-07-30T09:06:48
| 95,858,929
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-07 08:23
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('thirdManage', '0002_auto_20170807_1622'),
]
operations = [
migrations.AlterModelTable(
name='paydnses',
table='classmate_paydnses',
),
]
|
[
"henry@techdog.com"
] |
henry@techdog.com
|
5de5f713afc9e6239000c77a67d43371491966bf
|
839f9c64c0c2c4178f2a8a0166fa4a0a3f649aac
|
/tilepack/check_toi.py
|
46b3abf0ccdabc0f276edefb5d094268a0122071
|
[
"MIT"
] |
permissive
|
anilkunchalaece/tilepacks
|
1ab70a0f8ba7886e5b29fbcb761072b9d4e61b0e
|
86008e85826e5b1f97ef22ba92152b40353557a7
|
refs/heads/master
| 2020-06-04T07:44:09.179732
| 2019-06-14T12:30:03
| 2019-06-14T12:30:03
| 191,929,548
| 0
| 0
|
MIT
| 2019-06-14T11:09:34
| 2019-06-14T11:09:34
| null |
UTF-8
|
Python
| false
| false
| 1,512
|
py
|
import mercantile
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('min_lon',
type=float,
help='Bounding box minimum longitude/left')
parser.add_argument('min_lat',
type=float,
help='Bounding box minimum latitude/bottom')
parser.add_argument('max_lon',
type=float,
help='Bounding box maximum longitude/right')
parser.add_argument('max_lat',
type=float,
help='Bounding box maximum latitude/top')
parser.add_argument('min_zoom',
type=int,
help='The minimum zoom level to include')
parser.add_argument('max_zoom',
type=int,
help='The maximum zoom level to include')
args = parser.parse_args()
print("zoom\tmissing from toi\tin aoi")
for zoom in range(args.min_zoom, args.max_zoom + 1):
tiles_in_aoi = set([
'{}/{}/{}'.format(z, x, y)
for x, y, z in mercantile.tiles(
args.min_lon, args.min_lat, args.max_lon, args.max_lat,
[zoom]
)
])
with open('toi.z{}.txt'.format(zoom), 'r') as f:
tiles_in_toi = set([
l.strip()
for l in f.readlines()
])
print("{zoom:2d}\t{tiles_not_in_toi}\t{tiles_in_aoi}".format(
zoom=zoom,
tiles_not_in_toi=len(tiles_in_aoi - tiles_in_toi),
tiles_in_aoi=len(tiles_in_aoi),
))
if __name__ == '__main__':
main()
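# Example invocation (coordinates and zoom levels are illustrative; expects
# toi.z10.txt .. toi.z12.txt files in the working directory):
#
#   python check_toi.py -122.52 37.70 -122.35 37.83 10 12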
|
[
"ian.dees@gmail.com"
] |
ian.dees@gmail.com
|
24449c28191a51d1f60c35022f2c37ed430095fa
|
468e75c8b64b137621bcfb523f741b2cb791bf36
|
/GUI/component/navigation_bar.py
|
74855fdcd51b5dc1b3431d4efab8e5744b713ade
|
[
"MIT"
] |
permissive
|
acc-cosc-1336/cosc-1336-fall-2017-guilgrahl
|
dd0055b68c6c56041be2b33e56b13bd3a6a9cdc1
|
c1b6a8abf779db9e5242e84c176bb5e8fb1d97a6
|
refs/heads/master
| 2021-08-28T19:03:37.364375
| 2017-12-06T03:25:24
| 2017-12-06T03:25:24
| 103,597,807
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,129
|
py
|
from tkinter import LEFT
from tkinter.ttk import Frame, Button
class NavigationBar(Frame):
def __init__(self, parent, data_source):
Frame.__init__(self, parent)
self.data_source = data_source
self.init_form()
def init_form(self):
nextButton = Button(self, text="Next ", command=self.on_next)
updateButton = Button(self, text="Update ", command=self.on_update)
deleteButton = Button(self, text="Delete ", command=self.on_delete)
previousButton = Button(self, text="Previous ", command=self.on_previous)
searchButton = Button(self, text="Search ", command=self.master.on_search)
nextButton.pack(side=LEFT)
previousButton.pack(side=LEFT)
updateButton.pack(side=LEFT)
deleteButton.pack(side=LEFT)
searchButton.pack(side=LEFT)
def on_next(self):
self.data_source.next_record()
def on_update(self):
self.data_source.request_update()
def on_delete(self):
pass
def on_previous(self):
self.data_source.previous_record()
|
[
"noreply@github.com"
] |
acc-cosc-1336.noreply@github.com
|
9dc398861c6fc13e1e5d99fcf69fce298f153848
|
2670452749c6299386a33391f9fb5014db0203ec
|
/meraki/aio/api/mg_uplink_settings.py
|
313b63a05262ac239c8965b44326f42618827427
|
[
"MIT"
] |
permissive
|
npappin-wsu/dashboard-api-python
|
f9d3fc682b517e6bac437cd54101afd09b653274
|
5aedfc740f676fbf34e5f79269e8ece73421e3da
|
refs/heads/master
| 2020-06-28T17:49:44.911294
| 2020-04-14T04:27:38
| 2020-04-14T04:27:38
| 255,509,439
| 0
| 0
|
MIT
| 2020-04-14T04:24:55
| 2020-04-14T04:24:54
| null |
UTF-8
|
Python
| false
| false
| 1,524
|
py
|
class AsyncMGUplinkSettings:
def __init__(self, session):
super().__init__()
self._session = session
async def getNetworkCellularGatewaySettingsUplink(self, networkId: str):
"""
**Returns the uplink settings for your MG network.**
https://api.meraki.com/api_docs#returns-the-uplink-settings-for-your-mg-network
- networkId (string)
"""
metadata = {
'tags': ['MG uplink settings'],
'operation': 'getNetworkCellularGatewaySettingsUplink',
}
resource = f'/networks/{networkId}/cellularGateway/settings/uplink'
return await self._session.get(metadata, resource)
async def updateNetworkCellularGatewaySettingsUplink(self, networkId: str, **kwargs):
"""
**Updates the uplink settings for your MG network.**
https://api.meraki.com/api_docs#updates-the-uplink-settings-for-your-mg-network
- networkId (string)
- bandwidthLimits (object): The bandwidth settings for the 'cellular' uplink
"""
kwargs.update(locals())
metadata = {
'tags': ['MG uplink settings'],
'operation': 'updateNetworkCellularGatewaySettingsUplink',
}
resource = f'/networks/{networkId}/cellularGateway/settings/uplink'
body_params = ['bandwidthLimits']
payload = {k: v for (k, v) in kwargs.items() if k in body_params}
return await self._session.put(metadata, resource, payload)
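# Minimal usage sketch (the session object, network ID and bandwidth values
# below are placeholders, not verified against the live Meraki API):
#
# import asyncio
#
# async def demo(session):
#     uplink = AsyncMGUplinkSettings(session)
#     current = await uplink.getNetworkCellularGatewaySettingsUplink('N_12345')
#     await uplink.updateNetworkCellularGatewaySettingsUplink(
#         'N_12345', bandwidthLimits={'limitUp': 1000, 'limitDown': 1000})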
|
[
"shiychen@cisco.com"
] |
shiychen@cisco.com
|
90cd41d7f119122d566e22cb587ce4a3c2472432
|
3cd4902b67de144d8e6f36335e125d0548d8cf97
|
/src/data/Places365Data.py
|
7f17280172b9488ad0942cad3e68ec562241adf9
|
[
"MIT"
] |
permissive
|
stefantaubert/imageclef-lifelog-2019
|
5d201c2a28f15f608b9b58b94ab2ecddb5201205
|
ad49dc79db98a163c5bc282fb179c0f7730546b3
|
refs/heads/master
| 2022-10-06T12:42:30.011610
| 2022-08-29T13:35:09
| 2022-08-29T13:35:09
| 196,553,184
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 749
|
py
|
from src.io.ReadingContext import ReadingContext
from src.data.DataBase import DataBase
from src.globals import vc_cat_cols
from src.globals import vc_cat_score_cols
name_places = "places365"
class Places365Data(DataBase):
def __init__(self, ctx: ReadingContext):
        super().__init__(ctx)
def get_name(self):
return name_places
def __unify__(self, word):
cat = word.replace('_', ' ').replace('/', ' ')
#cat = cat.replace("outdoor", "").replace("indoor", "").strip()
return cat
def __get_data_dict__(self):
return self.ctx.vc_dict()
def __get_label_columns__(self):
return vc_cat_cols
def __get_score_columns__(self):
return vc_cat_score_cols
|
[
"stefan.taubert@posteo.de"
] |
stefan.taubert@posteo.de
|
d147631fbb07f6f190098326828eba033083da6b
|
7c12b6487874b3db564e5c900be55fd9e050f765
|
/hilder_deal_price/start_youda.py
|
6f3fcbd388bcabfa3c0c14dbf6a235f36f6dbcff
|
[] |
no_license
|
pjkui/githubproject
|
9f1ea63bb12903d8a72e0ecb4aa6c8c02a7a45f8
|
808cb78fc3887f35bf838d77d62308fce9e6aa5d
|
refs/heads/master
| 2022-10-06T00:11:34.493886
| 2019-10-07T13:09:18
| 2019-10-07T13:09:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 605
|
py
|
from youda_res.youda import Record
from dbfread import DBF
from youda_res.youda_match_insert import YouData
if __name__ == '__main__':
# """
    # Place the cjxx_3.DBF file in the same directory as this script
# """
# table = DBF('cjxx_3.DBF', recfactory=Record, ignore_missing_memofile=True)
# for record in table:
# record.insert()
youda = YouData('友达')
# """
    # Strip the room number from the address; split the neighborhood name into a list
# """
# youda.format()
# """
    # Match the city, district and neighborhood names
# """
# youda.match()
# """
    # Insert the records into the '43' transactions database
# """
youda.insert_43()
|
[
"1735429225@qq.com"
] |
1735429225@qq.com
|
1b94706b73f45a94cc41deb5aa795a074bcafd09
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/127/usersdata/172/35097/submittedfiles/ex11.py
|
ecaecb512a8ce7a4951ad5382e645ab5e88c079d
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
# -*- coding: utf-8 -*-
d1=int(input('Enter the day of date 1'))
m1=int(input('Enter the month of date 1'))
a1=int(input('Enter the year of date 1'))
d2=int(input('Enter the day of date 2'))
m2=int(input('Enter the month of date 2'))
a2=int(input('Enter the year of date 2'))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
80aae07c6ab4e34782a351c4c412129086bfe652
|
531c47c15b97cbcb263ec86821d7f258c81c0aaf
|
/sdk/labservices/azure-mgmt-labservices/azure/mgmt/labservices/models/environment_setting_fragment_py3.py
|
c7d3c076bda1a16b402b7d682886230622ce5461
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
YijunXieMS/azure-sdk-for-python
|
be364d3b88204fd3c7d223df23756386ff7a3361
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
refs/heads/master
| 2021-07-15T18:06:28.748507
| 2020-09-04T15:48:52
| 2020-09-04T15:48:52
| 205,457,088
| 1
| 2
|
MIT
| 2020-06-16T16:38:15
| 2019-08-30T21:08:55
|
Python
|
UTF-8
|
Python
| false
| false
| 3,465
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource_py3 import Resource
class EnvironmentSettingFragment(Resource):
"""Represents settings of an environment, from which environment instances
would be created.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The identifier of the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param location: The location of the resource.
:type location: str
:param tags: The tags of the resource.
:type tags: dict[str, str]
:param configuration_state: Describes the user's progress in configuring
their environment setting. Possible values include: 'NotApplicable',
'Completed'
:type configuration_state: str or
~azure.mgmt.labservices.models.ConfigurationState
:param description: Describes the environment and its resource settings
:type description: str
:param title: Brief title describing the environment and its resource
settings
:type title: str
:param resource_settings: The resource specific settings
:type resource_settings:
~azure.mgmt.labservices.models.ResourceSettingsFragment
:param provisioning_state: The provisioning status of the resource.
:type provisioning_state: str
:param unique_identifier: The unique immutable identifier of a resource
(Guid).
:type unique_identifier: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'configuration_state': {'key': 'properties.configurationState', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'title': {'key': 'properties.title', 'type': 'str'},
'resource_settings': {'key': 'properties.resourceSettings', 'type': 'ResourceSettingsFragment'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'unique_identifier': {'key': 'properties.uniqueIdentifier', 'type': 'str'},
}
def __init__(self, *, location: str=None, tags=None, configuration_state=None, description: str=None, title: str=None, resource_settings=None, provisioning_state: str=None, unique_identifier: str=None, **kwargs) -> None:
super(EnvironmentSettingFragment, self).__init__(location=location, tags=tags, **kwargs)
self.configuration_state = configuration_state
self.description = description
self.title = title
self.resource_settings = resource_settings
self.provisioning_state = provisioning_state
self.unique_identifier = unique_identifier
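# Instantiation sketch (all field values illustrative):
#
# fragment = EnvironmentSettingFragment(
#     location='westus',
#     title='Shared lab VMs',
#     description='Environment setting for the intro course',
# )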
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
c15dbfada0d946792fde27d9be3ce0e0f63b7e15
|
365913bcc02bfdf6b6f6c246855144663f7e052b
|
/rdkit/ML/Descriptors/UnitTestDescriptors.py
|
017f4b2d6f0f9e8f9534429c526d03e0308d7d58
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
UnixJunkie/rdkit
|
d8458eadca78ba1714be5c55ba75c8e164fc1479
|
3ddb54aeef0666aeaa2200d2137884ec05cb6451
|
refs/heads/master
| 2021-06-01T22:26:53.201525
| 2017-08-15T17:00:30
| 2017-08-15T17:00:30
| 100,572,461
| 2
| 0
|
NOASSERTION
| 2019-05-29T00:58:25
| 2017-08-17T07:03:53
|
C++
|
UTF-8
|
Python
| false
| false
| 1,659
|
py
|
#
# Copyright (C) 2001,2002 greg Landrum and Rational Discovery LLC
#
""" unit testing code for descriptors
"""
import unittest
from rdkit.ML.Descriptors import CompoundDescriptors
from rdkit.TestRunner import redirect_stdout
from rdkit.six import StringIO
class TestCase(unittest.TestCase):
def setUp(self):
d = [('DED', ['NonZero', 'Mean', 'Dev']), ('M_B_electroneg', ['NonZero']),
('Cov_rad', ['Max', 'Min'])]
self.desc = CompoundDescriptors.CompoundDescriptorCalculator(d)
self.desc.BuildAtomDict()
self.tol = 0.0001
def testAtomDict(self):
# " testing the atom dict "
assert len(self.desc.atomDict.keys()) == 48, 'BuildAtomDict failed'
def testSimpleDescriptorCalc(self):
# " testing simple descriptor calculation "
composList = ['Nb', 'Nb3', 'NbPt', 'Nb2Pt']
compare = [[2.32224798203, 0.0, 1.34000003338, 1.34000003338],
[2.32224798203, 0.0, 1.34000003338, 1.34000003338],
[1.51555249095, 0.806695491076, 1.34000003338, 1.29999995232],
[1.78445098797, 0.717062658734, 1.34000003338, 1.29999995232]]
for i in range(len(composList)):
self.assertTrue(
max(
map(lambda x, y: abs(x - y), compare[i], self.desc.CalcSimpleDescriptorsForComposition(
composList[i]))) < self.tol, 'Descriptor calculation failed')
names = self.desc.GetDescriptorNames()
self.assertEqual(len(names), 4)
self.assertIn('MEAN_DED', names)
def test_exampleCode(self):
f = StringIO()
with redirect_stdout(f):
CompoundDescriptors._exampleCode()
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
[
"greg.landrum@gmail.com"
] |
greg.landrum@gmail.com
|
02870c37ceb356dfab187936f715d4a9e2a2bda0
|
75259be56c9b895970448a9e275405518cadf324
|
/src/cargos/sugar_beet.py
|
2b448e34f99c70c848987ffd39a9de6b2b283727
|
[] |
no_license
|
Azusa257/firs
|
5df946dea785515ef5303fd5ae7219bb222b9bb1
|
e824040c168c2863420d558bac64f8f01efc3e17
|
refs/heads/master
| 2023-06-10T09:36:48.358213
| 2021-06-06T20:34:16
| 2021-06-06T20:34:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 651
|
py
|
from cargo import Cargo
cargo = Cargo(
id="sugar_beet",
type_name="string(STR_CARGO_NAME_SUGAR_BEET)",
unit_name="string(STR_CARGO_NAME_SUGAR_BEET)",
type_abbreviation="string(STR_CID_SUGAR_BEET)",
sprite="NEW_CARGO_SPRITE",
weight="1.0",
is_freight="1",
cargo_classes="bitmask(CC_BULK)",
cargo_label="SGBT",
town_growth_effect="TOWNGROWTH_NONE",
town_growth_multiplier="1.0",
units_of_cargo="TTD_STR_TONS",
items_of_cargo="string(STR_CARGO_UNIT_SUGAR_BEET)",
penalty_lowerbound="5",
single_penalty_length="30",
price_factor=99,
capacity_multiplier="1",
icon_indices=(14, 1),
)
|
[
"andy@teamrubber.com"
] |
andy@teamrubber.com
|
9c026afa2692b1cfc3164dd2babc3391c3cf8218
|
4c0e871eb19d6ca5b8c550b60c4e3aa628ec729e
|
/Python记录/LeetCode/2连续字符串.py
|
264a62db73f8021d2504a5ac8693311de18f56c3
|
[] |
no_license
|
west789/Document
|
d4e7df6ff79046bf2c66ea082582feb2e7b8c29c
|
9b4de781bd2a7ecc15342098366f0123e83f6191
|
refs/heads/master
| 2021-05-26T07:31:52.444264
| 2019-05-20T07:20:07
| 2019-05-20T07:20:07
| 127,928,635
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,166
|
py
|
def doMain(s):
"""
:type s: str
:rtype: int
"""
    # Solution 1 (commented out): dict-based index tracking
# strDict = {}
# currentMax = 0
# maxLength = 0
# for i in range(len(s)):
# if s[i] in strDict and i-strDict[s[i]]-1<=currentMax:
# if maxLength < currentMax:
# maxLength = currentMax
# currentMax = i - strDict[s[i]] - 1
# currentMax += 1
# print(maxLength)
# print (currentMax)
# strDict[s[i]] = i
# print (strDict)
# print (maxLength if maxLength > currentMax else currentMax)
# return (maxLength if maxLength > currentMax else currentMax)
    # Solution 2: sliding window over a growing substring
if not s:
return 0
longest_str = 1
substr=''
for item in s:
if item not in substr:
substr += item
else:
if len(substr) > longest_str:
longest_str = len(substr)
substr += item
substr = substr[substr.index(item)+1:]
print (substr)
if len(substr) > longest_str:
longest_str = len(substr)
return longest_str
if __name__ == '__main__':
s='pwdfwke'
doMain(s)
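# Worked example (sketch): for s = 'abcabcbb' the window grows to 'abc',
# hits the duplicate 'a', records length 3, and restarts from 'bca';
# doMain('abcabcbb') should therefore return 3.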
|
[
"738758058@qq.com"
] |
738758058@qq.com
|
f6d813cfeac9e1006402839f02d6ac002649ebc2
|
195b8d12796872c05d539aa9283fc3f407b8d8b5
|
/tempest/tempest/common/ssh.py
|
be6fe273f439ff41d0279ecd115bd590412c5f0a
|
[
"Apache-2.0"
] |
permissive
|
rvbelapure/openstack-nova-sched
|
afaa5928da3a8430b64bc23aedb251bae0e7d3ef
|
325da0e08979d79b7470d7506ced1b4210e2b696
|
refs/heads/master
| 2021-01-17T05:28:44.474242
| 2013-04-20T21:18:35
| 2013-04-20T21:18:35
| 9,082,500
| 0
| 1
| null | 2021-09-07T08:33:18
| 2013-03-28T17:30:46
|
Python
|
UTF-8
|
Python
| false
| false
| 5,444
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cStringIO import StringIO
import select
import socket
import time
import warnings
from tempest import exceptions
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import paramiko
from paramiko import RSAKey
class Client(object):
def __init__(self, host, username, password=None, timeout=300, pkey=None,
channel_timeout=10, look_for_keys=False, key_filename=None):
self.host = host
self.username = username
self.password = password
if isinstance(pkey, basestring):
pkey = RSAKey.from_private_key(StringIO(str(pkey)))
self.pkey = pkey
self.look_for_keys = look_for_keys
self.key_filename = key_filename
self.timeout = int(timeout)
self.channel_timeout = float(channel_timeout)
self.buf_size = 1024
def _get_ssh_connection(self):
"""Returns an ssh connection to the specified host."""
_timeout = True
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(
paramiko.AutoAddPolicy())
_start_time = time.time()
while not self._is_timed_out(self.timeout, _start_time):
try:
ssh.connect(self.host, username=self.username,
password=self.password,
look_for_keys=self.look_for_keys,
key_filename=self.key_filename,
timeout=self.timeout, pkey=self.pkey)
_timeout = False
break
except socket.error:
continue
except paramiko.AuthenticationException:
time.sleep(5)
continue
if _timeout:
raise exceptions.SSHTimeout(host=self.host,
user=self.username,
password=self.password)
return ssh
def _is_timed_out(self, timeout, start_time):
return (time.time() - timeout) > start_time
def connect_until_closed(self):
"""Connect to the server and wait until connection is lost."""
try:
ssh = self._get_ssh_connection()
_transport = ssh.get_transport()
_start_time = time.time()
_timed_out = self._is_timed_out(self.timeout, _start_time)
while _transport.is_active() and not _timed_out:
time.sleep(5)
_timed_out = self._is_timed_out(self.timeout, _start_time)
ssh.close()
except (EOFError, paramiko.AuthenticationException, socket.error):
return
def exec_command(self, cmd):
"""
Execute the specified command on the server.
        Note that this method reads the whole command output into memory,
        so it shouldn't be used for large outputs.
:returns: data read from standard output of the command.
        :raises: SSHExecCommandFailed if the command returns a nonzero
        status. The exception contains the command's exit status and stderr content.
"""
ssh = self._get_ssh_connection()
transport = ssh.get_transport()
channel = transport.open_session()
channel.fileno() # Register event pipe
channel.exec_command(cmd)
channel.shutdown_write()
out_data = []
err_data = []
select_params = [channel], [], [], self.channel_timeout
while True:
ready = select.select(*select_params)
if not any(ready):
raise exceptions.TimeoutException(
"Command: '{0}' executed on host '{1}'.".format(
cmd, self.host))
if not ready[0]: # If there is nothing to read.
continue
out_chunk = err_chunk = None
if channel.recv_ready():
out_chunk = channel.recv(self.buf_size)
out_data += out_chunk,
if channel.recv_stderr_ready():
err_chunk = channel.recv_stderr(self.buf_size)
err_data += err_chunk,
if channel.closed and not err_chunk and not out_chunk:
break
exit_status = channel.recv_exit_status()
if 0 != exit_status:
raise exceptions.SSHExecCommandFailed(
command=cmd, exit_status=exit_status,
strerror=''.join(err_data))
return ''.join(out_data)
def test_connection_auth(self):
"""Returns true if ssh can connect to server."""
try:
connection = self._get_ssh_connection()
connection.close()
except paramiko.AuthenticationException:
return False
return True
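# Usage sketch (host and credentials are placeholders; requires a reachable
# SSH server and tempest's exceptions module on the path):
#
# client = Client('192.0.2.10', 'cirros', password='secret', timeout=60)
# if client.test_connection_auth():
#     print(client.exec_command('uname -a'))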
|
[
"owlpostarun@gmail.com"
] |
owlpostarun@gmail.com
|
af19c6f060d2ce1e3cf00d13a0513654d72a815a
|
209a7a4023a9a79693ec1f6e8045646496d1ea71
|
/COMP0016_2020_21_Team12-datasetsExperimentsAna/pwa/FADapp/pythonScripts/venv/Lib/site-packages/pandas/io/formats/format.py
|
e33c6cd29b92e132375d5db2762e0caef339d541
|
[
"MIT"
] |
permissive
|
anzhao920/MicrosoftProject15_Invictus
|
5e2347015411bbffbdf0ceb059df854661fb240c
|
15f44eebb09561acbbe7b6730dfadf141e4c166d
|
refs/heads/main
| 2023-04-16T13:24:39.332492
| 2021-04-27T00:47:13
| 2021-04-27T00:47:13
| 361,913,170
| 0
| 0
|
MIT
| 2021-04-26T22:41:56
| 2021-04-26T22:41:55
| null |
UTF-8
|
Python
| false
| false
| 67,942
|
py
|
"""
Internal module for formatting output data in csv, html,
and latex files. This module also applies to display formatting.
"""
from contextlib import contextmanager
from csv import QUOTE_NONE, QUOTE_NONNUMERIC
import decimal
from functools import partial
from io import StringIO
import math
import re
from shutil import get_terminal_size
from typing import (
IO,
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from unicodedata import east_asian_width
import numpy as np
from pandas._config.config import get_option, set_option
from pandas._libs import lib
from pandas._libs.missing import NA
from pandas._libs.tslibs import NaT, Timedelta, Timestamp, iNaT
from pandas._libs.tslibs.nattype import NaTType
from pandas._typing import (
ArrayLike,
CompressionOptions,
FilePathOrBuffer,
FloatFormatType,
IndexLabel,
Label,
StorageOptions,
)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_numeric_dtype,
is_scalar,
is_timedelta64_dtype,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.arrays.datetimes import DatetimeArray
from pandas.core.arrays.timedeltas import TimedeltaArray
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.construction import extract_array
from pandas.core.indexes.api import Index, MultiIndex, PeriodIndex, ensure_index
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.reshape.concat import concat
from pandas.io.common import stringify_path
from pandas.io.formats.printing import adjoin, justify, pprint_thing
if TYPE_CHECKING:
from pandas import Categorical, DataFrame, Series
FormattersType = Union[
List[Callable], Tuple[Callable, ...], Mapping[Union[str, int], Callable]
]
ColspaceType = Mapping[Label, Union[str, int]]
ColspaceArgType = Union[
str, int, Sequence[Union[str, int]], Mapping[Label, Union[str, int]]
]
common_docstring = """
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : %(col_space_type)s, optional
%(col_space)s.
header : %(header_type)s, optional
%(header)s.
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of ``NaN`` to use.
formatters : list, tuple or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List/tuple must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. This function must return a unicode string and will be
applied only to the non-``NaN`` elements, with ``NaN`` being
handled by ``na_rep``.
.. versionchanged:: 1.2.0
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
min_rows : int, optional
The number of rows to display in the console in a truncated repr
(when number of rows is above `max_rows`).
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
"""
_VALID_JUSTIFY_PARAMETERS = (
"left",
"right",
"center",
"justify",
"justify-all",
"start",
"end",
"inherit",
"match-parent",
"initial",
"unset",
)
return_docstring = """
Returns
-------
str or None
If buf is None, returns the result as a string. Otherwise returns
None.
"""
class CategoricalFormatter:
def __init__(
self,
categorical: "Categorical",
buf: Optional[IO[str]] = None,
length: bool = True,
na_rep: str = "NaN",
footer: bool = True,
):
self.categorical = categorical
self.buf = buf if buf is not None else StringIO("")
self.na_rep = na_rep
self.length = length
self.footer = footer
self.quoting = QUOTE_NONNUMERIC
def _get_footer(self) -> str:
footer = ""
if self.length:
if footer:
footer += ", "
footer += f"Length: {len(self.categorical)}"
level_info = self.categorical._repr_categories_info()
# Levels are added in a newline
if footer:
footer += "\n"
footer += level_info
return str(footer)
def _get_formatted_values(self) -> List[str]:
return format_array(
self.categorical._internal_get_values(),
None,
float_format=None,
na_rep=self.na_rep,
quoting=self.quoting,
)
def to_string(self) -> str:
categorical = self.categorical
if len(categorical) == 0:
if self.footer:
return self._get_footer()
else:
return ""
fmt_values = self._get_formatted_values()
fmt_values = [i.strip() for i in fmt_values]
values = ", ".join(fmt_values)
result = ["[" + values + "]"]
if self.footer:
footer = self._get_footer()
if footer:
result.append(footer)
return str("\n".join(result))
class SeriesFormatter:
def __init__(
self,
series: "Series",
buf: Optional[IO[str]] = None,
length: Union[bool, str] = True,
header: bool = True,
index: bool = True,
na_rep: str = "NaN",
name: bool = False,
float_format: Optional[str] = None,
dtype: bool = True,
max_rows: Optional[int] = None,
min_rows: Optional[int] = None,
):
self.series = series
self.buf = buf if buf is not None else StringIO()
self.name = name
self.na_rep = na_rep
self.header = header
self.length = length
self.index = index
self.max_rows = max_rows
self.min_rows = min_rows
if float_format is None:
float_format = get_option("display.float_format")
self.float_format = float_format
self.dtype = dtype
self.adj = get_adjustment()
self._chk_truncate()
def _chk_truncate(self) -> None:
self.tr_row_num: Optional[int]
min_rows = self.min_rows
max_rows = self.max_rows
# truncation determined by max_rows, actual truncated number of rows
# used below by min_rows
is_truncated_vertically = max_rows and (len(self.series) > max_rows)
series = self.series
if is_truncated_vertically:
max_rows = cast(int, max_rows)
if min_rows:
# if min_rows is set (not None or 0), set max_rows to minimum
# of both
max_rows = min(min_rows, max_rows)
if max_rows == 1:
row_num = max_rows
series = series.iloc[:max_rows]
else:
row_num = max_rows // 2
series = concat((series.iloc[:row_num], series.iloc[-row_num:]))
self.tr_row_num = row_num
else:
self.tr_row_num = None
self.tr_series = series
self.is_truncated_vertically = is_truncated_vertically
def _get_footer(self) -> str:
name = self.series.name
footer = ""
if getattr(self.series.index, "freq", None) is not None:
assert isinstance(
self.series.index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)
)
footer += f"Freq: {self.series.index.freqstr}"
if self.name is not False and name is not None:
if footer:
footer += ", "
series_name = pprint_thing(name, escape_chars=("\t", "\r", "\n"))
footer += f"Name: {series_name}"
if self.length is True or (
self.length == "truncate" and self.is_truncated_vertically
):
if footer:
footer += ", "
footer += f"Length: {len(self.series)}"
if self.dtype is not False and self.dtype is not None:
dtype_name = getattr(self.tr_series.dtype, "name", None)
if dtype_name:
if footer:
footer += ", "
footer += f"dtype: {pprint_thing(dtype_name)}"
# level infos are added to the end and in a new line, like it is done
# for Categoricals
if is_categorical_dtype(self.tr_series.dtype):
level_info = self.tr_series._values._repr_categories_info()
if footer:
footer += "\n"
footer += level_info
return str(footer)
def _get_formatted_index(self) -> Tuple[List[str], bool]:
index = self.tr_series.index
if isinstance(index, MultiIndex):
have_header = any(name for name in index.names)
fmt_index = index.format(names=True)
else:
have_header = index.name is not None
fmt_index = index.format(name=True)
return fmt_index, have_header
def _get_formatted_values(self) -> List[str]:
return format_array(
self.tr_series._values,
None,
float_format=self.float_format,
na_rep=self.na_rep,
leading_space=self.index,
)
def to_string(self) -> str:
series = self.tr_series
footer = self._get_footer()
if len(series) == 0:
return f"{type(self.series).__name__}([], {footer})"
fmt_index, have_header = self._get_formatted_index()
fmt_values = self._get_formatted_values()
if self.is_truncated_vertically:
n_header_rows = 0
row_num = self.tr_row_num
row_num = cast(int, row_num)
width = self.adj.len(fmt_values[row_num - 1])
if width > 3:
dot_str = "..."
else:
dot_str = ".."
# Series uses mode=center because it has single value columns
# DataFrame uses mode=left
dot_str = self.adj.justify([dot_str], width, mode="center")[0]
fmt_values.insert(row_num + n_header_rows, dot_str)
fmt_index.insert(row_num + 1, "")
if self.index:
result = self.adj.adjoin(3, *[fmt_index[1:], fmt_values])
else:
result = self.adj.adjoin(3, fmt_values)
if self.header and have_header:
result = fmt_index[0] + "\n" + result
if footer:
result += "\n" + footer
return str("".join(result))
class TextAdjustment:
def __init__(self):
self.encoding = get_option("display.encoding")
def len(self, text: str) -> int:
return len(text)
def justify(self, texts: Any, max_len: int, mode: str = "right") -> List[str]:
return justify(texts, max_len, mode=mode)
def adjoin(self, space: int, *lists, **kwargs) -> str:
return adjoin(space, *lists, strlen=self.len, justfunc=self.justify, **kwargs)
class EastAsianTextAdjustment(TextAdjustment):
def __init__(self):
super().__init__()
if get_option("display.unicode.ambiguous_as_wide"):
self.ambiguous_width = 2
else:
self.ambiguous_width = 1
# Definition of East Asian Width
# https://unicode.org/reports/tr11/
# Ambiguous width can be changed by option
self._EAW_MAP = {"Na": 1, "N": 1, "W": 2, "F": 2, "H": 1}
def len(self, text: str) -> int:
"""
Calculate display width considering unicode East Asian Width
"""
if not isinstance(text, str):
return len(text)
return sum(
self._EAW_MAP.get(east_asian_width(c), self.ambiguous_width) for c in text
)
def justify(
self, texts: Iterable[str], max_len: int, mode: str = "right"
) -> List[str]:
# re-calculate padding space per str considering East Asian Width
def _get_pad(t):
return max_len - self.len(t) + len(t)
if mode == "left":
return [x.ljust(_get_pad(x)) for x in texts]
elif mode == "center":
return [x.center(_get_pad(x)) for x in texts]
else:
return [x.rjust(_get_pad(x)) for x in texts]
def get_adjustment() -> TextAdjustment:
use_east_asian_width = get_option("display.unicode.east_asian_width")
if use_east_asian_width:
return EastAsianTextAdjustment()
else:
return TextAdjustment()
class DataFrameFormatter:
"""Class for processing dataframe formatting options and data."""
__doc__ = __doc__ if __doc__ else ""
__doc__ += common_docstring + return_docstring
def __init__(
self,
frame: "DataFrame",
columns: Optional[Sequence[str]] = None,
col_space: Optional[ColspaceArgType] = None,
header: Union[bool, Sequence[str]] = True,
index: bool = True,
na_rep: str = "NaN",
formatters: Optional[FormattersType] = None,
justify: Optional[str] = None,
float_format: Optional[FloatFormatType] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
max_rows: Optional[int] = None,
min_rows: Optional[int] = None,
max_cols: Optional[int] = None,
show_dimensions: Union[bool, str] = False,
decimal: str = ".",
bold_rows: bool = False,
escape: bool = True,
):
self.frame = frame
self.columns = self._initialize_columns(columns)
self.col_space = self._initialize_colspace(col_space)
self.header = header
self.index = index
self.na_rep = na_rep
self.formatters = self._initialize_formatters(formatters)
self.justify = self._initialize_justify(justify)
self.float_format = float_format
self.sparsify = self._initialize_sparsify(sparsify)
self.show_index_names = index_names
self.decimal = decimal
self.bold_rows = bold_rows
self.escape = escape
self.max_rows = max_rows
self.min_rows = min_rows
self.max_cols = max_cols
self.show_dimensions = show_dimensions
self.max_cols_fitted = self._calc_max_cols_fitted()
self.max_rows_fitted = self._calc_max_rows_fitted()
self.tr_frame = self.frame
self.truncate()
self.adj = get_adjustment()
def get_strcols(self) -> List[List[str]]:
"""
Render a DataFrame to a list of columns (as lists of strings).
"""
strcols = self._get_strcols_without_index()
if self.index:
str_index = self._get_formatted_index(self.tr_frame)
strcols.insert(0, str_index)
return strcols
@property
def should_show_dimensions(self) -> bool:
return self.show_dimensions is True or (
self.show_dimensions == "truncate" and self.is_truncated
)
@property
def is_truncated(self) -> bool:
return bool(self.is_truncated_horizontally or self.is_truncated_vertically)
@property
def is_truncated_horizontally(self) -> bool:
return bool(self.max_cols_fitted and (len(self.columns) > self.max_cols_fitted))
@property
def is_truncated_vertically(self) -> bool:
return bool(self.max_rows_fitted and (len(self.frame) > self.max_rows_fitted))
@property
def dimensions_info(self) -> str:
return f"\n\n[{len(self.frame)} rows x {len(self.frame.columns)} columns]"
@property
def has_index_names(self) -> bool:
return _has_names(self.frame.index)
@property
def has_column_names(self) -> bool:
return _has_names(self.frame.columns)
@property
def show_row_idx_names(self) -> bool:
return all((self.has_index_names, self.index, self.show_index_names))
@property
def show_col_idx_names(self) -> bool:
return all((self.has_column_names, self.show_index_names, self.header))
@property
def max_rows_displayed(self) -> int:
return min(self.max_rows or len(self.frame), len(self.frame))
def _initialize_sparsify(self, sparsify: Optional[bool]) -> bool:
if sparsify is None:
return get_option("display.multi_sparse")
return sparsify
def _initialize_formatters(
self, formatters: Optional[FormattersType]
) -> FormattersType:
if formatters is None:
return {}
elif len(self.frame.columns) == len(formatters) or isinstance(formatters, dict):
return formatters
else:
raise ValueError(
f"Formatters length({len(formatters)}) should match "
f"DataFrame number of columns({len(self.frame.columns)})"
)
def _initialize_justify(self, justify: Optional[str]) -> str:
if justify is None:
return get_option("display.colheader_justify")
else:
return justify
def _initialize_columns(self, columns: Optional[Sequence[str]]) -> Index:
if columns is not None:
cols = ensure_index(columns)
self.frame = self.frame[cols]
return cols
else:
return self.frame.columns
def _initialize_colspace(
self, col_space: Optional[ColspaceArgType]
) -> ColspaceType:
result: ColspaceType
if col_space is None:
result = {}
elif isinstance(col_space, (int, str)):
result = {"": col_space}
result.update({column: col_space for column in self.frame.columns})
elif isinstance(col_space, Mapping):
for column in col_space.keys():
if column not in self.frame.columns and column != "":
raise ValueError(
f"Col_space is defined for an unknown column: {column}"
)
result = col_space
else:
if len(self.frame.columns) != len(col_space):
raise ValueError(
f"Col_space length({len(col_space)}) should match "
f"DataFrame number of columns({len(self.frame.columns)})"
)
result = dict(zip(self.frame.columns, col_space))
return result
def _calc_max_cols_fitted(self) -> Optional[int]:
"""Number of columns fitting the screen."""
if not self._is_in_terminal():
return self.max_cols
width, _ = get_terminal_size()
if self._is_screen_narrow(width):
return width
else:
return self.max_cols
def _calc_max_rows_fitted(self) -> Optional[int]:
"""Number of rows with data fitting the screen."""
max_rows: Optional[int]
if self._is_in_terminal():
_, height = get_terminal_size()
if self.max_rows == 0:
# rows available to fill with actual data
return height - self._get_number_of_auxillary_rows()
if self._is_screen_short(height):
max_rows = height
else:
max_rows = self.max_rows
else:
max_rows = self.max_rows
return self._adjust_max_rows(max_rows)
def _adjust_max_rows(self, max_rows: Optional[int]) -> Optional[int]:
"""Adjust max_rows using display logic.
See description here:
https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options
GH #37359
"""
if max_rows:
if (len(self.frame) > max_rows) and self.min_rows:
# if truncated, set max_rows showed to min_rows
max_rows = min(self.min_rows, max_rows)
return max_rows
def _is_in_terminal(self) -> bool:
"""Check if the output is to be shown in terminal."""
return bool(self.max_cols == 0 or self.max_rows == 0)
def _is_screen_narrow(self, max_width) -> bool:
return bool(self.max_cols == 0 and len(self.frame.columns) > max_width)
def _is_screen_short(self, max_height) -> bool:
return bool(self.max_rows == 0 and len(self.frame) > max_height)
def _get_number_of_auxillary_rows(self) -> int:
"""Get number of rows occupied by prompt, dots and dimension info."""
dot_row = 1
prompt_row = 1
num_rows = dot_row + prompt_row
if self.show_dimensions:
num_rows += len(self.dimensions_info.splitlines())
if self.header:
num_rows += 1
return num_rows
def truncate(self) -> None:
"""
Check whether the frame should be truncated. If so, slice the frame up.
"""
if self.is_truncated_horizontally:
self._truncate_horizontally()
if self.is_truncated_vertically:
self._truncate_vertically()
def _truncate_horizontally(self) -> None:
"""Remove columns, which are not to be displayed and adjust formatters.
Attributes affected:
- tr_frame
- formatters
- tr_col_num
"""
assert self.max_cols_fitted is not None
col_num = self.max_cols_fitted // 2
if col_num >= 1:
left = self.tr_frame.iloc[:, :col_num]
right = self.tr_frame.iloc[:, -col_num:]
self.tr_frame = concat((left, right), axis=1)
# truncate formatter
if isinstance(self.formatters, (list, tuple)):
self.formatters = [
*self.formatters[:col_num],
*self.formatters[-col_num:],
]
else:
col_num = cast(int, self.max_cols)
self.tr_frame = self.tr_frame.iloc[:, :col_num]
self.tr_col_num = col_num
def _truncate_vertically(self) -> None:
"""Remove rows, which are not to be displayed.
Attributes affected:
- tr_frame
- tr_row_num
"""
assert self.max_rows_fitted is not None
row_num = self.max_rows_fitted // 2
if row_num >= 1:
head = self.tr_frame.iloc[:row_num, :]
tail = self.tr_frame.iloc[-row_num:, :]
self.tr_frame = concat((head, tail))
else:
row_num = cast(int, self.max_rows)
self.tr_frame = self.tr_frame.iloc[:row_num, :]
self.tr_row_num = row_num
def _get_strcols_without_index(self) -> List[List[str]]:
strcols: List[List[str]] = []
if not is_list_like(self.header) and not self.header:
for i, c in enumerate(self.tr_frame):
fmt_values = self.format_col(i)
fmt_values = _make_fixed_width(
strings=fmt_values,
justify=self.justify,
minimum=int(self.col_space.get(c, 0)),
adj=self.adj,
)
strcols.append(fmt_values)
return strcols
if is_list_like(self.header):
# cast here since can't be bool if is_list_like
self.header = cast(List[str], self.header)
if len(self.header) != len(self.columns):
raise ValueError(
f"Writing {len(self.columns)} cols "
f"but got {len(self.header)} aliases"
)
str_columns = [[label] for label in self.header]
else:
str_columns = self._get_formatted_column_labels(self.tr_frame)
if self.show_row_idx_names:
for x in str_columns:
x.append("")
for i, c in enumerate(self.tr_frame):
cheader = str_columns[i]
header_colwidth = max(
int(self.col_space.get(c, 0)), *(self.adj.len(x) for x in cheader)
)
fmt_values = self.format_col(i)
fmt_values = _make_fixed_width(
fmt_values, self.justify, minimum=header_colwidth, adj=self.adj
)
max_len = max(max(self.adj.len(x) for x in fmt_values), header_colwidth)
cheader = self.adj.justify(cheader, max_len, mode=self.justify)
strcols.append(cheader + fmt_values)
return strcols
def format_col(self, i: int) -> List[str]:
frame = self.tr_frame
formatter = self._get_formatter(i)
return format_array(
frame.iloc[:, i]._values,
formatter,
float_format=self.float_format,
na_rep=self.na_rep,
space=self.col_space.get(frame.columns[i]),
decimal=self.decimal,
leading_space=self.index,
)
def _get_formatter(self, i: Union[str, int]) -> Optional[Callable]:
if isinstance(self.formatters, (list, tuple)):
if is_integer(i):
i = cast(int, i)
return self.formatters[i]
else:
return None
else:
if is_integer(i) and i not in self.columns:
i = self.columns[i]
return self.formatters.get(i, None)
def _get_formatted_column_labels(self, frame: "DataFrame") -> List[List[str]]:
from pandas.core.indexes.multi import sparsify_labels
columns = frame.columns
if isinstance(columns, MultiIndex):
fmt_columns = columns.format(sparsify=False, adjoin=False)
fmt_columns = list(zip(*fmt_columns))
dtypes = self.frame.dtypes._values
# if we have a Float level, they don't use leading space at all
restrict_formatting = any(level.is_floating for level in columns.levels)
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
def space_format(x, y):
if (
y not in self.formatters
and need_leadsp[x]
and not restrict_formatting
):
return " " + y
return y
str_columns = list(
zip(*[[space_format(x, y) for y in x] for x in fmt_columns])
)
if self.sparsify and len(str_columns):
str_columns = sparsify_labels(str_columns)
str_columns = [list(x) for x in zip(*str_columns)]
else:
fmt_columns = columns.format()
dtypes = self.frame.dtypes
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
str_columns = [
[" " + x if not self._get_formatter(i) and need_leadsp[x] else x]
for i, (col, x) in enumerate(zip(columns, fmt_columns))
]
# self.str_columns = str_columns
return str_columns
def _get_formatted_index(self, frame: "DataFrame") -> List[str]:
# Note: this is only used by to_string() and to_latex(), not by
# to_html(). so safe to cast col_space here.
col_space = {k: cast(int, v) for k, v in self.col_space.items()}
index = frame.index
columns = frame.columns
fmt = self._get_formatter("__index__")
if isinstance(index, MultiIndex):
fmt_index = index.format(
sparsify=self.sparsify,
adjoin=False,
names=self.show_row_idx_names,
formatter=fmt,
)
else:
fmt_index = [index.format(name=self.show_row_idx_names, formatter=fmt)]
fmt_index = [
tuple(
_make_fixed_width(
list(x), justify="left", minimum=col_space.get("", 0), adj=self.adj
)
)
for x in fmt_index
]
adjoined = self.adj.adjoin(1, *fmt_index).split("\n")
# empty space for columns
if self.show_col_idx_names:
col_header = [str(x) for x in self._get_column_name_list()]
else:
col_header = [""] * columns.nlevels
if self.header:
return col_header + adjoined
else:
return adjoined
def _get_column_name_list(self) -> List[str]:
names: List[str] = []
columns = self.frame.columns
if isinstance(columns, MultiIndex):
names.extend("" if name is None else name for name in columns.names)
else:
names.append("" if columns.name is None else columns.name)
return names
class DataFrameRenderer:
"""Class for creating dataframe output in multiple formats.
Called in pandas.core.generic.NDFrame:
- to_csv
- to_latex
Called in pandas.core.frame.DataFrame:
- to_html
- to_string
Parameters
----------
fmt : DataFrameFormatter
Formatter with the formating options.
"""
def __init__(self, fmt: DataFrameFormatter):
self.fmt = fmt
def to_latex(
self,
buf: Optional[FilePathOrBuffer[str]] = None,
column_format: Optional[str] = None,
longtable: bool = False,
encoding: Optional[str] = None,
multicolumn: bool = False,
multicolumn_format: Optional[str] = None,
multirow: bool = False,
caption: Optional[str] = None,
label: Optional[str] = None,
position: Optional[str] = None,
) -> Optional[str]:
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
from pandas.io.formats.latex import LatexFormatter
latex_formatter = LatexFormatter(
self.fmt,
longtable=longtable,
column_format=column_format,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow,
caption=caption,
label=label,
position=position,
)
string = latex_formatter.to_string()
return save_to_buffer(string, buf=buf, encoding=encoding)
def to_html(
self,
buf: Optional[FilePathOrBuffer[str]] = None,
encoding: Optional[str] = None,
classes: Optional[Union[str, List, Tuple]] = None,
notebook: bool = False,
border: Optional[int] = None,
table_id: Optional[str] = None,
render_links: bool = False,
) -> Optional[str]:
"""
Render a DataFrame to a html table.
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
        encoding : str, default "utf-8"
Set character encoding.
classes : str or list-like
classes to include in the `class` attribute of the opening
``<table>`` tag, in addition to the default "dataframe".
notebook : {True, False}, optional, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
``<table>`` tag. Default ``pd.options.display.html.border``.
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links.
"""
from pandas.io.formats.html import HTMLFormatter, NotebookFormatter
Klass = NotebookFormatter if notebook else HTMLFormatter
html_formatter = Klass(
self.fmt,
classes=classes,
border=border,
table_id=table_id,
render_links=render_links,
)
string = html_formatter.to_string()
return save_to_buffer(string, buf=buf, encoding=encoding)
def to_string(
self,
buf: Optional[FilePathOrBuffer[str]] = None,
encoding: Optional[str] = None,
line_width: Optional[int] = None,
) -> Optional[str]:
"""
Render a DataFrame to a console-friendly tabular output.
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
        encoding : str, default "utf-8"
Set character encoding.
line_width : int, optional
Width to wrap a line in characters.
"""
from pandas.io.formats.string import StringFormatter
string_formatter = StringFormatter(self.fmt, line_width=line_width)
string = string_formatter.to_string()
return save_to_buffer(string, buf=buf, encoding=encoding)
def to_csv(
self,
path_or_buf: Optional[FilePathOrBuffer[str]] = None,
encoding: Optional[str] = None,
sep: str = ",",
columns: Optional[Sequence[Label]] = None,
index_label: Optional[IndexLabel] = None,
mode: str = "w",
compression: CompressionOptions = "infer",
quoting: Optional[int] = None,
quotechar: str = '"',
line_terminator: Optional[str] = None,
chunksize: Optional[int] = None,
date_format: Optional[str] = None,
doublequote: bool = True,
escapechar: Optional[str] = None,
errors: str = "strict",
storage_options: StorageOptions = None,
) -> Optional[str]:
"""
Render dataframe as comma-separated file.
"""
from pandas.io.formats.csvs import CSVFormatter
if path_or_buf is None:
created_buffer = True
path_or_buf = StringIO()
else:
created_buffer = False
csv_formatter = CSVFormatter(
path_or_buf=path_or_buf,
line_terminator=line_terminator,
sep=sep,
encoding=encoding,
errors=errors,
compression=compression,
quoting=quoting,
cols=columns,
index_label=index_label,
mode=mode,
chunksize=chunksize,
quotechar=quotechar,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar,
storage_options=storage_options,
formatter=self.fmt,
)
csv_formatter.save()
if created_buffer:
assert isinstance(path_or_buf, StringIO)
content = path_or_buf.getvalue()
path_or_buf.close()
return content
return None
def save_to_buffer(
string: str,
buf: Optional[FilePathOrBuffer[str]] = None,
encoding: Optional[str] = None,
) -> Optional[str]:
"""
Perform serialization. Write to buf or return as string if buf is None.
"""
with get_buffer(buf, encoding=encoding) as f:
f.write(string)
if buf is None:
return f.getvalue()
return None
@contextmanager
def get_buffer(buf: Optional[FilePathOrBuffer[str]], encoding: Optional[str] = None):
"""
Context manager to open, yield and close buffer for filenames or Path-like
objects, otherwise yield buf unchanged.
"""
if buf is not None:
buf = stringify_path(buf)
else:
buf = StringIO()
if encoding is None:
encoding = "utf-8"
elif not isinstance(buf, str):
raise ValueError("buf is not a file name and encoding is specified.")
if hasattr(buf, "write"):
yield buf
elif isinstance(buf, str):
with open(buf, "w", encoding=encoding, newline="") as f:
# GH#30034 open instead of codecs.open prevents a file leak
# if we have an invalid encoding argument.
# newline="" is needed to roundtrip correctly on
# windows test_to_latex_filename
yield f
else:
raise TypeError("buf is not a file name and it has no write method")
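# A quick behavioural sketch of save_to_buffer (not part of pandas; "out.txt"
# is an arbitrary example path): with buf=None the text is returned as a
# string, with a path it is written to that file and None is returned.
#
#     save_to_buffer("hello")                 # -> 'hello'
#     save_to_buffer("hello", buf="out.txt")  # writes out.txt, returns None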
# ----------------------------------------------------------------------
# Array formatters
def format_array(
values: Any,
formatter: Optional[Callable],
float_format: Optional[FloatFormatType] = None,
na_rep: str = "NaN",
digits: Optional[int] = None,
space: Optional[Union[str, int]] = None,
justify: str = "right",
decimal: str = ".",
leading_space: Optional[bool] = True,
quoting: Optional[int] = None,
) -> List[str]:
"""
Format an array for printing.
Parameters
----------
values
formatter
float_format
na_rep
digits
space
justify
decimal
leading_space : bool, optional, default True
Whether the array should be formatted with a leading space.
        When an array is used as a column of a Series or DataFrame, we do want
the leading space to pad between columns.
When formatting an Index subclass
(e.g. IntervalIndex._format_native_types), we don't want the
leading space since it should be left-aligned.
Returns
-------
List[str]
"""
fmt_klass: Type[GenericArrayFormatter]
if is_datetime64_dtype(values.dtype):
fmt_klass = Datetime64Formatter
elif is_datetime64tz_dtype(values.dtype):
fmt_klass = Datetime64TZFormatter
elif is_timedelta64_dtype(values.dtype):
fmt_klass = Timedelta64Formatter
elif is_extension_array_dtype(values.dtype):
fmt_klass = ExtensionArrayFormatter
elif is_float_dtype(values.dtype) or is_complex_dtype(values.dtype):
fmt_klass = FloatArrayFormatter
elif is_integer_dtype(values.dtype):
fmt_klass = IntArrayFormatter
else:
fmt_klass = GenericArrayFormatter
if space is None:
space = get_option("display.column_space")
if float_format is None:
float_format = get_option("display.float_format")
if digits is None:
digits = get_option("display.precision")
fmt_obj = fmt_klass(
values,
digits=digits,
na_rep=na_rep,
float_format=float_format,
formatter=formatter,
space=space,
justify=justify,
decimal=decimal,
leading_space=leading_space,
quoting=quoting,
)
return fmt_obj.get_result()
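# An illustrative call (not from the pandas test suite): a float ndarray picks
# FloatArrayFormatter, which trims trailing zeros and right-justifies, so the
# exact padding depends on the display options in effect.
#
#     import numpy as np
#     format_array(np.array([1.0, 22.5, np.nan]), None, na_rep="NaN")
#     # -> something like ['  1.0', ' 22.5', '  NaN']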
class GenericArrayFormatter:
def __init__(
self,
values: Any,
digits: int = 7,
formatter: Optional[Callable] = None,
na_rep: str = "NaN",
space: Union[str, int] = 12,
float_format: Optional[FloatFormatType] = None,
justify: str = "right",
decimal: str = ".",
quoting: Optional[int] = None,
fixed_width: bool = True,
leading_space: Optional[bool] = True,
):
self.values = values
self.digits = digits
self.na_rep = na_rep
self.space = space
self.formatter = formatter
self.float_format = float_format
self.justify = justify
self.decimal = decimal
self.quoting = quoting
self.fixed_width = fixed_width
self.leading_space = leading_space
def get_result(self) -> List[str]:
fmt_values = self._format_strings()
return _make_fixed_width(fmt_values, self.justify)
def _format_strings(self) -> List[str]:
if self.float_format is None:
float_format = get_option("display.float_format")
if float_format is None:
precision = get_option("display.precision")
float_format = lambda x: f"{x: .{precision:d}f}"
else:
float_format = self.float_format
if self.formatter is not None:
formatter = self.formatter
else:
quote_strings = self.quoting is not None and self.quoting != QUOTE_NONE
formatter = partial(
pprint_thing,
escape_chars=("\t", "\r", "\n"),
quote_strings=quote_strings,
)
def _format(x):
if self.na_rep is not None and is_scalar(x) and isna(x):
try:
# try block for np.isnat specifically
# determine na_rep if x is None or NaT-like
if x is None:
return "None"
elif x is NA:
return str(NA)
elif x is NaT or np.isnat(x):
return "NaT"
except (TypeError, ValueError):
# np.isnat only handles datetime or timedelta objects
pass
return self.na_rep
elif isinstance(x, PandasObject):
return str(x)
else:
# object dtype
return str(formatter(x))
vals = extract_array(self.values, extract_numpy=True)
is_float_type = (
lib.map_infer(vals, is_float)
# vals may have 2 or more dimensions
& np.all(notna(vals), axis=tuple(range(1, len(vals.shape))))
)
leading_space = self.leading_space
if leading_space is None:
leading_space = is_float_type.any()
fmt_values = []
for i, v in enumerate(vals):
if not is_float_type[i] and leading_space:
fmt_values.append(f" {_format(v)}")
elif is_float_type[i]:
fmt_values.append(_trim_zeros_single_float(float_format(v)))
else:
if leading_space is False:
# False specifically, so that the default is
# to include a space if we get here.
tpl = "{v}"
else:
tpl = " {v}"
fmt_values.append(tpl.format(v=_format(v)))
return fmt_values
class FloatArrayFormatter(GenericArrayFormatter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# float_format is expected to be a string
# formatter should be used to pass a function
if self.float_format is not None and self.formatter is None:
# GH21625, GH22270
self.fixed_width = False
if callable(self.float_format):
self.formatter = self.float_format
self.float_format = None
def _value_formatter(
self,
float_format: Optional[FloatFormatType] = None,
threshold: Optional[Union[float, int]] = None,
) -> Callable:
"""Returns a function to be applied on each value to format it"""
# the float_format parameter supersedes self.float_format
if float_format is None:
float_format = self.float_format
# we are going to compose different functions, to first convert to
# a string, then replace the decimal symbol, and finally chop according
# to the threshold
# when there is no float_format, we use str instead of '%g'
# because str(0.0) = '0.0' while '%g' % 0.0 = '0'
if float_format:
def base_formatter(v):
assert float_format is not None # for mypy
# pandas\io\formats\format.py:1411: error: "str" not callable
# [operator]
# pandas\io\formats\format.py:1411: error: Unexpected keyword
# argument "value" for "__call__" of "EngFormatter" [call-arg]
return (
float_format(value=v) # type: ignore[operator,call-arg]
if notna(v)
else self.na_rep
)
else:
def base_formatter(v):
return str(v) if notna(v) else self.na_rep
if self.decimal != ".":
def decimal_formatter(v):
return base_formatter(v).replace(".", self.decimal, 1)
else:
decimal_formatter = base_formatter
if threshold is None:
return decimal_formatter
def formatter(value):
if notna(value):
if abs(value) > threshold:
return decimal_formatter(value)
else:
return decimal_formatter(0.0)
else:
return self.na_rep
return formatter
def get_result_as_array(self) -> np.ndarray:
"""
Returns the float values converted into strings using
the parameters given at initialisation, as a numpy array
"""
def format_with_na_rep(values: ArrayLike, formatter: Callable, na_rep: str):
mask = isna(values)
formatted = np.array(
[
formatter(val) if not m else na_rep
for val, m in zip(values.ravel(), mask.ravel())
]
).reshape(values.shape)
return formatted
if self.formatter is not None:
return format_with_na_rep(self.values, self.formatter, self.na_rep)
if self.fixed_width:
threshold = get_option("display.chop_threshold")
else:
threshold = None
# if we have a fixed_width, we'll need to try different float_format
def format_values_with(float_format):
formatter = self._value_formatter(float_format, threshold)
# default formatter leaves a space to the left when formatting
# floats, must be consistent for left-justifying NaNs (GH #25061)
if self.justify == "left":
na_rep = " " + self.na_rep
else:
na_rep = self.na_rep
# separate the wheat from the chaff
values = self.values
is_complex = is_complex_dtype(values)
values = format_with_na_rep(values, formatter, na_rep)
if self.fixed_width:
if is_complex:
result = _trim_zeros_complex(values, self.decimal)
else:
result = _trim_zeros_float(values, self.decimal)
return np.asarray(result, dtype="object")
return values
# There is a special default string when we are fixed-width
# The default is otherwise to use str instead of a formatting string
float_format: Optional[FloatFormatType]
if self.float_format is None:
if self.fixed_width:
if self.leading_space is True:
fmt_str = "{value: .{digits:d}f}"
else:
fmt_str = "{value:.{digits:d}f}"
float_format = partial(fmt_str.format, digits=self.digits)
else:
float_format = self.float_format
else:
float_format = lambda value: self.float_format % value
formatted_values = format_values_with(float_format)
if not self.fixed_width:
return formatted_values
        # we need to convert to engineering format if some values are too small
# and would appear as 0, or if some values are too big and take too
# much space
if len(formatted_values) > 0:
maxlen = max(len(x) for x in formatted_values)
too_long = maxlen > self.digits + 6
else:
too_long = False
with np.errstate(invalid="ignore"):
abs_vals = np.abs(self.values)
# this is pretty arbitrary for now
            # large values: more than 8 characters including decimal symbol
# and first digit, hence > 1e6
has_large_values = (abs_vals > 1e6).any()
has_small_values = (
(abs_vals < 10 ** (-self.digits)) & (abs_vals > 0)
).any()
if has_small_values or (too_long and has_large_values):
if self.leading_space is True:
fmt_str = "{value: .{digits:d}e}"
else:
fmt_str = "{value:.{digits:d}e}"
float_format = partial(fmt_str.format, digits=self.digits)
formatted_values = format_values_with(float_format)
return formatted_values
def _format_strings(self) -> List[str]:
return list(self.get_result_as_array())
class IntArrayFormatter(GenericArrayFormatter):
def _format_strings(self) -> List[str]:
if self.leading_space is False:
            formatter_str = lambda x: f"{x:d}"
        else:
            formatter_str = lambda x: f"{x: d}"
formatter = self.formatter or formatter_str
fmt_values = [formatter(x) for x in self.values]
return fmt_values
class Datetime64Formatter(GenericArrayFormatter):
def __init__(
self,
values: Union[np.ndarray, "Series", DatetimeIndex, DatetimeArray],
nat_rep: str = "NaT",
date_format: None = None,
**kwargs,
):
super().__init__(values, **kwargs)
self.nat_rep = nat_rep
self.date_format = date_format
def _format_strings(self) -> List[str]:
""" we by definition have DO NOT have a TZ """
values = self.values
if not isinstance(values, DatetimeIndex):
values = DatetimeIndex(values)
if self.formatter is not None and callable(self.formatter):
return [self.formatter(x) for x in values]
fmt_values = values._data._format_native_types(
na_rep=self.nat_rep, date_format=self.date_format
)
return fmt_values.tolist()
class ExtensionArrayFormatter(GenericArrayFormatter):
def _format_strings(self) -> List[str]:
values = extract_array(self.values, extract_numpy=True)
formatter = self.formatter
if formatter is None:
formatter = values._formatter(boxed=True)
if is_categorical_dtype(values.dtype):
# Categorical is special for now, so that we can preserve tzinfo
array = values._internal_get_values()
else:
array = np.asarray(values)
fmt_values = format_array(
array,
formatter,
float_format=self.float_format,
na_rep=self.na_rep,
digits=self.digits,
space=self.space,
justify=self.justify,
decimal=self.decimal,
leading_space=self.leading_space,
quoting=self.quoting,
)
return fmt_values
def format_percentiles(
percentiles: Union[
np.ndarray, List[Union[int, float]], List[float], List[Union[str, float]]
]
) -> List[str]:
"""
Outputs rounded and formatted percentiles.
Parameters
----------
percentiles : list-like, containing floats from interval [0,1]
Returns
-------
formatted : list of strings
Notes
-----
Rounding precision is chosen so that: (1) if any two elements of
``percentiles`` differ, they remain different after rounding
(2) no entry is *rounded* to 0% or 100%.
Any non-integer is always rounded to at least 1 decimal place.
Examples
--------
Keeps all entries different after rounding:
>>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
['1.999%', '2.001%', '50%', '66.667%', '99.99%']
No element is rounded to 0% or 100% (unless already equal to it).
Duplicates are allowed:
>>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
"""
percentiles = np.asarray(percentiles)
# It checks for np.NaN as well
with np.errstate(invalid="ignore"):
if (
not is_numeric_dtype(percentiles)
or not np.all(percentiles >= 0)
or not np.all(percentiles <= 1)
):
raise ValueError("percentiles should all be in the interval [0,1]")
percentiles = 100 * percentiles
int_idx = np.isclose(percentiles.astype(int), percentiles)
if np.all(int_idx):
out = percentiles.astype(int).astype(str)
return [i + "%" for i in out]
unique_pcts = np.unique(percentiles)
to_begin = unique_pcts[0] if unique_pcts[0] > 0 else None
to_end = 100 - unique_pcts[-1] if unique_pcts[-1] < 100 else None
# Least precision that keeps percentiles unique after rounding
prec = -np.floor(
np.log10(np.min(np.ediff1d(unique_pcts, to_begin=to_begin, to_end=to_end)))
).astype(int)
prec = max(1, prec)
out = np.empty_like(percentiles, dtype=object)
out[int_idx] = percentiles[int_idx].astype(int).astype(str)
out[~int_idx] = percentiles[~int_idx].round(prec).astype(str)
return [i + "%" for i in out]
def is_dates_only(
values: Union[np.ndarray, DatetimeArray, Index, DatetimeIndex]
) -> bool:
# return a boolean if we are only dates (and don't have a timezone)
if not isinstance(values, Index):
values = values.ravel()
values = DatetimeIndex(values)
if values.tz is not None:
return False
values_int = values.asi8
consider_values = values_int != iNaT
one_day_nanos = 86400 * 1e9
even_days = (
np.logical_and(consider_values, values_int % int(one_day_nanos) != 0).sum() == 0
)
if even_days:
return True
return False
def _format_datetime64(x: Union[NaTType, Timestamp], nat_rep: str = "NaT") -> str:
if x is NaT:
return nat_rep
return str(x)
def _format_datetime64_dateonly(
x: Union[NaTType, Timestamp],
nat_rep: str = "NaT",
date_format: Optional[str] = None,
) -> str:
if x is NaT:
return nat_rep
if date_format:
return x.strftime(date_format)
else:
return x._date_repr
def get_format_datetime64(
is_dates_only: bool, nat_rep: str = "NaT", date_format: Optional[str] = None
) -> Callable:
if is_dates_only:
return lambda x: _format_datetime64_dateonly(
x, nat_rep=nat_rep, date_format=date_format
)
else:
return lambda x: _format_datetime64(x, nat_rep=nat_rep)
def get_format_datetime64_from_values(
values: Union[np.ndarray, DatetimeArray, DatetimeIndex], date_format: Optional[str]
) -> Optional[str]:
""" given values and a date_format, return a string format """
if isinstance(values, np.ndarray) and values.ndim > 1:
# We don't actually care about the order of values, and DatetimeIndex
# only accepts 1D values
values = values.ravel()
ido = is_dates_only(values)
if ido:
return date_format or "%Y-%m-%d"
return date_format
class Datetime64TZFormatter(Datetime64Formatter):
def _format_strings(self) -> List[str]:
""" we by definition have a TZ """
values = self.values.astype(object)
ido = is_dates_only(values)
formatter = self.formatter or get_format_datetime64(
ido, date_format=self.date_format
)
fmt_values = [formatter(x) for x in values]
return fmt_values
class Timedelta64Formatter(GenericArrayFormatter):
def __init__(
self,
values: Union[np.ndarray, TimedeltaIndex],
nat_rep: str = "NaT",
box: bool = False,
**kwargs,
):
super().__init__(values, **kwargs)
self.nat_rep = nat_rep
self.box = box
def _format_strings(self) -> List[str]:
formatter = self.formatter or get_format_timedelta64(
self.values, nat_rep=self.nat_rep, box=self.box
)
return [formatter(x) for x in self.values]
def get_format_timedelta64(
values: Union[np.ndarray, TimedeltaIndex, TimedeltaArray],
nat_rep: str = "NaT",
box: bool = False,
) -> Callable:
"""
Return a formatter function for a range of timedeltas.
These will all have the same format argument
If box, then show the return in quotes
"""
values_int = values.astype(np.int64)
consider_values = values_int != iNaT
one_day_nanos = 86400 * 1e9
even_days = (
np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0
)
if even_days:
format = None
else:
format = "long"
def _formatter(x):
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
if not isinstance(x, Timedelta):
x = Timedelta(x)
result = x._repr_base(format=format)
if box:
result = f"'{result}'"
return result
return _formatter
def _make_fixed_width(
strings: List[str],
justify: str = "right",
minimum: Optional[int] = None,
adj: Optional[TextAdjustment] = None,
) -> List[str]:
if len(strings) == 0 or justify == "all":
return strings
if adj is None:
adjustment = get_adjustment()
else:
adjustment = adj
max_len = max(adjustment.len(x) for x in strings)
if minimum is not None:
max_len = max(minimum, max_len)
conf_max = get_option("display.max_colwidth")
if conf_max is not None and max_len > conf_max:
max_len = conf_max
def just(x: str) -> str:
if conf_max is not None:
if (conf_max > 3) & (adjustment.len(x) > max_len):
x = x[: max_len - 3] + "..."
return x
strings = [just(x) for x in strings]
result = adjustment.justify(strings, max_len, mode=justify)
return result
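# A small illustration (not from pandas): every string is padded to the widest
# entry, and anything longer than display.max_colwidth is truncated with "...".
#
#     _make_fixed_width(["a", "bbb", "cc"], justify="right")
#     # -> ['  a', 'bbb', ' cc']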
def _trim_zeros_complex(str_complexes: np.ndarray, decimal: str = ".") -> List[str]:
"""
Separates the real and imaginary parts from the complex number, and
executes the _trim_zeros_float method on each of those.
"""
trimmed = [
"".join(_trim_zeros_float(re.split(r"([j+-])", x), decimal))
for x in str_complexes
]
# pad strings to the length of the longest trimmed string for alignment
lengths = [len(s) for s in trimmed]
max_length = max(lengths)
padded = [
s[: -((k - 1) // 2 + 1)] # real part
+ (max_length - k) // 2 * "0"
+ s[-((k - 1) // 2 + 1) : -((k - 1) // 2)] # + / -
+ s[-((k - 1) // 2) : -1] # imaginary part
+ (max_length - k) // 2 * "0"
+ s[-1]
for s, k in zip(trimmed, lengths)
]
return padded
def _trim_zeros_single_float(str_float: str) -> str:
"""
Trims trailing zeros after a decimal point,
leaving just one if necessary.
"""
str_float = str_float.rstrip("0")
if str_float.endswith("."):
str_float += "0"
return str_float
def _trim_zeros_float(
str_floats: Union[np.ndarray, List[str]], decimal: str = "."
) -> List[str]:
"""
Trims the maximum number of trailing zeros equally from
all numbers containing decimals, leaving just one if
necessary.
"""
trimmed = str_floats
number_regex = re.compile(fr"^\s*[\+-]?[0-9]+\{decimal}[0-9]*$")
def is_number_with_decimal(x):
return re.match(number_regex, x) is not None
def should_trim(values: Union[np.ndarray, List[str]]) -> bool:
"""
Determine if an array of strings should be trimmed.
Returns True if all numbers containing decimals (defined by the
above regular expression) within the array end in a zero, otherwise
returns False.
"""
numbers = [x for x in values if is_number_with_decimal(x)]
return len(numbers) > 0 and all(x.endswith("0") for x in numbers)
while should_trim(trimmed):
trimmed = [x[:-1] if is_number_with_decimal(x) else x for x in trimmed]
    # leave one 0 after the decimal point if need be.
result = [
x + "0" if is_number_with_decimal(x) and x.endswith(decimal) else x
for x in trimmed
]
return result
def _has_names(index: Index) -> bool:
if isinstance(index, MultiIndex):
return com.any_not_none(*index.names)
else:
return index.name is not None
class EngFormatter:
"""
Formats float values according to engineering format.
Based on matplotlib.ticker.EngFormatter
"""
# The SI engineering prefixes
ENG_PREFIXES = {
-24: "y",
-21: "z",
-18: "a",
-15: "f",
-12: "p",
-9: "n",
-6: "u",
-3: "m",
0: "",
3: "k",
6: "M",
9: "G",
12: "T",
15: "P",
18: "E",
21: "Z",
24: "Y",
}
def __init__(self, accuracy: Optional[int] = None, use_eng_prefix: bool = False):
self.accuracy = accuracy
self.use_eng_prefix = use_eng_prefix
def __call__(self, num: Union[int, float]) -> str:
"""
Formats a number in engineering notation, appending a letter
representing the power of 1000 of the original number. Some examples:
>>> format_eng(0) # for self.accuracy = 0
' 0'
>>> format_eng(1000000) # for self.accuracy = 1,
# self.use_eng_prefix = True
' 1.0M'
>>> format_eng("-1e-6") # for self.accuracy = 2
# self.use_eng_prefix = False
'-1.00E-06'
@param num: the value to represent
@type num: either a numeric value or a string that can be converted to
a numeric value (as per decimal.Decimal constructor)
@return: engineering formatted string
"""
dnum = decimal.Decimal(str(num))
if decimal.Decimal.is_nan(dnum):
return "NaN"
if decimal.Decimal.is_infinite(dnum):
return "inf"
sign = 1
if dnum < 0: # pragma: no cover
sign = -1
dnum = -dnum
if dnum != 0:
pow10 = decimal.Decimal(int(math.floor(dnum.log10() / 3) * 3))
else:
pow10 = decimal.Decimal(0)
pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))
int_pow10 = int(pow10)
if self.use_eng_prefix:
prefix = self.ENG_PREFIXES[int_pow10]
else:
if int_pow10 < 0:
prefix = f"E-{-int_pow10:02d}"
else:
prefix = f"E+{int_pow10:02d}"
mant = sign * dnum / (10 ** pow10)
if self.accuracy is None: # pragma: no cover
format_str = "{mant: g}{prefix}"
else:
format_str = f"{{mant: .{self.accuracy:d}f}}{{prefix}}"
formatted = format_str.format(mant=mant, prefix=prefix)
return formatted
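# Standalone usage sketch of EngFormatter (values are illustrative; the
# expected outputs follow the docstring examples above):
#
#     EngFormatter(accuracy=1, use_eng_prefix=True)(1500000)     # -> ' 1.5M'
#     EngFormatter(accuracy=2, use_eng_prefix=False)(-0.000001)  # -> '-1.00E-06'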
def set_eng_float_format(accuracy: int = 3, use_eng_prefix: bool = False) -> None:
"""
Alter default behavior on how float is formatted in DataFrame.
Format float in engineering format. By accuracy, we mean the number of
decimal digits after the floating point.
See also EngFormatter.
"""
set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix))
set_option("display.column_space", max(12, accuracy + 9))
def get_level_lengths(
levels: Any, sentinel: Union[bool, object, str] = ""
) -> List[Dict[int, int]]:
"""
For each index in each level the function returns lengths of indexes.
Parameters
----------
levels : list of lists
        List of values for each level.
sentinel : string, optional
Value which states that no new index starts on there.
Returns
-------
Returns list of maps. For each level returns map of indexes (key is index
in row and value is length of index).
"""
if len(levels) == 0:
return []
control = [True] * len(levels[0])
result = []
for level in levels:
last_index = 0
lengths = {}
for i, key in enumerate(level):
if control[i] and key == sentinel:
pass
else:
control[i] = False
lengths[last_index] = i - last_index
last_index = i
lengths[last_index] = len(level) - last_index
result.append(lengths)
return result
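# Illustration (hypothetical level values): with the default "" sentinel,
# each non-sentinel entry starts a new index span.
#
#     get_level_lengths([["a", "", "b", ""]])
#     # -> [{0: 2, 2: 2}]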
def buffer_put_lines(buf: IO[str], lines: List[str]) -> None:
"""
Appends lines to a buffer.
Parameters
----------
buf
The buffer to write to
lines
The lines to append.
"""
if any(isinstance(x, str) for x in lines):
lines = [str(x) for x in lines]
buf.write("\n".join(lines))
authors: ana.kapros@yahoo.ro

blob_id: 7f1dedeaf91d770dd0311fae73c3bbe5539079c9 | directory_id: 3e24611b7315b5ad588b2128570f1341b9c968e8
path: /Template/Graph2tex.py | content_id: 451adf31f5883e688507bbbf8b1b66755fe1027d
detected_licenses: ["BSD-2-Clause"] | license_type: permissive | repo_name: bioCKO/lpp_Script
snapshot_id: dc327be88c7d12243e25557f7da68d963917aa90 | revision_id: 0cb2eedb48d4afa25abc2ed7231eb1fdd9baecc2 | branch_name: refs/heads/master
visit_date: 2022-02-27T12:35:05.979231 | revision_date: 2019-08-27T05:56:33 | committer_date: 2019-08-27T05:56:33
github_id: null | star_events_count: 0 | fork_events_count: 0 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,036 | extension: py
#!/usr/bin/env python
#coding:utf-8
"""
Author: --<>
Purpose:
Created: 2017/5/9
"""
import re,os
from optparse import OptionParser
if __name__ == "__main__":
usage = '''usage: python2.7 %prog [options]
'''
parser = OptionParser(usage =usage )
parser.add_option("-i", "--Input", action="store",
dest="inputData",
help="Input Data")
parser.add_option("-o", "--Output", action="store",
dest="Output",
help=" Output")
parser.add_option("-c", "--Caption", action="store",
dest="Caption",
help=" Caption")
(options, args) = parser.parse_args()
inputData = os.path.abspath( options.inputData )
Output = os.path.abspath( options.Output )
Caption = options.Caption
OUTPUT = open( Output,'w' )
OUTPUT.write("""
\\begin{figure}[H]
\\centering
\\includegraphics[width=0.8\\textwidth]{%s}
\\captionsetup{labelsep=period}
\\caption{%s}
\\end{figure}
"""%(inputData,Caption))
authors: 409511038@qq.com

blob_id: 187eea89a649f18b50a7c8997ccffecd3bbf6cdd | directory_id: 684329a9a5d49a444f6f9e0a832db4aca4baef2c
path: /mvp/newsletter/admin.py | content_id: 6b21e204de9a31de2c3c5dae9903408d27980f8e
detected_licenses: [] | license_type: no_license | repo_name: theparadoxer02/Shipping-Monk
snapshot_id: f12735c809fadac5a5e462fd762559fca7d45986 | revision_id: 0f0548cf85ff05ee4bfe65ccf0b739e0ad340bc9 | branch_name: refs/heads/master
visit_date: 2021-01-19T21:32:31.696389 | revision_date: 2017-02-20T03:42:14 | committer_date: 2017-02-20T03:42:14
github_id: 82,513,484 | star_events_count: 0 | fork_events_count: 1 | gha_event_created_at: 2017-02-20T03:37:52 | gha_created_at: 2017-02-20T03:37:52
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 308 | extension: py
from django.contrib import admin
# Register your models here.
from .forms import SignUpForm
from .models import SignUp
class SignUpAdmin(admin.ModelAdmin):
list_display = ["__str__", "timestamp", "updated"]
form = SignUpForm
#class Meta:
#model = SignUp
admin.site.register(SignUp, SignUpAdmin)
authors: rohit.yadav848@yahoo.com

blob_id: 7e33503d2dada68f3026bb0368169aee76d50f17 | directory_id: 8b9b46bef13f2a562ce976f791ef30472b0e4652
path: /2020-04/4-08/19删除链表的倒数第N个节点/19.py | content_id: 640fb5a14b7ef56a4a8f025b6677d86925d5dd60
detected_licenses: [] | license_type: no_license | repo_name: Annihilation7/Leetcode-Love
snapshot_id: 0d1db2776b79f4c65fd2781b2d0031d1efd1ef14 | revision_id: 3fa96c81f92595cf076ad675ba332e2b0eb0e071 | branch_name: refs/heads/master
visit_date: 2021-03-21T17:06:35.260644 | revision_date: 2020-05-07T14:12:44 | committer_date: 2020-05-07T14:12:44
github_id: 247,314,348 | star_events_count: 0 | fork_events_count: 0 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,009 | extension: py
# -*- coding: utf-8 -*-
# Editor : Pycharm
# File : 19.py
# Author : ZhenyuMa
# Created : 2020/4/9 9:33 PM
# Description : a two-pointer problem
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
left_node = head
right_node = head
while n:
try:
right_node = right_node.next
except Exception:
print('invalid input.')
return None
n -= 1
if right_node is None:
ret_node = head.next
head.next = None
return ret_node
while right_node.next:
right_node = right_node.next
left_node = left_node.next
del_node = left_node.next
left_node.next = del_node.next
        del_node.next = None  # detach so the removed node is easy to garbage-collect
return head
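# Usage sketch (the 1->2->3->4->5 list is an example): removing the 2nd node
# from the end deletes the node holding 4.
#
#     nodes = [ListNode(i) for i in range(1, 6)]
#     for a, b in zip(nodes, nodes[1:]):
#         a.next = b
#     head = Solution().removeNthFromEnd(nodes[0], 2)
#     # traversing head now yields 1 -> 2 -> 3 -> 5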
authors: 763366463@qq.com

blob_id: 71292a94bbd048605f8de67ed6a624d57f94b230 | directory_id: 5167d0792b35d2214329d8e692734a1e058efba5
path: /Linked List/rearrange.py | content_id: 2e07e6237d60229d2228eb9eaee46ced54102d30
detected_licenses: [] | license_type: no_license | repo_name: mmenghnani/CS_learn
snapshot_id: 6dac86ede403fedad6ecfb69b05472b98e605e50 | revision_id: 482f97ae5f2cb696ea82dd695d5b68b2aaf12742 | branch_name: refs/heads/master
visit_date: 2020-05-04T15:03:02.422491 | revision_date: 2018-02-12T16:33:53 | committer_date: 2018-02-12T16:33:53
github_id: null | star_events_count: 0 | fork_events_count: 0 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 618 | extension: py
def reverselist(head):
    # iteratively reverse a singly linked list; return the new head
    prev = None
    while head:
        nxt = head.next
        head.next = prev
        prev = head
        head = nxt
    return prev

def rearrange(head):
    # Reorder L0->L1->...->Ln in place into L0->Ln->L1->Ln-1->...
    if head is None or head.next is None:
        return
    # find the middle with slow/fast pointers
    slow = head
    fast = slow.next
    while fast and fast.next:
        slow = slow.next
        fast = fast.next.next
    # split the list and reverse the second half
    head1 = head
    head2 = slow.next
    slow.next = None
    head2 = reverselist(head2)
    # weave the two halves together, starting from the first node
    curr = head1
    head1 = head1.next
    while head1 or head2:
        if head2:
            curr.next = head2
            curr = curr.next
            head2 = head2.next
        if head1:
            curr.next = head1
            curr = curr.next
            head1 = head1.next
'''
Time Complexity of this solution is O(n).
'''
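# Usage sketch with a minimal hypothetical node class (the original file does
# not define one):
#
#     class Node:
#         def __init__(self, data):
#             self.data = data
#             self.next = None
#
#     nodes = [Node(i) for i in range(1, 6)]   # 1->2->3->4->5
#     for a, b in zip(nodes, nodes[1:]):
#         a.next = b
#     rearrange(nodes[0])                      # list becomes 1->5->2->4->3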
authors: sahilgaucho@gmail.com

blob_id: 29d0f5a4b0dcb2ba354cf97f7bcac67f166558ac | directory_id: 2a318f4c8372c75224b2d79106ef52d8f4375e71
path: /python/get_mailfolder.py | content_id: 654694c9ec1369366db52a5e282ebd09ee8cedf9
detected_licenses: [] | license_type: no_license | repo_name: keyur32/graph-snippets
snapshot_id: 0d4bacc66b5fb0bbfddb73695fa61a5538eaf038 | revision_id: e416d3ad86abdb30449325c06758e8cc6d73c137 | branch_name: refs/heads/master
visit_date: 2021-01-23T05:29:59.155567 | revision_date: 2017-06-01T02:11:23 | committer_date: 2017-06-01T02:11:23
github_id: 92,971,791 | star_events_count: 1 | fork_events_count: 2 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 205 | extension: py
import http.client
conn = http.client.HTTPSConnection("graph.microsoft.com")
conn.request("GET", "/v1.0/me/mailFolders/%7Bid%7D")
res = conn.getresponse()
data = res.read()
print(data.decode("utf-8"))
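# Note: a real Microsoft Graph call needs an OAuth bearer token, and %7Bid%7D
# above is the URL-encoded {id} placeholder left for the caller to fill in.
# A hedged sketch of the same request with authentication ("<ACCESS_TOKEN>" is
# a placeholder; "inbox" is one of Graph's well-known folder names):
#
#     conn = http.client.HTTPSConnection("graph.microsoft.com")
#     conn.request("GET", "/v1.0/me/mailFolders/inbox",
#                  headers={"Authorization": "Bearer <ACCESS_TOKEN>"})
#     print(conn.getresponse().read().decode("utf-8"))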
authors: keyur32@hotmail.com

blob_id: 6871bc58de9ec6556a53a23b30a28172dd055fad | directory_id: 9edd6cd0aac07bc3a433ec1a99b7922c4e8256ba
path: /marketing/migrations/0005_emailmarketing.py | content_id: 6f6bc7c5c5efcc52d525a68e64f3975d02fb56e0
detected_licenses: [] | license_type: no_license | repo_name: NahidAkhtar84/NShopping
snapshot_id: 87935f3119c918baed8d1ea3348c48028b686dfe | revision_id: e466414853348a30bcb5e3096b847cc89a6c0976 | branch_name: refs/heads/master
visit_date: 2023-03-14T15:18:20.560228 | revision_date: 2021-03-02T20:59:39 | committer_date: 2021-03-02T20:59:39
github_id: 343,904,888 | star_events_count: 0 | fork_events_count: 0 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 664 | extension: py
# Generated by Django 3.1.5 on 2021-02-24 21:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('marketing', '0004_auto_20210223_0718'),
]
operations = [
migrations.CreateModel(
name='emailMarketing',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
),
]
authors: nahid.ibne.akhtar@gmail.com

blob_id: aea490cd24449091a1368b875ec5fe09f42dc668 | directory_id: a1eb0bb73680bc42af97eea6b4d7811453dc6758
path: /SVM/venv/Scripts/easy_install-script.py | content_id: b246adbc41772d1275dc0105e83226f24307b6aa
detected_licenses: [] | license_type: no_license | repo_name: PotatoPig/machine-learning
snapshot_id: 23c2ba5e7cf9d66c92309437d47d139bbf4e866f | revision_id: eb7ae7b8bc03d765e508b1a1c222ea15d25b1c21 | branch_name: refs/heads/master
visit_date: 2020-07-22T15:00:03.607116 | revision_date: 2019-09-09T06:47:33 | committer_date: 2019-09-09T06:47:33
github_id: 207,239,820 | star_events_count: 0 | fork_events_count: 0 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 445 | extension: py
#!D:\CS_Project\MachineLearning\SVM\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
authors: Binhan Xu

blob_id: d16cfbe7414805ba8bbe1c033534772a2c15925c | directory_id: b22588340d7925b614a735bbbde1b351ad657ffc
path: /athena/LArCalorimeter/LArMonTools/share/LArMonTools_jobOptions_withOddCells.py | content_id: fdeb466c1e4d98dc42670897d1a9cecf12eb5aea
detected_licenses: [] | license_type: no_license | repo_name: rushioda/PIXELVALID_athena
snapshot_id: 90befe12042c1249cbb3655dde1428bb9b9a42ce | revision_id: 22df23187ef85e9c3120122c8375ea0e7d8ea440 | branch_name: refs/heads/master
visit_date: 2020-12-14T22:01:15.365949 | revision_date: 2020-01-19T03:59:35 | committer_date: 2020-01-19T03:59:35
github_id: 234,836,993 | star_events_count: 1 | fork_events_count: 0 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,208 | extension: py
# most of them should be initialized already in RecExCommission
ToolSvc = Service( "ToolSvc" )
if not 'CheckEveryNoEvents' in dir():
CheckEveryNoEvents=100
#----------------------------------------------------------
theApp.Dlls += [ "AthenaMonitoring"]
theApp.Dlls += [ "LArMonTools"]
theApp.TopAlg += ["AthenaMon/LArMon1"]
LArMon1 = Algorithm( "LArMon1" )
LArMon1.CheckEveryNoEvents=CheckEveryNoEvents
# include all monitoring tools
#include ("LArMonTools/LAr2DNoiseMonTool_jobOptions.py" )
#include ("LArMonTools/LArDigitNoiseMonTool_jobOptions.py" )
# include ("LArMonTools/LArDigitSimpleMonTool_jobOptions.py" )
#include ("LArMonTools/LArDigMonTool_jobOptions.py" )
#include ("LArMonTools/LArFEBMon_jobOptions.py" )
#include ("LArMonTools/LArRawChannelMonTool_jobOptions.py" )
#include ("LArMonTools/LArRawChannelNoiseMonTool_jobOptions.py" )
#include ("LArMonTools/LArScaNoiseMonTool_jobOptions.py" )
include ("LArMonTools/LArEventInfoMonTool_jobOptions.py" )
#include ("LArMonTools/LArAccumDigMonTool_jobOptions.py")
#include ("LArMonTools/LArFebNoiseMonTool_jobOptions.py")
#include ("LArMonTools/LArOddCellsMonTool_jobOptions.py")
#include ("LArMonTools/LArRoughCorrelMonTool_jobOptions.py")
authors: rushioda@lxplus754.cern.ch

blob_id: b38c4139c0650c0fe99411a7886d9897a6e474ed | directory_id: de24f83a5e3768a2638ebcf13cbe717e75740168
path: /moodledata/vpl_data/10/usersdata/124/24764/submittedfiles/testes.py | content_id: cb319ab269e7c22443456c6988769cf5a8eb8080
detected_licenses: [] | license_type: no_license | repo_name: rafaelperazzo/programacao-web
snapshot_id: 95643423a35c44613b0f64bed05bd34780fe2436 | revision_id: 170dd5440afb9ee68a973f3de13a99aa4c735d79 | branch_name: refs/heads/master
visit_date: 2021-01-12T14:06:25.773146 | revision_date: 2017-12-22T16:05:45 | committer_date: 2017-12-22T16:05:45
github_id: 69,566,344 | star_events_count: 0 | fork_events_count: 0 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,158 | extension: py
# -*- coding: utf-8 -*-
from __future__ import division
import math
def vabsol(x):
if x < 0:
x = -1*x
return x
def calculopi(y):
c = 3
d = 2
for i in range (0, y, 1):
if i%2 != 0:
c = c - (4/(d*(d+1)*(d+2)))
elif i%2 == 0:
c = c + (4/(d*(d+1)*(d+2)))
d = d + 2
return c
def cos(z, epsilon):
    # Taylor series: cos(z) = 1 - z**2/2! + z**4/4! - ...,
    # accumulating terms until one falls below epsilon
    cosz = 1
    v = 2
    cont = 1
    while True:
        fat = 1
        for i in range (v, 0, -1):
            fat = fat*i
        d = (z**v)/fat
        if cont%2 != 0:
            cosz = cosz - d
        else:
            cosz = cosz + d
        if epsilon <= d:
            v = v + 2
            cont = cont + 1
        else:
            break
    return cosz
def razaurea(m, epsilon):
pi = calculopi(m)
f = 2*cos(pi/5, epsilon)
return f
m = int(input('Enter the number m of terms in the pi formula: '))
epsilon = input('Enter the epsilon for the golden-ratio computation: ')
m = vabsol(m)
print('Approximate value of pi: %.15f' %calculopi(m))
print('Approximate value of the golden ratio: %.15f' %razaurea(m, epsilon))
authors: rafael.mota@ufca.edu.br

blob_id: 78485c52167656823797fb21bb81f97525dfa5fa | directory_id: d94f758b2a5120fab69f48c7888b232599a05602
path: /app/user/serializers.py | content_id: 2b2c06b1279b9ee6248e204ffe9d87b5bf248944
detected_licenses: ["MIT"] | license_type: permissive | repo_name: zdravkob98/recipe-app-api
snapshot_id: 51b74d961b13a0dec7ca31320d148bc804ae41a1 | revision_id: 1aa236f69ee3960833219cabd6c9293d6d0f2ba4 | branch_name: refs/heads/main
visit_date: 2023-02-16T00:29:15.110453 | revision_date: 2021-01-13T17:33:14 | committer_date: 2021-01-13T17:33:14
github_id: 328,614,779 | star_events_count: 0 | fork_events_count: 0 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,730 | extension: py
from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
"""serializer for the users object"""
class Meta:
model = get_user_model()
fields = ('email', 'password', 'name')
extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}
def create(self, validated_data):
"""Create a new user with encrypted password and return it"""
return get_user_model().objects.create_user(**validated_data)
def update(self, instance, validated_data):
"""Update a user, setting the password correctly and return it"""
password = validated_data.pop('password', None)
user = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user
class AuthTokenSerializer(serializers.Serializer):
"""Serializer for the user authentication object"""
email = serializers.CharField()
password = serializers.CharField(
style={'input_type': 'password'},
trim_whitespace=False
)
def validate(self, attrs):
"""Validate and authenticate the user"""
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(
request=self.context.get('request'),
username=email,
password=password
)
if not user:
msg = _('Unable to authenticate with provided credentials')
raise serializers.ValidationError(msg, code='authorization')
attrs['user'] = user
return attrs
authors: zdravkobonev@abv.bg

blob_id: 96f9490be55b387dd0ae99a0ba8576011052572e | directory_id: 8db94de54b604475fa874c9f2c22c07aeb73e57a
path: /singular-value-decomposition/data/cleaner.py | content_id: 0bc381051da4a28aa45e54677873ad705130e737
detected_licenses: [] | license_type: no_license | repo_name: silky/programmers-introduction-to-mathematics
snapshot_id: 5a13d533f2efc24eb55d37d2a908cf7095d27858 | revision_id: 41e432012f5a6163db5bb2d77b8cd094877927b6 | branch_name: refs/heads/master
visit_date: 2020-04-03T04:31:33.992744 | revision_date: 2018-10-27T22:27:07 | committer_date: 2018-10-27T22:27:07
github_id: 155,016,472 | star_events_count: 1 | fork_events_count: 0 | gha_event_created_at: 2018-10-27T22:47:38 | gha_created_at: 2018-10-27T22:47:38
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,161 | extension: py
import json
import os
from nltk.corpus import stopwords, wordnet
from nltk.stem.wordnet import WordNetLemmatizer
from nltk import pos_tag, word_tokenize
def loadRaw(directory):
documents = dict()
for filename in os.listdir(directory):
if filename[-3:] == 'txt':
with open(os.path.join(directory, filename), 'r') as infile:
documents[filename] = infile.read()
return documents
allWords = None
def words():
global allWords
dirname = os.path.dirname(__file__)
with open(os.path.join(dirname, 'one-grams.txt'), 'r') as infile:
allWords = [line.strip() for line in infile]
return set(allWords)
# Extract a list of tokens from a cleaned string.
def tokenize(s):
stopWords = set(stopwords.words('english'))
wordsToKeep = words() - stopWords
return [x.lower() for x in word_tokenize(s)
if x in wordsToKeep and len(x) >= 3]
def wordnetPos(tag):
if tag.startswith('J'):
return wordnet.ADJ
elif tag.startswith('V'):
return wordnet.VERB
elif tag.startswith('N'):
return wordnet.NOUN
elif tag.startswith('R'):
return wordnet.ADV
else:
return wordnet.NOUN
def process(output_filename="all_stories.json"):
print("Loading...")
dirname = os.path.dirname(__file__)
documentDict = loadRaw(os.path.join(dirname, 'cnn-stories'))
documents = []
print("Cleaning...")
i = 0
for filename, documentText in documentDict.items():
tokens = tokenize(documentText)
tagged_tokens = pos_tag(tokens)
wnl = WordNetLemmatizer()
stemmedTokens = [wnl.lemmatize(word, wordnetPos(tag)).lower()
for word, tag in tagged_tokens]
documents.append({
'filename': filename,
'text': documentText,
'words': stemmedTokens,
})
if i % 100 == 0:
print(i)
i += 1
print("Writing to disk...")
with open(os.path.join(dirname, output_filename), 'w') as outfile:
outfile.write(json.dumps(documents))
print("Done!")
if __name__ == "__main__":
process()
authors: j2kun@users.noreply.github.com

blob_id: 666718bff6602e071ff4eec4558e2b234c5ebacb | directory_id: 28d971fe35e5cf9d5446b712c1100dbd1f5236aa
path: /boxx/tool/toolIo.py | content_id: f6e1f25e364856fa657792da3c4b9d16d166250c
detected_licenses: [] | license_type: no_license | repo_name: snoworld888/boxx
snapshot_id: d396833167c0d020a2066296490ae17c718ae2ea | revision_id: f494e265cc85790b3dc15aaa693055b7c783a932 | branch_name: refs/heads/master
visit_date: 2021-03-26T00:47:44.553795 | revision_date: 2020-03-13T09:36:20 | committer_date: 2020-03-13T09:36:20
github_id: null | star_events_count: 0 | fork_events_count: 0 | src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,365 | extension: py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os, sys
import warnings
from functools import wraps
from ..ylsys import py2, sysi
class BoxxException(Exception):
'''
root Exception for boxx
'''
pass
class BoxxWarning(Warning):
'''
root warninng for boxx
'''
pass
class OffScreenWarning(BoxxWarning):
pass
class Except():
'''
get traceback frame in with
>>> with excep:
>>> 1/0
>>> dira(excep)
'''
def __init__(self, deep=0):
self.deep = deep
def __enter__(self):
pass
def __exit__(self, typee, value, traceback):
deep = self.deep
while deep:
deep -= 1
traceback = traceback.tb_next
self.type = typee
self.value = self.v = value
self.traceback = self.t = traceback
self.frame = self.f = traceback.tb_frame
excep = Except()
def getExcept(fun):
'''
exec `fun()` and return (Exception, trace, frame)
'''
try:
exc = Except(1)
with exc:
fun()
except Exception as ee:
e = ee
return e, exc.traceback, exc.frame
def warn(msg, warnType=BoxxWarning, filename=None, line=None, module='boxx', blue=False):
'''
log a warning of type warnType warn will auto fill filename and line
'''
msg = '''%s
%s'''%(('\x1b[36m%s\x1b[0m' if blue else '%s')% 'warning from boxx', msg)
if filename is None or line is None:
f = sys._getframe(1)
c = f.f_code
filename = c.co_filename if filename is None else filename
line = c.co_firstlineno if line is None else line
warnings.warn_explicit(msg, warnType, filename, line, module)
warn1timeCache = {}
@wraps(warn)
def warn1time(msg, *l, **kv):
'''
log a warning of type warnType warn will auto fill filename and line
warn only one time
'''
if not warn1timeCache.get(msg):
warn(msg, *l, **kv)
warn1timeCache[msg] = True
getsize = os.path.getsize
def getsizem(path='.'):
'''
    Return the size of path in MB; directories are summed recursively
'''
if os.path.isdir(path):
return sum([getsizem(os.path.join(path, p)) for p in os.listdir(path)])
return os.path.getsize(path)/float(1024**2)
def fileJoinPath(_file_,path='.'):
'''
    Return the absolute path obtained by joining the directory of _file_ with the relative path
'''
from os.path import abspath,join,dirname
apath = abspath(join(dirname(abspath(_file_)),path))
return apath
def filename(path):
'''
    Strip the directory and the extension, keeping only the bare file name
'''
filen = name = os.path.basename(path)
if '.' in name:
filen = name[:name.rindex('.')]
return filen
def relfile(relative_path):
'''
Return a absolute version of a relative_path relative the __file__
'''
frame = sys._getframe(1)
if '__file__' not in frame.f_globals:
return relative_path
_file_ = frame.f_globals['__file__']
abspath = os.path.abspath(os.path.join(_file_, '..', relative_path))
return abspath
def listdir(path=None):
path = path or '.'
return os.listdir(path)
def openread(path, encoding='utf-8'):
'''
    Return the text content of the file at path
'''
if py2:
with open(path, 'r') as f:
return f.read()
with open(path, 'r', encoding=encoding) as f:
strr = f.read()
return strr
def openwrite(strr, path, mode='w', encoding='utf-8'):
'''
    Write strr to the file at path
'''
if py2:
with open(path, mode) as f:
f.write(strr)
return path
with open(path, mode, encoding=encoding) as f:
f.write(strr)
return path
def validFilename(filename, replaceBy='_'):
'''
return validate filename
'''
import re
if sysi.win:
rstr = r"[\/\\\:\*\?\"\<\>\|]" # '/ \ : * ? " < > |'
else:
rstr = r"[\/]" # ' / '
newName = re.sub(rstr, replaceBy, filename)
return newName
def first_exist_dir(*dirs):
"""Input dirs and return the first exist dir.
If none dir exist, return First
"""
if len(dirs) == 1 and isinstance(dirs[0], (list, tuple)):
dirs = dirs[0]
for dirr in dirs:
if os.path.isdir(dirr):
return dirr
return dirs[0]
def loadjson(path):
import json
with open(path, 'r') as f:
js = json.load(f)
return js
def savejson(obj, path, indent=None):
import json
import numpy as np
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
with open(path, 'w') as f:
json.dump(obj, f, indent=indent, cls=NumpyEncoder)
return path
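# Round-trip sketch of savejson/loadjson (the path "arr.json" is an example):
# NumpyEncoder turns the ndarray into a plain list on the way out.
#
#     import numpy as np
#     savejson({"a": np.arange(3)}, "arr.json", indent=2)
#     loadjson("arr.json")   # -> {"a": [0, 1, 2]}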
def replaceTabInPy(dirr='.'):
'''
    Replace every tab with 4 spaces in all .py files under dirr
'''
from glob import glob
from .toolLog import log
pys = glob(os.path.join(dirr, '*.py'))
for py in pys:
code = openread(py)
log(py,code.count('\t'))
new = code.replace('\t',' '*4)
openwrite(new, py)
def saveData(data, name='pickle_of_boxx', log=False): # save progress
    '''
    Save binary data with pickle
    '''
    import pickle
    if log:
        print('Writing data to file "'+name+'" under', os.path.abspath('.'), ', please wait...')
    with open(name, "wb") as f:
        pickle.dump(data, f)
    if log:
        print('\nFile "'+name+'" has been saved under', os.path.abspath('.'))
def loadData(name='pickle_of_boxx', log=False): # load data
    '''
    Load binary data saved with pickle
    '''
    import pickle
    if not os.path.isfile(name):
        print('File "'+name+'" does not exist under', os.path.abspath('.'), '- operation failed!')
        return None
    if log:
        print('Reading file "'+name+'" under', os.path.abspath('.'), '\nplease wait...')
    with open(name, "rb") as f:
        data = pickle.load(f)
    if log:
        print('File "'+name+'" loaded successfully!')
    return data
def browserOpen(url):
'''
open url with browser
if can't open browser raise warn
'''
import webbrowser
if not webbrowser.open_new_tab(url):
from boxx import warn
warn('''can't open url with web browser, plaese open url:"%s" in your browser'''%url)
if __name__ == "__main__":
pass
authors: ylxx@live.com