blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3 to 616) | content_id (string, len 40) | detected_licenses (list, len 0 to 112) | license_type (string, 2 classes) | repo_name (string, len 5 to 115) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 to 10.2M) | extension (string, 188 classes) | content (string, len 3 to 10.2M) | authors (list, len 1) | author_id (string, len 1 to 132)
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
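Each row below pairs repository metadata with the complete file text in the `content` column. A minimal sketch of iterating a dump with this schema using the Hugging Face `datasets` library; the dataset name `user/code-dump` is a hypothetical placeholder, not the real identifier of this dump:

```python
from datasets import load_dataset

# stream rows so the full dump never has to fit in memory
ds = load_dataset("user/code-dump", split="train", streaming=True)  # hypothetical name

for row in ds:
    # each schema column above is a plain dict key on the row
    if row["extension"] == "py" and not row["is_generated"]:
        print(row["repo_name"], row["path"], row["length_bytes"])
        print(row["content"][:200])  # first 200 characters of the file
        break
```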
36969d424f08e5d04a86eb5d2b92bcdee5857471 | b05761d771bb5a85d39d370c649567c1ff3eb089 | /venv/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/bleach/sanitizer.pyi | 5985b82f7cff9a9bc9aeed52c06537ad938744f2 | [] | no_license | JawshyJ/Coding_Practice | 88c49cab955eab04609ec1003b6b8c20f103fc06 | eb6b229d41aa49b1545af2120e6bee8e982adb41 | refs/heads/master | 2023-02-19T10:18:04.818542 | 2023-02-06T21:22:58 | 2023-02-06T21:22:58 | 247,788,631 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | pyi | /home/runner/.cache/pip/pool/d7/e7/3f/5aebc314311520dc92b9912e681abf45f6237fb515ef86fda6afb5e584 | [
"37465112+JawshyJ@users.noreply.github.com"
] | 37465112+JawshyJ@users.noreply.github.com |
c33efb9b294913ee7c0aaf0b4a477a1f9f6da1a0 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04031/s911055242.py | 2a1826d064d99651357766b4f3af57ce3138f525 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | N=int(input())
*A, = map(int, input().split())
mx = max(A)
mn = min(A)
ans = float('inf')
# brute force: try every integer i between min(A) and max(A) and keep the
# cheapest total cost sum((k - i)**2) over the elements of A
for i in range(mn, mx + 1):
    ans = min(ans, sum([(k - i)**2 for k in A]))
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
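A side note on the solution above: because f(i) = sum((k - i)**2) is convex in i, the best integer i is always the floor or ceiling of mean(A), so the scan over range(mn, mx + 1) can be replaced by just two candidates. A sketch of that shortcut (not part of the submitted solution):

```python
import math

def min_cost(A):
    m = sum(A) / len(A)          # real-valued minimizer of f(i) = sum((k - i)**2)
    lo = math.floor(m)           # by convexity the best integer is floor(m) or floor(m) + 1
    return min(sum((k - i) ** 2 for k in A) for i in (lo, lo + 1))

assert min_cost([4, 8]) == 8     # i = 6: (4 - 6)**2 + (8 - 6)**2 == 8
```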
1f4558a5b374477c18f6a0f04940c2e603d2344e | 0c1e0e5bda62ef78ad08bfea4806677bacaa89fd | /handlers/handlers/pattern.py | c52e35f756a1173211061def2c25ccd9fc5c51f1 | [
"BSD-3-Clause"
] | permissive | adammck/rapidsms-contrib-apps-dev | fb9d47e94e562c03053229fc8226002ba29994cf | b6ef8c9c3726ca0b706b984b93342e24c62fd430 | refs/heads/master | 2020-12-24T21:36:25.118552 | 2010-06-16T17:59:37 | 2010-06-16T17:59:37 | 722,199 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import re
from .base import BaseHandler
class PatternHandler(BaseHandler):
    """Dispatches an incoming message when its text matches the class-level
    regex ``pattern``; the captured groups are passed to ``handle``."""
@classmethod
def _pattern(cls):
if hasattr(cls, "pattern"):
return re.compile(cls.pattern, re.IGNORECASE)
@classmethod
def dispatch(cls, router, msg):
pattern = cls._pattern()
if pattern is None:
return False
match = pattern.match(msg.text)
if match is None:
return False
cls(router, msg).handle(*match.groups())
return True
| [
"adam.mckaig@gmail.com"
] | adam.mckaig@gmail.com |
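A minimal, self-contained sketch of how the dispatch mechanism above is used. The `FakeMsg` class and the assumption that `BaseHandler.__init__` simply stores `router` and `msg` are illustrative stand-ins, since the real base class lives in `handlers/handlers/base.py`:

```python
import re

class FakeMsg:                      # stand-in for a rapidsms message object
    def __init__(self, text):
        self.text = text

class BaseHandler:                  # assumed behaviour of the real .base.BaseHandler
    def __init__(self, router, msg):
        self.router = router
        self.msg = msg

class PatternHandler(BaseHandler):  # same logic as the module above
    @classmethod
    def _pattern(cls):
        if hasattr(cls, "pattern"):
            return re.compile(cls.pattern, re.IGNORECASE)

    @classmethod
    def dispatch(cls, router, msg):
        pattern = cls._pattern()
        if pattern is None:
            return False
        match = pattern.match(msg.text)
        if match is None:
            return False
        cls(router, msg).handle(*match.groups())
        return True

class AgeHandler(PatternHandler):
    pattern = r"i am (\d+) years old"

    def handle(self, age):          # receives match.groups()
        print("You are %s years old." % age)

AgeHandler.dispatch(router=None, msg=FakeMsg("I AM 42 YEARS OLD"))
# prints "You are 42 years old." and returns True (the match is case-insensitive)
```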
36bd81137e33b4e9f60e2937e945148dc44496cd | 6ec2f30240a7a28e6e2cc0472a4553eb02af983e | /tests/orm/relations/test_has_many.py | a61ca0fbcde7e8768103cf6528cb1471d5f29a20 | [
"MIT"
] | permissive | KarthickNamakkalKrishnan/eloquent | 9d2bbd4af50958eea88e04820bbeeb16cff822f2 | 0638b688d5fd0c1a46b7471dd465eeb4c2f84666 | refs/heads/master | 2020-03-27T13:23:07.091643 | 2015-05-24T18:41:58 | 2015-05-24T18:41:58 | 146,605,953 | 0 | 1 | MIT | 2018-08-29T13:42:50 | 2018-08-29T13:42:49 | null | UTF-8 | Python | false | false | 9,435 | py | # -*- coding: utf-8 -*-
import arrow
from flexmock import flexmock, flexmock_teardown
from ... import EloquentTestCase
from eloquent.query.builder import QueryBuilder
from eloquent.query.grammars import QueryGrammar
from eloquent.query.expression import QueryExpression
from eloquent.orm.builder import Builder
from eloquent.orm.model import Model
from eloquent.orm.relations import HasMany
from eloquent.orm.collection import Collection
class OrmHasManyTestCase(EloquentTestCase):
def tearDown(self):
flexmock_teardown()
def test_create_properly_creates_new_model(self):
relation = self._get_relation()
created = flexmock(Model(), save=lambda: True, set_attribute=lambda: None)
created.should_receive('save').once().and_return(True)
relation.get_related().should_receive('new_instance').once().with_args({'name': 'john'}).and_return(created)
created.should_receive('set_attribute').with_args('foreign_key', 1)
self.assertEqual(created, relation.create(name='john'))
def test_find_or_new_finds_model(self):
relation = self._get_relation()
model = flexmock()
model.foo = 'bar'
relation.get_query().should_receive('find').once().with_args('foo', ['*']).and_return(model)
model.should_receive('set_attribute').never()
self.assertEqual('bar', relation.find_or_new('foo').foo)
def test_find_or_new_returns_new_model_with_foreign_key_set(self):
relation = self._get_relation()
relation.get_query().should_receive('find').once().with_args('foo', ['*']).and_return(None)
model = flexmock()
model.foo = 'bar'
relation.get_related().should_receive('new_instance').once().with_args().and_return(model)
model.should_receive('set_attribute').once().with_args('foreign_key', 1)
self.assertEqual('bar', relation.find_or_new('foo').foo)
def test_first_or_new_finds_first_model(self):
relation = self._get_relation()
relation.get_query().should_receive('where').once().with_args({'foo': 'bar'}).and_return(relation.get_query())
model = flexmock()
model.foo = 'bar'
relation.get_query().should_receive('first').once().with_args().and_return(model)
model.should_receive('set_attribute').never()
self.assertEqual('bar', relation.first_or_new(foo='bar').foo)
def test_first_or_new_returns_new_model_with_foreign_key_set(self):
relation = self._get_relation()
relation.get_query().should_receive('where').once().with_args({'foo': 'bar'}).and_return(relation.get_query())
relation.get_query().should_receive('first').once().with_args().and_return(None)
model = flexmock()
model.foo = 'bar'
relation.get_related().should_receive('new_instance').once().with_args().and_return(model)
model.should_receive('set_attribute').once().with_args('foreign_key', 1)
self.assertEqual('bar', relation.first_or_new(foo='bar').foo)
def test_first_or_create_finds_first_model(self):
relation = self._get_relation()
relation.get_query().should_receive('where').once().with_args({'foo': 'bar'}).and_return(relation.get_query())
model = flexmock()
model.foo = 'bar'
relation.get_query().should_receive('first').once().with_args().and_return(model)
model.should_receive('set_attribute').never()
self.assertEqual('bar', relation.first_or_create(foo='bar').foo)
def test_first_or_create_returns_new_model_with_foreign_key_set(self):
relation = self._get_relation()
relation.get_query().should_receive('where').once().with_args({'foo': 'bar'}).and_return(relation.get_query())
relation.get_query().should_receive('first').once().with_args().and_return(None)
model = flexmock()
model.foo = 'bar'
relation.get_related().should_receive('new_instance').once().with_args({'foo': 'bar'}).and_return(model)
model.should_receive('save').once().and_return(True)
model.should_receive('set_attribute').once().with_args('foreign_key', 1)
self.assertEqual('bar', relation.first_or_create(foo='bar').foo)
def test_update_or_create_finds_first_model_and_updates(self):
relation = self._get_relation()
relation.get_query().should_receive('where').once().with_args({'foo': 'bar'}).and_return(relation.get_query())
model = flexmock()
model.foo = 'bar'
relation.get_query().should_receive('first').once().with_args().and_return(model)
relation.get_related().should_receive('new_instance').never()
model.should_receive('fill').once().with_args({'foo': 'baz'})
model.should_receive('save').once()
self.assertEqual('bar', relation.update_or_create({'foo': 'bar'}, {'foo': 'baz'}).foo)
def test_update_or_create_creates_new_model_with_foreign_key_set(self):
relation = self._get_relation()
relation.get_query().should_receive('where').once().with_args({'foo': 'bar'}).and_return(relation.get_query())
relation.get_query().should_receive('first').once().with_args().and_return(None)
model = flexmock()
model.foo = 'bar'
relation.get_related().should_receive('new_instance').once().and_return(model)
model.should_receive('fill').once().with_args({'foo': 'baz'})
model.should_receive('save').once()
model.should_receive('set_attribute').once().with_args('foreign_key', 1)
self.assertEqual('bar', relation.update_or_create({'foo': 'bar'}, {'foo': 'baz'}).foo)
def test_update_updates_models_with_timestamps(self):
relation = self._get_relation()
relation.get_related().should_receive('uses_timestamps').once().and_return(True)
now = arrow.get()
relation.get_related().should_receive('fresh_timestamp').once().and_return(now)
relation.get_query().should_receive('update').once().with_args({'foo': 'bar', 'updated_at': now}).and_return('results')
self.assertEqual('results', relation.update(foo='bar'))
def test_relation_is_properly_initialized(self):
relation = self._get_relation()
model = flexmock(Model())
model.should_receive('set_relation').once().with_args('foo', Collection)
models = relation.init_relation([model], 'foo')
self.assertEqual([model], models)
def test_eager_constraints_are_properly_added(self):
relation = self._get_relation()
relation.get_query().get_query().should_receive('where_in').once().with_args('table.foreign_key', [1, 2])
model1 = OrmHasOneModelStub()
model1.id = 1
model2 = OrmHasOneModelStub()
model2.id = 2
relation.add_eager_constraints([model1, model2])
def test_models_are_properly_matched_to_parents(self):
relation = self._get_relation()
result1 = OrmHasOneModelStub()
result1.foreign_key = 1
result2 = OrmHasOneModelStub()
result2.foreign_key = 2
result3 = OrmHasOneModelStub()
result3.foreign_key = 2
model1 = OrmHasOneModelStub()
model1.id = 1
model2 = OrmHasOneModelStub()
model2.id = 2
model3 = OrmHasOneModelStub()
model3.id = 3
relation.get_related().should_receive('new_collection').replace_with(lambda l: Collection(l))
models = relation.match([model1, model2, model3], Collection([result1, result2, result3]), 'foo')
self.assertEqual(1, models[0].foo[0].foreign_key)
self.assertEqual(1, len(models[0].foo))
self.assertEqual(2, models[1].foo[0].foreign_key)
self.assertEqual(2, models[1].foo[1].foreign_key)
self.assertEqual(2, len(models[1].foo))
self.assertFalse(hasattr(models[2], 'foo'))
def test_relation_count_query_can_be_built(self):
relation = self._get_relation()
query = flexmock(QueryBuilder(None, QueryGrammar(), None))
builder = Builder(query)
builder.get_query().should_receive('select').once()
relation.get_parent().should_receive('get_table').and_return('table')
builder.should_receive('where').once().with_args('table.foreign_key', '=', QueryExpression)
parent_query = flexmock(QueryBuilder(None, None, None))
relation.get_query().should_receive('get_query').and_return(parent_query)
grammar = flexmock()
parent_query.should_receive('get_grammar').once().and_return(grammar)
grammar.should_receive('wrap').once().with_args('table.id')
relation.get_relation_count_query(builder, builder)
def _get_relation(self):
flexmock(Builder)
query = flexmock(QueryBuilder(None, QueryGrammar(), None))
builder = Builder(query)
builder.should_receive('where').with_args('table.foreign_key', '=', 1)
related = flexmock(Model())
builder.should_receive('get_model').and_return(related)
parent = flexmock(Model())
parent.should_receive('get_attribute').with_args('id').and_return(1)
parent.should_receive('get_created_at_column').and_return('created_at')
parent.should_receive('get_updated_at_column').and_return('updated_at')
parent.should_receive('new_query').and_return(builder)
return HasMany(builder, parent, 'table.foreign_key', 'id')
class OrmHasOneModelStub(Model):
pass
| [
"sebastien.eustace@gmail.com"
] | sebastien.eustace@gmail.com |
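The test case above leans entirely on flexmock's expectation API; a minimal standalone sketch of the calls it relies on (`should_receive` / `with_args` / `and_return` / `once`, verified by `flexmock_teardown`):

```python
from flexmock import flexmock, flexmock_teardown

class Greeter:
    def greet(self, name):
        return "hello " + name

g = Greeter()
# expect exactly one call to greet('world'), stubbed to return 'hi world'
flexmock(g).should_receive('greet').with_args('world').once().and_return('hi world')
assert g.greet('world') == 'hi world'
flexmock_teardown()  # raises if the expectation was not met exactly once
```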
9e153351e31534753951fd99189961e27ffe9bac | 53181572c4b22df4b569a9901bcd5347a3459499 | /ceit_191221/py200509_zhou/module_app.py | 7dd4e30336f697cc8816410ec7f0136415944b69 | [] | no_license | edu-athensoft/ceit4101python_student | 80ef067b77421fce76d04f778d5c6de8b12f676c | 33cfa438c062d45e8d246b853e93d3c14b92ff2d | refs/heads/master | 2020-07-30T01:04:21.084384 | 2020-07-27T02:21:57 | 2020-07-27T02:21:57 | 210,027,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | """
"""
import math
import ceit_191221.py200509_zhou.module_arithmetic as arithm
import ceit_191221.py200509_zhou.module_logic as logic
print(math.sqrt(36))
print(math.e)
print("=== test arithmetic module ===")
print(arithm.add(1,2))
print(arithm.sub(1,2))
print(arithm.PI)
print(arithm.e)
print("=== test logic module ===")
print(logic.logic_and(True, False))
print(logic.logic_or(5>3, 3>4))
print(logic.logic_not(1!=1))
| [
"inf.athensoft@hotmail.com"
] | inf.athensoft@hotmail.com |
cfcb1f1d9d54c9b98db16f1e70bb0452ad49661b | 61747f324eaa757f3365fd7bf5ddd53ea0db47d1 | /casepro/contacts/migrations/0012_field_is_visible.py | 2d3bc7e266cf8daf0d1229fddf9e9e442fa74db0 | [
"BSD-3-Clause"
] | permissive | BlueRidgeLabs/casepro | f8b0eefa8f961dd2fdb5da26a48b619ebc1f8c12 | 8ef509326f3dfa80bb44beae00b60cc6c4ac7a24 | refs/heads/master | 2022-01-24T09:01:18.881548 | 2017-12-05T18:46:05 | 2017-12-05T18:49:42 | 113,502,588 | 0 | 0 | null | 2017-12-07T21:57:37 | 2017-12-07T21:57:37 | null | UTF-8 | Python | false | false | 471 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contacts', '0011_migrate_suspend_groups'),
]
operations = [
migrations.AddField(
model_name='field',
name='is_visible',
field=models.BooleanField(default=False, help_text='Whether this field is visible to partner users'),
),
]
| [
"rowanseymour@gmail.com"
] | rowanseymour@gmail.com |
a00d084afd5b74ce9fe9afdb606d0f19d08dae62 | ae56a9df49059c785e10e34b55ea827616950940 | /progress/models/iteration.py | a0aa1b0e5b1fbf251d3b81df0f97f7e397d834cd | [] | no_license | robinharms/Progress | 04a7ccc11f44378209b3492fad34e79ad7dc9e9a | ff31ed3a4a67e831636668d326d7218e0ff0e4b8 | refs/heads/master | 2021-01-15T13:11:27.267833 | 2011-11-13T15:49:06 | 2011-11-13T15:49:06 | 2,527,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | from betahaus.pyracont import BaseFolder
from betahaus.pyracont.decorators import content_factory
from zope.interface import implements
from progress import ProgressMF as _
from progress.models.interfaces import IIteration
@content_factory('Iteration')
class Iteration(BaseFolder):
""" Iteration. """
implements(IIteration)
    # trailing comma needed: ('Project') is just a parenthesized string
    allowed_contexts = ('Project',)
content_type = 'Iteration'
display_name = _(u"Iteration")
schemas = {'add':'iteration', 'edit':'iteration'}
| [
"robin@betahaus.net"
] | robin@betahaus.net |
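A quick demonstration of why the trailing comma added to `allowed_contexts` above matters:

```python
# parentheses alone do not make a tuple; the comma does
print(type(('Project')))   # <class 'str'>   -> iterating yields 'P', 'r', 'o', ...
print(type(('Project',)))  # <class 'tuple'> -> iterating yields 'Project'
```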
2c8d4504af1d3a0bc4d263d58de39cbe265e4b23 | b8856b7ec201fad5621593f93b7a8d9844276234 | /models/warmtune_model.py | be3ba4b483ec8605ebee81059f2cb3e11f422ceb | [] | no_license | wangjianbing1998/RLLL | cbb2e7724e9a140115e70886b39423a7002e1715 | 413c83effe8dd60de756362b854fcab978fe530c | refs/heads/master | 2023-06-03T09:29:18.165851 | 2021-06-21T08:35:59 | 2021-06-21T08:35:59 | 336,836,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,540 | py | from losses import create_loss
from models.base_model import BaseModel
from networks import create_net
class WarmtuneModel(BaseModel):
@staticmethod
def modify_commandline_options(parser):
"""Add new dataset-specific options, and rewrite default values for existing options.
num_classes is the number of classes per task
        for example, num_classes = [10,10,10] means that the number of classes on task 1 is 10, and so on.
Parameters:
parser -- original option parser
Returns:
the modified parser.
"""
        parser.add_argument('--net_name', type=str, default="alexnet", choices=["alexnet", "imagenet"],
                            help='network architecture, chosen from alexnet|imagenet', )
        parser.add_argument('--loss_name', type=str, default="total", choices=["total"],
                            help='loss function, chosen from total', )
        parser.add_argument('--taskdataset_name', type=str, default="total", choices=["total"],
                            help='task dataset, chosen from total', )
return parser
@staticmethod
def default_value(opt):
return opt
def __init__(self, opt):
BaseModel.__init__(self, opt) # call the initialization method of BaseModel
# specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
self.visual_names = []
# specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
# you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.
# define networks; you can use opt.isTrain to specify different behaviors for training and test.
self.net_names = ["main"]
self.net_main = create_net(opt)
self.loss_criterion = create_loss(opt)
self.init_optimizers(opt)
self.loss_names = getattr(self.loss_criterion, "loss_names")
"""
unfreeze shared_cnn_layers, shared_fc_layers and other_layers,
calculate other loss
not backward
"""
self.plus_other_loss = False
self.need_backward = False
self.max_step = 1
def setup(self, task_index=0, step=1):
if step == 1:
BaseModel.setup(self, task_index) # call the initialization method of BaseModel
self.shared_fc_layers = True
self.shared_cnn_layers = False
self.other_layers = False
self.task_layer = True
else:
            raise ValueError(f'warmtune: expected 1 <= step <= {self.max_step}, but got {step}')
| [
"2553627958@qq.com"
] | 2553627958@qq.com |
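A minimal sketch (not part of the repo) showing how `modify_commandline_options` composes with a plain argparse parser; only the static method is exercised, so no networks or losses are constructed:

```python
import argparse

parser = argparse.ArgumentParser()
parser = WarmtuneModel.modify_commandline_options(parser)
opt = parser.parse_args(['--net_name', 'imagenet'])
print(opt.net_name, opt.loss_name, opt.taskdataset_name)  # imagenet total total
```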
e30064cad618b7334b32d7eca60aab02f3fda11c | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/4c03abfcd6c043968b6580de192f54d9.py | 960f45654af9d4c711d83ebf96542fe63d2a5e97 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 472 | py | # -*- coding: utf-8 -*-
def hey(userString):
"""
Takes a string, and returns a string that represents Bob's reply.
Args:
userString (string): a valid string.
Returns:
string: Bob's reply.
"""
if not userString or userString.isspace():
return "Fine. Be that way!"
elif userString.isupper():
return "Whoa, chill out!"
elif userString.endswith('?'):
return "Sure."
return "Whatever."
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
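The four branches of `hey` above in action:

```python
assert hey("") == "Fine. Be that way!"           # empty or whitespace-only input
assert hey("WATCH OUT!") == "Whoa, chill out!"   # shouting (all upper case)
assert hey("Is it on?") == "Sure."               # a question
assert hey("Tom-ay-to.") == "Whatever."          # anything else
```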
bc3619e0f18ea5c12a6981707fbeecc720133446 | 8b675ca56bae3a1b622eff991f8786963712d12f | /notebooks/python/modis_level1b_read.py | 3f3dc7eeb0c02f4249650ee94fb5e45816ee16c8 | [
"MIT"
] | permissive | KayhanB21/a301_code | b4dd7d8bdb2a4170211965abee707f48da4cbb23 | 4237b4e538bd999f5ac1b20f6b25b4c4e03bb09c | refs/heads/master | 2021-09-23T14:39:14.333193 | 2018-09-24T19:44:50 | 2018-09-24T19:44:50 | 150,385,043 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,399 | py |
# coding: utf-8
# In[1]:
from pathlib import Path
from pyhdf.SD import SD, SDC
import pprint
import numpy as np
from matplotlib import pyplot as plt
import context
from a301.utils.data_read import download
# # Introduction
#
# This notebook assumes that you have gone to the LAADS DAAC archive and downloaded a Modis Level1b 5 minute granule from the Aqua satellite (a MYD021KM hdf file). Below we use the pyhdf module to read a single channel (channel 30) centered at 9.7 microns according to [the Modis channel listing](https://modis.gsfc.nasa.gov/about/specifications.php). We plot the raw counts from that channel using the
# [matplotlib imshow](https://matplotlib.org/examples/images_contours_and_fields/image_demo.html) function to display the raw image
#
# If you don't have a MYD021KM file you can grab mine by changing
#
# get_data=False
#
# to True in the next cell.
#
#
# In[2]:
get_data=False
if get_data:
modis_file="MYD021KM.A2013222.2105.061.2018047235850.hdf"
download(modis_file)
# # Reading modis data
# The general layout of a Modis data file is given in the [modis users guide](https://www.dropbox.com/s/ckd3dv4n7nxc9p0/modis_users_guide.pdf?dl=0) but we'll only need a fraction of the information in that manual. Modis data is written in [hdf4 format](https://portal.hdfgroup.org/display/HDF4/HDF4), which in python can be read with the [pyhdf module](http://hdfeos.github.io/pyhdf/modules/SD.html#sd-module-key-features).
#
#
# ## Installing pyhdf
#
# So far we have used the [anaconda default channel](https://conda.io/docs/user-guide/tasks/manage-channels.html) to install miniconda and other libraries. Not every conda package is in the default channel. For example, I have my own channel called [phaustin](https://anaconda.org/phaustin/repo) which I use to distribute software I write. Much of the conda software for the scientific community is distributed in the [conda-forge channel](https://conda-forge.org/) -- this includes the pyhdf module.
#
# To install pyhdf from the conda-forge channel, the conda command looks like:
#
# conda install -c conda-forge pyhdf
#
# On Macs, you will also need to update your jpeg library to match pyhdf:
#
# conda install -c conda-forge jpeg
# ## Downloading the data and reading the file
#
# I downloaded my Modis hdf file into the Downloads folder in my home directory. The pathlib.Path object gives me a way to find that folder, regardless of who I am or whether I'm running on Windows or a Mac.
# ## Navigating the file system
#
# I need to tell python where my satellite data is kept. One possibility is to leave it
# in the browser Downloads folder, which is usually beneath your home directory.
# Here's how to do this with pathlib:
# In[3]:
home = Path.home()
print(home)
data_dir = home / Path("Downloads")
print(data_dir)
# A better choice would be someplace within the a301 folder tree. I know this notebook is
# in the tree, so I can create a new folder called a301_code/data, and since I know I am
# currently in a301/notebooks, I can find it like this:
# In[4]:
#Path.cwd finds the "current working directory"
this_dir=Path.cwd()
#move up one folder and down to data
data_dir = this_dir.parent / Path('data')
# pathlib Path objects have a ton of features. One of these is "globbing", which means using wildcard characters to find groups of files.
#
# See http://pbpython.com/pathlib-intro.html and https://docs.python.org/3.6/library/pathlib.html
#
# The next cell shows how globbing is used:
# In[5]:
hdf_files=list(data_dir.glob("MYD021KM*2110*.hdf"))
hdf_files
# ## Using pyhdf to get metadata
#
# I can convert the filename from a Path object to a string object and pass it to pyhdf.SD
# to find out how many datasets and attributes there are in the file
#
# In the cell below I use [f-strings](https://realpython.com/python-f-strings/) to simplify the print command,
# and split the string up across multiple lines by enclosing it in parentheses. This works because
# when Python sees adjacent string literals that aren't separated by a comma, it concatenates them together.
# The character \n means "newline"
#
# Note that I need to construct the full path to the data file so pyhdf can find it. pyhdf was
# written before pathlib (which was introduced in Python 3.4),
# so I need to convert the Path object to a simple string using str()
# In[6]:
file_name = str(data_dir / Path(hdf_files[0]))
print(f'reading {file_name}')
the_file = SD(file_name, SDC.READ)
stars='*'*50
print((f'\n{stars}\nnumber of datasets, number of attributes'
f'={the_file.info()}\n{stars}\n'
       f'\nHere is the help file for the info function:\n'))
help(SD.info)
# ## Find all the datasets using pyhdf.SD.datasets()
#
# The datasets method creates a dictionary holding pointers to the 31 datasets.
# List them below:
# In[7]:
datasets_dict = the_file.datasets()
for idx,sds in enumerate(datasets_dict.keys()):
print(idx,sds)
# ## open one of the datasets (number 4, EV_1KM_Emissive) and get its shape and data type
# In[8]:
longwave_data = the_file.select('EV_1KM_Emissive') # select sds
print(longwave_data.info())
help(longwave_data.info)
# ## Get the first row of the first channel and find its numpy dtype
#
# (uint16 is "unsigned 16 bit integer", which is how the modis raw counts are stored)
# In[9]:
data_row = longwave_data[0,0,:] # get sds data
print(data_row.shape,data_row.dtype)
# ## get all the rows and columns for the first channel
# In[10]:
longwave_data[0,:,:]
# ## Find the attributes for EV_1KM_Emissive
# In[11]:
pprint.pprint(longwave_data.attributes() )
# ## Print the first 1000 characters of the Metadata.0 string
#
# Date, orbit number, etc. are stored in a long string attribute called 'StructMetadata.0'. The \t character is a tab stop.
# In[12]:
pprint.pprint(the_file.attributes()['StructMetadata.0'][:1000])
# # Now plot the data using imshow
# In[13]:
longwave_bands = the_file.select('Band_1KM_Emissive')
# In[14]:
longwave_bands.attributes()
# Note that only channels 20 to 36 are in the Emissive dataset (see [the Modis channel listing](https://modis.gsfc.nasa.gov/about/specifications.php))
# ## find the index for channel 30
#
# Count the following and convince yourself that channel 30 is index 9, starting from 0
# In[15]:
band_nums=longwave_bands.get()
print(f'here are the modis channels in the emissive dataset \n{band_nums}')
# ## Let python figure this out
#
# We don't want to have to count, so use numpy.searchsorted to find the index with value closest to 30
#
# We need to turn that index (type int64) into a plain python int so it can be used to specify the channel
# (float doesn't work)
# In[16]:
ch30_index=np.searchsorted(band_nums,30.)
print(ch30_index.dtype)
ch30_index = int(ch30_index)
print(f'channel 30 is located at index {ch30_index}')
# ## Read channel 30 at index 9 into a numpy array of type uint16
# In[17]:
ch30_data = longwave_data[ch30_index,:,:]
print(ch30_data.shape)
print(ch30_data.dtype)
# ## Plot the channel 30 image
#
# Use [imshow with a colorbar](https://matplotlib.org/gallery/color/colorbar_basics.html#sphx-glr-gallery-color-colorbar-basics-py)
# In[18]:
fig,ax = plt.subplots(1,1,figsize = (10,14))
CS=ax.imshow(ch30_data)
cax=fig.colorbar(CS)
ax.set_title('uncalibrated counts')
#
# add a label to the colorbar and flip it around 270 degrees
#
out=cax.ax.set_ylabel('Chan 30 raw counts')
out.set_verticalalignment('bottom')
out.set_rotation(270)
print(ch30_data.shape)
# # For Wednesday (don't need to hand in)
#
# To turn the raw counts into pixel radiances, you need to apply equation 5.8 on p. 36 of the
# [modis users guide](https://www.dropbox.com/s/ckd3dv4n7nxc9p0/modis_users_guide.pdf?dl=0):
#
# $Radiances = (RawData - offset) \times scale$
#
# We have just read the RawData, the offset and the scale are stored in two vectors that are attributes of the Emissive dataset. Make a version of the figure above, but plot Channel 30 radiance (in W/m^2/micron/sr), rather than raw counts.
#
# Hint: Here is how you get the scale and offset for Channel 30.
#
#
#
# In[19]:
scales=longwave_data.attributes()['radiance_scales']
offsets=longwave_data.attributes()['radiance_offsets']
ch30_scale=scales[ch30_index]
ch30_offset=offsets[ch30_index]
print(f'scale: {ch30_scale}, offset: {ch30_offset}')
# In[20]:
ch30_calibrated =(ch30_data - ch30_offset)*ch30_scale
# In[21]:
fig,ax = plt.subplots(1,1,figsize = (10,14))
CS=ax.imshow(ch30_calibrated)
cax=fig.colorbar(CS)
ax.set_title('Channel 30 radiance')
#
# add a label to the colorbar and flip it around 270 degrees
#
out=cax.ax.set_ylabel('Chan radiance $(W\,m^{-2}\,\mu m^{-1}\,sr^{-1})$')
out.set_verticalalignment('bottom')
out.set_rotation(270)
# # Write the calibrated channel out for safekeeping
#
# Follow the example here: https://hdfeos.org/software/pyhdf.php
# In[24]:
# Create an HDF file
outname="ch30_out.hdf"
sd = SD(outname, SDC.WRITE | SDC.CREATE)
# Create a dataset
sds = sd.create("ch30", SDC.FLOAT64, ch30_calibrated.shape)
# Set the fill value for the dataset
sds.setfillvalue(0)
# Set dimension names
dim1 = sds.dim(0)
dim1.setname("row")
dim2 = sds.dim(1)
dim2.setname("col")
# Assign an attribute to the dataset
sds.units = "W/m^2/micron/sr"
# Write data
sds[:,:] = ch30_calibrated
# Close the dataset
sds.endaccess()
# Flush and close the HDF file
sd.end()
# In[26]:
get_ipython().system('ls')
# # Move the file to data_dir
# In[23]:
local_file = Path.cwd() / Path(outname)
to_file = data_dir / Path(outname)
local_file.rename(to_file)
| [
"paustin@eos.ubc.ca"
] | paustin@eos.ubc.ca |
e830b157069bcfde9667a89f80d9fb613801e550 | a44d63a9bf64b2206849fa152d9202c62ccb4ec1 | /apistar/commands/__init__.py | a5e5204ad1311634ada06fae30517f57d0b78dd9 | [
"BSD-3-Clause"
] | permissive | hjwp/apistar | 9df7240322eebe43f6eee1e7be397f1105882d5c | cc1dba1721bcaf678d1a6f5631ec3bab2211b3cd | refs/heads/master | 2021-01-18T23:05:25.155566 | 2017-04-03T15:13:50 | 2017-04-03T15:17:32 | 87,088,116 | 1 | 0 | null | 2017-04-03T15:12:26 | 2017-04-03T15:12:26 | null | UTF-8 | Python | false | false | 84 | py | from apistar.commands.base import new, run, test
__all__ = ['new', 'run', 'test']
| [
"tom@tomchristie.com"
] | tom@tomchristie.com |
9b6e80564dc6b5b1ea505ca4a5332edb4a1f7e84 | 13e16bf7e622abd858594bff8a4f3e3616700817 | /python/ccxt/async_support/hbtc.py | a963c1b4723a5e581d817db4d98585614192c749 | [
"MIT"
] | permissive | TelosGlobal/ccxt | de58effbe7fc100df1fb86d95bed4af900d0ce5b | afabfa2992ed68b1604e2775d462fc05c298396f | refs/heads/master | 2023-04-07T03:15:21.633396 | 2021-04-09T17:13:20 | 2021-04-09T17:13:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79,433 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import BadResponse
from ccxt.base.errors import NullResponse
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class hbtc(Exchange):
def describe(self):
return self.deep_extend(super(hbtc, self).describe(), {
'id': 'hbtc',
'name': 'HBTC',
'countries': ['CN'],
'rateLimit': 2000,
'version': 'v1',
'has': {
'cancelOrder': True,
'CORS': False,
'createOrder': True,
'fetchAccounts': True,
'fetchBalance': True,
'fetchBidAsk': True,
'fetchBidsAsks': True,
'fetchClosedOrders': True,
'fetchCurrencies': False,
'fetchDepositAddress': False,
'fetchDeposits': True,
'fetchLedger': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingLimits': True,
'fetchWithdrawals': True,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'3m': '3m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
'3d': '3d',
'1w': '1w',
'1M': '1M',
},
'urls': {
                'logo': 'https://user-images.githubusercontent.com/51840849/80134449-70663300-85a7-11ea-8942-e204cdeaab5d.jpg', # exchange logo
                'api': {
                    'quote': 'https://api.hbtc.com/openapi/quote', # market data API endpoint
                    'contract': 'https://api.hbtc.com/openapi/contract', # contract API endpoint
                    'option': 'https://api.hbtc.com/openapi/option', # option API endpoint
                    'public': 'https://api.hbtc.com/openapi', # public API endpoint
                    'private': 'https://api.hbtc.com/openapi', # private API endpoint
                    'zendesk': 'https://hbtc.zendesk.com/hc/en-us',
                },
                'www': 'https://www.hbtc.com', # company homepage
                'referral': 'https://www.hbtc.com/register/O2S8NS', # referral link
                'doc': 'https://github.com/bhexopen/BHEX-OpenApi/tree/master/doc', # OpenAPI documentation
                'fees': 'https://hbtc.zendesk.com/hc/zh-cn/articles/360009274694', # fee schedule
},
'api': {
'public': {
'get': [
'ping',
'time',
                        'brokerInfo', # query current broker trading rules and symbol information
'getOptions',
],
},
'quote': {
'get': [
                        'depth', # get order book depth
                        'depth/merged',
                        'trades', # get the latest trades
                        'klines', # get candlestick (kline) data
                        'ticker/24hr', # get 24-hour price change statistics
                        'ticker/price',
                        'ticker/bookTicker',
                        'contract/index', # get the contract underlying index price
                        'contract/depth', # get contract order book depth
                        'contract/depth/merged',
                        'contract/trades', # get the latest contract trades
                        'contract/klines', # get contract candlestick data
'contract/ticker/24hr',
'option/index',
'option/depth',
'option/depth/merged',
'option/trades',
'option/klines',
'option/ticker/24hr',
],
},
'contract': {
'get': [
# public
'insurance',
                        'fundingRate', # get funding rate information
                        # private
                        'openOrders', # query current open contract orders
                        'historyOrders', # query contract order history
                        'getOrder', # query contract order details
                        'myTrades', # query contract trade history
                        'positions', # query current contract positions
                        'account', # query contract account information
                    ],
                    'post': [
                        'order', # create a contract order
                        'modifyMargin', # modify margin
                    ],
                    'delete': [
                        'order/cancel', # cancel a contract order
'order/batchCancel',
],
},
'option': {
'get': [
'openOrders',
'positions',
'historyOrders',
# 'getOrder',
'myTrades',
'settlements',
'account',
],
'post': [
'order',
],
'delete': [
'order/cancel',
],
},
'private': {
'get': [
                        'order', # query an order
                        'openOrders', # query current open orders
                        'historyOrders', # query order history
                        'account', # get current account information
                        'myTrades', # query trade history
'depositOrders',
'withdrawalOrders',
'withdraw/detail',
'balance_flow',
],
'post': [
                        'order', # create a new order
'order/test',
'userDataStream',
'subAccount/query',
'transfer',
'user/transfer',
'withdraw',
],
'put': [
'userDataStream',
],
'delete': [
                        'order', # cancel an order
'userDataStream',
],
},
},
'precisionMode': TICK_SIZE,
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.001,
'taker': 0.001,
},
},
'exceptions': {
'exact': {
# general server or network errors
                    '-1000': ExchangeError, # An unknown error occurred while processing the request
'-1001': ExchangeError, # Internal error, unable to process your request. Please try again
                    '-1002': AuthenticationError, # You are not authorized to execute this request. The request needs an API key included. We suggest that an API key be included in any request
'-1003': RateLimitExceeded, # Too many requests, please use the websocket for live updates
'-1004': BadRequest,
'-1005': PermissionDenied,
                    '-1006': BadResponse, # An unexpected response was received from the message bus. Execution status unknown. The OPEN API server found an exception while executing the request. Please report to customer service
'-1007': RequestTimeout, # Timeout waiting for response from backend server. Send status unknown, execution status unknown
'-1014': InvalidOrder, # Unsupported order combination
'-1015': RateLimitExceeded, # Reach the rate limit.Please slow down your request speed
'-1016': ExchangeNotAvailable, # This service is no longer available
'-1020': NotSupported, # This operation is not supported
                    '-1021': BadRequest, # Timestamp for this request is outside of the recvWindow
                    '-1022': AuthenticationError, # Signature for this request is not valid
# request issues
'-1100': BadRequest, # Illegal characters found in a parameter
                    '-1101': BadRequest, # Too many parameters sent for this endpoint
'-1102': BadRequest, # A mandatory parameter was not sent, was empty/null, or malformed
'-1103': BadRequest, # An unknown parameter was sent
'-1104': BadRequest, # Not all sent parameters were read
'-1105': BadRequest, # A parameter was empty
'-1106': BadRequest, # A parameter was sent when not required
'-1111': BadRequest, # Precision is over the maximum defined for self asset
'-1112': NullResponse, # No orders on book for symbol
'-1114': InvalidOrder, # TimeInForce parameter sent when not required
'-1115': InvalidOrder, # Invalid timeInForce
'-1116': InvalidOrder, # Invalid orderType
'-1117': InvalidOrder, # Invalid side
'-1118': InvalidOrder, # New client order ID was empty
'-1119': InvalidOrder, # Original client order ID was empty
'-1120': BadRequest, # Invalid interval
'-1121': BadSymbol, # Invalid symbol
'-1125': AuthenticationError, # This listenKey does not exist
'-1127': BadRequest, # Lookup interval is too big
'-1128': BadRequest, # Combination of optional parameters invalid
'-1130': BadRequest, # Invalid data sent for a parameter
'-1131': InsufficientFunds,
'-1132': InvalidOrder, # Order price too high
                    '-1133': InvalidOrder, # Order price lower than the minimum, please check general broker info
                    '-1134': InvalidOrder, # Order price decimal too long, please check general broker info
'-1135': InvalidOrder, # Order quantity too large
'-1136': InvalidOrder, # Order quantity lower than the minimum
'-1137': InvalidOrder, # Order quantity decimal too long
'-1138': InvalidOrder, # Order price exceeds permissible range
'-1139': InvalidOrder, # Order has been filled
'-1140': InvalidOrder, # Transaction amount lower than the minimum
'-1141': InvalidOrder, # Duplicate clientOrderId
'-1142': InvalidOrder, # Order has been canceled
'-1143': OrderNotFound, # Cannot be found on order book
'-1144': InvalidOrder, # Order has been locked
'-1145': InvalidOrder, # This order type does not support cancellation
'-1146': RequestTimeout, # Order creation timeout
'-1147': RequestTimeout, # Order cancellation timeout
'-1149': InvalidOrder, # Create order failed
'-1187': InvalidAddress, # Withdrawal address not in whitelist
'-2010': InvalidOrder, # NEW_ORDER_REJECTED
'-2011': InvalidOrder, # CANCEL_REJECTED
'-2013': OrderNotFound, # Order does not exist
'-2014': AuthenticationError, # API-key format invalid
'-2015': AuthenticationError, # Invalid API-key, IP, or permissions for action
'-2016': ExchangeError, # No trading window could be found for the symbol. Try ticker/24hrs instead
},
},
# exchange-specific options
'options': {
'fetchTickers': {
'method': 'quoteGetTicker24hr',
},
},
})
async def fetch_time(self, params={}):
response = await self.publicGetTime(params)
#
# {
# "serverTime": 1527777538000
# }
#
return self.safe_integer(response, 'serverTime')
def parse_market(self, market, type='spot'):
filters = self.safe_value(market, 'filters', [])
id = self.safe_string(market, 'symbol')
baseId = self.safe_string(market, 'baseAsset')
quoteId = self.safe_string(market, 'quoteAsset')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
spot = True
future = False
option = False
inverse = False
if type == 'future':
symbol = id
spot = False
future = True
inverse = self.safe_value(market, 'inverse', False)
baseId = self.safe_string(market, 'underlying')
base = self.safe_currency_code(baseId)
elif type == 'option':
symbol = id
spot = False
option = True
amountMin = None
amountMax = None
priceMin = None
priceMax = None
costMin = None
for j in range(0, len(filters)):
filter = filters[j]
filterType = self.safe_string(filter, 'filterType')
if filterType == 'LOT_SIZE':
amountMin = self.safe_number(filter, 'minQty')
amountMax = self.safe_number(filter, 'maxQty')
if filterType == 'PRICE_FILTER':
priceMin = self.safe_number(filter, 'minPrice')
priceMax = self.safe_number(filter, 'maxPrice')
if filterType == 'MIN_NOTIONAL':
costMin = self.safe_number(filter, 'minNotional')
if (costMin is None) and (amountMin is not None) and (priceMin is not None):
costMin = amountMin * priceMin
precision = {
'price': self.safe_number_2(market, 'quotePrecision', 'quoteAssetPrecision'),
'amount': self.safe_number(market, 'baseAssetPrecision'),
}
limits = {
'amount': {
'min': amountMin,
'max': amountMax,
},
'price': {
'min': priceMin,
'max': priceMax,
},
'cost': {
'min': costMin,
'max': None,
},
}
return {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'type': type,
'spot': spot,
'future': future,
'option': option,
'inverse': inverse,
'precision': precision,
'limits': limits,
'info': market,
}
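    # Worked example of the filter parsing above, using the BTCUSDT sample
    # shown in the fetch_markets docstring below:
    #   LOT_SIZE     {"minQty": "0.0005", "maxQty": "100000.00000000"}   -> limits['amount'] = {'min': 0.0005, 'max': 100000.0}
    #   PRICE_FILTER {"minPrice": "0.01", "maxPrice": "100000.00000000"} -> limits['price'] = {'min': 0.01, 'max': 100000.0}
    #   MIN_NOTIONAL {"minNotional": "5"}                                -> limits['cost'] = {'min': 5.0}
    # If MIN_NOTIONAL were absent, the cost minimum would fall back to
    # minQty * minPrice = 0.0005 * 0.01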
async def fetch_markets(self, params={}):
response = await self.publicGetBrokerInfo(params)
#
# {
# "timezone":"UTC",
# "serverTime":"1588015885118",
# "brokerFilters":[],
# "symbols":[
# {
# "filters":[
# {"minPrice":"0.01","maxPrice":"100000.00000000","tickSize":"0.01","filterType":"PRICE_FILTER"},
# {"minQty":"0.0005","maxQty":"100000.00000000","stepSize":"0.000001","filterType":"LOT_SIZE"},
# {"minNotional":"5","filterType":"MIN_NOTIONAL"}
# ],
# "exchangeId":"301",
# "symbol":"BTCUSDT",
# "symbolName":"BTCUSDT",
# "status":"TRADING",
# "baseAsset":"BTC",
# "baseAssetPrecision":"0.000001",
# "quoteAsset":"USDT",
# "quotePrecision":"0.01",
# "icebergAllowed":false
# },
# ],
# "options":[
# {
# "filters":[
# {"minPrice":"0.01","maxPrice":"100000.00000000","tickSize":"0.01","filterType":"PRICE_FILTER"},
# {"minQty":"0.01","maxQty":"100000.00000000","stepSize":"0.001","filterType":"LOT_SIZE"},
# {"minNotional":"1","filterType":"MIN_NOTIONAL"}
# ],
# "exchangeId":"301",
# "symbol":"BTC0501CS8500",
# "symbolName":"BTC0501CS8500",
# "status":"TRADING",
# "baseAsset":"BTC0501CS8500",
# "baseAssetPrecision":"0.001",
# "quoteAsset":"BUSDT",
# "quotePrecision":"0.01",
# "icebergAllowed":false
# },
# ],
# "contracts":[
# {
# "filters":[
# {"minPrice":"0.1","maxPrice":"100000.00000000","tickSize":"0.1","filterType":"PRICE_FILTER"},
# {"minQty":"1","maxQty":"100000.00000000","stepSize":"1","filterType":"LOT_SIZE"},
# {"minNotional":"0.000001","filterType":"MIN_NOTIONAL"}
# ],
# "exchangeId":"301",
# "symbol":"BTC-PERP-REV",
# "symbolName":"BTC-PERP-REV",
# "status":"TRADING",
# "baseAsset":"BTC-PERP-REV",
# "baseAssetPrecision":"1",
# "quoteAsset":"USDT",
# "quoteAssetPrecision":"0.1",
# "icebergAllowed":false,
# "inverse":true,
# "index":"BTCUSDT",
# "marginToken":"TBTC",
# "marginPrecision":"0.00000001",
# "contractMultiplier":"1.0",
# "underlying":"TBTC",
# "riskLimits":[
# {"riskLimitId":"200000001","quantity":"1000000.0","initialMargin":"0.01","maintMargin":"0.005"},
# {"riskLimitId":"200000002","quantity":"2000000.0","initialMargin":"0.02","maintMargin":"0.01"},
# {"riskLimitId":"200000003","quantity":"3000000.0","initialMargin":"0.03","maintMargin":"0.015"},
# {"riskLimitId":"200000004","quantity":"4000000.0","initialMargin":"0.04","maintMargin":"0.02"}
# ]
# },
# {
# "filters":[
# {"minPrice":"0.1","maxPrice":"100000.00000000","tickSize":"0.1","filterType":"PRICE_FILTER"},
# {"minQty":"1","maxQty":"100000.00000000","stepSize":"1","filterType":"LOT_SIZE"},
# {"minNotional":"0.000001","filterType":"MIN_NOTIONAL"}
# ],
# "exchangeId":"301",
# "symbol":"BTC-SWAP",
# "symbolName":"BTC-SWAP",
# "status":"TRADING",
# "baseAsset":"BTC-SWAP",
# "baseAssetPrecision":"1",
# "quoteAsset":"USDT",
# "quoteAssetPrecision":"0.1",
# "icebergAllowed":false,
# "inverse":true,
# "index":"BTCUSDT",
# "marginToken":"BTC",
# "marginPrecision":"0.00000001",
# "contractMultiplier":"1.0",
# "underlying":"BTC",
# "riskLimits":[
# {"riskLimitId":"500000001","quantity":"1000000.0","initialMargin":"0.01","maintMargin":"0.005"},
# {"riskLimitId":"500000002","quantity":"2000000.0","initialMargin":"0.02","maintMargin":"0.01"},
# {"riskLimitId":"500000003","quantity":"3000000.0","initialMargin":"0.03","maintMargin":"0.015"},
# {"riskLimitId":"500000004","quantity":"4000000.0","initialMargin":"0.04","maintMargin":"0.02"}
# ]
# },
# {
# "filters":[
# {"minPrice":"0.1","maxPrice":"100000.00000000","tickSize":"0.1","filterType":"PRICE_FILTER"},
# {"minQty":"1","maxQty":"100000.00000000","stepSize":"1","filterType":"LOT_SIZE"},
# {"minNotional":"0.000000001","filterType":"MIN_NOTIONAL"}
# ],
# "exchangeId":"301",
# "symbol":"BTC-PERP-BUSDT",
# "symbolName":"BTC-PERP-BUSDT",
# "status":"TRADING",
# "baseAsset":"BTC-PERP-BUSDT",
# "baseAssetPrecision":"1",
# "quoteAsset":"BUSDT",
# "quoteAssetPrecision":"0.1",
# "icebergAllowed":false,
# "inverse":false,
# "index":"BTCUSDT",
# "marginToken":"BUSDT",
# "marginPrecision":"0.0001",
# "contractMultiplier":"0.0001",
# "underlying":"TBTC",
# "riskLimits":[
# {"riskLimitId":"600000132","quantity":"1000000.0","initialMargin":"0.01","maintMargin":"0.005"},
# {"riskLimitId":"600000133","quantity":"2000000.0","initialMargin":"0.02","maintMargin":"0.01"},
# {"riskLimitId":"600000134","quantity":"3000000.0","initialMargin":"0.03","maintMargin":"0.015"},
# {"riskLimitId":"600000135","quantity":"4000000.0","initialMargin":"0.04","maintMargin":"0.02"}
# ]
# },
# ]
# }
#
result = []
symbols = self.safe_value(response, 'symbols', [])
for i in range(0, len(symbols)):
market = self.parse_market(symbols[i], 'spot')
result.append(market)
options = self.safe_value(response, 'options', [])
for i in range(0, len(options)):
market = self.parse_market(options[i], 'option')
result.append(market)
contracts = self.safe_value(response, 'contracts', [])
for i in range(0, len(contracts)):
market = self.parse_market(contracts[i], 'future')
result.append(market)
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit # default 40, max 40
response = await self.quoteGetDepth(self.extend(request, params))
#
# {
# "time":1588068913453,
# "bids":[
# ["0.025278","0.0202"],
# ["0.025277","16.1132"],
# ["0.025276","7.9056"],
# ]
# "asks":[
# ["0.025302","5.9999"],
# ["0.025303","34.9151"],
# ["0.025304","92.391"],
# ]
# }
#
timestamp = self.safe_integer(response, 'time')
return self.parse_order_book(response, timestamp)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.quoteGetTicker24hr(self.extend(request, params))
#
# {
# "time":1588069860794,
# "symbol":"BNB0501PS16",
# "bestBidPrice":"0.2129",
# "bestAskPrice":"0.3163",
# "volume":"33547",
# "quoteVolume":"10801.987",
# "lastPrice":"0.2625",
# "highPrice":"0.3918",
# "lowPrice":"0.2625",
# "openPrice":"0.362",
# }
#
return self.parse_ticker(response, market)
async def fetch_bid_ask(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.quoteGetTickerBookTicker(self.extend(request, params))
#
# {
# "symbol": "LTCBTC",
# "bidPrice": "4.00000000",
# "bidQty": "431.00000000",
# "askPrice": "4.00000200",
# "askQty": "9.00000000"
# }
#
return self.parse_ticker(response, market)
async def fetch_bids_asks(self, symbols=None, params={}):
await self.load_markets()
response = await self.quoteGetTickerBookTicker(params)
#
# [
# {
# "symbol": "LTCBTC",
# "bidPrice": "4.00000000",
# "bidQty": "431.00000000",
# "askPrice": "4.00000200",
# "askQty": "9.00000000"
# },
# {
# "symbol": "ETHBTC",
# "bidPrice": "0.07946700",
# "bidQty": "9.00000000",
# "askPrice": "100000.00000000",
# "askQty": "1000.00000000"
# },
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
options = self.safe_value(self.options, 'fetchTickers', {})
defaultMethod = self.safe_string(options, 'method', 'quoteGetTicker24hr')
defaultType = self.safe_string(options, 'type', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
method = defaultMethod
if type == 'future':
method = 'quoteGetContractTicker24hr'
elif type == 'option':
method = 'quoteGetOptionTicker24hr'
response = await getattr(self, method)(query)
#
# [
# {
# "time": 1538725500422,
# "symbol": "ETHBTC",
# "lastPrice": "4.00000200",
# "openPrice": "99.00000000",
# "highPrice": "100.00000000",
# "lowPrice": "0.10000000",
# "volume": "8913.30000000"
# },
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_balance(self, params={}):
await self.load_markets()
options = self.safe_value(self.options, 'fetchBalance', {})
defaultType = self.safe_string(options, 'type', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
method = 'privateGetAccount'
if type == 'future':
method = 'contractGetAccount'
elif type == 'option':
method = 'optionGetAccount'
response = await getattr(self, method)(query)
#
# spot
#
# {
# 'balances': [
# {
# 'asset': 'ALGO',
# 'free': '0',
# 'locked': '0'
# },
# {
# 'asset': 'BHT',
# 'free': '0',
# 'locked': '0'
# }
# ]
# }
#
# contract
#
# {
# "BUSDT":{
# "total":"1000",
# "availableMargin":"1000",
# "positionMargin":"0",
# "orderMargin":"0",
# "tokenId":"BUSDT"
# },
# "TBTC":{
# "total":"0.5",
# "availableMargin":"0.5",
# "positionMargin":"0",
# "orderMargin":"0",
# "tokenId":"TBTC"
# }
# }
#
# option
#
# {
# "optionAsset":"",
# "balances":[
# {
# "tokenName":"USDT",
# "free":"0.0",
# "locked":"0.0",
# "margin":"0.0"
# },
# {
# "tokenName":"BUSDT",
# "free":"0.0",
# "locked":"0.0",
# "margin":"0.0"
# }
# ]
# }
#
balances = self.safe_value(response, 'balances')
result = {'info': response}
if balances is not None:
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string_2(balance, 'asset', 'tokenName')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_number(balance, 'free')
account['used'] = self.safe_number(balance, 'locked')
result[code] = account
else:
currencyIds = list(response.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
balance = response[currencyId]
account = self.account()
account['free'] = self.safe_number(balance, 'availableMargin')
account['total'] = self.safe_number(balance, 'total')
result[code] = account
return self.parse_balance(result)
async def fetch_trades(self, symbol, since=None, limit=50, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.quoteGetTrades(self.extend(request, params))
#
# [
# {"price":"0.025344","time":1588084082060,"qty":"1","isBuyerMaker":false},
# {"price":"0.02535","time":1588084086021,"qty":"0.553","isBuyerMaker":true},
# {"price":"0.025348","time":1588084097037,"qty":"1","isBuyerMaker":false},
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1587906000000, # open time
# "0.1761", # open
# "0.1761", # high
# "0.1761", # low
# "0.1761", # close
# "0", # base volume
# 0, # close time
# "0", # quote volume
# 0, # number of trades
# "0", # taker buy base asset volume
# "0" # taker buy quote asset volume
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'interval': self.timeframes[timeframe],
}
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit # default 500, max 500
response = await self.quoteGetKlines(self.extend(request, params))
#
# [
# [1587906000000,"0.1761","0.1761","0.1761","0.1761","0",0,"0",0,"0","0"],
# [1587906180000,"0.1761","0.1761","0.1761","0.1761","0",0,"0",0,"0","0"],
# [1587906360000,"0.1761","0.1848","0.1761","0.1848","53",0,"9.7944",1,"0","0"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
request = {
# if only fromId is set,it will get orders < that fromId in descending order
# if only toId is set, it will get orders > that toId in ascending order
# if fromId is set and toId is set, it will get orders < that fromId and > that toId in descending order
# if fromId is not set and toId it not set, most recent order are returned in descending order
# 'fromId': '43287482374',
# 'toId': '43287482374',
# 'endTime': self.milliseconds(), # optional, spot only
}
defaultType = self.safe_string(self.options, 'type', 'spot')
options = self.safe_value(self.options, 'fetchMyTrades', {})
fetchMyTradesType = self.safe_string(options, 'type', defaultType)
type = self.safe_string(params, 'type', fetchMyTradesType)
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
type = market['type']
query = self.omit(params, 'type')
if limit is not None:
# spot default 500, max 1000
# futures and options default 20, max 1000
request['limit'] = limit
method = 'privateGetMyTrades'
if type == 'future':
method = 'contractGetMyTrades'
else:
if type == 'option':
method = 'optionGetMyTrades'
else:
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a `symbol` argument for ' + type + ' markets')
market = self.market(symbol)
request['symbol'] = market['id']
        if since is not None:
            request['startTime'] = since
response = await getattr(self, method)(self.extend(request, query))
#
# spot
#
# [
# {
# "id":"616384027512920576",
# "symbol":"TBTCBUSDT",
# "orderId":"616384027202542080",
# "matchOrderId":"605124954767266560",
# "price":"6826.06",
# "qty":"0.1",
# "commission":"0.682606",
# "commissionAsset":"BUSDT",
# "time":"1588214701982",
# "isBuyer":false,
# "isMaker":false,
# "fee":{
# "feeTokenId":"BUSDT",
# "feeTokenName":"BUSDT",
# "fee":"0.682606"
# }
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
orderSide = side.upper()
orderType = type.upper()
request = {
'symbol': market['id'],
# BUY or SELL for spot and options
'side': orderSide,
# GTC, FOK, IOC for spot and options
# GTC, FOK, IOC, LIMIT_MAKER for futures
# 'timeInForce': 'GTC',
}
query = params
method = 'privatePostOrder'
if market['type'] == 'future':
if (orderSide != 'BUY_OPEN') and (orderSide != 'SELL_OPEN') and (orderSide != 'BUY_CLOSE') and (orderSide != 'SELL_CLOSE'):
raise NotSupported(self.id + ' createOrder() does not support order side ' + side + ' for ' + market['type'] + ' markets, only BUY_OPEN, SELL_OPEN, BUY_CLOSE and SELL_CLOSE are supported')
if (orderType != 'LIMIT') and (orderType != 'STOP'):
raise NotSupported(self.id + ' createOrder() does not support order type ' + type + ' for ' + market['type'] + ' markets, only LIMIT and STOP are supported')
clientOrderId = self.safe_value(params, 'clientOrderId')
if clientOrderId is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a clientOrderId parameter for ' + market['type'] + ' markets, supply clientOrderId in the params argument')
leverage = self.safe_value(params, 'leverage')
if leverage is None and (orderSide == 'BUY_OPEN' or orderSide == 'SELL_OPEN'):
raise NotSupported(self.id + ' createOrder() requires a leverage parameter for ' + market['type'] + ' markets if orderSide is BUY_OPEN or SELL_OPEN')
method = 'contractPostOrder'
priceType = self.safe_string(params, 'priceType')
if priceType is None:
request['price'] = self.price_to_precision(symbol, price)
else:
request['priceType'] = priceType
if priceType == 'INPUT':
request['price'] = self.price_to_precision(symbol, price)
request['orderType'] = type.upper() # LIMIT, STOP
request['quantity'] = self.amount_to_precision(symbol, amount)
# request['leverage'] = 1 # not required for closing orders
request['leverage'] = leverage
request['clientOrderId'] = clientOrderId
# optional
# request['priceType'] = 'INPUT', # INPUT, OPPONENT, QUEUE, OVER, MARKET
# request['triggerPrice'] = 123.45
else:
if market['type'] == 'option':
method = 'optionPostOrder'
newClientOrderId = self.safe_value_2(params, 'clientOrderId', 'newClientOrderId')
if newClientOrderId is not None:
request['newClientOrderId'] = newClientOrderId
request['type'] = orderType
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['quantity'] = self.amount_to_precision(symbol, amount)
elif type == 'market':
# for market buy it requires the amount of quote currency to spend
if side == 'buy':
createMarketBuyOrderRequiresPrice = self.safe_value(self.options, 'createMarketBuyOrderRequiresPrice', True)
if createMarketBuyOrderRequiresPrice:
if price is not None:
amount = amount * price
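                            # amount is now a quote-currency cost, e.g. buying
                            # 2 at price 9000 sends quantity 18000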
else:
raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False and supply the total cost value in the 'amount' argument(the exchange-specific behaviour)")
precision = market['precision']['price']
request['quantity'] = self.decimal_to_precision(amount, TRUNCATE, precision, self.precisionMode)
else:
request['quantity'] = self.amount_to_precision(symbol, amount)
query = self.omit(query, ['clientOrderId', 'newClientOrderId'])
response = await getattr(self, method)(self.extend(request, query))
#
# spot
#
# {
# "symbol":"TBTCBUSDT",
# "orderId":"616376654496877056",
# "clientOrderId":"158821382304516955",
# "transactTime":"1588213823080",
# "price":"0",
# "origQty":"1000",
# "executedQty":"0",
# "status":"NEW",
# "timeInForce":"GTC",
# "type":"MARKET",
# "side":"BUY"
# }
#
# contract
#
# {
# 'time': '1570759718825',
# 'updateTime': '0',
# 'orderId': '469961015902208000',
# 'clientOrderId': '6423344174',
# 'symbol': 'BTC-PERP-REV',
# 'price': '8200',
# 'leverage': '12.08',
# 'origQty': '5',
# 'executedQty': '0',
# 'avgPrice': '0',
# 'marginLocked': '0.00005047',
# 'orderType': 'LIMIT',
# 'side': 'BUY_OPEN',
# 'fees': [],
# 'timeInForce': 'GTC',
# 'status': 'NEW',
# 'priceType': 'INPUT'
# }
#
return self.parse_order(response, market)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
clientOrderId = self.safe_value_2(params, 'origClientOrderId', 'clientOrderId')
request = {}
defaultType = self.safe_string(self.options, 'type', 'spot')
options = self.safe_value(self.options, 'cancelOrder', {})
cancelOrderType = self.safe_string(options, 'type', defaultType)
type = self.safe_string(params, 'type', cancelOrderType)
query = self.omit(params, 'type')
if clientOrderId is not None:
request['origClientOrderId'] = clientOrderId
query = self.omit(query, ['origClientOrderId', 'clientOrderId'])
else:
request['orderId'] = id
method = 'privateDeleteOrder'
orderType = self.safe_string(query, 'orderType')
if orderType is not None:
type = 'future'
if type == 'future':
method = 'contractDeleteOrderCancel'
if orderType is None:
raise ArgumentsRequired(self.id + " cancelOrder() requires an orderType parameter, pass the {'orderType': 'LIMIT'} or {'orderType': 'STOP'} in params argument")
request['orderType'] = orderType
else:
if type == 'option':
method = 'optionDeleteOrderCancel'
response = await getattr(self, method)(self.extend(request, query))
#
# spot
#
# {
# 'exchangeId': '301',
# 'symbol': 'BHTUSDT',
# 'clientOrderId': '0',
# 'orderId': '499890200602846976',
# 'status': 'CANCELED'
# }
#
# futures
#
# {
# "time":"1588353669383",
# "updateTime":"0",
# "orderId":"617549770304599296",
# "clientOrderId":"test-001",
# "symbol":"BTC-PERP-REV",
# "price":"10000",
# "leverage":"1",
# "origQty":"100",
# "executedQty":"0",
# "avgPrice":"0",
# "marginLocked":"0",
# "orderType":"LIMIT",
# "side":"SELL_OPEN",
# "fees":[],
# "timeInForce":"GTC",
# "status":"CANCELED",
# "priceType":"INPUT",
# }
#
return self.parse_order(response)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
request = {
# if orderId is set, it will get orders < that orderId otherwise most recent orders are returned
# 'orderId': '43287482374',
}
defaultType = self.safe_string(self.options, 'type', 'spot')
options = self.safe_value(self.options, 'fetchOpenOrders', {})
fetchOpenOrdersType = self.safe_string(options, 'type', defaultType)
type = self.safe_string(params, 'type', fetchOpenOrdersType)
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
type = market['type']
query = self.omit(params, 'type')
if limit is not None:
request['limit'] = limit # default 500, max 1000
method = 'privateGetOpenOrders'
if type == 'future':
method = 'contractGetOpenOrders'
elif type == 'option':
method = 'optionGetOpenOrders'
response = await getattr(self, method)(self.extend(request, query))
#
# spot
#
# [
# {
# 'orderId': '499902955766523648',
# 'clientOrderId': '157432907618453',
# 'exchangeId': '301',
# 'symbol': 'BHTUSDT',
# 'price': '0.01',
# 'origQty': '50',
# 'executedQty': '0',
# 'cummulativeQuoteQty': '0',
# 'avgPrice': '0',
# 'status': 'NEW',
# 'timeInForce': 'GTC',
# 'type': 'LIMIT',
# 'side': 'BUY',
# 'stopPrice': '0.0',
# 'icebergQty': '0.0',
# 'time': '1574329076202',
# 'updateTime': '0',
# 'isWorking': True
# }
# ]
#
# futures
#
# [
# {
# "time":"1588353669383",
# "updateTime":"0",
# "orderId":"617549770304599296",
# "clientOrderId":"test-001",
# "symbol":"BTC-PERP-REV",
# "price":"10000",
# "leverage":"1",
# "origQty":"100",
# "executedQty":"0",
# "avgPrice":"0",
# "marginLocked":"0.01",
# "orderType":"LIMIT",
# "side":"SELL_OPEN",
# "fees":[],
# "timeInForce":"GTC",
# "status":"NEW",
# "priceType":"INPUT"
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
request = {
# if orderId is set, it will get orders < that orderId otherwise most recent orders are returned
# 'orderId': '43287482374',
# 'endTime': self.milliseconds(), # optional
}
defaultType = self.safe_string(self.options, 'type', 'spot')
options = self.safe_value(self.options, 'fetchClosedOrders', {})
fetchClosedOrdersType = self.safe_string(options, 'type', defaultType)
type = self.safe_string(params, 'type', fetchClosedOrdersType)
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
type = market['type']
query = self.omit(params, 'type')
if limit is not None:
request['limit'] = limit # default 500, max 1000
if since is not None:
request['startTime'] = since
method = 'privateGetHistoryOrders'
if type == 'future':
method = 'contractGetHistoryOrders'
elif type == 'option':
method = 'optionGetHistoryOrders'
response = await getattr(self, method)(self.extend(request, query))
#
# spot
#
# [
# {
# "orderId":"616384027202542080",
# "clientOrderId":"158821470194414688",
# "exchangeId":"301",
# "symbol":"TBTCBUSDT",
# "price":"0",
# "origQty":"0.1",
# "executedQty":"0.1",
# "cummulativeQuoteQty":"682.606",
# "avgPrice":"6826.06",
# "status":"FILLED",
# "timeInForce":"GTC",
# "type":"MARKET",
# "side":"SELL",
# "stopPrice":"0.0",
# "icebergQty":"0.0",
# "time":"1588214701974",
# "updateTime":"0",
# "isWorking":true
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
clientOrderId = self.safe_value_2(params, 'origClientOrderId', 'clientOrderId')
request = {}
defaultType = self.safe_string(self.options, 'type', 'spot')
options = self.safe_value(self.options, 'fetchOrder', {})
fetchOrderType = self.safe_string(options, 'type', defaultType)
type = self.safe_string(params, 'type', fetchOrderType)
query = self.omit(params, 'type')
if clientOrderId is not None:
request['origClientOrderId'] = clientOrderId
query = self.omit(query, ['origClientOrderId', 'clientOrderId'])
else:
request['orderId'] = id
method = 'privateGetOrder'
if type == 'future':
method = 'contractGetGetOrder'
elif type == 'option':
method = 'optionGetGetOrder'
response = await getattr(self, method)(self.extend(request, query))
return self.parse_order(response)
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
currency = None
request = {
# 'fromId': 'string', # if fromId is set, it will get deposits > that fromId, otherwise most recent deposits are returned
}
if code is not None:
currency = self.currency(code)
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit
response = await self.privateGetDepositOrders(self.extend(request, params))
#
# [
# {
# 'time': '1565769575929',
# 'orderId': '428100569859739648',
# 'token': 'USDT',
# 'address': '',
# 'addressTag': '',
# 'fromAddress': '',
# 'fromAddressTag': '',
# 'quantity': '1100',
# },
# ]
#
return self.parse_transactions(response, currency, since, limit)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
currency = None
request = {
# 'fromId': 'string', # if fromId is set, it will get deposits > that fromId, otherwise most recent deposits are returned
}
if code is not None:
currency = self.currency(code)
request['token'] = currency['id']
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalOrders(self.extend(request, params))
#
# [
# {
# "time":"1536232111669",
# "orderId":"90161227158286336",
# "accountId":"517256161325920",
# "tokenId":"BHC",
# "tokenName":"BHC",
# "address":"0x815bF1c3cc0f49b8FC66B21A7e48fCb476051209",
# "addressExt":"address tag",
# "quantity":"14", # Withdrawal qty
# "arriveQuantity":"14", # Arrived qty
# "statusCode":"PROCESSING_STATUS",
# "status":3,
# "txid":"",
# "txidUrl":"",
# "walletHandleTime":"1536232111669",
# "feeTokenId":"BHC",
# "feeTokenName":"BHC",
# "fee":"0.1",
# "requiredConfirmNum":0, # Required confirmations
# "confirmNum":0, # Confirmations
# "kernelId":"", # BEAM and GRIN only
# "isInternalTransfer": False # True if self transfer is internal
# }
# ]
#
return self.parse_transactions(response, currency, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
clientOrderId = self.safe_string(params, 'clientOrderId', self.uuid())
request = {
'clientOrderId': clientOrderId,
'tokenId': currency['id'],
'address': address, # the withdrawal address must be in current tag list in your PC/APP client
'withdrawQuantity': amount,
# 'chainType': 'OMNI', # OMNI, ERC20, TRC20
}
if tag is not None:
request['addressExt'] = tag
response = await self.privatePostWithdraw(self.extend(request, params))
#
# {
# "status": 0,
# "success": True,
# "needBrokerAudit": False, # Whether self request needs broker auit
# "orderId": "423885103582776064" # Id for successful withdrawal
# }
#
return {
'info': response,
'id': self.safe_string(response, 'orderId'),
}
async def fetch_accounts(self, params={}):
response = await self.privatePostSubAccountQuery(params)
#
# [
# {
# "accountId": "122216245228131",
# "accountName": "createSubAccountByCurl", # sub-account name
# "accountType": 1, # 1 token trading, 2 options, 3 futures
# "accountIndex": 1, # 0 main account, 1 sub-account
# },
# ]
#
result = []
for i in range(0, len(response)):
account = response[i]
accountId = self.safe_string(account, 'accountId')
accountType = self.safe_string(account, 'accountType')
type = accountType
if accountType == '1':
type = 'spot'
elif accountType == '2':
type = 'option'
elif accountType == '3':
type = 'future'
result.append({
'id': accountId,
'type': type,
'currency': None,
'info': account,
})
return result
async def fetch_ledger(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
'accountType': 1, # spot 1, options 2, futures 3
'accountIndex': 0, # main 0, sub-account 1
'fromFlowId': '', # flowId to start from
'endFlowId': '', # flowId to end with
'endTime': 1588450533040,
}
currency = None
if code is not None:
currency = self.currency(code)
request['tokenId'] = currency['id']
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit # default 500, max 500
response = await self.privateGetBalanceFlow(self.extend(request, params))
#
# [
# {
# "id": "539870570957903104",
# "accountId": "122216245228131",
# "tokenId": "BTC",
# "tokenName": "BTC",
# "flowTypeValue": 51,
# "flowType": "USER_ACCOUNT_TRANSFER",
# "flowName": "Transfer",
# "change": "-12.5",
# "total": "379.624059937852365", # after change
# "created": "1579093587214"
# },
# {
# "id": "536072393645448960",
# "accountId": "122216245228131",
# "tokenId": "USDT",
# "tokenName": "USDT",
# "flowTypeValue": 7,
# "flowType": "AIRDROP",
# "flowName": "Airdrop",
# "change": "-2000",
# "total": "918662.0917630848",
# "created": "1578640809195"
# }
# ]
#
return self.parse_ledger(response, currency, since, limit)
def parse_ledger_entry(self, item, currency=None):
#
# {
# "id": "539870570957903104",
# "accountId": "122216245228131",
# "tokenId": "BTC",
# "tokenName": "BTC",
# "flowTypeValue": 51,
# "flowType": "USER_ACCOUNT_TRANSFER",
# "flowName": "Transfer",
# "change": "-12.5",
# "total": "379.624059937852365", # after change
# "created": "1579093587214"
# }
#
# {
# "id": "536072393645448960",
# "accountId": "122216245228131",
# "tokenId": "USDT",
# "tokenName": "USDT",
# "flowTypeValue": 7,
# "flowType": "AIRDROP",
# "flowName": "Airdrop",
# "change": "-2000",
# "total": "918662.0917630848",
# "created": "1578640809195"
# }
#
currencyId = self.safe_string(item, 'tokenId')
code = self.safe_currency_code(currencyId, currency)
amount = self.safe_number(item, 'change')
after = self.safe_number(item, 'total')
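        # 'total' is the balance after the change; the balance before it is
        # reconstructed below by undoing the signed change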
        direction = 'out' if (amount is not None and amount < 0) else 'in'
before = None
if after is not None and amount is not None:
difference = amount if (direction == 'out') else -amount
before = self.sum(after, difference)
timestamp = self.safe_integer(item, 'created')
type = self.parse_ledger_entry_type(self.safe_string(item, 'flowType'))
id = self.safe_string(item, 'id')
account = self.safe_string(item, 'accountId')
return {
'id': id,
'currency': code,
'account': account,
'referenceAccount': None,
'referenceId': None,
'status': None,
'amount': amount,
'before': before,
'after': after,
'fee': None,
'direction': direction,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'type': type,
'info': item,
}
def parse_ledger_entry_type(self, type):
types = {
'TRADE': 'trade',
'FEE': 'fee',
'TRANSFER': 'transfer',
'DEPOSIT': 'transaction',
'MAKER_REWARD': 'rebate',
'PNL': 'pnl',
'SETTLEMENT': 'settlement',
'LIQUIDATION': 'liquidation',
'FUNDING_SETTLEMENT': 'settlement',
'USER_ACCOUNT_TRANSFER': 'transfer',
'OTC_BUY_COIN': 'trade',
'OTC_SELL_COIN': 'trade',
'OTC_FEE': 'fee',
'OTC_TRADE': 'trade',
'ACTIVITY_AWARD': 'referral',
'INVITATION_REFERRAL_BONUS': 'referral',
'REGISTER_BONUS': 'referral',
'AIRDROP': 'airdrop',
'MINE_REWARD': 'reward',
}
return self.safe_string(types, type, type)
def parse_transaction_status(self, status):
statuses = {
'BROKER_AUDITING_STATUS': 'pending',
'BROKER_REJECT_STATUS': 'failed',
'AUDITING_STATUS': 'pending',
'AUDIT_REJECT_STATUS': 'failed',
'PROCESSING_STATUS': 'pending',
'WITHDRAWAL_SUCCESS_STATUS': 'ok',
'WITHDRAWAL_FAILURE_STATUS': 'failed',
'BLOCK_MINING_STATUS': 'ok',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
#
# {
# 'time': '1565769575929',
# 'orderId': '428100569859739648',
# 'token': 'USDT',
# 'address': '',
# 'addressTag': '',
# 'fromAddress': '',
# 'fromAddressTag': '',
# 'quantity': '1100',
# }
#
# fetchWithdrawals
#
# {
# "time":"1536232111669",
# "orderId":"90161227158286336",
# "accountId":"517256161325920",
# "tokenId":"BHC",
# "tokenName":"BHC",
# "address":"0x815bF1c3cc0f49b8FC66B21A7e48fCb476051209",
# "addressExt":"address tag",
# "quantity":"14", # Withdrawal qty
# "arriveQuantity":"14", # Arrived qty
# "statusCode":"PROCESSING_STATUS",
# "status":3,
# "txid":"",
# "txidUrl":"",
# "walletHandleTime":"1536232111669",
# "feeTokenId":"BHC",
# "feeTokenName":"BHC",
# "fee":"0.1",
# "requiredConfirmNum":0, # Required confirmations
# "confirmNum":0, # Confirmations
# "kernelId":"", # BEAM and GRIN only
# "isInternalTransfer": False # True if self transfer is internal
# }
#
id = self.safe_string(transaction, 'orderId')
address = self.safe_string(transaction, 'address')
tag = self.safe_string_2(transaction, 'addressExt', 'addressTag')
if tag is not None:
if len(tag) < 1:
tag = None
addressFrom = self.safe_string(transaction, 'fromAddress')
tagFrom = self.safe_string(transaction, 'fromAddressTag')
if tagFrom is not None:
if len(tagFrom) < 1:
tagFrom = None
currencyId = self.safe_string(transaction, 'tokenId')
code = self.safe_currency_code(currencyId, currency)
timestamp = self.safe_integer(transaction, 'time')
txid = self.safe_string(transaction, 'txid')
if txid == '':
txid = None
type = None
status = self.parse_transaction_status(self.safe_string(transaction, 'statusCode'))
if status is None:
type = 'deposit'
status = 'ok'
else:
type = 'withdrawal'
amount = self.safe_number(transaction, 'quantity')
feeCost = self.safe_number(transaction, 'fee')
fee = None
if feeCost is not None:
feeCurrencyId = self.safe_string(transaction, 'feeTokenId')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'currency': feeCurrencyCode,
'cost': feeCost,
}
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': addressFrom,
'address': address,
'addressTo': address,
'tagFrom': tagFrom,
'tag': tag,
'tagTo': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def parse_ticker(self, ticker, market=None):
#
# fetchTicker, fetchTickers
#
# {
# "time":1588069860794,
# "symbol":"BNB0501PS16",
# "bestBidPrice":"0.2129",
# "bestAskPrice":"0.3163",
# "volume":"33547",
# "quoteVolume":"10801.987",
# "lastPrice":"0.2625",
# "highPrice":"0.3918",
# "lowPrice":"0.2625",
# "openPrice":"0.362",
# }
#
# fetchBidAsk, fetchBidAsks
#
# {
# "symbol": "LTCBTC",
# "bidPrice": "4.00000000",
# "bidQty": "431.00000000",
# "askPrice": "4.00000200",
# "askQty": "9.00000000"
# }
#
marketId = self.safe_string(ticker, 'symbol')
symbol = self.safe_symbol(marketId, market)
timestamp = self.safe_integer(ticker, 'time')
open = self.safe_number(ticker, 'openPrice')
close = self.safe_number(ticker, 'lastPrice')
change = None
percentage = None
average = None
if (open is not None) and (close is not None):
change = close - open
average = self.sum(open, close) / 2
            if open > 0:
                percentage = (change / open) * 100
quoteVolume = self.safe_number(ticker, 'quoteVolume')
baseVolume = self.safe_number(ticker, 'volume')
vwap = self.vwap(baseVolume, quoteVolume)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'highPrice'),
'low': self.safe_number(ticker, 'lowPrice'),
'bid': self.safe_number_2(ticker, 'bestBidPrice', 'bidPrice'),
'bidVolume': self.safe_number(ticker, 'bidQty'),
'ask': self.safe_number_2(ticker, 'bestAskPrice', 'askPrice'),
'askVolume': self.safe_number(ticker, 'askQty'),
'vwap': vwap,
'open': open,
'close': close,
'last': close,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
    def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "price":"0.025344",
# "time":1588084082060,
# "qty":"1",
# "isBuyerMaker":false
# }
#
# fetchMyTrades(private)
#
# spot
#
# {
# "id":"616384027512920576",
# "symbol":"TBTCBUSDT",
# "orderId":"616384027202542080",
# "matchOrderId":"605124954767266560",
# "price":"6826.06",
# "qty":"0.1",
# "commission":"0.682606",
# "commissionAsset":"BUSDT",
# "time":"1588214701982",
# "isBuyer":false,
# "isMaker":false,
# "fee":{
# "feeTokenId":"BUSDT",
# "feeTokenName":"BUSDT",
# "fee":"0.682606"
# }
# }
#
id = self.safe_string(trade, 'id')
        timestamp = self.safe_integer(trade, 'time')
type = None
orderId = self.safe_string(trade, 'orderId')
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'qty')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
side = None
takerOrMaker = None
if 'isBuyerMaker' in trade:
side = 'sell' if trade['isBuyerMaker'] else 'buy'
else:
isMaker = self.safe_value(trade, 'isMaker')
if isMaker is not None:
takerOrMaker = 'maker' if isMaker else 'taker'
isBuyer = self.safe_value(trade, 'isBuyer')
side = 'buy' if isBuyer else 'sell'
fee = None
feeCost = self.safe_number(trade, 'commission')
if feeCost is not None:
feeCurrencyId = self.safe_string(trade, 'commissionAsset')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
        symbol = market['symbol'] if (market is not None) else None
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'order': orderId,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "symbol":"TBTCBUSDT",
# "orderId":"616376654496877056",
# "clientOrderId":"158821382304516955",
# "transactTime":"1588213823080",
# "price":"0",
# "origQty":"1000",
# "executedQty":"0",
# "status":"NEW",
# "timeInForce":"GTC",
# "type":"MARKET",
# "side":"BUY"
# }
#
# fetchOrder, fetchOpenOrders, fetchClosedOrders
#
# spot
#
# {
# "orderId":"616384027202542080",
# "clientOrderId":"158821470194414688",
# "exchangeId":"301",
# "symbol":"TBTCBUSDT",
# "price":"0",
# "origQty":"0.1",
# "executedQty":"0.1",
# "cummulativeQuoteQty":"682.606",
# "avgPrice":"6826.06",
# "status":"FILLED",
# "timeInForce":"GTC",
# "type":"MARKET",
# "side":"SELL",
# "stopPrice":"0.0",
# "icebergQty":"0.0",
# "time":"1588214701974",
# "updateTime":"0",
# "isWorking":true
# }
#
# future
#
# {
# time: "1588353669383",
# updateTime: "0",
# orderId: "617549770304599296",
# clientOrderId: "test-001",
# symbol: "BTC-PERP-REV",
# price: "10000",
# leverage: "1",
# origQty: "100",
# executedQty: "0",
# avgPrice: "0",
# marginLocked: "0",
# orderType: "LIMIT",
# side: "SELL_OPEN",
# fees: [],
# timeInForce: "GTC",
# status: "CANCELED",
# priceType: "INPUT"
# }
        #
id = self.safe_string(order, 'orderId')
clientOrderId = self.safe_string(order, 'clientOrderId')
timestamp = self.safe_integer(order, 'time')
if timestamp is None:
timestamp = self.safe_integer(order, 'transactTime')
marketId = self.safe_string(order, 'symbol')
symbol = self.safe_symbol(marketId, market)
type = self.safe_string_lower(order, 'type')
side = self.safe_string_lower(order, 'side')
price = self.safe_number(order, 'price')
average = self.safe_number(order, 'avgPrice')
amount = None
cost = self.safe_number(order, 'cummulativeQuoteQty')
filled = None
remaining = None
if type is None:
type = self.safe_string_lower(order, 'orderType')
if (market is not None) and market['inverse']:
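            # inverse contracts report executedQty in quote-currency terms, so
            # it maps to cost rather than to a base amount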
cost = self.safe_number(order, 'executedQty')
amount = None
if cost == 0.0:
filled = 0
else:
amount = self.safe_number(order, 'origQty')
if type == 'market':
price = None
if side == 'buy':
amount = None
filled = self.safe_number(order, 'executedQty')
if filled is not None:
if amount is not None:
remaining = amount - filled
if average == 0.0:
average = None
status = self.parse_order_status(self.safe_string(order, 'status'))
timeInForce = self.safe_string(order, 'timeInForce')
stopPrice = self.safe_number(order, 'stopPrice')
result = {
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'side': side,
'price': price,
'stopPrice': stopPrice,
'average': average,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'trades': None,
'fee': None,
'fees': None,
}
fees = self.safe_value(order, 'fees', [])
numFees = len(fees)
if numFees > 0:
result['fees'] = []
for i in range(0, len(fees)):
feeCost = self.safe_number(fees[i], 'fee')
if feeCost is not None:
feeCurrencyId = self.safe_string(fees[i], 'feeToken')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
result['fees'].append({
'cost': feeCost,
'currency': feeCurrencyCode,
})
return result
def parse_order_status(self, status):
statuses = {
'NEW': 'open',
'CANCELED': 'canceled',
'FILLED': 'closed',
'PARTIALLY_FILLED': 'open',
'PENDING_CANCEL': 'canceled',
}
return self.safe_string(statuses, status, status)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/' + self.version + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
isPublicContract = (api == 'contract') and ((path == 'insurance') or (path == 'fundingRate'))
if (api == 'public') or (api == 'quote') or isPublicContract:
if params:
url += '?' + self.urlencode(params)
else:
timestamp = self.milliseconds()
self.check_required_credentials()
request = self.extend({
'timestamp': timestamp,
}, query)
            # prepare the data to be signed
auth = self.urlencode(request)
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha256)
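            # self.hmac returns a hex digest by default; it is appended to the
            # query as the 'signature' parameter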
request['signature'] = signature
headers = {
'X-BH-APIKEY': self.apiKey,
}
if method == 'POST':
body = self.urlencode(request)
headers = self.extend({
'Content-Type': 'application/x-www-form-urlencoded',
}, headers)
else:
url += '?' + self.urlencode(request)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if 'code' in response:
code = self.safe_string(response, 'code')
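            # a code of '0' indicates success; any other value is mapped to a
            # ccxt exception, falling back to a generic ExchangeError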
if code != '0':
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], code, feedback)
raise ExchangeError(feedback)
| [
"travis@travis-ci.org"
] | travis@travis-ci.org |
6c30cc83abc164f84e868bcf6a78c763b1a44126 | 4be56098894a95da5964622fc4102b69e4530ab6 | /题库/522.最长特殊序列II.py | a9659fcabb96a178be129f0563ca29356f5c0718 | [] | no_license | ACENDER/LeetCode | 7c7c7ecc8d0cc52215272f47ec34638637fae7ac | 3383b09ab1246651b1d7b56ab426a456f56a4ece | refs/heads/master | 2023-03-13T19:19:07.084141 | 2021-03-15T09:29:21 | 2021-03-15T09:29:21 | 299,332,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : 522.最长特殊序列II.py
| [
"1641429327@qq.com"
] | 1641429327@qq.com |
1c7f31793ca39ebee732df11958237c8047a2673 | 97e534b26a76bf0d954e166841179979748bcfa2 | /objects/migrations/0053_claim.py | d7a2ba564d665e051111a4877f2f93d1ea61ffae | [] | no_license | mehdi1361/http_server | 3a8bd73ce44307ee2b7761d1211671ca8cb0f3ba | d8a962c55165ef0237bfb26d27d9cfa11a415a5d | refs/heads/develop | 2022-12-11T00:44:11.089407 | 2019-01-20T12:02:48 | 2019-01-20T12:02:48 | 166,656,299 | 0 | 0 | null | 2022-12-07T23:53:22 | 2019-01-20T12:02:05 | HTML | UTF-8 | Python | false | false | 1,519 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-27 07:15
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('objects', '0052_auto_20180626_1018'),
]
operations = [
migrations.CreateModel(
name='Claim',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True, verbose_name='created date')),
('updated_date', models.DateTimeField(auto_now=True, verbose_name='created date')),
('params', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True, verbose_name='params')),
('coin', models.PositiveIntegerField(default=10, verbose_name='coin')),
('gem', models.PositiveIntegerField(default=10, verbose_name='gem')),
('is_used', models.BooleanField(default=False, verbose_name='used')),
('league_player', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='claims', to='objects.LeagueUser', verbose_name='league_user')),
],
options={
'db_table': 'claims',
'verbose_name': 'claim',
'verbose_name_plural': 'claims',
},
),
]
| [
"mhd.mosavi@gmail.com"
] | mhd.mosavi@gmail.com |
443c3d059b335a119f3da8698934ed9e86c8a915 | 0c68c989a8473801743c4f2d2087c223b2fd47fa | /2020_hashcode/practice/pizza.py | 97016dd9bdb9f3d6de66d64c983e089b25e4c9ef | [] | no_license | tabletenniser/Google_code_jam | 4a105e8e9bc27269430a1c90f4c7a59a4d4bc7a1 | 1c300e3b953e2e744a2c3bb963b5d2f46f9caee1 | refs/heads/master | 2021-06-12T22:10:23.675590 | 2021-03-27T22:25:06 | 2021-03-27T22:25:06 | 179,803,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,754 | py | def get_opt_solution(max_slices, pizza_slices):
types_of_pizzas = len(pizza_slices)
    prev_dp_table = [0 for _ in range(max_slices + 1)]
    prev_solution_table = [list() for _ in range(max_slices + 1)]
for i in range(types_of_pizzas):
        dp_table = [0 for _ in range(max_slices + 1)]
        solution_table = [list() for _ in range(max_slices + 1)]
        for j in range(max_slices + 1):
if pizza_slices[i] > j:
dp_table[j] = prev_dp_table[j]
solution_table[j] = prev_solution_table[j]
else:
dp_table_value_select = prev_dp_table[j-pizza_slices[i]] + pizza_slices[i]
if prev_dp_table[j] > dp_table_value_select:
dp_table[j] = prev_dp_table[j]
solution_table[j] = prev_solution_table[j]
else:
dp_table[j] = dp_table_value_select
solution_table[j] = prev_solution_table[j-pizza_slices[i]] + [i]
# print(dp_table)
# print(solution_table)
prev_dp_table = dp_table
prev_solution_table = solution_table
return solution_table[-1]
def get_approx_solution(max_slices, pizza_slices):
    # greedy fallback: take the largest pizza types first while the running
    # total stays within max_slices
    cur_slice_count = 0
    cur_solution = []
    types_of_pizzas = len(pizza_slices)
    for i in range(types_of_pizzas - 1, -1, -1):
        p = pizza_slices[i]
        if cur_slice_count + p <= max_slices:
            cur_slice_count += p
            cur_solution.append(i)
cur_solution.sort()
return cur_solution
max_slices, pizza_types = [int(s) for s in input().split(" ")]
pizza_slices = [int(s) for s in input().split(" ")]
opt_sol=get_opt_solution(max_slices, pizza_slices)
print(len(opt_sol))
print(' '.join(map(str, opt_sol)))
| [
"tabletenniser@gmail.com"
] | tabletenniser@gmail.com |
1a47559dd1499bafa8598161835aa02a0e49cf0b | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/program/util/ProgramDiffFilter.pyi | d2835f42e1b5c7c90b7f8965f54c4cbdb0b67b07 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,593 | pyi | from typing import List
import ghidra.program.util
import java.lang
class ProgramDiffFilter(object):
"""
The ProgramDiffFilter is used when determining or working with
differences between two programs.
It indicates the types of program differences we are interested in.
Each difference type can be set to true, indicating interest in
differences of that type between two programs. False indicates no interest
in this type of program difference.
Valid filter types are:
BYTE_DIFFS, CODE_UNIT_DIFFS,
PLATE_COMMENT_DIFFS, PRE_COMMENT_DIFFS, EOL_COMMENT_DIFFS,
REPEATABLE_COMMENT_DIFFS, POST_COMMENT_DIFFS,
REFERENCE_DIFFS,
USER_DEFINED_DIFFS, BOOKMARK_DIFFS,
SYMBOL_DIFFS,
EQUATE_DIFFS, FUNCTION_DIFFS, PROGRAM_CONTEXT_DIFFS.
Predefined filter type combinations are:
COMMENT_DIFFS and ALL_DIFFS.
"""
ALL_DIFFS: int = 32767
BOOKMARK_DIFFS: int = 4096
BYTE_DIFFS: int = 2
CODE_UNIT_DIFFS: int = 4
COMMENT_DIFFS: int = 248
EOL_COMMENT_DIFFS: int = 8
EQUATE_DIFFS: int = 512
FUNCTION_DIFFS: int = 2048
FUNCTION_TAG_DIFFS: int = 16384
PLATE_COMMENT_DIFFS: int = 64
POST_COMMENT_DIFFS: int = 32
PRE_COMMENT_DIFFS: int = 16
PROGRAM_CONTEXT_DIFFS: int = 1
REFERENCE_DIFFS: int = 256
REPEATABLE_COMMENT_DIFFS: int = 128
SYMBOL_DIFFS: int = 1024
USER_DEFINED_DIFFS: int = 8192
@overload
def __init__(self):
"""
Creates new ProgramDiffFilter with none of the diff types selected.
"""
...
@overload
def __init__(self, type: int):
"""
Creates new ProgramDiffFilter with the specified diff types selected.
@param type one or more of the diff types "OR"ed together.
<BR>i.e. CODE_UNIT_DIFFS | SYMBOL_DIFFS
"""
...
@overload
def __init__(self, filter: ghidra.program.util.ProgramDiffFilter):
"""
Creates new ProgramDiffFilter equivalent to the specified ProgramDiffFilter.
@param filter the diff filter this one should equal.
"""
...
def addToFilter(self, filter: ghidra.program.util.ProgramDiffFilter) -> None:
"""
set this filter to look for types of differences in addition to those
types where it is already looking for differences.
The filter that is passed as a parameter indicates the additional types
of differences.
@param filter filter indicating the additional types of differences
to look for between the programs.
"""
...
def clearAll(self) -> None:
"""
Sets all the defined types of differences to false.
Filter indicates no interest in any difference types.
"""
...
def equals(self, obj: object) -> bool:
"""
Determines whether or not this filter is equal to the object that
is passed in.
@param obj the object to compare this one with.
@return true if the filter matches this one.
"""
...
def getClass(self) -> java.lang.Class: ...
def getFilter(self, type: int) -> bool:
"""
getFilter determines whether or not the specified type of filter is set.
@param type the set bits indicate the type of differences we want to
check as being set in the filter.
<BR>For example, one or more of the diff types "OR"ed together.
<BR>i.e. CODE_UNIT_DIFFS | SYMBOL_DIFFS
@return true if filtering for the specified type of differences.
"""
...
@staticmethod
def getPrimaryTypes() -> List[int]:
"""
Gets all the valid individual types of differences for this filter.
These are also referred to as primary difference types.
@return an array containing all the currently defined difference types
"""
...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def selectAll(self) -> None:
"""
Sets all the defined types of differences to true.
Filter indicates interest in all difference types.
"""
...
def setFilter(self, type: int, filter: bool) -> None:
"""
setFilter specifies whether or not the indicated type of difference will be
included by the filter (true) or not included (false).
@param type the set bits indicate the type of differences we want to
look for in the programs.
<BR>For example, one or more of the diff types "OR"ed together.
<BR>i.e. CODE_UNIT_DIFFS | SYMBOL_DIFFS
@param filter true if you want to determine differences of the specified type.
"""
...
def toString(self) -> unicode:
"""
Returns a string representation of the current settings for this filter.
"""
...
@staticmethod
def typeToName(type: int) -> unicode:
"""
<CODE>typeToName()</CODE> returns the name of the difference type.
Only predefined types, as specified in <CODE>ProgramDiffFilter</CODE>,
will return a name. Otherwise, an empty string is returned.
@param type the type of difference whose name is wanted.
@return the name of the predefined difference type. Otherwise, the empty string.
"""
...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
de767a446ec4679f03d7c8f8350383eb74a5ea8e | 0e478f3d8b6c323c093455428c9094c45de13bac | /src/OTLMOW/OEFModel/Classes/AWeg.py | fdc3b0800ca5134a20c84931626c320d54719df4 | [
"MIT"
] | permissive | davidvlaminck/OTLMOW | c6eae90b2cab8a741271002cde454427ca8b75ba | 48f8c357c475da1d2a1bc7820556843d4b37838d | refs/heads/main | 2023-01-12T05:08:40.442734 | 2023-01-10T15:26:39 | 2023-01-10T15:26:39 | 432,681,113 | 3 | 1 | MIT | 2022-06-20T20:36:00 | 2021-11-28T10:28:24 | Python | UTF-8 | Python | false | false | 325 | py | # coding=utf-8
from OTLMOW.OEFModel.EMObject import EMObject
# Generated with OEFClassCreator. To modify: extend, do not edit
class AWeg(EMObject):
"""Wegen : A-weg"""
typeURI = 'https://lgc.data.wegenenverkeer.be/ns/installatie#AWeg'
label = 'Autosnelweg'
def __init__(self):
super().__init__()
| [
"david.vlaminck@mow.vlaanderen.be"
] | david.vlaminck@mow.vlaanderen.be |
8e9d4e496e365e8cebd9c1fcdd4d5915c6a06d19 | b8e29b6e957b0a55571f7cffc4357666a43fb56e | /mxnet/insightface/insightface/src/data/glint2lst.py | 60f7cbea9faa5f680ca985c8d7ed8ea79d00d8a4 | [
"MIT",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] | permissive | aliyun/alibabacloud-aiacc-demo | b9bbe565021757ecaea0e7d7209632cbdb5cc8ab | 2e49deeb38d12d4af4c5e50bb15d731c4bbf4cf1 | refs/heads/master | 2023-05-14T08:09:33.067050 | 2023-05-04T08:19:51 | 2023-05-04T08:19:51 | 228,604,743 | 38 | 14 | Apache-2.0 | 2022-06-22T02:41:01 | 2019-12-17T11:46:44 | Python | UTF-8 | Python | false | false | 1,158 | py |
import sys
import os
import numpy as np
input_dir = sys.argv[1]
targets = sys.argv[2]
targets = targets.strip().split(',')
lmap = {}
for ds in targets:
#image_dir = os.path.join(input_dir, ds)
lmk_file = os.path.join(input_dir, "%s_lmk"%(ds))
if not os.path.exists(lmk_file):
lmk_file = os.path.join(input_dir, "%s_lmk.txt"%(ds))
if not os.path.exists(lmk_file):
continue
#print(ds)
idx = 0
for line in open(lmk_file, 'r'):
idx+=1
vec = line.strip().split(' ')
assert len(vec)==12 or len(vec)==11
image_file = os.path.join(input_dir, vec[0])
assert image_file.endswith('.jpg')
vlabel = -1 #test mode
if len(vec)==12:
label = int(vec[1])
if label in lmap:
vlabel = lmap[label]
else:
vlabel = len(lmap)
lmap[label] = vlabel
lmk = np.array([float(x) for x in vec[2:]], dtype=np.float32)
else:
lmk = np.array([float(x) for x in vec[1:]], dtype=np.float32)
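        # the 10 remaining values are 5 facial landmarks stored as (x, y)
        # pairs; transpose to a row of xs and a row of ys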
        lmk = lmk.reshape((5, 2)).T
        lmk_str = "\t".join([str(x) for x in lmk.flatten()])
print("0\t%s\t%d\t0\t0\t0\t0\t%s"%(image_file, vlabel, lmk_str))
#if idx>10:
# break
| [
"ziqi.yzq@alibaba-inc.com"
] | ziqi.yzq@alibaba-inc.com |
e85086b27f26e7ad3ddf0dcd9b1299ef6799cbcc | 169d809f45dedcaa3c7b1b49912d8b025abe18d9 | /factors.py | 6f66a11e8dd9fcecc3dbd419474103c0e3974875 | [] | no_license | bermec/challenges | 8a82d1d38d1ed1a0fc3f258443bc0054efc977a6 | 9fb092f20f12b4eaa808e758f00f482a49346c88 | refs/heads/master | 2021-10-08T05:05:56.803332 | 2018-12-08T00:20:20 | 2018-12-08T00:20:20 | 109,448,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | def factors(n):
lst = []
for x in range(1, n):
if n % x == 0:
lst.append(x)
return lst
def reduce_fraction(n, n2):
    # collect the distinct factors of both numbers
    master_lst = []
    master_lst2 = []
    master_lst.extend(factors(n))
    master_lst.extend(factors(n2))
    for item in master_lst:
        if item not in master_lst2:
            master_lst2.append(item)
    # divide out each shared factor; the largest one is encountered last, so
    # it determines the returned pair
    div, div2 = n, n2
    for num in master_lst2:
        if n % num == 0 and n2 % num == 0:
            div = n / num
            div2 = n2 / num
    return (int(div), int(div2))
if __name__ == '__main__':
    ans = factors(10 ** 3 - 1)
print(ans)
| [
"rog@pynguins.com"
] | rog@pynguins.com |
ce7ccdfe707353b7a81653921ba80ee0e0973723 | ea4e262f3dc18a089895fef08bedefc60b66e373 | /supervised_learning/0x0D-RNNs/5-bi_forward.py | ce1c6a1dd901ad3532e8ae1b7a72f5765d7c2bf5 | [] | no_license | d1sd41n/holbertonschool-machine_learning | 777899d4914e315883ba0c887d891c0c8ab01c8a | 5f86dee95f4d1c32014d0d74a368f342ff3ce6f7 | refs/heads/main | 2023-07-17T09:22:36.257702 | 2021-08-27T03:44:24 | 2021-08-27T03:44:24 | 317,399,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | #!/usr/bin/env python3
"""[summary]
Returns:
[type]: [description]
"""
import numpy as np
class BidirectionalCell:
"""[summary]
"""
def __init__(self, i, h, o):
"""[summary]
Args:
i ([type]): [description]
h ([type]): [description]
o ([type]): [description]
"""
self.Whf = np.random.normal(size=(i + h, h))
self.Whb = np.random.normal(size=(i + h, h))
self.Wy = np.random.normal(size=(i + h + o, o))
self.bhf = np.zeros((1, h))
self.bhb = np.zeros((1, h))
self.by = np.zeros((1, o))
def forward(self, h_prev, x_t):
"""[summary]
Args:
h_prev ([type]): [description]
x_t ([type]): [description]
Returns:
[type]: [description]
"""
h_x = np.concatenate((h_prev, x_t), axis=1)
return np.tanh(np.matmul(
h_x, self.Whf
) + self.bhf)
| [
"1498@holbertonschool.com"
] | 1498@holbertonschool.com |
44680252265c4e7606a0c2ce3a626f198f9c5331 | fdcab1845d5f8b98e126d0e99c92fefdd426c850 | /experiments/pollen_met.py | f2c970387f18d9db8fba1f92490d566a51d312e1 | [
"MIT"
] | permissive | jjsanderson/DataBot | 197d9157be152ce679fd97f58557be7373b3f7f9 | 22d86b9bad2cd12bef735065a68619d114bfd6b1 | refs/heads/main | 2022-05-14T16:30:53.195429 | 2021-09-22T14:53:18 | 2021-09-22T14:53:18 | 187,226,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """Test retrieving pollen data from Met Office forecasts.
Replacing Bendadryl-sponsored service queried by pypollen module, as
that's proved unreliable in practice (just stops providing responses),
with no error.
"""
# Register with Met Office DataPoint service:
# https://www.metoffice.gov.uk/datapoint
from clientsecrets import metkey
# Oh, nuts: pollen count isn't included in the site data. Ugh.
| [
"lists@quernstone.com"
] | lists@quernstone.com |
45fcc0bee0c3a2ecebf1f0434ab531597d3bf8fd | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_Class2981.py | fc7c0615444d12cc4ebb6913a25a09662303e053 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,244 | py | # qubit number=4
# total number=39
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=31
prog.cz(input_qubit[0],input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=33
prog.h(input_qubit[3]) # number=30
prog.x(input_qubit[3]) # number=11
prog.h(input_qubit[3]) # number=13
prog.cz(input_qubit[0],input_qubit[3]) # number=14
prog.h(input_qubit[1]) # number=18
prog.cz(input_qubit[3],input_qubit[1]) # number=19
prog.cx(input_qubit[3],input_qubit[0]) # number=36
prog.z(input_qubit[3]) # number=37
prog.cx(input_qubit[3],input_qubit[0]) # number=38
prog.x(input_qubit[3]) # number=35
prog.h(input_qubit[1]) # number=20
prog.rx(-3.141592653589793,input_qubit[3]) # number=26
prog.h(input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[2]) # number=17
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.h(input_qubit[0]) # number=27
prog.cz(input_qubit[1],input_qubit[0]) # number=28
prog.h(input_qubit[0]) # number=29
prog.cx(input_qubit[1],input_qubit[0]) # number=22
prog.cx(input_qubit[2],input_qubit[1]) # number=34
prog.x(input_qubit[1]) # number=23
prog.x(input_qubit[1]) # number=24
# circuit end
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('statevector_simulator')
sample_shot =8000
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_Class2981.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
61508d630274d9f4fcd2da8ef98105c85249f470 | 60611d5c40a73c0b7f420ef825b0721d0f4f5cb5 | /autodata/spiders/edmunds_feature_spider.py | ff43dc4128dec959cc229fcd7ba4dfdd83e06b4d | [] | no_license | Miksztowi/AutomotiveData | 315047e5b32e04cba72387bf3a1c341140dceba7 | fd413ad3b5672cf0ccde809033122f4a0adca60e | refs/heads/master | 2021-01-15T11:57:37.896995 | 2017-08-25T09:27:50 | 2017-08-25T09:27:50 | 99,636,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,355 | py | # -*- coding:utf-8 -*-
import scrapy
import MySQLdb
import json
from autodata.items import EdmundsItem
import logging
import autodata.settings as settings
class EdmundsFeatureSpider(scrapy.Spider):
logger = logging.getLogger(__name__)
name = 'edmunds_feature_spider'
custom_settings = {
'CONCURRENT_REQUESTS': 88,
'DOWNLOAD_DELAY': 0,
'LOG_FILE': 'edmunds_feature.log'
}
def __init__(self):
self.connect = MySQLdb.connect(
user=settings.DB_USER,
password=settings.DB_PASSWORD,
host=settings.DB_HOST,
db=settings.DB
)
self.cursor = self.connect.cursor()
def start_requests(self):
self.cursor.execute('SELECT id FROM edmunds_cars WHERE name IS NULL')
style_ids = [x[0] for x in self.cursor.fetchall()]
base_url = 'https://www.edmunds.com/api/groundwork/feature/styles?styleIds={}'
for style_id in style_ids:
url = base_url.format(style_id)
yield scrapy.Request(
url=url,
meta={'style_id': style_id}
)
def parse(self, response):
res_json = json.loads(response.text)
item = EdmundsItem()
if res_json.get('styles'):
styles = res_json.pop('styles')[0] # list
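            # a single styleId is requested per call (see start_requests), so
            # the 'styles' list holds exactly one entry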
features = styles['features']
item['name'] = styles['name']
item['id'] = styles['id']
item['baseMsrp'] = styles['baseMsrp']
item['msrpWithTypicalOptions'] = styles['msrpWithTypicalOptions']
item['mpg'] = styles['mpg']
item['totalSeating'] = styles['totalSeating']
item['colors'] = json.dumps(styles['colors'])
item['safety'] = json.dumps(features['Safety'])
item['comfort_convenience'] = json.dumps(features['Comfort & Convenience'])
item['performance'] = json.dumps(features['Performance'])
item['technology'] = json.dumps(features['Technology'])
item['fuel'] = json.dumps(features['Fuel'])
item['engine'] = json.dumps(features['Engine'])
item['measurements'] = json.dumps(features['Measurements'])
item['frontseats'] = json.dumps(features['Frontseats'])
item['rearseats'] = json.dumps(features['Rearseats'])
item['drive_train'] = json.dumps(features['Drive Train'])
item['power_feature'] = json.dumps(features['Power Feature'])
item['instrumentation'] = json.dumps(features['Instrumentation'])
item['suspension'] = json.dumps(features['Suspension'])
item['in_car_entertainment'] = json.dumps(features['In Car Entertainment'])
item['warranty'] = json.dumps(features['Warranty'])
item['telematics'] = json.dumps(features['Telematics'])
item['tires_and_wheels'] = json.dumps(features['Tires and Wheels'])
item['interior_options'] = json.dumps(features['Interior Options'])
item['exterior_options'] = json.dumps(features['Exterior Options'])
item['packages'] = json.dumps(features['Packages'])
return item
def spider_closed(self, spider):
self.cursor.close()
self.connect.close()
        self.connect.close()
| [
"binwengan@gmail.com"
] | binwengan@gmail.com |
15b03267a304db2a215fee50bb9987974ec5f0ce | 2f67ac3081bbb6ac190dd2b7ea981a6b510a42c7 | /projeto/produto/models.py | 53042281b04b8d6d2cd5d2471c753bc1fece286d | [] | no_license | jm0216/estoque | 2fec817336327a5088cd8ff43b786787475fa86f | 7a03bd61cade1e1056d2894a78afb37c4969715c | refs/heads/master | 2020-08-03T22:26:47.146230 | 2019-09-30T03:26:49 | 2019-09-30T03:26:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 880 | py | from django.db import models
from django.urls import reverse_lazy
class Produto(models.Model):
importado = models.BooleanField(default=False)
ncm = models.CharField('NCM', max_length=8)
produto = models.CharField(max_length=100, unique=True)
preco = models.DecimalField('preço', max_digits=7, decimal_places=2)
estoque = models.IntegerField('estoque atual')
estoque_minimo = models.PositiveIntegerField('estoque mínimo', default=0)
data = models.DateField(null=True, blank=True)
class Meta:
ordering = ('produto',)
def __str__(self):
return self.produto
def get_absolute_url(self):
return reverse_lazy('produto:produto_detail', kwargs={'pk': self.pk})
def to_dict_json(self):
return {
'pk': self.pk,
'produto': self.produto,
'estoque': self.estoque,
}
| [
"rg3915@yahoo.com.br"
] | rg3915@yahoo.com.br |
8424fb37043b1238fef3ebb6c071cfff717d5db1 | 75d8667735782cd1d0eb4877e52c89da5cd92dde | /nova/tests/unit/__init__.py | 545088bbe157e6cddd5b973e0d539e80500ae27d | [
"Apache-2.0"
] | permissive | bopopescu/nova-token | ffecfd3ec561936b7d9d7e691bc57383cde05436 | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | refs/heads/master | 2022-11-22T09:53:31.073483 | 2016-05-14T02:47:01 | 2016-05-15T22:02:55 | 282,105,621 | 0 | 0 | Apache-2.0 | 2020-07-24T02:42:19 | 2020-07-24T02:42:18 | null | UTF-8 | Python | false | false | 2,131 | py | begin_unit
comment|'# Copyright 2010 United States Government as represented by the'
nl|'\n'
comment|'# Administrator of the National Aeronautics and Space Administration.'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
string|'"""\n:mod:`nova.tests.unit` -- Nova Unittests\n=====================================================\n\n.. automodule:: nova.tests.unit\n :platform: Unix\n"""'
newline|'\n'
nl|'\n'
name|'import'
name|'eventlet'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
name|'import'
name|'objects'
newline|'\n'
nl|'\n'
name|'eventlet'
op|'.'
name|'monkey_patch'
op|'('
name|'os'
op|'='
name|'False'
op|')'
newline|'\n'
nl|'\n'
comment|'# NOTE(alaski): Make sure this is done after eventlet monkey patching otherwise'
nl|'\n'
comment|'# the threading.local() store used in oslo_messaging will be initialized to'
nl|'\n'
comment|'# threadlocal storage rather than greenthread local. This will cause context'
nl|'\n'
comment|'# sets and deletes in that storage to clobber each other.'
nl|'\n'
comment|'# NOTE(comstud): Make sure we have all of the objects loaded. We do this'
nl|'\n'
comment|'# at module import time, because we may be using mock decorators in our'
nl|'\n'
comment|'# tests that run at import time.'
nl|'\n'
name|'objects'
op|'.'
name|'register_all'
op|'('
op|')'
newline|'\n'
endmarker|''
end_unit
| [
"dmg@uvic.ca"
] | dmg@uvic.ca |
031519f8b4369daea30a2504dbd50ad7e005a5e3 | 0db0d08edd59df7cd634b66e2786bc1a0eb52048 | /httprunner/built_in.py | 32bbb851741b0732ed712492e06b36eeaa448cbb | [
"MIT"
] | permissive | liuyihuicaicloud/HttpRunner | 8bbbb25c51bb04439e8350a8d4187368e0138d85 | 0e63747c395e27b5d5952446ca9a86dd5ce15b95 | refs/heads/master | 2020-03-18T22:57:41.799491 | 2018-03-27T09:28:22 | 2018-03-27T09:28:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,864 | py | """
Built-in dependent functions used in YAML/JSON testcases.
"""
import json
import datetime
import random
import re
import string
import time
from httprunner.exception import ParamsError
from httprunner.utils import string_type
def gen_random_string(str_len):
""" generate random string with specified length
"""
return ''.join(
random.choice(string.ascii_letters + string.digits) for _ in range(str_len))
def get_timestamp(str_len=13):
""" get timestamp string, length can only between 0 and 16
"""
if isinstance(str_len, int) and 0 < str_len < 17:
return str(time.time()).replace(".", "")[:str_len]
raise ParamsError("timestamp length can only between 0 and 16.")
def get_current_date(fmt="%Y-%m-%d"):
""" get current date, default format is %Y-%m-%d
"""
return datetime.datetime.now().strftime(fmt)
""" built-in comparators
"""
def equals(check_value, expect_value):
assert check_value == expect_value
def less_than(check_value, expect_value):
assert check_value < expect_value
def less_than_or_equals(check_value, expect_value):
assert check_value <= expect_value
def greater_than(check_value, expect_value):
assert check_value > expect_value
def greater_than_or_equals(check_value, expect_value):
assert check_value >= expect_value
def not_equals(check_value, expect_value):
assert check_value != expect_value
def string_equals(check_value, expect_value):
assert str(check_value) == str(expect_value)
def length_equals(check_value, expect_value):
assert isinstance(expect_value, int)
assert len(check_value) == expect_value
def length_greater_than(check_value, expect_value):
assert isinstance(expect_value, int)
assert len(check_value) > expect_value
def length_greater_than_or_equals(check_value, expect_value):
assert isinstance(expect_value, int)
assert len(check_value) >= expect_value
def length_less_than(check_value, expect_value):
assert isinstance(expect_value, int)
assert len(check_value) < expect_value
def length_less_than_or_equals(check_value, expect_value):
assert isinstance(expect_value, int)
assert len(check_value) <= expect_value
def contains(check_value, expect_value):
assert isinstance(check_value, (list, tuple, dict, string_type))
assert expect_value in check_value
def contained_by(check_value, expect_value):
assert isinstance(expect_value, (list, tuple, dict, string_type))
assert check_value in expect_value
def type_match(check_value, expect_value):
def get_type(name):
if isinstance(name, type):
return name
elif isinstance(name, str):
try:
return __builtins__[name]
except KeyError:
raise ValueError(name)
else:
raise ValueError(name)
assert isinstance(check_value, get_type(expect_value))
def regex_match(check_value, expect_value):
assert isinstance(expect_value, string_type)
assert isinstance(check_value, string_type)
assert re.match(expect_value, check_value)
def startswith(check_value, expect_value):
assert str(check_value).startswith(str(expect_value))
def endswith(check_value, expect_value):
assert str(check_value).endswith(str(expect_value))
""" built-in hooks
"""
def get_charset_from_content_type(content_type):
""" extract charset encoding type from Content-Type
@param content_type
e.g.
application/json; charset=UTF-8
application/x-www-form-urlencoded; charset=UTF-8
@return: charset encoding type
UTF-8
"""
content_type = content_type.lower()
if "charset=" not in content_type:
return None
index = content_type.index("charset=") + len("charset=")
return content_type[index:]
def setup_hook_prepare_kwargs(method, url, kwargs):
if method == "POST":
content_type = kwargs.get("headers", {}).get("content-type")
if content_type and "data" in kwargs:
# if request content-type is application/json, request data should be dumped
if content_type.startswith("application/json"):
kwargs["data"] = json.dumps(kwargs["data"])
# if charset is specified in content-type, request data should be encoded with charset encoding
charset = get_charset_from_content_type(content_type)
if charset:
kwargs["data"] = kwargs["data"].encode(charset)
def setup_hook_httpntlmauth(method, url, kwargs):
if "httpntlmauth" in kwargs:
from requests_ntlm import HttpNtlmAuth
auth_account = kwargs.pop("httpntlmauth")
kwargs["auth"] = HttpNtlmAuth(
auth_account["username"], auth_account["password"])
def teardown_hook_sleep_1_secs(resp_obj):
""" sleep 1 seconds after request
"""
time.sleep(1)
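# Illustrative usage sketch (not part of the original module): these helpers
# are normally referenced by name from YAML/JSON testcases, but they can be
# exercised directly as plain functions.
if __name__ == "__main__":
    assert get_charset_from_content_type("application/json; charset=UTF-8") == "utf-8"
    assert len(get_timestamp(10)) == 10
    equals(1 + 1, 2)             # comparators raise AssertionError on mismatch
    length_equals([1, 2, 3], 3)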
| [
"httprunner"
] | httprunner |
4f4d7086f94610330443c76fdfe29be17caf5e0f | e690fa7a2e622f3c187b0349b37bca8fc40b2801 | /problem.py | e22603401e65ec441aafabf357efa839ca6c5a7f | [] | no_license | davidpendergast/proof-that-p-equals-np | 058503e8357b92de76c0438b634bbe37655816f6 | f9f4a59c2215ecffc2c466ee23de10bdc6e6b066 | refs/heads/master | 2020-07-03T05:11:38.270370 | 2016-11-19T08:04:10 | 2016-11-19T08:04:10 | 74,195,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,314 | py | ALL_PROBLEMS = []
def add_to_list(prob):
ALL_PROBLEMS.append(prob)
return prob
class Problem:
def __init__(self, name, problem_input, verifier):
"""
Args:
name (string): Name of problem.
input (:obj:`list` of int): Inputs that will be fed to Programs.
verifier (:lambda: `list` of int -> boolean): Polynomial-time function that checks
the validity of a program output.
"""
self.name = name
self._input = problem_input
self._verifier = verifier
def is_correct(self, program_output):
return self._verifier(program_output)
def input(self):
return self._input[:]
"""
Seeks a program that outputs at least one 7.
bf solution: +[+.]
"""
SEVEN_PROBLEM = add_to_list(Problem("Output a 7",[], lambda x : 7 in x))
"""
Seeks a program that outputs a single 3.
bf solution: +++.
"""
SINGLE_THREE_PROBLEM = add_to_list(Problem("Output a Single 3", [], lambda x: x == [3]))
"""
Seeks a program that outputs the given inputs.
bf solution: ,[.,]
"""
_oti_input = [36, 60, 24, 5]
OUTPUT_THE_INPUTS = add_to_list(Problem("Identity Function", _oti_input, lambda x: x == _oti_input))
"""
Swap two inputs.
bf solution: ,>,.<.
P = 251855
"""
SWAP_TWO_INPUTS = add_to_list(Problem("Swap Two Inputs", [32, -4], lambda x: len(x) == 2 and x[0] == -4 and x[1] == 32))
"""
Find the sum of two inputs.
bf solution: ,>,[<+>-]<.
P = big
"""
_v1 = 25
_v2 = 11
SUM_TWO_INPUTS = add_to_list(Problem("Sum Two Inputs", [_v1, _v2], lambda x: len(x) == 1 and x[0] == _v1 + _v2))
"""
Classic NP-Complete zero sum subset problem.
bf poly-time solution: ????
"""
_zss_input_1 = [3, -3]
_zss_input_2 = [3, -2, -1]
_zss_input_3 = [1, 3, -5, 2, -4]
_input = _zss_input_1
def _check_zss(problem_input, output):
    # a valid certificate: a non-empty selection from the input, with no
    # repeats, that sums to zero
    return (len(output) > 0
            and all(i in problem_input for i in output)
            and len(set(output)) == len(output)
            and sum(output) == 0)
ZERO_SUM_SUBSET_1 = add_to_list(Problem("Zero Sum Subset 1", _zss_input_1, lambda x: _check_zss(_zss_input_1, x)))
ZERO_SUM_SUBSET_2 = add_to_list(Problem("Zero Sum Subset 2", _zss_input_2, lambda x: _check_zss(_zss_input_2, x)))
ZERO_SUM_SUBSET_3 = add_to_list(Problem("Zero Sum Subset 3", _zss_input_3, lambda x: _check_zss(_zss_input_3, x)))
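# Illustrative sketch (not part of the original module): each Problem couples
# an input list with a polynomial-time verifier over a candidate program's
# output.
if __name__ == "__main__":
    assert SEVEN_PROBLEM.is_correct([1, 7])        # any output containing a 7
    assert SINGLE_THREE_PROBLEM.is_correct([3])
    assert not ZERO_SUM_SUBSET_1.is_correct([3])   # sums to 3, not 0
    assert ZERO_SUM_SUBSET_1.is_correct([3, -3])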
| [
"dlp75@case.edu"
] | dlp75@case.edu |
8deaca07f905b80cb87f8a8e613ca7cdf7d98a56 | 371889e50171ae59d1ca6bb4b8f88ceabf8e105e | /Fundamentals/DataTypesAndVariablesExercise/T01IntegerOperations.py | f6cf921351dbd14f125c716aa4684147964e1b68 | [] | no_license | RuzhaK/pythonProject | ca56c98df029896ca0658d5b2d103e21bbfeb71c | 1f2f05f071d0efd356996731831b375aa255bec3 | refs/heads/master | 2023-03-14T06:56:34.463786 | 2021-03-08T07:52:37 | 2021-03-08T07:52:37 | 343,766,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | a=int(input())
b=int(input())
c=int(input())
d=int(input())  # fixed: the original read the third value into c twice; d is presumably the fourth input
print((a + b) // c * d)  # hypothetical completion (not in the original): the usual "Integer Operations" output | [
"rkaraatanasova@gmail.com"
] | rkaraatanasova@gmail.com |
c24eb7d9bd2351befb0b5c1422aba4adf9be007a | 0386aecc51ca70621598d892d33847f5ee1ff393 | /server/app.py | 5d82acd21bb11d15f5e823f85252bbc05f1cff0a | [] | no_license | scmmishra/noteblock-beta | f82f0e3347ad10262f8512ceea59e184b78549fb | 33b4ab54cc04ba8462ed26af65b1436571cc52a5 | refs/heads/master | 2020-04-29T18:24:43.195975 | 2019-03-21T05:37:27 | 2019-03-21T05:37:27 | 176,323,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,789 | py | from datetime import datetime
from flask import Flask, jsonify, request
import uuid
from flask_cors import CORS
# configuration
DEBUG = True
# instantiate the app
app = Flask(__name__)
app.config.from_object(__name__)
CORS(app)
NOTES = [{
"title": "Mumbai Hackathon",
"id": uuid.uuid4().hex,
"author": "Shivam Mishra",
"created": datetime.now(),
"note": {
"type": "doc",
"content": [
{
"type": "heading",
"attrs": {
"level": 1
},
"content": [
{
"type": "text",
"text": "This is Mumbai Hackathon"
}
]
},
{
"type": "paragraph",
"content": [
{
"type": "text",
"text": "Mumbai Hackathon is Annual Open Source Hackathon organized by the ERPNext Foundation and Don Bosco Institute of Technology. Every year, we welcome students, developers and designers from across the country to create incredible open source projects at Mumbai's largest Open Source Hackathon."
}
]
},
{
"type": "paragraph",
"content": [
{
"type": "text",
"text": "Here's the "
},
{
"type": "text",
"marks": [
{
"type": "link",
"attrs": {
"href": "https://github.com/MumbaiHackathon"
}
}
],
"text": "repositories"
},
{
"type": "text",
"text": " of the amazing projects built at Mumbai Hackathon."
}
]
},
{
"type": "heading",
"attrs": {
"level": 2
},
"content": [
{
"type": "text",
"text": "Details for Mumbai Hackathon 2019"
}
]
},
{
"type": "bullet_list",
"content": [
{
"type": "list_item",
"content": [
{
"type": "paragraph",
"content": [
{
"type": "text",
"text": "Date: 16th & 17th March 2019"
}
]
}
]
},
{
"type": "list_item",
"content": [
{
"type": "paragraph",
"content": [
{
"type": "text",
"text": "Timings:"
}
]
},
{
"type": "bullet_list",
"content": [
{
"type": "list_item",
"content": [
{
"type": "paragraph",
"content": [
{
"type": "text",
"text": "9 AM - 6 PM on 16th March."
}
]
}
]
},
{
"type": "list_item",
"content": [
{
"type": "paragraph",
"content": [
{
"type": "text",
"text": "9 AM - 12 PM on 17th March."
}
]
}
]
}
]
}
]
},
{
"type": "list_item",
"content": [
{
"type": "paragraph",
"content": [
{
"type": "text",
"text": "Prize Amount: Rs. 50,000"
}
]
}
]
}
]
}
]
}
},
{
"title": "Some More Note",
"id": uuid.uuid4().hex,
"author": "Shivam Mishra",
"created": datetime.now(),
"note": {
"type": "doc",
"content": [
{
"type": "paragraph",
"content": [
{
"type": "text",
"text": "This is some text."
}
]
}
]
}}]
@app.route("/api/notes", methods=['GET', 'POST'])
def notes():
response_object = {'status': 'success'}
response_object['notes'] = NOTES
return jsonify(response_object)
@app.route("/api/note/<note_id>", methods=['GET', 'POST'])
def single_note(note_id):
response_object = {'status': 'success'}
note = list(filter(lambda d: d['id'] == note_id, NOTES))
response_object['note'] = note
return jsonify(response_object)
@app.route("/api/ping", methods=['GET', 'POST'])
def ping():
response_object = {'status': 'success'}
response_object['message'] = "pong"
return jsonify(response_object)
@app.route("/")
def index():
return "Hello"
if __name__ == "__main__":
app.run() | [
"scm.mymail@gmail.com"
] | scm.mymail@gmail.com |
73f02b54e142ca8847ebe4278f41f5c50c7b370a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03242/s879564820.py | 77ea389167a1e41f6fb3e2ee175b767be724c41a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | s = list(str(input()))
ans = ""
for i in range(3):
if s[i] == "1":
ans = ans + "9"
else:
ans = ans + "1"
print(ans)
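# Equivalent one-liner sketch (not part of the original submission); left
# commented out because stdin has already been consumed above:
# print(input().translate(str.maketrans("19", "91")))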
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a8e6e4c4bbe79dc52bb7a9f7155614c35f8c84aa | 86162656dc87928c35ac73f8ab1b3475304557c7 | /arelle/examples/plugin/validateSchemaLxml.py | 16ba4a7e7cbc2edb499088729a072b7617abe3bb | [
"Apache-2.0"
] | permissive | lmsanch/Arelle | 7558037f191cce70d71695e3192420af48c6984f | 45a8f922695832990653ec230e3fd3ffe8d3e4fa | refs/heads/master | 2021-01-18T16:48:24.104496 | 2013-04-13T07:26:51 | 2013-04-13T07:26:51 | 9,717,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,109 | py | '''
Save DTS is an example of a plug-in to both GUI menu and command line/web service
that will save the files of a DTS into a zip file.
(c) Copyright 2012 Mark V Systems Limited, All rights reserved.
'''
import threading
from lxml import etree
def validateSchemaWithLxml(modelXbrl, cntlr=None):
class schemaResolver(etree.Resolver):
def resolve(self, url, id, context):
if url.startswith("file:///__"):
url = importedFilepaths[int(url[10:])]
filepath = modelXbrl.modelManager.cntlr.webCache.getfilename(url)
return self.resolve_filename(filepath, context)
entryDocument = modelXbrl.modelDocument
# test of schema validation using lxml (trial experiment, commented out for production use)
from arelle import ModelDocument
imports = []
importedNamespaces = set()
importedFilepaths = []
'''
for mdlSchemaDoc in entryDocument.referencesDocument.keys():
if (mdlSchemaDoc.type == ModelDocument.Type.SCHEMA and
mdlSchemaDoc.targetNamespace not in importedNamespaces):
# actual file won't pass through properly, fake with table reference
imports.append('<xsd:import namespace="{0}" schemaLocation="file:///__{1}"/>'.format(
mdlSchemaDoc.targetNamespace, len(importedFilepaths)))
importedNamespaces.add(mdlSchemaDoc.targetNamespace)
importedFilepaths.append(mdlSchemaDoc.filepath)
'''
def importReferences(referencingDocument):
for mdlSchemaDoc in referencingDocument.referencesDocument.keys():
if (mdlSchemaDoc.type == ModelDocument.Type.SCHEMA and
mdlSchemaDoc.targetNamespace not in importedNamespaces):
importedNamespaces.add(mdlSchemaDoc.targetNamespace)
importReferences(mdlSchemaDoc) # do dependencies first
# actual file won't pass through properly, fake with table reference
imports.append('<xsd:import namespace="{0}" schemaLocation="file:///__{1}"/>'.format(
mdlSchemaDoc.targetNamespace, len(importedFilepaths)))
importedFilepaths.append(mdlSchemaDoc.filepath)
importReferences(entryDocument)
# add schemas used in xml validation but not DTS discovered
for mdlDoc in modelXbrl.urlDocs.values():
if mdlDoc.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.LINKBASE):
schemaLocation = mdlDoc.xmlRootElement.get("{http://www.w3.org/2001/XMLSchema-instance}schemaLocation")
if schemaLocation:
ns = None
for entry in schemaLocation.split():
if ns is None:
ns = entry
else:
if ns not in importedNamespaces:
imports.append('<xsd:import namespace="{0}" schemaLocation="file:///__{1}"/>'.format(
ns, len(importedFilepaths)))
importedNamespaces.add(ns)
importedFilepaths.append(entry)
ns = None
schemaXml = '<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">\n{0}</xsd:schema>\n'.format(
'\n'.join(imports))
# trace schema files referenced
with open("c:\\temp\\test.xml", "w") as fh:
fh.write(schemaXml)
modelXbrl.modelManager.showStatus(_("lxml validator loading xml schema"))
schema_root = etree.XML(schemaXml)
import time
startedAt = time.time()
parser = etree.XMLParser()
parser.resolvers.add(schemaResolver())
schemaDoc = etree.fromstring(schemaXml, parser=parser, base_url=entryDocument.filepath+"-dummy-import.xsd")
schema = etree.XMLSchema(schemaDoc)
from arelle.Locale import format_string
modelXbrl.info("info:lxmlSchemaValidator", format_string(modelXbrl.modelManager.locale,
_("schema loaded in %.2f secs"),
time.time() - startedAt))
modelXbrl.modelManager.showStatus(_("lxml schema validating"))
# check instance documents and linkbases (sort for inst doc before linkbases, and in file name order)
for mdlDoc in sorted(modelXbrl.urlDocs.values(), key=lambda mdlDoc: (-mdlDoc.type, mdlDoc.filepath)):
if mdlDoc.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.LINKBASE):
startedAt = time.time()
docXmlTree = etree.parse(mdlDoc.filepath)
modelXbrl.info("info:lxmlSchemaValidator", format_string(modelXbrl.modelManager.locale,
_("schema validated in %.3f secs"),
time.time() - startedAt),
modelDocument=mdlDoc)
if not schema.validate(docXmlTree):
for error in schema.error_log:
modelXbrl.error("lxmlSchema:{0}".format(error.type_name.lower()),
error.message,
modelDocument=mdlDoc,
sourceLine=error.line)
modelXbrl.modelManager.showStatus(_("lxml validation done"), clearAfter=3000)
if cntlr is not None:
# if using GUI controller, not cmd line or web service, select the errors window when done
cntlr.uiThreadQueue.put((cntlr.logSelect, []))
def validateSchemaWithLxmlMenuEntender(cntlr, validationmenu):
# Insert as 2nd menu item for the lxml schema validation
validationmenu.insert_command(1, label="Validate schema with lxml",
underline=0,
command=lambda: validateSchemaWithLxmlMenuCommand(cntlr) )
def validateSchemaWithLxmlMenuCommand(cntlr):
# save DTS menu item has been invoked
if cntlr.modelManager is None or cntlr.modelManager.modelXbrl is None:
cntlr.addToLog(_("No taxonomy loaded."))
return
# do the schema validation in background (and afterwards focus on GUI messages pane)
thread = threading.Thread(target=lambda dts=cntlr.modelManager.modelXbrl, c=cntlr: validateSchemaWithLxml(dts, c))
thread.daemon = True
thread.start()
def validateSchemaWithLxmlCommandLineOptionExtender(parser):
# extend command line options with a save DTS option
parser.add_option("--validateSchemaWithLxml",
action="store_true",
dest="validateSchemaLxml",
help=_("Validate the schema with lxml (experimental)"))
def validateSchemaWithLxmlCommandLineXbrlRun(cntlr, options, modelXbrl):
# extend XBRL-loaded run processing for this option
if options.validateSchemaLxml:
if cntlr.modelManager is None or cntlr.modelManager.modelXbrl is None:
cntlr.addToLog(_("No taxonomy loaded."))
return
validateSchemaWithLxml(cntlr.modelManager.modelXbrl)
'''
Do not use _( ) in pluginInfo itself (it is applied later, after loading
'''
__pluginInfo__ = {
'name': 'Validate Schema with Lxml',
'version': '0.9',
'description': "This plug-in provides schema validation using lxml. As of 2012-05 "
" lxml does not properly schema validate XBRL schemas, which is why"
" it is provided in a plug-in instead of the main build. "
"For the GUI, this feature is inserted to the tools->validation menu 2nd position. "
"This is an experimental feature, not suitable for XBRL production use until lxml"
" schema validation becomes reliable for XBRL schemas.",
'license': 'Apache-2',
'author': 'Mark V Systems Limited',
'copyright': '(c) Copyright 2012 Mark V Systems Limited, All rights reserved.',
# classes of mount points (required)
'CntlrWinMain.Menu.Validation': validateSchemaWithLxmlMenuEntender,
'CntlrCmdLine.Options': validateSchemaWithLxmlCommandLineOptionExtender,
'CntlrCmdLine.Xbrl.Run': validateSchemaWithLxmlCommandLineXbrlRun,
}
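# Illustrative sketch (not part of the plug-in): enabling it from the command
# line. "--plugins" and "--file" are standard arelle CLI options; the
# entry-point path and instance file name are assumptions.
if __name__ == "__main__":
    import subprocess
    subprocess.run(["python", "arelleCmdLine.py",        # hypothetical entry point
                    "--plugins", "validateSchemaLxml.py",
                    "--file", "instance.xbrl",           # hypothetical instance
                    "--validateSchemaWithLxml"])         # option registered above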
| [
"fischer@markv.com"
] | fischer@markv.com |
a80a43bf9df1f79df7c4d2f5247a091075e74414 | 97f38bc0dff9498c43d13f15f4b26000874a840f | /pysp/plugins/phboundextension.py | 3fb85820612be72973a048cca3e7bf3ad0e437e9 | [
"BSD-3-Clause"
] | permissive | tayucanjujieyihan/pysp | 2975330f3a7f1c2aa56d9a69be2bdd08a632d3e9 | 98dbc9f6d500b0b2485a89bb22813e6c51b64411 | refs/heads/main | 2023-05-06T17:33:07.306607 | 2021-05-26T22:44:28 | 2021-05-26T22:44:28 | 442,712,534 | 1 | 0 | NOASSERTION | 2021-12-29T08:43:26 | 2021-12-29T08:43:26 | null | UTF-8 | Python | false | false | 12,275 | py | # ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
# May 2015 issue: reals really should not be *fixed* to compute an inner bound
# It seems like a lot to change their bounds, but that is probably the way to go.
from __future__ import division
import os
from pyomo.common.plugin import implements, alias, SingletonPlugin
from pysp import phextension
from pyomo.core.base import minimize
from pysp.phboundbase import (_PHBoundBase,
ExtractInternalNodeSolutionsforInner)
class _PHBoundExtensionImpl(_PHBoundBase):
def __init__(self):
_PHBoundBase.__init__(self)
def _iteration_k_bound_solves(self, ph, storage_key):
# storage key is for results (e.g. the ph iter number)
# ** Some code might use the values stored in the scenario solutions
# to perform a weighted vote in the case of discrete
# variables, so it is important that we execute this
# before perform any new subproblem solves.
# candidate_sol is sometimes called xhat
try:
candidate_sol = ExtractInternalNodeSolutionsforInner(ph)
        except Exception:
            print("Failed to extract candidate xhat for "
"inner bound computation using xhat_method %s. "
"Skipping inner bound computation."
% (ph._xhat_method))
candidate_sol = None
# Caching the current set of ph solutions so we can restore
# the original results. We modify the scenarios and re-solve -
# which messes up the warm-start, which can seriously impact
# the performance of PH. plus, we don't want lower bounding to
# impact the primal PH in any way - it should be free of any
# side effects.
self.CachePHSolution(ph)
# Save the current fixed state and fix queue.
self.RelaxPHFixedVariables(ph)
# Assuming the weight terms are already active but proximal
# terms need to be deactivated deactivate all proximal terms
# and activate all weight terms.
self.DeactivatePHObjectiveProximalTerms(ph)
if candidate_sol is not None:
# Deactivate the weight terms.
self.DeactivatePHObjectiveWeightTerms(ph)
# Fix all non-leaf stage variables involved
# in non-anticipativity conditions to the most
# recently computed xbar (or something like it)
# integers should be truly fixed, but reals require special care
self.FixScenarioTreeVariables(ph, candidate_sol)
# now change over to finding a feasible incumbent.
if ph._verbose:
print("Computing objective %s bound" %
("inner" if self._is_minimizing else "outer"))
failures = ph.solve_subproblems(warmstart=not ph._disable_warmstarts,
exception_on_failure=False)
if len(failures):
print("Failed to compute %s bound at xhat due to "
"one or more solve failures" %
("inner" if self._is_minimizing else "outer"))
self._inner_bound_history[storage_key] = \
float('inf') if self._is_minimizing else float('-inf')
self._inner_status_history[storage_key] = self.STATUS_SOLVE_FAILED
else:
if ph._verbose:
print("Successfully completed PH bound extension "
"fixed-to-xhat solves for iteration %s\n"
"- solution statistics:\n" % (storage_key))
if ph._scenario_tree.contains_bundles():
ph.report_bundle_objectives()
ph.report_scenario_objectives()
# Compute the inner bound on the objective function.
IBval, IBstatus = self.ComputeInnerBound(ph, storage_key)
self._inner_bound_history[storage_key] = IBval
self._inner_status_history[storage_key] = IBstatus
# Undo FixScenarioTreeVariables
self.RestoreLastPHChange(ph)
# Undo DeactivatePHObjectiveWeightTerms
self.RestoreLastPHChange(ph)
else:
if self._is_minimizing:
self._inner_bound_history[storage_key] = float('inf')
else:
self._inner_bound_history[storage_key] = float('-inf')
self._inner_status_history[storage_key] = self.STATUS_NONE
# push the updated inner bound to PH, for reporting purposes.
ph._update_reported_bounds(inner = self._inner_bound_history[storage_key])
# It is possible weights have not been pushed to instance
# parameters (or transmitted to the phsolverservers) at this
# point.
ph._push_w_to_instances()
failures = ph.solve_subproblems(warmstart=not ph._disable_warmstarts,
exception_on_failure=False)
if len(failures):
print("Failed to compute duality-based bound due to "
"one or more solve failures")
self._outer_bound_history[storage_key] = \
float('-inf') if self._is_minimizing else float('inf')
self._outer_status_history[storage_key] = self.STATUS_SOLVE_FAILED
else:
if ph._verbose:
print("Successfully completed PH bound extension "
"weight-term only solves for iteration %s\n"
"- solution statistics:\n" % (storage_key))
if ph._scenario_tree.contains_bundles():
ph.report_bundle_objectives()
ph.report_scenario_objectives()
# Compute the outer bound on the objective function.
self._outer_bound_history[storage_key], \
self._outer_status_history[storage_key] = \
self.ComputeOuterBound(ph, storage_key)
ph._update_reported_bounds(outer = self._outer_bound_history[storage_key])
# Restore ph to its state prior to entering this method (e.g.,
# fixed variables, scenario solutions, proximal terms)
self.RestorePH(ph)
############ Begin Callback Functions ##############
def reset(self, ph):
"""Invoked to reset the state of a plugin to that of post-construction"""
self.__init__()
def pre_ph_initialization(self,ph):
"""
Called before PH initialization.
"""
pass
def post_instance_creation(self, ph):
"""
Called after PH initialization has created the scenario
instances, but before any PH-related
weights/variables/parameters/etc are defined!
"""
pass
def post_ph_initialization(self, ph):
"""
Called after PH initialization
"""
if ph._verbose:
print("Invoking post initialization callback "
"in phboundextension")
self._is_minimizing = True if (ph._objective_sense == minimize) else False
# TODO: Check for ph options that may not be compatible with
# this plugin and warn / raise exception
# grab the update interval from the environment variable, if
# it exists.
update_interval_variable_name = "PHBOUNDINTERVAL"
update_interval_file_name = "PHB_.DAT"
if os.path.isfile(update_interval_file_name):
print("phboundextension is getting the update interval from file=",
update_interval_file_name)
            with open(update_interval_file_name) as ifile:
                ifileval = ifile.read().strip()
            try:
                # the file contents arrive as a string, so parse before use;
                # the original isinstance(ifileval, int) check was always False
                self._update_interval = int(ifileval)
                print("update interval=", self._update_interval)
            except ValueError:
                raise RuntimeError("The value must be of type integer, but the value read=" + str(ifileval))
elif update_interval_variable_name in os.environ:
self._update_interval = int(os.environ[update_interval_variable_name])
print("phboundextension using update interval="
+str(self._update_interval)+", extracted from "
"environment variable="+update_interval_variable_name)
else:
print("phboundextension using default update "
"interval="+str(self._update_interval))
def post_iteration_0_solves(self, ph):
"""
Called after the iteration 0 solves
"""
if ph._verbose:
print("Invoking post iteration 0 solve callback "
"in phboundextension")
if ph._ph_warmstarted:
print("PH warmstart detected. Bound computation requires solves "
"after iteration 0.")
self.pre_iteration_k_solves(ph)
return
# Always compute a lower/upper bound here because it requires
# no work. The instances (or bundles) have already been
# solved with the original (non-PH-augmented) objective and
# are loaded with results.
#
# Note: We will still obtain a bound using the weights
# computed from PH iteration 0 in the
# pre_iteration_k_solves callback.
#
ph_iter = None
# Note: It is important that the mipgap is not adjusted
# between the time after the subproblem solves
# and before now.
self._outer_bound_history[ph_iter], \
self._outer_status_history[ph_iter] = \
self.ComputeOuterBound(ph, ph_iter)
# dlw May 2016: the reported bound gets set for general iterations right after
# assignment to the history, so we do it here also
ph._update_reported_bounds(outer = self._outer_bound_history[ph_iter])
def post_iteration_0(self, ph):
"""
Called after the iteration 0 solves, averages computation, and weight computation
"""
pass
def pre_iteration_k_solves(self, ph):
"""
Called immediately before the iteration k solves
"""
if ph._verbose:
print("Invoking pre iteration k solve callback "
"in phboundextension")
#
# Note: We invoke this callback pre iteration k in order to
# obtain a PH bound using weights computed from the
# PREVIOUS iteration's scenario solutions (including
# those of iteration zero).
#
ph_iter = ph._current_iteration-1
if (ph_iter % self._update_interval) != 0:
return
self._iteration_k_bound_solves(ph, ph_iter)
def post_iteration_k_solves(self, ph):
"""
Called after the iteration k solves!
"""
pass
def post_iteration_k(self, ph):
"""
Called after the iteration k is finished, after weights have been updated!
"""
pass
def post_ph_execution(self, ph):
"""
Called after PH has terminated!
"""
if ph._verbose:
print("Invoking post execution callback in phboundextension")
#
# Note: We invoke this callback in order to compute a bound
# using the weights obtained from the final PH
# iteration.
#
ph_iter = ph._current_iteration
self._iteration_k_bound_solves(ph, ph_iter)
self.ReportBoundHistory()
self.ReportBestBound()
class phboundextension(SingletonPlugin, _PHBoundExtensionImpl):
implements(phextension.IPHExtension)
alias("phboundextension")
def __init__(self):
_PHBoundExtensionImpl.__init__(self)
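# Illustrative sketch (not part of the plug-in): choosing how often bounds are
# recomputed via the environment variable read in post_ph_initialization.
if __name__ == "__main__":
    os.environ["PHBOUNDINTERVAL"] = "5"   # recompute inner/outer bounds every 5 PH iterations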
| [
"jsiirola@users.noreply.github.com"
] | jsiirola@users.noreply.github.com |
a40ffcdf4abd0ed80e5920c73394506cb415f90e | f3b233e5053e28fa95c549017bd75a30456eb50c | /bace_input/L4D/4D-3K_wat_20Abox/set_2.py | 63587ba7792a0b793966efb059f21c70d4a63902 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | import os
dir = '/mnt/scratch/songlin3/run/bace/L4D/wat_20Abox/ti_one-step/4D_3K/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_2.in'
temp_pbs = filesdir + 'temp_2.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_2.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_2.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
68d772854072130cf95bf0eaa72dad9bbf69f7b9 | eb9da50f89a0ffaf6e46972f06abba05ea2e4132 | /footmark/ess/configuration.py | 582bbd055622fb2580b0cb41d110e411f12ca50c | [
"Apache-2.0"
] | permissive | alibaba/footmark | 0cf8e09ccf504541a3871e4026f8c7674e6c803c | bf698b5b4a0995dd84f81e7c4e9424cd9e120027 | refs/heads/master | 2023-03-12T21:15:09.244440 | 2022-07-18T08:09:18 | 2022-07-18T08:09:18 | 120,717,365 | 13 | 33 | Apache-2.0 | 2022-07-18T08:05:16 | 2018-02-08T05:52:28 | Python | UTF-8 | Python | false | false | 3,174 | py | """
Represents an ECS Instance
"""
from footmark.ess.essobject import TaggedESSObject
class ScalingConfiguration(TaggedESSObject):
"""
Represents an scaling configuration.
"""
def __init__(self, connection=None):
super(ScalingConfiguration, self).__init__(connection)
self.tags = {}
def __repr__(self):
return 'Scaling Configuration:%s' % self.id
def __getattr__(self, name):
if name == 'id':
return self.scaling_configuration_id
if name == 'name':
return self.scaling_configuration_name
if name == 'group_id':
return self.scaling_group_id
if name in ('state', 'status'):
return self.lifecycle_state
raise AttributeError("Object {0} does not have attribute {1}".format(self.__repr__(), name))
def __setattr__(self, name, value):
if name == 'id':
self.scaling_configuration_id = value
if name == 'name':
self.scaling_configuration_name = value
if name == 'group_id':
self.scaling_group_id = value
if name == 'lifecycle_state':
value = value.lower()
if name in ('state', 'status'):
self.lifecycle_state = value
if name == 'tags' and value:
v = {}
for tag in value['tag']:
if tag.get('tag_key'):
v[tag.get('tag_key')] = tag.get('tag_value', None)
value = v
super(TaggedESSObject, self).__setattr__(name, value)
def _update(self, updated):
self.__dict__.update(updated.__dict__)
def update(self, validate=False):
"""
Update the instance's state information by making a call to fetch
the current instance attributes from the service.
:type validate: bool
:param validate: By default, if ECS returns no data about the
instance the update method returns quietly. If
the validate param is True, however, it will
raise a ValueError exception if no data is
returned from ECS.
"""
rs = self.connection.describe_configurations(self.scaling_group_id, [self.id])
if len(rs) > 0:
for r in rs:
if r.id == self.id:
self._update(r)
elif validate:
raise ValueError('%s is not a valid Scaling Configuration ID' % self.id)
return self.state
def active(self):
"""
Start the instance.
"""
return self.connection.start_instances([self.id])
def inactive(self, force=False):
"""
Stop the instance
:type force: bool
:param force: Forces the instance to stop
:rtype: list
:return: A list of the instances stopped
"""
return self.connection.stop_instances([self.id], force)
def terminate(self):
"""
        Terminate the scaling configuration.
"""
return self.connection.terminate_configuration(self.id)
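# Illustrative sketch (not part of the original module): refreshing one
# configuration's state; the connection object and identifiers are assumptions.
def _example_refresh(ess_connection, configuration_id, scaling_group_id):
    cfg = ScalingConfiguration(connection=ess_connection)
    cfg.id = configuration_id            # e.g. "asc-xxxxxxxx" (hypothetical)
    cfg.group_id = scaling_group_id      # e.g. "asg-xxxxxxxx" (hypothetical)
    return cfg.update(validate=True)     # raises ValueError if the id is gone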
| [
"guimin.hgm@alibaba-inc.com"
] | guimin.hgm@alibaba-inc.com |
0ebc946b6fbaa7618e2639c08e22675bae960431 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-3/4abe9ff1444f31e42faa13ab9b760f0384a691ee-<threshold_minimum>-bug.py | 82fe2af14cd0fabfd2d4bbd4fa3d7977f92ff5b0 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,608 | py | def threshold_minimum(image, nbins=256, max_iter=10000):
    '''Return threshold value based on minimum method.

    The histogram of the input `image` is computed and smoothed until there are
    only two maxima. Then the minimum in between is the threshold value.

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    nbins : int, optional
        Number of bins used to calculate histogram. This value is ignored for
        integer arrays.
    max_iter: int, optional
        Maximum number of iterations to smooth the histogram.

    Returns
    -------
    threshold : float
        Upper threshold value. All pixels with an intensity higher than
        this value are assumed to be foreground.

    Raises
    ------
    RuntimeError
        If unable to find two local maxima in the histogram or if the
        smoothing takes more than 1e4 iterations.

    References
    ----------
    .. [1] C. A. Glasbey, "An analysis of histogram-based thresholding
           algorithms," CVGIP: Graphical Models and Image Processing,
           vol. 55, pp. 532-537, 1993.
    .. [2] Prewitt, JMS & Mendelsohn, ML (1966), "The analysis of cell
           images", Annals of the New York Academy of Sciences 128: 1035-1053
           :DOI:`10.1111/j.1749-6632.1965.tb11715.x`

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_minimum(image)
    >>> binary = image > thresh
    '''
def find_local_maxima_idx(hist):
maximum_idxs = list()
direction = 1
for i in range((hist.shape[0] - 1)):
if (direction > 0):
if (hist[(i + 1)] < hist[i]):
direction = (- 1)
maximum_idxs.append(i)
elif (hist[(i + 1)] > hist[i]):
direction = 1
return maximum_idxs
(hist, bin_centers) = histogram(image.ravel(), nbins, source_range='image')
smooth_hist = np.copy(hist).astype(np.float64)
for counter in range(max_iter):
smooth_hist = ndi.uniform_filter1d(smooth_hist, 3)
maximum_idxs = find_local_maxima_idx(smooth_hist)
if (len(maximum_idxs) < 3):
break
if (len(maximum_idxs) != 2):
raise RuntimeError('Unable to find two maxima in histogram')
elif (counter == (max_iter - 1)):
        raise RuntimeError('Maximum iteration reached for histogram smoothing')
threshold_idx = np.argmin(smooth_hist[maximum_idxs[0]:(maximum_idxs[1] + 1)])
return bin_centers[(maximum_idxs[0] + threshold_idx)] | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
7620f9cf293949dd49959feab537b7f6f7f2e31e | 0ca41d628d7b34cc27487e889af62d1da17350bd | /openstack/network/v2/statuses.py | 99068d276846df3d3bbfeaa63cd46efe9f7b1250 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ArvinSong/huaweicloud-sdk-python | 9d5fcaf2ff93143de0e676a2fcc99f9a4b79fd9f | 227cd7f68a07974b16794c371f110d1927571fd0 | refs/heads/master | 2020-08-08T15:08:32.276961 | 2019-09-30T03:52:22 | 2019-09-30T03:52:22 | 213,855,251 | 1 | 0 | NOASSERTION | 2019-10-09T07:52:23 | 2019-10-09T07:52:23 | null | UTF-8 | Python | false | false | 1,186 | py | # -*- coding:utf-8 -*-
# Copyright 2018 Huawei Technologies Co.,Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from openstack import resource2
from openstack.network import network_service
class Statuses(resource2.Resource):
resource_key = 'statuses'
resources_key = 'statuses'
base_path = '/lbaas/loadbalancers/%(loadbalance_id)s/statuses'
service = network_service.NetworkService()
allow_create = False
allow_get = True
allow_update = False
allow_delete = False
allow_list = False
# loadbalancer
loadbalancer = resource2.Body("loadbalancer", type=dict)
# loadbalancer id
loadbalance_id = resource2.URI("loadbalance_id")
| [
"769837173@qq.com"
] | 769837173@qq.com |
ffe67684bd72535366fe4685e80555f0e7c0ac0b | 32aa592fc3b7376b8fb36c0ac2245e6571fb7bdd | /datbase4.py | 0b901173cb7de7c8fc6d558bb3f096c56f52cca7 | [] | no_license | 1234567890boo/ywviktor | 00063a1c58b392cb4230791a9cffced6d2864889 | 12b18887243e9b64fb08db4ad440c7144bdf8cbb | refs/heads/master | 2022-05-14T12:43:43.422329 | 2022-04-30T04:24:05 | 2022-04-30T04:24:05 | 57,740,866 | 0 | 0 | null | 2020-06-29T00:22:12 | 2016-05-01T18:48:27 | Python | UTF-8 | Python | false | false | 129 | py | import sqlite3
conn=sqlite3.connect('sample4.db')
c=conn.cursor()
c.execute("SELECT COUNT(studentid) FROM STUDENTS")  # fixed: raw SQL must go through the cursor
print(c.fetchone()[0])  # added so the count is actually retrieved; not in the original
conn.commit()
c.close()
| [
"you@example.com"
] | you@example.com |
ce596f62fcc6f217f7ff9a62753caafb1dedf83c | e423c84898f3fbb1e43c49475aedad0d6547c22f | /tensorflow/python/keras/distribute/dataset_creator_model_fit_test_base.py | 371b154447c68c0073caf1758a97f3daf509fc5f | [
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | yash982000/tensorflow | 9a9cfd1433bae84a5f197c20f5d3526883f781a2 | 0d09ce8fc82d8eaf9734bd50ea5f0bf739be766c | refs/heads/master | 2023-05-09T07:35:24.800146 | 2021-05-31T16:15:35 | 2021-05-31T16:15:35 | 358,957,873 | 1 | 0 | Apache-2.0 | 2021-05-31T16:15:36 | 2021-04-17T18:50:16 | null | UTF-8 | Python | false | false | 7,420 | py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DatasetCreator` with `Model.fit` across usages and strategies."""
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.keras import callbacks as callbacks_lib
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.layers import core as core_layers
from tensorflow.python.keras.layers.preprocessing import string_lookup
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.keras.utils import dataset_creator
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class DatasetCreatorModelFitTestBase(test.TestCase, parameterized.TestCase):
"""The base class for DatasetCreator with Model.fit tests."""
def _get_dataset_fn(self, use_lookup_layer):
if use_lookup_layer:
filepath = os.path.join(self.get_temp_dir(), "vocab")
with open(filepath, "w") as f:
f.write("\n".join(["earth", "wind", "and", "fire"]))
def dataset_fn(input_context):
del input_context
lookup_layer = string_lookup.StringLookup(
num_oov_indices=1, vocabulary=filepath)
x = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
y = np.array([0, 1])
map_fn = lambda x, y: (lookup_layer(x), y)
return dataset_ops.DatasetV2.from_tensor_slices(
(x, y)).shuffle(10).repeat().batch(2).map(map_fn)
else:
def dataset_fn(input_context):
del input_context
x = random_ops.random_uniform((10, 10))
y = random_ops.random_uniform((10,))
return dataset_ops.DatasetV2.from_tensor_slices(
(x, y)).shuffle(10).repeat().batch(2)
return dataset_fn
def _model_compile(self,
strategy,
steps_per_execution=1,
run_eagerly=False,
with_normalization_layer=False,
use_lookup_layer=False):
class ResultAssertingCallback(callbacks_lib.Callback):
"""A callback that asserts the result of the tests."""
def __init__(self):
self._prev_epoch = -1
def on_epoch_end(self, epoch, logs=None):
logging.info("testModelFit: epoch=%r, logs=%r", epoch, logs)
if epoch <= self._prev_epoch:
raise RuntimeError("Epoch is supposed to be larger than previous.")
self._prev_epoch = epoch
is_loss_float = (
logs.get("loss", None) is not None and
isinstance(logs["loss"], (float, np.floating)))
if not is_loss_float:
raise RuntimeError("loss is supposed to be in the logs and float.")
def on_train_end(self, logs=None):
if self._prev_epoch != 9:
raise RuntimeError("Unexpected last epoch: {}".format(
self._prev_epoch))
with strategy.scope():
model = sequential.Sequential([core_layers.Dense(10)])
if with_normalization_layer:
norm = keras.layers.BatchNormalization(
axis=-1, input_shape=(4, 4, 3), momentum=0.8)
model.add(norm)
model.add(core_layers.Dense(1, activation="sigmoid"))
self._accuracy_metric = keras.metrics.Accuracy()
model.compile(
gradient_descent.SGD(),
loss="binary_crossentropy",
metrics=[self._accuracy_metric],
steps_per_execution=steps_per_execution,
run_eagerly=run_eagerly)
return model, [ResultAssertingCallback()]
def _model_fit(self,
strategy,
steps_per_execution=1,
validation_data=None,
x=None,
steps_per_epoch=10,
run_eagerly=False,
with_normalization_layer=False,
callbacks=None,
use_lookup_layer=False):
if callbacks is None:
callbacks = []
model, default_callbacks = self._model_compile(strategy,
steps_per_execution,
run_eagerly,
with_normalization_layer,
use_lookup_layer)
callbacks += default_callbacks
x = x or dataset_creator.DatasetCreator(
self._get_dataset_fn(use_lookup_layer))
validation_data = (
validation_data or
dataset_creator.DatasetCreator(self._get_dataset_fn(use_lookup_layer)))
model.fit(
x,
epochs=10,
steps_per_epoch=steps_per_epoch,
callbacks=callbacks,
validation_data=validation_data,
validation_steps=steps_per_epoch)
return model
def _model_evaluate(self,
strategy,
steps_per_execution=1,
validation_data=None,
steps=10,
run_eagerly=False,
with_normalization_layer=False,
callbacks=None):
if callbacks is None:
callbacks = []
model, default_callbacks = self._model_compile(
strategy,
steps_per_execution,
run_eagerly,
with_normalization_layer,
)
callbacks += default_callbacks
def dataset_fn(input_context):
del input_context
x = random_ops.random_uniform((10, 10))
y = random_ops.random_uniform((10, 1))
return dataset_ops.DatasetV2.from_tensor_slices(
(x, y)).shuffle(10).repeat().batch(8)
validation_data = (
validation_data or dataset_creator.DatasetCreator(dataset_fn))
model.evaluate(x=validation_data, steps=steps, callbacks=callbacks)
return model
def _model_predict(
self,
strategy,
model=None,
steps_per_execution=1,
test_data=None,
steps=10,
with_normalization_layer=False,
):
callbacks = []
if model is None:
model, default_callbacks = self._model_compile(
strategy,
steps_per_execution,
with_normalization_layer=with_normalization_layer,
)
callbacks += default_callbacks
def create_test_data():
x = constant_op.constant([1., 2., 3., 1., 5., 1.])
return dataset_ops.DatasetV2.from_tensor_slices(x).repeat().batch(2)
test_data = test_data or create_test_data()
predictions = model.predict(x=test_data, steps=steps, callbacks=callbacks)
predictions = np.around(predictions, 4)
return model, predictions
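# Illustrative sketch (not part of the test base): the DatasetCreator pattern
# the helpers above exercise -- a per-worker dataset_fn wrapped for Model.fit.
def _example_dataset_creator():
  def dataset_fn(input_context):
    del input_context  # each worker builds its own input pipeline
    x = random_ops.random_uniform((10, 10))
    y = random_ops.random_uniform((10,))
    return dataset_ops.DatasetV2.from_tensor_slices((x, y)).repeat().batch(2)
  return dataset_creator.DatasetCreator(dataset_fn)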
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
5201444f1f49e41e337b827110bb30274cc8a6e4 | 633f28bc4f1aa3954e33ccea95a935d46e103024 | /src/tests/plugins/test_ticketoutputpdf.py | 088a5ac545801e6893c3aaafb10e4ae06bf82aff | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | sha2017/pretix | 442ebbf33938ccc9d03e89bb2bbd67b576a0d4f6 | dcd6f2d8d12f69d68f18fb9b1b6fe8927d60bfe9 | refs/heads/master | 2020-12-24T09:53:31.542612 | 2016-10-21T08:49:09 | 2016-10-21T08:49:09 | 68,192,732 | 0 | 0 | null | 2016-09-14T09:25:45 | 2016-09-14T09:25:44 | null | UTF-8 | Python | false | false | 1,808 | py | from datetime import timedelta
from decimal import Decimal
from io import BytesIO
import pytest
from django.utils.timezone import now
from PyPDF2 import PdfFileReader
from pretix.base.models import (
Event, Item, ItemVariation, Order, OrderPosition, Organizer,
)
from pretix.plugins.ticketoutputpdf.ticketoutput import PdfTicketOutput
@pytest.fixture
def env():
o = Organizer.objects.create(name='Dummy', slug='dummy')
event = Event.objects.create(
organizer=o, name='Dummy', slug='dummy',
date_from=now(), live=True
)
o1 = Order.objects.create(
code='FOOBAR', event=event, email='dummy@dummy.test',
status=Order.STATUS_PENDING,
datetime=now(), expires=now() + timedelta(days=10),
total=Decimal('13.37'), payment_provider='banktransfer'
)
shirt = Item.objects.create(event=event, name='T-Shirt', default_price=12)
shirt_red = ItemVariation.objects.create(item=shirt, default_price=14, value="Red")
OrderPosition.objects.create(
order=o1, item=shirt, variation=shirt_red,
price=12, attendee_name=None, secret='1234'
)
OrderPosition.objects.create(
order=o1, item=shirt, variation=shirt_red,
price=12, attendee_name=None, secret='5678'
)
return event, o1
@pytest.mark.django_db
def test_generate_pdf(env, mocker):
mocked = mocker.patch('reportlab.pdfgen.canvas.Canvas.drawString')
event, order = env
event.settings.set('ticketoutput_pdf_code_x', 30)
event.settings.set('ticketoutput_pdf_code_y', 50)
event.settings.set('ticketoutput_pdf_code_s', 2)
o = PdfTicketOutput(event)
fname, ftype, buf = o.generate(order)
assert ftype == 'application/pdf'
pdf = PdfFileReader(BytesIO(buf))
assert pdf.numPages == 2
assert mocked.called
| [
"mail@raphaelmichel.de"
] | mail@raphaelmichel.de |
6bb7485728d62675725bc6326bf68cdb4cdef2ad | 2d0bada349646b801a69c542407279cc7bc25013 | /src/vai_optimizer/pytorch_binding/pytorch_nndct/nn/modules/interpolate.py | 5b30027d57596d14e2a282f4d1a6439e43baedca | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause-Open-MPI",
"LicenseRef-scancode-free-unknown",
"Libtool-exception",
"GCC-exception-3.1",
"LicenseRef-scancode-mit-old-style",
"OFL-1.1",
"JSON",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"ICU",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-issl-2018",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-or-later",
"Zlib",
"BSD-Source-Code",
"ClArtistic",
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"CC-BY-4.0",
"FSFULLR",
"Minpack",
"Unlicense",
"BSL-1.0",
"NAIST-2003",
"Apache-2.0",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-public-domain",
"Libpng",
"Spencer-94",
"BSD-2-Clause",
"Intel",
"GPL-1.0-or-later",
"MPL-2.0"
] | permissive | Xilinx/Vitis-AI | 31e664f7adff0958bb7d149883ab9c231efb3541 | f74ddc6ed086ba949b791626638717e21505dba2 | refs/heads/master | 2023-08-31T02:44:51.029166 | 2023-07-27T06:50:28 | 2023-07-27T06:50:28 | 215,649,623 | 1,283 | 683 | Apache-2.0 | 2023-08-17T09:24:55 | 2019-10-16T21:41:54 | Python | UTF-8 | Python | false | false | 1,564 | py |
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
import pytorch_nndct.utils as py_utils
__all__ = ['Interpolate']
class deephi_Interpolate(torch.nn.Module):
  def __init__(self, *args, **kwargs):
    super(deephi_Interpolate, self).__init__(*args, **kwargs)
self.node = None
self.quant_mode, self.quantizer = maybe_get_quantizer()
def forward(self,
input,
size=None,
scale_factor=None,
mode='nearest',
align_corners=None):
qinput = quantize_tensors([input], self.node, tensor_type='input')[0]
output = torch.nn.functional.interpolate(qinput, size, scale_factor, mode,
align_corners)
output = quantize_tensors([output], self.node)[0]
return output
@py_utils.register_quant_op
def Interpolate(*args, **kwargs):
return deephi_Interpolate(*args, **kwargs)
| [
"do-not-reply@gitenterprise.xilinx.com"
] | do-not-reply@gitenterprise.xilinx.com |
fc402ae81366952acb346097c3df72e9c0ad4921 | f17014cb6742281806a889cda35cef6b04ca2956 | /torch/testing/_internal/distributed/distributed_test.py | 2dc0d37c69755b95ddd47aed82457e57ea133dd5 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | VishveshDhobe/pytorch | 8ab55af440f2e3da841f0bfbcb7eef1e50f33ccb | e4bc785dd57b15ae091eb8e8ca71a604da9b3fb2 | refs/heads/master | 2023-01-05T17:13:35.845908 | 2020-11-04T16:27:55 | 2020-11-04T16:29:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164,090 | py | import copy
from collections import namedtuple
import itertools
import random
import math
import os
import sys
import time
import tempfile
import unittest
from contextlib import contextmanager, suppress
from datetime import timedelta
from functools import reduce
from typing import Union, NamedTuple
import torch
import torch.cuda
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel.distributed import _dump_DDP_relevant_env_vars
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed.distributed_c10d import _get_default_group, AllreduceOptions, GroupMember
from torch.testing._internal.common_utils import FILE_SCHEMA
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
TEST_SKIPS,
initialize_temp_directories,
cleanup_temp_dir,
simple_sparse_reduce_tests,
skip_if_rocm,
skip_if_small_worldsize,
skip_if_lt_x_gpu,
skip_if_no_gpu,
require_n_gpus_for_nccl_backend,
requires_nccl_version,
captured_output,
)
from torch._utils_internal import TEST_MASTER_ADDR as MASTER_ADDR
from torch._utils_internal import TEST_MASTER_PORT as MASTER_PORT
try:
import torchvision
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
if sys.platform == 'win32':
import msvcrt
else:
import fcntl
class Foo:
def __init__(self, x):
self.x = x
def __eq__(self, other):
return self.__dict__ == other.__dict__
f = Foo(10)
f.bar = 1
collectives_object_test_list = [
{"key1": 3, "key2": 4, "key3": {"nested": True}},
f,
"foo",
[1, 2, True, "string", [4, 5, "nested"]],
]
# Dummy NamedTuple data structures to test DDP support for NamedTuple types.
EXPECTED_FIELDS = ("a", "b")
TestNamedTupleInput_0 = namedtuple("NamedTuple", EXPECTED_FIELDS)
class TestNamedTupleInput_1(NamedTuple):
a: torch.tensor
b: torch.tensor
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
BACKEND = os.environ["BACKEND"]
INIT_METHOD = os.getenv("INIT_METHOD", "env://")
DEFAULT_TIMEOUT = 300
CUSTOMIZED_TIMEOUT = {"test_DistributedDataParallel": 500}
class _FC2(nn.Module):
def __init__(self):
super(_FC2, self).__init__()
self.fc = nn.Linear(10, 50, bias=True)
self.fc.bias.requires_grad = False
def forward(self, x):
x = self.fc(x)
return x
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = _FC2()
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(torch.tensor([2, 2]).long(),
requires_grad=False)
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
class Task(nn.Module):
def __init__(self):
super().__init__()
self.p = nn.Parameter(torch.ones(2, 2))
def forward(self, x):
return self.p + x
class BatchNormNet(nn.Module):
def __init__(self):
super(BatchNormNet, self).__init__()
self.fc1 = nn.Linear(2, 40, bias=False)
self.bn = nn.BatchNorm1d(4)
self.fc2 = nn.Linear(40, 4, bias=False)
def forward(self, x):
x = torch.reshape(self.fc1(x), (-1, 4, 10))
x = self.bn(x)
x = torch.reshape(x, (-1, 40))
x = self.fc2(x)
return F.softmax(x, dim=1)
DDP_NET = Net()
BN_NET = BatchNormNet()
ONLY_SBN_NET = nn.SyncBatchNorm(2, momentum=0.99)
def get_timeout(test_id):
test_name = test_id.split(".")[-1]
if test_name in CUSTOMIZED_TIMEOUT:
return CUSTOMIZED_TIMEOUT[test_name]
else:
return DEFAULT_TIMEOUT
def require_backend(backends):
if BACKEND not in backends:
return unittest.skip("Test requires backend to be one of %s" % backends)
return lambda func: func
def require_backends_available(backends):
def check(backend):
if backend == dist.Backend.GLOO:
return dist.is_gloo_available()
if backend == dist.Backend.NCCL:
return dist.is_nccl_available()
if backend == dist.Backend.MPI:
return dist.is_mpi_available()
return False
if not all(check(dist.Backend(backend)) for backend in backends):
return unittest.skip(
"Test requires backends to be available %s" % backends)
return lambda func: func
def require_world_size(world_size):
if int(os.environ["WORLD_SIZE"]) < world_size:
return unittest.skip("Test requires world size of %d" % world_size)
return lambda func: func
def apply_hack_for_nccl():
# This is a hack for a known NCCL issue using multiprocess
# in conjunction with multiple threads to manage different GPUs which
# may cause ncclCommInitRank to fail.
# http://docs.nvidia.com/deeplearning/sdk/nccl-release-notes/rel_2.1.4.html#rel_2.1.4
# It slows down the performance of collective operations.
# Without this setting NCCL might throw unhandled error.
os.environ["NCCL_MAX_NRINGS"] = "1"
@contextmanager
def _lock():
TEMP_DIR = os.environ["TEMP_DIR"]
lockfile = os.path.join(TEMP_DIR, "lockfile")
with open(lockfile, "w") as lf:
try:
if sys.platform == 'win32':
msvcrt.locking(lf.fileno(), msvcrt.LK_RLCK, 1)
yield
else:
fcntl.flock(lf.fileno(), fcntl.LOCK_EX)
yield
finally:
if sys.platform == 'win32':
msvcrt.locking(lf.fileno(), msvcrt.LK_UNLCK, 1)
else:
fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
lf.close()
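# Builds a size x size x size tensor filled with `value` (defaulting to
# `size`), optionally moved to the given CUDA device.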
def _build_tensor(size, value=None, dtype=torch.float, device_id=None):
if value is None:
value = size
if device_id is None:
return torch.empty(size, size, size, dtype=dtype).fill_(value)
else:
return torch.empty(size, size, size, dtype=dtype).fill_(value).cuda(device_id)
def _build_multidim_tensor(dim, dim_size, value=None, dtype=torch.float):
if value is None:
        # `size` is undefined in this scope; default to the per-dimension
        # size, mirroring _build_tensor's behavior.
        value = dim_size
return torch.empty(size=[dim_size for _ in range(dim)], dtype=dtype).fill_(value)
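# File-based barrier: each process writes its barrier id to a shared temp
# directory and polls until all participants have caught up.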
class Barrier(object):
barrier_id = 0
@classmethod
def init(cls):
cls.barrier_id = 0
barrier_dir = os.path.join(os.environ["TEMP_DIR"], "barrier")
for f_name in os.listdir(barrier_dir):
os.unlink(os.path.join(barrier_dir, f_name))
@classmethod
def sync(cls, wait_for=None, timeout=10):
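        """Block until `wait_for` processes (default: the whole world) have
        reached this barrier id, raising RuntimeError after `timeout` seconds.
        """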
if wait_for is None:
wait_for = dist.get_world_size()
cls.barrier_id += 1
barrier_dir = os.path.join(os.environ["TEMP_DIR"], "barrier")
pid = str(os.getpid())
barrier_file = os.path.join(barrier_dir, pid)
with _lock():
with open(barrier_file, "w") as f:
f.write(str(cls.barrier_id))
start_time = time.time()
while True:
arrived = 0
with _lock():
for f_name in os.listdir(barrier_dir):
with open(os.path.join(barrier_dir, f_name), "r") as f:
data = f.read()
if int(data) >= cls.barrier_id:
arrived += 1
if arrived == wait_for:
break
if time.time() - start_time > timeout:
raise RuntimeError("barrier timeout")
time.sleep(0.1)
class TestDistBackend(MultiProcessTestCase):
@classmethod
def setUpClass(cls):
os.environ["MASTER_ADDR"] = str(MASTER_ADDR)
os.environ["MASTER_PORT"] = str(MASTER_PORT)
# os.environ["WORLD_SIZE"] = str(WORLD_SIZE)
super().setUpClass()
def setUp(self):
super().setUp()
# initialize temp directories
initialize_temp_directories()
# initialize Barrier
Barrier.init()
def tearDown(self):
cleanup_temp_dir()
super().tearDown()
@property
def init_method(self):
return "{}{file_name}".format(FILE_SCHEMA, file_name=self.file_name)
@classmethod
def _run(cls, rank, test_name, file_name):
if BACKEND == 'nccl' and not torch.cuda.is_available():
sys.exit(TEST_SKIPS['no_cuda'].exit_code)
self = cls(test_name)
self.rank = rank
self.file_name = file_name
try:
dist.init_process_group(
init_method=self.init_method,
backend=BACKEND,
world_size=int(self.world_size),
rank=self.rank,
)
except RuntimeError as e:
if "recompile" in e.args[0]:
sys.exit(TEST_SKIPS["backend_unavailable"].exit_code)
raise
# Execute barrier prior to running test to ensure that every process
# has finished initialization and that the following test
# immediately exiting due to a skip doesn't cause flakiness.
self._barrier()
# self.id() == e.g. '__main__.TestDistributed.test_get_rank'
        # We're retrieving the corresponding test and executing it.
getattr(self, test_name)()
self._barrier()
dist.destroy_process_group()
sys.exit(0)
# Needed since MultiProcessTestCase assumes a world_size of 4, but we
# run these tests under other various world_sizes.
@property
def world_size(self):
return os.environ["WORLD_SIZE"]
class DistributedTest:
class _DistTestBase:
def _barrier(self, *args, **kwargs):
Barrier.sync(*args, **kwargs)
def _init_group_test(self, **kwargs):
group = [1, 2]
group_id = dist.new_group(group, **kwargs)
rank = dist.get_rank()
if rank not in group:
return ([], None, rank)
return (group, group_id, rank)
def _init_full_group_test(self, **kwargs):
group = list(range(0, dist.get_world_size()))
group_id = dist.new_group(**kwargs)
rank = dist.get_rank()
return (group, group_id, rank)
def _init_global_test(self):
group = list(range(0, dist.get_world_size()))
group_id = dist.group.WORLD
rank = dist.get_rank()
return (group, group_id, rank)
# HELPER FOR MULTIGPU TESTS
def _init_multigpu_helper(self):
"""Multigpu tests are designed to simulate the multi nodes with multi
GPUs on each node. Nccl backend requires equal #GPUs in each process.
On a single node, all visible GPUs are evenly
divided to subsets, each process only uses a subset.
"""
nGPUs = torch.cuda.device_count()
world_size = dist.get_world_size()
visible_devices = range(nGPUs)
if BACKEND == "nccl":
apply_hack_for_nccl()
nGPUs_per_process = nGPUs // world_size
rank_to_GPU = {
i: list(
visible_devices[i * nGPUs_per_process: (i + 1) * nGPUs_per_process]
)
for i in range(world_size)
}
return rank_to_GPU
def test_dump_DDP_relevant_env_vars(self):
with captured_output() as (out, _):
_dump_DDP_relevant_env_vars()
lines = out.getvalue().splitlines()
def format_line(var):
return "env:%s=%s" % (var, os.environ[var] if var in os.environ else "N/A")
# Check relevant env vars
vars = [
"MASTER_ADDR",
"MASTER_PORT",
"WORLD_SIZE",
"NCCL_TOPO_DUMP_FILE", # N/A
]
for var in vars:
line = format_line(var)
self.assertIn(line, lines)
# Check irrelevant env vars
vars = [
"xxx",
"yyy",
"zzz",
]
for var in vars:
line = format_line(var)
self.assertNotIn(line, lines)
# GET RANK
def test_get_rank(self):
test_dir = os.path.join(os.environ["TEMP_DIR"], "test_dir")
pid = str(os.getpid())
num_processes = dist.get_world_size()
with open(os.path.join(test_dir, pid), "w") as f:
f.write(str(dist.get_rank()))
self._barrier()
all_ranks = set()
for f_name in os.listdir(test_dir):
with open(os.path.join(test_dir, f_name), "r") as f:
all_ranks.add(int(f.read()))
self.assertEqual(len(all_ranks), num_processes)
self._barrier()
if dist.get_rank() == 0:
for f_name in os.listdir(test_dir):
os.unlink(os.path.join(test_dir, f_name))
self._barrier()
def test_get_backend(self):
if dist.get_world_size() > 2:
group = [1, 2]
else:
group = [0, 1]
group_id = dist.new_group(group)
backend_str = BACKEND.lower()
self.assertEqual(dist.get_backend(), backend_str)
if dist.get_rank() in group:
self.assertEqual(dist.get_backend(group_id), backend_str)
else:
with self.assertRaisesRegex(RuntimeError, "Invalid process group specified"):
dist.get_backend(group_id)
def test_Backend_enum_class(self):
# test parsing
backend = BACKEND.lower()
self.assertEqual(dist.Backend(BACKEND.upper()), backend)
self.assertEqual(dist.Backend(BACKEND), backend)
with self.assertRaisesRegex(ValueError, "Invalid backend: 'undefined'"):
dist.Backend("undefined")
with self.assertRaisesRegex(ValueError, "Invalid backend: 'xYz'"):
dist.Backend("xYz")
with self.assertRaises(ValueError):
dist.Backend(None)
with self.assertRaises(ValueError):
dist.Backend(3)
with self.assertRaises(ValueError):
dist.Backend(["gloo"])
# Test destroy
def test_destroy_group(self):
if dist.get_world_size() > 2:
group = [1, 2]
else:
group = [0, 1]
group_id = dist.new_group(group)
self._barrier()
dist.destroy_process_group(group_id)
# Test get rank and size of group
def test_get_rank_size_group(self):
if dist.get_world_size() > 2:
group = [1, 2]
else:
group = [0, 1]
group_id = dist.new_group(group)
if dist.get_rank() in group:
self.assertEqual(dist.get_world_size(group_id), 2)
self.assertTrue(dist.get_rank(group_id) in list(range(2)))
else:
self.assertEqual(dist.get_world_size(group_id), -1)
self.assertEqual(dist.get_rank(group_id), -1)
# Test destroy full groups
def test_destroy_full_group(self):
_, group_id, _ = self._init_full_group_test()
self._barrier()
dist.destroy_process_group(group_id)
# Test get rank and size of full group
def test_get_rank_size_full_group(self):
_, group_id, _ = self._init_full_group_test()
self.assertEqual(dist.get_world_size(group_id), dist.get_world_size())
self.assertEqual(dist.get_rank(group_id), dist.get_rank())
def _test_barrier_timeout(self, group_id, timeout):
local_rank = dist.get_rank(group_id)
# Only execute barrier on rank == 0, causing it to timeout
if local_rank == 0:
expected_time = time.time() + timeout.total_seconds()
with self.assertRaisesRegex(Exception, " (Timed out|closed|timeout) "):
dist.barrier(group_id)
self.assertGreaterEqual(time.time(), expected_time)
else:
time.sleep(timeout.total_seconds())
@unittest.skipIf(BACKEND != "gloo", "Only gloo backend supports timeouts")
@unittest.skipIf(
not INIT_METHOD.startswith("file://"),
"Requires file:// initialization method. " +
"Both tcp:// and env:// rely on the TCP store for which "
"reinitialization has proven racy."
)
def test_barrier_timeout_global(self):
dist.destroy_process_group()
# Explicitly pass world size to the barrier because we've
# just destroyed any state in torch.distributed.
self._barrier(wait_for=int(os.environ["WORLD_SIZE"]))
# Reinitialize global process group
timeout = timedelta(seconds=1)
dist.init_process_group(
init_method=INIT_METHOD,
backend=BACKEND,
world_size=int(os.environ["WORLD_SIZE"]),
rank=self.rank,
timeout=timeout,
)
self._test_barrier_timeout(dist.group.WORLD, timeout)
@skip_if_small_worldsize
@unittest.skipIf(BACKEND != "gloo", "Only gloo backend supports timeouts")
def test_barrier_timeout_group(self):
timeout = timedelta(seconds=1)
_, group_id, _ = self._init_group_test(timeout=timeout)
if group_id is not None:
self._test_barrier_timeout(group_id, timeout)
@unittest.skipIf(BACKEND != "gloo", "Only gloo backend supports timeouts")
def test_barrier_timeout_full_group(self):
timeout = timedelta(seconds=1)
_, group_id, _ = self._init_full_group_test(timeout=timeout)
if group_id is not None:
self._test_barrier_timeout(group_id, timeout)
# This test helper can only be used when using the Gloo or NCCL backend
# **and** both the Gloo and NCCL backends are available.
# See the @skip annotations below.
def _test_group_override_backend(self, initializer):
if BACKEND == "gloo":
new_backend = "nccl"
if BACKEND == "nccl":
new_backend = "gloo"
group, group_id, rank = initializer(backend=new_backend)
if group_id is None:
return
if new_backend == "gloo":
self.assertTrue(isinstance(group_id, dist.ProcessGroupGloo))
if new_backend == "nccl":
self.assertTrue(isinstance(group_id, dist.ProcessGroupNCCL))
self.assertEqual(rank, group[dist.get_rank(group_id)])
self.assertEqual(len(group), dist.get_world_size(group_id))
# Pin device (so we avoid NCCL race conditions/deadlocks).
group_rank = dist.get_rank(group_id)
torch.cuda.set_device(group_rank)
# Run broadcast of CUDA tensor (so it works for both Gloo and NCCL).
tensor = _build_tensor(2, value=group_rank).cuda()
dist.broadcast(tensor, src=group[0], group=group_id)
self.assertEqual(_build_tensor(2, value=0), tensor.to("cpu"))
@require_backend({"gloo", "nccl"})
@require_backends_available({"gloo", "nccl"})
@require_world_size(3)
@skip_if_lt_x_gpu(2)
def test_backend_group(self):
self._test_group_override_backend(self._init_group_test)
@require_backend({"gloo", "nccl"})
@require_backends_available({"gloo", "nccl"})
@skip_if_lt_x_gpu(3)
def test_backend_full_group(self):
self._test_group_override_backend(self._init_full_group_test)
# NCCL Batch SEND RECV
@skip_if_no_gpu
@unittest.skip("NCCL P2P is not enabled for OSS builds")
@unittest.skipIf(BACKEND != "nccl", "NCCL Batch Send Recv Only")
@requires_nccl_version(2700, "Need NCCL 2.7+ for send/recv")
def test_batch_isend_irecv_nccl(self):
self._barrier()
rank = dist.get_rank()
rank_to_GPU = self._init_multigpu_helper()
device_id = rank_to_GPU[rank][0]
            for val in ["1", "0"]:
                os.environ["NCCL_BLOCKING_WAIT"] = val
                # Reset the op list per setting so the second batch does not
                # re-issue the ops from the first iteration.
                p2p_op_list = []
for src in range(0, dist.get_world_size()):
send_tensor = _build_tensor(rank + 1, device_id=device_id)
recv_tensor = _build_tensor(src + 1, value=-1, device_id=device_id)
recv_op = dist.P2POp(dist.irecv, recv_tensor, src)
p2p_op_list.append(recv_op)
send_op = dist.P2POp(dist.isend, send_tensor, src)
p2p_op_list.append(send_op)
reqs = dist.batch_isend_irecv(p2p_op_list)
for req in reqs:
req.wait()
self._barrier()
@skip_if_no_gpu
@unittest.skipIf(BACKEND != "nccl", "NCCL Batch Send Recv Only")
@requires_nccl_version(2700, "Need NCCL 2.7+ for send/recv")
def test_batch_isend_irecv_self_nccl(self):
self._barrier()
rank = dist.get_rank()
rank_to_GPU = self._init_multigpu_helper()
device_id = rank_to_GPU[rank][0]
p2p_op_list = []
if rank == 0:
send_tensor = _build_tensor(rank + 1, device_id=device_id)
recv_tensor = _build_tensor(rank + 1, value=-1, device_id=device_id)
recv_op = dist.P2POp(dist.irecv, recv_tensor, 0)
p2p_op_list.append(recv_op)
send_op = dist.P2POp(dist.isend, send_tensor, 0)
p2p_op_list.append(send_op)
reqs = dist.batch_isend_irecv(p2p_op_list)
for req in reqs:
req.wait()
self._barrier()
@skip_if_no_gpu
@skip_if_small_worldsize
@unittest.skip("NCCL P2P is not enabled for OSS builds")
@unittest.skipIf(BACKEND != "nccl", "NCCL Batch Send Recv Only")
@requires_nccl_version(2700, "Need NCCL 2.7+ for send/recv")
def test_batch_isend_irecv_no_rank_zero_nccl(self):
self._barrier()
rank = dist.get_rank()
rank_to_GPU = self._init_multigpu_helper()
device_id = rank_to_GPU[rank][0]
p2p_op_list = []
if rank == 1:
peer = 2
elif rank == 2:
peer = 1
if rank in [1, 2]:
send_tensor = _build_tensor(rank + 1, device_id=device_id)
recv_tensor = _build_tensor(peer + 1, value=-1, device_id=device_id)
recv_op = dist.P2POp(dist.irecv, recv_tensor, peer)
p2p_op_list.append(recv_op)
send_op = dist.P2POp(dist.isend, send_tensor, peer)
p2p_op_list.append(send_op)
reqs = dist.batch_isend_irecv(p2p_op_list)
for req in reqs:
req.wait()
self._barrier()
# GLOO Batch SEND RECV CPU
@unittest.skipIf(BACKEND != "gloo", "GLOO Batch Send Recv CPU")
def test_batch_isend_irecv_gloo(self):
self._barrier()
rank = dist.get_rank()
p2p_op_list = []
for src in range(0, dist.get_world_size()):
if src == rank:
continue
send_tensor = _build_tensor(rank + 1)
recv_tensor = _build_tensor(src + 1, value=-1)
recv_op = dist.P2POp(dist.irecv, recv_tensor, src)
p2p_op_list.append(recv_op)
send_op = dist.P2POp(dist.isend, send_tensor, src)
p2p_op_list.append(send_op)
reqs = dist.batch_isend_irecv(p2p_op_list)
for req in reqs:
req.wait()
self._barrier()
# GLOO Batch SEND RECV CPU with provided tags
@unittest.skipIf(BACKEND != "gloo", "GLOO Batch Send Recv CPU")
def test_batch_isend_irecv_gloo_tags(self):
self._barrier()
rank = dist.get_rank()
p2p_op_list = []
for src in range(0, dist.get_world_size()):
if src == rank:
continue
send_tensor = _build_tensor(rank + 1)
recv_tensor = _build_tensor(src + 1, value=-1)
recv_op = dist.P2POp(dist.irecv, recv_tensor, src, tag=src)
p2p_op_list.append(recv_op)
send_op = dist.P2POp(dist.isend, send_tensor, src, tag=rank)
p2p_op_list.append(send_op)
reqs = dist.batch_isend_irecv(p2p_op_list)
for req in reqs:
req.wait()
self._barrier()
# NCCL Batch SEND RECV Tensor Error
@unittest.skipIf(BACKEND != "nccl", "NCCL Batch Send Recv Only")
@requires_nccl_version(2700, "Need NCCL 2.7+ for send/recv")
def test_batch_isend_irecv_tensor_err(self):
self._barrier()
rank = dist.get_rank()
if rank == 0:
rank_to_GPU = self._init_multigpu_helper()
device_id = rank_to_GPU[rank][0]
with self.assertRaisesRegex(
RuntimeError, "Tensors must be CUDA and dense"
):
send_tensor = _build_tensor(rank + 1)
send_op = dist.P2POp(dist.isend, send_tensor, 1)
req = dist.batch_isend_irecv([send_op])
req.wait()
# NCCL Batch SEND RECV Op Error
@unittest.skipIf(BACKEND != "nccl", "NCCL Batch Send Recv Only")
@requires_nccl_version(2700, "Need NCCL 2.7+ for send/recv")
def test_batch_isend_irecv_op_err(self):
self._barrier()
rank = dist.get_rank()
if rank == 0:
rank_to_GPU = self._init_multigpu_helper()
device_id = rank_to_GPU[rank][0]
with self.assertRaisesRegex(
RuntimeError, "^Invalid ``op``"
):
send_tensor = _build_tensor(rank + 1, device_id=device_id)
send_op = dist.P2POp(dist.broadcast, send_tensor, 1)
req = dist.batch_isend_irecv([send_op])
req.wait()
# NCCL Batch SEND RECV p2p_op_list Error
@unittest.skipIf(BACKEND != "nccl", "NCCL Batch Send Recv Only")
@requires_nccl_version(2700, "Need NCCL 2.7+ for send/recv")
def test_batch_isend_irecv_op_list_err(self):
self._barrier()
rank = dist.get_rank()
if rank == 0:
rank_to_GPU = self._init_multigpu_helper()
device_id = rank_to_GPU[rank][0]
with self.assertRaisesRegex(
RuntimeError, "^Invalid ``p2p_op_list``"
):
send_tensor = _build_tensor(rank + 1)
req = dist.batch_isend_irecv([1, 2])
req.wait()
# NCCL Batch SEND RECV Mixed Backend Error
@unittest.skipIf(BACKEND != "nccl", "NCCL Batch Send Recv Only")
@requires_nccl_version(2700, "Need NCCL 2.7+ for send/recv")
def test_batch_isend_irecv_mixed_backend_err(self):
self._barrier()
rank = dist.get_rank()
rank_to_GPU = self._init_multigpu_helper()
device_id = rank_to_GPU[rank][0]
group_gloo = dist.new_group(ranks=[0, 1], backend="gloo")
group_nccl = dist.new_group(ranks=[0, 1], backend="nccl")
if rank == 0:
with self.assertRaisesRegex(
RuntimeError, "All groups need to use the same backend"
):
send_tensor = _build_tensor(rank + 1)
send_op_gloo = dist.P2POp(dist.isend, send_tensor, 1, group_gloo)
send_op_nccl = dist.P2POp(dist.isend, send_tensor, 1, group_nccl)
req = dist.batch_isend_irecv([send_op_gloo, send_op_nccl])
req.wait()
# NCCL SEND RECV
@skip_if_no_gpu
@unittest.skipIf(BACKEND != "nccl", "NCCL Send Recv Only")
@requires_nccl_version(2700, "Need NCCL 2.7+ for send/recv")
def test_send_recv_nccl(self):
rank = dist.get_rank()
rank_to_GPU = self._init_multigpu_helper()
device_id = rank_to_GPU[rank][0]
tensor = _build_tensor(rank + 1, device_id=device_id)
for src in range(0, dist.get_world_size()):
if src == rank:
# Send mode
for dst in range(0, dist.get_world_size()):
if dst == rank:
continue
dist.send(tensor, dst)
else:
# Recv mode
expected_tensor = _build_tensor(src + 1)
output_tensor = _build_tensor(src + 1, value=-1, device_id=device_id)
dist.recv(output_tensor, src)
self.assertEqual(output_tensor, expected_tensor)
self._barrier()
# SEND RECV
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support send/recv")
def test_send_recv(self):
rank = dist.get_rank()
tensor = _build_tensor(rank + 1)
for src in range(0, dist.get_world_size()):
if src == rank:
# Send mode
for dst in range(0, dist.get_world_size()):
if dst == rank:
continue
dist.send(tensor, dst)
else:
# Recv mode
expected_tensor = _build_tensor(src + 1)
output_tensor = _build_tensor(src + 1, value=-1)
dist.recv(output_tensor, src)
self.assertEqual(output_tensor, expected_tensor)
self._barrier()
# SEND RECV ANY SOURCE
@unittest.skipIf(
BACKEND == "nccl", "Nccl does not support send/recv from any source"
)
def test_send_recv_any_source(self):
rank = dist.get_rank()
tensor = _build_tensor(10, value=rank)
recv_ranks = set()
for dst in range(0, dist.get_world_size()):
if dst == rank:
                    # Recv mode: receive once from each other rank, in
                    # arrival order. Use a distinct loop variable so the
                    # outer `dst` is not shadowed.
                    for other in range(0, dist.get_world_size()):
                        if other == rank:
                            continue
output_tensor = _build_tensor(10, value=-1)
sender = dist.recv(output_tensor)
# Assert the scalar value "sender" that should be
# equal to the rank of the sender is equal to all
# values in the received tensor.
self.assertTrue(output_tensor.eq(sender).all())
recv_ranks.add(sender)
else:
# Send mode
dist.send(tensor, dst)
self.assertEqual(len(recv_ranks), dist.get_world_size() - 1)
self._barrier()
# SEND RECV WITH TAG
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support send/recv")
def test_send_recv_with_tag(self):
rank = dist.get_rank()
world_size = dist.get_world_size()
tensor = _build_tensor(10, value=rank)
for dst in range(0, world_size):
if dst == rank:
# Recv mode
for src in range(0, world_size):
if src == rank:
continue
output_tensor = _build_tensor(10, value=-1)
dist.recv(output_tensor, src, tag=src)
self.assertTrue(output_tensor.eq(src).all())
else:
# Send mode
dist.send(tensor, dst, tag=rank)
# ISEND
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support isend")
def test_isend(self):
rank = dist.get_rank()
world_size = dist.get_world_size()
if rank == 0:
requests = [
dist.isend(_build_tensor(dest, 10), dest)
for dest in range(1, world_size)
]
for request in requests:
request.wait()
self.assertTrue(request.is_completed())
else:
tensor = _build_tensor(rank, -1)
dist.recv(tensor, 0)
self.assertEqual(tensor, _build_tensor(rank, 10))
self._barrier()
# IRECV
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support irecv")
def test_irecv(self):
rank = dist.get_rank()
world_size = dist.get_world_size()
if rank == 0:
expected_tensors = [_build_tensor(src, -1) for src in range(1, world_size)]
requests = [
dist.irecv(expected_tensors[src - 1], src)
for src in range(1, world_size)
]
for src in range(1, world_size):
requests[src - 1].wait()
self.assertTrue(requests[src - 1].is_completed())
self.assertEqual(expected_tensors[src - 1], _build_tensor(src, 10))
else:
tensor = _build_tensor(rank, 10)
dist.send(tensor, 0)
self._barrier()
# BROADCAST
def _test_broadcast_helper(
self, group, group_id, rank, cuda=False, rank_to_GPU=None, with_options=False
):
for dtype, value, requires_cuda in [
(torch.float, -1e-10, False),
(torch.double, -1e-100, False),
(torch.half, -0.1, True),
(torch.int8, -2, False),
(torch.uint8, 129, False),
(torch.int, -1e5, False),
(torch.long, -1e15, False),
]:
if requires_cuda and not cuda:
continue
for src in group:
expected_tensor = _build_tensor(src + 1, value, dtype)
if cuda:
expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])
if rank == src:
if with_options:
opts = dist.BroadcastOptions()
opts.rootTensor = 0
opts.rootRank = src
group_id.broadcast([expected_tensor], opts).wait()
else:
dist.broadcast(expected_tensor, src, group_id)
else:
tensor = _build_tensor(src + 1, -1, dtype)
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
if with_options:
opts = dist.BroadcastOptions()
opts.rootTensor = 0
opts.rootRank = src
group_id.broadcast([tensor], opts).wait()
else:
dist.broadcast(tensor, src, group_id)
self.assertEqual(tensor.size(), expected_tensor.size())
self.assertEqual(tensor.ne(expected_tensor).max(), torch.tensor(False))
self._barrier()
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_broadcast(self):
group, group_id, rank = self._init_global_test()
self._test_broadcast_helper(group, group_id, rank)
@unittest.skipIf(
BACKEND != "gloo" and BACKEND != "nccl",
"Only Gloo and Nccl backend supports CUDA allReduce",
)
@skip_if_no_gpu
@skip_if_rocm
def test_broadcast_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU)
@skip_if_small_worldsize
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_broadcast_group(self):
group, group_id, rank = self._init_group_test()
self._test_broadcast_helper(group, group_id, rank)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_broadcast_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_broadcast_helper(group, group_id, rank)
@unittest.skipIf(
BACKEND != "nccl",
"Only NCCL backend supports high priority stream",
)
@skip_if_no_gpu
def test_nccl_high_priority_stream(self):
group, _, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
new_port = str(MASTER_PORT + 1)
os.environ['MASTER_PORT'] = new_port
gen_iterator = dist.rendezvous('env://', rank, dist.get_world_size())
store, rank, size = next(gen_iterator)
store = dist.PrefixStore(new_port, store)
opts = dist.ProcessGroupNCCL.Options()
opts.is_high_priority = False
group_id = dist.ProcessGroupNCCL(store, rank, size, opts)
self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU, True)
# REDUCE
def _test_reduce_helper(
self,
group,
group_id,
rank,
op,
master_value,
worker_value,
expected_value,
cuda=False,
rank_to_GPU=None,
):
for src in group:
if rank == src:
tensor = _build_tensor(src + 1).fill_(master_value)
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
dist.reduce(tensor, src, op, group_id)
self.assertEqual(tensor, _build_tensor(src + 1, expected_value))
else:
tensor = _build_tensor(src + 1).fill_(worker_value)
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
dist.reduce(tensor, src, op, group_id)
self._barrier()
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_reduce_sum(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@unittest.skipIf(BACKEND != "nccl", "Only Nccl supports CUDA reduce")
@skip_if_no_gpu
@skip_if_rocm
def test_reduce_sum_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + 10 * (len(group) - 1),
True,
rank_to_GPU,
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_reduce_product(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_reduce_min(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_helper(group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_reduce_max(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_helper(group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
@skip_if_small_worldsize
def test_reduce_group_sum(self):
group, group_id, rank = self._init_group_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
@skip_if_small_worldsize
def test_reduce_group_product(self):
group, group_id, rank = self._init_group_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
@skip_if_small_worldsize
def test_reduce_group_min(self):
group, group_id, rank = self._init_group_test()
self._test_reduce_helper(group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
@skip_if_small_worldsize
def test_reduce_group_max(self):
group, group_id, rank = self._init_group_test()
self._test_reduce_helper(group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_reduce_full_group_sum(self):
group, group_id, rank = self._init_full_group_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_reduce_full_group_product(self):
group, group_id, rank = self._init_full_group_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_reduce_full_group_min(self):
group, group_id, rank = self._init_full_group_test()
self._test_reduce_helper(group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_reduce_full_group_max(self):
group, group_id, rank = self._init_full_group_test()
self._test_reduce_helper(group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10)
@skip_if_no_gpu
@require_backend({"gloo", "nccl"})
@skip_if_rocm
def test_all_reduce_result_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
for src in group:
if rank == src:
tensor = _build_tensor(src + 1, 2)
else:
tensor = _build_tensor(src + 1, 10)
tensor = tensor.cuda(rank_to_GPU[rank][0])
opts = AllreduceOptions()
opts.reduceOp = dist.ReduceOp.SUM
if group_id == GroupMember.WORLD:
work = _get_default_group().allreduce([tensor], opts)
else:
work = group_id.allreduce([tensor], opts)
if BACKEND == "gloo":
                    # Calling result() before the work has finished should
                    # throw an exception. There is a race here: the work may
                    # already be finished by the time the next lines run, in
                    # which case no exception is raised.
try:
with self.assertRaisesRegex(
RuntimeError,
"Work needs to be completed before calling result"):
work.result()
except AssertionError:
# Exception was not raised, ensure is_completed()
self.assertTrue(work.is_completed())
work.wait()
result = work.result()
else:
# In case of NCCL we should be able to retrieve pointer to the result
# even before work is finished.
result = work.result()
work.wait()
expected_value = 2 + (10 * (len(group) - 1))
self.assertEqual(result, [_build_tensor(src + 1, expected_value)])
self._barrier()
# ALL REDUCE
def _test_all_reduce_helper(
self,
group,
group_id,
rank,
op,
master_value,
worker_value,
expected_value,
cuda=False,
rank_to_GPU=None,
dtype=torch.float,
):
for src in group:
curr_value = master_value if rank == src else worker_value
tensor = _build_tensor(src + 1, dtype=dtype).fill_(curr_value)
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
dist.all_reduce(tensor, op, group_id)
expected_tensor = _build_tensor(src + 1, expected_value, dtype=dtype)
self.assertEqual(tensor, expected_tensor)
self._barrier()
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_sum(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@unittest.skipIf(
BACKEND != "gloo",
"Only Gloo backend will have CUDA allReduce tested",
)
@skip_if_no_gpu
def test_all_reduce_sum_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
True,
rank_to_GPU,
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_sum_complex(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
complex(2, 3),
complex(10, 11),
complex(2, 3) + (complex(10, 11) * (len(group) - 1)),
dtype=torch.cfloat,
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_complex_unsupported_ops(self):
unsupported_ops = [dist.ReduceOp.MAX, dist.ReduceOp.MIN, dist.ReduceOp.PRODUCT,
dist.ReduceOp.BAND, dist.ReduceOp.BOR, dist.ReduceOp.BXOR]
group, group_id, rank = self._init_global_test()
for unsupported_op in unsupported_ops:
with self.assertRaisesRegex(RuntimeError, "all_reduce does not support"):
dist.all_reduce(_build_tensor(1, dtype=torch.cfloat), unsupported_op, group_id)
@unittest.skipIf(
BACKEND != "gloo",
"Only Gloo backend will have CUDA allReduce tested",
)
@skip_if_no_gpu
def test_all_reduce_sum_cuda_complex(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
complex(2, 3),
complex(10, 11),
complex(2, 3) + (complex(10, 11) * (len(group) - 1)),
True,
rank_to_GPU,
dtype=torch.cfloat,
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_product(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_min(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_max(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10
)
@skip_if_small_worldsize
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_group_sum(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@skip_if_small_worldsize
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_group_product(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@skip_if_small_worldsize
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_group_min(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1
)
@skip_if_small_worldsize
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_group_max(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_full_group_sum(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_full_group_product(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_full_group_min(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_full_group_max(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10
)
# SPARSE ALL REDUCE
def _test_sparse_all_reduce_sum(self, fn):
group, group_id, rank = self._init_global_test()
tests = simple_sparse_reduce_tests(
rank,
dist.get_world_size(),
num_inputs=1)
for (inputs, outputs) in tests:
tensors = [fn(input) for input in inputs]
dist.all_reduce(tensors[0], dist.ReduceOp.SUM, group_id)
self.assertEqual(tensors[0], outputs[0])
@unittest.skipIf(BACKEND != "gloo", "Only Gloo backend support sparse all reduce")
def test_sparse_all_reduce_sum(self):
self._test_sparse_all_reduce_sum(lambda t: t)
@unittest.skipIf(BACKEND != "gloo", "Only Gloo backend support sparse all reduce")
@skip_if_no_gpu
@skip_if_rocm
def test_sparse_all_reduce_sum_cuda(self):
self._test_sparse_all_reduce_sum(lambda t: t.clone().cuda())
# ALL REDUCE - COALESCED
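        # Each helper below returns (master_values, worker_values,
        # expected_values, dtypes) for one reduce op, parameterized by the
        # group size.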
@staticmethod
def _all_reduce_coalesced_sum_test_cases(group_size):
return (
[2, 3, complex(2, 3)],
[10, 11, complex(10, 11)],
[2 + 10 * (group_size - 1), 3 + 11 * (group_size - 1), complex(2, 3) + complex(10, 11) * (group_size - 1)],
[torch.float, torch.float, torch.cfloat],
)
@staticmethod
def _all_reduce_coalesced_product_test_cases(group_size):
return (
[1, 2],
[3, 4],
[1 * 3 ** (group_size - 1), 2 * 4 ** (group_size - 1)],
[torch.float, torch.float],
)
@staticmethod
def _all_reduce_coalesced_min_test_cases(group_size):
return (
[1, 4],
[2, 3],
[1, 3],
[torch.float, torch.float],
)
@staticmethod
def _all_reduce_coalesced_max_test_cases(group_size):
return (
[1, 4],
[2, 3],
[2, 4],
[torch.float, torch.float],
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_coalesced_max_complex_unsupported(self):
group, group_id, rank = self._init_global_test()
with self.assertRaisesRegex(RuntimeError, "all_reduce does not support"):
dist.all_reduce_coalesced([_build_tensor(1, dtype=torch.cfloat)], dist.ReduceOp.MAX, group_id)
def _test_all_reduce_coalesced_helper(
self,
group,
group_id,
rank,
op,
cuda=False,
rank_to_GPU=None,
):
test_case_func = {
dist.ReduceOp.SUM: self._all_reduce_coalesced_sum_test_cases,
dist.ReduceOp.PRODUCT: self._all_reduce_coalesced_product_test_cases,
dist.ReduceOp.MIN: self._all_reduce_coalesced_min_test_cases,
dist.ReduceOp.MAX: self._all_reduce_coalesced_max_test_cases
}[op]
master_values, worker_values, expected_values, dtypes = test_case_func(len(group))
for src in group:
curr_values = master_values if rank == src else worker_values
tensors = [
_build_tensor(src + 1, val, dtype=dtype)
for dtype, val in zip(dtypes, curr_values)
]
if cuda:
tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]
dist.all_reduce_coalesced(tensors, op, group_id)
expected_tensors = [
_build_tensor(src + 1, expected_value, dtype=dtype)
for dtype, expected_value in zip(dtypes, expected_values)
]
self.assertEqual(
tensors,
expected_tensors
)
self._barrier()
@require_backend({"gloo"})
def test_all_reduce_coalesced_sum(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
cuda=False,
rank_to_GPU=None,
)
@require_backend({"gloo"})
def test_all_reduce_coalesced_product(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
cuda=False,
rank_to_GPU=None,
)
@require_backend({"gloo"})
def test_all_reduce_coalesced_min(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.MIN,
cuda=False,
rank_to_GPU=None,
)
@require_backend({"gloo"})
def test_all_reduce_coalesced_max(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.MAX,
cuda=False,
rank_to_GPU=None
)
@skip_if_small_worldsize
@require_backend({"gloo"})
def test_all_reduce_coalesced_group_sum(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
cuda=False,
rank_to_GPU=None
)
@skip_if_small_worldsize
@require_backend({"gloo"})
def test_all_reduce_coalesced_group_product(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
cuda=False,
rank_to_GPU=None
)
@skip_if_small_worldsize
@require_backend({"gloo"})
def test_all_reduce_coalesced_group_min(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.MIN,
cuda=False,
rank_to_GPU=None
)
@skip_if_small_worldsize
@require_backend({"gloo"})
def test_all_reduce_coalesced_group_max(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.MAX,
cuda=False,
rank_to_GPU=None
)
@require_backend({"gloo"})
def test_all_reduce_coalesced_full_group_sum(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
cuda=False,
rank_to_GPU=None
)
@require_backend({"gloo"})
def test_all_reduce_coalesced_full_group_product(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
cuda=False,
rank_to_GPU=None
)
@require_backend({"gloo"})
def test_all_reduce_coalesced_full_group_min(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.MIN,
cuda=False,
rank_to_GPU=None,
)
@require_backend({"gloo"})
def test_all_reduce_coalesced_full_group_max(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_coalesced_helper(
group,
group_id,
rank,
dist.ReduceOp.MAX,
cuda=False,
rank_to_GPU=None
)
# SCATTER
def _test_scatter_helper(self, group, group_id, rank):
for dest in group:
tensor = _build_tensor(dest + 1, -1)
expected_tensor = _build_tensor(dest + 1, rank)
tensors = (
[_build_tensor(dest + 1, i) for i in group] if rank == dest else []
)
dist.scatter(tensor, src=dest, scatter_list=tensors, group=group_id)
self.assertEqual(tensor, expected_tensor)
self._barrier()
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_scatter_checks(self):
group, group_id, rank = self._init_global_test()
one = torch.ones([1])
# Specify scatter_list argument only on source rank.
output = one.clone() * -1
if rank == 0:
scatter_list = [one.clone() * i for i in group]
dist.scatter(output, src=0, scatter_list=scatter_list)
else:
dist.scatter(output, src=0)
self.assertEqual(output, one * rank)
# Don't specify src argument.
output = one.clone() * -1
if rank == 0:
scatter_list = [one.clone() * i for i in group]
dist.scatter(output, scatter_list=scatter_list)
else:
dist.scatter(output)
self.assertEqual(output, one * rank)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support scatter")
def test_scatter(self):
group, group_id, rank = self._init_global_test()
self._test_scatter_helper(group, group_id, rank)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support scatter")
@skip_if_small_worldsize
def test_scatter_group(self):
group, group_id, rank = self._init_group_test()
self._test_scatter_helper(group, group_id, rank)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support scatter")
def test_scatter_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_scatter_helper(group, group_id, rank)
# GATHER
def _test_gather_helper(self, group, group_id, rank):
for dest in group:
tensor = _build_tensor(dest + 1, rank)
tensors = (
[_build_tensor(dest + 1, -1) for i in group] if rank == dest else []
)
dist.gather(tensor, dst=dest, gather_list=tensors, group=group_id)
if rank == dest:
expected_tensors = [_build_tensor(dest + 1, i) for i in group]
for t1, t2 in zip(tensors, expected_tensors):
self.assertEqual(t1, t2)
self._barrier()
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_gather_checks(self):
group, group_id, rank = self._init_global_test()
one = torch.ones([1])
# Specify gather_list argument only on destination rank.
if rank == 0:
gather_list = [one.clone() for _ in group]
dist.gather(one * rank, dst=0, gather_list=gather_list)
for i in group:
self.assertEqual(gather_list[i], one * i)
else:
dist.gather(one * rank, dst=0)
# Don't specify dst argument.
if rank == 0:
gather_list = [one.clone() for _ in group]
dist.gather(one * rank, gather_list=gather_list)
for i in group:
self.assertEqual(gather_list[i], one * i)
else:
dist.gather(one * rank)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_gather(self):
group, group_id, rank = self._init_global_test()
self._test_gather_helper(group, group_id, rank)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
@skip_if_small_worldsize
def test_gather_group(self):
group, group_id, rank = self._init_group_test()
self._test_gather_helper(group, group_id, rank)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_gather_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_gather_helper(group, group_id, rank)
# ALL GATHER
def _test_all_gather_helper(
self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float
):
for dest in group:
tensor = _build_tensor(dest + 1, rank, dtype=dtype)
tensors = [_build_tensor(dest + 1, -1, dtype=dtype) for i in group]
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]
dist.all_gather(tensors, tensor, group_id)
expected_tensors = [_build_tensor(dest + 1, i, dtype=dtype) for i in group]
for t1, t2 in zip(tensors, expected_tensors):
self.assertEqual(t1, t2)
self._barrier()
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_gather(self):
group, group_id, rank = self._init_global_test()
self._test_all_gather_helper(group, group_id, rank)
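        # Note: the two skip decorators below are mutually exclusive, so this
        # test (and its complex variant further down) is effectively disabled
        # for every backend.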
@unittest.skipIf(BACKEND != "nccl", "Only Nccl supports CUDA all gather")
@unittest.skipIf(BACKEND == "nccl", "CUDA all gather skipped for NCCL")
@skip_if_no_gpu
def test_all_gather_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_gather_helper(group, group_id, rank, True, rank_to_GPU)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_gather_complex(self):
group, group_id, rank = self._init_global_test()
self._test_all_gather_helper(group, group_id, rank, dtype=torch.cfloat)
@unittest.skipIf(BACKEND != "nccl", "Only Nccl supports CUDA all gather")
@unittest.skipIf(BACKEND == "nccl", "CUDA all gather skipped for NCCL")
@skip_if_no_gpu
def test_all_gather_cuda_complex(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_gather_helper(group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat)
@skip_if_small_worldsize
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_gather_group(self):
group, group_id, rank = self._init_group_test()
self._test_all_gather_helper(group, group_id, rank)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_gather_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_gather_helper(group, group_id, rank)
def _run_all_gather_coalesced_and_verify(
self, output_tensor_lists, input_tensors, expected_tensors, group_id
):
"""
Helper that runs all_gather_coalesced and returns true if output
matches expectations.
"""
dist.all_gather_coalesced(
output_tensor_lists, input_tensors, group_id)
for l1, l2 in zip(output_tensor_lists, expected_tensors):
for t1, t2 in zip(l1, l2):
if not torch.equal(t1, t2):
return False
return True
def _test_all_gather_coalesced_helper(
self, group, group_id, rank, dtype=torch.float
):
# TODO: Instead we should probably go through _rank_not_in_group
# mechanism to disable sending tensors
if group_id is not None:
for test_case_id in range(2, 5):
                    # Make sure we create tensors of different shapes, e.g.
                    # [1], [2x2], [3x3x3], ... to be sent in one batch
input_tensors = [
_build_multidim_tensor(
tensor_id,
tensor_id,
rank + tensor_id,
dtype=dtype) for tensor_id in range(
1, test_case_id)
]
output_tensor_lists = [
[
_build_multidim_tensor(
tensor_id, tensor_id, -1, dtype=dtype) for tensor_id in range(
1, test_case_id)
] for _ in group
]
expected_tensors = [
[
_build_multidim_tensor(
tensor_id,
tensor_id,
rank_iter + tensor_id,
dtype=dtype) for tensor_id in range(
1, test_case_id)
] for rank_iter in group
]
assert self._run_all_gather_coalesced_and_verify(
output_tensor_lists, input_tensors,
expected_tensors, group_id
), "output tensors do not match expected ouputs"
self._barrier()
@unittest.skipIf(BACKEND == "nccl", "all_gather_coalesced does not support NCCL")
@unittest.skipIf(BACKEND == "mpi", "all_gather_coalesced does not support MPI")
def test_all_gather_coalesced_simple(self):
group, group_id, rank = self._init_global_test()
self._test_all_gather_coalesced_helper(group, group_id, rank)
@unittest.skipIf(BACKEND == "nccl", "all_gather_coalesced does not support NCCL")
@unittest.skipIf(BACKEND == "mpi", "all_gather_coalesced does not support MPI")
def test_all_gather_coalesced_complex(self):
group, group_id, rank = self._init_global_test()
self._test_all_gather_coalesced_helper(group, group_id, rank, dtype=torch.cfloat)
@skip_if_small_worldsize
@unittest.skipIf(BACKEND == "nccl", "all_gather_coalesced does not support NCCL")
@unittest.skipIf(BACKEND == "mpi", "all_gather_coalesced does not support MPI")
def test_all_gather_coalesced_group(self):
group, group_id, rank = self._init_group_test()
self._test_all_gather_coalesced_helper(group, group_id, rank)
@unittest.skipIf(BACKEND == "nccl", "all_gather_coalesced does not support NCCL")
@unittest.skipIf(BACKEND == "mpi", "all_gather_coalesced does not support MPI")
def test_all_gather_coalesced_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_gather_coalesced_helper(group, group_id, rank)
@unittest.skipIf(BACKEND == "nccl", "all_gather_coalesced does not support NCCL")
@unittest.skipIf(BACKEND == "mpi", "all_gather_coalesced does not support MPI")
def test_all_gather_coalesced_with_empty(self):
group, group_id, rank = self._init_global_test()
input_tensors = [
rank * torch.ones([2, 2]),
torch.ones([0]),
(rank + 1) * torch.ones([3, 3]),
torch.ones([0]),
torch.ones([0])
]
output_tensors_lists = [
[
-1 * torch.ones([2, 2]),
-1 * torch.ones([0]),
-1 * torch.ones([3, 3]),
-1 * torch.ones([0]),
-1 * torch.ones([0])
] for _ in group
]
expected_tensors = [
[
r * torch.ones([2, 2]),
torch.ones([0]),
(r + 1) * torch.ones([3, 3]),
torch.ones([0]),
torch.ones([0])
] for r in group
]
assert self._run_all_gather_coalesced_and_verify(
output_tensors_lists, input_tensors, expected_tensors, group_id)
self._barrier()
# AllToAll
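        # The helpers below cover the three variants: all_to_all_single with
        # equal splits, all_to_all_single with per-rank splits, and
        # all_to_all with a list of per-peer tensors.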
def _test_all_to_all_single_equal_split_helper(
self,
group,
group_id,
rank,
cuda=False,
rank_to_GPU=None,
):
if group_id is not None:
size = len(group)
in_tensor = torch.ones([size, size]) * rank
expected_tensor = torch.cat([torch.ones([1, size]) * i for i in group])
out_tensor = torch.ones([size, size]) * -1
if cuda:
in_tensor = in_tensor.cuda(rank_to_GPU[rank][0])
expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])
out_tensor = out_tensor.cuda(rank_to_GPU[rank][0])
dist.all_to_all_single(out_tensor, in_tensor, group=group_id)
self.assertEqual(out_tensor, expected_tensor)
self._barrier()
def _test_all_to_all_single_unequal_split_helper(
self,
group,
group_id,
rank,
cuda=False,
rank_to_GPU=None,
):
if group_id is not None:
size = len(group)
in_splits = [i + 1 for i in group]
out_splits = [rank + 1 for _ in group]
in_tensor = torch.ones([sum(in_splits), size]) * rank
out_tensor = torch.ones([(rank + 1) * size, size])
expected_tensor = torch.cat([torch.ones([rank + 1, size]) * i for i in group])
if cuda:
in_tensor = in_tensor.cuda(rank_to_GPU[rank][0])
expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])
out_tensor = out_tensor.cuda(rank_to_GPU[rank][0])
dist.all_to_all_single(
out_tensor, in_tensor, out_splits, in_splits, group=group_id)
self.assertEqual(out_tensor, expected_tensor)
self._barrier()
def _test_all_to_all_helper(self, group, group_id, rank):
if group_id is not None:
size = len(group)
in_splits = [i + 1 for i in group]
in_tensors = [
torch.ones([in_splits[i], size]) * rank for i, _ in enumerate(group)
]
out_tensors = [torch.ones([(rank + 1), size]) for _ in group]
expected_tensors = [torch.ones([rank + 1, size]) * i for i in group]
dist.all_to_all(out_tensors, in_tensors, group=group_id)
for t1, t2 in zip(out_tensors, expected_tensors):
self.assertEqual(t1, t2)
self._barrier()
@unittest.skipIf(
BACKEND != "mpi", "Only MPI supports CPU all_to_all_single"
)
def test_all_to_all_single_equal_split(self):
group, group_id, rank = self._init_global_test()
self._test_all_to_all_single_equal_split_helper(group, group_id, rank)
@unittest.skip("NCCL A2A is not enabled for OSS builds")
@unittest.skipIf(
BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single"
)
@skip_if_no_gpu
@skip_if_rocm
def test_all_to_all_single_equal_split_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_to_all_single_equal_split_helper(
group,
group_id,
rank,
True,
rank_to_GPU,
)
@unittest.skipIf(
BACKEND != "mpi", "Only MPI supports CPU all_to_all_single"
)
def test_all_to_all_single_unequal_split(self):
group, group_id, rank = self._init_global_test()
self._test_all_to_all_single_unequal_split_helper(group, group_id, rank)
@unittest.skip("NCCL A2A is not enabled for OSS builds")
@unittest.skipIf(
BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single"
)
@skip_if_no_gpu
@skip_if_rocm
def test_all_to_all_single_unequal_split_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_to_all_single_unequal_split_helper(
group,
group_id,
rank,
True,
rank_to_GPU,
)
@unittest.skipIf(BACKEND != "mpi", "Only MPI supports all_to_all")
def test_all_to_all(self):
group, group_id, rank = self._init_global_test()
self._test_all_to_all_helper(group, group_id, rank)
@unittest.skipIf(
BACKEND != "mpi", "Only MPI supports CPU all_to_all_single"
)
@skip_if_small_worldsize
def test_all_to_all_single_equal_split_group(self):
group, group_id, rank = self._init_group_test()
self._test_all_to_all_single_equal_split_helper(group, group_id, rank)
@unittest.skip("NCCL A2A is not enabled for OSS builds")
@unittest.skipIf(
BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single"
)
@skip_if_no_gpu
@skip_if_rocm
@skip_if_small_worldsize
def test_all_to_all_single_equal_split_group_cuda(self):
group, group_id, rank = self._init_group_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_to_all_single_equal_split_helper(
group,
group_id,
rank,
True,
rank_to_GPU,
)
@unittest.skipIf(
BACKEND != "mpi", "Only MPI supports CPU all_to_all_single"
)
@skip_if_small_worldsize
def test_all_to_all_single_unequal_split_group(self):
group, group_id, rank = self._init_group_test()
self._test_all_to_all_single_unequal_split_helper(group, group_id, rank)
@unittest.skip("NCCL A2A is not enabled for OSS builds")
@unittest.skipIf(
BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single"
)
@skip_if_no_gpu
@skip_if_rocm
@skip_if_small_worldsize
def test_all_to_all_single_unequal_split_group_cuda(self):
            # Use the subgroup initializer to match the other *_group tests.
            group, group_id, rank = self._init_group_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_to_all_single_unequal_split_helper(
group,
group_id,
rank,
True,
rank_to_GPU,
)
@unittest.skipIf(BACKEND != "mpi", "Only MPI supports all_to_all")
@skip_if_small_worldsize
def test_all_to_all_group(self):
group, group_id, rank = self._init_group_test()
self._test_all_to_all_helper(group, group_id, rank)
@unittest.skipIf(
BACKEND != "mpi", "Only MPI supports CPU all_to_all_single"
)
def test_all_to_all_single_equal_split_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_to_all_single_equal_split_helper(group, group_id, rank)
@unittest.skip("NCCL A2A is not enabled for OSS builds")
@unittest.skipIf(
BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single"
)
@skip_if_no_gpu
@skip_if_rocm
def test_all_to_all_single_equal_split_full_group_cuda(self):
group, group_id, rank = self._init_full_group_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_to_all_single_equal_split_helper(
group,
group_id,
rank,
True,
rank_to_GPU,
)
@unittest.skipIf(
BACKEND != "mpi", "Only MPI supports CPU all_to_all_single"
)
def test_all_to_all_single_unequal_split_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_to_all_single_unequal_split_helper(group, group_id, rank)
@unittest.skip("NCCL A2A is not enabled for OSS builds")
@unittest.skipIf(
BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single"
)
@skip_if_no_gpu
@skip_if_rocm
def test_all_to_all_single_unequal_split_full_group_cuda(self):
group, group_id, rank = self._init_full_group_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_to_all_single_unequal_split_helper(
group,
group_id,
rank,
True,
rank_to_GPU,
)
@unittest.skipIf(BACKEND != "mpi", "Only MPI supports all_to_all")
def test_all_to_all_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_to_all_helper(group, group_id, rank)
# BARRIER
def _test_barrier_helper(
self, group, group_id, rank, cuda=False, rank_to_GPU=None):
WAIT_TIME = 0.3 # seconds
for dest in group:
expected_time = torch.DoubleTensor(1).fill_(0.0)
if cuda:
expected_time = expected_time.cuda(rank_to_GPU[rank][0])
if dest == rank:
expected_time.fill_(time.time() + WAIT_TIME)
dist.broadcast(expected_time, dest, group_id)
time.sleep(WAIT_TIME + 0.1) # sleep a little bit longer
dist.barrier(group_id)
else:
dist.broadcast(expected_time, dest, group_id)
dist.barrier(group_id)
self.assertGreaterEqual(
float(time.time()),
float(expected_time[0]),
"destination rank: %d, my rank: %d" % (dest, rank) +
" (if you see this failure, please report in #14554)")
# Use higher timeout for the instance where the test runs
# against a subgroup and uses a CUDA tensor for expected time.
# The CUDA initialization for the participating processes can
# take long enough for the barrier timeout to trigger on the
# process that doesn't participate in the group.
self._barrier(timeout=20)
@skip_if_no_gpu
@unittest.skipIf(BACKEND == "mpi", "MPI doesn't supports GPU barrier")
@skip_if_rocm
def test_barrier_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)
@skip_if_small_worldsize
@skip_if_no_gpu
@unittest.skipIf(BACKEND == "mpi", "MPI doesn't supports GPU barrier")
@skip_if_rocm
def test_barrier_group_cuda(self):
group, group_id, rank = self._init_group_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)
@skip_if_small_worldsize
@skip_if_no_gpu
@unittest.skipIf(BACKEND == "mpi", "MPI doesn't supports GPU barrier")
def test_barrier_full_group_cuda(self):
group, group_id, rank = self._init_full_group_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)
@unittest.skipIf(BACKEND == "nccl", "NCCL does not support CPU barrier")
def test_barrier(self):
group, group_id, rank = self._init_global_test()
self._test_barrier_helper(group, group_id, rank)
@skip_if_small_worldsize
@unittest.skipIf(BACKEND == "nccl", "NCCL does not support CPU barrier")
def test_barrier_group(self):
group, group_id, rank = self._init_group_test()
self._test_barrier_helper(group, group_id, rank)
@unittest.skipIf(BACKEND == "nccl", "NCCL does not support CPU barrier")
def test_barrier_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_barrier_helper(group, group_id, rank)
def _test_broadcast_multigpu_helper(self, group, group_id, rank, rank_to_GPU):
for src in group:
expected_tensor = _build_tensor(src + 1)
tensors = [
_build_tensor(src + 1, -1).cuda(device=i) for i in rank_to_GPU[rank]
]
if rank == src:
tensors[0] = expected_tensor.cuda(device=rank_to_GPU[rank][0])
dist.broadcast_multigpu(tensors, src, group_id)
for tensor in tensors:
self.assertEqual(tensor, expected_tensor)
self._barrier()
@unittest.skipIf(BACKEND == "mpi", "MPI doesn't support broadcast multigpu")
@unittest.skipIf(BACKEND == "nccl", "NCCL broadcast multigpu skipped")
@skip_if_no_gpu
def test_broadcast_multigpu(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_broadcast_multigpu_helper(group, group_id, rank, rank_to_GPU)
def _test_all_reduce_multigpu_helper(
self,
group,
group_id,
rank,
rank_to_GPU,
op,
master_value,
worker_value,
expected_value,
dtype=torch.float,
):
for src in group:
curr_value = master_value if rank == src else worker_value
tensors = [
_build_tensor(src + 1, curr_value, dtype=dtype).cuda(device=i)
for i in rank_to_GPU[rank]
]
dist.all_reduce_multigpu(tensors, op, group_id)
expected_tensor = _build_tensor(src + 1, expected_value, dtype=dtype)
for tensor in tensors:
self.assertEqual(tensor, expected_tensor)
self._barrier()
@unittest.skipIf(BACKEND == "mpi", "MPI doesn't support broadcast multigpu")
@unittest.skipIf(BACKEND == "nccl", "CUDA all_reduce multigpu skipped for NCCL")
@skip_if_no_gpu
def test_all_reduce_multigpu(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_reduce_multigpu_helper(
group,
group_id,
rank,
rank_to_GPU,
dist.ReduceOp.SUM,
2,
10,
(2 + 10 * (len(group) - 1)) * len(rank_to_GPU[0]),
)
@unittest.skipIf(BACKEND == "mpi", "MPI doesn't support broadcast multigpu")
@unittest.skipIf(BACKEND == "nccl", "CUDA all_reduce multigpu skipped for NCCL")
@skip_if_no_gpu
def test_all_reduce_multigpu_complex(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_reduce_multigpu_helper(
group,
group_id,
rank,
rank_to_GPU,
dist.ReduceOp.SUM,
complex(2, 3),
complex(10, 11),
(complex(2, 3) + complex(10, 11) * (len(group) - 1)) * len(rank_to_GPU[0]),
dtype=torch.cfloat,
)
def _test_reduce_multigpu_helper(
self,
group,
group_id,
rank,
rank_to_GPU,
op,
master_value,
worker_value,
expected_value,
):
for src in group:
if rank == src:
tensors = [
_build_tensor(src + 1, master_value).cuda(device=i)
for i in rank_to_GPU[rank]
]
dist.reduce_multigpu(tensors, src, op, group_id)
expected_tensor = _build_tensor(src + 1, expected_value)
self.assertEqual(tensors[0], expected_tensor)
else:
tensors = [
_build_tensor(src + 1, worker_value).cuda(device=i)
for i in rank_to_GPU[rank]
]
dist.reduce_multigpu(tensors, src, op, group_id)
self._barrier()
@unittest.skipIf(BACKEND != "nccl", "Only Nccl backend supports reduce multigpu")
@skip_if_no_gpu
@skip_if_rocm
def test_reduce_multigpu(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_reduce_multigpu_helper(
group,
group_id,
rank,
rank_to_GPU,
dist.ReduceOp.SUM,
2,
10,
(2 + 10 * (len(group) - 1)) * len(rank_to_GPU[0]),
)
def _test_all_gather_multigpu_helper(self, group, group_id, rank, rank_to_GPU, dtype=torch.float):
for dest in group:
tensors = [
_build_tensor(dest + 1, dtype=dtype).cuda(device=i) for i in rank_to_GPU[rank]
]
# construct expected output along with
# a placeholder to receive all_gather results
output_tensors = []
expected_output = []
output_per_gpu = (
[_build_tensor(dest + 1, -1, dtype=dtype)] * len(rank_to_GPU[0]) * len(group)
)
expected_per_gpu = (
[_build_tensor(dest + 1, dtype=dtype)] * len(rank_to_GPU[0]) * len(group)
)
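# Every participating GPU contributes one input tensor, so each output list
# must provide world_size * gpus_per_rank slots, one per contribution.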
for gpu in rank_to_GPU[rank]:
output_tensors.append([t.cuda(device=gpu) for t in output_per_gpu])
expected_output.append([t.cuda(device=gpu) for t in expected_per_gpu])
dist.all_gather_multigpu(output_tensors, tensors, group_id)
self.assertEqual(output_tensors, expected_output)
self._barrier()
@unittest.skipIf(BACKEND != "nccl", "Only Nccl backend supports allgather multigpu")
@skip_if_no_gpu
def test_all_gather_multigpu(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_gather_multigpu_helper(group, group_id, rank, rank_to_GPU)
@unittest.skipIf(BACKEND != "nccl", "Only Nccl backend supports allgather multigpu")
@skip_if_no_gpu
def test_all_gather_multigpu_complex(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_gather_multigpu_helper(group, group_id, rank, rank_to_GPU, dtype=torch.cfloat)
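# Note: the _model_step helpers below apply `param += param.grad` directly
# instead of going through an optimizer, so the single-process baseline and
# the DDP replica stay in lockstep and their parameters can be compared
# directly after every iteration.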
def _model_step(self, model):
for param in model.parameters():
if param.grad is not None:
with torch.no_grad():
param += param.grad
param.grad = None
def _model_step_with_zero_grad(self, model):
for param in model.parameters():
if param.grad is not None:
with torch.no_grad():
param += param.grad
param.grad.requires_grad_(False)
param.grad.zero_()
def _prepare_dummy_data(self, local_bs):
# global_bs for DDP should be divisible by WORLD_SIZE
world_size = int(os.environ["WORLD_SIZE"])
global_bs = world_size * local_bs
input_cpu = torch.randn(global_bs, 2)
target = torch.randn(global_bs, 4)
loss = nn.MSELoss()
return global_bs, input_cpu, target, loss
# END TO END TEST FOR DISTRIBUTEDDATAPARALLEL
def _test_DDP_helper(self, model, input_var, target, loss, scale_factor=1.0):
model.train()
output = model(input_var)
l = loss(output, target) * scale_factor
l.backward()
def _assert_equal_param(self, param_gpu, param_DDP):
self.assertEqual(len(param_gpu), len(param_DDP))
for p_gpu, p_DDP in zip(param_gpu, param_DDP):
self.assertEqual(p_gpu, p_DDP)
def _test_DDP_5iter(
self, model_base, model_DDP, input, target, loss, local_bs, rank, batch_size, test_save,
offset=None, world_size=0, zero_grad=False
):
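# The baseline model consumes the full global batch while the DDP model only
# sees this rank's shard; since DDP averages gradients uniformly across
# ranks, the loss scale factor below reweights uneven shards so the averaged
# gradient matches the full-batch baseline.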
for idx in range(5):
# single cpu/gpu training
self._test_DDP_helper(model_base, input, target, loss)
if offset is None:
offset = rank * local_bs
# DDP training, DDP scatters subsets of input_cpu to nodes/GPUs
self._test_DDP_helper(
model_DDP,
input[offset: offset + local_bs],
target[offset: offset + local_bs],
loss,
world_size * local_bs / batch_size if world_size != 0 else 1,
)
# Update weights and run a second iteration to shake out errors
if zero_grad:
self._model_step_with_zero_grad(model_base)
self._model_step_with_zero_grad(model_DDP)
else:
self._model_step(model_base)
self._model_step(model_DDP)
self._assert_equal_param(
list(model_base.parameters()), list(model_DDP.module.parameters())
)
# Shuffle the input so that DDP input is different
input = input[torch.randperm(batch_size)]
# save the model in the middle and reload
if test_save and idx == 2 and INIT_METHOD.startswith("file://"):
with tempfile.NamedTemporaryFile() as tmp:
if sys.platform == 'win32':
torch.save(model_DDP, tmp)
tmp.seek(0)
model_DDP = torch.load(tmp)
else:
torch.save(model_DDP, tmp.name)
model_DDP = torch.load(tmp.name)
with tempfile.TemporaryFile() as tmp_file:
torch.save(model_DDP, tmp_file)
tmp_file.seek(0)
saved_model = torch.load(tmp_file)
for k in model_DDP.state_dict():
self.assertEqual(model_DDP.state_dict()[k], saved_model.state_dict()[k])
def _test_DistributedDataParallel(self, gpu_subset, rank, output_device=None, gradient_as_bucket_view=False):
# Run a simple end to end DDP model, use result of single node model
# as baseline
# cpu training setup
model = DDP_NET
# single gpu training setup
model_gpu = copy.deepcopy(model)
model_gpu.cuda(gpu_subset[0])
# DDP training setup
model_DDP = copy.deepcopy(model)
model_DDP.cuda(gpu_subset[0])
model_DDP = nn.parallel.DistributedDataParallel(
model_DDP, device_ids=gpu_subset, gradient_as_bucket_view=gradient_as_bucket_view
)
# test serializable/unserializable
with tempfile.NamedTemporaryFile() as tmp:
if sys.platform == 'win32':
torch.save(model_DDP, tmp)
tmp.seek(0)
model_DDP = torch.load(tmp)
else:
torch.save(model_DDP, tmp.name)
model_DDP = torch.load(tmp.name)
# dummy data initialization
local_bs = len(gpu_subset)
global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)
# check two model parameters over 5 iterations
self._test_DDP_5iter(
model_gpu,
model_DDP,
input_cpu.cuda(gpu_subset[0]),
target.cuda(gpu_subset[0]),
loss,
local_bs,
rank,
global_bs,
True,
)
self._barrier()
def _test_DistributedDataParallelCPU(self, gradient_as_bucket_view=False):
# Run a simple end to end DDP-CPU model, use result of single node
# model as baseline
group, group_id, rank = self._init_global_test()
# cpu training setup
model_base = DDP_NET
# DDP-CPU training setup
model_DDP = copy.deepcopy(model_base)
model_DDP = nn.parallel.DistributedDataParallel(
model_DDP, gradient_as_bucket_view=gradient_as_bucket_view)
# dummy data initialization
local_bs = 2
global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)
# check two model parameters over 5 iterations
self._test_DDP_5iter(
model_base, model_DDP, input_cpu, target, loss, local_bs, rank, global_bs, False, zero_grad=True
)
self._barrier()
@unittest.skipIf(
BACKEND == "nccl", "nccl does not support DDP on CPU models"
)
def test_DistributedDataParallelCPU(self):
self._test_DistributedDataParallelCPU()
@unittest.skipIf(
BACKEND == "nccl", "nccl does not support DDP on CPU models"
)
def test_DistributedDataParallelCPU_grad_is_view(self):
self._test_DistributedDataParallelCPU(gradient_as_bucket_view=True)
@unittest.skipIf(BACKEND != 'nccl' and BACKEND != 'gloo',
"Only Nccl & Gloo backend support DistributedDataParallel")
def test_DistributedDataParallel_requires_grad(self):
# a module without gradients shouldn't be accepted
self.assertRaises(AssertionError, lambda: nn.parallel.DistributedDataParallel(nn.Module()))
self._barrier()
@unittest.skipIf(
BACKEND != "nccl" and BACKEND != "gloo",
"Only NCCL and GLOO backend support DistributedDataParallel",
)
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
@skip_if_rocm
def test_DistributedDataParallel_non_default_stream(self):
stream = torch.cuda.Stream(self.rank)
rank = self.rank
with torch.cuda.stream(stream):
net = torch.nn.parallel.DistributedDataParallel(
torch.nn.Linear(1, 1, bias=False).cuda(rank), device_ids=[rank]
)
for i in range(1000):
# Clear gradients manually
grad = net.module.weight.grad
if grad is not None:
grad.requires_grad_(False)
grad.zero_()
# Forward + BW
batch = torch.tensor([rank]).float().cuda(rank)
loss = net(batch).sum()
loss.backward()
# For each worker, the gradient on the weight should be worker_rank.
grad = net.module.weight.grad
avg = grad.clone()
# All-reducing the gradient averages should give us the gradient
# average. If not, then one of the workers has not correctly
# written back the averaged gradient before this all-reduce call.
dist.all_reduce(avg)
world_size = int(os.environ["WORLD_SIZE"])
avg.div_(world_size)
expected_grad = sum(i for i in range(world_size)) / world_size
self.assertEqual(
avg[0, 0],
expected_grad,
msg=f"Expected gradient of {expected_grad} but got {avg} on rank {self.rank}",
)
@unittest.skipIf(BACKEND != 'nccl' and BACKEND != 'gloo',
"Only Nccl & Gloo backend support DistributedDataParallel")
@skip_if_no_gpu
@skip_if_rocm
def test_DistributedDataParallel(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
gpus = list(rank_to_GPU[rank])
self._test_DistributedDataParallel(gpu_subset=gpus, rank=rank)
# test output_device
self._test_DistributedDataParallel(gpu_subset=gpus, rank=rank, output_device=torch.device('cuda'))
# test device_ids
gpus = [torch.device('cuda:' + str(i)) for i in gpus]
self._test_DistributedDataParallel(gpu_subset=gpus, rank=rank, output_device=torch.device('cuda'))
@unittest.skipIf(BACKEND != 'nccl' and BACKEND != 'gloo',
"Only Nccl & Gloo backend support DistributedDataParallel")
@skip_if_no_gpu
@skip_if_rocm
def test_DistributedDataParallel_with_grad_is_view(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
gpus = list(rank_to_GPU[rank])
self._test_DistributedDataParallel(gpu_subset=gpus, rank=rank, gradient_as_bucket_view=True)
# test output_device
self._test_DistributedDataParallel(
gpu_subset=gpus, rank=rank, output_device=torch.device('cuda'), gradient_as_bucket_view=True)
# test device_ids
gpus = [torch.device('cuda:' + str(i)) for i in gpus]
self._test_DistributedDataParallel(
gpu_subset=gpus, rank=rank, output_device=torch.device('cuda'), gradient_as_bucket_view=True)
def _test_DistributedDataParallel_SyncBatchNorm(self, gpu_subset, rank, local_bs, global_bs, offset, output_device=None):
# Run a simple end to end DDP model, use result of single node model
# as baseline
# cpu training setup
model = BN_NET
# single gpu training setup
model_gpu = copy.deepcopy(model)
model_gpu.cuda(gpu_subset[0])
# DDP training setup
model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model))
model_DDP.cuda(gpu_subset[0])
model_DDP = nn.parallel.DistributedDataParallel(
model_DDP, device_ids=gpu_subset
)
# test serializable/unserializable
with tempfile.NamedTemporaryFile() as tmp:
if sys.platform == 'win32':
torch.save(model_DDP, tmp)
tmp.seek(0)
model_DDP = torch.load(tmp)
else:
torch.save(model_DDP, tmp.name)
model_DDP = torch.load(tmp.name)
# data initialization
input_cpu = torch.randn(global_bs, 2)
target = torch.randn(global_bs, 4)
loss = nn.MSELoss()
# check two model parameters over 5 iterations
self._test_DDP_5iter(
model_gpu,
model_DDP,
input_cpu.cuda(gpu_subset[0]),
target.cuda(gpu_subset[0]),
loss,
local_bs,
rank,
global_bs,
True,
offset,
dist.get_world_size()
)
self._barrier()
@unittest.skipIf(BACKEND != 'nccl' and BACKEND != 'gloo',
"Only Nccl & Gloo backend support DistributedDataParallel")
@skip_if_no_gpu
def test_DistributedDataParallel_SyncBatchNorm(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
# DDP does not support replicating BN layers within a process, hence
# testing with one module replica per process
gpus = [rank]
num_processes = dist.get_world_size()
local_bs = 2
bs_offset = int(rank * 2)
global_bs = int(num_processes * 2)
self._test_DistributedDataParallel_SyncBatchNorm(
gpu_subset=gpus,
rank=rank,
local_bs=local_bs,
global_bs=global_bs,
offset=bs_offset)
# test output_device
self._test_DistributedDataParallel_SyncBatchNorm(
gpu_subset=gpus,
rank=rank,
local_bs=local_bs,
global_bs=global_bs,
offset=bs_offset,
output_device=torch.device('cuda'))
# test device_ids
gpus = [torch.device('cuda:' + str(i)) for i in gpus]
self._test_DistributedDataParallel_SyncBatchNorm(
gpu_subset=gpus,
rank=rank,
local_bs=local_bs,
global_bs=global_bs,
offset=bs_offset,
output_device=torch.device('cuda'))
@unittest.skipIf(BACKEND != 'nccl' and BACKEND != 'gloo',
"Only Nccl & Gloo backend support DistributedDataParallel")
@skip_if_no_gpu
def test_DistributedDataParallel_SyncBatchNorm_2D_Input(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
# DDP does not support replicating BN layers within a process, hence
# testing with one module replica per process
gpus = [rank]
model = nn.BatchNorm1d(2)
# single gpu training setup
model_gpu = copy.deepcopy(model)
model_gpu.cuda(gpus[0])
# DDP training setup
model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model))
model_DDP.cuda(gpus[0])
model_DDP = nn.parallel.DistributedDataParallel(
model_DDP, device_ids=gpus
)
local_bs = len(gpus) * 2
global_bs = dist.get_world_size() * local_bs
input_cpu = torch.randn(global_bs, 2)
target = torch.randn(global_bs, 2)
loss = nn.MSELoss()
# disabling cudnn.
# SyncBatchNorm goes through the native_batch_norm kernel; this avoids the
# numerical issue created by the divergent code path.
with torch.backends.cudnn.flags(False):
# check two model parameters over 5 iterations
self._test_DDP_5iter(
model_gpu,
model_DDP,
input_cpu.cuda(gpus[0]),
target.cuda(gpus[0]),
loss,
local_bs,
rank,
global_bs,
True
)
self._barrier()
@unittest.skipIf(BACKEND != 'nccl' and BACKEND != 'gloo',
"Only Nccl & Gloo backend support DistributedDataParallel")
@skip_if_no_gpu
@require_world_size(2)
@skip_if_rocm
def test_DistributedDataParallel_SyncBatchNorm_Single_Input_Per_Process(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
# DDP does not support replicating BN layers within a process, hence
# testing with one module replica per process
gpus = [rank]
model = nn.BatchNorm1d(2)
# single gpu training setup
model_gpu = copy.deepcopy(model)
model_gpu.cuda(gpus[0])
# DDP training setup
model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model))
model_DDP.cuda(gpus[0])
model_DDP = nn.parallel.DistributedDataParallel(
model_DDP, device_ids=gpus
)
local_bs = 1
global_bs = dist.get_world_size()
input_cpu = torch.randn(global_bs, 2)
target = torch.randn(global_bs, 2)
loss = nn.MSELoss()
# disabling cudnn.
# SyncBatchNorm goes through the native_batch_norm kernel; this avoids the
# numerical issue created by the divergent code path.
with torch.backends.cudnn.flags(False):
# check two model parameters over 5 iterations
self._test_DDP_5iter(
model_gpu,
model_DDP,
input_cpu.cuda(gpus[0]),
target.cuda(gpus[0]),
loss,
local_bs,
rank,
global_bs,
True
)
self._barrier()
@unittest.skipIf(BACKEND != 'nccl' and BACKEND != 'gloo',
"Only Nccl & Gloo backend support DistributedDataParallel")
@skip_if_no_gpu
def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_Running_Value(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
model = nn.parallel.DistributedDataParallel(ONLY_SBN_NET.cuda(rank), device_ids=[rank])
input_var = []
for i in range(dist.get_world_size()):
input_var_rank = torch.cat([
torch.ones(2, 1, 10 ** (i + 1)) * (0.1 ** (i - 1)),
torch.ones(2, 1, 10 ** (i + 1)) * (0.3 ** (i - 1))
], dim=1)
input_var.append(input_var_rank)
all_input_var = torch.cat(
[x.permute(1, 0, 2).contiguous().view(ONLY_SBN_NET.num_features, -1) for x in input_var],
dim=1
).cuda(rank)
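# Concatenating every rank's input lets each process compute the expected
# global running statistics locally and compare them against what
# SyncBatchNorm accumulated across ranks.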
for i in range(100):
y = model(input_var[rank].cuda(rank))
y.mean().backward()
running_mean, running_var = model.module.running_mean, model.module.running_var
torch.testing.assert_allclose(running_mean, all_input_var.mean(1))
torch.testing.assert_allclose(running_var, all_input_var.var(1))
@unittest.skipIf(BACKEND != 'nccl' and BACKEND != 'gloo',
"Only Nccl & Gloo backend support DistributedDataParallel")
@skip_if_no_gpu
def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_gradient(self):
group, group_id, rank = self._init_global_test()
# only do single GPU per process
gpus = [rank]
# cpu training setup
model = BN_NET
num_processes = dist.get_world_size()
local_bs = rank + 2
bs_offset = int((rank + 3) * rank / 2)
global_bs = int((num_processes + 3) * num_processes / 2)
self._test_DistributedDataParallel_SyncBatchNorm(
gpu_subset=gpus,
rank=rank,
local_bs=local_bs,
global_bs=global_bs,
offset=bs_offset)
@skipIfNoTorchVision
def test_SyncBatchNorm_process_group(self):
# When adopting `convert_sync_batchnorm` to convert an `nn.Module`,
# it needs to recursively pass the `process_group` into the module when the `SyncBatchNorm`
# is nested in a sub-module or sub-sub-module (e.g. resnet50 in torchvision.models).
process_ids = 0
process_group = torch.distributed.new_group([process_ids])
res50_model = torchvision.models.resnet50()
res50_model_sync = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(res50_model), process_group)
process_group_sync = res50_model_sync.layer1[0].bn1.process_group
self.assertEqual(process_group_sync, process_group)
def _run_reduction_test(
self, tensor, expected_tensor, op, reduction_fn=dist.all_reduce, dst=None
):
if reduction_fn != dist.all_reduce and dst is None:
raise ValueError(f"Reduction fn {reduction_fn} must specify dst!")
if dst is not None:
reduction_fn(tensor, dst, op)
# Only destination rank tensor is expected to have final result.
if dist.get_rank() == dst:
self.assertEqual(tensor, expected_tensor)
else:
reduction_fn(tensor, op)
self.assertEqual(tensor, expected_tensor)
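# For boolean tensors the reduction ops degenerate to logical operators:
# PRODUCT/MIN behave like AND and SUM/MAX behave like OR, which is what the
# bool collective tests below rely on.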
@require_backend({"nccl"})
@require_backends_available({"nccl"})
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_nccl_backend_bool_allreduce(self):
torch.cuda.set_device(self.rank)
# Run all_reduce with PRODUCT
element = self.rank % 2 == 0
for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]:
input_tensor = torch.tensor([element, element]).to(self.rank)
self._run_reduction_test(
input_tensor, torch.tensor([False, False]).to(self.rank), op
)
# Ensure that all ranks contributing True (cast to 1) result in the
# correct reduction.
input_tensor = torch.tensor([True, True]).to(self.rank)
expected_tensor = input_tensor.clone()
self._run_reduction_test(
input_tensor, expected_tensor, op
)
# Run all_reduce with SUM
for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]:
input_tensor = torch.tensor([element, element]).to(self.rank)
self._run_reduction_test(
input_tensor, torch.tensor([True, True]).to(self.rank), op
)
# TODO: NCCL backend does not work correctly for bitwise reduction ops
# (see https://github.com/pytorch/pytorch/issues/41362). Add tests for
# these once it is supported.
@require_backend({"nccl"})
@require_backends_available({"nccl"})
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_nccl_backend_bool_allgather(self):
torch.cuda.set_device(self.rank)
inp = {0: [True, True], 1: [False, True]}
input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank)
# Preserve a copy of the tensor to compare against after allgather.
input_tensor_copy = input_tensor.clone()
tensor_list = [
torch.tensor([False, False]).to(self.rank)
for _ in range(dist.get_world_size())
]
dist.all_gather(tensor_list, input_tensor)
self.assertEqual(len(tensor_list), dist.get_world_size())
for i, t in enumerate(tensor_list):
expected = torch.tensor(inp[i % 2]).to(self.rank)
self.assertEqual(t, expected)
# Ensure that the input tensor is not modified, since this collective
# does not modify its input.
self.assertEqual(input_tensor_copy, input_tensor)
@require_backend({"nccl"})
@require_backends_available({"nccl"})
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
@skip_if_rocm
def test_nccl_backend_bool_reduce(self):
torch.cuda.set_device(self.rank)
inp = {0: [True, True], 1: [False, False]}
# Run reduce() with product op
for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]:
input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank)
expected = torch.tensor([False, False]).to(self.rank)
self._run_reduction_test(
input_tensor, expected, op, dist.reduce, dst=0
)
# Ensure that all ranks contributing True (cast to 1) result in the
# correct reduction.
input_tensor = torch.tensor([True, True]).to(self.rank)
expected_tensor = input_tensor.clone()
self._run_reduction_test(
input_tensor, expected_tensor, op, dist.reduce, dst=0
)
for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]:
input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank)
expected = (
torch.tensor([True, True]).to(self.rank)
if self.rank == 0
else input_tensor.clone()
)
self._run_reduction_test(
input_tensor, expected, op, dist.reduce, dst=0
)
@require_backend({"nccl"})
@require_backends_available({"nccl"})
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_nccl_backend_bool_broadcast(self):
tensor_size = 10
bcast_tensor = torch.tensor(
[
(random.random() < 0.5 if self.rank == 0 else False)
for _ in range(tensor_size)
]
).to(self.rank)
dist.broadcast(bcast_tensor, src=0)
# Now allgather and ensure the tensors are equal.
tensor_list = [
torch.tensor([False for _ in range(tensor_size)]).to(self.rank)
for _ in range(dist.get_world_size())
]
dist.all_gather(tensor_list, bcast_tensor)
expected = tensor_list[0]
for tensor in tensor_list[1:]:
self.assertEqual(tensor, expected)
@unittest.skipIf(
BACKEND != "nccl" and BACKEND != "gloo",
"Only NCCL and GLOO backend support DistributedDataParallel",
)
@skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
def test_DistributedSampler_padding(self):
# Tests padding of distributed sampler.
world_size = dist.get_world_size()
# Simulates the 'casual' dataset size
dataset_size = 100 + world_size + 1
dataset = [torch.ones(1).to(self.rank) * i for i in range(dataset_size)]
# Simulates the 'tiny' dataset size
dataset_tiny_size = max(world_size // 2 - 1, 1)
dataset_tiny = [torch.ones(1).to(self.rank) * i for i in range(dataset_tiny_size)]
# Specifying drop_last=True will cause the tail of the data to be dropped.
dist_sampler = DistributedSampler(dataset=dataset, drop_last=True)
local_num_samples, local_dataset_size = (
dist_sampler.num_samples,
dist_sampler.total_size,
)
# The effective dataset size should be the greatest integer that is <=
# dataset_size that is divisible by the world_size. This is to ensure each
# rank processes the same number of samples.
effective_dataset_size = (
math.ceil((dataset_size - world_size) / world_size)
if dataset_size % world_size != 0
else dataset_size / world_size
)
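# Worked example (assuming world_size=2): the dataset holds 103 elements, so
# each rank draws ceil((103 - 2) / 2) = 51 samples and 102 of the 103
# indices are consumed; the final index is dropped.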
self.assertEqual(local_num_samples, effective_dataset_size)
self.assertEqual(local_dataset_size, local_num_samples * world_size)
indices_list = list(iter(dist_sampler))
self.assertEqual(len(indices_list), local_num_samples)
def validate_global_samples(local_num_samples):
# Ensure that each rank processes the same number of samples.
world_samples = [
torch.LongTensor([0]).to(self.rank) for _ in range(world_size)
]
dist.all_gather(world_samples, torch.tensor([local_num_samples]).to(self.rank))
world_samples = [sample.item() for sample in world_samples]
self.assertEqual(len(set(world_samples)), 1)
validate_global_samples(local_num_samples)
# drop_last=False is the default and will add additional indices to be sampled,
# increasing the effective dataset size.
dist_sampler_added_samples = DistributedSampler(dataset=dataset)
local_num_samples, local_dataset_size = (
dist_sampler_added_samples.num_samples,
dist_sampler_added_samples.total_size,
)
# The effective dataset size is the smallest integer that is >= dataset_size
# and divisible by the world size.
self.assertEqual(
local_num_samples, math.ceil(dataset_size / world_size)
)
self.assertEqual(local_dataset_size, local_num_samples * world_size)
indices_list = list(iter(dist_sampler_added_samples))
self.assertEqual(len(indices_list), local_num_samples)
# Ensure that each rank processes the same number of samples.
validate_global_samples(local_num_samples)
# Ensure additional samples are padded even when
# an extremely small dataset is given.
dist_sampler_added_samples_tiny = DistributedSampler(dataset=dataset_tiny)
local_num_samples, local_dataset_size = (
dist_sampler_added_samples_tiny.num_samples,
dist_sampler_added_samples_tiny.total_size,
)
self.assertEqual(
local_num_samples, math.ceil(dataset_tiny_size / world_size)
)
self.assertEqual(local_dataset_size, local_num_samples * world_size)
indices_list = list(iter(dist_sampler_added_samples_tiny))
self.assertEqual(len(indices_list), local_num_samples)
validate_global_samples(local_num_samples)
@require_backend({"nccl", "gloo"})
@require_n_gpus_for_nccl_backend(int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"])
def test_allgather_object(self):
# Only set device for NCCL backend since it must use GPUs.
backend = os.environ["BACKEND"]
if backend == "nccl":
# Case where rank != GPU device.
next_rank = (self.rank + 1) % int(self.world_size)
torch.cuda.set_device(next_rank)
gather_objects = collectives_object_test_list
output_gathered = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(
output_gathered, gather_objects[self.rank % len(gather_objects)]
)
for i, val in enumerate(output_gathered):
expected = gather_objects[i % len(gather_objects)]
self.assertEqual(val, expected)
output_gathered = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(
output_gathered, gather_objects[self.rank % len(gather_objects)]
)
@require_backend({"gloo"})
@unittest.skipIf(BACKEND == "nccl", "NCCL does not support gather")
def test_gather_object(self):
# Ensure stateful objects can be gathered
gather_objects = collectives_object_test_list
output_gathered = [None for _ in range(dist.get_world_size())]
gather_on_rank = 0
my_rank = dist.get_rank()
dist.gather_object(
gather_objects[self.rank % len(gather_objects)],
object_gather_list=output_gathered if my_rank == gather_on_rank else None,
dst=gather_on_rank,
)
if my_rank != gather_on_rank:
self.assertEqual(
output_gathered, [None for _ in range(dist.get_world_size())]
)
else:
for i, val in enumerate(output_gathered):
expected = gather_objects[i % len(gather_objects)]
self.assertEqual(val, expected)
# Validate errors when objects can't be pickled.
class Bar:
pass
b = Bar()
gather_objects = [b for _ in range(dist.get_world_size())]
with self.assertRaisesRegex(AttributeError, "Can't pickle local object"):
dist.all_gather_object(
[None for _ in range(dist.get_world_size())], gather_objects[self.rank]
)
@require_backend({"nccl"})
@require_backends_available({"nccl"})
@skip_if_lt_x_gpu(2)
def test_nccl_gather_object_err(self):
output_gathered = [None for _ in range(dist.get_world_size())]
gather_on_rank = 0
# Case where rank != GPU device.
my_rank = dist.get_rank()
next_rank = (my_rank + 1) % dist.get_world_size()
torch.cuda.set_device(next_rank)
with self.assertRaisesRegex(
RuntimeError, "ProcessGroupNCCL does not support gather"
):
dist.gather_object(
"foo",
object_gather_list=output_gathered
if my_rank == gather_on_rank
else None,
dst=gather_on_rank,
)
def validate_net_equivalence(self, net):
# Helper to validate synchronization of nets across ranks.
net_module_states = list(net.module.state_dict().values())
# Check that all tensors in module's state_dict() are equal.
for t in net_module_states:
tensor_list = [
torch.zeros_like(t) for _ in range(dist.get_world_size())
]
dist.all_gather(tensor_list, t)
for tensor in tensor_list:
self.assertEqual(tensor, t)
@require_backend({"gloo", "nccl"})
@require_backends_available({"gloo", "nccl"})
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_ddp_sync_params_and_buffers(self):
# Test that after calling _sync_params_and_buffers, models across ranks
# are the same and are equal to the model on the input rank.
dim = 2
rank = self.rank
rank_to_broadcast = 1
# Seed to ensure that ranks are initialized with different initial models.
torch.manual_seed(rank)
model = nn.Linear(dim, dim, bias=False)
net = torch.nn.parallel.DistributedDataParallel(
model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1
)
new_model = nn.Linear(dim, dim, bias=False).cuda(rank)
net.module = copy.deepcopy(new_model)
# Assert params are different
net_module_states = list(net.module.state_dict().values())
for t in net_module_states:
tensor_list = [
torch.zeros_like(t) for _ in range(dist.get_world_size())
]
dist.all_gather(tensor_list, t)
for i, tensor in enumerate(tensor_list):
if i == rank:
self.assertEqual(t, tensor)
else:
# tensor from another rank should be different.
self.assertNotEqual(t, tensor)
net._sync_params_and_buffers(authoritative_rank=rank_to_broadcast)
# Now all model params should be the same.
self.validate_net_equivalence(net)
# Since the network params were broadcast from rank_to_broadcast, validate that
# they are the same as new_model on rank_to_broadcast.
if rank == rank_to_broadcast:
expected_states = new_model.state_dict().values()
for t, expected in zip(net_module_states, expected_states):
self.assertEqual(t, expected)
@require_backend({"gloo", "nccl"})
@require_backends_available({"gloo", "nccl"})
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_ddp_grad_div_uneven_inputs(self):
# Test gradient division during training with join() API. If
# divide_by_initial_world_size=False, we scale by the effective world
# size when allreducing grads.
dim = 5
batch = 1
grad_scale = 50
rank = self.rank
model = nn.Linear(dim, dim, bias=False)
inp = torch.ones(batch, dim, device=self.rank) * grad_scale
net = torch.nn.parallel.DistributedDataParallel(
model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1
)
n_iters = 3
if self.rank > 0:
n_iters += 2
with net.join(divide_by_initial_world_size=False):
for _ in range(n_iters):
loss = net(inp).sum()
loss.backward()
# The grad is always expected_grad, since we divide by the number
# of currently active processes and inactive processes contribute
# zero gradient. If we kept dividing by static initial world
# size as processes leave, the grad would be smaller.
expected_grad = torch.ones(dim, dim, device=self.rank) * grad_scale
param = list(net.parameters())[0]
self.assertEqual(expected_grad, param.grad)
# Avoid accumulating grads so that it's the same every iteration
net.zero_grad()
torch.cuda.synchronize(device=self.rank)
# If divide_by_initial_world_size=True (default), we always scale grads
# by the initial world_size.
with net.join(divide_by_initial_world_size=True):
for i in range(n_iters):
loss = net(inp).sum()
loss.backward()
effective_ws = dist.get_world_size()
if i >= 3:
effective_ws -= 1
expected_grad = (
torch.ones(dim, dim, device=self.rank) * grad_scale * effective_ws
) / dist.get_world_size()
param = list(net.parameters())[0]
self.assertEqual(expected_grad, param.grad)
# Avoid accumulating grad so that it's the same every iteration.
net.zero_grad()
torch.cuda.synchronize(device=self.rank)
@require_backend({"gloo", "nccl"})
@require_backends_available({"gloo", "nccl"})
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_ddp_join_model_equivalence(self):
# Verifies equivalence with model training locally and with DDP under
# the join context manager.
batch = 3
dim = 10
learning_rate = 0.03
model = nn.Linear(dim, dim, bias=False)
inp = torch.rand(batch, dim, device=self.rank)
local_model = copy.deepcopy(model)
local_model = local_model.cuda(self.rank)
rank_to_iter_mapping = {rank : 2 * (rank + 1) for rank in range(dist.get_world_size())}
# run local model
local_iters = sum(rank_to_iter_mapping.values())
local_optim = torch.optim.SGD(local_model.parameters(), lr=learning_rate)
for _ in range(local_iters):
local_optim.zero_grad()
out = local_model(inp)
loss = out.sum()
loss.backward()
local_optim.step()
# run DDP model with join API
num_iters = rank_to_iter_mapping[self.rank]
net = torch.nn.parallel.DistributedDataParallel(
model.cuda(self.rank), device_ids=[self.rank]
)
ddp_optim = torch.optim.SGD(
model.parameters(), lr=learning_rate * dist.get_world_size()
)
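# Under join(), gradients are divided by the initial world size, so the DDP
# learning rate is scaled up by world_size; with the constant input used
# here, the accumulated parameter updates then match the local baseline.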
with net.join():
for i in range(num_iters):
ddp_optim.zero_grad()
out = net(inp)
loss = out.sum()
loss.backward()
torch.cuda.synchronize(device=self.rank)
ddp_optim.step()
# Validate model state dicts are equal
for (_, local_tensor), (_, dist_tensor) in zip(
local_model.state_dict().items(), net.module.state_dict().items()
):
self.assertEqual(local_tensor, dist_tensor)
def _run_uneven_inputs_test(
self, test_case, iteration_mapping, find_unused_params,
):
model = test_case.model
inp = test_case.inp
rank = self.rank
sync_interval = test_case.sync_interval
# Ensure all outstanding GPU work is complete so this test runs independently.
dist.barrier()
# Bucket_cap_mb is intentionally low to test allreduce scheduling when
# there are many buckets.
net = torch.nn.parallel.DistributedDataParallel(
model.cuda(rank),
device_ids=[rank],
bucket_cap_mb=1,
find_unused_parameters=find_unused_params,
)
# Determine num iters for this rank via the passed in mapping.
num_iters = iteration_mapping[rank]
with net.join():
for i in range(num_iters):
# Use model.no_sync() to disable grad synchronization every
# sync_interval.
if i % sync_interval != 0:
context = net.no_sync()
else:
context = suppress()
with context:
if isinstance(inp, tuple):
loss = net(*inp).sum()
else:
loss = net(inp).sum()
loss.backward()
self._model_step(net)
# Ensure completion of GPU kernels (including allreduce). If the
# join API is not properly implemented, then this should hang
# since the allreduce will hang.
torch.cuda.synchronize(device=rank)
# Ensure completion of all GPU kernels.
torch.cuda.synchronize(device=rank)
self.assertTrue(net._authoritative_rank)
# All ranks should have agreed on the same authoritative_rank!
final_rank_tensor = torch.tensor([net._authoritative_rank], device=self.rank)
tensor_list = [
torch.zeros_like(final_rank_tensor)
for _ in range(dist.get_world_size())
]
dist.all_gather(tensor_list, final_rank_tensor)
max_rank = dist.get_world_size() - 1
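# The iteration mappings built in test_ddp_uneven_inputs give later ranks at
# least as many iterations as earlier ones, so the agreed-upon authoritative
# rank is expected to resolve to the highest rank.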
self.assertSetEqual({max_rank}, set(tensor.item() for tensor in tensor_list))
# Ensure that all models are the same across ranks after all have joined.
self.validate_net_equivalence(net)
dist.barrier()
@require_backend({"gloo", "nccl"})
@require_backends_available({"gloo", "nccl"})
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_ddp_uneven_inputs(self):
class DDPUnevenTestInput(NamedTuple):
name: str
model: nn.Module
inp: Union[torch.Tensor, tuple]
sync_interval: int
dim = 1000
batch = 1
# Create a variety of models to run uneven input tests on.
large_model = nn.Sequential(
nn.Conv2d(1, 20, 5),
nn.ReLU(),
nn.Conv2d(20, 32, 5),
nn.ReLU(),
nn.Conv2d(32, 256, 5),
nn.ReLU(),
)
small_model = nn.Linear(dim, dim, bias=False)
bn_net = BatchNormNet()
class UnusedParamModule(nn.Module):
def __init__(self, unused_params_rank):
super().__init__()
self.t0 = Task()
self.t1 = Task()
self.unused_params_rank = unused_params_rank
def task_parameters(self):
return (self.t0.p, self.t1.p)
def forward(self, x, rank):
return (
self.t1(self.t0(x))
if rank != self.unused_params_rank
else self.t1(x)
)
unjoined_rank_with_unused_params_model = UnusedParamModule(1)
joined_rank_with_unused_params_model = UnusedParamModule(0)
rank = self.rank
models_to_test = [
# Network with batchnorm
DDPUnevenTestInput(
name="batch_norm_net",
model=bn_net,
inp=torch.ones(batch, 2, device=rank),
sync_interval=1
),
DDPUnevenTestInput(
name="large_conv_model",
model=large_model,
inp=torch.ones(batch, batch, dim, dim, device=rank),
sync_interval=1,
),
DDPUnevenTestInput(
name="small_model",
model=small_model,
inp=torch.ones(batch, dim, device=rank),
sync_interval=1,
),
# Unused parameter test where rank that does not join early has unused params
DDPUnevenTestInput(
name="unjoined_rank_with_unused_params_model",
model=unjoined_rank_with_unused_params_model,
inp=(torch.ones(batch, 2, device=rank), rank),
sync_interval=1,
),
# Unused parameter test where rank that does join early has unused params
DDPUnevenTestInput(
name="joined_rank_with_unused_params_model",
model=joined_rank_with_unused_params_model,
inp=(torch.ones(batch, 2, device=rank), rank),
sync_interval=1,
),
]
# Add resnet model if we have torchvision installed.
if HAS_TORCHVISION:
resnet_model = torchvision.models.resnet50()
models_to_test.append(
DDPUnevenTestInput(
name="resnet_model",
model=resnet_model,
inp=torch.ones(1, 3, 1000, 1000),
sync_interval=1,
)
)
# Test with no_sync every 2, 3, 4, ... iterations.
models_with_sync = []
for i, test_input in enumerate(models_to_test):
models_with_sync.append(
DDPUnevenTestInput(
name=test_input.name,
model=test_input.model,
inp=test_input.inp,
sync_interval=i + 2,
)
)
models_to_test.extend(models_with_sync)
# 0-iteration tests for when one process does not train the model at all, so
# we must shadow the broadcast calls made when rebuilding buckets.
baseline_num_iters = [0, 5]
iteration_offsets = [2, 3, 10]
num_uneven_ranks = [1]
if dist.get_world_size() > 2:
num_uneven_ranks.append(2)
iteration_mappings = []
# Generate rank : num_iters mappings for various uneven input scenarios.
# This includes cases where rank 0 joins early and all other ranks join
# later, and scenarios where multiple ranks join early, but at different
# iterations, and later ranks join later.
for num_early_join_ranks in num_uneven_ranks:
for baseline_iter in baseline_num_iters:
for offset in iteration_offsets:
mapping = {
rank: baseline_iter for rank in range(0, num_early_join_ranks)
}
# if num_early_join_ranks > 1, ranks > 0 that will join early
# iterate offset//2 more times than rank 0, to test nodes
# depleting inputs at different times.
if num_early_join_ranks > 1:
for rank in mapping.keys():
if rank > 0:
mapping[rank] += offset // 2
mapping.update(
{
rank: baseline_iter + offset
for rank in range(
num_early_join_ranks, dist.get_world_size()
)
}
)
iteration_mappings.append(mapping)
for (test_case, iteration_mapping) in itertools.product(
models_to_test, iteration_mappings
):
if self.rank == 0:
print(
f"""Running test: {test_case.name} sync interval
{test_case.sync_interval} with iteration mapping
{iteration_mapping}"""
)
self._run_uneven_inputs_test(
test_case,
iteration_mapping,
find_unused_params=("unused_params_model" in test_case.name),
)
@require_backend({"gloo", "nccl"})
@require_backends_available({"gloo", "nccl"})
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_ddp_uneven_input_join_disable(self):
# tests that if net.join() with enable=False is specified, DDP works as
# expected with even inputs.
torch.manual_seed(self.rank)
net = torch.nn.parallel.DistributedDataParallel(
torch.nn.Linear(1, 1).cuda(self.rank), device_ids=[self.rank]
)
inp = torch.ones(1) * self.rank
n_iters = 5
world_size = dist.get_world_size()
with net.join(enable=False):
for _ in range(n_iters):
# Clear grads
grad = net.module.weight.grad
if grad is not None:
grad.requires_grad_(False)
grad.zero_()
out = net(inp)
loss = out.sum()
loss.backward()
# Validate gradients to ensure that we divide by the correct
# world_size when join mode is disabled.
expected_grad = sum(i for i in range(world_size)) / world_size
self.assertEqual(
net.module.weight.grad.item(), expected_grad
)
self.assertFalse(net.ddp_join_enabled)
self.validate_net_equivalence(net)
@require_backend({"gloo", "nccl"})
@require_backends_available({"gloo", "nccl"})
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_ddp_uneven_input_exception(self):
# Tests that exceptions during training are correctly propagated by the
# context manager.
error_str = "Intentional error"
class ExceptionModule(nn.Module):
def __init__(self):
super().__init__()
self.param = nn.Parameter(torch.ones(1, requires_grad=True))
def forward(self, _):
raise ValueError(error_str)
exception_module = ExceptionModule()
net = torch.nn.parallel.DistributedDataParallel(
exception_module.cuda(self.rank), device_ids=[self.rank]
)
inp = torch.ones(1)
with self.assertRaisesRegex(ValueError, error_str):
with net.join():
out = net(inp)
loss = out.sum()
loss.backward()
@require_backend({"gloo", "nccl"})
@require_backends_available({"gloo", "nccl"})
@skip_if_lt_x_gpu(4)
@skip_if_rocm
def test_ddp_uneven_inputs_replicated_error(self):
# Tests that the context manager errors out in SPMD mode.
group = dist.new_group([0, 1])
if self.rank < 2:
model = nn.Linear(1, 1, bias=False)
rank_to_device = {0: [0, 1], 1: [2, 3]}
devices = rank_to_device[self.rank]
net = torch.nn.parallel.DistributedDataParallel(
model.cuda(devices[0]), device_ids=devices, process_group=group
)
with self.assertRaisesRegex(
ValueError, r"DDP join\(\) API does not support Single-Process Multi-GPU"
):
with net.join():
pass
# We need a barrier since otherwise non-participating processes exit too early
# and cause a timeout.
self._barrier(timeout=60)
@require_backend({"nccl", "gloo"})
@require_n_gpus_for_nccl_backend(int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"])
def test_broadcast_object_list(self):
# Only set device for NCCL backend since it must use GPUs.
backend = os.environ["BACKEND"]
if backend == "nccl":
# Case where rank != GPU device.
next_rank = (self.rank + 1) % int(self.world_size)
torch.cuda.set_device(next_rank)
src_rank = 0
objects = collectives_object_test_list if self.rank == src_rank else [None for _ in collectives_object_test_list]
# Single object test
single_obj_list = [objects[0]]
if self.rank != src_rank:
self.assertNotEqual(single_obj_list[0], collectives_object_test_list[0])
dist.broadcast_object_list(single_obj_list, src=0)
self.assertEqual(single_obj_list[0], collectives_object_test_list[0])
# Multiple input objects test
if self.rank != src_rank:
self.assertNotEqual(objects, collectives_object_test_list)
dist.broadcast_object_list(objects, src=0)
self.assertEqual(objects, collectives_object_test_list)
@require_backend({"gloo", "nccl"})
@require_backends_available({"gloo", "nccl"})
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_ddp_ignore_params_arg(self):
class TestModel(nn.Module):
def __init__(self, rank):
self.rank = rank
super(TestModel, self).__init__()
self.fc1 = nn.Linear(1, 1, bias=False)
# Proxy that will be materialized to another architecture later.
# (after wrapping model with DDP)
if self.rank == 0:
self.fc2 = nn.Linear(1, 10, bias=False)
else:
self.fc2 = nn.Linear(10, 10, bias=False)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
return x
device_id = self.rank
# Ensure the test works for both find_unused_parameter and broadcast_buffer settings.
for (find_unused, broadcast_buffers) in itertools.product([False, True], [False, True]):
model = TestModel(self.rank).float().to(device_id)
# Note that the model can have different shape buffers if we pass
# them in to be ignored as well.
model.fc2.register_buffer(
"ignore_buffer", torch.zeros(5 + self.rank, device=self.rank)
)
proxy_params = list(model.fc2.parameters())
proxy_buffers = list(model.fc2.buffers())
model_fc2_name = [
module_name
for module_name, module in model.named_modules()
if module is model.fc2
][0]
proxy_param_names = [
f"{model_fc2_name}.{param_name}"
for param_name, _ in model.fc2.named_parameters()
]
proxy_buffer_names = [
f"{model_fc2_name}.{buf_name}"
for buf_name, _ in model.fc2.named_buffers()
]
# Specify that we should ignore proxy_params since it will be
# materialized later.
torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
model, proxy_param_names + proxy_buffer_names
)
ddp = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[device_id],
find_unused_parameters=find_unused,
broadcast_buffers=broadcast_buffers,
)
# Materialize new params. These are not registered in DDP and thus
# don't have autograd hooks installed on them.
ddp.module.fc2 = nn.Linear(1, 1, bias=False).to(device_id)
# local model with the new materialized parameters.
local_model = copy.deepcopy(ddp.module).cuda(self.rank)
inp = torch.ones(1, dtype=torch.float).to(device_id) * (self.rank + 1)
for i in range(6):
ddp(inp).sum().backward()
local_model(inp).sum().backward()
# materialized param grad is not touched by DDP, so its grad should
# be the same as if running locally.
for materialized_param, local_param in zip(
ddp.module.fc2.parameters(), local_model.fc2.parameters()
):
self.assertEqual(materialized_param.grad, local_param.grad)
# fc1 parameter grad should still be different, due to allreduce.
for synced_param, local_param in zip(
ddp.module.fc1.parameters(), local_model.fc1.parameters()
):
self.assertFalse(synced_param.grad == local_param.grad)
# Proxy module grad should not be touched
for proxy_param in proxy_params:
self.assertTrue(proxy_param.grad is None)
# Synchronize since we run multiple iterations of this test, to
# isolate failure hangs.
torch.cuda.synchronize(device=self.rank)
@require_backend({"gloo", "nccl"})
@require_backends_available({"gloo", "nccl"})
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_ddp_unused_params_rebuild_buckets_exception(self):
class ToyModel(nn.Module):
def __init__(self):
super(ToyModel, self).__init__()
self.net1 = nn.Linear(10, 10, bias=False)
self.net2 = nn.Linear(10, 10, bias=False)
def forward(self, x):
return self.net1(x)
ddp = torch.nn.parallel.DistributedDataParallel(
ToyModel().cuda(self.rank), device_ids=[self.rank]
)
for i in range(2):
inp = torch.rand(1, 10)
if i > 0:
# On 2nd iteration, this will fail during rebuild_buckets,
# but we should report an error regarding unused parameters
# since that is the underlying root cause.
with self.assertRaisesRegex(
RuntimeError,
"Expected to have finished reduction in the prior iteration",
):
ddp(inp).sum().backward()
else:
ddp(inp).sum().backward()
@require_backend({"gloo", "nccl"})
@require_backends_available({"gloo", "nccl"})
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_ddp_shared_grad_acc_unused_params(self):
# When find_unused_parameters=True, ensure we mark unused parameters
# even if they share gradient accumulators.
class ToyModel(nn.Module):
def __init__(self):
super(ToyModel, self).__init__()
# net1, bias, and net1.bias are all unused params.
self.net1 = nn.Linear(10, 5, bias=False)
self.bias = nn.Parameter(torch.zeros(5))
# net1.bias and self.bias are names for the same underlying
# parameter, so they share the same grad acc. This caused
# the bug reported in https://github.com/pytorch/pytorch/issues/41324.
self.net1.bias = self.bias
self.net2 = nn.Linear(10, 5)
def forward(self, x):
return self.net2(x)
torch.cuda.set_device(self.rank)
model = ToyModel().to(torch.cuda.current_device())
ddp_model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[self.rank], find_unused_parameters=True
)
inp = torch.randn(20, 10, device=self.rank)
for i in range(6):
out = ddp_model(inp)
loss = out.sum()
loss.backward()
@require_backend({"gloo", "nccl"})
@require_backends_available({"gloo", "nccl"})
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_ddp_device(self):
m = nn.Linear(10, 10).to(self.rank)
expected_len = 2
class TensorWrapper:
__slots__ = ['t', 'moved_to_gpu']
def __init__(self, t):
self.t = t
self.moved_to_gpu = False
# Handlers for specific types of validation we want to do based on
# the input type.
def tuple_and_list_validator(x):
self.assertEqual(len(x), expected_len)
self.assertEqual(1, len(set(t.device for t in x)))
self.assertEqual(x[0].device.index, self.rank)
return x[0] + x[1]
def namedtuple_validator(x):
self.assertEqual(x._fields, EXPECTED_FIELDS)
self.assertEqual(x.a.device.index, x.b.device.index)
self.assertEqual(x.a.device.index, self.rank)
return x.a + x.b
def custom_type_validator(x):
self.assertTrue(x.moved_to_gpu or (str(x.t.device) == "cpu"))
x.t = x.t.to(self.rank)
x.moved_to_gpu = True
return x.t
def dict_validator(x):
self.assertTrue(EXPECTED_FIELDS[0] in x.keys())
self.assertTrue(EXPECTED_FIELDS[1] in x.keys())
self.assertEqual(1, len(set(t.device for t in x.values())))
self.assertEqual(x[EXPECTED_FIELDS[0]].device.index, self.rank)
return x[EXPECTED_FIELDS[0]] + x[EXPECTED_FIELDS[1]]
validators = {
TensorWrapper: custom_type_validator,
tuple: tuple_and_list_validator,
list: tuple_and_list_validator,
TestNamedTupleInput_0: namedtuple_validator,
TestNamedTupleInput_1: namedtuple_validator,
dict: dict_validator,
}
class ToyModel(torch.nn.Module):
def __init__(_self): # noqa: B902
super().__init__()
_self.lin = nn.Linear(10, 10, bias=False)
def forward(_self, x, expected_type): # noqa: B902
# Similar to scatter, the recursive to in the single-device
# case does not move tensors if they are in a custom type.
self.assertTrue(isinstance(x, expected_type))
fwd_tensor = validators[expected_type](x)
return _self.lin(fwd_tensor)
model = torch.nn.parallel.DistributedDataParallel(
ToyModel().to(self.rank), device_ids=[self.rank]
)
def train_iter(inp, input_type):
for _ in range(4):
out = model(inp, input_type)
out.sum().backward()
# CPU tuple input, should be moved to the proper device before call
# to forward.
inp = tuple(torch.randn(10, 10) for _ in range(expected_len))
train_iter(inp, tuple)
# List CPU input, should be moved to proper device before call to
# forward.
inp = [torch.randn(10, 10) for _ in range(expected_len)]
train_iter(inp, list)
# Custom type containing tensor. The type is maintained, but the
# device is not propagated (which is what happens with scatter too)
inp = TensorWrapper(torch.randn(10, 10))
train_iter(inp, TensorWrapper)
# NamedTuple input. The type should be maintained and tensor inputs
# should be moved to the correct device as in scatter.
batch = 5
dim = 10
a = torch.rand(batch, dim)
b = torch.rand(batch, dim)
inp = TestNamedTupleInput_0(a, b)
train_iter(inp, type(inp))
inp = TestNamedTupleInput_1(a, b)
train_iter(inp, type(inp))
# dictionary input.
inp = {
EXPECTED_FIELDS[0]: a,
EXPECTED_FIELDS[1]: b,
}
train_iter(inp, type(inp))
@require_backend({"gloo", "nccl"})
@require_backends_available({"gloo", "nccl"})
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_ddp_namedtuple(self):
batch = 5
dim = 10
a = torch.rand(batch, dim, device=self.rank)
b = torch.rand(batch, dim, device=self.rank)
class NamedTupleModule(torch.nn.Module):
def __init__(_self): # noqa
super().__init__()
_self.lin = nn.Linear(10, 1)
def forward(_self, input, expected_type): # noqa
# Without NamedTuple support, this would be of type tuple.
self.assertTrue(
isinstance(input, expected_type),
f"Expected type {expected_type} but got {type(input)}",
)
self.assertEqual(input._fields, EXPECTED_FIELDS)
self.assertEqual(a, input.a)
self.assertEqual(b, input.b)
return _self.lin(torch.mul(input.a, input.b))
model = torch.nn.parallel.DistributedDataParallel(
NamedTupleModule().cuda(self.rank), device_ids=[self.rank]
)
inp = TestNamedTupleInput_0(a, b)
# The following would fail if DDP does not propagate NamedTuples correctly.
model(inp, type(inp))
inp = TestNamedTupleInput_1(a, b)
model(inp, type(inp))
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
417625fbe2ab321c1645fec025cb16c02390da3b | e09c01a6eb6fb87d1398f7c8502ecbfa19f28850 | /docker/pycef/verify.py | 813e070b2103d765e526cefb9028dea8987382b7 | [
"JSON",
"BSD-3-Clause",
"Artistic-2.0",
"LicenseRef-scancode-secret-labs-2011",
"LGPL-2.0-or-later",
"MIT",
"ISC",
"Artistic-1.0-Perl",
"Apache-2.0",
"Python-2.0",
"Unlicense",
"LicenseRef-scancode-generic-cla"
] | permissive | demisto/dockerfiles | a8f4bb2c291d694a1ea9bf73800a7cb05508f0ff | 6fb9b8cd786985fa7504f7e44575b7b573dd963f | refs/heads/master | 2023-08-22T07:57:50.346861 | 2023-08-20T12:48:10 | 2023-08-20T12:48:10 | 161,347,705 | 57 | 141 | MIT | 2023-09-14T15:03:44 | 2018-12-11T14:39:27 | Brainfuck | UTF-8 | Python | false | false | 615 | py | import pycef
cef = "Jul 14 2020 00:49:42 myvxkp.manage.trendmicro.com CEF:0|Trend Micro|Apex Central|2019|WB:36|36|3|deviceExternalId=1 rt=Jun 21 2020 07:56:09 GMT+00:00 app=5 cnt=1 dpt=80 act=2 src=10.128.0.11 cs1Label=SLF_PolicyName cs1=Internal User Policy deviceDirection=2 cat=36 dvchost=CU-PRO1-8254-2 request=http://www.eicar.org/download/eicar.com.txt duser=TRENDMICROAPEX-\\admin shost=TRENDMICROAPEX- deviceProcessName=C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe cn3Label=Web_Reputation_Rating cn3=49 deviceFacility=Apex One cn2Label=SLF_SeverityLevel cn2=100 "
a = pycef.parse(cef)
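# Reaching this point without an exception verifies that pycef imports and
# parses a representative CEF record; `a` holds the parsed field dictionary.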
| [
"noreply@github.com"
] | demisto.noreply@github.com |
30d407573895e37be8c589a7c6732422960405a5 | 93ad56acf9e48d590768c2c2500d76ec2c4b6bb1 | /app/main/errors.py | 7403c733b99cdc878224c88e068d40eec4dc92b1 | [] | no_license | nanohaikaros/learn_flask | ce87657cdddc6fb12bb09db625732ced370f0434 | cf3fa1ebd968da226d8f71dadbf24d6656f0ab4d | refs/heads/master | 2021-01-19T22:46:38.087567 | 2017-05-22T06:33:32 | 2017-05-22T06:33:32 | 88,866,035 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | from flask import render_template
from . import main
@main.app_errorhandler(403)
def forbidden(e):
return render_template('403.html'), 403
@main.app_errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@main.app_errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500 | [
"1371689491@qq.com"
] | 1371689491@qq.com |
4d07559abce454e6988074e172fc27bcd712241c | a2f6e449e6ec6bf54dda5e4bef82ba75e7af262c | /venv/Lib/site-packages/pandas/tests/io/test_stata.py | 5708459bbeee5168c52eb369a9e89caf6e5cf5be | [] | no_license | mylonabusiness28/Final-Year-Project- | e4b79ccce6c19a371cac63c7a4ff431d6e26e38f | 68455795be7902b4032ee1f145258232212cc639 | refs/heads/main | 2023-07-08T21:43:49.300370 | 2021-06-05T12:34:16 | 2021-06-05T12:34:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:ac3c82a633dbd93b8dd3e42a92c364056f31c17d5aab641a1531a16c854100ce
size 80351
| [
"chuksajeh1@gmail.com"
] | chuksajeh1@gmail.com |
183474e30fec8665fd8abb76258ae4b4c206c2f2 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/security/azure-mgmt-security/azure/mgmt/security/aio/operations/_regulatory_compliance_standards_operations.py | 8a7c10f30b23bf8e13a849a86fea24e6ef2a177e | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 8,189 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RegulatoryComplianceStandardsOperations:
"""RegulatoryComplianceStandardsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.security.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
filter: Optional[str] = None,
**kwargs
) -> AsyncIterable["_models.RegulatoryComplianceStandardList"]:
"""Supported regulatory compliance standards details and state.
:param filter: OData filter. Optional.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RegulatoryComplianceStandardList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.security.models.RegulatoryComplianceStandardList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegulatoryComplianceStandardList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-01-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RegulatoryComplianceStandardList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/regulatoryComplianceStandards'} # type: ignore
async def get(
self,
regulatory_compliance_standard_name: str,
**kwargs
) -> "_models.RegulatoryComplianceStandard":
"""Supported regulatory compliance details state for selected standard.
:param regulatory_compliance_standard_name: Name of the regulatory compliance standard object.
:type regulatory_compliance_standard_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RegulatoryComplianceStandard, or the result of cls(response)
:rtype: ~azure.mgmt.security.models.RegulatoryComplianceStandard
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegulatoryComplianceStandard"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-01-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'regulatoryComplianceStandardName': self._serialize.url("regulatory_compliance_standard_name", regulatory_compliance_standard_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RegulatoryComplianceStandard', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/regulatoryComplianceStandards/{regulatoryComplianceStandardName}'} # type: ignore
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
94e898b82eb56429ee14e655db9f586c9926c987 | d852a39939776a2374320f4a1371e307ec5722a8 | /evphoto/urls.py | a2d81337a3a03304700da1a3d0a130bb7dc7b6f1 | [
"MIT"
] | permissive | raikel/evphoto | 5dfd4ecb5a3516ecd636260a0ba75d4ba1970e55 | 2c3702b2adcd53b6e03a2d596bd79e174e1a93a8 | refs/heads/main | 2023-02-13T05:07:30.618069 | 2021-01-14T19:21:13 | 2021-01-14T19:21:13 | 329,567,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,381 | py | """evphoto URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic import TemplateView
from .apidoc import swagger_view, redoc_view
urlpatterns = [
path('', TemplateView.as_view(template_name='photos/index.html')),
path('admin/', admin.site.urls),
path('api/', include('photos.urls', 'photos',)),
] + (
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) +
static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
)
if settings.API_DOCS:
urlpatterns.extend([
path('api/docs/swagger/', swagger_view, name='docs-swagger'),
path('api/docs/redoc/', redoc_view, name='docs-redoc'),
])
| [
"raikelbl@gmail.com"
] | raikelbl@gmail.com |
f1ee1173e27fd577e8aa1b4c4e3acdc2cf4db1c3 | 58959ee3e92cfb4dee217df94a1eb5d0fb4a238e | /qiskit/transpiler/passes/layout/sabre_layout.py | 1ed9153fb08b4d7f1a0217c878203609801f4c5a | [
"Apache-2.0"
] | permissive | AGaliciaMartinez/qiskit-terra | e5d143393ee31af28e7fefddc52219c375d54066 | e70ce504149b40287cfb8bff3ac525159dd46a40 | refs/heads/master | 2023-03-16T09:50:59.840070 | 2022-12-11T14:25:14 | 2022-12-11T14:25:14 | 219,828,601 | 0 | 0 | Apache-2.0 | 2023-03-06T13:15:22 | 2019-11-05T18:59:22 | Python | UTF-8 | Python | false | false | 14,571 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Layout selection using the SABRE bidirectional search approach from Li et al.
"""
import copy
import logging
import numpy as np
import rustworkx as rx
from qiskit.converters import dag_to_circuit
from qiskit.transpiler.passes.layout.set_layout import SetLayout
from qiskit.transpiler.passes.layout.full_ancilla_allocation import FullAncillaAllocation
from qiskit.transpiler.passes.layout.enlarge_with_ancilla import EnlargeWithAncilla
from qiskit.transpiler.passes.layout.apply_layout import ApplyLayout
from qiskit.transpiler.passmanager import PassManager
from qiskit.transpiler.layout import Layout
from qiskit.transpiler.basepasses import TransformationPass
from qiskit.transpiler.exceptions import TranspilerError
from qiskit._accelerate.nlayout import NLayout
from qiskit._accelerate.sabre_layout import sabre_layout_and_routing
from qiskit._accelerate.sabre_swap import (
Heuristic,
NeighborTable,
)
from qiskit.transpiler.passes.routing.sabre_swap import process_swaps, apply_gate
from qiskit.tools.parallel import CPU_COUNT
logger = logging.getLogger(__name__)
class SabreLayout(TransformationPass):
"""Choose a Layout via iterative bidirectional routing of the input circuit.
Starting with a random initial `Layout`, the algorithm does a full routing
of the circuit (via the `routing_pass` method) to end up with a
`final_layout`. This final_layout is then used as the initial_layout for
routing the reverse circuit. The algorithm iterates a number of times until
it finds an initial_layout that reduces full routing cost.
This method exploits the reversibility of quantum circuits, and tries to
include global circuit information in the choice of initial_layout.
By default this pass will run both layout and routing and will transform the
circuit so that the layout is applied to the input dag (meaning that the output
circuit will have ancilla qubits allocated for unused qubits on the coupling map
and the qubits will be reordered to match the mapped physical qubits) and then
routing will be applied (inserting :class:`~.SwapGate`s to account for limited
connectivity). This is unlike most other layout passes which are :class:`~.AnalysisPass`
objects and just find an initial layout and set that on the property set. This is
done because by default the pass will run parallel seed trials with different random
seeds for selecting the random initial layout and then selecting the routed output
which results in the least number of swap gates needed.
You can use the ``routing_pass`` argument to have this pass operate as a typical
layout pass. When specified this will use the specified routing pass to select an
initial layout only and will not run multiple seed trials.
**References:**
[1] Li, Gushu, Yufei Ding, and Yuan Xie. "Tackling the qubit mapping problem
for NISQ-era quantum devices." ASPLOS 2019.
`arXiv:1809.02573 <https://arxiv.org/pdf/1809.02573.pdf>`_
"""
def __init__(
self,
coupling_map,
routing_pass=None,
seed=None,
max_iterations=3,
swap_trials=None,
layout_trials=None,
skip_routing=False,
):
"""SabreLayout initializer.
Args:
coupling_map (Coupling): directed graph representing a coupling map.
routing_pass (BasePass): the routing pass to use while iterating.
If specified this pass operates as an :class:`~.AnalysisPass` and
will only populate the ``layout`` field in the property set and
the input dag is returned unmodified. This argument is mutually
exclusive with the ``swap_trials`` and the ``layout_trials``
arguments and if this is specified at the same time as either
argument an error will be raised.
seed (int): seed for setting a random first trial layout.
max_iterations (int): number of forward-backward iterations.
swap_trials (int): The number of trials to run of
:class:`~.SabreSwap` for each iteration. This is equivalent to
the ``trials`` argument on :class:`~.SabreSwap`. If this is not
specified (and ``routing_pass`` isn't set) by default the number
of physical CPUs on your local system will be used. For
reproducibility between environments it is best to set this
to an explicit number because the output will potentially depend
on the number of trials run. This option is mutually exclusive
with the ``routing_pass`` argument and an error will be raised
if both are used.
layout_trials (int): The number of random seed trials to run
                layout with. When > 1 the trial that results in the output with
the fewest swap gates will be selected. If this is not specified
(and ``routing_pass`` is not set) then the number of local
physical CPUs will be used as the default value. This option is
mutually exclusive with the ``routing_pass`` argument and an error
will be raised if both are used.
skip_routing (bool): If this is set ``True`` and ``routing_pass`` is not used
then routing will not be applied to the output circuit. Only the layout
will be returned in the property set. This is a tradeoff to run custom
routing with multiple layout trials, as using this option will cause
SabreLayout to run the routing stage internally but not use that result.
Raises:
TranspilerError: If both ``routing_pass`` and ``swap_trials`` or
both ``routing_pass`` and ``layout_trials`` are specified
"""
super().__init__()
self.coupling_map = coupling_map
self._neighbor_table = None
if self.coupling_map is not None:
if not self.coupling_map.is_symmetric:
# deepcopy is needed here to avoid modifications updating
# shared references in passes which require directional
# constraints
self.coupling_map = copy.deepcopy(self.coupling_map)
self.coupling_map.make_symmetric()
self._neighbor_table = NeighborTable(rx.adjacency_matrix(self.coupling_map.graph))
if routing_pass is not None and (swap_trials is not None or layout_trials is not None):
raise TranspilerError("Both routing_pass and swap_trials can't be set at the same time")
self.routing_pass = routing_pass
self.seed = seed
self.max_iterations = max_iterations
self.trials = swap_trials
if swap_trials is None:
self.swap_trials = CPU_COUNT
else:
self.swap_trials = swap_trials
if layout_trials is None:
self.layout_trials = CPU_COUNT
else:
self.layout_trials = layout_trials
self.skip_routing = skip_routing
def run(self, dag):
"""Run the SabreLayout pass on `dag`.
Args:
dag (DAGCircuit): DAG to find layout for.
Returns:
DAGCircuit: The output dag if swap mapping was run
(otherwise the input dag is returned unmodified).
Raises:
TranspilerError: if dag wider than self.coupling_map
"""
if len(dag.qubits) > self.coupling_map.size():
raise TranspilerError("More virtual qubits exist than physical.")
# Choose a random initial_layout.
if self.routing_pass is not None:
if self.seed is None:
seed = np.random.randint(0, np.iinfo(np.int32).max)
else:
seed = self.seed
rng = np.random.default_rng(seed)
physical_qubits = rng.choice(self.coupling_map.size(), len(dag.qubits), replace=False)
physical_qubits = rng.permutation(physical_qubits)
initial_layout = Layout({q: dag.qubits[i] for i, q in enumerate(physical_qubits)})
self.routing_pass.fake_run = True
# Do forward-backward iterations.
circ = dag_to_circuit(dag)
rev_circ = circ.reverse_ops()
for _ in range(self.max_iterations):
for _ in ("forward", "backward"):
pm = self._layout_and_route_passmanager(initial_layout)
new_circ = pm.run(circ)
# Update initial layout and reverse the unmapped circuit.
pass_final_layout = pm.property_set["final_layout"]
final_layout = self._compose_layouts(
initial_layout, pass_final_layout, new_circ.qregs
)
initial_layout = final_layout
circ, rev_circ = rev_circ, circ
# Diagnostics
logger.info("new initial layout")
logger.info(initial_layout)
for qreg in dag.qregs.values():
initial_layout.add_register(qreg)
self.property_set["layout"] = initial_layout
self.routing_pass.fake_run = False
return dag
dist_matrix = self.coupling_map.distance_matrix
original_qubit_indices = {bit: index for index, bit in enumerate(dag.qubits)}
original_clbit_indices = {bit: index for index, bit in enumerate(dag.clbits)}
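        # Flatten the DAG into (node id, qubit indices, clbit indices) tuples so
        # the Rust-space sabre_layout_and_routing routine can consume it directly.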
dag_list = []
for node in dag.topological_op_nodes():
cargs = {original_clbit_indices[x] for x in node.cargs}
if node.op.condition is not None:
for clbit in dag._bits_in_condition(node.op.condition):
cargs.add(original_clbit_indices[clbit])
dag_list.append(
(
node._node_id,
[original_qubit_indices[x] for x in node.qargs],
cargs,
)
)
((initial_layout, final_layout), swap_map, gate_order) = sabre_layout_and_routing(
len(dag.clbits),
dag_list,
self._neighbor_table,
dist_matrix,
Heuristic.Decay,
self.seed,
self.max_iterations,
self.swap_trials,
self.layout_trials,
)
# Apply initial layout selected.
original_dag = dag
layout_dict = {}
num_qubits = len(dag.qubits)
for k, v in initial_layout.layout_mapping():
if k < num_qubits:
layout_dict[dag.qubits[k]] = v
        initial_layout = Layout(layout_dict)
        self.property_set["layout"] = initial_layout
# If skip_routing is set then return the layout in the property set
# and throwaway the extra work we did to compute the swap map
if self.skip_routing:
return dag
# After this point the pass is no longer an analysis pass and the
# output circuit returned is transformed with the layout applied
# and swaps inserted
dag = self._apply_layout_no_pass_manager(dag)
# Apply sabre swap ontop of circuit with sabre layout
final_layout_mapping = final_layout.layout_mapping()
self.property_set["final_layout"] = Layout(
{dag.qubits[k]: v for (k, v) in final_layout_mapping}
)
mapped_dag = dag.copy_empty_like()
canonical_register = dag.qregs["q"]
qubit_indices = {bit: idx for idx, bit in enumerate(canonical_register)}
original_layout = NLayout.generate_trivial_layout(self.coupling_map.size())
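        # Replay the gates in the order chosen by routing, inserting the swaps
        # recorded in swap_map ahead of each node as required.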
for node_id in gate_order:
node = original_dag._multi_graph[node_id]
process_swaps(
swap_map,
node,
mapped_dag,
original_layout,
canonical_register,
False,
qubit_indices,
)
apply_gate(mapped_dag, node, original_layout, canonical_register, False, layout_dict)
return mapped_dag
def _apply_layout_no_pass_manager(self, dag):
"""Apply and embed a layout into a dagcircuit without using a ``PassManager`` to
avoid circuit<->dag conversion.
"""
ancilla_pass = FullAncillaAllocation(self.coupling_map)
ancilla_pass.property_set = self.property_set
dag = ancilla_pass.run(dag)
enlarge_pass = EnlargeWithAncilla()
enlarge_pass.property_set = ancilla_pass.property_set
dag = enlarge_pass.run(dag)
apply_pass = ApplyLayout()
apply_pass.property_set = enlarge_pass.property_set
dag = apply_pass.run(dag)
return dag
def _layout_and_route_passmanager(self, initial_layout):
"""Return a passmanager for a full layout and routing.
We use a factory to remove potential statefulness of passes.
"""
layout_and_route = [
SetLayout(initial_layout),
FullAncillaAllocation(self.coupling_map),
EnlargeWithAncilla(),
ApplyLayout(),
self.routing_pass,
]
pm = PassManager(layout_and_route)
return pm
def _compose_layouts(self, initial_layout, pass_final_layout, qregs):
"""Return the real final_layout resulting from the composition
of an initial_layout with the final_layout reported by a pass.
The routing passes internally start with a trivial layout, as the
layout gets applied to the circuit prior to running them. So the
"final_layout" they report must be amended to account for the actual
initial_layout that was selected.
"""
trivial_layout = Layout.generate_trivial_layout(*qregs)
qubit_map = Layout.combine_into_edge_map(initial_layout, trivial_layout)
final_layout = {v: pass_final_layout._v2p[qubit_map[v]] for v in initial_layout._v2p}
return Layout(final_layout)
| [
"noreply@github.com"
] | AGaliciaMartinez.noreply@github.com |
7c27410170786c5d93f67b9ebcfa51fd96f37230 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02266/s420424562.py | 880d6c89e7de0c5785e81f9f9077c6c0f5b05e62 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | from queue import LifoQueue
MAP = input()
que = LifoQueue()
res = LifoQueue()
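# This appears to be the classic "areas on the cross-section diagram" stack
# problem (AOJ ALDS1_3_D): each '\' pushes its index, each '/' pops the matching
# '\' to close a puddle, and partial puddles already on `res` that started
# inside the new span are merged into it before it is stored.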
for i, m in enumerate(MAP):
if m=='\\':
que.put(i)
elif m=='/':
if not que.empty():
j = que.get(False)
v = i - j
t = (j, v)
while not res.empty():
pre = res.get(False)
if (pre[0] > j):
t = (t[0], t[1] + pre[1])
else:
res.put(pre)
res.put(t)
break
else:
res.put(t)
summaly = 0
lakes = []
while not res.empty():
v = res.get()
lakes.append(v[1])
summaly += v[1]
print(summaly)
print(len(lakes), *(reversed(lakes)))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4601aa9f22da55cb30b966de539a25b4e9f6c58d | 55628a9a08a6b6646b4a8aa74bedbf2e3fd7d850 | /.history/master_20200119193629.py | 2db10fab9ef6d872c3dc3a5b8b648d2a52490261 | [] | no_license | StRobertCHSCS/final-project-team | c115dc11b318f7ac782c94860a8801bb558bd107 | 48907e72813c4dd3b48ff36f794f6fce04533219 | refs/heads/master | 2020-12-03T22:35:37.833893 | 2020-01-31T04:05:38 | 2020-01-31T04:05:38 | 231,506,873 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,480 | py | '''
- make snake longer when eaten
- FIGURE OUT HOW TO KNOW WHERE TO ADD THE NEXT BLOCK (MOVE LAST LOCATION TO BACK)
- fix player_location lists, so that the list only holds the current snake locations, not an infinite list (done)
- fix apple so it disappears when you go over it (done)
'''
import arcade
import random
# Set how many rows and columns we will have
ROW_COUNT = 29
COLUMN_COUNT = 51
# This sets the WIDTH and HEIGHT of each grid location
WIDTH = 20
HEIGHT = 20
# This sets the margin between each cell
# and on the edges of the screen.
MARGIN = 5
# Do the math to figure out our screen dimensions
SCREEN_WIDTH = (WIDTH + MARGIN) * COLUMN_COUNT + MARGIN
SCREEN_HEIGHT = (HEIGHT + MARGIN) * ROW_COUNT + MARGIN
up = False
down = False
left = False
right = False
player_x_column = 5
player_y_row = 5
body = 1
snake_pos = []
apple_x = random.randint(0, COLUMN_COUNT - 1)  # randint is inclusive; keep the apple on the grid
apple_y = random.randint(0, ROW_COUNT - 1)
apple_display = True
grid_texture = arcade.load_texture("29x51_grid.jpg")
def on_update(delta_time):
snake_move()
def on_draw():
arcade.start_render()
grid_background()
snake()
apple()
def grid_background():
arcade.draw_texture_rectangle(SCREEN_WIDTH//2, SCREEN_HEIGHT//2, grid_texture.width, grid_texture.height, grid_texture, 0)
def snake_move():
global player_x, player_y, player_x_column, player_y_row
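    # Step the head one grid cell in the current direction; leaving the grid
    # counts as a crash and resets the game via restart().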
if (0 <= player_x_column < COLUMN_COUNT) and (0 <= player_y_row < ROW_COUNT):
if up:
player_y_row += 1
elif down:
player_y_row -= 1
elif right:
player_x_column += 1
elif left:
player_x_column -= 1
# for i in range (1):
# player_loaction_x = player_loaction_x(player_x_column)
# player_loaction_y.append(player_y_row)
else:
restart()
# Player coordinates
player_x = (MARGIN + WIDTH) * player_x_column + MARGIN + WIDTH // 2
player_y = (MARGIN + HEIGHT) * player_y_row + MARGIN + HEIGHT // 2
def restart():
    global player_x_column, player_y_row, body
    global up, down, left, right
    player_x_column = 5
    player_y_row = 5
    snake_pos.clear()  # drop the old body trail
    body = 1  # must be declared global above, or the reset is silently local
    up = False
    down = False
    left = False
    right = False
    print("You died")
def snake():
global player_x_column, player_y_row, apple_x, apple_y, snake_len, body
arcade.draw_rectangle_filled(player_x , player_y, WIDTH, HEIGHT, arcade.color.BLUE)
snake_len = [[player_x_column, player_y_row]]
if (body > 1):
for num in range (1, body):
snake_len.append([10 + num, 10])
# snake_len[i]= snake_len[i-1]
snake_pos.append([player_x_column, player_y_row])
if body < len(snake_pos):
snake_pos.pop(0)
print(snake_len, "body", body, len(snake_pos), snake_pos)
# for index in range (body - 1, 0, -1):
# player_x_column = snake_len[index - 1][0]
# player_y_row = snake_len[index - 1][1]
# snake_len[index]
for i in range (body):
arcade.draw_rectangle_filled(
(MARGIN + WIDTH) * snake_len[i][0] + MARGIN + WIDTH // 2,
(MARGIN + HEIGHT) * snake_len[i][1] + MARGIN + HEIGHT // 2 ,
WIDTH, HEIGHT, arcade.color.BLUE)
def apple():
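    # Draw the apple each frame; when the snake head reaches it, grow the body
    # by one segment and respawn the apple at a new random grid cell.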
global apple_x, apple_y, apple_x_coordinate, apple_y_coordinate, body, snake_len
global SPEED
apple_x_coordinate = (MARGIN + WIDTH) * apple_x + MARGIN + WIDTH // 2
apple_y_coordinate = (MARGIN + HEIGHT) * apple_y + MARGIN + HEIGHT // 2
if (player_x_column == apple_x) and (player_y_row == apple_y):
apple_display = False
body += 1
print ("hit")
else:
apple_display = True
if apple_display is True:
arcade.draw_rectangle_filled(apple_x_coordinate, apple_y_coordinate, WIDTH, HEIGHT, arcade.color.RED)
elif apple_display is False:
        apple_x = random.randint(0, COLUMN_COUNT - 1)
        apple_y = random.randint(0, ROW_COUNT - 1)
apple_x_coordinate = (MARGIN + WIDTH) * apple_x + MARGIN + WIDTH // 2
apple_y_coordinate = (MARGIN + HEIGHT) * apple_y + MARGIN + HEIGHT // 2
        apple_display = True
def on_key_press(key, modifiers):
global up, down, left, right
if key == arcade.key.W:
up = True
down = False
right = False
left = False
elif key == arcade.key.S:
down = True
up = False
right = False
left = False
elif key == arcade.key.A:
left = True
up = False
down = False
right = False
elif key == arcade.key.D:
right = True
up = False
down = False
left = False
def on_key_release(key, modifiers):
pass
def on_mouse_press(x, y, button, modifiers):
pass
def setup():
global grid
# global player_x_column, apple_x, player_y_row, apple_y, SPEED
# SPEED = 10
# if (player_x_column == apple_x) and (player_y_row == apple_y):
# SPEED += 5
arcade.open_window(SCREEN_WIDTH, SCREEN_HEIGHT, "snake")
arcade.set_background_color(arcade.color.BLACK)
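    # tick the game logic ten times per second (this sets the snake's speed)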
arcade.schedule(on_update, 1/10)
# Override arcade window methods
window = arcade.get_window()
window.on_draw = on_draw
window.on_key_press = on_key_press
window.on_key_release = on_key_release
window.on_mouse_press = on_mouse_press
arcade.run()
if __name__ == '__main__':
setup() | [
"clementina1023@gmail.com"
] | clementina1023@gmail.com |
b04b76458382bec38d6c2113df7825f5c942223b | d125c002a6447c3f14022b786b07712a7f5b4974 | /tests/functional/arno/indices/test_lower_bound_desc_02_segments_01.py | 690211e08ed9cda8673a99c976031908ca55972f | [
"MIT"
] | permissive | FirebirdSQL/firebird-qa | 89d5b0035071f9f69d1c869997afff60c005fca9 | cae18186f8c31511a7f68248b20f03be2f0b97c6 | refs/heads/master | 2023-08-03T02:14:36.302876 | 2023-07-31T23:02:56 | 2023-07-31T23:02:56 | 295,681,819 | 3 | 2 | MIT | 2023-06-16T10:05:55 | 2020-09-15T09:41:22 | Python | UTF-8 | Python | false | false | 2,148 | py | #coding:utf-8
"""
ID: index.lower-bound-desc-2-segments
TITLE: DESC 2-segment index lower bound
DESCRIPTION:
    Check that all 5 values are fetched with the "equals" operator on the first
    segment and the "lower than or equal" operator on the second segment. 2 values
    are bound to the lower segments and 1 value is bound to the upper segment.
FBTEST: functional.arno.indices.lower_bound_desc_02_segments_01
"""
import pytest
from firebird.qa import *
init_script = """CREATE TABLE Table_2_10 (
F1 INTEGER,
F2 INTEGER
);
COMMIT;
INSERT INTO Table_2_10 (F1, F2) VALUES (1, 1);
INSERT INTO Table_2_10 (F1, F2) VALUES (1, 2);
INSERT INTO Table_2_10 (F1, F2) VALUES (1, 3);
INSERT INTO Table_2_10 (F1, F2) VALUES (1, 4);
INSERT INTO Table_2_10 (F1, F2) VALUES (1, 5);
INSERT INTO Table_2_10 (F1, F2) VALUES (1, 6);
INSERT INTO Table_2_10 (F1, F2) VALUES (1, 7);
INSERT INTO Table_2_10 (F1, F2) VALUES (1, 8);
INSERT INTO Table_2_10 (F1, F2) VALUES (1, 9);
INSERT INTO Table_2_10 (F1, F2) VALUES (1, 10);
INSERT INTO Table_2_10 (F1, F2) VALUES (2, 1);
INSERT INTO Table_2_10 (F1, F2) VALUES (2, 2);
INSERT INTO Table_2_10 (F1, F2) VALUES (2, 3);
INSERT INTO Table_2_10 (F1, F2) VALUES (2, 4);
INSERT INTO Table_2_10 (F1, F2) VALUES (2, 5);
INSERT INTO Table_2_10 (F1, F2) VALUES (2, 6);
INSERT INTO Table_2_10 (F1, F2) VALUES (2, 7);
INSERT INTO Table_2_10 (F1, F2) VALUES (2, 8);
INSERT INTO Table_2_10 (F1, F2) VALUES (2, 9);
INSERT INTO Table_2_10 (F1, F2) VALUES (2, 10);
COMMIT;
CREATE DESC INDEX I_Table_2_10_DESC ON Table_2_10 (F1, F2);
COMMIT;
"""
db = db_factory(init=init_script)
test_script = """SET PLAN ON;
SELECT
t.F1,
t.F2
FROM
Table_2_10 t
WHERE
t.F1 = 2 and t.F2 <= 5;"""
act = isql_act('db', test_script)
expected_stdout = """PLAN (T INDEX (I_TABLE_2_10_DESC))
F1 F2
============ ============
2 1
2 2
2 3
2 4
2 5"""
@pytest.mark.version('>=3')
def test_1(act: Action):
act.expected_stdout = expected_stdout
act.execute()
assert act.clean_stdout == act.clean_expected_stdout
| [
"pcisar@ibphoenix.cz"
] | pcisar@ibphoenix.cz |
4d4d446dea7a1fe92f5f2c1ea6b02b40da00c543 | a479a5773fd5607f96c3b84fed57733fe39c3dbb | /napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/state/__init__.py | d5b7c2d5ae95cc62dff09701a8fb36a39539399e | [
"Apache-2.0"
] | permissive | napalm-automation/napalm-yang | 839c711e9294745534f5fbbe115e0100b645dbca | 9148e015b086ebe311c07deb92e168ea36fd7771 | refs/heads/develop | 2021-01-11T07:17:20.226734 | 2019-05-15T08:43:03 | 2019-05-15T08:43:03 | 69,226,025 | 65 | 64 | Apache-2.0 | 2019-05-15T08:43:24 | 2016-09-26T07:48:42 | Python | UTF-8 | Python | false | false | 98,243 | py | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isn/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container describes IS MT-ISN state.
"""
__slots__ = ("_path_helper", "_extmethods", "__type")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"mt-isn",
"state",
]
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/state/type (identityref)
YANG Description: The type of TLV being described. The type of TLV is
expressed as a canonical name.
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/state/type (identityref)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: The type of TLV being described. The type of TLV is
expressed as a canonical name.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """type must be of a type compatible with identityref""",
"defined-type": "openconfig-network-instance:identityref",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'AREA_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:AREA_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IIS_NEIGHBORS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IIS_NEIGHBORS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'INSTANCE_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:INSTANCE_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'AUTHENTICATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:AUTHENTICATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'PURGE_OI': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:PURGE_OI': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'EXTENDED_IS_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_ALIAS_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_ALIAS_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_INTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'NLPID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:NLPID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_EXTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 
'http://openconfig.net/yang/isis-lsdb-types'}, 'EXTENDED_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'DYNAMIC_NAME': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:DYNAMIC_NAME': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_ISN': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_ISN': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MULTI_TOPOLOGY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MULTI_TOPOLOGY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ROUTER_CAPABILITIES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ROUTER_CAPABILITIES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}},), is_leaf=True, yang_name="type", 
parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
}
)
self.__type = t
if hasattr(self, "_set"):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
type = __builtin__.property(_get_type)
_pyangbind_elements = OrderedDict([("type", type)])
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isn/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container describes IS MT-ISN state.
"""
__slots__ = ("_path_helper", "_extmethods", "__type")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"mt-isn",
"state",
]
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/state/type (identityref)
YANG Description: The type of TLV being described. The type of TLV is
expressed as a canonical name.
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/state/type (identityref)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: The type of TLV being described. The type of TLV is
expressed as a canonical name.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """type must be of a type compatible with identityref""",
"defined-type": "openconfig-network-instance:identityref",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'AREA_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:AREA_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IIS_NEIGHBORS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IIS_NEIGHBORS': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'INSTANCE_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:INSTANCE_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'AUTHENTICATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:AUTHENTICATION': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'PURGE_OI': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:PURGE_OI': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'EXTENDED_IS_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ISIS_ALIAS_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ISIS_ALIAS_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_INTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'NLPID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:NLPID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_EXTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 
'http://openconfig.net/yang/isis-lsdb-types'}, 'EXTENDED_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'DYNAMIC_NAME': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:DYNAMIC_NAME': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV4_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV4_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_SRLG': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_TE_ROUTER_ID': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_ISN': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_ISN': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MULTI_TOPOLOGY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MULTI_TOPOLOGY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_IPV4_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'MT_IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:MT_IPV6_REACHABILITY': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'ROUTER_CAPABILITIES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}, 'oc-isis-lsdb-types:ROUTER_CAPABILITIES': {'@module': 'openconfig-isis-lsdb-types', '@namespace': 'http://openconfig.net/yang/isis-lsdb-types'}},), is_leaf=True, yang_name="type", 
parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
}
)
self.__type = t
if hasattr(self, "_set"):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AREA_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IIS_NEIGHBORS": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:INSTANCE_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:AUTHENTICATION": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:PURGE_OI": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IS_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ISIS_ALIAS_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:NLPID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_EXTERNAL_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:EXTENDED_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:DYNAMIC_NAME": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV4_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_SRLG": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_TE_ROUTER_ID": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_ISN": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IS_NEIGHBOR_ATTRIBUTE": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MULTI_TOPOLOGY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_INTERFACE_ADDRESSES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV4_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:MT_IPV6_REACHABILITY": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
"oc-isis-lsdb-types:ROUTER_CAPABILITIES": {
"@module": "openconfig-isis-lsdb-types",
"@namespace": "http://openconfig.net/yang/isis-lsdb-types",
},
},
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
type = __builtin__.property(_get_type)
_pyangbind_elements = OrderedDict([("type", type)])
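# Usage sketch (illustrative only, not part of the generated file): a backend
# populating this read-only container would call, e.g.,
# tlv_state._set_type("MT_ISN") on a hypothetical instance `tlv_state`; the
# value is validated against the identityref keys listed above and is read
# back through the read-only `type` property.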
| [
"dbarrosop@dravetech.com"
] | dbarrosop@dravetech.com |
3c39fb15eff05bb0b750c408fe4b51bb2ece9eb1 | 01733042e84a768b77f64ec24118d0242b2f13b8 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/globals/protocolstack/radiusglobals/radiusglobals.py | 732fa15ba936da62be6717259bf7eb06313f5074 | [
"MIT"
] | permissive | slieberth/ixnetwork_restpy | e95673905854bc57e56177911cb3853c7e4c5e26 | 23eeb24b21568a23d3f31bbd72814ff55eb1af44 | refs/heads/master | 2023-01-04T06:57:17.513612 | 2020-10-16T22:30:55 | 2020-10-16T22:30:55 | 311,959,027 | 0 | 0 | NOASSERTION | 2020-11-11T12:15:34 | 2020-11-11T12:06:00 | null | UTF-8 | Python | false | false | 5,069 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class RadiusGlobals(Base):
"""Global settings for the RADIUS extension.
The RadiusGlobals class encapsulates a list of radiusGlobals resources that are managed by the user.
A list of resources can be retrieved from the server using the RadiusGlobals.find() method.
The list can be managed by using the RadiusGlobals.add() and RadiusGlobals.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'radiusGlobals'
_SDM_ATT_MAP = {
'ObjectId': 'objectId',
}
def __init__(self, parent):
super(RadiusGlobals, self).__init__(parent)
@property
def DhcpOptionSet(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.globals.protocolstack.dhcpglobals.dhcpoptionset.dhcpoptionset.DhcpOptionSet): An instance of the DhcpOptionSet class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.globals.protocolstack.dhcpglobals.dhcpoptionset.dhcpoptionset import DhcpOptionSet
return DhcpOptionSet(self)
@property
def ObjectId(self):
"""
Returns
-------
- str: Unique identifier for this object
"""
return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])
def add(self):
"""Adds a new radiusGlobals resource on the server and adds it to the container.
Returns
-------
- self: This instance with all currently retrieved radiusGlobals resources using find and the newly added radiusGlobals resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained radiusGlobals resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ObjectId=None):
"""Finds and retrieves radiusGlobals resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve radiusGlobals resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all radiusGlobals resources from the server.
Args
----
- ObjectId (str): Unique identifier for this object
Returns
-------
- self: This instance with matching radiusGlobals resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of radiusGlobals data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the radiusGlobals resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
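# Minimal usage sketch (the session object `ixnetwork` and the exact parent
# path are assumptions based on this module's package location):
#
#     radius_globals = ixnetwork.Globals.ProtocolStack.RadiusGlobals.find()
#     print(radius_globals.ObjectId)
#
# Only the add()/find()/remove()/read() operations defined above are
# available on this container.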
| [
"andy.balogh@keysight.com"
] | andy.balogh@keysight.com |
13f9ee52c3ae3189d989e10eec846b988ed1b723 | e85e846960750dd498431ac8412d9967646ff98d | /certificates/migrations/0007_auto_20170222_1005.py | 09bd66c793fc3f42e6ff4fa3b92692a45a7f4742 | [] | no_license | onosaburo/clublink_django | 19368b4a59b3aed3632883ceffe3326bfc7a61a6 | d2f6024b6224ea7f47595481b3382b8d0670584f | refs/heads/master | 2022-03-30T05:30:12.288354 | 2020-01-27T18:09:11 | 2020-01-27T18:09:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-02-22 10:05
# flake8: noqa
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('certificates', '0006_auto_20170222_0827'),
]
operations = [
migrations.AlterModelOptions(
name='certificate',
options={'permissions': (('can_create', 'Can create certificates'), ('can_view', 'Can view certificates'))},
),
]
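# Note: AlterModelOptions is a state-only operation; it updates the model's
# Meta (here, the custom certificate permissions) without issuing schema
# changes against the certificate table.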
| [
"bestwork888@outlook.com"
] | bestwork888@outlook.com |
06784a03823b09ab1c522efa2cce9f839dc4592b | 99b92de16867f4846b9119e94d13377a58c182f4 | /doped_cath/tst/migrations/tp/0006_load_nDOPE.py | a6bd4c6354b2c256866c882e3299a25ce42abbd8 | [] | no_license | CATH-summer-2017/domchop | 8ef37f8e218ff99e7f5e1c7a75c406327ad22ff0 | d0c717ebc0541eba0d196a3c5885e4edf83a0ecb | refs/heads/master | 2020-12-14T07:20:43.062710 | 2017-09-25T23:21:05 | 2017-09-25T23:21:05 | 95,545,543 | 2 | 3 | null | 2017-06-30T11:16:10 | 2017-06-27T10:13:23 | null | UTF-8 | Python | false | false | 1,506 | py |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
# def load_classes_sql():
# from coffeehouse.settings import PROJECT_DIR
# import os
# sql_statements = open(os.path.join(PROJECT_DIR,'tst/sql/load_classes.sql'), 'r').read()
# return sql_statements
'''
INSERT INTO DJANGO_CATH.tst_classification (id,Class,arch,topo,homsf,s35,s60,s95,s100,version_id,level_id)
select * from CATH.temp_class;
'''
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
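# Note: file_len counts lines by enumerating the open file; on an empty file
# the loop body never runs and `i` stays unbound, so the nDOPE CSV below is
# assumed to be non-empty.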
def load_nDOPE(apps, schema_editor):
import sys
# classification = apps.get_model("tst", "classification")
domain = apps.get_model("tst", "domain")
import os
cwd = os.getcwd()
print >>sys.stdout, '\n\n\n %s'%cwd
dope_file=('tst/migrations/bak/nDOPE-s35-v410.csv');
import csv
cnt = 0;
cmax = file_len(dope_file);
with open(dope_file,'r') as f:
c = csv.reader(f);
for row in c:
dom = domain.objects.get(domain_id=row[0])
dom.nDOPE = float(row[1]);
dom.save()
cnt += 1
if not cnt%100:
print >>sys.stdout, '%d of %d lines loaded'%(cnt,cmax);
# f.readlines()
# for d in domain.objects.
def do_nothing(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('tst', '0005_make_parents'),
]
operations = [
migrations.RunPython(load_nDOPE,do_nothing),
]
| [
"shouldsee.gem@gmail.com"
] | shouldsee.gem@gmail.com |
34d116201474c7e62a2d9adcc6b8d3a85318c2d2 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /zqMREZ2MQd9M5jNfM_9.py | d93270c362002418e307c213ec315c4133aacb07 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | """
Create a function that takes a number as an argument and returns `True` or
`False` depending on whether the number is symmetrical or not. A number is
symmetrical when it is the same as its reverse.
### Examples
is_symmetrical(7227) ➞ True
is_symmetrical(12567) ➞ False
is_symmetrical(44444444) ➞ True
is_symmetrical(9939) ➞ False
is_symmetrical(1112111) ➞ True
### Notes
N/A
"""
def is_symmetrical(num):
    # A number is symmetrical when its string form equals its reverse.
    num = str(num)
    return num == num[::-1]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
d9d7d3810f2986d67be89873e3fb282685427dc5 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /J9fCHDa3yYJWnK3A7_17.py | a1830f2d014b3eddfc13778136564b2dc07fd3af | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | """
A **happy number** is a number which yields a `1` by repeatedly summing up the
square of its digit. If such a process results in an endless cycle of numbers
containing `4`, the number is said to be an **unhappy number**.
Create a function that accepts a number and determines whether the number is a
_happy number_ or not. Return `True` if so, `False` otherwise.
### Examples
is_happy(67) ➞ False
is_happy(89) ➞ False
is_happy(139) ➞ True
is_happy(1327) ➞ False
is_happy(2871) ➞ False
is_happy(3970) ➞ True
### Notes
* You are expected to solve this challenge via recursion.
* You can check on the **Resources** tab for more details about recursion.
* A non-recursive version of this challenge can be found in [here](https://edabit.com/challenge/rGAcibgZ6u9MtasfW).
"""
def f(n):
return sum(int(d)**2 for d in str(n))
def is_happy(n):
return True if n == 1 else False if n == 4 else is_happy(f(n))
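# Worked example: f(139) = 1 + 9 + 81 = 91, f(91) = 82, f(82) = 68,
# f(68) = 100, f(100) = 1, so is_happy(139) is True. Any unhappy chain
# eventually reaches 4 (e.g. 89 -> 145 -> 42 -> 20 -> 4), hence the base case.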
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
352548896ec7b76f0e9e9426ba84f24f31e81d5f | b214439d1f529a0b22a6f3afdad62752e87393f1 | /config.py | cecee97cdf84aedb880e165cd80b4b4b01e0aea0 | [] | no_license | LGY2008/GZ05UiHMTTAutoTest | 3dc245c00d9f0ac5dc107a85872b30ade3e64d25 | 87fe766ca7673b2ebfc7ef2e5c5141c89a6792ef | refs/heads/master | 2020-11-27T09:58:09.136575 | 2019-12-21T07:23:25 | 2019-12-21T07:23:25 | 229,391,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | import os
BASE_PATH = os.path.dirname(__file__)
# print(BASE_PATH)
# print("__file__获取当前模块所在路径:", __file__)
| [
"150691555@qq.com"
] | 150691555@qq.com |
f476eb1935ab5f29eb6380a213b3e4fd835af68f | bc8509d57a162fb685da06a98c67dc8130d96316 | /src/nninst/utils/numpy.py | 9607ffcb70f0a6dc19d1c7f583b44e22f2c0c57c | [
"Apache-2.0"
] | permissive | Ptolemy-DL/Ptolemy | 2065e2d157d641010567062410bee4608691d059 | f72a531286d17c69e0e2e84d0ad8a5b0587e2e08 | refs/heads/master | 2023-05-29T08:58:18.328258 | 2021-06-15T09:28:16 | 2021-06-15T09:28:16 | 284,590,756 | 115 | 5 | NOASSERTION | 2020-10-24T04:18:51 | 2020-08-03T03:06:35 | Python | UTF-8 | Python | false | false | 2,781 | py | from typing import Union
import numpy as np
__all__ = ["argtopk", "arg_approx", "arg_approx_signed", "repeat", "concatenate"]
def get_int_k(array: np.ndarray, k: Union[int, float]) -> int:
if type(k) is float:
if 0.0 < k < 1.0:
k = round(array.size * k)
if k == array.size:
return array.size - 1
elif k == 0:
return 1
return k
else:
raise ValueError()
else:
return k
def argtopk(array: np.ndarray, k: Union[int, float]) -> np.ndarray:
k = get_int_k(array, k)
if k == 1:
return np.array([np.argmax(array)])
else:
return np.argpartition(array, -k, axis=None)[-k:]
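# argtopk returns the indices of the k largest entries in no particular order
# (np.argpartition only partitions); arg_sorted_topk below additionally sorts
# them in descending order of value.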
def arg_sorted_topk(array: np.ndarray, k: Union[int, float]) -> np.ndarray:
# topk_index = argtopk(array, k)
# sorted_index = np.array(list(reversed(np.argsort(array[topk_index]))))
# return topk_index[sorted_index]
k = get_int_k(array, k)
return np.argsort(array)[::-1][:k]
def arg_approx(array: np.ndarray, precision: float) -> np.ndarray:
if (1 / array.size) >= precision:
return np.array([np.argmax(array)])
input_sum = array.sum()
if input_sum <= 0:
return np.array([np.argmax(array)])
input = array.flatten()
threshold = input_sum * precision
sorted_input = input.copy()
sorted_input[::-1].sort()
# topk = np.argmax(sorted_input.cumsum() >= threshold)
topk = sorted_input.cumsum().searchsorted(threshold)
if topk == len(input):
return np.where(input > 0)[0]
else:
return argtopk(input, topk + 1)
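# Illustration: arg_approx picks the smallest set of largest entries whose sum
# reaches `precision` of the total, e.g.
# arg_approx(np.array([5., 3., 1.5, 0.5]), 0.9) selects indices {0, 1, 2}
# since 5 + 3 + 1.5 = 9.5 >= 0.9 * 10.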
# def arg_approx(array: np.ndarray, precision: float) -> np.ndarray:
# input_sum = array.sum()
# if input_sum == 0:
# return np.array([], dtype=np.int64)
# input = array.flatten()
# threshold = input_sum * precision
# sorted_input = input.copy()
# sorted_input[::-1].sort()
# # topk = np.argmax(sorted_input.cumsum() >= threshold)
# topk = sorted_input.cumsum().searchsorted(threshold)
# return argtopk(input, topk + 1)
def arg_approx_signed(array: np.ndarray, precision: float) -> np.ndarray:
result = []
for input in [array.copy(), -array]:
input[input < 0] = 0
result.append(arg_approx(input, precision))
return np.concatenate(result)
def repeat(a: int, repeats: int) -> np.ndarray:
# if repeats > 1:
# return np.repeat(a, repeats)
# elif repeats == 1:
# return np.array([a])
# else:
# return np.array([])
return np.repeat(a, repeats)
def concatenate(a_tuple, axis=0, out=None, dtype=np.int64) -> np.ndarray:
if len(a_tuple) == 0:
return np.array([], dtype=dtype)
else:
return np.concatenate(a_tuple, axis, out)
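# Note: the empty-tuple branch exists because np.concatenate itself raises
# ValueError when given no arrays; callers get an empty int64 array instead.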
| [
"ygan10@ur.rochester.edu"
] | ygan10@ur.rochester.edu |
3ecf11e01e3b723c2b1431b7b60c91b9a4447008 | 4749d3cf395522d90cb74d1842087d2f5671fa87 | /alice/LC406.py | ebd41c6469e26d50cf2299deca09ce35e1e6efae | [] | no_license | AliceTTXu/LeetCode | c1ad763c3fa229362350ce3227498dfb1f022ab0 | ed15eb27936b39980d4cb5fb61cd937ec7ddcb6a | refs/heads/master | 2021-01-23T11:49:49.903285 | 2018-08-03T06:00:16 | 2018-08-03T06:00:16 | 33,470,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | class Solution(object):
def reconstructQueue(self, people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
people.sort(key = lambda x: [-x[0], x[1]])
out = []
for x in people:
out.insert(x[1], x)
return out | [
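# Classic LC406 example: [[7,0],[4,4],[7,1],[5,0],[6,1],[5,2]] reconstructs to
# [[5,0],[7,0],[5,2],[6,1],[4,4],[7,1]].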
"aliceadelice@gmail.com"
] | aliceadelice@gmail.com |
c1e70ef156cf238a0729e59e883d766bdc7079de | 0be8fcf11032836c40d6cf80404130bfeeb4f147 | /formulas/constants.py | 07ac7d5860ed343c693632960b394553b6992c42 | [] | no_license | granttremblay/formulas | 7b13661b8248deeb84a42ce9fa8baaa5fc7d8667 | a56dbfa92fbcaef194d6735a0d5fb38d48308a36 | refs/heads/master | 2021-01-11T22:26:17.165819 | 2016-11-25T04:19:10 | 2016-11-25T04:19:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,044 | py | from sympy import symbols
from sympy import pi as sym_pi
from formulas.base import Formula
try:
import yt.utilities.physical_constants as yt_pc
except ImportError:
yt_pc = None
try:
import astropy.constants as astropy_pc
except ImportError:
astropy_pc = None
try:
from pint import UnitRegistry
pint_pc = UnitRegistry(system='cgs')
except ImportError:
pint_pc = None
yt_map = {"m_e": "me",
"m_p": "mp",
"m_h": "mh",
"k_B": "kboltz"}
astropy_map = {}
pint_map = {"G": "newtonian_constant_of_gravitation",
"k_B": "k"}
class FormulaConstant(Formula):
def __init__(self, name, value):
name = symbols(name)
super(FormulaConstant, self).__init__(name, [], [name])
self.param_values[str(name)] = value
self._value = value
def set_param_values(self, **kwargs):
"""
Set the values of one or more parameters.
"""
if self.num_params > 0:
raise RuntimeError("Can't change the value of a constant!")
def clear_param_values(self):
"""
Set all of the parameter values to None.
"""
if self.num_params > 0:
raise RuntimeError("Can't change the value of a constant!")
@property
def value(self):
return self._value
class FormulaPi(FormulaConstant):
def __init__(self):
Formula.__init__(self, sym_pi, [], [])
@property
def value(self):
return self.formula.evalf()
class PhysicalConstants(object):
def __init__(self, constants, map):
self.constants = constants
self.map = map
def __getattr__(self, item):
const = self.map.get(item, item)
return FormulaConstant(item, 1.0*getattr(self.constants, const))
pi = FormulaPi()
if yt_pc is not None:
yt_constants = PhysicalConstants(yt_pc, yt_map)
if astropy_pc is not None:
astropy_constants = PhysicalConstants(astropy_pc, astropy_map)
if pint_pc is not None:
pint_constants = PhysicalConstants(pint_pc, pint_map) | [
"jzuhone@gmail.com"
] | jzuhone@gmail.com |
68dc9fbe7e29ec923121a4526fc054557776fd07 | e0980f704a573894350e285f66f4cf390837238e | /.history/streams/blocks_20201019123819.py | 9808ed21b1d2b74118bd70924d30f80e541f48fd | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | from wagtail.core import blocks
from wagtail.images.blocks import ImageChooserBlock
class TitleBlock(blocks.StructBlock):
text = blocks.CharBlock(
required = True,
        help_text='Tekst do wyświetlenia',
)
class Meta:
template = 'streams/title_block.html'
icon = 'edycja'
label = 'Tytuł'
help_text = 'Wyśrodkowany tekst do wyświetlenia na stronie.'
class CardsBlock(blocks.StructBlock):
cards = blocks.ListBlock(
blocks.StructBlock(
[
('title', blocks.CharBlock(max_length=100, help_text = 'Pogrubiony tytuł tej karty. Maksymalnie 100 znaków.')),
('text', blocks.TextBlock(max_length=255, help_text='Opcjonalny tekst tej karty. Maksymalnie 255 znaków.')),
('image', ImageChooserBlock(help_text = 'Obraz zostanie automatycznie przycięty o 570 na 370 pikseli')),
                # ('link_', ...)  # entry left unfinished in this autosave snapshot
]
)
)
class Meta:
template = 'streams/card_block.html'
icon = 'image'
label = 'Karty standardowe'
| [
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
55ee6222654cb56bc64aa38ff54959a133fa43b6 | f8ad6963bfc851657ea50c6a036cfad29cdd7f60 | /Books/GodOfPython/P00_OriginalSource/ch14/exercise/idpw.py | 21c3d7198abd9ed03005393d1a2ab537906e0965 | [] | no_license | foru120/PythonRepository | e1ab0265c0f50ef2e9acdf7447237c913560692b | db6b6be0f9fb91b0a81a3b6a2ec5631daab10f98 | refs/heads/master | 2021-01-01T06:53:11.728109 | 2019-04-25T13:52:50 | 2019-04-25T13:52:50 | 97,541,222 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py | import tkinter
def reset():
text_id.set('')
text_pw.set('')
def login():
f = open ("C:/gop/ch14/exercise/idpw.txt", 'r')
try:
for s in f:
idpw = s.split('/')
if (idpw[0] == text_id.get()) and (idpw[1].strip()==text_pw.get()):
frame.destroy()
label = tkinter.Label(text = "{} login..".format(text_id.get()))
label.pack()
return 0
reset()
except:
print("exception occured")
finally:
f.close()
root = tkinter.Tk()
root.title("login")
root.geometry('180x120')
text_id = tkinter.StringVar(value='')
text_pw = tkinter.StringVar(value='')
frame = tkinter.Frame(root)
frame.pack()
button = tkinter.Button(frame, text = 'reset', command = reset)
button.grid(row=0, column=0, columnspan = 2)
button = tkinter.Button(frame, text = 'login', command = login)
button.grid(row=3, column=0, columnspan = 2)
label = tkinter.Label(frame, text = 'ID')
label.grid(row = 1, column = 0)
entry_id = tkinter.Entry(frame, textvariable = text_id)
entry_id.grid(row = 1, column = 1)
label = tkinter.Label(frame, text = 'PW')
label.grid(row = 2, column = 0)
entry_pw = tkinter.Entry(frame, textvariable = text_pw, show='*')
entry_pw.grid(row = 2, column = 1)
root.mainloop()
| [
"broodsky1122@hanmail.net"
] | broodsky1122@hanmail.net |
543f648806301c64f30a4f1e315ba24aba1a0b57 | d0d21c4e00d2674dcd0e0e07c47576f15706a064 | /munge/03-random-forest-on-tfidf.py | 6b63bd838c012ebb5e9e59757fd64060f5985289 | [] | no_license | paulhendricks/kaggle-home-depot-product-search-relevance | 1312d874c91b5f0eb6f420d23fe5813db222868a | cd2e68976ab4a6eb224c55f80a0b851bb37a9fde | refs/heads/master | 2021-01-10T05:25:31.073869 | 2016-01-19T15:14:47 | 2016-01-19T15:14:47 | 49,902,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,453 | py | import pandas as pd
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer
from sklearn.metrics import mean_squared_error
attributes = pd.read_csv("./data/attributes.csv")
product_descriptions = pd.read_csv("./data/product_descriptions.csv")
train = pd.read_csv("./data/train.csv")
test = pd.read_csv("./data/test.csv")
sample_submission = pd.read_csv("./data/sample_submission.csv")
count = CountVectorizer()
docs = np.array(['The sun is shining',
'The weather is sweet',
'The sun is shining and the weather is sweet'])
bag = count.fit_transform(docs)
tfidf = TfidfTransformer()
np.set_printoptions(precision=2)
print(tfidf.fit_transform(count.fit_transform(docs)).toarray())
data = 1
from nltk.stem.porter import PorterStemmer
porter = PorterStemmer()
def tokenizer_porter(text):
return [porter.stem(word) for word in text.split()]
tokenizer_porter('runners like running and thus they run')
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
stop = stopwords.words('english')
[w for w in tokenizer_porter('a runner likes running and runs a lot')[-10:] if w not in stop]
tfidf = TfidfVectorizer(strip_accents=None,
lowercase=False,
preprocessor=None)
rf_rfidf = Pipeline([('vect', tfidf), ('rf', RandomForestRegressor(random_state=0))])
X_train = train['search_term'].values
y_train = train['relevance'].values
rf_rfidf.fit(X_train, y_train)
X_test = test['search_term'].values
# the fitted Pipeline vectorizes raw text itself, so predict() takes it directly
y_hat = rf_rfidf.predict(X_test)
X_train, X_validation, y_train, y_validation = train_test_split(train['search_term'].values,
train['relevance'].values, test_size=0.4,
random_state=0)
rf_rfidf = Pipeline([('vect', tfidf), ('rf', RandomForestRegressor(random_state=0))])
rf_rfidf.fit(X_train, y_train)
y_hat = rf_rfidf.predict(X_validation)
mean_squared_error(y_true=y_validation, y_pred=y_hat)
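# note: the Kaggle leaderboard for this competition scores RMSE, i.e.
# np.sqrt(mean_squared_error(y_true=y_validation, y_pred=y_hat))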
X_test = test['search_term'].values
sample_submission['relevance'] = rf_rfidf.predict(X_test)
sample_submission.to_csv('./data/random_forest_submission.csv', index=False)
| [
"paul.hendricks.2013@owu.edu"
] | paul.hendricks.2013@owu.edu |
66a7413029ca2510d8617ddeb21324b7203e0da2 | c1373f7b6956468b45db6c664b119322e667ac5e | /Study/baekjoon/hw/17609.py | 6c5dcf2987055f70bac2678287a966e9c3484626 | [] | no_license | JaeLinJoo/Python-Team-Notes | e01f2010852d0c4365cdc9d087c305e876c90f9a | 869dae62e489686eab56bc58bdea8c391ffbca19 | refs/heads/master | 2023-09-02T14:10:02.032521 | 2021-10-06T16:20:04 | 2021-10-06T16:20:04 | 339,348,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | #회문
### ↓↓↓↓↓↓↓↓↓↓↓↓↓solution↓↓↓↓↓↓↓↓↓↓↓↓↓↓
def pseudo(x, left, right):
while left < right:
if x[left] == x[right]:
left += 1
right -= 1
else:
return False
return True
def palindrome(x,left, right):
while left < right:
if x[left] == x[right]:
left += 1
right -= 1
else:
res1 = pseudo(x,left+1, right)
res2 = pseudo(x,left,right-1)
if res1 == True or res2 == True:
return 1
else:
return 2
return 0
t = int(input())
for i in range(t):
x = input()
res = palindrome(x,0,len(x)-1)
print(res)
############ earlier attempt: time limit exceeded ############
# palindrome check by deleting characters; note that str.replace removes every
# occurrence of s (not just one), and rebuilding strings makes this O(n^2) per word
def palindrome(x):
ps = False
    # already a palindrome
if x == x[::-1]:
return 0
    # pseudo-palindrome check: try deleting one character
else:
for s in x:
pseudo = x.replace(s,'')
if pseudo == pseudo[::-1]:
ps = True
continue
if ps:
return 1
else:
return 2
t = int(input())
res = []
for i in range(t):
x = input()
res.append(palindrome(x))
for r in res:
print(r)
| [
"replituser@example.com"
] | replituser@example.com |
750217b5c01d3385971f10c801f303fa66cf729a | 98743080e28537d635364a998aa1988551dffcdf | /cride/cride/users/urls.py | 1cd703d1b08c9189fbc30cfb0e5b5957ed36c4c6 | [
"MIT"
] | permissive | INFINITY-RUBER/Curso_Django-avanzado_proyect | 75561cbc54a8057d1d2b536f34b2506233f40f80 | 1850a73a3648cfca17fdbf48b40dbd3eadd9f7fd | refs/heads/main | 2023-06-11T05:49:43.330422 | 2021-06-29T17:11:26 | 2021-06-29T17:11:26 | 374,808,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | """ Users URLs."""
# Django
from django.urls import path, include
# Django REST Framework
from rest_framework.routers import DefaultRouter
# Views
from .views import users as user_views
# from cride.users.views import (UserLoginAPIView, UserSignUpAPIView,
# AccountVerificationAPIView)
# urlpatterns = [
# path('users/login/', UserLoginAPIView.as_view(), name='login'),
# path('users/signup/', UserSignUpAPIView.as_view(), name='signup'),
# path('users/verify/', AccountVerificationAPIView.as_view(), name='verify'),
# ]
router = DefaultRouter()
router.register(r'users', user_views.UserViewSet, basename='users')
urlpatterns = [path('', include(router.urls))] | [
"ruberhenandez@gmail.com"
] | ruberhenandez@gmail.com |
bcc862b5a9cd861cbe6356c4fb7bc1316aeb2392 | 8bda8911512f1c454f5e75ef36f3d828661b1479 | /dfs_bfs/test08.py | 6bfdb60652a5ad808b25d0d77843e9f27c78d695 | [] | no_license | choijaehoon1/backjoon | 0f5909a1e1d416f8f431d6b986754af7eb6a3396 | a0411dba08c057a312733e38683246162256e61d | refs/heads/master | 2023-02-26T12:28:33.733297 | 2021-02-05T13:28:33 | 2021-02-05T13:28:33 | 280,430,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | from collections import deque
N, M = map(int, input().split())
visit = [0 for _ in range(100001)]
queue = deque()
queue.append([N,0])
# print(queue)
while queue:
pos = queue[0][0]
depth = queue[0][1]
    if pos == M:  # the depth at the moment we reach the target is the shortest distance (BFS)
break
queue.popleft()
visit[pos] = 1
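    # note: marking states visited when they are enqueued (rather than here,
    # at dequeue time) would keep duplicates out of the queue entirely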
if pos-1 >= 0 and visit[pos-1] == 0:
queue.append([pos-1, depth+1])
if pos+1 <= 100000 and visit[pos+1] == 0:
queue.append([pos+1, depth+1])
if pos*2 <= 100000 and visit[pos*2] == 0:
queue.append([pos*2, depth+1])
print(queue[0][1])
| [
"wogns_20@naver.com"
] | wogns_20@naver.com |
b230197181bdab76dc21bbefbc4970e62ae9d85b | 746bf62ae3599f0d2dcd620ae37cd11370733cc3 | /leetcode/minimumpathsum.py | 0b3383cd94fdea6db18babd6a9663eaa701785c1 | [] | no_license | wanglinjie/coding | ec0e614343b39dc02191455165eb1a5c9e6747ce | 350f28cad5ec384df476f6403cb7a7db419de329 | refs/heads/master | 2021-04-22T14:00:48.825959 | 2017-05-02T12:49:05 | 2017-05-02T12:49:05 | 48,011,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,417 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# date:20160712
class Solution(object):
def minPathSum(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
Given a m x n grid filled with non-negative numbers,
find a path from top left to bottom right which minimizes the sum of all numbers along its path.
Note: You can only move either down or right at any point in time.
        Approach:
        dynamic programming --
        keep a separate matrix whose cells store the minimum path sum to reach
        each position
"""
m = len(grid)
n = len(grid[0])
paths = []
for i in xrange(m):
paths.append([0] * n)
for i in xrange(m):
for j in xrange(n):
if i:
if j:
                        # add the current value to the smaller of the sums from the left and from above
paths[i][j] = grid[i][j] + min(paths[i-1][j], paths[i][j-1])
else:
                        # leftmost column: we can only arrive from the cell above
paths[i][j] = grid[i][j] + paths[i-1][j]
else:
if j:
paths[i][j] = grid[i][j] + paths[i][j-1]
else:
paths[i][j] = grid[i][j]
print paths
return paths[m-1][n-1]
grid = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
so = Solution()
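# expected: paths == [[1, 3, 6], [5, 8, 12], [12, 20, 21]], so this prints 21
# (path 1 -> 2 -> 3 -> 6 -> 9)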
print so.minPathSum(grid) | [
"hitwhwlj@163.com"
] | hitwhwlj@163.com |
35decafd0911bc653eafe7e1928f9aafc9d264c6 | 4aa3e91f50443894de3d2d5339df7cbcb2c88bfd | /t_08ALFARO.CAJO/CAJO/concatenacion/ejercicio7.py | 8e68e8fce9f5780988f0b1da353beaf938a9ee63 | [] | no_license | Piero942/T08_Alfaro.Cajo | d7164ab2c22116037739b32680cd82eafef0cfa1 | 8e1d05459d15093d0a8ecc46c7caf7e225b53e5f | refs/heads/master | 2020-10-02T07:43:03.482372 | 2019-12-13T02:01:09 | 2019-12-13T02:01:09 | 227,733,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | # 2 3 4 5 6 7
# 0123456789012345678901234567890123456789012345678901234567890123
msg="El ser humano nace con inteligencia, pero debe aprender a pensar"
print( "aprende" + " " + "con" + " " + "inteligencia" )
| [
"palfaro@unprg.edu.pe"
] | palfaro@unprg.edu.pe |
6c3401ae9e10e1085074c0663728a44a7a382a3d | 3492b97a904d7397c13c03334d0abe23484024bc | /custom_website_calendar/models/calendar_appointment.py | c33c793529e61f93f2aece8aa29b751c565aaae9 | [] | no_license | hassanfadl/vendor_portal_app | e578262ae25261e6740e4329cb45168ec2e790b4 | ebf8f4f7452190c986fbc9c225b8660fe0bd4bd4 | refs/heads/main | 2023-08-23T10:12:07.838305 | 2021-10-30T04:44:40 | 2021-10-30T04:44:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,912 | py | import pytz
import random
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError, UserError
from datetime import datetime, date
from datetime import datetime, timedelta, time
from dateutil import rrule
from dateutil.relativedelta import relativedelta
from babel.dates import format_datetime
import calendar as cal
from odoo.tools.misc import get_lang
class InheritedCalendarAppointmentSlot(models.Model):
_inherit = "calendar.appointment.slot"
number_of_slots = fields.Integer(string="Number of Limited Slots", required=False, default=1,
help="Number of slot per day / per session.")
class InheritedCalendarAppointmentType(models.Model):
_inherit = "calendar.appointment.type"
appointment_ids = fields.One2many(comodel_name="calendar.event", inverse_name="appointment_type_id", string="Appointments", required=False, )
@api.constrains('slot_ids', 'employee_ids', 'appointment_ids')
def check_limit_slots(self):
for record in self:
if record.slot_ids and record.employee_ids:
count_initial_slots = len(record.employee_ids)
for slot in record.slot_ids:
if slot.number_of_slots < count_initial_slots:
raise ValidationError(_("The number of slots is less than the assigned employees. The minimum required slot(s) is %s." % count_initial_slots))
def _slots_available(self, slots, first_day, last_day, employee=None):
""" Fills the slot stucture with an available employee
:param slots: slots structure generated by _slots_generate
:param first_day: start datetime in UTC
:param last_day: end datetime in UTC
:param employee: if set, only consider this employee
if not set, consider all employees assigned to this appointment type
"""
def is_work_available(start_dt, end_dt, intervals):
""" check if the slot is contained in the employee's work hours (defined by intervals)
"""
def find_start_index():
""" find the highest index of intervals for which the start_date (element [0]) is before (or at) start_dt
"""
def recursive_find_index(lower_bound, upper_bound):
if upper_bound - lower_bound <= 1:
if intervals[upper_bound][0] <= start_dt:
return upper_bound
return lower_bound
index = (upper_bound + lower_bound) // 2
if intervals[index][0] <= start_dt:
return recursive_find_index(index, upper_bound)
else:
return recursive_find_index(lower_bound, index)
if start_dt <= intervals[0][0] - tolerance:
return -1
if end_dt >= intervals[-1][1] + tolerance:
return -1
return recursive_find_index(0, len(intervals) - 1)
if not intervals:
return False
tolerance = timedelta(minutes=1)
start_index = find_start_index()
if start_index != -1:
for index in range(start_index, len(intervals)):
if intervals[index][1] >= end_dt - tolerance:
return True
if len(intervals) == index + 1 or intervals[index + 1][0] - intervals[index][1] > tolerance:
return False
return False
def is_calendar_available(slot, events, employee):
""" Returns True if the given slot doesn't collide with given events for the employee
"""
start_dt = slot['UTC'][0]
end_dt = slot['UTC'][1]
print(start_dt, end_dt)
event_in_scope = lambda ev: (
fields.Date.to_date(ev.start) <= fields.Date.to_date(end_dt)
and fields.Date.to_date(ev.stop) >= fields.Date.to_date(start_dt)
)
for ev in events.filtered(event_in_scope):
if ev.allday:
# allday events are considered to take the whole day in the related employee's timezone
event_tz = pytz.timezone(ev.event_tz or employee.user_id.tz or self.env.user.tz or slot['slot'].appointment_type_id.appointment_tz or 'UTC')
ev_start_dt = datetime.combine(fields.Date.from_string(ev.start_date), time.min)
ev_stop_dt = datetime.combine(fields.Date.from_string(ev.stop_date), time.max)
ev_start_dt = event_tz.localize(ev_start_dt).astimezone(pytz.UTC).replace(tzinfo=None)
ev_stop_dt = event_tz.localize(ev_stop_dt).astimezone(pytz.UTC).replace(tzinfo=None)
if ev_start_dt < end_dt and ev_stop_dt > start_dt:
return False
elif fields.Datetime.to_datetime(ev.start_datetime) < end_dt and fields.Datetime.to_datetime(ev.stop_datetime) > start_dt:
return False
return True
workhours = {}
meetings = {}
# With context will be used in resource.calendar to force the referential user
# for work interval computing to the *user linked to the employee*
available_employees = [emp.with_context(tz=emp.user_id.tz) for emp in (employee or self.employee_ids)]
random.shuffle(available_employees)
for slot in slots:
for emp_pos, emp in enumerate(available_employees):
if emp_pos not in workhours:
workhours[emp_pos] = [
(interval[0].astimezone(pytz.UTC).replace(tzinfo=None),
interval[1].astimezone(pytz.UTC).replace(tzinfo=None))
for interval in emp.resource_calendar_id._work_intervals_batch(
first_day, last_day, resources=emp.resource_id,
)[emp.resource_id.id]
]
if is_work_available(slot['UTC'][0], slot['UTC'][1], workhours[emp_pos]):
if emp_pos not in meetings:
# note: no check is made on the attendee's status (accepted/declined/...)
meetings[emp_pos] = self.env['calendar.event'].search([
('partner_ids.user_ids', '=', emp.user_id.id),
('start', '<', fields.Datetime.to_string(last_day.replace(hour=23, minute=59, second=59))),
('stop', '>', fields.Datetime.to_string(first_day.replace(hour=0, minute=0, second=0)))
])
slot['employee_id'] = emp
break
def _get_appointment_slots(self, timezone, employee=None):
""" Fetch available slots to book an appointment
:param timezone: timezone string e.g.: 'Europe/Brussels' or 'Etc/GMT+1'
:param employee: if set will only check available slots for this employee
:returns: list of dicts (1 per month) containing available slots per day per week.
complex structure used to simplify rendering of template
"""
self.ensure_one()
appt_tz = pytz.timezone(self.appointment_tz)
requested_tz = pytz.timezone(timezone)
first_day = requested_tz.fromutc(datetime.utcnow() + relativedelta(hours=self.min_schedule_hours))
last_day = requested_tz.fromutc(datetime.utcnow() + relativedelta(days=self.max_schedule_days))
# Compute available slots (ordered)
slots = self._slots_generate(first_day.astimezone(appt_tz), last_day.astimezone(appt_tz), timezone)
if not employee or employee in self.employee_ids:
self._slots_available(slots, first_day.astimezone(pytz.UTC), last_day.astimezone(pytz.UTC), employee)
# Compute calendar rendering and inject available slots
today = requested_tz.fromutc(datetime.utcnow())
start = today
month_dates_calendar = cal.Calendar(0).monthdatescalendar
months = []
while (start.year, start.month) <= (last_day.year, last_day.month):
dates = month_dates_calendar(start.year, start.month)
for week_index, week in enumerate(dates):
for day_index, day in enumerate(week):
mute_cls = weekend_cls = today_cls = None
today_slots = []
if day.weekday() in (cal.SUNDAY, cal.SATURDAY):
weekend_cls = 'o_weekend'
if day == today.date() and day.month == today.month:
today_cls = 'o_today'
if day.month != start.month:
mute_cls = 'text-muted o_mute_day'
else:
# slots are ordered, so check all unprocessed slots from until > day
while slots and (slots[0][timezone][0].date() <= day):
if (slots[0][timezone][0].date() == day) and ('employee_id' in slots[0]):
ev_start = datetime.strptime(slots[0][timezone][0].strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S')
ev_stop = datetime.strptime(slots[0][timezone][0].strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S') + relativedelta(hours=self.appointment_duration)
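                                # count bookings already stored for this exact slot so the
                                # per-slot limit from slot_ids can be enforced below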
appointment_counts = self.env['calendar.event'].sudo().search_count([
('appointment_type_id', '=', self.id),
('start', '=', ev_start - relativedelta(hours=8)),
('stop', '=', ev_stop - relativedelta(hours=8))])
count_limit = len(self.employee_ids.ids)
for slot_weekday in self.slot_ids:
if int(slot_weekday.weekday) == (int(day.weekday()) + 1) and float(slot_weekday.hour) == float(ev_start.hour + (ev_start.minute / 60.0)):
count_limit = slot_weekday.number_of_slots
if appointment_counts:
if appointment_counts < count_limit:
today_slots.append({
'employee_id': slots[0]['employee_id'].id,
'datetime': slots[0][timezone][0].strftime('%Y-%m-%d %H:%M:%S'),
'hours': slots[0][timezone][0].strftime('%H:%M') + ' (' + str(count_limit - appointment_counts) + ' Remaining Slot/s)'
})
else:
today_slots.append({
'employee_id': slots[0]['employee_id'].id,
'datetime': slots[0][timezone][0].strftime('%Y-%m-%d %H:%M:%S'),
'hours': slots[0][timezone][0].strftime('%H:%M') + ' (' + str(count_limit) + ' Remaining Slot/s)'
})
slots.pop(0)
dates[week_index][day_index] = {
'day': day,
'slots': today_slots,
'mute_cls': mute_cls,
'weekend_cls': weekend_cls,
'today_cls': today_cls
}
months.append({
'month': format_datetime(start, 'MMMM Y', locale=get_lang(self.env).code),
'weeks': dates
})
start = start + relativedelta(months=1)
return months | [
"dennisboysilva@gmail.com"
] | dennisboysilva@gmail.com |
40e95cb6d1ecd25ec81f2e9f679e937390f9151c | f68d246ea82f980706bfa574da91d99797c29b38 | /code/sequentialSearch.py | 3f21c9fcc1154eb22d72f6c04f1a8470444e9477 | [] | no_license | nicolas4d/Problem-Solving-with-Algorithms-and-Data-Structures-using-Python | 40684370ab0c8a22894aa58c0479da6697ea0a13 | 5c7595cab3c5501e4b4177b700708a2609c74e30 | refs/heads/master | 2020-12-02T13:43:49.547926 | 2020-02-01T14:19:08 | 2020-02-01T14:19:08 | 231,025,645 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | def sequentialSearch(alist, item):
pos = 0
found = False
while pos < len(alist) and not found:
        if alist[pos] == item:
            found = True
        else:
pos = pos + 1
return found
testlist = [1, 2, 32, 8, 17, 19, 42, 13, 0]
print(sequentialSearch(testlist, 3))
print(sequentialSearch(testlist, 13))
'''
False
True
'''
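
# Sequential search examines items one by one: O(n) in the worst case. On a
# sorted list the loop could stop early as soon as alist[pos] > item.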
| [
"nicolas4d@foxmail.com"
] | nicolas4d@foxmail.com |
671f77b7b77c513a3c58149ed8238a00effc68b0 | f35bb12066639698a94847cba4b4628aede1da70 | /contests/python/atcoder_beginners_selection/02_ABC081A/main.py | 8d579ec866c41987ae3796146e0a348fe59101f3 | [] | no_license | fly1tkg/atcoder-python-note | 7e74382a8867b07bb7a926988ac854a3b84e020b | 6051b771c0a0399ce8caf1e24256a9909101b0e7 | refs/heads/main | 2023-08-26T23:52:14.766576 | 2021-10-30T11:58:38 | 2021-10-30T11:58:38 | 363,686,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | l = list(input())
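# count how many of the input characters are '1'
# (equivalently: print(input().count('1')))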
result = 0
for i in l:
if i == '1':
result += 1
print(result) | [
"fly1tkg@gmail.com"
] | fly1tkg@gmail.com |
6bdd4cb44ebe52649aa072e82ce0f6cf397c91c6 | 7bd15f37ffd26f9f0470cae2b4c1ef491c35c5c1 | /python/dirigible/feedback/urls.py | 64c3810ceefb7a6b75b23e864325bb941af66522 | [
"MIT"
] | permissive | bwhmather/dirigible-spreadsheet | 0063aba1cec7df1dc4fc0d5dbbcfaeeb1dad932f | ff0414912110553a5d0f317495cdba39a077a044 | refs/heads/master | 2020-12-28T22:53:10.312409 | 2014-09-14T16:15:59 | 2014-09-14T16:15:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | # Copyright (c) 2010 Resolver Systems Ltd, PythonAnywhere LLP
# See LICENSE.md
#
from django.conf.urls.defaults import *
from dirigible.feedback.views import submit
urlpatterns = patterns('',
url(
r'^submit/$',
submit,
name="feedback_submit"
),
)
| [
"hjwp2@cantab.net"
] | hjwp2@cantab.net |
7dcfcc49c5a5737c6513c87e0674a48f21405088 | 479b1e59d99b30c7c9440416a2e95702cef7e3e1 | /Chapter07/transformer/transformer/Constants.py | 1b6ba9302e2d1d4fd8684e08921133ef1b6726ca | [
"MIT"
] | permissive | kpranke/Getting-Started-with-Deep-Learning-for-Natural-Language-Processing | 28007fa4361fdfe191a48f9576690028ed4bbd14 | 89f35a8e327bd9143fdb44e84b8f7b4fdc8ae58d | refs/heads/main | 2023-02-23T06:41:13.351541 | 2021-01-23T11:56:08 | 2021-01-23T11:56:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | PAD = 0
UNK = 1
BOS = 2
EOS = 3
PAD_WORD = '<blank>'
UNK_WORD = '<unk>'
BOS_WORD = '<s>'
EOS_WORD = '</s>'
| [
"41231825+bpbpublications@users.noreply.github.com"
] | 41231825+bpbpublications@users.noreply.github.com |
c7c95faecb272485b632e0ee5d8c8ae918aabe5c | a39a8e8d1192ca475c6ed348b79e10108c1fa6ba | /WebApp/admin.py | aa3bf10d1d4fce749276c043cb777099722ae42a | [] | no_license | gitNikhilsahu/SQLiteProject | 2d7bac75456d793e97424ed0a939a4a4b87cf5d0 | bfa49909059789f6e3ba47cd274782de6b6414c5 | refs/heads/master | 2020-09-14T09:44:57.481471 | 2019-11-21T05:21:22 | 2019-11-21T05:21:22 | 223,093,719 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | from django.contrib import admin
from WebApp.models import Emp
# class EmpAdmin(admin.ModelAdmin):
# list_display = ['EmpID', 'EmpName', 'EmpSal', 'EmpAdd']
admin.site.register(Emp) | [
"Nikhilsahu.in@gmail.com"
] | Nikhilsahu.in@gmail.com |
5167277aeef9cb935f9bc676a1027f0ba41df2c9 | 8d15427c02f7ea265985159c7bf67df2f9731991 | /ApiTest/case/test_db.py | dd65ca78ebdaac25d0806ad61e9f962a355c61d7 | [] | no_license | yolotester/learngit | aa862b7f0ecbeb5c056b8ca576c8a00efc55f62c | d885b520757097c1d984d1cdda5d242ee5c6a5d6 | refs/heads/master | 2023-04-07T19:48:23.153638 | 2023-03-17T02:03:05 | 2023-03-17T02:03:05 | 248,145,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | '''
Goal: use the database-reading wrapper class inside unittest
'''
# imports: unittest and the read_database wrapper
import unittest
from tools.read_database import ReadDB
import logging
logging.basicConfig(level=logging.INFO, format= '%(asctime)s-%(levelname)s-%(name)s-%(message)s')
logger = logging.getLogger(__name__)
# test class -- must inherit unittest.TestCase for assertEqual and discovery to work
class TestDB(unittest.TestCase):
    # test method
    def test_db(self):
        # SQL statement to execute
        sql = 'select * from goods'
        # call the wrapper's main query method and capture the returned row
        data = ReadDB().get_sql_one(sql)
        # assert on the result
self.assertEqual(1, data[0])
if __name__ == '__main__':
unittest.main()
| [
"yolo@ying31.com"
] | yolo@ying31.com |
b082be539c1caf0875da89e785561da6f00009ef | 25d8bac5635ac1cc3577a3593a4512e042ea7ecd | /scripts/simplehttpserver-example-1.py | d1f9965dbfb74211c5c5666c8a39df5691eef1e4 | [] | no_license | mtslong/demo | 2333fa571d6d9def7bdffc90f7bcb623b15e6e4b | a78b74e0eea7f84df489f5c70969b9b4797a4873 | refs/heads/master | 2020-05-18T18:28:48.237100 | 2013-11-11T16:10:11 | 2013-11-11T16:10:11 | 4,136,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | import SimpleHTTPServer
import SocketServer
# minimal web server. serves files relative to the
# current directory.
PORT = 8000
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
print "serving at port", PORT
httpd.serve_forever()
## serving at port 8000
## localhost - - [11/Oct/1999 15:07:44] code 403, message Directory listing not sup
## ported
## localhost - - [11/Oct/1999 15:07:44] "GET / HTTP/1.1" 403 -
## localhost - - [11/Oct/1999 15:07:56] "GET /samples/sample.htm HTTP/1.1" 200 -
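
# Python 3 note: SimpleHTTPServer/SocketServer were merged into http.server /
# socketserver; the one-line equivalent is `python -m http.server 8000`.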
| [
"mofeng@netease.com"
] | mofeng@netease.com |
5d23db06cd5064470408bdb43c1240099438ca90 | 2eca1ddd6016499dec09459d9981fdb11ad21ec3 | /doc/vcs.py | 144dfd8fc7abceda8b62b6458f327734b0bf1875 | [
"BSD-2-Clause"
] | permissive | AWhetter/plac | 85eb9a09879a06ae33ba0760d69517f06de61157 | c5e786d08600fe0ac2158bc50a7d83cf25a59c7f | refs/heads/master | 2020-10-02T06:44:54.314142 | 2019-10-27T07:41:16 | 2019-10-27T07:41:16 | 227,724,805 | 0 | 0 | BSD-2-Clause | 2019-12-13T00:52:05 | 2019-12-13T00:52:04 | null | UTF-8 | Python | false | false | 963 | py | "A Fake Version Control System"
import plac # this implementation also works with Python 2.4
commands = 'checkout', 'commit', 'status'
@plac.annotations(url='url of the source code')
def checkout(url):
"A fake checkout command"
return ('checkout ', url)
@plac.annotations(message=('commit message', 'option'))
def commit(message):
"A fake commit command"
return ('commit ', message)
@plac.annotations(quiet=('summary information', 'flag', 'q'))
def status(quiet):
"A fake status command"
return ('status ', quiet)
def __missing__(name):
return ('Command %r does not exist' % name,)
def __exit__(etype, exc, tb):
"Will be called automatically at the end of the intepreter loop"
if etype in (None, GeneratorExit): # success
print('ok')
main = __import__(__name__) # the module imports itself!
if __name__ == '__main__':
import plac
for out in plac.call(main, version='0.1.0'):
print(out)
| [
"michele.simionato@gmail.com"
] | michele.simionato@gmail.com |
8ff7174f22a50a8d9d3d834f9c24f2e797fb4c06 | e0980f704a573894350e285f66f4cf390837238e | /.history/home/models_20201030104943.py | e2b8486c5ac363b0730c2b639a493695b4a0b0c1 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,533 | py | from django.db import models
from wagtail.core.models import Page
from wagtail.core.fields import StreamField
from wagtail.admin.edit_handlers import FieldPanel, PageChooserPanel, StreamFieldPanel
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.snippets.blocks import SnippetChooserBlock
from streams import blocks
new_table_options = {
'minSpareRows': 0,
'startRows': 4,
'startCols': 4,
'colHeaders': False,
'rowHeaders': True,
'contextMenu': [
'row_above',
'row_below',
'---------',
'col_left',
'col_right',
'---------',
'remove_row',
'remove_col',
'---------',
'undo',
'redo'
],
'editor': 'text',
'stretchH': 'all',
'renderer': 'text',
'autoColumnSize': False,
}
class HomePage(Page):
parent_page_types = ['wagtailcore.Page']
subpage_types = ['flex.FlexPage', 'services.Service']
max_count = 1
lead_text = models.CharField(
max_length = 140,
blank = True,
help_text = 'Podtytuł pod tytułem banera'
)
button = models.ForeignKey(
'wagtailcore.Page',
blank = True,
null = True,
related_name = '+',
help_text = 'Wybierz opcjonalną stronę, do której chcesz utworzyć łącze',
on_delete = models.SET_NULL,
)
button_text = models.CharField(
max_length = 50,
default = 'Czytaj więcej',
blank = False,
help_text = 'Przycisk tekstowy'
)
banner_background_image = models.ForeignKey(
'wagtailimages.Image',
blank = False,
        null = True,
related_name = '+',
help_text = 'Obraz tła baneru',
on_delete = models.SET_NULL,
)
body = StreamField([
('title', blocks.TitleBlock()),
('cards', blocks.CardsBlock()),
('image_and_text', blocks.ImageAndTextBlock()),
('cta', blocks.CallToActionBlock()),
('testimonial', SnippetChooserBlock(
target_model='testimonials.Testimonial',
template = 'streams/testimonial_block.html'
)),
('pricing_table', blocks.PricingTableBlock(table_options=new_table_options)),
], null=True, blank=True)
content_panels = Page.content_panels + [
FieldPanel('lead_text'),
PageChooserPanel('button'),
FieldPanel('button_text'),
ImageChooserPanel('banner_background_image'),
StreamFieldPanel('body'),
]
| [
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
b2089437e5bdd8c6a97f1ddd6a80405f05e99c6f | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/LibvirtGConfig/DomainSmartcard.py | dcc9023485b0fa42ceac54edf0a345beccfffb10 | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 15,891 | py | # encoding: utf-8
# module gi.repository.LibvirtGConfig
# from /usr/lib64/girepository-1.0/LibvirtGConfig-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gobject as __gobject
from .DomainDevice import DomainDevice
class DomainSmartcard(DomainDevice):
"""
:Constructors:
::
DomainSmartcard(**properties)
"""
def bind_property(self, *args, **kwargs): # real signature unknown
pass
def bind_property_full(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def chain(self, *args, **kwargs): # real signature unknown
pass
def compat_control(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def connect(self, *args, **kwargs): # real signature unknown
pass
def connect_after(self, *args, **kwargs): # real signature unknown
pass
def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect
"""
Connect a callback to the given signal with optional user data.
:param str detailed_signal:
A detailed signal to connect to.
:param callable handler:
Callback handler to connect to the signal.
:param *data:
Variable data which is passed through to the signal handler.
:param GObject.ConnectFlags connect_flags:
Flags used for connection options.
:returns:
A signal id which can be used with disconnect.
"""
pass
def connect_object(self, *args, **kwargs): # real signature unknown
pass
def connect_object_after(self, *args, **kwargs): # real signature unknown
pass
def disconnect(*args, **kwargs): # reliably restored by inspect
""" signal_handler_disconnect(instance:GObject.Object, handler_id:int) """
pass
def disconnect_by_func(self, *args, **kwargs): # real signature unknown
pass
def emit(self, *args, **kwargs): # real signature unknown
pass
def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect
""" Deprecated, please use stop_emission_by_name. """
pass
def error_quark(self): # real signature unknown; restored from __doc__
""" error_quark() -> int """
return 0
def find_property(self, property_name): # real signature unknown; restored from __doc__
""" find_property(self, property_name:str) -> GObject.ParamSpec """
pass
def force_floating(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def freeze_notify(self): # reliably restored by inspect
"""
Freezes the object's property-changed notification queue.
:returns:
A context manager which optionally can be used to
automatically thaw notifications.
This will freeze the object so that "notify" signals are blocked until
the thaw_notify() method is called.
.. code-block:: python
with obj.freeze_notify():
pass
"""
pass
def getv(self, names, values): # real signature unknown; restored from __doc__
""" getv(self, names:list, values:list) """
pass
def get_alias(self): # real signature unknown; restored from __doc__
""" get_alias(self) -> str """
return ""
def get_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def get_properties(self, *args, **kwargs): # real signature unknown
pass
def get_property(self, *args, **kwargs): # real signature unknown
pass
def get_qdata(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def get_schema(self): # real signature unknown; restored from __doc__
""" get_schema(self) -> str """
return ""
def handler_block(obj, handler_id): # reliably restored by inspect
"""
Blocks the signal handler from being invoked until
handler_unblock() is called.
:param GObject.Object obj:
Object instance to block handlers for.
:param int handler_id:
Id of signal to block.
:returns:
A context manager which optionally can be used to
automatically unblock the handler:
.. code-block:: python
with GObject.signal_handler_block(obj, id):
pass
"""
pass
def handler_block_by_func(self, *args, **kwargs): # real signature unknown
pass
def handler_disconnect(*args, **kwargs): # reliably restored by inspect
""" signal_handler_disconnect(instance:GObject.Object, handler_id:int) """
pass
def handler_is_connected(*args, **kwargs): # reliably restored by inspect
""" signal_handler_is_connected(instance:GObject.Object, handler_id:int) -> bool """
pass
def handler_unblock(*args, **kwargs): # reliably restored by inspect
""" signal_handler_unblock(instance:GObject.Object, handler_id:int) """
pass
def handler_unblock_by_func(self, *args, **kwargs): # real signature unknown
pass
def install_properties(self, pspecs): # real signature unknown; restored from __doc__
""" install_properties(self, pspecs:list) """
pass
def install_property(self, property_id, pspec): # real signature unknown; restored from __doc__
""" install_property(self, property_id:int, pspec:GObject.ParamSpec) """
pass
def interface_find_property(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def interface_install_property(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def interface_list_properties(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def is_floating(self): # real signature unknown; restored from __doc__
""" is_floating(self) -> bool """
return False
def list_properties(self): # real signature unknown; restored from __doc__
""" list_properties(self) -> list, n_properties:int """
return []
def new(self, type, root_name, schema): # real signature unknown; restored from __doc__
""" new(type:GType, root_name:str, schema:str) -> LibvirtGConfig.Object """
pass
def newv(self, object_type, parameters): # real signature unknown; restored from __doc__
""" newv(object_type:GType, parameters:list) -> GObject.Object """
pass
def new_from_xml(self, type, root_name, schema, xml): # real signature unknown; restored from __doc__
""" new_from_xml(type:GType, root_name:str, schema:str, xml:str) -> LibvirtGConfig.Object """
pass
def notify(self, property_name): # real signature unknown; restored from __doc__
""" notify(self, property_name:str) """
pass
def notify_by_pspec(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def override_property(self, property_id, name): # real signature unknown; restored from __doc__
""" override_property(self, property_id:int, name:str) """
pass
def ref(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def ref_sink(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def replace_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def replace_qdata(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def run_dispose(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def set_address(self, redirdev, address=None): # real signature unknown; restored from __doc__
""" set_address(redirdev:LibvirtGConfig.DomainRedirdev, address:LibvirtGConfig.DomainAddress=None) """
pass
def set_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def set_properties(self, *args, **kwargs): # real signature unknown
pass
def set_property(self, *args, **kwargs): # real signature unknown
pass
def steal_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def steal_qdata(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def stop_emission(self, detailed_signal): # reliably restored by inspect
""" Deprecated, please use stop_emission_by_name. """
pass
def stop_emission_by_name(*args, **kwargs): # reliably restored by inspect
""" signal_stop_emission_by_name(instance:GObject.Object, detailed_signal:str) """
pass
def thaw_notify(self): # real signature unknown; restored from __doc__
""" thaw_notify(self) """
pass
def to_xml(self): # real signature unknown; restored from __doc__
""" to_xml(self) -> str """
return ""
def unref(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def validate(self): # real signature unknown; restored from __doc__
""" validate(self) """
pass
def watch_closure(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def weak_ref(self, *args, **kwargs): # real signature unknown
pass
def _force_floating(self, *args, **kwargs): # real signature unknown
""" force_floating(self) """
pass
def _ref(self, *args, **kwargs): # real signature unknown
""" ref(self) -> GObject.Object """
pass
def _ref_sink(self, *args, **kwargs): # real signature unknown
""" ref_sink(self) -> GObject.Object """
pass
def _unref(self, *args, **kwargs): # real signature unknown
""" unref(self) """
pass
def _unsupported_data_method(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def _unsupported_method(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def __copy__(self, *args, **kwargs): # real signature unknown
pass
def __deepcopy__(self, *args, **kwargs): # real signature unknown
pass
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self, **properties): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
g_type_instance = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
parent = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
priv = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
qdata = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
ref_count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__gpointer__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__grefcount__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
props = None # (!) real value is '<gi._gi.GProps object at 0x7fa8bee9ef10>'
__class__ = None # (!) real value is "<class 'gi.types.GObjectMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': ObjectInfo(DomainSmartcard), '__module__': 'gi.repository.LibvirtGConfig', '__gtype__': <GType GVirConfigDomainSmartcard (94643613940064)>, '__doc__': None, '__gsignals__': {}, 'set_address': gi.FunctionInfo(set_address), 'parent': <property object at 0x7fa8bf19e770>, 'priv': <property object at 0x7fa8bf19e860>})"
__gdoc__ = 'Object GVirConfigDomainSmartcard\n\nProperties from GVirConfigObject:\n schema -> gchararray: Schema\n The doc RNG schema\n node -> gpointer: XML Node\n The XML node this config object corresponds to\n doc -> GVirConfigXmlDoc: XML Doc\n The XML doc this config object corresponds to\n\nSignals from GObject:\n notify (GParam)\n\n'
__gsignals__ = {}
__gtype__ = None # (!) real value is '<GType GVirConfigDomainSmartcard (94643613940064)>'
__info__ = ObjectInfo(DomainSmartcard)
| [
"ttys3@outlook.com"
] | ttys3@outlook.com |
f714c0d4977db7e7c55232f29b48fe1670ee1204 | f620403443b2c0affaed53505c002f35dc68020c | /Prediction/GetEntropy.py | 8e857882fcbac924009a016effa2b606dbd3965e | [] | no_license | ZhuJiahui/CTMTS | c552b3026deb47879f9aa5bde4b002cf6283858d | 9f8981f6e61900a68a38ae0392e01771beee9651 | refs/heads/master | 2021-01-12T10:18:27.579697 | 2016-12-14T02:23:29 | 2016-12-14T02:23:29 | 76,416,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,562 | py | # -*- coding: utf-8 -*-
'''
Created on 3 August 2014
@author: ZhuJiahui506
'''
import os
import numpy as np
from TextToolkit import quick_write_list_to_text
import time
def get_topic_entropy(read_directory, write_filename):
file_number = sum([len(files) for root, dirs, files in os.walk(read_directory)])
all_e = []
for i in range(file_number):
PHAI = np.loadtxt(read_directory + '/' + str(i + 1) + '.txt')
        # np.loadtxt returns a 1-D array when the file holds a single row; wrap it in 2-D
if len(PHAI) >= 300:
PHAI = np.array([PHAI])
this_e_list = []
for j in range(len(PHAI)):
temp_e = 0.0
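            # accumulate Shannon entropy -sum(p * log2(p)) over this topic
            # distribution, skipping near-zero probabilities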
for k in range(len(PHAI[j])):
if PHAI[j][k] > 0.00001:
temp_e += (-1.0 * PHAI[j][k] * np.log2(PHAI[j][k]))
this_e_list.append(temp_e)
all_e.append(str(np.average(this_e_list)))
quick_write_list_to_text(all_e, write_filename)
if __name__ == '__main__':
start = time.clock()
now_directory = os.getcwd()
root_directory = os.path.dirname(now_directory) + '/'
write_directory1 = root_directory + u'dataset/prediction'
write_filename = write_directory1 + u'/topic_entropy.txt'
read_directory = root_directory + u'dataset/DCTM/mctrwctm_ct_word'
if (not(os.path.exists(write_directory1))):
os.mkdir(write_directory1)
get_topic_entropy(read_directory, write_filename)
print 'Total time %f seconds' % (time.clock() - start)
print 'Complete !!!'
| [
"zhujiahui@outlook.com"
] | zhujiahui@outlook.com |
06a5d62253a4c5307e3f07e5b4263d38c33c0137 | 17c7bace346c9f49318becd1f9769c9aee3fa650 | /credit_calc/config.py | cde448593ebe35abcf9a8f3e754ebf53b07607a5 | [] | no_license | KonishchevDmitry/credit-calculator | 789a06bad7dfe27ed5284afcb5d8b620d5ba5ae1 | 4c4dd811416e968eb68bbd490966c285de99c2c5 | refs/heads/master | 2021-01-10T16:46:22.273992 | 2013-11-18T08:17:11 | 2013-11-18T08:17:11 | 51,710,627 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,806 | py | from decimal import Decimal, DecimalException
from object_validator import validate
from object_validator import InvalidValueError
from object_validator import String, List, Dict, DictScheme
import python_config
from credit_calc.util import InvalidDateError
from credit_calc.util import get_date
class _Date(String):
def validate(self, obj):
super(_Date, self).validate(obj)
try:
return get_date(obj)
except InvalidDateError:
raise InvalidValueError(obj)
class _Amount(String):
def validate(self, obj):
super(_Amount, self).validate(obj)
try:
amount = Decimal(obj)
except DecimalException:
raise InvalidValueError(obj)
if amount <= 0:
raise InvalidValueError(obj)
return amount
class _Interest(String):
def validate(self, obj):
super(_Interest, self).validate(obj)
try:
interest = Decimal(obj)
except DecimalException:
raise InvalidValueError(obj)
if interest <= 0 or interest >= 100:
raise InvalidValueError(obj)
return interest
def get_credits(config_path):
return _get_config(config_path)["credits"]
def _get_config(config_path):
config = python_config.load(config_path)
try:
return validate("config", config, DictScheme({
"credits": List(DictScheme({
"amount": _Amount(),
"interest": _Interest(),
"start_date": _Date(),
"end_date": _Date(),
"payments": Dict(_Date(), _Amount(), optional=True),
}))
}))
except Exception as e:
raise Exception("Error while parsing '{}' configuration file: {}".format(config_path, e))
| [
"konishchev@gmail.com"
] | konishchev@gmail.com |
f0649e568131fbf007f89191ec6673659501d413 | 328397a6ff6e109069e02fa8fe42255910939cb8 | /venvs/edxapp/lib/python2.7/site-packages/xblock/__init__.py | 27a1cc562b2dbacb168d5c5618273230d192cfb3 | [] | no_license | UOMx/CITeS-VM-edxapp | fd1294367d090314dc46d8517027845fe178a2e7 | de3d1b297fa99d61cf32addb981cdfc55aec9891 | refs/heads/master | 2022-12-03T08:45:26.284451 | 2017-02-13T12:44:17 | 2017-02-13T12:44:17 | 81,821,365 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | """
XBlock Courseware Components
"""
import os
import warnings
import xblock.core
import xblock.fields
class XBlockMixin(xblock.core.XBlockMixin):
"""
A wrapper around xblock.core.XBlockMixin that provides backwards compatibility for the old location.
Deprecated.
"""
def __init__(self, *args, **kwargs):
warnings.warn("Please use xblock.core.XBlockMixin", DeprecationWarning, stacklevel=2)
super(XBlockMixin, self).__init__(*args, **kwargs)
# For backwards compatibility, provide the XBlockMixin in xblock.fields
# without causing a circular import
xblock.fields.XBlockMixin = XBlockMixin
VERSION_FILE = os.path.join(os.path.dirname(__file__), 'VERSION.txt')
__version__ = open(VERSION_FILE).read().strip()
| [
"menuka.14@cse.mrt.ac.lk"
] | menuka.14@cse.mrt.ac.lk |
9703e11091f4c627c362b31dd90d4d91c92160f8 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/3/usersdata/107/321/submittedfiles/ex1.py | 652ffcb7e07cb3de6ae9c7173519b76c1e3e40b5 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | # -*- coding: utf-8 -*-
from __future__ import division
a=input('digite o valor de a:')
b=input('digite o valor de b:')
c=input('digite o valor de c:')
d=(b**2)-(4*a*c)
if d>=0:
    # quadratic formula; the original had precedence bugs (d**1/2 is d/2, and
    # /2*a divides by 2 then multiplies by a) plus broken print calls
    x1 = (-b + d**0.5) / (2*a)
    x2 = (-b - d**0.5) / (2*a)
    print('%.2f' % x1)
    print('%.2f' % x2)
else:
print('não existe raízes reais:')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
d40b3e7ce7ced0c63212ada2f3f056a7575618c6 | be36a3b4aec92734e7b8562eff411310d4d7ba78 | /core/utils.py | c2dea9e22447aca033fd7a4e75bacc37692c0dcf | [] | no_license | dpitkevics/Jooglin.Crawler | 9d409c7960557dbaaab3e925fc5de0c762185186 | 1a3b7c7fa5a1132413dff1ca73c655caf8e2a637 | refs/heads/master | 2021-01-22T06:54:37.682214 | 2015-04-22T14:51:18 | 2015-04-22T14:51:18 | 34,395,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,167 | py | import types
import defer
def iterator(var):
if isinstance(var, types.GeneratorType):
return var
if isinstance(var, list) or isinstance(var, tuple):
return iter(var)
return iter((var,))
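# Example: iterator() normalizes scalars, sequences and generators:
#   list(iterator(3))               -> [3]
#   list(iterator([1, 2]))          -> [1, 2]
#   list(iterator(x for x in "ab")) -> ['a', 'b']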
def isstring(obj):
    try:
        # Python 2: cover both str and unicode
        return isinstance(obj, basestring)
    except NameError:
        # Python 3: basestring no longer exists
        return isinstance(obj, str)
class DeferredList(defer.Deferred):
def __init__(self, deferredList):
"""Initialize a DeferredList"""
self.result_list = [None] * len(deferredList)
super(DeferredList, self).__init__()
self.finished_count = 0
for index, deferred in enumerate(deferredList):
deferred.add_callbacks(
self._cb_deferred,
self._cb_deferred,
callback_args=(index,),
errback_args=(index,)
)
def _cb_deferred(self, result, index):
"""(internal) Callback for when one of my deferreds fires.
"""
self.result_list[index] = result
self.finished_count += 1
if self.finished_count == len(self.result_list):
self.callback(self.result_list)
return result
| [
"daniels.pitkevics@gmail.com"
] | daniels.pitkevics@gmail.com |
7643eccc000d560f997e82014987c8affdaa2a73 | 7c13186da7fba1e4da5f6a9c85b8ef00b68797e0 | /utils/log.py | 497d7836db9badc3768d5809ec3eee654474f021 | [] | no_license | rohitsuratekar/FeedbackScan | 02d54949c293f1ce956b62b5597697db3bfe07ac | b7598a85cb9c92946edb96713f51baae73b22f0d | refs/heads/master | 2021-05-04T17:52:12.655790 | 2019-11-28T10:43:24 | 2019-11-28T10:43:24 | 120,281,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,454 | py | """
Copyright © 2017 Rohit Suratekar
Code from this file is released under MIT Licence 2017.
use "Log" for logging information and "OUTPUT" for saving information
"""
import logging
import os
from settings import *
from utils.functions import get_uid
# Creates UID for current job
CURRENT_JOB = get_uid()
if not os.path.exists(OUTPUT_FOLDER):
os.makedirs(OUTPUT_FOLDER)
class AppFilter(logging.Filter):
"""
Adds custom field in log file
"""
def filter(self, record):
record.uid = CURRENT_JOB
return True
LOG = logging.getLogger('log')
LOG.setLevel(logging.INFO)
LOG.addFilter(AppFilter())
if STORE_SCRIPT_LOG:
log_file = logging.FileHandler(
OUTPUT_FOLDER + "/" + NAME_OF_SCRIPT_LOG_FILE)
log_file.setFormatter(
logging.Formatter('%(uid)s %(asctime)s %(filename)s : %(message)s'))
LOG.addHandler(log_file)
if PRINT_TO_CONSOLE:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(uid)s %(asctime)s %(filename)s : %(message)s')
console.setFormatter(formatter)
LOG.addHandler(console)
OUTPUT = logging.getLogger('output')
OUTPUT.setLevel(logging.INFO)
OUTPUT.addFilter(AppFilter())
output_file = logging.FileHandler(OUTPUT_FOLDER + "/" + NAME_OF_OUTPUT_FILE)
output_file.setFormatter(logging.Formatter('%(uid)s: %(message)s'))
OUTPUT.addHandler(output_file)
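
# Typical usage elsewhere in the project (a sketch; the import path follows
# this file's location at utils/log.py):
#   from utils.log import LOG, OUTPUT
#   LOG.info("script progress message")
#   OUTPUT.info("result to be saved")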
| [
"rohitsuratekar@gmail.com"
] | rohitsuratekar@gmail.com |
59cc24e786dee4a7056482c5231c640cfdb53a44 | aa853a9094fff4b6e9b0ddc7469be29ad5f0f811 | /poi_sale_discounts/__manifest__.py | 3d2b68437c88ed4c757428677067f4c312115397 | [] | no_license | blue-connect/illuminati | 40a13e1ebeaceee39f17caa360f79e8deeaebf58 | 6682e60630064641474ddb2d8cbc520e30f64832 | refs/heads/master | 2022-01-06T00:55:58.465611 | 2018-11-24T04:30:03 | 2018-11-24T04:30:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,563 | py | ##############################################################################
#
# Poiesis Consulting, OpenERP Partner
# Copyright (C) 2013 Poiesis Consulting (<http://www.poiesisconsulting.com>).
# Developed by: Grover Menacho
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Sale Discounts',
'version': '11.0.0.1',
'category': 'Sales',
'summary': 'Descuento por monto y porcentaje',
'description': """
Sale Discounts
===================================
This module adds discounts per amount and percentage to sale orders
""",
'author': 'Poiesis Consulting',
'website': 'http://www.poiesisconsulting.com',
'depends': ['sale'],
'data': [
'views/sale.xml',
],
'installable': True,
'active': False,
'application': True,
# 'certificate': 'certificate',
}
| [
"yori.quisbert@poiesisconsulting.com"
] | yori.quisbert@poiesisconsulting.com |
159d572f827307176e035f46eae4776db924f081 | 4ececb4fc19d2a88178ea633c421a1a41e643e05 | /apps/breakfast/tools/Life/tools/dashboard/DisplayFrame.py | e2c67276021453f2a74335d0261ec1a970a448a1 | [] | no_license | liqiang76/tinyos_cxl | 0251fd377f6f88eb4c853e5d79ee896e9bc2596b | 6ce948f3202fe358867c7992d022f12896fc5160 | refs/heads/master | 2021-01-23T09:33:35.904553 | 2014-09-18T21:18:36 | 2014-09-18T21:18:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,455 | py | #!/usr/bin/env python
# Copyright (c) 2014 Johns Hopkins University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
# - Neither the name of the copyright holders nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import Tkinter
from Tkinter import *
class DisplayFrame(Frame):
def __init__(self, parent, hub, **args):
Frame.__init__(self, parent, **args)
self.hub = hub
self.currentType = "All Types"
self.currentSite = "All Sites"
self.currentView = None
self.nodes = []
self.simplot = None
self.gFrame = None
#self.initUI()
self.frame = Frame(self)
self.frame.grid(column=0, row=0)
def addSimplot(self, simplot):
self.simplot = simplot
#def insertAll(self):
# """ Update sample interval for all nodes in network.
# """
# interval = int(self.intervalVar.get())
# self.intervalVar.set("")
#
# for node in self.hub.node.leafs:
# (oldInterval, oldChannel) = self.hub.node.leafs[node]
# self.hub.node.leafs[node] = (interval, oldChannel)
#
# self.hub.node.saveSettings()
# self.hub.node.redrawAllNodes()
def updateType(self, type):
""" Update the sample interval for nodes with a specific sensor type attached
"""
self.currentType = type
def updateSite(self, site):
""" Update the sample interval for nodes with a specific sensor type attached
"""
self.currentSite = site
self.currentView = "multi"
def updateSelection(self):
multiplexers = self.hub.node.multiplexers
membership = self.hub.node.membership
# store final selection in this list
self.nodes = []
# membership key contains all detected leaf nodes
for leaf in membership:
# only select node if site is correct or all sites are selected
if (self.currentSite == str(membership[leaf])) or (self.currentSite == "All Sites"):
# add node to final selection map if all types are selected
if self.currentType == "All Types":
self.nodes.append(leaf)
else:
# go through list of attached multiplexer boards
# and see if any attached sensors match the selected type
# plexer is a {node->list(multiplexer)}-map
if leaf in multiplexers:
multiplexerList = multiplexers[leaf]
# plex is a list of multiplexers
for multiplexer in multiplexerList:
# each multiplexer is a tuble with multiplexer id and 8 sensor type channels
for sensor in multiplexer[1:9]:
print leaf, sensor
if self.currentType == str(sensor):
self.nodes.append(leaf)
def redrawAll(self):
if self.currentView == "multi":
self.updateSelection()
# disable graph
if self.gFrame:
self.gFrame.grid_forget()
# show UI
self.frame.grid_forget()
self.frame = Frame(self)
label = Label(self.frame, text="Update sample interval for nodes:")
label.grid(column=0, row=0, columnspan=2, sticky=W)
label = Label(self.frame, text="Site: %s" % self.currentSite)
label.grid(column=0, row=1, columnspan=2, sticky=W)
label = Label(self.frame, text="Type: %s" % self.currentType)
label.grid(column=0, row=2, columnspan=2, sticky=W)
label = Label(self.frame, text="Number of nodes selected:")
label.grid(column=0, row=3, sticky=E)
label = Label(self.frame, text=str(len(self.nodes)))
label.grid(column=1, row=3, sticky=E)
label = Label(self.frame, text="New sample interval:")
label.grid(column=0, row=4, sticky=E)
self.intervalVar = StringVar()
entry = Entry(self.frame, textvariable=self.intervalVar)
entry.grid(column=1, row=4)
button = Button(self.frame, text="Update", command=self.insertDict)
button.grid(column=1, row=5)
for n, node in enumerate(sorted(self.nodes)):
label = Label(self.frame, text=node)
label.grid(column=0, row=6+n, sticky=E)
self.frame.grid(column=0, row=0)
elif self.currentView == "node":
self.updateNode()
def updateRouter(self, router):
""" Show Router information
"""
self.currentView = "router"
# show UI
self.frame.grid_forget()
self.frame = Frame(self)
label = Label(self.frame, text="Router information")
label.grid(column=0, row=0, columnspan=2, sticky=W)
label = Label(self.frame, text="Node ID:")
label.grid(column=0, row=1, sticky=E)
label = Label(self.frame, text=router)
label.grid(column=1, row=1, sticky=E)
# change channel
label = Label(self.frame, text="New channel:")
label.grid(column=0, row=2, sticky=E)
self.channelVar = StringVar()
entry = Entry(self.frame, textvariable=self.channelVar)
entry.grid(column=1, row=2)
button = Button(self.frame, text="Update", command=self.channelDict)
button.grid(column=1, row=3)
self.initGraph(0, 2)
self.frame.grid(column=0, row=0)
def updateNode(self):
""" Update the sample interval for one or more leaf nodes.
"""
self.currentView = "node"
# show UI
self.frame.grid_forget()
self.frame = Frame(self)
numberOfNodes = len(self.nodes)
if numberOfNodes > 1:
label = Label(self.frame, text="Update sample interval")
label.grid(column=0, row=0, columnspan=2, sticky=W)
label = Label(self.frame, text="Number of nodes selected:")
label.grid(column=0, row=1, sticky=E)
label = Label(self.frame, text=str(numberOfNodes))
label.grid(column=1, row=1, sticky=E)
label = Label(self.frame, text="New sample interval:")
label.grid(column=0, row=2, sticky=E)
self.intervalVar = StringVar()
entry = Entry(self.frame, textvariable=self.intervalVar)
entry.grid(column=1, row=2)
button = Button(self.frame, text="Update", command=self.insertDict)
button.grid(column=1, row=3)
# show list of all nodes selected
for n, node in enumerate(sorted(self.nodes)):
label = Label(self.frame, text=node)
label.grid(column=0, row=4+n, sticky=E)
# disable graph
if self.gFrame:
self.gFrame.grid_forget()
elif numberOfNodes == 1:
node = self.nodes[0]
label = Label(self.frame, text="Update sample interval")
label.grid(column=0, row=0, columnspan=2, sticky=W)
label = Label(self.frame, text="Node selected:")
label.grid(column=0, row=1, sticky=E)
label = Label(self.frame, text=node)
label.grid(column=1, row=1, sticky=E)
# header
label = Label(self.frame, text="Interval")
label.grid(column=1, row=2, sticky=E)
label = Label(self.frame, text="Channel")
label.grid(column=2, row=2, sticky=E)
# when read from network
(nodeId, readInterval, readChannel, role) = self.hub.node.originalSettings[node]
label = Label(self.frame, text="Current:")
label.grid(column=0, row=3, sticky=E)
label = Label(self.frame, text=readInterval)
label.grid(column=1, row=3, sticky=E)
label = Label(self.frame, text=readChannel)
label.grid(column=2, row=3, sticky=E)
# queued for transmission
(nodeId, reqInterval, reqChannel, role) = self.hub.node.settings[node]
if reqInterval != readInterval:
interval = reqInterval
else:
interval = "None"
if reqChannel != readChannel:
channel = reqChannel
else:
channel = "None"
label = Label(self.frame, text="Requested:")
label.grid(column=0, row=4, sticky=E)
label = Label(self.frame, text=interval)
label.grid(column=1, row=4, sticky=E)
label = Label(self.frame, text=channel)
label.grid(column=2, row=4, sticky=E)
# new sampling interval
label = Label(self.frame, text="New:")
label.grid(column=0, row=5, sticky=E)
self.intervalVar = StringVar()
entry = Entry(self.frame, textvariable=self.intervalVar)
entry.grid(column=1, row=5)
button = Button(self.frame, text="Update", command=self.insertDict)
button.grid(column=1, row=6)
# show information about single leaf node
self.initGraph(0, 7)
self.frame.grid(column=0, row=0)
def insertDict(self):
""" Update global node dictionary, save it to settings file and update UI
"""
print self.nodes
try:
interval = int(self.intervalVar.get())
except ValueError:
pass
else:
for node in self.nodes:
if node in self.hub.node.settings:
# read old settings
(nodeId, oldInterval, oldChannel, role) = self.hub.node.settings[node]
# store new settings
self.hub.node.settings[node] = (nodeId, interval, oldChannel, role)
self.redrawAll()
self.hub.node.redrawAllNodes()
self.hub.node.saveSettings()
finally:
self.intervalVar.set("")
def channelDict(self):
""" Update global router dictionary, save it to settings file and update UI
"""
pass
def infoPlex(self, plex):
""" Show information about multiplexer.
"""
self.currentView = "plex"
info = self.hub.node.db.getPlex(plex)
# show UI
self.frame.grid_forget()
self.frame = Frame(self)
label = Label(self.frame, text="Multiplexer selected")
label.grid(column=0, row=0, columnspan=2, sticky=W)
label = Label(self.frame, text="Multiplexer ID:")
label.grid(column=0, row=1, sticky=E)
label = Label(self.frame, text=plex)
label.grid(column=1, row=1, sticky=E)
idFrame = Frame(self.frame)
label = Label(idFrame, text="Channel")
label.grid(column=0, row=0)
label = Label(idFrame, text="Type")
label.grid(column=1, row=0)
label = Label(idFrame, text="ID")
label.grid(column=2, row=0)
line = 1
for i in range(0,8):
(type, id) = info[i]
if (type is not None) and (type != 0):
Label(idFrame, text=str(i+1)).grid(column=0, row=line)
Label(idFrame, text=type).grid(column=1, row=line)
Label(idFrame, text=id).grid(column=2, row=line)
line += 1
idFrame.grid(column=0, row=2, columnspan=2, sticky=W)
self.initGraph(0, 3)
self.frame.grid(column=0, row=0)
def initGraph(self, x, y):
if self.gFrame:
self.gFrame.grid_forget()
WIDTH = 200
HEIGHT = 200
bgcolor = self.cget('bg')
self.gFrame = Frame(self, width=WIDTH, height=HEIGHT)
self.graph = self.simplot.makeGraphBase(self.gFrame, WIDTH, HEIGHT, xtitle="title", ytitle="title", background=bgcolor)
self.sym = self.simplot.makeSymbols([[0,0]], marker="dot", size=1, fillcolor="red")
self.obj = self.simplot.makeGraphObjects([self.sym])
self.graph.draw(self.obj, xaxis=(0,9), yaxis=(0,9))
self.graph.grid(column=1, row=1)
self.gFrame.grid_propagate(False)
self.gFrame.grid(column=x, row=y)
#def changeRouterChannel(self):
#try:
# channel = int(self.intervalVar.get())
#except ValueError:
# pass
#else:
# for node in self.nodes:
# # see if changes has already been requested for this node
# if node in self.hub.node.leafs:
# # read old settings
# (oldInterval, oldChannel) = self.hub.node.leafs[node]
#
# # store new settings
# self.hub.node.leafs[node] = (interval, oldChannel)
#
# elif node in self.hub.node.leafs:
# # read old settings
# (oldInterval, oldChannel) = self.hub.node.leafs[node]
#
# # store new settings
# #self.hub.node.leafs[node] = (interval, oldChannel)
# self.hub.node.leafs[node] = (interval, oldChannel)
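# --- Usage sketch (added for illustration; `root`, `hub` and the simplot
# module are provided by the hosting dashboard application) ---
#
# frame = DisplayFrame(root, hub)
# frame.addSimplot(simplot)
# frame.grid(column=1, row=0)
# frame.updateNode()  # render the sample-interval editor for the selection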
| [
"qiang@QServ.cs.uh.edu"
] | qiang@QServ.cs.uh.edu |
7c7e3a076f3991ceeb423e9bcd8b085e7cbac791 | 4b01c5f80ca469bc86c4016c338bf1d6b3214c56 | /src/automats/domain_reader.py | 4feb6290691e9f1f7331b1ebaff501c5d833f5b4 | [] | no_license | DRSolomon/zenaida | 0093c34b124649c706c2a6119eacee021692b527 | 7099771641be3e53bcefcaf162d515b3b2de99c5 | refs/heads/master | 2020-12-10T12:15:09.564031 | 2020-01-11T17:36:02 | 2020-01-11T17:36:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,579 | py | #!/usr/bin/env python
# domain_reader.py
"""
.. module:: domain_reader
.. role:: red
Zenaida domain_reader() Automat
EVENTS:
* :red:`all-contacts-ok`
* :red:`error`
* :red:`response`
* :red:`run`
"""
#------------------------------------------------------------------------------
import logging
import time
import datetime
from django.conf import settings
from email.utils import formatdate
#------------------------------------------------------------------------------
from automats import automat
from zen import zclient
from zen import zerrors
from zen import zdomains
#------------------------------------------------------------------------------
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------------
class DomainReader(automat.Automat):
"""
This class implements all the functionality of ``domain_reader()`` state machine.
"""
def __init__(self, verify_registrant=True, debug_level=0, log_events=False, log_transitions=False, raise_errors=False, **kwargs):
"""
Builds `domain_reader()` state machine.
"""
self.verify_registrant = verify_registrant
if log_events is None:
log_events=settings.DEBUG
if log_transitions is None:
log_transitions=settings.DEBUG
super(DomainReader, self).__init__(
name="domain_reader",
state="AT_STARTUP",
outputs=[],
debug_level=debug_level,
log_events=log_events,
log_transitions=log_transitions,
raise_errors=raise_errors,
**kwargs
)
def init(self):
"""
Method to initialize additional variables and flags
at creation phase of `domain_reader()` machine.
"""
def state_changed(self, oldstate, newstate, event, *args, **kwargs):
"""
Method to catch the moment when `domain_reader()` state were changed.
"""
def state_not_changed(self, curstate, event, *args, **kwargs):
"""
This method intended to catch the moment when some event was fired in the `domain_reader()`
but automat state was not changed.
"""
def A(self, event, *args, **kwargs):
"""
The state machine code, generated using `visio2python <http://bitdust.io/visio2python/>`_ tool.
"""
#---AT_STARTUP---
if self.state == 'AT_STARTUP':
if event == 'run':
self.state = 'DOMAIN_INFO'
self.doInit(*args, **kwargs)
self.doEppDomainInfo(*args, **kwargs)
#---DOMAIN_INFO---
elif self.state == 'DOMAIN_INFO':
if event == 'response' and self.isCode(1000, *args, **kwargs):
self.state = 'CONTACTS_INFO'
self.doVerifyRegistrant(*args, **kwargs)
self.doPrepareContactsList(*args, **kwargs)
self.doEppContactInfoMany(*args, **kwargs)
elif event == 'error' or ( event == 'response' and not self.isCode(1000, *args, **kwargs) ):
self.state = 'FAILED'
self.doReportFailed(event, *args, **kwargs)
self.doDestroyMe(*args, **kwargs)
#---CONTACTS_INFO---
elif self.state == 'CONTACTS_INFO':
if event == 'all-contacts-ok':
self.state = 'REGISTRANT_INFO'
self.doEppContactInfoRegistrant(*args, **kwargs)
elif event == 'error' or ( event == 'response' and not self.isCode(1000, *args, **kwargs) ):
self.state = 'FAILED'
self.doReportFailed(event, *args, **kwargs)
self.doDestroyMe(*args, **kwargs)
#---REGISTRANT_INFO---
elif self.state == 'REGISTRANT_INFO':
if event == 'error' or ( event == 'response' and not self.isCode(1000, *args, **kwargs) ):
self.state = 'FAILED'
self.doReportFailed(event, *args, **kwargs)
self.doDestroyMe(*args, **kwargs)
elif event == 'response' and self.isCode(1000, *args, **kwargs):
self.state = 'DONE'
self.doReportDone(*args, **kwargs)
self.doDestroyMe(*args, **kwargs)
#---DONE---
elif self.state == 'DONE':
pass
#---FAILED---
elif self.state == 'FAILED':
pass
return None
def isCode(self, *args, **kwargs):
"""
Condition method.
"""
return args[0] == int(args[1]['epp']['response']['result']['@code'])
def doInit(self, *args, **kwargs):
"""
Action method.
"""
self.target_domain = args[0]
self.result = {}
def doVerifyRegistrant(self, *args, **kwargs):
"""
Action method.
"""
if not self.verify_registrant:
return
self.registrant_epp_id = args[0]['epp']['response']['resData']['infData'].get('registrant', None)
if not self.registrant_epp_id:
logger.error('domain registrant unknown from response: %s' % self.target_domain.name)
self.event('error', zerrors.EPPRegistrantUnknown(response=args[0]))
return
known_domain = zdomains.domain_find(domain_name=self.target_domain.name)
if not known_domain:
return
if known_domain.registrant.epp_id == self.registrant_epp_id:
return
logger.error('domain known to belong to another registrant: %s' % self.target_domain.name)
self.event('error', zerrors.EPPRegistrantAuthFailed(response=args[0]))
def doPrepareContactsList(self, *args, **kwargs):
"""
Action method.
"""
try:
response_contacts = args[0]['epp']['response']['resData']['infData']['contact']
except:
response_contacts = []
if not isinstance(response_contacts, list):
response_contacts = [response_contacts, ]
self.domain_contacts = [{'type': i['@type'], 'id': i['#text']} for i in response_contacts]
self.result.update({
'name': args[0]['epp']['response']['resData']['infData']['name'],
'roid': str(args[0]['epp']['response']['resData']['infData']['roid']),
'crDate': self._date_transform(args[0]['epp']['response']['resData']['infData'].get('crDate', '')),
'upDate': self._date_transform(args[0]['epp']['response']['resData']['infData'].get('upDate', '')),
'exDate': self._date_transform(args[0]['epp']['response']['resData']['infData'].get('exDate', '')),
'admin': {},
'tech': {},
'billing': {},
'registrant': {},
'hostnames': [],
})
def doEppDomainInfo(self, *args, **kwargs):
"""
Action method.
"""
try:
response = zclient.cmd_domain_info(
domain=self.target_domain.name,
auth_info=self.target_domain.auth_key or None,
raise_for_result=False,
)
except zerrors.EPPError as exc:
self.log(self.debug_level, 'Exception in doEppDomainInfo: %s' % exc)
self.event('error', exc)
else:
self.event('response', response)
def doEppContactInfoMany(self, *args, **kwargs):
"""
Action method.
"""
for contact in self.domain_contacts:
try:
response = zclient.cmd_contact_info(
contact_id=contact['id'],
raise_for_result=False,
)
response_code = int(response['epp']['response']['result']['@code'])
except zerrors.EPPError as exc:
self.log(self.debug_level, 'Exception in doEppContactInfoMany: %s' % exc)
self.event('error', exc)
return
self.event('response', response)
if response_code != 1000:
return
d = response['epp']['response']['resData']['infData']
self.result[contact['type']] = {
'id': str(d['id']),
'email': str(d['email']),
'voice': str(d.get('voice', '')),
'fax': str(d.get('fax', '')),
}
postal_info_list = d['postalInfo'] if isinstance(d['postalInfo'], list) else [d['postalInfo'], ]
local_address = False
for postal_info in postal_info_list:
if postal_info['@type'] == 'loc':
local_address = True
self.result[contact['type']].update(self._extract_postal_info(postal_info))
break
if not local_address:
for postal_info in postal_info_list:
self.result[contact['type']].update(self._extract_postal_info(postal_info))
self.event('all-contacts-ok')
def doEppContactInfoRegistrant(self, *args, **kwargs):
"""
Action method.
"""
try:
response = zclient.cmd_contact_info(
contact_id=self.registrant_epp_id,
raise_for_result=False,
)
except zerrors.EPPError as exc:
self.log(self.debug_level, 'Exception in doEppContactInfoRegistrant: %s' % exc)
self.event('error', exc)
else:
self.event('response', response)
def doReportDone(self, *args, **kwargs):
"""
Action method.
"""
def doReportFailed(self, event, *args, **kwargs):
"""
Action method.
"""
def doDestroyMe(self, *args, **kwargs):
"""
Remove all references to the state machine object to destroy it.
"""
self.destroy()
def _date_transform(self, epp_date):
if not epp_date:
return ''
return formatdate(time.mktime(datetime.datetime.strptime(
epp_date, '%Y-%m-%dT%H:%M:%S.%fZ').timetuple()), True)
def _extract_postal_info(self, pi):
return {
'name': pi.get('name', ''),
'org': pi.get('org', ''),
'cc': pi.get('addr', {}).get('cc'),
'city': pi.get('addr', {}).get('city'),
'pc': pi.get('addr', {}).get('pc'),
'sp': pi.get('addr', {}).get('sp'),
'street': (' '.join(pi.get('addr', {}).get('street'))) if isinstance(
pi.get('addr', {}).get('street'), list) else pi.get('addr', {}).get('street'),
}
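
# --- Usage sketch (added for illustration; not part of the original module) ---
# The automat base class dispatches work through .event(), as the actions
# above do internally; a hypothetical driver might look like:
#
# def read_domain(domain_name):
#     domain = zdomains.domain_find(domain_name=domain_name)  # zdomains is imported above
#     reader = DomainReader(verify_registrant=True, log_events=True)
#     reader.event('run', domain)  # AT_STARTUP -> DOMAIN_INFO -> ... -> DONE/FAILED
#     return reader.result         # dict filled in doPrepareContactsList and friends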
| [
"penev.veselin@gmail.com"
] | penev.veselin@gmail.com |
db9bcc7f0c51bda307511eeacac685e0fe2d1bf7 | 27034876dc372d24d0c659b35e2da394e8d633ca | /tests/test_server/test_base.py | 0b404657eba53018b803f44636f686ccffdf6812 | [
"MIT"
] | permissive | gc-ss/idom | 48458aeb1084889accd8026feb8e58900cff23b4 | bf2a6beb09700ebd81cb569af1ef22fcc46ffcb6 | refs/heads/main | 2023-04-19T13:07:27.925291 | 2021-05-08T18:26:11 | 2021-05-08T18:26:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | import pytest
import sanic
import idom
from idom.server.sanic import PerClientStateServer
from idom.server.utils import find_builtin_server_type
@idom.component
def AnyComponent():
pass
def test_no_application_until_running():
server = find_builtin_server_type("PerClientStateServer")(AnyComponent)
with pytest.raises(RuntimeError, match="No application"):
server.application
def test_cannot_register_app_twice():
server = PerClientStateServer(AnyComponent)
server.register(sanic.Sanic())
with pytest.raises(RuntimeError, match="Already registered"):
server.register(sanic.Sanic())
| [
"ryan.morshead@gmail.com"
] | ryan.morshead@gmail.com |
3c1cd7d7c6d73f40e800dbe2d10e8ddd73c6f16b | 1503cbcd6b50354523e8a720c880de0adb8dd678 | /bot | 9db29a7b760381bb5f060082443a1a84c747f54b | [] | no_license | caphrim007/botcheck | d7c1ef913c1cd38fcf1bee93fa3984a7d681c881 | 1408b1ea9a8ffc1f8c8358df41d077d59b3b9495 | refs/heads/master | 2016-09-11T05:13:00.556531 | 2013-02-05T17:47:41 | 2013-02-05T17:47:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,934 | #! /usr/bin/env python
import os, sys, getopt, glob
import signal
import createDaemon
import botcheck
import botsqlite
from time import sleep, strftime, localtime
# signal handler function
def handle_sig(signum, frame):
"""handle signal"""
global sigterm_received, sighup_received, debug_lines
debug_lines += [
"Signal handler called with signal %s\n" % signum ]
if signum == signal.SIGTERM:
sigterm_received = True
elif signum == signal.SIGHUP:
sighup_received = True
daemon = False
debug = False
server = None
port = None
# main process, parse command arguments
try:
opts, args = getopt.getopt(sys.argv[1:], '', ["daemon","debug","server=","port="] )
except getopt.GetoptError, msg:
sys.exit(1)
for o, a in opts:
if o in ('--daemon'):
daemon = True
if o in ('--debug'):
debug = True
if o in ('--server'):
server = a
if o in ('--port'):
port = int(a)
if daemon:
# go into daemon mode
retCode = createDaemon.createDaemon()
sqldb = botsqlite.botsqlite('bot.db')
nick = sqldb.random_nick()
nick_id = sqldb.get_nick_id(nick)
is_exempt = sqldb.nick_is_exempt(nick)
if (not is_exempt):
sqldb.add_exemption(nick, "all")
debug_lines = []
out_file_name = 'log/botcheck_%s_%s.out' % (strftime('%m-%d_%H:%M', localtime()),server)
# save our pid for signals
pid = os.getpid()
# open a log file for stdout and stderr
log = open(out_file_name, 'w')
if debug:
sys.stdout = log
sys.stderr = log
sys.stdout.flush()
# signal flag
sigterm_received = False
sighup_received = False
signal.signal( signal.SIGTERM, handle_sig )
signal.signal( signal.SIGHUP, handle_sig )
# report file stat results as floats
os.stat_float_times( True )
if server is None or port is None:
print "You must specify both an IRC server and port to connect to"
sys.exit(1)
server_id = sqldb.get_server_id(server,port)
sqldb.add_connection(pid,nick_id,server_id)
bot = botcheck.botcheck(nick, server, port)
bot.start()
| [
"caphrim007@gmail.com"
] | caphrim007@gmail.com | |
71579406432d7fe7285db017b4e9a222908657e8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02646/s083923096.py | 1d20b8ebcdffa787072d93b40af0fe261e1c510f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | A,V = map(int,input().split())
B,W = map(int,input().split())  # target: position B, speed W (A,V above: chaser)
T = int(input())                # time limit
dist = abs(A-B)                 # initial gap between the two
diff = V-W                      # gap closed per unit of time
# The chaser catches the target iff the gap can be closed within T units of time.
if dist > diff * T:
    print("NO")
else:
    print("YES")
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d9d9816f1ba294edaa8c7b9b634c243f41bf84ec | 40ebd19289ff0d60efcd63d306b9d02698928b3e | /tests/util.py | f38f378034a55c6f52f006eef95ac5816bfaa739 | [] | no_license | lic91/android-platform-ndk | 69b81b27c4548b3500d362b10402d37384bf5d73 | cded731c566e78864f69610182aa04e1da9687a3 | refs/heads/master | 2020-12-02T16:30:06.099404 | 2016-04-29T13:33:19 | 2016-04-29T13:33:34 | 58,011,041 | 0 | 1 | null | 2016-05-04T00:39:06 | 2016-05-04T00:39:06 | null | UTF-8 | Python | false | false | 994 | py | #
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import os
def color_string(string, color):
colors = {
'green': '\033[92m',
'red': '\033[91m',
'yellow': '\033[93m',
}
end_color = '\033[0m'
return colors[color] + string + end_color
@contextlib.contextmanager
def cd(path):
curdir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(curdir)
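

# --- Usage sketch (added for illustration; not part of the original module) ---
if __name__ == '__main__':
    # Enter this file's directory temporarily; cd() restores the cwd on exit.
    with cd(os.path.dirname(os.path.abspath(__file__))):
        print(color_string('inside ' + os.getcwd(), 'green'))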
| [
"danalbert@google.com"
] | danalbert@google.com |
4aa330b332c7e104942f09ca0c4c39a76b550364 | cda52154e416a8d9d629221391609c3b84e408fd | /bairanalysis/workflows/temporalpatterns.py | 31d147c56633d079e42d1315049cf31f9bffb8f1 | [] | no_license | gpiantoni/bairanalysis | b7b72bf696389a08dc0d4e03d2bd41917cb5e641 | fd22be962eb863384017e98fc6b5ac9a3e527971 | refs/heads/master | 2022-12-09T01:01:06.864491 | 2020-08-27T14:13:27 | 2020-08-27T14:13:27 | 183,632,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,467 | py | from nipype import MapNode, Node, Workflow
from nipype.interfaces.utility import IdentityInterface
from nipype.interfaces.fsl import FLIRT
from .preproc.preproc import create_workflow_preproc_spm
from .preproc.mri_realign import create_workflow_coreg_epi2t1w
from .glm.temporalpatterns import (
create_workflow_temporalpatterns_fsl,
)
# NOTE: this sample input wiring references `input_node` and `SUBJECTS_DIR`,
# neither of which exists at module level, so running it as-is would raise a
# NameError on import; it is kept commented out as example wiring that belongs
# after the workflow's input node has been built (see the function below).
# input_node.inputs.t1w = str(
#     SUBJECTS_DIR
#     / 'sub-beilen/ses-UMCU7Tdaym13/anat/sub-beilen_ses-UMCU7Tdaym13_acq-wholebrain_T1w.nii')
# input_node.inputs.t2star_fov = str(
#     SUBJECTS_DIR
#     / 'sub-beilen/ses-UMCU7Tdaym13/T2star/sub-beilen_ses-UMCU7Tdaym13_acq-visualcortex_T2star.nii.gz')
# input_node.inputs.t2star_whole = str(
#     SUBJECTS_DIR
#     / 'sub-beilen/ses-UMCU7Tdaym13/T2star/sub-beilen_ses-UMCU7Tdaym13_acq-wholebrain_T2star.nii.gz')
# input_node.inputs.bold = str(
#     SUBJECTS_DIR
#     / 'sub-beilen/ses-UMCU7Tdaym13/func/sub-beilen_ses-UMCU7Tdaym13_task-bairtemporalpattern_run-1_bold.nii')
# input_node.inputs.events = str(
#     SUBJECTS_DIR
#     / 'sub-beilen/ses-UMCU7Tdaym13/func/sub-beilen_ses-UMCU7Tdaym13_task-bairtemporalpattern_run-1_events.tsv')
def create_workflow_temporalpatterns_7T(subjects, runs):
input_node = Node(IdentityInterface(fields=[
'bold',
'events',
't2star_fov',
't2star_whole',
't1w',
]), name='input')
coreg_tstat = MapNode(
interface=FLIRT(), name='realign_result_to_anat',
iterfield=['in_file', ])
coreg_tstat.inputs.apply_xfm = True
w = Workflow('temporalpatterns_7T')
w_preproc = create_workflow_preproc_spm()
w_spatialobject = create_workflow_temporalpatterns_fsl()
w_coreg = create_workflow_coreg_epi2t1w()
w.connect(input_node, 'bold', w_preproc, 'input.bold')
w.connect(input_node, 'events', w_spatialobject, 'input.events')
w.connect(input_node, 't2star_fov', w_coreg, 'input.t2star_fov')
w.connect(input_node, 't2star_whole', w_coreg, 'input.t2star_whole')
w.connect(input_node, 't1w', w_coreg, 'input.t1w')
w.connect(input_node, 't1w', coreg_tstat, 'reference')
w.connect(w_preproc, 'realign.realigned_files', w_spatialobject, 'input.bold')
w.connect(w_preproc, 'realign.mean_image', w_coreg, 'input.bold_mean')
w.connect(w_spatialobject, 'output.T_image', coreg_tstat, 'in_file')
w.connect(w_coreg, 'output.mat_epi2t1w', coreg_tstat, 'in_matrix_file')
return w
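
# --- Usage sketch (added for illustration; the subject list, runs and
# base_dir below are assumptions, not taken from this repository) ---
#
# w = create_workflow_temporalpatterns_7T(subjects=['sub-beilen'], runs=[1])
# w.base_dir = '/tmp/nipype_work'
# w.run(plugin='MultiProc')  # standard nipype Workflow execution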
| [
"github@gpiantoni.com"
] | github@gpiantoni.com |
d5a861fcdbbe4ab79ae9c3c44757e51ad51d78aa | cbd240e3b02113707e9dc9e7b6e4f956ecaf1ea8 | /mains/main_dgm_dessins_auxiliary_convnet_convvae.py | 60f7350a509f9c2a7bfcbb232acdba2ec3f4565c | [] | no_license | spell00/NeuralNetworksZoo | 3feb100886c22ecc8ff45987c68b40b56096434f | 2502dc3f835afd2111339800e5a8ae42f60e7359 | refs/heads/master | 2020-05-19T10:17:43.829983 | 2019-05-18T17:45:25 | 2019-05-18T17:45:25 | 184,967,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,784 | py | from numpy import genfromtxt
from torchvision import transforms, datasets
import torch
import os
import numpy as np
import pandas as pd
from sklearn import preprocessing
def __main__():
from data_preparation.GeoParser import GeoParser
from dimension_reduction.ordination import ordination2d
from sklearn.decomposition import PCA
from IPython.display import Image
import pandas as pd
import numpy as np
from models.semi_supervised.deep_generative_models.models.auxiliary_dgm import AuxiliaryDeepGenerativeModel
from utils.utils import dict_of_int_highest_elements, plot_evaluation
# files_destinations
home_path = "/home/simon/"
destination_folder = "annleukemia"
data_folder = "data"
results_folder = "results"
meta_destination_folder = "pandas_meta_df"
plots_folder_path = "/".join([home_path, destination_folder, results_folder, "plots/"])
#dataset_name = "gse33000_and_GSE24335_GSE44768_GSE44771_GSE44770"
dataset_name = "dessins"
activation = "relu"
#nrep = 3
betas=(0.9, 0.999)
vae_flavour = "o-sylvester"
early_stopping = 200
labels_per_class = 10000
n_epochs = 1000
warmup = 100
gt_input = 10000
# if ladder is yes builds a ladder vae. Do not combine with auxiliary (yet; might be possible and relatively
# not too hard to implement, but might be overkill. Might be interesting too)
translate = "n"
# Types of deep generative model
# Convolution neural network (convolutional VAE and convolutional classifier)
use_conv_ae = True #Not applicable if not sequence (images, videos, sentences, DNA...)
use_convnet = True
# Ladder VAE (L-VAE)
ladder = False
# Auxiliary Variational Auto-Encoder (A-VAE)
auxiliary = True
# Load pre-computed vae (unsupervised learning)
load_vae = False
lr = 1e-3
l1 = 0.
l2 = 0.
batch_size = 16
mc = 1 # seems to be a problem when mc > 1 for display only, results seem good
iw = 1 # seems to be a problem when iw > 1 for display only, results seem good
# Neurons layers
a_dim = 20
h_dims_classifier = [256]
h_dims = [256]
z_dims = [20]
# number of flows
number_of_flows = 3
num_elements = 1
# Files destinations
load_from_disk = True
load_merge = False
home_path = "/home/simon/"
destination_folder = "annleukemia"
data_folder = "data"
results_folder = "results"
meta_destination_folder = "pandas_meta_df"
plots_folder_path = "/".join([home_path, destination_folder,
results_folder, "plots/"])
dgm = AuxiliaryDeepGenerativeModel(vae_flavour, z_dims, h_dims, n_flows=number_of_flows, a_dim=a_dim,
num_elements=num_elements, is_hebb_layers=True,
gt_input=gt_input)
dgm.set_configs(home_path=home_path, results_folder=results_folder, data_folder=data_folder,
destination_folder=destination_folder, dataset_name=dataset_name, lr=lr,
meta_destination_folder="meta_pandas_dataframes", csv_filename="csv_loggers",
is_unlabelled=True)
dgm.load_local_dataset(root_train="/home/simon/annleukemia/data/kaggle_dessins/train",
root_valid="/home/simon/annleukemia/data/kaggle_dessins/valid",
root_test="/home/simon/annleukemia/data/kaggle_dessins/test", n_classes=31,
batch_size=batch_size, labels_per_class=labels_per_class,
extra_class=True, unlabelled_train_ds=True, normalize=True, mu=0.5, var=0.5)
is_example = False
# GET ordination from this!
train = np.vstack([x[0].data.numpy() for x in dgm.x_train])
# unlabelled_train = np.vstack([x[0].data.numpy() for x in dgm.unlabelled_x_train])
targets = np.vstack([x[1].data.numpy() for x in dgm.x_train])
labels = [x.tolist().index(1) for x in targets]
dgm.define_configurations(early_stopping=early_stopping, warmup=warmup, flavour=vae_flavour)
dgm.set_data(labels_per_class=labels_per_class, is_example=True, extra_class=True)
planes_classifier = [1, 8, 16, 32, 64, 128, 256]
classifier_kernels = [3, 3, 3, 3, 3, 3, 3]
classifier_pooling_layers = [True, True, True, True, True, True, False, False]
planes_ae = [1, 8, 16, 32, 64, 128, 256]
kernels_ae = [3, 3, 3, 3, 3, 3, 3]
padding_ae = [1, 1, 1, 1, 1, 1]
pooling_layers_ae = [1, 1, 1, 1, 1, 1]
dgm.set_conv_adgm_layers(h_dims=h_dims_classifier, input_shape=[1, 100, 100], hs_ae=h_dims,
use_conv_classifier=use_convnet, planes_classifier=planes_classifier,
classifier_kernels=classifier_kernels, classifier_pooling_layers=classifier_pooling_layers,
planes_ae=planes_ae, padding_ae=padding_ae, pooling_layers_ae=pooling_layers_ae,
kernels_ae=kernels_ae)
#dgm.set_dgm_layers()
# import the M1 in the M1+M2 model (Kingma et al, 2014). Not sure if it still works...
if load_vae:
print("Importing the model: ", dgm.model_file_name)
if use_conv_ae:
dgm.import_cvae()
else:
dgm.load_model()
# dgm.set_dgm_layers_pretrained()
dgm.cuda()
# dgm.vae.generate_random(False, batch_size, z1_size, [1, 28, 28])
dgm.run(n_epochs, auxiliary, mc, iw, lambda1=l1, lambda2=l2, verbose=1,
show_progress=10, show_pca_train=10, show_lda_train=10, show_pca_generated=10, clip_grad=0.1,
is_input_pruning=False, start_pruning=10000, show_lda_generated=10, warmup_n=-1, alpha_rate=0.1, t_max=.1)
if __name__ == "__main__":
__main__()
| [
"simonjpelletier@gmail.com"
] | simonjpelletier@gmail.com |
07317e3c804cedcc539ee97d5e4c6b5126832b5f | c97b9ae1bf06757ba61f90905e4d9b9dd6498700 | /venv/Lib/site-packages/tensorflow/python/keras/api/_v1/keras/preprocessing/__init__.py | 15ae238ba1d8576e0c3cd99c440aa03f8374c1f0 | [] | no_license | Rahulk1p/image-processor | f7ceee2e3f66d10b2889b937cdfd66a118df8b5d | 385f172f7444bdbf361901108552a54979318a2d | refs/heads/main | 2023-03-27T10:09:46.080935 | 2021-03-16T13:04:02 | 2021-03-16T13:04:02 | 348,115,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:3fa8cd64112c86a97ce1ef382a86af5034fac9d3e73f7de7c6fc2ce0c50db3e3
size 670
| [
"rksc.k1p@gmail.com"
] | rksc.k1p@gmail.com |
137aee76bca4f5d313b8daa2a18995728dc70147 | 644bcdabf35261e07c2abed75986d70f736cb414 | python-project/Defis/Euler_84_Test.py | 34fe0128eb2eacdea1c5bbf7d41d1defe839bb75 | [] | no_license | matcianfa/playground-X1rXTswJ | f967ab2c2cf3905becafb6d77e89a31414d014de | 67859b496e407200afb2b1d2b32bba5ed0fcc3f0 | refs/heads/master | 2023-04-03T11:56:15.878757 | 2023-03-24T15:52:37 | 2023-03-24T15:52:37 | 122,226,979 | 5 | 20 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | # Don't forget to change the module to import
module="Defis/Euler_84"
import sys
import io
# Capture the user's printed output
sauvegarde_stdout=sys.stdout
sys.stdout=io.StringIO()
from Euler_84 import *
count1 = sys.stdout.getvalue()[:-1]
sys.stdout=sauvegarde_stdout
from ma_bao import *
# The expected answer
reponse=101524
# help message shown if needed
help="N'oublie pas d'utiliser print pour afficher le resultat"
def send_msg(channel, msg):
print("TECHIO> message --channel \"{}\" \"{}\"".format(channel, msg))
def success():
send_msg("Tests validés","Bravo !")
afficher_correction(module)
print("TECHIO> success true")
def fail():
print("TECHIO> success false")
def test():
try:
assert str(count1) == str(reponse), "Le résultat obtenu est {} mais ce n'est pas le bon.".format(str(count1))
send_msg("Tests validés","Le résultat cherché est bien {}".format(str(count1)))
success()
except AssertionError as e:
fail()
send_msg("Oops! ", e)
if help:
send_msg("Aide 💡", help)
if __name__ == "__main__": test()
| [
"noreply@github.com"
] | matcianfa.noreply@github.com |
28274fb58547ee1ebcf33d9a281abab79d4ab29a | e7280a7cd9e6a03e5ef129e8f82167e6213eef9c | /website/webapps/django/myproject/myproject/sitemaps.py | 0e164fc2f818121b1e613a6b225c193f6cdbc9a8 | [
"MIT"
] | permissive | jensontham/pythonsingapore | da7b872880e7a9a3805ca4991bce889b558cf9d0 | 8a64afcf3e17d5d42e494f6f67b83f93861a63e4 | refs/heads/master | 2020-12-25T05:29:04.474391 | 2012-11-12T12:06:57 | 2012-11-12T12:06:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | """Sitemaps that are global to the project (i.e. the django-cms pages)."""
from django.contrib.sitemaps import Sitemap
from cms.utils.moderator import get_page_queryset
from cmsplugin_blog.models import EntryTitle
class BlogSitemap(Sitemap):
changefreq = "weekly"
priority = 0.7
def items(self):
return EntryTitle.objects.filter(entry__is_published=True)
def lastmod(self, obj):
return obj.entry.pub_date
def location(self, obj):
location = obj.get_absolute_url()
return location
class PagesSitemap(Sitemap):
changefreq = "weekly"
priority = 0.4
def items(self):
page_queryset = get_page_queryset(None)
all_pages = page_queryset.published().filter(login_required=False)
return all_pages
def lastmod(self, page):
pass
def location(self, obj):
location = obj.get_absolute_url()
return location
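
# --- Wiring sketch (added for illustration; the project's real urls.py is not
# part of this file, so the module path below is an assumption) ---
#
# from django.conf.urls import url
# from django.contrib.sitemaps.views import sitemap
# from myproject.sitemaps import BlogSitemap, PagesSitemap
#
# sitemaps = {'blog': BlogSitemap, 'pages': PagesSitemap}
# urlpatterns += [
#     url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps}),
# ]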
| [
"mbrochh@gmail.com"
] | mbrochh@gmail.com |
21d450387a2b29adafb5273f746a5d0cd2bc4a9c | 7437ad1203ff272a482e4a7c7266afdbc7a0e619 | /lra/models/gpu_16g/linear_transformer_exp_convspe_k128_shr/listops/r3/config.py | 7cccd5f68123865cd3aeeef76d176371cd488305 | [] | no_license | maximzubkov/spe | 4ccc59d538a2cb4e5f9b0118ef79933eed0b8d95 | d877feb0f6b935152e5431ce374606ba72c08d65 | refs/heads/main | 2023-08-23T02:08:14.253693 | 2021-10-05T17:25:36 | 2021-10-05T17:25:36 | 385,636,912 | 0 | 0 | null | 2021-10-05T17:25:37 | 2021-07-13T14:42:19 | Jupyter Notebook | UTF-8 | Python | false | false | 1,739 | py | # Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration and hyperparameter sweeps."""
import functools
from fast_self_attention import fast_self_attention as favor
import jax
import jax_spe as spe
from lra_benchmarks.models.layers.spe import make_spe_transform_fn
from lra_benchmarks.listops.configs import base_listops_config
def get_config():
"""Get the default hyperparameter configuration."""
config = base_listops_config.get_config()
config.random_seed = 2
config.model_type = "transformer"
config.attention_fn = favor.make_fast_generalized_attention(
qkv_dim=config.qkv_dim // config.num_heads,
features_type='deterministic',
kernel_fn=jax.lax.exp,
lax_scan_unroll=16)
config.model_kwargs = dict(
add_pos_emb=False,
qk_transform_fn_factory=functools.partial(
make_spe_transform_fn,
spe_cls=spe.ConvSPE,
spe_kwargs=dict(
num_realizations=64,
kernel_size=128
),
shared=True
)
)
config.batch_size = 8
config.learning_rate = config.learning_rate / 32 * 8
config.num_train_steps = 10000
config.eval_frequency = config.eval_frequency * 4
return config
def get_hyper(hyper):
return hyper.product([])
| [
"zubkov.md@phystech.edu"
] | zubkov.md@phystech.edu |
55c3bc5b3dec8357a238c4cd264073832ffac18c | 492693d325dad3adcb09601c54a5b7b0d00cfdef | /drf_admin/apps/system/views/users.py | 0927063cdb15a3fb458c58aa591fd13a7613fb6a | [
"MIT"
] | permissive | HHHyuming/drf_admin | c682e7c284a9747175a81833aacb5e3fc67a2e42 | 956ab1a96964a8af06b0697e228a3d4238dce109 | refs/heads/master | 2023-03-19T23:52:06.521389 | 2021-03-10T15:28:50 | 2021-03-10T15:28:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,152 | py | # -*- coding: utf-8 -*-
"""
@author : Wang Meng
@github : https://github.com/tianpangji
@software : PyCharm
@file : users.py
@create : 2020/6/27 17:55
"""
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import mixins
from rest_framework.exceptions import ValidationError
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from drf_admin.apps.system.serializers.users import UsersSerializer, UsersPartialSerializer, ResetPasswordSerializer
from drf_admin.utils.views import AdminViewSet
from oauth.models import Users
from system.filters.users import UsersFilter
from system.models import Permissions
class UsersViewSet(AdminViewSet):
"""
create:
User -- create
Create a user, status: 201 (success), return: the created user's info
destroy:
User -- delete
Delete a user, status: 204 (success), return: None
multiple_delete:
User -- bulk delete
Bulk-delete users, status: 204 (success), return: None
update:
User -- update
Update a user, status: 200 (success), return: the updated user's info
partial_update:
User -- partial update
Partially update a user (activate/lock), status: 200 (success), return: the updated user's info
list:
User -- list
User list, status: 200 (success), return: the list of users
retrieve:
User -- detail
User detail, status: 200 (success), return: a single user's details
"""
queryset = Users.objects.all()
serializer_class = UsersSerializer
filter_backends = (DjangoFilterBackend, SearchFilter, OrderingFilter)
filter_class = UsersFilter
search_fields = ('username', 'name', 'mobile', 'email')
ordering_fields = ('id',)
def get_serializer_class(self):
if self.action == 'partial_update':
return UsersPartialSerializer
else:
return UsersSerializer
class ResetPasswordAPIView(mixins.UpdateModelMixin, GenericAPIView):
"""
patch:
User -- reset password
Reset a user's password, status: 200 (success), return: None
"""
queryset = Users.objects.all()
serializer_class = ResetPasswordSerializer
def patch(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
class PermissionsAPIView(APIView):
"""
get:
User -- get the user's permission ID list
Get the list of permission IDs the user holds, status: 200 (success), return: list of permission IDs
"""
def get(self, request, pk):
try:
user = Users.objects.get(id=pk)
except Users.DoesNotExist:
raise ValidationError('Invalid user ID')
# admin role
if 'admin' in user.roles.values_list('name', flat=True) or user.is_superuser:
return Response(data={'results': Permissions.objects.values_list('id', flat=True)})
# other roles
return Response(data={'results': list(filter(None, set(user.roles.values_list('permissions__id', flat=True))))})
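
# --- Routing sketch (added for illustration; the project's actual router
# registration lives elsewhere, so the URL prefix below is an assumption) ---
#
# from rest_framework.routers import DefaultRouter
# router = DefaultRouter()
# router.register('users', UsersViewSet, basename='users')
# urlpatterns = router.urls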
| [
"921781999@qq.com"
] | 921781999@qq.com |
3e56de98eccdbf080daea3fec92c9ce8b459e040 | 255b6dcca3866279653574b03d1be70c8210e2a8 | /provenance/ProvMod.py | 5410efe3f06684a0ff11ff75ba2b0eaa1284d5ec | [] | no_license | pseudoPixels/Collaborative_provmod_sci_workflow | 5b10b4f0f9025bf0a015ef1a81a79325800c7ee0 | 695ab286bc010013308747ca0c62888e7b604c35 | refs/heads/master | 2021-08-23T00:49:42.923785 | 2017-12-02T00:21:50 | 2017-12-02T00:21:50 | 112,797,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,786 | py | # version 2.4.1
import uuid
import logging
import configuration
import os.path
import couchdb
import getpass
from typing import Any
import collections
failure = False
# to disable all logging
#logging.disable(logging.CRITICAL)
# to re-enable all logging
logging.disable(logging.NOTSET)
# logger for the current script
logger_name = 'Model_Logger'
# log file for the whole experiment
log_file = 'workflow.log'
# create the loggers that you want to use in this file
# params : logger_name, output_file
info = configuration.logger_info(logger_name, log_file)
info_start = configuration.logger_info_start(logger_name, log_file)
info_end = configuration.logger_info_end(logger_name, log_file)
deb = configuration.logger_debug(logger_name, log_file)
warn = configuration.logger_warn(logger_name, log_file)
err = configuration.logger_error(logger_name, log_file)
fatalerr = configuration.logger_fatal(logger_name, log_file)
class User:
def __init__(self):
self.id = uuid.uuid4()
self.name =getpass.getuser()
msg = []
msg.append('event :: ' + 'user invocation')
msg.append('id :: ' + str(self.id))
msg.append('name :: ' + str(self.name))
deb.debug(';'.join(msg))
USER = User()
class Data:
def __init__(self):
self.id = None
self.ref = None
self.user = None
class Object(Data):
def __init__(self, reference):
# type: (Any) -> None
self.id = uuid.uuid4()
self.user = USER.id
if failure is True:
# precondition management
pass
else:
# if all preconditions passed
try:
self.ref = reference
msg = []
msg.append('event :: ' + 'object data creation')
msg.append('id :: ' + str(self.id))
msg.append('type :: ' + str(type(reference)) )
msg.append('value :: ' + str(reference))
msg.append('user :: ' + str(USER.id))
deb.debug(';'.join(msg))
except Exception as e:
# if any further error occurs somehow
msg = []
msg.append('event :: ' + 'object data creation')
msg.append('id :: ' + str(self.id))
msg.append('type :: ' + str(type(reference)))
msg.append('value :: ' + str(reference))
msg.append('user :: ' + str(USER.id))
msg.append('error :: ' + str(e))
err.error(';'.join(msg))
class File(Data):
def __init__(self, f):
# type: (file) -> None
self.id = uuid.uuid4()
self.user = USER.id
if failure is True:
# precondition management
pass
elif not isinstance(f, file):
# if file not found
msg = []
msg.append('event :: ' + 'file data creation')
msg.append('id :: ' + str(self.id))
msg.append('user :: ' + str(USER.id))
msg.append('error :: ' + 'file not found')
err.error(';'.join(msg))
else:
# if all exceptions passed
try:
self.ref = f
msg = []
msg.append('event :: ' + 'file data creation')
msg.append('id :: ' + str(self.id))
msg.append('type :: ' + str(type(f)))
msg.append('source :: ' + str(f.name))
msg.append('user :: ' + str(USER.id))
deb.debug(';'.join(msg))
except Exception as e:
# if any further exception occurs
msg = []
msg.append('event :: ' + 'file data creation')
msg.append('id :: ' + str(self.id))
msg.append('type :: ' + str(type(f)))
msg.append('source :: ' + str(f.name))
msg.append('user :: ' + str(USER.id))
msg.append('error :: ' + str(e))
err.error(';'.join(msg))
class Document(Data):
def __init__(self, document):
# type: (couchdb.Document) -> None
self.id = uuid.uuid4()
self.user = USER.id
if failure is True:
# precondition management
pass
elif not isinstance(document, couchdb.Document):
msg = []
msg.append('event :: ' + 'document data creation')
msg.append('id :: ' + str(self.id))
msg.append('user :: ' + str(USER.id))
msg.append('error :: ' + 'document not found')
err.error(';'.join(msg))
else:
# if all exceptions passed
try:
self.ref = document
msg = []
msg.append('event :: ' + 'document data creation')
msg.append('id :: ' + str(self.id))
msg.append('type :: ' + str(type(document)))
msg.append('address :: ' + str(document))
msg.append('user :: ' + str(USER.id))
deb.debug(';'.join(msg))
except Exception as e:
# if any further exception occurs
msg = []
msg.append('event :: ' + 'document data creation')
msg.append('id :: ' + str(self.id))
msg.append('type :: ' + str(type(document)))
msg.append('address :: ' + str(document))
msg.append('user :: ' + str(USER.id))
msg.append('error :: ' + str(e))
err.error(';'.join(msg))
class Module:
def logStart(self):
msg = []
msg.append('event :: ' + 'module start')
msg.append('id :: ' + str(self.id))
msg.append('user :: ' + str(USER.id))
deb.debug(';'.join(msg))
def logEnd(self):
msg = []
msg.append('event :: ' + 'module end')
msg.append('id :: ' + str(self.id))
msg.append('user :: ' + str(USER.id))
deb.debug(';'.join(msg))
def body(self):
"""
:param interfaceParam:
:return:
"""
def __init__(self, *args):
self.id = uuid.uuid4()
self.user = USER.id
self.P = args
try:
msg = []
count = 0
for i in args:
if isinstance(i, Object) or isinstance(i, File) or isinstance(i, Document):
msg.append('p@' + str(count) + ' :: ' + str(i.id))
msg.append('p@@' + str(count) + ' :: ' + str(i.__class__.__name__))
count += 1
else:
msg.append('p@' + str(count) + ' :: ' + str(i))
msg.append('p@@' + str(count) + ' :: ' + str(i.__class__.__name__))
count += 1
msg.append('event :: ' + 'module creation')
msg.append('id :: ' + str(self.id))
msg.append('name :: ' + str(self.__class__.__name__))
msg.append('user :: ' + str(USER.id))
deb.debug(';'.join(msg))
except Exception as e:
msg = []
count = 0
for i in args:
if isinstance(i, Object) or isinstance(i, File) or isinstance(i, Document):
msg.append('p@' + str(count) + ' :: ' + str(i.id))
msg.append('p@@' + str(count) + ' :: ' + str(i.__class__.__name__))
count += 1
else:
msg.append('p@' + str(count) + ' :: ' + str(i))
msg.append('p@@' + str(count) + ' :: ' + str(i.__class__.__name__))
count += 1
msg.append('event :: ' + 'module creation')
msg.append('id :: ' + str(self.id))
msg.append('name :: ' + str(self.__class__.__name__))
msg.append('user :: ' + str(USER.id))
msg.append('error :: ' + str(e))
err.error(';'.join(msg))
def run(self, when = True, false_return = None):
if when is True:
self.logStart()
self.outgoing = self.body()
msg = []
if isinstance(self.outgoing, collections.Iterable):
count = 0
for i in self.outgoing:
if isinstance(i, Object) or isinstance(i, File) or isinstance(i, Document):
msg.append('o@' + str(count) + ' :: ' + str(i.id))
msg.append('o@@' + str(count) + ' :: ' + str(i.__class__.__name__))
count += 1
else:
if isinstance(self.outgoing, Object) or isinstance(self.outgoing, File) or isinstance(self.outgoing, Document):
msg.append('o@' + '0' + ' :: ' + str(self.outgoing.id))
msg.append('o@@' + '0' + ' :: ' + str(self.outgoing.__class__.__name__))
msg.append('event :: ' + 'module execution true')
msg.append('id :: ' + str(self.id))
msg.append('name :: ' + str(self.__class__.__name__))
msg.append('user :: ' + str(USER.id))
deb.debug(';'.join(msg))
self.logEnd()
return self.outgoing
else:
msg = []
count = 0
for i in false_return:
msg.append('o@' + str(count) + ' :: ' + str(i))
msg.append('o@@' + str(count) + ' :: ' + str(i.__class__.__name__))
count += 1
msg.append('event :: ' + 'module execution false')
msg.append('id :: ' + str(self.id))
msg.append('name :: ' + str(self.__class__.__name__))
msg.append('user :: ' + str(USER.id))
deb.debug(';'.join(msg))
return false_return
| [
"golam.mostaeen@usask.ca"
] | golam.mostaeen@usask.ca |