max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
procuret/http/query_parameter.py | Procuret/procuret-python | 0 | 12772451 | <reponame>Procuret/procuret-python<gh_stars>0
"""
Procuret Python
QueryParameter Module
author: <EMAIL>
"""
from typing import Any, TypeVar, Type, List
T = TypeVar('T', bound='QueryParameter')


class QueryParameter:
    """A single URL parameter, e.g. beep=boop"""

    def __init__(
        self,
        key: str,
        value: Any
    ) -> None:
        """Store the key/value pair and precompute the URL representation.

        `key` must be a string; `value` must be convertible to `str`.
        """
        assert isinstance(key, str)
        str(value)  # provoke a conversion error early, at construction time
        self._key = key
        self._value = value
        self._url_representation = self._represent(value)
        return

    key = property(lambda s: s._key)

    def __str__(self) -> str:
        """Return the `key=value` form used in a URL query string."""
        return self._key + '=' + self._url_representation

    @classmethod
    def remove_targets_with(
        cls: Type[T],
        key: str,
        targets: List[T]
    ) -> List[T]:
        """Return a new list of the parameters in `targets` whose key
        does not equal `key`. `targets` itself is not modified."""
        # Bug fix: this method previously built `retained_targets` but then
        # returned the unfiltered `targets` list, so removal never happened.
        retained_targets: List[T] = [
            target for target in targets if target._key != key
        ]
        return retained_targets

    @staticmethod
    def _represent(value: Any) -> str:
        """Convert `value` to its URL string form (booleans lowercased)."""
        if isinstance(value, str):
            return value
        if isinstance(value, bool):
            # bool is checked before the generic fallback because
            # str(True) would yield 'True', not the URL form 'true'.
            return 'true' if value else 'false'
        return str(value)
| 2.9375 | 3 |
tests/http_tests/test_courtesy_sleep.py | jayvdb/alcazar | 0 | 12772452 | <reponame>jayvdb/alcazar
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------------------------------------------------------------
# includes
# 2+3 compat
from __future__ import absolute_import, division, print_function, unicode_literals
# 3rd parties
import requests
# alcazar
from alcazar import HttpClient
from alcazar.config import ScraperConfig
# tests
from .plumbing import FetcherFixture, ClientFixture, ServerFixture, compile_test_case_classes
#----------------------------------------------------------------------------------------------------------------------------------
class CourtesySleepTestServer(object):
    """Tiny request-handler stub: `/redirect` 301s to `/landing`."""

    def redirect(self):
        """Respond with a permanent redirect pointing at the landing page."""
        headers = {'Location': '/landing'}
        return {'status': 301, 'headers': headers, 'body': b''}

    def landing(self):
        """Plain body proving the redirect was followed."""
        return b'You got redirected'
def make_send_base_wrapper(send_base):
    """Wrap `send_base` so requests to any '.test' host are short-circuited
    with an empty status-0 response instead of touching the network."""
    def wrapper(prepared_request, **kwargs):
        if '.test' not in prepared_request.url:
            # Real host: defer to the original transport.
            return send_base(prepared_request, **kwargs)
        fake_response = requests.Response()
        fake_response.status_code = 0
        return fake_response
    return wrapper
class CourtesySleepTestClient(HttpClient):
    # HttpClient specialised for the tests: sleeps are recorded instead of
    # performed, and '.test' requests are intercepted before the network.

    def __init__(self, base_config, **kwargs):
        kwargs.setdefault('logger', None)
        super(CourtesySleepTestClient, self).__init__(base_config, **kwargs)
        adapter = self.session.adapters['http://']
        # Replace the adapter's sleep with a recorder so tests can assert on
        # the courtesy delay that *would* have been applied.
        adapter._sleep = lambda seconds: setattr(self, 'actual_sleep', seconds)
        adapter.send_base = make_send_base_wrapper(adapter.send_base)

    def submit(self, request, config, **kwargs):
        # Reset the recorded sleep before every request so each fetch's
        # assertion sees only its own delay.
        self.actual_sleep = 0
        return super(CourtesySleepTestClient, self).submit(request, config, **kwargs)
#----------------------------------------------------------------------------------------------------------------------------------
class CourtesySleepTests(object):
    # Mixin exercising the courtesy-sleep behaviour of the HTTP client.
    # Concrete unittest classes are produced by `compile_test_case_classes`
    # from the fixture combinations below.

    __fixtures__ = [
        FetcherFixture.__subclasses__(),
        [ClientFixture],
        [ServerFixture],
    ]

    new_server = CourtesySleepTestServer

    def new_client(self, **kwargs):
        base_config = ScraperConfig.from_kwargs(kwargs)
        return CourtesySleepTestClient(base_config)

    def assertDidntSleep(self):
        return self.assertEqual(
            self.client.actual_sleep,
            0,
        )

    def assertDidSleep(self, expected=5):
        # Allow 0.5s of tolerance around the expected courtesy delay.
        return self.assertLess(
            abs(self.client.actual_sleep - expected),
            0.5,
        )

    def test_no_courtesy_sleep_across_domains(self):
        self.fetch('http://a.test/')
        self.assertDidntSleep()
        self.fetch('http://b.test/')
        self.assertDidntSleep()
        self.fetch('http://c.test/')
        self.assertDidntSleep()

    def test_no_courtesy_sleep_across_hosts(self):
        self.fetch('http://a.example.test/')
        self.assertDidntSleep()
        self.fetch('http://b.example.test/')
        self.assertDidntSleep()
        self.fetch('http://c.example.test/')
        self.assertDidntSleep()

    def test_default_courtesy_sleep_same_domain(self):
        self.fetch('http://a.test/')
        self.assertDidntSleep()
        self.fetch('http://a.test/')
        self.assertDidSleep()
        self.fetch('http://a.test/')
        self.assertDidSleep()

    def test_default_courtesy_sleep_same_domain_diff_port(self):
        # :80 is the first URL's implicit port, so it counts as the same
        # endpoint; :81 and :82 are distinct and require no courtesy sleep.
        self.fetch('http://a.test/')
        self.assertDidntSleep()
        self.fetch('http://a.test:80/')
        self.assertDidSleep()
        self.fetch('http://a.test:81/')
        self.assertDidntSleep()
        self.fetch('http://a.test:82/')
        self.assertDidntSleep()

    def test_courtesy_sleep_remembers_last_request(self):
        all_urls = ('http://a.test/', 'http://b.test/', 'http://c.test/')
        for url in all_urls:
            self.fetch(url)
            self.assertDidntSleep()
        for url in all_urls:
            self.fetch(url)
            self.assertDidSleep()

    def test_redirect_works_as_expected(self):
        self.assertEqual(
            self.fetch('/redirect').text,
            'You got redirected',
        )
        self.assertDidntSleep()

    def test_allow_redirects_false_still_sleeps(self):
        for sweep in (1, 2):
            self.fetch('/redirect', allow_redirects=False)
            if sweep == 1:
                self.assertDidntSleep()
            else:
                self.assertDidSleep()

    # NOTE(review): the constructor-level courtesy_seconds tests below are
    # disabled — presumably they await `alt_client` support; confirm before
    # re-enabling.
    # def test_set_courtesy_sleep_in_constructor(self):
    #     with self.alt_client(courtesy_seconds=10):
    #         self.fetch('http://a.test/')
    #         self.fetch('http://a.test/')
    #         self.assertDidSleep(10)
    # def test_disable_courtesy_sleep_in_constructor_with_none(self):
    #     with self.alt_client(courtesy_seconds=None):
    #         self.fetch('http://a.test/')
    #         self.fetch('http://a.test/')
    #         self.assertDidSleep()
    # def test_disable_courtesy_sleep_in_constructor_with_zero(self):
    #     with self.alt_client(courtesy_seconds=0):
    #         self.fetch('http://a.test/')
    #         self.fetch('http://a.test/')
    #         self.assertDidntSleep()

    def test_set_courtesy_sleep_in_method(self):
        self.fetch('http://a.test/')
        self.fetch('http://a.test/', courtesy_seconds=12)
        self.assertDidSleep(12)

    def test_disable_courtesy_sleep_in_method_with_none(self):
        self.fetch('http://a.test/')
        self.fetch('http://a.test/', courtesy_seconds=None)
        self.assertDidntSleep()

    def test_disable_courtesy_sleep_in_method_with_zero(self):
        self.fetch('http://a.test/')
        self.fetch('http://a.test/', courtesy_seconds=0)
        self.assertDidntSleep()
#----------------------------------------------------------------------------------------------------------------------------------
# Generate concrete TestCase classes from the fixture combinations above.
compile_test_case_classes(globals())
#----------------------------------------------------------------------------------------------------------------------------------
| 2.234375 | 2 |
airflow/bin/create_user.py | Ahrli/fast_tools | 1 | 12772453 | import airflow
from airflow import models,settings
from airflow.contrib.auth.backends.password_auth import PasswordUser
# Create a default admin account for Airflow's password auth backend.
# NOTE(review): the credential values below are dataset-anonymised
# placeholders — supply real values (ideally from the environment)
# before running this script.
user = PasswordUser(models.User())
user.username = 'admin'
user.email = '<EMAIL>'
user.password = '<PASSWORD>'
#user.superuser = '1'
session = settings.Session()
session.add(user)
session.commit()
session.close()
exit()
| 1.953125 | 2 |
cgvt_site/cgvt_site/auth_pipeline.py | ConsultingAtVT/CGVTWebsite | 0 | 12772454 | import requests
from apps.core.models import UserProfile
from django.conf import settings
from social.exceptions import AuthFailed
# LinkedIn v1 profile endpoint; the trailing {} is filled with the OAuth2
# access token obtained during the social-auth handshake.
USER_INFO_LI_REQUEST_URL = ('https://api.linkedin.com/v1/people/~:('
                            'id,'
                            'firstName,'
                            'lastName,'
                            'emailAddress,'
                            'pictureUrl,'
                            'publicProfileUrl)'
                            '?oauth2_access_token={}'
                            '&format=json')
def update_or_create_user_profile(backend, user, response, *args, **kwargs):
    """Social-auth pipeline step: refresh the local UserProfile from LinkedIn.

    Fetches the member's profile with the OAuth2 access token, rejects any
    email not whitelisted in ``settings.VALID_EMAILS``, then copies the
    LinkedIn fields onto the user's profile.
    """
    access_token = response.get('access_token')
    profile_data = requests.get(
        USER_INFO_LI_REQUEST_URL.format(access_token)
    ).json()

    email = profile_data.get('emailAddress')
    if email not in settings.VALID_EMAILS:
        raise AuthFailed(backend, 'This is not a whitelisted email')

    profile, _ = UserProfile.objects.get_or_create(user=user)
    profile.li_email = email
    profile.li_first_name = profile_data.get('firstName')
    profile.li_last_name = profile_data.get('lastName')
    profile.li_picture_url = profile_data.get('pictureUrl')
    profile.li_profile_url = profile_data.get('publicProfileUrl')
    profile.save()
| 2.25 | 2 |
caracara/common/module.py | CrowdStrike/falconpy-tools | 2 | 12772455 | """
Generic Caracara API module.
This module contains the the FalconApiModule class, which represents a generic
Caracara API module. All modules, including Hosts, Prevention Policies, etc.
derive from this abstract base class.
"""
import logging
from abc import ABC, abstractmethod
from falconpy import OAuth2
class FalconApiModule(ABC):
    """
    Meta class for a generic Caracara API Module.

    Each module provides API Methods. Concrete modules must supply the
    ``name`` and ``help`` properties below.
    """

    @property
    @abstractmethod
    def name(self) -> str:
        """Store the name for the developer to use when instantiating the API module."""

    @property
    @abstractmethod
    def help(self) -> str:
        """Store the help string to be made available for each API module."""

    def __init__(self, api_authentication: OAuth2):
        """Configure a Caracara API module with a FalconPy OAuth2 module."""
        class_name = self.__class__.__name__
        # Per-subclass logger so log lines identify the concrete module.
        self.logger = logging.getLogger(class_name)
        self.logger.debug("Initialising API module: %s", class_name)
        self.api_authentication = api_authentication
| 2.734375 | 3 |
src/app/lib/ssl.py | t4skforce/PenTestingUnit | 0 | 12772456 | import os
import socket
import ssl
from OpenSSL import crypto, SSL
import random
def getSSLContext(app=None, config_folder="/tmp/config", cert_file="app.crt", key_file="app.key"):
    """Create an SSL certificate in the config folder if it does not exist
    and return a context suitable for serving HTTPS.

    Returns an ``ssl.SSLContext`` configured for forward secrecy when the
    Python build supports it, otherwise a ``(cert_path, key_path)`` tuple
    that Werkzeug accepts directly.
    """
    if not os.path.exists(config_folder):
        os.makedirs(config_folder)
    CERT_FILE = os.path.join(config_folder, cert_file)
    KEY_FILE = os.path.join(config_folder, key_file)
    if not os.path.exists(CERT_FILE) or not os.path.exists(KEY_FILE):
        if app is not None:
            app.logger.info(" * Generating Certificate files (%s,%s)" % (cert_file, key_file))
        # 4096-bit RSA key and a self-signed certificate valid for 10 years.
        k = crypto.PKey()
        k.generate_key(crypto.TYPE_RSA, 4096)
        cert = crypto.X509()
        cert.get_subject().C = "US"
        cert.get_subject().ST = "Example"
        cert.get_subject().L = "Example"
        cert.get_subject().O = "Example Company"
        cert.get_subject().OU = "Example Organization"
        cert.get_subject().CN = socket.gethostname()
        cert.set_serial_number(random.randint(1, 100000))
        cert.gmtime_adj_notBefore(0)
        cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
        cert.set_issuer(cert.get_subject())  # self-signed: issuer == subject
        cert.set_pubkey(k)
        # Bug fix: sign with SHA-256 instead of the deprecated SHA-1, which
        # modern TLS clients reject for certificate signatures.
        cert.sign(k, 'sha256')
        # Bug fix: dump_certificate()/dump_privatekey() return bytes, so the
        # files must be opened in binary mode ("wt" raises TypeError on
        # Python 3); `with` also guarantees the handles are flushed/closed.
        with open(CERT_FILE, "wb") as cert_fd:
            cert_fd.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
        with open(KEY_FILE, "wb") as key_fd:
            key_fd.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))
    if hasattr(ssl, 'SSLContext'):
        if app is not None:
            app.logger.info(" * Creating perfect forward secrey SSL Context")
        # TLS 1.2 only, ECDHE cipher and server cipher preference.
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
        context.set_ecdh_curve('prime256v1')
        # NOTE(review): CERT_REQUIRED on a server context demands *client*
        # certificates from every connection — confirm this is intended.
        context.verify_mode = ssl.CERT_REQUIRED
        context.set_ciphers('ECDHE-RSA-AES256-SHA')
        context.options |= ssl.OP_NO_COMPRESSION
        context.options |= ssl.OP_SINGLE_ECDH_USE
        context.options |= ssl.OP_CIPHER_SERVER_PREFERENCE
        context.load_cert_chain(CERT_FILE, KEY_FILE)
    else:
        # Old Python without ssl.SSLContext: fall back to the
        # (cert, key) tuple form understood by Werkzeug.
        context = (CERT_FILE, KEY_FILE)
    return context
| 2.6875 | 3 |
arvestust/models/image.py | lehvitus/arvestust | 1 | 12772457 | import os
import uuid
import inflection
from django.db import models
from django.urls import reverse
from django.template.defaultfilters import slugify
from django.utils.translation import gettext_lazy as _
from django.conf import settings
from .abstracts import ArvestustRecord, ArvestustFile
from .validators.file import validate_file_size, validate_storage_quota
def file_upload_path(instance, filename):
    """Build the storage path for an uploaded file.

    Layout: ``<pluralised content type>/<owner uuid>/<file uuid><ext>`` —
    the original filename is discarded apart from its extension.
    """
    _, extension = os.path.splitext(filename)
    folder = inflection.pluralize(str(instance.content_type))
    stored_name = f'{str(instance.uuid)}{extension}'
    return f'{folder}/{instance.content_object.uuid}/{stored_name}'
class Image(ArvestustFile):
    # Concrete file record whose payload is an image; common fields and
    # behaviour come from the ArvestustFile abstract base.

    file = models.ImageField(
        upload_to=file_upload_path,
        verbose_name=_('file'),
        # NOTE(review): getattr without a default raises AttributeError if
        # PUBLIC_FILE_STORAGE is missing from settings — confirm intended.
        storage=getattr(settings, 'PUBLIC_FILE_STORAGE'),
    )

    class Meta:
        db_table = 'arvestust_images'
        indexes = [models.Index(fields=['created_at'])]
        ordering = ['-created_at']  # newest first

    def get_absolute_url(self):
        """Return the canonical detail URL for this image."""
        return reverse('image-detail', kwargs={'slug': self.slug})
| 2.03125 | 2 |
UServer/test/jwt_test.py | soybean217/lora-python | 0 | 12772458 | from flask import Flask
from flask_jwt import JWT, jwt_required, current_identity
from werkzeug.security import safe_str_cmp
class User(object):
    """Minimal in-memory user record for the flask_jwt demo."""

    def __init__(self, id, username, password):
        # `id` intentionally mirrors flask_jwt's example signature even
        # though it shadows the builtin.
        self.id = id
        self.username = username
        self.password = password

    def __str__(self):
        return "User(id='%s')" % self.id
# Demo accounts; the password values were scrubbed by the dataset anonymiser.
users = [
    User(1, 'user1', '<PASSWORD>'),
    User(2, 'user2', '<PASSWORD>'),
]

# Lookup tables used by the authentication callbacks below.
username_table = {u.username: u for u in users}
userid_table = {u.id: u for u in users}
def authenticate(username, password):
    """flask_jwt credential callback: return the matching User, else None."""
    user = username_table.get(username, None)
    if not user:
        return None
    # safe_str_cmp avoids short-circuiting on the first mismatched byte.
    if safe_str_cmp(user.password.encode('utf-8'), password.encode('utf-8')):
        return user
    return None
def identity(payload):
    """flask_jwt identity callback: resolve a token payload to a User."""
    return userid_table.get(payload['identity'], None)
app = Flask(__name__)
app.debug = True
# NOTE(review): hard-coded secret and debug=True are acceptable only for
# this demo; never deploy with either.
app.config['SECRET_KEY'] = 'super-secret'
# Wires the /auth token endpoint and request verification into the app.
jwt = JWT(app, authenticate, identity)
@app.route('/protected')
@jwt_required()
def protected():
    """Example endpoint requiring a valid JWT; echoes the caller identity."""
    return '%s' % current_identity


if __name__ == '__main__':
    app.run()
mass_flask_core/signals/__init__.py | mass-project/mass_server | 8 | 12772459 | from mongoengine import signals
from mass_flask_core.models import AnalysisSystem, Report
from .dispatch_request import update_dispatch_request_for_new_sample, create_requests_for_new_analysis_system
from .copy_report_tags import copy_tags_from_report_to_sample
def connect_signals():
    """Register the mongoengine post-save handlers for the MASS models."""
    # No sender: fires for every document type; the handler filters itself.
    signals.post_save.connect(update_dispatch_request_for_new_sample)
    signals.post_save.connect(create_requests_for_new_analysis_system, sender=AnalysisSystem)
    signals.post_save.connect(copy_tags_from_report_to_sample, sender=Report)
| 1.945313 | 2 |
bit_manipulation/bit/bit_challenge.py | stephank007/python_challenges | 0 | 12772460 | <reponame>stephank007/python_challenges<filename>bit_manipulation/bit/bit_challenge.py
#!/usr/bin/env python
# coding: utf-8
# This notebook was prepared by [<NAME>](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# # Challenge Notebook
# ## Problem: Implement common bit manipulation operations: get_bit, set_bit, clear_bit, clear_bits_msb_to_index, clear_bits_msb_to_lsb, update_bit.
#
# * [Constraints](#Constraints)
# * [Test Cases](#Test-Cases)
# * [Algorithm](#Algorithm)
# * [Code](#Code)
# * [Unit Test](#Unit-Test)
# * [Solution Notebook](#Solution-Notebook)
# ## Constraints
#
# * Can we assume the inputs are valid?
# * No
# * Can we assume this fits memory?
# * Yes
# ## Test Cases
#
# * None as a number input -> Exception
# * Negative index -> Exception
#
# ### get_bit
# number = 0b10001110, index = 3
# expected = True
# ### set_bit
# number = 0b10001110, index = 4
# expected = 0b10011110
# ### clear_bit
# number = 0b10001110, index = 3
# expected = 0b10000110
# ### clear_bits_msb_to_index
# number = 0b10001110, index = 3
# expected = 0b00000110
# ### clear_bits_index_to_lsb
# number = 0b10001110, index = 3
# expected = 0b10000000
# ### update_bit
# number = 0b10001110, index = 3, value = 1
# expected = 0b10001110
# number = 0b10001110, index = 3, value = 0
# expected = 0b10000110
# number = 0b10001110, index = 0, value = 1
# expected = 0b10001111
# ## Algorithm
#
# Refer to the [Solution Notebook](). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.
# ## Code
# In[ ]:
class Bit(object):
    """Wrapper around an int exposing common single-bit operations.

    Mutating methods update ``self.number`` in place and also return the
    new value, matching the challenge's expected usage.
    """

    def __init__(self, number):
        if number is None:
            # Spec: None as a number input -> Exception
            raise TypeError('number cannot be None')
        self.number = number

    def _validate_index(self, index):
        # Spec: negative (or missing) index -> Exception; shared by all ops.
        if index is None:
            raise TypeError('index cannot be None')
        if index < 0:
            raise ValueError('index cannot be negative')

    def get_bit(self, index):
        """Return True if the bit at `index` is set."""
        self._validate_index(index)
        return (self.number >> index) & 1 == 1

    def set_bit(self, index):
        """Set the bit at `index` to 1 and return the new number."""
        self._validate_index(index)
        self.number |= 1 << index
        return self.number

    def clear_bit(self, index):
        """Clear the bit at `index` and return the new number."""
        self._validate_index(index)
        self.number &= ~(1 << index)
        return self.number

    def clear_bits_msb_to_index(self, index):
        """Clear every bit from the MSB down to `index` (inclusive)."""
        self._validate_index(index)
        # Keep only the bits strictly below `index`.
        self.number &= (1 << index) - 1
        return self.number

    def clear_bits_index_to_lsb(self, index):
        """Clear every bit from `index` (inclusive) down to the LSB."""
        self._validate_index(index)
        self.number &= ~((1 << (index + 1)) - 1)
        return self.number

    def update_bit(self, index, value):
        """Set the bit at `index` to `value` (0 or 1); return the new number."""
        self._validate_index(index)
        if value is None or value not in (0, 1):
            raise ValueError('value must be 0 or 1')
        # Clear the slot first, then OR in the requested value.
        self.number = (self.number & ~(1 << index)) | (value << index)
        return self.number
# ## Unit Test
# **The following unit test is expected to fail until you solve the challenge.**
# In[ ]:
# %load test_bit.py
import unittest
class TestBit(unittest.TestCase):
    # Exercises every Bit operation against the examples listed in the
    # "Test Cases" section of this notebook.

    def test_bit(self):
        number = int('10001110', base=2)
        bit = Bit(number)
        self.assertEqual(bit.get_bit(index=3), True)
        expected = int('10011110', base=2)
        self.assertEqual(bit.set_bit(index=4), expected)
        # A fresh Bit is constructed before each mutating call so earlier
        # operations cannot leak into the next expectation.
        bit = Bit(number)
        expected = int('10000110', base=2)
        self.assertEqual(bit.clear_bit(index=3), expected)
        bit = Bit(number)
        expected = int('00000110', base=2)
        self.assertEqual(bit.clear_bits_msb_to_index(index=3), expected)
        bit = Bit(number)
        expected = int('10000000', base=2)
        self.assertEqual(bit.clear_bits_index_to_lsb(index=3), expected)
        bit = Bit(number)
        self.assertEqual(bit.update_bit(index=3, value=1), number)
        bit = Bit(number)
        expected = int('10000110', base=2)
        self.assertEqual(bit.update_bit(index=3, value=0), expected)
        bit = Bit(number)
        expected = int('10001111', base=2)
        self.assertEqual(bit.update_bit(index=0, value=1), expected)
        print('Success: test_bit')
def main():
    """Run the challenge's unit test directly (notebook-style entry point)."""
    test = TestBit()
    test.test_bit()


if __name__ == '__main__':
    main()
# ## Solution Notebook
#
# Review the [Solution Notebook]() for a discussion on algorithms and code solutions.
| 3.671875 | 4 |
kitsune/wiki/tests/test_es.py | navgurukul-shivani18/kitsune | 4 | 12772461 | from datetime import datetime, timedelta
from nose.tools import eq_
from kitsune.products.tests import ProductFactory, TopicFactory
from kitsune.search.tests.test_es import ElasticTestCase
from kitsune.wiki.tests import (
DocumentFactory, RevisionFactory, HelpfulVoteFactory, RedirectRevisionFactory)
from kitsune.wiki.models import DocumentMappingType, RevisionMetricsMappingType
class DocumentUpdateTests(ElasticTestCase):
    # End-to-end index lifecycle tests for the wiki Document mapping:
    # each test mutates model objects, forces a refresh, and asserts on
    # what Elasticsearch now reports.

    def test_add_and_delete(self):
        """Adding a doc should add it to the search index; deleting should
        delete it."""
        doc = DocumentFactory()
        RevisionFactory(document=doc, is_approved=True)
        self.refresh()
        eq_(DocumentMappingType.search().count(), 1)
        doc.delete()
        self.refresh()
        eq_(DocumentMappingType.search().count(), 0)

    def test_translations_get_parent_tags(self):
        t1 = TopicFactory(display_order=1)
        t2 = TopicFactory(display_order=2)
        p = ProductFactory()
        doc1 = DocumentFactory(
            title=u'Audio too loud',
            products=[p],
            topics=[t1, t2])
        RevisionFactory(document=doc1, is_approved=True)
        doc2 = DocumentFactory(title=u'Audio too loud bork bork', parent=doc1, tags=[u'badtag'])
        RevisionFactory(document=doc2, is_approved=True)
        # Verify the parent has the right tags.
        doc_dict = DocumentMappingType.extract_document(doc1.id)
        eq_(sorted(doc_dict['topic']), sorted([t1.slug, t2.slug]))
        eq_(doc_dict['product'], [p.slug])
        # Verify the translation has the parent's tags.
        doc_dict = DocumentMappingType.extract_document(doc2.id)
        eq_(sorted(doc_dict['topic']), sorted([t1.slug, t2.slug]))
        eq_(doc_dict['product'], [p.slug])

    def test_wiki_topics(self):
        """Make sure that adding topics to a Document causes it to
        refresh the index.
        """
        t = TopicFactory(slug=u'hiphop')
        eq_(DocumentMappingType.search().filter(topic=t.slug).count(), 0)
        doc = DocumentFactory()
        RevisionFactory(document=doc, is_approved=True)
        self.refresh()
        eq_(DocumentMappingType.search().filter(topic=t.slug).count(), 0)
        doc.topics.add(t)
        self.refresh()
        eq_(DocumentMappingType.search().filter(topic=t.slug).count(), 1)
        doc.topics.clear()
        self.refresh()
        # Make sure the document itself is still there and that we didn't
        # accidentally delete it through screwed up signal handling:
        eq_(DocumentMappingType.search().filter().count(), 1)
        eq_(DocumentMappingType.search().filter(topic=t.slug).count(), 0)

    def test_wiki_products(self):
        """Make sure that adding products to a Document causes it to
        refresh the index.
        """
        p = ProductFactory(slug=u'desktop')
        eq_(DocumentMappingType.search().filter(product=p.slug).count(), 0)
        doc = DocumentFactory()
        RevisionFactory(document=doc, is_approved=True)
        self.refresh()
        eq_(DocumentMappingType.search().filter(product=p.slug).count(), 0)
        doc.products.add(p)
        self.refresh()
        eq_(DocumentMappingType.search().filter(product=p.slug).count(), 1)
        doc.products.remove(p)
        self.refresh()
        # Make sure the document itself is still there and that we didn't
        # accidentally delete it through screwed up signal handling:
        eq_(DocumentMappingType.search().filter().count(), 1)
        eq_(DocumentMappingType.search().filter(product=p.slug).count(), 0)

    def test_wiki_no_revisions(self):
        """Don't index documents without approved revisions"""
        # Create a document with no revisions and make sure the
        # document is not in the index.
        doc = DocumentFactory()
        self.refresh()
        eq_(DocumentMappingType.search().count(), 0)
        # Create a revision that's not approved and make sure the
        # document is still not in the index.
        RevisionFactory(document=doc, is_approved=False)
        self.refresh()
        eq_(DocumentMappingType.search().count(), 0)

    def test_wiki_redirects(self):
        """Make sure we don't index redirects"""
        # First create a revision that doesn't have a redirect and
        # make sure it's in the index.
        doc = DocumentFactory(title=u'wool hats')
        RevisionFactory(document=doc, is_approved=True)
        self.refresh()
        eq_(DocumentMappingType.search().query(document_title__match='wool').count(), 1)
        # Now create a revision that is a redirect and make sure the
        # document is removed from the index.
        RedirectRevisionFactory(document=doc)
        self.refresh()
        eq_(DocumentMappingType.search().query(document_title__match='wool').count(), 0)

    def test_wiki_keywords(self):
        """Make sure updating keywords updates the index."""
        # Create a document with a revision with no keywords. It
        # shouldn't show up with a document_keywords term query for
        # 'wool' since it has no keywords.
        doc = DocumentFactory(title=u'wool hats')
        RevisionFactory(document=doc, is_approved=True)
        self.refresh()
        eq_(DocumentMappingType.search().query(
            document_keywords='wool').count(), 0)
        RevisionFactory(document=doc, is_approved=True, keywords='wool')
        self.refresh()
        eq_(DocumentMappingType.search().query(document_keywords='wool').count(), 1)

    def test_recent_helpful_votes(self):
        """Recent helpful votes are indexed properly."""
        # Create a document and verify it doesn't show up in a
        # query for recent_helpful_votes__gt=0.
        r = RevisionFactory(is_approved=True)
        self.refresh()
        eq_(DocumentMappingType.search().filter(
            document_recent_helpful_votes__gt=0).count(), 0)
        # Add an unhelpful vote, it still shouldn't show up.
        HelpfulVoteFactory(revision=r, helpful=False)
        r.document.save()  # Votes don't trigger a reindex.
        self.refresh()
        eq_(DocumentMappingType.search().filter(
            document_recent_helpful_votes__gt=0).count(), 0)
        # Add an helpful vote created 31 days ago, it still shouldn't show up.
        created = datetime.now() - timedelta(days=31)
        HelpfulVoteFactory(revision=r, helpful=True, created=created)
        r.document.save()  # Votes don't trigger a reindex.
        self.refresh()
        eq_(DocumentMappingType.search().filter(
            document_recent_helpful_votes__gt=0).count(), 0)
        # Add an helpful vote created 29 days ago, it should show up now.
        created = datetime.now() - timedelta(days=29)
        HelpfulVoteFactory(revision=r, helpful=True, created=created)
        r.document.save()  # Votes don't trigger a reindex.
        self.refresh()
        eq_(DocumentMappingType.search().filter(
            document_recent_helpful_votes__gt=0).count(), 1)
class RevisionMetricsTests(ElasticTestCase):
    # Index lifecycle tests for the revision-metrics mapping.

    def test_add_and_delete(self):
        """Adding a revision should add it to the index.

        Deleting should delete it.
        """
        r = RevisionFactory()
        self.refresh()
        eq_(RevisionMetricsMappingType.search().count(), 1)
        r.delete()
        self.refresh()
        eq_(RevisionMetricsMappingType.search().count(), 0)

    def test_data_in_index(self):
        """Verify the data we are indexing."""
        p = ProductFactory()
        base_doc = DocumentFactory(locale='en-US', products=[p])
        d = DocumentFactory(locale='es', parent=base_doc)
        r = RevisionFactory(document=d, is_approved=True)
        self.refresh()
        eq_(RevisionMetricsMappingType.search().count(), 1)
        data = RevisionMetricsMappingType.search()[0]
        eq_(data['is_approved'], r.is_approved)
        eq_(data['locale'], d.locale)
        # The product is inherited from the en-US parent document.
        eq_(data['product'], [p.slug])
        eq_(data['creator_id'], r.creator_id)
| 2.171875 | 2 |
examples/ex06_benchmark.py | alchem0x2A/vasp-interactive-test | 11 | 12772462 | <gh_stars>10-100
"""Run benchmark for VaspInteractive on different molecule systems
Benchmark molecules taken from https://wiki.fysik.dtu.dk/gpaw/devel/ase_optimize/ase_optimize.html
"""
import numpy as np
import os
import tempfile
import random
from time import time
from pathlib import Path
import tempfile
from ase.db import connect
from ase.optimize import BFGS
from vasp_interactive import VaspInteractive
from vasp_interactive.vasp_interactive import parse_outcar_iterations
from ase.calculators.vasp import Vasp
curdir = Path(__file__).parent

# Load the benchmark structures shipped alongside this script.
systems = []
with connect(curdir / "systems.db") as conn:
    for row in conn.select():
        atoms = row.toatoms()
        # make pbc=True only for VASP
        atoms.pbc = True
        systems.append(atoms)

# default parameters shared by all vasp calculators
default_params = dict(xc="pbe", ismear=0, sigma=0.01, kspacing=0.5, kgamma=True, npar=4)
# Force-convergence threshold (eV/A) used by every relaxation variant.
fmax = 0.05

# Following functions do the relaxation and returns ionic / electronic steps with wall time
def relax_vasp_interactive(atoms):
    """Vasp Interactive: relax a copy of `atoms` with ASE BFGS.

    Returns (energy, ionic steps, per-step electronic SCF counts, wall time).
    """
    atoms = atoms.copy()
    with tempfile.TemporaryDirectory() as tmpdir:
        params = dict(directory=tmpdir)
        params.update(default_params)
        calc = VaspInteractive(**params)
        t_ = time()
        with calc:
            atoms.calc = calc
            # Suppress output
            dyn = BFGS(atoms)
            dyn.run(fmax=fmax)
            n_ion, n_elec = calc.read_all_iterations()
        t_wall = time() - t_
        e = atoms.get_potential_energy()
        # The trailing (stop) iteration is excluded from the counts.
        return e, n_ion - 1, n_elec[:-1], t_wall
def relax_vasp_bfgs(atoms):
    """Classic vasp + bfgs, no wave function reloading.

    Returns (energy, ionic steps, per-step electronic SCF counts, wall time).
    """
    atoms = atoms.copy()
    with tempfile.TemporaryDirectory() as tmpdir:
        # ibrion=-1 / nsw=0: VASP does single-point runs only; BFGS moves ions.
        params = dict(ibrion=-1, nsw=0, directory=tmpdir)
        params.update(default_params)
        calc = Vasp(**params)
        atoms.calc = calc
        dyn = BFGS(atoms, logfile=None)
        n_elec = []
        n_ion = 1
        # Use manual force threshold in order to read the iterations
        t_ = time()
        f = np.abs(atoms.get_forces()).max()
        n_elec.append(calc.read_number_of_iterations())
        while f > fmax:
            dyn.step()
            n_ion += 1
            f = np.abs(atoms.get_forces()).max()
            n_elec.append(calc.read_number_of_iterations())
        n_elec = np.array(n_elec)
        t_wall = time() - t_
        e = atoms.get_potential_energy()
        return e, n_ion, n_elec, t_wall
def relax_vasp(atoms):
    """Classic vasp: let VASP's internal optimizer (ibrion=2) relax the copy.

    Returns (energy, ionic steps, per-step electronic SCF counts, wall time).
    """
    atoms = atoms.copy()
    with tempfile.TemporaryDirectory() as tmpdir:
        params = dict(istart=0, ediffg=-fmax, ibrion=2, nsw=500, directory=tmpdir)
        params.update(default_params)
        calc = Vasp(**params)
        atoms.calc = calc
        t_ = time()
        atoms.get_potential_energy()
        # Iteration counts are recovered by parsing the OUTCAR afterwards.
        n_ion, n_elec = parse_outcar_iterations(calc.load_file("OUTCAR"))
        t_wall = time() - t_
        e = atoms.get_potential_energy()
        return e, n_ion, n_elec, t_wall
def compute():
    """Run (or resume) the benchmark for every system.

    Results are cached in benchmark.pkl and flushed after each newly
    computed system, so an interrupted run can be resumed.
    """
    import pickle

    res_file = curdir / "benchmark.pkl"
    if res_file.is_file():
        with open(res_file, "rb") as fd:
            results = pickle.load(fd)
    else:
        results = dict()

    for atoms in systems:
        name = atoms.get_chemical_formula()
        if name in results.keys():
            print(f"Results for {name} loaded from pickle")
            continue
        print(f"Relaxation for {name}")
        res = dict()
        print("\tVasp Interactive...")
        res["vasp-inter"] = relax_vasp_interactive(atoms)
        print("\tVasp BFGS...")
        res["vasp-bfgs"] = relax_vasp_bfgs(atoms)
        print("\tVasp only...")
        res["vasp"] = relax_vasp(atoms)
        results[name] = res
        # Save at each epoch
        with open(res_file, "wb") as fd:
            pickle.dump(results, fd, protocol=3)
    return results
def plot_benchmark(results):
    """Bar charts of wall time and total SCF count relative to pure VASP."""
    import matplotlib.pyplot as plt
    fig, axes = plt.subplots(1, 2, figsize=(10, 4))
    ax1 = axes[0]
    ax2 = axes[1]
    w = 0.15  # half-offset of each bar group from the tick
    # N electronic steps
    n1s = []
    n2s = []
    # time
    t1s = []
    t2s = []
    for i, key in enumerate(results.keys()):
        e, n, n1, t1 = results[key]["vasp-inter"]
        e, n, n2, t2 = results[key]["vasp-bfgs"]
        e, n, n3, t3 = results[key]["vasp"]
        # number of steps (difference vs pure VASP)
        n1s.append(np.sum(n1) - np.sum(n3))
        n2s.append(np.sum(n2) - np.sum(n3))
        # time (ratio vs pure VASP)
        t1s.append(t1 / t3)
        t2s.append(t2 / t3)
    d = np.arange(len(results))
    w1 = 0.2  # NOTE(review): unused — confirm and remove
    ax1.bar(d - w, t1s, w * 2, label="VaspInteractive + BFGS")
    ax1.bar(d + w, t2s, w * 2, label="Vasp + BFGS")
    ax1.axhline(y=1, ls="--", color="grey")
    ax1.set_xticks(d)
    ax1.set_xticklabels(list(results.keys()))
    ax1.set_title("Rel. Time to Pure VASP")
    ax1.set_ylabel(r"$t / t_{\mathrm{VASP}}$")
    ax1.legend()
    # steps plot
    ax2.bar(d - w, n1s, w * 2, label="VaspInteractive + BFGS")
    ax2.bar(d + w, n2s, w * 2, label="Vasp + BFGS")
    ax2.set_xticks(d)
    ax2.axhline(y=0, ls="--", color="grey")
    ax2.set_xticklabels(list(results.keys()))
    ax2.set_title("Rel. Total Electronic SCFs to Pure VASP")
    ax2.set_ylabel(r"$N^{\mathrm{SCF}} - N^{\mathrm{SCF}}_{\mathrm{VASP}}$")
    ax2.legend()
    fig.tight_layout()
    fig.savefig(curdir / "benchmark.png")
def plot_details(results):
    """Step plot of electronic SCFs per ionic cycle for the CAu8O system."""
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(1, 1, figsize=(6, 4))
    name = "CAu8O"
    disp_name = {
        "vasp-inter": "VaspInteractive + BFGS",
        "vasp-bfgs": "Vasp + BFGS",
        "vasp": "Pure VASP",
    }
    for met in ("vasp-inter", "vasp-bfgs", "vasp"):
        # Index 2 of each result tuple holds the per-step SCF counts.
        steps = results[name][met][2]
        ax.step(range(1, len(steps) + 1), steps, label=disp_name[met])
    ax.legend()
    ax.set_xlabel("Ionic steps")
    ax.set_ylabel("Electronic SCF per Ionic Cycle")
    ax.set_title("CO on Au(111) surface (CAu8O)")
    fig.tight_layout()
    fig.savefig(curdir / "details.png")
if __name__ == "__main__":
    # Compute (or load cached) benchmark data, then render both figures.
    results = compute()
    plot_benchmark(results)
    plot_details(results)
| 2.125 | 2 |
src/edgeorder/azext_edgeorder/generated/custom.py | haroonf/azure-cli-extensions | 1 | 12772463 | <filename>src/edgeorder/azext_edgeorder/generated/custom.py
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
def edgeorder_order_show(client,
                         name,
                         resource_group_name,
                         location):
    """Fetch a single edge order by name, resource group and location."""
    return client.get_order_by_name(order_name=name,
                                    resource_group_name=resource_group_name,
                                    location=location)
def edgeorder_list_config(client,
                          configuration_filters,
                          skip_token=None,
                          registered_features=None,
                          location_placement_id=None,
                          quota_id=None):
    """List product configurations matching the given filters.

    Customer-subscription details are attached to the request only when at
    least one of the optional detail arguments is provided.
    """
    subscription_details = {
        key: value
        for key, value in (
            ('registered_features', registered_features),
            ('location_placement_id', location_placement_id),
            ('quota_id', quota_id),
        )
        if value is not None
    }
    configurations_request = {'configuration_filters': configuration_filters}
    if subscription_details:
        configurations_request['customer_subscription_details'] = subscription_details
    return client.list_configurations(skip_token=skip_token,
                                      configurations_request=configurations_request)
def edgeorder_list_family(client,
                          filterable_properties,
                          expand=None,
                          skip_token=None,
                          registered_features=None,
                          location_placement_id=None,
                          quota_id=None):
    """List product families matching the given filterable properties.

    Customer-subscription details are attached to the request only when at
    least one of the optional detail arguments is provided.
    """
    subscription_details = {
        key: value
        for key, value in (
            ('registered_features', registered_features),
            ('location_placement_id', location_placement_id),
            ('quota_id', quota_id),
        )
        if value is not None
    }
    product_families_request = {'filterable_properties': filterable_properties}
    if subscription_details:
        product_families_request['customer_subscription_details'] = subscription_details
    return client.list_product_families(expand=expand,
                                        skip_token=skip_token,
                                        product_families_request=product_families_request)
def edgeorder_list_metadata(client,
                            skip_token=None):
    """List product-family metadata; ``skip_token`` continues a paged listing."""
    return client.list_product_families_metadata(skip_token=skip_token)
def edgeorder_list_operation(client):
    """List the operations supported by the Edge Order resource provider."""
    return client.list_operations()
def edgeorder_address_list(client,
                           resource_group_name=None,
                           filter_=None,
                           skip_token=None):
    """List addresses, scoped to one resource group when a name is given."""
    if not resource_group_name:
        return client.list(filter=filter_, skip_token=skip_token)
    return client.list_by_group(resource_group_name=resource_group_name,
                                filter=filter_,
                                skip_token=skip_token)
def edgeorder_address_show(client,
                           address_name,
                           resource_group_name):
    """Fetch a single address resource by name from a resource group."""
    return client.get_address_by_name(address_name=address_name,
                                      resource_group_name=resource_group_name)
def edgeorder_address_create(client,
                             address_name,
                             resource_group_name,
                             location,
                             contact_details,
                             tags=None,
                             shipping_address=None):
    """Begin creating an address resource; returns the LRO poller.

    ``location`` and ``contact_details`` are always sent; ``tags`` and
    ``shipping_address`` only when the caller supplied them.
    """
    required_keys = ('location', 'contact_details')
    candidates = (('tags', tags),
                  ('location', location),
                  ('shipping_address', shipping_address),
                  ('contact_details', contact_details))
    address_resource = {key: value for key, value in candidates
                        if key in required_keys or value is not None}
    return client.begin_create_address(address_name=address_name,
                                       resource_group_name=resource_group_name,
                                       address_resource=address_resource)
def edgeorder_address_update(client,
                             address_name,
                             resource_group_name,
                             if_match=None,
                             tags=None,
                             shipping_address=None,
                             contact_details=None):
    """Begin patching an address; only caller-supplied fields are sent."""
    address_update_parameter = {
        key: value
        for key, value in (('tags', tags),
                           ('shipping_address', shipping_address),
                           ('contact_details', contact_details))
        if value is not None
    }
    return client.begin_update_address(address_name=address_name,
                                       resource_group_name=resource_group_name,
                                       if_match=if_match,
                                       address_update_parameter=address_update_parameter)
def edgeorder_address_delete(client,
                             address_name,
                             resource_group_name):
    """Begin deleting the named address; returns the LRO poller."""
    return client.begin_delete_address_by_name(address_name=address_name,
                                               resource_group_name=resource_group_name)
def edgeorder_order_list(client,
                         resource_group_name=None,
                         skip_token=None):
    """List orders at subscription scope, or within one resource group."""
    if not resource_group_name:
        return client.list(skip_token=skip_token)
    return client.list_by_group(resource_group_name=resource_group_name,
                                skip_token=skip_token)
def edgeorder_order_item_list(client,
                              resource_group_name=None,
                              filter_=None,
                              expand=None,
                              skip_token=None):
    """List order items, scoped to one resource group when a name is given."""
    if not resource_group_name:
        return client.list(filter=filter_, expand=expand, skip_token=skip_token)
    return client.list_by_group(resource_group_name=resource_group_name,
                                filter=filter_,
                                expand=expand,
                                skip_token=skip_token)
def edgeorder_order_item_show(client,
                              order_item_name,
                              resource_group_name,
                              expand=None):
    """Fetch a single order item; ``expand`` selects extra detail to include."""
    return client.get_order_item_by_name(order_item_name=order_item_name,
                                         resource_group_name=resource_group_name,
                                         expand=expand)
def edgeorder_order_item_create(client,
                                order_item_name,
                                resource_group_name,
                                order_item_resource):
    """Begin creating an order item from a fully-formed resource body."""
    return client.begin_create_order_item(order_item_name=order_item_name,
                                          resource_group_name=resource_group_name,
                                          order_item_resource=order_item_resource)
def edgeorder_order_item_update(client,
                                order_item_name,
                                resource_group_name,
                                if_match=None,
                                tags=None,
                                notification_email_list=None,
                                notification_preferences=None,
                                transport_preferences=None,
                                encryption_preferences=None,
                                management_resource_preferences=None,
                                shipping_address=None,
                                contact_details=None):
    """Begin patching an order item.

    The nested ``preferences`` and ``forward_address`` objects are built
    from their optional parts and sent only when non-empty.
    """
    preferences = {
        key: value
        for key, value in (
            ('notification_preferences', notification_preferences),
            ('transport_preferences', transport_preferences),
            ('encryption_preferences', encryption_preferences),
            ('management_resource_preferences', management_resource_preferences))
        if value is not None
    }
    forward_address = {
        key: value
        for key, value in (('shipping_address', shipping_address),
                           ('contact_details', contact_details))
        if value is not None
    }
    order_item_update_parameter = {}
    if tags is not None:
        order_item_update_parameter['tags'] = tags
    if notification_email_list is not None:
        order_item_update_parameter['notification_email_list'] = notification_email_list
    if preferences:
        order_item_update_parameter['preferences'] = preferences
    if forward_address:
        order_item_update_parameter['forward_address'] = forward_address
    return client.begin_update_order_item(order_item_name=order_item_name,
                                          resource_group_name=resource_group_name,
                                          if_match=if_match,
                                          order_item_update_parameter=order_item_update_parameter)
def edgeorder_order_item_delete(client,
                                order_item_name,
                                resource_group_name):
    """Begin deleting the named order item; returns the LRO poller."""
    return client.begin_delete_order_item_by_name(order_item_name=order_item_name,
                                                  resource_group_name=resource_group_name)
def edgeorder_order_item_cancel(client,
                                order_item_name,
                                resource_group_name,
                                reason):
    """Cancel an order item, recording the caller-supplied reason."""
    return client.cancel_order_item(order_item_name=order_item_name,
                                    resource_group_name=resource_group_name,
                                    cancellation_reason={'reason': reason})
def edgeorder_order_item_return(client,
                                order_item_name,
                                resource_group_name,
                                return_reason,
                                service_tag=None,
                                shipping_box_required=None,
                                shipping_address=None,
                                contact_details=None):
    """Begin returning an order item.

    ``shipping_box_required`` defaults to False when the caller omits it;
    the ``return_address`` object is sent only when at least one of its
    parts was supplied.
    """
    return_order_item_details = {'return_reason': return_reason}
    if service_tag is not None:
        return_order_item_details['service_tag'] = service_tag
    return_order_item_details['shipping_box_required'] = (
        False if shipping_box_required is None else shipping_box_required)
    return_address = {
        key: value
        for key, value in (('shipping_address', shipping_address),
                           ('contact_details', contact_details))
        if value is not None
    }
    if return_address:
        return_order_item_details['return_address'] = return_address
    return client.begin_return_order_item(order_item_name=order_item_name,
                                          resource_group_name=resource_group_name,
                                          return_order_item_details=return_order_item_details)
| 1.703125 | 2 |
utils/matlab_call_hevc_tools/saveFileYuv.py | roman-vygon/pyFAST | 0 | 12772464 | # Generated with SMOP 0.41
from libsmop import *
# saveFileYuv.m
def saveFileYuv(mov=None, fileName=None, mode=None):
    """Write an RGB movie (values in [0, 255]) to a YUV 4:2:0 planar file.

    Machine-translated from MATLAB by SMOP; `fopen`/`fwrite`/`arange` etc.
    come from libsmop's MATLAB-emulation layer (1-based indexing).
    mode 1 = overwrite, mode 2 = append, anything else = overwrite.
    NOTE(review): assumes frame width/height are even so the 2x chroma
    subsampling below is exact -- confirm with callers.
    """
    # save RGB movie [0, 255] to YUV 4:2:0 file
    if 1 == mode:
        fileId = fopen(fileName, 'w')
    # saveFileYuv.m:11
    else:
        if 2 == mode:
            fileId = fopen(fileName, 'a')
        # saveFileYuv.m:15
        else:
            fileId = fopen(fileName, 'w')
    # saveFileYuv.m:19
    # Frame dimensions taken from the first frame's image data.
    dim = size(mov(1).cdata)
    # saveFileYuv.m:25
    nrFrame = length(mov)
    # saveFileYuv.m:27
    for f in arange(1, nrFrame, 1).reshape(-1):
        imgRgb = frame2im(mov(f))
        # saveFileYuv.m:33
        # Flatten to (H*W, 3), convert RGB->YUV, restore the 3-plane shape.
        imgYuv = reshape(convertRgbToYuv(reshape(imgRgb, dot(dim(1), dim(2)), 3)), dim(1), dim(2), 3)
        # saveFileYuv.m:39
        # Full-resolution luma (Y) plane, written row-major via transpose.
        buf = reshape(imgYuv(arange(), arange(), 1).T, [], 1)
        # saveFileYuv.m:45
        count = fwrite(fileId, buf, 'uchar')
        # saveFileYuv.m:47
        # Chroma U plane, subsampled 2x in both directions (4:2:0).
        buf = reshape(imgYuv(arange(1, end(), 2), arange(1, end(), 2), 2).T, [], 1)
        # saveFileYuv.m:53
        count = fwrite(fileId, buf, 'uchar')
        # saveFileYuv.m:55
        # Chroma V plane, same 2x subsampling.
        buf = reshape(imgYuv(arange(1, end(), 2), arange(1, end(), 2), 3).T, [], 1)
        # saveFileYuv.m:61
        count = fwrite(fileId, buf, 'uchar')
        # saveFileYuv.m:63
    fclose(fileId)
| 2.1875 | 2 |
hello.py | iFenr1r/d2bot | 0 | 12772465 | #!/usr/bin/env python
# -*- coding: utf-8 -*
from __future__ import print_function
from cloudant import Cloudant
import atexit
import cf_deployment_tracker
import os
import od_python
import time
import json
from flask import jsonify, Flask, current_app, render_template, request, json
import csv
import sys
from od_python.rest import ApiException
from pprint import pprint
from flask_cors import CORS, cross_origin
from watson_developer_cloud import ConversationV1
# Emit Bluemix deployment event
cf_deployment_tracker.track()
app = Flask(__name__)
CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
Playerapi = od_python.PlayersApi()
Heroapi = od_python.HeroesApi()
Herostats = od_python.HeroStatsApi()
db_name = 'mydb'
client = None
db = None
if 'VCAP_SERVICES' in os.environ:
vcap = json.loads(os.getenv('VCAP_SERVICES'))
print('Found VCAP_SERVICES')
if 'cloudantNoSQLDB' in vcap:
creds = vcap['cloudantNoSQLDB'][0]['credentials']
user = creds['username']
password = creds['password']
url = 'https://' + creds['host']
client = Cloudant(user, password, url=url, connect=True)
db = client.create_database(db_name, throw_on_exists=False)
elif os.path.isfile('vcap-local.json'):
with open('vcap-local.json') as f:
vcap = json.load(f)
print('Found local VCAP_SERVICES')
creds = vcap['services']['cloudantNoSQLDB'][0]['credentials']
user = creds['username']
password = <PASSWORD>['password']
url = 'https://' + creds['host']
client = Cloudant(user, password, url=url, connect=True)
db = client.create_database(db_name, throw_on_exists=False)
# On Bluemix, get the port number from the environment variable PORT
# When running this app on the local machine, default the port to 8000
port = int(os.getenv('PORT', 8000))
def maisjogados(ID):
    """Build an HTML fragment listing the player's 19 most-played heroes.

    Uses the module-level OpenDota clients. NOTE(review): the ``- m`` index
    adjustment presumably compensates for a gap in hero ids above 23 --
    confirm against the OpenDota heroes list. On API failure this prints
    the error and implicitly returns None.
    """
    try:
        maisjogados = ("<div class='row'><div class='col-sm-12'><p class='text-center nomeplayer' id='nome'>Herois mais jogados:</p></div>")
        api_stats = Playerapi.players_account_id_heroes_get(ID)
        api_heroi = Heroapi.heroes_get()
        api_hero_image = Herostats.hero_stats_get()
        m = 1
        for i in range(0,19):
            # Offset of 1 or 2 applied when mapping hero_id to a list index.
            if api_stats[i].hero_id > 23:
                m = 2
            else:
                m =1
            nomehero = str(api_heroi[api_stats[i].hero_id - m].localized_name)
            icone = str(api_hero_image[api_stats[i].hero_id - m].icon)
            jogadas = str(api_stats[i].games)
            # Win percentage rounded to one decimal place.
            porcentagem = str(round(float(api_stats[i].win) / float(api_stats[i].games) * 100,1))
            maisjogados = maisjogados + ("<div class='row'><div class='col-sm-12'><p class='text-center nomeplayer' id='hero'>Heroi: " + nomehero + " Icone: <img src='http://cdn.dota2.com" + icone + "'> Partidas jogadas: "+ jogadas + " % Vitoria: " + porcentagem+"</p></div>")
        return maisjogados
    except ApiException as e:
        print("Exception when calling BenchmarksApi->benchmarks_get: %s\n" % e)
# Fetch estimated MMR (original comment: "PEGA MMR ESTIMADO")
def perfil():
    """Placeholder profile handler; returns a fixed greeting string."""
    return "OLA PORRA"
def perfill(id_player):
    """Build an HTML profile summary (nickname, estimated/solo MMR, top heroes)."""
    try:
        api_response = Playerapi.players_account_id_get(id_player)
        resp = ("<div class='row'><div class='col-sm-4'><p class='text-center nomeplayer' id='nome'>NICKNAME: "+api_response.profile.personaname+"</p></div>")
        resp = resp + ("<div class='col-sm-4'><p class='text-center nomeplayer' id='nome'>MMR ESTIMADO: "+ str(api_response.mmr_estimate.estimate)+"</p></div>")
        resp = resp + ("<div class='col-sm-4'><p class='text-center nomeplayer' id='nome'>MMR SOLO: "+str(api_response.solo_competitive_rank)+"</p></div>")
        resp = resp + str(maisjogados(id_player))
        return resp
    except ApiException as e:
        print("Exception when calling BenchmarksApi->benchmarks_get: %s\n" % e)
def herostats(id_heroi):
    """Return a small text blurb (name and id) for the given hero id.

    NOTE(review): the ``> 23`` decrement mirrors the index adjustment in
    maisjogados -- confirm the intended hero-id mapping.
    """
    try:
        # GET /heroStats
        if id_heroi > 23:
            id_heroi = id_heroi-1
        api_response = Heroapi.heroes_get()
        resp = ("Heroi: " + str(api_response[id_heroi-1].localized_name))
        resp = resp + ("\nID: " + str(api_response[id_heroi-1].id))
        return resp
    except ApiException as e:
        print("Exception when calling HeroStatsApi->hero_stats_get: %s\n" % e)
@app.route('/')
def hello_world():
    """Serve the static landing page."""
    return render_template('index.html')
@app.route('/', methods=['POST', 'GET'])
@cross_origin()
def bot():
    """Chatbot endpoint: forward the posted text to Watson Conversation and
    dispatch on the dialog node it visited.

    NOTE(review): several issues visible in this handler -- the Watson
    credentials are dataset-redacted placeholders; ``resp`` may be unbound
    when there are entities but no intents and no output text; and the
    "perfil" branch returns before its remaining statements (see below).
    """
    texto = request.form['text']
    print(texto)
    conversation = ConversationV1(
        username='a33eb2c9-d218-4e05-a8ff-a46b<PASSWORD>1',
        password='<PASSWORD>',
        version='2017-05-26'
    )
    context = {}
    workspace_id = '96cbce3b-2fd3-49b0-<PASSWORD>-da<PASSWORD>'
    user = texto
    response = conversation.message(
        workspace_id=workspace_id,
        message_input={'text': user},
        context=context
    )
    context = response['context']
    # If there are intents and dialog info (original: "se ha intencoes e dialogo")
    if response['intents'] or response['entities']:
        if response['intents']:
            # json.dumps wraps the value in quotes; [1:-1] strips them.
            intent = (json.dumps(response['intents'][0]['intent'],indent = 2))
            intent = intent[1:-1]
            dialog = (json.dumps(response['output']['nodes_visited'][0],indent = 2))
            dialog = dialog[1:-1]
            # If there is a response to the intent (original: "se ha resposta a intencao")
            if response['output']['text']:
                resposta = json.dumps(response['output']['text'][0],sort_keys=True, indent=4)
                resposta = resposta[1:-1]
                resposta = resposta.encode('utf-8')
                resp = (resposta.decode('unicode-escape'))
            # If a specific dialog node was entered (original: "se entrou no no x,y,z")
            if dialog == "perfil_e_id":
                ID = (json.dumps(response['entities'][0]['value'],indent = 2))
                ID = ID[1:-1]
                print (ID)
                resp = perfill(ID)
            elif dialog == "perfil":
                return resp
                # NOTE(review): unreachable -- the three statements below can
                # never run because of the return just above.
                ID = texto
                print ("Ikrl : " + ID)
                resp = perfil()
            elif dialog == "heroi":
                ID = (json.dumps(response['entities'][0]['value']))
                ID = ID[1:-1]
                resp = herostats(int(ID))
                return resp
            elif dialog == "counterheroi":
                ID = (json.dumps(response['entities'][0]['value']))
                ID = ID[1:-1]
                resp = herostats(int(ID))
                return resp
        else:
            resposta = json.dumps(response['output']['text'][0],sort_keys=True, indent=4)
            resposta = resposta[1:-1]
            resposta = resposta.encode('utf-8')
            resp = (resposta.decode('unicode-escape'))
    print (resp)
    return resp
if __name__ == "__main__":
    # Local development entry point; Bluemix runs the app via its own server.
    app.run()
| 2.078125 | 2 |
xl_tensorflow/models/vision/classification/__init__.py | Lannister-Xiaolin/xl_tensorflow | 0 | 12772466 | <filename>xl_tensorflow/models/vision/classification/__init__.py
#!usr/bin/env python3
# -*- coding: UTF-8 -*-
from .utils import AutoAugment, RandAugment, image_from_tfrecord, images2tfrecord
from .efficientnet import EfficientNetB0, EfficientNetB1, EfficientNetB2, EfficientNetB3, EfficientNetB4, \
EfficientNetB5, EfficientNetB6, EfficientNetB7, EfficientNetLiteB4, EfficientNetLiteB3, EfficientNetLiteB2, \
EfficientNetLiteB1, EfficientNetLiteB0
from .darknet import DarkNet53,CspDarkNet53 | 1.304688 | 1 |
setup.py | luigibonati/md-stateinterpreter | 1 | 12772467 |
"""
stateinterpreter
Interpretation of metastable states from MD simulations
"""
import sys
from setuptools import setup, find_packages, Extension
import versioneer
import numpy
# Platform-dependent compile/link flags for the OpenMP-enabled C extension.
os_name = sys.platform
compile_args = ["-O3", "-ffast-math", "-march=native", "-fopenmp" ]
libraries = ["m"]
link_args = ['-fopenmp']
if os_name.startswith('darwin'):
    #clang compilation: Apple clang needs -Xpreprocessor before -fopenmp
    # and links against libomp instead of GCC's libgomp.
    compile_args.insert(-1, "-Xpreprocessor")
    libraries.append("omp")
    link_args.insert(-1, "-Xpreprocessor")
__cython__ = False # command line option, try-import, ...
try:
    import Cython
    __cython__ = True
except ModuleNotFoundError:
    __cython__ = False
# Build from the .pyx source when Cython is available, else the shipped .c.
ext = '.pyx' if __cython__ else '.c'
short_description = "Interpretation of metastable states from MD simulations".split("\n")[0]
# from https://github.com/pytest-dev/pytest-runner#conditional-requirement
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
try:
    with open("README.md", "r") as handle:
        long_description = handle.read()
# NOTE(review): bare except -- intentionally tolerates a missing README,
# but it also hides unrelated errors; `except OSError:` would be safer.
except:
    long_description = None
ext_modules=[
    Extension("stateinterpreter.utils._compiled_numerics",
        ["stateinterpreter/utils/_compiled_numerics"+ext],
        libraries=libraries,
        include_dirs=[numpy.get_include()],
        extra_compile_args = compile_args,
        extra_link_args= link_args
        )
]
if __cython__:
    from Cython.Build import cythonize
    ext_modules = cythonize(ext_modules)
setup(
    # Self-descriptive entries which should always be present
    name='stateinterpreter',
    author='<NAME> <<EMAIL>>, <NAME> <pietro.<EMAIL>li.iit>"',
    description=short_description,
    long_description=long_description,
    long_description_content_type="text/markdown",
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    license='MIT',
    # Which Python importable modules should be included when your package is installed
    # Handled automatically by setuptools. Use 'exclude' to prevent some specific
    # subpackage(s) from being added, if needed
    packages=find_packages(),
    # Optional include package data to ship with your package
    # Customize MANIFEST.in if the general case does not suit your needs
    # Comment out this line to prevent the files from being packaged with your software
    include_package_data=True,
    # Allows `setup.py test` to work correctly with pytest
    setup_requires=[] + pytest_runner,
    ext_modules = ext_modules,
    zip_safe = False,
    # Additional entries you may want simply uncomment the lines you want and fill in the data
    # url='http://www.my_package.com',  # Website
    # install_requires=[],              # Required packages, pulls from pip if needed; do not use for Conda deployment
    # platforms=['Linux',
    #            'Mac OS-X',
    #            'Unix',
    #            'Windows'],            # Valid platforms your code works on, adjust to your flavor
    # python_requires=">=3.5",          # Python version restrictions
    # Manual control if final package is compressible or not, set False to prevent the .egg from being made
    # zip_safe=False,
)
cogs/count.py | OrangutanGaming/OG_SelfBot | 0 | 12772468 | <gh_stars>0
from discord.ext import commands
import discord
class Count():
    """discord.py cog with message-counting and quote-recall commands."""
    def __init__(self, bot):
        self.bot = bot
    @commands.command(aliases=["mcount"])
    async def msgcount(self, ctx, user: discord.Member = None, channel: discord.TextChannel = None):
        """Count a user's messages among the last 99 in a channel.

        Defaults to the invoking author and current channel. The invoking
        command message is deleted and counted as one message, so the cap
        (history limit 99 + 1) is exactly 100, reported as "at least".
        """
        counter = 0
        tmp = await ctx.send(self.bot.blank + "Counting messages...")
        if not user:
            user = ctx.message.author
        if not channel:
            channel = ctx.message.channel
        async for log in channel.history(limit=99, before=ctx.message):
            if log.author == user:
                counter += 1
        await ctx.message.delete()
        # Account for the (now deleted) command message itself.
        counter += 1
        if counter == 100:
            await tmp.edit(content="{} has at least {} messages in {}".format(user, counter, channel.mention))
        elif counter == 1:
            await tmp.edit(content="{} has 1 message in {}".format(user, channel.mention))
        elif counter <= 99:
            await tmp.edit(content="{} has {} messages in {}".format(user, counter, channel.mention))
        else:
            # Unreachable given the 99-message history limit; kept as a guard.
            await tmp.edit(content="Counter Bug")
    @commands.command(aliases=["amcount"])
    async def amsgcount(self, ctx, channel: discord.TextChannel = None):
        """Count all messages (any author) among the last 99 in a channel."""
        counter = 0
        tmp = await ctx.send(self.bot.blank + "Counting messages...")
        if not channel:
            channel = ctx.message.channel
        async for message in channel.history(before=ctx.message, limit=99):
            counter += 1
        await ctx.message.delete()
        # Account for the (now deleted) command message itself.
        counter += 1
        if counter == 100:
            await tmp.edit(content="There are now at least {} messages in {}".format(counter, channel.mention))
        elif counter == 1:
            await tmp.edit(content="There is now 1 message in {}".format(channel.mention))
        elif counter <= 99:
            await tmp.edit(content="There are now {} messages in {}".format(counter, channel.mention))
        else:
            await tmp.edit(content="Counter Bug")
    @commands.command(aliases=["recents", "last"])
    async def recent(self, ctx, user: discord.Member = None, channel: discord.TextChannel = None):
        """Quote the most recent message by a user within the last 100.

        History iterates newest-first, so the first match is the latest.
        """
        if not channel:
            channel = ctx.message.channel
        if not user:
            user = ctx.message.author
        quote = None
        async for message in channel.history(before=ctx.message, limit=100):
            if message.author == user:
                quote = message
                embed = discord.Embed(description=quote.content)
                embed.set_author(name=quote.author.name, icon_url=quote.author.avatar_url)
                embed.set_footer(text=(quote.created_at))
                await ctx.message.delete()
                await ctx.send(embed=embed)
                return
            # NOTE(review): redundant -- the loop advances anyway; kept as-is.
            if not quote:
                continue
        # No matching message in the scanned window.
        embed = discord.Embed(description="No message found")
        await ctx.send(embed=embed)
        await ctx.message.delete()
        return
def setup(bot):
    # Entry point used by discord.py's extension loader (bot.load_extension).
    bot.add_cog(Count(bot))
testasm/serve.py | leetim/leetim.github.io | 0 | 12772469 | <reponame>leetim/leetim.github.io<filename>testasm/serve.py
# Minimal static file server. Python 2 modules: SimpleHTTPServer and
# SocketServer were merged into http.server / socketserver in Python 3.
import SimpleHTTPServer
import SocketServer
# Port the development server listens on.
PORT = 8000
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
# Bind to all interfaces and serve the current working directory forever.
httpd = SocketServer.TCPServer(("", PORT), Handler)
httpd.serve_forever()
| 2.109375 | 2 |
sharper.py | TomasRoj/PythonCLI | 0 | 12772470 | <filename>sharper.py
import click
from pyfiglet import Figlet
from PIL import Image, ImageFilter, ImageFont, ImageDraw, ImageEnhance
def sharpen(path):
    """Open the image at *path*, apply Pillow's SHARPEN filter and show it.

    Prints a styled success message, or the error in the same styled form.
    (Fixes the misspelled user-facing message "sucesfully" and makes the
    error path use click styling instead of a bare print.)
    """
    try:
        img = Image.open(path)
        img.filter(ImageFilter.SHARPEN()).show()
        click.secho("Image processed successfully!", fg="green", bold=True)
    except Exception as e:
        # Report the failure visibly rather than an unstyled print; the
        # broad except mirrors the original CLI-friendly behavior.
        click.secho(str(e), fg="red", bold=True)
| 3.015625 | 3 |
api/serializers.py | SchoolOrchestration/ProcessEngineV2 | 0 | 12772471 | <filename>api/serializers.py
'''
Serializers for all the things
'''
from rest_framework import serializers
from .models import (
Process,
ProcessDefinition,
ProcessTask,
RegisteredService,
RegisteredTask,
Task,
Result
)
from django.contrib.auth import get_user_model
class UserSerializer(serializers.ModelSerializer):
    """Serialize the active user model, omitting the password hash.

    The previous version set ``fields = '__all__'`` together with
    ``exclude_fields``, which is not a DRF Meta option -- so the password
    was silently included in API output. DRF requires ``exclude`` (and
    forbids combining it with ``fields``) to drop a model field.
    """
    class Meta:
        model = get_user_model()
        exclude = ('password',)
class RegisteredServiceSerializer(serializers.ModelSerializer):
    """Expose all fields of a RegisteredService."""
    class Meta:
        model = RegisteredService
        fields = '__all__'
class RegisteredTaskSerializer(serializers.ModelSerializer):
    """Expose all fields of a RegisteredTask."""
    class Meta:
        model = RegisteredTask
        fields = '__all__'
class ProcessTaskSerializer(serializers.ModelSerializer):
    """ProcessTask with its RegisteredTask nested read-only."""
    registered_task = RegisteredTaskSerializer(many=False, read_only=True)
    class Meta:
        model = ProcessTask
        fields = '__all__'
        # depth = 1
class ProcessDefinitionSerializer(serializers.ModelSerializer):
    """ProcessDefinition with its ordered ProcessTasks nested read-only."""
    tasks = ProcessTaskSerializer(source='processtask_set', read_only=True, many=True)
    class Meta:
        model = ProcessDefinition
        fields = '__all__'
class ResultSerializer(serializers.ModelSerializer):
    """Expose all fields of a Result."""
    class Meta:
        model = Result
        fields = '__all__'
class TaskSerializer(serializers.ModelSerializer):
    """Task with its Results nested read-only."""
    result_set = ResultSerializer(many=True, read_only=True)
    class Meta:
        model = Task
        fields = '__all__'
class ProcessSerializer(serializers.ModelSerializer):
    """Process with its Tasks nested read-only; depth=1 expands relations."""
    task_set = TaskSerializer(many=True, read_only=True)
    class Meta:
        model = Process
        fields = '__all__'
        depth = 1
src/data/initial_investigation.py | CarolineFuglsang/san_fransico_crime_data | 0 | 12772472 | <filename>src/data/initial_investigation.py<gh_stars>0
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Exploratory analysis of the San Francisco crime dataset, meant to be run
# interactively (bare expressions like .head()/.nunique() only display in a
# REPL or notebook).
# loading raw data
raw = pd.read_csv("./data/raw/sf_data.csv", sep=';')
raw_districts = pd.read_csv("./data/raw/sf_districts.csv", sep=';')
raw.head(10)
# ---------------------------------------------------------------
# Category vs. Description
raw.category.nunique()
# Incident counts per category.
(raw.groupby(['category'], as_index = False)
    .agg(N = ('id', 'count'))
)
raw.description.nunique()
# Incident counts per description, ascending.
gb_desc = (raw.groupby(['description'], as_index = False)
    .agg(N = ('id', 'count'))
    .sort_values(by = 'N')
)
# 46% of descriptions sees less than 100 observations
sns.histplot(data = gb_desc.query('N<=5000'), x = 'N')
gb_desc.query('N <= 100').shape[0]
# are descriptions uniquely contained in a category?
# Map each category to the set of descriptions observed within it.
desc_dict = dict()
for i, cat in enumerate(raw.category.unique()):
    desc_dict[cat] = raw.query(f"category == '{cat}'").description.unique()
# Collect every description that appears in more than one category.
intersect_df = pd.DataFrame(columns = ["cat_1", "cat_2", "intersection"])
for i, cat_1 in enumerate(raw.category.unique()):
    for j, cat_2 in enumerate(raw.category.unique()):
        if cat_1 == cat_2:
            continue
        cat_1_descs = set(desc_dict[cat_1])
        cat_2_descs = set(desc_dict[cat_2])
        intersect = cat_1_descs.intersection(cat_2_descs)
        if len(intersect) != 0:
            tmp = pd.DataFrame({'cat_1': cat_1,
                                'cat_2': cat_2,
                                'intersection': list(intersect)})
            intersect_df = pd.concat([intersect_df, tmp])
# 9 incidences where a description is within more than one category the incidences have been found twice but in the interest of time I choose not to correct this for the time being.
# Idea for coding it better: Group by followed by comparison to descriptions in all categories with a higher index than the current category:
(raw.groupby(["category"])
    .agg(desc_unique = ("description", lambda x: x.unique()))
    .reset_index()
)
#--------------------------------------------------------------------
# label vs. category
# the label is unique within a category
label_df = (raw.groupby(["category"])
    .agg(label_unique = ("label", lambda x: x.unique()))
    .reset_index()
    .sort_values(by='label_unique')
)
# -----------------------------------------------------------------
# The amount of districts
raw_districts.district.nunique()
| 2.859375 | 3 |
zentral/contrib/munki/osx_package/builder.py | VegarM/zentral | 1 | 12772473 | import os
from django.urls import reverse
from zentral.contrib.munki.forms import EnrollmentForm
from zentral.utils.osx_package import EnrollmentPackageBuilder
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class MunkiZentralEnrollPkgBuilder(EnrollmentPackageBuilder):
    """Build the macOS enrollment package for the Zentral munki module.

    Declarative attributes are consumed by the EnrollmentPackageBuilder
    base class; extra_build_steps() fills in the deployment-specific
    placeholders in the packaged scripts.
    """
    name = "Zentral Munki Enrollment"
    form = EnrollmentForm
    package_name = "zentral_munki_enroll.pkg"
    base_package_identifier = "io.zentral.munki_enroll"
    build_tmpl_dir = os.path.join(BASE_DIR, "build.tmpl")
    def extra_build_steps(self):
        """Substitute TLS/enrollment placeholders into the package scripts."""
        # munki zentral postflight script
        postflight_script = self.get_root_path("usr/local/zentral/munki/zentral_postflight")
        self.replace_in_file(postflight_script,
                             (("%TLS_HOSTNAME%", self.get_tls_hostname()),
                              ("%TLS_SERVER_CERTS%", self.include_tls_server_certs())))
        # postinstall script
        enrollment_url = "https://{}{}".format(self.get_tls_hostname(), reverse("munki:enroll"))
        postinstall_script = self.get_build_path("scripts", "postinstall")
        self.replace_in_file(postinstall_script,
                             (("%TLS_HOSTNAME%", self.get_tls_hostname()),
                              ("%TLS_CA_CERT%", self.include_tls_ca_cert()),
                              ("%ENROLLMENT_SECRET%", self.build_kwargs["enrollment_secret_secret"]),
                              ("%ENROLLMENT_URL%", enrollment_url)))
| 2.171875 | 2 |
tf_tile/tilings.py | ccrndn/tf-tile | 9 | 12772474 | # Here we provide the key functions for tile-coding. To avoid huge dimensionality expansion, we have tiled
# per feature variable, but using feature-column cross functionality a pair of feature-variables
# also can be tiled, and also higher orders.
from typing import List
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import math_ops
class Tilings(object):
    """Per-feature tile coding: several offset bucketizations per variable.

    ``tile_strategy_boundaries`` maps feature name -> base bucket
    boundaries; ``num_tilings`` copies of those boundaries are stacked,
    each shifted by a fraction of the local bucket width.
    """
    def __init__(self, tile_strategy_boundaries, num_tilings):
        self.num_tilings = num_tilings
        self.tile_strategy_boundaries = tile_strategy_boundaries
    def _get_stack_tiling_boundaries(self, boundaries) -> List[List[float]]:
        """Return num_tilings copies of *boundaries*, each offset by
        i * (bucket_width / num_tilings).

        NOTE(review): the appended 0 keeps the last boundary fixed across
        tilings (only interior boundaries shift) -- presumably intentional,
        confirm against the intended tiling scheme.
        """
        boundaries = np.array(boundaries)
        each_bucket_resolution = np.array(
            [float(boundaries[i + 1] - boundaries[i]) / self.num_tilings for i in range(len(boundaries) - 1)] + [0])
        return [list(boundaries + i * each_bucket_resolution) for i in range(self.num_tilings)]
    @staticmethod
    def _get_tiles(input_data, list_boundaries: List[List[float]]):
        """Bucketize *input_data* once per tiling and concatenate the results.

        Each tiling's bucket index is shifted by i * (num_buckets) so tile
        ids are globally unique; returns a (batch, num_tilings) int tensor.
        """
        all_tiles = []
        input_tensor = tf.cast(input_data, tf.float64)
        for i, boundaries in enumerate(list_boundaries):
            bucketized_tensor = math_ops.bucketize(input_tensor, boundaries)
            bucketized_tensor = tf.reshape(bucketized_tensor, (-1, 1))
            # Offset into this tiling's own id range.
            bucketized_tensor = tf.math.add(bucketized_tensor, i * (len(boundaries) - 1))
            all_tiles.append(bucketized_tensor)
        return tf.concat(all_tiles, axis=1)
    def get_features_tiles(self, features):
        """Return {feature_name: tile-id tensor} for every configured feature."""
        features_tiles = dict()
        for feature_name, boundaries in self.tile_strategy_boundaries.items():
            list_boundaries = self._get_stack_tiling_boundaries(boundaries)
            features_tiles[feature_name] = Tilings._get_tiles(features[feature_name], list_boundaries)
        return features_tiles
| 2.53125 | 3 |
setup.py | EarthObservationSimulator/eosim-gui | 0 | 12772475 | from setuptools import setup
def readme():
    """Return the contents of README.md for use as the long description."""
    with open('README.md') as f:
        return f.read()
setup(
    name='EOSim',
    version='0.1',
    description='Earth Observation Simulation',
    author='BAERI',
    author_email='<EMAIL>',
    packages=['eosim'],
    scripts=[
    ],
    # Cartopy installation may not work due to additional dependencies it requires.
    # CartoPy dependencies must be installed before running this setup.
    # If using conda, cartopy along with its dependencies can be installed using the command `conda install -c conda-forge cartopy`
    # In case of Runtime errors involving numpy, try the following command: `pip install numpy --upgrade --ignore-installed`
    install_requires=['numpy', 'pandas', 'scipy', 'lowtran', 'astropy', 'cartopy']
)
| 1.695313 | 2 |
tripleo_common/utils/process.py | openstack/tripleo-common | 52 | 12772476 | # Copyright (c) 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to handle processes."""
import logging
import os
from oslo_concurrency import processutils
LOG = logging.getLogger(__name__)
def execute(*cmd, **kwargs):
    """Run a system command via oslo_concurrency.processutils.execute().

    Keyword arguments understood here (and stripped before delegation):

    :param use_standard_locale: when True, force ``LC_ALL=C`` in the child
        environment so command output is locale-independent. Default False.
    :param log_stdout: when True (default), log the command's stdout.
    :param logger: logger for the debug output (default: module ``LOG``).

    All positional arguments and remaining keyword arguments are passed
    straight through to processutils.execute().

    :returns: (stdout, stderr) from process execution
    :raises: UnknownArgumentError on receiving unknown arguments
    :raises: ProcessExecutionError
    :raises: OSError
    """
    logger = kwargs.pop('logger', LOG)
    if kwargs.pop('use_standard_locale', False):
        # Run the child under the C locale so its output is parseable
        # regardless of the host's language settings.
        env = kwargs.pop('env_variables', os.environ.copy())
        env['LC_ALL'] = 'C'
        kwargs['env_variables'] = env
    log_stdout = kwargs.pop('log_stdout', True)
    result = processutils.execute(*cmd, **kwargs)
    logger.debug('Execution completed, command line is "%s"',
                 ' '.join(map(str, cmd)))
    if log_stdout:
        logger.debug('Command stdout is: "%s"', result[0])
    logger.debug('Command stderr is: "%s"', result[1])
    return result
| 2.1875 | 2 |
setup.py | natfoster82/ocean-commander | 0 | 12772477 | <filename>setup.py<gh_stars>0
from setuptools import setup, find_packages
setup(
name='ocean',
version='0.1',
packages=find_packages(),
include_package_data=True,
install_requires=[
'Click',
],
entry_points='''
[console_scripts]
example=ocean.scripts.example:cli
manage=ocean.scripts.manage:cli
''',
)
| 1.382813 | 1 |
Maze.py | arduman4/Simple-maze-game | 0 | 12772478 | from collections import deque
from random import *
class Maze():
    """Grid maze generated with iterative randomized depth-first search.

    The grid is (2*y-1) x (2*x-1): cells sit at even indices, walls at odd
    indices between them. Cell values mix types deliberately: True/1 = wall,
    False/0 = open, 2 = player start (True == 1 makes the comparisons work).
    """
    def __init__(self, player, canvas, y, x):
        self.player = player
        self.canvas = canvas
        # Logical grid size; y/x are the cell counts, doubled minus one to
        # leave room for wall rows/columns between cells.
        self.size = {"x": 2 * x - 1,
                     "y": 2 * y - 1}
    # 0 = air, 1 = wall, 2 = player. 3 = target
    def generate(self):
        """Carve the maze in-place and store it as ``self.maze``.

        Iterative backtracker: carving a cell to False doubles as its
        "visited" mark.
        """
        def UnvisitedNeighbours(cell):
            # Collect directions whose neighbouring cell (2 steps away) is
            # still uncarved (truthy == unvisited).
            neighbours = []
            if cell[0] + 2 < self.size["x"]:
                if maze[cell[0] + 2][cell[1]]:
                    neighbours.append("r")
            if cell[0] - 2 >= 0:
                if maze[cell[0] - 2][cell[1]]:
                    neighbours.append("l")
            if cell[1] + 2 < self.size["y"]:
                if maze[cell[0]][cell[1] + 2]:
                    neighbours.append("u")
            if cell[1] - 2 >= 0:
                if maze[cell[0]][cell[1] - 2]:
                    neighbours.append("d")
            return neighbours
        stack = deque()
        # Start fully walled; cells are carved out as the search visits them.
        maze = []
        for i in range(self.size["x"]):
            maze.append([True] * (self.size["y"]))
        # Choose the initial cell, mark it as visited and push it to the stack
        current_cell = [randrange(2, self.size["x"], 2), randrange(2, self.size["y"], 2)]
        maze[current_cell[0]][current_cell[1]] = False
        stack.append(current_cell)
        # While the stack is not empty
        while(len(stack) > 0):
            # Pop a cell from the stack and make it a current cell
            current_cell = stack.pop()
            # maze[current_cell[0]][current_cell[1]] = False
            # If the current cell has any neighbours which have not been visited
            if len(neighbours := UnvisitedNeighbours(current_cell)):
                # Push the current cell to the stack
                stack.append(current_cell)
                # Choose one of the unvisited neighbours
                direction = choice(neighbours)
                # Remove the wall between the current cell and the chosen cell
                # (carving the neighbour also marks it visited).
                if direction == "u":
                    maze[current_cell[0]][current_cell[1] + 1] = False
                    maze[current_cell[0]][current_cell[1] + 2] = False
                    stack.append([current_cell[0], current_cell[1] + 2])
                elif direction == "d":
                    maze[current_cell[0]][current_cell[1] - 1] = False
                    maze[current_cell[0]][current_cell[1] - 2] = False
                    stack.append([current_cell[0], current_cell[1] - 2])
                elif direction == "l":
                    maze[current_cell[0] - 1][current_cell[1]] = False
                    maze[current_cell[0] - 2][current_cell[1]] = False
                    stack.append([current_cell[0] - 2, current_cell[1]])
                elif direction == "r":
                    maze[current_cell[0] + 1][current_cell[1]] = False
                    maze[current_cell[0] + 2][current_cell[1]] = False
                    stack.append([current_cell[0] + 2, current_cell[1]])
        # Player always starts in the top-left corner cell.
        maze[0][0] = 2
        self.maze = maze
        """
        Choose the initial cell, mark it as visited and push it to the stack
        While the stack is not empty
            Pop a cell from the stack and make it a current cell
            If the current cell has any neighbours which have not been visited
                Push the current cell to the stack
                Choose one of the unvisited neighbours
                Remove the wall between the current cell and the chosen cell
                Mark the chosen cell as visited and push it to the stack
        """
    def build(self, player, scale):
        """Draw the generated maze on the canvas at ``scale`` pixels per cell.

        Walls become black rectangles; the player marker positions the
        player sprite and records its grid coordinates.
        """
        self.scale = scale
        self.obstacles = []
        self.player = player
        for y in range(len(self.maze)):
            for x in range(len(self.maze[y])):
                if self.maze[y][x] == 1:
                    self.obstacles.append(self.canvas.create_rectangle(
                        x * self.scale, y * self.scale,
                        x * self.scale + self.scale, y * self.scale + self.scale,
                        fill="black"))
                elif self.maze[y][x] == 2:
                    # Centre the player within its starting cell.
                    player.setCoords((x + 0.5) * scale, (y + 0.5) * scale)
                    self.playerCoords = {"x": x, "y": y}
    def canMove(self, _direction, move=False):
        """Return True if the player can step one cell in ``_direction``.

        Accepts "up"/"down"/"left"/"right" (case-insensitive); any other
        value raises ValueError. When ``move`` is True and the step is
        legal, the player's grid coordinates are updated as well.
        """
        newPos = []
        direction = _direction.lower()
        if direction == "up":
            newPos = [self.playerCoords["x"], self.playerCoords["y"] - 1]
        elif direction == "down":
            newPos = [self.playerCoords["x"], self.playerCoords["y"] + 1]
        elif direction == "left":
            newPos = [self.playerCoords["x"] - 1, self.playerCoords["y"]]
        elif direction == "right":
            newPos = [self.playerCoords["x"] + 1, self.playerCoords["y"]]
        else:
            raise ValueError("Unknown direction: " + direction +
                             ". Direction must be one of these: up, down, left, right (ignore case).")
        # out of maze bounds
        if newPos[0] < 0 or newPos[1] < 0:
            return False
        if newPos[1] > len(self.maze) - 1:
            return False
        if newPos[0] > len(self.maze[newPos[1]]) - 1:
            return False
        # obstacle
        if self.maze[newPos[1]][newPos[0]] == 1:
            return False
        if move:
            self.playerCoords["x"] = newPos[0]
            self.playerCoords["y"] = newPos[1]
        return True
| 3.890625 | 4 |
pytest_azurepipelines.py | Anthchirp/pytest-azurepipelines | 0 | 12772479 | <reponame>Anthchirp/pytest-azurepipelines
# -*- coding: utf-8 -*-
import os.path
import io
import pkg_resources
import sys
import pytest
__version__ = "1.0.0rc5"
DEFAULT_PATH = "test-output.xml"
DEFAULT_COVERAGE_PATH = "coverage.xml"
def pytest_addoption(parser):
    """Register this plugin's command-line options in one option group."""
    group = parser.getgroup("pytest_azurepipelines")
    group.addoption(
        "--test-run-title",
        action="store",
        dest="azure_run_title",
        default="Pytest results",
        help="Set the Azure test run title.",
    )
    # All remaining options are simple boolean flags.
    boolean_flags = [
        ("--napoleon-docstrings", "napoleon",
         "If using Google, NumPy, or PEP 257 multi-line docstrings."),
        ("--no-coverage-upload", "no_coverage_upload",
         "Skip uploading coverage results to Azure Pipelines."),
        ("--no-docker-discovery", "no_docker_discovery",
         "Skip detecting running inside a Docker container."),
        ("--force-xunit", "force_xunit",
         "Force output using (experimental) xUnit2 XML."),
    ]
    for flag, dest, help_text in boolean_flags:
        group.addoption(
            flag,
            action="store_true",
            dest=dest,
            default=False,
            help=help_text,
        )
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
    """Force an XML results file and, with pytest-cov, XML+HTML coverage.

    By default results go through pytest-nunit (--nunitxml); with
    --force-xunit pytest's built-in JUnit writer is used instead and
    the junit_family is pinned to the xunit2 schema.
    """
    if not config.getoption("force_xunit"):
        nunit_xmlpath = config.getoption("--nunitxml")
        if not nunit_xmlpath:
            config.option.nunit_xmlpath = DEFAULT_PATH
    else:
        xmlpath = config.getoption("--junitxml")
        if not xmlpath:
            config.option.xmlpath = DEFAULT_PATH
        if not config.getini("junit_family"):
            # Seed the ini cache directly so the xunit2 schema is used
            # even when the user's configuration leaves it unset.
            config._inicache["junit_family"] = "xunit2"
    # ensure coverage creates xml format
    if config.pluginmanager.has_plugin("pytest_cov"):
        config.option.cov_report["xml"] = os.path.normpath(
            os.path.abspath(
                os.path.expanduser(os.path.expandvars(DEFAULT_COVERAGE_PATH))
            )
        )
        if "html" not in config.option.cov_report:
            config.option.cov_report["html"] = None
def get_resource_folder_path():
    """Return the absolute path of the package's ``resources`` folder.

    Starting at the installed package location, walk up the directory
    tree until a child folder named ``resources`` is found.

    Raises:
        RuntimeError: when the filesystem root is reached without
            finding the folder (and no ``resources`` exists relative to
            the current working directory either).
    """
    resources_folder_name = "resources"
    ancestor = pkg_resources.resource_filename(__name__, "")
    # traverse to parent folder until a child folder with name "resources"
    # is found, or the root is reached
    while not os.path.exists(os.path.join(ancestor, resources_folder_name)):
        parent = os.path.dirname(ancestor)
        if not parent or parent == ancestor:
            # dirname() is a fixed point at the filesystem root ("/" on
            # POSIX, "C:\\" on Windows); the old `ancestor == "/"` test
            # never matched on Windows and looped forever.
            if os.path.exists(resources_folder_name):
                break
            raise RuntimeError("Could not find the path to resources folder.")
        ancestor = parent
    return os.path.join(ancestor, resources_folder_name)
def get_resource_file_content(file_name):
    """Read *file_name* from the package's resources folder as text."""
    resource_path = os.path.join(get_resource_folder_path(), file_name)
    with open(resource_path, mode="rt") as source:
        return source.read()
def inline_css_into_each_html_report_file(reportdir):
    """
    Embed the report CSS directly into every generated HTML file.

    Azure DevOps renders coverage reports inside an iframe where
    <link>-ed stylesheets are not applied, so inlining the styles is
    what makes the reports readable in the portal.
    """
    css = get_resource_file_content("style.css")
    style_fragment = "\n<style>\n" + css + "\n</style>\n"
    # pytest-cov writes a flat folder, so no recursion is required here.
    for name in os.listdir(reportdir):
        if not name.endswith(".html"):
            continue
        full_path = os.path.join(reportdir, name)
        with open(full_path, mode="rt", encoding="utf8") as source:
            patched = source.read().replace("</head>", style_fragment + "</head>")
        with open(full_path, mode="wt", encoding="utf8") as target:
            target.write(patched)
def try_to_inline_css_into_each_html_report_file(reportdir):
    """Best-effort CSS inlining: log a pipeline warning instead of failing."""
    try:
        inline_css_into_each_html_report_file(reportdir)
    except Exception as ex:
        prefix = "Failed to inline CSS styles in coverage reports. Error: "
        print("##vso[task.logissue type=warning;]{0}{1}".format(prefix, str(ex)))
@pytest.hookimpl(trylast=True)
def pytest_sessionfinish(session, exitstatus):
    """Publish test results and coverage to Azure Pipelines via ##vso commands.

    Runs last so other plugins (nunit/junit writers, pytest-cov) have
    finished producing their artifacts.  When running inside Docker the
    artifact paths are translated back to host paths so the pipeline
    agent can find them.  A failed run is reported through
    ``task.complete result=Failed`` and then downgraded to ExitCode.OK
    so the pipeline task (not pytest's exit code) controls the outcome.
    """
    if not session.config.getoption("force_xunit"):
        xmlpath = session.config.option.nunit_xmlpath
        mode = "NUnit"
    else:
        xmlpath = session.config.option.xmlpath
        mode = "xUnit"
    # This mirrors https://github.com/pytest-dev/pytest/blob/38adb23bd245329d26b36fd85a43aa9b3dd0406c/src/_pytest/junitxml.py#L368-L369
    xmlabspath = os.path.normpath(
        os.path.abspath(os.path.expanduser(os.path.expandvars(xmlpath)))
    )
    mountinfo = None
    # /.dockerenv existing is the "am I in a container" heuristic; the
    # init process's mountinfo then provides the bind-mount table used
    # to map container paths back to host paths.
    if not session.config.getoption("no_docker_discovery") and os.path.isfile(
        "/.dockerenv"
    ):
        with io.open(
            "/proc/1/mountinfo",
            "rb",
        ) as fobj:
            mountinfo = fobj.read()
        mountinfo = mountinfo.decode(sys.getfilesystemencoding())
    if mountinfo:
        xmlabspath = apply_docker_mappings(mountinfo, xmlabspath)
    # Set the run title in the UI to a configurable setting
    # (quote/semicolon/bracket would break the ##vso command syntax).
    description = (
        session.config.option.azure_run_title.replace("'", "")
        .replace(";", "")
        .replace("]", "")
    )
    if not session.config.getoption("no_docker_discovery"):
        print(
            "##vso[results.publish type={2};runTitle={1};publishRunAttachments=true;]{0}".format(
                xmlabspath, description, mode
            )
        )
    else:
        print("Skipping uploading of test results because --no-docker-discovery set.")
    if exitstatus != 0 and session.testsfailed > 0 and not session.shouldfail:
        buildid = os.getenv("BUILD_BUILDID")
        print(
            f"##vso[task.logissue type=error;]{session.testsfailed} test(s) out of {session.testscollected} test(s) failed. "
        )
        # Write a small markdown summary that gets attached to the
        # build's summary page, linking to the test-results tab.
        with open(f"{description}.md", "w") as fh:
            fh.write("### I wonder\n")
            fh.write("what this may look like.\n\n")
            fh.write(
                f"[Results of the test run:](https://dev.azure.com/zocalo/python-zocalo/_build/results?buildId={buildid}&view=ms.vss-test-web.build-test-results-tab)"
            )
            fh.write(
                f"{session.testsfailed} test(s) out of {session.testscollected} test(s) failed.\n"
            )
        print(
            "##vso[task.uploadsummary]"
            + os.path.normpath(os.path.abspath(f"{description}.md"))
        )
        print("##vso[task.complete result=Failed;]Marking task as failed...")
        # The ##vso command above already failed the pipeline task, so
        # report success to the caller to avoid a double failure.
        session.exitstatus = pytest.ExitCode.OK
    if (
        not session.config.getoption("no_coverage_upload")
        and not session.config.getoption("no_docker_discovery")
        and session.config.pluginmanager.has_plugin("pytest_cov")
    ):
        covpath = os.path.normpath(
            os.path.abspath(
                os.path.expanduser(os.path.expandvars(DEFAULT_COVERAGE_PATH))
            )
        )
        reportdir = os.path.normpath(os.path.abspath("htmlcov"))
        if os.path.exists(covpath):
            if mountinfo:
                covpath = apply_docker_mappings(mountinfo, covpath)
                reportdir = apply_docker_mappings(mountinfo, reportdir)
            try_to_inline_css_into_each_html_report_file(reportdir)
            print(
                "##vso[codecoverage.publish codecoveragetool=Cobertura;summaryfile={0};reportdirectory={1};]".format(
                    covpath, reportdir
                )
            )
        else:
            print(
                "##vso[task.logissue type=warning;]{0}".format(
                    "Coverage XML was not created, skipping upload."
                )
            )
    else:
        print("Skipping uploading of coverage data.")
def apply_docker_mappings(mountinfo, dockerpath):
    """
    Translate a container-local path into its host equivalent.

    Scans each line of /proc/1/mountinfo; whenever *dockerpath* lies
    under a mount point (5th space-separated field), that prefix is
    replaced with the corresponding host root (4th field), assuming the
    path was bind mounted from the host.  Lines too short to carry both
    fields are ignored.
    """
    for line in mountinfo.splitlines():
        fields = line.split(" ")
        if len(fields) < 5:
            continue
        mount_point = fields[4]
        host_root = fields[3]
        if dockerpath.startswith(mount_point):
            dockerpath = host_root + dockerpath[len(mount_point):]
    return dockerpath
def pytest_warning_recorded(warning_message, *args, **kwargs):
    """Surface each recorded pytest warning as an Azure Pipelines warning."""
    text = str(warning_message.message)
    print("##vso[task.logissue type=warning;]{0}".format(text))
def pytest_runtestloop(session):
    """Remember how many tests were collected, for progress reporting."""
    total = len(session.items)
    pytest_runtestloop.test_count = total
def pytest_runtest_logstart(nodeid, location):
    """Print the test's node id (no newline) just before it runs."""
    print("{0} ".format(nodeid), end="")
def pytest_runtest_logfinish():
    # Intentionally empty: overriding this hook suppresses pytest's
    # default progress output, which would interleave badly with the
    # per-test lines printed by pytest_runtest_logstart/logreport.
    pass
def pytest_runtest_logreport(report):
    """Print per-test outcomes and push a progress percentage to Azure.

    On the "call" phase the outcome is printed, with a ##vso error issue
    (traceback included) for failures.  On "teardown" the overall
    percentage is reported, but only when it actually changed.
    """
    if report.when == "call":
        print(report.outcome)
        if report.outcome == "failed":
            print("\n")
            print("=" * 80)
            # %0D%0A encodes CR/LF so the multi-line traceback survives
            # the single-line ##vso logging command.
            print(
                f"##vso[task.logissue type=error]Test failure: {report.nodeid}%0D%0A"
                + report.longreprtext.replace("\n", "%0D%0A")
            )
            print("=" * 80)
    if report.when == "teardown":
        tests_count = getattr(pytest_runtestloop, "test_count", 0)
        if not tests_count:
            # pytest_runtestloop never ran (e.g. collection failure);
            # bail out instead of dividing by zero below.
            return
        tests_taken = getattr(pytest_runtest_logreport, "tests_taken", 0) + 1
        pytest_runtest_logreport.tests_taken = tests_taken
        percent_reported = getattr(pytest_runtest_logreport, "percent_reported", -1)
        percent = (100 * tests_taken) // tests_count
        if percent != percent_reported:
            print(f"##vso[task.setprogress value={percent};]running tests")
            pytest_runtest_logreport.percent_reported = percent
@pytest.fixture
def record_pipelines_property(record_nunit_property):
    """Attach a name/value property to the current test's results.

    Thin proxy around pytest-nunit's ``record_nunit_property`` fixture so
    this plugin's public fixture name stays stable if the backend changes.
    """
    return record_nunit_property
@pytest.fixture
def add_pipelines_attachment(add_nunit_attachment):
    """Attach a file to the current test's results.

    Thin proxy around pytest-nunit's ``add_nunit_attachment`` fixture so
    this plugin's public fixture name stays stable if the backend changes.
    """
    return add_nunit_attachment
| 2.046875 | 2 |
news/migrations/0001_initial.py | niuwenju/minicms | 83 | 12772480 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Auto-generated initial migration for the ``news`` app.

    Creates the ``Article`` and ``Column`` models (verbose names are
    escaped Chinese strings) plus the many-to-many link from Article to
    Column.  Do not edit by hand beyond documentation.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=256, verbose_name='\u6807\u9898')),
                ('slug', models.CharField(max_length=256, verbose_name='\u7f51\u5740', db_index=True)),
                ('content', models.TextField(default='', verbose_name='\u5185\u5bb9', blank=True)),
                ('published', models.BooleanField(default=True, verbose_name='\u6b63\u5f0f\u53d1\u5e03')),
                ('author', models.ForeignKey(verbose_name='\u4f5c\u8005', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'verbose_name': '\u6559\u7a0b',
                'verbose_name_plural': '\u6559\u7a0b',
            },
        ),
        migrations.CreateModel(
            name='Column',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=256, verbose_name='\u680f\u76ee\u540d\u79f0')),
                ('slug', models.CharField(max_length=256, verbose_name='\u680f\u76ee\u7f51\u5740', db_index=True)),
                ('intro', models.TextField(default='', verbose_name='\u680f\u76ee\u7b80\u4ecb')),
            ],
            options={
                'ordering': ['name'],
                'verbose_name': '\u680f\u76ee',
                'verbose_name_plural': '\u680f\u76ee',
            },
        ),
        migrations.AddField(
            model_name='article',
            name='column',
            field=models.ManyToManyField(to='news.Column', verbose_name='\u5f52\u5c5e\u680f\u76ee'),
        ),
    ]
| 1.789063 | 2 |
setup.py | emsellem/pymusepipe | 5 | 12772481 | # -*- coding: utf-8 -*-"""
"""
Setup file for pymusepipe.
Use setup.cfg to configure your project.
"""
# Licensed under a MIT style license - see LICENSE.txt
from __future__ import absolute_import, division, print_function
from setuptools import setup, find_packages
# Read the package version from version.py without importing the
# package itself (avoids import-time side effects during installation).
version = {}
with open("src/pymusepipe/version.py") as fp:
    exec(fp.read(), version)
with open('README.md', 'r') as f:
    readme = f.read()
# NOTE(review): `license` shadows the builtin of the same name and the
# variable is never passed to setup() below — confirm it is needed.
with open('LICENSE.txt') as f:
    license = f.read()
setup(name='pymusepipe',
      version = version['__version__'],
      description='python module to reduce MUSE Raw data and combine them',
      long_description=readme,
      long_description_content_type="text/markdown",
      keywords=['MUSE', 'DATAREDUCTION'],
      url="https://github.com/emsellem/pymusepipe",
      download_url="https://github.com/emsellem/pymusepipe/archive/v2.9.6.beta.tar.gz",
      author='<NAME>',
      author_email='<EMAIL>',
      license="MIT",
      packages=find_packages(exclude=('tests', 'docs')),
      install_requires=['mpdaf', 'numpy', 'scipy', 'astropy'],
      include_package_data=True,
      zip_safe=False,
      classifiers=[
          "Programming Language :: Python :: 3",
          "License :: OSI Approved :: MIT License",
          "Operating System :: OS Independent",
      ],
      )
| 1.476563 | 1 |
src/coronabot.py | ryyaan2004/coronabot | 0 | 12772482 | <filename>src/coronabot.py<gh_stars>0
import collections
import requests
import string
import slack
import os
import copy
from flask import Flask
class PartialFormatter(string.Formatter):
    """A ``str.format``-style formatter that tolerates missing/bad fields.

    Missing fields (KeyError/AttributeError during lookup) render as
    *missing* (default ``' 0'``) instead of raising; values that reject
    the requested format spec render as *bad_fmt* (default ``'0'``),
    or re-raise when *bad_fmt* is None.
    """

    def __init__(self, missing=' 0', bad_fmt='0'):
        self.missing, self.bad_fmt = missing, bad_fmt

    def get_field(self, field_name, args, kwargs):
        # Handle a key not found: substitute None so format_field() can
        # emit the `missing` placeholder instead of propagating.
        try:
            # Python 3 zero-argument super() replaces the old
            # super(PartialFormatter, self) spelling.
            val = super().get_field(field_name, args, kwargs)
        except (KeyError, AttributeError):
            val = None, field_name
        return val

    def format_field(self, value, spec):
        # Handle an invalid format spec for this value.
        if value is None:
            return self.missing
        try:
            return super().format_field(value, spec)
        except ValueError:
            if self.bad_fmt is not None:
                return self.bad_fmt
            else:
                raise
def ratio(str_numerator, str_denominator):
    """Return numerator/denominator from (string) inputs, or 'NaN'.

    'NaN' is returned when either value is missing (None), is not a
    valid integer string, or when the denominator is zero — all of
    which occur in the source JSON feed.
    """
    try:
        return int(str_numerator) / int(str_denominator)
    except (TypeError, ValueError):
        # TypeError: value is None; ValueError: non-numeric string —
        # the original only caught TypeError and crashed on bad strings.
        return 'NaN'
    except ZeroDivisionError:
        return 'NaN'
def percentage(num):
    """Format *num* (a fraction) as a whole-number percentage string.

    Returns 'NaN' when *num* does not support the percent format code
    (e.g. it is already the 'NaN' string produced by ratio()).
    """
    try:
        formatted = format(num, ".0%")
    except ValueError:
        return 'NaN'
    return formatted
def accumulate(accumulator, json_entry):
    """Add the entry's Confirmed/Deaths/Recovered counts into *accumulator*.

    A missing (None) value counts as zero, matching the source data
    where some regions report no figure for a field.
    """
    for field in ("Confirmed", "Deaths", "Recovered"):
        try:
            accumulator[field] += int(json_entry[field])
        except TypeError:
            accumulator[field] += 0
def country_table_string(json_list):
    """Build a Slack monospace (``` fenced) table, one row per country.

    Each *json_list* item is an ArcGIS feature dict whose 'attributes'
    hold Country_Region plus Confirmed/Deaths/Recovered counts.  D/C
    and R/C are deaths/confirmed and recovered/confirmed percentages.
    """
    # PartialFormatter renders missing/None fields as placeholders so
    # rows with incomplete data still align.
    fmt = PartialFormatter()
    r = "```"
    r += fmt.format("|{:>25}|{:>10}|{:>10}|{:>10}|{:>5}|{:>5}|\n", "Country", "Confirmed", "Deaths", "Recovered", "D/C"
                    , "R/C")
    r += fmt.format("|{:>25}|{:>10}|{:>10}|{:>10}|{:>5}|{:>5}|\n", "", "", "", "", "", "")
    for item in json_list:
        thing = item['attributes']
        deaths_to_confirmed = ratio(thing['Deaths'], thing['Confirmed'])
        recovered_to_confirmed = ratio(thing['Recovered'], thing['Confirmed'])
        r += fmt.format("|{:>25}|{:>10,}|{:>10,}|{:>10,}|{:>5}|{:>5}|\n", thing['Country_Region'], thing['Confirmed'],
                        thing['Deaths'], thing['Recovered'], percentage(deaths_to_confirmed),
                        percentage(recovered_to_confirmed))
    r += "```"
    return r
def summary_table_string(json_list):
    """Build a Slack monospace table with worldwide totals plus the JHU link."""
    # Counter starts every field at 0, which accumulate() relies on.
    totals = collections.Counter()
    for item in json_list:
        thing = item['attributes']
        accumulate(totals, thing)
    fmt = PartialFormatter()
    deaths_to_confirmed = ratio(totals['Deaths'], totals['Confirmed'])
    recovered_to_confirmed = ratio(totals['Recovered'], totals['Confirmed'])
    r = "```\n"
    r += fmt.format("|{:>25}|{:>10}|{:>10}|{:>10}|{:>5}|{:>5}|\n", "Country", "Confirmed", "Deaths", "Recovered", "D/C"
                    , "R/C")
    r += fmt.format("|{:>25}|{:>10}|{:>10}|{:>10}|{:>5}|{:>5}|\n", "", "", "", "", "", "")
    r += fmt.format("|{:>25}|{:>10,}|{:>10,}|{:>10,}|{:>5}|{:>5}|\n", "Worldwide", totals['Confirmed']
                    , totals['Deaths'], totals['Recovered'], percentage(deaths_to_confirmed)
                    , percentage(recovered_to_confirmed))
    r += "```\n"
    r += jhu_url()
    return r
def chunk(lst, size):
    """Yield successive *size*-item slices of *lst* (the last may be shorter)."""
    return (lst[start:start + size] for start in range(0, len(lst), size))
def jhu_url():
    """Returns the interactive web dashboard created/maintained by Johns Hopkins University"""
    dashboard = (
        "https://www.arcgis.com/apps/opsdashboard/index.html"
        "#/bda7594740fd40299423467b48e9ecf6"
    )
    return "For the Johns Hopkins University interactive dashboard visit this link: {}".format(dashboard)
app = Flask(__name__)
@app.route("/")
def corona():
    """Fetch latest per-country COVID data and post tables to Slack.

    Reads SLACK_TOKEN and SLACK_CHANNEL_ID from the environment; LIMIT
    (optional) caps the number of countries posted.  Returns "OK" so
    the route can be driven by an HTTP scheduler.
    """
    # NOTE(review): os.environ[...] raises KeyError when unset, so the
    # `is None` guard below can never fire — confirm intended behaviour.
    token = os.environ['SLACK_TOKEN']
    channel = os.environ['SLACK_CHANNEL_ID']
    if token is None or channel is None:
        print('both a slack token and a channel must be provided')
        exit(1)
    # ArcGIS feature service sorted by confirmed cases, descending.
    arcgisUrl = "https://services1.arcgis.com/0MSEUqKaxRlEPj5g/arcgis/rest/services/ncov_cases/FeatureServer/2/query?f=json&where=1%3D1&returnGeometry=false&spatialRel=esriSpatialRelIntersects&outFields=*&orderByFields=Confirmed%20desc&resultOffset=0&resultRecordCount=250&cacheHint=true"
    result = requests.get(arcgisUrl)
    countries = result.json()['features']
    # Keep an untruncated copy so the worldwide summary uses all rows.
    original = copy.deepcopy(countries)
    limit = os.environ.get('LIMIT')
    client = slack.WebClient(token=token)
    if limit is not None:
        limit = int(limit)
        countries = countries[:limit]
        client.chat_postMessage(channel=channel, text='Top {} countries by confirmed cases'.format(limit))
    # push to slack in 50-row tables to stay under message size limits
    for group in chunk(countries, 50):
        client.chat_postMessage(channel=channel, text=country_table_string(group))
    client.chat_postMessage(channel=channel, text=summary_table_string(original))
    return "OK"
if __name__ == '__main__':
    # PORT is honoured for platforms like Heroku; defaults to 5000.
    port = os.environ.get('PORT', 5000)
    app.run(host='0.0.0.0', port=port)
| 2.640625 | 3 |
app.py | oeg-upm/SOMEF-server | 0 | 12772483 | <reponame>oeg-upm/SOMEF-server<gh_stars>0
import os
import json
from flask import Flask, render_template, jsonify, request, send_from_directory
from werkzeug.utils import secure_filename
#import requests
import sys
app = Flask(__name__)
@app.route("/")
def home():
    """Serve the landing page template."""
    return render_template('home.html')
if __name__ == '__main__':
    # Optional CLI arguments: `app.py [port]` or `app.py host port`.
    if len(sys.argv) == 2 and sys.argv[1].isdigit():
        app.run(debug=True, port=int(sys.argv[1]))
    elif len(sys.argv) == 3 and sys.argv[2].isdigit():
        app.run(debug=True, host=sys.argv[1], port=int(sys.argv[2]))
    else:
        app.run(debug=True)
| 2.21875 | 2 |
src/infovae.py | act65/mri-reconstruction | 8 | 12772484 | import tensorflow as tf
import numpy as np
import src.utils as utils
"""
Implementation of InfoVAE
https://arxiv.org/abs/1706.02262
"""
def reparameterise(x, n, stddev):
    """
    Model each output as being Gaussian distributed and draw a sample,
    using the reparameterisation trick so the sample stays
    differentiable w.r.t. the network outputs.

    Args:
        x: 4-D tensor whose last axis packs [means | spreads]; the
            first `n` channels are means, the remainder the spreads.
        n (int): number of mean channels.
        stddev (float): standard deviation of the injected noise.

    Returns:
        A tensor shaped like the mean channels.
    """
    with tf.name_scope('reparameterise'):
        z_mean = x[:,:,:,:n]
        z_stddev = x[:,:,:,n:]
        e = tf.random_normal(tf.shape(z_mean), stddev=stddev)
        # TODO log_var or stddev? Squaring the spread treats it as a
        # variance-like quantity — NOTE(review): confirm the intended
        # parameterisation before relying on this.
        return z_mean + tf.square(z_stddev)*e
def compute_kernel(x, y):
    """
    Pairwise Gaussian (RBF) kernel between the rows of x and y.

    x is [x_size, dim] and y is [y_size, dim]; the result is a
    [x_size, y_size] matrix of exp(-mean squared difference / dim).
    """
    x_size = tf.shape(x)[0]
    y_size = tf.shape(y)[0]
    dim = tf.shape(x)[1]
    # Tile both inputs to [x_size, y_size, dim] so each pair of rows
    # can be compared elementwise.
    tiled_x = tf.tile(tf.reshape(x, [x_size, 1, dim]), [1, y_size, 1])
    tiled_y = tf.tile(tf.reshape(y, [1, y_size, dim]), [x_size, 1, 1])
    return tf.exp(-tf.reduce_mean(tf.square(tiled_x - tiled_y), axis=2) / tf.cast(dim, tf.float32))
def compute_mmd(x, y):
    """
    Maximum mean discrepancy between samples x and y under the Gaussian
    kernel: mean k(x,x) + mean k(y,y) - 2 * mean k(x,y).
    """
    x_kernel = compute_kernel(x, x)
    y_kernel = compute_kernel(y, y)
    xy_kernel = compute_kernel(x, y)
    return tf.reduce_mean(x_kernel) + tf.reduce_mean(y_kernel) - 2 * tf.reduce_mean(xy_kernel)
def gaussian_d(x, y):
    """
    Gaussian-shaped score of the per-row distance between x and y.

    NOTE(review): the original author flagged a conceptual gap here — a
    density at a single point has no dx, so treat the returned value as
    a heuristic score rather than a proper probability density.
    """
    d = tf.norm(x - y, axis=1)
    # exp(-0.5 * d) uses the distance, not its square — TODO confirm
    # this is the intended kernel shape.
    return tf.exp(-0.5*d)/(tf.sqrt(2*tf.constant(np.pi)))
def pz(z):
    """
    Estimate p(z) using our prior on z: a Gaussian score of z's
    distance from the origin (zero-mean prior).
    """
    z = tf.layers.flatten(z)
    return gaussian_d(z , tf.zeros_like(z))
def px_z(x_, y):
    # Likelihood-style score of the sampled reconstruction x_ against
    # the decoder output's first channel; the remaining channels carry
    # the noise added in the hidden layer.
    return gaussian_d(tf.layers.flatten(y[:,:,:,:1]),
                      tf.layers.flatten(x_))
def pz_x(h, z):
    # the added noise in the final layer.
    # The first half of h's channels are the latent means (see
    # reparameterise); score z against those means.
    shape = h.get_shape().as_list()
    return gaussian_d(tf.layers.flatten(h[:,:,:,:shape[-1]//2]),
                      tf.layers.flatten(z))
def p_bayes(x_, y, h, z):
    """
    Bayes-rule estimate of p(x): p(x) = p(x | z) p(z) / p(z | x).
    If p(z | x) is far away from p(z) then p(x) is low.
    """
    return px_z(x_, y) * pz(z) / pz_x(h, z)
# def KL_divergence(p, q):
#     return tf.reduce_sum(p * tf.log(p/q), axis=-1)
#
# def bayesian_surprise(z):
#     """
#
#     """
#     return kl(z, prior)
class InfoVAE():
    """Convolutional InfoVAE: an MMD-regularised variational autoencoder.

    The encoder maps images to 2*n_hidden channels (means + spreads);
    `reparameterise` samples a latent code, which the decoder maps back
    to 2*n_channels output channels.
    """
    def __init__(self, n_hidden, width, depth, stddev=0.0001):
        """
        Args:
            n_hidden (int): number of latent channels (the encoder
                outputs 2*n_hidden channels: means and spreads).
            width (int): filters per convolutional layer.
            depth (int): number of extra stride-2 conv layers in each
                of the encoder and decoder.
            stddev (float): noise scale used by `reparameterise`.
        """
        self.n_hidden = n_hidden
        self.width = width
        self.depth = depth
        self.n_channels = 1
        self.stddev = stddev
        self.construct()
    def construct(self):
        """
        Constructs:
            encoder (tf.keras.Model): encode the gradient into the hidden space
            decoder (tf.keras.Model): decodes a hidden state into an image
        """
        layers = []
        layers.append(tf.keras.layers.Conv2D(self.width, 4, strides=(2, 2),
                             padding='same',
                             # input_shape=(28,28,1)
                             ))
        layers.append(tf.keras.layers.Activation(tf.keras.activations.selu))
        for i in range(self.depth):
            layers.append(tf.keras.layers.Conv2D(self.width,
                                                 4,
                                                 strides=(2, 2),
                                                 padding='same'),)
            layers.append(tf.keras.layers.Activation(tf.keras.activations.selu))
        # Final 1x1 conv emits 2*n_hidden channels: latent means+spreads.
        layers.append(tf.keras.layers.Conv2D(self.n_hidden*2,
                                             1,
                                             strides=(1, 1),
                                             padding='same'))
        self.encoder = tf.keras.Sequential(layers)
        # decoder
        layers = []
        layers.append(tf.keras.layers.Conv2DTranspose(self.width, 4, strides=(2, 2),
                             padding='same',
                             # input_shape=(1,1,self.n_hidden)
                             ))
        layers.append(tf.keras.layers.Activation(tf.keras.activations.selu))
        for _ in range(self.depth):
            layers.append(tf.keras.layers.Conv2DTranspose(self.width, 4, strides=(2, 2), padding='same'))
            layers.append(tf.keras.layers.Activation(tf.keras.activations.selu))
        # 2*n_channels output channels: reconstruction means + spreads.
        layers.append(tf.keras.layers.Conv2DTranspose(self.n_channels*2, 1, strides=(1, 1), padding='same'))
        self.decoder = tf.keras.Sequential(layers)
    def __call__(self, x):
        """
        Args:
            x (tf.tensor): the input
                shape is [None, width, height, channels],
                dtype is tf.float32

        Returns:
            The sampled reconstruction x_; also caches h (encoder
            output), z (latent sample) and y (decoder output) on self.
        """
        with tf.name_scope('infovae'):
            self.h = self.encoder(x)
            self.z = reparameterise(self.h, self.n_hidden, self.stddev)
            self.y = self.decoder(self.z)
            self.x_ = reparameterise(self.y, self.n_channels, self.stddev)
            return self.x_
    def make_losses(self, x, y=None):
        """Return (reconstruction loss, MMD latent loss) for input x."""
        self.x = x
        if y is None:
            print('...')
            y = self.__call__(self.x)
        with tf.name_scope('loss'):
            recon_loss = tf.losses.sigmoid_cross_entropy(
                logits=tf.layers.flatten(y),
                multi_class_labels=tf.layers.flatten(self.x))
            # Match the aggregated latent codes to a standard normal.
            latent_loss = compute_mmd(tf.layers.flatten(self.z),
                                      tf.layers.flatten(tf.random_normal(shape=tf.shape(self.z))))
        return recon_loss, latent_loss
    def make_contractive_loss(self):
        # assumes make_losses has already been called
        # (needs self.h and self.x from a prior forward pass).
        print(self.h, self.x)
        dhdx = tf.gradients(self.h, self.x)[0]
        print(dhdx)
        if dhdx is None:
            raise ValueError()
        return tf.reduce_mean(tf.reduce_sum(tf.square(dhdx), axis=[1,2,3]))
    def estimate_density(self, x):
        """Score x via the Bayes-rule estimate in p_bayes()."""
        x_ = self.__call__(x)
        return p_bayes(x_, self.y, self.h, self.z)
    @staticmethod
    def preprocess(x):
        # Reshape flat MNIST vectors to NHWC and pad 28x28 -> 32x32.
        im = np.reshape(x, [-1, 28, 28, 1])
        im = np.round(im).astype(np.float32)  # NOTE important !?
        return np.pad(im, [(0,0), (2,2), (2,2), (0,0)], 'constant', constant_values=0)
if __name__ == '__main__':
    # Smoke test: a forward pass must preserve the input shape.
    tf.enable_eager_execution()
    x = tf.random_normal((100, 28, 28, 1))
    nn = InfoVAE(12, 16, 3)
    x_ = nn(x)
    # loss = nn.make_losses(x)
    assert x_.shape == x.shape
| 2.75 | 3 |
telepot/test/test35a_admin.py | mpunkenhofer/bs-irc-telegram-bot | 0 | 12772485 | <filename>telepot/test/test35a_admin.py
import sys
import asyncio
import telepot
import telepot.namedtuple
import telepot.aio
from telepot.aio.routing import by_content_type, make_content_type_routing_table
class AdminBot(telepot.aio.Bot):
    """Bot exercising the chat-admin API calls on group membership events."""
    async def on_chat_message(self, msg):
        """Prompt the user: first ask for an edit, then to join a group."""
        content_type, chat_type, chat_id = telepot.glance(msg)
        if 'edit_date' not in msg:
            await self.sendMessage(chat_id, 'Edit the message, please.')
        else:
            await self.sendMessage(chat_id, 'Add me to a group, please.')
        # Re-route chat updates by content type so that
        # on_new_chat_member below receives membership events.
        r = telepot.aio.helper.Router(by_content_type(), make_content_type_routing_table(self))
        self._router.routing_table['chat'] = r.route
    async def on_new_chat_member(self, msg, new_chat_member):
        """On someone joining, dump chat info, admin list and member count."""
        print('New chat member:', new_chat_member)
        content_type, chat_type, chat_id = telepot.glance(msg)
        r = await self.getChat(chat_id)
        print(r)
        r = await self.getChatAdministrators(chat_id)
        print(r)
        print(telepot.namedtuple.ChatMemberArray(r))
        r = await self.getChatMembersCount(chat_id)
        print(r)
# The bot token is passed as the first command-line argument.
TOKEN = sys.argv[1]
bot = AdminBot(TOKEN)
loop = asyncio.get_event_loop()
#loop.set_debug(True)
# Poll Telegram for updates forever.
loop.create_task(bot.message_loop())
print('Send me a text message ...')
loop.run_forever()
| 2.203125 | 2 |
skbandit/bandits/base.py | dourouc05/scikit-bandit | 0 | 12772486 | <reponame>dourouc05/scikit-bandit
from abc import ABC, abstractmethod
from typing import Union, List, Dict
import numpy as np
class Bandit(ABC):
"""A stochastic, multi-armed bandit player.
A bandit player can be implemented using three methods:
* a constructor: to create the player (required parameters, number of arms, etc.)
* `pull()`: decide the current arm to pull (an integer number, starting at 0)
* `reward(arm, reward)`: when pulling the `arm`, the player got a `reward`. This function is supposed to, but does
not have to, be called after each call to `pull()`
* `rewards(rewards)`: in a full-information or semi-bandit setting, the rewards associated with all arms (or
just those that were played) at a given round
"""
def __init__(self, n_arms: int):
self._n_arms = n_arms
@abstractmethod
def pull(self, context: Union[None, np.ndarray] = None) -> Union[int, List[int]]:
pass
def reward(self, arm: int, reward: float, context: Union[None, np.ndarray] = None) -> None:
raise NotImplementedError
def rewards(self, reward: Union[List[float], Dict[int, float]], context: Union[None, np.ndarray] = None) -> None:
raise NotImplementedError
@property
def n_arms(self):
return self._n_arms
class RewardAccumulatorMixin:
def __init__(self, n_arms: int):
self._arm_counts = [0] * n_arms
self._total_rewards = [0.0] * n_arms
def reward(self, arm: int, reward: float, **kwargs) -> None:
self._arm_counts[arm] += 1
self._total_rewards[arm] += reward
@property
def total_rewards(self):
return self._total_rewards
@property
def arm_counts(self):
return self._arm_counts
# TODO: Explore what others implement:
# https://github.com/jkomiyama/banditlib
# https://www.di.ens.fr/~cappe/Code/PymaBandits/
# https://github.com/flaviotruzzi/AdBandits
# http://banditslilian.gforge.inria.fr/index.html -> https://smpybandits.readthedocs.io/en/latest/docs/Policies.html#submodules
| 3.75 | 4 |
tastyworks/__init__.py | olyoberdorf/tastyworks_api | 198 | 12772487 | <gh_stars>100-1000
import logging
import sys
log = logging.getLogger(__name__)
log.propagate = False
out_hdlr = logging.StreamHandler(sys.stdout)
out_hdlr.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
out_hdlr.setLevel(logging.INFO)
log.addHandler(out_hdlr)
log.setLevel(logging.INFO)
root = logging.getLogger()
root.addHandler(out_hdlr)
root.propagate = False
root.setLevel(logging.INFO)
| 2.03125 | 2 |
Multi Processing.py | christianandrew/Tugas_Akhir | 0 | 12772488 | import time
import multiprocessing
import TFLite_detection_webcam
import X
def machinelearning():
    """Run the TFLite webcam detection task in its own process."""
    # TODO(review): confirm the real entry point of the
    # TFLite_detection_webcam module; the original `exec(<module>)`
    # raises TypeError, so this call path never actually worked.
    TFLite_detection_webcam.main()


def sensors():
    """Run the sensor-reading task in its own process."""
    # TODO(review): the original body was left unfinished (`exec(` with
    # no argument); wire this to the appropriate function in module X.
    # A third process existed in the original but had no target at all.
    X.main()


if __name__ == '__main__':
    start = time.perf_counter()
    # target= must receive the callable itself; the original passed
    # `machinelearning()` (the call's result) and even `target=,`,
    # which was a syntax error.  Creating the processes inside the
    # __main__ guard is also required on spawn-based platforms.
    workers = [
        multiprocessing.Process(target=machinelearning, args=()),
        multiprocessing.Process(target=sensors, args=()),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    # perf_counter() is a point in time; subtract the start time to get
    # the elapsed duration (the original printed the raw counter value).
    finish = time.perf_counter() - start
    print("Finished running after: ", finish)
crash_course/ch13/exec/stars.py | dantin/python-by-example | 0 | 12772489 | <reponame>dantin/python-by-example
import pygame
import sys
from pygame.sprite import Group
from pygame.sprite import Sprite
class Settings():
    """Container for every configurable value of the game."""

    def __init__(self):
        """Set the default configuration."""
        # Screen geometry and background colour.
        self.screen_width, self.screen_height = 1200, 800
        self.bg_color = (230, 230, 230)
class Star(Sprite):
    """A sprite for one star, loaded from images/star.bmp."""

    def __init__(self, settings, screen):
        """Load the star image and park it near the screen's top left."""
        super().__init__()
        self.screen = screen
        self.settings = settings
        # The bitmap supplies both the drawable surface and its rect.
        image = pygame.image.load('images/star.bmp')
        self.image = image
        self.rect = image.get_rect()
        # Offset the star by its own width/height from the corner.
        self.rect.x = self.rect.width
        self.rect.y = self.rect.height

    def blitme(self):
        """Draw the star at its current location."""
        self.screen.blit(self.image, self.rect)
def check_keydown_events(event, settings, screen):
    """Respond to key presses."""
    # 'q' quits; settings/screen are accepted for signature parity with
    # the other event handlers even though they are unused here.
    if event.key == pygame.K_q:
        sys.exit()
def check_keyup_events(event):
    """Respond to key releases."""
    # No release behaviour is needed yet; kept for symmetry with
    # check_keydown_events and future extension.
    pass
def check_events(settings, screen):
    """Respond to key presses and mouse events."""
    # Drain pygame's event queue once per frame.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        elif event.type == pygame.KEYDOWN:
            check_keydown_events(event, settings, screen)
        elif event.type == pygame.KEYUP:
            check_keyup_events(event)
def update_screen(settings, screen, stars):
    """Update images on the screen and flip to the new screen.

    *stars* is the sprite Group whose members are all redrawn.
    """
    # Redraw the screen during each pass through the loop.
    screen.fill(settings.bg_color)
    # Redraw all stars.
    stars.draw(screen)
    # Make the most recently drawn screen visible.
    pygame.display.flip()
def get_number_rows(settings, star_height):
    """Return how many rows of stars fit vertically on the screen.

    Each row occupies a two-star-height slot (the star plus spacing),
    after reserving one star height of margin.
    """
    free_height = settings.screen_height - star_height
    return int(free_height / (2 * star_height))
def get_number_stars_x(settings, star_width):
    """Return how many stars fit in a single row across the screen.

    Each star occupies a two-star-width slot (the star plus spacing),
    after reserving one star width of margin.
    """
    free_width = settings.screen_width - star_width
    return int(free_width / (2 * star_width))
def create_star(settings, screen, stars, star_number, row_number):
    """Create a star and place it in the row."""
    star = Star(settings, screen)
    star_width = star.rect.width
    star_height = star.rect.height
    # Each star sits in a two-star-wide/high slot; the extra star of
    # space provides the spacing between neighbours.
    star.x = star_width + 2 * star_width * star_number
    star.rect.x = star.x
    star.rect.y = star_height + 2 * star_height * row_number
    stars.add(star)
def create_stars(settings, screen, stars):
    """Create a full group of stars."""
    # Create a throwaway star just to measure its width/height.
    star = Star(settings, screen)
    number_stars_x = get_number_stars_x(settings, star.rect.width)
    number_rows = get_number_rows(settings, star.rect.height)
    # Create the full grid of stars.
    for row_number in range(number_rows):
        for star_number in range(number_stars_x):
            create_star(settings, screen, stars, star_number, row_number)
def run():
    """Initialize pygame, settings and screen objects, then loop forever."""
    pygame.init()
    settings = Settings()
    screen = pygame.display.set_mode(
        (settings.screen_width, settings.screen_height))
    pygame.display.set_caption('Stars')
    # Make a group to store stars.
    stars = Group()
    # Create the full grid of stars.
    create_stars(settings, screen, stars)
    # Main loop: process input, then redraw, every frame.
    while True:
        check_events(settings, screen)
        update_screen(settings, screen, stars)
# Start the game when the module is executed.
run()
| 3.578125 | 4 |
leetcode/python/93.restore-ip-addresses.py | phiysng/leetcode | 3 | 12772490 | <filename>leetcode/python/93.restore-ip-addresses.py<gh_stars>1-10
from typing import List
# valid ip address has 4 number, each of them is [0,255] and is valid decimal number
# which indicates we need to rule out number like this : [01,00]
# valid ip address has 4 numbers, each in [0, 255] with no leading
# zeros (so "01" and "00" are rejected, "0" alone is allowed)
class Solution:
    """Backtracking split of a digit string into four valid IP octets."""
    def __init__(self):
        self.validIp = []
    def restoreIpAddresses(self, s: str) -> List[str]:
        """Return every valid dotted-quad address *s* can be split into."""
        self.validIp.clear()
        self._restoreIpAddresses(s, [], 3)
        return self.validIp
    # ip is either 0 or 123 , 0 can't be the highest bit of non-zero number
    def checkValidIpNum(self, s: str):
        """True when *s* has a forbidden leading zero (e.g. "01", "00")."""
        return len(s) != 1 and s[0] == '0'
    def _restoreIpAddresses(self, s: str, partial: List[str], iteration: int) -> None:
        """Place the next octet; *iteration* counts dots still to place."""
        if iteration < 0:
            return
        if iteration == 0:
            # The whole remainder must itself be one valid octet.
            if len(s) >= 4 or self.checkValidIpNum(s):
                return
            if 0 <= int(s) <= 255:
                partial.append(s)
                self.validIp.append('.'.join(partial))
                partial.pop()
            return
        for cut in range(1, 4):
            remainder = s[cut:]
            # Too few digits left for the remaining octets: longer cuts
            # only make it worse, so stop entirely.
            if len(remainder) < iteration:
                return
            # Too many digits left to ever fit: try a longer octet.
            if len(remainder) > iteration * 3:
                continue
            octet = s[:cut]
            if cut != 1 and octet[0] == '0':
                continue
            if iteration == 3:
                partial = []
            if int(octet) > 255:
                continue
            partial.append(octet)
            # recurse to place the next octet
            self._restoreIpAddresses(remainder, partial, iteration - 1)
            partial.pop()  # undo the choice before trying a longer octet
| 3.46875 | 3 |
projects/blueprints/data_visualisation/world_gdp.py | only-romano/junkyard | 0 | 12772491 | <filename>projects/blueprints/data_visualisation/world_gdp.py
#! Json practice
import json
import pygal_maps_world.maps as pm
from countries import get_country_code
from pygal.style import RotateStyle as RS, LightColorizedStyle as LCS
# Load the GDP records dumped from the World Bank data set.
filename = 'gdp2.json'
with open(filename) as f:
    pop_data = json.load(f)

# Map each plottable country code to its 2016 GDP value.
cc_gdp = {}
for pop_dict in pop_data:
    if pop_dict['Year'] != 2016:
        continue
    country_name = pop_dict['Country Name']
    gdp = int(float(pop_dict['Value']))
    code = get_country_code(country_name)
    if code:
        cc_gdp[code] = gdp

# Split the countries into three GDP bands.
cc_pops_1, cc_pops_2, cc_pops_3 = {}, {}, {}
for cc, pop in cc_gdp.items():
    if pop < 50000000000:
        cc_pops_1[cc] = pop
    elif pop < 500000000000:
        cc_pops_2[cc] = pop
    else:
        cc_pops_3[cc] = pop

# Show how many countries fell into each band.
print(len(cc_pops_1), len(cc_pops_2), len(cc_pops_3))

# Render the world map, one series per GDP band.
wm_style = RS('#336699', base_style=LCS)
wm = pm.World(style=wm_style)
wm.title = 'GDP in 2016, by Country'
wm.add('Up to 50 bil', cc_pops_1)
wm.add('50 to 500 bil', cc_pops_2)
wm.add('500+ bil', cc_pops_3)
wm.render_to_file('world_gdp.svg')
| 3.15625 | 3 |
01Firmware/PDM_USB/PDM_USB/src/endpoint0/chanstringgen.py | simongapp/xmos_usb_mems_interface | 9 | 12772492 |
def genstrings(outputChanCount, chanString, portString, structureString, adc_dac):
    """Print C-preprocessor blocks defining USB channel name strings.

    For every channel index i in [1, outputChanCount) this emits one
    `#if (NUM_USB_CHAN_... > i-1)` block whose body concatenates the
    applicable "Analogue i" / "SPDIF n" / "ADAT n" string fragments,
    separated by "/", depending on which feature macros are defined.

    outputChanCount -- one past the highest channel index to emit
    chanString      -- channel-count macro suffix, e.g. "OUT" or "IN"
    portString      -- port macro suffix, e.g. "TX" or "RX"
    structureString -- struct field name prefix, e.g. "output" or "input"
    adc_dac         -- I2S channel macro suffix, "DAC" or "ADC"
    """
    # NOTE(review): the ADAT numbering branches always test ADAT_TX_INDEX,
    # even when portString is "RX" -- presumably ADAT_{p}_INDEX was
    # intended; confirm against the generated header before changing.
    for i in range(1,outputChanCount):
        print "#if (NUM_USB_CHAN_{c} > {i}-1)\n\
    .{s}ChanStr_{i} = \"\"\n\
#if ({i} < I2S_CHANS_{adcdac}+1)\n\
    \"Analogue {i}\"\n\
#endif\n\
#if (({i} < SPDIF_{p}_INDEX+2+1) && ({i} > SPDIF_{p}_INDEX)) && defined(SPDIF_{p})\n\
#if ({i} < I2S_CHANS_{adcdac}+1)\n\
    \"/\"\n\
#endif\n\
#if({i} - SPDIF_{p}_INDEX == 1)\n\
    \"SPDIF 1\"\n\
#elif({i} - SPDIF_{p}_INDEX == 2)\n\
    \"SPDIF 2\"\n\
#endif\n\
#endif\n\
#if (({i} < ADAT_{p}_INDEX+8+1) && ({i} > ADAT_{p}_INDEX)) && defined(ADAT_{p})\n\
#if (({i} < SPDIF_{p}_INDEX+2+1) && ({i} > SPDIF_{p}_INDEX)) && defined(SPDIF_{p}) || ({i} < I2S_CHANS_{adcdac}+1)\n\
    \"/\"\n\
#endif\n\
#if({i} - ADAT_TX_INDEX == 1)\n\
    \"ADAT 1\"\n\
#elif({i} - ADAT_TX_INDEX == 2)\n\
    \"ADAT 2\"\n\
#elif({i} - ADAT_TX_INDEX == 3)\n\
    \"ADAT 3\"\n\
#elif({i} - ADAT_TX_INDEX == 4)\n\
    \"ADAT 4\"\n\
#elif({i} - ADAT_TX_INDEX == 5)\n\
    \"ADAT 5\"\n\
#elif({i} - ADAT_TX_INDEX == 6)\n\
    \"ADAT 6\"\n\
#elif({i} - ADAT_TX_INDEX == 7)\n\
    \"ADAT 7\"\n\
#elif({i} - ADAT_TX_INDEX == 8)\n\
    \"ADAT 8\"\n\
#endif\n\
#endif\n\
,\n#endif\n".format(i=i, c=chanString, p=portString, s=structureString, adcdac=adc_dac);
    return;
# Emit the complete generated header on stdout: banner comment, then the
# 32 output (DAC/TX) channel strings, then the 32 input (ADC/RX) ones.
print "/* AUTOGENERATED using chanstringgen.py */\n"
print "/* Not very nice looking but the standard preprocessor is not very powerful\n   and we save some memory over doing this all at runtime */"
print "/* Output Strings */\n\n"
genstrings(33, "OUT", "TX", "output", "DAC");
print "/* Input Strings */\n\n"
genstrings(33, "IN", "RX", "input", "ADC");
| 2.71875 | 3 |
cracking-code-interview/chapter_03/3-5_sort_stack.py | italo-batista/problems-solving | 0 | 12772493 | <reponame>italo-batista/problems-solving<gh_stars>0
import sys
sys.path.append('./datastructures')
from datastructures import Stack
def sort_stack(stack):
    """Sort ``stack`` in place so that the smallest value ends up on top.

    Classic two-stack insertion sort: items are moved one at a time onto a
    buffer stack that is kept ordered with its largest value on top;
    pouring the buffer back leaves ``stack`` ordered smallest-on-top.
    Returns ``stack``.
    """
    buffer_stack = Stack()
    while not stack.is_empty():
        current = stack.pop()
        # Send back anything larger so `current` lands in sorted position.
        while not buffer_stack.is_empty() and buffer_stack.peek() > current:
            stack.push(buffer_stack.pop())
        buffer_stack.push(current)
    # Pour the buffer back: largest pushed first, smallest ends up on top.
    while not buffer_stack.is_empty():
        stack.push(buffer_stack.pop())
    return stack
if __name__ == '__main__':
    # Smoke test: sort a small stack and compare against sorted().
    unsorted_stack = Stack()
    values = [8, 3, 5, 9, 7, 2]
    for value in values:
        unsorted_stack.push(value)

    # Reference stack: pushing descending values leaves smallest on top.
    reference_stack = Stack()
    for value in sorted(values, reverse=True):
        reference_stack.push(value)

    result_stack = sort_stack(unsorted_stack)
    while not result_stack.is_empty() and not reference_stack.is_empty():
        assert result_stack.pop() == reference_stack.pop()
| 3.703125 | 4 |
exonum-java-binding/exonum_launcher_java_plugins/exonum_java_runtime_plugin/plugin.py | skletsun/exonum-java-binding | 0 | 12772494 | # Copyright 2019 The Exonum Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Any
from exonum_launcher.runtimes.runtime import RuntimeSpecLoader
# Import the generated protobuf module; fail fast with a clear error when
# the .proto definitions have not been compiled into the package.
try:
    from .proto import deploy_arguments_pb2
except (ModuleNotFoundError, ImportError):
    raise RuntimeError("Protobuf definition is not found")
class JavaDeploySpecLoader(RuntimeSpecLoader):
    """Encodes Java-runtime artifact deploy specs into protobuf bytes."""

    def encode_spec(self, config: Dict[str, Any]) -> bytes:
        """Serialize ``config["artifact_filename"]`` into a DeployArguments message."""
        arguments = deploy_arguments_pb2.DeployArguments()
        arguments.artifact_filename = config["artifact_filename"]
        return arguments.SerializeToString()
| 2.0625 | 2 |
src/jNlp/eProcessing.py | Reynolddoss/jProcessing | 133 | 12772495 | <filename>src/jNlp/eProcessing.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pkg_resources import resource_stream
import sys, os, subprocess
from subprocess import call
import xml.etree.cElementTree as etree
import nltk
from nltk.stem.wordnet import WordNetLemmatizer
if __name__ == '__main__':
    # Module is import-only for now; nothing to run as a script.
    pass
| 1.75 | 2 |
pretrain_model.py | MatheusNtg/character-bert-pretraining | 27 | 12772496 | <filename>pretrain_model.py
# coding=utf-8
# Copyright (c) 2020, <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Script for pre-training BERT / CharacterBERT.
NOTE: this is adapted from an older version of:
https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/BERT/run_pretraining.py
"""
import os
import csv
import math
import random
import logging
import argparse
import datetime
import warnings
from concurrent.futures import ProcessPoolExecutor
import h5py
import numpy as np
from tqdm import tqdm, trange
import torch
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import (
DataLoader, RandomSampler, SequentialSampler, Dataset
)
import amp_C
import apex_C
from apex import amp
from apex.optimizers import FusedLAMB
from apex.parallel import DistributedDataParallel
from apex.parallel.distributed import flat_dist_call
from apex.amp import _amp_state
from schedulers import LinearWarmUpScheduler, PolyWarmUpScheduler
from transformers import (
BertConfig, BertTokenizer, BertForPreTraining,
CharacterBertConfig, CharacterBertTokenizer, CharacterBertForPreTraining
)
from utils.distributed import is_main_process
warnings.filterwarnings("ignore")
# Root working directory; data/model paths below are resolved relative to it.
# Raises KeyError at import time if WORKDIR is not exported in the environment.
WORKDIR = os.environ['WORKDIR']
LOGGING_FORMAT = "%(asctime)s | PID: %(process)d | %(filename)s | %(levelname)s - %(message)s"
logging.basicConfig(format=LOGGING_FORMAT, datefmt="%d/%m/%Y %H:%M:%S", level=logging.INFO)
# Label value that CrossEntropyLoss ignores; used to mask out non-predicted
# positions when building the MLM labels in `PretrainingDataset.__getitem__`.
IGNORE_INDEX = torch.nn.CrossEntropyLoss().ignore_index
def set_all_random_seeds(random_seed: int, verbose: bool = True):
    r"""Seed every RNG we rely on (random, numpy, torch, CUDA) with ``random_seed``."""
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
        seed_fn(random_seed)
    if torch.cuda.is_available():
        # Seed every visible CUDA device as well.
        torch.cuda.manual_seed_all(random_seed)
    if verbose:
        logging.info("Setting random seed to: %d", random_seed)
class PretrainingDataset(Dataset):
    r"""
    Dataset over pre-training examples stored in a single .hdf5 file.

    Args:
        hdf5_fpath (:obj:`str`):
            Path to an .hdf5 file containing the pre-training data.
        max_masked_tokens_per_input (:obj:`int`):
            Hard cap on the number of masked (MLM-predicted) tokens per
            input sequence; must match the value used to build the file.
    """
    def __init__(self,
            hdf5_fpath: str,
            max_masked_tokens_per_input
    ):
        self.hdf5_fpath = hdf5_fpath
        self.max_masked_tokens_per_input = max_masked_tokens_per_input
        # Eagerly copy every column into memory so the hdf5 handle can be
        # closed right away and never touched by DataLoader workers.
        hdf5_file = h5py.File(hdf5_fpath, "r")
        column_names = [
            'input_ids',
            'input_mask',
            'segment_ids',
            'masked_lm_positions',
            'masked_lm_ids',
            'next_sentence_labels'
        ]
        self.inputs = [np.asarray(hdf5_file[name][:]) for name in column_names]
        hdf5_file.close()

    def __len__(self):
        """Total number of samples in the pre-training dataset."""
        return len(self.inputs[0])

    def __getitem__(self, index):
        """Return one example as ``[input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels]``."""
        (ids_column, mask_column, segment_column,
         mlm_position_column, mlm_id_column, nsp_column) = self.inputs

        input_ids = torch.from_numpy(ids_column[index].astype(np.int64))
        input_mask = torch.from_numpy(mask_column[index].astype(np.int64))
        segment_ids = torch.from_numpy(segment_column[index].astype(np.int64))
        masked_lm_positions = torch.from_numpy(mlm_position_column[index].astype(np.int64))
        masked_lm_ids = torch.from_numpy(mlm_id_column[index].astype(np.int64))
        # The NSP label is a scalar, hence the extra np.asarray wrapping.
        next_sentence_labels = torch.from_numpy(
            np.asarray(nsp_column[index].astype(np.int64)))

        # MLM labels: IGNORE_INDEX everywhere except at the masked
        # positions, which carry the original token ids.
        masked_lm_labels = IGNORE_INDEX * torch.ones(
            (input_ids.shape[0],), dtype=torch.long)
        num_predictions = self.max_masked_tokens_per_input
        padded_positions = (masked_lm_positions == 0).nonzero()
        if len(padded_positions) != 0:
            # Position 0 marks padding: only entries before it are real masks.
            num_predictions = padded_positions[0].item()
        masked_lm_labels[masked_lm_positions[:num_predictions]] = \
            masked_lm_ids[:num_predictions]

        return [
            input_ids, segment_ids, input_mask,
            masked_lm_labels, next_sentence_labels
        ]
def create_pretraining_dataloader(
    hdf5_fpath: str,
    max_masked_tokens_per_input: int,
    batch_size: int
):
    r"""Build a DataLoader of randomly-sampled pre-training batches.

    Args:
        hdf5_fpath (:obj:`str`):
            Path to an .hdf5 file containing the pre-training data.
        max_masked_tokens_per_input (:obj:`int`):
            Hard cap on masked tokens (MLM predictions) per sequence;
            must match the value used when building the file.
        batch_size (:obj:`int`):
            Batch size of the tensors the DataLoader yields.

    Returns the DataLoader together with ``hdf5_fpath`` so callers that
    submit this function to an executor can tell which file produced it.
    """
    dataset = PretrainingDataset(
        hdf5_fpath=hdf5_fpath,
        max_masked_tokens_per_input=max_masked_tokens_per_input
    )
    dataloader = DataLoader(
        dataset,
        sampler=RandomSampler(dataset),
        batch_size=batch_size,
        num_workers=4,
        pin_memory=True
    )
    return dataloader, hdf5_fpath
def parse_args():
    r"""Parse command-line arguments for pre-training.

    Returns an ``argparse.Namespace`` whose attributes are consumed by
    ``ModelPretrainer``: input/output paths, checkpointing policy,
    optimization hyperparameters, fp16/distributed flags and logging
    options. Exits with an argparse error if required paths are missing.
    """
    parser = argparse.ArgumentParser()
    ##################################################################
    # Required parameters:
    # ---------------------------------------------------------------
    # - input/output dirs
    # - model config (BertConfig / CharacterBertConfig)
    # - a flag for pre-training CharacterBERT instead of BERT
    ##################################################################
    parser.add_argument(
        "--hdf5_directory",
        type=str, required=True,
        help=\
            # Fixed typo: "contraining" -> "containing"
            "Path to a directory containing hdf5 files: training.*.hdf5, "
            "validation.*.hdf5 and test.*.hdf5 files."
    )
    parser.add_argument(
        "--output_directory",
        type=str, required=True,
        help=\
            "Path to a directory where model checkpoints and metrics "
            "will be saved."
    )
    parser.add_argument(
        '--is_character_bert',
        action='store_true',
        help="Pre-train CharacterBERT instead of BERT."
    )
    ##################################################################
    # Other parameters
    ##################################################################
    # Parameters related to checkpoint handling
    parser.add_argument(
        '--random_seed',
        required=False, type=int, default=42,
        help=\
            "An intial seed for controlling some of the randomness."
    )
    parser.add_argument(
        "--local_rank",
        type=int, default=-1,
        help=\
            "Identifier of the current process within the distributed process "
            "group. This is always `-1` when distributed training is deactivated."
    )
    parser.add_argument(
        '--phase1_end_step',
        type=int, default=7038,
        help=\
            "Number of training steps (backprops) in pre-training phase n°1: "
            "`max_input_length=128`and `max_masked_tokens_per_input=20`."
    )
    parser.add_argument(
        '--phase2',
        action='store_true',
        help=\
            "Whether it is pre-training phase n°2: "
            "`max_input_length=512`and `max_masked_tokens_per_input=80`."
    )
    parser.add_argument(
        "--init_checkpoint",
        type=str, default=None,
        help="An initial checkpoint to start pre-training from."
    )
    parser.add_argument(
        "--resume_pretraining",
        action='store_true',
        help="Whether to resume pre-training from a checkpoint."
    )
    parser.add_argument(
        '--resume_step',
        type=int, default=-1,
        help=\
            "Step to resume pre-training from. By default, this is `-1` "
            "which results in resuming from the latest checkpoint available."
    )
    ##################################################################
    # Training hyperparameters
    parser.add_argument(
        '--max_input_length',
        required=False, type=int, default=128,
        help=\
            "Maximum sequence length for the model input. "
            "Set this according to the input .hdf5 files contents."
    )
    parser.add_argument(
        "--max_masked_tokens_per_input",
        type=int, default=20,
        help=\
            "Hard limit on the number of tokens that can be masked. "
            "Set this according to the input .hdf5 files contents."
    )
    parser.add_argument(
        '--num_accumulation_steps',
        type=int, default=512,
        help=\
            "Number of steps (forward passes) during which gradients are "
            "accumulated before running a single model parameters update."
    )
    parser.add_argument(
        "--target_batch_size",
        type=int, default=8192,
        help=\
            "Target batch size post-accumulation (actual batch size is "
            "derived from the number of accumulation steps). For example, if "
            "`target_batch_size=32` and `num_accumulation_steps=4` then the "
            "actual batch size will be `32/4 = 8`. This is useful for "
            "achieving larger batch sizes while keeping an actual batch size "
            "that is small enough to fit in memory."
    )
    parser.add_argument(
        "--learning_rate",
        type=float, default=6e-3,
        help="The initial learning rate for the FusedLAMB optimizer."
    )
    parser.add_argument(
        "--warmup_proportion",
        type=float, default=0.2843,
        help=\
            "A value of X means that learning rate will increase during "
            "(100*X)%% of pre-training steps before reaching the desired value "
            "then decrease to 0 during the rest of pre-training steps."
    )
    # NOTE(review): step count parsed as float (not int) -- downstream code
    # (PolyWarmUpScheduler, comparisons) tolerates floats; confirm before
    # tightening to int.
    parser.add_argument(
        "--total_steps",
        type=float, default=7038,
        help="Total number of pre-training steps to perform."
    )
    ##################################################################
    # fp16 related parameters
    parser.add_argument(
        '--fp16',
        action='store_true',
        help="Whether to use 16-bit float precision instead of 32-bit"
    )
    parser.add_argument(
        '--loss_scale',
        type=float, default=0.0,
        help='Loss scaling, positive power of 2 values can improve fp16 convergence.'
    )
    parser.add_argument(
        '--allreduce_post_accumulation',
        action='store_true',
        help="Whether to do allreduces during gradient accumulation steps."
    )
    parser.add_argument(
        '--allreduce_post_accumulation_fp16',
        action='store_true',
        help="Whether to do fp16 allreduce post accumulation.")
    ##################################################################
    # Logging and checkpointing
    parser.add_argument(
        '--do_validation',
        action='store_true',
        help="Whether to run a validation step before checkpointing."
    )
    parser.add_argument(
        '--checkpoint_interval',
        type=int, default=200,
        help=\
            "Number of model updates before a model checkpoint is saved."
    )
    parser.add_argument(
        '--num_checkpoints_to_keep',
        type=int, default=3,
        help=\
            "Maximum number of checkpoints to keep."
    )
    parser.add_argument(
        '--log_freq',
        type=float, default=1.0,
        help='Frequency of logging loss.'
    )
    parser.add_argument(
        '--tensorboard_id',
        type=str, default='default',
        help="Name of the directory where Tensorboard logs will be saved."
    )
    args = parser.parse_args()
    return args
class ModelPretrainer:
r"""A helper class for pre-training BERT and CharacterBERT models."""
    def __init__(self, args):
        """Configure the trainer from parsed CLI ``args``.

        Side effects: seeds all RNGs, selects the CUDA device and -- in
        distributed mode -- initializes the NCCL process group; creates
        ``output_directory`` when starting from scratch.
        """
        self.start_datetime = datetime.datetime.now()
        # Set attributes from parsed arguments
        self.hdf5_directory = args.hdf5_directory
        self.output_directory = args.output_directory
        self.tensorboard_id = args.tensorboard_id
        self.is_character_bert = args.is_character_bert
        self.local_rank = args.local_rank
        self.phase1_end_step = args.phase1_end_step
        self.phase2 = args.phase2
        self.init_checkpoint = args.init_checkpoint
        self.resume_pretraining = args.resume_pretraining
        self.resume_step = args.resume_step
        self.max_input_length = args.max_input_length
        self.max_masked_tokens_per_input = args.max_masked_tokens_per_input
        self.target_batch_size = args.target_batch_size
        self.learning_rate = args.learning_rate
        self.total_steps = args.total_steps
        self.warmup_proportion = args.warmup_proportion
        self.num_accumulation_steps = args.num_accumulation_steps
        self.allreduce_post_accumulation = args.allreduce_post_accumulation
        self.fp16 = args.fp16
        self.loss_scale = args.loss_scale
        self.allreduce_post_accumulation_fp16 = args.allreduce_post_accumulation_fp16
        self.log_freq = args.log_freq
        self.do_validation = args.do_validation
        self.checkpoint_interval = args.checkpoint_interval
        self.num_checkpoints_to_keep = args.num_checkpoints_to_keep
        self.random_seed = args.random_seed
        # Main process: rank 0 of the process group (or the single process).
        self.is_main_process = (
            self.local_rank in [-1, 0]) and is_main_process()
        if self.is_main_process:
            logging.info('Preparing to run pre-training using parameters:')
            for argname, argvalue in vars(args).items():
                logging.info(' * %s: %s', argname, argvalue)
        # Set the random seed for reproducibility
        set_all_random_seeds(self.random_seed, verbose=self.is_main_process)
        # Make sure CUDA is available (it won't be if you're not using GPUs):
        assert torch.cuda.is_available(), "CUDA is unavailable (are you using GPUs?)"
        # Set CUDA-related attributes
        self.training_is_distributed = (self.local_rank != -1)
        if self.training_is_distributed:
            torch.cuda.set_device(self.local_rank)
            self.device = torch.device("cuda", self.local_rank)
            # Initialize distributed backend (takes care of synchronizing nodes/GPUs)
            torch.distributed.init_process_group(backend='nccl', init_method='env://')
            self.n_gpu = 1
        else:
            # TODO: test this
            self.device = torch.device("cuda")
            self.n_gpu = torch.cuda.device_count()
            self.allreduce_post_accumulation = False
            self.allreduce_post_accumulation_fp16 = False
        # Post-accumulation allreduce only makes sense with accumulation.
        if self.num_accumulation_steps == 1:
            self.allreduce_post_accumulation = False
            self.allreduce_post_accumulation_fp16 = False
        logging.info(
            "Distributed Training: %s, Number of GPUs: %d, Device: `%s`, Local Rank: `%s` (is_main: `%s`)",
            self.training_is_distributed, self.n_gpu, self.device, self.local_rank, self.is_main_process,
        )
        # Derive actual batch size from target batch size and accumulation steps:
        assert self.num_accumulation_steps >= 1, \
            "`num_accumulation_steps` should be greater or equal to 1"
        assert self.target_batch_size % self.num_accumulation_steps == 0, \
            "`target_batch_size` should be divisible by `num_accumulation_steps`"
        self.batch_size = self.target_batch_size // self.num_accumulation_steps
        # Make sure self.output_directory is empty when starting a training from scratch:
        if not self.resume_pretraining:
            os.makedirs(self.output_directory, exist_ok=True)
            assert not any([
                fname.startswith('ckpt')
                for fname in os.listdir(self.output_directory)]), \
                "Output directory should be empty when not resuming from a previous checkpoint"
        self.global_step = None  # training step counter
        self.checkpoint = None  # checkpoint for resuming training
        self.model = None  # actual model we are pre-training
        self.optimizer = None  # the optimizer (FusedLAMB)
        self.lr_scheduler = None  # the scheduler (PolyWarmUpScheduler)
        self.tensorboard_writer = None  # helper for logging loss to Tensorboard
        self.best_validation_loss = float(1e6)  # best val. loss achieved so far
        self.most_recent_ckpts_paths = []  # list of most recent ckpt paths
    def prepare_model_optimizer_and_scheduler(self):
        r"""Prepares the model, the optimizer and the learning rate scheduler.

        Builds a (Character)BERT model from a local config, optionally
        restores model/optimizer state from a checkpoint, moves the model
        to the configured device, then sets up FusedLAMB, a polynomial
        warm-up scheduler, optional apex fp16 and (distributed) data
        parallelism. Sets ``self.model``, ``self.optimizer`` and
        ``self.lr_scheduler`` (and ``self.checkpoint`` when resuming).
        """
        ###################################################################
        # MODEL PREPARATION
        # -----------------
        # - step 1: Initialize a random model from config
        # - step 2: Load model weights from checkpoint if any
        # - step 3: Move model to device (GPU)
        ###################################################################
        # Initialize a random model according to a specific config:
        # NOTE: here we load from a physical path instead of using a keyword
        # as compute nodes may not allow downloading from online hubs
        if self.is_character_bert:
            model_config = CharacterBertConfig.from_pretrained(
                os.path.join(WORKDIR, 'data', 'character-bert'))
            model = CharacterBertForPreTraining(model_config)
        else:
            model_config = BertConfig.from_pretrained(
                os.path.join(WORKDIR, 'data', 'bert-base-uncased'))
            model = BertForPreTraining(model_config)
        if self.is_main_process:
            logging.info(
                "Initialized %s using Config:\n%s",
                "CharacterBERT" if self.is_character_bert else "BERT",
                model_config
            )
        # Load checkpoint if any:
        if not self.resume_pretraining:
            # CASE: no checkpoint -> training from scratch
            self.global_step = 0
            if self.is_main_process:
                logging.info("Pre-training from scratch (good luck!)")
        else:
            if self.init_checkpoint:
                # CASE: load checkpoint from direct path
                self.global_step = 0
                init_checkpoint = self.init_checkpoint
                if self.is_main_process:
                    logging.info(
                        "Resuming pre-training from specific checkpoint `%s`",
                        init_checkpoint
                    )
            else:
                # CASE: load checkpoint from resume_step
                if self.is_main_process:
                    logging.info(
                        "Resuming pre-training from step `%s`. "
                        "Looking inside `output_directory` for checkpoints...",
                        self.resume_step
                    )
                if self.resume_step == -1:
                    # CASE: resume_step == -1, load latest checkpoint
                    model_names = [
                        fname
                        for fname in os.listdir(self.output_directory)
                        if fname.endswith(".pt")]
                    assert model_names, "Could not find any checkpoints to resume from."
                    # Checkpoint files are named `ckpt_<step>.pt`.
                    self.resume_step = max([
                        int(x.split('.pt')[0].split('_')[1].strip())
                        for x in model_names])  # TODO: find a better way for this
                    if self.is_main_process:
                        logging.info(
                            "Resuming from latest checkpoint: ckpt_%s.pt",
                            self.resume_step
                        )
                else:
                    # CASE: resume_step == X, load checkpoint: `ckpt_X.pt`
                    if self.is_main_process:
                        logging.info(
                            "Resuming from checkpoint: ckpt_%s.pt",
                            self.resume_step
                        )
                self.global_step = self.resume_step
                init_checkpoint = os.path.join(
                    self.output_directory, f"ckpt_{self.resume_step}.pt")
            # Load the actual checkpoint file
            self.checkpoint = torch.load(
                init_checkpoint, map_location="cpu"
            )
            # NOTE: Keeping these lines below as a reminder that re-training on
            # a different domain with CharacterBERT requires changing the
            # output layer with a topK tokens matrix from the new domain.
            # # Case where we would retrain a general_domain CharacterBERT
            # # on the medical domain. Don't use the general domain output layer:
            # if self.is_medical_domain and self.is_character_bert and (not self.phase2):
            #     model.load_state_dict(
            #         {
            #             k: v for (k, v) in self.checkpoint['model'].items()
            #             # Don't load output matrix from general domain model
            #             if not k.startswith('cls.predictions')  # ignoring the old output layer
            #         },
            #         strict=False)
            #     if self.is_main_process:
            #         logging.warning(
            #             "Loaded model weights from `%s`, "
            #             "but ignored the `cls.predictions` module.",
            #             init_checkpoint)
            # # General case: load weights from checkpoint
            # else:
            #     model.load_state_dict(self.checkpoint['model'], strict=True)
            #     if self.is_main_process:
            #         logging.info('Loaded model weights from `%s`',
            #                      init_checkpoint)
            # General case: load weights from checkpoint
            model.load_state_dict(self.checkpoint['model'], strict=True)
            if self.is_main_process:
                logging.info('Loaded model weights from `%s`', init_checkpoint)
        # Deduce previous steps from phase1 when in phase2
        if self.phase2 and not self.init_checkpoint:
            self.global_step -= self.phase1_end_step
        if self.is_main_process:
            logging.info("Training will start at global_step=%s", self.global_step)
        # Move model to GPU:
        model.to(self.device)
        if self.is_main_process:
            logging.info("Model was moved to device: %s", self.device)
        ###################################################################
        # OPTIMIZER / SCHEDULER PREPARATION
        # ---------------------------------
        # - step 1: Define the optimizer (FusedLAMB w/ some weight decay)
        # - step 2: Define the learning rate scheduler (PolyWarmUpScheduler)
        ###################################################################
        # Initialize an optimizer (no weight decay on bias/gamma/beta/LayerNorm):
        no_decay = ['bias', 'gamma', 'beta', 'LayerNorm']  # no weight decay
        optimizer_grouped_parameters = [
            {
                'params': [
                    param for name, param in model.named_parameters()
                    if not any((nd in name) for nd in no_decay)],
                'weight_decay': 0.01
            },
            {
                'params': [
                    param for name, param in model.named_parameters()
                    if any((nd in name) for nd in no_decay)],
                'weight_decay': 0.0
            }
        ]
        optimizer = FusedLAMB(
            optimizer_grouped_parameters, lr=self.learning_rate)
        if self.is_main_process:
            logging.info("Using optimizer: %s", optimizer)
        # Initialize a learning rate scheduler:
        self.lr_scheduler = PolyWarmUpScheduler(
            optimizer,
            warmup=self.warmup_proportion,
            total_steps=self.total_steps
        )
        if self.is_main_process:
            logging.info("Using scheduler: %s", self.lr_scheduler)
        ###################################################################
        # OTHER PREPARATION STEPS
        # -----------------------
        # - step 1: Set up Mixed Precision training (fp16) if required
        # - step 2: Load optimizer state from checkpoint if any
        # - step 3: Set up DataParallel
        ###################################################################
        # Set up fp16:
        if self.fp16:
            if self.is_main_process:
                logging.info("Setting up `Almost FP16` Mixed Precision...")
            if self.loss_scale == 0:
                model, optimizer = amp.initialize(
                    model, optimizer, opt_level="O2", loss_scale="dynamic")
            else:
                model, optimizer = amp.initialize(
                    model, optimizer, opt_level="O2", loss_scale=self.loss_scale)
            # Force the initial dynamic loss scale to 2**20 (apex private API).
            amp._amp_state.loss_scalers[0]._loss_scale = 2**20
        # Load optimizer state from checkpoint
        if self.resume_pretraining:
            if self.is_main_process:
                logging.info("Loading optimizer state from checkpoint...")
            if self.phase2 or self.init_checkpoint:
                keys = list(self.checkpoint['optimizer']['state'].keys())
                # Override hyperparameters from previous self.checkpoint
                for key in keys:
                    self.checkpoint['optimizer']['state'][key]['step'] = self.global_step
                for i, _ in enumerate(self.checkpoint['optimizer']['param_groups']):
                    self.checkpoint['optimizer']['param_groups'][i]['step'] = self.global_step
                    self.checkpoint['optimizer']['param_groups'][i]['t_total'] = self.total_steps
                    self.checkpoint['optimizer']['param_groups'][i]['warmup'] = self.warmup_proportion
                    self.checkpoint['optimizer']['param_groups'][i]['lr'] = self.learning_rate
                if self.is_main_process:
                    logging.info("Overwrote the following parameters with new values:")
                    logging.info("* step: %s", self.global_step)
                    logging.info("* t_total: %s", self.total_steps)
                    logging.info("* warmup: %s", self.warmup_proportion)
                    logging.info("* lr: %s", self.learning_rate)
            optimizer.load_state_dict(self.checkpoint['optimizer'])
            # Restore AMP master parameters
            # NOTE(review): load_state_dict is called a second time below,
            # after AMP's lazy master-weight init -- appears intentional so
            # the restored state lands on the fp32 master copies; confirm.
            if self.fp16:
                if self.is_main_process:
                    logging.info("Restoring AMP master parameters (optimizer)...")
                optimizer._lazy_init_maybe_master_weights()
                optimizer._amp_stash.lazy_init_called = True
                optimizer.load_state_dict(self.checkpoint['optimizer'])
                for param, saved_param in zip(amp.master_params(optimizer), self.checkpoint['master params']):
                    param.data.copy_(saved_param.data)
        # Distribute model
        if self.training_is_distributed:
            if not self.allreduce_post_accumulation:
                model = DistributedDataParallel(
                    model,
                    message_size=250000000,
                    gradient_predivide_factor=\
                        torch.distributed.get_world_size()
                )
            else:
                # Broadcast initial weights from rank 0 to every worker.
                flat_dist_call(
                    [param.data for param in model.parameters()],
                    torch.distributed.broadcast,
                    (0,)
                )
        elif self.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        # Set the values of self.model and self.optimizer
        self.model = model
        self.optimizer = optimizer
    def run_validation(self):
        r"""Runs a validation step and returns a boolean saying if the model has improved.

        Iterates over every `validation.*.hdf5` file (sharded across
        distributed workers), accumulates the model loss under
        ``torch.no_grad()``, all-reduces it across workers, updates
        ``self.best_validation_loss`` and logs to TensorBoard.
        """
        # NOTE(review): this method calls torch.distributed.get_world_size()
        # and get_rank() even on the non-initialized path below -- validation
        # appears to assume distributed mode is always initialized; confirm.
        # Build a list of validation .hdf5 file paths:
        files = []
        for fname in os.listdir(self.hdf5_directory):
            fpath = os.path.join(self.hdf5_directory, fname)
            if os.path.isfile(fpath) and fname.startswith('validation.') and fname.endswith('.hdf5'):
                files.append(fpath)
        f_start_id = 0
        files.sort()
        num_files = len(files)
        # Select first .hdf5 file
        if \
            torch.distributed.is_initialized() \
            and torch.distributed.get_world_size() > num_files:
            # More workers than files: spread workers over files round-robin.
            remainder = torch.distributed.get_world_size() % num_files
            hdf5_fpath = files[
                (
                    f_start_id * torch.distributed.get_world_size()
                    + torch.distributed.get_rank()
                    + remainder * f_start_id
                ) % num_files
            ]
        else:
            hdf5_fpath = files[
                (
                    f_start_id * torch.distributed.get_world_size()
                    + torch.distributed.get_rank()
                ) % num_files
            ]
        # Set previous_file variable for next iteration
        previous_file = hdf5_fpath
        # Load the pre-training data from the .hdf5 file
        pretraining_data = PretrainingDataset(
            hdf5_fpath=hdf5_fpath,
            max_masked_tokens_per_input=self.max_masked_tokens_per_input
        )
        validation_sampler = RandomSampler(pretraining_data)  # This could be SequentialSampler
        validation_dataloader = DataLoader(
            pretraining_data,
            sampler=validation_sampler,
            batch_size=self.batch_size * self.n_gpu,
            num_workers=4, pin_memory=True
        )
        steps = 0
        average_loss = 0.0  # averaged loss every self.log_freq steps
        # Use model in `evaluation mode`
        with torch.no_grad():
            self.model.eval()
            if self.is_main_process:
                logging.info("*************************")
                logging.info("**    Evaluation step    **")
                logging.info("*************************")
            # Loop over the rest of pre-training data files
            # Background worker pre-loads the next file's DataLoader.
            pool = ProcessPoolExecutor(1)
            if len(files) == 1:
                f_start_id = -1
            for f_id in range(f_start_id + 1, 1 + len(files)//torch.distributed.get_world_size()):
                # Submit creation of next DataLoader
                if torch.distributed.get_world_size() > num_files:
                    # NOTE(review): `remainder` is only bound when the first
                    # branch above ran; both conditions match, so this is
                    # consistent -- but fragile if either test changes.
                    hdf5_fpath = files[
                        (
                            f_id * torch.distributed.get_world_size()
                            + torch.distributed.get_rank()
                            + remainder * f_id
                        ) % num_files
                    ]
                else:
                    hdf5_fpath = files[
                        (
                            f_id * torch.distributed.get_world_size()
                            + torch.distributed.get_rank()
                        ) % num_files
                    ]
                if self.is_main_process:
                    logging.info(
                        "Local rank: %s | File n° %s: %s",
                        self.local_rank, f_id, os.path.basename(previous_file)
                    )
                previous_file = hdf5_fpath
                dataset_future = pool.submit(
                    create_pretraining_dataloader,
                    hdf5_fpath,
                    self.max_masked_tokens_per_input,
                    self.batch_size * self.n_gpu,
                )
                # Iterate over batches (w/ progress bar for main process)
                validation_batches = tqdm(
                    validation_dataloader,
                    desc="Computing loss on the validation set..."
                ) if self.is_main_process else validation_dataloader
                for batch in validation_batches:
                    steps += 1
                    (
                        input_ids,
                        segment_ids,
                        input_mask,
                        masked_lm_labels,
                        next_sentence_labels
                    ) = [tensor.to(self.device) for tensor in batch]
                    # Forward Pass
                    model_output = self.model(
                        input_ids=input_ids,
                        token_type_ids=segment_ids,
                        attention_mask=input_mask,
                        labels=masked_lm_labels,
                        next_sentence_label=next_sentence_labels)
                    loss = model_output['loss']
                    if self.n_gpu > 1:
                        loss = loss.mean()  # mean() to average on multi-gpu.
                    divisor = self.num_accumulation_steps
                    if self.num_accumulation_steps > 1:
                        if not self.allreduce_post_accumulation:
                            # this division was merged into predivision
                            loss = loss / self.num_accumulation_steps
                            divisor = 1.0
                    # Update average
                    average_loss += loss.item()
                # Move to next file after using up all batches of current file
                del validation_dataloader
                validation_dataloader, hdf5_fpath = \
                    dataset_future.result(timeout=None)
        del validation_dataloader
        # NOTE(review): `divisor` is only bound inside the batch loop; an
        # empty validation set would raise NameError below -- confirm the
        # validation files are guaranteed non-empty.
        num_steps = max(1, int(steps / self.num_accumulation_steps))
        average_loss = torch.tensor(average_loss, dtype=torch.float32).cuda()
        average_loss = average_loss / (num_steps * divisor)
        if torch.distributed.is_initialized():
            average_loss /= torch.distributed.get_world_size()
            torch.distributed.all_reduce(average_loss)
        # Check if model has improved
        validation_loss = average_loss.item()
        model_has_improved = False
        if validation_loss < self.best_validation_loss:
            model_has_improved = True
            self.best_validation_loss = validation_loss
        # Log
        if self.is_main_process:
            logging.info(
                "\nTotal Validation Steps: %s | Validation Loss = %.3f",
                num_steps, validation_loss
            )
            self.tensorboard_writer.add_scalar(
                "Avg. validation loss", validation_loss,
                global_step=self.global_step
            )
        # NOTE: /!\ Put model back in `training mode`
        self.model.train()
        return model_has_improved
    def run_pretraining(self):
        r"""Runs the pre-training process.

        Infinite epoch loop over shuffled ``training.*.hdf5`` shards; each rank
        consumes one shard while the next shard's DataLoader is built in a
        background process.  Gradients are accumulated over
        ``num_accumulation_steps`` micro-batches before each optimizer step.
        Logs, validates and checkpoints at the configured intervals and
        returns once ``self.global_step`` reaches ``self.total_steps``.
        """
        if self.is_main_process:
            logging.info("*********************************")
            logging.info("*** Starting pre-training ***")
            logging.info("*********************************")
            logging.info("Training on GPU: %s", torch.cuda.get_device_name(0))
            logging.info("Target batch size: %s", self.target_batch_size)
            logging.info("Number of accumulation steps: %s", self.num_accumulation_steps)
            logging.info("Actual batch size: %s", self.batch_size)
        self.model.train()
        self.most_recent_ckpts_paths = []
        average_loss = 0.0  # averaged loss every self.log_freq steps
        epoch = 0
        training_steps = 0
        # Single-worker pool used to prepare the NEXT shard's DataLoader
        # asynchronously while the current shard is being consumed.
        pool = ProcessPoolExecutor(1)
        if self.is_main_process:
            tensorboard_log_fpath = os.path.join(
                WORKDIR,
                '.tensorboard_logs',
                self.tensorboard_id,
                self.start_datetime.strftime("%d-%m-%Y_%H-%M-%S")
            )
            logging.info(
                "Writing TensorBoard logs in: %s",
                tensorboard_log_fpath.replace(WORKDIR, '$WORKDIR'))
            self.tensorboard_writer = SummaryWriter(tensorboard_log_fpath)
        # NOTE: Infinite loop over epochs, termination is handled via iteration count
        while True:
            # If beginning of pre-training: read files from hdf5_directory and shuffle
            if (not self.resume_pretraining) or (epoch > 0) \
                    or (self.phase2 and self.global_step < 1) or self.init_checkpoint:
                files = []
                for fname in os.listdir(self.hdf5_directory):
                    fpath = os.path.join(self.hdf5_directory, fname)
                    if os.path.isfile(fpath) and fname.startswith('training.') and fname.endswith('.hdf5'):
                        files.append(fpath)
                f_start_id = 0
                files.sort()
                # Seeded shuffle: deterministic per epoch so every rank
                # computes the same file order without communication.
                random.Random(self.random_seed + epoch).shuffle(files)
            # Else: get id of next file (resuming from a checkpoint)
            else:
                f_start_id = self.checkpoint['files'][0]
                files = self.checkpoint['files'][1:]
                self.resume_pretraining = False
            num_files = len(files)
            # Get the current process hdf5 file
            # and handle case where there are more processes than files left:
            if \
                    torch.distributed.is_initialized() \
                    and torch.distributed.get_world_size() > num_files:
                remainder = torch.distributed.get_world_size() % num_files
                hdf5_fpath = files[
                    (
                        f_start_id * torch.distributed.get_world_size()
                        + torch.distributed.get_rank()
                        + remainder * f_start_id
                    ) % num_files
                ]
            else:
                # NOTE(review): this branch still calls torch.distributed
                # accessors — it assumes the process group is initialized.
                hdf5_fpath = files[
                    (
                        f_start_id * torch.distributed.get_world_size()
                        + torch.distributed.get_rank()
                    ) % num_files
                ]
            # Set previous_file variable for next iteration
            previous_file = hdf5_fpath
            # Load the pre-training data from the .hdf5 file
            pretraining_data = PretrainingDataset(
                hdf5_fpath=hdf5_fpath,
                max_masked_tokens_per_input=self.max_masked_tokens_per_input
            )
            train_sampler = RandomSampler(pretraining_data)
            train_dataloader = DataLoader(
                pretraining_data,
                sampler=train_sampler,
                batch_size=self.batch_size * self.n_gpu,
                num_workers=4, pin_memory=True
            )
            overflow_buf = None
            if self.allreduce_post_accumulation:
                overflow_buf = torch.cuda.IntTensor([0])
            # Loop over the rest of pre-training data files
            if len(files) == 1:
                f_start_id = -1
            for f_id in range(f_start_id + 1, len(files)):
                # Submit creation of next DataLoader
                # NOTE(review): unlike the check above, this one is not guarded
                # by torch.distributed.is_initialized(), and `remainder` is only
                # bound when world_size > num_files held earlier — confirm.
                if torch.distributed.get_world_size() > num_files:
                    hdf5_fpath = files[
                        (
                            f_id * torch.distributed.get_world_size()
                            + torch.distributed.get_rank()
                            + remainder * f_id
                        ) % num_files
                    ]
                else:
                    hdf5_fpath = files[
                        (
                            f_id * torch.distributed.get_world_size()
                            + torch.distributed.get_rank()
                        ) % num_files
                    ]
                if self.is_main_process:
                    logging.info(
                        "Local rank: %s | File n° %s: %s",
                        self.local_rank, f_id, os.path.basename(previous_file)
                    )
                previous_file = hdf5_fpath
                dataset_future = pool.submit(
                    create_pretraining_dataloader,
                    hdf5_fpath,
                    self.max_masked_tokens_per_input,
                    self.batch_size * self.n_gpu,
                )
                # Iterate over batches (w/ progress bar for main process)
                training_batches = tqdm(
                    train_dataloader,
                    desc="Pre-training..."
                ) if self.is_main_process else train_dataloader
                for batch in training_batches:
                    training_steps += 1
                    (
                        input_ids,
                        segment_ids,
                        input_mask,
                        masked_lm_labels,
                        next_sentence_labels
                    ) = [tensor.to(self.device) for tensor in batch]
                    # Forward Pass
                    model_output = self.model(
                        input_ids=input_ids,
                        token_type_ids=segment_ids,
                        attention_mask=input_mask,
                        labels=masked_lm_labels,
                        next_sentence_label=next_sentence_labels)
                    loss = model_output['loss']
                    if self.n_gpu > 1:
                        loss = loss.mean()  # mean() to average on multi-gpu.
                    divisor = self.num_accumulation_steps
                    if self.num_accumulation_steps > 1:
                        if not self.allreduce_post_accumulation:
                            # this division was merged into predivision
                            loss = loss / self.num_accumulation_steps
                            divisor = 1.0
                    # Compute gradients
                    if self.fp16:
                        with amp.scale_loss(
                                loss, self.optimizer,
                                delay_overflow_check=self.allreduce_post_accumulation) as scaled_loss:
                            scaled_loss.backward()
                    else:
                        loss.backward()
                    average_loss += loss.item()
                    # Take optimizer/scheduler step every (gradient_acc_steps) steps
                    # This is the model parameter update:
                    if training_steps % self.num_accumulation_steps == 0:
                        self.lr_scheduler.step()  # learning rate warmup
                        self.take_optimizer_step(overflow_buf)
                    # If reached max steps save everything and log final loss:
                    if self.global_step >= self.total_steps:
                        last_num_steps = int(
                            training_steps / self.num_accumulation_steps
                        ) % self.log_freq
                        last_num_steps = self.log_freq if last_num_steps == 0 else last_num_steps
                        average_loss = torch.tensor(average_loss, dtype=torch.float32).cuda()
                        average_loss = average_loss / (last_num_steps * divisor)
                        if torch.distributed.is_initialized():
                            average_loss /= torch.distributed.get_world_size()
                            torch.distributed.all_reduce(average_loss)
                        if self.is_main_process:
                            logging.info(
                                "Total Steps: %s | Final Loss = %.3f",
                                int(training_steps / self.num_accumulation_steps),
                                average_loss.item()
                            )
                            self.tensorboard_writer.add_scalar(
                                "Avg. training loss",
                                average_loss.item(), global_step=self.global_step)
                    # If at a logging step:
                    elif training_steps % (self.log_freq * self.num_accumulation_steps) == 0:
                        if self.is_main_process:
                            logging_message = (
                                f"Global step: {self.global_step} | "
                                f"Learning Rate: {self.optimizer.param_groups[0]['lr']:.2E} | "
                                f"Step Loss: {loss.item() * self.num_accumulation_steps / divisor:.3f} | "
                                f"Avg. Loss: {average_loss / (self.log_freq * divisor):.3f}"
                            )
                            # Update the tqdm description
                            training_batches.set_description(logging_message, refresh=True)
                            # Log average training loss to TensorBoard:
                            self.tensorboard_writer.add_scalar(
                                "Avg. training loss",
                                average_loss / (self.log_freq * divisor),
                                global_step=self.global_step)
                        average_loss = 0
                    # If reached max steps at log step or reached checkpoint step:
                    if \
                            self.global_step >= self.total_steps \
                            or training_steps % \
                            (self.checkpoint_interval * self.num_accumulation_steps) == 0:
                        # Check if model has improved then save a checkpoint if so
                        if self.do_validation:
                            model_has_improved = self.run_validation()
                        else:
                            model_has_improved = True
                        if self.is_main_process and model_has_improved:
                            self.make_checkpoint(f_id, files)
                        # End pre-training if reached max steps
                        if self.global_step >= self.total_steps:
                            del train_dataloader
                            return  # NOTE: breaks out of the training loop
                # Move to next file after using up all batches of current file
                del train_dataloader
                train_dataloader, hdf5_fpath = \
                    dataset_future.result(timeout=None)
            # Update epoch after going through all .hdf5 files
            epoch += 1
def take_optimizer_step(self, overflow_buf):
r"""Takes an optimizer step (updates the model weights)."""
if self.allreduce_post_accumulation:
# manually allreduce gradients after all accumulation steps
# check for Inf/NaN
# 1. allocate an uninitialized buffer for flattened gradient
scaler = _amp_state.loss_scalers[0]
master_grads = [
p.grad
for p in amp.master_params(self.optimizer)
if p.grad is not None
]
flat_grad_size = sum(p.numel() for p in master_grads)
allreduce_dtype = \
torch.float16 \
if self.allreduce_post_accumulation_fp16 \
else torch.float32
flat_raw = torch.empty(
flat_grad_size,
device='cuda', dtype=allreduce_dtype)
# 2. combine unflattening and predivision of unscaled 'raw' gradient
allreduced_views = apex_C.unflatten(flat_raw, master_grads)
overflow_buf.zero_()
amp_C.multi_tensor_scale(
65536,
overflow_buf,
[master_grads, allreduced_views],
scaler.loss_scale() /
(torch.distributed.get_world_size()
* self.num_accumulation_steps)
)
# 3. sum gradient across ranks. Because of the predivision, this averages the gradient
torch.distributed.all_reduce(flat_raw)
# 4. combine unscaling and unflattening of allreduced gradient
overflow_buf.zero_()
amp_C.multi_tensor_scale(
65536,
overflow_buf,
[allreduced_views, master_grads],
1./scaler.loss_scale()
)
# 5. update loss scale
scaler = _amp_state.loss_scalers[0]
old_overflow_buf = scaler._overflow_buf
scaler._overflow_buf = overflow_buf
had_overflow = scaler.update_scale()
scaler._overfloat_buf = old_overflow_buf
# 6. call optimizer step function
if had_overflow == 0:
self.optimizer.step()
self.global_step += 1
else:
# Overflow detected, print message and clear gradients
if self.is_main_process:
logging.info(
f"Rank {torch.distributed.get_rank()} "
":: Gradient overflow. Skipping step, "
f"reducing loss scale to {scaler.loss_scale()}"
)
if _amp_state.opt_properties.master_weights:
for param in self.optimizer._amp_stash.all_fp32_from_fp16_params:
param.grad = None
for param in self.model.parameters():
param.grad = None
else:
self.optimizer.step()
# NOTE: This basically does: optimizer.zero_grad()
for param in self.model.parameters():
param.grad = None
self.global_step += 1
def make_checkpoint(self, f_id, files):
r"""Saves a checkpoint of the model."""
logging.info("Saving a checkpoint of the current model...")
# NOTE: model may be an instance of apex.parallel.distributed.DistributedDataParallel
# in this case, model.module is the actual pytorch module
model_to_save = \
self.model.module \
if hasattr(self.model, 'module') \
else self.model
# Save model weights, optimizer state, AMP master parameters and
# the list of .hdf5 that are yet to be used (e.g. for resuming pre-training)
if self.resume_step < 0 or not self.phase2:
output_save_file = os.path.join(
self.output_directory,
f"ckpt_{self.global_step}.pt")
else:
output_save_file = os.path.join(
self.output_directory,
f"ckpt_{self.global_step + self.phase1_end_step}.pt")
torch.save(
{
'model': model_to_save.state_dict(),
'optimizer': self.optimizer.state_dict(),
'master params': list(amp.master_params(self.optimizer)),
'files': [f_id] + files
},
output_save_file
)
# Keep only a specific number of 'best' checkpoints
self.most_recent_ckpts_paths.append(output_save_file)
if len(self.most_recent_ckpts_paths) > self.num_checkpoints_to_keep:
checkpoint_to_remove = \
self.most_recent_ckpts_paths.pop(0)
os.remove(checkpoint_to_remove)
def main():
    r"""Entry point: build the pretrainer, run pre-training, report duration."""
    cli_args = parse_args()
    pretrainer = ModelPretrainer(cli_args)
    pretrainer.prepare_model_optimizer_and_scheduler()
    pretrainer.run_pretraining()
    finished_at = datetime.datetime.now()
    if pretrainer.is_main_process:
        logging.info(
            "The complete pre-training took approx. %s seconds",
            finished_at - pretrainer.start_datetime
        )


if __name__ == "__main__":
    main()
| 1.71875 | 2 |
config.py | enwawerueli/footprints | 1 | 12772497 | <gh_stars>1-10
class Config(object):
    """Static application-wide configuration constants."""
    ORG_NAME = 'footprints'            # organization identifier
    ORG_DOMAIN = 'footprints.devel'    # organization domain
    APP_NAME = 'Footprints'            # human-readable application name
    APP_VERSION = '0.4.0'              # application version string
| 1.257813 | 1 |
minimumSizeSubarraySum.py | anishmo99/DailyInterviewPro | 1 | 12772498 | <reponame>anishmo99/DailyInterviewPro
class Solution:
    def minSubArrayLen(self, s: int, nums: List[int]) -> int:
        """Length of the shortest contiguous subarray with sum >= s (0 if none).

        Sliding window: grow on the right, shrink from the left while the
        window sum still meets the target.
        """
        best = float('inf')
        window_sum = 0
        start = 0
        for end, value in enumerate(nums):
            window_sum += value
            while window_sum >= s:
                best = min(best, end - start + 1)
                window_sum -= nums[start]
                start += 1
        return 0 if best == float('inf') else best
functionaltests/api/camp/test_platform_endpoints.py | ed-/solum | 0 | 12772499 | <gh_stars>0
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from functionaltests.api import base
class PlatformDiscoveryTestCase(base.TestCase):
    """Checks that root CAMP discovery leads to the Solum CAMP v1.1 endpoint."""

    def test_get_root_discovers_camp_v1_1(self):
        # Fetch our platform_endpoints container resource (no auth required).
        response, payload = (self.client.
                             request_without_auth('camp/platform_endpoints/',
                                                  'GET'))
        self.assertEqual(200, response.status)
        container = json.loads(payload)
        self.assertEqual('platform_endpoints', container['type'])
        self.assertEqual('Solum_CAMP_endpoints', container['name'])
        links = container['platform_endpoint_links']
        # Exactly one endpoint should be advertised in the Link array.
        self.assertEqual(1, len(links))
        v1_1_link = links[0]
        self.assertEqual('Solum_CAMP_v1_1_endpoint',
                         v1_1_link['target_name'])
        # Strip the client's base URL to obtain a relative path.
        relative_url = v1_1_link['href'][len(self.client.base_url) + 1:]
        # Follow the link to the lone platform_endpoint resource.
        response, payload = (self.client.
                             request_without_auth(relative_url,
                                                  'GET'))
        self.assertEqual(200, response.status)
        endpoint = json.loads(payload)
        self.assertEqual('platform_endpoint', endpoint['type'])
        self.assertEqual('Solum_CAMP_v1_1_endpoint', endpoint['name'])
        self.assertEqual('CAMP 1.1', endpoint['specification_version'])
        self.assertEqual('Solum CAMP 1.1', endpoint['implementation_version'])
        self.assertEqual('KEYSTONE-2.0', endpoint['auth_scheme'])
        self.assertEqual('%s/camp/v1_1/platform/' % self.client.base_url,
                         endpoint['platform_uri'])
| 2.0625 | 2 |
random_exercises/ex1.py | ClaudiuCreanga/kaggle | 5 | 12772500 | <reponame>ClaudiuCreanga/kaggle<gh_stars>1-10
from typing import List
def rotLeft(a, d):
    """Left-rotate list `a` by d positions, in place, and return it.

    Fixes: slicing makes this O(n) instead of the O(n*d) pop-and-append
    loop, and an empty list (or d >= len(a)) no longer misbehaves.
    """
    if not a:
        return a
    shift = d % len(a)
    a[:] = a[shift:] + a[:shift]  # slice assignment preserves in-place mutation
    return a
#print(rotLeft([1, 2, 3, 4, 5], 3))
def minimumBribes(q):
    """Count swaps needed to bubble-sort the queue; "Too chaotic" if anyone
    overtook more than two people in a single pass."""
    total = 0
    chaotic = False
    for boundary in range(len(q) - 1, 0, -1):
        # The chaos flag is only acted upon at the start of the next pass.
        if chaotic:
            total = "Too chaotic"
            break
        consecutive_swaps = 0
        for pos in range(boundary):
            if q[pos] > q[pos + 1]:
                q[pos], q[pos + 1] = q[pos + 1], q[pos]
                total += 1
                consecutive_swaps += 1
                if consecutive_swaps > 2:
                    chaotic = True
                    break
            else:
                consecutive_swaps = 0
    return total
#print(minimumBribes([5, 1, 2, 3, 7, 8, 6, 4]))
def bubblesort(q):
    """Sort q in place with bubble sort and return it."""
    size = len(q)
    for limit in range(size - 1, 0, -1):
        for idx in range(limit):
            if q[idx] > q[idx + 1]:
                q[idx], q[idx + 1] = q[idx + 1], q[idx]
    return q
#print(bubblesort([5,1,4,2,8]))
def two_strings(a, b):
    """Return whether a and b are at most one edit apart.

    One edit = a single insertion, deletion or replacement.  The original
    implementation mutated `b` mid-scan and miscounted several cases; this
    version uses the standard two-pointer walk.  Returns the same strings the
    original did: "the words match" / "the words don't match".
    """
    if abs(len(a) - len(b)) > 1:
        return "the words don't match"
    shorter, longer = (a, b) if len(a) <= len(b) else (b, a)
    i = j = 0
    edits = 0
    while i < len(shorter) and j < len(longer):
        if shorter[i] != longer[j]:
            edits += 1
            if edits > 1:
                return "the words don't match"
            # Same length: replacement, advance both; otherwise skip the
            # extra character in the longer word only.
            if len(shorter) == len(longer):
                i += 1
            j += 1
        else:
            i += 1
            j += 1
    # Any trailing character in the longer word is the (single) allowed edit.
    return "the words match"
# print(two_strings("abc", "abc"))
# print(two_strings("adc", "abc"))
# print(two_strings("abc", "adc"))
# print(two_strings("bbc", "adc"))
# print(two_strings("abc", "ddc"))
# print(two_strings("bc", "dbc"))
# print(two_strings("abc", "bc"))
# print(two_strings("abc", "c"))
# print(two_strings("c", "c"))
# print(two_strings("ac", "aac"))
# print(two_strings("ac", "aaac"))
# print(two_strings("aaaaac", "aaac"))
# print(two_strings("aaaaac", "aaaac"))
# print(two_strings("aaac", "aaaac"))
# print(two_strings("abcde", "accde"))
# print(two_strings("abcde", "abcdf"))
# print(two_strings("abcdef", "abcde"))
# print(two_strings("geek", "geeks"))
# print(two_strings("m", ""))
# print(two_strings("", "m"))
def find_two_strings(a, b):
    """Return True if a and b are at most one edit apart (insert/delete/replace).

    BUG FIX: the original's `elif len(b) < len(a)` branch was unreachable
    (the preceding `if len(a) > len(b)` already covered it), so insertions
    into `a` (e.g. "ac" vs "aac") were rejected.
    """
    if abs(len(a) - len(b)) > 1:
        return False
    i = j = 0
    edits = 0
    while i < len(a) and j < len(b):
        if a[i] != b[j]:
            edits += 1
            if len(a) > len(b):
                i += 1          # skip the extra char in a
            elif len(b) > len(a):
                j += 1          # skip the extra char in b (fixed condition)
            else:
                i += 1          # same length: replacement
                j += 1
        else:
            i += 1
            j += 1
    # A leftover tail character counts as one more edit.
    if i < len(a) or j < len(b):
        edits += 1
    return not edits > 1
#
# print(find_two_strings("abc", "abc"))
# print(find_two_strings("adc", "abc"))
# print(find_two_strings("abc", "adc"))
# print(find_two_strings("bbc", "adc"))
# print(find_two_strings("abc", "ddc"))
# print(find_two_strings("bc", "dbc"))
# print(find_two_strings("abc", "bc"))
# print(find_two_strings("abc", "c"))
# print(find_two_strings("c", "c"))
# print(find_two_strings("ac", "aac"))
# print(find_two_strings("ac", "aaac"))
# print(find_two_strings("aaaaac", "aaac"))
# print(find_two_strings("aaaaac", "aaaac"))
# print(find_two_strings("aaac", "aaaac"))
# print(find_two_strings("abcde", "accde"))
# print(find_two_strings("abcde", "abcdf"))
# print(find_two_strings("abcdef", "abcde"))
# print(find_two_strings("geek", "geeks"))
# print(find_two_strings("m", ""))
# print(find_two_strings("", "m"))
def build_spiral_array(a):
    """Return the clockwise spiral traversal of matrix `a` as a
    space-separated string."""
    out = []
    top, bottom = 0, len(a)
    left, right = 0, len(a[0])
    while top < bottom and left < right:
        # left-to-right across the top row
        for c in range(left, right):
            out.append(a[top][c])
        top += 1
        # top-to-bottom down the right column
        for r in range(top, bottom):
            out.append(a[r][right - 1])
        right -= 1
        # right-to-left across the bottom row (if any rows remain)
        if top < bottom:
            for c in range(right - 1, left - 1, -1):
                out.append(a[bottom - 1][c])
            bottom -= 1
        # bottom-to-top up the left column (if any columns remain)
        if left < right:
            for r in range(bottom - 1, top - 1, -1):
                out.append(a[r][left])
            left += 1
    return " ".join(map(str, out))
#
# print(build_spiral_array([[1, 2, 3, 4],[5, 6, 7, 8],[9, 10, 11, 12],[13, 14, 15, 16]]))
# print(build_spiral_array([ [1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12],[13, 14, 15, 16, 17, 18]]))
# print(build_spiral_array([ [1,2,3], [8,9,4], [7,6,5]]))
def look_and_say(n):
    """Return the first n+1 look-and-say terms after "1" (["11", "21", ...]).

    BUG FIX: the original used a fixed pair-substitution table that consumed
    characters two at a time, so any run longer than two (e.g. "111" inside
    "111221") was encoded incorrectly.  This version run-length encodes each
    term properly.
    """
    result = []
    term = "1"
    for _ in range(n + 1):
        pieces = []
        run_char = term[0]
        run_len = 0
        for ch in term:
            if ch == run_char:
                run_len += 1
            else:
                pieces.append(str(run_len) + run_char)
                run_char = ch
                run_len = 1
        pieces.append(str(run_len) + run_char)
        term = "".join(pieces)
        result.append(term)
    return result
#print(look_and_say(9))
def countnndSay(n):
    """Return the n-th term (1-indexed) of the look-and-say sequence."""
    if n == 1:
        return "1"
    term = "11"  # covers n == 2; further terms are derived below
    for _ in range(3, n + 1):
        pieces = []
        run_len = 1
        for j in range(1, len(term) + 1):
            # Emit a run either at a character change or at the end.
            if j == len(term) or term[j] != term[j - 1]:
                pieces.append(str(run_len) + term[j - 1])
                run_len = 1
            else:
                run_len += 1
        term = "".join(pieces)
    return term
#print(countnndSay(9))
def myregex(pattern, mystring):
    """Full-match `mystring` against `pattern`, where '.' matches any one char.

    BUG FIXES: the original raised IndexError when the pattern was shorter
    than the string, and wrongly returned True when the pattern was longer.
    A full match now requires equal lengths.
    """
    if len(pattern) != len(mystring):
        return False
    for p, c in zip(pattern, mystring):
        if p != c and p != '.':
            return False
    return True
#print(myregex("dasss","dasss"))
def bonetrousle(n, k, b):
    """Pick b distinct box sizes from 1..k summing to n; return [-1] if impossible.

    BUG FIX: the original used float division (`/`) for the feasibility bounds
    and the distribution math; for very large inputs (the commented tests use
    values near 1e18) float rounding gives wrong picks.  All arithmetic is now
    exact integer arithmetic.
    """
    lowest = b * (b + 1) // 2           # sum of 1..b (both products are even)
    highest = b * (2 * k - b + 1) // 2  # sum of (k-b+1)..k
    if n < lowest or n > highest:
        return [-1]
    if b == 1:
        return [n]
    surplus = n - lowest
    bump = surplus // b      # amount added to every pick
    extras = surplus % b     # the `extras` largest picks get one more
    picks = [base + bump for base in range(1, b + 1)]
    for idx in range(b - extras, b):
        picks[idx] += 1
    return picks
# print(bonetrousle(12, 8, 3))
# print(bonetrousle(10, 3, 3))
# print(bonetrousle(22,7,6))
# print(bonetrousle(38, 10, 7))
# print(bonetrousle(809880023823305331, 906161411045895284, 52920))
def bonetrousle2(n, k, b):
    """Alternative bonetrousle: shift 1..b upward by a constant, then lower
    the first `over` picks by one so the total lands exactly on n."""
    smallest = b * (b + 1) // 2
    shift = (n - smallest) // b + 1
    over = smallest + shift * b - n
    picks = list(range(1, b + 1))
    picks = ([p + shift - 1 for p in picks[:over]]
             + [p + shift for p in picks[over:]])
    # Validate the picks stay inside 1..k.
    if picks[0] >= 1 and picks[-1] <= k:
        return picks
    return [-1]
#print(bonetrousle2(12, 8, 3))
def reverseStr(s: str, k: int) -> str:
    """Reverse the first k characters of every 2k-sized window of s."""
    pieces = []
    for start in range(0, len(s), k):
        chunk = s[start:start + k]
        # Even-numbered k-chunks (0th, 2nd, ...) are the ones to reverse.
        if (start // k) % 2 == 0:
            chunk = chunk[::-1]
        pieces.append(chunk)
    return "".join(pieces)
#print(reverseStr("abcdefg", 2))
def isPalindrome(x: int) -> bool:
    """Return True if the decimal representation of x reads the same backwards.

    BUG FIX: the original called `str(int)` — stringifying the builtin `int`
    class itself instead of the argument — so it always compared against
    "<class 'int'>" and returned False.
    """
    digits = str(x)
    backwards = ""
    idx = len(digits) - 1
    while idx >= 0:
        backwards += digits[idx]
        idx -= 1
    return backwards == digits
def isPalindrome2( x: int) -> bool:
to_str = str(x)
reversed_str = to_str[::-1]
return reversed_str == to_str
#print(isPalindrome(121))
def reverseStringConstantSPace(s):
    """Reverse s by two-pointer swapping.

    (Python strings are immutable, so a list copy is used — the swap itself
    is in place on that copy.)
    """
    chars = list(s)
    lo, hi = 0, len(chars) - 1
    while lo < hi:
        chars[lo], chars[hi] = chars[hi], chars[lo]
        lo, hi = lo + 1, hi - 1
    return "".join(chars)
# print(reverseStringConstantSPace("abcqdef"))
def isPalindromeInt(x: int) -> bool:
    """Palindrome check by numerically reversing the digits (no strings)."""
    original = x
    reversed_digits = 0
    while x > 0:
        reversed_digits = reversed_digits * 10 + x % 10
        x //= 10
    # Negative inputs never enter the loop and thus compare 0 != x -> False.
    return reversed_digits == original
#print(isPalindromeInt(121))
def removeDuplicates(nums: List[int]) -> int:
    """Remove consecutive duplicates from the sorted list in place; return the
    new length.

    PERF FIX: the original popped elements one by one (O(n^2) — each pop
    shifts the tail).  This two-pointer version is O(n) and still truly
    shrinks the list, preserving the original contract.
    """
    if not nums:
        return 0
    write = 1
    for read in range(1, len(nums)):
        if nums[read] != nums[write - 1]:
            nums[write] = nums[read]
            write += 1
    del nums[write:]  # keep the original behaviour: the list itself shrinks
    return len(nums)
#print(removeDuplicates([1,1,2]))
def removeDuplicates2(nums):
    """Two-pointer dedup of a sorted list; returns the unique-prefix length.

    Unlike removeDuplicates, the tail of the list is left untouched.
    """
    if not nums:
        return 0
    slow = 0
    for fast in range(1, len(nums)):
        if nums[fast] != nums[slow]:
            slow += 1
            nums[slow] = nums[fast]
    return slow + 1
#print(removeDuplicates2([1,1,2]))
def maxProfit(prices: List[int]) -> int:
    """Max profit with unlimited transactions: sum every positive daily gain."""
    return sum(
        today - yesterday
        for yesterday, today in zip(prices, prices[1:])
        if today > yesterday
    )
#print(maxProfit([7, 1, 5, 3, 6, 4]))
def permutations(string, step = 0):
    """Print every permutation of `string` (swap-based recursion).

    Each level fixes position `step` by swapping every later character into
    it on a fresh copy, so no explicit backtracking is needed.
    """
    if step == len(string):
        print("".join(string))
    for swap_idx in range(step, len(string)):
        working = list(string)  # copy so siblings don't see our swap
        working[step], working[swap_idx] = working[swap_idx], working[step]
        permutations(working, step + 1)
#print(permutations("ABCD"))
def permutations2(s):
    """Print all permutations of s using in-place swap plus backtrack."""
    chars = list(s)

    def backtrack(lo):
        if lo == len(chars) - 1:
            print("".join(chars))
        for pick in range(lo, len(chars)):
            chars[lo], chars[pick] = chars[pick], chars[lo]
            backtrack(lo + 1)
            chars[lo], chars[pick] = chars[pick], chars[lo]  # undo the swap

    return backtrack(0)
# print(permutations2("ABCD"))
def permute3(nums: List[int]) -> List[List[int]]:
    """Return all permutations of nums via swap/backtrack recursion."""
    collected = []

    def walk(pos=0):
        if pos == len(nums):
            collected.append(nums.copy())
            return
        for choice in range(pos, len(nums)):
            nums[pos], nums[choice] = nums[choice], nums[pos]
            walk(pos + 1)
            nums[pos], nums[choice] = nums[choice], nums[pos]  # backtrack

    walk()
    return collected
# print(permute3([0,1,2]))
def findIfpermutationIsPalindrom(s):
    """True iff some permutation of s is a palindrome.

    A string can be permuted into a palindrome exactly when at most one
    character occurs an odd number of times; track odd-count characters with
    a toggling set.
    """
    odd_count_chars = set()
    for ch in s:
        if ch in odd_count_chars:
            odd_count_chars.discard(ch)
        else:
            odd_count_chars.add(ch)
    return len(odd_count_chars) <= 1
#print(findIfpermutationIsPalindrom("tactcoaa"))
def isValid(s: str) -> bool:
    """Check that brackets in s are balanced and correctly nested."""
    opener_for = {
        ")": "(",
        "]": "[",
        "}": "{"
    }
    if s == "":
        return True
    pending = []
    for ch in s:
        if ch in "([{":
            pending.append(ch)
        else:
            if not pending:
                return False
            if opener_for[ch] != pending.pop():
                return False
    return not pending
#print(isValid("]"))
def twoSuma(a, t):
    """Return all index pairs (i, j), i < j, with a[i] + a[j] == t.

    BUG FIX: the original computed `t - a[i].lower()`, calling a string
    method on an int, which raised AttributeError on every call.
    Values are only recorded in `seen` when their complement is absent,
    matching the original's intended pairing.
    """
    seen = {}
    result = []
    for i, value in enumerate(a):
        complement = t - value
        if complement in seen:
            result.append((seen[complement], i))
        else:
            seen[value] = i
    return result
# print(twoSuma([2, 7, 11, 15, 8, 6, 1],9))
def isPalindrome3(s: str) -> bool:
    """Case-insensitive palindrome check that skips non-alphanumeric chars."""
    left = 0
    right = len(s) - 1
    half = len(s) // 2
    while left < half:
        if not s[left].isalnum():
            left += 1
        elif not s[right].isalnum():
            right -= 1
        elif s[left].lower() != s[right].lower():
            return False
        else:
            left += 1
            right -= 1
    return True
#print(isPalindrome3("A man, a plan, a canal: Panama"))
def validPalindrome5(s: str) -> bool:
    """True if s can become a palindrome by deleting at most one character.

    BUG FIX: the original only ever tried skipping the LEFT character — and
    then advanced BOTH pointers — so e.g. "abc" was wrongly reported True.
    On a mismatch we must try skipping either side and verify the remainder.
    """
    def _is_palindrome_range(lo, hi):
        # Plain palindrome check on s[lo..hi] inclusive.
        while lo < hi:
            if s[lo] != s[hi]:
                return False
            lo += 1
            hi -= 1
        return True

    lo, hi = 0, len(s) - 1
    while lo < hi:
        if s[lo] != s[hi]:
            return _is_palindrome_range(lo + 1, hi) or _is_palindrome_range(lo, hi - 1)
        lo += 1
        hi -= 1
    return True
def validPalindrome4(s: str) -> bool:
    """One-deletion palindrome check via slice comparison at the first
    mismatching pair."""
    n = len(s)
    for i in range(n // 2 + 1):
        if s[i] != s[n - 1 - i]:
            skip_left = s[i + 1:n - i]
            skip_right = s[i:n - i - 1]
            return skip_left == skip_left[::-1] or skip_right == skip_right[::-1]
    return True
def validPalindromeRec(s):
    """Recursive one-deletion palindrome check: on a mismatch, try skipping
    the left OR the right character exactly once."""
    def check(lo, hi, used_skip):
        while lo < hi:
            if s[lo] == s[hi]:
                lo += 1
                hi -= 1
            elif used_skip:
                return False
            else:
                return check(lo + 1, hi, True) or check(lo, hi - 1, True)
        return True

    return check(0, len(s) - 1, False)
def validPalindromeWhy(s):
    """One-deletion palindrome check.

    BUG FIX: the original compared fixed slices pivoted on len(s)//2 after
    the first mismatch, which breaks for odd-length inputs — e.g. "abcca"
    (delete 'b' to get the palindrome "acca") was wrongly rejected.  This
    version verifies the actual remaining span with one side skipped.
    """
    def _palindrome_between(lo, hi):
        span = s[lo:hi + 1]
        return span == span[::-1]

    start, end = 0, len(s) - 1
    while start < end:
        if s[start] != s[end]:
            return _palindrome_between(start + 1, end) or _palindrome_between(start, end - 1)
        start += 1
        end -= 1
    return True
# print(validPalindromeWhy("abba"))
# print(validPalindromeWhy("abcba"))
# print(validPalindromeWhy("ebcdba"))
# print(validPalindromeWhy("abcdedcba"))
# print(validPalindromeWhy("abcdedcb"))
def searchBinary(nums: List[int], target: int) -> int:
    """Binary search: return target's index, or its sorted insertion point."""
    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if nums[mid] == target:
            return mid
        if nums[mid] < target:
            lo = mid + 1
        else:
            hi = mid - 1
    # lo is the insertion point (equals the original's (r + l) // 2 + 1).
    return lo
def searchBinaryItirative(nums, target):
    """Binary search returning target's index, or -1 when absent."""
    def locate(lo, hi):
        if lo > hi:
            return -1
        mid = (lo + hi) // 2
        if nums[mid] == target:
            return mid
        if nums[mid] < target:
            return locate(mid + 1, hi)
        return locate(lo, mid - 1)

    return locate(0, len(nums) - 1)
def searchBinaryLoHi(nums, target):
    """Binary search returning target's index, else its insertion point."""
    low, high = 0, len(nums) - 1
    while low <= high:
        # low + (high - low) // 2 avoids overflow in fixed-width languages
        # (moot in Python, kept as the conventional form).
        mid = low + (high - low) // 2
        if nums[mid] < target:
            low = mid + 1
        elif nums[mid] > target:
            high = mid - 1
        else:
            return mid
    # At exit high == low - 1, so low equals (low + high) // 2 + 1.
    return low
# print(searchBinaryLoHi([1,3,5,6, 8, 9, 12], 2))
# print(searchBinaryItirative([1,3,5,6], 6))
# print(searchBinary([1,3,5,6, 8, 9, 10, 12], 10))
# print(searchBinary([1,3,5,6, 8, 9, 10, 12, 13], 13))
# print(searchBinary([1,3,5,6, 8, 9, 10, 12, 13], 1))
# print(searchBinary([1,3,5,6, 8, 9, 10, 12, 13], 8))
# print(searchBinary([1,3,5,6, 8, 9, 10, 12, 13], 90))
def findRadius2(houses: List[int], heaters: List[int]) -> int:
    """Minimum heater radius so every house is within reach of some heater.

    For each house, a binary search over the sorted heaters yields either an
    exact hit (distance 0, skip) or the insertion point; the answer for that
    house is the distance to the nearer of the two neighbouring heaters.
    O(He log He + Ho log He).
    """
    heaters.sort()
    radius = 0
    for house in houses:
        lo, hi = 0, len(heaters) - 1
        insertion = None
        while lo <= hi:
            mid = (lo + hi) // 2
            if heaters[mid] == house:
                break  # a heater sits on the house: contributes 0
            if heaters[mid] < house:
                lo = mid + 1
            else:
                hi = mid - 1
        else:
            insertion = lo  # no exact match: lo is the insertion point
        if insertion is not None:
            if insertion < len(heaters) - 1:
                above = heaters[insertion]
            else:
                above = heaters[-1]
            if insertion > 0:
                below = heaters[insertion - 1]
            else:
                below = heaters[0]
            radius = max(radius, min(abs(house - below), abs(above - house)))
    return radius
# print(findRadius2([1,5], [10]))
# print(findRadius2([1,2,3,4], [1,4]))
# print(findRadius2([1,2,3],[2]))
# print(findRadius2([1,2,3,4,5,6,7],[2]))
# print(findRadius2([1,2,3,4,5,6,7],[2,5]))
# print(findRadius2([1,2,3,4,5,6],[2,5]))
# print(findRadius2([2,3,1,4,6,5],[5, 2]))
# print(findRadius2([1,2,3],[1,2,3]))
# print(findRadius2([999,999,999,999,999],[499,500,500,501]))
# anagrams and palindrom
def checkTwoSOneEditAway(A: str, B: str) -> bool:
    """True if A and B are at most one edit (insert/delete/replace) apart.

    At the first mismatch the three single-edit repairs are tried by slice
    comparison.  BUG FIX: the insert-into-A case compared a single character
    to a slice (`A[i] == B[i+1:]`), so it only worked for one-char tails;
    it is now `A[i:] == B[i+1:]`.
    """
    if abs(len(A) - len(B)) > 1:
        return False
    for i in range(len(A)):
        if i < len(B) and A[i] != B[i]:
            # delete from A / replace / insert into A
            return A[i + 1:] == B[i:] or A[i + 1:] == B[i + 1:] or A[i:] == B[i + 1:]
    return True
# print(checkTwoSOneEditAway("pale", "ple"))
# print(checkTwoSOneEditAway("pales", "pale"))
# print(checkTwoSOneEditAway("pale", "bale"))
# print(checkTwoSOneEditAway("pale", "bae"))
# print(checkTwoSOneEditAway("pale", "pales"))
# print(checkTwoSOneEditAway("palas", "pale"))
# print(checkTwoSOneEditAway("palasaa", "palas"))
# print(checkTwoSOneEditAway("", "a"))
# print(checkTwoSOneEditAway("a", ""))
# print(checkTwoSOneEditAway("palas", "palas"))
def compress(s):
    """Run-length encode s ("aaab" -> "a3b1"); return the shorter of the
    encoding and the original string."""
    if not s:
        return s
    pieces = []
    run_char, run_len = s[0], 1
    for ch in s[1:]:
        if ch == run_char:
            run_len += 1
        else:
            pieces.append(run_char + str(run_len))
            run_char, run_len = ch, 1
    pieces.append(run_char + str(run_len))
    encoded = "".join(pieces)
    # Only return the encoding when it is strictly shorter.
    return encoded if len(encoded) < len(s) else s
# print(compress("aaabccccc"))
# print(compress(""))
def rotate(M):
    """Return a new matrix: M rotated 90 degrees clockwise.

    BUG FIX: the original allocated the output as `[[0, 0, 0] ...]`, i.e.
    hard-coded to 3 columns, so any non-3x3 square matrix raised IndexError.
    """
    n = len(M)
    result = [[0] * n for _ in range(n)]
    for src_row in range(n):
        dest_col = n - 1 - src_row  # row i of M becomes column n-1-i
        for src_col in range(n):
            result[src_col][dest_col] = M[src_row][src_col]
    return result
# print(rotate([[1,2,3], [6,7,8], [9,4,2]]))
def rotate2(M):
    """Rotate square matrix M 90 degrees clockwise in place, layer by layer."""
    n = len(M)
    for layer in range(n // 2):
        first = layer
        last = n - 1 - layer
        for pos in range(first, last):
            offset = pos - first
            saved_top = M[first][pos]
            M[first][pos] = M[last - offset][first]           # left  -> top
            M[last - offset][first] = M[last][last - offset]  # bottom -> left
            M[last][last - offset] = M[pos][last]             # right -> bottom
            M[pos][last] = saved_top                          # top   -> right
    return M
# print(rotate2([[1,2,3], [6,7,8], [9,4,2]]))
def zero(M):
    """Zero every row and column of M that contains a 0 (mutates and returns M)."""
    rows_to_zero = set()
    cols_to_zero = set()
    # First pass: record the coordinates of every zero.
    for r in range(len(M)):
        for c in range(len(M[0])):
            if M[r][c] == 0:
                rows_to_zero.add(r)
                cols_to_zero.add(c)
    # Second pass: clear the recorded rows and columns.
    for r in rows_to_zero:
        for c in range(len(M[0])):
            M[r][c] = 0
    for c in cols_to_zero:
        for r in range(len(M)):
            M[r][c] = 0
    return M
# print(zero([[1,1,1,1,1,1,1,1,1], [0,1,1,1,1,1,1,1,1], [1,1,1,1,1,1,1,1,1], [1,1,1,1,1,1,1,1,1]]))
def rotateS(s1, s2):
    """Brute-force rotation check: rotate s2 one position at a time and
    compare against s1."""
    rotated = s2
    for _ in range(len(s2)):
        rotated = rotated[1:] + rotated[0]
        if rotated == s1:
            return True
    return False
def rotateS2(s1, s2):
    """Check whether s1 is a rotation of s2 via the doubling trick.

    BUG FIX: added the length check — without it, ANY substring of s2+s2
    (e.g. a shorter s1 like "at" vs "water") was wrongly accepted as a
    rotation.  Note: equal empty strings now count as rotations.
    """
    if len(s1) != len(s2):
        return False
    return s1 in s2 + s2
def rotateS3(s1, s2):
    """Check whether s2 is a rotation of s1 (s2 must appear in s1+s1).

    BUG FIX: added the length check — without it any substring of s1+s1 was
    accepted.  Also returns the containment result directly instead of the
    redundant if/else.
    """
    if len(s1) != len(s2):
        return False
    return s2 in s1 + s1
#print(rotateS3("waterbottle", "erbottlewat"))
def calendarConflict(cal):
    """Group the names of events whose time intervals overlap.

    `cal` is a list of [start, end, name] entries, assumed sorted by start.
    Returns a list of groups (each a list of names) that mutually conflict;
    an event starting exactly when the previous one ends does NOT conflict.
    BUG FIX: an empty calendar no longer raises IndexError on `cal[0]`.
    """
    if not cal:
        return []
    conflicts = []
    group = [cal[0][2]]
    reach = cal[0][1]  # furthest end time seen in the current group
    for start, finish, name in cal[1:]:
        if start >= reach:
            # No overlap with the running group: close it if it conflicted.
            if len(group) > 1:
                conflicts.append(group)
            group = []
        reach = max(reach, finish)
        group.append(name)
    if len(group) > 1:
        conflicts.append(group)
    return conflicts
#print(calendarConflict([[1,2,"a"], [2,4, "b"], [3,5, "c"], [7,9, "d"]]))
class StackMin():
    """Stack that tracks its minimum in O(1): every entry stores the value
    together with the minimum that held BEFORE it was pushed, so `remove`
    can restore it.

    BUG FIX: the original tested `if self.current_min:` (truthiness), so a
    minimum of 0 looked like "no minimum" and the next push silently reset
    the tracking.  The emptiness test is now an explicit `is None`.
    """

    def __init__(self):
        self.data = []           # list of (value, min_before_push) tuples
        self.current_min = None  # None means the stack is empty

    def push(self, d):
        """Push d, remembering the minimum in force before this push."""
        if self.current_min is None:
            self.current_min = d
            self.data.append((d, None))
        else:
            self.data.append((d, self.current_min))
            if d < self.current_min:
                self.current_min = d

    def remove(self):
        """Pop the top entry; restore the previous minimum if the top was it."""
        value, prior_min = self.data.pop()
        if value == self.current_min:
            self.current_min = prior_min
# da = StackMin()
# print(da.current_min)
# da.push(3)
# print(da.current_min)
# da.push(2)
# print(da.current_min)
# da.push(7)
# print(da.current_min)
# da.push(1)
# print(da.current_min)
# da.remove()
# print(da.current_min)
# da.remove()
# print(da.current_min)
# da.remove()
# print(da.current_min)
# da.remove()
# print(da.current_min)
def SortStack(l):
    """Return the elements of stack `l` sorted ascending, using only push/pop.

    Selection-style: each round pops everything, keeps the smallest aside,
    and pushes the rest back via a helper stack.  BUG FIX: the original
    seeded each round with float('inf') and pushed that sentinel back onto
    `l`, polluting the input with one inf per round (it only terminated by
    accident).  This version uses the first popped element as the seed, so
    no sentinels are ever introduced.
    """
    sorted_out = []
    helper = []
    while l:
        smallest = l.pop()
        while l:
            item = l.pop()
            if item < smallest:
                helper.append(smallest)
                smallest = item
            else:
                helper.append(item)
        sorted_out.append(smallest)
        # Push the survivors back for the next round.
        while helper:
            l.append(helper.pop())
    return sorted_out
#print(SortStack([3,4,1,2,5,2]))
def coin_change(amount, coins):
    """Count the ways to make `amount` from unlimited coins (order-insensitive).

    Classic 1-D DP: processing one coin at a time keeps combinations
    unordered (no double counting of permutations).
    """
    ways = [1] + [0] * amount  # ways[0] = 1: one way to make zero
    for coin in coins:
        for total in range(coin, amount + 1):
            ways[total] += ways[total - coin]
    return ways[amount]
# print(coin_change(12, [1,2,5]))
def KnapsackProblem(weight, items):
    """Unbounded knapsack: max total value within capacity `weight`.

    `items` maps item weight -> item value; each item may be taken any
    number of times (forward 1-D DP).
    BUG FIX: removed a leftover `print` of the DP table (debug output).
    NOTE(review): a dict keyed by weight silently collapses items sharing a
    weight — confirm callers never pass duplicates.
    """
    best = [0] * (weight + 1)
    for w, v in items.items():
        for cap in range(w, weight + 1):
            best[cap] = max(best[cap], best[cap - w] + v)
    return best[-1]
# print(KnapsackProblem(5, {5: 60, 3: 50, 4: 70, 2: 30}))
def levenstein_distance(a, b):
    """Compute the Levenshtein (edit) distance between strings a and b.

    Standard Wagner-Fischer DP over a (len(b)+1) x (len(a)+1) matrix.
    BUG FIXES: removed the debug prints, and on a character MATCH the cell
    now takes the diagonal value only — the original took min(left, diag,
    up) without the +1, which undercounts (e.g. d("abb", "ab") came out 0
    instead of 1).
    """
    rows = len(b) + 1
    cols = len(a) + 1
    d = [[0] * cols for _ in range(rows)]
    d[0] = list(range(cols))  # distance from "" is pure insertion cost
    for i in range(1, rows):
        d[i][0] = i
        for j in range(1, cols):
            if a[j - 1] == b[i - 1]:
                d[i][j] = d[i - 1][j - 1]
            else:
                # deletion, substitution, insertion
                d[i][j] = 1 + min(d[i][j - 1], d[i - 1][j - 1], d[i - 1][j])
    return d[len(b)][len(a)]
#print(levenstein_distance("benyam", "ephrem"))
from collections import defaultdict
class Graph:
    """Directed graph on vertices 0..V-1 (adjacency lists) with a DFS-based
    topological sort."""

    def __init__(self, vertices):
        self.graph = defaultdict(list)  # vertex -> list of successors
        self.V = vertices               # number of vertices

    def addEdge(self, u, v):
        """Add a directed edge u -> v."""
        self.graph[u].append(v)

    def topologicalSortUtil(self, v, visited, stack):
        """DFS from v; prepend each finished vertex to `stack`."""
        visited[v] = True
        for successor in self.graph[v]:
            if not visited[successor]:
                self.topologicalSortUtil(successor, visited, stack)
        stack.insert(0, v)

    def topological_sort(self):
        """Return one topological ordering of all vertices."""
        visited = [False] * self.V
        ordering = []
        for vertex in range(self.V):
            if not visited[vertex]:
                self.topologicalSortUtil(vertex, visited, ordering)
        return ordering
# Fixtures for the (commented-out) topological-sort demos below.
g= Graph(6)
g.addEdge(5, 2)
g.addEdge(5, 0)
g.addEdge(4, 0)
g.addEdge(4, 1)
g.addEdge(2, 3)
g.addEdge(3, 1)
#print(g.topological_sort())
# Adjacency-dict DAG used by iterative/recursive_topological_sort.
graph = {
    'a': ['b', 'c'],
    'b': ['d'],
    'c': ['d'],
    'd': ['e'],
    'e': []
}
def iterative_topological_sort(graph, start):
    """Topological order of the nodes reachable from `start` (iterative DFS).

    `path_stack` holds the current DFS path; a node is flushed to `post`
    once the node being placed is not one of its direct successors.
    """
    visited = set()
    path_stack = []
    post = []  # collected in reverse topological order
    work = [start]
    while work:
        node = work.pop()
        if node in visited:
            continue
        visited.add(node)
        work.extend(graph[node])
        # Unwind the path while `node` does not depend on the stack top.
        while path_stack and node not in graph[path_stack[-1]]:
            post.append(path_stack.pop())
        path_stack.append(node)
    return path_stack + post[::-1]
def recursive_topological_sort(graph, node):
    """Topological order of the nodes reachable from `node` (recursive DFS).

    Fixes vs. the original: the start node was never added to `seen` (a
    back-edge to it would append it twice), and the raw post-order list was
    returned, i.e. the REVERSE of a topological order — inconsistent with
    iterative_topological_sort. The post-order is now reversed.
    """
    result = []
    seen = set()
    def visit(current):
        seen.add(current)
        for neighbour in graph[current]:
            if neighbour not in seen:
                visit(neighbour)
        result.append(current)  # post-order: after all descendants
    visit(node)
    return result[::-1]
#print(recursive_topological_sort(graph, "a"))
def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
    '''
    Course-schedule feasibility (LeetCode 207): courses can all be finished
    iff the prerequisite graph has no cycle.

    Each course carries one of three colours: 0 = unvisited, 1 = on the
    current DFS path, 2 = fully explored. Meeting a colour-1 course during
    DFS means a cycle.

    Time: O(n)    Space: O(n)
    '''
    if not numCourses or not prerequisites:
        return True  # nothing to schedule / no constraints
    graph = defaultdict(list)
    for p in prerequisites:
        graph[p[0]].append(p[1])
    state = [0] * numCourses
    def has_cycle(course):
        state[course] = 1
        for pre in graph[course]:
            if state[pre] == 1:
                return True
            if state[pre] == 0 and has_cycle(pre):
                return True
        state[course] = 2
        return False
    for course in range(numCourses):
        if state[course] == 0 and has_cycle(course):
            return False
    return True
# analyze this one
fiblist = [0, 1]  # shared memo across calls: fiblist[k-1] == fib_dp(k)
def fib_dp(n):
    """Memoized Fibonacci with fib_dp(1) == 0 and fib_dp(2) == 1.

    Fix: a negative n previously printed a typo'd message and then FELL
    THROUGH to `fiblist[n-1]`, silently returning a wrong value from a
    negative index; it now returns None after reporting the error.
    """
    if n < 0:
        print("Incorrect input")
        return None
    if n <= len(fiblist):
        return fiblist[n - 1]
    value = fib_dp(n - 1) + fib_dp(n - 2)  # n-1 first keeps the memo dense/in order
    fiblist.append(value)
    return value
#print(fib_dp(8))
def threeSum(nums: List[int]) -> List[List[int]]:
    """All unique triplets in nums summing to zero (LeetCode 15).

    Sort, then fix a pivot among the non-positive elements and close in with
    two pointers; duplicate pivots and pointer values are skipped so each
    triplet appears once. O(n^2) time, O(1) extra space (ignoring the output).
    """
    # return empty list if list size < 3
    if not nums or len(nums) < 3:
        return []
    nums.sort()
    # if the smallest number >0 or largest < 0, we can't have a sum of 0
    if nums[0] > 0 or nums[-1] < 0:
        return []
    res = []
    # find the index of the first non negative element
    # (safe: the guard above ensures at least one element is >= 0)
    p_non_neg_index = next(index for index, val in enumerate(nums) if val >= 0)
    # we will keep the pivot index to 0 and iterate till p_non_neg_index
    for pivot in range(p_non_neg_index + 1):
        # skip already computed pivots
        if pivot > 0 and nums[pivot - 1] == nums[pivot]:
            continue
        left = pivot + 1
        right = len(nums) - 1
        while left < right:
            s = nums[pivot] + nums[left] + nums[right]
            if s > 0:
                # if sum greater than zero we will move the right pointer towards the left(smaller value)
                # if the previous element is same , we will skip the that element
                while left < right and nums[right] == nums[right - 1]:
                    right -= 1
                right -= 1
            elif s < 0:
                # if sum less than zero we will move the left pointer towards the right(higer value)
                # if the next element is same , we will skip the that element
                while left < right and nums[left] == nums[left + 1]:
                    left += 1
                left += 1
            else:
                # if sum is zero, we will append the triplet to list
                # move both left and right to next position avoiding the duplicates
                res.append([nums[pivot], nums[left], nums[right]])
                while left < right and nums[right] == nums[right - 1]:
                    right -= 1
                while left < right and nums[left] == nums[left + 1]:
                    left += 1
                right -= 1
                left += 1
    return res
def floodFill(image: List[List[int]], sr: int, sc: int, newColor: int) -> List[List[int]]:
    """Flood-fill the 4-connected region of image containing (sr, sc).

    Fix: the original recursed into every unvisited neighbour regardless of
    its colour, so EVERY region of the original colour anywhere in the image
    was recoloured, not just the component containing the start cell. The
    DFS now only spreads through cells that still hold the original colour.
    """
    rows, cols = len(image), len(image[0])
    original_color = image[sr][sc]
    if original_color == newColor:
        return image  # nothing to do; also avoids re-walking recoloured cells
    def dfs(x, y):
        if 0 <= x < rows and 0 <= y < cols and image[x][y] == original_color:
            image[x][y] = newColor  # recolouring doubles as the "visited" mark
            dfs(x - 1, y)
            dfs(x + 1, y)
            dfs(x, y - 1)
            dfs(x, y + 1)
    dfs(sr, sc)
    return image
#print(floodFill([[1,1,1],[1,1,0],[1,0,1]], 1, 1, 2))
def articulation_point():
    # TODO: unimplemented stub — presumably intended to find articulation
    # points (cut vertices) of a graph, e.g. via Tarjan's low-link DFS.
    pass
def triple_step(n):
    """Number of ways to climb n stairs taking 1, 2 or 3 steps at a time.

    Plain recursion, O(3**n); see triple_step_dp for the memoized version.
    """
    if n < 0:
        return 0
    if n == 0:
        return 1
    return sum(triple_step(n - hop) for hop in (1, 2, 3))
items = []  # shared memo across calls: items[k-1] == triple_step_dp(k)
def triple_step_dp(n):
    """Memoized count of ways to climb n stairs with hops of 1, 2 or 3."""
    if n < 0:
        return 0
    if n == 0:
        return 1
    if n <= len(items):
        return items[n - 1]
    # n-1 is recursed first, so the memo list always grows in order.
    ways = sum(triple_step_dp(n - hop) for hop in (1, 2, 3))
    items.append(ways)
    return ways
#print(triple_step_dp(5))
def robot_grid(grid):
    """True iff a robot at the bottom-right can reach (0, 0) moving only
    up/left, never entering an 'X' cell. Exponential O(2**(r+c)) — see
    robot_grid_dp for the memoized version.

    Fix: the original evaluated grid[r][c] BEFORE the r >= 0 / c >= 0 bounds
    checks, so negative indices silently wrapped and read the far side of the
    grid; the bounds are now checked first.
    """
    success = False
    def rec(cell):
        nonlocal success
        if success:
            return
        if cell == (0, 0):  # reached the goal (checked before the wall test, as before)
            success = True
            return
        r, c = cell
        if r >= 0 and c >= 0 and grid[r][c] != 'X':
            rec((r - 1, c))
            rec((r, c - 1))
    rec((len(grid) - 1, len(grid[0]) - 1))
    return success
def robot_grid_dp(grid): # do this to save the good path
    """Memoized robot_grid: each cell is visited at most once, so O(r*c).

    Fix: bounds (r >= 0, c >= 0) are now checked before indexing grid[r][c];
    the original relied on Python's negative-index wraparound to happen
    harmlessly before the short-circuited bounds test.
    """
    success = False
    visited = set()  # cells already explored (dead ends)
    def rec(cell):
        nonlocal success
        if success or cell in visited:
            return
        if cell == (0, 0):
            success = True
            return
        r, c = cell
        if r >= 0 and c >= 0 and grid[r][c] != 'X':
            visited.add(cell)
            rec((r - 1, c))
            rec((r, c - 1))
    rec((len(grid) - 1, len(grid[0]) - 1))
    return success
# print(robot_grid_dp([[0, "X", 0, 0, 0],
# [0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0],
# ["X", 0, 0, 0, 0],
# [0, 0, 0, 0, 0]]))
def magic_index(A):
    """True iff sorted, distinct-valued A has an index i with A[i] == i.

    Iterative binary search: with distinct sorted values, A[i] - i is
    monotone, so the search can discard half of the range each step.
    """
    lo, hi = 0, len(A) - 1
    while lo <= hi:
        mid = lo + (hi - lo) // 2
        if A[mid] == mid:
            return True
        if A[mid] < mid:
            lo = mid + 1
        else:
            hi = mid - 1
    return False
#print(magic_index([-2, -1, 1, 2, 5, 6]))
def magic_index_duplicates(A):
    """True iff sorted array A (duplicates allowed) has an index i with A[i] == i.

    With duplicates both sides must be searched, but the value A[m] bounds
    how far a magic index can sit on each side.

    Fix: the original mixed return types — True on success but -1 on failure
    (and `left >= 0` was True for the boolean True), so bool(-1) made every
    failed search look truthy. It now returns a consistent True/False.
    """
    def magic_fast(l, r):
        if l > r:
            return False
        m = l + (r - l) // 2
        if A[m] == m:
            return True
        # A magic index left of m can be at most at position A[m].
        if magic_fast(l, min(m - 1, A[m])):
            return True
        # A magic index right of m must be at position >= A[m].
        return magic_fast(max(m + 1, A[m]), r)
    return magic_fast(0, len(A) - 1)
#print(magic_index_duplicates([-10, -5, 2, 2, 2,3,4, 7, 9, 12]))
def subsets_set(S):
    '''Power set of S as a list of lists, in subset-doubling order.

    Each element doubles the collection: every existing subset is copied
    (deep enough — new list objects) with the element appended.
    '''
    result = [[]]
    for item in S:
        snapshot = list(result)  # freeze before extending
        for subset in snapshot:
            result.append(subset + [item])  # fresh list, never aliased
    return result
#print(subsets_set([1,2,3,4,5]))
def hanoi_tower(n, source, dest, util):
    """Print the 2**n - 1 moves transferring n disks from rod `source` to
    rod `dest`, using `util` as the spare rod (classic recursive Tower of
    Hanoi). Output goes to stdout; returns None."""
    if n == 1:
        print("Move disk 1 from rod", source, "to rod", dest)
        return
    # Move the n-1 smaller disks out of the way, move the largest, then
    # stack the n-1 disks back on top of it.
    hanoi_tower(n - 1, source, util, dest)
    print("Move disk", n, "from rod", source, "to rod", dest)
    hanoi_tower(n - 1, util, dest, source)
#print(hanoi_tower(4, "A", "C", "B"))
def permutations_without_dups(S, step=0, result=None):
    """All permutations of list-of-chars S (distinct elements), as joined strings.

    Fix: `result` defaulted to a MUTABLE shared list, and the recursive call
    did not pass it down — it silently relied on the shared default. The
    permutations therefore accumulated across independent calls (a second
    call returned the first call's results too). The accumulator is now
    created per top-level call and threaded through the recursion.
    """
    if result is None:
        result = []
    if step == len(S):
        result.append("".join(S))
    else:
        for i in range(step, len(S)):
            swapped = list(S)  # copy so siblings see the original order
            swapped[i], swapped[step] = swapped[step], swapped[i]
            permutations_without_dups(swapped, step + 1, result)
    return result
def permutations_without_dups2(S, l, r, result=None):
    """In-place swap permutations of list S between indices l..r (inclusive),
    appended to `result` as joined strings.

    Fix: `result` defaulted to a MUTABLE shared list, so every call after the
    first returned the accumulated permutations of all previous calls; the
    accumulator is now created fresh per top-level call.
    """
    if result is None:
        result = []
    if l == r:
        result.append("".join(S))
    else:
        for i in range(l, r + 1):
            S[i], S[l] = S[l], S[i]
            permutations_without_dups2(S, l + 1, r, result)
            S[l], S[i] = S[i], S[l]  # backtrack: restore order for the next swap
    return result
#print(permutations_without_dups2(list("abcd"), 0, len("abcd") - 1))
def all_valid_paran(n, result, current=2):
    """Grow `result` (the valid strings of `current - 1` pairs) into the set
    of all valid parenthesis strings of n pairs.

    Each generation inserts "()" immediately after every '(' of every string
    and also prepends "()" to it; the set removes duplicates.
    """
    while current <= n:
        grown = set()
        for s in result:
            for i, ch in enumerate(s):
                if ch == "(":
                    grown.add(s[:i + 1] + "()" + s[i + 1:])
            grown.add("()" + s)
        result = grown
        current += 1
    return set(result)
qresult = set()
qresult.add("()")
#print(all_valid_paran(3 ,qresult))
def paintFill(A, target, newcolor):
    """Flood-fill grid A from `target` = (row, col) with `newcolor`.

    Fix: the column upper bound was checked against len(A) — the number of
    ROWS — so non-square grids were filled incorrectly (or indexed out of
    range). It now uses len(A[0]). The explicit visited set was dropped:
    recolouring a cell already marks it as done.
    """
    existing = A[target[0]][target[1]]
    if existing == newcolor:
        return A  # no-op; also prevents re-walking already-painted cells
    def dfs(r, c):
        if 0 <= r < len(A) and 0 <= c < len(A[0]) and A[r][c] == existing:
            A[r][c] = newcolor
            dfs(r - 1, c)
            dfs(r, c - 1)
            dfs(r + 1, c)
            dfs(r, c + 1)
    dfs(target[0], target[1])
    return A
def coin_change2(n: int, coins: List[int]) -> int:
    """Count the coin combinations summing to n, via a 2-D DP table
    (row r = "using only the first r coins").

    Fix: the original returned `combinations[r][c]`, leaking the loop
    variables — with an empty coin list `c` was never bound and the function
    raised NameError. It now returns the bottom-right cell explicitly.
    """
    table = [[0] * (n + 1) for _ in range(len(coins) + 1)]
    for row in range(len(table)):
        table[row][0] = 1  # exactly one way to make 0: take nothing
    for row in range(1, len(table)):
        coin = coins[row - 1]
        for amount in range(1, n + 1):
            table[row][amount] = table[row - 1][amount]          # skip this coin
            if amount >= coin:
                table[row][amount] += table[row][amount - coin]  # take it (again)
    return table[-1][-1]
def coin_change3(n, coins):
    # TODO: unimplemented stub — see coin_change / coin_change2 above for
    # working coin-combination counters.
    pass
#print(coin_change3(50, [1, 5, 10, 25]))
def minimum_amount_coins(amount, coins):
    """Fewest coins summing to `amount`; -1 if impossible (bottom-up DP).

    best[t] starts at the impossible sentinel amount + 1 and is relaxed with
    every coin no larger than t (coins sorted so the inner loop can break).
    """
    if amount < 0:
        return -1
    ordered = sorted(coins)
    best = [0] + [amount + 1] * amount
    for total in range(1, amount + 1):
        for coin in ordered:
            if coin > total:
                break
            best[total] = min(best[total], best[total - coin] + 1)
    return best[-1] if best[-1] <= amount else -1
#print(minimum_amount_coins(11, [5,2,1]))
def coinChange(coins: List[int], amount: int):
    """Minimum number of coins summing to `amount`; -1 when impossible.

    Brute-force DFS: every coin is tried at every level, so this is
    exponential — fine only for small amounts.
    """
    best = float("Inf")
    def explore(remaining, used):
        nonlocal best
        if remaining == 0:
            best = min(best, used)
        elif remaining > 0:
            for coin in coins:
                explore(remaining - coin, used + 1)
        # overshoot (remaining < 0): dead branch, just unwind
    explore(amount, 0)
    return -1 if best == float("Inf") else best
#print(coinChange([5,2,1], 10))
def NQueen(n):
    """First solution to the n-queens puzzle as a list of column indices,
    one per row, or None when no solution exists.

    Fixes vs. the original: placements stored (row, col) TUPLES while
    isValid did integer arithmetic on them (TypeError for any n >= 2), and
    the recursive result was discarded so the function always returned None.
    """
    def isValid(placements):
        # placements[i] is the column of the queen on row i.
        row = len(placements) - 1
        for i in range(row):
            diff = abs(placements[i] - placements[row])
            if diff == 0 or diff == row - i:  # same column or same diagonal
                return False
        return True
    def solve(r, placements):
        if r == n:
            return list(placements)  # copy: the working list keeps mutating
        for c in range(n):
            placements.append(c)
            if isValid(placements):
                solution = solve(r + 1, placements)
                if solution is not None:
                    return solution
            placements.pop()  # backtrack
        return None
    return solve(0, [])
#print(NQueen(4))
def knap2(weight, items):
    """Unbounded knapsack over a list of {"weight": w, "value": v} dicts:
    best total value achievable within capacity `weight`."""
    best = [0] * (weight + 1)
    for entry in items:
        w, v = entry["weight"], entry["value"]
        for cap in range(w, weight + 1):
            best[cap] = max(best[cap], best[cap - w] + v)
    return best[-1]
# print(knap2(5, [
# {"weight": 5, "value": 60},
# {"weight": 3, "value": 50},
# {"weight": 4, "value": 70},
# {"weight": 2, "value": 30}
# ]))
def quicksort(array=[12,4,5,6,7,3,1,15]):
    """Sort `array` via recursive quicksort (first element as pivot).

    The default argument is only read, never mutated, so the shared default
    list is safe here (kept for interface compatibility).
    """
    if len(array) <= 1:
        return array  # base case: already sorted
    pivot = array[0]
    below = [x for x in array if x < pivot]
    same = [x for x in array if x == pivot]
    above = [x for x in array if x > pivot]
    return quicksort(below) + same + quicksort(above)
#print(quicksort())
def search_rotated_array(A, t):
    """Index of t in a rotated sorted array of distinct values, or -1.

    Fix: the sorted-half test used `A[m] > A[l]`; for two-element windows
    (m == l, e.g. [3, 1] searching for 1 — the file's own commented test)
    both halves were misclassified and the target was missed. Using >=
    correctly treats the left half as sorted when m == l.
    """
    def binary(l, r):
        if l > r:
            return -1
        m = l + (r - l) // 2
        if A[m] == t:
            return m
        if A[m] >= A[l]:            # left half [l..m] is sorted
            if A[l] <= t < A[m]:
                return binary(l, m - 1)
            return binary(m + 1, r)
        else:                        # right half [m..r] is sorted
            if A[m] < t <= A[r]:
                return binary(m + 1, r)
            return binary(l, m - 1)
    return binary(0, len(A) - 1)
#print(search_rotated_array([3,1], 1))
def search_no_size(reader, target):
    """
    :type reader: ArrayReader
    :type target: int
    :rtype: int
    """
    # NOTE(review): shape of LeetCode 702 (search in a sorted array of
    # unknown size): double `a` until reader.get(a) reports the
    # out-of-bounds sentinel 2147483647, then binary-search [a // 2, a].
    # The Readera stub below treats get()'s argument as a VALUE, not an
    # index, so the two pieces are mutually inconsistent — confirm the
    # intended reader contract before relying on this.
    a = 2
    while reader.get(a) != 2147483647:
        if reader.get(a) == target:
            return a
        elif reader.get(a) > target:
            break
        else:
            a *= 2
    def binary(reader, t, l, r):
        if l <= r:
            m = l + (r - l) // 2
            item = reader.get(m)
            if item == t:
                return m
            elif item == -1 or item > t:  # -1 here means "past the end"
                return binary(reader, target, l, m - 1)
            elif item < t:
                return binary(reader, target, m + 1, r)
            else:
                return -1  # NOTE(review): unreachable — the elif chain above is exhaustive
        # NOTE(review): falls through to an implicit None when l > r,
        # so a missing target yields None rather than -1.
    return binary(reader, target, a // 2, a)
class Readera:
    """Toy stand-in for an ArrayReader used by search_no_size.

    NOTE(review): get() treats its argument as a VALUE to look up (returning
    that value's index in `data`), while search_no_size passes INDICES — the
    two are inconsistent; confirm the intended semantics before relying on
    either.
    """
    # Backing sorted array shared by all instances.
    data = [-1,0,3,5,9,12]
    def get(self, d):
        if d > len(self.data):
            return 2147483647  # INT_MAX sentinel for "out of range"
        if d not in self.data:
            return -1
        else:
            return self.data.index(d)
# reader = Readera()
# print(search_no_size(reader,9))
def sparse_search(a, t):
    """Index of string t in a sorted list of strings interspersed with empty
    strings, or -1.

    Fixes vs. the original: when the midpoint was empty it compared strings
    to integers (`a[ml] < 0` — TypeError on Python 3) and only ever
    re-probed one step to the left; we now walk outwards symmetrically to
    the nearest non-empty entry inside the current window.
    """
    def binary(l, r):
        if l > r:
            return -1
        m = l + (r - l) // 2
        if a[m] == "":
            # Find the closest non-empty string within [l, r].
            left, right = m - 1, m + 1
            while True:
                if left < l and right > r:
                    return -1  # window is all empty strings
                if left >= l and a[left] != "":
                    m = left
                    break
                if right <= r and a[right] != "":
                    m = right
                    break
                left -= 1
                right += 1
        if a[m] == t:
            return m
        if a[m] > t:
            return binary(l, m - 1)
        return binary(m + 1, r)
    return binary(0, len(a) - 1)
#print(sparse_search(["", "at", "", "", "ball"], "ball"))
def subset_sum(nums):
    """True iff nums can be partitioned into two subsets with equal sums.

    Iterative DFS over (index, running-sum) states with pruning: states
    whose doubled sum exceeds the total are dead, and repeated states are
    skipped via the `seen` set.
    """
    total = sum(nums)
    if total % 2:
        return False  # odd total can never split evenly
    frontier = [(0, 0)]
    seen = set()
    while frontier:
        index, running = frontier.pop()
        if index >= len(nums) or 2 * running > total:
            continue
        if 2 * running == total:
            return True
        if (index, running) in seen:
            continue
        seen.add((index, running))
        frontier.append((index + 1, running))                 # skip nums[index]
        frontier.append((index + 1, running + nums[index]))   # take nums[index]
    return False
#print(subset_sum(( [1, 5, 11, 5])))
def numIslands(grid: List[List[str]]) -> int:
    """Count the 4-connected components of "1" cells in a grid of "0"/"1"
    strings (DFS from each unvisited land cell)."""
    marked = set()
    def spread(r, c):
        if r < 0 or c < 0 or r >= len(grid) or c >= len(grid[0]):
            return
        if (r, c) in marked:
            return
        marked.add((r, c))
        if grid[r][c] == "1":  # only land keeps spreading
            spread(r + 1, c)
            spread(r, c + 1)
            spread(r - 1, c)
            spread(r, c - 1)
    count = 0
    for r in range(len(grid)):
        for c in range(len(grid[0])):
            if (r, c) not in marked and grid[r][c] == "1":
                count += 1
                spread(r, c)
    return count
def numIslands2(m: int, n: int, positions: List[List[int]]) -> List[int]:
    """Number of islands after EACH land addition (LeetCode 305 semantics).

    Fix: the original built the whole grid first and appended a single final
    count, but the signature (and the problem) promise one count per
    position. We now recount after every addition — O(k*m*n), fine for the
    brute-force intent of this file.
    """
    grid = [["0"] * n for _ in range(m)]
    def count_islands():
        visited = set()
        def dfs(r, c):
            if r < 0 or c < 0 or r >= m or c >= n or (r, c) in visited:
                return
            visited.add((r, c))
            if grid[r][c] == "1":
                dfs(r + 1, c)
                dfs(r, c + 1)
                dfs(r - 1, c)
                dfs(r, c - 1)
        total = 0
        for r in range(m):
            for c in range(n):
                if (r, c) not in visited and grid[r][c] == "1":
                    total += 1
                    dfs(r, c)
        return total
    result = []
    for r, c in positions:
        if 0 <= r < m and 0 <= c < n:  # ignore out-of-bounds additions, as before
            grid[r][c] = "1"
        result.append(count_islands())
    return result
def isMatch(text, pattern):
    """Regex match of the WHOLE string, supporting '.' (any char) and '*'
    (zero or more of the preceding element). Bottom-up DP from the ends."""
    rows, cols = len(text), len(pattern)
    match = [[False] * (cols + 1) for _ in range(rows + 1)]
    match[rows][cols] = True  # empty pattern matches empty remainder
    for i in range(rows, -1, -1):
        for j in range(cols - 1, -1, -1):
            head_ok = i < rows and pattern[j] in (text[i], '.')
            if j + 1 < cols and pattern[j + 1] == '*':
                # Either skip "x*" entirely, or consume one char and stay on it.
                match[i][j] = match[i][j + 2] or (head_ok and match[i + 1][j])
            else:
                match[i][j] = head_ok and match[i + 1][j + 1]
    return match[0][0]
# print(isMatch("mississippi","mis*is*p*."))
def videoStitching(clips: List[List[int]], T: int) -> int:
    """Minimum number of clips covering [0, T], or -1 if impossible.

    Greedy: repeatedly pick, among clips starting at or before the covered
    point, the one reaching furthest.
    """
    used = 0
    covered = 0
    while covered < T:
        reach = [end for start, end in clips if start <= covered < end]
        if not reach:
            return -1  # gap that no clip bridges
        covered = max(reach)
        used += 1
    return used
#print(videoStitching([[0,2],[4,6],[8,10],[1,9],[1,5],[5,9]], 10))
def add_binary_nums(x, y):
    """Add two binary strings and return their sum as a binary string."""
    width = max(len(x), len(y))
    x, y = x.zfill(width), y.zfill(width)
    digits = []  # least-significant first
    carry = 0
    for i in range(width - 1, -1, -1):
        total = carry + (x[i] == '1') + (y[i] == '1')
        digits.append('1' if total % 2 else '0')
        carry = total // 2
    if carry:
        digits.append('1')
    return ''.join(reversed(digits)).zfill(width)
#print(add_binary_nums('1101', '100'))
def lcs(a, b, result):
    """Length of the longest common subsequence of a and b, plus the running
    count `result` (pass 0 initially). Plain exponential recursion."""
    if not a or not b:
        return result
    if a[-1] == b[-1]:
        # Matching tails always belong to an optimal LCS.
        return lcs(a[:-1], b[:-1], result + 1)
    return max(lcs(a[:-1], b, result), lcs(a, b[:-1], result))
#print(lcs("saab", "sazb", 0))
def number_to_bin(num):
    """Print the binary digits of `num`, most-significant bit first, one per
    line (recursive halving). Returns None; output goes to stdout."""
    if num>1:
        number_to_bin(num//2)
    print(num%2)
#number_to_bin(16)
def multiply(n, m):
    """Multiply n by m using shift-and-add over m's bits (Russian peasant).

    NOTE(review): assumes m >= 0 — `int(m / 2)` truncates toward zero, so a
    negative m terminates after its lowest bit and yields a wrong product;
    confirm callers never pass negatives.
    """
    product = 0
    shift = 0
    while m:
        if m % 2 == 1:            # current bit of m is set
            product += n << shift
        shift += 1                # next place value
        m = int(m / 2)
    return product
#print(multiply(3,4))
def longestOnes(A: List[int], K: int) -> int:
    """Longest run of 1s obtainable by flipping at most K zeros
    (non-shrinking sliding window).

    Fix: an empty A left the loop variable unbound and the final
    `return i - j + 1` raised NameError; we now return 0 for empty input.
    The window [left, right] only grows, so the answer is its final size.
    """
    if not A:
        return 0
    left = 0
    for right in range(len(A)):
        if A[right] != 1:
            K -= 1
        if K < 0:           # too many zeros inside — slide the window right
            if A[left] == 0:
                K += 1
            left += 1
    return len(A) - left    # final window size == (last right) - left + 1
#print(longestOnes([1,1,1,0,0,0,1,1,1,1,0],2))
def task_scheduler(tasks, n):
    """Unfinished draft of the task-scheduler problem.

    NOTE(review): only the frequency map is built; the while loop pops from
    `priority_queue` without ever advancing `count`, nothing is returned,
    and `result` is unused. See leastInterval below for the working greedy
    solution.
    """
    map = {}
    for task in tasks:
        if task in map:
            map[task] += 1
        else:
            map[task] = 1
    priority_queue = list(map.values())
    result = 0  # NOTE(review): unused — function abandoned mid-write
    count = 0
    while count < len(tasks):
        biggest = priority_queue.pop()
def leastInterval(tasks, n):
    """Minimum intervals to run all tasks with cooldown n between identical
    tasks (LeetCode 621, closed-form greedy)."""
    if n == 0:
        return len(tasks)  # no cooldown: tasks run back to back
    import collections
    counts = collections.defaultdict(int)
    for task in tasks:
        counts[task] += 1
    freq = sorted(counts.values(), reverse=True)
    gaps = freq[0] - 1     # idle windows created by the most frequent task
    idle = gaps * n
    for other in freq[1:]:
        idle -= min(other, gaps)  # other tasks fill the idle slots
    return max(idle + len(tasks), len(tasks))
#print(leastInterval(["A","A","A","B","B","B"],2))
import collections
def criticalConnections2(connections: List[List[int]], n) -> List[List[int]]:
    """Brute-force bridge (critical edge) finding: remove each edge in turn
    and DFS; an edge is critical when its removal disconnects some vertex.
    O(E^2) — the linear-time approach would be Tarjan's low-link DFS.

    Fixes vs. the original: a directed adjacency map was built at the top
    and never used (the dfs helper shadowed it), the `n` parameter was
    shadowed by a local, and an empty edge list inside dfs raised IndexError.
    """
    vertices = set()
    for u, v in connections:
        vertices.add(u)
        vertices.add(v)
    def reachable(edges):
        # Vertices reachable from edges[0][0] treating edges as undirected.
        if not edges:
            return set()
        adjacency = collections.defaultdict(list)
        for u, v in edges:
            adjacency[u].append(v)
            adjacency[v].append(u)
        stack = [edges[0][0]]
        seen = set()
        while stack:
            node = stack.pop()
            seen.add(node)
            for neighbour in adjacency[node]:
                if neighbour not in seen:
                    stack.append(neighbour)
        return seen
    bridges = []
    remaining = list(connections)
    for i in range(len(remaining)):
        edge = remaining.pop(i)
        if reachable(remaining) != vertices:
            bridges.append(edge)
        remaining.insert(i, edge)
    return bridges
#print(criticalConnections2([[0,1],[1,2],[2,0],[1,3]], 4))
def a():
    """Collect every node reachable from 0 in a hard-coded directed edge
    list, via recursive DFS. Returns the set of reached nodes."""
    reached = set()
    edges = [[0,1],[1,2],[2,0],[1,3]]
    adjacency = collections.defaultdict(list)
    for u, v in edges:
        adjacency[u].append(v)
    def dfs(node):
        if node is None:
            return
        reached.add(node)
        for neighbour in adjacency[node]:
            if neighbour not in reached:
                dfs(neighbour)
    dfs(0)
    return reached
#print(a())
def isHappy(n: int, seen=None) -> bool:
    """Happy-number test: repeatedly replace n with the sum of its squared
    digits; n is happy iff the sequence reaches 1 (a repeated value means a
    cycle, hence not happy).

    Fix: the cycle-detection set was a MODULE-LEVEL global, so numbers seen
    in one call poisoned every later call (isHappy(19) twice returned True
    then False). The set is now created per top-level call and threaded
    through the recursion; the optional `seen` parameter is backward
    compatible.
    """
    if seen is None:
        seen = set()
    if n == 1:
        return True
    if n in seen:
        return False  # cycle detected
    seen.add(n)
    nxt = sum(int(digit) ** 2 for digit in str(n))
    return isHappy(nxt, seen)
#print(isHappy(10))
# anagrams and palindrom
def shortestToChar(S: str, C: str) -> List[int]:
    """For each index of S, the distance to the nearest occurrence of C.

    Brute force: scan left then right from every index — O(n^2) worst case.
    Assumes C occurs at least once in S (otherwise entries stay False, as in
    the original).
    """
    distances = []
    for i, ch in enumerate(S):
        if ch == C:
            distances.append(0)
            continue
        nearest = False
        for back in range(i - 1, -1, -1):      # closest C to the left
            if S[back] == C:
                nearest = back
                nearest = i - back
                break
        for fwd in range(i + 1, len(S)):       # closest C to the right
            if S[fwd] == C:
                nearest = fwd - i if not nearest else min(nearest, fwd - i)
                break
        distances.append(nearest)
    return distances
def shortestToChar2(S: str, C: str) -> List[int]:
    """For each index of S, the distance to the nearest occurrence of C, in a
    single left-to-right pass that tracks the last and next occurrence.

    Fixes vs. the original: the next-occurrence index was computed as
    `S[i + 1:].index(C) + i`, one short of the real position (the `+ 1`
    offset of the slice was dropped), so distances after the first C were
    wrong; and a bare `except:` masked the real ValueError. str.find handles
    the "no further occurrence" case without exceptions.
    """
    result = []
    left = float("Inf")       # index of the previous C (Inf = none yet)
    right = S.index(C)        # index of the next C at or after i
    for i, ch in enumerate(S):
        if ch == C:
            result.append(0)
            left = i
            nxt = S.find(C, i + 1)
            right = nxt if nxt != -1 else float("Inf")
        else:
            result.append(min(abs(i - left), abs(i - right)))
    return result
#print(shortestToChar2("loveleetcode", "e"))
def shortestSubarray(A: List[int], K: int) -> int:
    """Length of the shortest contiguous subarray summing EXACTLY to K; -1
    if none exists.

    NOTE(review): despite the name matching LeetCode 862 (shortest subarray
    with sum AT LEAST K), this checks equality, and the two-pointer shrink
    is only valid for non-negative elements — with negatives some windows
    can be skipped. Each step re-evaluates sum(A[i:j]), making this O(n^2).
    Confirm which problem was intended before reusing.
    """
    i = 0
    j = 0  # j is exclusive: the window is A[i:j], length j - i
    result = float("Inf")
    while i < len(A) and j < len(A):
        if sum(A[i:j]) == K:
            result = min(abs(i - j), result)
            i += 1
        elif sum(A[i:j]) < K:
            j += 1
        elif sum(A[i:j]) > K:
            i += 1
    # Tail pass: windows ending at len(A) were never tested above.
    for x in range(i, len(A)):
        if sum(A[i:]) == K:
            result = min(abs(i - len(A)), result)
    return -1 if result == float("Inf") else result
#print(shortestSubarray([48,99,37,4,-31],140))
# anagrams and palindrom
def productExceptSelf(nums: List[int]) -> List[int]:
    """Products of all elements except self, without division.

    Single loop that sweeps a prefix product from the front and a suffix
    product from the back simultaneously (~i indexes from the end).
    """
    out = [1] * len(nums)
    prefix = 1
    suffix = 1
    for i in range(len(nums)):
        out[i] *= prefix        # everything strictly left of i
        prefix *= nums[i]
        out[~i] *= suffix       # everything strictly right of ~i
        suffix *= nums[~i]
    return out
#print(productExceptSelf([2,3,4,5]))
def loopbothways(n):
    """Print elements of n pairwise from both ends (n[i] then n[~i]) up to
    the middle; for odd lengths the middle element is skipped. Returns None."""
    i = 0
    while i < len(n) // 2:
        print(n[i])
        print(n[~i])  # ~i == -(i + 1): mirror index from the end
        i += 1
#print(loopbothways([1,2,3,4,5,6]))
def isValidSudoku(board: List[List[str]]) -> bool:
    """Validate a 9x9 Sudoku board: no digit repeats within any row, column
    or 3x3 box. "." cells are empty and ignored. Does not check solvability."""
    # Pass 1: rows.
    row = 0
    while row < 9:
        seen = set()
        for n in board[row]:
            if n in seen and n != ".":
                return False
            seen.add(n)
        row += 1
    # Pass 2: columns.
    col = 0
    while col < 9:
        seen = set()
        for n in range(9):
            item = board[n][col]
            if item in seen and item != ".":
                return False
            seen.add(item)
        col += 1
    # Pass 3: the nine 3x3 boxes, walking (row, col) over box origins.
    row = 0
    col = 0
    for _ in range(3):
        for _ in range(3):
            seen = set()
            for i in range(3):
                for j in range(3):
                    v = board[row + i][col + j]
                    if v in seen and v != ".":
                        return False
                    seen.add(v)
            col += 3
            if col == 9:  # wrap to the next band of boxes
                col = 0
                row += 3
    return True
#print(isValidSudoku([["7","3",".",".","7",".",".",".","."],["6",".",".","1","9","5",".",".","."],[".","9","8",".",".",".",".","6","."],["8",".",".",".","6",".",".",".","3"],["4",".",".","8",".","3",".",".","1"],["7",".",".",".","2",".",".",".","6"],[".","6",".",".",".",".","2","8","."],[".",".",".","4","1","9",".",".","5"],[".",".",".",".","8",".",".","7","9"]]))
def turnpike_reconstruction_problem(D):
    """Unimplemented stub for the turnpike (beltway) reconstruction problem:
    recover point positions on a line from the multiset D of pairwise
    distances. NOTE(review): `length = len(D) + 1` looks like a draft guess
    at the point count — for n points |D| = n*(n-1)/2, so confirm before use."""
    length = len(D) + 1
    #TODO
#print(turnpike_reconstruction_problem([1, 2, 2, 2, 3, 3, 3, 4, 5, 5, 5, 6, 7, 8, 10]))
def dijkstra_shortest_path(flights, start, end):
    # Dijkstra doesn't take into account the direction towards the goal which is quite bad.
    # that's why A* is better
    """Dijkstra-style search over weighted edges (u, v, w); returns the list
    of vertices from `start` to `end` reconstructed via DNode.prev pointers.

    NOTE(review): the DNode objects in G are SHARED — `city.prev = top[1]`
    mutates them on every expansion, so if a node is relaxed more than once
    the reconstructed path can be corrupted. There is also no visited set,
    so nodes may be expanded repeatedly, and `top` after the loop assumes
    the target was reached. Treat as a sketch, not a correct Dijkstra.
    """
    G = defaultdict(list)
    for edge in flights:
        G[edge[0]].append(DNode(edge[1], edge[2]))
    from heapq import heappush, heappop
    priority_queue = [(0, DNode(start, 0))]
    while priority_queue:
        top = heappop(priority_queue)
        if top[1].v == end:
            break
        for city in G[top[1].v]:
            city.prev = top[1]
            heappush(priority_queue, (city.w + top[1].w, city))
    result = []
    top = top[1]
    while top.prev != None:
        result.append(top.v)
        top = top.prev
    result.append(top.v)
    return result[::-1]
class DNode:
    """Weighted graph node used by the Dijkstra sketches above/below.

    `v` is the vertex label, `w` the weight of the edge that reaches it, and
    `prev` a back-pointer for path reconstruction. Ordering is by vertex
    label only, so heapq can break ties between equal-priority entries.
    """
    def __init__(self, v, w, prev = None):
        self.v = v
        self.w = w
        self.prev = prev
    def __lt__(self, other):
        return self.v < other.v
def findCheapestPrice(n: int, flights: List[List[int]], src: int, dst: int, K: int) -> int:
    """Cheapest flight attempt via plain Dijkstra, with the K-stops limit
    checked only AFTER the search.

    NOTE(review): because K is not part of the search state, a cheapest path
    that exceeds K stops makes this return -1 even when a valid (pricier)
    K-stop route exists; and as in dijkstra_shortest_path, the shared
    DNode.prev pointers can be overwritten mid-search, corrupting both the
    stop count and the re-summed price. See findCheapestPrice1 for the
    correct formulation with (cost, city, hops-left) states.
    """
    G = defaultdict(list)
    for edge in flights:
        G[edge[0]].append(DNode(edge[1], edge[2]))
    from heapq import heappush, heappop
    priority_queue = [(0, DNode(src, 0))]
    found = False
    while priority_queue:
        top = heappop(priority_queue)
        if top[1].v == dst:
            found = True
            break
        for city in G[top[1].v]:
            city.prev = top[1]
            heappush(priority_queue, (city.w + top[1].w, city))
    if not found:
        return -1
    result = 0
    stops = 0
    top = top[1]
    while top.prev != None:
        stops += 1
        result += top.w
        top = top.prev
    stops += 1
    result += top.w
    if stops - 2 > K:
        return -1
    return result
#print(findCheapestPrice(3, [[0,1,100],[1,2,100],[0,2,500]], 0, 2, 1))
#print(findCheapestPrice(3, [["S","A",5],["S","B",3],["A","D",1], ["A", "G", 5], ["B", "G", 3], ["G", "H", 2], ["H", "E", 1], ["A", "E", 1]], "S", "E", 7))
def findCheapestPrice1(n, flights, src, dst, K):
from collections import defaultdict
import heapq
g = defaultdict(dict)
for source, dest, cost in flights: g[source][dest] = cost
priority_queue = [(0, src, K+1)]
while priority_queue:
cost, source, stops = heapq.heappop(priority_queue)
if source == dst:
return cost
if stops:
for neighbour, new_cost in g[source].items():
heapq.heappush(priority_queue, (cost + new_cost, neighbour, stops-1))
return -1
#print(findCheapestPrice1(5, [[0,1,5],[1,2,5],[0,3,2],[3,1,2],[1,4,1],[4,2,1]], 0, 2, 2))
def a_shortest_path(G, start, end):
    # TODO: unimplemented stub — intended as A* search (Dijkstra plus a
    # goal-directed heuristic), per the comment in dijkstra_shortest_path.
    pass
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
# Module documentation consumed by ansible-doc / the Ansible doc toolchain.
DOCUMENTATION = '''
---
module: tetration_application
short_description: Enables creation, modification, deletion and query of an application
version_added: '2.9'
description:
- Enables creation, modification, deletion and query of an application
options:
  alternate_query_mode:
    description: Indicates if dynamic mode is used for the application.  In the dynamic
      mode, an ADM run creates one or more candidate queries for each cluster.  Default
      value is false
    type: bool
  app_id:
    description:
    - The id for the Application
    - Require one of [C(app_name), C(app_id)]
    - Mutually exclusive to C(app_name)
    type: string
  app_name:
    description:
    - The name for the Application
    - Require one of [C(app_name), C(app_id)]
    - Mutually exclusive to C(app_id)
    type: string
  app_scope_id:
    description:
    - The id for the Scope associated with the application
    - Require one of [C(app_scope_name), C(app_scope_id), C(app_id)]
    - Mutually exclusive to C(app_scope_name)
    type: string
  app_scope_name:
    description:
    - The name for the Scope associated with the application
    - Require one of [C(app_scope_name), C(app_scope_id), C(app_id)]
    - Mutually exclusive to C(app_scope_id)
    type: string
  description:
    description: User specified description of the application
    type: string
  strict_validation:
    description:
    - Will return an error if there are unknown keys/attributes in the uploaded data.
    - Useful for catching misspelled keys.
    - Default value is false.
    type: bool
  primary:
    description: Indicates if the application is primary for its scope
    type: bool
  state:
    choices: '[present, absent]'
    description: Add, change, or remove an application
    required: true
    type: string

extends_documentation_fragment: tetration_doc_common

notes:
- Requires the requests Python module.
- Only the fields C(app_name), C(description), C(primary) can be updated on an existing application

requirements:
- requests
- 'Required API Permission(s): app_policy_management'

author:
- Brandon Beck (@techbeck03)
- Joe Jacobs (@joej164)
'''
EXAMPLES = '''
# Add or Modify application
tetration_application:
app_name: ACME InfoSec Policies
app_scope_name: ACME:Example:Application
description: InfoSec Policies for Acme Application
primary: yes
state: present
provider:
host: "https://tetration-cluster.company.com"
api_key: 1234567890QWERTY
api_secret: 1234567890QWERTY
# Delete application
tetration_application:
app_name: ACME InfoSec Policies
app_scope_name: ACME:Example:Application
primary: yes
state: absent
provider:
host: "https://tetration-cluster.company.com"
api_key: 1234567890QWERTY
api_secret: 1234567890QWERTY
'''
RETURN = '''
---
object:
contains:
alternate_query_mode:
description: Indicates if dynamic mode is used for the application
returned: when C(state) is present or query
sample: 'false'
type: bool
app_scope_id:
description: Unique identifier of app scope associated with application workspace
returned: when C(state) is present or query
sample: 596d5215497d4f3eaef1fd04
type: int
author:
description: Author of application workspace
returned: when C(state) is present or query
sample: Brandon Beck
type: string
created_at:
description: Date this application was created (Unix Epoch)
returned: when C(state) is present or query
sample: 1500402190
type: string
description:
description: A description for the application
returned: when C(state) is present or query
sample: Security policies for my application
type: string
enforced_version:
description: The policy version to enforce
returned: when C(state) is present or query
sample: 7
type: int
enforcement_enabled:
description: Sets whether enforcement is enabled on this application
returned: when C(state) is present or query
sample: 'true'
type: bool
id:
description: Unique identifier for the application workspace
returned: when C(state) is present or query
sample: 5c93da83497d4f33d7145960
type: int
latest_adm_version:
description: Latest policy version
returned: when C(state) is present or query
sample: 8
type: int
name:
description: Name of application workspace
returned: when C(state) is present or query
sample: My Application Policy
type: string
primary:
description: Sets whether this application should be primary for the given scope
returned: when C(state) is present or query
sample: 'true'
type: bool
description: the changed or modified object
returned: always
type: complex
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.tetration import TetrationApiModule
from ansible.module_utils.tetration_constants import TETRATION_API_APPLICATIONS
from ansible.module_utils.tetration_constants import TETRATION_API_SCOPES
from ansible.module_utils.tetration_constants import TETRATION_PROVIDER_SPEC
def main():
    """Entry point for the tetration_application Ansible module.

    Validates the scope/app identifiers passed by the playbook, then
    enforces the requested state: creates or updates the application for
    state=present, deletes it for state=absent. Exits via
    module.exit_json / module.fail_json as required by Ansible.
    """
    module_args = dict(
        app_name=dict(type='str', required=False),
        app_id=dict(type='str', required=False),
        app_scope_id=dict(type='str', required=False),
        app_scope_name=dict(type='str', required=False),
        description=dict(type='str', required=False),
        alternate_query_mode=dict(type='bool', required=False, default=False),
        strict_validation=dict(type='bool', required=False, default=False),
        primary=dict(type='bool', required=False),
        state=dict(required=True, choices=['present', 'absent']),
        provider=dict(type='dict', options=TETRATION_PROVIDER_SPEC)
    )

    module = AnsibleModule(
        argument_spec=module_args,
        mutually_exclusive=[
            ['app_scope_name', 'app_scope_id']
        ],
        required_one_of=[
            ['app_name', 'app_id'],
        ],
    )

    tet_module = TetrationApiModule(module)

    # These are all elements we put in our return JSON object for clarity
    result = {
        'changed': False,
        'object': None,
    }

    # =========================================================================
    # Verify passed in data is accurate.

    # Resolve the app scope by id (exact) or by name (must match exactly one).
    existing_app_scope = {}
    if module.params['app_scope_id']:
        app_scope_route = f"{TETRATION_API_SCOPES}/{module.params['app_scope_id']}"

        existing_app_scope = tet_module.run_method('GET', app_scope_route)
        if not existing_app_scope:
            module.fail_json(msg=f"Unable to find existing app with the id of: {module.params['app_scope_id']}")
    elif module.params['app_scope_name']:
        all_scopes = tet_module.run_method('GET', TETRATION_API_SCOPES)
        found_app_scopes = [scope for scope in all_scopes if scope['name'] == module.params['app_scope_name']]
        if len(found_app_scopes) == 0:
            module.fail_json(
                msg=("There were no app scopes that matched the name entered.  "
                     f"Searched for: {module.params['app_scope_name']}"))
        elif len(found_app_scopes) > 1:
            module.fail_json(
                msg=("There were too many app scopes that matched the name entered.  "
                     f"Searched for: {module.params['app_scope_name']}"))

        existing_app_scope = found_app_scopes[0]

    # Resolve the application by id (must exist) or by name (may not exist yet).
    existing_app = {}
    if module.params['app_id']:
        app_route = f"{TETRATION_API_APPLICATIONS}/{module.params['app_id']}"
        existing_app = tet_module.run_method('GET', app_route)
        if not existing_app:
            module.fail_json(msg=f"The App ID entered is not in the system.  Searched for: {module.params['app_id']}")
    elif module.params['app_name']:
        # If we have an app_id, and it's valid, we don't care about searching for the app_id by name
        # If we don't have an app_id, then we need to find an app, but it's ok if one doesn't exist
        # because we'll then make it, or we could be verifying it's absent
        apps = tet_module.run_method('GET', TETRATION_API_APPLICATIONS)
        found_apps = [found for found in apps if found['name'] == module.params['app_name']]

        if len(found_apps) > 1:
            module.fail_json(
                msg=f"There were too many apps that matched the name entered.  Searched for: {module.params['app_name']}")
        elif len(found_apps) == 1:
            existing_app = found_apps[0]

    app_route = ""
    if existing_app:
        app_route = f"{TETRATION_API_APPLICATIONS}/{existing_app['id']}"

    # =========================================================================
    # Now enforce the desired state (present, absent)

    # ---------------------------------
    # STATE == 'present'
    # ---------------------------------
    if module.params['state'] == 'present':

        # if the object does not exist at all, create it but verify we have all needed data first
        if not existing_app and not existing_app_scope:
            module.fail_json(msg=("The application does not exist.  "
                                  "Must provide a Scope ID or Scope Name to create a new scope."))

        if not existing_app and module.params['primary'] is None:
            module.fail_json(
                msg=("The application does not exist.  "
                     "Must provide info on if the scope is primary or not when creating a scope."))

        if existing_app:
            # Only name/description/primary are updatable; drop unset fields so
            # the subset comparison below ignores them.
            updated_app = {
                'name': module.params['app_name'],
                'description': module.params['description'],
                'primary': module.params['primary']
            }

            if not module.params['app_name']:
                updated_app.pop('name')
            if module.params['description'] is None:
                updated_app.pop('description')
            if module.params['primary'] is None:
                updated_app.pop('primary')

            # PUT only when the requested values differ from what's deployed.
            is_subset = tet_module.is_subset(updated_app, existing_app)

            if not is_subset:
                result['object'] = tet_module.run_method('PUT', app_route, req_payload=updated_app)
                result['changed'] = True
            else:
                result['object'] = existing_app
        else:
            new_app = {
                'app_scope_id': existing_app_scope['id'],
                'name': module.params['app_name'],
                'description': module.params['description'],
                'alternate_query_mode': module.params['alternate_query_mode'],
                'strict_validation': module.params['strict_validation'],
                'primary': module.params['primary']
            }

            result['object'] = tet_module.run_method("POST", TETRATION_API_APPLICATIONS, req_payload=new_app)
            result['changed'] = True

    # ---------------------------------
    # STATE == 'absent'
    # ---------------------------------
    elif module.params['state'] == 'absent':
        if existing_app:
            # Deleting an enforced or primary workspace is rejected by the API;
            # fail early with an actionable message.
            if existing_app['enforcement_enabled']:
                module.fail_json(
                    msg='Cannot delete workspace with enforcement enabled.  Disable enforcement before deleting')
            elif existing_app['primary']:
                module.fail_json(
                    msg='Cannot delete primary application.  Try making application secondary before deleting')

            result['object'] = tet_module.run_method('DELETE', app_route)
            result['changed'] = True

    # Return result
    module.exit_json(**result)


if __name__ == '__main__':
    main()
# flake8: noqa
"""
Examples of data returned by the APIs of the elsewhere platforms.
They are wrapped in lambdas to prevent tests from persistently modifying the
data.
"""
import xml.etree.ElementTree as ET
bitbucket = lambda: {
"username": "whit537",
"website": "https://www.gittip.com/whit537/",
"display_name": "<NAME>",
"uuid": "{59efeb39-29dc-415e-959e-3cb1ea7f579b}",
"links": {
"self": {
"href": "https://bitbucket.org/api/2.0/users/whit537"
},
"repositories": {
"href": "https://bitbucket.org/api/2.0/repositories/whit537"
},
"html": {
"href": "https://bitbucket.org/whit537"
},
"followers": {
"href": "https://bitbucket.org/api/2.0/users/whit537/followers"
},
"avatar": {
"href": "https://secure.gravatar.com/avatar/5698bc43665106a28833ef61c8a9f67f?d=https%3A%2F%2Fd3oaxc4q5k2d6q.cloudfront.net%2Fm%2F5fe8c0346b2d%2Fimg%2Fdefault_avatar%2F32%2Fuser_blue.png&s=32"
},
"following": {
"href": "https://bitbucket.org/api/2.0/users/whit537/following"
}
},
"created_on": "2012-01-23T20:11:10.736097+00:00",
"location": "Pittsburgh, PA USA",
"type": "user"
}
bountysource = lambda: {
"bio": "Code alchemist at Bountysource.",
"twitter_account": {
"uid": 313084547,
"followers": None,
"following": None,
"image_url": "https://cloudinary-a.akamaihd.net/bountysource/image/twitter_name/d_noaoqqwxegvmulwus0un.png,c_pad,w_100,h_100/corytheboyd.png",
"login": "corytheboyd",
"id": 2105
},
"display_name": "corytheboyd",
"url": "",
"type": "Person",
"created_at": "2012-09-14T03:28:07Z",
"slug": "6-corytheboyd",
"facebook_account": {
"uid": 589244295,
"followers": 0,
"following": 0,
"image_url": "https://cloudinary-a.akamaihd.net/bountysource/image/facebook/d_noaoqqwxegvmulwus0un.png,c_pad,w_100,h_100/corytheboyd.jpg",
"login": "corytheboyd",
"id": 2103
},
"gratipay_account": {
"uid": 17306,
"followers": 0,
"following": 0,
"image_url": "https://cloudinary-a.akamaihd.net/bountysource/image/gravatar/d_noaoqqwxegvmulwus0un.png,c_pad,w_100,h_100/bdeaea505d059ccf23d8de5714ae7f73",
"login": "corytheboyd",
"id": 2067
},
"large_image_url": "https://cloudinary-a.akamaihd.net/bountysource/image/twitter_name/d_noaoqqwxegvmulwus0un.png,c_pad,w_400,h_400/corytheboyd.png",
"frontend_path": "/users/6-corytheboyd",
"image_url": "https://cloudinary-a.akamaihd.net/bountysource/image/twitter_name/d_noaoqqwxegvmulwus0un.png,c_pad,w_100,h_100/corytheboyd.png",
"location": "San Francisco, CA",
"medium_image_url": "https://cloudinary-a.akamaihd.net/bountysource/image/twitter_name/d_noaoqqwxegvmulwus0un.png,c_pad,w_200,h_200/corytheboyd.png",
"frontend_url": "https://www.bountysource.com/users/6-corytheboyd",
"github_account": {
"uid": 692632,
"followers": 11,
"following": 4,
"image_url": "https://cloudinary-a.akamaihd.net/bountysource/image/gravatar/d_noaoqqwxegvmulwus0un.png,c_pad,w_100,h_100/bdeaea505d059ccf23d8de5714ae7f73",
"login": "corytheboyd",
"id": 89,
"permissions": [
"public_repo"
]
},
"company": "Bountysource",
"id": 6,
"public_email": "<EMAIL>"
}
github = lambda: {
"avatar_url": "https://avatars2.githubusercontent.com/u/134455?v=4",
"bio": None,
"blog": "http://chadwhitacre.com/",
"company": None,
"created_at": "2009-10-03T02:47:57Z",
"email": "<EMAIL>",
"events_url": "https://api.github.com/users/chadwhitacre/events{/privacy}",
"followers": 363,
"followers_url": "https://api.github.com/users/chadwhitacre/followers",
"following": 12,
"following_url": "https://api.github.com/users/chadwhitacre/following{/other_user}",
"gists_url": "https://api.github.com/users/chadwhitacre/gists{/gist_id}",
"gravatar_id": "",
"hireable": None,
"html_url": "https://github.com/chadwhitacre",
"id": 134455,
"location": "Ambridge, PA",
"login": "chadwhitacre",
"name": "<NAME>",
"organizations_url": "https://api.github.com/users/chadwhitacre/orgs",
"public_gists": 42,
"public_repos": 37,
"received_events_url": "https://api.github.com/users/chadwhitacre/received_events",
"repos_url": "https://api.github.com/users/chadwhitacre/repos",
"site_admin": False,
"starred_url": "https://api.github.com/users/chadwhitacre/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/chadwhitacre/subscriptions",
"type": "User",
"updated_at": "2018-02-09T17:28:40Z",
"url": "https://api.github.com/users/chadwhitacre",
}
gitlab = lambda: {
"two_factor_enabled": False,
"can_create_project": True,
"twitter": "Changaco",
"linkedin": "",
"color_scheme_id": 1,
"web_url": "https://gitlab.com/u/Changaco",
"skype": "",
"identities": [],
"id": 155803,
"projects_limit": 100000,
"current_sign_in_at": "2016-02-16T20:38:00.092Z",
"state": "active",
"email": "<EMAIL>",
"website_url": "http://changaco.oy.lc",
"username": "Changaco",
"bio": "",
"can_create_group": True,
"is_admin": False,
"name": "Changaco",
"created_at": "2015-05-22T12:51:41.103Z",
"avatar_url": "https://secure.gravatar.com/avatar/37bbd0ccd96666e9868bee47e3c30eb0?s=80&d=identicon",
"private_token": "<PASSWORD>",
"theme_id": 1
}
def linuxfr():
    """Return a fresh copy of a LinuxFr.org API user payload.

    Kept as a callable (like the other fixtures in this module) so tests
    cannot persistently mutate shared data.
    """
    return {
        "login": "Changaco",
        "email": "<EMAIL>",
        "created_at": "2009-08-14T10:31:22.000+02:00",
    }
mastodon = lambda: ('mastodon.rocks', {
"id": 1964,
"username": "Liberapay",
"acct": "Liberapay",
"display_name": "Liberapay",
"locked": False,
"created_at": "2017-04-06T12:54:56.938Z",
"followers_count": 219,
"following_count": 53,
"statuses_count": 52,
"note": "<p><a href=\"https://liberapay.com/\" rel=\"nofollow noopener\" target=\"_blank\"><span class=\"invisible\">https://</span><span class=\"\">liberapay.com/</span><span class=\"invisible\"></span></a> is a recurrent donations platform. It's run by a nonprofit organization based in France and its source code is public.</p>",
"url": "https://mastodon.rocks/@Liberapay",
"avatar": "https://mastodon.rocks/system/accounts/avatars/000/001/964/original/eeaf9ed6fa5eb7b3.png?1491484129",
"avatar_static": "https://mastodon.rocks/system/accounts/avatars/000/001/964/original/eeaf9ed6fa5eb7b3.png?1491484129",
"header": "https://mastodon.rocks/headers/original/missing.png",
"header_static": "https://mastodon.rocks/headers/original/missing.png"
})
def openstreetmap():
    """Return a fresh ElementTree for an OpenStreetMap API v0.6 user payload.

    The sample copied from the OSM wiki contained a raw ``&`` inside the
    gravatar URL, which is not well-formed XML, so ``ET.fromstring`` raised
    a ``ParseError`` as soon as this fixture was called.  The ampersand is
    escaped as ``&amp;`` here; ElementTree yields the literal ``&`` back
    when the attribute is read, so consumers see the original URL.
    """
    return ET.fromstring("""
    <!-- copied from http://wiki.openstreetmap.org/wiki/API_v0.6 -->
    <osm version="0.6" generator="OpenStreetMap server">
      <user id="12023" display_name="jbpbis" account_created="2007-08-16T01:35:56Z">
        <description></description>
        <contributor-terms agreed="false"/>
        <img href="http://www.gravatar.com/avatar/c8c86cd15f60ecca66ce2b10cb6b9a00.jpg?s=256&amp;d=http%3A%2F%2Fwww.openstreetmap.org%2Fassets%2Fusers%2Fimages%2Flarge-39c3a9dc4e778311af6b70ddcf447b58.png"/>
        <roles>
        </roles>
        <changesets count="1"/>
        <traces count="0"/>
        <blocks>
          <received count="0" active="0"/>
        </blocks>
      </user>
    </osm>
    """)
pleroma = lambda: ('pleroma.site', {
"id": 3,
"username": "administration",
"acct": "administration",
"display_name": "pleroma.site administration",
"locked": False,
"created_at": "2018-04-18T13:55:24.000Z",
"followers_count": 36,
"following_count": 0,
"statuses_count": 1,
"note": "pleroma.site administration notices go here",
"url": "https://pleroma.site/users/administration",
"avatar": "https://pleroma.site/images/avi.png",
"avatar_static": "https://pleroma.site/images/avi.png",
"header": "https://pleroma.site/images/banner.png",
"header_static": "https://pleroma.site/images/banner.png"
})
twitter = lambda: {
"lang": "en",
"utc_offset": 3600,
"statuses_count": 1339,
"follow_request_sent": None,
"friends_count": 81,
"profile_use_background_image": True,
"contributors_enabled": False,
"profile_link_color": "0084B4",
"profile_image_url": "http://pbs.twimg.com/profile_images/3502698593/36a503f65df33aea1a59faea77a57e73_normal.png",
"time_zone": "Paris",
"notifications": None,
"is_translator": False,
"favourites_count": 81,
"profile_background_image_url_https": "https://abs.twimg.com/images/themes/theme1/bg.png",
"profile_background_color": "C0DEED",
"id": 23608307,
"profile_background_image_url": "http://abs.twimg.com/images/themes/theme1/bg.png",
"description": "#Freelance computer programmer from France. In English: #FreeSoftware and #BasicIncome. In French: #LogicielLibre, #RevenuDeBase and #Démocratie/#TirageAuSort.",
"is_translation_enabled": False,
"default_profile": True,
"profile_background_tile": False,
"verified": False,
"screen_name": "Changaco",
"entities": {
"url": {
"urls": [
{
"url": "http://t.co/2VUhacI9SG",
"indices": [
0,
22
],
"expanded_url": "http://changaco.oy.lc/",
"display_url": "changaco.oy.lc"
}
]
},
"description": {
"urls": []
}
},
"url": "http://t.co/2VUhacI9SG",
"profile_image_url_https": "https://pbs.twimg.com/profile_images/3502698593/36a503f65df33aea1a59faea77a57e73_normal.png",
"profile_sidebar_fill_color": "DDEEF6",
"location": "France",
"name": "Changaco",
"geo_enabled": False,
"profile_text_color": "333333",
"followers_count": 94,
"profile_sidebar_border_color": "C0DEED",
"id_str": "23608307",
"default_profile_image": False,
"following": None,
"protected": False,
"created_at": "Tue Mar 10 15:58:07 +0000 2009",
"listed_count": 7
}
def facebook():
    """Return a fresh copy of a Facebook Graph API user payload."""
    return {
        "id": "187701977",
        "first_name": "Chad",
        "gender": "male",
        "last_name": "Whitacre",
        "link": "https://www.facebook.com/whit537",
        "locale": "en_US",
        "name": "<NAME>",
        "username": "whit537",
    }
google = lambda: {
"resourceName": "people/110791859286178226496",
"etag": "%EgkBAj0DBgo1Ny4aDAECAwQFBgcICQoLDCIMVXJJdG5BdEg2eXc9",
"names": [
{
"metadata": {
"primary": True,
"source": {
"type": "PROFILE",
"id": "110791859286178226496"
}
},
"displayName": "<NAME>",
"familyName": "Panda",
"givenName": "Arthur",
"displayNameLastFirst": "<NAME>"
}
],
"nicknames": [
{
"metadata": {
"primary": True,
"source": {
"type": "PROFILE",
"id": "110791859286178226496"
}
},
"value": "Changaco"
}
],
"photos": [
{
"metadata": {
"primary": True,
"source": {
"type": "PROFILE",
"id": "110791859286178226496"
}
},
"url": "https://lh6.googleusercontent.com/-JMfIhnfsuPw/AAAAAAAAAAI/AAAAAAAAABY/M5ldFOyJAPs/s100/photo.jpg"
}
]
}
def twitch():
    """Return a fresh copy of a Twitch Helix ``users`` API payload."""
    user = {
        "id": "44322889",
        "login": "dallas",
        "display_name": "dallas",
        "type": "staff",
        "broadcaster_type": "",
        "description": "Just a gamer playing games and chatting. :)",
        "profile_image_url": "https://static-cdn.jtvnw.net/jtv_user_pictures/dallas-profile_image-1a2c906ee2c35f12-300x300.png",
        "offline_image_url": "https://static-cdn.jtvnw.net/jtv_user_pictures/dallas-channel_offline_image-1a2c906ee2c35f12-1920x1080.png",
        "view_count": 191836881,
        "email": "<EMAIL>",
    }
    return {"data": [user]}
youtube = lambda: {
"kind": "youtube#channelListResponse",
"etag": "\"m2yskBQFythfE4irbTIeOgYYfBU/RRgkDTZYdqaPKhXcfRMXr0TeCTQ\"",
"pageInfo": {
"totalResults": 1,
"resultsPerPage": 1
},
"items": [
{
"kind": "youtube#channel",
"etag": "\"m2yskBQFythfE4irbTIeOgYYfBU/XIm1NyrN6U0KU-diy-M_tCBVXD0\"",
"id": "UCSNwnIgctQU9kQluQu7WrPA",
"snippet": {
"title": "Liberapay Official",
"description": "Liberapay is a platform for recurrent donations, run by a nonprofit organization based in France.",
"publishedAt": "2017-02-05T09:09:44.000Z",
"thumbnails": {
"default": {
"url": "https://yt3.ggpht.com/-3Aqgv0E2nQg/AAAAAAAAAAI/AAAAAAAAAAA/fELUZkAUgV0/s88-c-k-no-mo-rj-c0xffffff/photo.jpg"
},
"medium": {
"url": "https://yt3.ggpht.com/-3Aqgv0E2nQg/AAAAAAAAAAI/AAAAAAAAAAA/fELUZkAUgV0/s240-c-k-no-mo-rj-c0xffffff/photo.jpg"
},
"high": {
"url": "https://yt3.ggpht.com/-3Aqgv0E2nQg/AAAAAAAAAAI/AAAAAAAAAAA/fELUZkAUgV0/s240-c-k-no-mo-rj-c0xffffff/photo.jpg"
}
},
"localized": {
"title": "Liberapay Official",
"description": "Liberapay is a platform for recurrent donations, run by a nonprofit organization based in France."
}
}
}
]
}
| 2.171875 | 2 |
tests/test_server.py | ZhukovAlexander/rafter | 9 | 12772503 | import unittest
from unittest import mock
import uuid
import asyncio
from rafter.server import RaftServer
from rafter.models import LogEntry
from rafter.exceptions import NotLeaderException
from .mocks import Log, Storage, Service
class RaftServerTest(unittest.TestCase):
    """Unit tests for :class:`rafter.server.RaftServer`.

    Each test uses a server built from the stub ``Log``/``Storage``/``Service``
    mocks, with the election timer replaced by a Mock so no real timers run.
    """

    def setUp(self):
        # Fresh bootstrap server with mocked protocol and election timer.
        self.loop = asyncio.get_event_loop()
        self.server = RaftServer(
            Service(),
            log=Log(),
            server_protocol=mock.Mock(),
            storage=Storage(),
            bootstrap=True
        )
        self.server.election_timer = mock.Mock()

    def test_start_stop(self):
        """start() should arm the election timer with randint()/100 seconds."""
        server = RaftServer(
            Service(),
            log=Log(),
            storage=Storage(),
            bootstrap=True
        )
        server.election_timer = mock.Mock()
        # randint is patched to 100, so the expected timer delay is 1.
        with mock.patch('rafter.server.random.randint', return_value=100):
            server.start()
            server.election_timer.start.assert_called_with(1)

    def test_initial_heartbeat_calls_add_peer(self):
        """A bootstrap heartbeat should schedule the service's add_peer()."""
        with mock.patch('rafter.server.asyncio.ensure_future') as ensure_future:
            self.server.heartbeat(bootstraps=True)
            ensure_future.assert_called_with(self.server.service.add_peer())

    def test_heartbeat_should_schedule_ae(self):
        """A regular heartbeat should schedule send_append_entries()."""
        with mock.patch('rafter.server.asyncio.ensure_future') as ensure_future:
            self.server.send_append_entries = mock.Mock()
            self.server.heartbeat(bootstraps=False)
            ensure_future.assert_called_with(self.server.send_append_entries())

    def test_handle_calls_correct_state_method(self):
        """handle(name) should dispatch to the same-named state method."""
        self.server.state = mock.Mock()
        method = 'test_method'
        res = self.server.handle(method)
        getattr(self.server.state, method).assert_called_with()

    def test_handle_write_raises_error_when_not_leader(self):
        # Followers must reject write commands.
        with self.assertRaises(NotLeaderException):
            self.loop.run_until_complete(self.server.handle_write_command('test', (1, 2), {1: 1}))

    def test_handle_read_command(self):
        """A leader should execute read commands and return their result."""
        self.server.state.to_leader()
        res = self.loop.run_until_complete(self.server.handle_read_command('test', (1, 2), {1: 1}))
        self.assertEqual(res, 'result')

    def test_handle_read_raises_error_when_not_leader(self):
        # Followers must reject read commands too.
        with self.assertRaises(NotLeaderException):
            self.loop.run_until_complete(self.server.handle_read_command('test', (1, 2), {1: 1}))

    def test_add_peer(self):
        # Peer ids are stored as bytes keys in server.peers.
        self.server.add_peer({'id': 'peer-2'})
        self.assertIn(b'peer-2', self.server.peers)

    def test_remove_peer(self):
        # Removing an unknown peer raises; removing a known one deletes it.
        with self.assertRaises(KeyError):
            self.server.remove_peer('notapeer')
        self.server.remove_peer(self.server.id)
        self.assertNotIn(self.server.id, self.server.peers)

    def test_list_peers(self):
        self.assertListEqual(self.server.list_peers(), list(self.server.peers))
pong/paddle.py | Arihant25/beginner-python-projects | 1 | 12772504 | from turtle import Turtle
class Paddle(Turtle):
    """A pong paddle: a white square stretched to 5x1 turtle units that
    slides vertically in fixed 20-pixel steps."""

    def __init__(self, coordinates):
        super().__init__()
        self.penup()  # lift the pen so positioning never draws a trail
        self.shapesize(stretch_wid=5, stretch_len=1)
        self.shape('square')
        self.goto(coordinates)
        self.color('white')

    def _shift(self, delta):
        # Shared vertical move used by both public movement methods.
        self.sety(self.ycor() + delta)

    def move_up(self):
        """Slide the paddle 20 pixels up."""
        self._shift(20)

    def move_down(self):
        """Slide the paddle 20 pixels down."""
        self._shift(-20)
bluegraph/backends/utils.py | BlueBrain/BlueGraph | 25 | 12772505 | <reponame>BlueBrain/BlueGraph
"""A set of factory utils for different processing backends."""
from bluegraph.exceptions import BlueGraphException
from .configs import (ANALYZER_CLS, EMBEDDER_CLS)
def create_analyzer(analyzer_type, backend,
                    pgframe=None, directed=True,
                    uri=None, username=None, password=None,
                    driver=None, node_label=None, edge_label=None):
    """Create an analyzer interface for a given type and backend.

    The ``password`` default was mangled to the placeholder ``<PASSWORD>``
    (not valid Python); it is restored to ``None``, matching the other
    optional Neo4j connection parameters.

    :param analyzer_type: key into ``ANALYZER_CLS`` selecting the analyzer
        family (e.g. metrics, communities, paths).
    :param backend: key selecting the processing backend implementation.
    :param pgframe: property graph to analyze (in-memory backends).
    :param directed: whether the graph is treated as directed.
    :param uri, username, password, driver, node_label, edge_label:
        connection/labeling parameters used only by the Neo4j backend.
    :raises BlueGraphException: if the analyzer type is unknown, or it is
        not enabled/implemented for *backend*.
    """
    if analyzer_type not in ANALYZER_CLS:
        raise BlueGraphException(
            f"Analyzer type '{analyzer_type}' is not implemented, "
            "available analyzers are: " + ", ".join(
                [f"'{el}'" for el in ANALYZER_CLS.keys()])
        )
    if backend not in ANALYZER_CLS[analyzer_type]:
        verbose_analyzer_name = analyzer_type.replace("_", " ").capitalize()
        raise BlueGraphException(
            f"{verbose_analyzer_name} is not enabled or not implemented for "
            f"the backend '{backend}', available backends are: " + ", ".join(
                [f"'{el}'" for el in ANALYZER_CLS[analyzer_type].keys()])
        )
    cls = ANALYZER_CLS[analyzer_type][backend]
    # Only the Neo4j backend takes connection parameters; the in-memory
    # backends receive just the frame and the directedness flag.
    return (
        cls(
            pgframe=pgframe, directed=directed,
            uri=uri, username=username, password=password,
            driver=driver, node_label=node_label, edge_label=edge_label)
        if backend == "neo4j"
        else cls(pgframe=pgframe, directed=directed)
    )
def create_node_embedder(backend, model_name,
                         directed=True, include_type=False,
                         feature_props=None, feature_vector_prop=None,
                         edge_weight=None, **model_params):
    """Create a node embedding interface for a given backend.

    Looks up the embedder class registered for *backend* in
    ``EMBEDDER_CLS`` and instantiates it, forwarding all configuration
    (including arbitrary backend-specific ``model_params``).

    :raises BlueGraphException: if no embedder is registered for *backend*.
    """
    if backend not in EMBEDDER_CLS:
        raise BlueGraphException(
            f"Node embedder corresponding to the backend '{backend}' "
            "is not enabled or not implemented, available backends are: " +
            ", ".join([f"'{el}'" for el in EMBEDDER_CLS.keys()]))
    return EMBEDDER_CLS[backend](
        model_name=model_name, directed=directed, include_type=include_type,
        feature_props=feature_props, feature_vector_prop=feature_vector_prop,
        edge_weight=edge_weight, **model_params)
| 2.21875 | 2 |
project_euler/solutions/problem_96.py | cryvate/project-euler | 0 | 12772506 | from collections import Counter
from time import time
from typing import List, Optional, Tuple
from ..framework.load_file import load_file
from ..library.base import list_to_number
Sudoku = List[List[int]]
spec = '{:6.6f}'
class SetNoZero(set):
    """A ``set`` that silently ignores attempts to add 0 (0 marks an empty
    sudoku cell) and whose difference operator preserves the subclass type.
    """

    def add(self, x) -> None:
        """Insert *x* unless it is 0."""
        if x != 0:
            super().add(x)

    def __sub__(self, other) -> 'SetNoZero':
        # Fix: the original annotated this ``-> None`` although it clearly
        # returns a SetNoZero.  Re-wrapping is needed because plain
        # ``set.__sub__`` would return a ``set`` and lose the zero-filtering
        # ``add`` on chained operations.
        return SetNoZero(super().__sub__(other))
digits = SetNoZero(range(1, 10))
def sstr(input: 'Sudoku') -> str:
    """Pretty-print a 9x9 sudoku grid.

    Digits are concatenated row by row; a space follows every third
    column and a blank line follows every third row, so the 3x3 boxes
    are visually separated.  Trailing newlines are stripped.

    Built with a list and ``''.join`` instead of the original repeated
    string ``+=`` (which is worst-case quadratic).  The annotation is
    quoted so the module-level ``Sudoku`` alias is not evaluated eagerly.
    """
    parts = []
    for i in range(9):
        for j in range(9):
            parts.append(str(input[i][j]))
            if j % 3 == 2:
                parts.append(' ')
        parts.append('\n')
        if i % 3 == 2:
            parts.append('\n')
    return ''.join(parts).strip('\n')
def least(input: 'Sudoku') -> 'Optional[Tuple[int, int]]':
    """Return the (row, column) of the first empty cell (value 0),
    scanning in row-major order, or ``None`` when the grid is full."""
    for row in range(9):
        for col in range(9):
            if input[row][col] == 0:
                return row, col
    return None
def constraint_solve_sudoku(input: Sudoku) -> Tuple[bool, Optional[Sudoku]]:
    """Solve a sudoku by constraint propagation with recursive backtracking.

    Mutates ``input`` in place.  Returns ``(True, grid)`` on success or
    ``(False, None)`` when the grid is inconsistent.  Each pass recomputes
    the candidate set of every empty cell and fills in forced values;
    once propagation stalls (``flag`` is False), the first empty cell is
    branched on recursively.
    """
    # Candidate digits still available per row / column / 3x3 box;
    # these are kept up to date as parts filled in
    lines = []
    columns = []
    boxes = []
    for i in range(9):
        line = SetNoZero()
        column = SetNoZero()
        for j in range(9):
            line.add(input[i][j])
            column.add(input[j][i])
        lines.append(digits - line)
        columns.append(digits - column)

    for i in range(3):
        boxes.append([])
        for j in range(3):
            box = SetNoZero()
            for k in range(3):
                for l in range(3):
                    box.add(input[i * 3 + k][j * 3 + l])
            boxes[i].append(digits - box)

    flag = True  # True while the previous propagation pass made progress
    while least(input):
        if not flag:
            # Propagation stalled: guess each candidate for the first
            # empty cell and recurse.
            i, j = least(input)
            # allowed guaranteed to exist, because the flag starts as true,
            # so at least one propagation pass has run and bound it.
            for value in allowed[i][j]:  # noqa: F821
                new_input = [[input[i][j] for j in range(9)] for i in range(9)]
                new_input[i][j] = value
                done, solution = constraint_solve_sudoku(new_input)
                if done:
                    return True, solution

            # No candidate worked: this branch is inconsistent.
            return False, None

        flag = False
        # allowed[i][j] is the candidate set for empty cell (i, j), or
        # None for already-filled cells.
        allowed = []
        for i in range(9):
            line = lines[i]
            allowed.append([])
            for j in range(9):
                if input[i][j] != 0:
                    allowed[i].append(None)
                else:
                    column = columns[j]
                    box = boxes[i // 3][j // 3]
                    allowed[i].append({d for d in digits if
                                       d in line and d in column and d in box})

        # Naked singles: cells with exactly one candidate.
        updates = []
        for i in range(9):
            for j in range(9):
                if allowed[i][j] and len(allowed[i][j]) == 1:
                    updates.append((i, j, list(allowed[i][j])[0]))

        # Hidden singles per row: a digit allowed in only one cell.
        for i in range(9):
            counter = Counter()
            for j in range(9):
                if allowed[i][j]:
                    counter.update(allowed[i][j])
            for value, count in counter.items():
                if count == 1:
                    for j in range(9):
                        if allowed[i][j] and value in allowed[i][j]:
                            updates.append((i, j, value))

        # Hidden singles per column.
        for i in range(9):
            counter = Counter()
            for j in range(9):
                if allowed[j][i]:
                    counter.update(allowed[j][i])
            for value, count in counter.items():
                if count == 1:
                    for j in range(9):
                        if allowed[j][i] and value in allowed[j][i]:
                            updates.append((j, i, value))

        # Hidden singles per 3x3 box.
        for i in range(3):
            for j in range(3):
                counter = Counter()
                for k in range(3):
                    for l in range(3):
                        allowedness = allowed[i * 3 + k][j * 3 + l]
                        if allowedness:
                            counter.update(allowed[i * 3 + k][j * 3 + l])
                for value, count in counter.items():
                    if count == 1:
                        for k in range(3):
                            for l in range(3):
                                allowedness = allowed[i * 3 + k][j * 3 + l]
                                if allowedness and value in allowedness:
                                    updates.append((i * 3 + k, j * 3 + l,
                                                    value))

        if updates:
            flag = True
            # Deduplicate: the same forced value may be found by several
            # of the rules above.
            updates = set(updates)
            for i, j, value in updates:
                input[i][j] = value
                allowed[i][j] = None
                try:
                    lines[i].remove(value)
                    columns[j].remove(value)
                    boxes[i // 3][j // 3].remove(value)
                except KeyError:  # means that this sudoku is inconsistent
                    return False, None

    return True, input
def solve(name: str='sudoku.txt', relative: bool=True) -> int:
    """Project Euler 96: solve every puzzle in the input file and return
    the sum of the 3-digit numbers formed by the top-left corner of each
    solved grid.

    :param name: file containing the puzzles, separated by "Grid NN" headers.
    :param relative: forwarded to ``load_file`` for path resolution.
    """
    raw = load_file(96, name, relative)
    # Split on the "Grid" headers; drop each chunk's header line ([1:])
    # and any blank lines.
    grids_str = [[line for line in grid.split('\n')[1:] if line]
                 for grid in raw.split('Grid') if grid]
    grids = [[[int(d) for d in line] for line in grid] for grid in grids_str]

    accumulate = 0

    for i, grid in enumerate(grids):
        print('={:>2}th grid='.format(i))
        start = time()
        _, solution = constraint_solve_sudoku(grid)
        spent = time() - start
        print(sstr(solution))
        # First three digits of the top row, read as one number.
        accumulate += list_to_number(solution[0][0:3])
        print(f'({spec.format(spent)}s)')

    return accumulate
| 3.359375 | 3 |
articles/migrations/0002_auto_20201224_1436.py | dmahon10/django-tiered-membership-web-app | 0 | 12772507 | <filename>articles/migrations/0002_auto_20201224_1436.py
# Generated by Django 3.1.4 on 2020-12-24 14:36
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.1.4 (see header).  Adds a non-editable
    # `tier` discriminator to both article models; the default differs per
    # model ('free' vs 'premium') and editable=False pins it there.

    dependencies = [
        ('articles', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='freearticle',
            name='tier',
            field=models.CharField(choices=[('free', 'FREE'), ('premium', 'PREMIUM')], default='free', editable=False, max_length=7),
        ),
        migrations.AddField(
            model_name='premiumarticle',
            name='tier',
            field=models.CharField(choices=[('free', 'FREE'), ('premium', 'PREMIUM')], default='premium', editable=False, max_length=7),
        ),
    ]
| 1.484375 | 1 |
karp5/tests/unit_tests/server/test_searching.py | spraakbanken/karp-backend-v5 | 4 | 12772508 | from unittest import mock
import pytest
from karp5.server import searching
def test_autocompletequery(app):
    """autocompletequery must wrap the given boost clause together with a
    match_phrase on the mode field inside a bool/should query."""
    query_text = "any"
    boost_clause = {"term": {"field": {"boost": "500", "value": query_text}}}
    result = searching.autocompletequery("foo", boost_clause, query_text)
    assert result == {
        "bool": {
            "should": [boost_clause, {"match_phrase": {"foo": query_text}}]
        }
    }
@pytest.mark.parametrize("user_is_authorized", [False, True])
def test_autocomplete_foo(app, user_is_authorized):
    """/autocomplete must build a constant_score filter query from the
    q/mode parameters and hand it to parser.adapt_query; unauthorized
    users additionally get a status:ok filter clause."""
    q = "any"
    mode = "foo"
    path = f"/autocomplete?q={q}&mode={mode}"
    with app.test_request_context(path):
        # Stub out jsonify, the query adapter, Elasticsearch and auth so
        # only the query construction is exercised.
        with mock.patch("karp5.server.searching.jsonify", return_value=None), mock.patch(
            "karp5.server.translator.parser.adapt_query", return_value=None
        ) as adapt_query_mock, mock.patch(
            "karp5.config.conf_mgr.elastic", return_value="ES"
        ), mock.patch(
            "karp5.context.auth.validate_user", return_value=(user_is_authorized, ["foo"])
        ):
            searching.autocomplete()
            # Boosted exact term + phrase match, restricted to entries
            # that have the mode field and belong to the lexicon.
            expected_must = [
                {
                    "bool": {
                        "should": [
                            {"term": {"foo": {"boost": "500", "value": "any"}}},
                            {"match_phrase": {"foo": "any"}},
                        ]
                    }
                },
                {"exists": {"field": "foo"}},
                {"term": {"lexiconName": "foo"}},
            ]
            if not user_is_authorized:
                # Unauthorized users may only see published entries.
                expected_must.append({"term": {"status": "ok"}})
            expected_elasticq = {
                "query": {
                    "constant_score": {
                        "filter": {
                            "bool": {
                                "must": expected_must
                            }
                        }
                    }
                }
            }
            adapt_query_mock.assert_called_with(
                1000, 0, "ES", expected_elasticq, {"size": 1000, "index": mode, "_source": [mode]}
            )
@pytest.mark.parametrize("lexicon", ["foo"])
@pytest.mark.parametrize("user_is_authorized", [False, True])
@pytest.mark.parametrize("with_center", [False, True])
def test_get_context(app, lexicon, user_is_authorized, with_center):
    """/getcontext/<lexicon> must look up the center entry (by id when
    ?center= is given, otherwise the lexicon's first entry) and then call
    get_pre_post twice, once for each direction; unauthorized users get a
    status:ok filter applied throughout."""
    center_id = "ID_TEST"
    if with_center:
        path = f"/getcontext/{lexicon}?center={center_id}"
    else:
        path = f"/getcontext/{lexicon}"
    sortvalue = "KEY_TEST"
    # Fake ES response for the center lookup: one hit with a sort key.
    center_q_hits = {"hits": {"hits": [{"sort": [sortvalue], "_id": center_id}]}}
    with app.test_request_context(path):
        with mock.patch("karp5.server.searching.jsonify", return_value=None), mock.patch(
            "karp5.config.conf_mgr.elastic"
        ) as conf_mgr_elastic_mock, mock.patch(
            "karp5.context.auth.validate_user", return_value=(user_is_authorized, [lexicon]),
        ), mock.patch(
            "karp5.server.searching.get_pre_post", return_value=[None]
        ) as get_pre_post_mock:
            attrs = {"search.return_value": center_q_hits}
            es_search_mock = mock.Mock()
            es_search_mock.configure_mock(**attrs)
            conf_mgr_elastic_mock.return_value = es_search_mock
            searching.get_context(lexicon)
            # Expected center lookup: id term when a center was requested,
            # otherwise a phrase match on the lexicon name.
            if with_center:
                expected_q = {"term": {"_id": center_id}}
            else:
                expected_q = {"match_phrase": {"lexiconName": lexicon}}
            if user_is_authorized:
                expected_filters = []
            else:
                expected_filters = [{"term": {"status": "ok"}}]
            if with_center:
                if user_is_authorized:
                    expected_center_q = {"query": expected_q}
                else:
                    expected_center_q = {
                        "query": {"bool": {"must": expected_q, "filter": expected_filters}}
                    }
            else:
                if user_is_authorized:
                    expected_center_q = {"query": {"bool": {"must": [expected_q],}}}
                else:
                    expected_center_q = {
                        "query": {"bool": {"must": [expected_q, expected_filters[0],],}}
                    }
            es_search_mock.search.assert_called_with(
                index=lexicon,
                doc_type="lexicalentry",
                size=1,
                body=expected_center_q,
                sort=["foo.raw:asc"],
            )
            # One call per direction (pre and post), each with the same
            # auth filters and an integer size argument.
            assert get_pre_post_mock.call_count == 2
            for call_args in get_pre_post_mock.call_args_list:
                print(f"call_args = {call_args}")
                args, kwargs = call_args
                assert "place" in kwargs
                assert "filters" in kwargs
                assert kwargs["filters"] == expected_filters
                assert isinstance(args[6], int)
@pytest.mark.parametrize("place", ["post", "pre"])
@pytest.mark.parametrize("user_is_authorized", [False, True])
def test_get_pre_post_foo(app, place, user_is_authorized):
    """get_pre_post must issue a range query on the sort field (gte for
    'post', lte for 'pre'), ask for 3*(size+1) hits sorted in the matching
    direction, and wrap unauthorized queries in a status:ok filter."""
    mode = "foo"
    exps = []
    center_id = None
    sortfield = ["SORTFIELD_TEST"]
    sortfieldname = "foo"  # must exist in config
    sortvalue = "SORTVALUE_TEST"
    size = 10
    es = "ES"
    if user_is_authorized:
        filters = []
    else:
        filters = [{"term": {"status": "ok"}}]
    with mock.patch(
        "karp5.server.translator.parser.adapt_query", return_value={}
    ) as adapt_query_mock:
        searching.get_pre_post(
            exps,
            center_id,
            sortfield,
            sortfieldname,
            sortvalue,
            mode,
            size,
            es,
            mode,
            place=place,
            filters=filters,
        )
        # "post" looks forward from the center (>= sort value, ascending);
        # "pre" looks backward (<= sort value, descending).
        expected_q = {"range": {sortfieldname: {"gte" if place == "post" else "lte": sortvalue}}}
        if user_is_authorized:
            expected_elasticq = {"query": {"bool": {"must": [expected_q]}}}
        else:
            expected_elasticq = {
                "bool": {"must": [expected_q], "filter": {"bool": {"must": filters}},}
            }
        expected_size = 3 * (size + 1)
        expected_sort = ["{}:{}".format(sortfield[0], "asc" if place == "post" else "desc")]
        adapt_query_mock.assert_called_once()
        args, _ = adapt_query_mock.call_args
        assert args[0] == expected_size
        assert args[3] == expected_elasticq
        assert args[4]["size"] == expected_size
        assert args[4]["sort"] == expected_sort
def test_export_foo_unauth_user(app):
    """An unauthenticated user must not be able to export a lexicon, even
    one they are listed for."""
    lexicon = "foo"
    with app.test_request_context(f"/export/{lexicon}"):
        auth_patch = mock.patch(
            "karp5.context.auth.validate_user", return_value=(False, [lexicon])
        )
        with auth_patch, pytest.raises(searching.errors.KarpAuthenticationError):
            searching.export(lexicon)
def test_export_foo_lexicon_not_permitted(app):
    """Exporting a lexicon the user is not permitted for must be refused."""
    lexicon = "restricted"
    with app.test_request_context(f"/export/{lexicon}"):
        auth_patch = mock.patch(
            "karp5.context.auth.validate_user", return_value=(False, ["permitted"])
        )
        with auth_patch, pytest.raises(searching.errors.KarpAuthenticationError):
            searching.export(lexicon)
| 2.296875 | 2 |
misc/py/create_time_vs_error_plot_data_3d.py | sampotter/eikonal | 1 | 12772509 | #!/usr/bin/env python3
import argparse
def parse_args():
    """Parse the command-line options for the time-vs-error sweep.

    Positional ``path`` is the output file; ``minpow``/``maxpow``/``step``
    control the problem-size grid, ``trials`` the timing repetitions, and
    ``speed_funcs`` optionally restricts the speed functions (comma list).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('path', type=str)
    parser.add_argument('-m', '--minpow', type=int, default=3)
    parser.add_argument('-M', '--maxpow', type=int, default=7)
    parser.add_argument('-s', '--step', type=int, default=2)
    parser.add_argument('-t', '--trials', type=int, default=10)
    parser.add_argument('--speed_funcs', type=str)
    return parser.parse_args()
# We do this ahead of time so that if we end up only printing the
# usage message we don't bother with the other (e.g. MPI-related)
# setup below here
# Parse CLI arguments before the heavy MPI/h5py imports below, so a
# usage/--help exit never touches the MPI machinery (see comment above).
if __name__ == '__main__':
    args = parse_args()
import sys
if '../../build/Release' not in sys.path:
sys.path.insert(0, '../../build/Release')
import pyolim as olim
import h5py
import mpi4py.MPI
import numpy as np
import os.path
from common3d import compute_soln, get_exact_soln, get_marcher_name, marchers, \
time_marcher
from itertools import product
from speedfuncs3d import get_speed_func_name, get_speed_func_by_name, \
get_soln_func, speed_funcs
comm = mpi4py.MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
def rms(x):
    """Root-mean-square of all entries of the (nonempty) array *x*."""
    flat = x.flatten()
    assert(flat.size > 0)
    return np.sqrt(flat.dot(flat) / flat.size)
def linf_error(x):
    """Maximum absolute entry (L-infinity norm) of the array *x*."""
    return np.linalg.norm(x.flatten(), np.inf)
def get_ns(args):
    """Problem sizes for the sweep: ``step*(maxpow-minpow)+1`` points
    logarithmically spaced between 2**minpow and 2**maxpow, each snapped
    to an odd integer (2*round(n/2) + 1)."""
    count = args.step * (args.maxpow - args.minpow) + 1
    ns = np.logspace(args.minpow, args.maxpow, count, base=2)
    return (2 * np.round(ns / 2)).astype(int) + 1
def get_dataset_name(Marcher, s):
    """HDF5 group path ``<marcher_name>/<speed_func_name>``; spaces in the
    marcher name are replaced by underscores."""
    marcher_part = get_marcher_name(Marcher).replace(' ', '_')
    return f"{marcher_part}/{get_speed_func_name(s)}"
def create_datasets(f, M_by_s, ns):
    """Pre-create all HDF5 datasets for every (marcher, speed func) pair.

    For each pair, under the group ``<marcher>/<speed_func>``:
      ``n``            -- the problem sizes,
      ``u<n>``/``U<n>`` -- exact and numerical solutions on an n^3 grid,
      ``rms``/``max``/``t`` -- RMS error, max error, and CPU time per size.

    Datasets are created up front by every rank (the file is opened with
    the mpio driver in the ``__main__`` block).

    Fix: ``dtype=np.int``/``np.float`` used NumPy's deprecated aliases for
    the builtins (removed in NumPy 1.24); the builtins are equivalent.
    """
    for Marcher, s in M_by_s:
        name = get_dataset_name(Marcher, s)
        f.create_dataset(name + '/n', (len(ns),), dtype=int)
        for n in ns:
            shape = (n, n, n)
            f.create_dataset(name + '/u' + str(n), shape, dtype=float)
            f.create_dataset(name + '/U' + str(n), shape, dtype=float)
        f.create_dataset(name + '/rms', (len(ns),), dtype=float)
        f.create_dataset(name + '/max', (len(ns),), dtype=float)
        f.create_dataset(name + '/t', (len(ns),), dtype=float)
def populate_datasets(Marcher, s, ns, t):
    """Fill the datasets for one (marcher, speed function) pair.

    Writes into ``f``, the module-global h5py file opened in the
    ``__main__`` block (NOTE(review): relies on that global — confirm
    before reusing this function outside this script).  ``t`` is the
    number of timing trials per problem size.
    """
    name = get_dataset_name(Marcher, s)
    print(name)
    f[name + '/n'][:] = ns
    print('- computing exact solutions')
    us = [get_exact_soln(get_soln_func(s), n) for n in ns]
    for n, u in zip(ns, us):
        f[name + '/u' + str(n)][:, :, :] = u
    print('- computing numerical solutions')
    Us = [compute_soln(Marcher, s, n) for n in ns]
    for n, U in zip(ns, Us):
        f[name + '/U' + str(n)][:, :, :] = U
    print('- evaluating errors')
    f[name + '/rms'][:] = [rms(u - U) for u, U in zip(us, Us)]
    f[name + '/max'][:] = [linf_error(u - U) for u, U in zip(us, Us)]
    print('- collecting CPU times')
    f[name + '/t'][:] = [time_marcher(Marcher, s, n, ntrials=t) for n in ns]
if __name__ == '__main__':
    # Open the output file across all MPI ranks (mpio driver).
    with h5py.File(args.path, 'w', driver='mpio', comm=comm) as f:
        # Restrict to the requested speed functions, or use all of them.
        if args.speed_funcs is not None:
            speed_funcs_ = [
                get_speed_func_by_name(name) for name in
                args.speed_funcs.split(',')]
        else:
            speed_funcs_ = speed_funcs()

        ns = get_ns(args)
        if rank == 0:
            print('Test problem sizes: ' + ', '.join(map(str, ns)))

        if rank == 0:
            print('Creating datasets')
        # Every rank creates every dataset up front.
        create_datasets(f, product(marchers, speed_funcs_), ns)

        # Round-robin the (marcher, speed function) pairs over the ranks.
        for i, (Marcher, s) in enumerate(product(marchers, speed_funcs_)):
            if i % size != rank:
                continue
            populate_datasets(Marcher, s, ns, args.trials)
| 2.15625 | 2 |
imcsdk/mometa/bios/BiosProfileToken.py | ragupta-git/ImcSdk | 0 | 12772510 | <reponame>ragupta-git/ImcSdk<filename>imcsdk/mometa/bios/BiosProfileToken.py
"""This module contains the general information for BiosProfileToken ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class BiosProfileTokenConsts:
pass
class BiosProfileToken(ManagedObject):
    """This is BiosProfileToken class."""

    consts = BiosProfileTokenConsts()
    # This MO is addressed by its `name` naming property (rn "token-[name]").
    naming_props = set([u'name'])

    # Managed-object metadata, one entry per platform ("classic" rack
    # servers vs "modular" chassis); both are read-only ("OutputOnly", Get).
    mo_meta = {
        "classic": MoMeta("BiosProfileToken", "biosProfileToken", "token-[name]", VersionMeta.Version301c, "OutputOnly", 0xf, [], ["admin", "read-only", "user"], [u'biosProfile'], [], ["Get"]),
        "modular": MoMeta("BiosProfileToken", "biosProfileToken", "token-[name]", VersionMeta.Version301c, "OutputOnly", 0xf, [], ["admin", "read-only", "user"], [u'biosProfile'], [], ["Get"])
    }

    # Per-property metadata (XML name, type, version, access, masks, limits).
    prop_meta = {
        "classic": {
            "actual_value": MoPropertyMeta("actual_value", "actualValue", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version301c, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "configured_value": MoPropertyMeta("configured_value", "configuredValue", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
            "name": MoPropertyMeta("name", "name", "string", VersionMeta.Version301c, MoPropertyMeta.NAMING, None, 0, 510, None, [], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        },
        "modular": {
            "actual_value": MoPropertyMeta("actual_value", "actualValue", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version301c, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "configured_value": MoPropertyMeta("configured_value", "configuredValue", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
            "name": MoPropertyMeta("name", "name", "string", VersionMeta.Version301c, MoPropertyMeta.NAMING, None, 0, 510, None, [], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version301c, MoPropertyMeta.READ_ONLY, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        },
    }

    # XML attribute name -> python attribute name mapping.
    prop_map = {
        "classic": {
            "actualValue": "actual_value",
            "childAction": "child_action",
            "configuredValue": "configured_value",
            "dn": "dn",
            "name": "name",
            "rn": "rn",
            "status": "status",
        },
        "modular": {
            "actualValue": "actual_value",
            "childAction": "child_action",
            "configuredValue": "configured_value",
            "dn": "dn",
            "name": "name",
            "rn": "rn",
            "status": "status",
        },
    }

    def __init__(self, parent_mo_or_dn, name, **kwargs):
        # `name` is the naming property; the rest default to None until
        # populated from the IMC response.
        self._dirty_mask = 0
        self.name = name
        self.actual_value = None
        self.child_action = None
        self.configured_value = None
        self.status = None

        ManagedObject.__init__(self, "BiosProfileToken", parent_mo_or_dn, **kwargs)
| 1.796875 | 2 |
data_processing.py | yuddim/DefectSoft | 2 | 12772511 | <reponame>yuddim/DefectSoft
from __future__ import print_function
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
import os
import glob
import skimage.io as io
import cv2
import time
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from shutil import copyfile
from config import *
mask_dict = MASK_DICT
def convert_num_mask_to_color(dir_path_source, dir_path_target, mask_dict, is_gray=True):
    """Recolor class-index masks into 3-channel color masks.

    Every image in *dir_path_source* is read; pixels equal to a class
    index from *mask_dict* are painted with that class's color, and the
    result is written as an uncompressed PNG into *dir_path_target*.
    """
    os.makedirs(dir_path_target, exist_ok=True)
    for idx, fname in enumerate(sorted(os.listdir(dir_path_source))):
        src_path = os.path.join(dir_path_source, fname)
        index_mask = io.imread(src_path, as_gray=is_gray)
        colored = np.zeros((index_mask.shape[0], index_mask.shape[1], 3))
        for _, (class_index, class_color, *_rest) in mask_dict.items():
            colored[index_mask == class_index] = class_color
        cv2.imwrite(dir_path_target + '/' + fname, colored,
                    [cv2.IMWRITE_PNG_COMPRESSION, 0])
        print(str(idx) + ':' + src_path)
def dataset_preparation(dir_path_source_masks, dir_path_source_images, dir_path_target, mask_dict,
                        size_threshold = 20, ratio = 10, is_gray = True):
    """Split (image, mask) pairs into train/test/too_small directories.

    For every index mask in *dir_path_source_masks*:
      * the mask is recolored via *mask_dict* (index -> color),
      * pairs smaller than *size_threshold* on either side go to too_small,
      * otherwise every *ratio*-th pair goes to test, the rest to train,
      * pairs with a missing image or a mask/image size mismatch are
        skipped and counted (no_file / bad_crop).
    """
    # Build the full output directory tree up front.
    os.makedirs(dir_path_target, exist_ok = True)
    os.makedirs(dir_path_target+'/train', exist_ok=True)
    os.makedirs(dir_path_target+'/test', exist_ok=True)
    os.makedirs(dir_path_target + '/too_small', exist_ok=True)
    os.makedirs(dir_path_target + '/train'+'/images', exist_ok=True)
    os.makedirs(dir_path_target + '/train' + '/masks', exist_ok=True)
    os.makedirs(dir_path_target + '/test'+'/images', exist_ok=True)
    os.makedirs(dir_path_target + '/test' + '/masks', exist_ok=True)
    os.makedirs(dir_path_target + '/too_small'+'/images', exist_ok=True)
    os.makedirs(dir_path_target + '/too_small'+ '/masks', exist_ok=True)
    full_sorted_list = sorted(os.listdir(dir_path_source_masks))
    # full_count is only used by the commented-out fractional-split variant.
    full_count = len(full_sorted_list)
    no_file_count = 0
    bad_crop_count = 0
    for count, filename in enumerate(full_sorted_list):
        img = io.imread(os.path.join(dir_path_source_masks,filename),as_gray = is_gray)
        class_items = mask_dict.items()
        height = img.shape[0]
        width = img.shape[1]
        # Recolor the index mask: value[0] is the class index, value[1] its color.
        new_mask = np.zeros((height, width, 3))
        for key, value in class_items:
            class_name = key
            class_index = value[0]
            class_color = value[1]
            new_mask[img == class_index] = class_color
        target_path = dir_path_target
        if(width < size_threshold or height < size_threshold):
            target_path = target_path + '/too_small'
        else:
            #if(count < train_part*full_count):
            # Every ratio-th sample becomes a test sample.
            if (count % ratio != 0):
                target_path = target_path + '/train'
            else:
                target_path = target_path + '/test'
        # Matching image is expected to share the basename with a .jpg extension.
        image_file_name = filename.split('.')[0] + '.jpg'
        source_image_file_name = dir_path_source_images + '/' + image_file_name
        error_message = ''
        if (os.path.isfile(source_image_file_name)):
            (width_image, height_image) = Image.open(source_image_file_name).size
            # Only keep pairs whose mask and image sizes agree.
            if(width == width_image and height==height_image):
                cv2.imwrite(target_path + '/masks/' + filename, new_mask, [cv2.IMWRITE_PNG_COMPRESSION, 0])
                copyfile(source_image_file_name, target_path + '/images/' + image_file_name)
            else:
                bad_crop_count += 1
                error_message = 'bad_crop'
        else:
            no_file_count += 1
            error_message = 'no_file'
        print(error_message + str(count) + ':' + os.path.join(dir_path_source_masks,filename))
    print('no_file_count: ', no_file_count)
    print('bad_crop_count: ', bad_crop_count)
def integral_histogram(histogram):
    """Return the cumulative (integral) histogram of *histogram*.

    Bug fix: the original loop ``continue``d at index 0, so
    ``histogram[0]`` was dropped from every cumulative count (an
    off-by-one in the running sum).  The result keeps the original's
    ``(len(histogram), 1)`` float shape.
    """
    values = np.asarray(histogram, dtype=float).reshape(-1, 1)
    return np.cumsum(values, axis=0)
def get_mask_statistics(dir_path_source):
    """Histogram the widths/heights of all masks in a directory and plot them.

    The top panel shows the *cumulative* width histogram, the bottom panel
    the raw max(width, height) histogram, both truncated to sizes < 500.
    """
    # One bin per pixel size, up to 4000 px.
    width_histogram = np.zeros((4000, 1))
    # NOTE: height_histogram is accumulated but never plotted below.
    height_histogram = np.zeros((4000, 1))
    max_size_histogram = np.zeros((4000, 1))
    x = np.arange(4000)
    for count, filename in enumerate(sorted(os.listdir(dir_path_source))):
        # PIL reads only the header for .size, so this stays cheap.
        mask_size = Image.open(os.path.join(dir_path_source,filename)).size
        (width, height) = mask_size
        width_histogram[width] += 1
        height_histogram[height] += 1
        max_size_histogram[max(width,height)] += 1
    # Only the first 500 bins are interesting for display.
    max_size = 500
    fig = plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')
    ax_1 = fig.add_subplot(211)
    ax_1.plot(x[:max_size], integral_histogram(width_histogram[:max_size]), color='red', linewidth=2)
    ax_1.xaxis.set_major_locator(ticker.MultipleLocator(20))
    ax_1.set(title='width_histogram')
    ax_1.grid(which='major',
              color='k')
    ax_2 = fig.add_subplot(212)
    ax_2.plot(x[:max_size], (max_size_histogram[:max_size]), color='red', linewidth=2)
    ax_2.xaxis.set_major_locator(ticker.MultipleLocator(20))
    ax_2.set(title='max_size_histogram')
    ax_2.grid(which='major',
              color='k')
    plt.show()
# get_mask_statistics(dir_path_source = '/media/cds-pc3/Data/pku_data_segmentation/pku_data_segmentation/car_segm_dataset/train/images')
# convert_num_mask_to_color(
# dir_path_source = '/media/cds-pc3/Data/pku_data_segmentation/pku_data_segmentation/masks',
# dir_path_target = '/media/cds-pc3/Data/pku_data_segmentation/pku_data_segmentation/color_masks',
# mask_dict = mask_dict,
# is_gray = True)
# dataset_preparation(
# dir_path_source_masks = '/media/cds-pc3/Data/pku_data_segmentation/pku_data_segmentation/masks',
# dir_path_source_images = '/media/cds-pc3/Data/pku_data_segmentation/pku_data_segmentation/images',
# dir_path_target = '/media/cds-pc3/Data/pku_data_segmentation/pku_data_segmentation/car_segm_dataset',
# mask_dict = mask_dict,
# size_threshold = 20,
# ratio = 10,
# is_gray = True)
# dataset_preparation(
# dir_path_source_masks = '/media/cds-pc3/Data/pku_data_segmentation/pku_data_segmentation/crop_train_masks',
# dir_path_source_images = '/media/cds-pc3/Data/pku_data_segmentation/pku_data_segmentation/crop_train_images',
# dir_path_target = '/media/cds-pc3/Data/pku_data_segmentation/pku_data_segmentation/car_segm_coco_dataset',
# mask_dict = mask_dict,
# size_threshold = 20,
# ratio = 10,
# is_gray = True)
def train_generator(batch_size, train_path, image_folder, mask_folder, aug_dict, image_color_mode="rgb",
                    mask_color_mode="rgb", image_save_prefix="images", mask_save_prefix="masks",
                    flag_multi_class=False, num_class=2, save_to_dir=None, target_size=(256, 256), seed=1,
                    mask_dict=None):
    '''
    Generate matching (image, mask) batches for training.

    The same seed drives both the image and the mask generator so that
    identical augmentations are applied to each pair.  Set
    ``save_to_dir`` to a path to visualize the augmented batches.

    Bug fix: the color-mode defaults were "color", which Keras'
    ``flow_from_directory`` rejects (valid values are "grayscale", "rgb"
    and "rgba"); they now default to "rgb".
    '''
    # flag_multi_class / num_class are kept for interface compatibility;
    # they are not used in this implementation.
    image_datagen = ImageDataGenerator(**aug_dict)
    mask_datagen = ImageDataGenerator(**aug_dict)
    image_generator = image_datagen.flow_from_directory(
        train_path,
        classes=[image_folder],
        class_mode=None,
        color_mode=image_color_mode,
        target_size=target_size,
        batch_size=batch_size,
        save_to_dir=save_to_dir,
        save_prefix=image_save_prefix,
        seed=seed)
    mask_generator = mask_datagen.flow_from_directory(
        train_path,
        classes=[mask_folder],
        class_mode=None,
        color_mode=mask_color_mode,
        target_size=target_size,
        batch_size=batch_size,
        save_to_dir=save_to_dir,
        save_prefix=mask_save_prefix,
        seed=seed)
    # Pair the two streams; identical seeds keep image and mask in sync.
    train_generator = zip(image_generator, mask_generator)
    for (img, mask) in train_generator:
        img, mask = adjustData(img, mask, mask_dict)
        yield (img, mask)
def adjustData(img, mask, mask_dict=None):
    """Normalize an image batch and one-hot-encode a color-mask batch.

    *img* is scaled to [0, 1].  For every class in *mask_dict* (mapping
    name -> (index, color, ...)), mask pixels whose color lies within
    +/-5 of the class color set that class's channel; the resulting
    one-hot volume is scaled to {0, 1}.
    """
    img = img / 255
    n_classes = len(mask_dict)
    batch = mask.shape[0]
    one_hot = np.zeros((batch, mask.shape[1], mask.shape[2], n_classes))
    for _, value in mask_dict.items():
        channel = value[0]
        color = value[1]
        lower = (color[0] - 5, color[1] - 5, color[2] - 5)
        upper = (color[0] + 5, color[1] + 5, color[2] + 5)
        for b in range(batch):
            one_hot[b, :, :, channel] = cv2.inRange(mask[b], lower, upper)
    return (img, one_hot / 255)
def test_generator(test_path, target_size=(256, 256), as_gray=False):
    """Yield normalized, resized test images one at a time (batch of 1).

    The per-image read time is appended to ``logs/timeread.txt``.
    """
    for fname in sorted(os.listdir(test_path)):
        t0 = time.time()
        image = io.imread(os.path.join(test_path, fname), as_gray=as_gray)
        image = image / 255
        image = cv2.resize(image, (target_size[1], target_size[0]))
        image = np.reshape(image, (1,) + image.shape)
        elapsed = time.time() - t0
        with open('logs/timeread.txt', "a") as log_file:
            log_file.write("Imread, s: " + str(elapsed) + "\n")
        yield image
def read_and_preprocess_image(test_path, filename, target_size=(256, 256)):
    """Read a single image and prepare it for inference.

    Returns ``(batched, raw, scale_coef)``: the normalized, resized image
    with a leading batch axis, the raw image as read, and the
    (height, width) scale factors mapping network size back to raw size.
    """
    full_path = filename if test_path is None else os.path.join(test_path, filename)
    raw = io.imread(full_path)
    prepared = cv2.resize(raw / 255, (target_size[1], target_size[0]))
    prepared = np.reshape(prepared, (1,) + prepared.shape)
    scale_coef = (raw.shape[0] / target_size[0], raw.shape[1] / target_size[1])
    return prepared, raw, scale_coef
def label_visualize_smoothed(mask_dict, img, scale_coef=(1, 1)):
    """Render per-class probability maps as a color image, then resize.

    Each class channel of *img* is thresholded with its per-class
    threshold from *mask_dict* (classes painted in dict order, later
    classes overwrite earlier ones); the colored result is then resized
    by *scale_coef*.
    """
    out_h = int(img.shape[0] * scale_coef[0])
    out_w = int(img.shape[1] * scale_coef[1])
    colored = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    for _, (channel, color, threshold, *_extra) in mask_dict.items():
        colored[img[:, :, channel] > threshold] = np.array(color)
    return cv2.resize(colored, (out_w, out_h))
def label_visualize_precise(mask_dict, img, scale_coef=(1, 1)):
    """Render an argmax segmentation map as a color image.

    The winning class at every pixel is chosen with ``argmax`` over the
    channel axis; each class's binary mask is resized independently and
    painted with the class color into the (scaled) output image.
    """
    out_h = int(img.shape[0] * scale_coef[0])
    out_w = int(img.shape[1] * scale_coef[1])
    colored = np.zeros((out_h, out_w, 3), dtype=np.uint8)
    winners = np.argmax(img, axis=-1)
    for _, (channel, color, *_extra) in mask_dict.items():
        binary = np.zeros((img.shape[0], img.shape[1]), dtype=np.uint8)
        binary[winners == channel] = 255
        resized = cv2.resize(binary, (out_w, out_h))
        colored[resized > 128] = np.array(color)
    return colored
| 2.453125 | 2 |
pyclassyfire/dev_multipage.py | oolonek/pyclassyfire | 0 | 12772512 | <gh_stars>0
from pyclassyfire import client

# ClassyFire service endpoint and batching parameters for this dev run.
url = "http://classyfire.wishartlab.com"
chunk_size = 50
sleep_interval = 100

# NOTE(review): `tabular_query_multipage` is neither defined nor imported in
# this module, so this call raises NameError as written.  It presumably
# belongs on the imported `client` module (compare `client.tabular_query`) —
# confirm and qualify the call.
tabular_query_multipage('./test_data/sample_table.tsv', 'sanitized_smiles', 'excel-tab')
| 1.695313 | 2 |
pgcontents/checkpoints.py | freedom079215/pgcontents | 138 | 12772513 | <reponame>freedom079215/pgcontents
"""
An IPython FileContentsManager that uses Postgres for checkpoints.
"""
from __future__ import unicode_literals
from .api_utils import (
_decode_unknown_from_base64,
outside_root_to_404,
reads_base64,
to_b64,
writes_base64,
)
from .managerbase import PostgresManagerMixin
from .query import (
delete_remote_checkpoints,
delete_single_remote_checkpoint,
get_remote_checkpoint,
list_remote_checkpoints,
move_remote_checkpoints,
purge_remote_checkpoints,
save_remote_checkpoint,
)
from .utils.ipycompat import Checkpoints, GenericCheckpointsMixin
class PostgresCheckpoints(PostgresManagerMixin,
                          GenericCheckpointsMixin,
                          Checkpoints):
    """
    A Checkpoints implementation that saves checkpoints to a remote database.

    Content is stored base64-encoded; ``self.crypto`` supplies the
    encrypt/decrypt callables applied by the ``query`` helpers.  Public
    methods are wrapped in ``@outside_root_to_404`` so paths outside the
    notebook root surface as HTTP 404s.
    """

    @outside_root_to_404
    def create_notebook_checkpoint(self, nb, path):
        """Create a checkpoint of the current state of a notebook

        Returns a checkpoint_id for the new checkpoint.
        """
        # Serialize the notebook to base64 before handing it to the DB layer.
        b64_content = writes_base64(nb)
        with self.engine.begin() as db:
            return save_remote_checkpoint(
                db,
                self.user_id,
                path,
                b64_content,
                self.crypto.encrypt,
                self.max_file_size_bytes,
            )

    @outside_root_to_404
    def create_file_checkpoint(self, content, format, path):
        """Create a checkpoint of the current state of a file

        Returns a checkpoint_id for the new checkpoint.
        """
        # to_b64 raises ValueError on an unknown format; report it as a 400.
        try:
            b64_content = to_b64(content, format)
        except ValueError as e:
            self.do_400(str(e))
        with self.engine.begin() as db:
            return save_remote_checkpoint(
                db,
                self.user_id,
                path,
                b64_content,
                self.crypto.encrypt,
                self.max_file_size_bytes,
            )

    @outside_root_to_404
    def delete_checkpoint(self, checkpoint_id, path):
        """delete a checkpoint for a file"""
        with self.engine.begin() as db:
            return delete_single_remote_checkpoint(
                db, self.user_id, path, checkpoint_id,
            )

    def get_checkpoint_content(self, checkpoint_id, path):
        """Get the content of a checkpoint."""
        # Internal helper (not decorated): callers below apply
        # @outside_root_to_404 themselves.
        with self.engine.begin() as db:
            return get_remote_checkpoint(
                db,
                self.user_id,
                path,
                checkpoint_id,
                self.crypto.decrypt,
            )['content']

    @outside_root_to_404
    def get_notebook_checkpoint(self, checkpoint_id, path):
        # Decode the stored base64 back into a notebook structure.
        b64_content = self.get_checkpoint_content(checkpoint_id, path)
        return {
            'type': 'notebook',
            'content': reads_base64(b64_content),
        }

    @outside_root_to_404
    def get_file_checkpoint(self, checkpoint_id, path):
        # File checkpoints may be text or binary; the decoder infers the
        # format from the path/content.
        b64_content = self.get_checkpoint_content(checkpoint_id, path)
        content, format = _decode_unknown_from_base64(path, b64_content)
        return {
            'type': 'file',
            'content': content,
            'format': format,
        }

    @outside_root_to_404
    def list_checkpoints(self, path):
        """Return a list of checkpoints for a given file"""
        with self.engine.begin() as db:
            return list_remote_checkpoints(db, self.user_id, path)

    @outside_root_to_404
    def rename_all_checkpoints(self, old_path, new_path):
        """Rename all checkpoints for old_path to new_path."""
        with self.engine.begin() as db:
            return move_remote_checkpoints(
                db,
                self.user_id,
                old_path,
                new_path,
            )

    @outside_root_to_404
    def delete_all_checkpoints(self, path):
        """Delete all checkpoints for the given path."""
        with self.engine.begin() as db:
            delete_remote_checkpoints(db, self.user_id, path)

    def purge_db(self):
        """
        Purge all database records for the current user.
        """
        with self.engine.begin() as db:
            purge_remote_checkpoints(db, self.user_id)
| 2.046875 | 2 |
utils/stats_file_handeling.py | HereIsANiceNickname/CAS19 | 0 | 12772514 | import threading as th
import queue as q
import yaml
STOP_STAT = "STOP"
class StatsFileHandler:
    """Asynchronously append named statistics to per-name YAML files.

    Callers push stats onto a queue; a background ``StatsWriter`` thread
    drains the queue and appends each value to ``<path><name>.yml``.
    ``stop()`` enqueues a sentinel so the writer drains pending work and
    exits cleanly.
    """

    class StatsWriter(th.Thread):
        """Worker thread that drains the stats queue into YAML files."""

        def __init__(self, queue, file_path):
            # Bug fix: Thread.__init__ was never called, so start() raised
            # RuntimeError("thread.__init__() not called").
            super().__init__()
            self.running = True
            self.queue = queue
            self.file_path = file_path
            # Cache of stat name -> output file path.
            self.f_pointer_map = {}

        def _handle(self, stat):
            """Write one stat entry, or begin shutdown on the stop sentinel."""
            if self._is_time_to_stop(stat):
                self.running = False
                return
            name = stat["name"]
            value = stat["stat"]
            file_name = self.f_pointer_map.setdefault(name, "".join([self.file_path, name, ".yml"]))
            with open(file_name, "a+") as f:
                yaml.dump(data=value, stream=f)

        def run(self):
            # Bug fix: queue.Queue has no pop(); get() blocks until an item
            # (or the stop sentinel) arrives.
            while self.running:
                self._handle(self.queue.get())

        def _is_time_to_stop(self, task):
            # Identity check is deliberate: stop() enqueues the exact
            # STOP_STAT object.
            return task is STOP_STAT

    def __init__(self, path):
        self.queue = q.Queue()
        self.worker = self.StatsWriter(self.queue, path)

    def add_stats(self, name, value):
        """Queue one (name, value) stat for asynchronous writing."""
        self.queue.put({"name": name,
                        "stat": value})

    def start(self):
        """Start the background writer thread."""
        self.worker.start()

    def stop(self):
        # Bug fix: previously a no-op, so the writer thread never exited.
        # Enqueue the sentinel so pending stats are drained first, then
        # wait for the thread (if it was ever started).
        self.queue.put(STOP_STAT)
        if self.worker.is_alive():
            self.worker.join()
| 2.859375 | 3 |
keyoscacquire/scripts/example.py | asvela/keyoscacquire | 10 | 12772515 | <reponame>asvela/keyoscacquire
import keyoscacquire as koa
import matplotlib.pyplot as plt
def averaged_trace(scope, measurement_number, averages=8):
    """Capture one averaged trace and save it to disk.

    Puts the scope in averaging mode with *averages* averages, grabs a
    trace, saves it as csv + png (plot not shown; acquisition settings
    and a timestamp are stored in the file automatically), and returns
    the (time, voltages) arrays.
    """
    time, voltages, _ = scope.set_options_get_trace(acq_type=f"AVER{averages}")
    fname = f"measurement{measurement_number}_AVER{averages}"
    scope.save_trace(fname=fname, showplot=False)
    return time, voltages
def different_averaging(visa_address, measurement_number):
    """Compare traces captured with different numbers of averages.

    Connects to the scope at *visa_address*, captures channels 1 and 3
    with 2..32 averages (saving each trace), and shows both channels
    overlaid in a two-panel plot.
    """
    with koa.Oscilloscope(address=visa_address) as scope:
        scope.active_channels = [1, 3]
        fig, ax = plt.subplots(nrows=2, sharex=True)
        for averages in [2, 4, 8, 16, 32]:
            time, voltages = averaged_trace(scope, measurement_number, averages=averages)
            # voltages has one column per active channel; pair each
            # column with its panel.
            for panel, channel_data in zip(ax, voltages.T):
                panel.plot(time, channel_data, label=f"{averages}", alpha=0.5)
        for panel, ch_num in zip(ax, scope.active_channels):
            panel.set_xlabel("Time [s]")
            panel.set_ylabel(f"Channel {ch_num} [V]")
            panel.legend()
        plt.show()
# Example invocation — replace the VISA address with your instrument's.
different_averaging(visa_address="USB0::1234::1234::MY1234567::INSTR",
                    measurement_number=1)
| 2.765625 | 3 |
riptable/rt_meta.py | 972d5defe3218bd62b741e6a2f11f5b3/riptable | 307 | 12772516 | __all__ = ['Item', 'Info', 'Doc', 'apply_schema', 'info', 'doc']
from typing import Optional, List
from .rt_struct import Struct
from .rt_fastarray import FastArray
from .rt_display import DisplayText
# Attribute name under which metadata is stored on data objects.
META_DICT = '_meta'
# Keys used inside the _meta / schema dictionaries.
DOC_KEY = 'Doc'
DESCRIPTION_KEY = 'Description'
STEWARD_KEY = 'Steward'
TYPE_KEY = 'Type'
DETAIL_KEY = 'Detail'
CONTENTS_KEY = 'Contents'

# Placeholder strings shown when a schema omits a field.
NO_DESCRIPTION = '<no description>'
NO_STEWARD = '<no steward>'
NO_TYPE = '<none>'

# Minimum column widths for rendering Info tables.
NAME_DEFAULT_WIDTH = 4
DESCRIPTION_DEFAULT_WIDTH = 50
STEWARD_DEFAULT_WIDTH = 12
TYPE_STR_DEFAULT_WIDTH = 4

# ERROR KEYS (returned by apply_schema for schema deviations)
TYPE_MISMATCH = 'Type Mismatch'
EXTRA_COLUMN = 'Extra Column'
MISSING_COLUMN = 'Missing Column'
class Item:
    """Descriptive information for a single data object.

    Attributes
    ----------
    name : str
        The name of the data object.
    type : str
        The type of the data object.
    description : str
        A description of the data object.
    steward : str
        The steward of the data object.
    """

    name: str
    type: str
    description: str
    steward: str

    def __init__(self, name: str, type: str, description: str, steward: str):
        self.name = name
        self.type = type
        self.description = description
        self.steward = steward
class Info:
    """A hierarchically structured container of descriptive information
    for a data object.
    """
    title = []
    """list: The title of the data object"""
    description : Optional[str] = None
    """str: The description of the data object."""
    steward : Optional[str] = None
    """str: The steward of the data object."""
    type : Optional[str] = None
    """str: The type of the data object."""
    detail = None
    """str: Detail about the data object."""
    items : Optional[List[Item]] = None
    """list of `Item`: For a :class:`~.rt_struct.Struct` or :class:`~.rt_dataset.Dataset`, the items contained within it."""

    def __init__(self):
        pass

    def _make_text(self):
        # Assemble the display text one row (line) at a time, using the
        # project's DisplayText styling helpers; empty fields are skipped.
        title_format = DisplayText.title_format
        header_format = DisplayText.header_format
        rows = []
        if self.title:
            rows += [title_format('{}'.format(self.title))]
            rows += [title_format('='*len(self.title))]
        if self.description:
            rows += [header_format('Description: ') + self.description]
        if self.steward:
            rows += [header_format('Steward: ') + self.steward]
        if self.type:
            rows += [header_format('Type: ') + self.type]
        if self.detail:
            rows += [header_format('Detail: ') + self.detail]
        if self.items:
            rows += [header_format('Contents:'), '']
            # Set column widths
            # Name/type columns grow to fit the longest entry but never
            # shrink below the module-level defaults.
            name_width = max(NAME_DEFAULT_WIDTH, max(len(item.name) for item in self.items))
            descrip_width = DESCRIPTION_DEFAULT_WIDTH
            steward_width = STEWARD_DEFAULT_WIDTH
            stype_width = max(TYPE_STR_DEFAULT_WIDTH, max(len(item.type) for item in self.items))
            # Add list header
            rows += [header_format("{: <{}} {: <{}} {: <{}} {: <{}}".format(
                "Type", stype_width, "Name", name_width,
                "Description", descrip_width, "Steward", steward_width))]
            rows += [header_format("{} {} {} {}".format(
                "-" * stype_width, "-" * name_width, "-" * descrip_width, "-" * steward_width))]
            # Add item rows
            for item in self.items:
                rows += ["{: <{}} {} {: <{}} {: <{}}".format(
                    item.type, stype_width, title_format('{: <{}}'.format(item.name, name_width)),
                    item.description, descrip_width, item.steward, steward_width)]
        # Add a newline at the end if there is a title on top
        if self.title:
            rows += ['']
        return "\n".join(rows)

    def __str__(self):
        return DisplayText(self._make_text()).__str__()

    def __repr__(self):
        return DisplayText(self._make_text()).__repr__()

    def _repr_html_(self):
        return DisplayText(self._make_text())._repr_html_()
class Doc(Struct):
    """A document object containing metadata about a data object.

    Parameters
    ----------
    schema : dict
        See :meth:`apply_schema` for more information on the format of the
        dictionary.
    """
    # Class-level defaults, overridden per instance in __init__.
    _type = NO_TYPE
    _descrip = NO_DESCRIPTION
    _steward = NO_STEWARD
    _detail = None

    def __init__(self, schema):
        super().__init__()
        self._type = schema.get(TYPE_KEY)
        self._descrip = schema.get(DESCRIPTION_KEY, NO_DESCRIPTION)
        self._steward = schema.get(STEWARD_KEY, NO_STEWARD)
        self._detail = schema.get(DETAIL_KEY, None)
        # Each schema 'Contents' entry becomes a child Doc stored as a
        # Struct column, provided its name is a valid column name.
        schema_contents = schema.get(CONTENTS_KEY)
        if schema_contents:
            for key in schema_contents.keys():
                if self.is_valid_colname(key):
                    self[key] = Doc(schema_contents[key])

    def _as_info(self):
        # Flatten this Doc (and its immediate children) into an Info for display.
        info = Info()
        info.title = None
        info.description = self._descrip
        info.steward = self._steward
        info.type = self._type
        info.detail = self._detail
        info.items = []
        for name in self.keys():
            elem = self[name]
            info.items.append(Item(name, elem._type, elem._descrip,
                                   elem._steward))
        return info

    def __str__(self):
        return self._as_info().__str__()

    def __repr__(self):
        return self._as_info().__repr__()

    def _repr_html_(self):
        return self._as_info()._repr_html_()
def apply_schema(obj, schema: dict, doc: bool=True):
    """
    Apply a schema containing descriptive information recursively to the
    input data object.

    The schema should be in the form of a hierarchical dictionary, where
    for the data object, and recursively for each element it may contain,
    there is a descriptive dictionary with the following keys and values:

    * Type: 'Struct', 'Dataset', 'Multiset', 'FastArray', etc.
    * Description: a brief description of the data object
    * Steward: the name of the steward for that data object
    * Detail: any additional descriptive information
    * Contents: if the data object is a :class:`~.rt_struct.Struct`,
      :class:`~.rt_dataset.Dataset`, or :class:`~.rt_multiset.Multiset`, a
      recursively formed dictionary where there is a descriptive
      dictionary of this form associated with the name of each element
      contained by the data object.

    When the schema is applied to the data object, key/value pairs are set
    within the ``_meta`` dictionary attribute of the object and all of
    its elements, to enable subsequent retrieval of the descriptive
    information using the :meth:`.rt_struct.Struct.info` method or
    :meth:`.rt_struct.Struct.doc` property.

    In addition, during the schema application process, the contents and type
    of each data object is compared to the expectation of the schema, with
    any differences returned in the form of a dictionary.

    Parameters
    ----------
    obj : Struct or FastArray
        The data object to apply the schema information to.
    schema : dict
        A descriptive dictionary defining the schema that should apply to the
        data object and any elements it may contain.
    doc : bool
        Indicates whether to create and attach a :class:`Doc` to the object,
        so that the :meth:`doc` method may be run on the object.

    Returns
    -------
    res : dict
        Dictionary of deviations from the schema

    See Also
    --------
    :meth:`.rt_struct.Struct.apply_schema`
    """
    res = {}
    if isinstance(obj, (Struct, FastArray)):
        if not hasattr(obj, META_DICT):
            obj._meta = {}
        # A Doc is only attached at the top level: recursive calls below
        # pass doc=False.
        if doc:
            obj._meta[DOC_KEY] = Doc(schema)
        obj._meta[DESCRIPTION_KEY] = schema.get(DESCRIPTION_KEY, NO_DESCRIPTION)
        obj._meta[STEWARD_KEY] = schema.get(STEWARD_KEY, NO_STEWARD)
        obj._meta[DETAIL_KEY] = schema.get(DETAIL_KEY, None)
        stype = schema.get(TYPE_KEY)
        if stype and _type_str(obj) != stype:
            res[TYPE_MISMATCH] = "Type {} does not match schema type {}".\
                format(_type_str(obj), stype)
        schema_contents = schema.get(CONTENTS_KEY)
        if schema_contents:
            # Recurse into elements that the schema describes; record any
            # element not covered by the schema as an extra column.
            for key in obj.keys():
                elem_schema = schema_contents.get(key)
                if elem_schema:
                    elem_res = apply_schema(obj[key], elem_schema, False)
                    if elem_res:
                        res[key] = elem_res
                else:
                    # NOTE(review): only the *last* extra column survives —
                    # the EXTRA_COLUMN key is overwritten on each mismatch.
                    res[EXTRA_COLUMN] = key
            # Schema entries with no matching element are missing columns
            # (same single-key overwrite caveat as above).
            for key in schema_contents.keys():
                if key not in obj.keys():
                    res[MISSING_COLUMN] = key
    return res
def _type_str(obj) -> str:
    """
    Return the string representation of an object's type.

    For a :class:`~.rt_fastarray.FastArray` this is the dtype name
    (e.g. ``'int64'``); for anything else it is the class name.

    Parameters
    ----------
    obj : Any
        An object

    Returns
    -------
    str
        String representation of the object's type.
    """
    return obj.dtype.name if isinstance(obj, FastArray) else type(obj).__name__
def info(obj, title=None) -> Info:
    """
    Build an :class:`Info` describing *obj* and its contents.

    Parameters
    ----------
    obj : Any
        The object to describe.
    title : str, optional
        Title to attach to the returned :class:`Info` (default None).

    Returns
    -------
    Info
        Description of *obj*; when *obj* is a
        :class:`~.rt_struct.Struct`, includes one :class:`Item` per element.
    """
    summary = Info()
    summary.title = title
    summary.description = NO_DESCRIPTION
    summary.steward = NO_STEWARD
    summary.detail = None
    summary.type = _type_str(obj)
    # Pull description/steward/detail from the object's metadata, if present.
    if hasattr(obj, META_DICT):
        meta = obj._meta
        summary.description = meta.get(DESCRIPTION_KEY, summary.description)
        summary.steward = meta.get(STEWARD_KEY, summary.steward)
        summary.detail = meta.get(DETAIL_KEY, None)
    if isinstance(obj, Struct):
        summary.items = []
        for name in obj.keys():
            element = obj[name]
            descrip = NO_DESCRIPTION
            steward = NO_STEWARD
            if hasattr(element, META_DICT):
                descrip = element._meta.get(DESCRIPTION_KEY, descrip)
                steward = element._meta.get(STEWARD_KEY, steward)
            summary.items.append(Item(name, _type_str(element), descrip, steward))
    return summary
def doc(obj) -> Optional[Doc]:
    """
    Return the :class:`Doc` attached to *obj*, if any.

    Parameters
    ----------
    obj : Any
        The object.

    Returns
    -------
    Doc or None
        The object's :class:`Doc` when documentation metadata is present
        in its ``_meta`` dictionary, otherwise None.
    """
    if not hasattr(obj, META_DICT):
        return None
    return obj._meta.get(DOC_KEY)
| 2.375 | 2 |
line.py | salt-die/Snippets | 3 | 12772517 | def line(y1, x1, y2, x2):
"""
Yield integer coordinates for a line from (y1, x1) to (y2, x2).
"""
dy = abs(y2 - y1)
dx = abs(x2 - x1)
if dy == 0: # Horizontal
for x in range(x1, x2 + 1):
yield y1, x
elif dx == 0: # Vertical
for y in range(y1, y2 + 1):
yield y, x1
elif dy < dx: # Low-sloped lines
dx = x2 - x1
dy, yi = (2 * (y2 - y1), 1) if y2 >= y1 else (2 * (y1 - y2), -1)
dif = dy - 2 * dx
delta = dy - dx
y = y1
for x in range(x1, x2 + 1):
yield y, x
if delta > 0:
y += yi
delta += dif
else:
delta += dy
else: # High-sloped lines
dx, xi = (2 * (x2 - x1), 1) if x2 >= x1 else (2 * (x1 - x2), -1)
dy = y2 - y1
dif = dx - 2 * dy
delta = dx - dy
x = x1
for y in range(y1, y2 + 1):
yield y, x
if delta > 0:
x += xi
delta += dif
else:
delta += dx
| 4.03125 | 4 |
en_transformer/utils.py | dumpmemory/En-transformer | 108 | 12772518 | <filename>en_transformer/utils.py
import torch
from torch import sin, cos, atan2, acos
def rot_z(gamma):
    """Rotation matrix about the z-axis by angle *gamma* (a 0-dim tensor)."""
    c, s = cos(gamma), sin(gamma)
    return torch.tensor([
        [c, -s, 0],
        [s, c, 0],
        [0, 0, 1],
    ], dtype = gamma.dtype)
def rot_y(beta):
    """Rotation matrix about the y-axis by angle *beta* (a 0-dim tensor)."""
    c, s = cos(beta), sin(beta)
    return torch.tensor([
        [c, 0, s],
        [0, 1, 0],
        [-s, 0, c],
    ], dtype = beta.dtype)
def rot(alpha, beta, gamma):
    """ZYZ Euler rotation: Rz(alpha) @ Ry(beta) @ Rz(gamma)."""
    # Same left-associative multiplication order as the original expression.
    inner = rot_z(alpha) @ rot_y(beta)
    return inner @ rot_z(gamma)
| 2.625 | 3 |
api/migrate/specs.py | michaelkerrytf/finalproject | 0 | 12772519 | <gh_stars>0
from ..env import Env
def migrate_specs(spec_list: list, target_env:Env):
    """
    Migrate specs from the source organization to *target_env* and build a
    response log of the results.

    :param spec_list: list of spec names (each a str, e.g. 'adex-spec-name')
    :param target_env: destination :class:`Env`
    :return: list of per-spec migration results — currently always empty,
        because the per-spec migration below is an unimplemented stub
    """
    response_log = []
    # NOTE(review): scaffolding only — the loop body is a placeholder and no
    # migration is performed yet.
    for spec in spec_list: # spec is a str: 'adex-spec-name'
        pass
    return response_log
| 2.015625 | 2 |
Test/untitled0.py | mahdijo6731/SugarScape | 6 | 12772520 | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 22 06:17:11 2018
@author: ymamo
"""
# Bug fix: the original line was the incomplete statement ``test =`` — a
# SyntaxError.  An empty set is the value the comparison below tests for,
# so initialize with one (TODO confirm the author's intent).
test = set()
if test == set():
    print(test)
else:
    print("False")
ucsmsdk/mometa/equipment/EquipmentComputeConn.py | Kego/ucsmsdk | 78 | 12772521 | """This module contains the general information for EquipmentComputeConn ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class EquipmentComputeConnConsts:
    # Allowed values for EquipmentComputeConn.server_sioc_connectivity.
    SERVER_SIOC_CONNECTIVITY_SINGLE_SERVER_DUAL_SIOC = "single-server-dual-sioc"
    SERVER_SIOC_CONNECTIVITY_SINGLE_SERVER_SINGLE_SIOC = "single-server-single-sioc"
class EquipmentComputeConn(ManagedObject):
    """This is EquipmentComputeConn class.

    Auto-generated UCS managed-object definition: the metadata tables below
    drive (de)serialization and validation in the ucsmsdk framework — do not
    hand-edit individual values.
    """

    consts = EquipmentComputeConnConsts()
    naming_props = set([])

    # Class-level MO metadata: class id, XML tag, rn prefix, min version,
    # access, mask, naming props, access privileges, parents, children.
    mo_meta = MoMeta("EquipmentComputeConn", "equipmentComputeConn", "compute-conn", VersionMeta.Version321d, "InputOutput", 0x1f, [], ["read-only"], ['equipmentChassis'], ['faultInst'], [None])

    # Per-property metadata: python name, XML name, type, min version,
    # access, mask, min/max length, validation regex, enum values.
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version321d, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "server_sioc_connectivity": MoPropertyMeta("server_sioc_connectivity", "serverSiocConnectivity", "string", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, None, None, None, None, ["single-server-dual-sioc", "single-server-single-sioc"], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version321d, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
    }

    # XML attribute name -> python attribute name.
    prop_map = {
        "childAction": "child_action",
        "dn": "dn",
        "rn": "rn",
        "sacl": "sacl",
        "serverSiocConnectivity": "server_sioc_connectivity",
        "status": "status",
    }

    def __init__(self, parent_mo_or_dn, **kwargs):
        """Create the MO under *parent_mo_or_dn*; kwargs set properties."""
        self._dirty_mask = 0
        self.child_action = None
        self.sacl = None
        self.server_sioc_connectivity = None
        self.status = None

        ManagedObject.__init__(self, "EquipmentComputeConn", parent_mo_or_dn, **kwargs)
| 2.015625 | 2 |
ML/NaiveBayes/app.py | JadeMaveric/GEC | 1 | 12772522 | <reponame>JadeMaveric/GEC
import streamlit as st
import pandas as pd
from NaiveBayes import NaiveBayes
# Streamlit app for the NaiveBayes classifier demo.
# Streamlit re-runs this script top-to-bottom on every widget interaction,
# so the statement order below is the UI render order.

# --- page & sidebar setup ---
st.set_page_config(layout='centered')
st.title("Naive Bayes Classifier")
st.markdown("By [@JuliusAlphonso](https://twitter.com/JuliusAlphonso)")

dataset = st.sidebar.file_uploader("Choose a file")
probability_selector = st.sidebar.selectbox("Probability", ['simple', 'm-estimate', 'laplace'])

# --- load a dataset: user upload, or fall back to a bundled demo ---
if dataset is not None:
    # whitespace-separated values (tsv-style)
    df = pd.read_csv(dataset, sep='\s+')
else:
    st.markdown("""
    1. From the sidebar, select a tsv file to get started
    2. Also select a probability estimate
    3. Wait for the classifier to finish training fitting onto the data
    4. Once training in complete, you can explore the calculated probabilities
    5. Use the form generated to predict classes for unseen examples
    """
    )

    st.header("Don't have a dataset? Load a demo")
    demosets = {
        'Tennis': 'https://raw.githubusercontent.com/JadeMaveric/GEC/main/ML/data/data.id3.csv',
        'Cars': 'https://raw.githubusercontent.com/JadeMaveric/GEC/main/ML/data/data.tutorial.3.csv',
        'Customers': 'https://raw.githubusercontent.com/JadeMaveric/GEC/main/ML/data/data.tutorial.6.csv'
    }
    # NOTE: 'dataset' is re-bound here from the uploader result to the
    # demo-picker selection.
    dataset = st.selectbox('Dataset', ['None']+list(demosets.keys()))

    if dataset != 'None':
        df = pd.read_csv(demosets[dataset], sep='\s+')
    else:
        df = None

# --- train, show probabilities, and offer a prediction form ---
if df is not None:
    nb = NaiveBayes()
    nb.fit(df, probability_selector)

    with st.beta_expander("Dataset", expanded=True):
        st.header("Dataset")
        st.write(df)

    with st.beta_expander("Probabilities", expanded=False):
        st.subheader("Prior Probabilities")
        st.write(nb.prior)
        st.subheader("Likelihood")
        st.write(nb.likelihood)
        st.subheader("Evidence")
        st.write(nb.evidence)

    st.header("Classify a record")
    with st.form(key='test_record'):
        # one selectbox per training attribute, offering its observed values
        attribs = [
            st.selectbox(attrib, list(df[attrib].unique()))
            for attrib in nb.attribs
        ]
        submit_button = st.form_submit_button(label='Submit')

    if submit_button:
        # show class probabilities sorted from most to least likely
        ans = nb.predict(attribs)
        sorted_ans = sorted(ans, key=ans.__getitem__, reverse=True)
        ans = {key: ans[key] for key in sorted_ans}
        st.write(pd.DataFrame(ans, index=[0]))
| 3.515625 | 4 |
pyicic/IC_Structures.py | zivlab/py-ic-imaging-control | 28 | 12772523 | <filename>pyicic/IC_Structures.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from ctypes import *
class GrabberHandle(Structure):
    """Opaque ctypes handle to an IC Imaging Control grabber object."""
    _fields_ = [('unused', c_int)]
class FilterParameter(Structure):
    """A named, typed parameter exposed by a frame filter."""
    _fields_ = [('Name', c_char * 30),
                ('Type', c_int)]
class FrameFilterHandle(Structure):
    """Handle to a frame filter together with its parameter table."""
    _fields_ = [('pFilter', c_void_p),
                ('bHasDialog', c_int),
                ('ParameterCount', c_int),
                ('Parameters', POINTER(FilterParameter))]
| 1.984375 | 2 |
tests/functional-tests/utils/scale_operator_function.py | Bhaskers-Blu-Org1/ibm-spectrum-scale-csi | 0 | 12772524 | import time
import logging
import yaml
from kubernetes import client
from kubernetes.client.rest import ApiException
LOGGER = logging.getLogger()
def set_global_namespace_value(namespace_name):
    """
    Store the namespace name in the module-level global ``namespace_value``
    so the other helpers in this module can reference it.

    Args:
        param1: namespace_name - namespace name

    Returns:
        None

    Raises:
        None
    """
    global namespace_value
    namespace_value = namespace_name
def create_namespace():
    """
    Create the namespace named by the module global ``namespace_value``,
    labeled product=ibm-spectrum-scale-csi.

    Args:
        None

    Returns:
        None

    Raises:
        Raises an exception on kubernetes client api failure and asserts
    """
    namespace_api_instance = client.CoreV1Api()
    namespace_metadata = client.V1ObjectMeta(
        name=namespace_value,
        labels={"product": "ibm-spectrum-scale-csi"}
    )
    namespace_body = client.V1Namespace(
        api_version="v1", kind="Namespace", metadata=namespace_metadata)
    try:
        LOGGER.info(f'Creating new Namespace {namespace_value}')
        namespace_api_response = namespace_api_instance.create_namespace(
            body=namespace_body, pretty=True)
        LOGGER.debug(str(namespace_api_response))
    except ApiException as e:
        LOGGER.error(
            f"Exception when calling CoreV1Api->create_namespace: {e}")
        # test-utility convention: a failed API call fails the test run
        assert False
def create_deployment():
    """
    Create the IBM Spectrum Scale CSI Operator deployment in the operator
    namespace by loading the deploy/operator.yaml manifest.

    Args:
        None

    Returns:
        None

    Raises:
        Raises an exception on kubernetes client api failure and asserts
    """
    deployment_apps_api_instance = client.AppsV1Api()
    filepath = "../../operator/deploy/operator.yaml"
    try:
        with open(filepath, "r") as f:
            loaddep_yaml = yaml.full_load(f.read())
    except yaml.YAMLError as exc:
        # Use the module logger (not print) so the failure shows up in the
        # test logs like every other error in this module.
        LOGGER.error(f"Error in configuration file {filepath}: {exc}")
        assert False
    try:
        LOGGER.info("Creating Operator Deployment")
        deployment_apps_api_response = deployment_apps_api_instance.create_namespaced_deployment(
            namespace=namespace_value, body=loaddep_yaml)
        LOGGER.debug(str(deployment_apps_api_response))
    except ApiException as e:
        # Log message fixed: the call above is AppsV1Api, not RbacAuthorizationV1Api.
        LOGGER.error(
            f"Exception when calling AppsV1Api->create_namespaced_deployment: {e}")
        # test-utility convention: a failed API call fails the test run
        assert False
def create_deployment_old(config_file):
    """
    Legacy builder: create the IBM Spectrum Scale CSI Operator deployment by
    constructing the kubernetes objects in code, using the
    deployment_operator_image_for_crd and deployment_driver_image_for_crd
    parameters from the config.json file. The newer create_deployment()
    loads operator.yaml instead.

    Args:
        param1: config_file - configuration json file

    Returns:
        None

    Raises:
        Raises an exception on kubernetes client api failure and asserts
    """
    deployment_apps_api_instance = client.AppsV1Api()

    deployment_labels = {
        "app.kubernetes.io/instance": "ibm-spectrum-scale-csi-operator",
        "app.kubernetes.io/managed-by": "ibm-spectrum-scale-csi-operator",
        "app.kubernetes.io/name": "ibm-spectrum-scale-csi-operator",
        "product": "ibm-spectrum-scale-csi",
        "release": "ibm-spectrum-scale-csi-operator"
    }

    deployment_annotations = {
        "productID": "ibm-spectrum-scale-csi-operator",
        "productName": "IBM Spectrum Scale CSI Operator",
        "productVersion": "2.0.0"
    }

    deployment_metadata = client.V1ObjectMeta(
        name="ibm-spectrum-scale-csi-operator", labels=deployment_labels, namespace=namespace_value)

    deployment_selector = client.V1LabelSelector(
        match_labels={"app.kubernetes.io/name": "ibm-spectrum-scale-csi-operator"})

    podtemplate_metadata = client.V1ObjectMeta(
        labels=deployment_labels, annotations=deployment_annotations)

    # Only schedule onto nodes that expose an architecture label.
    pod_affinity = client.V1Affinity(
        node_affinity=client.V1NodeAffinity(
            required_during_scheduling_ignored_during_execution=client.V1NodeSelector(
                node_selector_terms=[client.V1NodeSelectorTerm(
                    match_expressions=[client.V1NodeSelectorRequirement(
                        key="beta.kubernetes.io/arch", operator="Exists")]
                )]
            )
        )
    )

    # Sidecar that tails the ansible-runner output to stdout.
    ansible_pod_container = client.V1Container(
        image=config_file["deployment_operator_image_for_crd"],
        command=["/usr/local/bin/ao-logs",
                 "/tmp/ansible-operator/runner", "stdout"],
        liveness_probe=client.V1Probe(_exec=client.V1ExecAction(
            command=["/health_check.sh"]), initial_delay_seconds=10, period_seconds=30),
        readiness_probe=client.V1Probe(_exec=client.V1ExecAction(
            command=["/health_check.sh"]), initial_delay_seconds=3, period_seconds=1),
        name="ansible", image_pull_policy="IfNotPresent",
        security_context=client.V1SecurityContext(
            capabilities=client.V1Capabilities(drop=["ALL"])),
        volume_mounts=[client.V1VolumeMount(
            mount_path="/tmp/ansible-operator/runner", name="runner", read_only=True)],
        env=[client.V1EnvVar(name="CSI_DRIVER_IMAGE", value=config_file["deployment_driver_image_for_crd"])])

    # Main operator container.
    operator_pod_container = client.V1Container(
        image=config_file["deployment_operator_image_for_crd"],
        name="operator", image_pull_policy="IfNotPresent",
        liveness_probe=client.V1Probe(_exec=client.V1ExecAction(
            command=["/health_check.sh"]), initial_delay_seconds=10, period_seconds=30),
        readiness_probe=client.V1Probe(_exec=client.V1ExecAction(
            command=["/health_check.sh"]), initial_delay_seconds=3, period_seconds=1),
        security_context=client.V1SecurityContext(
            capabilities=client.V1Capabilities(drop=["ALL"])),
        env=[client.V1EnvVar(name="WATCH_NAMESPACE",
                             value_from=client.V1EnvVarSource(field_ref=client.V1ObjectFieldSelector(
                                 field_path="metadata.namespace"))),
             client.V1EnvVar(name="POD_NAME", value_from=client.V1EnvVarSource(
                 field_ref=client.V1ObjectFieldSelector(field_path="metadata.name"))),
             client.V1EnvVar(name="OPERATOR_NAME",
                             value="ibm-spectrum-scale-csi-operator"),
             client.V1EnvVar(name="CSI_DRIVER_IMAGE", value=config_file["deployment_driver_image_for_crd"])],
        volume_mounts=[client.V1VolumeMount(
            mount_path="/tmp/ansible-operator/runner", name="runner")]
    )

    # Both containers share the in-memory "runner" volume.
    pod_spec = client.V1PodSpec(affinity=pod_affinity,
                                containers=[ansible_pod_container,
                                            operator_pod_container],
                                service_account_name="ibm-spectrum-scale-csi-operator",
                                volumes=[client.V1Volume(empty_dir=client.V1EmptyDirVolumeSource(medium="Memory"), name="runner")])

    podtemplate_spec = client.V1PodTemplateSpec(
        metadata=podtemplate_metadata, spec=pod_spec)

    deployment_spec = client.V1DeploymentSpec(
        replicas=1, selector=deployment_selector, template=podtemplate_spec)

    body_dep = client.V1Deployment(
        kind='Deployment', api_version='apps/v1', metadata=deployment_metadata, spec=deployment_spec)

    try:
        LOGGER.info("creating deployment for operator")
        deployment_apps_api_response = deployment_apps_api_instance.create_namespaced_deployment(
            namespace=namespace_value, body=body_dep)
        LOGGER.debug(str(deployment_apps_api_response))
    except ApiException as e:
        # NOTE(review): this log names RbacAuthorizationV1Api but the call
        # above is AppsV1Api — message is misleading; confirm and fix upstream.
        LOGGER.error(
            f"Exception when calling RbacAuthorizationV1Api->create_namespaced_deployment: {e}")
        assert False
def create_cluster_role():
    """
    Create the IBM Spectrum Scale CSI Operator ClusterRole.

    Args:
        None

    Returns:
        None

    Raises:
        Raises an exception on kubernetes client api failure and asserts
    """
    cluster_role_api_instance = client.RbacAuthorizationV1Api()
    pretty = True
    cluster_role_labels = {
        "app.kubernetes.io/instance": "ibm-spectrum-scale-csi-operator",
        "app.kubernetes.io/managed-by": "ibm-spectrum-scale-csi-operator",
        "app.kubernetes.io/name": "ibm-spectrum-scale-csi-operator",
        "product": "ibm-spectrum-scale-csi",
        "release": "ibm-spectrum-scale-csi-operator"
    }
    cluster_role_metadata = client.V1ObjectMeta(
        name="ibm-spectrum-scale-csi-operator", labels=cluster_role_labels, namespace=namespace_value)
    # RBAC rules mirroring the operator's role.yaml manifest.
    cluster_role_rules = []
    cluster_role_rules.append(client.V1PolicyRule(api_groups=["*"], resources=[
        'pods', 'persistentvolumeclaims', 'services',
        'endpoints', 'events', 'configmaps', 'secrets',
        'secrets/status', 'services/finalizers', 'serviceaccounts', 'securitycontextconstraints'], verbs=["*"]))
    cluster_role_rules.append(client.V1PolicyRule(api_groups=['rbac.authorization.k8s.io'], resources=[
        'clusterroles', 'clusterrolebindings'], verbs=["*"]))
    cluster_role_rules.append(client.V1PolicyRule(api_groups=['apps'], resources=[
        'deployments', 'daemonsets', 'replicasets', 'statefulsets'], verbs=["*"]))
    cluster_role_rules.append(client.V1PolicyRule(api_groups=[
        'monitoring.coreos.com'], resources=['servicemonitors'], verbs=['get', 'create']))
    cluster_role_rules.append(client.V1PolicyRule(
        api_groups=['apps'], resources=['replicasets'], verbs=["get"]))
    cluster_role_rules.append(client.V1PolicyRule(
        api_groups=['csi.ibm.com'], resources=['*'], verbs=["*"]))
    cluster_role_rules.append(client.V1PolicyRule(api_groups=[
        'security.openshift.io'], resources=['securitycontextconstraints'], verbs=["*"]))
    cluster_role_rules.append(client.V1PolicyRule(api_groups=['storage.k8s.io'], resources=[
        'volumeattachments', 'storageclasses'], verbs=["*"]))
    cluster_role_rules.append(client.V1PolicyRule(api_groups=['apps'], resource_names=[
        'ibm-spectrum-scale-csi-operator'], resources=['deployments/finalizers'], verbs=['update']))
    body = client.V1ClusterRole(kind='ClusterRole', api_version='rbac.authorization.k8s.io/v1',
                                metadata=cluster_role_metadata, rules=cluster_role_rules)
    try:
        LOGGER.info("Creating ibm-spectrum-scale-csi-operator ClusterRole ")
        cluster_role_api_response = cluster_role_api_instance.create_cluster_role(
            body, pretty=pretty)
        LOGGER.debug(str(cluster_role_api_response))
    except ApiException as e:
        LOGGER.error(
            f"Exception when calling RbacAuthorizationV1Api->create_cluster_role: {e}")
        assert False


def create_cluster_role_binding():
    """
    Create the IBM Spectrum Scale CSI Operator ClusterRoleBinding, binding
    the operator ServiceAccount to the operator ClusterRole.

    Args:
        None

    Returns:
        None

    Raises:
        Raises an exception on kubernetes client api failure and asserts
    """
    cluster_role_binding_api_instance = client.RbacAuthorizationV1Api()
    pretty = True
    cluster_role_binding_labels = {
        "app.kubernetes.io/instance": "ibm-spectrum-scale-csi-operator",
        "app.kubernetes.io/managed-by": "ibm-spectrum-scale-csi-operator",
        "app.kubernetes.io/name": "ibm-spectrum-scale-csi-operator",
        "product": "ibm-spectrum-scale-csi",
        "release": "ibm-spectrum-scale-csi-operator"
    }
    cluster_role_binding_metadata = client.V1ObjectMeta(
        name="ibm-spectrum-scale-csi-operator", labels=cluster_role_binding_labels, namespace=namespace_value)
    cluster_role_binding_role_ref = client.V1RoleRef(
        api_group="rbac.authorization.k8s.io", kind="ClusterRole", name="ibm-spectrum-scale-csi-operator")
    cluster_role_binding_subjects = client.V1Subject(
        kind="ServiceAccount", name="ibm-spectrum-scale-csi-operator", namespace=namespace_value)
    cluster_role_binding_body = client.V1ClusterRoleBinding(kind='ClusterRoleBinding',
                                                            api_version='rbac.authorization.k8s.io/v1',
                                                            metadata=cluster_role_binding_metadata,
                                                            role_ref=cluster_role_binding_role_ref,
                                                            subjects=[cluster_role_binding_subjects])
    try:
        LOGGER.info("creating cluster role binding")
        cluster_role_binding_api_response = cluster_role_binding_api_instance.create_cluster_role_binding(
            cluster_role_binding_body, pretty=pretty)
        LOGGER.debug(cluster_role_binding_api_response)
    except ApiException as e:
        LOGGER.error(
            f"Exception when calling RbacAuthorizationV1Api->create_cluster_role_binding: {e}")
        assert False


def create_service_account():
    """
    Create the IBM Spectrum Scale CSI Operator ServiceAccount in the
    operator namespace.

    Args:
        None

    Returns:
        None

    Raises:
        Raises an exception on kubernetes client api failure and asserts
    """
    pretty = True
    service_account_api_instance = client.CoreV1Api()
    service_account_labels = {
        "app.kubernetes.io/instance": "ibm-spectrum-scale-csi-operator",
        "app.kubernetes.io/managed-by": "ibm-spectrum-scale-csi-operator",
        "app.kubernetes.io/name": "ibm-spectrum-scale-csi-operator",
        "product": "ibm-spectrum-scale-csi",
        "release": "ibm-spectrum-scale-csi-operator"
    }
    service_account_metadata = client.V1ObjectMeta(
        name="ibm-spectrum-scale-csi-operator", namespace=namespace_value, labels=service_account_labels)
    service_account_body = client.V1ServiceAccount(
        api_version="v1", kind="ServiceAccount", metadata=service_account_metadata)
    try:
        LOGGER.info("Creating ibm-spectrum-scale-csi-operator ServiceAccount")
        service_account_api_response = service_account_api_instance.create_namespaced_service_account(
            namespace=namespace_value, body=service_account_body, pretty=pretty)
        LOGGER.debug(str(service_account_api_response))
    except ApiException as e:
        LOGGER.error(
            f"Exception when calling CoreV1Api->create_namespaced_service_account: {e}")
        assert False
def create_crd():
    """
    Create the IBM Spectrum Scale CSI Operator CRD (Custom Resource
    Definition) from csiscaleoperators.csi.ibm.com.crd.yaml.

    Args:
        None

    Returns:
        None

    Raises:
        A ValueError is raised by the client but it is expected; hence we pass.
    """
    filepath = "../../operator/deploy/crds/csiscaleoperators.csi.ibm.com.crd.yaml"
    try:
        with open(filepath, "r") as f:
            loadcrd_yaml = yaml.full_load(f.read())
    except yaml.YAMLError as exc:
        # Use the module logger (not print) so the failure shows up in the
        # test logs like every other error in this module.
        LOGGER.error(f"Error in configuration file {filepath}: {exc}")
        assert False
    crd_api_instance = client.ApiextensionsV1beta1Api()
    try:
        LOGGER.info(
            "Creating IBM SpectrumScale CRD object using csiscaleoperators.csi.ibm.com.crd.yaml file")
        crd_api_response = crd_api_instance.create_custom_resource_definition(
            loadcrd_yaml, pretty=True)
        LOGGER.debug(str(crd_api_response))
    except ValueError:
        # The python client fails to deserialize the response even though the
        # CRD is created server-side; tolerate this deliberately.
        LOGGER.info(
            "while there is valuerror expection,but CRD created successfully")
def create_crd_old():
    """
    Legacy builder: create the IBM Spectrum Scale CSI Operator CRD (Custom
    Resource Definition) by constructing the object in code, reading only the
    validation schema from the crd yaml. The newer create_crd() loads the
    whole manifest instead.

    Args:
        None

    Returns:
        None

    Raises:
        A ValueError is raised by the client but it is expected; hence we pass.
    """
    # input to crd_metadata
    crd_labels = {
        "app.kubernetes.io/instance": "ibm-spectrum-scale-csi-operator",
        "app.kubernetes.io/managed-by": "ibm-spectrum-scale-csi-operator",
        "app.kubernetes.io/name": "ibm-spectrum-scale-csi-operator",
        "release": "ibm-spectrum-scale-csi-operator"
    }
    # input to crd_body
    crd_metadata = client.V1ObjectMeta(
        name="csiscaleoperators.csi.ibm.com", labels=crd_labels)
    crd_names = client.V1beta1CustomResourceDefinitionNames(
        kind="CSIScaleOperator",
        list_kind="CSIScaleOperatorList",
        plural="csiscaleoperators",
        singular="csiscaleoperator"
    )
    crd_subresources = client.V1beta1CustomResourceSubresources(status={})
    # The openAPIV3Schema validation block is taken from the crd yaml file.
    filepath = "../../operator/deploy/crds/csiscaleoperators.csi.ibm.com.crd.yaml"
    try:
        with open(filepath, "r") as f:
            loadcrd_yaml = yaml.full_load(f.read())
    except yaml.YAMLError as exc:
        # Use the module logger (not print) so the failure shows up in the
        # test logs like every other error in this module.
        LOGGER.error(f"Error in configuration file {filepath}: {exc}")
        assert False
    properties = loadcrd_yaml['spec']['validation']['openAPIV3Schema']['properties']
    crd_open_apiv3_schema = client.V1beta1JSONSchemaProps(
        properties=properties, type="object")
    crd_validation = client.V1beta1CustomResourceValidation(
        open_apiv3_schema=crd_open_apiv3_schema)
    crd_versions = [client.V1beta1CustomResourceDefinitionVersion(
        name="v1", served=True, storage=True)]
    crd_spec = client.V1beta1CustomResourceDefinitionSpec(
        group="csi.ibm.com",
        names=crd_names,
        scope="Namespaced",
        subresources=crd_subresources,
        validation=crd_validation,
        version="v1",
        versions=crd_versions
    )
    crd_body = client.V1beta1CustomResourceDefinition(
        api_version="apiextensions.k8s.io/v1beta1",
        kind="CustomResourceDefinition",
        metadata=crd_metadata,
        spec=crd_spec)
    crd_api_instance = client.ApiextensionsV1beta1Api()
    try:
        LOGGER.info("creating crd")
        crd_api_response = crd_api_instance.create_custom_resource_definition(
            crd_body, pretty=True)
        LOGGER.debug(str(crd_api_response))
    except ValueError:
        # The python client fails to deserialize the response even though the
        # CRD is created server-side; tolerate this deliberately.
        LOGGER.info(
            "while there is valuerror expection,but CRD created successfully")
def delete_crd():
    """
    Delete the existing IBM Spectrum Scale CSI Operator CRD
    (csiscaleoperators.csi.ibm.com).

    Args:
        None

    Returns:
        None

    Raises:
        Raises an exception on kubernetes client api failure and asserts
    """
    delete_crd_api_instance = client.ApiextensionsV1beta1Api()
    try:
        delete_crd_api_response = delete_crd_api_instance.delete_custom_resource_definition(
            name="csiscaleoperators.csi.ibm.com", pretty=True)
        LOGGER.debug(str(delete_crd_api_response))
    except ApiException as e:
        LOGGER.error(
            f"Exception when calling ApiextensionsV1beta1Api->delete_custom_resource_definition: {e}")
        assert False


def delete_namespace():
    """
    Delete the IBM Spectrum Scale CSI Operator namespace
    (module global ``namespace_value``).

    Args:
        None

    Returns:
        None

    Raises:
        Raises an exception on kubernetes client api failure and asserts
    """
    delete_namespace_api_instance = client.CoreV1Api()
    try:
        delete_namespace_api_response = delete_namespace_api_instance.delete_namespace(
            name=namespace_value, pretty=True)
        LOGGER.debug(str(delete_namespace_api_response))
    except ApiException as e:
        LOGGER.error(
            f"Exception when calling CoreV1Api->delete_namespace: {e}")
        assert False
def delete_deployment():
    """
    Delete the IBM Spectrum Scale CSI Operator Deployment from the
    operator namespace.

    Args:
        None

    Returns:
        None

    Raises:
        Raises an exception on kubernetes client api failure and asserts
    """
    delete_deployment_api_instance = client.AppsV1Api()
    try:
        delete_deployment_api_response = delete_deployment_api_instance.delete_namespaced_deployment(
            name="ibm-spectrum-scale-csi-operator", namespace=namespace_value, pretty=True)
        LOGGER.debug(str(delete_deployment_api_response))
    except ApiException as e:
        # Log message fixed: the call above is AppsV1Api, not ExtensionsV1beta1Api.
        LOGGER.error(
            f"Exception when calling AppsV1Api->delete_namespaced_deployment: {e}")
        assert False
def delete_service_account(service_account_name):
    """
    Delete an IBM Spectrum Scale CSI Operator ServiceAccount from the
    operator namespace.

    Args:
        param1: service_accout_name - service account name to be deleted

    Returns:
        None

    Raises:
        Raises an exception on kubernetes client api failure and asserts
    """
    delete_service_account_api_instance = client.CoreV1Api()
    try:
        delete_service_account_api_response = delete_service_account_api_instance.delete_namespaced_service_account(
            name=service_account_name, namespace=namespace_value, pretty=True)
        LOGGER.debug(str(delete_service_account_api_response))
    except ApiException as e:
        LOGGER.error(
            f"Exception when calling CoreV1Api->delete_namespaced_service_account: {e}")
        assert False


def delete_cluster_role(cluster_role_name):
    """
    Delete an IBM Spectrum Scale CSI Operator ClusterRole object.

    Args:
        param1: cluster_role_name - cluster role name to be deleted

    Returns:
        None

    Raises:
        Raises an exception on kubernetes client api failure and asserts
    """
    delete_cluster_role_api_instance = client.RbacAuthorizationV1Api()
    try:
        delete_cluster_role_api_response = delete_cluster_role_api_instance.delete_cluster_role(
            name=cluster_role_name, pretty=True)
        LOGGER.debug(str(delete_cluster_role_api_response))
    except ApiException as e:
        LOGGER.error(
            f"Exception when calling RbacAuthorizationV1Api->delete_cluster_role: {e}")
        assert False


def delete_cluster_role_binding(cluster_role_binding_name):
    """
    Delete an IBM Spectrum Scale CSI Operator ClusterRoleBinding object.

    Args:
        param1: cluster_role_binding_name - cluster role binding name to be deleted

    Returns:
        None

    Raises:
        Raises an exception on kubernetes client api failure and asserts
    """
    delete_cluster_role_binding_api_instance = client.RbacAuthorizationV1Api()
    try:
        delete_cluster_role_binding_api_response = delete_cluster_role_binding_api_instance.delete_cluster_role_binding(
            name=cluster_role_binding_name, pretty=True)
        LOGGER.debug(delete_cluster_role_binding_api_response)
    except ApiException as e:
        LOGGER.error(
            f"Exception when calling RbacAuthorizationV1Api->delete_cluster_role_binding: {e}")
        assert False
def check_crd_deleted():
    """
    Poll (up to ~60 seconds) until the CSI CRD (Custom Resource Definition)
    csiscaleoperators.csi.ibm.com is deleted; asserts if it is still present
    after the timeout.

    Bug fix: the poll previously read the CRD name "ibm-spectrum-scale-csi",
    which does not match the name used by delete_crd()/check_crd_exists()
    ("csiscaleoperators.csi.ibm.com"), so the read always raised and the
    function reported the CRD deleted without actually waiting.

    Args:
        None

    Returns:
        None

    Raises:
        Asserts if the CRD is not deleted within the timeout
    """
    var = True
    count = 12
    list_crd_api_instance = client.ApiextensionsV1beta1Api()
    while (var and count > 0):
        try:
            # A successful read means the CRD still exists; keep polling.
            list_crd_api_response = list_crd_api_instance.read_custom_resource_definition(
                pretty=True, name="csiscaleoperators.csi.ibm.com")
            LOGGER.debug(list_crd_api_response)
            LOGGER.info("still deleting crd")
            count -= 1
            time.sleep(5)
        except ApiException:
            # The read failing (404) is the signal that deletion completed.
            LOGGER.info("crd deleted")
            var = False
    if count <= 0:
        LOGGER.error("crd is not deleted")
        assert False
def check_namespace_deleted():
    """
    Poll (up to ~120 seconds) until the operator namespace is deleted;
    asserts if it is still present after the timeout.

    Raises:
        Asserts if the namespace is not deleted within the timeout
    """
    var = True
    count = 24
    list_namespace_api_instance = client.CoreV1Api()
    while (var and count > 0):
        try:
            # A successful read means the namespace still exists; keep polling.
            list_namespace_api_response = list_namespace_api_instance.read_namespace(
                name=namespace_value, pretty=True)
            LOGGER.debug(str(list_namespace_api_response))
            LOGGER.info(f'Still deleting namespace {namespace_value}')
            count = count-1
            time.sleep(5)
        except ApiException:
            # The read failing (404) is the signal that deletion completed.
            LOGGER.info(f'namespace {namespace_value} is deleted')
            var = False
    if count <= 0:
        LOGGER.error(f'namespace {namespace_value} is not deleted')
        assert False


def check_deployment_deleted():
    """
    Poll (up to ~30 seconds) until the operator deployment is deleted;
    asserts if it is still present after the timeout.

    Raises:
        Asserts if the deployment is not deleted within the timeout
    """
    var = True
    count = 6
    api_instance = client.AppsV1Api()
    while (var and count > 0):
        try:
            api_response = api_instance.read_namespaced_deployment(
                name="ibm-spectrum-scale-csi-operator", namespace=namespace_value, pretty=True)
            LOGGER.debug(str(api_response))
            LOGGER.info(f'Still Deleting ibm-spectrum-scale-csi-operator deployment')
            count = count-1
            time.sleep(5)
        except ApiException:
            LOGGER.info("Deployment ibm-spectrum-scale-csi-operator is deleted")
            var = False
    if count <= 0:
        LOGGER.error("deployment is not deleted")
        assert False


def check_service_account_deleted(service_account_name):
    """
    Poll (up to ~30 seconds) until the given ServiceAccount is deleted;
    asserts if it is still present after the timeout.

    Args:
        param1: service_accout_name - service account name to be checked

    Raises:
        Asserts if the service account is not deleted within the timeout
    """
    var = True
    count = 6
    api_instance = client.CoreV1Api()
    while (var and count > 0):
        try:
            api_response = api_instance.read_namespaced_service_account(
                name=service_account_name, namespace=namespace_value, pretty=True)
            LOGGER.debug(str(api_response))
            LOGGER.info(f'Still deleting ServiceAccount {service_account_name}')
            count = count-1
            time.sleep(5)
        except ApiException:
            LOGGER.info(f'ServiceAccount {service_account_name} is deleted')
            var = False
    if count <= 0:
        LOGGER.error("service account is not deleted")
        assert False


def check_cluster_role_deleted(cluster_role_name):
    """
    Poll (up to ~30 seconds) until the given ClusterRole is deleted;
    asserts if it is still present after the timeout.

    Args:
        param1: cluster_role_name - cluster role name to be checked

    Raises:
        Asserts if the cluster role is not deleted within the timeout
    """
    var = True
    count = 6
    api_instance = client.RbacAuthorizationV1Api()
    while (var and count > 0):
        try:
            api_response = api_instance.read_cluster_role(
                name=cluster_role_name, pretty=True)
            LOGGER.debug(str(api_response))
            LOGGER.info(f'Still deleting ClusterRole {cluster_role_name} ')
            count = count-1
            time.sleep(5)
        except ApiException:
            LOGGER.info(f'ClusterRole {cluster_role_name} is deleted')
            var = False
    if count <= 0:
        LOGGER.error(f'ClusterRole {cluster_role_name} is not deleted')
        assert False


def check_cluster_role_binding_deleted(cluster_role_binding_name):
    """
    Poll (up to ~30 seconds) until the given ClusterRoleBinding is deleted;
    asserts if it is still present after the timeout.

    Args:
        param1: cluster_role_binding_name - cluster role binding name to be checked

    Raises:
        Asserts if the cluster role binding is not deleted within the timeout
    """
    var = True
    count = 6
    api_instance = client.RbacAuthorizationV1Api()
    while (var and count > 0):
        try:
            api_response = api_instance.read_cluster_role_binding(
                name=cluster_role_binding_name, pretty=True)
            LOGGER.debug(str(api_response))
            LOGGER.info(f'Still deleting ClusterRoleBinding {cluster_role_binding_name}')
            count = count-1
            time.sleep(5)
        except ApiException:
            LOGGER.info(f'ClusterRoleBinding {cluster_role_binding_name} is deleted')
            var = False
    if count <= 0:
        LOGGER.error(f'ClusterRoleBinding {cluster_role_binding_name} is not deleted')
        assert False
def check_crd_exists():
    """
    Check whether the CSI custom resource definition
    (csiscaleoperators.csi.ibm.com) exists.

    Args:
        None

    Returns:
        return True , if crd exists
        return False , if crd does not exists

    Raises:
        None
    """
    read_crd_api_instance = client.ApiextensionsV1beta1Api()
    try:
        read_crd_api_response = read_crd_api_instance.read_custom_resource_definition(
            pretty=True, name="csiscaleoperators.csi.ibm.com")
        LOGGER.debug(str(read_crd_api_response))
        LOGGER.info("crd exists")
        return True
    except ApiException:
        # A failed read (404) means the CRD is absent.
        LOGGER.info("crd does not exist")
        return False


def check_namespace_exists():
    """
    Check whether the namespace ``namespace_value`` exists.

    Args:
        None

    Returns:
        return True , if namespace exists
        return False , if namespace does not exists

    Raises:
        None
    """
    read_namespace_api_instance = client.CoreV1Api()
    try:
        read_namespace_api_response = read_namespace_api_instance.read_namespace(
            name=namespace_value, pretty=True)
        LOGGER.debug(str(read_namespace_api_response))
        LOGGER.info("namespace exists")
        return True
    except ApiException:
        LOGGER.info("namespace does not exists")
        return False


def check_deployment_exists():
    """
    Check whether the operator deployment exists.

    Args:
        None

    Returns:
        return True , if deployment exists
        return False , if deployment does not exists

    Raises:
        None
    """
    read_deployment_api_instance = client.AppsV1Api()
    try:
        read_deployment_api_response = read_deployment_api_instance.read_namespaced_deployment(
            name="ibm-spectrum-scale-csi-operator", namespace=namespace_value, pretty=True)
        LOGGER.debug(str(read_deployment_api_response))
        LOGGER.info("deployment exists")
        return True
    except ApiException:
        LOGGER.info("deployment does not exists")
        return False


def check_service_account_exists(service_account_name):
    """
    Check whether the given service account exists in the operator namespace.

    Args:
        param1: service_account_name - service account name to be checked

    Returns:
        return True , if service account exists
        return False , if service account does not exists

    Raises:
        None
    """
    api_instance = client.CoreV1Api()
    try:
        api_response = api_instance.read_namespaced_service_account(
            name=service_account_name, namespace=namespace_value, pretty=True)
        LOGGER.debug(str(api_response))
        LOGGER.info("Service account exists")
        return True
    except ApiException:
        LOGGER.info("Service account does not exists")
        return False


def check_cluster_role_exists(cluster_role_name):
    """
    Check whether the given cluster role exists.

    Args:
        param1: cluster_role_name - cluster role name to be checked

    Returns:
        return True , if cluster role exists
        return False , if cluster role does not exists

    Raises:
        None
    """
    api_instance = client.RbacAuthorizationV1Api()
    try:
        api_response = api_instance.read_cluster_role(
            name=cluster_role_name, pretty=True)
        LOGGER.debug(str(api_response))
        LOGGER.info("cluster role exists")
        return True
    except ApiException:
        LOGGER.info("cluster role does not exists")
        return False


def check_cluster_role_binding_exists(cluster_role_binding_name):
    """
    Check whether the given cluster role binding exists.

    Args:
        param1: cluster_role_binding_name - cluster role binding name to be checked

    Returns:
        return True , if cluster role binding exists
        return False , if cluster role binding does not exists

    Raises:
        None
    """
    api_instance = client.RbacAuthorizationV1Api()
    try:
        api_response = api_instance.read_cluster_role_binding(
            name=cluster_role_binding_name, pretty=True)
        LOGGER.debug(str(api_response))
        LOGGER.info("cluster role binding exists")
        return True
    except ApiException:
        LOGGER.info("cluster role binding does not exists")
        return False
| 2.25 | 2 |
openapi_server/models/block.py | havardhuns/graphsense-REST | 0 | 12772525 | # coding: utf-8
from datetime import date, datetime
from typing import List, Dict, Type
from openapi_server.models.base_model_ import Model
from openapi_server import util
class Block(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, block_hash: str=None, height: int=None, no_txs: int=None, timestamp: int=None):
        """Block - a model defined in OpenAPI

        :param block_hash: The block_hash of this Block.
        :param height: The height of this Block.
        :param no_txs: The no_txs of this Block.
        :param timestamp: The timestamp of this Block.
        """
        field_names = ('block_hash', 'height', 'no_txs', 'timestamp')
        field_types = (str, int, int, int)
        # Metadata consumed by the (de)serialization helpers in `util`.
        self.openapi_types = dict(zip(field_names, field_types))
        self.attribute_map = {name: name for name in field_names}

        self._block_hash = block_hash
        self._height = height
        self._no_txs = no_txs
        self._timestamp = timestamp

    @classmethod
    def from_dict(cls, dikt: dict) -> 'Block':
        """Build a Block instance from a plain dict.

        :param dikt: A dict.
        :return: The block of this Block.
        """
        return util.deserialize_model(dikt, cls)

    @property
    def block_hash(self):
        """Gets the block_hash of this Block.

        :rtype: str
        """
        return self._block_hash

    @block_hash.setter
    def block_hash(self, block_hash):
        """Sets the block_hash of this Block.

        :type block_hash: str
        """
        self._block_hash = block_hash

    @property
    def height(self):
        """Gets the height of this Block.

        Height

        :rtype: int
        """
        return self._height

    @height.setter
    def height(self, height):
        """Sets the height of this Block; negative values are rejected.

        :type height: int
        """
        if height is not None and height < 0:
            raise ValueError("Invalid value for `height`, must be a value greater than or equal to `0`")
        self._height = height

    @property
    def no_txs(self):
        """Gets the no_txs of this Block.

        :rtype: int
        """
        return self._no_txs

    @no_txs.setter
    def no_txs(self, no_txs):
        """Sets the no_txs of this Block.

        :type no_txs: int
        """
        self._no_txs = no_txs

    @property
    def timestamp(self):
        """Gets the timestamp of this Block.

        Timestamp

        :rtype: int
        """
        return self._timestamp

    @timestamp.setter
    def timestamp(self, timestamp):
        """Sets the timestamp of this Block.

        :type timestamp: int
        """
        self._timestamp = timestamp
| 2.671875 | 3 |
functionality/yolo_detect.py | shrwh/QtMultiUAVGui | 0 | 12772526 | import torch
from models.experimental import attempt_load
from utils.datasets import LoadImages
from utils.general import check_img_size, non_max_suppression, scale_coords, set_logging
from utils.torch_utils import select_device, time_synchronized
import multiprocessing as mp
# Object detection
def detect_center(frame_cap,condition:mp.Condition,conn:mp.Pipe):
    """Run a YOLOv3 detection loop over frames supplied by `frame_cap`.

    Waits on `condition` for a new frame, runs inference, writes the
    detected box centers into `frame_cap.detect_box` and forwards that
    dict through `conn`. Never returns.

    Args:
        frame_cap: shared frame holder; assumed to expose `.frame` (an
            image array) and `.detect_box` (a dict with 'red'/'yellow'
            keys) — TODO confirm against the producer process.
        condition: multiprocessing condition signalled when a new frame
            is available.
        conn: pipe endpoint that receives the detect_box dict per frame.
    """
    # Hard-coded model checkpoint path and inference image size.
    weights, imgsz = '/home/nvidia/yolov3/core_mi.pt', 640
    # Initialize
    set_logging()
    device = select_device('')
    half = device.type != 'cpu' # half precision only supported on CUDA
    # Load model
    model = attempt_load(weights, map_location=device)  # load FP32 model
    stride = int(model.stride.max())  # model stride
    imgsz = check_img_size(imgsz, s=stride)  # check img_size
    names = model.module.names if hasattr(model, 'module') else model.names  # get class names
    if half:
        model.half()  # to FP16
    while True:
        #info={"red":None,"yellow":None}
        # Block until the capture process publishes a new frame.
        with condition:
            condition.wait()
            image = frame_cap.frame
        # image = cv2.imread('000125.jpg')
        print("begin detect")
        if image is None:
            continue
        img, im0s = LoadImages(image, img_size=imgsz, stride=stride).get_img()
        # Run inference
        # NOTE(review): this warm-up forward pass runs on every loop
        # iteration; it likely only needs to happen once — confirm.
        if device.type != 'cpu':
            model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
        #t0 = time.time()
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)
        # Inference
        t1 = time_synchronized()
        pred = model(img, augment=False)[0]
        # Apply NMS
        pred = non_max_suppression(pred, agnostic=True, max_det=300)
        t2 = time_synchronized()
        # Process detections
        for i, det in enumerate(pred):  # detections per image
            '''if webcam:  # batch_size >= 1
                p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count
            else:'''
            s, im0 = '', im0s.copy()
            s += '%gx%g ' % img.shape[2:]  # print string
            if len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
                det_new = reversed(det)
                # Write results
                for *xyxy, conf, cls in det_new:
                    # Convert the (x1, y1, x2, y2) box to
                    # (center_x, center_y, width, height).
                    ans = torch.tensor(xyxy).view(1, 4).tolist()[0]
                    ans = [(ans[0] + ans[2]) / 2, (ans[1] + ans[3]) / 2, ans[2]-ans[0], ans[3]-ans[1] ]
                    # Class 0 is reported as 'red', any other class as
                    # 'yellow'; only the last box per class is kept.
                    if (not int(cls)):
                        frame_cap.detect_box['red'] = ans
                    else:
                        frame_cap.detect_box['yellow'] = ans
                print(frame_cap.detect_box['red'])
            else:
                # No detections: clear both entries for this frame.
                frame_cap.detect_box['red'] = []
                frame_cap.detect_box['yellow'] = []
        #print(info)
        conn.send(frame_cap.detect_box)
        # Print time (inference + NMS)
        # print(f'{s}Done. ({t2 - t1:.3f}s)')
        # print("end detect one image")
webdriver_setup/windows.py | xloem/webdriver-setup | 1 | 12772527 | <reponame>xloem/webdriver-setup
from selenium import webdriver
from webdriver_manager.microsoft import EdgeChromiumDriverManager, IEDriverManager
from webdriver_setup.driver import DriverBase
class EdgeDriver(DriverBase):
    """Factory for Microsoft Edge webdrivers backed by webdriver-manager."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def create_driver(self, **kwargs):
        """Create Edge webdriver

        :type kwargs: dict
        :param kwargs: Optional arguments. ``cache_valid_range`` (days the
            downloaded driver binary stays cached, default 7) is consumed
            here; everything else is forwarded to ``selenium.webdriver.Edge``.

        :rtype: selenium.webdriver.Edge
        :returns: Edge webdriver instance
        """
        # pop() rather than get(): leaving cache_valid_range inside kwargs
        # would forward it to webdriver.Edge, which rejects the unexpected
        # keyword with a TypeError.
        cache_timeout = kwargs.pop("cache_valid_range", 7)
        driver_path = EdgeChromiumDriverManager(cache_valid_range=cache_timeout).install()
        return webdriver.Edge(driver_path, **kwargs)
class IeDriver(DriverBase):
    """Factory for Internet Explorer webdrivers backed by webdriver-manager."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def create_driver(self, **kwargs):
        """Create Internet Explorer webdriver

        :type kwargs: dict
        :param kwargs: Optional arguments. ``cache_valid_range`` (days the
            downloaded driver binary stays cached, default 7) is consumed
            here; everything else is forwarded to ``selenium.webdriver.Ie``.

        :rtype: selenium.webdriver.Ie
        :returns: Ie webdriver instance
        """
        # pop() rather than get(): leaving cache_valid_range inside kwargs
        # would forward it to webdriver.Ie, which rejects the unexpected
        # keyword with a TypeError.
        cache_timeout = kwargs.pop("cache_valid_range", 7)
        driver_path = IEDriverManager(cache_valid_range=cache_timeout).install()
        return webdriver.Ie(driver_path, **kwargs)
| 2.765625 | 3 |
data_retrivers/yarn_log_parser.py | IncioMan/ci_data_issue_prediction | 1 | 12772528 | """Test Suites: 370 passed, 370 total
Tests: 4 skipped, 1050 passed, 1054 total
Tests: 28 passed, 28 total
Snapshots: 830 passed, 830 total
Time: 67.988s
Ran all test suites.
Done in 99.84s."""
"""Test Suites: 187 passed, 187 total
Tests: 1 skipped, 579 passed, 580 total
Snapshots: 429 passed, 429 total
Time: 285.168s
Ran all test suites.
Done in 339.53s."""
#185464744
"""[1m[32m › [39m[22m[1m[32m259 tests passed[39m[22m (259 total in 61 test suites, 25 snapshots, run time 38.11s)
[2K[1GDone in 39.56s."""
from log_retriever import read_job_log, joblog
import re
#Regex
TEST_REGEX_D_P_T = "Tests:(\ *)(\d*) skipped, (\d*) passed, (\d*) total"
TEST_REGEX_P_T = "Tests:(\ *)(\d*) passed, (\d*) total"
TEST_REGEX_FORMAT_2_P_T = "Tests:(\ *)\\x1b\[(\d*)m\\x1b\[(\d*)m\\x1b\[(\d*)m(\d*) passed\\x1b\[(\d*)m\\x1b\[(\d*)m(\d*), (\d*) total"
TEST_REGEX_P_T_2 = "(\d*) tests passed(.*)\((\d*) total"
FORMAT_2 = "\\x1b\[(\d*)mTests"
def test_parser_format2(log):
total_tests = 0
test_passed = 0
test_skipped = 0
test_failed = 0
allRes = re.findall(TEST_REGEX_FORMAT_2_P_T, log)
for res in allRes:
test_passed += int(res[4])
total_tests += int(res[8])
test_failed += total_tests - test_passed
return total_tests, test_passed, test_failed, test_skipped
def get_test_results(log):
total_tests = 0
test_passed = 0
test_skipped = 0
test_failed = 0
allRes = re.findall(TEST_REGEX_P_T_2, log)
for res in allRes:
test_passed += int(res[0])
total_tests += int(res[2])
test_failed += total_tests - test_passed
if(total_tests > 0):
return total_tests, test_passed, test_failed, test_skipped
allRes = re.findall(TEST_REGEX_D_P_T, log)
for res in allRes:
test_skipped += int(res[1])
test_passed += int(res[2])
total_tests += int(res[3])
test_failed += total_tests - test_passed - test_skipped
if(total_tests > 0):
return total_tests, test_passed, test_failed, test_skipped
allRes = re.findall(TEST_REGEX_P_T, log)
for res in allRes:
test_skipped += 0
test_passed += int(res[1])
total_tests += int(res[2])
test_failed += total_tests - test_passed - test_skipped
if(total_tests > 0):
return total_tests, test_passed, test_failed, test_skipped
allRes = re.findall(TEST_REGEX_FORMAT_2_P_T, log)
for res in allRes:
test_passed += int(res[4])
total_tests += int(res[8])
test_failed += total_tests - test_passed
return total_tests, test_passed, test_failed, test_skipped
def get_metrics(log):
    """Convenience alias: return the (total, passed, failed, skipped) counts for `log`."""
    return get_test_results(log)
if __name__ == "__main__":
    # Manual smoke test: fetch one known job log and print its metrics.
    #dump_job_log(728138257)
    log = joblog(185464744)
    # Removed trailing dataset-corruption junk ("| 2.953125 | 3 |") that
    # made this line a syntax error.
    print(get_metrics(log))
LaundrIT-Project/Pedido/migrations/0040_auto_20191117_1418.py | linikerunk/laundr-it | 1 | 12772529 | # Generated by Django 2.2.7 on 2019-11-17 17:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter ``Pedido.valor_total`` to a nullable
    ``DecimalField(max_digits=7, decimal_places=2)``."""

    dependencies = [
        ('Pedido', '0039_auto_20191117_1417'),
    ]

    operations = [
        migrations.AlterField(
            model_name='pedido',
            name='valor_total',
            field=models.DecimalField(decimal_places=2, max_digits=7, null=True, verbose_name='Valor Total'),
        ),
    ]
| 1.40625 | 1 |
feincms/admin/editor.py | duointeractive/feincms | 0 | 12772530 | from feincms.admin.item_editor import ItemEditor, ItemEditorForm
from feincms.admin.tree_editor import TreeEditor, ajax_editable_boolean, \
ajax_editable_boolean_cell, django_boolean_icon
| 1.046875 | 1 |
youtube_downloader.py | SH1RL0CK/youtube_downloader | 0 | 12772531 | <gh_stars>0
#!/usr/bin/python3
from PySide2.QtWidgets import QApplication, QWidget, QLabel, QLineEdit, QFormLayout, QFileDialog, QPushButton, QCheckBox, QComboBox
from PySide2.QtGui import QPixmap
import sys, getpass, platform, time
from youtube_dl import YoutubeDL
class MainWidget(QWidget):
    """Single-window GUI of the YouTube downloader.

    Collects a video URL, an optional output filename, an "audio only"
    flag and a storage directory, then runs youtube_dl synchronously and
    reports success or failure in a status label.
    """

    def __init__(self) -> None:
        super().__init__()
        self.init_ui()

    def init_ui(self) -> None:
        """Build the static widget tree: title, form, submit button and status label."""
        self.resize(600, 300)
        self.setWindowTitle("YouTube-Downloader")
        self.title_label: QLabel = QLabel("<center>YouTube Downloader</center>", self)
        self.title_label.setStyleSheet("""
            QLabel{
                font-size: 40px;
                font-weight: bold;
            }
        """)
        self.title_label.setGeometry(0, 20, self.width(), 60)
        self.form_widget: QWidget = QWidget(self)
        self.form_widget.setStyleSheet("""
            QWidget{
                font-size: 15px
            }
        """)
        self.form_widget_layout: QFormLayout = QFormLayout()
        self.url_input: QLineEdit = QLineEdit()
        self.form_widget_layout.addRow("Video URL:", self.url_input)
        self.extract_audio_checkbox: QCheckBox = QCheckBox("Only extract the audio")
        self.form_widget_layout.addWidget(self.extract_audio_checkbox)
        self.filename_input: QLineEdit = QLineEdit()
        self.form_widget_layout.addRow("Filename (optional):", self.filename_input)
        # Default download directory depends on the host OS; on other
        # platforms the button text asks the user to pick a folder.
        self.storage_location_button: QPushButton = QPushButton(f"C:/Users/{getpass.getuser()}/Downloads" if "Windows" == platform.system() else f"/home/{getpass.getuser()}/Downloads" if "Linux" == platform.system() else "Bitte wähle einen Speicherort aus")
        self.storage_location_button.setStyleSheet("""
            QPushButton{
                text-align: left;
            }
        """)
        self.storage_location_button.clicked.connect(self.change_storage_location)
        self.form_widget_layout.addRow("Storage Location:", self.storage_location_button)
        self.form_widget.setLayout(self.form_widget_layout)
        self.form_widget.setGeometry((self.width() - 550)/2, 80, 550, 200)
        self.submit_button: QPushButton = QPushButton("Download this Video", self)
        self.submit_button.clicked.connect(self.download_video)
        self.submit_button.setGeometry((self.width() - 400)/2 , 220, 400, 30)
        self.submit_button.setStyleSheet("""
            QPushButton{
                font-size: 15px
            }
        """)
        self.state_message_label: QLabel = QLabel(self)
        self.state_message_label.setGeometry((self.width() - 400)/2, 250, 400, 20)

    def change_storage_location(self) -> None:
        """Let the user choose the download directory via a folder dialog."""
        new_storage_location: str = QFileDialog.getExistingDirectory(None, "Please select a folder", self.storage_location_button.text())
        # An empty string means the dialog was cancelled; keep the old path.
        if new_storage_location != "":
            self.storage_location_button.setText(new_storage_location)

    def download_video(self) -> None:
        """Validate the form, download the video (or audio) and report the result.

        Runs youtube_dl synchronously, so the UI blocks during the download.
        """
        if not self.url_input.text().startswith(("www.youtube.com/watch?v=", "https://www.youtube.com/watch?v=")):
            # Fixed duplicated word in the user-facing message ("Please please").
            self.state_message("Please enter a valid video URL!", "red")
            return
        filename: str = self.filename_input.text() if self.filename_input.text() != "" else "%(title)s"
        ydl_opts: dict = {
            "format": "bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best" if not self.extract_audio_checkbox.isChecked() else "bestaudio/best",
            # Bug fix: `filename` was computed above but never used; the
            # output template now honours the user-chosen name (falling
            # back to the video title).
            "outtmpl": f"{self.storage_location_button.text()}/{filename}.%(ext)s",
        }
        if self.extract_audio_checkbox.isChecked():
            # Re-encode the downloaded stream to a standalone mp3.
            ydl_opts["postprocessors"] = [{
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'mp3',
                'preferredquality': '192'
            }]
        print("Start Downloading...")
        try:
            with YoutubeDL(ydl_opts) as ydl:
                ydl.download([self.url_input.text()])
        except:
            self.state_message("An error occurred while downloading this video!", "red")
            print("An error occurred!")
            return
        self.state_message("The video was downloaded successfully!", "green")
        # Reset the form for the next download.
        self.url_input.setText("")
        self.filename_input.setText("")
        self.extract_audio_checkbox.setChecked(False)
        print("Downloading is completed!")

    def state_message(self, message: str, color: str) -> None:
        """Show `message` centered in the status label, rendered in `color`."""
        self.state_message_label.setStyleSheet(f"""
            QLabel{{
                color: {color};
            }}
        """)
        self.state_message_label.setText(f"<center>{message}</center>")
def main() -> None:
    """Start the Qt application and block until the window is closed."""
    application = QApplication()
    window = MainWidget()
    window.show()
    sys.exit(application.exec_())
if __name__ == "__main__":
    # Removed trailing dataset-corruption junk ("| 2.703125 | 3 |") that
    # made this line a syntax error.
    main()
sdk/python/pulumi_alicloud/ess/scaling_configuration.py | pulumi/pulumi-alicloud | 42 | 12772532 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ScalingConfigurationArgs', 'ScalingConfiguration']
@pulumi.input_type
class ScalingConfigurationArgs:
def __init__(__self__, *,
scaling_group_id: pulumi.Input[str],
active: Optional[pulumi.Input[bool]] = None,
credit_specification: Optional[pulumi.Input[str]] = None,
data_disks: Optional[pulumi.Input[Sequence[pulumi.Input['ScalingConfigurationDataDiskArgs']]]] = None,
enable: Optional[pulumi.Input[bool]] = None,
force_delete: Optional[pulumi.Input[bool]] = None,
image_id: Optional[pulumi.Input[str]] = None,
image_name: Optional[pulumi.Input[str]] = None,
instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
instance_name: Optional[pulumi.Input[str]] = None,
instance_type: Optional[pulumi.Input[str]] = None,
instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
internet_charge_type: Optional[pulumi.Input[str]] = None,
internet_max_bandwidth_in: Optional[pulumi.Input[int]] = None,
internet_max_bandwidth_out: Optional[pulumi.Input[int]] = None,
io_optimized: Optional[pulumi.Input[str]] = None,
is_outdated: Optional[pulumi.Input[bool]] = None,
key_name: Optional[pulumi.Input[str]] = None,
kms_encrypted_password: Optional[pulumi.Input[str]] = None,
kms_encryption_context: Optional[pulumi.Input[Mapping[str, Any]]] = None,
override: Optional[pulumi.Input[bool]] = None,
password: Optional[pulumi.Input[str]] = None,
password_inherit: Optional[pulumi.Input[bool]] = None,
resource_group_id: Optional[pulumi.Input[str]] = None,
role_name: Optional[pulumi.Input[str]] = None,
scaling_configuration_name: Optional[pulumi.Input[str]] = None,
security_group_id: Optional[pulumi.Input[str]] = None,
security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
substitute: Optional[pulumi.Input[str]] = None,
system_disk_auto_snapshot_policy_id: Optional[pulumi.Input[str]] = None,
system_disk_category: Optional[pulumi.Input[str]] = None,
system_disk_description: Optional[pulumi.Input[str]] = None,
system_disk_name: Optional[pulumi.Input[str]] = None,
system_disk_performance_level: Optional[pulumi.Input[str]] = None,
system_disk_size: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
user_data: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ScalingConfiguration resource.
:param pulumi.Input[str] scaling_group_id: ID of the scaling group of a scaling configuration.
:param pulumi.Input[bool] active: Whether active current scaling configuration in the specified scaling group. Default to `false`.
:param pulumi.Input[str] credit_specification: Performance mode of the t5 burstable instance. Valid values: 'Standard', 'Unlimited'.
:param pulumi.Input[Sequence[pulumi.Input['ScalingConfigurationDataDiskArgs']]] data_disks: DataDisk mappings to attach to ecs instance. See Block datadisk below for details.
:param pulumi.Input[bool] enable: Whether enable the specified scaling group(make it active) to which the current scaling configuration belongs.
:param pulumi.Input[bool] force_delete: The last scaling configuration will be deleted forcibly with deleting its scaling group. Default to false.
:param pulumi.Input[str] image_id: ID of an image file, indicating the image resource selected when an instance is enabled.
:param pulumi.Input[str] image_name: Name of an image file, indicating the image resource selected when an instance is enabled.
:param pulumi.Input[Sequence[pulumi.Input[str]]] instance_ids: It has been deprecated from version 1.6.0. New resource `ess.Attachment` replaces it.
:param pulumi.Input[str] instance_name: Name of an ECS instance. Default to "ESS-Instance". It is valid from version 1.7.1.
:param pulumi.Input[str] instance_type: Resource type of an ECS instance.
:param pulumi.Input[Sequence[pulumi.Input[str]]] instance_types: Resource types of an ECS instance.
:param pulumi.Input[str] internet_charge_type: Network billing type, Values: PayByBandwidth or PayByTraffic. Default to `PayByBandwidth`.
:param pulumi.Input[int] internet_max_bandwidth_in: Maximum incoming bandwidth from the public network, measured in Mbps (Mega bit per second). The value range is [1,200].
:param pulumi.Input[int] internet_max_bandwidth_out: Maximum outgoing bandwidth from the public network, measured in Mbps (Mega bit per second). The value range for PayByBandwidth is [0,100].
:param pulumi.Input[str] io_optimized: It has been deprecated on instance resource. All the launched alicloud instances will be I/O optimized.
:param pulumi.Input[bool] is_outdated: Whether to use outdated instance type. Default to false.
:param pulumi.Input[str] key_name: The name of key pair that can login ECS instance successfully without password. If it is specified, the password would be invalid.
:param pulumi.Input[str] kms_encrypted_password: An KMS encrypts password used to a db account. If the `password` is filled in, this field will be ignored.
:param pulumi.Input[Mapping[str, Any]] kms_encryption_context: An KMS encryption context used to decrypt `kms_encrypted_password` before creating or updating a db account with `kms_encrypted_password`. See [Encryption Context](https://www.alibabacloud.com/help/doc-detail/42975.htm). It is valid when `kms_encrypted_password` is set.
:param pulumi.Input[bool] override: Indicates whether to overwrite the existing data. Default to false.
:param pulumi.Input[str] password: The password of the ECS instance. The password must be 8 to 30 characters in length. It must contains at least three of the following character types: uppercase letters, lowercase letters, digits, and special characters. Special characters include `() ~!@#$%^&*-_+=\|{}[]:;'<>,.?/`, The password of Windows-based instances cannot start with a forward slash (/).
:param pulumi.Input[bool] password_inherit: Specifies whether to use the password that is predefined in the image. If the PasswordInherit parameter is set to true, the `password` and `kms_encrypted_password` will be ignored. You must ensure that the selected image has a password configured.
:param pulumi.Input[str] resource_group_id: ID of resource group.
:param pulumi.Input[str] role_name: Instance RAM role name. The name is provided and maintained by RAM. You can use `ram.Role` to create a new one.
:param pulumi.Input[str] scaling_configuration_name: Name shown for the scheduled task. which must contain 2-64 characters (English or Chinese), starting with numbers, English letters or Chinese characters, and can contain number, underscores `_`, hypens `-`, and decimal point `.`. If this parameter value is not specified, the default value is ScalingConfigurationId.
:param pulumi.Input[str] security_group_id: ID of the security group used to create new instance. It is conflict with `security_group_ids`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] security_group_ids: List IDs of the security group used to create new instances. It is conflict with `security_group_id`.
:param pulumi.Input[str] substitute: The another scaling configuration which will be active automatically and replace current configuration when setting `active` to 'false'. It is invalid when `active` is 'true'.
:param pulumi.Input[str] system_disk_auto_snapshot_policy_id: The id of auto snapshot policy for system disk.
:param pulumi.Input[str] system_disk_category: Category of the system disk. The parameter value options are `ephemeral_ssd`, `cloud_efficiency`, `cloud_ssd`, `cloud_essd` and `cloud`. `cloud` only is used to some no I/O optimized instance. Default to `cloud_efficiency`.
:param pulumi.Input[str] system_disk_description: The description of the system disk. The description must be 2 to 256 characters in length and cannot start with http:// or https://.
:param pulumi.Input[str] system_disk_name: The name of the system disk. It must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-). Default value: null.
:param pulumi.Input[str] system_disk_performance_level: The performance level of the ESSD used as the system disk.
:param pulumi.Input[int] system_disk_size: Size of system disk, in GiB. Optional values: cloud: 20-500, cloud_efficiency: 20-500, cloud_ssd: 20-500, ephemeral_ssd: 20-500 The default value is max{40, ImageSize}. If this parameter is set, the system disk size must be greater than or equal to max{40, ImageSize}.
:param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource. It will be applied for ECS instances finally.
- Key: It can be up to 64 characters in length. It cannot begin with "aliyun", "http://", or "https://". It cannot be a null string.
- Value: It can be up to 128 characters in length. It cannot begin with "aliyun", "http://", or "https://" It can be a null string.
:param pulumi.Input[str] user_data: User-defined data to customize the startup behaviors of the ECS instance and to pass data into the ECS instance.
"""
pulumi.set(__self__, "scaling_group_id", scaling_group_id)
if active is not None:
pulumi.set(__self__, "active", active)
if credit_specification is not None:
pulumi.set(__self__, "credit_specification", credit_specification)
if data_disks is not None:
pulumi.set(__self__, "data_disks", data_disks)
if enable is not None:
pulumi.set(__self__, "enable", enable)
if force_delete is not None:
pulumi.set(__self__, "force_delete", force_delete)
if image_id is not None:
pulumi.set(__self__, "image_id", image_id)
if image_name is not None:
pulumi.set(__self__, "image_name", image_name)
if instance_ids is not None:
warnings.warn("""Field 'instance_ids' has been deprecated from provider version 1.6.0. New resource 'alicloud_ess_attachment' replaces it.""", DeprecationWarning)
pulumi.log.warn("""instance_ids is deprecated: Field 'instance_ids' has been deprecated from provider version 1.6.0. New resource 'alicloud_ess_attachment' replaces it.""")
if instance_ids is not None:
pulumi.set(__self__, "instance_ids", instance_ids)
if instance_name is not None:
pulumi.set(__self__, "instance_name", instance_name)
if instance_type is not None:
pulumi.set(__self__, "instance_type", instance_type)
if instance_types is not None:
pulumi.set(__self__, "instance_types", instance_types)
if internet_charge_type is not None:
pulumi.set(__self__, "internet_charge_type", internet_charge_type)
if internet_max_bandwidth_in is not None:
pulumi.set(__self__, "internet_max_bandwidth_in", internet_max_bandwidth_in)
if internet_max_bandwidth_out is not None:
pulumi.set(__self__, "internet_max_bandwidth_out", internet_max_bandwidth_out)
if io_optimized is not None:
warnings.warn("""Attribute io_optimized has been deprecated on instance resource. All the launched alicloud instances will be IO optimized. Suggest to remove it from your template.""", DeprecationWarning)
pulumi.log.warn("""io_optimized is deprecated: Attribute io_optimized has been deprecated on instance resource. All the launched alicloud instances will be IO optimized. Suggest to remove it from your template.""")
if io_optimized is not None:
pulumi.set(__self__, "io_optimized", io_optimized)
if is_outdated is not None:
pulumi.set(__self__, "is_outdated", is_outdated)
if key_name is not None:
pulumi.set(__self__, "key_name", key_name)
if kms_encrypted_password is not None:
pulumi.set(__self__, "kms_encrypted_password", kms_encrypted_password)
if kms_encryption_context is not None:
pulumi.set(__self__, "kms_encryption_context", kms_encryption_context)
if override is not None:
pulumi.set(__self__, "override", override)
if password is not None:
pulumi.set(__self__, "password", password)
if password_inherit is not None:
pulumi.set(__self__, "password_inherit", password_inherit)
if resource_group_id is not None:
pulumi.set(__self__, "resource_group_id", resource_group_id)
if role_name is not None:
pulumi.set(__self__, "role_name", role_name)
if scaling_configuration_name is not None:
pulumi.set(__self__, "scaling_configuration_name", scaling_configuration_name)
if security_group_id is not None:
pulumi.set(__self__, "security_group_id", security_group_id)
if security_group_ids is not None:
pulumi.set(__self__, "security_group_ids", security_group_ids)
if substitute is not None:
pulumi.set(__self__, "substitute", substitute)
if system_disk_auto_snapshot_policy_id is not None:
pulumi.set(__self__, "system_disk_auto_snapshot_policy_id", system_disk_auto_snapshot_policy_id)
if system_disk_category is not None:
pulumi.set(__self__, "system_disk_category", system_disk_category)
if system_disk_description is not None:
pulumi.set(__self__, "system_disk_description", system_disk_description)
if system_disk_name is not None:
pulumi.set(__self__, "system_disk_name", system_disk_name)
if system_disk_performance_level is not None:
pulumi.set(__self__, "system_disk_performance_level", system_disk_performance_level)
if system_disk_size is not None:
pulumi.set(__self__, "system_disk_size", system_disk_size)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if user_data is not None:
pulumi.set(__self__, "user_data", user_data)
@property
@pulumi.getter(name="scalingGroupId")
def scaling_group_id(self) -> pulumi.Input[str]:
"""
ID of the scaling group of a scaling configuration.
"""
return pulumi.get(self, "scaling_group_id")
@scaling_group_id.setter
def scaling_group_id(self, value: pulumi.Input[str]):
pulumi.set(self, "scaling_group_id", value)
@property
@pulumi.getter
def active(self) -> Optional[pulumi.Input[bool]]:
"""
Whether active current scaling configuration in the specified scaling group. Default to `false`.
"""
return pulumi.get(self, "active")
@active.setter
def active(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "active", value)
@property
@pulumi.getter(name="creditSpecification")
def credit_specification(self) -> Optional[pulumi.Input[str]]:
"""
Performance mode of the t5 burstable instance. Valid values: 'Standard', 'Unlimited'.
"""
return pulumi.get(self, "credit_specification")
@credit_specification.setter
def credit_specification(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "credit_specification", value)
@property
@pulumi.getter(name="dataDisks")
def data_disks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScalingConfigurationDataDiskArgs']]]]:
"""
DataDisk mappings to attach to ecs instance. See Block datadisk below for details.
"""
return pulumi.get(self, "data_disks")
@data_disks.setter
def data_disks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScalingConfigurationDataDiskArgs']]]]):
pulumi.set(self, "data_disks", value)
@property
@pulumi.getter
def enable(self) -> Optional[pulumi.Input[bool]]:
"""
Whether enable the specified scaling group(make it active) to which the current scaling configuration belongs.
"""
return pulumi.get(self, "enable")
@enable.setter
def enable(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable", value)
@property
@pulumi.getter(name="forceDelete")
def force_delete(self) -> Optional[pulumi.Input[bool]]:
"""
The last scaling configuration will be deleted forcibly with deleting its scaling group. Default to false.
"""
return pulumi.get(self, "force_delete")
@force_delete.setter
def force_delete(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "force_delete", value)
@property
@pulumi.getter(name="imageId")
def image_id(self) -> Optional[pulumi.Input[str]]:
"""
ID of an image file, indicating the image resource selected when an instance is enabled.
"""
return pulumi.get(self, "image_id")
@image_id.setter
def image_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_id", value)
@property
@pulumi.getter(name="imageName")
def image_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of an image file, indicating the image resource selected when an instance is enabled.
"""
return pulumi.get(self, "image_name")
@image_name.setter
def image_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_name", value)
@property
@pulumi.getter(name="instanceIds")
def instance_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
It has been deprecated from version 1.6.0. New resource `ess.Attachment` replaces it.
"""
return pulumi.get(self, "instance_ids")
@instance_ids.setter
def instance_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "instance_ids", value)
@property
@pulumi.getter(name="instanceName")
def instance_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of an ECS instance. Default to "ESS-Instance". It is valid from version 1.7.1.
"""
return pulumi.get(self, "instance_name")
@instance_name.setter
def instance_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_name", value)
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> Optional[pulumi.Input[str]]:
"""
Resource type of an ECS instance.
"""
return pulumi.get(self, "instance_type")
@instance_type.setter
def instance_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_type", value)
@property
@pulumi.getter(name="instanceTypes")
def instance_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Resource types of an ECS instance.
"""
return pulumi.get(self, "instance_types")
@instance_types.setter
def instance_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "instance_types", value)
@property
@pulumi.getter(name="internetChargeType")
def internet_charge_type(self) -> Optional[pulumi.Input[str]]:
"""
Network billing type, Values: PayByBandwidth or PayByTraffic. Default to `PayByBandwidth`.
"""
return pulumi.get(self, "internet_charge_type")
@internet_charge_type.setter
def internet_charge_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "internet_charge_type", value)
@property
@pulumi.getter(name="internetMaxBandwidthIn")
def internet_max_bandwidth_in(self) -> Optional[pulumi.Input[int]]:
"""
Maximum incoming bandwidth from the public network, measured in Mbps (Mega bit per second). The value range is [1,200].
"""
return pulumi.get(self, "internet_max_bandwidth_in")
@internet_max_bandwidth_in.setter
def internet_max_bandwidth_in(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "internet_max_bandwidth_in", value)
@property
@pulumi.getter(name="internetMaxBandwidthOut")
def internet_max_bandwidth_out(self) -> Optional[pulumi.Input[int]]:
"""
Maximum outgoing bandwidth from the public network, measured in Mbps (Mega bit per second). The value range for PayByBandwidth is [0,100].
"""
return pulumi.get(self, "internet_max_bandwidth_out")
@internet_max_bandwidth_out.setter
def internet_max_bandwidth_out(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "internet_max_bandwidth_out", value)
@property
@pulumi.getter(name="ioOptimized")
def io_optimized(self) -> Optional[pulumi.Input[str]]:
"""
It has been deprecated on instance resource. All the launched alicloud instances will be I/O optimized.
"""
return pulumi.get(self, "io_optimized")
@io_optimized.setter
def io_optimized(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "io_optimized", value)
@property
@pulumi.getter(name="isOutdated")
def is_outdated(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to use outdated instance type. Default to false.
"""
return pulumi.get(self, "is_outdated")
@is_outdated.setter
def is_outdated(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_outdated", value)
@property
@pulumi.getter(name="keyName")
def key_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of key pair that can login ECS instance successfully without password. If it is specified, the password would be invalid.
"""
return pulumi.get(self, "key_name")
@key_name.setter
def key_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key_name", value)
@property
@pulumi.getter(name="kmsEncryptedPassword")
def kms_encrypted_password(self) -> Optional[pulumi.Input[str]]:
"""
An KMS encrypts password used to a db account. If the `password` is filled in, this field will be ignored.
"""
return pulumi.get(self, "kms_encrypted_password")
@kms_encrypted_password.setter
def kms_encrypted_password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_encrypted_password", value)
@property
@pulumi.getter(name="kmsEncryptionContext")
def kms_encryption_context(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
An KMS encryption context used to decrypt `kms_encrypted_password` before creating or updating a db account with `kms_encrypted_password`. See [Encryption Context](https://www.alibabacloud.com/help/doc-detail/42975.htm). It is valid when `kms_encrypted_password` is set.
"""
return pulumi.get(self, "kms_encryption_context")
@kms_encryption_context.setter
def kms_encryption_context(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "kms_encryption_context", value)
@property
@pulumi.getter
def override(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether to overwrite the existing data. Default to false.
"""
return pulumi.get(self, "override")
@override.setter
def override(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "override", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
The password of the ECS instance. The password must be 8 to 30 characters in length. It must contains at least three of the following character types: uppercase letters, lowercase letters, digits, and special characters. Special characters include `() ~!@#$%^&*-_+=\|{}[]:;'<>,.?/`, The password of Windows-based instances cannot start with a forward slash (/).
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
    @property
    @pulumi.getter(name="passwordInherit")
    def password_inherit(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether to use the password that is predefined in the image. If the PasswordInherit parameter is set to true, the `password` and `kms_encrypted_password` will be ignored. You must ensure that the selected image has a password configured.
        """
        return pulumi.get(self, "password_inherit")

    @password_inherit.setter
    def password_inherit(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "password_inherit", value)
@property
@pulumi.getter(name="resourceGroupId")
def resource_group_id(self) -> Optional[pulumi.Input[str]]:
"""
ID of resource group.
"""
return pulumi.get(self, "resource_group_id")
@resource_group_id.setter
def resource_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_id", value)
@property
@pulumi.getter(name="roleName")
def role_name(self) -> Optional[pulumi.Input[str]]:
"""
Instance RAM role name. The name is provided and maintained by RAM. You can use `ram.Role` to create a new one.
"""
return pulumi.get(self, "role_name")
@role_name.setter
def role_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_name", value)
@property
@pulumi.getter(name="scalingConfigurationName")
def scaling_configuration_name(self) -> Optional[pulumi.Input[str]]:
"""
Name shown for the scheduled task. which must contain 2-64 characters (English or Chinese), starting with numbers, English letters or Chinese characters, and can contain number, underscores `_`, hypens `-`, and decimal point `.`. If this parameter value is not specified, the default value is ScalingConfigurationId.
"""
return pulumi.get(self, "scaling_configuration_name")
@scaling_configuration_name.setter
def scaling_configuration_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scaling_configuration_name", value)
@property
@pulumi.getter(name="securityGroupId")
def security_group_id(self) -> Optional[pulumi.Input[str]]:
"""
ID of the security group used to create new instance. It is conflict with `security_group_ids`.
"""
return pulumi.get(self, "security_group_id")
@security_group_id.setter
def security_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "security_group_id", value)
@property
@pulumi.getter(name="securityGroupIds")
def security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List IDs of the security group used to create new instances. It is conflict with `security_group_id`.
"""
return pulumi.get(self, "security_group_ids")
@security_group_ids.setter
def security_group_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "security_group_ids", value)
@property
@pulumi.getter
def substitute(self) -> Optional[pulumi.Input[str]]:
"""
The another scaling configuration which will be active automatically and replace current configuration when setting `active` to 'false'. It is invalid when `active` is 'true'.
"""
return pulumi.get(self, "substitute")
@substitute.setter
def substitute(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "substitute", value)
@property
@pulumi.getter(name="systemDiskAutoSnapshotPolicyId")
def system_disk_auto_snapshot_policy_id(self) -> Optional[pulumi.Input[str]]:
"""
The id of auto snapshot policy for system disk.
"""
return pulumi.get(self, "system_disk_auto_snapshot_policy_id")
@system_disk_auto_snapshot_policy_id.setter
def system_disk_auto_snapshot_policy_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "system_disk_auto_snapshot_policy_id", value)
@property
@pulumi.getter(name="systemDiskCategory")
def system_disk_category(self) -> Optional[pulumi.Input[str]]:
"""
Category of the system disk. The parameter value options are `ephemeral_ssd`, `cloud_efficiency`, `cloud_ssd`, `cloud_essd` and `cloud`. `cloud` only is used to some no I/O optimized instance. Default to `cloud_efficiency`.
"""
return pulumi.get(self, "system_disk_category")
@system_disk_category.setter
def system_disk_category(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "system_disk_category", value)
@property
@pulumi.getter(name="systemDiskDescription")
def system_disk_description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the system disk. The description must be 2 to 256 characters in length and cannot start with http:// or https://.
"""
return pulumi.get(self, "system_disk_description")
@system_disk_description.setter
def system_disk_description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "system_disk_description", value)
@property
@pulumi.getter(name="systemDiskName")
def system_disk_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the system disk. It must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-). Default value: null.
"""
return pulumi.get(self, "system_disk_name")
@system_disk_name.setter
def system_disk_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "system_disk_name", value)
@property
@pulumi.getter(name="systemDiskPerformanceLevel")
def system_disk_performance_level(self) -> Optional[pulumi.Input[str]]:
"""
The performance level of the ESSD used as the system disk.
"""
return pulumi.get(self, "system_disk_performance_level")
@system_disk_performance_level.setter
def system_disk_performance_level(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "system_disk_performance_level", value)
@property
@pulumi.getter(name="systemDiskSize")
def system_disk_size(self) -> Optional[pulumi.Input[int]]:
"""
Size of system disk, in GiB. Optional values: cloud: 20-500, cloud_efficiency: 20-500, cloud_ssd: 20-500, ephemeral_ssd: 20-500 The default value is max{40, ImageSize}. If this parameter is set, the system disk size must be greater than or equal to max{40, ImageSize}.
"""
return pulumi.get(self, "system_disk_size")
@system_disk_size.setter
def system_disk_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "system_disk_size", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
A mapping of tags to assign to the resource. It will be applied for ECS instances finally.
- Key: It can be up to 64 characters in length. It cannot begin with "aliyun", "http://", or "https://". It cannot be a null string.
- Value: It can be up to 128 characters in length. It cannot begin with "aliyun", "http://", or "https://" It can be a null string.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="userData")
def user_data(self) -> Optional[pulumi.Input[str]]:
"""
User-defined data to customize the startup behaviors of the ECS instance and to pass data into the ECS instance.
"""
return pulumi.get(self, "user_data")
@user_data.setter
def user_data(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_data", value)
@pulumi.input_type
class _ScalingConfigurationState:
    def __init__(__self__, *,
                 active: Optional[pulumi.Input[bool]] = None,
                 credit_specification: Optional[pulumi.Input[str]] = None,
                 data_disks: Optional[pulumi.Input[Sequence[pulumi.Input['ScalingConfigurationDataDiskArgs']]]] = None,
                 enable: Optional[pulumi.Input[bool]] = None,
                 force_delete: Optional[pulumi.Input[bool]] = None,
                 image_id: Optional[pulumi.Input[str]] = None,
                 image_name: Optional[pulumi.Input[str]] = None,
                 instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 instance_name: Optional[pulumi.Input[str]] = None,
                 instance_type: Optional[pulumi.Input[str]] = None,
                 instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 internet_charge_type: Optional[pulumi.Input[str]] = None,
                 internet_max_bandwidth_in: Optional[pulumi.Input[int]] = None,
                 internet_max_bandwidth_out: Optional[pulumi.Input[int]] = None,
                 io_optimized: Optional[pulumi.Input[str]] = None,
                 is_outdated: Optional[pulumi.Input[bool]] = None,
                 key_name: Optional[pulumi.Input[str]] = None,
                 kms_encrypted_password: Optional[pulumi.Input[str]] = None,
                 kms_encryption_context: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 override: Optional[pulumi.Input[bool]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 password_inherit: Optional[pulumi.Input[bool]] = None,
                 resource_group_id: Optional[pulumi.Input[str]] = None,
                 role_name: Optional[pulumi.Input[str]] = None,
                 scaling_configuration_name: Optional[pulumi.Input[str]] = None,
                 scaling_group_id: Optional[pulumi.Input[str]] = None,
                 security_group_id: Optional[pulumi.Input[str]] = None,
                 security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 substitute: Optional[pulumi.Input[str]] = None,
                 system_disk_auto_snapshot_policy_id: Optional[pulumi.Input[str]] = None,
                 system_disk_category: Optional[pulumi.Input[str]] = None,
                 system_disk_description: Optional[pulumi.Input[str]] = None,
                 system_disk_name: Optional[pulumi.Input[str]] = None,
                 system_disk_performance_level: Optional[pulumi.Input[str]] = None,
                 system_disk_size: Optional[pulumi.Input[int]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 user_data: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering ScalingConfiguration resources.
        :param pulumi.Input[bool] active: Whether active current scaling configuration in the specified scaling group. Default to `false`.
        :param pulumi.Input[str] credit_specification: Performance mode of the t5 burstable instance. Valid values: 'Standard', 'Unlimited'.
        :param pulumi.Input[Sequence[pulumi.Input['ScalingConfigurationDataDiskArgs']]] data_disks: DataDisk mappings to attach to ecs instance. See Block datadisk below for details.
        :param pulumi.Input[bool] enable: Whether enable the specified scaling group(make it active) to which the current scaling configuration belongs.
        :param pulumi.Input[bool] force_delete: The last scaling configuration will be deleted forcibly with deleting its scaling group. Default to false.
        :param pulumi.Input[str] image_id: ID of an image file, indicating the image resource selected when an instance is enabled.
        :param pulumi.Input[str] image_name: Name of an image file, indicating the image resource selected when an instance is enabled.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] instance_ids: It has been deprecated from version 1.6.0. New resource `ess.Attachment` replaces it.
        :param pulumi.Input[str] instance_name: Name of an ECS instance. Default to "ESS-Instance". It is valid from version 1.7.1.
        :param pulumi.Input[str] instance_type: Resource type of an ECS instance.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] instance_types: Resource types of an ECS instance.
        :param pulumi.Input[str] internet_charge_type: Network billing type, Values: PayByBandwidth or PayByTraffic. Default to `PayByBandwidth`.
        :param pulumi.Input[int] internet_max_bandwidth_in: Maximum incoming bandwidth from the public network, measured in Mbps (Mega bit per second). The value range is [1,200].
        :param pulumi.Input[int] internet_max_bandwidth_out: Maximum outgoing bandwidth from the public network, measured in Mbps (Mega bit per second). The value range for PayByBandwidth is [0,100].
        :param pulumi.Input[str] io_optimized: It has been deprecated on instance resource. All the launched alicloud instances will be I/O optimized.
        :param pulumi.Input[bool] is_outdated: Whether to use outdated instance type. Default to false.
        :param pulumi.Input[str] key_name: The name of key pair that can login ECS instance successfully without password. If it is specified, the password would be invalid.
        :param pulumi.Input[str] kms_encrypted_password: An KMS encrypts password used to a db account. If the `password` is filled in, this field will be ignored.
        :param pulumi.Input[Mapping[str, Any]] kms_encryption_context: An KMS encryption context used to decrypt `kms_encrypted_password` before creating or updating a db account with `kms_encrypted_password`. See [Encryption Context](https://www.alibabacloud.com/help/doc-detail/42975.htm). It is valid when `kms_encrypted_password` is set.
        :param pulumi.Input[bool] override: Indicates whether to overwrite the existing data. Default to false.
        :param pulumi.Input[str] password: The password of the ECS instance. The password must be 8 to 30 characters in length. It must contains at least three of the following character types: uppercase letters, lowercase letters, digits, and special characters. Special characters include `() ~!@#$%^&*-_+=\|{}[]:;'<>,.?/`, The password of Windows-based instances cannot start with a forward slash (/).
        :param pulumi.Input[bool] password_inherit: Specifies whether to use the password that is predefined in the image. If the PasswordInherit parameter is set to true, the `password` and `kms_encrypted_password` will be ignored. You must ensure that the selected image has a password configured.
        :param pulumi.Input[str] resource_group_id: ID of resource group.
        :param pulumi.Input[str] role_name: Instance RAM role name. The name is provided and maintained by RAM. You can use `ram.Role` to create a new one.
        :param pulumi.Input[str] scaling_configuration_name: Name shown for the scheduled task. which must contain 2-64 characters (English or Chinese), starting with numbers, English letters or Chinese characters, and can contain number, underscores `_`, hypens `-`, and decimal point `.`. If this parameter value is not specified, the default value is ScalingConfigurationId.
        :param pulumi.Input[str] scaling_group_id: ID of the scaling group of a scaling configuration.
        :param pulumi.Input[str] security_group_id: ID of the security group used to create new instance. It is conflict with `security_group_ids`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] security_group_ids: List IDs of the security group used to create new instances. It is conflict with `security_group_id`.
        :param pulumi.Input[str] substitute: The another scaling configuration which will be active automatically and replace current configuration when setting `active` to 'false'. It is invalid when `active` is 'true'.
        :param pulumi.Input[str] system_disk_auto_snapshot_policy_id: The id of auto snapshot policy for system disk.
        :param pulumi.Input[str] system_disk_category: Category of the system disk. The parameter value options are `ephemeral_ssd`, `cloud_efficiency`, `cloud_ssd`, `cloud_essd` and `cloud`. `cloud` only is used to some no I/O optimized instance. Default to `cloud_efficiency`.
        :param pulumi.Input[str] system_disk_description: The description of the system disk. The description must be 2 to 256 characters in length and cannot start with http:// or https://.
        :param pulumi.Input[str] system_disk_name: The name of the system disk. It must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-). Default value: null.
        :param pulumi.Input[str] system_disk_performance_level: The performance level of the ESSD used as the system disk.
        :param pulumi.Input[int] system_disk_size: Size of system disk, in GiB. Optional values: cloud: 20-500, cloud_efficiency: 20-500, cloud_ssd: 20-500, ephemeral_ssd: 20-500 The default value is max{40, ImageSize}. If this parameter is set, the system disk size must be greater than or equal to max{40, ImageSize}.
        :param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource. It will be applied for ECS instances finally.
               - Key: It can be up to 64 characters in length. It cannot begin with "aliyun", "http://", or "https://". It cannot be a null string.
               - Value: It can be up to 128 characters in length. It cannot begin with "aliyun", "http://", or "https://" It can be a null string.
        :param pulumi.Input[str] user_data: User-defined data to customize the startup behaviors of the ECS instance and to pass data into the ECS instance.
        """
        # Only explicitly supplied (non-None) values are stored on the object,
        # so unset state fields remain absent rather than recorded as None.
        if active is not None:
            pulumi.set(__self__, "active", active)
        if credit_specification is not None:
            pulumi.set(__self__, "credit_specification", credit_specification)
        if data_disks is not None:
            pulumi.set(__self__, "data_disks", data_disks)
        if enable is not None:
            pulumi.set(__self__, "enable", enable)
        if force_delete is not None:
            pulumi.set(__self__, "force_delete", force_delete)
        if image_id is not None:
            pulumi.set(__self__, "image_id", image_id)
        if image_name is not None:
            pulumi.set(__self__, "image_name", image_name)
        # Deprecated input: the condition is intentionally checked twice — the
        # first branch only emits the deprecation warning, the second (below)
        # still stores the value so existing callers keep working.
        if instance_ids is not None:
            warnings.warn("""Field 'instance_ids' has been deprecated from provider version 1.6.0. New resource 'alicloud_ess_attachment' replaces it.""", DeprecationWarning)
            pulumi.log.warn("""instance_ids is deprecated: Field 'instance_ids' has been deprecated from provider version 1.6.0. New resource 'alicloud_ess_attachment' replaces it.""")
        if instance_ids is not None:
            pulumi.set(__self__, "instance_ids", instance_ids)
        if instance_name is not None:
            pulumi.set(__self__, "instance_name", instance_name)
        if instance_type is not None:
            pulumi.set(__self__, "instance_type", instance_type)
        if instance_types is not None:
            pulumi.set(__self__, "instance_types", instance_types)
        if internet_charge_type is not None:
            pulumi.set(__self__, "internet_charge_type", internet_charge_type)
        if internet_max_bandwidth_in is not None:
            pulumi.set(__self__, "internet_max_bandwidth_in", internet_max_bandwidth_in)
        if internet_max_bandwidth_out is not None:
            pulumi.set(__self__, "internet_max_bandwidth_out", internet_max_bandwidth_out)
        # Deprecated input: same warn-then-store pattern as instance_ids above.
        if io_optimized is not None:
            warnings.warn("""Attribute io_optimized has been deprecated on instance resource. All the launched alicloud instances will be IO optimized. Suggest to remove it from your template.""", DeprecationWarning)
            pulumi.log.warn("""io_optimized is deprecated: Attribute io_optimized has been deprecated on instance resource. All the launched alicloud instances will be IO optimized. Suggest to remove it from your template.""")
        if io_optimized is not None:
            pulumi.set(__self__, "io_optimized", io_optimized)
        if is_outdated is not None:
            pulumi.set(__self__, "is_outdated", is_outdated)
        if key_name is not None:
            pulumi.set(__self__, "key_name", key_name)
        if kms_encrypted_password is not None:
            pulumi.set(__self__, "kms_encrypted_password", kms_encrypted_password)
        if kms_encryption_context is not None:
            pulumi.set(__self__, "kms_encryption_context", kms_encryption_context)
        if override is not None:
            pulumi.set(__self__, "override", override)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if password_inherit is not None:
            pulumi.set(__self__, "password_inherit", password_inherit)
        if resource_group_id is not None:
            pulumi.set(__self__, "resource_group_id", resource_group_id)
        if role_name is not None:
            pulumi.set(__self__, "role_name", role_name)
        if scaling_configuration_name is not None:
            pulumi.set(__self__, "scaling_configuration_name", scaling_configuration_name)
        if scaling_group_id is not None:
            pulumi.set(__self__, "scaling_group_id", scaling_group_id)
        if security_group_id is not None:
            pulumi.set(__self__, "security_group_id", security_group_id)
        if security_group_ids is not None:
            pulumi.set(__self__, "security_group_ids", security_group_ids)
        if substitute is not None:
            pulumi.set(__self__, "substitute", substitute)
        if system_disk_auto_snapshot_policy_id is not None:
            pulumi.set(__self__, "system_disk_auto_snapshot_policy_id", system_disk_auto_snapshot_policy_id)
        if system_disk_category is not None:
            pulumi.set(__self__, "system_disk_category", system_disk_category)
        if system_disk_description is not None:
            pulumi.set(__self__, "system_disk_description", system_disk_description)
        if system_disk_name is not None:
            pulumi.set(__self__, "system_disk_name", system_disk_name)
        if system_disk_performance_level is not None:
            pulumi.set(__self__, "system_disk_performance_level", system_disk_performance_level)
        if system_disk_size is not None:
            pulumi.set(__self__, "system_disk_size", system_disk_size)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if user_data is not None:
            pulumi.set(__self__, "user_data", user_data)
@property
@pulumi.getter
def active(self) -> Optional[pulumi.Input[bool]]:
"""
Whether active current scaling configuration in the specified scaling group. Default to `false`.
"""
return pulumi.get(self, "active")
@active.setter
def active(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "active", value)
@property
@pulumi.getter(name="creditSpecification")
def credit_specification(self) -> Optional[pulumi.Input[str]]:
"""
Performance mode of the t5 burstable instance. Valid values: 'Standard', 'Unlimited'.
"""
return pulumi.get(self, "credit_specification")
@credit_specification.setter
def credit_specification(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "credit_specification", value)
@property
@pulumi.getter(name="dataDisks")
def data_disks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScalingConfigurationDataDiskArgs']]]]:
"""
DataDisk mappings to attach to ecs instance. See Block datadisk below for details.
"""
return pulumi.get(self, "data_disks")
@data_disks.setter
def data_disks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScalingConfigurationDataDiskArgs']]]]):
pulumi.set(self, "data_disks", value)
@property
@pulumi.getter
def enable(self) -> Optional[pulumi.Input[bool]]:
"""
Whether enable the specified scaling group(make it active) to which the current scaling configuration belongs.
"""
return pulumi.get(self, "enable")
@enable.setter
def enable(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable", value)
@property
@pulumi.getter(name="forceDelete")
def force_delete(self) -> Optional[pulumi.Input[bool]]:
"""
The last scaling configuration will be deleted forcibly with deleting its scaling group. Default to false.
"""
return pulumi.get(self, "force_delete")
@force_delete.setter
def force_delete(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "force_delete", value)
@property
@pulumi.getter(name="imageId")
def image_id(self) -> Optional[pulumi.Input[str]]:
"""
ID of an image file, indicating the image resource selected when an instance is enabled.
"""
return pulumi.get(self, "image_id")
@image_id.setter
def image_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_id", value)
@property
@pulumi.getter(name="imageName")
def image_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of an image file, indicating the image resource selected when an instance is enabled.
"""
return pulumi.get(self, "image_name")
@image_name.setter
def image_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_name", value)
@property
@pulumi.getter(name="instanceIds")
def instance_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
It has been deprecated from version 1.6.0. New resource `ess.Attachment` replaces it.
"""
return pulumi.get(self, "instance_ids")
@instance_ids.setter
def instance_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "instance_ids", value)
@property
@pulumi.getter(name="instanceName")
def instance_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of an ECS instance. Default to "ESS-Instance". It is valid from version 1.7.1.
"""
return pulumi.get(self, "instance_name")
@instance_name.setter
def instance_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_name", value)
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> Optional[pulumi.Input[str]]:
"""
Resource type of an ECS instance.
"""
return pulumi.get(self, "instance_type")
@instance_type.setter
def instance_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_type", value)
@property
@pulumi.getter(name="instanceTypes")
def instance_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Resource types of an ECS instance.
"""
return pulumi.get(self, "instance_types")
@instance_types.setter
def instance_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "instance_types", value)
@property
@pulumi.getter(name="internetChargeType")
def internet_charge_type(self) -> Optional[pulumi.Input[str]]:
"""
Network billing type, Values: PayByBandwidth or PayByTraffic. Default to `PayByBandwidth`.
"""
return pulumi.get(self, "internet_charge_type")
@internet_charge_type.setter
def internet_charge_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "internet_charge_type", value)
@property
@pulumi.getter(name="internetMaxBandwidthIn")
def internet_max_bandwidth_in(self) -> Optional[pulumi.Input[int]]:
"""
Maximum incoming bandwidth from the public network, measured in Mbps (Mega bit per second). The value range is [1,200].
"""
return pulumi.get(self, "internet_max_bandwidth_in")
@internet_max_bandwidth_in.setter
def internet_max_bandwidth_in(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "internet_max_bandwidth_in", value)
@property
@pulumi.getter(name="internetMaxBandwidthOut")
def internet_max_bandwidth_out(self) -> Optional[pulumi.Input[int]]:
"""
Maximum outgoing bandwidth from the public network, measured in Mbps (Mega bit per second). The value range for PayByBandwidth is [0,100].
"""
return pulumi.get(self, "internet_max_bandwidth_out")
@internet_max_bandwidth_out.setter
def internet_max_bandwidth_out(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "internet_max_bandwidth_out", value)
@property
@pulumi.getter(name="ioOptimized")
def io_optimized(self) -> Optional[pulumi.Input[str]]:
"""
It has been deprecated on instance resource. All the launched alicloud instances will be I/O optimized.
"""
return pulumi.get(self, "io_optimized")
@io_optimized.setter
def io_optimized(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "io_optimized", value)
@property
@pulumi.getter(name="isOutdated")
def is_outdated(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to use outdated instance type. Default to false.
"""
return pulumi.get(self, "is_outdated")
@is_outdated.setter
def is_outdated(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_outdated", value)
@property
@pulumi.getter(name="keyName")
def key_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of key pair that can login ECS instance successfully without password. If it is specified, the password would be invalid.
"""
return pulumi.get(self, "key_name")
@key_name.setter
def key_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key_name", value)
@property
@pulumi.getter(name="kmsEncryptedPassword")
def kms_encrypted_password(self) -> Optional[pulumi.Input[str]]:
"""
An KMS encrypts password used to a db account. If the `password` is filled in, this field will be ignored.
"""
return pulumi.get(self, "kms_encrypted_password")
@kms_encrypted_password.setter
def kms_encrypted_password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_encrypted_password", value)
@property
@pulumi.getter(name="kmsEncryptionContext")
def kms_encryption_context(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
An KMS encryption context used to decrypt `kms_encrypted_password` before creating or updating a db account with `kms_encrypted_password`. See [Encryption Context](https://www.alibabacloud.com/help/doc-detail/42975.htm). It is valid when `kms_encrypted_password` is set.
"""
return pulumi.get(self, "kms_encryption_context")
@kms_encryption_context.setter
def kms_encryption_context(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "kms_encryption_context", value)
    @property
    @pulumi.getter
    def override(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether to overwrite the existing data. Defaults to false.
        """
        return pulumi.get(self, "override")

    @override.setter
    def override(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "override", value)
    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        The password of the ECS instance. The password must be 8 to 30 characters in length. It must contain at least three of the following character types: uppercase letters, lowercase letters, digits, and special characters. Special characters include `() ~!@#$%^&*-_+=\|{}[]:;'<>,.?/`. The password of Windows-based instances cannot start with a forward slash (/).
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter(name="passwordInherit")
    def password_inherit(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether to use the password that is predefined in the image. If the PasswordInherit parameter is set to true, `password` and `kms_encrypted_password` will be ignored. You must ensure that the selected image has a password configured.
        """
        return pulumi.get(self, "password_inherit")

    @password_inherit.setter
    def password_inherit(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "password_inherit", value)
    @property
    @pulumi.getter(name="resourceGroupId")
    def resource_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the resource group.
        """
        return pulumi.get(self, "resource_group_id")

    @resource_group_id.setter
    def resource_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_id", value)
    @property
    @pulumi.getter(name="roleName")
    def role_name(self) -> Optional[pulumi.Input[str]]:
        """
        The RAM role name of the instance. The name is provided and maintained by RAM. You can use `ram.Role` to create a new one.
        """
        return pulumi.get(self, "role_name")

    @role_name.setter
    def role_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "role_name", value)
    @property
    @pulumi.getter(name="scalingConfigurationName")
    def scaling_configuration_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name shown for the scaling configuration, which must contain 2-64 characters (English or Chinese), starting with numbers, English letters or Chinese characters, and can contain numbers, underscores `_`, hyphens `-`, and decimal points `.`. If this parameter value is not specified, the default value is the ScalingConfigurationId.
        """
        return pulumi.get(self, "scaling_configuration_name")

    @scaling_configuration_name.setter
    def scaling_configuration_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scaling_configuration_name", value)
    @property
    @pulumi.getter(name="scalingGroupId")
    def scaling_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the scaling group to which the scaling configuration belongs.
        """
        return pulumi.get(self, "scaling_group_id")

    @scaling_group_id.setter
    def scaling_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scaling_group_id", value)
    @property
    @pulumi.getter(name="securityGroupId")
    def security_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the security group used to create new instances. It conflicts with `security_group_ids`.
        """
        return pulumi.get(self, "security_group_id")

    @security_group_id.setter
    def security_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "security_group_id", value)
    @property
    @pulumi.getter(name="securityGroupIds")
    def security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of IDs of the security groups used to create new instances. It conflicts with `security_group_id`.
        """
        return pulumi.get(self, "security_group_ids")

    @security_group_ids.setter
    def security_group_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "security_group_ids", value)
    @property
    @pulumi.getter
    def substitute(self) -> Optional[pulumi.Input[str]]:
        """
        The other scaling configuration which will become active automatically and replace the current configuration when `active` is set to 'false'. It is invalid when `active` is 'true'.
        """
        return pulumi.get(self, "substitute")

    @substitute.setter
    def substitute(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "substitute", value)
    @property
    @pulumi.getter(name="systemDiskAutoSnapshotPolicyId")
    def system_disk_auto_snapshot_policy_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the automatic snapshot policy for the system disk.
        """
        return pulumi.get(self, "system_disk_auto_snapshot_policy_id")

    @system_disk_auto_snapshot_policy_id.setter
    def system_disk_auto_snapshot_policy_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "system_disk_auto_snapshot_policy_id", value)
    @property
    @pulumi.getter(name="systemDiskCategory")
    def system_disk_category(self) -> Optional[pulumi.Input[str]]:
        """
        Category of the system disk. The parameter value options are `ephemeral_ssd`, `cloud_efficiency`, `cloud_ssd`, `cloud_essd` and `cloud`. `cloud` can only be used with some non-I/O-optimized instances. Defaults to `cloud_efficiency`.
        """
        return pulumi.get(self, "system_disk_category")

    @system_disk_category.setter
    def system_disk_category(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "system_disk_category", value)
    @property
    @pulumi.getter(name="systemDiskDescription")
    def system_disk_description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the system disk. It must be 2 to 256 characters in length and cannot start with http:// or https://.
        """
        return pulumi.get(self, "system_disk_description")

    @system_disk_description.setter
    def system_disk_description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "system_disk_description", value)
    @property
    @pulumi.getter(name="systemDiskName")
    def system_disk_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the system disk. It must be 2 to 128 characters in length, must start with a letter, and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-). Default value: null.
        """
        return pulumi.get(self, "system_disk_name")

    @system_disk_name.setter
    def system_disk_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "system_disk_name", value)
    @property
    @pulumi.getter(name="systemDiskPerformanceLevel")
    def system_disk_performance_level(self) -> Optional[pulumi.Input[str]]:
        """
        The performance level of the ESSD disk used as the system disk.
        """
        return pulumi.get(self, "system_disk_performance_level")

    @system_disk_performance_level.setter
    def system_disk_performance_level(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "system_disk_performance_level", value)
    @property
    @pulumi.getter(name="systemDiskSize")
    def system_disk_size(self) -> Optional[pulumi.Input[int]]:
        """
        Size of the system disk, in GiB. Optional values: cloud: 20-500, cloud_efficiency: 20-500, cloud_ssd: 20-500, ephemeral_ssd: 20-500. The default value is max{40, ImageSize}. If this parameter is set, the system disk size must be greater than or equal to max{40, ImageSize}.
        """
        return pulumi.get(self, "system_disk_size")

    @system_disk_size.setter
    def system_disk_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "system_disk_size", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        A mapping of tags to assign to the resource. It will be applied to the ECS instances finally.
        - Key: It can be up to 64 characters in length. It cannot begin with "aliyun", "http://", or "https://". It cannot be a null string.
        - Value: It can be up to 128 characters in length. It cannot begin with "aliyun", "http://", or "https://". It can be a null string.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="userData")
    def user_data(self) -> Optional[pulumi.Input[str]]:
        """
        User-defined data used to customize the startup behaviors of the ECS instance and to pass data into the ECS instance.
        """
        return pulumi.get(self, "user_data")

    @user_data.setter
    def user_data(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_data", value)
class ScalingConfiguration(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
active: Optional[pulumi.Input[bool]] = None,
credit_specification: Optional[pulumi.Input[str]] = None,
data_disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingConfigurationDataDiskArgs']]]]] = None,
enable: Optional[pulumi.Input[bool]] = None,
force_delete: Optional[pulumi.Input[bool]] = None,
image_id: Optional[pulumi.Input[str]] = None,
image_name: Optional[pulumi.Input[str]] = None,
instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
instance_name: Optional[pulumi.Input[str]] = None,
instance_type: Optional[pulumi.Input[str]] = None,
instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
internet_charge_type: Optional[pulumi.Input[str]] = None,
internet_max_bandwidth_in: Optional[pulumi.Input[int]] = None,
internet_max_bandwidth_out: Optional[pulumi.Input[int]] = None,
io_optimized: Optional[pulumi.Input[str]] = None,
is_outdated: Optional[pulumi.Input[bool]] = None,
key_name: Optional[pulumi.Input[str]] = None,
kms_encrypted_password: Optional[pulumi.Input[str]] = None,
kms_encryption_context: Optional[pulumi.Input[Mapping[str, Any]]] = None,
override: Optional[pulumi.Input[bool]] = None,
password: Optional[pulumi.Input[str]] = None,
password_inherit: Optional[pulumi.Input[bool]] = None,
resource_group_id: Optional[pulumi.Input[str]] = None,
role_name: Optional[pulumi.Input[str]] = None,
scaling_configuration_name: Optional[pulumi.Input[str]] = None,
scaling_group_id: Optional[pulumi.Input[str]] = None,
security_group_id: Optional[pulumi.Input[str]] = None,
security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
substitute: Optional[pulumi.Input[str]] = None,
system_disk_auto_snapshot_policy_id: Optional[pulumi.Input[str]] = None,
system_disk_category: Optional[pulumi.Input[str]] = None,
system_disk_description: Optional[pulumi.Input[str]] = None,
system_disk_name: Optional[pulumi.Input[str]] = None,
system_disk_performance_level: Optional[pulumi.Input[str]] = None,
system_disk_size: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
user_data: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## Import
ESS scaling configuration can be imported using the id, e.g.
```sh
$ pulumi import alicloud:ess/scalingConfiguration:ScalingConfiguration example asg-abc123456
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] active: Whether active current scaling configuration in the specified scaling group. Default to `false`.
:param pulumi.Input[str] credit_specification: Performance mode of the t5 burstable instance. Valid values: 'Standard', 'Unlimited'.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingConfigurationDataDiskArgs']]]] data_disks: DataDisk mappings to attach to ecs instance. See Block datadisk below for details.
:param pulumi.Input[bool] enable: Whether enable the specified scaling group(make it active) to which the current scaling configuration belongs.
:param pulumi.Input[bool] force_delete: The last scaling configuration will be deleted forcibly with deleting its scaling group. Default to false.
:param pulumi.Input[str] image_id: ID of an image file, indicating the image resource selected when an instance is enabled.
:param pulumi.Input[str] image_name: Name of an image file, indicating the image resource selected when an instance is enabled.
:param pulumi.Input[Sequence[pulumi.Input[str]]] instance_ids: It has been deprecated from version 1.6.0. New resource `ess.Attachment` replaces it.
:param pulumi.Input[str] instance_name: Name of an ECS instance. Default to "ESS-Instance". It is valid from version 1.7.1.
:param pulumi.Input[str] instance_type: Resource type of an ECS instance.
:param pulumi.Input[Sequence[pulumi.Input[str]]] instance_types: Resource types of an ECS instance.
:param pulumi.Input[str] internet_charge_type: Network billing type, Values: PayByBandwidth or PayByTraffic. Default to `PayByBandwidth`.
:param pulumi.Input[int] internet_max_bandwidth_in: Maximum incoming bandwidth from the public network, measured in Mbps (Mega bit per second). The value range is [1,200].
:param pulumi.Input[int] internet_max_bandwidth_out: Maximum outgoing bandwidth from the public network, measured in Mbps (Mega bit per second). The value range for PayByBandwidth is [0,100].
:param pulumi.Input[str] io_optimized: It has been deprecated on instance resource. All the launched alicloud instances will be I/O optimized.
:param pulumi.Input[bool] is_outdated: Whether to use outdated instance type. Default to false.
:param pulumi.Input[str] key_name: The name of key pair that can login ECS instance successfully without password. If it is specified, the password would be invalid.
:param pulumi.Input[str] kms_encrypted_password: An KMS encrypts password used to a db account. If the `password` is filled in, this field will be ignored.
:param pulumi.Input[Mapping[str, Any]] kms_encryption_context: An KMS encryption context used to decrypt `kms_encrypted_password` before creating or updating a db account with `kms_encrypted_password`. See [Encryption Context](https://www.alibabacloud.com/help/doc-detail/42975.htm). It is valid when `kms_encrypted_password` is set.
:param pulumi.Input[bool] override: Indicates whether to overwrite the existing data. Default to false.
:param pulumi.Input[str] password: The password of the ECS instance. The password must be 8 to 30 characters in length. It must contains at least three of the following character types: uppercase letters, lowercase letters, digits, and special characters. Special characters include `() ~!@#$%^&*-_+=\|{}[]:;'<>,.?/`, The password of Windows-based instances cannot start with a forward slash (/).
:param pulumi.Input[bool] password_inherit: Specifies whether to use the password that is predefined in the image. If the PasswordInherit parameter is set to true, the `password` and `kms_encrypted_password` will be ignored. You must ensure that the selected image has a password configured.
:param pulumi.Input[str] resource_group_id: ID of resource group.
:param pulumi.Input[str] role_name: Instance RAM role name. The name is provided and maintained by RAM. You can use `ram.Role` to create a new one.
:param pulumi.Input[str] scaling_configuration_name: Name shown for the scheduled task. which must contain 2-64 characters (English or Chinese), starting with numbers, English letters or Chinese characters, and can contain number, underscores `_`, hypens `-`, and decimal point `.`. If this parameter value is not specified, the default value is ScalingConfigurationId.
:param pulumi.Input[str] scaling_group_id: ID of the scaling group of a scaling configuration.
:param pulumi.Input[str] security_group_id: ID of the security group used to create new instance. It is conflict with `security_group_ids`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] security_group_ids: List IDs of the security group used to create new instances. It is conflict with `security_group_id`.
:param pulumi.Input[str] substitute: The another scaling configuration which will be active automatically and replace current configuration when setting `active` to 'false'. It is invalid when `active` is 'true'.
:param pulumi.Input[str] system_disk_auto_snapshot_policy_id: The id of auto snapshot policy for system disk.
:param pulumi.Input[str] system_disk_category: Category of the system disk. The parameter value options are `ephemeral_ssd`, `cloud_efficiency`, `cloud_ssd`, `cloud_essd` and `cloud`. `cloud` only is used to some no I/O optimized instance. Default to `cloud_efficiency`.
:param pulumi.Input[str] system_disk_description: The description of the system disk. The description must be 2 to 256 characters in length and cannot start with http:// or https://.
:param pulumi.Input[str] system_disk_name: The name of the system disk. It must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-). Default value: null.
:param pulumi.Input[str] system_disk_performance_level: The performance level of the ESSD used as the system disk.
:param pulumi.Input[int] system_disk_size: Size of system disk, in GiB. Optional values: cloud: 20-500, cloud_efficiency: 20-500, cloud_ssd: 20-500, ephemeral_ssd: 20-500 The default value is max{40, ImageSize}. If this parameter is set, the system disk size must be greater than or equal to max{40, ImageSize}.
:param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource. It will be applied for ECS instances finally.
- Key: It can be up to 64 characters in length. It cannot begin with "aliyun", "http://", or "https://". It cannot be a null string.
- Value: It can be up to 128 characters in length. It cannot begin with "aliyun", "http://", or "https://" It can be a null string.
:param pulumi.Input[str] user_data: User-defined data to customize the startup behaviors of the ECS instance and to pass data into the ECS instance.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ScalingConfigurationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Import
ESS scaling configuration can be imported using the id, e.g.
```sh
$ pulumi import alicloud:ess/scalingConfiguration:ScalingConfiguration example asg-abc123456
```
:param str resource_name: The name of the resource.
:param ScalingConfigurationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ScalingConfigurationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
active: Optional[pulumi.Input[bool]] = None,
credit_specification: Optional[pulumi.Input[str]] = None,
data_disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingConfigurationDataDiskArgs']]]]] = None,
enable: Optional[pulumi.Input[bool]] = None,
force_delete: Optional[pulumi.Input[bool]] = None,
image_id: Optional[pulumi.Input[str]] = None,
image_name: Optional[pulumi.Input[str]] = None,
instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
instance_name: Optional[pulumi.Input[str]] = None,
instance_type: Optional[pulumi.Input[str]] = None,
instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
internet_charge_type: Optional[pulumi.Input[str]] = None,
internet_max_bandwidth_in: Optional[pulumi.Input[int]] = None,
internet_max_bandwidth_out: Optional[pulumi.Input[int]] = None,
io_optimized: Optional[pulumi.Input[str]] = None,
is_outdated: Optional[pulumi.Input[bool]] = None,
key_name: Optional[pulumi.Input[str]] = None,
kms_encrypted_password: Optional[pulumi.Input[str]] = None,
kms_encryption_context: Optional[pulumi.Input[Mapping[str, Any]]] = None,
override: Optional[pulumi.Input[bool]] = None,
password: Optional[pulumi.Input[str]] = None,
password_inherit: Optional[pulumi.Input[bool]] = None,
resource_group_id: Optional[pulumi.Input[str]] = None,
role_name: Optional[pulumi.Input[str]] = None,
scaling_configuration_name: Optional[pulumi.Input[str]] = None,
scaling_group_id: Optional[pulumi.Input[str]] = None,
security_group_id: Optional[pulumi.Input[str]] = None,
security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
substitute: Optional[pulumi.Input[str]] = None,
system_disk_auto_snapshot_policy_id: Optional[pulumi.Input[str]] = None,
system_disk_category: Optional[pulumi.Input[str]] = None,
system_disk_description: Optional[pulumi.Input[str]] = None,
system_disk_name: Optional[pulumi.Input[str]] = None,
system_disk_performance_level: Optional[pulumi.Input[str]] = None,
system_disk_size: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
user_data: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ScalingConfigurationArgs.__new__(ScalingConfigurationArgs)
__props__.__dict__["active"] = active
__props__.__dict__["credit_specification"] = credit_specification
__props__.__dict__["data_disks"] = data_disks
__props__.__dict__["enable"] = enable
__props__.__dict__["force_delete"] = force_delete
__props__.__dict__["image_id"] = image_id
__props__.__dict__["image_name"] = image_name
if instance_ids is not None and not opts.urn:
warnings.warn("""Field 'instance_ids' has been deprecated from provider version 1.6.0. New resource 'alicloud_ess_attachment' replaces it.""", DeprecationWarning)
pulumi.log.warn("""instance_ids is deprecated: Field 'instance_ids' has been deprecated from provider version 1.6.0. New resource 'alicloud_ess_attachment' replaces it.""")
__props__.__dict__["instance_ids"] = instance_ids
__props__.__dict__["instance_name"] = instance_name
__props__.__dict__["instance_type"] = instance_type
__props__.__dict__["instance_types"] = instance_types
__props__.__dict__["internet_charge_type"] = internet_charge_type
__props__.__dict__["internet_max_bandwidth_in"] = internet_max_bandwidth_in
__props__.__dict__["internet_max_bandwidth_out"] = internet_max_bandwidth_out
if io_optimized is not None and not opts.urn:
warnings.warn("""Attribute io_optimized has been deprecated on instance resource. All the launched alicloud instances will be IO optimized. Suggest to remove it from your template.""", DeprecationWarning)
pulumi.log.warn("""io_optimized is deprecated: Attribute io_optimized has been deprecated on instance resource. All the launched alicloud instances will be IO optimized. Suggest to remove it from your template.""")
__props__.__dict__["io_optimized"] = io_optimized
__props__.__dict__["is_outdated"] = is_outdated
__props__.__dict__["key_name"] = key_name
__props__.__dict__["kms_encrypted_password"] = kms_encrypted_password
__props__.__dict__["kms_encryption_context"] = kms_encryption_context
__props__.__dict__["override"] = override
__props__.__dict__["password"] = password
__props__.__dict__["password_inherit"] = password_inherit
__props__.__dict__["resource_group_id"] = resource_group_id
__props__.__dict__["role_name"] = role_name
__props__.__dict__["scaling_configuration_name"] = scaling_configuration_name
if scaling_group_id is None and not opts.urn:
raise TypeError("Missing required property 'scaling_group_id'")
__props__.__dict__["scaling_group_id"] = scaling_group_id
__props__.__dict__["security_group_id"] = security_group_id
__props__.__dict__["security_group_ids"] = security_group_ids
__props__.__dict__["substitute"] = substitute
__props__.__dict__["system_disk_auto_snapshot_policy_id"] = system_disk_auto_snapshot_policy_id
__props__.__dict__["system_disk_category"] = system_disk_category
__props__.__dict__["system_disk_description"] = system_disk_description
__props__.__dict__["system_disk_name"] = system_disk_name
__props__.__dict__["system_disk_performance_level"] = system_disk_performance_level
__props__.__dict__["system_disk_size"] = system_disk_size
__props__.__dict__["tags"] = tags
__props__.__dict__["user_data"] = user_data
super(ScalingConfiguration, __self__).__init__(
'alicloud:ess/scalingConfiguration:ScalingConfiguration',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
active: Optional[pulumi.Input[bool]] = None,
credit_specification: Optional[pulumi.Input[str]] = None,
data_disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingConfigurationDataDiskArgs']]]]] = None,
enable: Optional[pulumi.Input[bool]] = None,
force_delete: Optional[pulumi.Input[bool]] = None,
image_id: Optional[pulumi.Input[str]] = None,
image_name: Optional[pulumi.Input[str]] = None,
instance_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
instance_name: Optional[pulumi.Input[str]] = None,
instance_type: Optional[pulumi.Input[str]] = None,
instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
internet_charge_type: Optional[pulumi.Input[str]] = None,
internet_max_bandwidth_in: Optional[pulumi.Input[int]] = None,
internet_max_bandwidth_out: Optional[pulumi.Input[int]] = None,
io_optimized: Optional[pulumi.Input[str]] = None,
is_outdated: Optional[pulumi.Input[bool]] = None,
key_name: Optional[pulumi.Input[str]] = None,
kms_encrypted_password: Optional[pulumi.Input[str]] = None,
kms_encryption_context: Optional[pulumi.Input[Mapping[str, Any]]] = None,
override: Optional[pulumi.Input[bool]] = None,
password: Optional[pulumi.Input[str]] = None,
password_inherit: Optional[pulumi.Input[bool]] = None,
resource_group_id: Optional[pulumi.Input[str]] = None,
role_name: Optional[pulumi.Input[str]] = None,
scaling_configuration_name: Optional[pulumi.Input[str]] = None,
scaling_group_id: Optional[pulumi.Input[str]] = None,
security_group_id: Optional[pulumi.Input[str]] = None,
security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
substitute: Optional[pulumi.Input[str]] = None,
system_disk_auto_snapshot_policy_id: Optional[pulumi.Input[str]] = None,
system_disk_category: Optional[pulumi.Input[str]] = None,
system_disk_description: Optional[pulumi.Input[str]] = None,
system_disk_name: Optional[pulumi.Input[str]] = None,
system_disk_performance_level: Optional[pulumi.Input[str]] = None,
system_disk_size: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
user_data: Optional[pulumi.Input[str]] = None) -> 'ScalingConfiguration':
"""
Get an existing ScalingConfiguration resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] active: Whether active current scaling configuration in the specified scaling group. Default to `false`.
:param pulumi.Input[str] credit_specification: Performance mode of the t5 burstable instance. Valid values: 'Standard', 'Unlimited'.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScalingConfigurationDataDiskArgs']]]] data_disks: DataDisk mappings to attach to ecs instance. See Block datadisk below for details.
:param pulumi.Input[bool] enable: Whether enable the specified scaling group(make it active) to which the current scaling configuration belongs.
:param pulumi.Input[bool] force_delete: The last scaling configuration will be deleted forcibly with deleting its scaling group. Default to false.
:param pulumi.Input[str] image_id: ID of an image file, indicating the image resource selected when an instance is enabled.
:param pulumi.Input[str] image_name: Name of an image file, indicating the image resource selected when an instance is enabled.
:param pulumi.Input[Sequence[pulumi.Input[str]]] instance_ids: It has been deprecated from version 1.6.0. New resource `ess.Attachment` replaces it.
:param pulumi.Input[str] instance_name: Name of an ECS instance. Default to "ESS-Instance". It is valid from version 1.7.1.
:param pulumi.Input[str] instance_type: Resource type of an ECS instance.
:param pulumi.Input[Sequence[pulumi.Input[str]]] instance_types: Resource types of an ECS instance.
:param pulumi.Input[str] internet_charge_type: Network billing type, Values: PayByBandwidth or PayByTraffic. Default to `PayByBandwidth`.
:param pulumi.Input[int] internet_max_bandwidth_in: Maximum incoming bandwidth from the public network, measured in Mbps (Mega bit per second). The value range is [1,200].
:param pulumi.Input[int] internet_max_bandwidth_out: Maximum outgoing bandwidth from the public network, measured in Mbps (Mega bit per second). The value range for PayByBandwidth is [0,100].
:param pulumi.Input[str] io_optimized: It has been deprecated on instance resource. All the launched alicloud instances will be I/O optimized.
:param pulumi.Input[bool] is_outdated: Whether to use outdated instance type. Default to false.
:param pulumi.Input[str] key_name: The name of key pair that can login ECS instance successfully without password. If it is specified, the password would be invalid.
:param pulumi.Input[str] kms_encrypted_password: An KMS encrypts password used to a db account. If the `password` is filled in, this field will be ignored.
:param pulumi.Input[Mapping[str, Any]] kms_encryption_context: An KMS encryption context used to decrypt `kms_encrypted_password` before creating or updating a db account with `kms_encrypted_password`. See [Encryption Context](https://www.alibabacloud.com/help/doc-detail/42975.htm). It is valid when `kms_encrypted_password` is set.
:param pulumi.Input[bool] override: Indicates whether to overwrite the existing data. Default to false.
:param pulumi.Input[str] password: The password of the ECS instance. The password must be 8 to 30 characters in length. It must contains at least three of the following character types: uppercase letters, lowercase letters, digits, and special characters. Special characters include `() ~!@#$%^&*-_+=\|{}[]:;'<>,.?/`, The password of Windows-based instances cannot start with a forward slash (/).
:param pulumi.Input[bool] password_inherit: Specifies whether to use the password that is predefined in the image. If the PasswordInherit parameter is set to true, the `password` and `kms_encrypted_password` will be ignored. You must ensure that the selected image has a password configured.
:param pulumi.Input[str] resource_group_id: ID of resource group.
:param pulumi.Input[str] role_name: Instance RAM role name. The name is provided and maintained by RAM. You can use `ram.Role` to create a new one.
:param pulumi.Input[str] scaling_configuration_name: Name shown for the scheduled task. which must contain 2-64 characters (English or Chinese), starting with numbers, English letters or Chinese characters, and can contain number, underscores `_`, hypens `-`, and decimal point `.`. If this parameter value is not specified, the default value is ScalingConfigurationId.
:param pulumi.Input[str] scaling_group_id: ID of the scaling group of a scaling configuration.
:param pulumi.Input[str] security_group_id: ID of the security group used to create new instance. It is conflict with `security_group_ids`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] security_group_ids: List IDs of the security group used to create new instances. It is conflict with `security_group_id`.
:param pulumi.Input[str] substitute: The another scaling configuration which will be active automatically and replace current configuration when setting `active` to 'false'. It is invalid when `active` is 'true'.
:param pulumi.Input[str] system_disk_auto_snapshot_policy_id: The id of auto snapshot policy for system disk.
:param pulumi.Input[str] system_disk_category: Category of the system disk. The parameter value options are `ephemeral_ssd`, `cloud_efficiency`, `cloud_ssd`, `cloud_essd` and `cloud`. `cloud` only is used to some no I/O optimized instance. Default to `cloud_efficiency`.
:param pulumi.Input[str] system_disk_description: The description of the system disk. The description must be 2 to 256 characters in length and cannot start with http:// or https://.
:param pulumi.Input[str] system_disk_name: The name of the system disk. It must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-). Default value: null.
:param pulumi.Input[str] system_disk_performance_level: The performance level of the ESSD used as the system disk.
:param pulumi.Input[int] system_disk_size: Size of system disk, in GiB. Optional values: cloud: 20-500, cloud_efficiency: 20-500, cloud_ssd: 20-500, ephemeral_ssd: 20-500 The default value is max{40, ImageSize}. If this parameter is set, the system disk size must be greater than or equal to max{40, ImageSize}.
:param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource. It will be applied for ECS instances finally.
- Key: It can be up to 64 characters in length. It cannot begin with "aliyun", "http://", or "https://". It cannot be a null string.
- Value: It can be up to 128 characters in length. It cannot begin with "aliyun", "http://", or "https://" It can be a null string.
:param pulumi.Input[str] user_data: User-defined data to customize the startup behaviors of the ECS instance and to pass data into the ECS instance.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ScalingConfigurationState.__new__(_ScalingConfigurationState)
__props__.__dict__["active"] = active
__props__.__dict__["credit_specification"] = credit_specification
__props__.__dict__["data_disks"] = data_disks
__props__.__dict__["enable"] = enable
__props__.__dict__["force_delete"] = force_delete
__props__.__dict__["image_id"] = image_id
__props__.__dict__["image_name"] = image_name
__props__.__dict__["instance_ids"] = instance_ids
__props__.__dict__["instance_name"] = instance_name
__props__.__dict__["instance_type"] = instance_type
__props__.__dict__["instance_types"] = instance_types
__props__.__dict__["internet_charge_type"] = internet_charge_type
__props__.__dict__["internet_max_bandwidth_in"] = internet_max_bandwidth_in
__props__.__dict__["internet_max_bandwidth_out"] = internet_max_bandwidth_out
__props__.__dict__["io_optimized"] = io_optimized
__props__.__dict__["is_outdated"] = is_outdated
__props__.__dict__["key_name"] = key_name
__props__.__dict__["kms_encrypted_password"] = kms_encrypted_password
__props__.__dict__["kms_encryption_context"] = kms_encryption_context
__props__.__dict__["override"] = override
__props__.__dict__["password"] = password
__props__.__dict__["password_inherit"] = password_inherit
__props__.__dict__["resource_group_id"] = resource_group_id
__props__.__dict__["role_name"] = role_name
__props__.__dict__["scaling_configuration_name"] = scaling_configuration_name
__props__.__dict__["scaling_group_id"] = scaling_group_id
__props__.__dict__["security_group_id"] = security_group_id
__props__.__dict__["security_group_ids"] = security_group_ids
__props__.__dict__["substitute"] = substitute
__props__.__dict__["system_disk_auto_snapshot_policy_id"] = system_disk_auto_snapshot_policy_id
__props__.__dict__["system_disk_category"] = system_disk_category
__props__.__dict__["system_disk_description"] = system_disk_description
__props__.__dict__["system_disk_name"] = system_disk_name
__props__.__dict__["system_disk_performance_level"] = system_disk_performance_level
__props__.__dict__["system_disk_size"] = system_disk_size
__props__.__dict__["tags"] = tags
__props__.__dict__["user_data"] = user_data
return ScalingConfiguration(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def active(self) -> pulumi.Output[bool]:
"""
Whether active current scaling configuration in the specified scaling group. Default to `false`.
"""
return pulumi.get(self, "active")
@property
@pulumi.getter(name="creditSpecification")
def credit_specification(self) -> pulumi.Output[Optional[str]]:
"""
Performance mode of the t5 burstable instance. Valid values: 'Standard', 'Unlimited'.
"""
return pulumi.get(self, "credit_specification")
@property
@pulumi.getter(name="dataDisks")
def data_disks(self) -> pulumi.Output[Optional[Sequence['outputs.ScalingConfigurationDataDisk']]]:
"""
DataDisk mappings to attach to ecs instance. See Block datadisk below for details.
"""
return pulumi.get(self, "data_disks")
@property
@pulumi.getter
def enable(self) -> pulumi.Output[Optional[bool]]:
"""
Whether enable the specified scaling group(make it active) to which the current scaling configuration belongs.
"""
return pulumi.get(self, "enable")
@property
@pulumi.getter(name="forceDelete")
def force_delete(self) -> pulumi.Output[Optional[bool]]:
"""
The last scaling configuration will be deleted forcibly with deleting its scaling group. Default to false.
"""
return pulumi.get(self, "force_delete")
@property
@pulumi.getter(name="imageId")
def image_id(self) -> pulumi.Output[Optional[str]]:
"""
ID of an image file, indicating the image resource selected when an instance is enabled.
"""
return pulumi.get(self, "image_id")
@property
@pulumi.getter(name="imageName")
def image_name(self) -> pulumi.Output[Optional[str]]:
"""
Name of an image file, indicating the image resource selected when an instance is enabled.
"""
return pulumi.get(self, "image_name")
@property
@pulumi.getter(name="instanceIds")
def instance_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
It has been deprecated from version 1.6.0. New resource `ess.Attachment` replaces it.
"""
return pulumi.get(self, "instance_ids")
@property
@pulumi.getter(name="instanceName")
def instance_name(self) -> pulumi.Output[Optional[str]]:
"""
Name of an ECS instance. Default to "ESS-Instance". It is valid from version 1.7.1.
"""
return pulumi.get(self, "instance_name")
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> pulumi.Output[Optional[str]]:
"""
Resource type of an ECS instance.
"""
return pulumi.get(self, "instance_type")
@property
@pulumi.getter(name="instanceTypes")
def instance_types(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Resource types of an ECS instance.
"""
return pulumi.get(self, "instance_types")
@property
@pulumi.getter(name="internetChargeType")
def internet_charge_type(self) -> pulumi.Output[Optional[str]]:
"""
Network billing type, Values: PayByBandwidth or PayByTraffic. Default to `PayByBandwidth`.
"""
return pulumi.get(self, "internet_charge_type")
@property
@pulumi.getter(name="internetMaxBandwidthIn")
def internet_max_bandwidth_in(self) -> pulumi.Output[int]:
"""
Maximum incoming bandwidth from the public network, measured in Mbps (Mega bit per second). The value range is [1,200].
"""
return pulumi.get(self, "internet_max_bandwidth_in")
@property
@pulumi.getter(name="internetMaxBandwidthOut")
def internet_max_bandwidth_out(self) -> pulumi.Output[Optional[int]]:
"""
Maximum outgoing bandwidth from the public network, measured in Mbps (Mega bit per second). The value range for PayByBandwidth is [0,100].
"""
return pulumi.get(self, "internet_max_bandwidth_out")
@property
@pulumi.getter(name="ioOptimized")
def io_optimized(self) -> pulumi.Output[Optional[str]]:
"""
It has been deprecated on instance resource. All the launched alicloud instances will be I/O optimized.
"""
return pulumi.get(self, "io_optimized")
@property
@pulumi.getter(name="isOutdated")
def is_outdated(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to use outdated instance type. Default to false.
"""
return pulumi.get(self, "is_outdated")
@property
@pulumi.getter(name="keyName")
def key_name(self) -> pulumi.Output[Optional[str]]:
"""
The name of key pair that can login ECS instance successfully without password. If it is specified, the password would be invalid.
"""
return pulumi.get(self, "key_name")
@property
@pulumi.getter(name="kmsEncryptedPassword")
def kms_encrypted_password(self) -> pulumi.Output[Optional[str]]:
"""
An KMS encrypts password used to a db account. If the `password` is filled in, this field will be ignored.
"""
return pulumi.get(self, "kms_encrypted_password")
@property
@pulumi.getter(name="kmsEncryptionContext")
def kms_encryption_context(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
"""
An KMS encryption context used to decrypt `kms_encrypted_password` before creating or updating a db account with `kms_encrypted_password`. See [Encryption Context](https://www.alibabacloud.com/help/doc-detail/42975.htm). It is valid when `kms_encrypted_password` is set.
"""
return pulumi.get(self, "kms_encryption_context")
@property
@pulumi.getter
def override(self) -> pulumi.Output[Optional[bool]]:
"""
Indicates whether to overwrite the existing data. Default to false.
"""
return pulumi.get(self, "override")
@property
@pulumi.getter
def password(self) -> pulumi.Output[Optional[str]]:
"""
The password of the ECS instance. The password must be 8 to 30 characters in length. It must contains at least three of the following character types: uppercase letters, lowercase letters, digits, and special characters. Special characters include `() ~!@#$%^&*-_+=\|{}[]:;'<>,.?/`, The password of Windows-based instances cannot start with a forward slash (/).
"""
return pulumi.get(self, "password")
@property
@pulumi.getter(name="passwordInherit")
def password_inherit(self) -> pulumi.Output[Optional[bool]]:
"""
Specifies whether to use the password that is predefined in the image. If the PasswordInherit parameter is set to true, the `password` and `kms_encrypted_password` will be ignored. You must ensure that the selected image has a password configured.
"""
return pulumi.get(self, "password_inherit")
@property
@pulumi.getter(name="resourceGroupId")
def resource_group_id(self) -> pulumi.Output[Optional[str]]:
"""
ID of resource group.
"""
return pulumi.get(self, "resource_group_id")
@property
@pulumi.getter(name="roleName")
def role_name(self) -> pulumi.Output[Optional[str]]:
"""
Instance RAM role name. The name is provided and maintained by RAM. You can use `ram.Role` to create a new one.
"""
return pulumi.get(self, "role_name")
    @property
    @pulumi.getter(name="scalingConfigurationName")
    def scaling_configuration_name(self) -> pulumi.Output[str]:
        """
        Name shown for the scaling configuration (docstring previously said "scheduled task" — a copy-paste from the scheduled-task resource), which must contain 2-64 characters (English or Chinese), starting with numbers, English letters or Chinese characters, and can contain number, underscores `_`, hyphens `-`, and decimal point `.`. If this parameter value is not specified, the default value is ScalingConfigurationId.
        """
        return pulumi.get(self, "scaling_configuration_name")
@property
@pulumi.getter(name="scalingGroupId")
def scaling_group_id(self) -> pulumi.Output[str]:
"""
ID of the scaling group of a scaling configuration.
"""
return pulumi.get(self, "scaling_group_id")
@property
@pulumi.getter(name="securityGroupId")
def security_group_id(self) -> pulumi.Output[Optional[str]]:
"""
ID of the security group used to create new instance. It is conflict with `security_group_ids`.
"""
return pulumi.get(self, "security_group_id")
@property
@pulumi.getter(name="securityGroupIds")
def security_group_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List IDs of the security group used to create new instances. It is conflict with `security_group_id`.
"""
return pulumi.get(self, "security_group_ids")
@property
@pulumi.getter
def substitute(self) -> pulumi.Output[str]:
"""
The another scaling configuration which will be active automatically and replace current configuration when setting `active` to 'false'. It is invalid when `active` is 'true'.
"""
return pulumi.get(self, "substitute")
@property
@pulumi.getter(name="systemDiskAutoSnapshotPolicyId")
def system_disk_auto_snapshot_policy_id(self) -> pulumi.Output[Optional[str]]:
"""
The id of auto snapshot policy for system disk.
"""
return pulumi.get(self, "system_disk_auto_snapshot_policy_id")
@property
@pulumi.getter(name="systemDiskCategory")
def system_disk_category(self) -> pulumi.Output[Optional[str]]:
"""
Category of the system disk. The parameter value options are `ephemeral_ssd`, `cloud_efficiency`, `cloud_ssd`, `cloud_essd` and `cloud`. `cloud` only is used to some no I/O optimized instance. Default to `cloud_efficiency`.
"""
return pulumi.get(self, "system_disk_category")
@property
@pulumi.getter(name="systemDiskDescription")
def system_disk_description(self) -> pulumi.Output[Optional[str]]:
"""
The description of the system disk. The description must be 2 to 256 characters in length and cannot start with http:// or https://.
"""
return pulumi.get(self, "system_disk_description")
@property
@pulumi.getter(name="systemDiskName")
def system_disk_name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the system disk. It must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-). Default value: null.
"""
return pulumi.get(self, "system_disk_name")
@property
@pulumi.getter(name="systemDiskPerformanceLevel")
def system_disk_performance_level(self) -> pulumi.Output[Optional[str]]:
"""
The performance level of the ESSD used as the system disk.
"""
return pulumi.get(self, "system_disk_performance_level")
@property
@pulumi.getter(name="systemDiskSize")
def system_disk_size(self) -> pulumi.Output[Optional[int]]:
"""
Size of system disk, in GiB. Optional values: cloud: 20-500, cloud_efficiency: 20-500, cloud_ssd: 20-500, ephemeral_ssd: 20-500 The default value is max{40, ImageSize}. If this parameter is set, the system disk size must be greater than or equal to max{40, ImageSize}.
"""
return pulumi.get(self, "system_disk_size")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
"""
A mapping of tags to assign to the resource. It will be applied for ECS instances finally.
- Key: It can be up to 64 characters in length. It cannot begin with "aliyun", "http://", or "https://". It cannot be a null string.
- Value: It can be up to 128 characters in length. It cannot begin with "aliyun", "http://", or "https://" It can be a null string.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="userData")
def user_data(self) -> pulumi.Output[Optional[str]]:
"""
User-defined data to customize the startup behaviors of the ECS instance and to pass data into the ECS instance.
"""
return pulumi.get(self, "user_data")
| 1.460938 | 1 |
utils/Plot.py | benjimor/LiLa | 1 | 12772533 | <filename>utils/Plot.py
import plotly.offline as py
import plotly.graph_objs as go
import numpy as np
def draw(data_inf, data_sup, data_med, structure, order, limit, measure, nb_exec, aggregate):
    """Plot the infimum/supremum/median measurement curves into an HTML file.

    :param data_inf: dict mapping run index -> list of per-node measurements
        for the infimum series (key ``0`` must exist; all runs are assumed to
        have the same length -- TODO confirm with callers).
    :param data_sup: same shape, supremum series.
    :param data_med: same shape, median series.
    :param structure, order, limit, measure, nb_exec, aggregate: experiment
        parameters; they only affect labels and the output file name.

    Each series is downsampled with ``_aggregate``, averaged across runs and
    written (without opening a browser) to
    ``expermiental_results/<structure>-<order>-<limit>-<measure>-x<nb_exec>-agg<aggregate>.html``.
    """
    # The three traces only differ by source data, legend name and line style,
    # so build them through a single helper instead of three copies.
    trace_infimum = _series_trace(
        data_inf, aggregate, 'Infimum',
        dict(color=('rgb(169, 203, 160)'), width=2, dash='dash'))
    trace_supremum = _series_trace(
        data_sup, aggregate, 'Supremum',
        dict(color=('rgb(60, 84, 206)'), width=2, dash='dot'))
    trace_median = _series_trace(
        data_med, aggregate, 'Median',
        dict(color=('rgb(205, 12, 24)'), width=3))

    data = [trace_infimum, trace_supremum, trace_median]

    if measure == 'time':
        y_title = 'Execution time (ms)'
    else:
        y_title = 'Number of visited nodes'

    layout = dict(
        xaxis=dict(title='Number of nodes'),
        yaxis=dict(title=y_title)
    )
    fig = dict(data=data, layout=layout)
    # NOTE: 'expermiental_results' (sic) kept as-is -- it is the on-disk
    # directory name callers/readers already rely on.
    py.plot(fig, filename='expermiental_results/{}-{}-{}-{}-x{}-agg{}.html'.format(structure, order, limit, measure, nb_exec, aggregate), auto_open=False)


def _series_trace(data, aggregate, name, line_style):
    """Average the aggregated per-run series in *data* into one Scatter trace."""
    per_run = [_aggregate(series, aggregate) for series in data.itervalues()]
    averaged = list(np.average(per_run, axis=0))
    return go.Scatter(
        x=range(0, len(data[0]) + 1, aggregate),
        y=averaged,
        name=name,
        line=line_style,
    )
def _aggregate(value, aggregate):
if aggregate < 2:
return value
returned_value = []
for chunk in list(_chunks(value, aggregate)):
returned_value.append(np.mean(chunk))
returned_value.insert(0, 0)
return returned_value
def _chunks(l, n):
for i in xrange(1, len(l), n):
yield l[i:i + n]
| 2.453125 | 2 |
networkapi/api_network/tests/v3/sanity/allocate/test_network_v6.py | vinicius-marinho/GloboNetworkAPI | 1 | 12772534 | # -*- coding: utf-8 -*-
import json
from itertools import izip
from django.test.client import Client
from networkapi.test.test_case import NetworkApiTestCase
from networkapi.util.geral import prepare_url
fixtures_base_path = 'networkapi/api_network/fixtures/integration/%s'
class NetworksIntegrationV6TestCase(NetworkApiTestCase):
fixtures = [
'networkapi/system/fixtures/initial_variables.json',
'networkapi/usuario/fixtures/initial_usuario.json',
'networkapi/grupo/fixtures/initial_ugrupo.json',
'networkapi/usuario/fixtures/initial_usuariogrupo.json',
'networkapi/api_ogp/fixtures/initial_objecttype.json',
'networkapi/api_ogp/fixtures/initial_objectgrouppermissiongeneral.json',
'networkapi/grupo/fixtures/initial_permissions.json',
'networkapi/grupo/fixtures/initial_permissoes_administrativas.json',
'networkapi/vlan/fixtures/initial_tipo_rede.json',
'networkapi/filter/fixtures/initial_filter.json',
'networkapi/filterequiptype/fixtures/initial_filterequiptype.json',
'networkapi/equipamento/fixtures/initial_tipo_equip.json',
'networkapi/equipamento/fixtures/initial_equip_marca.json',
'networkapi/equipamento/fixtures/initial_equip_model.json',
fixtures_base_path % 'initial_vrf.json',
fixtures_base_path % 'initial_environment_dc.json',
fixtures_base_path % 'initial_environment_envlog.json',
fixtures_base_path % 'initial_environment_gl3.json',
fixtures_base_path % 'initial_environment.json',
fixtures_base_path % 'initial_ipconfig.json',
fixtures_base_path % 'initial_config_environment.json',
fixtures_base_path % 'initial_equipments.json',
fixtures_base_path % 'initial_equipments_env.json',
fixtures_base_path % 'initial_vlan.json',
]
    def setUp(self):
        # Fresh Django test client and the HTTP Authorization header for the
        # fixture user 'test'; both are used by every request helper below.
        self.client = Client()
        self.authorization = self.get_http_authorization('test')
    def tearDown(self):
        # No per-test cleanup required.
        pass
    def test_create_networkv6_by_zero(self):
        """Integration test: create environments, vlans, eqpts and networks v6.

        ##################
        Starting setup:
         - environment A:
            - eqpt 1, 2, 3
            - vrf 1
            - starting vlans 1, 2, 3
         - environment B:
            - eqpt 2, 4, 5
            - vrf 1
            - starting vlans 4, 5, 6, 7, 8, 9
         - environment C:
            - eqpt 5, 6
            - vrf 2
            - starting vlans 10, 11
         - environment D:
            - eqpt 7
            - vrf 1
            - starting vlans 1
        ##################

        ##################
        Starting networks:
            environment A:
                Nothing
            environment B:
                fdbe:bebe:bebe:1201:0000:0000:0000:0000/64
                fdfdf8:f53e:61e4::18/65
                fdbe:bebe:bebe:1203:0000:0000:0000:0000/64
            environment C:
                Nothing
            environment D:
                Nothing
        ##################

        ##################
        Inserting networks:
         - environment B: fdfd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/64
         - environment C: fdbe:bebe:bebe:1200:0000:0000:0000:0000/65
         - environment C using prefix 24: fdfdf8:f53e:61e4::18/64
         - environment A: fdfc00:db20:35b:7399::5/65
         - environment A: fdbe:bebe:bebe:1204:0000:0000:0000:0000/65
         - environment B: fdbe:bebe:bebe:1205:0000:0000:0000:0000/64
         - environment A: fdbe:bebe:bebe:1204:8000:0000:0000:0000/65
         - environment D: fdbe:bebe:bebe:1200:0000:0000:0000:0000/64
        ##################
        """

        # Creates networks with explicitly supplied octets.
        self.create_netv6_with_octs()

        # Creates networks letting the API allocate octets (with/without an
        # explicit prefix).
        self.create_netv6_without_octs()
def search_all_vlans(self, ids_env):
search_vlan = {
'start_record': 0,
'end_record': 100,
'asorting_cols': [],
'searchable_columns': [],
'extends_search': [
{'ambiente': id_env} for id_env in ids_env
]
}
url = '/api/v3/vlan/'
response = self.client.get(
prepare_url(url, search=search_vlan, fields=['id']),
HTTP_AUTHORIZATION=self.authorization
)
vlans = response.data['vlans']
ids_vlans = [id_vlan['id'] for id_vlan in vlans]
return ids_vlans
def create_netv6_with_octs(self):
"""Creates networks v6 using first vlan."""
networks = [{
'block1': 'fdbe',
'block2': 'bebe',
'block3': 'bebe',
'block4': '1201',
'block5': '0000',
'block6': '0000',
'block7': '0000',
'block8': '0000',
'prefix': 64,
'mask1': 'ffff',
'mask2': 'ffff',
'mask3': 'ffff',
'mask4': 'ffff',
'mask5': '0000',
'mask6': '0000',
'mask7': '0000',
'mask8': '0000',
'env': 3
}, {
'block1': 'fdbe',
'block2': 'bebe',
'block3': 'bebe',
'block4': '1202',
'block5': '0000',
'block6': '0000',
'block7': '0000',
'block8': '0000',
'prefix': 65,
'mask1': 'ffff',
'mask2': 'ffff',
'mask3': 'ffff',
'mask4': 'ffff',
'mask5': '8000',
'mask6': '0000',
'mask7': '0000',
'mask8': '0000',
'env': 3
}, {
'block1': 'fdbe',
'block2': 'bebe',
'block3': 'bebe',
'block4': '1203',
'block5': '0000',
'block6': '0000',
'block7': '0000',
'block8': '0000',
'prefix': 64,
'mask1': 'ffff',
'mask2': 'ffff',
'mask3': 'ffff',
'mask4': 'ffff',
'mask5': '0000',
'mask6': '0000',
'mask7': '0000',
'mask8': '0000',
'env': 3
}]
fields = [
'block1',
'block2',
'block3',
'block4',
'block5',
'block6',
'block7',
'block8',
'prefix',
'mask1',
'mask2',
'mask3',
'mask4',
'mask5',
'mask6',
'mask7',
'mask8',
'vlan'
]
for network_send in networks:
# Get all vlans of environment
ids_vlans = self.search_all_vlans([network_send.get('env')])
del network_send['env']
# Creates networks v4
network_send['vlan'] = ids_vlans[0]
network = [{
'block1': network_send.get('block1'),
'block2': network_send.get('block2'),
'block3': network_send.get('block3'),
'block4': network_send.get('block4'),
'block5': network_send.get('block5'),
'block6': network_send.get('block6'),
'block7': network_send.get('block7'),
'block8': network_send.get('block8'),
'prefix': network_send.get('prefix'),
'vlan': network_send.get('vlan'),
'network_type': 3,
'environmentvip': None
}]
id_network = self.create_networkv6s(network)[0]['id']
# Get object created
url = '/api/v3/networkv6/%s/' % id_network
url = prepare_url(url, fields=fields)
response = self.client.get(
url, HTTP_AUTHORIZATION=self.authorization
)
# Verify if object is right
self.compare_values(
json.dumps(network_send, sort_keys=True),
json.dumps(response.data['networks'][0], sort_keys=True)
)
def create_networkv6s(self, netv6_dict):
response = self.client.post(
'/api/v3/networkv6/',
data=json.dumps({'networks': netv6_dict}),
content_type='application/json',
HTTP_AUTHORIZATION=self.authorization
)
return response.data
def create_netv6_without_octs(self):
networks = [
{
'prefix': None,
'env': 3,
'network_type': 3,
'environmentvip': None
},
{
'prefix': None,
'env': 4,
'network_type': 3,
'environmentvip': None
},
{
'prefix': 64,
'env': 4,
'network_type': 3,
'environmentvip': None
},
{
'prefix': None,
'env': 2,
'network_type': 3,
'environmentvip': None
},
{
'prefix': None,
'env': 2,
'network_type': 3,
'environmentvip': None
},
{
'prefix': None,
'env': 3,
'network_type': 3,
'environmentvip': None
},
{
'prefix': None,
'env': 2,
'network_type': 3,
'environmentvip': None
},
{
'prefix': None,
'env': 5,
'network_type': 3,
'environmentvip': None
}
]
expected_networks = [{
'block1': 'fdbe',
'block2': 'bebe',
'block3': 'bebe',
'block4': '1200',
'block5': '0000',
'block6': '0000',
'block7': '0000',
'block8': '0000',
'prefix': 64,
'mask1': 'ffff',
'mask2': 'ffff',
'mask3': 'ffff',
'mask4': 'ffff',
'mask5': '0000',
'mask6': '0000',
'mask7': '0000',
'mask8': '0000'
}, {
'block1': 'fdbe',
'block2': 'bebe',
'block3': 'bebe',
'block4': '1200',
'block5': '0000',
'block6': '0000',
'block7': '0000',
'block8': '0000',
'prefix': 65,
'mask1': 'ffff',
'mask2': 'ffff',
'mask3': 'ffff',
'mask4': 'ffff',
'mask5': '8000',
'mask6': '0000',
'mask7': '0000',
'mask8': '0000'
}, {
'block1': 'fdbe',
'block2': 'bebe',
'block3': 'bebe',
'block4': '1201',
'block5': '0000',
'block6': '0000',
'block7': '0000',
'block8': '0000',
'prefix': 64,
'mask1': 'ffff',
'mask2': 'ffff',
'mask3': 'ffff',
'mask4': 'ffff',
'mask5': '0000',
'mask6': '0000',
'mask7': '0000',
'mask8': '0000'
}, {
'block1': 'fdbe',
'block2': 'bebe',
'block3': 'bebe',
'block4': '1202',
'block5': '8000',
'block6': '0000',
'block7': '0000',
'block8': '0000',
'prefix': 65,
'mask1': 'ffff',
'mask2': 'ffff',
'mask3': 'ffff',
'mask4': 'ffff',
'mask5': '8000',
'mask6': '0000',
'mask7': '0000',
'mask8': '0000'
}, {
'block1': 'fdbe',
'block2': 'bebe',
'block3': 'bebe',
'block4': '1204',
'block5': '0000',
'block6': '0000',
'block7': '0000',
'block8': '0000',
'prefix': 65,
'mask1': 'ffff',
'mask2': 'ffff',
'mask3': 'ffff',
'mask4': 'ffff',
'mask5': '8000',
'mask6': '0000',
'mask7': '0000',
'mask8': '0000'
}, {
'block1': 'fdbe',
'block2': 'bebe',
'block3': 'bebe',
'block4': '1205',
'block5': '0000',
'block6': '0000',
'block7': '0000',
'block8': '0000',
'prefix': 64,
'mask1': 'ffff',
'mask2': 'ffff',
'mask3': 'ffff',
'mask4': 'ffff',
'mask5': '0000',
'mask6': '0000',
'mask7': '0000',
'mask8': '0000'
}, {
'block1': 'fdbe',
'block2': 'bebe',
'block3': 'bebe',
'block4': '1204',
'block5': '8000',
'block6': '0000',
'block7': '0000',
'block8': '0000',
'prefix': 65,
'mask1': 'ffff',
'mask2': 'ffff',
'mask3': 'ffff',
'mask4': 'ffff',
'mask5': '8000',
'mask6': '0000',
'mask7': '0000',
'mask8': '0000'
}, {
'block1': 'fdbe',
'block2': 'bebe',
'block3': 'bebe',
'block4': '1200',
'block5': '0000',
'block6': '0000',
'block7': '0000',
'block8': '0000',
'prefix': 64,
'mask1': 'ffff',
'mask2': 'ffff',
'mask3': 'ffff',
'mask4': 'ffff',
'mask5': '0000',
'mask6': '0000',
'mask7': '0000',
'mask8': '0000'
}]
fields = [
'block1',
'block2',
'block3',
'block4',
'block5',
'block6',
'block7',
'block8',
'prefix',
'mask1',
'mask2',
'mask3',
'mask4',
'mask5',
'mask6',
'mask7',
'mask8',
'vlan',
]
for network_send, expected_network in izip(networks, expected_networks):
# Get all vlans of environment
ids_vlans = self.search_all_vlans([network_send.get('env')])
# Creates networks v4
network_send['vlan'] = ids_vlans[0]
expected_network['vlan'] = ids_vlans[0]
id_network = self.create_networkv6s([network_send])[0]['id']
# Get object created
url = '/api/v3/networkv6/%s/' % id_network
url = prepare_url(url, fields=fields)
response = self.client.get(
url, HTTP_AUTHORIZATION=self.authorization
)
# Verify if object is right
self.compare_values(
json.dumps(expected_network, sort_keys=True),
json.dumps(response.data['networks'][0], sort_keys=True)
)
| 1.859375 | 2 |
unstdlib/standard/string_.py | wolever/unstdlib.py | 83 | 12772535 | import re
import string
import unicodedata
from unstdlib.six import text_type, PY3, string_types, binary_type, u
from unstdlib.six.moves import xrange
if PY3:
text_type_magicmethod = "__str__"
else:
text_type_magicmethod = "__unicode__"
from .random_ import random
__all__ = [
'random_string',
'number_to_string', 'string_to_number', 'number_to_bytes', 'bytes_to_number',
'dollars_to_cents',
'to_str', 'to_unicode', 'to_int', 'to_float',
'format_int',
'slugify',
]
class r(object):
    """
    A normalized repr for bytes/unicode between Python2 and Python3.

    Wrapping a value in ``r(...)`` makes the doctests in this module print
    the same literal prefix (``u'...'`` / ``b'...'``) on both major versions.
    """
    def __init__(self, val):
        # Value whose repr should be normalized.
        self.val = val

    def __repr__(self):
        if PY3:
            # Python 3 reprs text without the u'' prefix; add it back.
            if isinstance(self.val, text_type):
                return 'u' + repr(self.val)
        else:
            # Python 2 reprs str without the b'' prefix; add it back.
            if isinstance(self.val, str):
                return 'b' + repr(self.val)
        return repr(self.val)
_Default = object()
def random_string(length=6, alphabet=string.ascii_letters+string.digits):
    """
    Return a random string of the given ``length`` drawn from ``alphabet``.

    The default alphabet is url-friendly (base62).
    """
    picks = (random.choice(alphabet) for _ in xrange(length))
    return ''.join(picks)
def number_to_string(n, alphabet):
    """
    Given a non-negative integer ``n``, convert it to a string composed of
    the given ``alphabet`` mapping, where the position of each element in
    ``alphabet`` is its radix value.

    Note: ``n == 0`` yields the empty string (the conversion loop never
    runs), matching the historical behaviour of this helper.

    Examples::

        >>> number_to_string(12345678, '01')
        '101111000110000101001110'
        >>> number_to_string(12345678, 'ab')
        'babbbbaaabbaaaababaabbba'
        >>> number_to_string(12345678, string.ascii_letters + string.digits)
        'ZXP0'
        >>> number_to_string(12345, ['zero ', 'one ', 'two ', 'three ', 'four ', 'five ', 'six ', 'seven ', 'eight ', 'nine '])
        'one two three four five '
    """
    result = ''
    base = len(alphabet)
    current = int(n)
    if current < 0:
        # Bug fix: the message was previously passed with a comma instead of
        # ``%``, so ``n`` was never interpolated into the message.
        raise ValueError("invalid n (must be non-negative): %s" % n)
    while current:
        current, remainder = divmod(current, base)
        result = alphabet[remainder] + result
    return result
def string_to_number(s, alphabet):
    """
    Decode string ``s`` as an integer, where each character's radix value is
    its position in ``alphabet``.

    Examples::

        >>> string_to_number('101111000110000101001110', '01')
        12345678
        >>> string_to_number('babbbbaaabbaaaababaabbba', 'ab')
        12345678
        >>> string_to_number('ZXP0', string.ascii_letters + string.digits)
        12345678
    """
    base = len(alphabet)
    digit_values = {ch: i for i, ch in enumerate(alphabet)}
    # Horner's scheme: fold left-to-right instead of summing powers.
    n = 0
    for ch in s:
        n = n * base + digit_values[ch]
    return n
def bytes_to_number(b, endian='big'):
    """
    Convert a byte string to its integer value.

    :param b:
        String or bytearray to convert.
    :param endian:
        Byte order of the input ('big' or 'little', default 'big').

    Assumes bytes are 8 bits. This is the reverse of ``number_to_bytes(n)``
    and a special case of ``string_to_number`` over a base-256 alphabet.

    Examples::

        >>> bytes_to_number(b'*')
        42
        >>> bytes_to_number(b'\\xff')
        255
        >>> bytes_to_number(b'\\x01\\x00')
        256
        >>> bytes_to_number(b'\\x00\\x01', endian='little')
        256
    """
    if endian == 'big':
        b = reversed(b)
    result = 0
    for shift, octet in enumerate(bytearray(b)):
        # Octets occupy disjoint bit ranges, so OR-ing them in is enough.
        result |= octet << (shift * 8)
    return result
def number_to_bytes(n, endian='big'):
    """
    Convert a non-negative integer to its byte-string representation.

    :param n:
        Integer to convert.
    :param endian:
        Byte order of the result ('big' or 'little', default 'big').

    Assumes bytes are 8 bits. This is the reverse of ``bytes_to_number(b)``
    and a special case of ``number_to_string`` over a base-256 alphabet.

    Examples::

        >>> r(number_to_bytes(42))
        b'*'
        >>> r(number_to_bytes(255))
        b'\\xff'
        >>> r(number_to_bytes(256))
        b'\\x01\\x00'
        >>> r(number_to_bytes(256, endian='little'))
        b'\\x00\\x01'
    """
    octets = []
    while n:
        n, low = divmod(n, 256)
        # py3 ``bytes`` wants ints; py2 ``''.join`` wants 1-char strings.
        octets.append(low if PY3 else chr(low))
    if endian == 'big':
        octets.reverse()
    return bytes(octets) if PY3 else ''.join(octets)
def to_str(obj, encoding='utf-8', **encode_args):
    r"""
    Returns a ``str`` of ``obj``, encoding using ``encoding`` if necessary. For
    example::

        >>> some_str = b"\xff"
        >>> some_unicode = u"\u1234"
        >>> some_exception = Exception(u'Error: ' + some_unicode)
        >>> r(to_str(some_str))
        b'\xff'
        >>> r(to_str(some_unicode))
        b'\xe1\x88\xb4'
        >>> r(to_str(some_exception))
        b'Error: \xe1\x88\xb4'
        >>> r(to_str([42]))
        b'[42]'

    :param obj: Object to convert.
    :param encoding: Codec used when ``obj`` needs encoding (default utf-8).
    :param encode_args: Extra keyword arguments forwarded to ``.encode()``.

    See source code for detailed semantics.
    """
    # Note: On py3, ``b'x'.__str__()`` returns ``"b'x'"``, so we need to do the
    # explicit check first.
    if isinstance(obj, binary_type):
        # Bytes pass through untouched.
        return obj

    # We coerce to unicode if '__unicode__' is available because there is no
    # way to specify encoding when calling ``str(obj)``, so, eg,
    # ``str(Exception(u'\u1234'))`` will explode.
    if isinstance(obj, text_type) or hasattr(obj, text_type_magicmethod):
        # Note: unicode(u'foo') is O(1) (by experimentation)
        return text_type(obj).encode(encoding, **encode_args)

    # Anything else: use the type's native byte-string conversion.
    return binary_type(obj)
def to_unicode(obj, encoding='utf-8', fallback='latin1', **decode_args):
    r"""
    Returns a ``unicode`` of ``obj``, decoding using ``encoding`` if necessary.
    If decoding fails, the ``fallback`` encoding (default ``latin1``) is used.

    Examples::

        >>> r(to_unicode(b'\xe1\x88\xb4'))
        u'\u1234'
        >>> r(to_unicode(b'\xff'))
        u'\xff'
        >>> r(to_unicode(u'\u1234'))
        u'\u1234'
        >>> r(to_unicode(Exception(u'\u1234')))
        u'\u1234'
        >>> r(to_unicode([42]))
        u'[42]'

    :param obj: Object to convert.
    :param encoding: Codec tried first when ``obj`` is bytes.
    :param fallback: Codec used if ``encoding`` fails; latin1 maps every byte
        to a codepoint, so with the default it cannot fail.
    :param decode_args: Extra keyword arguments forwarded to the decode call.

    See source code for detailed semantics.
    """
    # Note: on py3, the `bytes` type defines an unhelpful "__str__" function,
    # so we need to do this check (see comments in ``to_str``).
    if not isinstance(obj, binary_type):
        if isinstance(obj, text_type) or hasattr(obj, text_type_magicmethod):
            return text_type(obj)
        # Non-text object: take its byte-string form, then decode below.
        obj_str = binary_type(obj)
    else:
        obj_str = obj

    # Try the requested codec first; fall back if the bytes are not valid.
    try:
        return text_type(obj_str, encoding, **decode_args)
    except UnicodeDecodeError:
        return text_type(obj_str, fallback, **decode_args)
def to_int(s, default=0):
    """
    Convert input to an ``int``, returning ``default`` when conversion fails.

    Examples::

        >>> to_int('1')
        1
        >>> to_int(1)
        1
        >>> to_int('')
        0
        >>> to_int(None)
        0
        >>> to_int(0, default='Empty')
        0
        >>> to_int(None, default='Empty')
        'Empty'
    """
    try:
        result = int(s)
    except (TypeError, ValueError):
        result = default
    return result
# The two float infinities; NaN is detected separately via ``f != f``.
_infs = frozenset((float("inf"), float("-inf")))

def to_float(s, default=0.0, allow_nan=False):
    """
    Convert input to a ``float``, returning ``default`` when conversion fails.

    Unless ``allow_nan`` is set, ``nan``, ``inf`` and ``-inf`` are treated as
    failures and also yield ``default``.

    Examples::

        >>> to_float('1.5')
        1.5
        >>> to_float(1)
        1.0
        >>> to_float('')
        0.0
        >>> to_float('nan')
        0.0
        >>> to_float('inf')
        0.0
        >>> to_float('-inf', allow_nan=True)
        -inf
        >>> to_float(None)
        0.0
        >>> to_float(0, default='Empty')
        0.0
        >>> to_float(None, default='Empty')
        'Empty'
    """
    try:
        value = float(s)
    except (TypeError, ValueError):
        return default
    if allow_nan:
        return value
    # NaN is the only float not equal to itself.
    if value != value or value in _infs:
        return default
    return value
def format_int(n, singular=_Default, plural=_Default):
    """
    Return `singular.format(n)` if n is 1, or `plural.format(n)` otherwise. If
    plural is not specified, then it is assumed to be same as singular but
    suffixed with an 's'.

    :param n:
        Integer which determines pluralness.

    :param singular:
        String with a format() placeholder for n. (Default: `u"{:,}"`)

    :param plural:
        String with a format() placeholder for n. (Default: If singular is not
        default, then it's `singular + u"s"`. Otherwise it's same as singular.)

    Example: ::

        >>> r(format_int(1000))
        u'1,000'
        >>> r(format_int(1, u"{} day"))
        u'1 day'
        >>> r(format_int(2, u"{} day"))
        u'2 days'
        >>> r(format_int(2, u"{} box", u"{} boxen"))
        u'2 boxen'
        >>> r(format_int(20000, u"{:,} box", u"{:,} boxen"))
        u'20,000 boxen'
    """
    n = int(n)

    if singular is None or singular is _Default:
        # No singular given: plain comma-grouped number for any n.
        if plural is _Default:
            plural = None
        singular = u'{:,}'
    elif plural is _Default:
        plural = singular + u's'

    use_singular = (n == 1) or not plural
    template = singular if use_singular else plural
    return template.format(n)
# Runs of numeric-looking characters: digits, '.', '-', and exponent e/E.
RE_NUMBER = re.compile(r'[\d\.\-eE]+')

def dollars_to_cents(s, allow_negative=False):
    """
    Given a string or integer representing dollars, return an integer of
    equivalent cents, in an input-resilient way.

    This works by stripping any non-numeric characters before attempting to
    cast the value.

    Examples::

        >>> dollars_to_cents('$1')
        100
        >>> dollars_to_cents('1')
        100
        >>> dollars_to_cents(1)
        100
        >>> dollars_to_cents('1e2')
        10000
        >>> dollars_to_cents('-1$', allow_negative=True)
        -100
        >>> dollars_to_cents('1 dollar')
        100
    """
    # TODO: Implement cents_to_dollars
    # NOTE(review): any falsy input returns None -- this covers None and ''
    # but also numeric zero (0, 0.0); confirm callers expect None rather
    # than 0 for a zero-dollar amount.
    if not s:
        return

    if isinstance(s, string_types):
        # Keep only the numeric fragments so '$1', '1 dollar', '-1$' parse.
        s = ''.join(RE_NUMBER.findall(s))

    dollars = int(round(float(s) * 100))
    if not allow_negative and dollars < 0:
        raise ValueError('Negative values not permitted.')

    return dollars
# One or more non-word characters -- every such run collapses to a delimiter.
RE_SLUG = re.compile(r'\W+')

def slugify(s, delimiter='-'):
    """
    Normalize `s` into ASCII and replace non-word characters with `delimiter`.
    """
    # NFKD-decompose, drop non-ASCII (e.g. combining marks), then collapse
    # each non-word run into a single delimiter and lowercase the result.
    s = unicodedata.normalize('NFKD', to_unicode(s)).encode('ascii', 'ignore').decode('ascii')
    return RE_SLUG.sub(delimiter, s).strip(delimiter).lower()
if __name__ == "__main__":
    # Run the doctests embedded in the docstrings above.
    import doctest
    doctest.testmod(optionflags=doctest.ELLIPSIS)
| 3.234375 | 3 |
notification_sender/urls.py | juliojmphjv/spot-that-fire | 0 | 12772536 | <reponame>juliojmphjv/spot-that-fire
from django.contrib import admin
from django.urls import path, include
from django.conf.urls import url
from notification_sender.views import sms_response
urlpatterns = [
    # Webhook endpoint for inbound SMS replies (handled by sms_response).
    # NOTE(review): `django.conf.urls.url` is deprecated in modern Django;
    # consider `re_path` or `path('receive/', ...)`.  The `admin`, `path`
    # and `include` imports above appear unused.
    url(r'^receive/$', sms_response),
]
tests/test_snappi_lists.py | Rangababu-R/snappi | 34 | 12772537 | import pytest
import snappi
def test_snappi_lists(api):
    """Validate SnappiList object indexing and unpacking

    Builds four flows, each attaching its packet headers through a
    different (but equivalent) SnappiList idiom, then asserts the
    resulting config contains the expected flows, header counts and
    VLAN ids.
    """
    config = api.config()
    # Idiom 1: keep the returned iterator, index element 0, unpack headers.
    flows = config.flows.flow(name='1')
    assert(flows.__class__ == snappi.FlowIter)
    flow = flows[0]
    flow.tx_rx.port.tx_name = 'p1'
    assert(flow.__class__ == snappi.Flow)
    eth, vlan, vlan1 = flow.packet.ethernet().vlan().vlan()
    vlan.id.value = 1
    # Idiom 2: index the freshly-appended flow with [-1]; unpack four headers.
    flow = config.flows.flow(name='2')[-1]
    flow.tx_rx.port.tx_name = 'p1'
    assert(flow.__class__ == snappi.Flow)
    eth, vlan, ipv4, tcp = flow.packet.ethernet().vlan().ipv4().tcp()
    assert(eth.__class__ == snappi.FlowEthernet)
    assert(vlan.__class__ == snappi.FlowVlan)
    assert(ipv4.__class__ == snappi.FlowIpv4)
    assert(tcp.__class__ == snappi.FlowTcp)
    vlan.id.value = 2
    # Idiom 3: keep the header iterator itself, then index its last element.
    flow = config.flows.flow(name='3')[-1]
    flow.tx_rx.port.tx_name = 'p1'
    pkt = flow.packet.ethernet().vlan()
    assert(pkt.__class__ == snappi.FlowHeaderIter)
    vlan = pkt[-1]
    vlan.id.value = 3
    # Idiom 4: chain and index in a single expression.
    flow = config.flows.flow(name='4')[-1]
    flow.tx_rx.port.tx_name = 'p1'
    vlan = flow.packet.ethernet().vlan()[-1]
    assert(vlan.__class__ == snappi.FlowVlan)
    vlan.id.value = 4
    print(config)
    api.set_config(config)
    # All four idioms should have produced equivalent per-flow structures.
    assert (len(config.flows) == 4)
    assert (config.flows[0].name == '1')
    assert (config.flows[1].name == '2')
    assert (config.flows[2].name == '3')
    assert (config.flows[3].name == '4')
    assert (len(config.flows[0].packet) == 3)
    assert (len(config.flows[1].packet) == 4)
    assert (len(config.flows[2].packet) == 2)
    assert (len(config.flows[3].packet) == 2)
    # packet[1] is the (first) VLAN header in every flow.
    assert (config.flows[0].packet[1].id.value == 1)
    assert (config.flows[1].packet[1].id.value == 2)
    assert (config.flows[2].packet[1].id.value == 3)
    assert (config.flows[3].packet[1].id.value == 4)
if __name__ == '__main__':
    # Allow running this test module directly (verbose, output not captured).
    pytest.main(['-vv', '-s', __file__])
| 2.453125 | 2 |
imutils/utils/ResizeRight/_setup.py | JacobARose/image-utils | 0 | 12772538 | <gh_stars>0
# setup.py for the ResizeRight package.  Dead commented-out debug code
# (distutils import, sys.path/dir() prints) removed.
import sys

from setuptools import find_packages, setup

setup(
    name="ResizeRight",
    version="0.1dev",
    # Match both the historical and snake_case package spellings.
    packages=find_packages(include=["ResizeRight", "resize_right", "resize_right.*"]),
    license="Creative Commons Attribution-Noncommercial-Share Alike license",
    long_description=open("README.md").read(),
)
| 1.234375 | 1 |
examples/decompose/do_compare_ica_methods.py | fboers/jumeg | 6 | 12772539 | <filename>examples/decompose/do_compare_ica_methods.py
"""
Compute the ica object on filtered data based on the mne and on the
jumeg method.
Compare pca_mean_ and pre_whitener_ for:
mne & filtered data, jumeg & filtered data, jumeg & unfiltered data
"""
import mne
from mne.preprocessing.ica import ICA as ICA_mne
from jumeg.decompose.ica_replace_mean_std import ICA as ICA_jumeg
from jumeg.decompose.ica_replace_mean_std import apply_ica_replace_mean_std
from mne.datasets import sample
flow = 1.
fhigh = 45.
reject = {'mag': 5e-12}
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_raw = data_path + '/MEG/sample/sample_audvis_raw.fif'
raw = mne.io.Raw(fname_raw, preload=True)
raw_filt = raw.copy().filter(flow, fhigh, method='fir', n_jobs=2, fir_design='firwin', phase='zero')
# use 60s of data
raw_filt.crop(0, 60)
raw.crop(0, 60)
raw_unfilt = raw.copy()
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
ica_mne = ICA_mne(method='fastica', n_components=60, random_state=42,
max_pca_components=None, max_iter=5000, verbose=False)
# fit ica object from mne to filtered data
ica_mne.fit(raw_filt, picks=picks, reject=reject, verbose=True)
# save mean and standard deviation of filtered MEG channels for the standard mne routine
pca_mean_filt_mne = ica_mne.pca_mean_.copy()
pca_pre_whitener_filt_mne = ica_mne.pre_whitener_.copy() # this is the standard deviation of MEG channels
ica_jumeg = ICA_jumeg(method='fastica', n_components=60, random_state=42,
max_pca_components=None, max_iter=5000, verbose=False)
# fit ica object from jumeg to filtered data
ica_jumeg.fit(raw_filt, picks=picks, reject=reject, verbose=True)
# save mean and standard deviation of filtered MEG channels for the standard mne routine
pca_mean_filt_jumeg = ica_jumeg.pca_mean_.copy()
pca_pre_whitener_filt_jumeg = ica_jumeg.pre_whitener_.copy() # this is the standard deviation of MEG channels
# use the same arguments for apply_ica_replace_mean_std as when you are initializing the ICA
# object and when you are fitting it to the data
# the ica object is modified in place!!
# apply ica object from jumeg to unfiltered data while replacing the mean and std
raw_clean = apply_ica_replace_mean_std(raw_unfilt, ica_jumeg, picks=picks, reject=reject, exclude=ica_mne.exclude,
n_pca_components=None)
# save mean and standard deviation of unfiltered MEG channels
pca_mean_replaced_unfilt_jumeg = ica_jumeg.pca_mean_
pca_pre_whitener_replaced_unfilt_jumeg = ica_jumeg.pre_whitener_
# compare methods for filtered and unfiltered data
for idx in range(0, len(pca_mean_filt_mne)):
print('%10.6f\t%10.6f\t%10.6f' % (pca_mean_filt_mne[idx], pca_mean_filt_jumeg[idx],
pca_mean_replaced_unfilt_jumeg[idx]))
if idx >= 9:
break
for idx in range(0, len(pca_pre_whitener_filt_mne)):
print(pca_pre_whitener_filt_mne[idx], pca_pre_whitener_filt_jumeg[idx],\
pca_pre_whitener_replaced_unfilt_jumeg[idx])
if idx >= 9:
break
| 2.515625 | 3 |
src/complicated1.py | belkale/deeplearning_playground | 0 | 12772540 | <reponame>belkale/deeplearning_playground
import sys
import math
import numpy as np
def complicated_val(x, y):
    """Return True iff (x, y) lies inside either of two perturbed disks.

    Each expression is a circle equation (centres (3, -4) and (-4, 2),
    squared radii 16 and 9) perturbed by a trigonometric cross-term; the
    point is "inside" when either expression is negative.
    """
    val1 = (x - 3) * (x - 3) + (y + 4) * (y + 4) + y * math.sin(x) - 16
    val2 = (x + 4) * (x + 4) + (y - 2) * (y - 2) + x * math.cos(y) - 9
    # Return the predicate directly instead of if/else returning True/False.
    return val1 < 0 or val2 < 0
def main(argv):
    """Sample a 50x50 grid over [-10, 10]^2 and print 'x,y,label' rows."""
    grid = np.linspace(-10, 10, num=50)
    for xval in grid:
        for yval in grid:
            label = 1 if complicated_val(xval, yval) else 0
            print('{},{},{}'.format(xval, yval, label))
if __name__ == "__main__":
    # Entry point; argv is forwarded but currently unused by main().
    main(sys.argv)
| 3.984375 | 4 |
transpile_shr_to_asm.py | N00byEdge/shrush | 3 | 12772541 | import sys
def dprint(*args, **kwargs):
    """Diagnostic print: identical to print() but targets stderr."""
    err_stream = sys.stderr
    print(*args, file=err_stream, **kwargs)
def read():
    """Consume and return exactly one character from stdin ('' at EOF)."""
    ch = sys.stdin.read(1)
    return ch
def clean_read():
    """Read characters, skipping spaces and newlines, and return the first
    significant one ('' at EOF)."""
    while True:
        ch = read()
        if ch not in ('\n', ' '):
            return ch
def readword_finish(s):
    """Append characters from stdin onto *s* until whitespace or EOF,
    then return the accumulated word."""
    while True:
        ch = read()
        # Newline, space or EOF all terminate the word.
        if ch in ('\n', ' ', ''):
            break
        s += ch
    return s
def readword():
    # Skip leading whitespace, then read up to the next whitespace/EOF.
    return readword_finish(clean_read())
def readint():
    """Parse one integer token from stdin.

    Supported forms: ``xDEADBEEF`` (hex), ``'c`` (character literal ->
    code point), or a plain decimal number.
    """
    s = read()
    #dprint(f'readint: {s}')
    if s == 'x':
        # Hex literal: the remainder of the word holds the hex digits.
        return int(readword(), 16)
    if s == '\'':
        # Character literal: the next raw character's code point.
        return ord(read()[0])
    # Decimal: `s` is the first digit (or sign); finish the word.
    return int(readword_finish(s), 10)
# x86-64 register names, indexed by the transpiler's numeric register ids
# (the operands produced by readint()).
reg = [
    "rax", #0
    "rcx", #1
    "rdx", #2
    "rbx", #3
    "rsp", #4
    "rbp", #5
    "rsi", #6
    "rdi", #7
]
def translate():
    """Stream-translate shrush opcodes from stdin into x86-64 NASM on stdout.

    Each opcode is a single word; its operands follow as further
    whitespace-separated tokens (integers via readint(), labels via
    readword()).  Translation stops cleanly at EOF (empty opcode); an
    unknown opcode aborts the process with exit status 1.
    """
    while True:
        op = readword()
        #dprint(f"opcode: '{op}'")
        if op == '!':
            # '!' -> raw syscall
            print(f"""
            syscall
            """)
        elif op == 'A':
            # A <reg> <label> -> load the label's address
            dest = readint()
            lbl = readword()
            print(f"""
            lea {reg[dest]}, [{lbl}]
            """)
        elif op == 'B':
            # B <imm> -> emit one literal data byte
            print(f"""
            db {readint()}
            """)
        elif op == 'b':
            # b <dst> <src> -> zero-extended byte load through src
            dest = readint()
            src = readint()
            print(f"""
            movzx {reg[dest]}, byte[{reg[src]}]
            """)
        elif op == 'C':
            lbl = readword()
            print(f"""
            call {lbl}
            """)
        elif op == 'D':
            # D <label> -> define a label at the current position
            lbl = readword()
            print(f"""
            {lbl}:
            """)
        elif op == 'E':
            lbl = readword()
            print(f"""
            je {lbl}
            """)
        elif op == 'e':
            # 'e' -> program entry point
            print(f"""
            _start:
            """)
        elif op == 'I':
            # I <imm> -> emit a literal quadword
            value = readint()
            print(f"""
            dq {value}
            """)
        elif op == 'J':
            lbl = readword()
            print(f"""
            jmp {lbl}
            """)
        elif op == 'L':
            # L <dst> <src> -> quadword load through src
            dest = readint()
            src = readint()
            print(f"""
            mov {reg[dest]}, [{reg[src]}]
            """)
        elif op == 'l':
            # l <reg> <imm> -> shift left
            r = readint()
            num = readint()
            print(f"""
            shl {reg[r]}, {num}
            """)
        elif op == 'M':
            # M <reg> <imm> -> compare register against immediate
            a = readint()
            b = readint()
            print(f"""
            cmp {reg[a]}, {b}
            """)
        elif op == 'm':
            # m <regA> <regB> -> compare two registers
            a = readint()
            b = readint()
            print(f"""
            cmp {reg[a]}, {reg[b]}
            """)
        elif op == 'N':
            lbl = readword()
            print(f"""
            jne {lbl}
            """)
        elif op == 'P':
            # 'P' -> reserve a zero-filled 4 KiB page
            print(f"""
            times 0x1000 db 0x00
            """)
        elif op == 'Q':
            # Q <label> -> emit the label's address as a quadword
            lbl = readword()
            print(f"""
            dq {lbl}
            """)
        elif op == 'R':
            print(f"""
            ret
            """)
        elif op == 'S':
            # S <src> <dst> -> store register into memory (note operand order:
            # source first, destination second)
            src = readint()
            dest = readint()
            print(f"""
            mov [{reg[dest]}], {reg[src]}
            """)
        elif op == 'W':
            # W <dst> <src> <idx> -> dst = src + idx (addition via lea)
            dest = readint()
            src = readint()
            value = readint()
            print(f"""
            lea {reg[dest]}, [{reg[src]} + {reg[value]}]
            """)
        elif op == 'w':
            dest = readint()
            src = readint()
            print(f"""
            sub {reg[dest]}, {reg[src]}
            """)
        elif op == '<':
            print(f"""
            pop {reg[readint()]}
            """)
        elif op == '=':
            # = <reg> <imm> -> load immediate
            dest = readint()
            value = readint()
            print(f"""
            mov {reg[dest]}, {value}
            """)
        elif op == '>':
            print(f"""
            push {reg[readint()]}
            """)
        elif op == '-':
            # - <dst> <src> <imm> -> dst = src - imm (via lea)
            dest = readint()
            src = readint()
            value = readint()
            print(f"""
            lea {reg[dest]}, [{reg[src]} - {value}]
            """)
        elif op == '+':
            # + <dst> <src> <imm> -> dst = src + imm (via lea)
            dest = readint()
            src = readint()
            value = readint()
            print(f"""
            lea {reg[dest]}, [{reg[src]} + {value}]
            """)
        elif op == '':
            # EOF: translation complete.
            return
        else:
            dprint(f"ERR: Unknwon op: '{op}'!")
            sys.exit(1)
if __name__ == '__main__':
    # Emit the NASM prologue once, then stream-translate stdin to stdout.
    print(f"""
    [bits 64]
    [section .memes]
    extern _start
    """)
    translate()
| 2.796875 | 3 |
src/fuckery/memory.py | williamgibb/pyFuckery | 0 | 12772542 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pyFuckery - memory.py
Created on 2/12/17.
Memory object implementation. Provides memory bounds checking, as well as value enforcement.
"""
# Stdlib
import argparse
import hashlib
import json
import logging
import sys
# Third Party Code
import msgpack
# Custom Code
from fuckery.constants import DEFAULT_MEMORY_SIZE
from fuckery.constants import MEMORY_MAX_VALUE
from fuckery.constants import MEMORY_MIN_VALUE
from fuckery.exc import AddressError
from fuckery.exc import StorageError
log = logging.getLogger(__name__)
class Storage(object):
    """
    Provides an interface for storing memory values for the Brainfuck VM.

    This provides for type safety & memory access checking.
    """

    def __init__(self, n=DEFAULT_MEMORY_SIZE):
        """
        Init function for Storage.

        :param n: Number of memory cells to create.
        """
        self.n = n
        self.min = MEMORY_MIN_VALUE
        self.max = MEMORY_MAX_VALUE
        # Pre-populate every cell with zero.  Insertion order 0..n-1 matters:
        # mem_hash serializes the dict as-is and relies on that ordering.
        self.mem = dict.fromkeys(range(self.n), 0x00)

    @property
    def mem_hash(self):
        """
        Returns a hash of the state of the memory.

        Note - Computing this frequently can be expensive to do as the memory
        section is serialized via msgpack.dumps() prior to hashing.

        :return: Hex digest string.
        """
        # We're abusing the python 3.6 ordereddict behavior which
        # became a part of the cpython spec in 3.7 for this
        # to work. It really only works since we pre-initialize
        # the self.mem dictionary on startup.
        serialized = msgpack.dumps(self.mem)
        return hashlib.md5(serialized).hexdigest()

    def __contains__(self, item):
        return item in self.mem

    def __len__(self):
        return len(self.mem)

    def get(self, addr):
        """
        Get the value of the memory at a location.

        :param addr: Memory address to retrieve.
        :return: The stored integer value.
        """
        if addr in self:
            return self.mem.get(addr)
        raise AddressError(f'Address is invalid: {addr}')

    def set(self, addr, value):
        """
        Set the value of the memory at a location.

        :param addr: Memory address to set.
        :param value: Value to set.
        :return: None
        """
        # Validation order is significant: address first, then type, then
        # range -- callers may distinguish AddressError from StorageError.
        if addr not in self:
            raise AddressError(f'Address is invalid: {addr}')
        if not isinstance(value, int):
            raise StorageError(f'Value is not an int: {type(value)}')
        if not (self.min <= value <= self.max):
            raise StorageError(f'Value is out of size bounds: {value}')
        self.mem[addr] = value
# noinspection PyMissingOrEmptyDocstring
def main(options):  # pragma: no cover
    """Smoke-test driver: exercise Storage get/set and log the results."""
    if not options.verbose:
        logging.disable(logging.DEBUG)
    m = Storage(n=25)
    v = m.get(0)
    log.info(f'm[0] is {v}')
    m.set(24, 1)
    # BUG FIX: previously re-read cell 0 while logging the value as m[24].
    v = m.get(24)
    log.info(f'm[24] is {v}')
    sys.exit(0)
# noinspection PyMissingOrEmptyDocstring
def makeargpaser():  # pragma: no cover
    """Build the CLI parser: a single -v/--verbose boolean flag."""
    parser = argparse.ArgumentParser(description="Memory / Storage runner.")
    parser.add_argument(
        '-v', '--verbose',
        dest='verbose',
        default=False,
        action='store_true',
        help='Enable verbose output',
    )
    return parser
def _main():  # pragma: no cover
    # Configure root logging, parse CLI options, then hand off to main().
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s [%(levelname)s] %(message)s [%(filename)s:%(funcName)s]')
    p = makeargpaser()
    opts = p.parse_args()
    main(opts)


if __name__ == '__main__':  # pragma: no cover
    _main()
| 2.984375 | 3 |
tests/contract/test_misc.py | samuelhwilliams/github-trello-powerup | 0 | 12772543 | <filename>tests/contract/test_misc.py<gh_stars>0
"""
TESTS TO WRITE:
* API contract tests for notify/email provider, github, trello
"""
| 0.992188 | 1 |
privacy/tests/test_app/admin.py | bitlabstudio/django-privacy | 2 | 12772544 | """Admin for the ``test_app`` app."""
from django.contrib import admin
from .models import DummyProfileModel
admin.site.register(DummyProfileModel)
| 1.3125 | 1 |
boresights/analyzer/flux_calibration_file_analyzer.py | SDRAST/Data_Reduction | 0 | 12772545 |
import datetime
import logging
import h5py
from .file_analyzer import FileAnalyzer
from .flux_calibration_data_analyzer import FluxCalibrationDataAnalyzer
module_logger = logging.getLogger(__name__)
class FluxCalibrationFileAnalyzer(FileAnalyzer):
    """Analyzer for a single flux-calibration HDF5 file.

    Parses the observation timestamp out of the file name and delegates
    per-dataset work to FluxCalibrationDataAnalyzer.
    """

    def __init__(self, file_path):
        # BUG FIX: the original called
        # super(FluxCalibrationAnalyzer, self).__init__() -- a non-existent
        # class name that raised NameError on instantiation.
        super(FluxCalibrationFileAnalyzer, self).__init__()
        import os  # local import: `os` was never imported at module level

        self.file_path = file_path
        self.file_name = os.path.basename(self.file_path)
        # File names end in "_<doy>-<time>.hdf5"; the year is hard-coded to
        # 2018 -- TODO confirm for data from other observing seasons.
        self.timestamp = "2018-" + \
            self.file_name.replace(".hdf5", "").split("_")[-1]
        self.timestamp_obj = datetime.datetime.strptime(
            self.timestamp, "%Y-%j-%Hh%Mm%Ss")
        self.meta_data["timestamp"] = self.timestamp
        self.meta_data["file_path"] = self.file_path
        self.meta_data["file_name"] = self.file_name

    def load_data(self):
        """Read every top-level dataset from the HDF5 file into memory and
        wrap the result in a FluxCalibrationDataAnalyzer."""
        calib_data = {}
        with h5py.File(self.file_path, "r") as f:
            for key in list(f.keys()):
                calib_data[key] = f[key][...]
        self.data = FluxCalibrationDataAnalyzer(calib_data)

    def load_meta_data(self):
        # All metadata comes from the file name in __init__; nothing to do.
        pass

    def report_meta_data(self, header=False,
                         line_delimiter=", ", delimiter="\n"):
        return ""

    # this is not used here
    def plot(self, save_dir=None, overwrite=True, ax=None):
        # NOTE(review): `plt` (presumably matplotlib.pyplot) and the helpers
        # `self._plot_save_path` / `self._check_existing` are not defined in
        # this module -- confirm they come from FileAnalyzer / its module.
        save_file_path, save_file_name = self._plot_save_path(save_dir)
        if not self._check_existing(save_file_path, overwrite):
            return
        fig = None
        if ax is None:
            fig, ax = plt.subplots(1, 1, figsize=(10, 10/1.3))
        self.data.plot(ax=ax)
        suptitle = save_file_name + "\n" + self.report_meta_data(
            header=False, line_delimiter=", ", delimiter="\n")
        if fig is not None:
            fig.suptitle(suptitle)
            # Shrink the axes area to make room for the multi-line title.
            top = 0.98-(0.03*float(len(suptitle.split('\n'))))
            fig.tight_layout(rect=[0, 0.03, 1, top])
            fig.savefig(save_file_path)
            plt.close(fig)
| 2.46875 | 2 |
modules/viewers/TransferFunctionEditor.py | chrisidefix/devide | 25 | 12772546 | <filename>modules/viewers/TransferFunctionEditor.py
# - make our own window control for colour-sequence bar
# - this should also have separate (?) line with HSV colour vertices
# - on this line, there should be vertical lines indicating the current
# position of all the opacity transfer function vertices
# - abstract floatcanvas-derived linear function editor into wx_kit
import os
from module_base import ModuleBase
from module_mixins import IntrospectModuleMixin,\
FileOpenDialogModuleMixin
import module_utils
from external import transfer_function_widget
import vtk
import wx
# Preset transfer functions keyed by display name.  Each preset is a list of
# (scalar_value, (r, g, b), opacity) control points, with colours in 0-255
# and opacity in 0.0-1.0; the first/last points pin the ends of the range.
TF_LIBRARY = {
    'CT Hip (bones+vasculature)' : [
        (-1024.0, (0, 0, 0), 0),
        (184.65573770491801, (255, 128, 128), 0.0),
        (225.20534629404619, (255, 128, 128), 0.73857868020304562),
        (304.8359659781288, (255, 128, 128), 0.0),
        (377.70491803278696, (233, 231, 148), 0.0),
        (379.48967193195631, (233, 231, 148), 1.0),
        (3072.0, (255, 255, 255), 1)],

    'CT Hip (test)' : [
        (-1024.0, (0, 0, 0), 0),
        (117.50819672131138, (255, 128, 128), 0.0),
        (595.93442622950829, (255, 255, 255), 1.0),
        (3072.0, (255, 255, 255), 1)],

    'Panoramix (prototype)' : [
        (-1024.0, (0, 0, 0), 0),
        (136.33994334277622, (214, 115, 115), 0.0),
        (159.5467422096317, (230, 99, 99), 0.24788732394366197),
        (200.1586402266289, (255, 128, 128), 0.0),
        (252.37393767705385, (206, 206, 61), 0.40000000000000002),
        (287.18413597733706, (255, 128, 128), 0.0),
        (403.21813031161469, (206, 61, 67), 0.13521126760563384),
        (525.05382436260629, (255, 255, 255), 0.0),
        (612.07932011331445, (255, 255, 255), 0.92957746478873238),
        (3072.0, (255, 255, 255), 1)]
    }
class TransferFunctionEditor(IntrospectModuleMixin, FileOpenDialogModuleMixin, ModuleBase):
    """DeVIDE module: interactive editor for volume-rendering transfer
    functions.

    Maintains a list of (scalar_value, (r, g, b), opacity) control points in
    self._config.transfer_function and exposes three synchronized VTK
    outputs: an opacity piecewise function, a colour transfer function and a
    lookup table.  NOTE(review): written for Python 2 (``except E, e``,
    ``reload``, ``file()``, list ``.sort()`` on ``keys()``).
    """

    def __init__(self, module_manager):
        ModuleBase.__init__(self, module_manager)

        self._volume_input = None

        # VTK objects handed out via get_output(); kept in sync with the
        # config in config_to_logic().
        self._opacity_tf = vtk.vtkPiecewiseFunction()
        self._colour_tf = vtk.vtkColorTransferFunction()
        self._lut = vtk.vtkLookupTable()

        # list of tuples, where each tuple (scalar_value, (r,g,b,a))
        self._config.transfer_function = [
            (0, (0,0,0), 0),
            (255, (255,255,255), 1)
        ]

        self._view_frame = None
        self._create_view_frame()
        self._bind_events()

        self.view()

        # all modules should toggle this once they have shown their
        # stuff.
        self.view_initialised = True

        self.config_to_logic()
        self.logic_to_config()
        self.config_to_view()

    def _bind_events(self):
        """Wire the widget event handlers (defined as local closures)."""

        def handler_blaat(event):
            # Current-point-changed: mirror the selected control point's
            # value/colour/opacity into the read-out controls.
            tf_widget = event.GetEventObject()  # the tf_widget
            ret = tf_widget.get_current_point_info()
            if not ret is None:
                val, col, opacity = ret
                vf = self._view_frame
                vf.colour_button.SetBackgroundColour(col)
                vf.cur_scalar_text.SetValue('%.2f' % (val,))
                vf.cur_col_text.SetValue(str(col))
                vf.cur_opacity_text.SetValue('%.2f' % (opacity,))

        vf = self._view_frame
        tfw = vf.tf_widget

        tfw.Bind(transfer_function_widget.EVT_CUR_PT_CHANGED,
                 handler_blaat)

        def handler_colour_button(event):
            # Let the user pick a new colour for the current control point.
            coldialog = wx.ColourDialog(vf, tfw.colour_data)
            if coldialog.ShowModal() == wx.ID_OK:
                colour = coldialog.GetColourData().GetColour().Get()
                # Remember the dialog state (custom colours) for next time.
                tfw.colour_data = coldialog.GetColourData()
                tfw.set_current_point_colour(colour)

        vf.colour_button.Bind(wx.EVT_BUTTON, handler_colour_button)

        def handler_delete_button(event):
            tfw.delete_current_point()

        vf.delete_button.Bind(wx.EVT_BUTTON, handler_delete_button)

        def handler_auto_range_button(event):
            # Fill the min/max fields from the connected volume's scalar
            # range (requires the network to have executed at least once).
            try:
                range = self._volume_input.GetScalarRange()
            except AttributeError:
                self._module_manager.log_error(
                    'Could not determine range from input. ' +
                    'Have you connected some input data and ' +
                    'has the network executed at least once?')
            else:
                vf = self._view_frame
                vf.scalar_min_text.SetValue(str(range[0]))
                vf.scalar_max_text.SetValue(str(range[1]))

        vf.auto_range_button.Bind(wx.EVT_BUTTON,
                                  handler_auto_range_button)

        def handler_apply_range_button(event):
            try:
                min = float(vf.scalar_min_text.GetValue())
                max = float(vf.scalar_max_text.GetValue())
            except ValueError:
                self._module_manager.log_error(
                    'Invalid scalar MIN / MAX.')
            else:
                tfw.set_min_max(min, max)

        vf.apply_range_button.Bind(wx.EVT_BUTTON,
                                   handler_apply_range_button)

        def handler_load_preset_button(event):
            # Replace the whole transfer function with the chosen preset.
            key = vf.preset_choice.GetStringSelection()
            preset_tf = TF_LIBRARY[key]
            tfw.set_transfer_function(preset_tf)

        vf.load_preset_button.Bind(wx.EVT_BUTTON,
                                   handler_load_preset_button)

        def handler_file_save_button(event):
            filename = self.filename_browse(self._view_frame,
                'Select DVTF filename to save to',
                'DeVIDE Transfer Function (*.dvtf)|*.dvtf|All files (*)|*',
                style=wx.SAVE)

            if filename:
                # if the user has NOT specified any fileextension, we
                # add .dvtf.  (on Win this gets added by the
                # FileSelector automatically, on Linux it doesn't)
                if os.path.splitext(filename)[1] == '':
                    filename = '%s.dvtf' % (filename,)

                self._save_tf_to_file(filename)

        vf.file_save_button.Bind(wx.EVT_BUTTON,
                                 handler_file_save_button)

        def handler_file_load_button(event):
            filename = self.filename_browse(self._view_frame,
                'Select DVTF filename to load',
                'DeVIDE Transfer Function (*.dvtf)|*.dvtf|All files (*)|*',
                style=wx.OPEN)

            if filename:
                self._load_tf_from_file(filename)

        vf.file_load_button.Bind(wx.EVT_BUTTON,
                                 handler_file_load_button)

    def _create_view_frame(self):
        """Instantiate the wx view frame and populate the presets choice."""
        import resources.python.tfeditorframe
        # reload so GUI edits are picked up without restarting DeVIDE.
        reload(resources.python.tfeditorframe)

        self._view_frame = module_utils.instantiate_module_view_frame(
            self, self._module_manager,
            resources.python.tfeditorframe.TFEditorFrame)

        module_utils.create_standard_object_introspection(
            self, self._view_frame, self._view_frame.view_frame_panel,
            {'Module (self)' : self})

        # add the ECASH buttons
        module_utils.create_eoca_buttons(self, self._view_frame,
                                         self._view_frame.view_frame_panel)

        # and customize the presets choice
        vf = self._view_frame
        keys = TF_LIBRARY.keys()
        keys.sort()
        vf.preset_choice.Clear()
        for key in keys:
            vf.preset_choice.Append(key)
        vf.preset_choice.Select(0)

    def close(self):
        # Disconnect all inputs, then destroy the GUI before tearing down
        # the module base.
        for i in range(len(self.get_input_descriptions())):
            self.set_input(i, None)

        self._view_frame.Destroy()
        del self._view_frame

        ModuleBase.close(self)

    def get_input_descriptions(self):
        return ('Optional input volume',)

    def get_output_descriptions(self):
        return ('VTK Opacity Transfer Function',
                'VTK Colour Transfer Function',
                'VTK Lookup Table')

    def set_input(self, idx, input_stream):
        # Only one input port, so idx is ignored.
        self._volume_input = input_stream

    def get_output(self, idx):
        if idx == 0:
            return self._opacity_tf
        elif idx == 1:
            return self._colour_tf
        else:
            return self._lut

    def logic_to_config(self):
        # The config list is the source of truth; nothing flows back.
        pass

    def config_to_logic(self):
        """Rebuild the three VTK outputs from the control-point list."""
        self._opacity_tf.RemoveAllPoints()
        self._colour_tf.RemoveAllPoints()

        for p in self._config.transfer_function:
            self._opacity_tf.AddPoint(p[0], p[2])
            # Colours are stored 0-255; VTK wants 0.0-1.0.
            r, g, b = [i / 255.0 for i in p[1]]
            self._colour_tf.AddRGBPoint(
                p[0], r, g, b)

        # Sample both transfer functions into a discrete lookup table.
        lut_res = 1024
        minmax = self._view_frame.tf_widget.get_min_max()
        self._lut.SetTableRange(minmax)
        self._lut.SetNumberOfTableValues(lut_res)
        # lut_res - 1: lut_res points == lut_res-1 intervals
        incr = (minmax[1] - minmax[0]) / float(lut_res - 1)
        for i in range(lut_res):
            v = minmax[0] + i * incr
            rgb = self._colour_tf.GetColor(v)
            o = self._opacity_tf.GetValue(v)
            self._lut.SetTableValue(i, rgb + (o,))

    def view_to_config(self):
        self._config.transfer_function = \
            self._view_frame.tf_widget.get_transfer_function()

    def config_to_view(self):
        vf = self._view_frame
        tfw = vf.tf_widget
        tfw.set_transfer_function(
            self._config.transfer_function)

        # Reflect the widget's scalar range in the min/max text fields.
        min, max = tfw.get_min_max()
        vf.scalar_min_text.SetValue('%.1f' % (min,))
        vf.scalar_max_text.SetValue('%.1f' % (max,))

    def view(self):
        self._view_frame.Show()
        self._view_frame.Raise()

    def execute_module(self):
        # Outputs are kept current by config_to_logic(); nothing to execute.
        pass

    def _load_tf_from_file(self, filename):
        """Load a .dvtf file and install it in the editor widget.

        NOTE(review): the file contents are eval()'d (with empty builtins)
        -- do not load .dvtf files from untrusted sources.
        """
        try:
            loadf = file(filename, 'r')
            tf = eval(loadf.read(), {"__builtins__": {}})
            loadf.close()
        except Exception, e:
            self._module_manager.log_error_with_exception(
                'Could not load transfer function: %s.' %
                (str(e),))
        else:
            self._view_frame.tf_widget.set_transfer_function(
                tf)

    def _save_tf_to_file(self, filename):
        """Write the widget's current transfer function to a .dvtf file."""
        tf = self._view_frame.tf_widget.get_transfer_function()
        try:
            savef = file(filename, 'w')
            savef.write("# DeVIDE Transfer Function DVTF v1.0\n%s" % \
                        (str(tf),))
            savef.close()
        except Exception, e:
            self._module_manager.log_error(
                'Error saving transfer function: %s.' % (str(e),))
        else:
            self._module_manager.log_message(
                'Saved %s.' % (filename,))
| 2.015625 | 2 |
Data_processing/create_station_json.py | oyvorha/TIO4905 | 0 | 12772547 | import pandas as pd
import json
# Input spreadsheet and model parameters.
file = '~/stations.xlsx'  # NOTE(review): shadows the Python 2 builtin `file`
sheet_name = 'Data'
# One column group per demand scenario in the spreadsheet.
scenarios = ['A', 'B', 'C', 'D', 'E']
# Fractions splitting bike load into battery-equipped vs flat (non-battery).
flat_rate = 0.3
battery_rate = 0.7
# Length of one planning interval -- presumably minutes; confirm against
# the spreadsheet's rate columns.
length_time_interval = 120

# station_id -> [latitude, longitude, {scenario: metrics}]
stations = {}


def read_excel():
    """Populate the module-level `stations` dict from the spreadsheet.

    For every station row and every scenario, derives per-interval rates
    (battery/flat incoming, outgoing, demand) and stores them as
    [init_load, init_flat_load, incoming_battery_rate, incoming_flat_rate,
    outgoing_rate, demand].
    """
    df_stations = pd.read_excel(file, sheet_name)
    for index, row in df_stations.iterrows():
        interval_scenarios = {}
        for scenario in scenarios:
            # Split the starting load into battery and flat fractions.
            init_load = round(battery_rate * float(row[scenario+'_start_load']), 3)
            init_flat_load = round(flat_rate * float(row[scenario + '_start_load']), 3)
            # Convert per-interval totals to per-time-unit rates.
            incoming_battery_rate = round(battery_rate * float(row[scenario + '_incoming'])/length_time_interval, 3)
            incoming_flat_rate = round(flat_rate * float(row[scenario + '_incoming'])/length_time_interval, 3)
            outgoing_rate = round(float(row[scenario + '_outgoing_rate']) / length_time_interval, 3)
            demand = calculate_demand(float(row[scenario + '_outgoing_rate']), row[scenario+'_empty'])
            interval_scenarios[scenario] = [init_load, init_flat_load, incoming_battery_rate, incoming_flat_rate,
                                            outgoing_rate, demand]
        stations[int(row['Station_ID'])] = [row['Latitude'], row['Longitude'], interval_scenarios]
def calculate_demand(trips, empty_time):
    """Return trips per non-empty time unit, rounded to two decimals."""
    active_time = length_time_interval - empty_time
    return round(trips / active_time, 2)
def write_json(json_element):
    """Serialize *json_element* to ./station.json."""
    serialized = json.dumps(json_element)
    with open('station.json', 'w') as out_file:
        out_file.write(serialized)
# Script entry: read the spreadsheet, then dump the result to station.json.
read_excel()
write_json(stations)
| 3.09375 | 3 |
Assignments/Assignment1/python/src/producerConsumer.py | r4ghu/CSC564-Concurrency | 0 | 12772548 | from utils import Semaphore, Thread, Buffer, execution_manager
import random
import time
class WaitForEvent:
    """Trivial event payload whose `process` step reports completion."""

    def __init__(self, data):
        self.data = data

    def process(self):
        message = "Finished consuming event {}".format(self.data)
        print(message)
# Shared producer/consumer state: `mutex` guards buffer access, `items`
# counts available events (classic counting-semaphore pattern), and
# `buffer` holds the queued events.
mutex = Semaphore(1)
items = Semaphore(0)
buffer = Buffer()
def delay(n=1):
    """Sleep for a uniformly random duration in [0, n) seconds."""
    duration = random.random() * n
    return time.sleep(duration)
def Producer():
    """Endlessly create numbered events and enqueue them.

    Buffer access is guarded by `mutex`; `items` is signalled after every
    enqueue so a waiting consumer wakes up.
    """
    global buffer
    data = 0
    while True:
        delay(2)
        event = WaitForEvent(data)
        print("Producing event {}".format(event.data))
        mutex.wait()
        buffer.add(event)
        mutex.signal()
        items.signal()
        data += 1
def Consumer():
    """Endlessly dequeue events and process them.

    Waits on `items` before touching the buffer so it never dequeues from
    an empty queue; `mutex` guards the dequeue itself.
    """
    global buffer
    while True:
        items.wait()
        mutex.wait()
        event = buffer.get()
        mutex.signal()
        print("Consuming event {}".format(event.data))
        delay()
        event.process()
#execution_manager() # Forces a global parent thread
# Spawn one producer and one consumer thread (utils.Thread starts them).
Thread(Producer)
Thread(Consumer)
| 3.359375 | 3 |
deep_tracking/deeptracking/data/sensors/sensorbase.py | birlrobotics/birlBaxter_demos | 24 | 12772549 | import abc
class SensorBase(abc.ABC):
    """Abstract interface for a sensor providing frames and intrinsics.

    BUG FIX: the class previously derived from plain object, so the
    @abc.abstractmethod decorators were inert -- SensorBase itself and
    incomplete subclasses could be instantiated.  Deriving from abc.ABC
    makes the abstract contract enforceable.
    """

    @abc.abstractmethod
    def start(self):
        """Begin acquisition."""

    @abc.abstractmethod
    def stop(self):
        """Stop acquisition."""

    @abc.abstractmethod
    def intrinsics(self):
        """
        return CameraParameters
        :return:
        """

    @abc.abstractmethod
    def get_frame(self, block=True):
        """Return the next frame; if *block*, wait until one is available."""
mbus/MBusRecord.py | droid4control/python-mbus | 23 | 12772550 | from ctypes import Structure, Union, c_char_p, c_double, c_int, c_byte, \
c_long, POINTER
# Convenience alias: pointer to signed byte, used for raw record payloads.
c_byte_p = POINTER(c_byte)


class MBusString(Structure):
    """Counted byte string: a byte pointer plus an explicit length."""
    _fields_ = [
        ('value', c_byte_p),  # pointer to the raw bytes
        ('size', c_int),      # number of bytes pointed to
    ]
class MBusValue(Union):
    """Record value: either a numeric double or a counted byte string
    (which member is valid is indicated by MBusRecord.is_numeric)."""
    _fields_ = [
        ('real_val', c_double),
        ('str_val', MBusString),
    ]
class MBusRecord(Structure):
    """ctypes layout of an M-Bus data record -- presumably mirroring
    libmbus's record struct; confirm field order against the C header."""
    _fields_ = [
        ('value', MBusValue),       # payload; see is_numeric for which member
        ('is_numeric', c_byte),     # non-zero -> value.real_val is valid
        ('unit', c_char_p),
        ('function_medium', c_char_p),
        ('quantity', c_char_p),
        ('device', c_int),
        ('tariff', c_long),
        ('storage_number', c_long),
    ]
| 2.609375 | 3 |