hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
31ecb15b99e3ceb267fe3088d539b5b22c952d38 | 1,346 | py | Python | flink-ai-flow/examples/workflow_on_event/workflows/init/init.py | lisy09/flink-ai-extended | 011a5a332f7641f66086653e715d0596eab2e107 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | flink-ai-flow/examples/workflow_on_event/workflows/init/init.py | lisy09/flink-ai-extended | 011a5a332f7641f66086653e715d0596eab2e107 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | flink-ai-flow/examples/workflow_on_event/workflows/init/init.py | lisy09/flink-ai-extended | 011a5a332f7641f66086653e715d0596eab2e107 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import ai_flow as af
hourly_data_dir = '/tmp/hourly_data'
process_result_base_path = '/tmp/hourly_processed'
daily_data_base_path = '/tmp/daily_data'
daily_result = '/tmp/daily_result'
if __name__ == '__main__':
af.init_ai_flow_context()
init()
| 35.421053 | 83 | 0.770431 |
31ee3bc132db64859847221802dd7bff470b9ce3 | 977 | py | Python | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/bobcat/profiles/Profile_WiSUN.py | SiliconLabs/Gecko_SDK | 991121c706578c9a2135b6f75cc88856e8c64bdc | [
"Zlib"
] | 82 | 2016-06-29T17:24:43.000Z | 2021-04-16T06:49:17.000Z | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/bobcat/profiles/Profile_WiSUN.py | SiliconLabs/Gecko_SDK | 991121c706578c9a2135b6f75cc88856e8c64bdc | [
"Zlib"
] | 2 | 2017-02-13T10:07:17.000Z | 2017-03-22T21:28:26.000Z | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/bobcat/profiles/Profile_WiSUN.py | SiliconLabs/Gecko_SDK | 991121c706578c9a2135b6f75cc88856e8c64bdc | [
"Zlib"
] | 56 | 2016-08-02T10:50:50.000Z | 2021-07-19T08:57:34.000Z | from pyradioconfig.parts.ocelot.profiles.Profile_WiSUN import Profile_WiSUN_Ocelot
from pyradioconfig.parts.common.profiles.bobcat_regs import build_modem_regs_bobcat
from pyradioconfig.parts.common.profiles.profile_common import buildCrcOutputs, buildFecOutputs, buildFrameOutputs, \
buildWhiteOutputs | 42.478261 | 117 | 0.73695 |
31ee781effe2a319a7f8d1c8b7b12faf33878337 | 1,846 | py | Python | tests/dgds_functions_test.py | openearth/hydro-engine-service | 8e7eea489ee241dad2d6d8152d1c30af8a09a8d1 | [
"MIT"
] | 4 | 2019-02-15T13:53:01.000Z | 2021-12-13T09:53:02.000Z | tests/dgds_functions_test.py | openearth/hydro-engine-service | 8e7eea489ee241dad2d6d8152d1c30af8a09a8d1 | [
"MIT"
] | 12 | 2018-12-19T08:30:29.000Z | 2021-04-21T12:59:59.000Z | tests/dgds_functions_test.py | openearth/hydro-engine-service | 8e7eea489ee241dad2d6d8152d1c30af8a09a8d1 | [
"MIT"
] | 4 | 2018-10-17T23:48:21.000Z | 2020-08-05T18:36:14.000Z | import logging
import pytest
from . import auth
from hydroengine_service import dgds_functions
logger = logging.getLogger(__name__)
| 51.277778 | 109 | 0.538462 |
31ee7dd58797f57d854758b0971c25c71826cd28 | 2,485 | py | Python | smol_opyt/logistic_problem.py | abelsiqueira/smol-opyt | 58901906eb3129f4aae9edc7893bba624c5a0686 | [
"MIT"
] | null | null | null | smol_opyt/logistic_problem.py | abelsiqueira/smol-opyt | 58901906eb3129f4aae9edc7893bba624c5a0686 | [
"MIT"
] | 5 | 2021-08-02T02:04:48.000Z | 2021-08-02T02:27:57.000Z | smol_opyt/logistic_problem.py | abelsiqueira/smol-opyt | 58901906eb3129f4aae9edc7893bba624c5a0686 | [
"MIT"
] | null | null | null | from math import log
import numpy as np
from numpy import linalg as la
| 33.133333 | 107 | 0.534004 |
9ec42ebdeb8c357fae82c9abfd68ebde784ec5ba | 1,280 | py | Python | TeamClassificationUtils.py | Neerajj9/Computer-Vision-based-Offside-Detection-in-soccer | 744bfc636463f24c4f78f25684864c2ce4abb43f | [
"MIT"
] | 8 | 2020-10-17T14:54:53.000Z | 2022-02-09T11:03:01.000Z | TeamClassificationUtils.py | Neerajj9/Computer-Vision-based-Offside-Detection-in-soccer | 744bfc636463f24c4f78f25684864c2ce4abb43f | [
"MIT"
] | 4 | 2021-01-03T16:02:29.000Z | 2021-11-23T03:26:01.000Z | TeamClassificationUtils.py | Neerajj9/Computer-Vision-based-Offside-Detection-in-soccer | 744bfc636463f24c4f78f25684864c2ce4abb43f | [
"MIT"
] | 2 | 2021-04-10T07:05:55.000Z | 2021-09-19T23:22:18.000Z | import numpy as np
# TODO : add code for referee | 33.684211 | 109 | 0.651563 |
9ec50e4a84db3516536add2eb38a5493aef3c343 | 856 | py | Python | examples/PTSD/mpi_tmp/PTSD_cognet.py | zeroknowledgediscovery/cognet | 3acc2f05451ccbc228bf9c02e5d357b40b0c3e4f | [
"MIT"
] | null | null | null | examples/PTSD/mpi_tmp/PTSD_cognet.py | zeroknowledgediscovery/cognet | 3acc2f05451ccbc228bf9c02e5d357b40b0c3e4f | [
"MIT"
] | null | null | null | examples/PTSD/mpi_tmp/PTSD_cognet.py | zeroknowledgediscovery/cognet | 3acc2f05451ccbc228bf9c02e5d357b40b0c3e4f | [
"MIT"
] | null | null | null | from mpi4py.futures import MPIPoolExecutor
import numpy as np
import pandas as pd
from quasinet.qnet import Qnet, qdistance, load_qnet, qdistance_matrix
from quasinet.qsampling import qsample, targeted_qsample
qnet=load_qnet('../results/PTSD_cognet_test.joblib')
w = 304
h = w
p_all = pd.read_csv("tmp_samples_as_strings.csv", header=None).values.astype(str)[:]
if __name__ == '__main__':
with MPIPoolExecutor() as executor:
result = executor.map(dfunc_line, range(h))
result = pd.DataFrame(result)
result = result.to_numpy()
result = pd.DataFrame(np.maximum(result, result.transpose()))
result.to_csv('tmp_distmatrix.csv',index=None,header=None) | 27.612903 | 84 | 0.73715 |
9ec518765538fd6d2d3d18e0ed23d60b0ac69f7f | 58 | py | Python | tests/__init__.py | bio2bel/famplex | 3a1dfb0f3da3eb33c2b4de658cf02ffb6b5bebaa | [
"MIT"
] | null | null | null | tests/__init__.py | bio2bel/famplex | 3a1dfb0f3da3eb33c2b4de658cf02ffb6b5bebaa | [
"MIT"
] | 3 | 2018-07-24T14:32:41.000Z | 2018-08-10T11:17:49.000Z | tests/__init__.py | bio2bel/famplex | 3a1dfb0f3da3eb33c2b4de658cf02ffb6b5bebaa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Tests for Bio2BEL FamPlex."""
| 14.5 | 32 | 0.551724 |
9ec5885a6003a25f321416770e39cf31583e933d | 4,778 | py | Python | dfainductor/algorithms/searchers.py | ctlab/DFA-Inductor-py | c9f0906101a4c83f125ab8c487dc2eac7a52d310 | [
"MIT"
] | 2 | 2020-06-03T11:27:45.000Z | 2021-08-30T04:14:48.000Z | dfainductor/algorithms/searchers.py | ctlab/DFA-Inductor-py | c9f0906101a4c83f125ab8c487dc2eac7a52d310 | [
"MIT"
] | 1 | 2021-07-14T18:43:58.000Z | 2021-07-14T18:43:58.000Z | dfainductor/algorithms/searchers.py | ctlab/DFA-Inductor-py | c9f0906101a4c83f125ab8c487dc2eac7a52d310 | [
"MIT"
] | null | null | null | from typing import List
from pysat.solvers import Solver
from ..variables import VarPool
from .reductions import ClauseGenerator
from ..examples import BaseExamplesProvider
from ..logging_utils import *
from ..statistics import STATISTICS
from ..structures import APTA, DFA, InconsistencyGraph
| 43.834862 | 102 | 0.547928 |
9ec5b4570de1244cfecc950781db192eb22b2b73 | 22,697 | py | Python | lc_sqlalchemy_dbutils/manager.py | libcommon/sqlalchemy-dbutils-py | 39b2fb0fc51279a4d1c8a2b6fe250f8cff44d1b1 | [
"MIT"
] | null | null | null | lc_sqlalchemy_dbutils/manager.py | libcommon/sqlalchemy-dbutils-py | 39b2fb0fc51279a4d1c8a2b6fe250f8cff44d1b1 | [
"MIT"
] | null | null | null | lc_sqlalchemy_dbutils/manager.py | libcommon/sqlalchemy-dbutils-py | 39b2fb0fc51279a4d1c8a2b6fe250f8cff44d1b1 | [
"MIT"
] | null | null | null | ## -*- coding: UTF8 -*-
## manager.py
## Copyright (c) 2020 libcommon
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
from getpass import getpass
import os
from pathlib import Path
from typing import Any, Optional, Union
from sqlalchemy import create_engine as sqla_create_engine, MetaData
from sqlalchemy.engine import Engine
from sqlalchemy.engine.url import make_url, URL
from sqlalchemy.orm import scoped_session as ScopedSession, Session, sessionmaker as SessionMaker
from sqlalchemy.orm.query import Query
__author__ = "libcommon"
DBManagerSessionFactory = Union[ScopedSession, SessionMaker]
DBManagerSession = Union[ScopedSession, Session]
ConnectionURL = Union[str, URL]
if os.environ.get("ENVIRONMENT") == "TEST":
import unittest
from unittest.mock import patch, mock_open
from tests.common import BaseTable, User
| 40.821942 | 119 | 0.628233 |
9ec6363df3d16f3e41bfd55d3ca8396d912ca17a | 160 | py | Python | mazeexperiment/__main__.py | NickAnderegg/rpacr-mazeexperiment | 3afe6afb10b4ad61a169645e59f2ad0d0f92f565 | [
"MIT"
] | null | null | null | mazeexperiment/__main__.py | NickAnderegg/rpacr-mazeexperiment | 3afe6afb10b4ad61a169645e59f2ad0d0f92f565 | [
"MIT"
] | null | null | null | mazeexperiment/__main__.py | NickAnderegg/rpacr-mazeexperiment | 3afe6afb10b4ad61a169645e59f2ad0d0f92f565 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""mazeexperiment.__main__: executed when mazeexperiment directory is called as script."""
from .mazeexperiment import main
main()
| 20 | 90 | 0.71875 |
9ec70101de03b36989296a10649d2dea72a92c80 | 1,608 | py | Python | kafka_demo_1/producer.py | Aguinore/udemy_kafka_demo | 5f8383e1381dba2ddc0fc656b3cdc66b98258aad | [
"MIT"
] | null | null | null | kafka_demo_1/producer.py | Aguinore/udemy_kafka_demo | 5f8383e1381dba2ddc0fc656b3cdc66b98258aad | [
"MIT"
] | null | null | null | kafka_demo_1/producer.py | Aguinore/udemy_kafka_demo | 5f8383e1381dba2ddc0fc656b3cdc66b98258aad | [
"MIT"
] | null | null | null | from tweepy import StreamListener, OAuthHandler, Stream
from configs import Configs
import sys
configs = Configs()
producer = None
try:
producer = create_kafka_producer()
client = create_twitter_client(producer, configs)
client.filter(track=configs.twitter_topics)
finally:
exit_gracefully(producer)
| 26.8 | 88 | 0.697139 |
9ec74c2b027410af0c055e866b7e76cb8dc5f04e | 1,717 | py | Python | demo/examples/stability/advection_d2q4.py | bgraille/pylbm | fd4419933e05b85be364232fddedfcb4f7275e1f | [
"BSD-3-Clause"
] | 106 | 2016-09-13T07:19:17.000Z | 2022-03-19T13:41:55.000Z | demo/examples/stability/advection_d2q4.py | gouarin/pylbm | fd4419933e05b85be364232fddedfcb4f7275e1f | [
"BSD-3-Clause"
] | 53 | 2017-09-18T04:51:19.000Z | 2022-01-19T21:36:23.000Z | demo/examples/stability/advection_d2q4.py | gouarin/pylbm | fd4419933e05b85be364232fddedfcb4f7275e1f | [
"BSD-3-Clause"
] | 33 | 2016-06-17T13:21:17.000Z | 2021-11-11T16:57:46.000Z |
"""
Stability analysis of the
D2Q4 solver for the advection equation
d_t(u) + c_x d_x(u) + c_y d_y(u) = 0
"""
import sympy as sp
import pylbm
# pylint: disable=invalid-name
# symbolic variables
U, X, Y = sp.symbols('U, X, Y')
# symbolic parameters
LA, CX, CY = sp.symbols('lambda, cx, cy', constants=True)
S_1, S_2 = sp.symbols('s1, s2', constants=True)
# numerical parameters
la = 1. # velocity of the scheme
s_1, s_2 = 2., 1. # relaxation parameters
c_x, c_y = 0.5, 0.25 # velocity of the advection equation
dico = {
'dim': 2,
'scheme_velocity': LA,
'schemes': [
{
'velocities': [1, 2, 3, 4],
'conserved_moments': U,
'polynomials': [1, X, Y, X**2-Y**2],
'relaxation_parameters': [0, S_1, S_1, S_2],
'equilibrium': [
U,
CX*U, CY*U,
(CX**2-CY**2)*U
],
},
],
'parameters': {
LA: la,
S_1: s_1,
S_2: s_2,
CX: c_x,
CY: c_y,
},
'relative_velocity': [CX, CY],
}
scheme = pylbm.Scheme(dico)
stab = pylbm.Stability(scheme)
stab.visualize({
'parameters': {
CX: {
'range': [0, 1],
'init': c_x,
'step': 0.01,
},
CY: {
'range': [0, 1],
'init': c_y,
'step': 0.01,
},
S_1: {
'name': r"$s_1$",
'range': [0, 2],
'init': s_1,
'step': 0.01,
},
S_2: {
'name': r"$s_2$",
'range': [0, 2],
'init': s_2,
'step': 0.01,
},
},
'number_of_wave_vectors': 4096,
})
| 20.939024 | 58 | 0.438556 |
9ec7841a173dc4c19d7dac5f98e4c9ddedd5460c | 157 | py | Python | glimix_core/_util/_array.py | Horta/limix-inference | 1ba102fc544f8d307412d361b574da9d4c166f8e | [
"MIT"
] | 7 | 2019-06-10T12:27:25.000Z | 2021-07-23T16:36:04.000Z | glimix_core/_util/_array.py | Horta/limix-inference | 1ba102fc544f8d307412d361b574da9d4c166f8e | [
"MIT"
] | 12 | 2017-05-28T10:59:31.000Z | 2021-05-17T20:11:00.000Z | glimix_core/_util/_array.py | Horta/limix-inference | 1ba102fc544f8d307412d361b574da9d4c166f8e | [
"MIT"
] | 5 | 2017-08-27T20:13:45.000Z | 2022-02-14T06:33:14.000Z | from numpy import reshape
| 15.7 | 53 | 0.611465 |
9ec859c40962ecf3e9c555e76fd3db0d87f04e0f | 3,386 | py | Python | src/tests/component/test_engine_manager.py | carbonblack/cbc-binary-toolkit | 92c90b80e3c3e0b5c2473ef2086d2ce2fb651db4 | [
"MIT"
] | 8 | 2020-05-12T18:08:52.000Z | 2021-12-27T06:11:00.000Z | src/tests/component/test_engine_manager.py | carbonblack/cbc-binary-toolkit | 92c90b80e3c3e0b5c2473ef2086d2ce2fb651db4 | [
"MIT"
] | 4 | 2020-05-13T16:07:49.000Z | 2020-06-30T18:47:14.000Z | src/tests/component/test_engine_manager.py | carbonblack/cbc-binary-toolkit | 92c90b80e3c3e0b5c2473ef2086d2ce2fb651db4 | [
"MIT"
] | 3 | 2020-05-16T19:57:57.000Z | 2020-11-01T08:43:31.000Z | # -*- coding: utf-8 -*-
# *******************************************************
# Copyright (c) VMware, Inc. 2020-2021. All Rights Reserved.
# SPDX-License-Identifier: MIT
# *******************************************************
# *
# * DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
# * WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
# * EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
# * WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
# * NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""Unit tests for the analysis engine"""
import pytest
from cbc_binary_toolkit import InitializationError
from cbc_binary_toolkit.config import Config
from cbc_binary_toolkit.engine import LocalEngineManager
from cbc_binary_toolkit.schemas import EngineResponseSchema
from tests.component.engine_fixtures.mock_engine import MockLocalEngine
from tests.component.schema_fixtures.mock_data import VALID_BINARY_METADATA, MISSING_FIELDS_BINARY_METADATA
ENGINE_NAME = "MockEngine"
# ==================================== Unit TESTS BELOW ====================================
def test_create_engine(config):
"""Test successful creation of MockLocalEngine"""
manager = LocalEngineManager(config)
assert isinstance(manager.create_engine(), MockLocalEngine)
def test_analyze(config):
"""Test analyze pass through"""
manager = LocalEngineManager(config)
assert EngineResponseSchema.validate(manager.analyze(VALID_BINARY_METADATA))
| 30.781818 | 107 | 0.672475 |
9ec95a1a1ec287a29e316037c8a1f39e97c4bff8 | 97 | py | Python | funolympics/apps.py | codeema/Yokiyo | 2e710bca487ee393784c116b7db2db7337f73d40 | [
"MIT"
] | null | null | null | funolympics/apps.py | codeema/Yokiyo | 2e710bca487ee393784c116b7db2db7337f73d40 | [
"MIT"
] | 6 | 2020-05-20T15:29:55.000Z | 2021-09-08T02:02:43.000Z | funolympics/apps.py | codeema/Yokiyo | 2e710bca487ee393784c116b7db2db7337f73d40 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 16.166667 | 35 | 0.773196 |
9eca8cb06280c8af6786e7a410286dc58b44dac0 | 5,734 | py | Python | src/gt4sd/algorithms/generation/polymer_blocks/core.py | hhhsu0825/gt4sd-core | 4a1fe9da58d2f33bba2fba64604427e037ad7a46 | [
"MIT"
] | null | null | null | src/gt4sd/algorithms/generation/polymer_blocks/core.py | hhhsu0825/gt4sd-core | 4a1fe9da58d2f33bba2fba64604427e037ad7a46 | [
"MIT"
] | null | null | null | src/gt4sd/algorithms/generation/polymer_blocks/core.py | hhhsu0825/gt4sd-core | 4a1fe9da58d2f33bba2fba64604427e037ad7a46 | [
"MIT"
] | null | null | null | """PaccMann vanilla generator trained on polymer building blocks (catalysts/monomers)."""
import logging
import os
from dataclasses import field
from typing import ClassVar, Dict, Optional, TypeVar
from ....domains.materials import SmallMolecule, validate_molecules
from ....exceptions import InvalidItem
from ....training_pipelines.core import TrainingPipelineArguments
from ....training_pipelines.paccmann.core import PaccMannSavingArguments
from ...core import AlgorithmConfiguration, GeneratorAlgorithm, Untargeted
from ...registry import ApplicationsRegistry
from .implementation import Generator
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
T = type(None)
S = TypeVar("S", bound=SmallMolecule)
| 35.614907 | 108 | 0.646495 |
9ecbaf805798824811c8f44248c90470a6ab1527 | 4,458 | py | Python | src/form/panel/MultiPanel.py | kaorin/vmd_sizing | e609299a0acaa17bd34487314b05bab6af6819d8 | [
"MIT"
] | 32 | 2019-05-05T13:08:51.000Z | 2022-03-11T07:13:27.000Z | src/form/panel/MultiPanel.py | kaorin/vmd_sizing | e609299a0acaa17bd34487314b05bab6af6819d8 | [
"MIT"
] | 3 | 2019-07-13T03:06:15.000Z | 2021-11-03T10:30:15.000Z | src/form/panel/MultiPanel.py | kaorin/vmd_sizing | e609299a0acaa17bd34487314b05bab6af6819d8 | [
"MIT"
] | 11 | 2019-07-15T17:49:09.000Z | 2022-03-20T10:40:27.000Z | # -*- coding: utf-8 -*-
#
import wx
import wx.lib.newevent
from form.panel.BasePanel import BasePanel
from form.parts.SizingFileSet import SizingFileSet
from module.MMath import MRect, MVector3D, MVector4D, MQuaternion, MMatrix4x4 # noqa
from utils import MFileUtils # noqa
from utils.MLogger import MLogger # noqa
logger = MLogger(__name__)
| 42.056604 | 136 | 0.680126 |
9ecd3fdffb0348d1335d2b0ee06d51e7c7681296 | 1,261 | py | Python | androgui.py | nawfling/androguard | 67b992ce0feeeb01bc69a99257916487689c3bcf | [
"Apache-2.0"
] | 1 | 2019-03-29T19:24:23.000Z | 2019-03-29T19:24:23.000Z | androgui.py | adiltirur/malware_classification | 67b992ce0feeeb01bc69a99257916487689c3bcf | [
"Apache-2.0"
] | null | null | null | androgui.py | adiltirur/malware_classification | 67b992ce0feeeb01bc69a99257916487689c3bcf | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Androguard Gui"""
import argparse
import os
import sys
from androguard.core import androconf
from androguard.gui.mainwindow import MainWindow
from PyQt5 import QtWidgets, QtGui
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Androguard GUI")
parser.add_argument("-d", "--debug", action="store_true", default=False)
parser.add_argument("-i", "--input_file", default=None)
parser.add_argument("-p", "--input_plugin", default=None)
args = parser.parse_args()
if args.debug:
androconf.set_debug()
# We need that to save huge sessions when leaving and avoid
# RuntimeError: maximum recursion depth exceeded while pickling an object
# or
# RuntimeError: maximum recursion depth exceeded in cmp
# http://stackoverflow.com/questions/2134706/hitting-maximum-recursion-depth-using-pythons-pickle-cpickle
sys.setrecursionlimit(50000)
app = QtWidgets.QApplication(sys.argv)
app.setWindowIcon(QtGui.QIcon(os.path.join(androconf.CONF['data_prefix'], "androguard.ico")))
window = MainWindow(input_file=args.input_file,
input_plugin=args.input_plugin)
window.resize(1024, 768)
window.show()
sys.exit(app.exec_())
| 31.525 | 109 | 0.716891 |
9ecd99d19c3e1460adaaef7fa6dcf5ae53718429 | 2,551 | py | Python | python-trunk/sfapi2/sflib/ZSI/wstools/XMLname.py | raychorn/svn_molten-magma | 8aa2ff2340707eecae6514943e86f5afba9cd54a | [
"CC0-1.0"
] | null | null | null | python-trunk/sfapi2/sflib/ZSI/wstools/XMLname.py | raychorn/svn_molten-magma | 8aa2ff2340707eecae6514943e86f5afba9cd54a | [
"CC0-1.0"
] | null | null | null | python-trunk/sfapi2/sflib/ZSI/wstools/XMLname.py | raychorn/svn_molten-magma | 8aa2ff2340707eecae6514943e86f5afba9cd54a | [
"CC0-1.0"
] | null | null | null | """Translate strings to and from SOAP 1.2 XML name encoding
Implements rules for mapping application defined name to XML names
specified by the w3 SOAP working group for SOAP version 1.2 in
Appendix A of "SOAP Version 1.2 Part 2: Adjuncts", W3C Working Draft
17, December 2001, <http://www.w3.org/TR/soap12-part2/#namemap>
Also see <http://www.w3.org/2000/xp/Group/xmlp-issues>.
Author: Gregory R. Warnes <gregory_r_warnes@groton.pfizer.com>
Date:: 2002-04-25
Version 0.9.0
"""
ident = "$Id: XMLname.py 25 2006-05-24 18:12:14Z misha $"
from re import *
def toXMLname(string):
"""Convert string to a XML name."""
if string.find(':') != -1 :
(prefix, localname) = string.split(':',1)
else:
prefix = None
localname = string
T = unicode(localname)
N = len(localname)
X = [];
for i in range(N) :
if i< N-1 and T[i]==u'_' and T[i+1]==u'x':
X.append(u'_x005F_')
elif i==0 and N >= 3 and \
( T[0]==u'x' or T[0]==u'X' ) and \
( T[1]==u'm' or T[1]==u'M' ) and \
( T[2]==u'l' or T[2]==u'L' ):
X.append(u'_xFFFF_' + T[0])
elif (not _NCNameChar(T[i])) or (i==0 and not _NCNameStartChar(T[i])):
X.append(_toUnicodeHex(T[i]))
else:
X.append(T[i])
return u''.join(X)
def fromXMLname(string):
"""Convert XML name to unicode string."""
retval = sub(r'_xFFFF_','', string )
retval = sub(r'_x[0-9A-Za-z]+_', fun, retval )
return retval
| 28.662921 | 79 | 0.547236 |
9ecf156b5761ad136db575bc3923db3ea214ba15 | 5,939 | py | Python | mmtbx/validation/regression/tst_restraints.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 155 | 2016-11-23T12:52:16.000Z | 2022-03-31T15:35:44.000Z | mmtbx/validation/regression/tst_restraints.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 590 | 2016-12-10T11:31:18.000Z | 2022-03-30T23:10:09.000Z | mmtbx/validation/regression/tst_restraints.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 115 | 2016-11-15T08:17:28.000Z | 2022-02-09T15:30:14.000Z |
from __future__ import absolute_import, division, print_function
from libtbx.utils import null_out
from libtbx import easy_pickle
from six.moves import cStringIO as StringIO
if (__name__ == "__main__"):
exercise_simple()
print("OK")
| 51.198276 | 79 | 0.506651 |
9ecfe7e3194f0f7656e10dd2b39c230900905bf9 | 887 | py | Python | Python/repeated-dna-sequences.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | Python/repeated-dna-sequences.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | Python/repeated-dna-sequences.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | # Time: O(n)
# Space: O(n)
import collections
| 25.342857 | 79 | 0.476888 |
9ecff0d2def72853bb2077007cb31a53e1e71834 | 231 | py | Python | recipe/app.py | Udayan-Coding/examples | 720515bf614f4edd08c734cc5a708d8a2618522d | [
"MIT"
] | 1 | 2021-01-04T17:17:14.000Z | 2021-01-04T17:17:14.000Z | recipe/app.py | Udayan-Coding/examples | 720515bf614f4edd08c734cc5a708d8a2618522d | [
"MIT"
] | null | null | null | recipe/app.py | Udayan-Coding/examples | 720515bf614f4edd08c734cc5a708d8a2618522d | [
"MIT"
] | 1 | 2021-01-31T11:10:44.000Z | 2021-01-31T11:10:44.000Z | from flask import Flask, render_template, request
app = Flask(__name__)
| 19.25 | 53 | 0.709957 |
9ed032bb75772e44674a7c37bb30bc62c636bc41 | 3,695 | py | Python | step2.py | mosheliv/tfcollab1 | 50da5683fb40a50cb957aeca2d28bc9f72440813 | [
"MIT"
] | null | null | null | step2.py | mosheliv/tfcollab1 | 50da5683fb40a50cb957aeca2d28bc9f72440813 | [
"MIT"
] | null | null | null | step2.py | mosheliv/tfcollab1 | 50da5683fb40a50cb957aeca2d28bc9f72440813 | [
"MIT"
] | null | null | null | """
Usage:
# From tensorflow/models/
# Create train data:
python generate_tfrecord.py --csv_input=data/train_labels.csv --output_path=train.record
# Create test data:
python generate_tfrecord.py --csv_input=data/test_labels.csv --output_path=test.record
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import io
import pandas as pd
import tensorflow as tf
from PIL import Image
from collections import namedtuple, OrderedDict
flags = tf.app.flags
flags.DEFINE_string('image_dir', '', 'Path to the image directory')
flags.DEFINE_string('csv_input', '', 'Path to the CSV input')
flags.DEFINE_string('output_path', '', 'Path to output TFRecord')
FLAGS = flags.FLAGS
# TO-DO replace this with label map
if __name__ == '__main__':
tf.app.run()
| 32.991071 | 96 | 0.700677 |
9ed0bf65b8f404e11c189c592c88427ef28a69fc | 685 | py | Python | lh_lib/sensors/esp32/touch.py | lh70/s-connect-python | 5a4ca17690ec700b36faf69ea744c514f532cc48 | [
"Apache-2.0"
] | null | null | null | lh_lib/sensors/esp32/touch.py | lh70/s-connect-python | 5a4ca17690ec700b36faf69ea744c514f532cc48 | [
"Apache-2.0"
] | null | null | null | lh_lib/sensors/esp32/touch.py | lh70/s-connect-python | 5a4ca17690ec700b36faf69ea744c514f532cc48 | [
"Apache-2.0"
] | null | null | null | from machine import Pin
from lh_lib.sensors.sensor import AbstractSensor
| 28.541667 | 130 | 0.636496 |
9ed2d77b6c8c12c27e466fb716c2e65ea3ea3aaa | 2,579 | py | Python | squeeze_and_excitation_networks/datasets/data_loader.py | younnggsuk/CV-Paper-Implementation | fecd67d3f216872976f9b38445ce1c1f9ef1ac02 | [
"MIT"
] | 4 | 2021-06-03T13:56:51.000Z | 2021-11-05T06:22:25.000Z | densely_connected_convolutional_networks/datasets/data_loader.py | younnggsuk/CV-Paper-Implementation | fecd67d3f216872976f9b38445ce1c1f9ef1ac02 | [
"MIT"
] | null | null | null | densely_connected_convolutional_networks/datasets/data_loader.py | younnggsuk/CV-Paper-Implementation | fecd67d3f216872976f9b38445ce1c1f9ef1ac02 | [
"MIT"
] | 1 | 2022-03-28T09:34:03.000Z | 2022-03-28T09:34:03.000Z | import os
import cv2
import albumentations as A
from albumentations.pytorch import ToTensorV2
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
__all__ = ['CatDogDataset', 'fetch_dataloader']
| 29.643678 | 83 | 0.579294 |
9ed44c7c52a922019ce69deffde3525039c1362a | 4,203 | py | Python | seq2seq_utils.py | mumbihere/summarizer | c230115c7d2d3bb659e9a0e402266178743f8de6 | [
"MIT"
] | null | null | null | seq2seq_utils.py | mumbihere/summarizer | c230115c7d2d3bb659e9a0e402266178743f8de6 | [
"MIT"
] | null | null | null | seq2seq_utils.py | mumbihere/summarizer | c230115c7d2d3bb659e9a0e402266178743f8de6 | [
"MIT"
] | null | null | null | from keras.preprocessing.text import text_to_word_sequence
from keras.models import Sequential
from keras.layers import Activation, TimeDistributed, Dense, RepeatVector, recurrent, Embedding
from keras.layers.recurrent import LSTM
from keras.optimizers import Adam, RMSprop
from nltk import FreqDist
import numpy as np
import os
import datetime | 39.650943 | 169 | 0.664287 |
9ed4b01964cfce5140c8270d443eb2c516032d63 | 2,830 | py | Python | SAMAE/data/__init__.py | Lisa-pa/SAMAE | 8d52fd6f8c2634c82f2071233e9796ea322f6360 | [
"MIT"
] | null | null | null | SAMAE/data/__init__.py | Lisa-pa/SAMAE | 8d52fd6f8c2634c82f2071233e9796ea322f6360 | [
"MIT"
] | 4 | 2021-03-20T09:31:02.000Z | 2022-03-12T00:51:19.000Z | SAMAE/data/__init__.py | Lisa-pa/AponeurosesDetection | 8d52fd6f8c2634c82f2071233e9796ea322f6360 | [
"MIT"
] | null | null | null |
"""Standard test images.
"""
import os
from skimage.io import imread
data_dir = os.path.abspath(os.path.dirname(__file__))
__all__ = ['data_dir', 'circle', 'skmuscimg']
def _load(f, as_gray=False):
"""Load an image file located in the data directory.
Parameters
----------
f : string
File name.
as_gray : bool, optional
Whether to convert the image to grayscale.
Returns
-------
img : ndarray
Image loaded from ``data_dir``.
"""
# importing io is quite slow since it scans all the backends
# we lazy import it here
return imread(f, as_gray=as_gray)
def circle():
"""Synthetic image of a circle
Returns
-------
circle : (xdim, ydim) bool ndarray
Circle image.
"""
return _load(os.path.join(data_dir, "circle.bmp"))
def skmuscimg():
"""Cropped US image of a musculoskeletal muscle
"""
return _load(os.path.join(data_dir, "skmuscle.jpg"))
def panoimg():
"""Panoramic US image of a musculoskeletal muscle
"""
return _load(os.path.join(data_dir, "panoramic_echo.jpg"))
def simpleimg():
"""Simple US image of a musculoskeletal muscle
"""
return _load(os.path.join(data_dir, "simple_echo.jpg"))
def downloadFromDropbox(tok, path2file):
    """Download an image from a Dropbox account.
    Args:
        tok (string): access token that connects to the wanted
        app in Dropbox account
        path2file (string): Path of the file to download, in the
        app corresponding to the above token.
    Output:
        image (numpy.ndarray): 3-channel color image, with
        coefficients' type == uint8
    Example:
        1) Register a new app in the App Console of your Dropbox
        account. Set up parameters as you want.
        2) In Dropbox>Applications>MyApp, import your data.
        3) In the settings page of MyApp, generate a token and copy it.
        It should look like a random string of letters and figures,
        as below. (!!!This access token can be used to access your
        account via the API. Dont share your access token with anyone!!!)
        > token = 'Q8yhHQ4wquAAAAAAAAABRPb9LYdKAr2WGcmhhJ8egiX4_Qak6YZwBw4GUpX9DVeb' //token not available anymore
        > path = '/cropped_20181002_153426_image.jpg'
        > dt = downloadFromDropbox(token, path);
    """
    # Local imports keep dropbox/numpy/cv2 optional unless this helper is used.
    import dropbox
    import numpy as np
    import cv2
    dbx = dropbox.Dropbox(tok)
    try:
        metadata, file = dbx.files_download(path2file)
    except dropbox.exceptions.HttpError as err:
        # Best-effort: report the HTTP failure and signal "no image" to callers.
        # NOTE(review): files_download can also raise dropbox.exceptions.ApiError
        # (e.g. path not found), which is not caught here — confirm intent.
        print('*** HTTP error', err)
        return None
    # Decode the downloaded bytes into an image; flag 1 == cv2.IMREAD_COLOR,
    # so the result is a 3-channel BGR uint8 array.
    data = np.frombuffer(file.content, np.uint8)
    image = cv2.imdecode(data, 1)
    return image
9ed4c95b11ddd761bdc51c8d9a831201ff7973eb | 1,080 | py | Python | pandas_support/test_pandas_support.py | quanbingDG/sharper | 4cd5c6b3238d5e430d5986829cc4e0bb47ab3dff | [
"MIT"
] | null | null | null | pandas_support/test_pandas_support.py | quanbingDG/sharper | 4cd5c6b3238d5e430d5986829cc4e0bb47ab3dff | [
"MIT"
] | 2 | 2021-01-13T03:39:15.000Z | 2021-01-19T08:50:18.000Z | pandas_support/test_pandas_support.py | quanbingDG/sharper | 4cd5c6b3238d5e430d5986829cc4e0bb47ab3dff | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2020/11/9 9:13
# @Author : quanbing
# @Email : quanbinks@sina.com
import pandas as pd
import numpy as np
from unittest import TestCase
from pandas_support import PandasSupport as PS
# @File : test_pandas_support.py
| 38.571429 | 99 | 0.655556 |
9ed4d88c4f6045e4df06f3ac9733b88b158d09a9 | 245 | py | Python | 08-About_scrapy/douban/main.py | jiaxiaochu/spider | 4b0f751f76a31556a91dea719873cf2979e4be94 | [
"MIT"
] | null | null | null | 08-About_scrapy/douban/main.py | jiaxiaochu/spider | 4b0f751f76a31556a91dea719873cf2979e4be94 | [
"MIT"
] | 1 | 2020-08-27T10:25:38.000Z | 2020-08-27T10:25:38.000Z | 08-About_scrapy/douban/main.py | jiaxiaochu/spider | 4b0f751f76a31556a91dea719873cf2979e4be94 | [
"MIT"
] | null | null | null | # !/Library/Frameworks/Python.framework/Versions/3.7/bin/python3
# -*- coding:utf-8 -*-
# @Author : Jiazhixiang
# cmdline,
from scrapy import cmdline
# Launch the `douban` spider programmatically — equivalent to running
# `scrapy crawl douban` from the shell.
cmdline.execute(['scrapy', 'crawl', 'douban'])
| 24.5 | 64 | 0.726531 |
9ed556610d4e386e3f7c1552b11e15722ee31053 | 1,125 | py | Python | DynamicProgramming/longestIncreasingSubsequence.py | suyash248/data_structures | 41a732cebf791ed63edbce10329251f03b763ccf | [
"Apache-2.0"
] | 7 | 2017-12-13T05:54:29.000Z | 2022-03-25T09:10:59.000Z | DynamicProgramming/longestIncreasingSubsequence.py | suyash248/data_structures | 41a732cebf791ed63edbce10329251f03b763ccf | [
"Apache-2.0"
] | null | null | null | DynamicProgramming/longestIncreasingSubsequence.py | suyash248/data_structures | 41a732cebf791ed63edbce10329251f03b763ccf | [
"Apache-2.0"
] | 4 | 2019-05-22T02:51:56.000Z | 2021-05-23T10:49:57.000Z | from Array import empty_1d_array
"""
input array : [10, 22, 9, 33, 21, 50, 41, 60]
# Element at each index `i` is representing length of longest LIS from index 0 to i in input array.
output array: [1, 2, 1, 3, 2, 4, 4, 5]
"""
# Time complexity: O(n^2)
# Space complexity: O(n)
# Demo entry point: prints the LIS length for a fixed sample array.
# NOTE(review): `lis_dp` is referenced but its definition is not visible in this
# chunk (it appears to have been stripped) — confirm it exists in the full file.
# NOTE(review): the `print` statement below is Python-2 syntax; this module will
# not parse under Python 3.
if __name__ == '__main__':
    arr = [10, 22, 9, 33, 21, 50, 41, 60]
    max_lis = lis_dp(arr)
    print "Length of longest increasing sub-sequence for given array is {}".format(max_lis)
9ed6cf9a0648712f69e8e03077835798f4836842 | 4,318 | py | Python | venv/Lib/site-packages/gevent/backdoor.py | Kiiwi/Syssel | 83705e3fd0edf40f09df950d5ce91c95586573f5 | [
"BSD-3-Clause"
] | null | null | null | venv/Lib/site-packages/gevent/backdoor.py | Kiiwi/Syssel | 83705e3fd0edf40f09df950d5ce91c95586573f5 | [
"BSD-3-Clause"
] | null | null | null | venv/Lib/site-packages/gevent/backdoor.py | Kiiwi/Syssel | 83705e3fd0edf40f09df950d5ce91c95586573f5 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2009-2014, gevent contributors
# Based on eventlet.backdoor Copyright (c) 2005-2006, Bob Ippolito
from __future__ import print_function
import sys
from code import InteractiveConsole
from gevent import socket
from gevent.greenlet import Greenlet
from gevent.hub import PY3, PYPY, getcurrent
from gevent.server import StreamServer
if PYPY:
import gc
__all__ = ['BackdoorServer']
# Ensure sys.ps1/sys.ps2 exist: the interpreter only pre-sets these prompt
# strings in interactive sessions, and the backdoor console relies on them.
try:
    sys.ps1
except AttributeError:
    sys.ps1 = '>>> '
try:
    sys.ps2
except AttributeError:
    sys.ps2 = '... '
# CLI entry point: serve an interactive console on 127.0.0.1:<PORT>.
# The `locals` mapping seeds the namespace visible to backdoor clients.
if __name__ == '__main__':
    if not sys.argv[1:]:
        print('USAGE: %s PORT' % sys.argv[0])
    else:
        BackdoorServer(('127.0.0.1', int(sys.argv[1])), locals={'hello': 'world'}).serve_forever()
9ed839d6a98ae914dcbccc4b145b5eaa923e4f41 | 7,385 | py | Python | spark/par_decompress_audio.py | droyston/spectralize | 572770e7358acc3ec433470659759c17453409f2 | [
"MIT"
] | null | null | null | spark/par_decompress_audio.py | droyston/spectralize | 572770e7358acc3ec433470659759c17453409f2 | [
"MIT"
] | null | null | null | spark/par_decompress_audio.py | droyston/spectralize | 572770e7358acc3ec433470659759c17453409f2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 17 16:12:56 2020
@author: dylanroyston
"""
# import/configure packages
import numpy as np
import pandas as pd
#import pyarrow as pa
import librosa
import librosa.display
from pathlib import Path
#import Ipython.display as ipd
#import matplotlib.pyplot as plt
from pyspark.sql import *
import pyspark.sql.functions as f
from pyspark import SparkConf, SparkContext, SQLContext
import boto3
from tinytag import TinyTag as tt
import soundfile as sf
import audioread
from pydub import AudioSegment
from io import BytesIO
#from io import BytesIO
import os
import sys
import time
import struct
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + "/lib")
#import config
time_seq = []
#####
# create local Spark instance (for non-cluster dev)
sc = SparkContext('local')
spark = SparkSession (sc)
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
# define Spark config
spark = spark_conf()
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
#####
# Function to write spark-dataframe to mySQL
#####
# function to read audio files from S3 bucket and extract tags
#####
if __name__ == '__main__':
    # Record the pipeline start time, then kick off the S3 audio ingest.
    # NOTE(review): `read_audio_files` (and `spark_conf` used above) are
    # referenced but not defined in this chunk — their bodies appear stripped;
    # confirm they exist in the full file.
    time_seq.append(['start', time.time()])
    read_audio_files()
| 27.867925 | 90 | 0.59499 |
9eda27b08876015d63b9cfdc12be859142fbbd21 | 1,073 | py | Python | get_ip_list_ru_gov.py | gil9red/SimplePyScripts | c191ce08fbdeb29377639184579e392057945154 | [
"CC-BY-4.0"
] | 117 | 2015-12-18T07:18:27.000Z | 2022-03-28T00:25:54.000Z | get_ip_list_ru_gov.py | gil9red/SimplePyScripts | c191ce08fbdeb29377639184579e392057945154 | [
"CC-BY-4.0"
] | 8 | 2018-10-03T09:38:46.000Z | 2021-12-13T19:51:09.000Z | get_ip_list_ru_gov.py | gil9red/SimplePyScripts | c191ce08fbdeb29377639184579e392057945154 | [
"CC-BY-4.0"
] | 28 | 2016-08-02T17:43:47.000Z | 2022-03-21T08:31:12.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
"""
ip .
"""
import ipaddress
import sys
import requests
# Fetch the published RuGovEdits IP-range list (JSON) from GitHub Pages.
rs = requests.get('https://jarib.github.io/anon-history/RuGovEdits/ru/latest/ranges.json')
# Bail out if the response is unusable or lacks the expected 'ranges' key.
if not rs or not rs.json() or 'ranges' not in rs.json():
    print(' ip ')
    sys.exit()
# Sort the {organisation-name: [CIDR, ...]} mapping by organisation name.
items = sorted(rs.json()['ranges'].items(), key=lambda x: x[0])
ip_counter = 0
for i, (name, ip_network_list) in enumerate(items, 1):
    print(f'{i}. {name}')
    # Enumerate each CIDR block registered for this organisation.
    for ip_network in ip_network_list:
        print(f' {ip_network}:')
        # Parse the CIDR string into an ip_network object.
        net4 = ipaddress.ip_network(ip_network)
        # Expand the network into its individual host addresses.
        for ip in net4.hosts():
            print(f' {ip}')
            ip_counter += 1
    print()
print(' ip:', ip_counter)
| 22.354167 | 90 | 0.665424 |
9edc1088501805cae0cb1dc1f360911a6998aed9 | 1,337 | py | Python | test_collection.py | Rodrun/weatherguess | 468ae8f6484ee3e3e82262ae10d845fd2d9b4267 | [
"MIT"
] | null | null | null | test_collection.py | Rodrun/weatherguess | 468ae8f6484ee3e3e82262ae10d845fd2d9b4267 | [
"MIT"
] | null | null | null | test_collection.py | Rodrun/weatherguess | 468ae8f6484ee3e3e82262ae10d845fd2d9b4267 | [
"MIT"
] | null | null | null | import unittest
import requests
from collection import Collection
| 34.282051 | 145 | 0.635004 |
9edc4b896c4673af8ba61e91bf9ac87a555fe75f | 272 | py | Python | tests/bitwiseOperations/__init__.py | mgorzkowski/abn | 3a9ac6fb0cfe9d497b6d8f26373d2af3b6ff9860 | [
"MIT"
] | 4 | 2018-04-24T15:25:55.000Z | 2022-03-08T15:01:07.000Z | tests/bitwiseOperations/__init__.py | mgorzkowski/abn | 3a9ac6fb0cfe9d497b6d8f26373d2af3b6ff9860 | [
"MIT"
] | 2 | 2021-05-04T19:44:28.000Z | 2021-05-05T11:51:15.000Z | tests/bitwiseOperations/__init__.py | mgorzkowski/abn | 3a9ac6fb0cfe9d497b6d8f26373d2af3b6ff9860 | [
"MIT"
] | null | null | null | from . import nand_tests
from . import and_tests
from . import nor_tests
from . import not_tests
from . import or_tests
from . import xor_tests
from . import rotate_left_tests
from . import rotate_right_tests
from . import shift_left_tests
from . import shift_right_tests
| 24.727273 | 32 | 0.816176 |
9edd07604a3a97e4febf7283f02a7a1e61075cbb | 36,220 | py | Python | exot/util/misc.py | ETHZ-TEC/exot_eengine | 7b7ce6cb949e1b0a02e716b03f2f9af751713b29 | [
"BSD-3-Clause"
] | null | null | null | exot/util/misc.py | ETHZ-TEC/exot_eengine | 7b7ce6cb949e1b0a02e716b03f2f9af751713b29 | [
"BSD-3-Clause"
] | null | null | null | exot/util/misc.py | ETHZ-TEC/exot_eengine | 7b7ce6cb949e1b0a02e716b03f2f9af751713b29 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2015-2020, Swiss Federal Institute of Technology (ETH Zurich)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Misc helpers"""
import math
import random
import re
import signal
import typing as t
from datetime import datetime
from enum import Enum
from functools import reduce
from inspect import isabstract
from string import ascii_letters
from subprocess import list2cmdline as _list2cmdline
from typing import Mapping as Map
import numpy as np
from exot.exceptions import *
__all__ = (
"call_with_leaves",
"dict_depth",
"dict_diff",
"find_attributes",
"flatten_dict",
"get_concrete_subclasses",
"get_subclasses",
"get_valid_access_paths",
"getitem",
"has_method",
"has_property",
"has_type",
"has_variable",
"is_abstract",
"is_scalar_numeric",
"leaves",
"list2cmdline",
"map_to_leaves",
"mro_getattr",
"mro_hasattr",
"random_string",
"safe_eval",
"sanitise_ansi",
"setgetattr",
"setitem",
"stub_recursively",
"unpack__all__",
"validate_helper",
"get_cores_and_schedules",
)
"""
Signatures
----------
call_with_leaves :: (function: Callable[[Any], Any], obj: ~T, _seq: bool = True) -> None
dict_depth :: (obj: Any, level: int = 0) -> int
dict_diff :: (left: Mapping, right: Mapping) -> List[Dict]
find_attributes :: (attr: str, klass: Any) -> List
flatten_dict :: (obj: Mapping, sep: str = '.') -> Mapping
get_concrete_subclasses :: (klass, recursive=True, derived=True) -> List
get_subclasses :: (klass, recursive=True, derived=True) -> List
get_valid_access_paths :: (obj: Mapping, _limit: int = 8192, _leaf_only: bool = False, _use_lists: bool = True, _fallthrough_empty: bool = True) -> Generator
getitem :: (obj: Mapping, query: Union[str, Tuple], *args: Any, sep: str = '/') -> Any
has_method :: (klass: Union[type, object], name: str) -> bool
has_property :: (klass: Union[type, object], name: str) -> bool
has_type :: (klass: Union[type, object]) -> bool
has_variable :: (klass: Union[type, object], name: str) -> bool
is_abstract :: (klass: Union[type, object]) -> bool
is_scalar_numeric :: (value: t.Any) -> bool
map_to_leaves :: (function: Callable[[Any], Any], obj: ~T, _seq: bool = True) -> Any
mro_getattr :: (cls: type, attr: str, *args: Any) -> Any
mro_hasattr :: (cls: type, attr: str) -> bool
random_string :: (length: int) -> str
safe_eval :: (to_eval: str, expect: Tuple[type], timeout: int) -> object
sanitise_ansi :: (value Union[List[str], str]) -> Union[List[str], str]
setgetattr :: (klass: Union[type, object], attr: str, default: Any) -> None
setitem :: (obj: MutableMapping, query: Tuple, value: Any) -> None
stub_recursively :: (obj: ~T, stub: Any = None, _stub_list_elements: bool = True) -> Optional[~T]
unpack__all__ :: (*imports: Collection[str]) -> Tuple[str]
validate_helper :: (what: Mapping, key: Any, *types: type, msg: str = '') -> NoReturn
"""
def call_with_leaves(function: t.Callable[[t.Any], t.Any], obj: t.T, _seq: bool = True) -> None:
    """Calls a function on leaves of an object
    A leaf is considered to be an object that is not a Mapping (or, when _seq is set,
    also not a Sequence except a string, which is also a Sequence).
    Args:
        function (t.Callable[[t.Any], t.Any]): The callable
        obj (t.T): The tree-like or sequence-like object
        _seq (bool, optional): Should sequences be considered?. Defaults to True.
    """
    # NOTE(review): the recursive helper `inner` is not defined anywhere in this
    # copy of the file — it appears to have been stripped. As written, this call
    # raises NameError; restore the helper before use.
    inner(obj)
def dict_depth(obj: t.Any, level: int = 0) -> int:
"""Get maximum depth of a dict-like object
Args:
obj (t.Any): The dict-like object
level (int): For internal use only. Defaults to 0.
.. note::
The depth of a non-dict-like object is considered to be 0.
An empty dict increases the depth if `_empty_increments` is True.
Examples:
>>> dict_depth(1) # returns 0
>>> dict_depth([1,2,3]) # returns 0
>>> dict_depth({1: 1, 2: 2}) # returns 1
>>> dict_depth({1: {2: {3: 3}}}) # returns 3
>>> dict_depth({1: {2: {3: {}}}}) # returns 4
"""
if not isinstance(obj, Map) or not obj:
return level
return max(dict_depth(v, level + 1) for k, v in obj.items())
def dict_diff(left: Map, right: Map) -> t.List[t.Dict]:
    """Compute the leaf-level difference between two dict-like objects.
    Args:
        left (Map): The left dict-like object
        right (Map): The right dict-like object
    Returns a list of dicts with keys ["path", "left", "right"], one per leaf
    access path whose values differ between the two mappings. A key missing on
    one side shows up as `math.nan` on that side: nan is used as the default
    because `nan != nan`, so a genuinely-missing key always compares unequal,
    whereas a plain None default would hide keys that legitimately hold None.
    (Consequently, leaves that actually store nan may be reported as
    false-positive differences — an unavoidable property of this scheme.)
    """
    leaf_kwargs = dict(_leaf_only=True, _use_lists=False)
    paths_in_left = set(get_valid_access_paths(left, **leaf_kwargs))
    paths_in_right = set(get_valid_access_paths(right, **leaf_kwargs))
    differences = []
    for path in paths_in_left.union(paths_in_right):
        lhs = getitem(left, path, math.nan)
        rhs = getitem(right, path, math.nan)
        if lhs != rhs:
            differences.append({"path": path, "left": lhs, "right": rhs})
    return differences
def find_attributes(klass: t.Any, attr: str) -> t.List:
    """Collect one entry per class in the MRO that exposes an attribute.
    Args:
        klass (t.Any): A type, or an instance (its type's MRO is used)
        attr (str): The attribute name to look for
    Returns:
        t.List: The attribute name repeated once for every base that has it
        (empty if no base in the hierarchy exposes it).
    Raises:
        TypeError: If `attr` is not a string.
    """
    if not isinstance(attr, str):
        raise TypeError(attr)
    if hasattr(klass, "__mro__"):
        hierarchy = klass.__mro__
    else:
        hierarchy = type(klass).mro()
    found = []
    for base in hierarchy:
        if hasattr(base, attr):
            found.append(attr)
    return found
def flatten_dict(obj: Map, sep: str = ".") -> Map:
    """Flatten a dict to a 1-level dict combining keys with a separator
    Args:
        obj (Map): The dict-like object
        sep (str): The separator used when combining keys. Defaults to ".".
    Returns:
        Map: A flattened object of same type as 'obj'.
    .. warning::
        Flattening will enforce all keys to be string-types!
    `reducer` is a function accepted by the functools.reduce function, which is of
    form: f(a, b) where _a_ is the accumulated value, and _b_ is the updated value
    from the iterable.
    The .items() function produces key-value tuple-pairs. These can be expanded
    with *, e.g. `*("a", "b")` will expand to `"a", "b"`. This property is used
    to expand the `kv_pair` below.
    Example walkthrough on `flatten_dict({'a': 1, 'b': {'c': {'d': 2}}})`: ::
        `outer` <- obj: {'a': 1, 'b': {'c': {'d': 2}}}, prefix: ''
        `reducer` <- key: 'a', value: 1
        `inner` <- acc: {}, key: 'a', value: 1, prefix: ''
        `inner` -> {'a': 1}
        `reducer` -> {'a': 1}
        `reducer` <- key: 'b', value: {'c': {'d': 2}}
        `inner` <- acc: {'a': 1}, key: 'b', value: {'c': {'d': 2}}, prefix: ''
        `outer` <- obj: {'c': {'d': 2}}, prefix: 'b.'
        `reducer` <- key: 'c', value: {'d': 2}
        `inner` <- acc: {}, key: 'c', value: {'d': 2}, prefix: 'b.'
        `outer` <- obj: {'d': 2}, prefix: 'b.c.'
        `reducer` <- key: 'd', value: 2
        `inner` <- acc: {}, key: 'd', value: 2, prefix: 'b.c.'
        `inner` -> {'b.c.d': 2}
        `reducer` -> {'b.c.d': 2}
        `outer` -> {'b.c.d': 2}
        `inner` -> {'b.c.d': 2}
        `reducer` -> {'b.c.d': 2}
        `outer` -> {'b.c.d': 2}
        `inner` -> {'a': 1, 'b.c.d': 2}
        `reducer` -> {'a': 1, 'b.c.d': 2}
        `outer` -> {'a': 1, 'b.c.d': 2}
    """
    if not isinstance(obj, Map):
        raise TypeError("flatten_dict works only on dict-like types", type(obj))
    # `_t` preserves the input's concrete mapping type so the result matches it.
    _t = type(obj)
    # NOTE(review): the nested helpers `outer`/`inner`/`reducer` described in the
    # docstring are not defined in this copy of the file — they appear stripped.
    # As written, this call raises NameError; restore the helpers before use.
    return outer(obj, "")
def expand_dict(obj: Map, sep: str = ".") -> Map:
    """Expands a flattened mapping by splitting keys with the given separator
    Args:
        obj (Map): The flattened dict-like object to unflatten
        sep (str, optional): The key separator
    Raises:
        TypeError: If wrong type is supplied
        ValueError: If a non-flat dict is supplied
    Returns:
        Map: The expanded mapping object of same type as 'obj'.
    Example:
        >>> d = {'a': 1, 'b': 2, 'c.ca': 1, 'c.cb': 2}
        >>> expand_dict(d)
        {'a': 1, 'b': 2, 'c': {'ca': 1, 'cb': 2}}
    """
    if not isinstance(obj, Map):
        raise TypeError("expand_dict works only on dict-like types", type(obj))
    # Only depth-1 mappings are valid input: deeper values indicate the dict
    # was not produced by flatten_dict (or has already been expanded).
    if dict_depth(obj) != 1:
        raise ValueError(
            "expand_dict works only on flat dict-like types, "
            "got a mapping of depth: {}".format(dict_depth(obj))
        )
    # NOTE(review): the recursive helper `inner` is not defined in this copy of
    # the file — it appears stripped. As written, this call raises NameError.
    return inner(obj)
def get_concrete_subclasses(klass, recursive: bool = True, derived: bool = True) -> t.List:
    """Get a list of non-abstract subclasses of a type.
    Args:
        klass (t.Type): The type object
        recursive (bool): Extract subclasses recursively? Defaults to True.
        derived (bool): Prefer the tracker attribute maintained by
            SubclassTracker-enhanced types, when present? Defaults to True.
    Returns:
        t.List: The concrete (non-abstract) subclasses of the type.
    """
    from exot.util.mixins import _SubclassTracker as tracker
    # Fast path: SubclassTracker-enhanced classes already maintain the set of
    # concrete descendants, so no MRO walk is needed.
    if derived and hasattr(klass, tracker.concrete):
        return list(getattr(klass, tracker.concrete))
    candidates = get_subclasses(klass, recursive=recursive)
    return [candidate for candidate in candidates if not isabstract(candidate)]
def get_subclasses(klass, recursive: bool = True, derived: bool = True) -> t.List:
    """Get a list of subclasses of a type
    Args:
        klass (t.Type): The type object
        recursive (bool): Should the classes be extracted recursively? Defaults to True.
        derived (bool): Use the 'derived' property of SubclassTracker-enhanced types? [True]
    Returns:
        t.List: A list of concrete subclasses of the type
    """
    from exot.util.mixins import _SubclassTracker as __
    if not (hasattr(klass, "__subclasses__") or hasattr(klass, __.derived)):
        raise TypeError(f"__subclasses__ or {__.derived} attribute missing", klass)
    # Fast path: tracker-enhanced classes already maintain the full derived set.
    if derived:
        return list(getattr(klass, __.derived))
    subclasses = klass.__subclasses__()
    if recursive:
        # NOTE(review): `walker` (the recursive collector) is not defined in this
        # copy of the file — it appears stripped; as written this raises NameError.
        walker(subclasses)
    return subclasses
def getitem(obj: Map, query: t.Union[str, t.Tuple], *args: t.Any, sep: str = "/") -> t.Any:
    """Get a value from a dict-like object using an XPath-like query, or a tuple-path
    Accesses an object that provides a dict-like interface using a query: either a
    tuple representing the path, or a string where consecutive keys are separated with
    a separator, e.g. "key1/key2".
    Returns the value of the object at the given key-sequence. Returns a default value
    if provided, or throws a LookupError.
    Args:
        obj (Map): a mapping
        query (t.Union[str, t.Tuple]): a query path using a separated string or a tuple
        *args (t.Any): an optional default value, similar to `getattr`
        sep (str, optional): a separator string used to split a string query path
    Returns:
        t.Any: the value stored in obj for the given query, or the default value
    Raises:
        LookupError: if query not found and no default value is provided
        TypeError: if obj is not a mapping, or query is not a str or tuple
    """
    if not isinstance(obj, Map):
        raise TypeError("'obj' must be an instance of Mapping, e.g. dict", type(obj))
    if not isinstance(query, (str, t.Tuple)):
        raise TypeError("'query' must be a str or a tuple", type(query))
    if len(args) > 1:
        raise TypeError(f"getitem accepts at most 3 positional args, got {len(args)}")
    _obj = obj
    # handler for tuple queries
    if isinstance(query, t.Tuple):
        # Tuple paths are validated against the full set of access paths first,
        # so the descent below can never raise.
        _valid = get_valid_access_paths(obj)
        if query not in _valid:
            if args:
                return args[0]
            else:
                raise LookupError(f"query {query!r} not found")
        else:
            for node in query:
                _obj = _obj[node]
            return _obj
    # handler for string queries
    else:
        try:
            # loop through components in the query, consecutively accessing the mapping
            for node in query.split(sep):
                # handle empty nodes in the query, e.g. when query="a///b" -> "a/b"
                if not node:
                    continue
                if isinstance(_obj, Map):
                    # Coerce the string component to the type of a matching key,
                    # so e.g. "1" can address an int key 1 at this level.
                    for k in _obj.keys():
                        node = type(k)(node) if str(k) == node else node
                elif isinstance(_obj, (t.List, t.Set)):
                    # Lists/sets are indexed positionally; the component must be
                    # convertible to an int.
                    try:
                        node = int(node)
                    except TypeError:
                        raise LookupError(
                            f"{node} not convertible to int when attempting to access "
                            f"a list {_obj!r}"
                        )
                _obj = _obj[node]
            return _obj
        except LookupError as Error:
            if args:
                return args[0]
            else:
                # Attach the original query to the exception for easier debugging.
                Error.args += (query,)
                raise
def has_method(klass: t.Union[type, object], name: str) -> bool:
    """Check whether `name` exists in the class hierarchy and is a method.
    Args:
        klass (t.Union[type, object]): The type or object
        name (str): The name of the method
    Returns:
        bool: True if at least one base exposes `name` and every occurrence
        found in the hierarchy is callable.
    """
    candidates = find_attributes(klass, name)
    return bool(candidates) and all(is_callable(f) for f in candidates)
def has_property(klass: t.Union[type, object], name: str) -> bool:
    """Check whether `name` exists in the class hierarchy and is a property.
    Args:
        klass (t.Union[type, object]): The type or object
        name (str): The name of the property
    Returns:
        bool: True if at least one base exposes `name` and every occurrence
        found in the hierarchy is a property.
    """
    candidates = find_attributes(klass, name)
    return bool(candidates) and all(is_property(f) for f in candidates)
def has_type(klass: t.Union[type, object]) -> bool:
    """Check if a type or instance has a `Type` member deriving from Enum.
    Args:
        klass (t.Union[type, object]): The type or object to inspect
    Returns:
        bool: True if a `Type` attribute exists and is an Enum subclass.
    """
    if not isinstance(klass, (type, object)):
        raise TypeError(klass)
    # `type(None)` is a safe default: NoneType is never an Enum subclass.
    candidate = getattr(klass, "Type", type(None))
    return issubclass(candidate, Enum)
def has_variable(klass: t.Union[type, object], name: str) -> bool:
    """Check whether `name` exists in the class hierarchy as a plain variable.
    Args:
        klass (t.Union[type, object]): The type or object
        name (str): The name of the variable
    Returns:
        bool: True if at least one base exposes `name` and every occurrence
        found in the hierarchy is non-callable.
    """
    candidates = find_attributes(klass, name)
    return bool(candidates) and all(is_not_callable(f) for f in candidates)
def is_abstract(klass: t.Union[type, object]) -> bool:
    """Determine whether a type or instance is abstract.
    Args:
        klass (t.Union[type, object]): The type or object to inspect
    Returns:
        bool: True if the type/instance is abstract.
    """
    if not isinstance(klass, (type, object)):
        raise TypeError(klass)
    # Prefer the ABC machinery's own bookkeeping when available: a class is
    # abstract iff it still has unimplemented abstract methods.
    marker = object()
    abstract_methods = getattr(klass, "__abstractmethods__", marker)
    if abstract_methods is not marker:
        return len(abstract_methods) != 0
    from inspect import isabstract
    return isabstract(klass)
def is_scalar_numeric(value: t.Any) -> bool:
    """Check if a value is an int, a float, or a NumPy variant thereof.
    Args:
        value (t.Any): The value to inspect
    Returns:
        bool: True if the value is scalar and numeric.
    """
    numeric_types = (float, int, np.integer, np.floating)
    return isinstance(value, numeric_types)
def leaves(obj: Map) -> t.Generator:
    """Yield the leaf values of a mapping.
    Args:
        obj (Map): The dict-like object to traverse
    Returns:
        t.Generator: A generator over the values at every leaf access path.
    """
    leaf_paths = get_valid_access_paths(obj, _leaf_only=True, _use_lists=False)
    return (getitem(obj, leaf_path) for leaf_path in leaf_paths)
def list2cmdline(seq: t.Iterable) -> str:
    """Build a command-line string from arguments, dropping any None entries.
    Args:
        seq (t.Iterable): The sequence of arguments (None entries are removed)
    Returns:
        str: The quoted command-line string.
    """
    filtered = []
    for argument in seq:
        if argument is not None:
            filtered.append(argument)
    return _list2cmdline(filtered)
def map_to_leaves(function: t.Callable[[t.Any], t.Any], obj: t.T, _seq: bool = True) -> t.Any:
    """Map a function to leaves of an object
    A leaf is considered to be an object that is not a Mapping (or, when _seq is set,
    also not a Sequence except a string, which is also a Sequence).
    Args:
        function (t.Callable[[t.Any], t.Any]): a function or signatude "a -> a"
        obj (t.T): a dict-like, list-like, or plain object
        _seq (bool, optional): map on elements of lists?
    Returns:
        t.T: the obj with transformed elements
    """
    # NOTE(review): the recursive helper `inner` is not defined anywhere in this
    # copy of the file — it appears to have been stripped. As written, this call
    # raises NameError; restore the helper before use.
    return inner(obj)
def mro_getattr(cls: type, attr: str, *args: t.Any) -> t.Any:
    """Get an attribute from a type's class hierarchy (excluding the type itself).
    Args:
        cls (type): The type whose ancestors are searched
        attr (str): The attribute name
        *args (t.Any): An optional default value (like Python's getattr)
    Returns:
        t.Any: The first matching attribute found on an ancestor, or the
        default value when provided.
    Raises:
        TypeError: Not called on a type, or called with too many arguments
        AttributeError: Attribute not found and no default value provided
    """
    if not isinstance(cls, type):
        raise TypeError(f"mro_getattr can only be used on types, got {type(cls)}")
    if len(args) > 1:
        raise TypeError(f"mro_getattr expected at most 3 arguments, got {2 + len(args)}")
    # Skip index 0 (the class itself): only ancestors are searched.
    for ancestor in cls.mro()[1:]:
        if hasattr(ancestor, attr):
            return getattr(ancestor, attr)
    if not args:
        raise AttributeError(f"type object {cls.__name__!r} has not attribute {attr!r}")
    return args[0]
def mro_hasattr(cls: type, attr: str) -> bool:
    """Check if an attribute exists anywhere in a type's ancestry (excluding itself).
    Args:
        cls (type): The type whose ancestors are searched
        attr (str): The attribute name
    Returns:
        bool: True if any ancestor has the attribute.
    Raises:
        TypeError: Not called on a type
    """
    if not isinstance(cls, type):
        raise TypeError(f"mro_getattr can only be used on types, got {type(cls)}")
    # Skip index 0 (the class itself): only ancestors are inspected.
    return any(hasattr(ancestor, attr) for ancestor in cls.mro()[1:])
def random_string(length: int) -> str:
    """Produce a random string of ASCII letters with the given length.
    Args:
        length (int): The desired string length
    Returns:
        str: The random string (letters only, drawn with replacement).
    """
    assert isinstance(length, int), f"'length' must be an int, got: {type(length)}"
    chosen = random.choices(ascii_letters, k=length)
    return "".join(chosen)
def timestamp() -> str:
    """Produce a filesystem-safe timestamp for the current time.
    Returns:
        str: ISO-like format "YYYY-MM-DD_HH-MM-SS" (":" replaced with "-"
        so the value can be used in file names).
    """
    raw = datetime.now().isoformat(sep="_", timespec="seconds")
    return raw.replace(":", "-")
def safe_eval(
    to_eval: str, *, expect: t.Tuple[type] = (list, np.ndarray), timeout: int = 10
) -> object:
    """Evaluate a restricted subset of Python (and numpy) from a string
    Args:
        to_eval (str): The string to evaluate
        expect (t.Tuple[type]): The list of expected resulting types. Defaults to list, ndarray.
        timeout (int): The timeout after which the call fails in seconds. Defaults to 10.
    The `safe_eval` function allows using a subset of commands, listed in `_globals` and
    `_locals`, which includes a few numpy functions: linspace, arange, array, rand, and
    randint. Examples:
    >>> safe_eval("linspace(1, 10, 10, dtype=int).tolist()")
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    >>> safe_eval("__import__('os').getcwd()")
    NameError Traceback (most recent call last)
    ...
    NameError: name '__import__' is not defined
    >>> safe_eval("range(5)")
    TypeError Traceback (most recent call last)
    ...
    TypeError: eval produced a <class 'range'>, expected: (<class 'list'>, <class 'numpy.ndarray'>)
    >>> safe_eval("list(round(rand(), 2) for _ in range(5))")
    [0.96, 0.41, 0.9, 0.98, 0.02]
    """
    assert isinstance(to_eval, str), "'to_eval' must be a str"
    assert isinstance(expect, tuple), "'expect' must be a tuple"
    assert all(isinstance(_, type) for _ in expect), "'expect' must contain only types"
    _locals = {}
    # Empty __builtins__ blocks access to import machinery and other builtins;
    # only the whitelisted names below are visible to the evaluated expression.
    _globals = {
        "__builtins__": {},
        "list": list,
        "range": range,
        "len": len,
        "int": int,
        "float": float,
        "min": min,
        "max": max,
        "round": round,
        "linspace": np.linspace,
        "geomspace": np.geomspace,
        "logspace": np.logspace,
        "hstack": np.hstack,
        "vstack": np.vstack,
        "split": np.split,
        "arange": np.arange,
        "array": np.array,
        "rand": np.random.rand,
        "randint": np.random.randint,
    }
    # SIGALRM-based timeout: POSIX-only, and only valid from the main thread.
    # NOTE(review): `signal_handler` and `AlarmException` are not defined in this
    # copy of the file — they appear stripped (presumably a handler that raises
    # AlarmException on SIGALRM); restore them before use.
    signal.signal(signal.SIGALRM, signal_handler)
    signal.alarm(timeout)
    try:
        _ = eval(to_eval, _globals, _locals)
    except AlarmException:
        raise TimeoutError(f"safe_eval took longer than {timeout} seconds")
    else:
        # Success path: disarm the alarm so it cannot fire later.
        signal.signal(signal.SIGALRM, signal.SIG_IGN)
        signal.alarm(0)
    if not isinstance(_, expect):
        raise EvalTypeError(f"eval produced a {type(_)}, expected: {expect}")
    return _
def sanitise_ansi(value: t.Union[t.List[str], str]) -> t.Union[t.List[str], str]:
    """Strip ANSI escape sequences from a string or a list of strings.
    Args:
        value (t.Union[t.List[str], str]): The string or list of strings
    Returns:
        t.Union[t.List[str], str]: The sanitised string, or a list of sanitised
        strings. Note: list elements are additionally whitespace-stripped,
        while a bare string input is not.
    """
    pattern = re.compile(r"(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]")
    if isinstance(value, str):
        return pattern.sub("", value)
    if isinstance(value, t.List):
        return [pattern.sub("", item).strip() for item in value]
    raise TypeError("sanitise_ansi accepts only str or lists of str")
def setgetattr(klass: t.Union[type, object], attr: str, default: t.Any) -> None:
    """Set an attribute to its current value, or to a default when missing.
    Combines `setattr` and `getattr`: after the call, `attr` is guaranteed to
    exist on `klass`, keeping any pre-existing value.
    Args:
        klass (t.Union[type, object]): The type or object
        attr (str): The attribute name (must be a non-empty string)
        default (t.Any): The value used when the attribute does not yet exist
    """
    if not (isinstance(klass, type) or isinstance(klass, object)):
        raise TypeError("'klass' should be a type or an object", klass)
    if not isinstance(attr, str):
        raise TypeError("'attr' should be a str")
    if not attr:
        raise ValueError("'attr' should not be empty")
    current = getattr(klass, attr, default)
    setattr(klass, attr, current)
def setitem(obj: t.MutableMapping, query: t.Tuple, value: t.Any, force: bool = False) -> None:
    """Set a value in a dict-like object using a tuple-path query
    Args:
        obj (t.MutableMapping): a mutable mapping
        query (t.Tuple): a query path as a tuple
        value (t.Any): value to set
        force (bool, optional): create missing intermediate mappings instead of
            raising when the query path does not exist. Defaults to False.
    Raises:
        TypeError: if obj is not a mutable mapping
        KeyError: if the query path is not found and force is False
    """
    if not isinstance(obj, t.MutableMapping):
        raise TypeError("'obj' needs to be a mutable mapping", type(obj))
    _obj = obj
    _valid = get_valid_access_paths(obj)
    if query not in _valid:
        if not force:
            raise KeyError(f"query-path {query!r} not found")
        # Bug fix: the previous implementation rebound the local `_obj` to a
        # fresh dict (`_obj = dict()`) instead of inserting the new level into
        # the parent mapping, so the following `_obj[node]` lookup raised
        # KeyError and the forced write never reached `obj`.
        for node in query[:-1]:
            if node not in _obj:
                _obj[node] = dict()
            _obj = _obj[node]
    else:
        # Path already exists: simply descend to the parent of the final key.
        for node in query[:-1]:
            _obj = _obj[node]
    _obj[query[-1]] = value
def stub_recursively(
    obj: t.T, stub: t.Any = None, _stub_list_elements: bool = True
) -> t.Optional[t.T]:
    """Produce a copy with all leaf values recursively set to a 'stub' value
    Args:
        obj (t.T): the object to stub
        stub (t.Any, optional): the value to set the leaf elements to
        _stub_list_elements (bool, optional): stub individual elements in collections?
    Returns:
        (t.T, optional): the stubbed object
    """
    # NOTE(review): the recursive helper `inner` is not defined anywhere in this
    # copy of the file — it appears to have been stripped. As written, this call
    # raises NameError; restore the helper before use.
    return inner(obj)
def unpack__all__(*imports: t.Collection[str]) -> t.Tuple[str]:
    """Unpacks a list of lists/tuples into a 1-dimensional tuple
    Args:
        *imports (t.Collection[str]): The collections of strings in "__all__"
    Returns:
        t.Tuple[str]: The flattened imports as a tuple of strings.
    Raises:
        TypeError: If any argument is not a list or a tuple
        AssertionError: If any unpacked value is not a str
    """
    from itertools import chain
    _name = f"{__name__}.unpack__all__"
    if not all(isinstance(e, (t.List, t.Tuple)) for e in imports):
        raise TypeError(f"{_name}: arguments should be lists or tuples")
    # Bug fix: the previous code iterated the lazy `chain` object inside the
    # validation `all(...)`, which exhausted the iterator, so the final
    # `tuple(_)` was always empty. Materialise the flattened values once.
    flattened = tuple(chain.from_iterable(imports))
    assert all(
        issubclass(type(e), str) for e in flattened
    ), f"{_name}: values in unpacked containers were not scalar or 'str'"
    return flattened
def validate_helper(what: t.Mapping, key: t.Any, *types: type, msg: str = "") -> None:
    """Validate types of key in a mapping using key-paths

    Args:
        what (t.Mapping): The mapping
        key (t.Any): The key — a single key or an iterable key-path
        *types (type): The valid types
        msg (str): An additional error message. Defaults to "".

    Raises:
        TypeError: if 'what' is not a mapping or no 'types' were given
        MisconfiguredError: if the value at 'key' is not one of 'types'
    """
    if not isinstance(what, t.Mapping):
        raise TypeError(f"validate_helper works only on mappings, got {type(what)}")
    if not types:
        raise TypeError("validate helper expects at least 1 'types' argument")
    # Normalise `key` into the tuple-path form accepted by `getitem`:
    # scalars (and strings, which are iterable) become 1-tuples, other
    # iterables are converted to tuples.
    if isinstance(key, str) or not isinstance(key, t.Iterable):
        key = tuple([key])
    elif not isinstance(key, tuple):
        key = tuple(key)
    # Look the value up once instead of twice (the original fetched it again
    # just to build the error message).
    value = getitem(what, key, None)
    if not isinstance(value, types):
        raise MisconfiguredError(
            "{0}config key: '{1!s}' should be of type {2!r}, got {3!s}".format(
                f"{msg} " if msg else "", key, types, type(value)
            )
        )
| 33.755825 | 158 | 0.592601 |
9ede197b4e22a537f288d32a4de554ea29c1ea06 | 1,222 | py | Python | 70_question/dynamic_programming/max_profit_with_k_transactions.py | alvinctk/google-tech-dev-guide | 9d7759bea1f44673c2de4f25a94b27368928a59f | [
"Apache-2.0"
] | 26 | 2019-06-07T05:29:47.000Z | 2022-03-19T15:32:27.000Z | 70_question/dynamic_programming/max_profit_with_k_transactions.py | alvinctk/google-tech-dev-guide | 9d7759bea1f44673c2de4f25a94b27368928a59f | [
"Apache-2.0"
] | null | null | null | 70_question/dynamic_programming/max_profit_with_k_transactions.py | alvinctk/google-tech-dev-guide | 9d7759bea1f44673c2de4f25a94b27368928a59f | [
"Apache-2.0"
] | 6 | 2019-10-10T06:39:28.000Z | 2020-05-12T19:50:55.000Z |
if __name__ == "__main__":
maxProfitWithKTransactions([5, 11, 3, 50, 60, 90], 2)
| 31.333333 | 136 | 0.56383 |
9edf6ecb3d424f1fd6e8e155154f4ecebc700938 | 4,149 | py | Python | main.py | rdmaulana/flask-smart-xls-clean | 8dde5b56c241312ab252964b159921acd6013839 | [
"MIT"
] | null | null | null | main.py | rdmaulana/flask-smart-xls-clean | 8dde5b56c241312ab252964b159921acd6013839 | [
"MIT"
] | null | null | null | main.py | rdmaulana/flask-smart-xls-clean | 8dde5b56c241312ab252964b159921acd6013839 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import io
import time
import uuid
from flask import Flask, render_template, request, redirect, url_for, Response, session, send_file, make_response, send_from_directory
from os.path import join, dirname, realpath
from werkzeug.wsgi import FileWrapper
# Flask application and configuration for the spreadsheet-cleaning service.
app = Flask(__name__)
app.config["DEBUG"] = True
app.config["UPLOAD_FOLDER"] = 'media/dataset'
app.config["EXPORT_FOLDER_CSV"] = 'media/result'
# SECURITY NOTE(review): the secret key is hard-coded in source; it should be
# loaded from an environment variable or config file before deployment.
app.config["SECRET_KEY"] = 'DBA2823#*@$&bdaiuwgdbi8238XBxjzhx@$@'
app.config['SESSION_TYPE'] = 'filesystem'
def cleanExcel(file_path, start_id):
    """Normalise an uploaded participant spreadsheet and write it out as CSV.

    Reads the Excel file at ``file_path``, renames its (Indonesian) columns,
    drops rows with a missing birth date or an invalid/duplicate 16-digit
    NIK, cleans the phone / region / category columns, and writes the result
    to ``media/result/<timestamp>-<uid>.csv``. The generated file name
    (without extension) is stored in the Flask session under ``'csv_name'``.

    Args:
        file_path: path to the uploaded .xls/.xlsx file
        start_id: first value of the generated sequential ``id`` column
    """
    xls = pd.read_excel(file_path)
    # Bug fix: DataFrame.replace returns a new frame — the original call
    # discarded the result, so the tab/newline scrub was a no-op.
    xls = xls.replace(to_replace=[r"\\t|\\n|\\r", "\t|\n|\r"], value=["",""], regex=True)
    print("Jumlah awal: {}".format(xls.shape))
    xls.rename(columns = {
        'NIK':'nik',
        'NAMA':'nama',
        'JENIS_KELAMIN':'jkel',
        'TANGGAL_LAHIR':'tgl_lahir',
        'NO_HP':'telp',
        'INSTANSI_PEKERJAAN':'instansi',
        'ALAMAT KTP': 'alamat',
        'ALAMAT_KTP': 'alamat',
        'KODE_KAB_KOTA_TEMPAT_KERJA': 'kab_id',
        'KODE_KATEGORI': 'kategori'
    }, inplace = True)
    xls['nik'] = xls['nik'].astype(str)
    # Sequential ids plus helper columns expected downstream
    xls.insert(0, 'id', range(int(start_id), int(start_id) + len(xls)))
    xls.insert(2, 'nama_ktp', xls['nama'])
    xls.insert(6, 'status', 0)
    # del xls['NO']
    del xls['UMUR']
    del xls['JENIS_PEKERJAAN']
    # Drop rows with a missing birth date or an invalid / duplicated NIK
    # (a valid NIK is exactly 16 characters)
    xls.drop(xls[xls['tgl_lahir'].isnull()].index, inplace = True)
    xls.drop(xls[xls['nik'].isnull()].index, inplace = True)
    xls.drop(xls[xls['nik'].str.len() > 16].index, inplace = True)
    xls.drop(xls[xls['nik'].str.len() < 16].index, inplace = True)
    xls.drop(xls[xls.duplicated(['nik'])].index, inplace = True)
    if xls['tgl_lahir'].dtypes == 'object':
        xls['tgl_lahir'] = pd.to_datetime(xls['tgl_lahir'])
    if xls['telp'].dtypes == 'float64':
        # Excel parsed phone numbers as floats; recover the leading zero
        xls['telp'] = xls['telp'].astype(str)
        xls['telp'] = xls['telp'].str.split('.').str[0]
        xls['telp'] = xls['telp'].replace('nan',np.NaN)
        xls['telp'] = '0' + xls['telp']
    if xls['telp'].dtypes == 'object':
        # Keep only the first number and normalise the +62 country prefix
        xls['telp'] = xls['telp'].str.split('/').str[0]
        xls['telp'] = xls['telp'].str.replace('\+62','0')
        xls['telp'] = xls['telp'].str.replace(' ','')
        xls['telp'] = xls['telp'].str.replace('-','')
    if xls['kab_id'].dtypes == 'float64':
        xls['kab_id'] = xls['kab_id'].astype(str)
        xls['kab_id'] = xls['kab_id'].str.split('.').str[0]
        xls['kab_id'] = xls['kab_id'].replace('nan',np.NaN)
    if xls['kategori'].dtypes == 'int64':
        # Zero-pad single-digit category codes to two characters
        xls['kategori'] = xls['kategori'].astype(str)
        xls['kategori'] = xls['kategori'].apply(lambda x: '0' + x if len(x) == 1 else x)
    # Assign the result — Series.replace is not in-place. NOTE(review): this
    # replaces cells whose *entire* value equals ';' (use .str.replace for
    # substring removal if that was the intent — TODO confirm).
    xls['alamat'] = xls['alamat'].replace(';','')
    print("Jumlah akhir: {}".format(xls.shape))
    uid = str(uuid.uuid4())[:4]
    path_file = 'media/result/'
    outfile_name = '{0}{1}'.format(time.strftime("%Y%m%d-%H%M%S-"),uid)
    session['csv_name'] = f'{outfile_name}'
    xls.to_csv(f'{path_file}{outfile_name}.csv', index=False, header=True, encoding="utf-8")
if __name__ == '__main__':
    # Run the Flask development server (not suitable for production use)
    app.run(debug=True)
9edfa90d3388411fff4970296751427f8a1b76b6 | 257 | py | Python | 2_UNIXCommands/Exercise11.py | takeyoshinitta/NLP-100-Exercise | e77fb385fbbf50c8a8bdc47442db1421739ea5b6 | [
"MIT"
] | 3 | 2022-01-04T19:02:22.000Z | 2022-02-21T08:52:18.000Z | 2_UNIXCommands/Exercise11.py | takeyoshinitta/NLP-100-Exercise | e77fb385fbbf50c8a8bdc47442db1421739ea5b6 | [
"MIT"
] | null | null | null | 2_UNIXCommands/Exercise11.py | takeyoshinitta/NLP-100-Exercise | e77fb385fbbf50c8a8bdc47442db1421739ea5b6 | [
"MIT"
] | null | null | null | # 11. Replace tabs into spaces
# Replace every occurrence of a tab character into a space. Confirm the result by using sed, tr, or expand command.
with open('popular-names.txt') as f:
for line in f:
print(line.strip().replace("\t", " "))
| 36.714286 | 116 | 0.66537 |
9edfcae85303a4e73d41bdae85aeda75e4c87673 | 2,817 | py | Python | scripts/wapo/wapo_link_graph_from_mongo.py | feup-infolab/army-ant | 7b33120d5160f73d7a41a05e6336489c917fb75c | [
"BSD-3-Clause"
] | 5 | 2018-01-18T14:11:52.000Z | 2020-10-23T16:02:25.000Z | scripts/wapo/wapo_link_graph_from_mongo.py | feup-infolab/army-ant | 7b33120d5160f73d7a41a05e6336489c917fb75c | [
"BSD-3-Clause"
] | 10 | 2018-02-02T20:19:36.000Z | 2020-10-05T08:46:36.000Z | scripts/wapo/wapo_link_graph_from_mongo.py | feup-infolab/army-ant | 7b33120d5160f73d7a41a05e6336489c917fb75c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# wapo_link_graph_from_mongo.py
# Jos Devezas <joseluisdevezas@gmail.com>
# 2019-02-05
import logging
import sys
import warnings
import networkx as nx
from bs4 import BeautifulSoup
from pymongo import MongoClient
# Configure timestamped logging and silence BeautifulSoup's parser warnings.
logging.basicConfig(
    format='%(asctime)s wapo_link_graph_from_mongo: %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    level=logging.INFO)
warnings.filterwarnings("ignore", category=UserWarning, module='bs4')
# Usage: script takes a Mongo database name and an output graph path.
if len(sys.argv) < 3:
    print("Usage: %s MONGO_DBNAME OUTPUT_GRAPH_PATH" % sys.argv[0])
    sys.exit(1)
database = sys.argv[1]
output_graph_path = sys.argv[2]
# Connect to a local MongoDB instance (default host/port).
mongo = MongoClient()
db = mongo[database]
logging.info("Extracting anchors from content elements (using article_url as node ID) and building graph")
g = nx.DiGraph()
doc_count = 0
edge_count = 0
# Document attributes copied onto each graph node (underscores stripped from
# key names below because GraphML/GML attribute naming is restricted).
attr_keys = ['id', 'title', 'article_url', 'published_date', 'author', 'type']
# NOTE(review): `document_iterator` is not defined in this excerpt — it is
# presumably a helper yielding article/blog-post documents from Mongo;
# confirm against the upstream source.
for source in document_iterator():
    if not 'contents' in source or source.get('contents') is None:
        continue
    for par in source['contents']:
        if par is None:
            continue
        html = par.get('content')
        if html is None:
            continue
        html = str(html)
        soup = BeautifulSoup(html, 'lxml')
        anchors = soup.find_all('a')
        for a in anchors:
            target_url = a.attrs.get('href')
            if target_url is None:
                continue
            # Resolve the link target in either collection; skip external links.
            query = {'article_url': target_url}
            attr_selector = {
                '_id': -1, 'id': 1, 'article_url': 1, 'title': 1,
                'published_date': 1, 'author': 1, 'type': 1}
            target = db.articles.find_one(query, attr_selector) \
                or db.blog_posts.find_one(query, attr_selector)
            if target is None:
                continue
            # graph[source_url].add(target_url)
            g.add_node(
                source['id'], **{k.replace('_', ''): source[k] for k in attr_keys if not source[k] is None})
            g.add_node(
                target['id'], **{k.replace('_', ''): target[k] for k in attr_keys if not target[k] is None})
            g.add_edge(source['id'], target['id'])
            edge_count += 1
    doc_count += 1
    if doc_count % 1000 == 0:
        logging.info("%d documents processed (%d edges created)" % (doc_count, edge_count))
logging.info("%d documents processed (%d edges created)" % (doc_count, edge_count))
logging.info("Saving graph to %s" % output_graph_path)
# Output format is chosen from the file extension (GML vs GraphML).
if output_graph_path.endswith('.gml') or output_graph_path.endswith('.gml.gz'):
    nx.write_gml(g, output_graph_path)
else:
    nx.write_graphml(g, output_graph_path)
| 26.083333 | 108 | 0.615903 |
9ee566ce8a227cbd2a762122ce0690fc72e66ca6 | 7,540 | py | Python | designScripts/vernierMask.py | smartalecH/BYUqot | 5b24759c4a100086937795a80d2eb6597e611819 | [
"MIT"
] | 5 | 2019-03-26T17:12:25.000Z | 2021-12-27T18:05:52.000Z | designScripts/vernierMask.py | smartalecH/BYUqot | 5b24759c4a100086937795a80d2eb6597e611819 | [
"MIT"
] | 5 | 2018-05-30T21:05:36.000Z | 2018-08-16T05:16:40.000Z | designScripts/vernierMask.py | smartalecH/BYUqot | 5b24759c4a100086937795a80d2eb6597e611819 | [
"MIT"
] | 5 | 2018-05-30T02:54:07.000Z | 2020-08-16T17:18:38.000Z | # ------------------------------------------------------------------ #
# vernierMask.py
# ------------------------------------------------------------------ #
#
# A mask design used to align the 3D printer to a silicon photonic chip
#
# ------------------------------------------------------------------ #
# VERSION HISTORY
# 10 Apr 2018 - AMH - Initialization
#
# ------------------------------------------------------------------ #
# ------------------------------------------------------------------ #
# Import libraries
# ------------------------------------------------------------------ #
# Get project library path to import library files
import sys
import os
d = os.path.dirname(os.getcwd())
libPath = os.path.abspath(os.path.join(d, 'lib'))
sys.path.insert(0, libPath)
# Import all other libraries
import gdspy
import numpy as np
import objectLibrary as obLib
# ------------------------------------------------------------------ #
# Design Constants
# ------------------------------------------------------------------ #
# Cell parameters
layerNumber = 1
# Vernier mask design parameters (all values in microns)
numFingers = 10 # Number of fingers to have on top and bottom
fingerWidth = 30 # Width of each finger
fingerSpacing = 40 # Spacing between fingers
longFingerLength = 200; # Length of the long, middle finger
shortFingerLength = 150; # Length of the short, outer fingers
baseThickness = 76; # Thickness of edge border of design
separationDistance = 380 # distance from edge of pattern to origin
buffer = 50 # Kerf width of blade
innerBoxWidth = 8.78e3 # Actual dimensions of chip
outerBoxWidth = innerBoxWidth + buffer # Buffered chip size
numCells = 12 # number of repeated cells in each dimension
# Now create a series of functions that return a cell. We'll leverage the recursive
# nature of GDS files to keep things simple.
# ------------------------------------------------------------------ #
# Create single Vernier pattern
# ------------------------------------------------------------------ #
# ------------------------------------------------------------------ #
# Create 2D Vernier pattern from single pattern
# ------------------------------------------------------------------ #
# ------------------------------------------------------------------ #
# Create Box outline
# ------------------------------------------------------------------ #
# ------------------------------------------------------------------ #
# Create Single Chip
# ------------------------------------------------------------------ #
# ------------------------------------------------------------------ #
# Tapeout entire wafer
# ------------------------------------------------------------------ #
# ------------------------------------------------------------------ #
# OUTPUT
# ------------------------------------------------------------------ #
vernierMask()
# Output the layout to a GDSII file (default to all created cells).
# Set the units we used to micrometers and the precision to nanometers.
filename = 'vernierMask.gds'
outPath = os.path.abspath(os.path.join(d, 'GDS/'+filename))
gdspy.write_gds(outPath, unit=1.0e-6, precision=1.0e-9)
| 35.233645 | 96 | 0.554377 |
9ee57d6363120b9d54a9902e2243f9122d20af71 | 4,810 | py | Python | src/core/serializers.py | pradipta/back-end | 05895b051afc4c8e0cb17db708063d80102e9de5 | [
"MIT"
] | 17 | 2019-05-11T22:15:34.000Z | 2022-03-26T22:45:33.000Z | src/core/serializers.py | pradipta/back-end | 05895b051afc4c8e0cb17db708063d80102e9de5 | [
"MIT"
] | 390 | 2019-05-23T10:48:57.000Z | 2021-12-17T21:01:43.000Z | src/core/serializers.py | pradipta/back-end | 05895b051afc4c8e0cb17db708063d80102e9de5 | [
"MIT"
] | 40 | 2019-05-21T14:41:57.000Z | 2021-01-30T13:39:38.000Z | from django.contrib.auth import get_user_model
from rest_auth.registration.serializers import (
RegisterSerializer as BaseRegisterSerializer,
)
from rest_auth.registration.serializers import (
SocialLoginSerializer as BaseSocialLoginSerializer,
)
from rest_auth.serializers import LoginSerializer as BaseLoginSerializer
from rest_auth.serializers import (
PasswordResetConfirmSerializer as BasePasswordResetConfirmSerializer,
)
from rest_auth.serializers import UserDetailsSerializer as BaseUserDetailsSerializer
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from core.models import Profile
# noinspection PyAbstractClass
# noinspection PyAbstractClass
# noinspection PyAbstractClass
# noinspection PyAbstractClass
UserModel = get_user_model()
| 33.172414 | 88 | 0.677755 |
9ee5da5b7c789afc93423e16612fb9f6de97baba | 3,519 | py | Python | src/programy/brainfactory.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | null | null | null | src/programy/brainfactory.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | null | null | null | src/programy/brainfactory.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 4 | 2019-04-01T15:42:23.000Z | 2020-11-05T08:14:27.000Z | """
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.brain import Brain
from programy.utils.classes.loader import ClassLoader
from abc import abstractmethod, ABCMeta
| 34.165049 | 127 | 0.680591 |
9ee68cd6efba5b094a83a85c60acb1031a826384 | 2,050 | py | Python | tools/docs/generate_api_rst.py | dcillera/envoy | cb54ba8eec26f768f8c1ae412113b07bacde7321 | [
"Apache-2.0"
] | 17,703 | 2017-09-14T18:23:43.000Z | 2022-03-31T22:04:17.000Z | tools/docs/generate_api_rst.py | dcillera/envoy | cb54ba8eec26f768f8c1ae412113b07bacde7321 | [
"Apache-2.0"
] | 15,957 | 2017-09-14T16:38:22.000Z | 2022-03-31T23:56:30.000Z | tools/docs/generate_api_rst.py | dcillera/envoy | cb54ba8eec26f768f8c1ae412113b07bacde7321 | [
"Apache-2.0"
] | 3,780 | 2017-09-14T18:58:47.000Z | 2022-03-31T17:10:47.000Z | import os
import shutil
import sys
import tarfile
if __name__ == "__main__":
main()
| 32.03125 | 96 | 0.642927 |
9ee7307b78f857465fe941638e5a41dd83ec835a | 15,792 | py | Python | src/wa_parser.py | ifly6/NS-WA-Authorboards | 57921457795306867844a29cdfce88bfcdd1c3f6 | [
"Apache-2.0"
] | null | null | null | src/wa_parser.py | ifly6/NS-WA-Authorboards | 57921457795306867844a29cdfce88bfcdd1c3f6 | [
"Apache-2.0"
] | null | null | null | src/wa_parser.py | ifly6/NS-WA-Authorboards | 57921457795306867844a29cdfce88bfcdd1c3f6 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 ifly6
import html
import io
import re
from datetime import datetime
from functools import cache
from typing import Tuple
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
from lxml import etree
from pytz import timezone
from ratelimit import limits, sleep_and_retry
from helpers import ref
from src import wa_cacher
""" Imperium Anglorum:
This is adapted from proprietary InfoEurope code which in part does most of this already. Eg the proposal portions
which translate, the locality adjustments, API reading, etc. There is also code in beta (not-in-production)
which would have done this entirely, but I never got around to developing the VIEWS for that portion of the website.
It seems much easier just to commit something like this given that all the code is already present.
See ifly6.no-ip.org for more information. """
_headers = {
'User-Agent': 'WA parser (Auralia; Imperium Anglorum)'
}
def clean_chamber_input(chamber):
    """ Turns ambiguous chamber information into tuple (int, str) with chamber id and chamber name """
    # Map the accepted string spellings onto numeric chamber ids; anything
    # else passes through untouched.
    if type(chamber) == str:
        chamber = {'1': 1, '2': 2, 'GA': 1, 'SC': 2}.get(chamber, chamber)

    if chamber == 1:
        chamber_name = 'GA'
    elif chamber == 2:
        chamber_name = 'SC'
    else:
        chamber_name = ''
    return chamber, chamber_name
def localised(dt: 'datetime', tz='US/Eastern'):
    """Return the naive datetime *dt* localised to timezone *tz* via pytz."""
    return timezone(tz).localize(dt)
| 38.705882 | 123 | 0.590109 |
9ee7fc2118d9db373e3131dcd7ab5c6417b15d3a | 5,191 | py | Python | conans/search/binary_html_table.py | matthiasng/conan | 634eadc319da928084633a344d42785edccb8d6c | [
"MIT"
] | 2 | 2019-01-09T10:01:29.000Z | 2019-01-09T10:01:31.000Z | conans/search/binary_html_table.py | matthiasng/conan | 634eadc319da928084633a344d42785edccb8d6c | [
"MIT"
] | 1 | 2019-01-09T10:09:41.000Z | 2019-01-09T10:09:41.000Z | conans/search/binary_html_table.py | matthiasng/conan | 634eadc319da928084633a344d42785edccb8d6c | [
"MIT"
] | null | null | null | import os
from collections import OrderedDict, defaultdict
from conans.model.ref import PackageReference
from conans.util.files import save
class Headers(object):
    """Column headers for the binary search results HTML table.

    NOTE(review): only part of this class is visible in this excerpt — the
    ``__init__`` that populates ``self.keys`` / ``self.settings`` /
    ``self.options`` / ``self.requires`` and the ``_group_settings`` helper
    are defined elsewhere in the original file.
    """
    # Preferred left-to-right ordering for setting columns.
    _preferred_ordering = ['os', 'arch', 'compiler', 'build_type']
    def row(self, n_rows=2):
        """
        Retrieve list of headers as a single list (1-row) or as a list of tuples with
        settings organized by categories (2-row).
        Example output:
        1-row: ['os', 'arch', 'compiler', 'compiler.version', 'compiler.libcxx', 'build_type']
        2-row: [('os', ['']), ('arch', ['']), ('compiler', ['', 'version', 'libcxx']),]
        """
        headers = list(self.keys)
        if n_rows == 1:
            # Flat layout: keys, then settings and options, then 'requires'.
            headers.extend(self.settings + self.options)
            if self.requires:
                headers.append('requires')
            return headers
        elif n_rows == 2:
            # Grouped layout: each entry is (category, [subcolumns]).
            headers = [(it, ['']) for it in headers]
            settings = self._group_settings(self.settings)
            headers.extend(settings)
            headers.append(('options', self.options))
            if self.requires:
                headers.append(('requires', ['']))
            return headers
        else:
            # Only 1-row and 2-row layouts are supported.
            raise NotImplementedError("not yet")
| 33.275641 | 98 | 0.571181 |
9eec590065dcf6f8cc85b4d213651d2aa3e487f2 | 1,140 | py | Python | irancovid-19.py | AmiiirCom/irancovid-19 | c8871830e9344c5bf17043c802195911127bc532 | [
"MIT"
] | null | null | null | irancovid-19.py | AmiiirCom/irancovid-19 | c8871830e9344c5bf17043c802195911127bc532 | [
"MIT"
] | null | null | null | irancovid-19.py | AmiiirCom/irancovid-19 | c8871830e9344c5bf17043c802195911127bc532 | [
"MIT"
] | null | null | null | from covid import Covid
import json
# Fetch the latest Iranian COVID-19 figures (worldometers source) via the
# third-party `covid` package and print them as a JSON document.
covid = Covid(source="worldometers")
covid.get_data()
iran_casses = covid.get_status_by_country_name("iran")
# Pull out the individual statistics returned by the API.
confirmed = iran_casses['confirmed']
new_cases = iran_casses['new_cases']
deaths = iran_casses['deaths']
recovered = iran_casses['recovered']
active = iran_casses['active']
critical = iran_casses['critical']
new_deaths = iran_casses ['new_deaths']
total_tests = iran_casses['total_tests']
# Per-million figures are coerced to int for clean JSON output.
total_tests_per_million = int(iran_casses['total_tests_per_million'])
total_cases_per_million = int(iran_casses['total_cases_per_million'])
total_deaths_per_million = int(iran_casses['total_deaths_per_million'])
population = int(iran_casses['population'])
# Serialise the selected fields in a fixed key order.
pr = json.dumps({
    'confirmed': confirmed,
    'new_cases': new_cases,
    'deaths': deaths,
    'recovered': recovered,
    'active': active,
    'critical': critical,
    'new_deaths': new_deaths,
    'total_tests': total_tests,
    'total_tests_per_million': total_tests_per_million,
    'total_cases_per_million': total_cases_per_million,
    'total_deaths_per_million': total_deaths_per_million,
    'population': population
})
print(pr)
9eec86a2c6579218afa159749612db5d5e43ce59 | 3,198 | py | Python | models/__init__.py | esentino/literate-doodle | 598533042602b989a4bdaa8778968c5f3ead3500 | [
"Apache-2.0"
] | null | null | null | models/__init__.py | esentino/literate-doodle | 598533042602b989a4bdaa8778968c5f3ead3500 | [
"Apache-2.0"
] | null | null | null | models/__init__.py | esentino/literate-doodle | 598533042602b989a4bdaa8778968c5f3ead3500 | [
"Apache-2.0"
] | 1 | 2019-09-11T21:27:37.000Z | 2019-09-11T21:27:37.000Z | # models/__init__.py
from clcrypto import password_hash
from psycopg2 import connect
def delete(self, cursor):
        """Delete this user's row from the Users table and reset the cached id.

        NOTE(review): this method references ``self.__id`` and clearly belongs
        inside a class (likely ``User``); in this copy of the file it appears
        at module level, so the enclosing class definition seems to have been
        lost — confirm against the original source. Takes an open DB cursor
        and always returns True.
        """
        sql = "DELETE FROM Users WHERE id=%s"
        cursor.execute(sql, (self.__id,))
        self.__id = -1
        return True
| 31.663366 | 92 | 0.581614 |
9eed09503a5541f18459a14cf6ef3617066817b6 | 4,124 | py | Python | crys3d/command_line/model_viewer.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | crys3d/command_line/model_viewer.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | crys3d/command_line/model_viewer.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import division
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT=1
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export BOOST_ADAPTBX_FPE_DEFAULT=1
import cStringIO
from crys3d.wx_selection_editor import selection_editor_mixin
import wx
import libtbx.load_env
import sys, os, time
########################################################################
# CLASSES AND METHODS FOR STANDALONE VIEWER
#
if __name__ == "__main__" :
if "--test" in sys.argv :
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/1ywf.pdb",
test=os.path.isfile)
run([pdb_file, "--ss"])
else :
run(sys.argv[1:])
| 38.185185 | 78 | 0.707081 |
9eedb43deb24d2533fe70662a5b08fab696d08f6 | 500 | py | Python | Crypto/py3compat.py | eddiejessup/transcrypt | 1a5894a2c355e1b88626a2b195e132bd7e701981 | [
"MIT"
] | 14 | 2015-02-15T02:17:07.000Z | 2020-07-15T03:02:46.000Z | Crypto/py3compat.py | eddiejessup/Transcrypt | 1a5894a2c355e1b88626a2b195e132bd7e701981 | [
"MIT"
] | 12 | 2015-04-11T14:26:14.000Z | 2021-09-07T09:25:38.000Z | Crypto/py3compat.py | eddiejessup/Transcrypt | 1a5894a2c355e1b88626a2b195e132bd7e701981 | [
"MIT"
] | 4 | 2016-02-27T16:06:59.000Z | 2019-09-04T04:01:05.000Z | __revision__ = "$Id$"
from io import BytesIO
| 13.513514 | 38 | 0.542 |
9eedcf612c173937e475b9b20ab18a1677cc7feb | 2,758 | py | Python | verres/optim/schedule.py | csxeba/Verres | 04230d22b7791f84d86b9eb2272a6314a27580ed | [
"MIT"
] | null | null | null | verres/optim/schedule.py | csxeba/Verres | 04230d22b7791f84d86b9eb2272a6314a27580ed | [
"MIT"
] | null | null | null | verres/optim/schedule.py | csxeba/Verres | 04230d22b7791f84d86b9eb2272a6314a27580ed | [
"MIT"
] | null | null | null | from typing import Dict
import numpy as np
import tensorflow as tf
import verres as V
def factory(spec: dict) -> tf.optimizers.schedules.LearningRateSchedule:
    """Build a learning-rate schedule from a config dict.

    `spec` holds the schedule's constructor kwargs plus an optional "name"
    key. "default"/"constant" (case-insensitive) map to the project's
    ConstantSchedule built from spec["learning_rate"]; any other name is
    looked up on `tf.optimizers.schedules` and instantiated with the
    remaining kwargs.

    NOTE(review): `spec.pop(...)` mutates the caller's dict — the "name" key
    is removed by this call; confirm callers do not reuse `spec`.

    Raises:
        KeyError: if "name" does not match a tf.optimizers.schedules class.
    """
    name = spec.pop("name", "default")
    if name.lower() in {"default", "constant"}:
        scheduler = ConstantSchedule(float(spec["learning_rate"]))
    else:
        scheduler_type = getattr(tf.optimizers.schedules, name, None)
        if scheduler_type is None:
            raise KeyError(f"No such scheduler: {name}")
        scheduler = scheduler_type(**spec)
    print(f" [Verres.schedule] - Factory built: {name}")
    return scheduler
| 32.069767 | 118 | 0.62074 |
9eeee0e6163243e2bcb3f1fbe4bb62fbc1fef478 | 4,865 | py | Python | JIG.py | mmg1/JIG | bc36ed013b5ba48e549a16151b9135e271d55055 | [
"MIT"
] | 28 | 2017-12-04T02:03:25.000Z | 2021-09-13T04:37:21.000Z | JIG.py | mmg1/JIG | bc36ed013b5ba48e549a16151b9135e271d55055 | [
"MIT"
] | 1 | 2018-01-20T21:13:56.000Z | 2018-01-20T21:13:56.000Z | JIG.py | NetSPI/JIG | bc36ed013b5ba48e549a16151b9135e271d55055 | [
"MIT"
] | 18 | 2018-01-08T13:40:29.000Z | 2022-02-20T17:10:57.000Z | import re
import sys
from itertools import izip as zip
import argparse
import requests
# argparse definitions
parser = argparse.ArgumentParser(description='Jira attack script')
parser.add_argument('URL', type=str , help='the URL of the Jira instance... ex. https://jira.organization.com/')
parser.add_argument('-u' ,'--usernames', dest='names', action='store_const', const=True, help='Print discovered usernames')
parser.add_argument('-e' , '--emails', dest='emails',action='store_const', const=True, help='Print discovered email addresses')
parser.add_argument('-a' ,'--all', dest='all',action='store_const',const=True,help='Print discovered email addresses and usernames')
parser.add_argument('-eu' , dest='all',action='store_const',const=True,help=argparse.SUPPRESS)
parser.add_argument('-ue' , dest='all',action='store_const',const=True,help=argparse.SUPPRESS)
args = parser.parse_args()
url = args.URL
if args.URL[-1] != '/':
args.URL = args.URL + "/"
# Define URLs
pickerURL = args.URL + "secure/popups/UserPickerBrowser.jspa?max=9999"
filtersURL = args.URL + "secure/ManageFilters.jspa?filter=popular"
#dashboardURL = args.URL + "secure/Dashboard.jspa"
def extractPicker(response):
    '''
    Parses the UserPickerBrowser response body and builds a mapping of
    discovered usernames to their email addresses.
    '''
    body = response.text
    names = re.findall(r"-name\">(.*)</td>", body)
    emails = re.findall(r">(.*\@.*)</td>", body)
    # Pair the i-th username with the i-th email address.
    return {name: mail for name, mail in zip(names, emails)}
def extractFilters(response):
    '''
    Parses the manage-filters page body and returns the de-duplicated list
    of usernames found in it.
    '''
    found = re.findall(r"</span>.\((.*)\)", response.text)
    unique = set(found)
    return list(unique)
def validateURL(url):
    '''
    Runs a stream of validation on a given URL and returns the response and a boolean value.
    '''
    # NOTE: this file targets Python 2 (statement-form `print`, and the
    # module imports `izip as zip` at the top).
    # Fetch the page without following redirects; hard-exit the script on any
    # request failure, with a message specific to the failure mode.
    try:
        s = requests.Session()
        validateresponse = s.get(url , allow_redirects=False,timeout=5)
    except requests.exceptions.InvalidSchema:
        print ""
        print "[-] Invalid schema provided... Must follow format https://jira.organization.com/"
        print ""
        sys.exit(1)
    except requests.exceptions.MissingSchema:
        print ""
        print "[-] A supported schema was not provided. Please use http:// or https://"
        print ""
        sys.exit(1)
    except requests.exceptions.InvalidURL:
        print "[-] Invalid base URL was supplied... Please try again."
        sys.exit(1)
    except requests.exceptions.ConnectionError:
        print ""
        print "[-] Connection failed... Please check the URL and try again."
        print ""
        sys.exit(1)
    # RequestException is the base class, so it must stay last in the chain.
    except requests.exceptions.RequestException:
        print ""
        print "[-] An unknown exception occurred... Please try again."
        print ""
        sys.exit(1)
    # On HTTP 200 return (response, True); otherwise (error message, False).
    if validateresponse.status_code == 200:
        return validateresponse,True
    else:
        return "[-] The page is inaccessible",False
if __name__ == "__main__":
pickerResponse,pickerAccessible = validateURL(pickerURL)
filterResponse,filterAccessible = validateURL(filtersURL)
print ""
print ""
print "[+] Checking the User Picker page..."
if pickerAccessible == True:
users = extractPicker(pickerResponse)
print ""
print "[+] Success..."
print "[+] Users: "+str(len(users))
print "[+] Emails: " + str(len(users))
print ""
if (args.emails and args.names) or args.all:
print '{:<20}{:<20}'.format("---Username---", "---------Email---------")
for username, email in sorted(users.iteritems()):
print '{:<20}{:<20}'.format(username,email)
elif args.emails:
for username,email in sorted(users.iteritems()):
print email
elif args.names:
for username,email in sorted(users.iteritems()):
print username
print ""
elif pickerAccessible == False:
print pickerResponse
print ""
print ""
print "[+] Checking the Manage Filters page..."
if filterAccessible == True:
filterUsers = extractFilters(filterResponse)
if args.names or args.all:
if len(filterUsers) == 0:
print "[-] We could not find any anonymously accessible filters"
print ""
else:
print "[+] The Manage Filters page is accessible and contains data..."
print ""
for username in filterUsers:
print username
print ""
elif filterAccessible == False:
print filterResponse | 39.233871 | 133 | 0.615211 |
9eef48e8177814194dd2d1510e39357b5d13bd02 | 4,383 | py | Python | run.py | SamChatfield/final-year-project | 9d1ae2cb3009ffbff89cb438cfcde855db8a53ac | [
"MIT"
] | null | null | null | run.py | SamChatfield/final-year-project | 9d1ae2cb3009ffbff89cb438cfcde855db8a53ac | [
"MIT"
] | null | null | null | run.py | SamChatfield/final-year-project | 9d1ae2cb3009ffbff89cb438cfcde855db8a53ac | [
"MIT"
] | null | null | null | import json
import string
from datetime import datetime
import deap
import numpy as np
import hmm
from discriminator import Discriminator
from ea import EA
import random_search
# Default hyper-parameters for the GAN-style HMM/EA experiment; consumed by
# main() (defined elsewhere in this module, not visible in this excerpt).
DEFAULT_PARAMS = {
    # Discriminator CNN model
    "model": "CNNModel3",
    # Algorithm Parameters
    "states": 5,
    "symbols": 5,
    "epochs": 10,
    "epoch_size": 500,
    "batch_size": 200,
    "seq_len": 20,
    "pop_size": 25,
    "gens": 50,
    "offspring_prop": 1.0,
    "cx_prob": 0.0,
    "mut_fn": "uniform",
    "mut_prob": 1.0,
    "mut_rate": None, # None - default to 1/N where N is number of genes
    # Implementation Parameters
    "_pool_size": 4,
    "_random_search": True, # Also run an elitist random search over #gens to compare performance
}
if __name__ == "__main__":
    # main() is expected to be defined elsewhere in this module.
    main()
| 26.72561 | 98 | 0.6094 |
9ef2b9fdb256c9db58c16d3d792f230772a8e948 | 2,174 | py | Python | rrc_example_package/benchmark_rrc/tools/plot/exp_align_obj.py | wq13552463699/TriFinger_Research | 6ddfab4531cb4ba05a0fbb41227a734295dce378 | [
"BSD-3-Clause"
] | 12 | 2021-05-06T18:00:21.000Z | 2022-01-11T14:23:22.000Z | rrc_example_package/benchmark_rrc/tools/plot/exp_align_obj.py | wq13552463699/TriFinger_Research | 6ddfab4531cb4ba05a0fbb41227a734295dce378 | [
"BSD-3-Clause"
] | 3 | 2021-06-03T16:06:01.000Z | 2021-08-15T13:40:09.000Z | rrc_example_package/benchmark_rrc/tools/plot/exp_align_obj.py | wq13552463699/TriFinger_Research | 6ddfab4531cb4ba05a0fbb41227a734295dce378 | [
"BSD-3-Clause"
] | 4 | 2021-05-12T02:34:34.000Z | 2021-07-18T19:54:50.000Z | #!/usr/bin/env python3
'''
This code traverses a directories of evaluation log files and
record evaluation scores as well as plotting the results.
'''
import os
import argparse
import json
import copy
from shutil import copyfile
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from utils import *
MAX_ALIGN_STEPS = 75000 - 1 # This depends on the evaluation code used to generate the logs
def generate_csv(log_dir, csv_file):
'''
Traverse and read log files, and then output csv file from the eval data.
- file to be generated: 'eval_scores.csv'
- columns: state_machine_id, timesteps, rot_error
'''
df = pd.DataFrame(columns=['state_machine_id', 'state_machine_name', 'timesteps', 'rot_error'])
model_names = extract_model_names(log_dir)
# Traverse all episodes and add each entry to data frame
for state_machine_id, episode_idx, episode_dir in traverse_all_episodes(log_dir):
json_util = JsonUtil(os.path.join(episode_dir, 'goal.json'))
entry = {
'state_machine_id': state_machine_id,
'state_machine_name': model_names[state_machine_id],
**json_util.load()
}
# Handling the timesteps==-1 case
if entry['reachfinish'] == -1:
entry['reachfinish'] = MAX_ALIGN_STEPS
if entry['reachstart'] == -1:
raise ValueError('\'reachstart\' in {episode_dir}/goal.json does not contain a valid value.')
# Rename dict keys
entry['timesteps'] = entry.pop('reachfinish') - entry.pop('reachstart')
entry['rot_error'] = entry.pop('align_obj_error')
entry['init_rot_error'] = entry.pop('init_align_obj_error', None)
# Add a new entry
entry['rot_error_diff'] = entry['init_rot_error'] - entry['rot_error']
df = df.append(entry, ignore_index=True) # df.append works differently from python since it is stupid
df.to_csv(csv_file, index=False)
| 35.639344 | 110 | 0.689512 |
9ef2bd5f0fee2640fb7fcf65e291ea514c7f1058 | 286 | py | Python | test cases/common/64 custom header generator/makeheader.py | objectx/meson | c0f097c0c74551972f7ec2203cd960824984f058 | [
"Apache-2.0"
] | null | null | null | test cases/common/64 custom header generator/makeheader.py | objectx/meson | c0f097c0c74551972f7ec2203cd960824984f058 | [
"Apache-2.0"
] | null | null | null | test cases/common/64 custom header generator/makeheader.py | objectx/meson | c0f097c0c74551972f7ec2203cd960824984f058 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# NOTE: this file does not have the executable bit set. This tests that
# Meson can automatically parse shebang lines.
import sys

# Header template; %s receives the value read from the input file.
template = '#define RET_VAL %s\n'

# Read the first line of the input file (argv[1]), substitute it into the
# template, then write the generated header to the output file (argv[2]).
# Context managers close both handles promptly instead of leaving them to
# the garbage collector, which the previous open(...).write() form did.
with open(sys.argv[1]) as input_file:
    output = template % (input_file.readline().strip())
with open(sys.argv[2], 'w') as output_file:
    output_file.write(output)
| 26 | 71 | 0.713287 |
9ef42081bff35de1f92bff97bfccd08e32e6f3d8 | 395 | py | Python | studio_ghibli/movies/test_data.py | hbansal0122/studio_ghibli_project | 1a2df853f9d5088aa137f372ab0ee83ce8ba3667 | [
"MIT"
] | null | null | null | studio_ghibli/movies/test_data.py | hbansal0122/studio_ghibli_project | 1a2df853f9d5088aa137f372ab0ee83ce8ba3667 | [
"MIT"
] | null | null | null | studio_ghibli/movies/test_data.py | hbansal0122/studio_ghibli_project | 1a2df853f9d5088aa137f372ab0ee83ce8ba3667 | [
"MIT"
] | null | null | null | """ Test data"""
# Two fake film records shaped like the Studio Ghibli API "films" payload,
# used as canned responses in tests.
stub_films = [
    {"id": "12345", "title": "This is film one"},
    {"id": "23456", "title": "This is film two"},
]
stub_poeple = [{
"name": "person 1",
"films": ["url/12345", "url/23456"]
},{
"name": "person 2",
"films": ["url/23456"]
},{
"name": "person 3",
"films": ["url/12345"]
},{
"name": "person 4",
"films": ["url/12345"]
}] | 16.458333 | 39 | 0.463291 |
9ef4febad34c41f83b4899c15a9e9cfec2b40a27 | 236 | py | Python | data_converters/fsdbripper/create_new_db.py | osvaldolove/amiberry-api | 3310592d2411c69f7c225edb3e3907e6a5e6caf8 | [
"MIT"
] | null | null | null | data_converters/fsdbripper/create_new_db.py | osvaldolove/amiberry-api | 3310592d2411c69f7c225edb3e3907e6a5e6caf8 | [
"MIT"
] | null | null | null | data_converters/fsdbripper/create_new_db.py | osvaldolove/amiberry-api | 3310592d2411c69f7c225edb3e3907e6a5e6caf8 | [
"MIT"
] | 1 | 2018-08-22T21:55:26.000Z | 2018-08-22T21:55:26.000Z | import sqlite3
from constants import DESTINATION_DB
destination_connection = sqlite3.connect(DESTINATION_DB)
destination_cursor = destination_connection.cursor()
destination_cursor.execute('CREATE TABLE game(uuid, payload)')
| 26.222222 | 63 | 0.817797 |
9ef65f5bf372723d5444efb6cd95a0880cc13cef | 7,366 | py | Python | upvote/gae/shared/common/json_utils_test.py | cclauss/upvote | 9d526fec72690cde1575dbd32dacf68cbbab81d1 | [
"Apache-2.0"
] | null | null | null | upvote/gae/shared/common/json_utils_test.py | cclauss/upvote | 9d526fec72690cde1575dbd32dacf68cbbab81d1 | [
"Apache-2.0"
] | null | null | null | upvote/gae/shared/common/json_utils_test.py | cclauss/upvote | 9d526fec72690cde1575dbd32dacf68cbbab81d1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for json_utils."""
import datetime
import json
from google.appengine.ext import ndb
from common.testing import basetest
from upvote.gae.datastore.models import santa
from upvote.gae.shared.common import json_utils
from upvote.shared import constants
if __name__ == '__main__':
basetest.main()
| 33.481818 | 79 | 0.691827 |
9ef7f25002d6a0233c11be0350ae657d327330f8 | 3,728 | py | Python | app.py | YukiNagat0/Blog | 6f01d1a3e73f1f865b5d22dbdbb27a5acfb3e937 | [
"MIT"
] | 1 | 2021-06-24T17:48:37.000Z | 2021-06-24T17:48:37.000Z | app.py | YukiNagat0/Blog | 6f01d1a3e73f1f865b5d22dbdbb27a5acfb3e937 | [
"MIT"
] | null | null | null | app.py | YukiNagat0/Blog | 6f01d1a3e73f1f865b5d22dbdbb27a5acfb3e937 | [
"MIT"
] | null | null | null | from os import path
from typing import Union
from datetime import datetime
from flask import Flask, request, redirect, render_template
from flask_wtf import CSRFProtect
from werkzeug.utils import secure_filename
from data import db_session
from data.posts import Posts
from forms.edit_post_form import EditPostForm
app = Flask(__name__)
app.config['SECRET_KEY'] = 'SECRET_KEY'
csrf_protect = CSRFProtect(app)
UPLOAD_FOLDER = 'static/posts_img/'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
DATA_BASE = 'db/blog.sqlite'
app.config['DATA_BASE'] = DATA_BASE
def main():
db_session.global_init(app.config['DATA_BASE'])
app.run('127.0.0.1', 8080)
if __name__ == '__main__':
main()
| 25.888889 | 117 | 0.668455 |
9ef839c4fcb13ab1bd28852911644c75dc9c3837 | 48,320 | py | Python | neon/backends/gpu.py | kashif/neon | d4d8ed498ee826b67f5fda1746d2d65c8ce613d2 | [
"Apache-2.0"
] | 1 | 2018-07-17T16:54:58.000Z | 2018-07-17T16:54:58.000Z | neon/backends/gpu.py | kashif/neon | d4d8ed498ee826b67f5fda1746d2d65c8ce613d2 | [
"Apache-2.0"
] | null | null | null | neon/backends/gpu.py | kashif/neon | d4d8ed498ee826b67f5fda1746d2d65c8ce613d2 | [
"Apache-2.0"
] | 2 | 2016-06-09T13:05:00.000Z | 2021-02-18T14:18:15.000Z | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Neon backend wrapper for the NervanaGPU library. Most functions are thin
wrappers around functions from the NervanaGPU class, the GPUTensor is taken
directly from NervanaGPU as well.
NervanaGPU is available at `<https://github.com/NervanaSystems/nervanagpu>`
"""
import logging
from neon.backends.backend import Backend
from nervanagpu import NervanaGPU
from neon.diagnostics.timing_decorators import FlopsDecorator
import pycuda.driver as drv
import numpy as np
logger = logging.getLogger(__name__)
| 39.736842 | 79 | 0.556126 |
9ef85b894eb9c57e729d7cdbf2e496c34efcf07f | 23,685 | py | Python | test/test_automl/test_automl.py | ihounie/auto-sklearn | 6a72f0df60b0c66ad75b0100d8d22c07da6217bb | [
"BSD-3-Clause"
] | null | null | null | test/test_automl/test_automl.py | ihounie/auto-sklearn | 6a72f0df60b0c66ad75b0100d8d22c07da6217bb | [
"BSD-3-Clause"
] | null | null | null | test/test_automl/test_automl.py | ihounie/auto-sklearn | 6a72f0df60b0c66ad75b0100d8d22c07da6217bb | [
"BSD-3-Clause"
] | 1 | 2021-04-06T09:38:12.000Z | 2021-04-06T09:38:12.000Z | # -*- encoding: utf-8 -*-
import os
import pickle
import sys
import time
import glob
import unittest
import unittest.mock
import numpy as np
import pandas as pd
import sklearn.datasets
from smac.scenario.scenario import Scenario
from smac.facade.roar_facade import ROAR
from autosklearn.util.backend import Backend
from autosklearn.automl import AutoML
import autosklearn.automl
from autosklearn.data.xy_data_manager import XYDataManager
from autosklearn.metrics import accuracy, log_loss, balanced_accuracy
import autosklearn.pipeline.util as putil
from autosklearn.util.logging_ import setup_logger, get_logger
from autosklearn.constants import MULTICLASS_CLASSIFICATION, BINARY_CLASSIFICATION, REGRESSION
from smac.tae.execute_ta_run import StatusType
sys.path.append(os.path.dirname(__file__))
from base import Base # noqa (E402: module level import not at top of file)
def test_fail_if_feat_type_on_pandas_input(self):
"""We do not support feat type when pandas
is provided as an input
"""
backend_api = self._create_backend('test_fail_feat_pandas')
automl = autosklearn.automl.AutoML(
backend=backend_api,
time_left_for_this_task=20,
per_run_time_limit=5,
metric=accuracy,
)
X_train = pd.DataFrame({'a': [1, 1], 'c': [1, 2]})
y_train = [1, 0]
with self.assertRaisesRegex(ValueError,
"feat_type cannot be provided when using pandas"):
automl.fit(
X_train, y_train,
task=BINARY_CLASSIFICATION,
feat_type=['Categorical', 'Numerical'],
)
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
def test_fail_if_dtype_changes_automl(self):
"""We do not support changes in the input type.
Once a estimator is fitted, it should not change data type
"""
backend_api = self._create_backend('test_fail_feat_typechange')
automl = autosklearn.automl.AutoML(
backend=backend_api,
time_left_for_this_task=20,
per_run_time_limit=5,
metric=accuracy,
)
X_train = pd.DataFrame({'a': [1, 1], 'c': [1, 2]})
y_train = [1, 0]
automl.InputValidator.validate(X_train, y_train, is_classification=True)
with self.assertRaisesRegex(ValueError,
"Auto-sklearn previously received features of type"):
automl.fit(
X_train.to_numpy(), y_train,
task=BINARY_CLASSIFICATION,
)
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
if __name__ == "__main__":
unittest.main()
| 39.343854 | 94 | 0.609246 |
9ef87644a467b7a43c75ac4ae95f1780dab19950 | 3,934 | py | Python | algopy/base_type.py | arthus701/algopy | 1e2430f803289bbaed6bbdff6c28f98d7767835c | [
"Unlicense"
] | 54 | 2015-03-05T13:38:08.000Z | 2021-11-29T11:54:48.000Z | algopy/base_type.py | arthus701/algopy | 1e2430f803289bbaed6bbdff6c28f98d7767835c | [
"Unlicense"
] | 7 | 2016-04-06T11:25:00.000Z | 2020-11-09T13:53:20.000Z | algopy/base_type.py | arthus701/algopy | 1e2430f803289bbaed6bbdff6c28f98d7767835c | [
"Unlicense"
] | 13 | 2015-01-17T17:05:56.000Z | 2021-08-05T01:13:16.000Z | """
This implements an abstrace base class Ring .
Rationale:
Goal is to separate the datatype specification from the algorithms and containers for the following reasons:
1) It allows to directly use the algorithms *without* overhead. E.g. calling mul(z.data, x.data, y.data)
has much less overhead than z = x.__mul__(y). data is to be kept as close as possible to
machine primitives. E.g. data is array or tuple of arrays.
2) Potential reuse of an algorithm in several datatypes.
3) Relatively easy to connect high performance algorithms with a very highlevel abstract description.
For instance, most programming languages allow calling C-functions. Therefore, the algorithms
should be given as void fcn(int A, double B, ...)
For instance, the datatype is a truncated Taylor polynomial R[t]/<t^D> of the class Foo.
The underlying container is a simple array of doubles.
"""
import numpy
| 35.125 | 113 | 0.630147 |
9ef906903676953e2a8a6d553c8fc0e08426873c | 556 | py | Python | estrutura-repeticao-while/ex062.py | TacilioRodriguez/Python | 0b98dc8336e014046c579b387013b2871024e3d0 | [
"Unlicense"
] | null | null | null | estrutura-repeticao-while/ex062.py | TacilioRodriguez/Python | 0b98dc8336e014046c579b387013b2871024e3d0 | [
"Unlicense"
] | null | null | null | estrutura-repeticao-while/ex062.py | TacilioRodriguez/Python | 0b98dc8336e014046c579b387013b2871024e3d0 | [
"Unlicense"
] | null | null | null | """
Melhore o Desafio 061, perguntando para o usurio se ele quer mostrar mais alguns termos.
O programa encerra quando ele disser que quer mostrar 0 termos.
"""
# Read the first term and the common difference of the arithmetic progression.
primeiro = int(input('Digite o termo: '))
razao = int(input('Digite a razo: '))
# `termo` walks through the progression; `cont` counts terms printed so far
# and `total` is the cumulative number of terms requested.
termo = primeiro
cont = 1
total = 0
# Start by showing 10 terms; entering 0 later ends the program.
mais = 10
while mais != 0:
    total = total + mais
    # Print the next batch of terms of the progression.
    while cont <= total:
        print('{} -> '.format(termo), end=' ')
        termo = termo + razao
        cont = cont + 1
    print('Pausa')
    # Ask how many more terms to show; 0 stops the outer loop.
    mais = int(input('Quantos termos voc quer mostrar a mais? '))
print('FIM') | 27.8 | 89 | 0.633094 |
9ef958e7d381e2efbcf979fbddc497610f9580d1 | 3,487 | py | Python | Udemy_PythonBootcamp/Sec15_WebScraping.py | gonzalosc2/LearningPython | 0210d4cbbb5e154f12007b8e8f825fd3d0022be0 | [
"MIT"
] | null | null | null | Udemy_PythonBootcamp/Sec15_WebScraping.py | gonzalosc2/LearningPython | 0210d4cbbb5e154f12007b8e8f825fd3d0022be0 | [
"MIT"
] | null | null | null | Udemy_PythonBootcamp/Sec15_WebScraping.py | gonzalosc2/LearningPython | 0210d4cbbb5e154f12007b8e8f825fd3d0022be0 | [
"MIT"
] | null | null | null | ####################################
# author: Gonzalo Salazar
# course: 2020 Complete Python Bootcamps: From Zero to Hero in Python
# purpose: lecture notes
# description: Section 15 - Web Scraping
# other: N/A
####################################
# RULES
# 1. always try to get permission before scraping, otherwise I might be blocked
# 2. check the laws of whatever country we are operating in (for legal issues)
# LIMITATIONS
# each website is unique -> so for each website there must exist a Python script
# an update to a website might brake my script
import requests
import bs4
# Grabbing a title
result = requests.get("http://example.com")
type(result)
result.text
# bs with lxml tranforms the previous raw html into the following
soup = bs4.BeautifulSoup(result.text,'lxml')
soup
# returns the tag we specified as a list (i.e., there might be more than one)
soup.select('title')
soup.select('title')[0].getText()
soup.select('p')
site_paragraphs = soup.select('p')
type(site_paragraphs[0]) # not a string, instead is a specialized bs object,
# which is why we can do something like call .getText()
# Grabbing a class (from CSS) using soup.select()
# 'div' : all elements with 'div' tag
# '#some_id' : elements containing id='some_id'
# '.some_class' : elements containing class='some_class'
# 'div span' : any element named span within a div element
# 'div > span' : any element named span directly within a div element, with
# nothing in between
res = requests.get("https://en.wikipedia.org/wiki/Jonas_Salk")
soup = bs4.BeautifulSoup(res.text,'lxml')
soup.select('.toctext')[0].text
soup.select('.toctext')[0].getText()
for item in soup.select('.toctext'):
print(item.text)
# Grabbing an image
#soup.select('img') # can return more than what is needeed (it will depend on
# the website)
soup.select('.thumbimage')
jonas_salk = soup.select('.thumbimage')[0]
jonas_salk['src'] # we can treat it as a dictionary
image_link = requests.get('http://upload.wikimedia.org/wikipedia/commons/thumb/3/3c/Roosevelt_OConnor.jpg/220px-Roosevelt_OConnor.jpg')
#image_link.content # raw content of the image which is a binary file
#make sure to use the same format that the image has
f = open('my_image_image.jpg','wb') # wb means write binary
f.write(image_link.content)
f.close()
# Multiple elements across multiple pages
# GOAL: get title of every book with a 2 star rating
#Check that this also work with page 1
#http://books.toscrape.com/catalogue/page-2.html
base_url = 'http://books.toscrape.com/catalogue/page-{}.html'
req = requests.get(base_url.format(1))
soup = bs4.BeautifulSoup(req.text,'lxml')
products = soup.select(".product_pod") # always check the length, in this case should be 20
example = products[0]
# one way (not useful everytime)
'star-rating Two' in str(example)
# another way (checking for the presence of a class)
example.select('.star-rating.Three') # if there is a space in a class we should add a dot
example.select('.star-rating.Two') # nothing
example.select('a')[1]['title']
two_star_titles = []
for n in range(1,51):
scrape_url = base_url.format(n)
req = requests.get(base_url.format(1))
soup = bs4.BeautifulSoup(req.text,'lxml')
books = soup.select(".product_pod")
for book in books:
if len(book.select('.star-rating.Two')) != 0:
two_star_titles.append(book.select('a')[1]['title'])
two_star_titles
| 32.287037 | 135 | 0.694006 |
9ef987b5b2fc09a91874ef390e457aed66cdf6c0 | 10,220 | py | Python | anchore_engine/analyzers/modules/33_binary_packages.py | dspalmer99/anchore-engine | 8c61318be6fec5d767426fa4ccd98472cc85b5cd | [
"Apache-2.0"
] | null | null | null | anchore_engine/analyzers/modules/33_binary_packages.py | dspalmer99/anchore-engine | 8c61318be6fec5d767426fa4ccd98472cc85b5cd | [
"Apache-2.0"
] | null | null | null | anchore_engine/analyzers/modules/33_binary_packages.py | dspalmer99/anchore-engine | 8c61318be6fec5d767426fa4ccd98472cc85b5cd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import sys
import os
import re
import json
import traceback
import pkg_resources
import tarfile
from collections import OrderedDict
import anchore_engine.analyzers.utils, anchore_engine.utils
# Name under which this analyzer registers itself / locates its directories.
analyzer_name = "package_list"
try:
    # Parse CLI arguments into the analyzer config; any failure is fatal.
    config = anchore_engine.analyzers.utils.init_analyzer_cmdline(sys.argv, analyzer_name)
except Exception as err:
    print(str(err))
    sys.exit(1)
# Commonly used config values pulled out for convenience.
imgname = config['imgid']
imgid = config['imgid_full']
outputdir = config['dirs']['outputdir']
unpackdir = config['dirs']['unpackdir']
squashtar = os.path.join(unpackdir, "squashed.tar")
# Accumulators: file location -> serialized package record, and
# "<runtime>-<version>" keys already emitted (used to avoid duplicates).
resultlist = {}
version_found_map = {}
# Template record for a discovered binary package; copied and filled in
# per finding.
binary_package_el = {
    'name': None,
    'version': None,
    'location': None,
    'type': 'binary',
    'files': [],
    'license': 'N/A',
    'origin': 'N/A',
    'metadata': json.dumps({})
}
try:
allfiles = {}
if os.path.exists(unpackdir + "/anchore_allfiles.json"):
with open(unpackdir + "/anchore_allfiles.json", 'r') as FH:
allfiles = json.loads(FH.read())
else:
fmap, allfiles = anchore_engine.analyzers.utils.get_files_from_squashtar(os.path.join(unpackdir, "squashed.tar"))
with open(unpackdir + "/anchore_allfiles.json", 'w') as OFH:
OFH.write(json.dumps(allfiles))
# read in previous analyzer output for helping to increase accuracy of findings
fname = os.path.join(outputdir, 'pkgfiles.all')
pkgfilesall = anchore_engine.analyzers.utils.read_kvfile_todict(fname)
meta = anchore_engine.analyzers.utils.get_distro_from_squashtar(os.path.join(unpackdir, "squashed.tar"), unpackdir=unpackdir)
distrodict = anchore_engine.analyzers.utils.get_distro_flavor(meta['DISTRO'], meta['DISTROVERS'], likedistro=meta['LIKEDISTRO'])
# set up ordered dictionary structure for the runtimes and evidence types
evidence = OrderedDict()
for runtime in ['python', 'go', 'busybox']:
evidence[runtime] = OrderedDict()
for etype in ['binary', 'devel']:
evidence[runtime][etype] = []
# Perform a per file routine to evaluate files for gathering binary package version evidence
with tarfile.open(os.path.join(unpackdir, "squashed.tar"), mode='r', format=tarfile.PAX_FORMAT) as tfl:
alltnames = tfl.getnames()
alltfiles = {}
for name in alltnames:
alltfiles[name] = True
memberhash = anchore_engine.analyzers.utils.get_memberhash(tfl)
for member in list(memberhash.values()):
try:
get_python_evidence(tfl, member, memberhash, evidence)
except Exception as err:
print ("WARN: caught exception evaluating file ({}) for python runtime evidence: {}".format(member.name, str(err)))
try:
get_golang_evidence(tfl, member, memberhash, evidence)
except Exception as err:
print ("WARN: caught exception evaluating file ({}) for golang runtime evidence: {}".format(member.name, str(err)))
try:
get_busybox_evidence(tfl, member, memberhash, distrodict, evidence)
except Exception as err:
print ("WARN: caught exception evaluating file ({}) for busybox runtime evidence: {}".format(member.name, str(err)))
resultlist = {}
for runtime in evidence.keys(): #['python', 'go']:
for e in evidence[runtime].keys(): #['binary', 'devel']:
for t in evidence[runtime][e]:
version = t.get('version')
location = t.get('location')
if location in pkgfilesall:
print ("INFO: Skipping evidence {} - file is owned by OS package".format(location))
else:
key = "{}-{}".format(runtime, version)
if key not in version_found_map:
result = {}
result.update(binary_package_el)
result.update(t)
result['metadata'] = json.dumps({"evidence_type": e})
resultlist[location] = json.dumps(result)
version_found_map[key] = True
try:
squashtar = os.path.join(unpackdir, "squashed.tar")
hints = anchore_engine.analyzers.utils.get_hintsfile(unpackdir, squashtar)
for pkg in hints.get('packages', []):
pkg_type = pkg.get('type', "").lower()
if pkg_type == 'binary':
try:
pkg_key, el = anchore_engine.analyzers.utils._hints_to_binary(pkg)
try:
resultlist[pkg_key] = json.dumps(el)
except Exception as err:
print ("WARN: unable to add binary package ({}) from hints - excpetion: {}".format(pkg_key, err))
except Exception as err:
print ("WARN: bad hints record encountered - exception: {}".format(err))
except Exception as err:
print ("WARN: problem honoring hints file - exception: {}".format(err))
except Exception as err:
import traceback
traceback.print_exc()
print("WARN: analyzer unable to complete - exception: " + str(err))
if resultlist:
ofile = os.path.join(outputdir, 'pkgs.binary')
anchore_engine.analyzers.utils.write_kvfile_fromdict(ofile, resultlist)
#print ("RESULT: {}".format(resultlist))
sys.exit(0)
| 41.044177 | 148 | 0.545108 |
9ef9c33373ed6286394fc6556d56b0671f5ed0ac | 20,610 | py | Python | SF-home-price-prediction/src/preparation.py | apthomas/SF-home-price-prediction | 448dac93ef26022bc81fab4665a12f592f9556a1 | [
"MIT"
] | null | null | null | SF-home-price-prediction/src/preparation.py | apthomas/SF-home-price-prediction | 448dac93ef26022bc81fab4665a12f592f9556a1 | [
"MIT"
] | null | null | null | SF-home-price-prediction/src/preparation.py | apthomas/SF-home-price-prediction | 448dac93ef26022bc81fab4665a12f592f9556a1 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import csv
import urllib.request
import json
from datetime import datetime
from datetime import timedelta
from sklearn.preprocessing import MinMaxScaler
import web_scrapers
import os
def wrangle_real_estate_headers(df):
    '''
    Normalize an "All Homes" dataframe's column headers before joins.

    Strips any existing 'All Homes ' prefix, re-applies it uniformly to every
    column, then renames 'All Homes RegionName' to 'Zipcode' so the zipcode
    column can serve as the join key. Run before joining dataframes so keys
    match.

    NOTE(review): the first line reassigns `df.columns`, which mutates the
    caller's dataframe in place; `add_prefix` then returns a new frame, and
    that new frame is what gets renamed and returned.

    Analogous wrangling for the sales-counts dataframe:
    df_sale_counts_by_zip_silicon_valley.columns = df_sale_counts_by_zip_silicon_valley.columns.str.replace('Sales Counts ', '')
    df_sale_counts_by_zip_silicon_valley = df_sale_counts_by_zip_silicon_valley.add_prefix('Sales Counts ')
    df_sale_counts_by_zip_silicon_valley.rename(columns = {'Sales Counts RegionName':'Zipcode'}, inplace=True)
    '''
    df.columns = df.columns.str.replace('All Homes ', '')
    df = df.add_prefix('All Homes ')
    df.rename(columns={'All Homes RegionName': 'Zipcode'}, inplace=True)
    return df
def create_zipcode_distances_dictionary(zipcodes, zip_list):
    '''
    Populate *zipcodes* with radius lookups for every zipcode in *zip_list*.

    Each entry maps a zipcode to the tuple returned by two calls to
    calculate_distance_between_zips — presumably (zipcodes within 0-5 miles,
    zipcodes within 5-10 miles), judging by the cache filename below.

    ***DONT RUN IF THESE ARE ALREADY CREATED***
    currently stored as data/processed/zipcodes_within_radius.txt

    Returns:
        The same *zipcodes* dict, mutated in place.
    '''
    print(len(zip_list))
    # Iterate the zipcodes directly instead of indexing with
    # range(len(...)), and build the tuple explicitly for readability.
    for zipcode in zip_list:
        zipcodes[zipcode] = (
            calculate_distance_between_zips(zipcode, '0', '5'),
            calculate_distance_between_zips(zipcode, '5', '10'),
        )
    return zipcodes
def create_text_file_from_dictionary(filename, dictionary):
    '''
    Serialize *dictionary* as JSON into *filename* and return it unchanged.

    Example:
        create_text_file_from_dictionary(
            'data/processed/zipcodes_within_radius.txt', zipcodes)
    '''
    serialized = json.dumps(dictionary)
    with open(filename, 'w') as out_handle:
        out_handle.write(serialized)
    return dictionary
if __name__ == "__main__":
print("we are wrangling data")
#update_ipo_list(2019, 6, 7)
main() | 51.654135 | 214 | 0.655313 |
9ef9d0cb1ac73ebdbfd64d7d2d0514517d257322 | 734 | py | Python | src/python/director/builtin/plugins/measurement_tool/plugin.py | afdaniele/director | 845ba027f9009803fcf77f44874f2ab9d7ab72e3 | [
"BSD-3-Clause"
] | null | null | null | src/python/director/builtin/plugins/measurement_tool/plugin.py | afdaniele/director | 845ba027f9009803fcf77f44874f2ab9d7ab72e3 | [
"BSD-3-Clause"
] | null | null | null | src/python/director/builtin/plugins/measurement_tool/plugin.py | afdaniele/director | 845ba027f9009803fcf77f44874f2ab9d7ab72e3 | [
"BSD-3-Clause"
] | null | null | null | from director.devel.plugin import GenericPlugin
from director.fieldcontainer import FieldContainer
from .lib import measurementpanel
from PythonQt import QtCore
| 25.310345 | 77 | 0.741144 |
9efa004ed72e268641173fcd54de72edaac3595f | 4,858 | py | Python | jupyter_book/yaml.py | akhmerov/jupyter-book | 06b8134af1266655717df474438bed2569b14efe | [
"BSD-3-Clause"
] | 1 | 2021-04-26T03:21:49.000Z | 2021-04-26T03:21:49.000Z | jupyter_book/yaml.py | akhmerov/jupyter-book | 06b8134af1266655717df474438bed2569b14efe | [
"BSD-3-Clause"
] | 1 | 2020-08-26T08:27:27.000Z | 2020-08-27T18:00:42.000Z | jupyter_book/yaml.py | phaustin/jupyter-book | 674b222d44cc1acb858804782cee4549eef03fb1 | [
"BSD-3-Clause"
] | null | null | null | """A small sphinx extension to let you configure a site with YAML metadata."""
from pathlib import Path
# Transform a "Jupyter Book" YAML configuration file into a Sphinx configuration file.
# This is so that we can choose more user-friendly words for things than Sphinx uses.
# e.g., 'logo' instead of 'html_logo'.
# Note that this should only be used for **top level** keys.
PATH_YAML_DEFAULT = Path(__file__).parent.joinpath("default_config.yml")
def yaml_to_sphinx(yaml):
    """Convert a Jupyter Book style config structure into a Sphinx config dict.

    Translates user-friendly Jupyter Book keys (e.g. ``logo``) into Sphinx
    option names (e.g. ``html_logo``) and assembles the HTML theme options.

    Note: the parameter ``yaml`` is the parsed config dict (it shadows the
    common module name of the same spelling; no yaml module is used here).
    """
    # Always exclude build artifacts and editor/OS litter from the Sphinx run.
    sphinx_config = {
        "exclude_patterns": [
            "_build",
            "Thumbs.db",
            ".DS_Store",
            "**.ipynb_checkpoints",
        ],
    }
    # Start with an empty options block
    theme_options = {}
    # Launch button configuration
    launch_buttons_config = yaml.get("launch_buttons", {})
    repository_config = yaml.get("repository", {})
    theme_options["launch_buttons"] = launch_buttons_config
    theme_options["path_to_docs"] = repository_config.get("path_to_book", "")
    theme_options["repository_url"] = repository_config.get("url", "")
    theme_options["repository_branch"] = repository_config.get("branch", "")
    # HTML
    html = yaml.get("html")
    if html:
        sphinx_config["html_favicon"] = html.get("favicon", "")
        sphinx_config["html_baseurl"] = html.get("baseurl", "")
        theme_options["google_analytics_id"] = html.get("google_analytics_id", "")
        # Deprecate navbar_footer_text after a release cycle
        theme_options["navbar_footer_text"] = html.get("navbar_footer_text", "")
        theme_options["extra_navbar"] = html.get("extra_navbar", "")
        theme_options["extra_footer"] = html.get("extra_footer", "")
        theme_options["home_page_in_toc"] = html.get("home_page_in_navbar")
        # Comments config
        sphinx_config["comments_config"] = html.get("comments", {})
        # Pass through the buttons
        btns = ["use_repository_button", "use_edit_page_button", "use_issues_button"]
        use_buttons = {btn: html.get(btn) for btn in btns if html.get(btn) is not None}
        # The repository buttons all link into the repo, so they are only
        # valid when a repository URL was configured.
        if any(use_buttons.values()):
            if not repository_config.get("url"):
                raise ValueError(
                    "To use 'repository' buttons, you must specify the repository URL"
                )
        # Update our config
        theme_options.update(use_buttons)
    # Update the theme options in the main config
    sphinx_config["html_theme_options"] = theme_options
    execute = yaml.get("execute")
    if execute:
        if execute.get("execute_notebooks") is False:
            # Special case because YAML treats `off` as "False".
            execute["execute_notebooks"] = "off"
        sphinx_config["jupyter_execute_notebooks"] = execute.get(
            "execute_notebooks", "auto"
        )
        sphinx_config["execution_timeout"] = execute.get("timeout", 30)
        sphinx_config["jupyter_cache"] = execute.get("cache", "")
        # Merge (not overwrite) any exclude patterns for execution.
        _recursive_update(
            sphinx_config,
            {"execution_excludepatterns": execute.get("exclude_patterns", [])},
        )
    # LaTeX
    latex = yaml.get("latex")
    if latex:
        sphinx_config["latex_engine"] = latex.get("latex_engine", "pdflatex")
    # Extra extensions
    extra_extensions = yaml.get("sphinx", {}).get("extra_extensions")
    if extra_extensions:
        # Accept a single extension name as well as a list of names.
        if not isinstance(extra_extensions, list):
            extra_extensions = [extra_extensions]
        extensions = sphinx_config.get("extensions", [])
        for extra in extra_extensions:
            extensions.append(extra)
        sphinx_config["extensions"] = extensions
    # Files that we wish to skip
    sphinx_config["exclude_patterns"].extend(yaml.get("exclude_patterns", []))
    # Now do simple top-level translations
    YAML_TRANSLATIONS = {
        "logo": "html_logo",
        "title": "html_title",
        "execute_notebooks": "jupyter_execute_notebooks",
        "project": "project",
        "author": "author",
        "copyright": "copyright",
    }
    for key, newkey in YAML_TRANSLATIONS.items():
        if key in yaml:
            val = yaml.get(key)
            # Normalize explicit YAML nulls to empty strings for Sphinx.
            if val is None:
                val = ""
            sphinx_config[newkey] = val
    return sphinx_config
def _recursive_update(config, update):
    """Update the dict `config` with `update` recursively.

    Nested dicts are merged key-by-key (instead of being shallowly
    ``dict.update``-ed, which clobbered sub-dicts two levels down and
    contradicted this docstring), and nested lists are extended rather
    than replaced.
    """
    for key, val in update.items():
        if isinstance(config.get(key), dict) and isinstance(val, dict):
            # Recurse so deeper nested dicts merge instead of overwrite.
            _recursive_update(config[key], val)
        elif isinstance(config.get(key), list):
            if isinstance(val, list):
                config[key].extend(val)
            else:
                config[key] = val
        else:
            config[key] = val
9efb34b3c08bdbb3ec7a611587c6c1763f510bd0 | 5,759 | py | Python | ScriptedAgent.py | RaphaelRoyerRivard/Supervised-End-to-end-Weight-sharing-for-StarCraft-II | 17171fc95c8385920ab7cab80bd4681ce1bff799 | [
"Apache-2.0"
] | null | null | null | ScriptedAgent.py | RaphaelRoyerRivard/Supervised-End-to-end-Weight-sharing-for-StarCraft-II | 17171fc95c8385920ab7cab80bd4681ce1bff799 | [
"Apache-2.0"
] | null | null | null | ScriptedAgent.py | RaphaelRoyerRivard/Supervised-End-to-end-Weight-sharing-for-StarCraft-II | 17171fc95c8385920ab7cab80bd4681ce1bff799 | [
"Apache-2.0"
] | null | null | null | __author__ = 'Tony Beltramelli - www.tonybeltramelli.com'
# scripted agents taken from PySC2, credits to DeepMind
# https://github.com/deepmind/pysc2/blob/master/pysc2/agents/scripted_agent.py
import numpy as np
import uuid
from pysc2.agents import base_agent
from pysc2.lib import actions
from pysc2.lib import features
# Indices of the screen feature layers this module reads.
_SCREEN_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_SCREEN_SELECTED = features.SCREEN_FEATURES.selected.index
# Values of the player_relative layer (friendly/neutral/hostile ownership).
_PLAYER_FRIENDLY = 1
_PLAYER_NEUTRAL = 3
_PLAYER_HOSTILE = 4
# PySC2 function ids for the actions the agents issue.
_NO_OP = actions.FUNCTIONS.no_op.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
# Common action arguments: [0] = act now (not queued) / select-all flag.
_NOT_QUEUED = [0]
_SELECT_ALL = [0]
| 39.445205 | 133 | 0.576489 |
9efb77347037fbe157767ce33cce2fb416895aa6 | 5,602 | py | Python | benchmark/test_tpch.py | serverless-analytics/dask-distributed-vanilla | b4b135ee956dbf9e64d10712558a88eafa080675 | [
"BSD-3-Clause"
] | null | null | null | benchmark/test_tpch.py | serverless-analytics/dask-distributed-vanilla | b4b135ee956dbf9e64d10712558a88eafa080675 | [
"BSD-3-Clause"
] | null | null | null | benchmark/test_tpch.py | serverless-analytics/dask-distributed-vanilla | b4b135ee956dbf9e64d10712558a88eafa080675 | [
"BSD-3-Clause"
] | null | null | null | import time
import sys
import dask
from dask.distributed import (
wait,
futures_of,
Client,
)
from tpch import loaddata, queries
#from benchmarks import utils
# Paths or URLs to the TPC-H tables.
#table_paths = {
# 'CUSTOMER': 'hdfs://bu-23-115:9000/tpch/customer.tbl',
# 'LINEITEM': 'hdfs://bu-23-115:9000/tpch/lineitem.tbl',
# 'NATION': 'hdfs://bu-23-115:9000/tpch/nation.tbl',
# 'ORDERS': 'hdfs://bu-23-115:9000/tpch/orders.tbl',
# 'PART': 'hdfs://bu-23-115:9000/tpch/part.tbl',
# 'PARTSUPP': 'hdfs://bu-23-115:9000/tpch/partsupp.tbl',
# 'REGION': 'hdfs://bu-23-115:9000/tpch/region.tbl',
# 'SUPPLIER': 'hdfs://bu-23-115:9000/tpch/supplier.tbl',
#}
# Paths to the TPC-H .tbl files used by the benchmark queries.
# This variant points at local files under /root/2g; HDFS and Azure-blob
# variants are kept commented out nearby in this module for reference.
table_paths = {
    'CUSTOMER': '/root/2g/customer.tbl',
    'LINEITEM': '/root/2g/lineitem.tbl',
    'NATION': '/root/2g/nation.tbl',
    'ORDERS': '/root/2g/orders.tbl',
    'PART': '/root/2g/part.tbl',
    'PARTSUPP': '/root/2g/partsupp.tbl',
    'REGION': '/root/2g/region.tbl',
    'SUPPLIER': '/root/2g/supplier.tbl',
}
#table_paths = {
# 'CUSTOMER': 'https://gochaudhstorage001.blob.core.windows.net/tpch/customer.tbl',
# 'LINEITEM': 'https://gochaudhstorage001.blob.core.windows.net/tpch/lineitem.tbl',
# 'NATION': 'https://gochaudhstorage001.blob.core.windows.net/tpch/nation.tbl',
# 'ORDERS': 'https://gochaudhstorage001.blob.core.windows.net/tpch/orders.tbl',
# 'PART': 'https://gochaudhstorage001.blob.core.windows.net/tpch/part.tbl',
# 'PARTSUPP': 'https://gochaudhstorage001.blob.core.windows.net/tpch/partsupp.tbl',
# 'REGION': 'https://gochaudhstorage001.blob.core.windows.net/tpch/region.tbl',
# 'SUPPLIER': 'https://gochaudhstorage001.blob.core.windows.net/tpch/supplier.tbl',
#}
if __name__ == '__main__':
    # Script entry point. NOTE(review): `main` is not defined in this
    # visible span; presumably defined earlier in the module -- confirm.
    main()
| 35.0125 | 87 | 0.593181 |
9efc2be79705e76de2137bab964886217cb24983 | 3,582 | py | Python | pika/adapters/tornado_connection.py | hugovk/pika | 03542ef616a2a849e8bfb0845427f50e741ea0c6 | [
"BSD-3-Clause"
] | 1 | 2019-08-28T10:10:56.000Z | 2019-08-28T10:10:56.000Z | pika/adapters/tornado_connection.py | goupper/pika | e2f26db4f41ac7ea6bdc50964a766472460dce4a | [
"BSD-3-Clause"
] | null | null | null | pika/adapters/tornado_connection.py | goupper/pika | e2f26db4f41ac7ea6bdc50964a766472460dce4a | [
"BSD-3-Clause"
] | null | null | null | """Use pika with the Tornado IOLoop
"""
import logging
from tornado import ioloop
from pika.adapters.utils import nbio_interface, selector_ioloop_adapter
from pika.adapters import base_connection
LOGGER = logging.getLogger(__name__)
| 38.934783 | 80 | 0.634283 |
9efe36b7df749158058e0d954855a509a9ce6a8b | 7,057 | py | Python | tests/library/test_ceph_volume_simple_activate.py | u-kosmonaft-u/ceph-ansible | 14c472707c165f77def05826b22885480af3e8f9 | [
"Apache-2.0"
] | 1,570 | 2015-01-03T08:38:22.000Z | 2022-03-31T09:24:37.000Z | tests/library/test_ceph_volume_simple_activate.py | u-kosmonaft-u/ceph-ansible | 14c472707c165f77def05826b22885480af3e8f9 | [
"Apache-2.0"
] | 4,964 | 2015-01-05T10:41:44.000Z | 2022-03-31T07:59:49.000Z | tests/library/test_ceph_volume_simple_activate.py | u-kosmonaft-u/ceph-ansible | 14c472707c165f77def05826b22885480af3e8f9 | [
"Apache-2.0"
] | 1,231 | 2015-01-04T11:48:16.000Z | 2022-03-31T12:15:28.000Z | from mock.mock import patch
import os
import pytest
import ca_test_common
import ceph_volume_simple_activate
fake_cluster = 'ceph'
fake_container_binary = 'podman'
fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
fake_id = '42'
fake_uuid = '0c4a7eca-0c2a-4c12-beff-08a80f064c52'
fake_path = '/etc/ceph/osd/{}-{}.json'.format(fake_id, fake_uuid)
| 40.325714 | 132 | 0.621794 |
9effc7a4839375e16dbdf0896beb3c70b1e21234 | 154 | py | Python | setup.py | Minterious/minter-monitoring | 1a2216be57dec491a970950c3b9cfc72cea228c2 | [
"MIT"
] | 2 | 2019-08-24T12:15:20.000Z | 2019-08-24T12:19:07.000Z | setup.py | Minterious/minter-monitoring | 1a2216be57dec491a970950c3b9cfc72cea228c2 | [
"MIT"
] | null | null | null | setup.py | Minterious/minter-monitoring | 1a2216be57dec491a970950c3b9cfc72cea228c2 | [
"MIT"
] | 1 | 2019-09-19T21:16:25.000Z | 2019-09-19T21:16:25.000Z | import setuptools
setuptools.setup(
name='mintermonitoring',
version='1.0.0',
packages=setuptools.find_packages(include=['mintermonitoring'])
)
| 19.25 | 66 | 0.746753 |
7300890aeb852238c2f50f2aafaca22c70ba3108 | 158 | py | Python | Python/Back_solve_python/back_joon/StringArray/P10808.py | skyriv213/Studyriv | 6dfd3c52a873cd3bdb018280d81aec8bdcf61e6e | [
"MIT"
] | null | null | null | Python/Back_solve_python/back_joon/StringArray/P10808.py | skyriv213/Studyriv | 6dfd3c52a873cd3bdb018280d81aec8bdcf61e6e | [
"MIT"
] | null | null | null | Python/Back_solve_python/back_joon/StringArray/P10808.py | skyriv213/Studyriv | 6dfd3c52a873cd3bdb018280d81aec8bdcf61e6e | [
"MIT"
] | null | null | null | s = input()
# Frequency table for the 26 lowercase letters of `s` (BOJ 10808).
num = [0] * 26
for ch in s:
    # ord('a') == 97, so 'a'..'z' map onto indices 0..25.
    num[ord(ch) - 97] += 1
# Print all 26 counts separated by single spaces.
# Fix: the original compared each *count value* `i` against len(num)-1
# (a value/index confusion) and so spuriously re-printed any count that
# happened to equal 25.
print(' '.join(map(str, num)))
| 15.8 | 26 | 0.455696 |
73009bb6994a5ff455eca19ffc1b698f9cf1d1d2 | 600 | py | Python | src/reliefcpp/utils.py | ferrocactus/reliefcpp | 41705a9e5c749e700f83f9fe9f352457ae57426d | [
"MIT"
] | null | null | null | src/reliefcpp/utils.py | ferrocactus/reliefcpp | 41705a9e5c749e700f83f9fe9f352457ae57426d | [
"MIT"
] | null | null | null | src/reliefcpp/utils.py | ferrocactus/reliefcpp | 41705a9e5c749e700f83f9fe9f352457ae57426d | [
"MIT"
] | null | null | null | from enum import Enum
from numpy import isin
# Identifiers of the distance metrics supported by this package.
metric_names = [
    "euclidean",
    "manhattan",
    "hamming",
    "l2",
    "l1"
]
| 18.181818 | 54 | 0.638333 |
7300c97c38a22ec9df0ea9ea6a865bb5bd5120e7 | 1,993 | py | Python | utilityFiles/createValidationDatasetFromXYTrainWithCandidates.py | jmfinelli/JavaNeuralDecompiler | fb914fcf4518815a4d00061b562617fc25e2f2b4 | [
"Apache-2.0"
] | 1 | 2021-06-30T12:50:28.000Z | 2021-06-30T12:50:28.000Z | utilityFiles/createValidationDatasetFromXYTrainWithCandidates.py | jmfinelli/JavaNeuralDecompiler | fb914fcf4518815a4d00061b562617fc25e2f2b4 | [
"Apache-2.0"
] | null | null | null | utilityFiles/createValidationDatasetFromXYTrainWithCandidates.py | jmfinelli/JavaNeuralDecompiler | fb914fcf4518815a4d00061b562617fc25e2f2b4 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import os.path
length_switch = True
max_body_length = 50
process_candidates = os.path.exists('./datasets/candidates.output')
x_train = open('./datasets/x_train').readlines()
x_train = [x.rstrip('\n') for x in x_train]
y_train = open('./datasets/y_train').readlines()
y_train = [x.rstrip('\n') for x in y_train]
x_valid = open('./datasets/x_valid').readlines()
x_valid = [x.rstrip('\n') for x in x_valid]
y_valid = open('./datasets/y_valid').readlines()
y_valid = [x.rstrip('\n') for x in y_valid]
bytecodes = open('./datasets/bytecode.output').readlines()
bytecodes = [x.rstrip('\n') for x in bytecodes]
references = open('./datasets/references.output').readlines()
references = [x.rstrip('\n') for x in references]
if (process_candidates):
candidates = open('./datasets/candidates.output').readlines()
candidates = [x.rstrip('\n') for x in candidates]
df_pairs = pd.DataFrame({'source': bytecodes, 'target' : references, 'candidates': candidates })
else:
df_pairs = pd.DataFrame({'source': bytecodes, 'target': references })
if (length_switch):
mask = df_pairs['source'].apply(lambda x: len(x.split()) <= max_body_length)
df_pairs = df_pairs.loc[mask]
df_train = pd.DataFrame({'source': x_train + x_valid, 'target' : y_train + y_valid })
df_valid = df_pairs.merge(df_train, on='source', indicator=True, how='left')\
.query('_merge=="left_only"')\
.drop('_merge', axis=1)\
.drop('target_y', axis=1)
# df_valid = df_valid.sample(frac=1).reset_index(drop=True).sample(50000)
with open('./datasets/remaining_sources', 'w') as filehandle:
filehandle.writelines("%s\n" % place for place in df_valid['source'])
with open('./datasets/remaining_references', 'w') as filehandle:
filehandle.writelines("%s\n" % place for place in df_valid['target_x'])
if (process_candidates):
with open('./datasets/remaining_candidates', 'w') as filehandle:
filehandle.writelines("%s\n" % place for place in df_valid['candidates']) | 39.078431 | 100 | 0.697441 |
7303a20740842e72c83f9691beba5498f652855d | 105 | py | Python | py/Utility.GetData.py | mathematicalmichael/SpringNodes | 3ff4034b6e57ee6efa55c963e1819f3d30a2c4ab | [
"MIT"
] | 51 | 2015-09-25T09:30:57.000Z | 2022-01-19T14:16:44.000Z | py/Utility.GetData.py | sabeelcoder/SpringNodes | e21a24965474d54369e74d23c06f8c42a7b926b5 | [
"MIT"
] | 66 | 2015-09-30T02:43:32.000Z | 2022-03-31T02:26:52.000Z | py/Utility.GetData.py | sabeelcoder/SpringNodes | e21a24965474d54369e74d23c06f8c42a7b926b5 | [
"MIT"
] | 48 | 2015-11-19T01:34:47.000Z | 2022-02-25T17:26:48.000Z | import System
dataKey, _ = IN
OUT = System.AppDomain.CurrentDomain.GetData("_Dyn_Wireless_%s" % dataKey) | 26.25 | 74 | 0.780952 |
7303be01ae89f9c41f09c1617f6cea31c52d0cf4 | 347 | py | Python | codes_/1189_Maximum_Number_of_Balloons.py | SaitoTsutomu/leetcode | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | [
"MIT"
] | null | null | null | codes_/1189_Maximum_Number_of_Balloons.py | SaitoTsutomu/leetcode | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | [
"MIT"
] | null | null | null | codes_/1189_Maximum_Number_of_Balloons.py | SaitoTsutomu/leetcode | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | [
"MIT"
] | null | null | null | # %% [1189. *Maximum Number of Balloons](https://leetcode.com/problems/maximum-number-of-balloons/)
# text'ballon'
# collections.Counter
| 43.375 | 99 | 0.700288 |
7303f0aa47265452a8086f8bcf4551e8db1e3810 | 7,746 | py | Python | src/Quiet.X.Tests/i2c_test.py | callwyat/Quiet-Firmware | 864c210e44d368a4a683704841067717ebc8ac43 | [
"MIT"
] | null | null | null | src/Quiet.X.Tests/i2c_test.py | callwyat/Quiet-Firmware | 864c210e44d368a4a683704841067717ebc8ac43 | [
"MIT"
] | null | null | null | src/Quiet.X.Tests/i2c_test.py | callwyat/Quiet-Firmware | 864c210e44d368a4a683704841067717ebc8ac43 | [
"MIT"
] | null | null | null | from quiet_coms import find_quiet_ports
from quiet import Quiet
import time
# Default test configuration when these flags were not pre-set by an
# enclosing script before this module runs.
if 'EXIT_ON_FAIL' not in locals():
    VERBOSE = True
    EXIT_ON_FAIL = True
if __name__ == "__main__":
    # NOTE(review): QuietI2C, i2c_test and i2c_test_errors are defined
    # earlier in this module and are not visible in this span.
    q2c = QuietI2C(None, log_path='usb_log.txt')
    # Run the functional tests, the error-path tests, then the functional
    # tests again -- presumably to confirm recovery after the error cases.
    i2c_test(q2c)
    i2c_test_errors(q2c)
    i2c_test(q2c)
    print('All I2C Tests Passed')
| 29.340909 | 113 | 0.631423 |
7304d96eed7cd6d1a985ffc90a2d6a94ba9983b7 | 716 | py | Python | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/Overflow/_Data-Structures/binary-tree/binary-tree-tilt.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | 5 | 2021-06-02T23:44:25.000Z | 2021-12-27T16:21:57.000Z | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/Overflow/_Data-Structures/binary-tree/binary-tree-tilt.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | 22 | 2021-05-31T01:33:25.000Z | 2021-10-18T18:32:39.000Z | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/Overflow/_Data-Structures/binary-tree/binary-tree-tilt.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | 3 | 2021-06-19T03:37:47.000Z | 2021-08-31T00:49:51.000Z | # Source : https://leetcode.com/problems/binary-tree-tilt/description/
# Date : 2017-12-26
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
| 21.058824 | 70 | 0.540503 |
7305e3962fe9733cd02f16a567ab4d4b8d8a9743 | 7,581 | py | Python | kerastuner/engine/tuner_utils.py | krantirk/keras-tuner | fbc34866bf4e7ff1d60bf8c341a9325b9d5429b3 | [
"Apache-2.0"
] | 1 | 2019-07-12T17:17:06.000Z | 2019-07-12T17:17:06.000Z | kerastuner/engine/tuner_utils.py | nishantsbi/keras-tuner | fbc34866bf4e7ff1d60bf8c341a9325b9d5429b3 | [
"Apache-2.0"
] | null | null | null | kerastuner/engine/tuner_utils.py | nishantsbi/keras-tuner | fbc34866bf4e7ff1d60bf8c341a9325b9d5429b3 | [
"Apache-2.0"
] | 1 | 2020-01-02T04:07:22.000Z | 2020-01-02T04:07:22.000Z | # Copyright 2019 The Keras Tuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Tuner class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import math
from collections import defaultdict
import numpy as np
import time
import random
import hashlib
import tensorflow as tf
from tensorflow import keras
from ..abstractions import display
def generate_trial_id():
    """Return a fresh, effectively unique 32-character hex trial id.

    Hashes the current time concatenated with a random integer using
    SHA-256 and keeps the first 32 hex characters, so ids generated close
    together in time still differ.
    """
    # Fix: use an int upper bound -- random.randint() with a float (1e7)
    # is deprecated since Python 3.10 and raises in Python 3.12.
    s = str(time.time()) + str(random.randint(1, 10000000))
    return hashlib.sha256(s.encode('utf-8')).hexdigest()[:32]
def format_execution_id(i, executions_per_trial):
    """Zero-pad execution index ``i`` for a trial's execution budget.

    The pad width is the number of digits of the largest execution index
    (``executions_per_trial - 1``) so ids sort lexicographically.

    Fix: the width is computed from the decimal string length instead of
    ``ceil(log(n, 10))``, whose floating-point result can be off by one at
    exact powers of ten; this also yields a sane width of 1 (not 0) when
    ``executions_per_trial`` is 1.

    Args:
        i: Execution index.
        executions_per_trial: Total executions planned for the trial.

    Returns:
        str: Zero-padded execution id, e.g. ``'03'`` for i=3 of 100.
    """
    execution_id_length = len(str(max(executions_per_trial - 1, 0)))
    execution_id_template = '%0' + str(execution_id_length) + 'd'
    execution_id = execution_id_template % i
    return execution_id
| 33.544248 | 78 | 0.626962 |
7306a719a754d7eb090a7a28857cf9ab3cc30caf | 1,880 | py | Python | plotter.py | ZiegHailo/SMUVI | c324c881c511f1c44e481f93e6bd6fe7f85d4ded | [
"MIT"
] | null | null | null | plotter.py | ZiegHailo/SMUVI | c324c881c511f1c44e481f93e6bd6fe7f85d4ded | [
"MIT"
] | null | null | null | plotter.py | ZiegHailo/SMUVI | c324c881c511f1c44e481f93e6bd6fe7f85d4ded | [
"MIT"
] | null | null | null | __author__ = 'zieghailo'
import matplotlib.pyplot as plt
# plt.ion()
if __name__ == "__main__":
    # NOTE(review): `start_gui` is not defined in this visible span;
    # presumably provided earlier in this module -- confirm.
    start_gui()
7306a81bcc0bef579d78b882fb2bc110b0f6bf5f | 1,506 | py | Python | fannypack/utils/_deprecation.py | brentyi/hfdsajk | 2888aa5d969824ac1e1a528264674ece3f4703f9 | [
"MIT"
] | 5 | 2020-03-13T21:34:31.000Z | 2020-10-27T15:18:17.000Z | fannypack/utils/_deprecation.py | brentyi/hfdsajk | 2888aa5d969824ac1e1a528264674ece3f4703f9 | [
"MIT"
] | 2 | 2020-06-17T11:06:56.000Z | 2020-10-25T03:06:18.000Z | fannypack/utils/_deprecation.py | brentyi/hfdsajk | 2888aa5d969824ac1e1a528264674ece3f4703f9 | [
"MIT"
] | 4 | 2020-03-15T01:55:18.000Z | 2022-01-21T22:06:48.000Z | import warnings
from typing import Callable, Optional, TypeVar, cast
CallableType = TypeVar("CallableType", bound=Callable)
def deprecation_wrapper(message: str, function_or_class: CallableType) -> CallableType:
    """Creates a wrapper for a deprecated function or class. Prints a warning
    the first time a function or class is called.
    Args:
        message (str): Warning message.
        function_or_class (CallableType): Function or class to wrap.
    Returns:
        CallableType: Wrapped function/class.
    """
    warned = False  # closure flag: set once the first warning has fired
    # NOTE(review): `curried` is returned below but is not defined anywhere
    # in this visible span -- the inner wrapper function appears to have
    # been lost (truncation?). As written this raises NameError at call
    # time; restore the inner `curried` definition.
    return cast(CallableType, curried)
def new_name_wrapper(
    old_name: str, new_name: str, function_or_class: CallableType
) -> CallableType:
    """Wrap a renamed function or class so uses of the old name warn once.

    Args:
        old_name (str): Old name of function or class. Printed in warning.
        new_name (str): New name of function or class. Printed in warning.
        function_or_class (CallableType): Function or class to wrap.

    Returns:
        CallableType: Wrapped function/class.
    """
    warning = f"{old_name} is deprecated! Use {new_name} instead."
    return deprecation_wrapper(warning, function_or_class)
| 31.375 | 87 | 0.688579 |
7307b7da6fb6d2b5a5aa27d12b5f25e31c28bd7c | 319 | py | Python | write/5_json_writer.py | pavlovprojects/python_qa_test_data | 4066f73c83cdd4ace9d6150726a578c0326daf94 | [
"MIT"
] | null | null | null | write/5_json_writer.py | pavlovprojects/python_qa_test_data | 4066f73c83cdd4ace9d6150726a578c0326daf94 | [
"MIT"
] | null | null | null | write/5_json_writer.py | pavlovprojects/python_qa_test_data | 4066f73c83cdd4ace9d6150726a578c0326daf94 | [
"MIT"
] | null | null | null | import json
# Demo payload: two player records mixing strings, ints and lists.
data = {
    "users": [
        {"Name": "Dominator", "skill": 100, "gold": 99999, "weapons": ['Sword', 'Atomic Laser']},
        {"Name": "Looser", "skill": 1, "gold": -100000, "weapons": [None, None, None]},
    ]
}
# Serialize straight to disk; json.dump emits the same 4-space-indented
# text that dumps() followed by write() produced.
with open("example.json", "w") as out_file:
    json.dump(data, out_file, indent=4)
| 24.538462 | 97 | 0.526646 |
730824ac4dba3e614be06b76613a0a6b290846f5 | 46 | py | Python | src/utils.py | sequoia-tree/cs370 | 47bf7f56d20bd81abbdbd0502477afcd5f62bbbe | [
"CC-BY-4.0"
] | 1 | 2019-01-14T08:31:45.000Z | 2019-01-14T08:31:45.000Z | src/utils.py | sequoia-tree/teaching-cs | 47bf7f56d20bd81abbdbd0502477afcd5f62bbbe | [
"CC-BY-4.0"
] | null | null | null | src/utils.py | sequoia-tree/teaching-cs | 47bf7f56d20bd81abbdbd0502477afcd5f62bbbe | [
"CC-BY-4.0"
] | null | null | null | from md_utils import *
from py_utils import *
| 15.333333 | 22 | 0.782609 |
73085370dd0ae578546e4f06c27e87ad769b743a | 387 | py | Python | practice/ai/machine-learning/digital-camera-day-or-night/digital-camera-day-or-night.py | zeyuanxy/HackerRank | 5194a4af780ece396501c215996685d1be529e73 | [
"MIT"
] | 4 | 2017-01-18T17:51:58.000Z | 2019-10-20T12:14:37.000Z | practice/ai/machine-learning/digital-camera-day-or-night/digital-camera-day-or-night.py | zeyuanxy/HackerRank | 5194a4af780ece396501c215996685d1be529e73 | [
"MIT"
] | null | null | null | practice/ai/machine-learning/digital-camera-day-or-night/digital-camera-day-or-night.py | zeyuanxy/HackerRank | 5194a4af780ece396501c215996685d1be529e73 | [
"MIT"
] | 8 | 2016-03-14T17:16:59.000Z | 2021-06-26T10:11:33.000Z | if __name__ == "__main__":
    # Python 2 script body. Reads one line of space-separated pixels,
    # each pixel encoded as "R,G,B".
    data = raw_input().strip(',\n').split(' ')
    count = 0
    total = 0
    for pxl in data:
        pxl = pxl.split(',')
        mean = 0
        for i in pxl:
            mean += int(i)
        # Average brightness over the pixel's channels (Python 2
        # integer division, so this truncates).
        mean /= 3
        if mean < 70:
            # Brightness threshold: pixels averaging below 70 are "dark".
            count += 1
        total += 1
    # Classify as night when more than 40% of the pixels are dark.
    if float(count) / total > 0.4:
        print 'night'
    else:
        print 'day'
| 21.5 | 46 | 0.426357 |
73087bd098e88fc78614d997333c9cb2a9e486e2 | 1,231 | py | Python | Mini Projects/RockPaperScissors/RPS.py | Snowystar122/Python-Projects | faf05ec388030b8b40ad7a8ca5c2760fb62cf5a3 | [
"MIT"
] | null | null | null | Mini Projects/RockPaperScissors/RPS.py | Snowystar122/Python-Projects | faf05ec388030b8b40ad7a8ca5c2760fb62cf5a3 | [
"MIT"
] | null | null | null | Mini Projects/RockPaperScissors/RPS.py | Snowystar122/Python-Projects | faf05ec388030b8b40ad7a8ca5c2760fb62cf5a3 | [
"MIT"
] | null | null | null | import random as r
# Sets up required variables
running = True
user_wins = 0
comp_wins = 0
answers = ["R", "P", "S"]
win_combos = ["PR", "RS", "SP"]
# Welcome message
print("Welcome to Rock-Paper-Scissors. Please input one of the following:"
"\n'R' - rock\n'P' - paper\n'S' - scissors\nto get started.")
while running:
# Running game of rock, paper, scissors
if user_wins == 3 or comp_wins == 3:
print(f"Game is over. The score was {user_wins}-{comp_wins}. Thanks for playing.")
break
user_guess = input("Guess:").upper()
if user_guess.upper() not in answers:
print("You didn't enter a valid letter.")
break
comp_guess = answers[r.randint(0, 2)]
guess_join = user_guess + comp_guess
if guess_join[0] == guess_join[1]:
print(f"You both guessed {user_guess}!\nThe current score is {user_wins}-{comp_wins}.")
else:
# Checks to see if computer or user has won the round.
if any(guess_join == elem in win_combos for elem in win_combos):
user_wins += 1
print(f"You win! Score is {user_wins}-{comp_wins}.")
else:
comp_wins += 1
print(f"You lose! Score is {user_wins}-{comp_wins}.")
| 32.394737 | 95 | 0.622258 |
730b2987ac65ae096f7d5f37854abcd28bec2bf9 | 1,147 | py | Python | pybullet-gym/pybulletgym/agents/agents_baselines.py | SmaleZ/vcl_diayn | b2c47a681675b405d2011bc4a43c3914f3af4ecc | [
"MIT"
] | 2 | 2021-07-12T17:11:35.000Z | 2021-07-13T05:56:30.000Z | pybullet-gym/pybulletgym/agents/agents_baselines.py | SmaleZ/vcl_diayn | b2c47a681675b405d2011bc4a43c3914f3af4ecc | [
"MIT"
] | null | null | null | pybullet-gym/pybulletgym/agents/agents_baselines.py | SmaleZ/vcl_diayn | b2c47a681675b405d2011bc4a43c3914f3af4ecc | [
"MIT"
] | null | null | null | from baselines import deepq
| 20.854545 | 58 | 0.691369 |
730be722fa533a8220a435fcc4009bd19bbb500f | 1,426 | py | Python | exploit.py | hexcowboy/CVE-2020-8813 | 0229d52f8b5adb63cc6d5bc757850a01a7800b8d | [
"MIT"
] | null | null | null | exploit.py | hexcowboy/CVE-2020-8813 | 0229d52f8b5adb63cc6d5bc757850a01a7800b8d | [
"MIT"
] | null | null | null | exploit.py | hexcowboy/CVE-2020-8813 | 0229d52f8b5adb63cc6d5bc757850a01a7800b8d | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import requests
import click
from rich import inspect
from rich.console import Console
from url_normalize import url_normalize
from urllib.parse import quote
console = Console()
if __name__ == "__main__":
exploit()
| 31 | 109 | 0.680224 |
730d40eb64f626d437281807fa30ca37ecd18cc5 | 1,119 | py | Python | common/src/stack/command/stack/commands/set/firmware/model/imp/__init__.py | kmcm0/stacki | eb9dff1b45d5725b4986e567876bf61707fec28f | [
"BSD-3-Clause"
] | 123 | 2015-05-12T23:36:45.000Z | 2017-07-05T23:26:57.000Z | common/src/stack/command/stack/commands/set/firmware/model/imp/__init__.py | kmcm0/stacki | eb9dff1b45d5725b4986e567876bf61707fec28f | [
"BSD-3-Clause"
] | 177 | 2015-06-05T19:17:47.000Z | 2017-07-07T17:57:24.000Z | common/src/stack/command/stack/commands/set/firmware/model/imp/__init__.py | kmcm0/stacki | eb9dff1b45d5725b4986e567876bf61707fec28f | [
"BSD-3-Clause"
] | 32 | 2015-06-07T02:25:03.000Z | 2017-06-23T07:35:35.000Z | # @copyright@
# Copyright (c) 2006 - 2019 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
#
# @rocks@
# Copyright (c) 2000 - 2010 The Regents of the University of California
# All rights reserved. Rocks(r) v5.4 www.rocksclusters.org
# https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt
# @rocks@
import stack.commands
| 29.447368 | 111 | 0.739946 |
73106dc1db1187afa8a045a4fa929befaa9cbf34 | 5,939 | py | Python | torch/jit/_fuser.py | ljhOfGithub/pytorch | c568f7b16f2a98d72ff5b7c6c6161b67b2c27514 | [
"Intel"
] | 1 | 2022-03-29T00:44:31.000Z | 2022-03-29T00:44:31.000Z | torch/jit/_fuser.py | ljhOfGithub/pytorch | c568f7b16f2a98d72ff5b7c6c6161b67b2c27514 | [
"Intel"
] | null | null | null | torch/jit/_fuser.py | ljhOfGithub/pytorch | c568f7b16f2a98d72ff5b7c6c6161b67b2c27514 | [
"Intel"
] | 1 | 2022-03-28T21:49:41.000Z | 2022-03-28T21:49:41.000Z | import contextlib
import torch
from typing import List, Tuple
last_executed_optimized_graph = torch._C._last_executed_optimized_graph
def set_fusion_strategy(strategy: List[Tuple[str, int]]):
"""
Sets the type and number of specializations that can occur during fusion.
Usage: provide a list of pairs (type, depth) where type is one of "STATIC" or "DYNAMIC"
and depth is an integer.
Behavior - static vs dynamic:
In STATIC fusion, fused ops are compiled to have fixed input shapes. The shape is determined
based on some initial profiling runs.
In DYNAMIC fusion, fused ops are compiled to have variable input shapes, so that multiple
shapes are possible.
In both cases, we also recompile on new striding behavior, device, or dtype.
Behavior - fallback functions & depth:
When an input doesn't match the format required by the specialized compiled op, it will run
a fallback function. Fallback functions are recursively be compiled and specialized based
on the observed tensor shapes. Since compilation can be slow, the "depth" parameter is provided to
limit the number of specializations that can be compiled, before giving up on recompiling and
falling back to a completely un-fused, un-specialized implementation.
The list of (type, depth) pairs controls the type of specializations and the number of
specializations. For example: [("STATIC", 2), ("DYNAMIC", 2)] indicates that the first
two specializations will use static fusions, the following two specializations will use
dynamic fusion, and any inputs that satisfy none of the 4 options will run an
unfused implementation.
NB: in the future, if more as more fusion backends are added there may be more granular
apis for specific fusers.
"""
return torch._C._jit_set_fusion_strategy(strategy)
| 42.120567 | 106 | 0.706348 |
73111dceec02df0e21147895187850aaff39304f | 4,420 | py | Python | modlit/db/postgres.py | patdaburu/modlit | 9c9c153b74f116357e856e4c204c9a83bb15398f | [
"MIT"
] | null | null | null | modlit/db/postgres.py | patdaburu/modlit | 9c9c153b74f116357e856e4c204c9a83bb15398f | [
"MIT"
] | null | null | null | modlit/db/postgres.py | patdaburu/modlit | 9c9c153b74f116357e856e4c204c9a83bb15398f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by pat on 5/8/18
"""
.. currentmodule:: modlit.db.postgres
.. moduleauthor:: Pat Daburu <pat@daburu.net>
This module contains utilities for working directly with PostgreSQL.
"""
import json
from pathlib import Path
from urllib.parse import urlparse, ParseResult
from addict import Dict
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
DEFAULT_ADMIN_DB = 'postgres' #: the default administrative database name
DEFAULT_PG_PORT = 5432 #: the default PostgreSQL listener port
# Load the Postgres phrasebook.
# pylint: disable=invalid-name
# pylint: disable=no-member
sql_phrasebook = Dict(
json.loads(
(
Path(__file__).resolve().parent / 'postgres.json'
).read_text()
)['sql']
)
def connect(url: str, dbname: str = None, autocommit: bool = False):
    """
    Create a connection to a Postgres database.

    :param url: the Postgres instance URL
    :param dbname: the target database name (if it differs from the one
        specified in the URL)
    :param autocommit: Set the `autocommit` flag on the connection?
    :return: a psycopg2 connection
    """
    # Break the URL into its components; we reassemble them as psycopg2
    # keyword arguments below.
    parsed: ParseResult = urlparse(url)
    # Collect connection options, skipping any component absent from the
    # URL so psycopg2 falls back to its own defaults for it.
    cnx_opt = {}
    for key, value in (
            ('host', parsed.hostname),
            ('port', DEFAULT_PG_PORT if parsed.port is None else int(parsed.port)),
            ('database', dbname if dbname is not None else parsed.path[1:]),
            ('user', parsed.username),
            ('password', parsed.password)):
        if value is not None:
            cnx_opt[key] = value
    cnx = psycopg2.connect(**cnx_opt)
    # Only touch the flag when the caller asked for autocommit mode.
    if autocommit:
        cnx.autocommit = True
    return cnx
def db_exists(url: str,
              dbname: str = None,
              admindb: str = DEFAULT_ADMIN_DB) -> bool:
    """
    Does a given database on a Postgres instance exist?

    :param url: the Postgres instance URL
    :param dbname: the name of the database to test
    :param admindb: the name of an existing (presumably the main) database
    :return: `True` if the database exists, otherwise `False`
    """
    # Fall back to the database named in the URL's path when none is given.
    target = dbname or urlparse(url).path[1:]
    # Query the admin database for the number of databases with that name;
    # a non-zero count means it exists.
    with connect(url=url, dbname=admindb) as cnx, cnx.cursor() as crs:
        crs.execute(sql_phrasebook.select_db_count.format(target))
        return crs.fetchone()[0] != 0
def create_db(
        url: str,
        dbname: str,
        admindb: str = DEFAULT_ADMIN_DB):
    """
    Create a database on a Postgres instance.
    :param url: the Postgres instance URL
    :param dbname: the name of the database
    :param admindb: the name of an existing (presumably the main) database
    """
    with connect(url=url, dbname=admindb) as cnx:
        # CREATE DATABASE cannot run inside a transaction block, so switch
        # the connection to autocommit isolation before issuing it.
        cnx.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        with cnx.cursor() as crs:
            crs.execute(sql_phrasebook.create_db.format(dbname))
def touch_db(
        url: str,
        dbname: str = None,
        admindb: str = DEFAULT_ADMIN_DB):
    """
    Create a database if it does not already exist.

    :param url: the Postgres instance URL
    :param dbname: the name of the database
    :param admindb: the name of an existing (presumably the main) database
    """
    # Nothing to do when the database is already present.
    if db_exists(url=url, dbname=dbname, admindb=admindb):
        return
    # Resolve the target name, defaulting to the path component of the URL,
    # then create it.
    target = dbname or urlparse(url).path[1:]
    create_db(url=url, dbname=target, admindb=admindb)
| 32.262774 | 79 | 0.640045 |
7311fe6464a3f41ba16f8290bf926cae00157858 | 3,179 | py | Python | estradaspt_legacy/__init__.py | dpjrodrigues/home-assistant-custom-components | 105feec36ea065e62e839b5137a9ee2e2dcf3513 | [
"MIT"
] | null | null | null | estradaspt_legacy/__init__.py | dpjrodrigues/home-assistant-custom-components | 105feec36ea065e62e839b5137a9ee2e2dcf3513 | [
"MIT"
] | null | null | null | estradaspt_legacy/__init__.py | dpjrodrigues/home-assistant-custom-components | 105feec36ea065e62e839b5137a9ee2e2dcf3513 | [
"MIT"
] | 5 | 2018-12-29T16:39:25.000Z | 2019-12-21T22:29:22.000Z | import logging
import async_timeout
import urllib.request
import time
import re
from datetime import datetime, timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.util import Throttle
from homeassistant.helpers.aiohttp_client import async_get_clientsession
REQUIREMENTS = ['pyEstradasPT==1.0.2']  # pip dependency installed by Home Assistant
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Powered by estradas.pt"  # data-source credit string
CONF_CAMERA = 'camera'  # configuration key holding the camera list
SCAN_INTERVAL = timedelta(minutes=5)  # polling interval between updates
DOMAIN = 'estradaspt'
# Config schema: under the `estradaspt` domain, `camera` is required and
# must be a list of strings (a lone string is coerced into a 1-item list).
PLATFORM_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_CAMERA): vol.All(cv.ensure_list, [cv.string])
    })
}, extra=vol.ALLOW_EXTRA)
class CameraVideo(Entity):
    """Sensor that reads and stores the camera video."""
    # Material Design icon shown for this entity in the UI.
    ICON = 'mdi:webcam'
    def __init__(self, name, file_name, url):
        """Initialize the component.

        :param name: display name for this camera entity
        :param file_name: file name used when storing the video
        :param url: URL the camera video is fetched from
        """
        self._name = name
        self._file_name = file_name
        self._url = url
        # Timestamp of the last refresh; starts at construction time.
        self._last_update = datetime.now()
| 27.17094 | 75 | 0.674111 |
7311ffda56e787743243c236f69f050e734a7937 | 22,262 | py | Python | parser.py | boshijingang/PyLuaCompiler | 37cdf73286d020b2d119635d6d2609a5d9debfed | [
"MIT"
] | null | null | null | parser.py | boshijingang/PyLuaCompiler | 37cdf73286d020b2d119635d6d2609a5d9debfed | [
"MIT"
] | null | null | null | parser.py | boshijingang/PyLuaCompiler | 37cdf73286d020b2d119635d6d2609a5d9debfed | [
"MIT"
] | null | null | null | import lexer
import ast
| 42.894027 | 128 | 0.620519 |
73127b6e66f9e5e908a0672dbaeb988571d8cf2c | 14,720 | py | Python | python/terra_proto/terra/treasury/v1beta1/__init__.py | Vritra4/terra.proto | 977264b7c3e0f9d135120d77b48657b82f5eacf6 | [
"Apache-2.0"
] | null | null | null | python/terra_proto/terra/treasury/v1beta1/__init__.py | Vritra4/terra.proto | 977264b7c3e0f9d135120d77b48657b82f5eacf6 | [
"Apache-2.0"
] | null | null | null | python/terra_proto/terra/treasury/v1beta1/__init__.py | Vritra4/terra.proto | 977264b7c3e0f9d135120d77b48657b82f5eacf6 | [
"Apache-2.0"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: terra/treasury/v1beta1/genesis.proto, terra/treasury/v1beta1/query.proto, terra/treasury/v1beta1/treasury.proto
# plugin: python-betterproto
from dataclasses import dataclass
from typing import Dict, List
import betterproto
from betterproto.grpc.grpclib_server import ServiceBase
import grpclib
class QueryStub(betterproto.ServiceStub):
class QueryBase(ServiceBase):
from ....cosmos.base import v1beta1 as ___cosmos_base_v1_beta1__
| 31.120507 | 122 | 0.691508 |
7316876aa79ec9dd6b9b2ee309c9f7ea22776613 | 5,066 | py | Python | usbservo/usbservogui.py | ppfenninger/screwball | c4a7273fa47dac6bdf6fcf8ca29c85a77f9e5bd6 | [
"MIT"
] | null | null | null | usbservo/usbservogui.py | ppfenninger/screwball | c4a7273fa47dac6bdf6fcf8ca29c85a77f9e5bd6 | [
"MIT"
] | null | null | null | usbservo/usbservogui.py | ppfenninger/screwball | c4a7273fa47dac6bdf6fcf8ca29c85a77f9e5bd6 | [
"MIT"
] | null | null | null | #
## Copyright (c) 2018, Bradley A. Minch
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## 1. Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## 2. Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
## ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
## LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
## CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
## INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
## CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
## ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
## POSSIBILITY OF SUCH DAMAGE.
#
import Tkinter as tk
import usbservo
# Script entry point: build the GUI (usbservogui is defined earlier in this
# file, outside the visible chunk) and hand control to the Tk event loop,
# which blocks until the window is closed.
if __name__=='__main__':
    gui = usbservogui()
    gui.root.mainloop()
| 49.666667 | 153 | 0.647059 |
7317deb1560647aa925ec2a580d6d0908f2796af | 155 | py | Python | GasBotty/models/utils.py | GreenCUBIC/GasBotty | 158f5991201c80bf4cbbbb9deabc9954ff19bbb1 | [
"MIT"
] | 353 | 2020-12-10T10:47:17.000Z | 2022-03-31T23:08:29.000Z | GasBotty/models/utils.py | GreenCUBIC/GasBotty | 158f5991201c80bf4cbbbb9deabc9954ff19bbb1 | [
"MIT"
] | 80 | 2020-12-10T09:54:22.000Z | 2022-03-30T22:08:45.000Z | GasBotty/models/utils.py | GreenCUBIC/GasBotty | 158f5991201c80bf4cbbbb9deabc9954ff19bbb1 | [
"MIT"
] | 63 | 2020-12-10T17:10:34.000Z | 2022-03-28T16:27:07.000Z | try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
| 31 | 75 | 0.806452 |
7318340689a601475670cd96bc3a15da21a3e8a4 | 2,438 | py | Python | pyzayo/svcinv_mixin.py | jeremyschulman/pyzayo | 37869daf6ef2df8e0898bae7c3ddbb0139840751 | [
"Apache-2.0"
] | 1 | 2021-06-02T10:00:35.000Z | 2021-06-02T10:00:35.000Z | pyzayo/svcinv_mixin.py | jeremyschulman/pyzayo | 37869daf6ef2df8e0898bae7c3ddbb0139840751 | [
"Apache-2.0"
] | null | null | null | pyzayo/svcinv_mixin.py | jeremyschulman/pyzayo | 37869daf6ef2df8e0898bae7c3ddbb0139840751 | [
"Apache-2.0"
] | null | null | null | """
This file contains the Zayo Service Inventory related API endpoints.
References
----------
Docs
http://54.149.224.75/wp-content/uploads/2020/02/Service-Inventory-Wiki.pdf
"""
# -----------------------------------------------------------------------------
# System Imports
# -----------------------------------------------------------------------------
from typing import List, Dict
# -----------------------------------------------------------------------------
# Public Imports
# -----------------------------------------------------------------------------
from first import first
# -----------------------------------------------------------------------------
# Private Imports
# -----------------------------------------------------------------------------
from pyzayo.base_client import ZayoClientBase
from pyzayo.consts import ZAYO_SM_ROUTE_SERVICES
# -----------------------------------------------------------------------------
# Module Exports
# -----------------------------------------------------------------------------
__all__ = ["ZayoServiceInventoryMixin"]
| 30.475 | 82 | 0.455291 |
7318d12083b715d2887f9b7cf5b2559fad4d08c0 | 6,236 | py | Python | pychron/core/helpers/logger_setup.py | aelamspychron/pychron | ad87c22b0817c739c7823a24585053041ee339d5 | [
"Apache-2.0"
] | 1 | 2019-02-27T21:57:44.000Z | 2019-02-27T21:57:44.000Z | pychron/core/helpers/logger_setup.py | aelamspychron/pychron | ad87c22b0817c739c7823a24585053041ee339d5 | [
"Apache-2.0"
] | 20 | 2020-09-09T20:58:39.000Z | 2021-10-05T17:48:37.000Z | pychron/core/helpers/logger_setup.py | AGESLDEO/pychron | 1a81e05d9fba43b797f335ceff6837c016633bcf | [
"Apache-2.0"
] | null | null | null | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =============enthought library imports=======================
# =============standard library imports ========================
from __future__ import absolute_import
import logging
import os
import shutil
from logging.handlers import RotatingFileHandler
from pychron.core.helpers.filetools import list_directory, unique_path2
from pychron.paths import paths
NAME_WIDTH = 40  # column width reserved for the logger name in each record
# Global log-record format: "<name>: <timestamp> <level> (<thread>) <message>".
gFORMAT = '%(name)-{}s: %(asctime)s %(levelname)-9s (%(threadName)-10s) %(message)s'.format(NAME_WIDTH)
gLEVEL = logging.DEBUG  # level applied to the root logger and every handler
def tail(f, lines=20):
    """Return the last *lines* lines of the binary file object *f* as a str.

    Reads the file backwards in fixed-size chunks so that only the tail of
    a (potentially large) file is ever loaded.

    Adapted from
    http://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail
    """
    wanted = lines
    CHUNK = 1024

    # Position at EOF to learn how many bytes the file holds.
    f.seek(0, 2)
    remaining = f.tell()

    needed = wanted
    chunk_idx = -1
    chunks = []  # CHUNK-sized pieces collected back-to-front from EOF

    while needed > 0 and remaining > 0:
        if remaining - CHUNK > 0:
            # Seek relative to EOF and grab the next unread chunk.
            f.seek(chunk_idx * CHUNK, 2)
            chunks.append(f.read(CHUNK))
        else:
            # Fewer than CHUNK bytes left: read them all from the start.
            f.seek(0, 0)
            chunks.append(f.read(remaining))
        needed -= chunks[-1].count(b'\n')
        remaining -= CHUNK
        chunk_idx -= 1

    # Chunks were gathered end-first; restore file order before splitting.
    text = b''.join(reversed(chunks))
    return b'\n'.join(text.splitlines()[-wanted:]).decode('utf-8')
# def anomaly_setup(name):
# ld = logging.Logger.manager.loggerDict
# print 'anomaly setup ld={}'.format(ld)
# if name not in ld:
# bdir = paths.log_dir
# name = add_extension(name, '.anomaly')
# apath, _cnt = unique_path2(bdir, name, delimiter='-', extension='.log')
# logger = logging.getLogger('anomalizer')
# h = logging.FileHandler(apath)
# logger.addHandler(h)
def logging_setup(name, use_archiver=True, root=None, use_file=True, **kw):
    """Configure root logging for the application.

    name: base name for the log file (``<name>.current.log``).
    use_archiver: when True, prune log files older than the archive window
        (14 days / 1 month) from the log directory.
    root: directory for log files; defaults to ``paths.log_dir``.
    use_file: when True, rotate any existing current log into a numbered
        backup and attach a rotating file handler in addition to the
        stream handler.
    kw: accepted for call-site compatibility; currently unused.
    """
    # set up deprecation warnings
    # import warnings
    # warnings.simplefilter('default')
    bdir = paths.log_dir if root is None else root
    # make sure we have a log directory
    # if not os.path.isdir(bdir):
    # os.mkdir(bdir)
    if use_archiver:
        # archive logs older than 1 month
        # lazy load Archive because of circular dependency
        from pychron.core.helpers.archiver import Archiver
        a = Archiver(archive_days=14,
                     archive_months=1,
                     root=bdir)
        a.clean()
    if use_file:
        # create a new logging file
        logname = '{}.current.log'.format(name)
        logpath = os.path.join(bdir, logname)
        if os.path.isfile(logpath):
            # A previous run left a current log: copy it (and any rotated
            # siblings such as .log.1, .log.2 ...) to a fresh uniquely
            # numbered backup path, then delete the originals.
            backup_logpath, _cnt = unique_path2(bdir, name, delimiter='-', extension='.log', width=5)
            shutil.copyfile(logpath, backup_logpath)
            os.remove(logpath)
            ps = list_directory(bdir, filtername=logname, remove_extension=False)
            for pi in ps:
                # carry the rotation suffix (e.g. ".1") over to the backup name
                _h, t = os.path.splitext(pi)
                v = os.path.join(bdir, pi)
                shutil.copyfile(v, '{}{}'.format(backup_logpath, t))
                os.remove(v)
    # NOTE: `root` is deliberately rebound here -- the directory argument is
    # no longer needed and the name now refers to the root logger.
    root = logging.getLogger()
    root.setLevel(gLEVEL)
    shandler = logging.StreamHandler()
    handlers = [shandler]
    if use_file:
        # 10 MB per file, keeping up to 50 rotated backups
        rhandler = RotatingFileHandler(
            logpath, maxBytes=1e7, backupCount=50)
        handlers.append(rhandler)
    fmt = logging.Formatter(gFORMAT)
    for hi in handlers:
        hi.setLevel(gLEVEL)
        hi.setFormatter(fmt)
        root.addHandler(hi)
def wrap(items, width=40, indent=90, delimiter=','):
    """Wrap *items* onto multiple delimiter-joined lines.

    items: a list of tokens, or a single string that is first split on
        *delimiter*.
    width: soft maximum width (in characters) of each output line.
    indent: number of spaces prefixed to every continuation line.
    delimiter: token separator. It is now used consistently for both
        splitting a string input and joining the output (previously the
        output was always joined with ',' regardless of *delimiter*;
        behavior is unchanged for the default delimiter).

    Returns the wrapped text as a single string.
    """
    if isinstance(items, str):
        items = items.split(delimiter)
    chunks = []
    current = []
    used = 0
    for token in items:
        used += 1 + len(token)  # +1 accounts for the delimiter character
        if used < width:
            current.append(token)
        else:
            # Flush the line being built. Skip the flush when it is empty
            # (i.e. the very first token already exceeds width) -- the old
            # implementation emitted a spurious empty leading chunk here.
            if current:
                chunks.append(delimiter.join(current))
            current = [token]
            used = len(token)
    chunks.append(delimiter.join(current))
    # Continuation lines end with the delimiter and start indented.
    sep = '{}\n{}'.format(delimiter, ' ' * indent)
    return sep.join(chunks)
# ============================== EOF ===================================
| 29.837321 | 103 | 0.591725 |
7318f31264c2155178f9f5bd08d307cfd0e1de20 | 7,980 | py | Python | picmodels/models/care_advisors/case_management_models/sequence_models/services/create_update_delete.py | bbcawodu/careadvisors-backend | 5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838 | [
"MIT"
] | null | null | null | picmodels/models/care_advisors/case_management_models/sequence_models/services/create_update_delete.py | bbcawodu/careadvisors-backend | 5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838 | [
"MIT"
] | null | null | null | picmodels/models/care_advisors/case_management_models/sequence_models/services/create_update_delete.py | bbcawodu/careadvisors-backend | 5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838 | [
"MIT"
] | null | null | null | import picmodels
| 34.545455 | 166 | 0.61817 |