hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
677d56032178efeb016755dc92a217e0030b9013 | 926 | py | Python | utils/exceptions.py | acatiadroid/util-bot | 2a91aa4335c4a844f5335d70cb7c7c32dd8010be | [
"MIT"
] | 1 | 2021-06-02T18:59:34.000Z | 2021-06-02T18:59:34.000Z | utils/exceptions.py | acatiadroid/util-bot | 2a91aa4335c4a844f5335d70cb7c7c32dd8010be | [
"MIT"
] | null | null | null | utils/exceptions.py | acatiadroid/util-bot | 2a91aa4335c4a844f5335d70cb7c7c32dd8010be | [
"MIT"
] | 1 | 2021-05-22T19:53:43.000Z | 2021-05-22T19:53:43.000Z | from pymongo.errors import PyMongoError
def human_join(seq, delim=', ', final='or'):
    """Join *seq* into a human-readable enumeration.

    Examples: [] -> '', ['a'] -> 'a', ['a', 'b'] -> 'a or b',
    ['a', 'b', 'c'] -> 'a, b or c'.

    :param seq: sequence of strings to join
    :param delim: separator placed between all but the last two items
    :param final: conjunction placed before the last item
    :return: the joined string
    """
    if not seq:
        return ''
    if len(seq) == 1:
        return seq[0]
    if len(seq) == 2:
        return f'{seq[0]} {final} {seq[1]}'
    *head, tail = seq
    return f'{delim.join(head)} {final} {tail}'
| 22.047619 | 67 | 0.552916 |
677f07bacda33862018d0c3f5ae887b33c4fb2d4 | 45,205 | py | Python | envs/flatland/utils/gym_env_wrappers.py | netceteragroup/Flatland-Challenge | 4292e8aa778d264d025ad6d32926840864b22a21 | [
"MIT"
] | 4 | 2021-01-15T10:49:33.000Z | 2021-12-31T08:11:35.000Z | envs/flatland/utils/gym_env_wrappers.py | netceteragroup/Flatland-Challenge | 4292e8aa778d264d025ad6d32926840864b22a21 | [
"MIT"
] | null | null | null | envs/flatland/utils/gym_env_wrappers.py | netceteragroup/Flatland-Challenge | 4292e8aa778d264d025ad6d32926840864b22a21 | [
"MIT"
] | null | null | null |
from typing import Dict, Any, Optional, List
import gym
import numpy as np
from collections import defaultdict
from flatland.core.grid.grid4_utils import get_new_position
from flatland.envs.agent_utils import EnvAgent, RailAgentStatus
from flatland.envs.rail_env import RailEnv, RailEnvActions
from envs.flatland.observations.segment_graph import Graph
from envs.flatland.utils.gym_env import StepOutput
def possible_actions_sorted_by_distance(env: RailEnv, handle: int):
    """Return the agent's possible (action, distance-to-target) pairs, nearest first.

    :param env: the flatland rail environment
    :param handle: index of the agent in ``env.agents``
    :return: list of ``(RailEnvActions, distance)`` tuples sorted by distance,
        duplicated to length 2 when only one move is possible, or ``None``
        when the agent has no position on the grid (e.g. DONE_REMOVED).
    """
    agent = env.agents[handle]
    # Pick the cell the agent effectively occupies for its current status.
    if agent.status == RailAgentStatus.READY_TO_DEPART:
        agent_virtual_position = agent.initial_position
    elif agent.status == RailAgentStatus.ACTIVE:
        agent_virtual_position = agent.position
    elif agent.status == RailAgentStatus.DONE:
        agent_virtual_position = agent.target
    else:
        return None

    possible_transitions = env.rail.get_transitions(*agent_virtual_position, agent.direction)
    distance_map = env.distance_map.get()[handle]
    possible_steps = []
    for movement in range(4):
        if not possible_transitions[movement]:
            continue
        # Map the absolute grid direction to an action relative to the heading.
        if movement == agent.direction:
            action = RailEnvActions.MOVE_FORWARD
        elif movement == (agent.direction + 1) % 4:
            action = RailEnvActions.MOVE_RIGHT
        elif movement == (agent.direction - 1) % 4:
            action = RailEnvActions.MOVE_LEFT
        else:
            # Transition opposite to the current heading (e.g. a dead end) is
            # not representable as FORWARD/LEFT/RIGHT here.
            raise ValueError(
                "Unexpected transition {} for agent {} heading {} at {}".format(
                    movement, handle, agent.direction, agent_virtual_position
                )
            )
        distance = distance_map[get_new_position(agent_virtual_position, movement) + (movement,)]
        possible_steps.append((action, distance))
    possible_steps = sorted(possible_steps, key=lambda step: step[1])
    if len(possible_steps) == 1:
        # Duplicate the single option so callers can always read two candidates.
        return possible_steps * 2
    else:
        return possible_steps
| 48.659849 | 224 | 0.596284 |
677f502efc17cc81872e696789bcab5852c8b1a5 | 1,226 | py | Python | acceptability/models/cbow_classifier.py | nyu-mll/CoLA-baselines | dd095d3646ed05a315280aaa8ed4ec84ba435b3e | [
"MIT"
] | 54 | 2018-05-31T22:57:28.000Z | 2022-03-17T13:25:49.000Z | acceptability/models/cbow_classifier.py | nyu-mll/CoLA-baselines | dd095d3646ed05a315280aaa8ed4ec84ba435b3e | [
"MIT"
] | 4 | 2018-06-06T14:15:10.000Z | 2020-08-07T16:35:50.000Z | acceptability/models/cbow_classifier.py | nyu-mll/CoLA-baselines | dd095d3646ed05a315280aaa8ed4ec84ba435b3e | [
"MIT"
] | 18 | 2018-07-10T12:18:17.000Z | 2022-03-02T22:19:22.000Z | import torch
from torch import nn
| 34.055556 | 71 | 0.577488 |
677f53508c3acb6aa3c5210a9a7139a828c94921 | 14,637 | py | Python | tests/test_validators.py | yaaminu/yaval | 32f04ecfa092c978fc026f6b7f58d6cf2defd8c9 | [
"MIT"
] | 14 | 2021-02-12T19:04:21.000Z | 2021-03-12T18:18:09.000Z | tests/test_validators.py | yaaminu/yaval | 32f04ecfa092c978fc026f6b7f58d6cf2defd8c9 | [
"MIT"
] | 5 | 2021-02-12T16:04:37.000Z | 2021-04-14T12:05:02.000Z | tests/test_validators.py | yaaminu/yaval | 32f04ecfa092c978fc026f6b7f58d6cf2defd8c9 | [
"MIT"
] | null | null | null | import datetime
from mock import Mock, call
import pytest
from finicky import ValidationException, is_int, is_float, is_str, is_date, is_dict, is_list
# noinspection PyShadowingBuiltins
# noinspection PyShadowingBuiltins
def test_must_return_none_when_input_is_none_and_required_is_false(self):
assert is_float(required=False)(None) is None
# noinspection PyShadowingBuiltins
# noinspection PyShadowingBuiltins
def test_must_clean_validated_input_before_returning(self):
validated_input = is_dict(schema={"phone": is_str(required=True)})({"phone": " +233-23-23283234"})
assert validated_input == {"phone": "+233-23-23283234"}
class TestListValidator:
"""
1. must reject none input whend field is required
2. must return default value when field isnot required and default is provided
4. must validate all entries against the validator.
5. must require all entries to pass validation by default
    6. when all is set to false, must require that at least one entry pass validation
7. must return only validated entries
6. on error, must return all errors encountered
"""
| 52.841155 | 122 | 0.683678 |
677f77f661f042444b5b6e3515ca7ba65cf1bbd5 | 583 | py | Python | polygon.py | SYED-RAFI-NAQVI/10hourcodingchallenge | 20c7c3aee52a2eb281381a9db4d57075cbf38446 | [
"MIT"
] | null | null | null | polygon.py | SYED-RAFI-NAQVI/10hourcodingchallenge | 20c7c3aee52a2eb281381a9db4d57075cbf38446 | [
"MIT"
] | null | null | null | polygon.py | SYED-RAFI-NAQVI/10hourcodingchallenge | 20c7c3aee52a2eb281381a9db4d57075cbf38446 | [
"MIT"
] | null | null | null | import numpy as np
import cv2 as cv
img = cv.imread('1.jpeg',cv.IMREAD_COLOR)
# A polygon is defined by a set of vertices, supplied as an int32 numpy array.
pts = np.array([[20,33],[300,120], [67,79], [123,111], [144,134]], np.int32)
# cv.polylines draws the polygon. Parameters: 1. image to draw on, 2. list of
# vertex arrays, 3. bool telling whether to close the shape by joining the
# last point back to the first, 4. BGR color, 5. line width.
cv.polylines(img, [pts], True,(0,231,123), 1)
cv.imshow('image',img)
cv.waitKey(0)
cv.destroyAllWindows() | 32.388889 | 117 | 0.711835 |
67805442e518a6adbf84390b3eb7ec7d3ff5cd9c | 3,871 | py | Python | lib/fathead/firefox_about_config/parse.py | aeisenberg/zeroclickinfo-fathead | 9be00a038d812ca9ccd0d601220afde777ab2f8e | [
"Apache-2.0"
] | 1 | 2021-01-05T16:48:23.000Z | 2021-01-05T16:48:23.000Z | lib/fathead/firefox_about_config/parse.py | aeisenberg/zeroclickinfo-fathead | 9be00a038d812ca9ccd0d601220afde777ab2f8e | [
"Apache-2.0"
] | null | null | null | lib/fathead/firefox_about_config/parse.py | aeisenberg/zeroclickinfo-fathead | 9be00a038d812ca9ccd0d601220afde777ab2f8e | [
"Apache-2.0"
] | 1 | 2016-06-12T06:12:02.000Z | 2016-06-12T06:12:02.000Z | #!/usr/bin/env python2
from BeautifulSoup import BeautifulSoup, NavigableString
import urllib
import string
import re
if __name__ == "__main__":
parser = Parser()
parser.findEntries()
with open('output.txt', 'w') as file:
for entry in parser.entries:
file.write(entry.__str__().encode('UTF-8') + '\n')
| 42.538462 | 92 | 0.422113 |
6781793ae8fc13e5299017f4d13600e84c029c5a | 547 | py | Python | sources/simulators/multiprocessing_simulator/start_client.py | M4rukku/impact_of_non_iid_data_in_federated_learning | c818db03699c82e42217d56f8ddd4cc2081c8bb1 | [
"MIT"
] | null | null | null | sources/simulators/multiprocessing_simulator/start_client.py | M4rukku/impact_of_non_iid_data_in_federated_learning | c818db03699c82e42217d56f8ddd4cc2081c8bb1 | [
"MIT"
] | null | null | null | sources/simulators/multiprocessing_simulator/start_client.py | M4rukku/impact_of_non_iid_data_in_federated_learning | c818db03699c82e42217d56f8ddd4cc2081c8bb1 | [
"MIT"
] | null | null | null | import flwr as fl
import flwr.client
from sources.utils.simulation_parameters import DEFAULT_SERVER_ADDRESS
from sources.simulators.base_client_provider import BaseClientProvider
| 39.071429 | 90 | 0.824497 |
6785745e950d85dea8868d37187f8f6ecdfbf12a | 23,056 | py | Python | aea/helpers/pipe.py | bryanchriswhite/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | 126 | 2019-09-07T09:32:44.000Z | 2022-03-29T14:28:41.000Z | aea/helpers/pipe.py | salman6049/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | 1,814 | 2019-08-24T10:08:07.000Z | 2022-03-31T14:28:36.000Z | aea/helpers/pipe.py | salman6049/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | 46 | 2019-09-03T22:13:58.000Z | 2022-03-22T01:25:16.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Portable pipe implementation for Linux, MacOS, and Windows."""
import asyncio
import errno
import logging
import os
import socket
import struct
import tempfile
from abc import ABC, abstractmethod
from asyncio import AbstractEventLoop
from asyncio.streams import StreamWriter
from shutil import rmtree
from typing import IO, Optional
from aea.exceptions import enforce
_default_logger = logging.getLogger(__name__)
PIPE_CONN_TIMEOUT = 10.0
PIPE_CONN_ATTEMPTS = 10
TCP_SOCKET_PIPE_CLIENT_CONN_ATTEMPTS = 5
def make_ipc_channel(
    logger: logging.Logger = _default_logger, loop: Optional[AbstractEventLoop] = None
) -> IPCChannel:
    """
    Build a bidirectional InterProcess Communication channel for the current platform.

    Named pipes are used on POSIX systems; a local TCP socket on Windows.

    :param logger: the logger
    :param loop: the loop
    :return: IPCChannel
    """
    platform = os.name
    if platform == "posix":
        channel: IPCChannel = PosixNamedPipeChannel(logger=logger, loop=loop)
    elif platform == "nt":  # pragma: nocover
        channel = TCPSocketChannel(logger=logger, loop=loop)
    else:  # pragma: nocover
        raise NotImplementedError(
            "make ipc channel is not supported on platform {}".format(os.name)
        )
    return channel
def make_ipc_channel_client(
    in_path: str,
    out_path: str,
    logger: logging.Logger = _default_logger,
    loop: Optional[AbstractEventLoop] = None,
) -> IPCChannelClient:
    """
    Build a bidirectional InterProcess Communication client channel for the current platform.

    :param in_path: rendezvous point for incoming communication
    :param out_path: rendezvous point for outgoing communication
    :param logger: the logger
    :param loop: the loop
    :return: IPCChannelClient
    """
    platform = os.name
    if platform == "posix":
        client: IPCChannelClient = PosixNamedPipeChannelClient(
            in_path, out_path, logger=logger, loop=loop
        )
    elif platform == "nt":  # pragma: nocover
        client = TCPSocketChannelClient(in_path, out_path, logger=logger, loop=loop)
    else:  # pragma: nocover
        raise NotImplementedError(
            "make ip channel client is not supported on platform {}".format(os.name)
        )
    return client
| 32.022222 | 88 | 0.600885 |
6785ebdaa0a0f8a5a088b840a1b64f1e5c59a6a9 | 6,046 | py | Python | src/config/svc-monitor/svc_monitor/tests/test_port_tuple.py | UbuntuEvangelist/contrail-controller | 4e8a992230f8f8e91e4f753e19b5442d9e1b446d | [
"Apache-2.0"
] | null | null | null | src/config/svc-monitor/svc_monitor/tests/test_port_tuple.py | UbuntuEvangelist/contrail-controller | 4e8a992230f8f8e91e4f753e19b5442d9e1b446d | [
"Apache-2.0"
] | null | null | null | src/config/svc-monitor/svc_monitor/tests/test_port_tuple.py | UbuntuEvangelist/contrail-controller | 4e8a992230f8f8e91e4f753e19b5442d9e1b446d | [
"Apache-2.0"
] | 18 | 2017-01-12T09:28:44.000Z | 2019-04-18T20:47:42.000Z | import mock
from mock import patch
import unittest
from vnc_api.vnc_api import *
from svc_monitor.port_tuple import PortTupleAgent
from svc_monitor.config_db import *
import test_common_utils as test_utils
| 48.368 | 93 | 0.664406 |
6786e2d4a6f307e6300a31ab2c4e829094e2410e | 5,672 | py | Python | pearll/agents/ga.py | LondonNode/Anvil | bc50fd7b16af36051157814e2548a98e787b03de | [
"MIT"
] | 13 | 2022-01-17T14:43:05.000Z | 2022-03-10T04:05:36.000Z | pearll/agents/ga.py | LondonNode/Anvil | bc50fd7b16af36051157814e2548a98e787b03de | [
"MIT"
] | 3 | 2022-02-24T18:29:12.000Z | 2022-03-22T11:09:07.000Z | pearll/agents/ga.py | LondonNode/Anvil | bc50fd7b16af36051157814e2548a98e787b03de | [
"MIT"
] | null | null | null | from functools import partial
from typing import Callable, List, Optional, Type
import numpy as np
from gym.vector.vector_env import VectorEnv
from pearll.agents.base_agents import BaseAgent
from pearll.buffers import RolloutBuffer
from pearll.buffers.base_buffer import BaseBuffer
from pearll.callbacks.base_callback import BaseCallback
from pearll.common.type_aliases import Log
from pearll.common.utils import filter_rewards
from pearll.explorers.base_explorer import BaseExplorer
from pearll.models import ActorCritic, Dummy
from pearll.settings import (
BufferSettings,
ExplorerSettings,
LoggerSettings,
MiscellaneousSettings,
MutationSettings,
PopulationSettings,
Settings,
)
from pearll.signal_processing import (
crossover_operators,
mutation_operators,
selection_operators,
)
from pearll.updaters.evolution import BaseEvolutionUpdater, GeneticUpdater
def default_model(env: VectorEnv):
    """
    Construct the default (dummy) actor-critic model for the given environment.

    One actor per vectorized sub-environment is created, with actor parameters
    drawn from a uniform distribution.
    """
    action_space = env.single_action_space
    settings = PopulationSettings(
        actor_population_size=env.num_envs, actor_distribution="uniform"
    )
    return ActorCritic(
        actor=Dummy(space=action_space),
        critic=Dummy(space=action_space),
        population_settings=settings,
    )
| 38.849315 | 99 | 0.701164 |
6787612d23eda8ccb35a41398442232a6c1a614e | 17,643 | py | Python | src/tequila/optimizers/optimizer_scipy.py | snc2/tequila | 6767ced9215408f7d055c22df7a66ccd610b00fb | [
"MIT"
] | null | null | null | src/tequila/optimizers/optimizer_scipy.py | snc2/tequila | 6767ced9215408f7d055c22df7a66ccd610b00fb | [
"MIT"
] | null | null | null | src/tequila/optimizers/optimizer_scipy.py | snc2/tequila | 6767ced9215408f7d055c22df7a66ccd610b00fb | [
"MIT"
] | null | null | null | import scipy, numpy, typing, numbers
from tequila.objective import Objective
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from .optimizer_base import Optimizer
from ._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from collections import namedtuple
from tequila.utils.exceptions import TequilaException
from tequila.circuit.noise import NoiseModel
from tequila.tools.qng import get_qng_combos
SciPyReturnType = namedtuple('SciPyReturnType', 'energy angles history scipy_output')
def available_methods(energy=True, gradient=True, hessian=True) -> typing.List[str]:
    """Convenience: list the SciPy optimizer methods enabled by the given flags.

    Parameters
    ----------
    energy :
        include gradient-free methods (Default value = True)
    gradient :
        include gradient-based methods (Default value = True)
    hessian :
        include hessian-based methods (Default value = True)

    Returns
    -------
        list of available method names
    """
    selected: typing.List[str] = []
    groups = (
        (energy, OptimizerSciPy.gradient_free_methods),
        (gradient, OptimizerSciPy.gradient_based_methods),
        (hessian, OptimizerSciPy.hessian_based_methods),
    )
    for enabled, group in groups:
        if enabled:
            selected.extend(group)
    return selected
def minimize(objective: Objective,
             gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
             hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
             initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
             variables: typing.List[typing.Hashable] = None,
             samples: int = None,
             maxiter: int = 100,
             backend: str = None,
             backend_options: dict = None,
             noise: NoiseModel = None,
             method: str = "BFGS",
             tol: float = 1.e-3,
             method_options: dict = None,
             method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
             method_constraints=None,
             silent: bool = False,
             save_history: bool = True,
             *args,
             **kwargs) -> SciPyReturnType:
    """
    Parameters
    ----------
    objective: Objective :
        The tequila objective to optimize
    gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : (Default value = None) :
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary of variables and tequila objective to define own gradient,
        None for automatic construction (default)
        Other options include 'qng' to use the quantum natural gradient.
    hessian: typing.Union[str, typing.Dict[Variable, Objective], None] : (Default value = None) :
        '2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
        dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
        None for automatic construction (default)
    initial_values: typing.Dict[typing.Hashable, numbers.Real]: (Default value = None):
        Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
    variables: typing.List[typing.Hashable] :
         (Default value = None)
         List of Variables to optimize
    samples: int :
         (Default value = None)
         samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
    maxiter: int :
         (Default value = 100)
    backend: str :
         (Default value = None)
         Simulator backend, will be automatically chosen if set to None
    backend_options: dict:
         (Default value = None)
         Additional options for the backend
         Will be unpacked and passed to the compiled objective in every call
    noise: NoiseModel:
         (Default value = None)
         a NoiseModel to apply to all expectation values in the objective.
    method: str :
         (Default value = "BFGS")
         Optimization method (see scipy documentation, or 'available methods')
    tol: float :
         (Default value = 1.e-3)
         Convergence tolerance for optimization (see scipy documentation)
    method_options: dict :
         (Default value = None)
         Dictionary of options
         (see scipy documentation)
    method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]]:
        (Default value = None)
        bounds for the variables (see scipy documentation)
    method_constraints :
         (Default value = None)
         (see scipy documentation
    silent: bool :
         (Default value = False)
         No printout if True
    save_history: bool:
        (Default value = True)
        Save the history throughout the optimization

    Returns
    -------
    SciPyReturnType :
        named tuple with the final energy, angles, history and raw scipy output
    """

    # Normalize keys of user-supplied gradient/hessian dictionaries to
    # proper Variable instances.
    if isinstance(gradient, dict) or hasattr(gradient, "items"):
        if all(isinstance(x, Objective) for x in gradient.values()):
            gradient = format_variable_dictionary(gradient)
    if isinstance(hessian, dict) or hasattr(hessian, "items"):
        if all(isinstance(x, Objective) for x in hessian.values()):
            # Bugfix: k[1] was previously wrapped in a list ([k[1]]) which is
            # inconsistent with k[0] and would produce a malformed key.
            hessian = {(assign_variable(k[0]), assign_variable(k[1])): v for k, v in hessian.items()}
    method_bounds = format_variable_dictionary(method_bounds)

    # set defaults
    optimizer = OptimizerSciPy(save_history=save_history,
                               maxiter=maxiter,
                               method=method,
                               method_options=method_options,
                               method_bounds=method_bounds,
                               method_constraints=method_constraints,
                               silent=silent,
                               backend=backend,
                               backend_options=backend_options,
                               samples=samples,
                               noise_model=noise,
                               tol=tol,
                               *args,
                               **kwargs)
    if initial_values is not None:
        initial_values = {assign_variable(k): v for k, v in initial_values.items()}
    return optimizer(objective=objective,
                     gradient=gradient,
                     hessian=hessian,
                     initial_values=initial_values,
                     variables=variables, *args, **kwargs)
| 44.55303 | 141 | 0.590829 |
67884e5df8d269868ffffa5bd0b7c492cbdd5945 | 12,051 | py | Python | Section_3.3_simul_3/2_Runtime/bsolar.py | isaac2math/solar | 92a2a869cd902e15edce7aa5ed5af10f148763d9 | [
"Intel"
] | null | null | null | Section_3.3_simul_3/2_Runtime/bsolar.py | isaac2math/solar | 92a2a869cd902e15edce7aa5ed5af10f148763d9 | [
"Intel"
] | null | null | null | Section_3.3_simul_3/2_Runtime/bsolar.py | isaac2math/solar | 92a2a869cd902e15edce7aa5ed5af10f148763d9 | [
"Intel"
] | null | null | null | import numpy as np
import time
import warnings
from sklearn.linear_model import LinearRegression
from solar import solar
from sklearn.exceptions import ConvergenceWarning
# For recent versions of Scikit-learn: since the class 'Lars' may rely on the Cholesky decomposition and hence may raise a convergence warning on high dimensional data (p is much larger than n), we input the following command to skip the convergence warning.
warnings.filterwarnings("ignore", category=ConvergenceWarning, module="sklearn")
#####################################################
# define the class of bsolar (sequential computing) #
#####################################################
'''
this class is used to demonstrate the performance of bootstrap solar (bsolar) via sequential computation
please note that this file is identical to "bsolar_parallel.py" except the subsample selection frequency estimation (step 2, line 93 - 120 of this file), where we use sequential computing scheme instead.
Check this before you run the code:
Plz check if you have 'sci-kit learn', 'numpy', 'joblib', 'matplotlib' and 'tqdm' installed. If not,
1. run 'pip install scikit-learn joblib numpy matplotlib tqdm' if you use pure Python3
2. run 'conda install scikit-learn joblib numpy matplotlib tqdm' if you use Anaconda3
Modules:
1. from scikit-learn, we call 'Lars' to compute solar.
2. we use 'numpy' for matrix computation and random variable generation;
3. for simulator, plz see 'simulator.py' for detail;
4. we use class 'time' to time the computation of solar
Inputs:
1. X and y : the inputs and output of regression;
2. n_repeat_solar : the number of subsamples that solar generates;
3. n_repeat_bsolar : the number of subsamples that bsolar generates;
4. step_size : the step size of grid search for threshold optimization of subsample selection frequency;
Outputs:
1. bsolar_coef_H : the bsolar-H regression coefficients;
2. bsolar_coef_S : the bsolar-S regression coefficients;
4. Qc_list : the detailed subsample selection frequency of bsolar;
5. Q_opt_c_H : the variable that bsolar-H selects;
5. Q_opt_c_S : the variable that bsolar-S selects;
Remarks:
1. fit : the function that trains bsolar;
2. q_list : the plot function that returns the full list of subsample selection frequency for each variable in bsolar;
'''
##################################
# test if this module works fine #
##################################
'''
this part is set up to test the functionality of the class above;
you can run all the code in this file to test whether the class works;
when you import the class from this file, the code (including functions or classes) after " if __name__ == '__main__': " will be ignored
'''
if __name__ == '__main__':

    from simulator import simul

    # Smoke test: generate a small synthetic regression problem and fit
    # bsolar on it, then report the selected variables.
    sample_size = 100
    n_dim = 12
    n_info = 5
    n_repeat_solar = 10
    n_repeat_bsolar = 3
    step_size = -0.02

    # fix the random seed for reproducibility
    np.random.seed(0)

    # generate X and Y
    trial1 = simul(sample_size, n_dim, n_info)
    X, Y = trial1.data_gen()

    # start timing
    start = time.time()

    # train bsolar (yields both bsolar-H and bsolar-S selections)
    trial2 = bsolar(X, Y, n_repeat_solar, n_repeat_bsolar, step_size)
    bsolar_coef_H, bsolar_coef_S, Qc_list, Q_opt_c_H, Q_opt_c_S = trial2.fit()

    # end timing
    end = time.time()

    # print the result
    print('variables that bsolar-H selects: ', Q_opt_c_H)
    print('variables that bsolar-S selects: ', Q_opt_c_S)
trial2.q_list(Qc_list) | 46.528958 | 265 | 0.64866 |
6788b2d4a5d2258670eff8708364f1ba49cb5189 | 615 | py | Python | solutions/nelum_pokuna.py | UdeshUK/RxH5-Prextreme | 6f329b13d552d9c7e9ad927e2fe607c7cc0964f6 | [
"Apache-2.0"
] | 1 | 2018-10-14T12:47:03.000Z | 2018-10-14T12:47:03.000Z | solutions/nelum_pokuna.py | Team-RxH5/Prextreme | 6f329b13d552d9c7e9ad927e2fe607c7cc0964f6 | [
"Apache-2.0"
] | null | null | null | solutions/nelum_pokuna.py | Team-RxH5/Prextreme | 6f329b13d552d9c7e9ad927e2fe607c7cc0964f6 | [
"Apache-2.0"
] | null | null | null | cases=int(raw_input())
for case in range(cases):
answers=[0,0]
grid=[[0 for x in range(4)] for y in range(2)]
common=[]
for i in range(2):
answers[i]=int(raw_input())
for j in range(4):
grid[i][j]=raw_input().split()
grid[i][j] = map(int, grid[i][j])
# Code begins
for i in grid[0][answers[0]-1]:
if i in grid[1][answers[1]-1]:
common.append(i)
if len(common)>1:
print "Bad magician!"
elif len(common)==1:
for i in common:
print i
elif len(common)==0:
print "Volunteer cheated!"
| 23.653846 | 50 | 0.518699 |
6788c25e7a00ed595c0a516765861ce2d8e549e1 | 69,067 | py | Python | resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/MolKit/data/allct_dat.py | J-E-J-S/aaRS-Pipeline | 43f59f28ab06e4b16328c3bc405cdddc6e69ac44 | [
"MIT"
] | 9 | 2021-03-06T04:24:28.000Z | 2022-01-03T09:53:07.000Z | MolKit/data/allct_dat.py | e-mayo/autodocktools-prepare-py3k | 2dd2316837bcb7c19384294443b2855e5ccd3e01 | [
"BSD-3-Clause"
] | 3 | 2021-03-07T05:37:16.000Z | 2021-09-19T15:06:54.000Z | MolKit/data/allct_dat.py | e-mayo/autodocktools-prepare-py3k | 2dd2316837bcb7c19384294443b2855e5ccd3e01 | [
"BSD-3-Clause"
] | 4 | 2019-08-28T23:11:39.000Z | 2021-11-27T08:43:36.000Z | allct_dat = {
"TYR": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"INTX,KFORM":['INT', '1'],
"HD2":{'torsion': 180.0, 'tree': 'E', 'NC': 16, 'NB': 19, 'NA': 21, 'I': 22, 'angle': 120.0, 'blen': 1.09, 'charge': 0.064, 'type': 'HC'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"OH":{'torsion': 180.0, 'tree': 'S', 'NC': 12, 'NB': 14, 'NA': 16, 'I': 17, 'angle': 120.0, 'blen': 1.36, 'charge': -0.528, 'type': 'OH'},
"HD1":{'torsion': 0.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 13, 'angle': 120.0, 'blen': 1.09, 'charge': 0.064, 'type': 'HC'},
"HE1":{'torsion': 180.0, 'tree': 'E', 'NC': 11, 'NB': 12, 'NA': 14, 'I': 15, 'angle': 120.0, 'blen': 1.09, 'charge': 0.102, 'type': 'HC'},
"HE2":{'torsion': 180.0, 'tree': 'E', 'NC': 14, 'NB': 16, 'NA': 19, 'I': 20, 'angle': 120.0, 'blen': 1.09, 'charge': 0.102, 'type': 'HC'},
"CD2":{'torsion': 0.0, 'tree': 'S', 'NC': 14, 'NB': 16, 'NA': 19, 'I': 21, 'angle': 120.0, 'blen': 1.4, 'charge': -0.002, 'type': 'CA'},
"NAMRES":'TYROSINE COO- ANION',
"CD1":{'torsion': 180.0, 'tree': 'B', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 120.0, 'blen': 1.4, 'charge': -0.002, 'type': 'CA'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'CD1', 'HD1', 'CE1', 'HE1', 'CZ', 'OH', 'HH', 'CE2', 'HE2', 'CD2', 'HD2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"CE1":{'torsion': 180.0, 'tree': 'B', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 14, 'angle': 120.0, 'blen': 1.4, 'charge': -0.264, 'type': 'CA'},
"CE2":{'torsion': 0.0, 'tree': 'B', 'NC': 12, 'NB': 14, 'NA': 16, 'I': 19, 'angle': 120.0, 'blen': 1.4, 'charge': -0.264, 'type': 'CA'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"HH":{'torsion': 0.0, 'tree': 'E', 'NC': 14, 'NB': 16, 'NA': 17, 'I': 18, 'angle': 113.0, 'blen': 0.96, 'charge': 0.334, 'type': 'HO'},
"CZ":{'torsion': 0.0, 'tree': 'B', 'NC': 11, 'NB': 12, 'NA': 14, 'I': 16, 'angle': 120.0, 'blen': 1.4, 'charge': 0.462, 'type': 'C'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 23, 'I': 24, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 109.47, 'blen': 1.51, 'charge': -0.03, 'type': 'CA'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.098, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 23, 'I': 25, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"loopList":[['CG', 'CD2']],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 23, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
"ASN": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'OD1', 'ND2', 'HD21', 'HD22', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"ND2":{'torsion': 180.0, 'tree': 'B', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 116.6, 'blen': 1.335, 'charge': -0.867, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 16, 'I': 17, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.086, 'type': 'CT'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O'], ['CB', 'ND2', 'CG', 'OD1'], ['CG', 'HD21', 'ND2', 'HD22']],
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CG":{'torsion': 180.0, 'tree': 'B', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 111.1, 'blen': 1.522, 'charge': 0.675, 'type': 'C'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HD21":{'torsion': 180.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 13, 'I': 14, 'angle': 119.8, 'blen': 1.01, 'charge': 0.344, 'type': 'H'},
"OD1":{'torsion': 0.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 120.5, 'blen': 1.229, 'charge': -0.47, 'type': 'O'},
"HD22":{'torsion': 0.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 13, 'I': 15, 'angle': 119.8, 'blen': 1.01, 'charge': 0.344, 'type': 'H'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 16, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 16, 'I': 18, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'ASPARAGINE COO- ANION',
},
"CYS": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'SG', 'HSG', 'LP1', 'LP2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"SG":{'torsion': 180.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 116.0, 'blen': 1.81, 'charge': 0.827, 'type': 'SH'},
"LP1":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 96.7, 'blen': 0.679, 'charge': -0.481, 'type': 'LP'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 15, 'I': 16, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.06, 'type': 'CT'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"LP2":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 14, 'angle': 96.7, 'blen': 0.679, 'charge': -0.481, 'type': 'LP'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HSG":{'torsion': 180.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 96.0, 'blen': 1.33, 'charge': 0.135, 'type': 'HS'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 15, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 15, 'I': 17, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'CYSTEINE COO- ANION',
},
"ARG": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.056, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.056, 'type': 'HC'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O'], ['NE', 'NH1', 'CZ', 'NH2'], ['CD', 'CZ', 'NE', 'HE'], ['CZ', 'HH12', 'NH1', 'HH11'], ['CZ', 'HH22', 'NH2', 'HH21']],
"HH11":{'torsion': 0.0, 'tree': 'E', 'NC': 17, 'NB': 19, 'NA': 20, 'I': 21, 'angle': 119.8, 'blen': 1.01, 'charge': 0.361, 'type': 'H3'},
"HH12":{'torsion': 180.0, 'tree': 'E', 'NC': 17, 'NB': 19, 'NA': 20, 'I': 22, 'angle': 119.8, 'blen': 1.01, 'charge': 0.361, 'type': 'H3'},
"HH21":{'torsion': 0.0, 'tree': 'E', 'NC': 17, 'NB': 19, 'NA': 23, 'I': 24, 'angle': 119.8, 'blen': 1.01, 'charge': 0.361, 'type': 'H3'},
"HH22":{'torsion': 180.0, 'tree': 'E', 'NC': 17, 'NB': 19, 'NA': 23, 'I': 25, 'angle': 119.8, 'blen': 1.01, 'charge': 0.361, 'type': 'H3'},
"INTX,KFORM":['INT', '1'],
"NE":{'torsion': 180.0, 'tree': 'B', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 17, 'angle': 111.0, 'blen': 1.48, 'charge': -0.324, 'type': 'N2'},
"HG2":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 109.5, 'blen': 1.09, 'charge': 0.074, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HD2":{'torsion': 300.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 15, 'angle': 109.5, 'blen': 1.09, 'charge': 0.133, 'type': 'HC'},
"HD3":{'torsion': 60.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 16, 'angle': 109.5, 'blen': 1.09, 'charge': 0.133, 'type': 'HC'},
"NAMRES":'ARGININE COO- ANION',
"HE":{'torsion': 0.0, 'tree': 'E', 'NC': 11, 'NB': 14, 'NA': 17, 'I': 18, 'angle': 118.5, 'blen': 1.01, 'charge': 0.269, 'type': 'H3'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'HD2', 'HD3', 'NE', 'HE', 'CZ', 'NH1', 'HH11', 'HH12', 'NH2', 'HH21', 'HH22', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"NH2":{'torsion': 180.0, 'tree': 'B', 'NC': 14, 'NB': 17, 'NA': 19, 'I': 23, 'angle': 118.0, 'blen': 1.33, 'charge': -0.624, 'type': 'N2'},
"HG3":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 109.5, 'blen': 1.09, 'charge': 0.074, 'type': 'HC'},
"NH1":{'torsion': 0.0, 'tree': 'B', 'NC': 14, 'NB': 17, 'NA': 19, 'I': 20, 'angle': 122.0, 'blen': 1.33, 'charge': -0.624, 'type': 'N2'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"CZ":{'torsion': 180.0, 'tree': 'B', 'NC': 11, 'NB': 14, 'NA': 17, 'I': 19, 'angle': 123.0, 'blen': 1.33, 'charge': 0.76, 'type': 'CA'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"CD":{'torsion': 180.0, 'tree': '3', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 14, 'angle': 109.47, 'blen': 1.525, 'charge': -0.228, 'type': 'CT'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 26, 'I': 27, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 109.47, 'blen': 1.525, 'charge': -0.103, 'type': 'CT'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.08, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 26, 'I': 28, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 26, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
"LEU": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.033, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.033, 'type': 'HC'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"INTX,KFORM":['INT', '1'],
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"NAMRES":'LEUCINE COO- ANION',
"HG":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 109.5, 'blen': 1.09, 'charge': 0.031, 'type': 'HC'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'HG', 'CD1', 'HD11', 'HD12', 'HD13', 'CD2', 'HD21', 'HD22', 'HD23', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HD11":{'torsion': 60.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 13, 'I': 14, 'angle': 109.5, 'blen': 1.09, 'charge': 0.034, 'type': 'HC'},
"HD12":{'torsion': 180.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 13, 'I': 15, 'angle': 109.5, 'blen': 1.09, 'charge': 0.034, 'type': 'HC'},
"HD13":{'torsion': 300.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 13, 'I': 16, 'angle': 109.5, 'blen': 1.09, 'charge': 0.034, 'type': 'HC'},
"CD2":{'torsion': 180.0, 'tree': '3', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 17, 'angle': 109.47, 'blen': 1.525, 'charge': -0.107, 'type': 'CT'},
"CD1":{'torsion': 60.0, 'tree': '3', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 109.47, 'blen': 1.525, 'charge': -0.107, 'type': 'CT'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 21, 'I': 22, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 109.47, 'blen': 1.525, 'charge': -0.01, 'type': 'CT'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.061, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 21, 'I': 23, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"HD21":{'torsion': 60.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 17, 'I': 18, 'angle': 109.5, 'blen': 1.09, 'charge': 0.034, 'type': 'HC'},
"HD23":{'torsion': 300.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 17, 'I': 20, 'angle': 109.5, 'blen': 1.09, 'charge': 0.034, 'type': 'HC'},
"HD22":{'torsion': 180.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 17, 'I': 19, 'angle': 109.5, 'blen': 1.09, 'charge': 0.034, 'type': 'HC'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 21, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
"HID": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"NE2":{'torsion': 0.0, 'tree': 'S', 'NC': 11, 'NB': 12, 'NA': 14, 'I': 16, 'angle': 109.0, 'blen': 1.31, 'charge': -0.502, 'type': 'NB'},
"ND1":{'torsion': 180.0, 'tree': 'B', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 122.0, 'blen': 1.39, 'charge': -0.146, 'type': 'NA'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O'], ['CG', 'CE1', 'ND1', 'HD1']],
"CE1":{'torsion': 180.0, 'tree': 'B', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 14, 'angle': 108.0, 'blen': 1.32, 'charge': 0.241, 'type': 'CR'},
"INTX,KFORM":['INT', '1'],
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HD1":{'torsion': 0.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 13, 'angle': 126.0, 'blen': 1.01, 'charge': 0.228, 'type': 'H'},
"NAMRES":'HISTIDINE DELTAH COO- ANION',
"HE":{'torsion': 180.0, 'tree': 'E', 'NC': 11, 'NB': 12, 'NA': 14, 'I': 15, 'angle': 120.0, 'blen': 1.09, 'charge': 0.036, 'type': 'HC'},
"HD":{'torsion': 180.0, 'tree': 'E', 'NC': 14, 'NB': 16, 'NA': 17, 'I': 18, 'angle': 120.0, 'blen': 1.09, 'charge': 0.018, 'type': 'HC'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'ND1', 'HD1', 'CE1', 'HE', 'NE2', 'CD2', 'HD', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"CD2":{'torsion': 0.0, 'tree': 'S', 'NC': 12, 'NB': 14, 'NA': 16, 'I': 17, 'angle': 110.0, 'blen': 1.36, 'charge': 0.195, 'type': 'CV'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 19, 'I': 20, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 115.0, 'blen': 1.51, 'charge': -0.032, 'type': 'CC'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.098, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 19, 'I': 21, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"loopList":[['CG', 'CD2']],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 19, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
"HIE": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"NE2":{'torsion': 0.0, 'tree': 'B', 'NC': 11, 'NB': 12, 'NA': 13, 'I': 15, 'angle': 109.0, 'blen': 1.31, 'charge': -0.146, 'type': 'NA'},
"ND1":{'torsion': 180.0, 'tree': 'S', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 122.0, 'blen': 1.39, 'charge': -0.502, 'type': 'NB'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O'], ['CE1', 'CD2', 'NE2', 'HE2']],
"CE1":{'torsion': 180.0, 'tree': 'B', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 13, 'angle': 108.0, 'blen': 1.32, 'charge': 0.241, 'type': 'CR'},
"INTX,KFORM":['INT', '1'],
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HE2":{'torsion': 180.0, 'tree': 'E', 'NC': 12, 'NB': 13, 'NA': 15, 'I': 16, 'angle': 125.0, 'blen': 1.01, 'charge': 0.228, 'type': 'H'},
"NAMRES":'HISTIDINE EPSILON-H COO- ANION',
"HE":{'torsion': 180.0, 'tree': 'E', 'NC': 11, 'NB': 12, 'NA': 13, 'I': 14, 'angle': 120.0, 'blen': 1.09, 'charge': 0.036, 'type': 'HC'},
"HD":{'torsion': 180.0, 'tree': 'E', 'NC': 13, 'NB': 15, 'NA': 17, 'I': 18, 'angle': 120.0, 'blen': 1.09, 'charge': 0.114, 'type': 'HC'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'ND1', 'CE1', 'HE', 'NE2', 'HE2', 'CD2', 'HD', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"CD2":{'torsion': 0.0, 'tree': 'S', 'NC': 12, 'NB': 13, 'NA': 15, 'I': 17, 'angle': 110.0, 'blen': 1.36, 'charge': -0.184, 'type': 'CW'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 19, 'I': 20, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 115.0, 'blen': 1.51, 'charge': 0.251, 'type': 'CC'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.098, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 19, 'I': 21, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"loopList":[['CG', 'CD2']],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 19, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
"MET": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.027, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.027, 'type': 'HC'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 21, 'I': 22, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"SD":{'torsion': 180.0, 'tree': '3', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 14, 'angle': 110.0, 'blen': 1.81, 'charge': 0.737, 'type': 'S'},
"LP1":{'torsion': 60.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 15, 'angle': 96.7, 'blen': 0.679, 'charge': -0.381, 'type': 'LP'},
"HG3":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 109.5, 'blen': 1.09, 'charge': 0.0652, 'type': 'HC'},
"HG2":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 109.5, 'blen': 1.09, 'charge': 0.0652, 'type': 'HC'},
"INTX,KFORM":['INT', '1'],
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HE3":{'torsion': 300.0, 'tree': 'E', 'NC': 11, 'NB': 14, 'NA': 17, 'I': 20, 'angle': 109.5, 'blen': 1.09, 'charge': 0.0652, 'type': 'HC'},
"HE2":{'torsion': 180.0, 'tree': 'E', 'NC': 11, 'NB': 14, 'NA': 17, 'I': 19, 'angle': 109.5, 'blen': 1.09, 'charge': 0.0652, 'type': 'HC'},
"HE1":{'torsion': 60.0, 'tree': 'E', 'NC': 11, 'NB': 14, 'NA': 17, 'I': 18, 'angle': 109.5, 'blen': 1.09, 'charge': 0.0652, 'type': 'HC'},
"NAMRES":'METHIONINE COO- ANION',
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'SD', 'LP1', 'LP2', 'CE', 'HE1', 'HE2', 'HE3', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"CE":{'torsion': 180.0, 'tree': '3', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 17, 'angle': 100.0, 'blen': 1.78, 'charge': -0.134, 'type': 'CT'},
"CG":{'torsion': 180.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 109.47, 'blen': 1.525, 'charge': -0.054, 'type': 'CT'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.151, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 21, 'I': 23, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"LP2":{'torsion': 300.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 16, 'angle': 96.7, 'blen': 0.679, 'charge': -0.381, 'type': 'LP'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 21, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
"IDBGEN,IREST,ITYPF":['1', '1', '201'],
"ALA": { "HB2":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HB3":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB1', 'HB2', 'HB3', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HB1":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 12, 'I': 13, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.098, 'type': 'CT'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 12, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 12, 'I': 14, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'ALANINE COO- ANION',
},
"PHE": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"INTX,KFORM":['INT', '1'],
"HD2":{'torsion': 180.0, 'tree': 'E', 'NC': 16, 'NB': 18, 'NA': 20, 'I': 21, 'angle': 120.0, 'blen': 1.09, 'charge': 0.058, 'type': 'HC'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HD1":{'torsion': 0.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 13, 'angle': 120.0, 'blen': 1.09, 'charge': 0.058, 'type': 'HC'},
"HE1":{'torsion': 180.0, 'tree': 'E', 'NC': 11, 'NB': 12, 'NA': 14, 'I': 15, 'angle': 120.0, 'blen': 1.09, 'charge': 0.063, 'type': 'HC'},
"HE2":{'torsion': 180.0, 'tree': 'E', 'NC': 14, 'NB': 16, 'NA': 18, 'I': 19, 'angle': 120.0, 'blen': 1.09, 'charge': 0.063, 'type': 'HC'},
"CD2":{'torsion': 0.0, 'tree': 'S', 'NC': 14, 'NB': 16, 'NA': 18, 'I': 20, 'angle': 120.0, 'blen': 1.4, 'charge': -0.069, 'type': 'CA'},
"NAMRES":'PHENYLALANINE COO- ANION',
"CD1":{'torsion': 180.0, 'tree': 'B', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 120.0, 'blen': 1.4, 'charge': -0.069, 'type': 'CA'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'CD1', 'HD1', 'CE1', 'HE1', 'CZ', 'HZ', 'CE2', 'HE2', 'CD2', 'HD2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"CE1":{'torsion': 180.0, 'tree': 'B', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 14, 'angle': 120.0, 'blen': 1.4, 'charge': -0.059, 'type': 'CA'},
"CE2":{'torsion': 0.0, 'tree': 'B', 'NC': 12, 'NB': 14, 'NA': 16, 'I': 18, 'angle': 120.0, 'blen': 1.4, 'charge': -0.059, 'type': 'CA'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"CZ":{'torsion': 0.0, 'tree': 'B', 'NC': 11, 'NB': 12, 'NA': 14, 'I': 16, 'angle': 120.0, 'blen': 1.4, 'charge': -0.065, 'type': 'CA'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 22, 'I': 23, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 115.0, 'blen': 1.51, 'charge': 0.055, 'type': 'CA'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.098, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 22, 'I': 24, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"loopList":[['CG', 'CD2']],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 22, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"HZ":{'torsion': 180.0, 'tree': 'E', 'NC': 12, 'NB': 14, 'NA': 16, 'I': 17, 'angle': 120.0, 'blen': 1.09, 'charge': 0.062, 'type': 'HC'},
},
"CYX": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.0495, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.0495, 'type': 'HC'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'SG', 'LP1', 'LP2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"SG":{'torsion': 180.0, 'tree': 'B', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 116.0, 'blen': 1.81, 'charge': 0.824, 'type': 'S'},
"LP1":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 96.7, 'blen': 0.679, 'charge': -0.4045, 'type': 'LP'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 14, 'I': 15, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.098, 'type': 'CT'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"LP2":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 96.7, 'blen': 0.679, 'charge': -0.4045, 'type': 'LP'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 14, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 14, 'I': 16, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'CYSTINE(S-S BRIDGE) COO- ANION',
},
"PRO": { "HB2":{'torsion': 136.3, 'tree': 'E', 'NC': 5, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 109.5, 'blen': 1.09, 'charge': 0.061, 'type': 'HC'},
"HB3":{'torsion': 256.3, 'tree': 'E', 'NC': 5, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 109.5, 'blen': 1.09, 'charge': 0.061, 'type': 'HC'},
"impropTors":[['CA', 'OXT', 'C', 'O'], ['-M', 'CA', 'N', 'CD']],
"INTX,KFORM":['INT', '1'],
"HG3":{'torsion': 98.0, 'tree': 'E', 'NC': 4, 'NB': 5, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.063, 'type': 'HC'},
"HG2":{'torsion': 218.0, 'tree': 'E', 'NC': 4, 'NB': 5, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.063, 'type': 'HC'},
"CD":{'torsion': 356.1, 'tree': '3', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 126.1, 'blen': 1.458, 'charge': -0.012, 'type': 'CT'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"loopList":[['CA', 'CB']],
"HD2":{'torsion': 80.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 5, 'I': 6, 'angle': 109.5, 'blen': 1.09, 'charge': 0.06, 'type': 'HC'},
"HD3":{'torsion': 320.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 5, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.06, 'type': 'HC'},
"NAMRES":'PROLINE COO- ANION',
"atNameList":['N', 'CD', 'HD2', 'HD3', 'CG', 'HG2', 'HG3', 'CB', 'HB2', 'HB3', 'CA', 'HA', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HA":{'torsion': 81.1, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 14, 'I': 15, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 117.0, 'blen': 1.337, 'charge': -0.229, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 14, 'NA': 16, 'I': 17, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 200.1, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 5, 'I': 8, 'angle': 103.2, 'blen': 1.5, 'charge': -0.121, 'type': 'CT'},
"CA":{'torsion': 175.2, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 14, 'angle': 120.6, 'blen': 1.451, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 338.3, 'tree': 'B', 'NC': 4, 'NB': 5, 'NA': 8, 'I': 11, 'angle': 106.0, 'blen': 1.51, 'charge': -0.115, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 14, 'NA': 16, 'I': 18, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CUT":['0.00000'],
"C":{'torsion': 0.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 14, 'I': 16, 'angle': 111.1, 'blen': 1.522, 'charge': 0.438, 'type': 'C'},
},
"LYS": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HZ2":{'torsion': 180.0, 'tree': 'E', 'NC': 14, 'NB': 17, 'NA': 20, 'I': 22, 'angle': 109.47, 'blen': 1.01, 'charge': 0.294, 'type': 'H3'},
"HZ3":{'torsion': 300.0, 'tree': 'E', 'NC': 14, 'NB': 17, 'NA': 20, 'I': 23, 'angle': 109.47, 'blen': 1.01, 'charge': 0.294, 'type': 'H3'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"HZ1":{'torsion': 60.0, 'tree': 'E', 'NC': 14, 'NB': 17, 'NA': 20, 'I': 21, 'angle': 109.47, 'blen': 1.01, 'charge': 0.294, 'type': 'H3'},
"NZ":{'torsion': 180.0, 'tree': '3', 'NC': 11, 'NB': 14, 'NA': 17, 'I': 20, 'angle': 109.47, 'blen': 1.47, 'charge': -0.138, 'type': 'N3'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 24, 'I': 25, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"INTX,KFORM":['INT', '1'],
"HG3":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 109.5, 'blen': 1.09, 'charge': 0.116, 'type': 'HC'},
"HG2":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 109.5, 'blen': 1.09, 'charge': 0.116, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HE3":{'torsion': 60.0, 'tree': 'E', 'NC': 11, 'NB': 14, 'NA': 17, 'I': 19, 'angle': 109.5, 'blen': 1.09, 'charge': 0.098, 'type': 'HC'},
"HE2":{'torsion': 300.0, 'tree': 'E', 'NC': 11, 'NB': 14, 'NA': 17, 'I': 18, 'angle': 109.5, 'blen': 1.09, 'charge': 0.098, 'type': 'HC'},
"HD2":{'torsion': 300.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 15, 'angle': 109.5, 'blen': 1.09, 'charge': 0.122, 'type': 'HC'},
"HD3":{'torsion': 60.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 16, 'angle': 109.5, 'blen': 1.09, 'charge': 0.122, 'type': 'HC'},
"NAMRES":'LYSINE COO- ANION',
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'HD2', 'HD3', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2', 'HZ3', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"CD":{'torsion': 180.0, 'tree': '3', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 14, 'angle': 109.47, 'blen': 1.525, 'charge': -0.18, 'type': 'CT'},
"CE":{'torsion': 180.0, 'tree': '3', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 17, 'angle': 109.47, 'blen': 1.525, 'charge': -0.038, 'type': 'CT'},
"CG":{'torsion': 180.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 109.47, 'blen': 1.525, 'charge': -0.16, 'type': 'CT'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.098, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 24, 'I': 26, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 24, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
"NAMDBF":'db4.dat',
"SER": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.119, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.119, 'type': 'HC'},
"HG":{'torsion': 180.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 109.47, 'blen': 0.96, 'charge': 0.31, 'type': 'HO'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'OG', 'HG', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 13, 'I': 14, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': 0.018, 'type': 'CT'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"OG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 109.47, 'blen': 1.43, 'charge': -0.55, 'type': 'OH'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 13, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 13, 'I': 15, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'SERINE COO- ANION',
},
"ASP": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.071, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.071, 'type': 'HC'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'OD1', 'OD2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 14, 'I': 15, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.398, 'type': 'CT'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O'], ['CB', 'OD1', 'CG', 'OD2']],
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CG":{'torsion': 180.0, 'tree': 'B', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 109.47, 'blen': 1.527, 'charge': 0.714, 'type': 'C'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"OD2":{'torsion': 270.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 117.2, 'blen': 1.26, 'charge': -0.721, 'type': 'O2'},
"OD1":{'torsion': 90.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 117.2, 'blen': 1.26, 'charge': -0.721, 'type': 'O2'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 14, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 14, 'I': 16, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'ASPARTIC ACID COO- ANION',
},
"GLN": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"NE2":{'torsion': 180.0, 'tree': 'B', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 16, 'angle': 116.6, 'blen': 1.335, 'charge': -0.867, 'type': 'N'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O'], ['CG', 'NE2', 'CD', 'OE1'], ['CD', 'HE21', 'NE2', 'HE22']],
"INTX,KFORM":['INT', '1'],
"HG3":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 109.5, 'blen': 1.09, 'charge': 0.057, 'type': 'HC'},
"HG2":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 109.5, 'blen': 1.09, 'charge': 0.057, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"NAMRES":'GLUTAMINE COO- ANION',
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'OE1', 'NE2', 'HE21', 'HE22', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HE21":{'torsion': 180.0, 'tree': 'E', 'NC': 11, 'NB': 14, 'NA': 16, 'I': 17, 'angle': 119.8, 'blen': 1.01, 'charge': 0.344, 'type': 'H'},
"HE22":{'torsion': 0.0, 'tree': 'E', 'NC': 11, 'NB': 14, 'NA': 16, 'I': 18, 'angle': 119.8, 'blen': 1.01, 'charge': 0.344, 'type': 'H'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"CD":{'torsion': 180.0, 'tree': 'B', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 14, 'angle': 111.1, 'blen': 1.522, 'charge': 0.675, 'type': 'C'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 19, 'I': 20, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 109.47, 'blen': 1.525, 'charge': -0.102, 'type': 'CT'},
"OE1":{'torsion': 0.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 15, 'angle': 120.5, 'blen': 1.229, 'charge': -0.47, 'type': 'O'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.098, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 19, 'I': 21, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 19, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
"GLU": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.092, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.092, 'type': 'HC'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O'], ['CG', 'OE1', 'CD', 'OE2']],
"INTX,KFORM":['INT', '1'],
"HG3":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 13, 'angle': 109.5, 'blen': 1.09, 'charge': 0.071, 'type': 'HC'},
"HG2":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 109.5, 'blen': 1.09, 'charge': 0.071, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"OE2":{'torsion': 270.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 16, 'angle': 117.2, 'blen': 1.26, 'charge': -0.721, 'type': 'O2'},
"NAMRES":'GLUTAMIC ACID COO- ANION',
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'OE1', 'OE2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"CD":{'torsion': 180.0, 'tree': 'B', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 14, 'angle': 109.47, 'blen': 1.527, 'charge': 0.714, 'type': 'C'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 17, 'I': 18, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 109.47, 'blen': 1.51, 'charge': -0.398, 'type': 'CT'},
"OE1":{'torsion': 90.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 14, 'I': 15, 'angle': 117.2, 'blen': 1.26, 'charge': -0.721, 'type': 'O2'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.184, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 17, 'I': 19, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 17, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
"TRP": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.038, 'type': 'HC'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 26, 'I': 28, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"HZ2":{'torsion': 0.0, 'tree': 'E', 'NC': 14, 'NB': 16, 'NA': 17, 'I': 18, 'angle': 120.0, 'blen': 1.09, 'charge': 0.084, 'type': 'HC'},
"HZ3":{'torsion': 180.0, 'tree': 'E', 'NC': 17, 'NB': 19, 'NA': 21, 'I': 22, 'angle': 120.0, 'blen': 1.09, 'charge': 0.057, 'type': 'HC'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O'], ['CD1', 'CE2', 'NE1', 'HE1'], ['CE2', 'CH2', 'CZ2', 'HZ2'], ['CZ2', 'CZ3', 'CH2', 'HH2'], ['CH2', 'CE3', 'CZ3', 'HZ3'], ['CZ3', 'CD2', 'CE3', 'HE3']],
"CH2":{'torsion': 180.0, 'tree': 'B', 'NC': 14, 'NB': 16, 'NA': 17, 'I': 19, 'angle': 116.0, 'blen': 1.39, 'charge': -0.077, 'type': 'CA'},
"CZ3":{'torsion': 0.0, 'tree': 'B', 'NC': 16, 'NB': 17, 'NA': 19, 'I': 21, 'angle': 121.0, 'blen': 1.35, 'charge': -0.066, 'type': 'CA'},
"NE1":{'torsion': 180.0, 'tree': 'B', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 14, 'angle': 107.0, 'blen': 1.43, 'charge': -0.352, 'type': 'NA'},
"INTX,KFORM":['INT', '1'],
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HE3":{'torsion': 180.0, 'tree': 'E', 'NC': 19, 'NB': 21, 'NA': 23, 'I': 24, 'angle': 120.0, 'blen': 1.09, 'charge': 0.086, 'type': 'HC'},
"HD1":{'torsion': 0.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 13, 'angle': 120.0, 'blen': 1.09, 'charge': 0.093, 'type': 'HC'},
"HE1":{'torsion': 180.0, 'tree': 'E', 'NC': 11, 'NB': 12, 'NA': 14, 'I': 15, 'angle': 125.5, 'blen': 1.01, 'charge': 0.271, 'type': 'H'},
"NAMRES":'TRYPTOPHAN COO- ANION',
"CD1":{'torsion': 180.0, 'tree': 'B', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 127.0, 'blen': 1.34, 'charge': 0.044, 'type': 'CW'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'CD1', 'HD1', 'NE1', 'HE1', 'CE2', 'CZ2', 'HZ2', 'CH2', 'HH2', 'CZ3', 'HZ3', 'CE3', 'HE3', 'CD2', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"CD2":{'torsion': 0.0, 'tree': 'E', 'NC': 19, 'NB': 21, 'NA': 23, 'I': 25, 'angle': 117.0, 'blen': 1.4, 'charge': 0.146, 'type': 'CB'},
"CE2":{'torsion': 0.0, 'tree': 'S', 'NC': 11, 'NB': 12, 'NA': 14, 'I': 16, 'angle': 109.0, 'blen': 1.31, 'charge': 0.154, 'type': 'CN'},
"CE3":{'torsion': 0.0, 'tree': 'B', 'NC': 17, 'NB': 19, 'NA': 21, 'I': 23, 'angle': 122.0, 'blen': 1.41, 'charge': -0.173, 'type': 'CA'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 26, 'I': 27, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 115.0, 'blen': 1.51, 'charge': -0.135, 'type': 'C*'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.098, 'type': 'CT'},
"CZ2":{'torsion': 180.0, 'tree': 'B', 'NC': 12, 'NB': 14, 'NA': 16, 'I': 17, 'angle': 128.0, 'blen': 1.4, 'charge': -0.168, 'type': 'CA'},
"loopList":[['CG', 'CD2'], ['CE2', 'CD2']],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 26, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"HH2":{'torsion': 180.0, 'tree': 'E', 'NC': 16, 'NB': 17, 'NA': 19, 'I': 20, 'angle': 120.0, 'blen': 1.09, 'charge': 0.074, 'type': 'HC'},
},
"GLY": { "HA3":{'torsion': 60.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 109.5, 'blen': 1.09, 'charge': 0.032, 'type': 'HC'},
"atNameList":['N', 'H', 'CA', 'HA2', 'HA3', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HA2":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.032, 'type': 'HC'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 9, 'I': 10, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 9, 'angle': 110.4, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 9, 'I': 11, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'GLYCINE COO- ANION',
},
"THR": { "atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB', 'CG2', 'HG21', 'HG22', 'HG23', 'OG1', 'HG1', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HG23":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 10, 'I': 13, 'angle': 109.5, 'blen': 1.09, 'charge': 0.065, 'type': 'HC'},
"HB":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.082, 'type': 'HC'},
"HG22":{'torsion': 180.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 10, 'I': 12, 'angle': 109.5, 'blen': 1.09, 'charge': 0.065, 'type': 'HC'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': 0.17, 'type': 'CT'},
"HG1":{'torsion': 180.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 14, 'I': 15, 'angle': 109.47, 'blen': 0.96, 'charge': 0.31, 'type': 'HO'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"HG21":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 10, 'I': 11, 'angle': 109.5, 'blen': 1.09, 'charge': 0.065, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"INTX,KFORM":['INT', '1'],
"OG1":{'torsion': 60.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 14, 'angle': 109.47, 'blen': 1.43, 'charge': -0.55, 'type': 'OH'},
"CG2":{'torsion': 300.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.47, 'blen': 1.525, 'charge': -0.191, 'type': 'CT'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 16, 'I': 17, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 16, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 16, 'I': 18, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"NAMRES":'THREONINE COO- ANION',
},
"HIP": { "HB2":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.086, 'type': 'HC'},
"HB3":{'torsion': 60.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.5, 'blen': 1.09, 'charge': 0.086, 'type': 'HC'},
"NE2":{'torsion': 0.0, 'tree': 'B', 'NC': 11, 'NB': 12, 'NA': 14, 'I': 16, 'angle': 109.0, 'blen': 1.31, 'charge': -0.058, 'type': 'NA'},
"ND1":{'torsion': 180.0, 'tree': 'B', 'NC': 6, 'NB': 8, 'NA': 11, 'I': 12, 'angle': 122.0, 'blen': 1.39, 'charge': -0.058, 'type': 'NA'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O'], ['CG', 'CE1', 'ND1', 'HD1'], ['CE1', 'CD2', 'NE2', 'HE2']],
"CE1":{'torsion': 180.0, 'tree': 'B', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 14, 'angle': 108.0, 'blen': 1.32, 'charge': 0.114, 'type': 'CR'},
"INTX,KFORM":['INT', '1'],
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"HD1":{'torsion': 0.0, 'tree': 'E', 'NC': 8, 'NB': 11, 'NA': 12, 'I': 13, 'angle': 126.0, 'blen': 1.01, 'charge': 0.306, 'type': 'H'},
"HE2":{'torsion': 180.0, 'tree': 'E', 'NC': 12, 'NB': 14, 'NA': 16, 'I': 17, 'angle': 125.0, 'blen': 1.01, 'charge': 0.306, 'type': 'H'},
"NAMRES":'HISTIDINE PLUS COO-',
"HE":{'torsion': 180.0, 'tree': 'E', 'NC': 11, 'NB': 12, 'NA': 14, 'I': 15, 'angle': 120.0, 'blen': 1.09, 'charge': 0.158, 'type': 'HC'},
"HD":{'torsion': 180.0, 'tree': 'E', 'NC': 14, 'NB': 16, 'NA': 18, 'I': 19, 'angle': 120.0, 'blen': 1.09, 'charge': 0.153, 'type': 'HC'},
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB2', 'HB3', 'CG', 'ND1', 'HD1', 'CE1', 'HE', 'NE2', 'HE2', 'CD2', 'HD', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"CD2":{'torsion': 0.0, 'tree': 'S', 'NC': 12, 'NB': 14, 'NA': 16, 'I': 18, 'angle': 110.0, 'blen': 1.36, 'charge': -0.037, 'type': 'CW'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 20, 'I': 21, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CG":{'torsion': 180.0, 'tree': 'S', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 11, 'angle': 115.0, 'blen': 1.51, 'charge': 0.058, 'type': 'CC'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.098, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 20, 'I': 22, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"loopList":[['CG', 'CD2']],
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 20, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
"VAL": { "HG22":{'torsion': 180.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 14, 'I': 16, 'angle': 109.5, 'blen': 1.09, 'charge': 0.031, 'type': 'HC'},
"HG23":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 14, 'I': 17, 'angle': 109.5, 'blen': 1.09, 'charge': 0.031, 'type': 'HC'},
"HG21":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 14, 'I': 15, 'angle': 109.5, 'blen': 1.09, 'charge': 0.031, 'type': 'HC'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
"HG13":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 10, 'I': 13, 'angle': 109.5, 'blen': 1.09, 'charge': 0.031, 'type': 'HC'},
"HG12":{'torsion': 180.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 10, 'I': 12, 'angle': 109.5, 'blen': 1.09, 'charge': 0.031, 'type': 'HC'},
"HG11":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 10, 'I': 11, 'angle': 109.5, 'blen': 1.09, 'charge': 0.031, 'type': 'HC'},
"INTX,KFORM":['INT', '1'],
"CG2":{'torsion': 180.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 14, 'angle': 109.47, 'blen': 1.525, 'charge': -0.091, 'type': 'CT'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CG1":{'torsion': 60.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.47, 'blen': 1.525, 'charge': -0.091, 'type': 'CT'},
"NAMRES":'VALINE COO- ANION',
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB', 'CG1', 'HG11', 'HG12', 'HG13', 'CG2', 'HG21', 'HG22', 'HG23', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HB":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.024, 'type': 'HC'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 18, 'I': 19, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 111.1, 'blen': 1.525, 'charge': -0.012, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 18, 'I': 20, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 18, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
},
"ILE": { "HG22":{'torsion': 180.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 10, 'I': 12, 'angle': 109.5, 'blen': 1.09, 'charge': 0.029, 'type': 'HC'},
"HG23":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 10, 'I': 13, 'angle': 109.5, 'blen': 1.09, 'charge': 0.029, 'type': 'HC'},
"HG21":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 10, 'I': 11, 'angle': 109.5, 'blen': 1.09, 'charge': 0.029, 'type': 'HC'},
"HD13":{'torsion': 300.0, 'tree': 'E', 'NC': 8, 'NB': 14, 'NA': 17, 'I': 20, 'angle': 109.5, 'blen': 1.09, 'charge': 0.028, 'type': 'HC'},
"HG13":{'torsion': 60.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 14, 'I': 16, 'angle': 109.5, 'blen': 1.09, 'charge': 0.027, 'type': 'HC'},
"HG12":{'torsion': 300.0, 'tree': 'E', 'NC': 6, 'NB': 8, 'NA': 14, 'I': 15, 'angle': 109.5, 'blen': 1.09, 'charge': 0.027, 'type': 'HC'},
"INTX,KFORM":['INT', '1'],
"CG2":{'torsion': 60.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 10, 'angle': 109.47, 'blen': 1.525, 'charge': -0.085, 'type': 'CT'},
"IFIXC,IOMIT,ISYMDU,IPOS":['CORR', 'OMIT', 'DU', 'BEG'],
"CG1":{'torsion': 180.0, 'tree': '3', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 14, 'angle': 109.47, 'blen': 1.525, 'charge': -0.049, 'type': 'CT'},
"NAMRES":'ISOLEUCINE COO- ANION',
"atNameList":['N', 'H', 'CA', 'HA', 'CB', 'HB', 'CG2', 'HG21', 'HG22', 'HG23', 'CG1', 'HG12', 'HG13', 'CD1', 'HD11', 'HD12', 'HD13', 'C', 'O', 'OXT'],
"DUMM":[['1', 'DUMM', 'DU', 'M', '0', '-1', '-2', '0.000', '0.000', '0.000', '0.00000'], ['2', 'DUMM', 'DU', 'M', '1', '0', '-1', '1.449', '0.000', '0.000', '0.00000'], ['3', 'DUMM', 'DU', 'M', '2', '1', '0', '1.522', '111.100', '0.000', '0.00000']],
"HD11":{'torsion': 60.0, 'tree': 'E', 'NC': 8, 'NB': 14, 'NA': 17, 'I': 18, 'angle': 109.5, 'blen': 1.09, 'charge': 0.028, 'type': 'HC'},
"HD12":{'torsion': 180.0, 'tree': 'E', 'NC': 8, 'NB': 14, 'NA': 17, 'I': 19, 'angle': 109.5, 'blen': 1.09, 'charge': 0.028, 'type': 'HC'},
"HB":{'torsion': 300.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 8, 'I': 9, 'angle': 109.5, 'blen': 1.09, 'charge': 0.022, 'type': 'HC'},
"CD1":{'torsion': 180.0, 'tree': '3', 'NC': 6, 'NB': 8, 'NA': 14, 'I': 17, 'angle': 109.47, 'blen': 1.525, 'charge': -0.085, 'type': 'CT'},
"HA":{'torsion': 300.0, 'tree': 'E', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 7, 'angle': 109.5, 'blen': 1.09, 'charge': 0.048, 'type': 'HC'},
"N":{'torsion': 180.0, 'tree': 'M', 'NC': 1, 'NB': 2, 'NA': 3, 'I': 4, 'angle': 116.6, 'blen': 1.335, 'charge': -0.463, 'type': 'N'},
"O":{'torsion': 0.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 21, 'I': 22, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"H":{'torsion': 0.0, 'tree': 'E', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 5, 'angle': 119.8, 'blen': 1.01, 'charge': 0.252, 'type': 'H'},
"CA":{'torsion': 180.0, 'tree': 'M', 'NC': 2, 'NB': 3, 'NA': 4, 'I': 6, 'angle': 121.9, 'blen': 1.449, 'charge': 0.035, 'type': 'CT'},
"CB":{'torsion': 60.0, 'tree': '3', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 8, 'angle': 109.47, 'blen': 1.525, 'charge': -0.012, 'type': 'CT'},
"OXT":{'torsion': 180.0, 'tree': 'E', 'NC': 4, 'NB': 6, 'NA': 21, 'I': 23, 'angle': 120.5, 'blen': 1.229, 'charge': -0.706, 'type': 'O2'},
"CUT":['0.00000'],
"C":{'torsion': 180.0, 'tree': 'M', 'NC': 3, 'NB': 4, 'NA': 6, 'I': 21, 'angle': 111.1, 'blen': 1.522, 'charge': 0.524, 'type': 'C'},
"impropTors":[['-M', 'CA', 'N', 'H'], ['CA', 'OXT', 'C', 'O']],
},
"filename":'allct.in',
} | 116.274411 | 251 | 0.447768 |
678c13af2d3d4847271449c6ae5791e470d46e78 | 39,961 | py | Python | chi/_mechanistic_models.py | DavAug/erlotinib | 9d113257de52b56359ed6451ba7db455645315d1 | [
"BSD-3-Clause"
] | null | null | null | chi/_mechanistic_models.py | DavAug/erlotinib | 9d113257de52b56359ed6451ba7db455645315d1 | [
"BSD-3-Clause"
] | 221 | 2020-11-06T13:03:32.000Z | 2021-07-30T08:17:58.000Z | chi/_mechanistic_models.py | DavAug/erlotinib | 9d113257de52b56359ed6451ba7db455645315d1 | [
"BSD-3-Clause"
] | 1 | 2021-02-10T13:03:58.000Z | 2021-02-10T13:03:58.000Z | #
# This file is part of the chi repository
# (https://github.com/DavAug/chi/) which is released under the
# BSD 3-clause license. See accompanying LICENSE.md for copyright notice and
# full license details.
#
import copy
import myokit
import myokit.formats.sbml as sbml
import numpy as np
| 35.332449 | 79 | 0.620205 |
678d54c4f215c915ab40d1921519e8d17b0d89cd | 4,954 | py | Python | smartlicense/settings/__init__.py | coblo/smartlicense | 288b40496646c225716fa3bf5f43b48ff645b96c | [
"MIT"
] | 6 | 2018-04-26T05:54:29.000Z | 2021-04-03T05:08:46.000Z | smartlicense/settings/__init__.py | coblo/smartlicense | 288b40496646c225716fa3bf5f43b48ff645b96c | [
"MIT"
] | 13 | 2018-03-31T07:58:02.000Z | 2022-02-10T10:35:28.000Z | smartlicense/settings/__init__.py | coblo/smartlicense | 288b40496646c225716fa3bf5f43b48ff645b96c | [
"MIT"
] | 2 | 2019-06-13T21:42:21.000Z | 2021-04-03T05:09:02.000Z | # -*- coding: utf-8 -*-
"""
Django settings for smartlicense project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
from os.path import dirname, abspath, join
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = dirname(dirname(dirname(abspath(__file__))))
SCRATCH_DIR = join(BASE_DIR, '.scratch')
SCRACTH_DB = join(SCRATCH_DIR, 'scratch.sqlite3')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3ka4^(c+fm7rw+@ttete34bt6tv3^8=r1!*_*-ovp1vu&qi=a9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ADMINS = [('admin', 'admin@admin.org')]
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'martor',
'suit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_markup',
'django_object_actions',
'smartlicense.apps.SmartLicenseConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'smartlicense.urls'
MEDIA_ROOT = SCRATCH_DIR
MEDIA_URL = '/media/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [join(BASE_DIR, 'smartlicense', 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'smartlicense.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': SCRACTH_DB,
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# Mator
MARTOR_ENABLE_CONFIGS = {
'imgur': 'false', # to enable/disable imgur/custom uploader.
'mention': 'false', # to enable/disable mention
'jquery': 'true',
# to include/revoke jquery (require for admin default django)
}
# Custom project settings
NODE_IP = '127.0.0.1'
NODE_PORT = '9718'
NODE_USER = 'testuser'
NODE_PWD = 'testpassword'
STREAM_SMART_LICENSE = 'smart-license'
STREAM_SMART_LICENSE_ATTESTATION = 'smart-license'
STREAM_ISCC = 'iscc'
SUIT_CONFIG = {
'ADMIN_NAME': 'Smart License Demo',
'CONFIRM_UNSAVED_CHANGES': False,
'MENU_OPEN_FIRST_CHILD': True,
'SEARCH_URL': 'admin:smartlicense_mediacontent_changelist',
'LIST_PER_PAGE': 18,
'MENU': (
{'label': 'Smart Licenses', 'models': (
{'model': 'smartlicense.mediacontent'},
{'model': 'smartlicense.smartlicense'},
)},
{'label': 'Transactions', 'models': (
{'model': 'smartlicense.attestation'},
{'model': 'smartlicense.tokentransaction'},
)},
{'label': 'Configuration', 'models': (
{'model': 'smartlicense.template'},
{'model': 'smartlicense.rightsmodule'},
{'model': 'smartlicense.activationmode'},
)}
)
}
# Make sure deployment overrides settings
try:
from smartlicense.settings.config import *
except Exception:
print(
'No custom configuration found. Create a smartlicense/settings/config.py')
import sys
sys.exit(0)
| 27.370166 | 91 | 0.677836 |
678e1041a75c67c39856bfcf8a9561f7bd5138f9 | 2,226 | py | Python | firmwire/memory_map.py | j4s0n/FirmWire | d3a20e2429cb4827f538d1a16163afde8b45826b | [
"BSD-3-Clause"
] | null | null | null | firmwire/memory_map.py | j4s0n/FirmWire | d3a20e2429cb4827f538d1a16163afde8b45826b | [
"BSD-3-Clause"
] | null | null | null | firmwire/memory_map.py | j4s0n/FirmWire | d3a20e2429cb4827f538d1a16163afde8b45826b | [
"BSD-3-Clause"
] | null | null | null | ## Copyright (c) 2022, Team FirmWire
## SPDX-License-Identifier: BSD-3-Clause
from enum import Enum, auto
from .hw.soc import SOCPeripheral
| 28.177215 | 81 | 0.591195 |
678eb98334509fe0bad64239aa78922c47d0b166 | 1,688 | py | Python | src/resources/Land.py | noancloarec/mapisto-api | b2458f6b12b229babb116f906b3e4f7e8b7b8a71 | [
"MIT"
] | null | null | null | src/resources/Land.py | noancloarec/mapisto-api | b2458f6b12b229babb116f906b3e4f7e8b7b8a71 | [
"MIT"
] | 1 | 2020-07-08T07:12:31.000Z | 2020-07-08T07:12:31.000Z | src/resources/Land.py | noancloarec/mapisto-api | b2458f6b12b229babb116f906b3e4f7e8b7b8a71 | [
"MIT"
] | null | null | null | from .helper import fill_optional_fields
from maps_geometry.feature_extraction import get_bounding_box
from .MapistoShape import MapistoShape
from .BoundingBox import BoundingBox
| 35.166667 | 101 | 0.65936 |
6790c65796ad1cfbe5e6c6ab2a2c1453d34ad7fb | 298 | py | Python | reexercises/two_sum_target.py | R0bertWell/interview_questions | f8a65a842dfe03ac28c865bb8370422ff2071137 | [
"MIT"
] | null | null | null | reexercises/two_sum_target.py | R0bertWell/interview_questions | f8a65a842dfe03ac28c865bb8370422ff2071137 | [
"MIT"
] | null | null | null | reexercises/two_sum_target.py | R0bertWell/interview_questions | f8a65a842dfe03ac28c865bb8370422ff2071137 | [
"MIT"
] | null | null | null | from typing import List
print(two_sum([1, 2, 3, 4, 5, 6], 7))
| 19.866667 | 41 | 0.553691 |
6792c61e36032efcbcd6f3d46a42dbabd2400582 | 1,032 | py | Python | vue/decorators/base.py | adamlwgriffiths/vue.py | f4256454256ddfe54a8be6dea493d3fc915ef1a2 | [
"MIT"
] | 274 | 2018-07-07T00:57:17.000Z | 2022-03-22T23:49:53.000Z | vue/decorators/base.py | adamlwgriffiths/vue.py | f4256454256ddfe54a8be6dea493d3fc915ef1a2 | [
"MIT"
] | 25 | 2018-11-24T17:19:44.000Z | 2022-03-23T22:30:18.000Z | vue/decorators/base.py | adamlwgriffiths/vue.py | f4256454256ddfe54a8be6dea493d3fc915ef1a2 | [
"MIT"
] | 18 | 2019-07-04T07:18:18.000Z | 2022-03-22T23:49:55.000Z | from vue.bridge import Object
import javascript
| 28.666667 | 74 | 0.602713 |
679841fb13e9e1b6f465dd6a052897627ff56964 | 40,992 | py | Python | datalabeling/google/cloud/datalabeling_v1beta1/proto/data_labeling_service_pb2_grpc.py | DaveCheez/google-cloud-python | fc03d4d41f13e9d13db7206438163b3a471fdabd | [
"Apache-2.0"
] | 2 | 2021-11-26T07:08:43.000Z | 2022-03-07T20:20:04.000Z | datalabeling/google/cloud/datalabeling_v1beta1/proto/data_labeling_service_pb2_grpc.py | DaveCheez/google-cloud-python | fc03d4d41f13e9d13db7206438163b3a471fdabd | [
"Apache-2.0"
] | 6 | 2019-05-27T22:05:58.000Z | 2019-08-05T16:46:16.000Z | datalabeling/google/cloud/datalabeling_v1beta1/proto/data_labeling_service_pb2_grpc.py | DaveCheez/google-cloud-python | fc03d4d41f13e9d13db7206438163b3a471fdabd | [
"Apache-2.0"
] | 1 | 2019-03-29T18:26:16.000Z | 2019-03-29T18:26:16.000Z | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.datalabeling_v1beta1.proto import (
annotation_spec_set_pb2 as google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_annotation__spec__set__pb2,
)
from google.cloud.datalabeling_v1beta1.proto import (
data_labeling_service_pb2 as google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2,
)
from google.cloud.datalabeling_v1beta1.proto import (
dataset_pb2 as google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2,
)
from google.cloud.datalabeling_v1beta1.proto import (
evaluation_job_pb2 as google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__job__pb2,
)
from google.cloud.datalabeling_v1beta1.proto import (
evaluation_pb2 as google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__pb2,
)
from google.cloud.datalabeling_v1beta1.proto import (
instruction_pb2 as google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_instruction__pb2,
)
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
def add_DataLabelingServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
"CreateDataset": grpc.unary_unary_rpc_method_handler(
servicer.CreateDataset,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.CreateDatasetRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2.Dataset.SerializeToString,
),
"GetDataset": grpc.unary_unary_rpc_method_handler(
servicer.GetDataset,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetDatasetRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2.Dataset.SerializeToString,
),
"ListDatasets": grpc.unary_unary_rpc_method_handler(
servicer.ListDatasets,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListDatasetsRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListDatasetsResponse.SerializeToString,
),
"DeleteDataset": grpc.unary_unary_rpc_method_handler(
servicer.DeleteDataset,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.DeleteDatasetRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"ImportData": grpc.unary_unary_rpc_method_handler(
servicer.ImportData,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ImportDataRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"ExportData": grpc.unary_unary_rpc_method_handler(
servicer.ExportData,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ExportDataRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"GetDataItem": grpc.unary_unary_rpc_method_handler(
servicer.GetDataItem,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetDataItemRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2.DataItem.SerializeToString,
),
"ListDataItems": grpc.unary_unary_rpc_method_handler(
servicer.ListDataItems,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListDataItemsRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListDataItemsResponse.SerializeToString,
),
"GetAnnotatedDataset": grpc.unary_unary_rpc_method_handler(
servicer.GetAnnotatedDataset,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetAnnotatedDatasetRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2.AnnotatedDataset.SerializeToString,
),
"ListAnnotatedDatasets": grpc.unary_unary_rpc_method_handler(
servicer.ListAnnotatedDatasets,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListAnnotatedDatasetsRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListAnnotatedDatasetsResponse.SerializeToString,
),
"DeleteAnnotatedDataset": grpc.unary_unary_rpc_method_handler(
servicer.DeleteAnnotatedDataset,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.DeleteAnnotatedDatasetRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"LabelImage": grpc.unary_unary_rpc_method_handler(
servicer.LabelImage,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.LabelImageRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"LabelVideo": grpc.unary_unary_rpc_method_handler(
servicer.LabelVideo,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.LabelVideoRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"LabelText": grpc.unary_unary_rpc_method_handler(
servicer.LabelText,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.LabelTextRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"GetExample": grpc.unary_unary_rpc_method_handler(
servicer.GetExample,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetExampleRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_dataset__pb2.Example.SerializeToString,
),
"ListExamples": grpc.unary_unary_rpc_method_handler(
servicer.ListExamples,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListExamplesRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListExamplesResponse.SerializeToString,
),
"CreateAnnotationSpecSet": grpc.unary_unary_rpc_method_handler(
servicer.CreateAnnotationSpecSet,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.CreateAnnotationSpecSetRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_annotation__spec__set__pb2.AnnotationSpecSet.SerializeToString,
),
"GetAnnotationSpecSet": grpc.unary_unary_rpc_method_handler(
servicer.GetAnnotationSpecSet,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetAnnotationSpecSetRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_annotation__spec__set__pb2.AnnotationSpecSet.SerializeToString,
),
"ListAnnotationSpecSets": grpc.unary_unary_rpc_method_handler(
servicer.ListAnnotationSpecSets,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListAnnotationSpecSetsRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListAnnotationSpecSetsResponse.SerializeToString,
),
"DeleteAnnotationSpecSet": grpc.unary_unary_rpc_method_handler(
servicer.DeleteAnnotationSpecSet,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.DeleteAnnotationSpecSetRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"CreateInstruction": grpc.unary_unary_rpc_method_handler(
servicer.CreateInstruction,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.CreateInstructionRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
"GetInstruction": grpc.unary_unary_rpc_method_handler(
servicer.GetInstruction,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetInstructionRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_instruction__pb2.Instruction.SerializeToString,
),
"ListInstructions": grpc.unary_unary_rpc_method_handler(
servicer.ListInstructions,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListInstructionsRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListInstructionsResponse.SerializeToString,
),
"DeleteInstruction": grpc.unary_unary_rpc_method_handler(
servicer.DeleteInstruction,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.DeleteInstructionRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"GetEvaluation": grpc.unary_unary_rpc_method_handler(
servicer.GetEvaluation,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetEvaluationRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__pb2.Evaluation.SerializeToString,
),
"SearchEvaluations": grpc.unary_unary_rpc_method_handler(
servicer.SearchEvaluations,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.SearchEvaluationsRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.SearchEvaluationsResponse.SerializeToString,
),
"SearchExampleComparisons": grpc.unary_unary_rpc_method_handler(
servicer.SearchExampleComparisons,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.SearchExampleComparisonsRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.SearchExampleComparisonsResponse.SerializeToString,
),
"CreateEvaluationJob": grpc.unary_unary_rpc_method_handler(
servicer.CreateEvaluationJob,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.CreateEvaluationJobRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__job__pb2.EvaluationJob.SerializeToString,
),
"UpdateEvaluationJob": grpc.unary_unary_rpc_method_handler(
servicer.UpdateEvaluationJob,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.UpdateEvaluationJobRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__job__pb2.EvaluationJob.SerializeToString,
),
"GetEvaluationJob": grpc.unary_unary_rpc_method_handler(
servicer.GetEvaluationJob,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.GetEvaluationJobRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_evaluation__job__pb2.EvaluationJob.SerializeToString,
),
"PauseEvaluationJob": grpc.unary_unary_rpc_method_handler(
servicer.PauseEvaluationJob,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.PauseEvaluationJobRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"ResumeEvaluationJob": grpc.unary_unary_rpc_method_handler(
servicer.ResumeEvaluationJob,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ResumeEvaluationJobRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"DeleteEvaluationJob": grpc.unary_unary_rpc_method_handler(
servicer.DeleteEvaluationJob,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.DeleteEvaluationJobRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"ListEvaluationJobs": grpc.unary_unary_rpc_method_handler(
servicer.ListEvaluationJobs,
request_deserializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListEvaluationJobsRequest.FromString,
response_serializer=google_dot_cloud_dot_datalabeling__v1beta1_dot_proto_dot_data__labeling__service__pb2.ListEvaluationJobsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.cloud.datalabeling.v1beta1.DataLabelingService", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
| 63.553488 | 169 | 0.782616 |
67988b46e3108d80c389257b2f89c3e8f006472d | 6,777 | py | Python | keras_version/utils.py | nunu0910/BiO-Net | 2038eadb16f200c4e9de8346af5e3d23422eb438 | [
"MIT"
] | 44 | 2020-07-07T06:40:13.000Z | 2022-03-24T10:15:39.000Z | keras_version/utils.py | nunu0910/BiO-Net | 2038eadb16f200c4e9de8346af5e3d23422eb438 | [
"MIT"
] | 12 | 2020-11-18T01:27:08.000Z | 2021-09-22T08:19:14.000Z | keras_version/utils.py | nunu0910/BiO-Net | 2038eadb16f200c4e9de8346af5e3d23422eb438 | [
"MIT"
] | 14 | 2020-07-26T14:10:09.000Z | 2021-11-18T23:20:44.000Z | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import keras
from keras.models import Model, load_model
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) # mute deprecation warnings
from keras.optimizers import Adam, SGD
from tensorflow import ConfigProto
from tensorflow import InteractiveSession
import numpy as np
import sys
from PIL import Image
import argparse
from matplotlib import pyplot as plt
from .dataloader import *
from .model import *
from .metrics import *
| 32.425837 | 182 | 0.703261 |
6798bb647c9031d2653050d76cd3f241dd42a5cd | 2,734 | py | Python | sdk/python/pulumi_azure_native/batch/__init__.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/batch/__init__.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/batch/__init__.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .application import *
from .application_package import *
from .batch_account import *
from .certificate import *
from .get_application import *
from .get_application_package import *
from .get_batch_account import *
from .get_certificate import *
from .get_pool import *
from .list_batch_account_keys import *
from .pool import *
from ._inputs import *
from . import outputs
# Make subpackages available:
if typing.TYPE_CHECKING:
import pulumi_azure_native.batch.v20151201 as __v20151201
v20151201 = __v20151201
import pulumi_azure_native.batch.v20170101 as __v20170101
v20170101 = __v20170101
import pulumi_azure_native.batch.v20170501 as __v20170501
v20170501 = __v20170501
import pulumi_azure_native.batch.v20170901 as __v20170901
v20170901 = __v20170901
import pulumi_azure_native.batch.v20181201 as __v20181201
v20181201 = __v20181201
import pulumi_azure_native.batch.v20190401 as __v20190401
v20190401 = __v20190401
import pulumi_azure_native.batch.v20190801 as __v20190801
v20190801 = __v20190801
import pulumi_azure_native.batch.v20200301 as __v20200301
v20200301 = __v20200301
import pulumi_azure_native.batch.v20200501 as __v20200501
v20200501 = __v20200501
import pulumi_azure_native.batch.v20200901 as __v20200901
v20200901 = __v20200901
import pulumi_azure_native.batch.v20210101 as __v20210101
v20210101 = __v20210101
import pulumi_azure_native.batch.v20210601 as __v20210601
v20210601 = __v20210601
else:
v20151201 = _utilities.lazy_import('pulumi_azure_native.batch.v20151201')
v20170101 = _utilities.lazy_import('pulumi_azure_native.batch.v20170101')
v20170501 = _utilities.lazy_import('pulumi_azure_native.batch.v20170501')
v20170901 = _utilities.lazy_import('pulumi_azure_native.batch.v20170901')
v20181201 = _utilities.lazy_import('pulumi_azure_native.batch.v20181201')
v20190401 = _utilities.lazy_import('pulumi_azure_native.batch.v20190401')
v20190801 = _utilities.lazy_import('pulumi_azure_native.batch.v20190801')
v20200301 = _utilities.lazy_import('pulumi_azure_native.batch.v20200301')
v20200501 = _utilities.lazy_import('pulumi_azure_native.batch.v20200501')
v20200901 = _utilities.lazy_import('pulumi_azure_native.batch.v20200901')
v20210101 = _utilities.lazy_import('pulumi_azure_native.batch.v20210101')
v20210601 = _utilities.lazy_import('pulumi_azure_native.batch.v20210601')
| 43.396825 | 80 | 0.793343 |
6799287dad3bb8281070f0e2070fafa75ab7324c | 1,853 | py | Python | setup.py | tgolsson/appJar | 5e2f8bff44e927e7c2bae17fccddc6dbf79952f0 | [
"Apache-2.0"
] | 666 | 2016-11-14T18:17:40.000Z | 2022-03-29T03:53:22.000Z | setup.py | tgolsson/appJar | 5e2f8bff44e927e7c2bae17fccddc6dbf79952f0 | [
"Apache-2.0"
] | 598 | 2016-10-20T21:04:09.000Z | 2022-03-15T22:44:49.000Z | setup.py | tgolsson/appJar | 5e2f8bff44e927e7c2bae17fccddc6dbf79952f0 | [
"Apache-2.0"
] | 95 | 2017-01-19T12:23:58.000Z | 2022-03-06T18:16:21.000Z | from setuptools import setup, find_packages
__name__ = "appJar"
__version__ = "0.94.0"
__author__ = "Richard Jarvis"
__desc__ = "An easy-to-use, feature-rich GUI wrapper for tKinter. Designed specifically for use in the classroom, but powerful enough to be used anywhere."
__author_email__ = "info@appjar.info"
__license__ = "Apache 2.0"
__url__ = "http://appJar.info"
__keywords__ = ["python", "gui", "tkinter", "appJar", "interface"]
__packages__= ["appJar"]
__classifiers__ = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Education',
'Topic :: Software Development',
'Topic :: Software Development :: User Interfaces',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: Apache Software License',
]
__long_description__ = """# appJar
Simple tKinter GUIs in Python.
"""
setup(
name=__name__,
packages=__packages__,
version=__version__,
description=__desc__,
long_description=__long_description__,
long_description_content_type="text/markdown",
author=__author__,
author_email=__author_email__,
url=__url__,
keywords=__keywords__,
license=__license__,
classifiers=__classifiers__,
package_data = {
"appJar": ["lib/*.py", "lib/*.txt", "lib/tkdnd2.8/*.tcl", "lib/tkdnd2.8/tcl_files/*.tcl", "lib/tkdnd2.8/tcl_libs/*", "resources/icons/*", "examples/showcase.py", "PYPI.md"]
}
)
| 37.06 | 180 | 0.658392 |
67995960cafd98e838927288e205c58078f19735 | 141,788 | py | Python | pertama/andir.py | alitkurniawan48/BelajarGIS | c52556bc6fa74b849b9c3461410805807b742967 | [
"MIT"
] | 2 | 2020-02-09T14:47:07.000Z | 2020-02-09T14:47:12.000Z | pertama/andir.py | alitkurniawan48/BelajarGIS | c52556bc6fa74b849b9c3461410805807b742967 | [
"MIT"
] | 12 | 2019-12-11T06:45:59.000Z | 2020-01-06T09:35:35.000Z | pertama/andir.py | alitkurniawan48/BelajarGIS | c52556bc6fa74b849b9c3461410805807b742967 | [
"MIT"
] | 71 | 2019-12-09T13:52:54.000Z | 2021-05-28T16:19:09.000Z | import shapefile
| 36.383885 | 64 | 0.471471 |
679d339786e1a3d3431ad8eb7251f79813420fa0 | 8,226 | py | Python | sparseconvnet/utils.py | THU-luvision/Occuseg | 163e1fba6f5d9afd4ee2a4202118bc81d8f7c5e4 | [
"BSD-3-Clause"
] | 1 | 2022-03-29T18:26:11.000Z | 2022-03-29T18:26:11.000Z | sparseconvnet/utils.py | THU-luvision/Occuseg | 163e1fba6f5d9afd4ee2a4202118bc81d8f7c5e4 | [
"BSD-3-Clause"
] | null | null | null | sparseconvnet/utils.py | THU-luvision/Occuseg | 163e1fba6f5d9afd4ee2a4202118bc81d8f7c5e4 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch, glob, os
from .sparseConvNetTensor import SparseConvNetTensor
from .metadata import Metadata
import sparseconvnet as scn
import pdb
| 35.153846 | 155 | 0.600049 |
679e2250d3e4704bdc0cc067419d5a8f3eb454fa | 12,983 | py | Python | python-numpy-lists/numpylists.py | tosinayanda/python-starter-kit | 9faee168ff82e46b6ef8102ae72ea936fd099961 | [
"MIT"
] | null | null | null | python-numpy-lists/numpylists.py | tosinayanda/python-starter-kit | 9faee168ff82e46b6ef8102ae72ea936fd099961 | [
"MIT"
] | null | null | null | python-numpy-lists/numpylists.py | tosinayanda/python-starter-kit | 9faee168ff82e46b6ef8102ae72ea936fd099961 | [
"MIT"
] | null | null | null | #
import numpy as np
#create numpy arrays
#
#Generate array
height=np.round(np.random.normal(1.75,0.20,5000),2)
weight=np.round(np.random.normal(60.32,15,5000),2)
np_city=np.column_stack((height,weight))
print(np_city.shape)
cars=["Toyota","Chevrolet","Ford","Honda","Brabus"]
cars_np=np.array(cars)
weight=[20.12,20.12,20.12,20.12,20.12,20.12,20.12,20.12,20.12,20.12,20.12,23,23,23,23,23,23,23,23,23,23,23,23,23,
23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,
23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,
23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,
23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23]
baseball=[[74, 180], [74, 215], [72, 210], [72, 210], [73, 188], [69, 176], [69, 209], [71, 200], [76, 231], [71, 180], [73, 188], [73, 180], [74, 185], [74, 160], [69, 180], [70, 185], [73, 189], [75, 185], [78, 219], [79, 230], [76, 205], [74, 230], [76, 195], [72, 180], [71, 192], [75, 225], [77, 203], [74, 195], [73, 182], [74, 188], [78, 200], [73, 180], [75, 200], [73, 200], [75, 245], [75, 240], [74, 215], [69, 185], [71, 175], [74, 199], [73, 200], [73, 215], [76, 200], [74, 205], [74, 206], [70, 186], [72, 188], [77, 220], [74, 210], [70, 195], [73, 200], [75, 200], [76, 212], [76, 224], [78, 210], [74, 205], [74, 220], [76, 195], [77, 200], [81, 260], [78, 228], [75, 270], [77, 200], [75, 210], [76, 190], [74, 220], [72, 180], [72, 205], [75, 210], [73, 220], [73, 211], [73, 200], [70, 180], [70, 190], [70, 170], [76, 230], [68, 155], [71, 185], [72, 185], [75, 200], [75, 225], [75, 225], [75, 220], [68, 160], [74, 205], [78, 235], [71, 250], [73, 210], [76, 190], [74, 160], [74, 200], [79, 205], [75, 222], [73, 195], [76, 205], [74, 220], [74, 220], [73, 170], [72, 185], [74, 195], [73, 220], [74, 230], [72, 180], [73, 220], [69, 180], [72, 180], [73, 170], [75, 210], [75, 215], [73, 200], [72, 213], [72, 180], [76, 192], [74, 235], [72, 185], [77, 235], [74, 210], [77, 222], [75, 210], [76, 230], [80, 220], [74, 180], [74, 190], [75, 200], [78, 210], [73, 194], [73, 180], [74, 190], [75, 240], [76, 200], [71, 198], [73, 200], [74, 195], [76, 210], [76, 220], [74, 190], [73, 210], [74, 225], [70, 180], [72, 185], [73, 170], [73, 185], [73, 185], [73, 180], [71, 178], [74, 175], [74, 200], [72, 204], [74, 211], [71, 190], [74, 210], [73, 190], [75, 190], [75, 185], [79, 290], [73, 175], [75, 185], [76, 200], [74, 220], [76, 170], [78, 220], [74, 190], [76, 220], [72, 205], [74, 200], [76, 250],
[74, 225], [75, 215], [78, 210], [75, 215], [72, 195], [74, 200], [72, 194], [74, 220], [70, 180], [71, 180], [70, 170], [75, 195], [71, 180], [71, 170], [73, 206], [72, 205], [71, 200], [73, 225], [72, 201], [75, 225], [74, 233], [74, 180], [75, 225], [73, 180], [77, 220], [73, 180], [76, 237], [75, 215], [74, 190], [76, 235], [75, 190], [73, 180], [71, 165], [76, 195], [75, 200], [72, 190], [71, 190], [77, 185], [73, 185], [74, 205], [71, 190], [72, 205], [74, 206], [75, 220], [73, 208], [72, 170], [75, 195], [75, 210], [74, 190], [72, 211], [74, 230], [71, 170], [70, 185], [74, 185], [77, 241], [77, 225], [75, 210], [75, 175], [78, 230], [75, 200], [76, 215], [73, 198], [75, 226], [75, 278], [79, 215], [77, 230], [76, 240], [71, 184], [75, 219], [74, 170], [69, 218], [71, 190], [76, 225], [72, 220], [72, 176], [70, 190], [72, 197], [73, 204], [71, 167], [72, 180], [71, 195], [73, 220], [72, 215], [73, 185], [74, 190], [74, 205], [72, 205], [75, 200], [74, 210], [74, 215], [77, 200], [75, 205], [73, 211], [72, 190], [71, 208], [74, 200], [77, 210], [75, 232], [75, 230], [75, 210], [78, 220], [78, 210], [74, 202], [76, 212], [78, 225], [76, 170], [70, 190], [72, 200], [80, 237], [74, 220], [74, 170], [71, 193], [70, 190], [72, 150], [71, 220], [74, 200], [71, 190], [72, 185], [71, 185], [74, 200], [69, 172], [76, 220], [75, 225], [75, 190], [76, 195], [73, 219], [76, 190], [73, 197], [77, 200], [73, 195], [72, 210], [72, 177], [77, 220], [77, 235], [71, 180], [74, 195], [74, 195], [73, 190], [78, 230], [75, 190], [73, 200], [70, 190], [74, 190], [72, 200], [73, 200], [73, 184], [75, 200], [75, 180], [74, 219], [76, 187], [73, 200], [74, 220], [75, 205], [75, 190], [72, 170], [73, 160], [73, 215], [72, 175], [74, 205], [78, 200], [76, 214], [73, 200], [74, 190], [75, 180], [70, 205], [75, 220], [71, 190], [72, 215], [78, 235], [75, 191], [73, 200], [73, 181], [71, 200], [75, 210], [77, 240], [72, 185], [69, 165], [73, 190], [74, 185], [72, 175], [70, 155], [75, 
210], [70, 170], [72, 175], [72, 220], [74, 210], [73, 205], [74, 200], [76, 205], [75, 195], [80, 240], [72, 150], [75, 200], [73, 215], [74, 202], [74, 200], [73, 190], [75, 205], [75, 190], [71, 160], [73, 215], [75, 185], [74, 200], [74, 190], [72, 210], [74, 185], [74, 220], [74, 190], [73, 202], [76, 205], [75, 220], [72, 175], [73, 160], [73, 190], [73, 200], [72, 229], [72, 206], [72, 220], [72, 180], [71, 195], [75, 175], [75, 188], [74, 230], [73, 190], [75, 200], [79, 190], [74, 219], [76, 235], [73, 180], [74, 180], [74, 180], [72, 200], [74, 234], [74, 185], [75, 220], [78, 223], [74, 200], [74, 210], [74, 200], [77, 210], [70, 190], [73, 177], [74, 227], [73, 180], [71, 195], [75, 199], [71, 175], [72, 185], [77, 240], [74, 210], [70, 180], [77, 194], [73, 225], [72, 180], [76, 205], [71, 193], [76, 230], [78, 230], [75, 220], [73, 200], [78, 249], [74, 190], [79, 208], [75, 245], [76, 250],
[72, 160], [75, 192], [75, 220], [70, 170], [72, 197], [70, 155], [74, 190], [71, 200], [76, 220], [73, 210], [76, 228], [71, 190], [69, 160], [72, 184], [72, 180], [69, 180], [73, 200], [69, 176], [73, 160], [74, 222], [74, 211], [72, 195], [71, 200], [72, 175], [72, 206], [76, 240], [76, 185], [76, 260], [74, 185], [76, 221], [75, 205], [71, 200], [72, 170], [71, 201], [73, 205], [75, 185], [76, 205], [75, 245], [71, 220], [75, 210], [74, 220], [72, 185], [73, 175], [73, 170], [73, 180], [73, 200], [76, 210], [72, 175], [76, 220], [73, 206], [73, 180], [73, 210], [75, 195], [75, 200], [77, 200], [73, 164], [72, 180], [75, 220], [70, 195], [74, 205], [72, 170], [80, 240], [71, 210], [71, 195], [74, 200], [74, 205], [73, 192], [75, 190], [76, 170], [73, 240], [77, 200], [72, 205], [73, 175], [77, 250], [76, 220], [71, 224], [75, 210], [73, 195], [74, 180], [77, 245], [71, 175], [72, 180], [73, 215], [69, 175], [73, 180], [70, 195], [74, 230], [76, 230], [73, 205], [73, 215], [75, 195], [73, 180], [79, 205], [74, 180], [73, 190], [74, 180], [77, 190], [75, 190], [74, 220], [73, 210], [77, 255], [73, 190], [77, 230], [74, 200], [74, 205], [73, 210], [77, 225], [74, 215], [77, 220], [75, 205], [77, 200], [75, 220], [71, 197], [74, 225], [70, 187], [79, 245], [72, 185], [72, 185], [70, 175], [74, 200], [74, 180], [72, 188], [73, 225], [72, 200], [74, 210], [74, 245], [76, 213], [82, 231], [74, 165], [74, 228], [70, 210], [73, 250], [73, 191], [74, 190], [77, 200], [72, 215], [76, 254], [73, 232], [73, 180], [72, 215], [74, 220], [74, 180], [71, 200], [72, 170], [75, 195], [74, 210], [74, 200], [77, 220], [70, 165], [71, 180], [73, 200], [76, 200], [71, 170], [75, 224], [74, 220], [72, 180], [76, 198], [79, 240], [76, 239], [73, 185], [76, 210], [78, 220], [75, 200], [76, 195], [72, 220], [72, 230], [73, 170], [73, 220], [75, 230], [71, 165], [76, 205], [70, 192], [75, 210], [74, 205], [75, 200], [73, 210], [71, 185], [71, 195], [72, 202], [73, 205], [73, 195], [72, 
180], [69, 200], [73, 185], [78, 240], [71, 185], [73, 220], [75, 205], [76, 205], [70, 180], [74, 201], [77, 190], [75, 208], [79, 240], [72, 180], [77, 230], [73, 195], [75, 215], [75, 190], [75, 195], [73, 215], [73, 215], [76, 220], [77, 220], [75, 230], [70, 195], [71, 190], [71, 195], [75, 209], [74, 204], [69, 170], [70, 185], [75, 205], [72, 175], [75, 210], [73, 190], [72, 180], [72, 180], [72, 160], [76, 235], [75, 200], [74, 210], [69, 180], [73, 190], [72, 197], [72, 203], [75, 205], [77, 170], [76, 200], [80, 250], [77, 200], [76, 220], [79, 200], [71, 190], [75, 170], [73, 190], [76, 220], [77, 215], [73, 206], [76, 215], [70, 185], [75, 235], [73, 188], [75, 230], [70, 195], [69, 168], [71, 190], [72, 160], [72, 200], [73, 200], [70, 189], [70, 180], [73, 190], [76, 200], [75, 220], [72, 187], [73, 240], [79, 190], [71, 180], [72, 185], [74, 210], [74, 220], [74, 219], [72, 190], [76, 193], [76, 175], [72, 180], [72, 215], [71, 210], [72, 200], [72, 190], [70, 185], [77, 220], [74, 170], [72, 195], [76, 205], [71, 195], [76, 210], [71, 190], [73, 190], [70, 180], [73, 220], [73, 190], [72, 186], [71, 185], [71, 190], [71, 180], [72, 190], [72, 170], [74, 210], [74, 240], [74, 220], [71, 180], [72, 210], [75, 210], [72, 195], [71, 160], [72, 180], [72, 205], [72, 200], [72, 185], [74, 245], [74, 190], [77, 210], [75, 200], [73, 200], [75, 222], [73, 215], [76, 240], [72, 170], [77, 220], [75, 156], [72, 190], [71, 202], [71, 221], [75, 200], [72, 190], [73, 210], [73, 190], [71, 200], [70, 165], [75, 190], [71, 185], [76, 230], [73, 208], [68, 209], [71, 175], [72, 180], [74, 200], [77, 205], [72, 200], [76, 250], [78, 210], [81, 230], [72, 244], [73, 202], [76, 240], [72, 200], [72, 215], [74, 177], [76, 210], [73, 170], [76, 215], [75, 217], [70, 198], [71, 200], [74, 220], [72, 170], [73, 200], [76, 230], [76, 231], [73, 183], [71, 192], [68, 167], [71, 190], [71, 180], [74, 180], [77, 215], [69, 160], [72, 205], [76, 223], [75, 175], [76, 170], 
[75, 190], [76, 240], [72, 175], [74, 230], [76, 223], [74, 196], [72, 167], [75, 195], [78, 190], [77, 250], [70, 190], [72, 190], [79, 190], [74, 170], [71, 160], [68, 150], [77, 225], [75, 220], [71, 209], [72, 210], [70, 176], [72, 260], [72, 195], [73, 190], [72, 184], [74, 180], [72, 195], [72, 195], [75, 219], [72, 225], [73, 212], [74, 202], [72, 185], [78, 200], [75, 209], [72, 200], [74, 195], [75, 228], [75, 210], [76, 190], [74, 212], [74, 190], [73, 218], [74, 220], [71, 190], [74, 235], [75, 210], [76, 200], [74, 188], [76, 210], [76, 235], [73, 188], [75, 215], [75, 216], [74, 220], [68, 180], [72, 185], [75, 200], [71, 210], [70, 220], [72, 185], [73, 231], [72, 210], [75, 195], [74, 200], [70, 205], [76, 200], [71, 190], [82, 250], [72, 185], [73, 180], [74, 170], [71, 180], [75, 208], [77, 235], [72, 215], [74, 244], [72, 220], [73, 185], [78, 230], [77, 190], [73, 200], [73, 180], [73, 190], [73, 196],
[73, 180], [76, 230], [75, 224], [70, 160], [73, 178], [72, 205], [73, 185], [75, 210], [74, 180], [73, 190], [73, 200], [76, 257], [73, 190], [75, 220], [70, 165], [77, 205], [72, 200], [77, 208], [74, 185], [75, 215], [75, 170], [75, 235], [75, 210], [72, 170],
[74, 180], [71, 170], [76, 190], [71, 150], [75, 230], [76, 203], [83, 260], [75, 246], [74, 186], [76, 210],
[72, 198], [72, 210], [75, 215], [75, 180], [72, 200], [77, 245], [73, 200], [72, 192], [70, 192], [74, 200], [72, 192],
[74, 205], [72, 190], [71, 186], [70, 170], [71, 197], [76, 219], [74, 200], [76, 220], [74, 207], [74, 225], [74, 207],
[75, 212], [75, 225], [71, 170], [71, 190], [74, 210], [77, 230], [71, 210], [74, 200], [75, 238], [77, 234], [76, 222],
[74, 200], [76, 190], [72, 170], [71, 220], [72, 223], [75, 210], [73, 215], [68, 196], [72, 175], [69, 175], [73, 189],
[73, 205], [75, 210], [70, 180], [70, 180], [74, 197], [75, 220], [74, 228], [74, 190], [73, 204], [74, 165], [75, 216],
[77, 220], [73, 208], [74, 210], [76, 215], [74, 195], [75, 200], [73, 215], [76, 229], [78, 240], [75, 207], [73, 205],
[77, 208], [74, 185], [72, 190], [74, 170], [72, 208], [71, 225], [73, 190], [75, 225], [73, 185], [67, 180], [67, 165],
[76, 240], [74, 220], [73, 212], [70, 163], [75, 215], [70, 175], [72, 205], [77, 210], [79, 205], [78, 208], [74, 215],
[75, 180], [75, 200], [78, 230], [76, 211], [75, 230], [69, 190], [75, 220], [72, 180], [75, 205], [73, 190], [74, 180],
[75, 205], [75, 190], [73, 195]]
weight_np=np.array(weight)
#print(type(weight_np))
#print(weight_np)
light=weight_np < 21
lowweight=weight_np[light]
print(lowweight)
np_baseball=np.array(baseball)
print(np_baseball.shape)
#Basic Operations on numpy arrays
#
#Statistical Operations on numpy arrays
#
# np_baseball is available
# Print mean height (first column)
avg = np.mean(np_baseball[:,0])
print("Average: " + str(avg))
# Print median height. Replace 'None'
med = np.median(np_baseball[:,0])
print("Median: " + str(med))
# Print out the standard deviation on height. Replace 'None'
stddev = np.std(np_baseball[:,0])
print("Standard Deviation: " + str(stddev))
# Print out correlation between first and second column. Replace 'None'
corr = np.corrcoef(np_baseball[:,0],np_baseball[:,1])
print("Correlation: " + str(corr)) | 177.849315 | 4,931 | 0.484942 |
679f8e5b12103c54dd655de826901d7a4752b208 | 11,818 | py | Python | sysinv/sysinv/sysinv/sysinv/puppet/nfv.py | MarioCarrilloA/config | 06a6f142d154970ce658e979822cd84ce447f612 | [
"Apache-2.0"
] | null | null | null | sysinv/sysinv/sysinv/sysinv/puppet/nfv.py | MarioCarrilloA/config | 06a6f142d154970ce658e979822cd84ce447f612 | [
"Apache-2.0"
] | null | null | null | sysinv/sysinv/sysinv/sysinv/puppet/nfv.py | MarioCarrilloA/config | 06a6f142d154970ce658e979822cd84ce447f612 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2017-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.common import constants
from sysinv.common import utils
from sysinv.helm import helm
from sysinv.puppet import openstack
| 42.358423 | 76 | 0.582924 |
679fc8ee35fed0b83bbf337e8c352e97186a807c | 1,151 | py | Python | qualif16/timeline.py | valenca/hashcode16 | ac47b6f480a9c2ce78446aa3510178cc32f26ea5 | [
"WTFPL"
] | 1 | 2016-02-08T17:23:18.000Z | 2016-02-08T17:23:18.000Z | qualif16/timeline.py | valenca/hashcode16 | ac47b6f480a9c2ce78446aa3510178cc32f26ea5 | [
"WTFPL"
] | null | null | null | qualif16/timeline.py | valenca/hashcode16 | ac47b6f480a9c2ce78446aa3510178cc32f26ea5 | [
"WTFPL"
] | null | null | null | from data import *
from heapq import *
# Manual smoke test: queue several "load" events for a single drone at
# times 0..2, then drain the timeline, printing each batch of simultaneous
# events followed by a blank line. (Python 2 print-statement syntax.)
if __name__ == '__main__':
	q=Timeline()
	d = Drone(0,0,100)
	q.addEvent(Event(d,0,"load"))
	q.addEvent(Event(d,0,"load"))
	q.addEvent(Event(d,0,"load"))
	q.addEvent(Event(d,1,"load"))
	q.addEvent(Event(d,1,"load"))
	q.addEvent(Event(d,2,"load"))
	q.addEvent(Event(d,2,"load"))
	while not q.isEmpty():
		print q.nextEvents()
		print ""
| 19.508475 | 89 | 0.652476 |
67a10756dbb9e4be6d237dca1eb33c024676daf2 | 5,394 | py | Python | cfgov/v1/util/migrations.py | hkeeler/cfgov-refresh | 33977186a8e9cb972e63cc22baa357d381316aec | [
"CC0-1.0"
] | null | null | null | cfgov/v1/util/migrations.py | hkeeler/cfgov-refresh | 33977186a8e9cb972e63cc22baa357d381316aec | [
"CC0-1.0"
] | null | null | null | cfgov/v1/util/migrations.py | hkeeler/cfgov-refresh | 33977186a8e9cb972e63cc22baa357d381316aec | [
"CC0-1.0"
] | null | null | null | import json
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from treebeard.mp_tree import MP_Node
try:
from wagtail.core.blocks import StreamValue
except ImportError: # pragma: no cover; fallback for Wagtail < 2.0
from wagtail.wagtailcore.blocks import StreamValue
def is_page(page_or_revision):
    """Return True if ``page_or_revision`` is a Page rather than a revision.

    Revisions carry their serialized content in a ``content_json``
    attribute; live Page objects have no such attribute, so its absence
    identifies a Page.
    """
    has_serialized_content = hasattr(page_or_revision, 'content_json')
    return not has_serialized_content
def get_stream_data(page_or_revision, field_name):
    """Return the raw stream field data for ``field_name``.

    For a Page the live field's ``stream_data`` is returned directly; for
    a revision the data is decoded from the revision's serialized
    ``content_json`` (falling back to an empty list when the field is
    absent).
    """
    if not is_page(page_or_revision):
        revision_content = json.loads(page_or_revision.content_json)
        return json.loads(revision_content.get(field_name, "[]"))

    return getattr(page_or_revision, field_name).stream_data
def set_stream_data(page_or_revision, field_name, stream_data, commit=True):
    """Store ``stream_data`` on the given stream field.

    Pages get a fresh lazy ``StreamValue`` assigned to the field, while
    revisions have the field rewritten inside their serialized
    ``content_json``. When ``commit`` is True (the default) ``save()`` is
    called on the object afterwards.
    """
    if is_page(page_or_revision):
        field = getattr(page_or_revision, field_name)
        new_value = StreamValue(field.stream_block, stream_data, is_lazy=True)
        setattr(page_or_revision, field_name, new_value)
    else:
        revision_content = json.loads(page_or_revision.content_json)
        revision_content[field_name] = json.dumps(stream_data)
        page_or_revision.content_json = json.dumps(revision_content)

    if commit:
        page_or_revision.save()
def migrate_stream_data(page_or_revision, block_path, stream_data, mapper):
    """Recursively apply ``mapper`` to matching blocks in ``stream_data``.

    ``block_path`` is a block type name or a list of nested block type
    names; ``mapper`` is called as ``mapper(page_or_revision, value)`` on
    the value of every block found at the end of that path. Returns a
    ``(stream_data, migrated)`` tuple where ``migrated`` is True when at
    least one block was mapped. The stream data is modified in place.
    """
    if isinstance(block_path, str):
        block_path = [block_path]

    if not block_path:
        return stream_data, False

    # Split the path into the block type to match at this level and the
    # remainder to match recursively inside that block's value.
    block_name = block_path[0]
    child_path = block_path[1:]
    any_migrated = False

    for field in stream_data:
        if field['type'] != block_name:
            continue
        if child_path:
            new_value, field_migrated = migrate_stream_data(
                page_or_revision, child_path, field['value'], mapper
            )
        else:
            new_value = mapper(page_or_revision, field['value'])
            field_migrated = True
        if field_migrated:
            field['value'] = new_value
            any_migrated = True

    return stream_data, any_migrated
def migrate_stream_field(page_or_revision, field_name, block_path, mapper):
    """Run ``mapper`` over ``block_path`` blocks of one StreamField.

    The field is rewritten (and the object saved) only when at least one
    block was actually migrated.
    """
    stream_data, changed = migrate_stream_data(
        page_or_revision, block_path,
        get_stream_data(page_or_revision, field_name), mapper
    )

    if changed:
        set_stream_data(page_or_revision, field_name, stream_data)
67a10b0fb92da7a2ec247253549979648e850cef | 8,436 | py | Python | source/codes.py | Very1Fake/monitor | bb47352cffebd8b99bafac0a342324b042b3d826 | [
"Apache-2.0",
"MIT"
] | null | null | null | source/codes.py | Very1Fake/monitor | bb47352cffebd8b99bafac0a342324b042b3d826 | [
"Apache-2.0",
"MIT"
] | null | null | null | source/codes.py | Very1Fake/monitor | bb47352cffebd8b99bafac0a342324b042b3d826 | [
"Apache-2.0",
"MIT"
] | null | null | null | from typing import Dict
_codes: Dict[int, str] = {
# Debug (1xxxx)
# System (100xx)
10000: 'Test debug',
# Pipe (103xx)
10301: 'Reindexing parser',
# Resolver (109xx)
10901: 'Executing catalog',
10902: 'Executing target',
10903: 'Catalog executed',
10904: 'Target executed',
# SubProvider (113xx)
11301: 'Common exception while sending request',
# Information (2xxxx)
# System (200xx)
20000: 'Test information',
20001: 'Thread started',
20002: 'Thread paused',
20003: 'Thread resumed',
20004: 'Thread closing',
20005: 'Thread closed',
# Core (201xx)
20101: 'Production mode enabled',
20102: 'Signal Interrupt',
20103: 'Turning off',
20104: 'Saving success hashes started',
20105: 'Saving success hashes complete',
20106: 'Offline',
# ThreadManager (202xx)
20201: 'Pipe initialized',
20202: 'Pipe started',
20203: 'Worker initialized',
20204: 'Worker started',
20205: 'CatalogWorker initialized',
20206: 'CatalogWorker started',
# Pipe (203xx)
20301: 'Reindexing parsers started',
20302: 'Reindexing parsers complete',
20303: 'Parser reindexing complete',
# ScriptManager (205xx)
20501: 'Script loaded',
20502: 'Script unloaded',
20503: 'Script reloaded',
20504: 'Loading all indexed scripts',
20505: 'Loading all indexed scripts complete',
20506: 'Unloading all scripts',
20507: 'Unloading all scripts complete',
20508: 'Reloading all scripts',
20509: 'Reloading all scripts complete',
# ScriptIndex (206xx)
20601: 'Config loaded',
20602: 'Config dumped',
20603: 'Config does not loaded (must be dict)',
20604: 'Skipping script (config not detected)',
20605: 'Skipping script (bad config)',
20606: 'Skipping script (script incompatible with core)',
20607: 'Skipping script (script in blacklist)',
20608: 'Skipping script (script with this name is already indexed)',
20609: 'N script(s) indexed',
20610: 'Skipping config (script not in whitelist)',
# EventHandler (207xx)
20701: 'Starting loop',
20702: 'Loop started',
20703: 'Stopping loop',
20704: 'Loop stopped',
# Logger (208xx)
20801: 'Log level changed',
20802: 'Log mode changed',
20803: 'Time changed to UTC',
20804: 'Time changed to local',
# Resolver (209xx)
20901: 'Successful target execution',
20902: 'Catalog updated',
# Commands (211xx)
21101: 'Command executing',
21102: 'Command executed',
21103: 'Command execute',
# Provider (212xx)
21201: 'Proxies dumped',
21202: 'Checking proxy',
21203: 'Checking proxy (OK)',
# Keywords (215xx)
21501: 'Dumping keywords(started)',
21502: 'Dumping keywords(complete)',
21503: 'Clearing keywords(started)',
21504: 'Clearing keywords(complete)',
21505: 'Syncing keywords(started)',
21506: 'Syncing keywords(complete)',
21507: 'Loading keywords(started)',
21508: 'Loading keywords(complete)',
# Warning (3xxxx)
# System (300xx)
30000: 'Test warning',
# ThreadManager (302xx)
30201: 'Pipe was stopped',
30202: 'Worker was stopped',
30203: 'CatalogWorker was stopped',
30204: 'Lock forced released',
# Pipe (303xx)
30301: 'Parser reindexing failed',
30302: 'Catalog lost while sending (queue full)',
30303: 'Target lost while sending (queue full)',
# ScriptManager (305xx)
30501: 'Module not loaded',
30502: 'Nothing to import in script',
30503: 'Script cannot be unloaded (_unload)',
30504: 'Script cannot be unloaded (_reload)',
30505: 'Script not indexed but still loaded',
30506: 'Script already loaded',
30507: 'Max errors for script reached, unloading',
# EventHandler (307xx)
30701: 'Loop already started',
30702: 'Loop already stopped',
# Logger (308xx)
30801: 'Meaningless level change (changing to the same value)',
30802: 'Meaningless mode change (changing to the same value)',
30803: 'Meaningless time change (changing to the same value)',
# Resolver (309xx)
30901: 'Catalog lost while retrieving (script not loaded)',
30902: 'Catalog lost while retrieving (script has no Parser)',
30903: 'Target lost while retrieving (script not loaded)',
30904: 'Target lost while retrieving (script has no Parser)',
30905: 'Catalog lost while executing (script unloaded)',
30906: 'Catalog lost while executing (script has no parser)',
30907: 'Catalog lost while executing (bad result)',
30908: 'Target lost while executing (script unloaded)',
30909: 'Target lost while executing (script has no parser)',
30910: 'Target lost while executing (bad result)',
30911: 'Smart catalog expired',
30912: 'Smart target expired',
# Provider (312xx)
31201: 'Proxy added',
31202: 'Proxy removed',
31203: 'Proxies list changed',
31204: 'Proxies statistics reset',
31205: 'Proxies list cleared',
# Keywords (315xx)
31501: 'Keywords file not found',
31511: 'Absolute keyword not loaded (TypeError)',
31512: 'Absolute keyword not loaded (UniquenessError)',
31521: 'Positive keyword not loaded (TypeError)',
31522: 'Positive keyword not loaded (UniquenessError)',
31531: 'Negative keyword not loaded (TypeError)',
31532: 'Negative keyword not loaded (UniquenessError)',
# Error (4xxxx)
# System (400xx)
40000: 'Unknown error',
# ThreadManager (402xx)
40201: 'Pipe was unexpectedly stopped',
40202: 'Worker was unexpectedly stopped',
40203: 'CatalogWorker was unexpectedly stopped',
# Pipe (403xx)
40301: 'Wrong catalog received from script',
# Worker (404xx)
40401: 'Unknown status received while executing',
40402: 'Parser execution failed',
40403: 'Target lost in pipeline (script unloaded)',
# ScriptsManager (405xx)
40501: 'Can\'t load script (ImportError)',
40502: 'Can\'t load script (script not indexed)',
40503: 'Can\'t unload script (script isn\'t loaded)',
40504: 'Can\'t reload script (script isn\'t loaded)',
40505: 'Script cannot be reloaded (folder not found)',
40506: 'Script cannot be reloaded (script not in index)',
# EventHandler (407xx)
40701: 'Event execution failed',
# Logger (408xx)
40801: 'Can\'t change level (possible values (0, 1, 2, 3, 4, 5))',
40802: 'Can\'t change mode (possible values (0, 1, 2, 3))',
# Resolver (409xx)
40901: 'Unknown index type (while inserting)',
40902: 'Unknown target type (while inserting)',
40903: 'Catalog execution failed',
40904: 'Target execution failed',
# Provider (412xx)
41201: 'Bad proxy',
41202: 'Checking proxy (FAILED)',
# SubProvider (413xx)
41301: 'Severe exception while sending request',
# Keywords (415xx)
41501: 'Loading keywords (Failed)',
# Fatal (5xxxx)
# System (500xx)
50000: 'Test fatal',
# Core (501xx)
50101: 'ThreadManager unexpectedly has turned off',
# ThreadManager (502xx)
50201: 'Exception raised, emergency stop initiated',
# Pipe (503xx)
50301: 'Unexpectedly has turned off',
# Worker (504xx)
50401: 'Unexpectedly has turned off',
# CatalogWorker (510xx)
51001: 'Unexpectedly has turned off',
# RemoteThread (514xx)
51401: 'Unknown fatal error'
}
| 30.345324 | 95 | 0.641062 |
67a1409839afbcce2cc6a08bb9dc1126a5b4df90 | 937 | py | Python | Stack.py | jdegene/ArcGIS-scripts | 8821adc32b89525039591db83c762083a4ef750f | [
"MIT"
] | null | null | null | Stack.py | jdegene/ArcGIS-scripts | 8821adc32b89525039591db83c762083a4ef750f | [
"MIT"
] | null | null | null | Stack.py | jdegene/ArcGIS-scripts | 8821adc32b89525039591db83c762083a4ef750f | [
"MIT"
] | null | null | null | # Erstellt aus vielen TIFF Datei eine stacked Datei mit dem ArcGIS
# Tool composite bands
import arcpy
import os
arcpy.env.overwriteOutput = True # Ueberschreiben fuer ArcGIS aktivieren
arcpy.env.pyramid = "NONE" # Verhindert dass Pyramiden berechnet werden
arcpy.env.rasterStatistics = "NONE" # Verhindert dass Statistiken berechnet werden
inFol = "D:/Test/NDVI_tif/"
outFol = "D:/Test/NDVI_file/"
month = ("jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec")
half = ("a", "b")
datList = ""
for i in range(1981,2013):
iStr = str(i)[2:4]
for ii in month:
for iii in half:
datName = inFol + "geo" + iStr + ii + "15" + iii + ".tif"
datList = datList + ";" + datName
datList = datList [1:] #Da sonst datList mit einem ; beginnt
arcpy.CompositeBands_management(datList, outFol + "NDVIstack.tif")
#compRas.save(outFol + "NDVIstack.tif")
| 28.393939 | 92 | 0.640342 |
67a2a922aab66937ea10eabfea17b426aac61814 | 2,106 | py | Python | tests/test_frozenordereddict.py | tirkarthi/frozenordereddict | 8837a7e2b55cf8531793b0ec5ad40d56c500ec0f | [
"MIT"
] | 2 | 2016-01-14T18:03:42.000Z | 2020-11-03T22:13:03.000Z | tests/test_frozenordereddict.py | tirkarthi/frozenordereddict | 8837a7e2b55cf8531793b0ec5ad40d56c500ec0f | [
"MIT"
] | 4 | 2017-10-24T06:03:24.000Z | 2020-11-03T22:23:06.000Z | tests/test_frozenordereddict.py | tirkarthi/frozenordereddict | 8837a7e2b55cf8531793b0ec5ad40d56c500ec0f | [
"MIT"
] | 6 | 2015-12-02T11:34:33.000Z | 2021-11-04T04:31:11.000Z | from collections import OrderedDict
from unittest import TestCase
from frozenordereddict import FrozenOrderedDict
| 27.350649 | 85 | 0.625831 |
67a4b479d6f75f2f17d3b85691a149733addfde8 | 7,560 | py | Python | tests/test_data_gateway/test_dummy_serial.py | aerosense-ai/data-gateway | 019b8e4a114e16d363a3167171a457cefdbf004f | [
"Apache-2.0"
] | null | null | null | tests/test_data_gateway/test_dummy_serial.py | aerosense-ai/data-gateway | 019b8e4a114e16d363a3167171a457cefdbf004f | [
"Apache-2.0"
] | 34 | 2021-12-20T14:51:57.000Z | 2022-03-30T16:47:04.000Z | tests/test_data_gateway/test_dummy_serial.py | aerosense-ai/data-gateway | 019b8e4a114e16d363a3167171a457cefdbf004f | [
"Apache-2.0"
] | null | null | null | import random
import unittest
from serial.serialutil import SerialException
from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string
from tests.base import BaseTestCase
if __name__ == "__main__":
unittest.main()
| 41.538462 | 119 | 0.694577 |
67a4dc5dd5440ed57b743f18f84e2d218d7c1ec4 | 5,216 | py | Python | site/flask/lib/python2.7/site-packages/speaklater.py | theholyhades1/tartanHacks2015 | a801b473f21cfbd136e2a5a74423e8c72d14f900 | [
"MIT"
] | 32 | 2015-01-19T12:13:26.000Z | 2021-11-11T00:11:22.000Z | site/flask/lib/python2.7/site-packages/speaklater.py | theholyhades1/tartanHacks2015 | a801b473f21cfbd136e2a5a74423e8c72d14f900 | [
"MIT"
] | 10 | 2020-06-05T19:42:26.000Z | 2022-03-11T23:38:35.000Z | site/flask/lib/python2.7/site-packages/speaklater.py | theholyhades1/tartanHacks2015 | a801b473f21cfbd136e2a5a74423e8c72d14f900 | [
"MIT"
] | 9 | 2015-07-18T01:03:56.000Z | 2019-05-24T09:36:40.000Z | # -*- coding: utf-8 -*-
r"""
speaklater
~~~~~~~~~~
A module that provides lazy strings for translations. Basically you
get an object that appears to be a string but changes the value every
time the value is evaluated based on a callable you provide.
For example you can have a global `lazy_gettext` function that returns
a lazy string with the value of the current set language.
Example:
>>> from speaklater import make_lazy_string
>>> sval = u'Hello World'
>>> string = make_lazy_string(lambda: sval)
This lazy string will evaluate to the value of the `sval` variable.
>>> string
lu'Hello World'
>>> unicode(string)
u'Hello World'
>>> string.upper()
u'HELLO WORLD'
If you change the value, the lazy string will change as well:
>>> sval = u'Hallo Welt'
>>> string.upper()
u'HALLO WELT'
This is especially handy when combined with a thread local and gettext
translations or dicts of translatable strings:
>>> from speaklater import make_lazy_gettext
>>> from threading import local
>>> l = local()
>>> l.translations = {u'Yes': 'Ja'}
>>> lazy_gettext = make_lazy_gettext(lambda: l.translations.get)
>>> yes = lazy_gettext(u'Yes')
>>> print yes
Ja
>>> l.translations[u'Yes'] = u'Si'
>>> print yes
Si
Lazy strings are no real strings so if you pass this sort of string to
a function that performs an instance check, it will fail. In that case
you have to explicitly convert it with `unicode` and/or `string` depending
on what string type the lazy string encapsulates.
To check if a string is lazy, you can use the `is_lazy_string` function:
>>> from speaklater import is_lazy_string
>>> is_lazy_string(u'yes')
False
>>> is_lazy_string(yes)
True
New in version 1.2: It's now also possible to pass keyword arguments to
the callback used with `make_lazy_string`.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
def is_lazy_string(obj):
"""Checks if the given object is a lazy string."""
return isinstance(obj, _LazyString)
def make_lazy_string(__func, *args, **kwargs):
"""Creates a lazy string by invoking func with args."""
return _LazyString(__func, args, kwargs)
def make_lazy_gettext(lookup_func):
"""Creates a lazy gettext function dispatches to a gettext
function as returned by `lookup_func`.
Example:
>>> translations = {u'Yes': u'Ja'}
>>> lazy_gettext = make_lazy_gettext(lambda: translations.get)
>>> x = lazy_gettext(u'Yes')
>>> x
lu'Ja'
>>> translations[u'Yes'] = u'Si'
>>> x
lu'Si'
"""
return lazy_gettext
if __name__ == '__main__':
import doctest
doctest.testmod()
| 25.950249 | 78 | 0.637078 |
67a75973cb787f7c7e91d28c32afde2e4db5408b | 848 | py | Python | test/test_graph.py | mits58/Python-Graph-Library | aa85788ad63e356944d77a4c251ad707562dd9c0 | [
"MIT"
] | null | null | null | test/test_graph.py | mits58/Python-Graph-Library | aa85788ad63e356944d77a4c251ad707562dd9c0 | [
"MIT"
] | null | null | null | test/test_graph.py | mits58/Python-Graph-Library | aa85788ad63e356944d77a4c251ad707562dd9c0 | [
"MIT"
] | null | null | null | import unittest
import numpy as np
from graph import Graph
if __name__ == '__main__':
unittest.main()
| 24.941176 | 78 | 0.483491 |
67a783ee0f0ec9ab1fa4d600a15705146b7bc899 | 260 | py | Python | 09_cumledeki_kelime_sayisi.py | kabatasmirac/We_WantEd_OrnekCozumler | 0f022361659fb78cd3f644910f3611d45df64317 | [
"MIT"
] | 1 | 2020-06-09T13:09:23.000Z | 2020-06-09T13:09:23.000Z | 09_cumledeki_kelime_sayisi.py | kabatasmirac/We_WantEd_OrnekCozumler | 0f022361659fb78cd3f644910f3611d45df64317 | [
"MIT"
] | null | null | null | 09_cumledeki_kelime_sayisi.py | kabatasmirac/We_WantEd_OrnekCozumler | 0f022361659fb78cd3f644910f3611d45df64317 | [
"MIT"
] | null | null | null |
cumle = input("Cumlenizi giriniz : ")
print("Cumlenizdeki kelime sayisi = {}".format(kelime_sayisi(cumle))) | 26 | 69 | 0.615385 |
67a9204ea3bc6abf715d94ea6ccb879d61991881 | 909 | py | Python | pdns-mysql-domain-exp/lib/db.py | kilgoretrout1985/pdns-mysql-domain-exp | 9692971da82d625b242c740d9be8e2130a483249 | [
"MIT"
] | null | null | null | pdns-mysql-domain-exp/lib/db.py | kilgoretrout1985/pdns-mysql-domain-exp | 9692971da82d625b242c740d9be8e2130a483249 | [
"MIT"
] | null | null | null | pdns-mysql-domain-exp/lib/db.py | kilgoretrout1985/pdns-mysql-domain-exp | 9692971da82d625b242c740d9be8e2130a483249 | [
"MIT"
] | null | null | null | import MySQLdb
| 34.961538 | 104 | 0.537954 |
67a972ea6a872e759ef7065f8c8e54aa921e3f54 | 3,370 | py | Python | barni/_result.py | Thrameos/barni | e5ba76f9bb04a15a272b5159b25e6425733102c4 | [
"MIT"
] | 8 | 2020-03-16T23:21:59.000Z | 2021-08-12T12:26:44.000Z | barni/_result.py | johnromo04/barni | 3d758f21a9317b8826019261548339c047923b96 | [
"MIT"
] | 6 | 2020-03-17T16:57:14.000Z | 2020-08-04T17:51:45.000Z | barni/_result.py | johnromo04/barni | 3d758f21a9317b8826019261548339c047923b96 | [
"MIT"
] | 3 | 2020-03-17T00:47:28.000Z | 2020-07-29T18:19:10.000Z | ###############################################################################
# Copyright (c) 2019 Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory
#
# Written by M. Monterial, K. Nelson
# monterial1@llnl.gov
#
# LLNL-CODE-805904
#
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED,INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
###############################################################################
'''
Module for handling the results of identification.
@author monterial1
'''
from typing import List
from collections import UserList
import textwrap
from ._architecture import NuclideResult, Serializable
from ._reader import registerReader
__all__ = ["NuclideResultList"]
def loadNuclideResult(context, element):
""" Loads in a nuclide result
"""
out = NuclideResult(nuclide=None, score=None, prediction=None)
for node in element.childNodes:
# skip all but elements
if node.nodeType != node.ELEMENT_NODE:
continue
if node.tagName == "nuclide":
out.nuclide = str(node.firstChild.nodeValue)
continue
if node.tagName == "score":
out.score = float(node.firstChild.nodeValue)
continue
if node.tagName == "prediction":
out.prediction = int(node.firstChild.nodeValue)
continue
context.raiseElementError(element, node)
return out
def loadNuclideResultList(context, element):
""" Loads a list of nuclide results
"""
out = NuclideResultList()
for node in element.childNodes:
# skip all but elements
if node.nodeType != node.ELEMENT_NODE:
continue
if node.tagName == "NuclideResult":
out.addTemplate(loadNuclideResult(context, node))
continue
context.raiseElementError(element, node)
return out
registerReader("NuclideResult", loadNuclideResult)
registerReader("NuclideResultList", loadNuclideResultList)
| 34.387755 | 81 | 0.67003 |
67a9af0c056744f8b59776cc12a80777352c44e7 | 2,976 | py | Python | work/code/5fold/paddle_model.py | kkoren/2021CCFBDCI-QAmatch-rank5 | 379f89ad43ffcfbd2c15ad6ac4f93e8fa5b27dc3 | [
"Apache-2.0"
] | null | null | null | work/code/5fold/paddle_model.py | kkoren/2021CCFBDCI-QAmatch-rank5 | 379f89ad43ffcfbd2c15ad6ac4f93e8fa5b27dc3 | [
"Apache-2.0"
] | null | null | null | work/code/5fold/paddle_model.py | kkoren/2021CCFBDCI-QAmatch-rank5 | 379f89ad43ffcfbd2c15ad6ac4f93e8fa5b27dc3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on %(date)s
@author: %Christian
"""
"""
#BASE +BN
#dropout0.15
"""
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import paddlenlp as ppnlp
| 30.367347 | 108 | 0.584341 |
67aa649ee72d5a267bbc9cdfc568c9bcaf20b9fc | 21,917 | py | Python | q2_mlab/plotting/app.py | patrickimran/regression-benchmarking | 90a9dd1f4196d76145d17d733dffc13830fd95fa | [
"BSD-3-Clause"
] | null | null | null | q2_mlab/plotting/app.py | patrickimran/regression-benchmarking | 90a9dd1f4196d76145d17d733dffc13830fd95fa | [
"BSD-3-Clause"
] | 29 | 2020-04-22T16:39:02.000Z | 2021-08-02T15:43:11.000Z | q2_mlab/plotting/app.py | patrickimran/regression-benchmarking | 90a9dd1f4196d76145d17d733dffc13830fd95fa | [
"BSD-3-Clause"
] | 4 | 2019-12-30T17:06:04.000Z | 2020-08-14T17:55:31.000Z | from functools import partialmethod
import pandas as pd
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
import sqlite3
import click
import json
import pkg_resources
from itertools import combinations
from q2_mlab.db.schema import RegressionScore
from q2_mlab.plotting.components import (
Mediator,
ComponentMixin,
Plottable,
ButtonComponent,
ScatterComponent,
SegmentComponent,
DataSourceComponent,
SelectComponent,
)
from bokeh.plotting import figure
from bokeh.transform import factor_cmap
from bokeh.models import (
ColumnDataSource,
CheckboxButtonGroup,
TextInput,
Legend,
LegendItem,
)
from bokeh.models.widgets import (
Div,
)
from bokeh.palettes import (
Category20,
Set3,
)
from bokeh.layouts import column, row
from bokeh.server.server import Server
groups = ['parameters_id', 'dataset', 'target', 'level', 'algorithm']
drop_cols = ['artifact_uuid', 'datetime', 'CV_IDX', 'id']
target_map = {
'age_v2': 'age',
'BL_AGE': 'age',
'age': 'age',
'bmi_v2': 'bmi',
'BMI': 'bmi',
'bmi': 'bmi'
}
with pkg_resources.resource_stream(
__name__, "standard_deviations.json"
) as f:
TARGET_SD = json.load(f)
def _get_standardized_mae(df_row, norm_dict):
"""
"""
mae = df_row['MAE']
target = df_row['target']
dataset = df_row['dataset']
cv_fold = df_row['CV_IDX']
level = df_row['level']
key = f"({dataset}, {target}, {level}, {cv_fold})"
sd = norm_dict.get(key, 1)
standardized_mae = mae / sd
return standardized_mae
def find_segments(group_stats, across, groupby):
"""
TODO makes some assumptions about the guarantees on pairs when there are
more than 2 categories
"""
seg_cols = groupby.copy()
seg_cols.remove(across)
group_counts = group_stats[seg_cols + [across]].groupby(seg_cols).count()
max_n_pairs = group_counts[across].max()
category_values = group_stats[across].unique()
where = (group_counts[across] == max_n_pairs)
keep_repeats = group_stats.set_index(seg_cols).loc[where]
keep_repeats_parts = []
for i, sub_group in enumerate(category_values):
where = keep_repeats[across] == sub_group
keep_repeats_parts.append(keep_repeats.loc[where])
keep_repeats_parts[i].columns = [col + '_' + sub_group for
col in keep_repeats_parts[i].columns]
segment_df = pd.concat(keep_repeats_parts,
axis=1
)
return segment_df
palettes = {
'Category20': Category20,
'Set3': Set3,
}
DEFAULTS = {
'segment_variable': 'dataset',
'x': 'MAE_mean',
'y': 'MAE_var',
'x_axis_type': 'log',
'y_axis_type': 'log',
'cmap': 'Category20'
}
def run_app(db, color_scheme):
# thanks https://github.com/sqlalchemy/sqlalchemy/issues/4863
engine = create_engine("sqlite://", creator=connect)
bkapp = AlgorithmScatter(
DEFAULTS['x'], DEFAULTS['y'],
engine=engine,
cmap=palettes.get(color_scheme),
).plot().app
server = Server({'/': bkapp})
server.start()
server.io_loop.add_callback(server.show, "/")
server.io_loop.start()
| 33.927245 | 79 | 0.578409 |
67add2205d4190930f5b032323a1238d7a058e8c | 6,378 | py | Python | gpn/distributions/base.py | WodkaRHR/Graph-Posterior-Network | 139e7c45c37324c9286e0cca60360a4978b3f411 | [
"MIT"
] | 23 | 2021-11-16T01:31:55.000Z | 2022-03-04T05:49:03.000Z | gpn/distributions/base.py | WodkaRHR/Graph-Posterior-Network | 139e7c45c37324c9286e0cca60360a4978b3f411 | [
"MIT"
] | 1 | 2021-12-17T01:25:16.000Z | 2021-12-20T10:38:30.000Z | gpn/distributions/base.py | WodkaRHR/Graph-Posterior-Network | 139e7c45c37324c9286e0cca60360a4978b3f411 | [
"MIT"
] | 7 | 2021-12-03T11:13:44.000Z | 2022-02-06T03:12:10.000Z | import torch
import torch.distributions as D
#--------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
| 33.21875 | 100 | 0.607087 |
67addac624c1ac8a0bc388113f31ef1180a2d2c5 | 557 | py | Python | demos/python/3_statements.py | denfromufa/mipt-course | ad828f9f3777b68727090bcd69feb0dd91f17465 | [
"BSD-3-Clause"
] | null | null | null | demos/python/3_statements.py | denfromufa/mipt-course | ad828f9f3777b68727090bcd69feb0dd91f17465 | [
"BSD-3-Clause"
] | null | null | null | demos/python/3_statements.py | denfromufa/mipt-course | ad828f9f3777b68727090bcd69feb0dd91f17465 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
condition = 42
# IMPORTANT: colons, _indentation_ are significant!
if condition:
print "Condition is true!"
elif True: # not 'true'!
print "I said it's true! :)"
else:
print "Condition is false :("
# of course, elif/else are optional
assert True == (not False)
# Equivalent of `for (int i = 0; i < 13; i++) {`
for i in range(0, 13):
print i, # "," at the end means "no newline"
print # newline
while True:
if condition == 42:
break
elif condition == 17:
continue
else:
print "?"
| 19.892857 | 51 | 0.601436 |
67ae4667834ab686277782bd3ef57e5f23b602fc | 6,492 | py | Python | dedupe/training.py | BrianSipple/dedupe | d276da675e319d5cc6e7cafd4963deebde0d485d | [
"MIT"
] | 1 | 2015-11-06T01:33:04.000Z | 2015-11-06T01:33:04.000Z | dedupe/training.py | BrianSipple/dedupe | d276da675e319d5cc6e7cafd4963deebde0d485d | [
"MIT"
] | null | null | null | dedupe/training.py | BrianSipple/dedupe | d276da675e319d5cc6e7cafd4963deebde0d485d | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# provides functions for selecting a sample of training data
from itertools import combinations, islice
import blocking
import core
import numpy
import logging
import random
import sys
def findUncertainPairs(field_distances, data_model, bias=0.5):
"""
Given a set of field distances and a data model return the
indices of the record pairs in order of uncertainty. For example,
the first indices corresponds to the record pair where we have the
least certainty whether the pair are duplicates or distinct.
"""
probability = core.scorePairs(field_distances, data_model)
p_max = (1.0 - bias)
logging.info(p_max)
informativity = numpy.copy(probability)
informativity[probability < p_max] /= p_max
informativity[probability >= p_max] = (1 - probability[probability >= p_max])/(1-p_max)
return numpy.argsort(-informativity)
def activeLearning(candidates,
data_model,
labelPairFunction,
training_data,
training_pairs=None):
"""
Ask the user to label the record pair we are most uncertain of. Train the
data model, and update our uncertainty. Repeat until user tells us she is
finished.
"""
fields = [field for field in data_model['fields']
if data_model['fields'][field]['type'] not in ('Missing Data',
'Interaction',
'Higher Categories')]
duplicates = []
nonduplicates = []
if training_pairs:
nonduplicates.extend(training_pairs[0])
duplicates.extend(training_pairs[1])
if training_data.shape[0] == 0 :
rand_int = random.randint(0, len(candidates))
exact_match = candidates[rand_int]
training_data = addTrainingData({1:[exact_match]*2,
0:[]},
data_model,
training_data)
data_model = core.trainModel(training_data, data_model, .1)
finished = False
import time
t_train = time.time()
field_distances = core.fieldDistances(candidates, data_model)
logging.info('calculated fieldDistances in %s seconds',
str(time.time() - t_train))
seen_indices = set()
while finished == False:
logging.info('finding the next uncertain pair ...')
uncertain_indices = findUncertainPairs(field_distances,
data_model,
(len(duplicates)/
(len(nonduplicates)+1.0)))
for uncertain_index in uncertain_indices:
if uncertain_index not in seen_indices:
seen_indices.add(uncertain_index)
break
uncertain_pairs = [candidates[uncertain_index]]
(labeled_pairs, finished) = labelPairFunction(uncertain_pairs, fields)
nonduplicates.extend(labeled_pairs[0])
duplicates.extend(labeled_pairs[1])
training_data = addTrainingData(labeled_pairs, data_model, training_data)
if len(training_data) > 0:
data_model = core.trainModel(training_data, data_model, .1)
else:
raise ValueError('No training pairs given')
training_pairs = {0: nonduplicates, 1: duplicates}
return (training_data, training_pairs, data_model)
def addTrainingData(labeled_pairs, data_model, training_data=[]):
"""
Appends training data to the training data collection.
"""
fields = data_model['fields']
examples = [record_pair for example in labeled_pairs.values()
for record_pair in example]
new_training_data = numpy.empty(len(examples),
dtype=training_data.dtype)
new_training_data['label'] = [0] * len(labeled_pairs[0]) + [1] * len(labeled_pairs[1])
new_training_data['distances'] = core.fieldDistances(examples, data_model)
training_data = numpy.append(training_data, new_training_data)
return training_data
def consoleLabel(uncertain_pairs, fields):
'''Command line interface for presenting and labeling training pairs by the user'''
duplicates = []
nonduplicates = []
finished = False
for record_pair in uncertain_pairs:
label = ''
for pair in record_pair:
for field in fields:
line = "%s : %s\n" % (field, pair[field])
sys.stderr.write(line)
sys.stderr.write('\n')
sys.stderr.write('Do these records refer to the same thing?\n')
valid_response = False
while not valid_response:
sys.stderr.write('(y)es / (n)o / (u)nsure / (f)inished\n')
label = sys.stdin.readline().strip()
if label in ['y', 'n', 'u', 'f']:
valid_response = True
if label == 'y':
duplicates.append(record_pair)
elif label == 'n':
nonduplicates.append(record_pair)
elif label == 'f':
sys.stderr.write('Finished labeling\n')
finished = True
break
elif label != 'u':
sys.stderr.write('Nonvalid response\n')
raise
return ({0: nonduplicates, 1: duplicates}, finished)
| 31.362319 | 91 | 0.590881 |
67aebac6e47b438aae9ad595766760877ca83a55 | 166 | py | Python | Chapter04/listcmp1.py | kaushalkumarshah/Learn-Python-in-7-Days | 2663656767c8959ace836f0c0e272f3e501bbe6e | [
"MIT"
] | 12 | 2018-07-09T16:20:31.000Z | 2022-03-21T22:52:15.000Z | Chapter04/listcmp1.py | kaushalkumarshah/Learn-Python-in-7-Days | 2663656767c8959ace836f0c0e272f3e501bbe6e | [
"MIT"
] | null | null | null | Chapter04/listcmp1.py | kaushalkumarshah/Learn-Python-in-7-Days | 2663656767c8959ace836f0c0e272f3e501bbe6e | [
"MIT"
] | 19 | 2018-01-09T12:49:06.000Z | 2021-11-23T08:05:55.000Z | list1 = [10,9,3,7,2,1,23,1,561,1,1,96,1]
list1.sort(cmp = cmp1)
print list1 | 12.769231 | 41 | 0.5 |
67aefde1df9dfdcb55a1ab80ea64b075758a46e0 | 520 | py | Python | ObitSystem/ObitTalk/test/template.py | sarrvesh/Obit | e4ce6029e9beb2a8c0316ee81ea710b66b2b7986 | [
"Linux-OpenIB"
] | 5 | 2019-08-26T06:53:08.000Z | 2020-10-20T01:08:59.000Z | ObitSystem/ObitTalk/test/template.py | sarrvesh/Obit | e4ce6029e9beb2a8c0316ee81ea710b66b2b7986 | [
"Linux-OpenIB"
] | null | null | null | ObitSystem/ObitTalk/test/template.py | sarrvesh/Obit | e4ce6029e9beb2a8c0316ee81ea710b66b2b7986 | [
"Linux-OpenIB"
] | 8 | 2017-08-29T15:12:32.000Z | 2022-03-31T12:16:08.000Z | from AIPS import AIPS
from AIPSTask import AIPSTask
from AIPSData import AIPSImage
from ObitTask import ObitTask
AIPS.userno = 103
image = AIPSImage('MANDELBROT', 'MANDL', 1, 1)
mandl = AIPSTask('mandl')
mandl.outdata = image
mandl.imsize[1:] = [ 512, 512 ]
mandl.go()
try:
template = ObitTask('Template')
template.DataType = 'AIPS'
template.inName = image.name
template.inClass = image.klass
template.inDisk = image.disk
template.inSeq = image.seq
template.go()
finally:
image.zap()
| 20.8 | 46 | 0.701923 |
67afb6f388c98096e84a0f8aa3dc9e79c6d38f5b | 5,186 | py | Python | src/voxelize.py | Beskamir/BlenderDepthMaps | ba1201effde617078fb35f23d534372de3dd39c3 | [
"MIT"
] | null | null | null | src/voxelize.py | Beskamir/BlenderDepthMaps | ba1201effde617078fb35f23d534372de3dd39c3 | [
"MIT"
] | null | null | null | src/voxelize.py | Beskamir/BlenderDepthMaps | ba1201effde617078fb35f23d534372de3dd39c3 | [
"MIT"
] | null | null | null | import bpy
import bmesh
import numpy
from random import randint
import time
# pointsToVoxels() has been modified from the function generate_blocks() in https://github.com/cagcoach/BlenderPlot/blob/master/blendplot.py
# Some changes to accomodate Blender 2.8's API changes were made,
# and the function has been made much more efficient through creative usage of numpy.
# Given a 3D array of 0 and 1's it'll place a voxel in every cell that has a 1 in it
# place a voxel at a given position, using mesh.primitive_cube_add is really slow so it might be worth making this faster
if __name__ == "__main__":
# calculate the runtime of this script
startTime = time.time()
# createVoxel((1,2,3))
# Generate a 10*10*10 3D texture
testImageArray = []
for x in range(10):
yArray = []
for y in range(10):
zArray = []
for z in range(10):
zArray.append(0)
# zArray.append(randint(0,1))
yArray.append(zArray)
testImageArray.append(yArray)
# print(testImageArray)
# place voxels based on that 10*10*10 array
imagesToVoxelsInefficient(testImageArray)
# testImage = [[[0,0],[1,1]],[[1,1],[1,0]]]
stopTime = time.time()
print("Script took:",stopTime-startTime) | 42.508197 | 140 | 0.636521 |
67b5f86ef31a000c3511435b9060d1043c35b90a | 2,182 | py | Python | storage/lustre_client_iops/lustre_client_iops.py | jssfy/toolpedia | 084d592f7f1de373e6acae5856dfbb8b06b2f7a1 | [
"Apache-2.0"
] | null | null | null | storage/lustre_client_iops/lustre_client_iops.py | jssfy/toolpedia | 084d592f7f1de373e6acae5856dfbb8b06b2f7a1 | [
"Apache-2.0"
] | null | null | null | storage/lustre_client_iops/lustre_client_iops.py | jssfy/toolpedia | 084d592f7f1de373e6acae5856dfbb8b06b2f7a1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#-*-coding:utf-8-*-
import json
import sys
import time
# TBD: auto discovery
# data_path = "/proc/fs/lustre/llite/nvmefs-ffff883f8a4f2800/stats"
data_path = "/proc/fs/lustre/lmv/shnvme3-clilmv-ffff8859d3e2d000/md_stats"
# use a dic1/dic2 to hold sampling data
# put "next - prev" into delta
# print a dictionary in the indented json format
# calculate iops for each category except snapshot_time, all divided by snapshot_time
if __name__ == '__main__':
# dic1/dic2 are used to load prev/next kernel data interchangably
# calc delta by doing: next - prev
# calc iops by doing: delta/time_consumption
dic1 = {}
dic2 = {}
delta = {}
load_data(dic1)
prev = 1
# load_data(dic2)
# calc_delta(dic1, dic2, delta)
# calc_iops_from_delta(delta)
# print_dict(delta)
# dic1['name'] = 'anhua'
# print_dict(dic1)
# enter loop
while True:
time.sleep(2) # TBD: configurable
if prev == 1:
load_data(dic2)
prev = 2
calc_delta(dic1, dic2, delta)
else:
load_data(dic1)
prev = 1
calc_delta(dic2, dic1, delta)
calc_iops_from_delta(delta)
print_dict(delta)
| 26.289157 | 85 | 0.61824 |
67b63f883548e6cabc6d6344eb2af1aa23104352 | 5,716 | py | Python | test/lsh_test.py | titusz/datasketch | a483b39fe4e444c372792e5c91c86d9d8d27a4a5 | [
"MIT"
] | 1 | 2022-03-21T05:36:15.000Z | 2022-03-21T05:36:15.000Z | test/lsh_test.py | tomzhang/datasketch | a483b39fe4e444c372792e5c91c86d9d8d27a4a5 | [
"MIT"
] | null | null | null | test/lsh_test.py | tomzhang/datasketch | a483b39fe4e444c372792e5c91c86d9d8d27a4a5 | [
"MIT"
] | 2 | 2018-11-12T18:00:52.000Z | 2022-03-21T05:36:20.000Z | import unittest
from hashlib import sha1
import pickle
import numpy as np
from datasketch.lsh import MinHashLSH
from datasketch.minhash import MinHash
from datasketch.weighted_minhash import WeightedMinHashGenerator
if __name__ == "__main__":
unittest.main()
| 32.662857 | 64 | 0.561407 |
67b6738fcd0ebe0de56b7b545d7adc583f1c2d45 | 4,134 | py | Python | src/datasets/tsn_dataset.py | tomstark99/epic-kitchens-100-fyrp | cbc9e59569fb6110b900a51def1947b8a3c93699 | [
"Apache-2.0"
] | 2 | 2021-08-31T10:02:56.000Z | 2021-11-24T12:44:19.000Z | src/datasets/tsn_dataset.py | tomstark99/epic-kitchens-100-fyrp | cbc9e59569fb6110b900a51def1947b8a3c93699 | [
"Apache-2.0"
] | null | null | null | src/datasets/tsn_dataset.py | tomstark99/epic-kitchens-100-fyrp | cbc9e59569fb6110b900a51def1947b8a3c93699 | [
"Apache-2.0"
] | null | null | null | import logging
from typing import Callable
from typing import List
import numpy as np
import torch.utils.data
from .video_dataset import VideoDataset
from .video_dataset import VideoRecord
LOG = logging.getLogger(__name__)
# line_profiler injects a "profile" into __builtins__. When not running under
# line_profiler we need to inject our own passthrough
if type(__builtins__) is not dict or "profile" not in __builtins__:
profile = lambda f: f
| 33.33871 | 87 | 0.60716 |
67b70692a042775258dace6d02203639346f7fe2 | 5,947 | py | Python | ce_cli/function.py | maiot-io/cengine | 3a1946c449e8c5e1d216215df6eeab941eb1640a | [
"Apache-2.0"
] | 7 | 2020-10-13T12:47:32.000Z | 2021-03-12T12:00:14.000Z | ce_cli/function.py | maiot-io/cengine | 3a1946c449e8c5e1d216215df6eeab941eb1640a | [
"Apache-2.0"
] | null | null | null | ce_cli/function.py | maiot-io/cengine | 3a1946c449e8c5e1d216215df6eeab941eb1640a | [
"Apache-2.0"
] | 1 | 2021-01-23T02:19:42.000Z | 2021-01-23T02:19:42.000Z | import click
import ce_api
import base64
import os
from ce_cli.cli import cli, pass_info
from ce_cli.utils import check_login_status
from ce_cli.utils import api_client, api_call
from ce_api.models import FunctionCreate, FunctionVersionCreate
from ce_cli.utils import declare, notice
from tabulate import tabulate
from ce_cli.utils import format_uuid, find_closest_uuid
| 35.189349 | 86 | 0.648562 |
67ba0ceb8217748f29955b3f1f48be862f98b8da | 1,747 | py | Python | office-plugin/windows-office/program/wizards/ui/event/RadioDataAware.py | jerrykcode/kkFileView | 6efc3750665c9c4034798fb9fb3e74cd8144165c | [
"Apache-2.0"
] | 6,660 | 2018-01-13T12:16:53.000Z | 2022-03-31T15:15:28.000Z | office-plugin/windows-office/program/wizards/ui/event/RadioDataAware.py | jerrykcode/kkFileView | 6efc3750665c9c4034798fb9fb3e74cd8144165c | [
"Apache-2.0"
] | 208 | 2018-01-26T08:55:12.000Z | 2022-03-29T02:36:34.000Z | office-plugin/windows-office/program/wizards/ui/event/RadioDataAware.py | jerrykcode/kkFileView | 6efc3750665c9c4034798fb9fb3e74cd8144165c | [
"Apache-2.0"
] | 1,933 | 2018-01-15T13:08:40.000Z | 2022-03-31T11:28:59.000Z | #
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
from .CommonListener import ItemListenerProcAdapter
from .DataAware import DataAware
| 35.653061 | 70 | 0.68403 |
67ba1058171fe27c8c016baa860730f05f7fd4ed | 5,416 | py | Python | Allura/allura/lib/patches.py | shalithasuranga/allura | 4f7fba13415954d07f602a051ec697329dd3706b | [
"Apache-2.0"
] | 1 | 2019-03-17T04:16:15.000Z | 2019-03-17T04:16:15.000Z | Allura/allura/lib/patches.py | DalavanCloud/allura | a25329caed9e6d136a1004c33372e0632a16e352 | [
"Apache-2.0"
] | null | null | null | Allura/allura/lib/patches.py | DalavanCloud/allura | a25329caed9e6d136a1004c33372e0632a16e352 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import webob
import tg.decorators
from decorator import decorator
from pylons import request
import mock
import simplejson
from allura.lib import helpers as h
_patched = False
# must be saved outside the newrelic() method so that multiple newrelic()
# calls (e.g. during tests) don't cause the patching to get applied to itself
# over and over
old_controller_call = tg.controllers.DecoratedController._call
| 42.3125 | 94 | 0.675406 |
67bb468d4e8788f36e1783f576c1ab1f1ae90543 | 834 | py | Python | leetcode/binary_search/search_for_a_range.py | phantomnat/python-learning | addc7ba5fc4fb8920cdd2891d4b2e79efd1a524a | [
"MIT"
] | null | null | null | leetcode/binary_search/search_for_a_range.py | phantomnat/python-learning | addc7ba5fc4fb8920cdd2891d4b2e79efd1a524a | [
"MIT"
] | null | null | null | leetcode/binary_search/search_for_a_range.py | phantomnat/python-learning | addc7ba5fc4fb8920cdd2891d4b2e79efd1a524a | [
"MIT"
] | null | null | null | from typing import List
s = Solution()
ans = [
s.searchRange([1],0),
s.searchRange([5,7,7,8,8,8,9,10],8),
s.searchRange([7,7,7,8,10],7),
s.searchRange([7,7,7,8,10,10,10,10],10),
s.searchRange([7,7,7,8,10],10),
s.searchRange([7,7,7,7,8,10],10),
]
for a in ans:
print(a) | 23.828571 | 69 | 0.425659 |
67bbf09857ef02050b6c12ecac3ac6f6bf74d30b | 770 | py | Python | pi/Cart/main.py | polycart/polycart | 2c36921b126df237b109312a16dfb04f2b2ab20f | [
"Apache-2.0"
] | 3 | 2020-01-10T15:54:57.000Z | 2020-03-14T13:04:14.000Z | pi/Cart/main.py | polycart/polycart | 2c36921b126df237b109312a16dfb04f2b2ab20f | [
"Apache-2.0"
] | null | null | null | pi/Cart/main.py | polycart/polycart | 2c36921b126df237b109312a16dfb04f2b2ab20f | [
"Apache-2.0"
] | 1 | 2020-01-29T06:07:39.000Z | 2020-01-29T06:07:39.000Z | #!/usr/bin/python3
import cartinit
from kivy.app import App
from kivy.uix.screenmanager import Screen, ScreenManager, SlideTransition
from kivy.lang import Builder
from buttons import RoundedButton
cartinit.init()
# create ScreenManager as root, put all screens into
sm = ScreenManager()
sm.transition = SlideTransition()
screens = []
# load kv files
Builder.load_file('screens.kv')
if __name__ == '__main__':
app = CartApp()
screens.append(MainScreen())
sm.switch_to(screens[-1])
app.run()
| 18.780488 | 73 | 0.720779 |
67bece9167131625c374de6477b0b045ebb3b193 | 160 | py | Python | docs.bak/test.py | goujou/CompartmentalSystems | 4724555c33f11395ddc32738e8dfed7349ee155f | [
"MIT"
] | null | null | null | docs.bak/test.py | goujou/CompartmentalSystems | 4724555c33f11395ddc32738e8dfed7349ee155f | [
"MIT"
] | null | null | null | docs.bak/test.py | goujou/CompartmentalSystems | 4724555c33f11395ddc32738e8dfed7349ee155f | [
"MIT"
] | null | null | null | from CompartmentalSystems import smooth_reservoir_model
from CompartmentalSystems import smooth_model_run
from CompartmentalSystems import start_distributions
| 32 | 55 | 0.91875 |
67bee977fd10b6b9e05e382910c3fcfaf854728d | 6,482 | py | Python | src/functions_DJTB.py | QTGTech/DJTB-Generator | 96c36516b4bede5fee7a538d79e1e7b380f9d31f | [
"Apache-2.0"
] | null | null | null | src/functions_DJTB.py | QTGTech/DJTB-Generator | 96c36516b4bede5fee7a538d79e1e7b380f9d31f | [
"Apache-2.0"
] | null | null | null | src/functions_DJTB.py | QTGTech/DJTB-Generator | 96c36516b4bede5fee7a538d79e1e7b380f9d31f | [
"Apache-2.0"
] | 1 | 2017-12-08T18:39:01.000Z | 2017-12-08T18:39:01.000Z | import numpy as np
import re
"""
"""
OCC_LIMIT = 10
def load_and_parse(filepath, verbose=True, pad_to_tweets=False, tweet_length=280):
"""
Le nom est plutot equivoque. Charge le fichier txt de chemin 'filepath' et retire les artefacts de parsing
:param filepath: chemin d'acces vers le fichier (.txt contenant le texte brut des tweets)
:param verbose: affiche ou non l'etat d'avancement de l'algorithme
:param pad_to_tweets: permet de forcer les tweets faire 'tweet_length' caracteres
:param tweet_length: longueur des tweets dans le cas pad_to_tweets=True
:return: charset: set contenant les caracteres uniques utilises dans le texte (moins ceux supprimes car trop peu
utilises.
text: string contenant le texte brut nettoye.
"""
if verbose:
print("Starting Data parsing...\n")
# Lecture et caracterisation du corpus
text = open(filepath, 'r').read().lower()
charset = list(set(text))
vocab_size = len(charset)
# Suppression de certains caractres speciaux polluant la comprehension de la machine
re.sub(r"\n", ' ', text)
# Dtection des caractres n'apparaissant pas au moins OCC_LIMIT fois dans le corpus
nb_occ_chars = np.zeros(len(charset))
for i in range(len(charset)):
for j in range(len(text)):
if text[j] == charset[i]:
nb_occ_chars[i] += 1
vocab_occ = dict(zip(charset, nb_occ_chars))
key_blacklist = []
for key in vocab_occ:
if vocab_occ[key] < OCC_LIMIT:
key_blacklist.append(key)
# La suppression des caractres trop peu nombreux dans le corpus prend en compte les caracteres speciaux
# et s'efforce de les rendre lisibles dans les regular expressions en ajoutant un antislash
unreadable_chars = ['|', '.', '*' '^', '$', '+', '?']
for k in key_blacklist:
if k in unreadable_chars:
readable_k = '\\' + k
else:
readable_k = k
text = re.sub(readable_k, '', text)
del vocab_occ[k]
print("Deleted following characters :\n", key_blacklist, "\n(Insufficient occurences in corpus)\n")
# Suppression des 'http://www. ' qui ne menent rien et ajout d'espace avant les liens n'en ayant pas
text = re.sub('([0-9]|[a-z]|:|!)(http://|https://)', '\g<1> \g<2>', text)
text = re.sub('(http://www.|https://www.|http://)\n', '', text)
# Suppression des doubles et triples espaces
text = re.sub(' +', ' ', text)
if pad_to_tweets:
print("Padding tweets...")
iterator = 0
old_iterator = 0
text = text + ''
while text[iterator] != '':
if text[iterator] == '\n' and text[iterator + 1] != '':
padding_string = " " * (tweet_length - (iterator - old_iterator))
text = text[:iterator] + padding_string + text[(iterator+1):]
old_iterator += tweet_length
iterator += len(padding_string)
iterator += 1
return charset, text
def format_data(charset, data, sequence_length, verbose_x=False):
"""
:param sequence_length:
:param charset: set contenant tous les caracteres utilises par le texte
:param data: texte brut pre-nettoye ( l'aide de load_and_parse)
:return: x:
"""
# Dictionnaire liant chaque caractere a un entier et vice-versa(necessaire pour que le reseau les comprenne !)
ix_to_char = {ix: char for ix, char in enumerate(charset)}
char_to_ix = {char: ix for ix, char in enumerate(charset)}
vocab_size = len(charset)
# Creation de matrices de donnees. On va en fait decouper ensuite nos donnees en sequences de caracteres de longueur
# sequence_length. La matrice de donnees en 3 dimensions : une ligne correspond a une sequence, une colonne a un
# caractere dans cette sequence
# Le // evite de placer un float dans un in range. Je doute de la proprete mais jusqu'ici pas de soucis
x = np.zeros((len(data) // sequence_length, sequence_length, vocab_size))
y = np.zeros((len(data) // sequence_length, sequence_length, vocab_size))
# Le gros du boulot. Remplissage de la matrice ligne par ligne.
for i in range(0, len(data) // sequence_length):
x_sequence = data[i * sequence_length:(i + 1) * sequence_length]
if verbose_x:
print(x_sequence)
x_sequence_ix = [char_to_ix[value] for value in x_sequence]
input_sequence = np.zeros((sequence_length, vocab_size))
for j in range(sequence_length):
input_sequence[j][x_sequence_ix[j]] = 1.
x[i] = input_sequence
y_sequence = data[i * sequence_length + 1:(i + 1) * sequence_length + 1]
y_sequence_ix = [char_to_ix[value] for value in y_sequence]
target_sequence = np.zeros((sequence_length, vocab_size))
for j in range(sequence_length) :
target_sequence[j][y_sequence_ix[j]] = 1.
y[i] = target_sequence
return x, y, vocab_size, ix_to_char
# Generation d'un texte utilisant un modele existant
# --------------------------------TESTING------------------------------
if __name__ == "__main__":
chars, txt = load_and_parse("./data/tweets_small_raw.txt", pad_to_tweets=True)
x, y, v_s, tochar = format_data(chars, txt, 280)
| 38.583333 | 120 | 0.611848 |
67bfb2a09270657736e8e4b32cff8a3a6b09b92a | 141 | py | Python | src/tsp_c/__init__.py | kjudom/tsp-c | 2ed4ba83ac14443533e6167edf20a4199e871657 | [
"MIT"
] | null | null | null | src/tsp_c/__init__.py | kjudom/tsp-c | 2ed4ba83ac14443533e6167edf20a4199e871657 | [
"MIT"
] | null | null | null | src/tsp_c/__init__.py | kjudom/tsp-c | 2ed4ba83ac14443533e6167edf20a4199e871657 | [
"MIT"
] | null | null | null | from . import _tsp_c
from .tsp_c import solve_greedy
from .tsp_c import solve_SA
from .tsp_c import set_param_SA
from .tsp_c import solve_PSO | 28.2 | 31 | 0.829787 |
67bff67472f4b5e6324ab64de0cd6d6f2c3905b9 | 4,496 | py | Python | biosimulators_test_suite/results/data_model.py | Ryannjordan/Biosimulators_test_suite | 5f79f157ee8927df277b1967e9409ccfc6baf45f | [
"CC0-1.0",
"MIT"
] | null | null | null | biosimulators_test_suite/results/data_model.py | Ryannjordan/Biosimulators_test_suite | 5f79f157ee8927df277b1967e9409ccfc6baf45f | [
"CC0-1.0",
"MIT"
] | null | null | null | biosimulators_test_suite/results/data_model.py | Ryannjordan/Biosimulators_test_suite | 5f79f157ee8927df277b1967e9409ccfc6baf45f | [
"CC0-1.0",
"MIT"
] | null | null | null | """ Data model for results of test cases
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2021-01-01
:Copyright: 2021, Center for Reproducible Biomedical Modeling
:License: MIT
"""
from .._version import __version__
from ..warnings import TestCaseWarning # noqa: F401
import enum
__all__ = [
'TestCaseResultType',
'TestCaseResult',
'TestResultsReport',
]
| 37.157025 | 119 | 0.61121 |
67c0cd97d0c8bd3cb2723928b3e6589de9cc3b73 | 8,834 | py | Python | Projects/Project1/regan/regression.py | adelezaini/MachineLearning | dc3f34f5d509bed6a993705373c46be4da3f97db | [
"MIT"
] | null | null | null | Projects/Project1/regan/regression.py | adelezaini/MachineLearning | dc3f34f5d509bed6a993705373c46be4da3f97db | [
"MIT"
] | 1 | 2021-10-03T15:16:07.000Z | 2021-10-03T15:16:07.000Z | Projects/Project1/regan/regression.py | adelezaini/MachineLearning | dc3f34f5d509bed6a993705373c46be4da3f97db | [
"MIT"
] | null | null | null | # The MIT License (MIT)
#
# Copyright 2021 Fridtjof Gjengset, Adele Zaini, Gaute Holen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the Software), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software. THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import numpy as np
from random import random, seed
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn.preprocessing import StandardScaler
from sklearn.utils import resample
# FrankeFunction: a two-variables function to create the dataset of our vanilla problem
# 3D plot of FrankeFunction
# Create xyz dataset from the FrankeFunction with a added normal distributed noise
# Error analysis: MSE and R2 score
# SVD theorem
# SVD inversion
# Design matrix for two indipendent variables x,y
# Splitting and rescaling data (rescaling is optional)
# Default values: 20% of test data and the scaler is StandardScaler without std.dev.
# OLS equation
# Return the rolling mean of a vector and two values at one sigma from the rolling average
# Plot MSE in function of complexity of the model (rolling mean)
| 38.745614 | 191 | 0.695381 |
67c210c665f75559fb74fd11831d3b0f31fccc08 | 3,521 | py | Python | habittracker/commands/list-habits.py | anjakuchenbecker/oofpp_habits_project | 5db8e46fedc7ce839008bf8a7f00eabfee2ba901 | [
"MIT"
] | 2 | 2021-02-16T16:49:16.000Z | 2021-05-13T13:22:02.000Z | habittracker/commands/list-habits.py | anjakuchenbecker/oofpp_habits_project | 5db8e46fedc7ce839008bf8a7f00eabfee2ba901 | [
"MIT"
] | null | null | null | habittracker/commands/list-habits.py | anjakuchenbecker/oofpp_habits_project | 5db8e46fedc7ce839008bf8a7f00eabfee2ba901 | [
"MIT"
] | null | null | null | import json
import shelve
import sys
import os
import click
from prettytable import PrettyTable
import app_config as conf
import analytics
def get_json_out(raw_text):
"""Convert input raw text and return JSON."""
return json.dumps(raw_text, indent=4, sort_keys=False)
def get_human_out(raw_text):
"""Convert input raw text and return human readable format (table style)."""
human_text = PrettyTable(["id", "name", "description", "periodicity", "created", "checkoffs"])
for item in raw_text:
human_text.add_row([item["id"], item["name"], item["description"], item["periodicity"], item["created"],
"\n".join(item["checkoffs"])])
return human_text
| 44.0125 | 113 | 0.585345 |
67c2e5278bdfc21f2e207b4643b01e0663656b3d | 4,065 | py | Python | src/zhinst/toolkit/helpers/shf_waveform.py | MadSciSoCool/zhinst-toolkit | 5ea884db03f53029552b7898dae310f22ce622ba | [
"MIT"
] | null | null | null | src/zhinst/toolkit/helpers/shf_waveform.py | MadSciSoCool/zhinst-toolkit | 5ea884db03f53029552b7898dae310f22ce622ba | [
"MIT"
] | null | null | null | src/zhinst/toolkit/helpers/shf_waveform.py | MadSciSoCool/zhinst-toolkit | 5ea884db03f53029552b7898dae310f22ce622ba | [
"MIT"
] | null | null | null | # Copyright (C) 2020 Zurich Instruments
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
import numpy as np
def _adjust_scale(self, wave):
"""Adjust the scaling of the waveform.
The data is actually sent as complex values in the range of (-1, 1).
"""
if len(wave) == 0:
wave = np.zeros(1)
n = len(wave)
n = min(n, self.buffer_length)
m = np.max(np.abs(wave))
data = np.zeros(self.buffer_length)
if self._align_start:
if len(wave) > n:
data[:n] = wave[:n] / m if m >= 1 else wave[:n]
else:
data[: len(wave)] = wave / m if m >= 1 else wave
else:
if len(wave) > n:
data[:n] = (
wave[len(wave) - n :] / m if m >= 1 else wave[len(wave) - n :]
)
else:
data[(self.buffer_length - len(wave)) :] = wave / m if m >= 1 else wave
complex_data = data.astype(complex)
return complex_data
def _round_up(self, waveform_length):
"""Adapt to the allowed granularity and minimum length of waveforms.
The length of the waveform is rounded up if it does not match
the waveform granularity and minimum waveform length specifications
of the instrument.
"""
length = max(waveform_length, self._min_length)
multiplier, rest = divmod(length, self._granularity)
if not rest:
return length
else:
return (multiplier + 1) * self._granularity
| 34.74359 | 87 | 0.60861 |
67c3fb858e01fe9489719be010810d56f24cb176 | 3,905 | py | Python | mongoadmin/auth/forms.py | hywhut/django-mongoadmin | 7252f9724e4d556878a907914424745f5fdb0d42 | [
"BSD-3-Clause"
] | null | null | null | mongoadmin/auth/forms.py | hywhut/django-mongoadmin | 7252f9724e4d556878a907914424745f5fdb0d42 | [
"BSD-3-Clause"
] | null | null | null | mongoadmin/auth/forms.py | hywhut/django-mongoadmin | 7252f9724e4d556878a907914424745f5fdb0d42 | [
"BSD-3-Clause"
] | 1 | 2020-05-10T13:57:36.000Z | 2020-05-10T13:57:36.000Z | # from django.utils.translation import ugettext_lazy as _
# from django import forms
# from django.contrib.auth.forms import ReadOnlyPasswordHashField
#
# from mongoengine.django.auth import User
#
# from mongodbforms import DocumentForm
#
# class UserCreationForm(DocumentForm):
# """
# A form that creates a user, with no privileges, from the given username and
# password.
# """
# error_messages = {
# 'duplicate_username': _("A user with that username already exists."),
# 'password_mismatch': _("The two password fields didn't match."),
# }
# username = forms.RegexField(label=_("Username"), max_length=30,
# regex=r'^[\w.@+-]+$',
# help_text=_("Required. 30 characters or fewer. Letters, digits and "
# "@/./+/-/_ only."),
# error_messages={
# 'invalid': _("This value may contain only letters, numbers and "
# "@/./+/-/_ characters.")})
# password1 = forms.CharField(label=_("Password"),
# widget=forms.PasswordInput)
# password2 = forms.CharField(label=_("Password confirmation"),
# widget=forms.PasswordInput,
# help_text=_("Enter the same password as above, for verification."))
#
# class Meta:
# model = User
# fields = ("username",)
#
# def clean_username(self):
# # Since User.username is unique, this check is redundant,
# # but it sets a nicer error message than the ORM. See #13147.
# username = self.cleaned_data["username"]
# try:
# User.objects.get(username=username)
# except User.DoesNotExist:
# return username
# raise forms.ValidationError(
# self.error_messages['duplicate_username'],
# code='duplicate_username',
# )
#
# def clean_password2(self):
# password1 = self.cleaned_data.get("password1")
# password2 = self.cleaned_data.get("password2")
# if password1 and password2 and password1 != password2:
# raise forms.ValidationError(
# self.error_messages['password_mismatch'],
# code='password_mismatch',
# )
# return password2
#
# def save(self, commit=True):
# user = super(UserCreationForm, self).save(commit=False)
# self.instance = user.set_password(self.cleaned_data["password1"])
# return self.instance
#
#
# class UserChangeForm(DocumentForm):
# username = forms.RegexField(
# label=_("Username"), max_length=30, regex=r"^[\w.@+-]+$",
# help_text=_("Required. 30 characters or fewer. Letters, digits and "
# "@/./+/-/_ only."),
# error_messages={
# 'invalid': _("This value may contain only letters, numbers and "
# "@/./+/-/_ characters.")})
# password = ReadOnlyPasswordHashField(label=_("Password"),
# help_text=_("Raw passwords are not stored, so there is no way to see "
# "this user's password, but you can change the password "
# "using <a href=\"password/\">this form</a>."))
#
# class Meta:
# model = User
#
# def __init__(self, *args, **kwargs):
# super(UserChangeForm, self).__init__(*args, **kwargs)
# f = self.fields.get('user_permissions', None)
# if f is not None:
# f.queryset = f.queryset.select_related('content_type')
#
# def clean_password(self):
# # Regardless of what the user provides, return the initial value.
# # This is done here, rather than on the field, because the
# # field does not have access to the initial value
# return self.initial["password"]
#
# def clean_email(self):
# email = self.cleaned_data.get("email")
# if email == '':
# return None
# return email
| 40.257732 | 81 | 0.589245 |
67c4dc33394c474c6cabe97b41d6b2b8fa22728a | 2,554 | py | Python | odin-libraries/python/odin_test.py | gspu/odin | a01d039e809eca257fa78d358fe72eb3ad2a09f2 | [
"MIT"
] | 447 | 2020-05-21T11:22:16.000Z | 2022-03-13T01:28:25.000Z | odin-libraries/python/odin_test.py | gspu/odin | a01d039e809eca257fa78d358fe72eb3ad2a09f2 | [
"MIT"
] | 40 | 2020-05-21T13:17:57.000Z | 2022-03-02T08:44:45.000Z | odin-libraries/python/odin_test.py | gspu/odin | a01d039e809eca257fa78d358fe72eb3ad2a09f2 | [
"MIT"
] | 25 | 2020-05-28T21:23:13.000Z | 2022-03-18T19:31:31.000Z | """ Runs tests for Ptyhon Odin SDK """
import unittest
from os import environ
import random
from pymongo import MongoClient
import pyodin as odin
if __name__ == "__main__":
unittest.main() # run all tests
| 34.513514 | 94 | 0.664056 |
67c4e469d6bfee9cfc7c187e94df576f7ce20488 | 657 | py | Python | artemis/general/test_dict_ops.py | peteroconnor-bc/artemis | ad2871fae7d986bf10580eec27aee5b7315adad5 | [
"BSD-2-Clause-FreeBSD"
] | 235 | 2016-08-26T14:18:51.000Z | 2022-03-13T10:54:39.000Z | artemis/general/test_dict_ops.py | peteroconnor-bc/artemis | ad2871fae7d986bf10580eec27aee5b7315adad5 | [
"BSD-2-Clause-FreeBSD"
] | 112 | 2016-04-30T11:48:38.000Z | 2021-01-12T20:17:32.000Z | artemis/general/test_dict_ops.py | peteroconnor-bc/artemis | ad2871fae7d986bf10580eec27aee5b7315adad5 | [
"BSD-2-Clause-FreeBSD"
] | 31 | 2016-11-05T19:09:19.000Z | 2021-09-13T07:35:40.000Z | from artemis.general.dict_ops import cross_dict_dicts, merge_dicts
__author__ = 'peter'
if __name__ == "__main__":
test_dict_merge()
test_cross_dict_dicts()
| 22.655172 | 98 | 0.427702 |
67c5e84b87b6ce3f11354746686bb279c5332a32 | 1,317 | py | Python | plur/eval/cubert_swapped_operand_classification_eval.py | VHellendoorn/plur | 63ea4b8dd44b43d26177fb23b0572e0b7c20f4cd | [
"Apache-2.0"
] | 52 | 2021-12-03T17:54:27.000Z | 2022-03-30T13:38:16.000Z | plur/eval/cubert_swapped_operand_classification_eval.py | VHellendoorn/plur | 63ea4b8dd44b43d26177fb23b0572e0b7c20f4cd | [
"Apache-2.0"
] | 2 | 2022-02-18T01:04:45.000Z | 2022-03-31T17:20:25.000Z | plur/eval/cubert_swapped_operand_classification_eval.py | VHellendoorn/plur | 63ea4b8dd44b43d26177fb23b0572e0b7c20f4cd | [
"Apache-2.0"
] | 6 | 2021-12-21T06:00:44.000Z | 2022-03-30T21:10:46.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute class and mean-per-class accuracy for CuBERT SO."""
from plur.eval.cubert_classification_eval import CuBertClassificationEval
from plur.stage_1.cubert_swapped_operand_classification_dataset import CuBertSwappedOperandClassificationDataset
| 38.735294 | 112 | 0.742597 |
67c77d71f1fdbcad027edc06ae60ed4f292fc007 | 908 | py | Python | Dynamic Programming/Paint House II.py | ikaushikpal/DS-450-python | 9466f77fb9db9e6a5bb3f20aa89ba6332f49e848 | [
"MIT"
] | 3 | 2021-06-28T12:04:19.000Z | 2021-09-07T07:23:41.000Z | Dynamic Programming/Paint House II.py | ikaushikpal/DS-450-python | 9466f77fb9db9e6a5bb3f20aa89ba6332f49e848 | [
"MIT"
] | null | null | null | Dynamic Programming/Paint House II.py | ikaushikpal/DS-450-python | 9466f77fb9db9e6a5bb3f20aa89ba6332f49e848 | [
"MIT"
] | 1 | 2021-06-28T15:42:55.000Z | 2021-06-28T15:42:55.000Z |
if __name__ == "__main__":
cost = [[1, 5, 7, 2, 1, 4],
[5, 8, 4, 3, 6, 1],
[3, 2, 9, 7, 2, 3],
[1, 2, 4, 9, 1, 7]]
n, k = len(cost), len(cost[0])
print(Solution().paintHouse(cost, n, k)) | 29.290323 | 66 | 0.4163 |
67c9536255b8a2a78151de4a15608734a1f092c8 | 6,445 | py | Python | dufi/gui/balloontip/__init__.py | Shura1oplot/dufi | c9c25524020e57d3670c298acca305900b6490e7 | [
"MIT"
] | null | null | null | dufi/gui/balloontip/__init__.py | Shura1oplot/dufi | c9c25524020e57d3670c298acca305900b6490e7 | [
"MIT"
] | null | null | null | dufi/gui/balloontip/__init__.py | Shura1oplot/dufi | c9c25524020e57d3670c298acca305900b6490e7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import sys
import os
import threading
import warnings
import locale
import logging
import win32api
import win32con
import win32gui
import win32ts
PY2 = sys.version_info < (3,)
if PY2:
reload(sys)
sys.setdefaultencoding(locale.getpreferredencoding() or "utf-8")
NIN_BALLOONSHOW = win32con.WM_USER + 2
NIN_BALLOONHIDE = win32con.WM_USER + 3
NIN_BALLOONTIMEOUT = win32con.WM_USER + 4
NIN_BALLOONUSERCLICK = win32con.WM_USER + 5
WM_TRAY_EVENT = win32con.WM_USER + 20
win32gui.InitCommonControls()
################################################################################
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
_test_async()
| 31.286408 | 84 | 0.611482 |
67caf9eed648abdd18c55cb059b56dcfdeff5272 | 7,893 | py | Python | ProxyIP.py | plumefox/BiliTrend | 449bade3cbaa92878fab866457f513aa81dcd567 | [
"Apache-2.0"
] | 2 | 2019-05-11T18:05:34.000Z | 2022-02-18T13:34:21.000Z | ProxyIP.py | plumefox/BiliTrend | 449bade3cbaa92878fab866457f513aa81dcd567 | [
"Apache-2.0"
] | null | null | null | ProxyIP.py | plumefox/BiliTrend | 449bade3cbaa92878fab866457f513aa81dcd567 | [
"Apache-2.0"
] | null | null | null | # * coding:utf-8 *
# Author : Lucy Cai
# Create Time : 2019/4/12
# IDE : PyCharm
# Copyright(C) 2019 Lucy Cai/plumefox (LucysTime@outlook.com)
# Github:https://github.com/plumefox/BiliTrend/
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/plumefox/BiliTrend/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================
from urllib import request
from lxml import etree
if __name__ == '__main__':
headers = {
'Host': 'www.bilibili.com',
'Referer': 'https://www.bilibili.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 '
'Safari/537.36 Edge/16.16299'
}
url = 'https://www.bilibili.com/ranking/'
a = ProxyIP()
a.readProxyIP()
u = a.getProxyIP(url,headers)
print(u)
print("a")
| 33.444915 | 149 | 0.548714 |
67cc334615da33b43cc91dce1c8d5fcb9a162b36 | 29,914 | py | Python | name_matching/test/test_name_matcher.py | DeNederlandscheBank/name_matching | 366a376596403a1fd912cbf130062016b82306bf | [
"MIT"
] | null | null | null | name_matching/test/test_name_matcher.py | DeNederlandscheBank/name_matching | 366a376596403a1fd912cbf130062016b82306bf | [
"MIT"
] | null | null | null | name_matching/test/test_name_matcher.py | DeNederlandscheBank/name_matching | 366a376596403a1fd912cbf130062016b82306bf | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import os.path as path
import abydos.distance as abd
import abydos.phonetic as abp
import pytest
from scipy.sparse import csc_matrix
from sklearn.feature_extraction.text import TfidfVectorizer
import name_matching.name_matcher as nm
def test_vectorise_data(name_match):
name_match._vectorise_data(transform=False)
assert len(name_match._vec.vocabulary_) > 0
def test_search_for_possible_matches_error(adjusted_name):
name_matcher = nm.NameMatcher()
with pytest.raises(RuntimeError):
name_matcher._search_for_possible_matches(adjusted_name)
def test_do_name_matching_full(name_match, adjusted_name):
result = name_match.match_names(adjusted_name, 'company_name')
assert np.sum(result['match_index'] == result.index) == 1922
def test_do_name_matching_split(name_match, adjusted_name):
name_match._preprocess_split = True
result = name_match.match_names(adjusted_name.iloc[44, :], 'company_name')
assert np.any(result['match_index'] == 44)
def test_do_name_matching_series(name_match, adjusted_name):
result = name_match.match_names(adjusted_name.iloc[44, :], 'company_name')
assert np.any(result['match_index'] == 44)
def test_do_name_matching_error(adjusted_name):
name_match = nm.NameMatcher()
with pytest.raises(ValueError):
name_match.match_names(adjusted_name, 'company_name')
| 55.499072 | 197 | 0.526576 |
67ccd647dc5505b2bf0b3f2efbfadce995daded7 | 645 | py | Python | data/train/python/67ccd647dc5505b2bf0b3f2efbfadce995daded7create_new_default.py | harshp8l/deep-learning-lang-detection | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | [
"MIT"
] | 84 | 2017-10-25T15:49:21.000Z | 2021-11-28T21:25:54.000Z | data/train/python/67ccd647dc5505b2bf0b3f2efbfadce995daded7create_new_default.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 5 | 2018-03-29T11:50:46.000Z | 2021-04-26T13:33:18.000Z | data/train/python/67ccd647dc5505b2bf0b3f2efbfadce995daded7create_new_default.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 24 | 2017-11-22T08:31:00.000Z | 2022-03-27T01:22:31.000Z | '''
Created on Dec 21, 2014
@author: Ben
'''
def create_new_default(directory: str, dest: dict, param: dict):
'''
Creates new default parameter file based on parameter settings
'''
with open(directory, 'w') as new_default:
new_default.write(
'''TARGET DESTINATION = {}
SAVE DESTINATION = {}
SAVE DESTINATION2 = {}
SAVE STARTUP DEST1 = {}
SAVE STARTUP DEST2 = {}
SAVE TYPE DEST1 = {}
SAVE TYPE DEST2 = {}
'''.format(dest['target'], dest['save'], dest['save2'],
param["dest1_save_on_start"], param["dest2_save_on_start"],
param["save_dest1"], param["save_dest2"])
)
| 23.888889 | 70 | 0.612403 |
67cdceeb2a0e37311849079ddc2d4d94bc900a6a | 4,129 | py | Python | analysis/SiPMPE_reader.py | akira-okumura/isee_sipm | dff98c82ed8ef950c450c83ad8951743e3799e94 | [
"MIT"
] | 1 | 2019-07-08T02:43:12.000Z | 2019-07-08T02:43:12.000Z | analysis/SiPMPE_reader.py | akira-okumura/ISEE_SiPM | dff98c82ed8ef950c450c83ad8951743e3799e94 | [
"MIT"
] | null | null | null | analysis/SiPMPE_reader.py | akira-okumura/ISEE_SiPM | dff98c82ed8ef950c450c83ad8951743e3799e94 | [
"MIT"
] | null | null | null | import numpy as np
import math
import ROOT
import sys
# #
# PEd = PEdistr('/Volumes/Untitled/zenin/linearity_465/linearity_465_sipm/hists/3500_4_465')
#
# total = PEd.GetLambda()
# stat_err = PEd.GetStatError()
# sys_err = PEd.GetSysError()
#
# print('total lambda = %f \u00B1 %f stat \u00B1 %f sys'%(total, stat_err, sys_err))
# print('relative uncertainty = %f%% stat + %f%% sys'%(stat_err/total*100, sys_err/total*100))
#
# h = PEd.GetLambdaDistr().Clone()
# print(h.GetBinContent(9))
# h.Draw()
| 34.123967 | 102 | 0.534996 |
67cde7d5e3ff3451bd18f756ff702549907cc3a3 | 2,364 | py | Python | bad_apps_blog/__init__.py | bkesk/bad-apps-blog | 86df1e848cd17f17bce9bb06d6c1ac1f81b23b9e | [
"BSD-3-Clause"
] | null | null | null | bad_apps_blog/__init__.py | bkesk/bad-apps-blog | 86df1e848cd17f17bce9bb06d6c1ac1f81b23b9e | [
"BSD-3-Clause"
] | 1 | 2022-03-31T00:30:57.000Z | 2022-03-31T21:31:17.000Z | bad_apps_blog/__init__.py | bkesk/bad-apps-blog | 86df1e848cd17f17bce9bb06d6c1ac1f81b23b9e | [
"BSD-3-Clause"
] | null | null | null | """
Bad Apps Blog
Author: Brandon Eskridge (a.k.a. 7UR7L3)
(Initial commit is based on the official Flask tutorial)
About: This app began as an (essentially) exact copy
of the official Flask tutorial (linke below). It is
intented as an opportunity to practice application
security, secure design, and secure coding techniques.
At the end of the Flask tutorial, the interested student
is challenged to implement several features. In order to
achive that goal, we will attempt to implement those features
while "pushing left" (security-wise) in the process.
Official Flask tutorial : https://flask.palletsprojects.com/en/2.0.x/tutorial/
"""
import os
import secrets
from flask import Flask
import logging
logging.basicConfig(level=logging.INFO,format='%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s')
| 29.55 | 112 | 0.706853 |
67ce55c048774bb454c705b23d4003d7370d1d13 | 204 | py | Python | status/urls.py | Khryptooo/infra_api | 15b69dea8e0ce1795525f96d9362722151b3c8f7 | [
"BSD-2-Clause"
] | null | null | null | status/urls.py | Khryptooo/infra_api | 15b69dea8e0ce1795525f96d9362722151b3c8f7 | [
"BSD-2-Clause"
] | null | null | null | status/urls.py | Khryptooo/infra_api | 15b69dea8e0ce1795525f96d9362722151b3c8f7 | [
"BSD-2-Clause"
] | null | null | null | from django.conf.urls import patterns, url
from status import views
urlpatterns = patterns('',
url(r'^ups$', views.ups_status, name='ups_status'),
url(r'^tor$', views.tor_status, name='tor_status'),
)
| 25.5 | 52 | 0.720588 |
67ce7c38eacf87bac8bd21b2a7cec718eeabebeb | 9,100 | py | Python | automation/auto_update_image_pr.py | WaqasAhmedLatif/cloud-native-edition | 1e6002f27ea971c153df59373e30d4506e9932dc | [
"Apache-2.0"
] | 23 | 2020-04-18T14:51:41.000Z | 2022-03-31T19:59:40.000Z | automation/auto_update_image_pr.py | WaqasAhmedLatif/cloud-native-edition | 1e6002f27ea971c153df59373e30d4506e9932dc | [
"Apache-2.0"
] | 236 | 2020-04-22T08:59:27.000Z | 2022-03-31T07:21:12.000Z | automation/auto_update_image_pr.py | WaqasAhmedLatif/cloud-native-edition | 1e6002f27ea971c153df59373e30d4506e9932dc | [
"Apache-2.0"
] | 23 | 2020-04-19T15:25:59.000Z | 2022-03-16T17:17:36.000Z | import os
import json
from common import update_json_file, get_logger, exec_cmd
from yamlparser import Parser
from pathlib import Path
logger = get_logger("update-image")
# Functions that work to update gluu_versions.json
def determine_final_official_and_dev_version(tag_list):
    """
    Determine the latest official image tag (e.g. "4.2.2_02") and the matching
    dev tag (e.g. "4.2.2_dev") from a repository's list of Docker tags.

    Tags are assumed to follow the layout ``M.m.p_NN`` / ``M.m.p_dev`` so that
    ``tag[4:5]`` is the patch digit and ``tag[6:8]`` is the two-digit build
    number.  NOTE(review): tags deviating from this layout (patch >= 10,
    other separators) would be mis-parsed -- verify against the tag inventory.

    :param tag_list: list of tag strings for one repository
    :return: tuple (official image tag, dev image tag); either may be "" when
        no matching tag exists
    """
    # Check for the highest major.minor.patch i.e 4.2.0 vs 4.2.2
    dev_image = ""
    patch_list = []
    for tag in tag_list:
        patch_list.append(int(tag[4:5]))
    # Remove duplicates
    patch_list = list(set(patch_list))
    # Sort
    patch_list.sort()
    # Highest patch digit seen across all tags (kept as str for slice compares)
    highest_major_minor_patch_number = str(patch_list[-1])
    versions_list = []
    for tag in tag_list:
        if "dev" in tag and tag[4:5] == highest_major_minor_patch_number:
            dev_image = tag[0:5] + "_dev"
        # Exclude any tag with the following
        # ("a" also skips alpha-style tags -- NOTE(review): confirm intent)
        if "dev" not in tag and "a" not in tag and tag[4:5] == highest_major_minor_patch_number:
            versions_list.append(int(tag[6:8]))
    # A case were only a dev version of a new patch is available then a lower stable patch should be checked.
    # i.e there is no 4.3.0_01 but there is 4.2.2_dev
    if not versions_list:
        highest_major_minor_patch_number = str(int(highest_major_minor_patch_number) - 1)
        for tag in tag_list:
            if not dev_image and "dev" in tag and tag[4:5] == highest_major_minor_patch_number:
                dev_image = tag[0:5] + "_dev"
            # Exclude any tag with the following
            if "dev" not in tag and "a" not in tag and tag[4:5] == highest_major_minor_patch_number:
                versions_list.append(int(tag[6:8]))
    # Remove duplicates
    versions_list = list(set(versions_list))
    # Sort
    versions_list.sort()
    # Return highest patch
    highest_major_minor_patch_image_patch = str(versions_list[-1])
    # Zero-pad single-digit build numbers so e.g. "1" matches the "_01" suffix
    if len(highest_major_minor_patch_image_patch) == 1:
        highest_major_minor_patch_image_patch = "0" + highest_major_minor_patch_image_patch
    highest_major_minor_patch_image = ""
    # Find the concrete tag string carrying the winning patch/build combination
    for tag in tag_list:
        if "dev" not in tag and highest_major_minor_patch_image_patch in tag \
                and tag[4:5] == highest_major_minor_patch_number:
            highest_major_minor_patch_image = tag
    return highest_major_minor_patch_image, dev_image
def determine_major_version(all_repos_tags):
    """
    Determine the highest official major.minor version (e.g. 4.1, 4.2)
    published for the oxauth repository.

    :param all_repos_tags: dict mapping repo name -> list of tag strings;
        must contain an "oxauth" entry
    :return: highest major.minor version as a float (e.g. 4.2)
    """
    # Tags carrying any of these markers are not official release tags.
    blacklist = ("dev", "latest", "secret", "gluu-engine")
    majors = {
        float(tag[0:3])
        for tag in all_repos_tags["oxauth"]
        if not any(marker in tag for marker in blacklist)
    }
    # sorted(...)[-1] mirrors the original (IndexError when no official tag).
    return sorted(majors)[-1]
def get_docker_repo_tag(org, repo):
    """
    Fetch the tag names of a Docker Hub repository.

    Queries the Docker Hub v2 API (first 100 tags) via a curl subprocess.

    :param org: Docker Hub organisation, e.g. "gluufederation"
    :param repo: repository name, e.g. "oxauth"
    :return: dict with a single entry ``{repo: [tag_name, ...]}``; the list is
        empty when the request failed or returned no output
    """
    logger.info("Getting docker tag for repository {}.".format(repo))
    curl_command = " ".join(
        ["curl", "-s",
         "https://hub.docker.com/v2/repositories/{}/{}/tags/?page_size=100".format(org, repo)])
    stdout = None
    try:
        stdout, stderr, retcode = exec_cmd(curl_command)
    except Exception:
        # Bug fix: the original caught (IndexError, Exception) -- redundant,
        # since Exception already covers IndexError.
        logger.error("Failed to curl\n{}".format(curl_command))
    image_tags = []
    # Bug fix: the original unconditionally ran json.loads(stdout) and crashed
    # with a TypeError when the curl call failed (stdout left as None); now we
    # degrade gracefully to an empty tag list.
    if stdout:
        for tag in json.loads(stdout)["results"]:
            image_tags.append(tag["name"])
    return {repo: image_tags}
def filter_all_repo_dictionary_tags(all_repos_tags, major_official_version):
    """
    Keep, for every repository, only the tags whose leading "major.minor"
    prefix (first three characters) equals ``major_official_version``.

    :param all_repos_tags: dict mapping repo name -> list of tag strings
    :param major_official_version: version prefix to keep, e.g. "4.2"
    :return: new dict with the same keys and the filtered tag lists
    """
    return {
        repo: [tag for tag in tags if tag[0:3] == major_official_version]
        for repo, tags in all_repos_tags.items()
    }
def analyze_filtered_dict_return_final_dict(filtered_all_repos_tags, major_official_version):
    """
    Analyze the filtered tag dictionary and build the final mapping of
    one official version and one dev version to their per-image dicts,
    updating Gluu's main values.yaml along the way.

    NOTE(review): ``update_dicts_and_yamls`` is not defined in this chunk --
    presumably a helper (possibly a closure populating
    ``final_official_version_dict`` / ``final_dev_version_dict``) that was
    lost in extraction; ``official_version`` is assigned but apparently
    unused here. Verify against the full module.

    :param filtered_all_repos_tags: dict repo -> tags, pre-filtered to one major version
    :param major_official_version: major.minor version string, e.g. "4.2"
    :return: dict {official version: {...}, dev version: {...}}
    """
    final_official_version_dict = dict()
    final_dev_version_dict = dict()
    # Gluus main values.yaml
    gluu_values_file = Path("../pygluu/kubernetes/templates/helm/gluu/values.yaml").resolve()
    gluu_values_file_parser = Parser(gluu_values_file, True)
    dev_version = ""
    for repo, tag_list in filtered_all_repos_tags.items():
        # dev_version keeps the value from the LAST repo iterated
        official_version, dev_version = determine_final_official_and_dev_version(tag_list)
        # Dispatch each known repo to its settings key (and optional yaml key)
        if repo == "casa":
            update_dicts_and_yamls("CASA", repo, tag_list)
        elif repo == "oxd-server":
            update_dicts_and_yamls("OXD", repo, tag_list)
        elif repo == "fido2":
            update_dicts_and_yamls("FIDO2", repo, tag_list)
        elif repo == "scim":
            update_dicts_and_yamls("SCIM", repo, tag_list)
        elif repo == "config-init":
            update_dicts_and_yamls("CONFIG", repo, tag_list, "config")
        elif repo == "cr-rotate":
            update_dicts_and_yamls("CACHE_REFRESH_ROTATE", repo, tag_list)
        elif repo == "certmanager":
            update_dicts_and_yamls("CERT_MANAGER", repo, tag_list, "oxauth-key-rotation")
        elif repo == "opendj":
            update_dicts_and_yamls("LDAP", repo, tag_list, "opendj")
        elif repo == "jackrabbit":
            update_dicts_and_yamls("JACKRABBIT", repo, tag_list)
        elif repo == "oxauth":
            update_dicts_and_yamls("OXAUTH", repo, tag_list)
        elif repo == "oxpassport":
            update_dicts_and_yamls("OXPASSPORT", repo, tag_list)
        elif repo == "oxshibboleth":
            update_dicts_and_yamls("OXSHIBBOLETH", repo, tag_list)
        elif repo == "oxtrust":
            update_dicts_and_yamls("OXTRUST", repo, tag_list)
        elif repo == "persistence":
            update_dicts_and_yamls("PERSISTENCE", repo, tag_list)
        elif repo == "upgrade":
            update_dicts_and_yamls("UPGRADE", repo, tag_list)
    gluu_versions_dict = {major_official_version: final_official_version_dict,
                          dev_version: final_dev_version_dict}
    # Persist the modified values.yaml
    gluu_values_file_parser.dump_it()
    return gluu_versions_dict
if __name__ == '__main__':
    # Script entry point. NOTE(review): `main` is not defined in this chunk;
    # it is presumably defined elsewhere in the module.
    main()
| 42.325581 | 120 | 0.656703 |
67ce95b83726624dc137a006b385290c23c7bf1c | 2,767 | py | Python | es_reporting_tool/generate_report.py | yugendra/elasticsearch_reporting_tool | bdbb5ae95efdc7552d9dfe771ecf44432246d7bb | [
"Apache-2.0"
] | null | null | null | es_reporting_tool/generate_report.py | yugendra/elasticsearch_reporting_tool | bdbb5ae95efdc7552d9dfe771ecf44432246d7bb | [
"Apache-2.0"
] | 4 | 2021-06-01T21:49:24.000Z | 2022-01-13T00:39:06.000Z | es_reporting_tool/generate_report.py | yugendra/elasticsearch_reporting_tool | bdbb5ae95efdc7552d9dfe771ecf44432246d7bb | [
"Apache-2.0"
] | null | null | null | from reportlab.lib import colors
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.units import inch
from reportlab.lib.pagesizes import A3
from reportlab.platypus import Paragraph, SimpleDocTemplate, Table, TableStyle
from reportlab.lib.enums import TA_CENTER
import datetime
| 36.893333 | 118 | 0.550777 |
67cee025d3929b6dcb02f8283d7e7b80eb2a3619 | 2,958 | py | Python | fe/functional.py | proteneer/timemachine | feee9f24adcb533ab9e1c15a3f4fa4dcc9d9a701 | [
"Apache-2.0"
] | 91 | 2019-01-05T17:03:04.000Z | 2022-03-11T09:08:46.000Z | fe/functional.py | proteneer/timemachine | feee9f24adcb533ab9e1c15a3f4fa4dcc9d9a701 | [
"Apache-2.0"
] | 474 | 2019-01-07T14:33:15.000Z | 2022-03-31T19:15:12.000Z | fe/functional.py | proteneer/timemachine | feee9f24adcb533ab9e1c15a3f4fa4dcc9d9a701 | [
"Apache-2.0"
] | 12 | 2019-01-13T00:40:36.000Z | 2022-01-14T10:23:54.000Z | from jax import config
config.update("jax_enable_x64", True)
from jax import custom_jvp, numpy as np
from timemachine.lib.potentials import SummedPotential
def wrap_impl(impl, pack=lambda x: x):
"""Construct a differentiable function U(x, params, box, lam) -> float
from a single unbound potential
"""
U.defjvps(U_jvp_x, U_jvp_params, None, U_jvp_lam)
return U
def construct_differentiable_interface(unbound_potentials, precision=np.float32):
"""Construct a differentiable function U(x, params, box, lam) -> float
from a collection of unbound potentials
>>> U = construct_differentiable_interface(unbound_potentials)
>>> _ = grad(U, (0,1,3))(coords, sys_params, box, lam)
This implementation computes the sum of the component potentials in Python
"""
impls = [ubp.unbound_impl(precision) for ubp in unbound_potentials]
U_s = [wrap_impl(impl) for impl in impls]
return U
def construct_differentiable_interface_fast(unbound_potentials, params, precision=np.float32):
"""Construct a differentiable function U(x, params, box, lam) -> float
from a collection of unbound potentials
>>> U = construct_differentiable_interface(unbound_potentials, params)
>>> _ = grad(U, (0,1,3))(coords, sys_params, box, lam)
This implementation computes the sum of the component potentials in C++ using the SummedPotential custom op
"""
impl = SummedPotential(unbound_potentials, params).unbound_impl(precision)
U = wrap_impl(impl, pack)
return U
| 36.975 | 111 | 0.713658 |
67cf0d02161a3633d1e7bda727c4a5909dae5bbc | 996 | py | Python | utilityfiles/race.py | IronicNinja/covid19api | f96a18c646379fe144db228eaa3c69d66125628d | [
"MIT"
] | 1 | 2020-09-16T05:18:54.000Z | 2020-09-16T05:18:54.000Z | utilityfiles/race.py | IronicNinja/covid19api | f96a18c646379fe144db228eaa3c69d66125628d | [
"MIT"
] | null | null | null | utilityfiles/race.py | IronicNinja/covid19api | f96a18c646379fe144db228eaa3c69d66125628d | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup as soup
from urllib.request import Request, urlopen
from datetime import date
import math
import openpyxl
import pandas as pd
# Scrape state-level minority population percentages from governing.com and
# merge them as new columns into the local states_info.xlsx workbook.
fname = 'https://www.governing.com/gov-data/census/state-minority-population-data-estimates.html'
req = Request(fname, headers={'User-Agent': 'Mozilla/5.0'})  # UA header to avoid simple bot blocking
webpage = urlopen(req)
page_soup = soup(webpage, "html.parser")
containers = page_soup.findAll("table")
# NOTE(review): assumes the data lives in the page's second table -- breaks if the layout changes
container = containers[1]
A = container.findAll("tr")
# One accumulator list per demographic column (see headers_list below)
tmp_list = [[], [], [], [], []]
# Rows 1..51 (header row 0 skipped); presumably 50 states + DC
for x in range(1, 52):
    if x == 9:
        # NOTE(review): row 9 is skipped deliberately -- presumably a mid-table
        # header/separator row; confirm against the live page
        continue
    B = A[x].findAll("td")
    for c in range(1, 6):
        # Strip cell markup and the percent sign, keep the numeric value
        s = str(B[c])
        s1 = s.replace('<td>', '')
        s2 = s1.replace('</td>', '')
        s3 = s2.replace('%', '')
        tmp_list[c-1].append(float(s3))
# Append one column per demographic group and write the workbook back in place
df = pd.read_excel('states_info.xlsx')
headers_list = ['hispanic', 'white', 'black', 'asian', 'american indian']
for pos in range(5):
    df[headers_list[pos]] = tmp_list[pos]
df.to_excel('states_info.xlsx')
| 25.538462 | 97 | 0.63755 |
67d227f164d327f585654ba9c51b22b4d48f67c1 | 7,601 | py | Python | prioListe/utils.py | FelixTheC/allSales | 76d955b80bf9b5bb58bd53d8ee644249cf04e1a3 | [
"Apache-2.0"
] | null | null | null | prioListe/utils.py | FelixTheC/allSales | 76d955b80bf9b5bb58bd53d8ee644249cf04e1a3 | [
"Apache-2.0"
] | null | null | null | prioListe/utils.py | FelixTheC/allSales | 76d955b80bf9b5bb58bd53d8ee644249cf04e1a3 | [
"Apache-2.0"
] | null | null | null | from django.core.exceptions import FieldError
from staff.models import Staff
import re
# Staff choice tuples (initials, display name) for form dropdowns, built once
# at module import time from the Staff table.
# NOTE(review): hitting the database at import time can break management
# commands / migrations when the staff table does not exist yet.
STAFFCHOICESONE = set()
for staff in Staff.objects.all():
    STAFFCHOICESONE.add((staff.initialies, staff.name))
# Same choices plus an empty tuple, presumably for optional select fields.
STAFFCHOICESTWO = set()
STAFFCHOICESTWO.add(('', ''))
for staff in Staff.objects.all():
    STAFFCHOICESTWO.add((staff.initialies, staff.name))
def check_form_and_db(form, queryset):
    """
    Compare a bound form's instance (data before saving) with the currently
    stored object and report whether an update-relevant field has changed.

    The elif-chain checks fields in priority order: the FIRST field found to
    differ decides the outcome. ``status``, ``optional_status`` and
    ``production_remark`` deliberately do NOT count as updates (they resolve
    to False).

    :param form: Django form whose ``instance`` carries the new values
    :param queryset: the currently stored model object
    :return: bool -- True if an update-relevant field changed
    """
    update = False
    if queryset.box != form.instance.box:
        update = True
    elif queryset.customer != form.instance.customer:
        update = True
    elif queryset.hardware != form.instance.hardware:
        update = True
    elif queryset.created_at != form.instance.created_at:
        update = True
    elif queryset.status != form.instance.status:
        # a status change alone does not flag the record as updated
        update = False
    elif queryset.finished_until != form.instance.finished_until:
        update = True
    elif queryset.optional_status != form.instance.optional_status:
        update = False
    # Bug fix: the original repeated the `finished_until` comparison here;
    # that second branch was unreachable (the first occurrence above always
    # wins in an elif-chain) and has been removed.
    elif queryset.staff != form.instance.staff:
        update = True
    elif queryset.time_in_weeks != int(form.instance.time_in_weeks):
        # the form value may arrive as a string -- normalise before comparing
        update = True
    elif queryset.remark != form.instance.remark:
        update = True
    elif queryset.production_remark != form.instance.production_remark:
        update = False
    return update
# Status label -> background colour (hex) mapping; the keys are German
# workflow states (Fertig = finished, Bearbeitung = in progress, ...).
COLORS = {
    'Fertig': '#33cc00',
    'Test': '#99ff99',
    'Bearbeitung': '#ffff00',
    'Produktion': '#ffffcc',
    'Vertrieb': '#ff99ff',
    'Lschen': '#ffffff'  # NOTE(review): likely mojibake for 'Löschen' (delete) -- verify the key callers actually use
}
def searching(model, search_string, *args, **kwargs):
    '''
    Build a list of querysets, one per model field matching ``search_string``.

    usage e.g.:
    t = searching(ModelName, search_string, 'Foo', 'Bar', **kwargs)
    tmp = ModelName.objects.none()
    for i in t:
        tmp = i | tmp  # merge querysets

    :param model: Django model class to search
    :param search_string: value to look for in every searchable field
    :param args: field names / internal types to exclude from the search
    :param kwargs: may contain
        - 'exclude': field names to skip
        - 'exact': field names matched with __iexact instead of __icontains
        - 'foreignKeyFields': related field names to follow on FK/M2M fields
        - 'filter': (lookup, value) pair applied to the base queryset first
        - 'kwargs': nested dict with 'exclude' for the indirect call style
          shown in the inline comment below
    :return: list of querysets, each containing at least one hit
    '''
    # Map every field name of the model to its internal type (e.g. 'CharField')
    types = [field.get_internal_type() for field in model._meta.get_fields()]
    names = [f.name for f in [field for field in model._meta.get_fields()]]
    field_name_dict = dict(zip(names, types))
    excat_fields = []  # NOTE(review): typo for "exact_fields"; kept as-is (doc-only change)
    foreignKeyFields = None
    special_filter = None
    if kwargs:
        try:
            foreignKeyFields = kwargs['foreignKeyFields']
        except KeyError:
            pass
        try:
            special_filter = kwargs['filter']
        except KeyError:
            pass
        try:
            field_name_dict = remove_items_dict(field_name_dict, kwargs['exclude'])
        except KeyError:
            pass
        try:
            excat_fields = kwargs['exact']
        except KeyError:
            pass
    # to use following e.g. in function call:
    # data = {'exclude': liste['foo', ]}
    # searching(modelname, searchstring, kwargs=data)
    try:
        if 'exclude' in kwargs['kwargs']:
            field_name_dict = remove_items_dict(field_name_dict, kwargs['kwargs']['exclude'])
        elif 'exact' in kwargs:
            excat_fields = kwargs['exact']
    except KeyError:
        pass
    if args:
        field_name_dict = remove_items_dict(field_name_dict, args)
    # Optional pre-filter of the base queryset: ('lookup', value)
    if special_filter is not None:
        tmp = model.objects.filter(**{special_filter[0]: special_filter[1]})
    else:
        tmp = model.objects.all()
    liste = []
    for key, value in field_name_dict.items():
        if value != 'ForeignKey' and value != 'ManyToManyField':
            # Plain (non-relational) field: case-insensitive exact or substring match.
            # NOTE(review): local name `filter` shadows the builtin of the same name.
            if key in excat_fields:
                filter = f'{key}__iexact'
                if len(tmp.filter(**{filter: search_string})) > 0:
                    liste.append(tmp.filter(**{filter: search_string}))
            else:
                filter = f'{key}__icontains'
                if len(tmp.filter(**{filter: search_string})) > 0:
                    liste.append(tmp.filter(**{filter: search_string}))
        elif value == 'ManyToManyField' and key == 'customer_collar':
            # Special case: search this M2M relation by serial number first,
            # then fall back to a pk match
            filter = f'{key}__serialno__icontains'
            if len(tmp.filter(**{filter: search_string})) > 0:
                liste.append(tmp.filter(**{filter: search_string}))
            else:
                filter = f'{key}__pk__iexact'
                if len(tmp.filter(**{filter: search_string})) > 0:
                    liste.append(tmp.filter(**{filter: search_string}))
        else:
            # Relational field: follow the caller-supplied related fields,
            # or default to a related `name` field
            if foreignKeyFields is not None:
                for keyfield in foreignKeyFields:
                    filter = f'{key}__{keyfield}__icontains'
                    try:
                        if len(tmp.filter(**{filter: search_string})) > 0:
                            liste.append(tmp.filter(**{filter: search_string}))
                    except FieldError:
                        # Not every relation has this field -- skip silently
                        pass
            else:
                filter = f'{key}__name__icontains'
                if len(tmp.filter(**{filter: search_string})) > 0:
                    liste.append(tmp.filter(**{filter: search_string}))
    return liste
def remove_items_dict(dictionary, keys):
    '''
    Return a copy of *dictionary* without every entry whose key OR value
    occurs in *keys*.

    :param dictionary: source mapping (left untouched)
    :param keys: iterable of keys/values to drop
    :return: new, filtered dict
    '''
    filtered = {}
    for item_key, item_value in dictionary.items():
        if item_key in keys or item_value in keys:
            continue
        filtered[item_key] = item_value
    return filtered
def move_ids_from_remark_to_ids(text):
    '''
    Extract serial-number ids from a legacy ``production_remark`` string.

    Two kinds of ids are collected:
      * ranges of the form ``12345-12350`` (kept verbatim), and
      * stand-alone runs of more than four digits.

    :param text: free-form remark text
    :return: ids joined by '; ' -- stand-alone ids first, then ranges
    '''
    range_pattern = r'[0-9]*-[0-9]*'
    ranges = re.findall(range_pattern, text)
    # Remove the range expressions, then collect the remaining digit runs
    leftover = re.sub(range_pattern, '', text)
    digit_runs = [run for run in re.findall(r'[\d]*', leftover) if run]
    single_ids = [run for run in digit_runs if len(run) > 4]
    return '{}; {}'.format('; '.join(single_ids), '; '.join(ranges))
def filter_ids(obj, id):
    '''
    Find the primary key of the first record whose ``ids`` text field
    contains the given id, either as a literal substring or inside a stored
    ``start-end`` range entry.

    :param obj: Django model class providing ``objects`` and an ``ids`` text field
    :param id: id to look up (string or int convertible)
    :return: matching record's pk, or None
    '''
    # Only fetch the two columns this lookup needs
    queryset = obj.objects.all().only('pk', 'ids')
    for i in queryset:
        if i.ids is not None:
            if '-' in i.ids:
                # Entries are '; '-separated; keep only the range entries.
                # NOTE(review): the lambda's `x` shadows the outer list, and
                # `filter`/`id` shadow builtins.
                x = i.ids.split('; ')
                x = list(filter(lambda x: '-' in x, x))
                for ids in x:
                    # NOTE(review): this condition looks wrong for a range test --
                    # `start > id or id < end` matches almost any id; a range
                    # membership check would be `start <= id <= end`. Verify intent.
                    if int(ids.split('-')[0]) > int(id) or int(id) < int(ids.split('-')[1]):
                        return i.pk
                else:
                    # for/else: runs when no range entry returned above.
                    # NOTE(review): this branch always returns, so iteration stops
                    # at the first record containing '-' -- later records are
                    # never examined.
                    if id in i.ids:
                        return i.pk
                    else:
                        return None
            else:
                if id in i.ids:
                    return i.pk
    return None
| 32.482906 | 104 | 0.568215 |
67d23e8a7d069e05acd374ed761b417602e522e5 | 287 | py | Python | app/pydantic_models/phone.py | matiasbavera/fastapi-tortoise-fk-example | b61b202e20604a03bb36291fc534935048f17187 | [
"Apache-2.0"
] | null | null | null | app/pydantic_models/phone.py | matiasbavera/fastapi-tortoise-fk-example | b61b202e20604a03bb36291fc534935048f17187 | [
"Apache-2.0"
] | null | null | null | app/pydantic_models/phone.py | matiasbavera/fastapi-tortoise-fk-example | b61b202e20604a03bb36291fc534935048f17187 | [
"Apache-2.0"
] | null | null | null | from pydantic import BaseModel
from app.orm_models.phone import Phone
from tortoise.contrib.pydantic import pydantic_model_creator
# Read model: full serializer generated from the Phone ORM model.
Phone_Pydantic = pydantic_model_creator(Phone, name="Phone")
# Input model: exclude_readonly drops read-only fields (e.g. the auto PK)
# so it can validate create/update payloads.
PhoneIn_Pydantic = pydantic_model_creator(
    Phone, name="PhoneIn", exclude_readonly=True)
| 31.888889 | 60 | 0.832753 |
67d27163450c56993ca54027a1f3ba12395df50b | 6,403 | py | Python | suls/mealymachine.py | TCatshoek/lstar | 042b0ae3a0627db7a412c828f3752a9c30928ec1 | [
"MIT"
] | 2 | 2019-10-15T11:28:12.000Z | 2021-01-28T15:14:09.000Z | suls/mealymachine.py | TCatshoek/lstar | 042b0ae3a0627db7a412c828f3752a9c30928ec1 | [
"MIT"
] | null | null | null | suls/mealymachine.py | TCatshoek/lstar | 042b0ae3a0627db7a412c828f3752a9c30928ec1 | [
"MIT"
] | null | null | null |
# Need this to fix types
from __future__ import annotations
import tempfile
import threading
from typing import Union, Iterable, Dict, Tuple
from suls.sul import SUL
from graphviz import Digraph
import random
from itertools import product
# A statemachine can represent a system under learning
| 34.240642 | 116 | 0.562236 |
67d2e3d4874353fb5ea93748eaef79e0a94659bb | 636 | py | Python | app/email.py | DXYyang/shenNeng_gasAnalysis | d94e2451d1938c090d1377dfbd487d0c6a649188 | [
"MIT"
] | 1 | 2020-02-16T04:32:15.000Z | 2020-02-16T04:32:15.000Z | app/email.py | DXYyang/shenNeng_gasAnalysis | d94e2451d1938c090d1377dfbd487d0c6a649188 | [
"MIT"
] | null | null | null | app/email.py | DXYyang/shenNeng_gasAnalysis | d94e2451d1938c090d1377dfbd487d0c6a649188 | [
"MIT"
] | null | null | null | from threading import Thread
from flask import current_app,render_template
from flask_mail import Message
from . import mail | 35.333333 | 72 | 0.72956 |
67d3514f1ace46de9127a9a4a21e892c7ad712e0 | 29,708 | py | Python | MAIN_FIGURES.py | tortugar/Schott_etal_2022 | 5cccec4d59184397df39f0bae3544b9c8294ffe2 | [
"MIT"
] | null | null | null | MAIN_FIGURES.py | tortugar/Schott_etal_2022 | 5cccec4d59184397df39f0bae3544b9c8294ffe2 | [
"MIT"
] | null | null | null | MAIN_FIGURES.py | tortugar/Schott_etal_2022 | 5cccec4d59184397df39f0bae3544b9c8294ffe2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 10 18:30:46 2021
@author: fearthekraken
"""
import AS
import pwaves
import sleepy
import pandas as pd
#%%
### FIGURE 1C - example EEGs for NREM, IS, and REM ###
ppath = '/home/fearthekraken/Documents/Data/photometry'
AS.plot_example(ppath, 'hans_091118n1', ['EEG'], tstart=721.5, tend=728.5, eeg_nbin=4, ylims=[(-0.6, 0.6)]) # NREM EEG
AS.plot_example(ppath, 'hans_091118n1', ['EEG'], tstart=780.0, tend=787.0, eeg_nbin=4, ylims=[(-0.6, 0.6)]) # IS EEG
AS.plot_example(ppath, 'hans_091118n1', ['EEG'], tstart=818.5, tend=825.5, eeg_nbin=4, ylims=[(-0.6, 0.6)]) # REM EEG
#%%
### FIGURE 1E - example photometry recording ###
ppath = '/home/fearthekraken/Documents/Data/photometry'
AS.plot_example(ppath, 'hans_091118n1', tstart=170, tend=2900, PLOT=['EEG', 'SP', 'EMG_AMP', 'HYPNO', 'DFF'], dff_nbin=1800,
eeg_nbin=130, fmax=25, vm=[50,1800], highres=False, pnorm=0, psmooth=[2,5], flatten_tnrem=4, ma_thr=0)
#%%
### FIGURE 1F - average DF/F signal in each brain state ###
ppath = '/home/fearthekraken/Documents/Data/photometry'
recordings = sleepy.load_recordings(ppath, 'crh_photometry.txt')[1]
df = AS.dff_activity(ppath, recordings, istate=[1,2,3,4], ma_thr=20, flatten_tnrem=4, ma_state=3)
#%%
### FIGURE 1G - example EEG theta burst & DF/F signal ###
ppath = '/home/fearthekraken/Documents/Data/photometry'
AS.plot_example(ppath, 'hans_091118n1', tstart=2415, tend=2444, PLOT=['SP', 'DFF'], dff_nbin=450, fmax=20,
vm=[0,5], highres=True, recalc_highres=False, nsr_seg=2.5, perc_overlap=0.8, pnorm=1, psmooth=[4,4])
#%%
### FIGURE 1H - average spectral field during REM ###
ppath = '/home/fearthekraken/Documents/Data/photometry'
recordings = sleepy.load_recordings(ppath, 'crh_photometry.txt')[1]
pwaves.spectralfield_highres_mice(ppath, recordings, pre=4, post=4, istate=[1], theta=[1,10,100,1000,10000], pnorm=1,
psmooth=[6,1], fmax=25, nsr_seg=2, perc_overlap=0.8, recalc_highres=True)
#%%
### FIGURE 2B - recorded P-waveforms ###
ppath ='/media/fearthekraken/Mandy_HardDrive1/nrem_transitions'
# left - example LFP trace with P-waves
AS.plot_example(ppath, 'Fincher_040221n1', tstart=16112, tend=16119, PLOT=['LFP'], lfp_nbin=7, ylims=[(-0.4, 0.2)])
# right - average P-waveform
recordings = sleepy.load_recordings(ppath, 'pwaves_mice.txt')[0]
pwaves.avg_waveform(ppath, recordings, istate=[], win=[0.15,0.15], mode='pwaves', plaser=False, p_iso=0, pcluster=0, clus_event='waves')
#%%
### FIGURE 2C - average P-wave frequency in each brain state ###
ppath ='/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
recordings = sleepy.load_recordings(ppath, 'pwaves_mice.txt')[0]
istate = [1,2,3,4]; p_iso=0; pcluster=0
_,_,_,_ = pwaves.state_freq(ppath, recordings, istate, plotMode='03', ma_thr=20, flatten_tnrem=4, ma_state=3,
p_iso=p_iso, pcluster=pcluster, ylim2=[-0.3, 0.1])
#%%
### FIGURE 2D - time-normalized P-wave frequency across brain state transitions ###
ppath ='/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
recordings = sleepy.load_recordings(ppath, 'pwaves_mice.txt')[0]
sequence=[3,4,1,2]; state_thres=[(0,10000)]*len(sequence); nstates=[20,20,20,20]; vm=[0.2, 2.1] # NREM --> IS --> REM --> WAKE
_, mx_pwave, _ = pwaves.stateseq(ppath, recordings, sequence=sequence, nstates=nstates, state_thres=state_thres, ma_thr=20, ma_state=3,
flatten_tnrem=4, fmax=25, pnorm=1, vm=vm, psmooth=[2,2], mode='pwaves', mouse_avg='mouse', print_stats=False)
#%%
### FIGURE 2E - example theta burst & P-waves ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/dreadds_processed/'
AS.plot_example(ppath, 'Scrabble_072420n1', tstart=11318.6, tend=11323, PLOT=['SP','EEG','LFP'], eeg_nbin=1, lfp_nbin=6, fmax=20,
vm=[0,4.5], highres=True, recalc_highres=False, nsr_seg=1, perc_overlap=0.85, pnorm=1, psmooth=[4,5])
#%%
### FIGURE 2F - averaged spectral power surrounding P-waves ###
ppath ='/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
recordings = sleepy.load_recordings(ppath, 'pwaves_mice.txt')[0]
filename = 'sp_win3'
# top - averaged spectrogram
pwaves.avg_SP(ppath, recordings, istate=[1], win=[-3,3], mouse_avg='mouse', plaser=False, pnorm=2, psmooth=[2,2], fmax=25,
vm=[0.8,1.5], pload=filename, psave=filename)
# bottom - averaged high theta power
_ = pwaves.avg_band_power(ppath, recordings, istate=[1], bands=[(8,15)], band_colors=['green'], win=[-3,3], mouse_avg='mouse',
plaser=False, pnorm=2, psmooth=0, ylim=[0.6,1.8], pload=filename, psave=filename)
#%%
### FIGURE 2H - example DF/F signal and P-waves ###
ppath = '/home/fearthekraken/Documents/Data/photometry'
AS.plot_example(ppath, 'Fritz_032819n1', tstart=2991, tend=2996.75, PLOT=['DFF','LFP_THRES_ANNOT'], dff_nbin=50, lfp_nbin=10)
#%%
### FIGURE 2I - DF/F signal surrounding P-waves ###
ppath ='/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
# top - diagrams of P-waveforms
recordings = sleepy.load_recordings(ppath, 'pwaves_mice.txt')[0]
p_iso=0.8; pcluster=0; clus_event='waves' # single P-waves
#p_iso=0; pcluster=0.1; clus_event='cluster start' # clustered P-waves
pwaves.avg_waveform(ppath, recordings, istate=[], win=[1,1], mode='pwaves', plaser=False, p_iso=p_iso,
pcluster=pcluster, clus_event=clus_event, wform_std=False)
# middle/bottom - heatmaps & average DF/F plots
ppath = '/home/fearthekraken/Documents/Data/photometry'
recordings = sleepy.load_recordings(ppath, 'pwaves_photometry.txt')[1]
# single P-waves
pzscore=[2,2,2]; p_iso=0.8; pcluster=0; ylim=[-0.4,1.0]; vm=[-1,1.5]
iso_mx = pwaves.dff_timecourse(ppath, recordings, istate=0, plotMode='ht', dff_win=[10,10], pzscore=pzscore, mouse_avg='mouse',
base_int=2.5, baseline_start=0, p_iso=p_iso, pcluster=pcluster, clus_event='waves', ylim=ylim, vm=vm,
psmooth=(8,15), ds=1000, sf=1000)[0]
# clustered P-waves
pzscore=[2,2,2]; p_iso=0; pcluster=0.5; ylim=[-0.4,1.0]; vm=[-1,1.5]
clus_mx = pwaves.dff_timecourse(ppath, recordings, istate=0, plotMode='ht', dff_win=[10,10], pzscore=pzscore, mouse_avg='mouse',
base_int=2.5, baseline_start=0, p_iso=p_iso, pcluster=pcluster, clus_event='waves', ylim=ylim, vm=vm,
psmooth=(4,15), ds=1000, sf=1000)[0]
# random points
pzscore=[2,2,2]; p_iso=0.8; pcluster=0; ylim=[-0.4,1.0]; vm=[-1,1.5]
jter_mx = pwaves.dff_timecourse(ppath, recordings, istate=0, plotMode='ht', dff_win=[10,10], pzscore=pzscore, mouse_avg='mouse',
base_int=2.5, baseline_start=0, p_iso=p_iso, pcluster=pcluster, clus_event='waves', ylim=ylim, vm=vm,
psmooth=(8,15), ds=1000, sf=1000, jitter=10)[0]
#%%
### FIGURE 3B - example open loop opto recording ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed'
AS.plot_example(ppath, 'Huey_082719n1', tstart=12300, tend=14000, PLOT=['LSR', 'SP', 'HYPNO'], fmax=25, vm=[50,1800], highres=False,
pnorm=0, psmooth=[2,2], flatten_tnrem=4, ma_thr=10)
#%%
### FIGURE 3C,D - percent time spent in each brain state surrounding laser ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed/'
recordings = sleepy.load_recordings(ppath, 'crh_chr2_ol.txt')[1]
BS, t, df = AS.laser_brainstate(ppath, recordings, pre=400, post=520, flatten_tnrem=4, ma_state=3, ma_thr=20, edge=10, sf=0, ci='sem', ylim=[0,80])
#%%
### FIGURE 3E - averaged SPs and frequency band power surrounding laser ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed/'
recordings = sleepy.load_recordings(ppath, 'crh_chr2_ol.txt')[1]
bands=[(0.5,4), (6,10), (11,15), (55,99)]; band_labels=['delta', 'theta', 'sigma', 'gamma']; band_colors=['firebrick', 'limegreen', 'cyan', 'purple']
AS.laser_triggered_eeg_avg(ppath, recordings, pre=400, post=520, fmax=100, laser_dur=120, pnorm=1, psmooth=3, harmcs=10, iplt_level=2,
vm=[0.6,1.4], sf=7, bands=bands, band_labels=band_labels, band_colors=band_colors, ci=95, ylim=[0.6,1.3])
#%%
### FIGURE 3G - example closed loop opto recording ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed/'
AS.plot_example(ppath, 'Cinderella_022420n1', tstart=7100, tend=10100, PLOT=['LSR', 'SP', 'HYPNO'], fmax=25, vm=[0,1500],
highres=False, pnorm=0, psmooth=[2,3], flatten_tnrem=4, ma_thr=0)
#%%
### FIGURE 3H - closed-loop ChR2 graph ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed/'
recordings = sleepy.load_recordings(ppath, 'crh_chr2_cl.txt')[1]
_ = AS.state_online_analysis(ppath, recordings, istate=1, plotMode='03', ylim=[0,130])
#%%
### FIGURE 3I - eYFP controls for ChR2 ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed/'
recordings = sleepy.load_recordings(ppath, 'crh_yfp_chr2_cl.txt')[1]
_ = AS.state_online_analysis(ppath, recordings, istate=1, plotMode='03', ylim=[0,130])
#%%
### FIGURE 3J - closed-loop iC++ graph ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed/'
recordings = sleepy.load_recordings(ppath, 'crh_ic_cl.txt')[1]
_ = AS.state_online_analysis(ppath, recordings, istate=1, plotMode='03', ylim=[0,130])
#%%
### FIGURE 3K - eYFP controls for iC++ ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed/'
recordings = sleepy.load_recordings(ppath, 'crh_yfp_ic_cl.txt')[1]
_ = AS.state_online_analysis(ppath, recordings, istate=1, plotMode='03', ylim=[0,130])
#%%
### FIGURE 4B - example spontaneous & laser-triggered P-wave ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed'
recordings = sleepy.load_recordings(ppath, 'lsr_pwaves.txt')[1]
AS.plot_example(ppath, 'Huey_101719n1', tstart=5925, tend=5930, PLOT=['LSR', 'EEG', 'LFP'], eeg_nbin=5, lfp_nbin=10)
#%%
### FIGURE 4C,D,E - waveforms & spectral power surrounding P-waves/laser ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed'
recordings = sleepy.load_recordings(ppath, 'lsr_pwaves.txt')[1]
# top - averaged waveforms surrounding P-waves & laser
filename = 'wf_win025'; wform_win = [0.25,0.25]; istate=[1]
pwaves.avg_waveform(ppath, recordings, istate, mode='pwaves', win=wform_win, mouse_avg='trials', # spontaneous & laser-triggered P-waves
plaser=True, post_stim=0.1, pload=filename, psave=filename, ylim=[-0.3,0.1])
pwaves.avg_waveform(ppath, recordings, istate, mode='lsr', win=wform_win, mouse_avg='trials', # successful & failed laser
plaser=True, post_stim=0.1, pload=filename, psave=filename, ylim=[-0.3,0.1])
# middle - averaged SPs surrounding P-waves & laser
filename = 'sp_win3'; win=[-3,3]; pnorm=2
pwaves.avg_SP(ppath, recordings, istate=[1], mode='pwaves', win=win, plaser=True, post_stim=0.1, # spontaneous & laser-triggered P-waves
mouse_avg='mouse', pnorm=pnorm, psmooth=[(8,8),(8,8)], vm=[(0.82,1.32),(0.8,1.45)],
fmax=25, recalc_highres=False, pload=filename, psave=filename)
pwaves.avg_SP(ppath, recordings, istate=[1], mode='lsr', win=win, plaser=True, post_stim=0.1, # successful & failed laser
mouse_avg='mouse', pnorm=pnorm, psmooth=[(8,8),(8,8)], vm=[(0.82,1.32),(0.6,1.8)],
fmax=25, recalc_highres=False, pload=filename, psave=filename)
# bottom - average high theta power surrounding P-waves & laser
_ = pwaves.avg_band_power(ppath, recordings, istate=[1], mode='pwaves', win=win, plaser=True, # spontaneous & laser-triggered P-waves
post_stim=0.1, mouse_avg='mouse', bands=[(8,15)], band_colors=[('green')],
pnorm=pnorm, psmooth=0, fmax=25, pload=filename, psave=filename, ylim=[0.5,1.5])
# successful and failed laser
_ = pwaves.avg_band_power(ppath, recordings, istate=[1], mode='lsr', win=win, plaser=True, # successful & failed laser
post_stim=0.1, mouse_avg='mouse', bands=[(8,15)], band_colors=[('green')],
pnorm=pnorm, psmooth=0, fmax=25, pload=filename, psave=filename, ylim=[0.5,1.5])
#%%
### FIGURE 4F - spectral profiles: null vs spon vs success lsr vs fail lsr ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed'
recordings = sleepy.load_recordings(ppath, 'lsr_pwaves.txt')[1]
filename = 'sp_win3'
spon_win=[-0.5, 0.5]; lsr_win=[0,1]; collect_win=[-3,3]; frange=[0, 20]; pnorm=2; null=True; null_win=0; null_match='lsr'
df = pwaves.sp_profiles(ppath, recordings, spon_win=spon_win, lsr_win=lsr_win, collect_win=collect_win, frange=frange,
null=null, null_win=null_win, null_match=null_match, plaser=True, post_stim=0.1, pnorm=pnorm,
psmooth=12, mouse_avg='mouse', ci='sem', pload=filename, psave=filename)
#%%
### FIGURE 4G - probability of laser success per brainstate ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed'
recordings = sleepy.load_recordings(ppath, 'lsr_pwaves.txt')[1]
filename = 'lsr_stats'
df = pwaves.get_lsr_stats(ppath, recordings, istate=[1,2,3,4], lsr_jitter=5, post_stim=0.1,
flatten_tnrem=4, ma_thr=20, ma_state=3, psave=filename)
_ = pwaves.lsr_state_success(df, istate=[1,2,3,4]) # true laser success
_ = pwaves.lsr_state_success(df, istate=[1], jstate=[1]) # true vs sham laser success
#%%
### FIGURE 4H - latencies of elicited P-waves to laser ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed'
recordings = sleepy.load_recordings(ppath, 'lsr_pwaves.txt')[1]
df = pd.read_pickle('lsr_stats.pkl')
pwaves.lsr_pwave_latency(df, istate=1, jitter=True)
#%%
### FIGURE 4I - phase preferences of spontaneous & laser-triggered P-waves ###
ppath = '/home/fearthekraken/Documents/Data/sleepRec_processed'
recordings = sleepy.load_recordings(ppath, 'lsr_pwaves.txt')[1]
filename = 'lsr_phases'
pwaves.lsr_hilbert(ppath, recordings, istate=1, bp_filt=[6,12], min_state_dur=30, stat='perc', mode='pwaves',
mouse_avg='trials', bins=9, pload=filename, psave=filename)
#%%
### FIGURE 5B,C - example recordings of hm3dq + saline vs cno ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
AS.plot_example(ppath, 'Dahl_030321n1', tstart=3960, tend=5210, PLOT=['EEG', 'SP', 'HYPNO', 'EMG_AMP'], eeg_nbin=100, # saline
                fmax=25, vm=[15,2200], psmooth=(1,2), flatten_tnrem=4, ma_thr=0, ylims=[[-0.6,0.6],'','',[0,300]])
AS.plot_example(ppath, 'Dahl_031021n1', tstart=3620, tend=4870, PLOT=['EEG', 'SP', 'HYPNO', 'EMG_AMP'], eeg_nbin=100, # CNO
                fmax=25, vm=[15,2200], psmooth=(1,2), flatten_tnrem=4, ma_thr=0, ylims=[[-0.6,0.6],'','',[0,300]])
#%%
### FIGURE 5D - hm3dq percent time spent in REM ###
# dose=True splits recordings into control ('0' = saline) and experimental groups; '0.25' selects the CNO dose group
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm3dq_tnrem.txt', dose=True, pwave_channel=False); e=e['0.25']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='perc', plotMode='03',
                             group_colors=['gray', 'blue'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','0.25'])
pwaves.pairT_from_df(df.iloc[np.where(df['state']==1)[0],:], 'dose', '0', '0.25', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5E - hm3dq mean REM duration ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm3dq_tnrem.txt', dose=True, pwave_channel=False); e=e['0.25']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='dur', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='dur', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='dur', plotMode='03',
                             group_colors=['gray', 'blue'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','0.25'])
pwaves.pairT_from_df(df.iloc[np.where(df['state']==1)[0],:], 'dose', '0', '0.25', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5F - hm3dq mean REM frequency ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm3dq_tnrem.txt', dose=True, pwave_channel=False); e=e['0.25']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='freq', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='freq', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='freq', plotMode='03',
                             group_colors=['gray', 'blue'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','0.25'])
pwaves.pairT_from_df(df.iloc[np.where(df['state']==1)[0],:], 'dose', '0', '0.25', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5G - hm3dq percent time spent in Wake/NREM/IS ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm3dq_tnrem.txt', dose=True, pwave_channel=False); e=e['0.25']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[2,3,4], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[2,3,4], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='perc', plotMode='03',
                             group_colors=['gray', 'blue'], group_labels=['saline','cno'])
# stats - one paired test per non-REM state
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','0.25'])
for s in [2,3,4]:
    pwaves.pairT_from_df(df.iloc[np.where(df['state']==s)[0],:], 'dose', '0', '0.25', ['t0'], print_notice='### STATE = ' + str(s) + ' ###')
#%%
### FIGURE 5H - hm3dq probability of IS-->REM transition ###
# flatten_tnrem=False here so the IS (transition-NREM) state is kept separate
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm3dq_tnrem.txt', dose=True, pwave_channel=False); e=e['0.25']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='transition probability', flatten_tnrem=False, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='transition probability', flatten_tnrem=False, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='transition probability', plotMode='03',
                             group_colors=['gray', 'blue'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','0.25'])
pwaves.pairT_from_df(df, 'dose', '0', '0.25', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5I - example P-waves during NREM-->IS-->REM transitions ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
AS.plot_example(ppath, 'King_071020n1', ['HYPNO', 'EEG', 'LFP'], tstart=16097, tend=16172, ylims=['',(-0.6, 0.6), (-0.3, 0.15)]) # saline
AS.plot_example(ppath, 'King_071520n1', ['HYPNO', 'EEG', 'LFP'], tstart=5600, tend=5675, ylims=['',(-0.6, 0.6), (-0.3, 0.15)]) # CNO
#%%
### FIGURE 5J - hm3dq time-normalized P-wave frequency across brain state transitions ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm3dq_tnrem.txt', dose=True, pwave_channel=True); e=e['0.25']
# drop recordings flagged 'X' (no usable P-wave channel)
c = [i[0] for i in c if i[1] != 'X']; e = [i[0] for i in e if i[1] != 'X']
sequence=[3,4,1,2]; state_thres=[(0,10000)]*len(sequence); nstates=[20,20,20,20]; cvm=[0.3,2.5]; evm= [0.28,2.2] # NREM --> IS --> REM --> WAKE
mice,cmx,cspe = pwaves.stateseq(ppath, c, sequence=sequence, nstates=nstates, state_thres=state_thres, fmax=25, pnorm=1, # saline
                                vm=cvm, psmooth=[2,2], mode='pwaves', mouse_avg='mouse', pplot=False, print_stats=False)
mice,emx,espe = pwaves.stateseq(ppath, e, sequence=sequence, nstates=nstates, state_thres=state_thres, fmax=25, pnorm=1, # CNO
                                vm=evm, psmooth=[2,2], mode='pwaves', mouse_avg='mouse', pplot=False, print_stats=False)
# plot timecourses
pwaves.plot_activity_transitions([cmx, emx], [mice, mice], plot_id=['gray', 'blue'], group_labels=['saline', 'cno'],
                                 xlim=nstates, xlabel='Time (normalized)', ylabel='P-waves/s', title='NREM-->tNREM-->REM-->Wake')
#%%
### FIGURE 5K - hm3dq average P-wave frequency in each brain state ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm3dq_tnrem.txt', dose=True, pwave_channel=True); e=e['0.25']
# drop recordings flagged 'X' (no usable P-wave channel)
c = [i[0] for i in c if i[1] != 'X']; e = [i[0] for i in e if i[1] != 'X']
# top - mean P-wave frequency per brain state, saline vs CNO
mice, x, cf, cw = pwaves.state_freq(ppath, c, istate=[1,2,3,4], flatten_tnrem=4, pplot=False, print_stats=False) # saline
mice, x, ef, ew = pwaves.state_freq(ppath, e, istate=[1,2,3,4], flatten_tnrem=4, pplot=False, print_stats=False) # CNO
pwaves.plot_state_freq(x, [mice, mice], [cf, ef], [cw, ew], group_colors=['gray', 'blue'], group_labels=['saline','cno'])
# bottom - change in P-wave frequency from saline to CNO
fdif = (ef-cf)
# build the long-format frame with one pd.concat call; DataFrame.append is
# deprecated since pandas 1.4 and removed in pandas 2.0
df = pd.concat([pd.DataFrame({'Mouse':mice, 'State':[state]*len(mice), 'Change':fdif[:,i]})
                for i,state in enumerate(x)], ignore_index=True)
plt.figure(); sns.barplot(x='State', y='Change', data=df, order=['NREM', 'tNREM', 'REM', 'Wake'], color='lightblue', ci=68)
sns.swarmplot(x='State', y='Change', data=df, order=['NREM', 'tNREM', 'REM', 'Wake'], color='black', size=9); plt.show()
# stats - paired t-test per state (saline vs CNO), ignoring NaN mice
for i,s in enumerate([1,2,3,4]):
    p = stats.ttest_rel(cf[:,i], ef[:,i], nan_policy='omit')
    print(f'saline vs cno, state={s} -- T={round(p.statistic,3)}, p-value={round(p.pvalue,5)}')
#%%
### FIGURE 5L - hm4di percent time spent in REM ###
# hm4di experiments use the '5' (mg/kg CNO) dose group; red is the inhibitory-DREADD color
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm4di_tnrem.txt', dose=True, pwave_channel=False); e=e['5']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='perc', plotMode='03',
                             group_colors=['gray', 'red'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','5'])
pwaves.pairT_from_df(df.iloc[np.where(df['state']==1)[0],:], 'dose', '0', '5', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5M - hm4di mean REM duration ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm4di_tnrem.txt', dose=True, pwave_channel=False); e=e['5']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='dur', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='dur', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='dur', plotMode='03',
                             group_colors=['gray', 'red'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','5'])
pwaves.pairT_from_df(df.iloc[np.where(df['state']==1)[0],:], 'dose', '0', '5', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5N - hm4di mean REM frequency ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm4di_tnrem.txt', dose=True, pwave_channel=False); e=e['5']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='freq', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='freq', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='freq', plotMode='03',
                             group_colors=['gray', 'red'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','5'])
pwaves.pairT_from_df(df.iloc[np.where(df['state']==1)[0],:], 'dose', '0', '5', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5O - hm4di percent time spent in Wake/NREM/IS ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm4di_tnrem.txt', dose=True, pwave_channel=False); e=e['5']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[2,3,4], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[2,3,4], tbin=18000, n=1, stats='perc', flatten_tnrem=4, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='perc', plotMode='03',
                             group_colors=['gray', 'red'], group_labels=['saline','cno'])
# stats - one paired test per non-REM state
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','5'])
for s in [2,3,4]:
    pwaves.pairT_from_df(df.iloc[np.where(df['state']==s)[0],:], 'dose', '0', '5', ['t0'], print_notice='### STATE = ' + str(s) + ' ###')
#%%
### FIGURE 5P - hm4di probability of IS-->REM transition ###
# flatten_tnrem=False here so the IS (transition-NREM) state is kept separate
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm4di_tnrem.txt', dose=True, pwave_channel=False); e=e['5']
cmice, cT = pwaves.sleep_timecourse(ppath, c, istate=[1], tbin=18000, n=1, stats='transition probability', flatten_tnrem=False, pplot=False) # saline
emice, eT = pwaves.sleep_timecourse(ppath, e, istate=[1], tbin=18000, n=1, stats='transition probability', flatten_tnrem=False, pplot=False) # CNO
pwaves.plot_sleep_timecourse([cT,eT], [cmice, emice], tstart=0, tbin=18000, stats='transition probability', plotMode='03',
                             group_colors=['gray', 'red'], group_labels=['saline','cno'])
# stats
df = pwaves.df_from_timecourse_dict([cT,eT], [cmice,emice], ['0','5'])
pwaves.pairT_from_df(df, 'dose', '0', '5', ['t0'], print_notice='### STATE = 1 ###')
#%%
### FIGURE 5Q - hm4di time-normalized P-wave frequency across brain state transitions ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm4di_tnrem.txt', dose=True, pwave_channel=True); e=e['5']
# drop recordings flagged 'X' (no usable P-wave channel)
c = [i[0] for i in c if i[1] != 'X']; e = [i[0] for i in e if i[1] != 'X']
sequence=[3,4,1,2]; state_thres=[(0,10000)]*len(sequence); nstates=[20,20,20,20]; cvm=[0.3,2.5]; evm= [0.28,2.2] # NREM --> IS --> REM --> WAKE
mice,cmx,cspe = pwaves.stateseq(ppath, c, sequence=sequence, nstates=nstates, state_thres=state_thres, fmax=25, pnorm=1, # saline
                                vm=cvm, psmooth=[2,2], mode='pwaves', mouse_avg='mouse', pplot=False, print_stats=False)
mice,emx,espe = pwaves.stateseq(ppath, e, sequence=sequence, nstates=nstates, state_thres=state_thres, fmax=25, pnorm=1, # CNO
                                vm=evm, psmooth=[2,2], mode='pwaves', mouse_avg='mouse', pplot=False, print_stats=False)
# plot timecourses
pwaves.plot_activity_transitions([cmx, emx], [mice, mice], plot_id=['gray', 'red'], group_labels=['saline', 'cno'],
                                 xlim=nstates, xlabel='Time (normalized)', ylabel='P-waves/s', title='NREM-->tNREM-->REM-->Wake')
#%%
### FIGURE 5R - hm4di average P-wave frequency in each brain state ###
ppath = '/media/fearthekraken/Mandy_HardDrive1/nrem_transitions/'
(c, e) = AS.load_recordings(ppath, 'crh_hm4di_tnrem.txt', dose=True, pwave_channel=True); e=e['5']
# drop recordings flagged 'X' (no usable P-wave channel)
c = [i[0] for i in c if i[1] != 'X']; e = [i[0] for i in e if i[1] != 'X']
# top - mean P-wave frequency per brain state, saline vs CNO
mice, x, cf, cw = pwaves.state_freq(ppath, c, istate=[1,2,3,4], flatten_tnrem=4, pplot=False, print_stats=False) # saline
mice, x, ef, ew = pwaves.state_freq(ppath, e, istate=[1,2,3,4], flatten_tnrem=4, pplot=False, print_stats=False) # CNO
pwaves.plot_state_freq(x, [mice, mice], [cf, ef], [cw, ew], group_colors=['gray', 'red'], group_labels=['saline','cno'])
# bottom - change in P-wave frequency from saline to CNO
fdif = (ef-cf)
# build the long-format frame with one pd.concat call; DataFrame.append is
# deprecated since pandas 1.4 and removed in pandas 2.0
df = pd.concat([pd.DataFrame({'Mouse':mice, 'State':[state]*len(mice), 'Change':fdif[:,i]})
                for i,state in enumerate(x)], ignore_index=True)
plt.figure(); sns.barplot(x='State', y='Change', data=df, order=['NREM', 'tNREM', 'REM', 'Wake'], color='salmon', ci=68)
sns.swarmplot(x='State', y='Change', data=df, order=['NREM', 'tNREM', 'REM', 'Wake'], color='black', size=9); plt.show()
# stats - paired t-test per state (saline vs CNO), ignoring NaN mice
for i,s in enumerate([1,2,3,4]):
    p = stats.ttest_rel(cf[:,i], ef[:,i], nan_policy='omit')
    print(f'saline vs cno, state={s} -- T={round(p.statistic,3)}, p-value={round(p.pvalue,5)}')
67d3ce8adb8ddc67219cf049efed17f327e1aab1 | 42 | py | Python | bitmovin/services/filters/__init__.py | camberbridge/bitmovin-python | 3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95 | [
"Unlicense"
] | 44 | 2016-12-12T17:37:23.000Z | 2021-03-03T09:48:48.000Z | bitmovin/services/filters/__init__.py | camberbridge/bitmovin-python | 3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95 | [
"Unlicense"
] | 38 | 2017-01-09T14:45:45.000Z | 2022-02-27T18:04:33.000Z | bitmovin/services/filters/__init__.py | camberbridge/bitmovin-python | 3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95 | [
"Unlicense"
] | 27 | 2017-02-02T22:49:31.000Z | 2019-11-21T07:04:57.000Z | from .filter_service import FilterService
| 21 | 41 | 0.880952 |
67d3edf3fcff0ea5f8066746c234cf386931fcea | 4,177 | py | Python | inspect_population.py | puzis/OverflowPrediction | 01341df701e513025cb427d4cdf1db0868a5963b | [
"MIT"
] | 5 | 2019-11-19T11:53:23.000Z | 2022-03-11T05:54:46.000Z | inspect_population.py | puzis/OverflowPrediction | 01341df701e513025cb427d4cdf1db0868a5963b | [
"MIT"
] | 5 | 2020-05-29T23:53:14.000Z | 2022-03-12T00:05:11.000Z | inspect_population.py | erap129/EEGNAS | 1d9c94b106d40317146f7f09d79fad489f1059dc | [
"MIT"
] | 1 | 2021-12-17T14:25:04.000Z | 2021-12-17T14:25:04.000Z | import pickle
from copy import deepcopy
from graphviz import Digraph
from torch.nn import Conv2d, MaxPool2d, ELU, Dropout, BatchNorm2d
import pandas as pd
from EEGNAS.model_generation.abstract_layers import IdentityLayer, ConvLayer, PoolingLayer, ActivationLayer
from EEGNAS.model_generation.custom_modules import IdentityModule
# One-letter tags for layer types, used when rendering architecture tables/graphs.
SHORT_NAMES = {Conv2d: 'C',
               MaxPool2d: 'M',
               ELU: 'E',
               Dropout: 'D',
               BatchNorm2d: 'B'}

sum_path = "/home/user/Documents/eladr/netflowinsights/CDN_overflow_prediction/eegnas_models/195_10_input_height_240_normalized_handovers_all_inheritance_fold9_architectures_iteration_1.p"
per_path = '/home/user/Documents/eladr/netflowinsights/CDN_overflow_prediction/eegnas_models/197_10_input_height_240_normalized_per_handover_handovers_all_inheritance_fold9_architectures_iteration_1.p'

# Use context managers so the pickle files are closed deterministically
# (the original passed open(...) straight into pickle.load, leaking the handles).
with open(per_path, 'rb') as f:
    weighted_population_per = pickle.load(f)
with open(sum_path, 'rb') as f:
    weighted_population_sum = pickle.load(f)

# export_eegnas_table([weighted_population_per[i]['finalized_model'] for i in range(5)], 'per_architectures.csv')
# export_eegnas_table([weighted_population_sum[i]['finalized_model'] for i in range(5)], 'sum_architectures.csv')
# NOTE(review): create_ensemble_digraph is not defined in the visible portion of
# this file — confirm it is defined/imported elsewhere.
create_ensemble_digraph(weighted_population_per, 5)
| 44.913978 | 201 | 0.677041 |
67d91682b7361980dedb029fa4ec3aa3743a4f6d | 3,910 | py | Python | implementations/rest/bin/authhandlers.py | djsincla/SplunkModularInputsPythonFramework | 1dd215214f3d2644cb358e41f4105fe40cff5393 | [
"Apache-2.0"
] | 3 | 2020-08-31T00:59:26.000Z | 2021-10-19T22:01:00.000Z | implementations/rest/bin/authhandlers.py | djsincla/SplunkModularInputsPythonFramework | 1dd215214f3d2644cb358e41f4105fe40cff5393 | [
"Apache-2.0"
] | null | null | null | implementations/rest/bin/authhandlers.py | djsincla/SplunkModularInputsPythonFramework | 1dd215214f3d2644cb358e41f4105fe40cff5393 | [
"Apache-2.0"
] | null | null | null | from requests.auth import AuthBase
import hmac
import base64
import hashlib
import urlparse
import urllib
#add your custom auth handler class to this module
#template
#example of adding a client certificate
#example of adding a client certificate
#cloudstack auth example | 29.179104 | 100 | 0.586701 |
67d9abf1948658a2c5e38ae12ec4d8b8adf3bd58 | 1,515 | py | Python | sdk/core/azure-core/azure/core/pipeline/policies/authentication_async.py | pjquirk/azure-sdk-for-python | cbf02ec4f177b96eae1dbbba87c34c2c93880150 | [
"MIT"
] | null | null | null | sdk/core/azure-core/azure/core/pipeline/policies/authentication_async.py | pjquirk/azure-sdk-for-python | cbf02ec4f177b96eae1dbbba87c34c2c93880150 | [
"MIT"
] | null | null | null | sdk/core/azure-core/azure/core/pipeline/policies/authentication_async.py | pjquirk/azure-sdk-for-python | cbf02ec4f177b96eae1dbbba87c34c2c93880150 | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from azure.core.pipeline import PipelineRequest, PipelineResponse
from azure.core.pipeline.policies import AsyncHTTPPolicy
from azure.core.pipeline.policies.authentication import _BearerTokenCredentialPolicyBase
| 48.870968 | 113 | 0.681848 |
67da024b54f0853f0965d1f566e700aad7c2a74c | 152 | py | Python | pbt/population/__init__.py | automl/HPO_for_RL | d82c7ddd6fe19834c088137570530f11761d9390 | [
"Apache-2.0"
] | 9 | 2021-06-22T08:54:19.000Z | 2022-03-28T09:10:59.000Z | pbt/population/__init__.py | automl/HPO_for_RL | d82c7ddd6fe19834c088137570530f11761d9390 | [
"Apache-2.0"
] | null | null | null | pbt/population/__init__.py | automl/HPO_for_RL | d82c7ddd6fe19834c088137570530f11761d9390 | [
"Apache-2.0"
] | null | null | null | from .trial import Trial, NoTrial
from .member import Member
from .population import Population
__all__ = ['Trial', 'NoTrial', 'Member', 'Population']
| 25.333333 | 54 | 0.75 |
67da0e87556ec7b055d13f1258cbac356a9a64d2 | 7,003 | py | Python | darth/process.py | OOXXXXOO/DARTH | bd899acc7a777157f393c7078b9deccbf6e7e461 | [
"Apache-2.0"
] | 11 | 2020-06-30T03:57:41.000Z | 2021-05-20T13:19:41.000Z | darth/process.py | ceresman/darth | 038cd7cdc18771b73873bd5a8653c89655336448 | [
"Apache-2.0"
] | 3 | 2021-09-08T02:14:52.000Z | 2022-03-12T00:37:29.000Z | darth/process.py | ceresman/darth | 038cd7cdc18771b73873bd5a8653c89655336448 | [
"Apache-2.0"
] | 6 | 2020-07-01T06:11:43.000Z | 2020-09-11T05:57:41.000Z | import multiprocessing
import os
import shutil

import gdal
from tqdm import tqdm

from .downloader import downloader
from .obsclient import bucket
from .vector import Vector
def Process(
    VectorDataSource,
    WgsCord,
    Class_key,
    DataSourcesType='Google China',
    DataSetName="DataSet",
    Remote_dataset_root="DataSets/",
    Thread_count=2,
    Nodata=0,
    Merge=False,
    Keep_local=True,
    Over_write=True,
    Upload=False,
    **args
    ):
    """
    Build an (imagery, rasterized-target) dataset for the given WGS rectangle
    and optionally upload it to an OBS bucket.

    Step I:
        Init Downloader, Bucket, Vector
    Step II:
        Init default vector layer; init area / imagery level of mission
    Step III:
        Download, Merge (optional), Rasterize
    Step IV:
        Upload to Bucket
    Last Step:
        If the temp dataset should not be kept, clean the local cache

    args (OBS server credentials, required when Upload=True):
        ak : access_key_id
        sk : secret_access_key
        server : server
        bn : bucketname

    NOTE(review): `Nodata` and `Merge` are accepted but unused by the visible
    implementation — confirm whether downstream code relies on them.
    """
    print("\033[1;32m# ---------------------------------------------------------------------------- #\033[0m")
    print("\033[1;32m#                                    DARTH                                       #\033[0m")
    print("\033[1;32m# ---------------------------------------------------------------------------- #\033[0m")
    # ANSI escapes fixed throughout: the original wrote '\033[1;32' without the
    # terminating 'm', printing garbage instead of switching color.
    print("# ===== Bucket para preview\033[1;32m %s\033[0m" % args)
    print("\n\n\n# ---------------------------------------------------------------------------- #")
    print("# ---------------------------------- Step I ---------------------------------- #")
    print("# ---------------------------------------------------------------------------- #")
    Download = downloader(DataSourcesType, thread_count=Thread_count)
    # Compute the remote meta path before its first use. (The original assigned
    # it only *after* Bucket.check(remote_metaname), raising NameError whenever
    # Upload=True and Over_write=False.)
    remote_metaname = Remote_dataset_root + DataSetName + "/.meta"
    if Upload:
        Bucket = bucket(
            access_key_id=args["ak"],
            secret_access_key=args["sk"],
            server=args["server"],
            bucketName=args["bn"]
        )
        if not Over_write:
            Bucket.check(remote_metaname)
    Vec = Vector(VectorDataSource)
    print("\n\n\n# ---------------------------------------------------------------------------- #")
    print("# ---------------------------------- Step II --------------------------------- #")
    print("# ---------------------------------------------------------------------------- #")
    Vec.getDefaultLayerbyName(Class_key)
    Download.add_cord(*WgsCord)
    # crop the vector layer to the mercator bounds of the download area
    Vec.crop_default_layer_by_rect(Download.mercator_cord)
    print("\n\n\n# ---------------------------------------------------------------------------- #")
    print("# --------------------------------- Step III --------------------------------- #")
    print("# ---------------------------------------------------------------------------- #")
    image_dir = os.path.join(DataSetName, 'images/')
    targets_dir = os.path.join(DataSetName, 'targets/')
    print("# ===== imagery dir :\033[1;32m%s\033[0m" % image_dir)
    print("# ===== targets dir :\033[1;32m%s\033[0m" % targets_dir)
    if not os.path.exists("./" + DataSetName):
        os.makedirs(image_dir)
        os.makedirs(targets_dir)
    local_metaname = DataSetName + "/.meta"
    # file is closed by the with-block; the original's extra meta.close() was redundant
    with open(local_metaname, "w") as meta:
        if Upload:
            meta.write(
                "Bucket Meta:\n" + str(Bucket.getBucketMetadata())
            )
        meta.write(
            "Vector object Meta:\n" + str(Vec.meta)
        )
    if Upload:
        bucket_imagery_root = os.path.join(Remote_dataset_root, image_dir)
        bucket_targets_root = os.path.join(Remote_dataset_root, targets_dir)
        bucket_description_root = os.path.join(Remote_dataset_root, DataSetName + "/")
        # %-format the values (the original passed them as a second positional
        # argument to print, so the raw format string was printed instead)
        print("# ===== Bucket imagery root :\033[1;32m%s\033[0m" % bucket_imagery_root)
        print("# ===== Bucket Targets root :\033[1;32m%s\033[0m" % bucket_targets_root)
        print("# ===== Bucket Description root :\033[1;32m%s\033[0m" % bucket_description_root)
        Bucket.cd("DataSets")
        Bucket.ls()
    print("\033[5;36m# ===== Start Downloading.....\033[0m")
    Download.download(output_path=image_dir)
    tiles = [i["path"] for i in Download.result]
    # rasterize the vector layer onto each downloaded tile
    Vec.generate(tiles, output_path=targets_dir)
    if Upload:
        print("\n\n\n# ---------------------------------------------------------------------------- #")
        print("# ---------------------------------- Step IV --------------------------------- #")
        print("# ---------------------------------------------------------------------------- #")
        print("# ===== Upload dataset meta\033[1;32m%s\033[0m" % remote_metaname)
        Bucket.upload(
            remote_path=remote_metaname,
            local_path=local_metaname
        )
        ## Saving index json file
        remote_json_path = os.path.join(bucket_description_root, Download.json_path.split('/')[-1])
        print("# ===== Upload dataset description\033[1;32m%s\033[0m" % remote_json_path)
        if not Over_write:
            Bucket.check(remote_json_path)
        Bucket.upload(
            remote_path=remote_json_path,
            local_path=Download.json_path
        )
        print("# ===== upload imagry to bucket.....")
        for tile in tqdm(tiles):
            file_name = tile.split('/')[-1]
            remote_tiles = os.path.join(bucket_imagery_root, file_name)
            if not Over_write:
                Bucket.check(remote_tiles)
            Bucket.upload(
                remote_path=remote_tiles,
                local_path=tile
            )
        print("# ===== upload target to bucket.....")
        for target in tqdm(Vec.labellist):
            file_name = target.split('/')[-1]
            remote_target = os.path.join(bucket_targets_root, file_name)
            if not Over_write:
                Bucket.check(remote_target)
            Bucket.upload(
                remote_path=remote_target,
                local_path=target
            )
        print("# ===== uploaded bucket:")
        Bucket.ls()
    if not Keep_local:
        print("# ------------------------------- Clear-cache ------------------------------- #")
        # shutil.rmtree replaces os.system("rm -rf " + DataSetName): portable and
        # no shell involved; ignore_errors keeps the old best-effort behavior
        shutil.rmtree(DataSetName, ignore_errors=True)
        print("# -------------------------------- Clear-Done ------------------------------- #")
    print("# ---------------------------------------------------------------------------- #")
    print("#                            DataSet process done                               #")
    print("# ---------------------------------------------------------------------------- #")


if __name__ == '__main__':
    # NOTE(review): main() is not defined in the visible portion of this module —
    # confirm it exists before running this file as a script.
    main()
| 31.977169 | 109 | 0.456519 |
67dbe149e9deb1f839afee4ecf248d5698ff9007 | 1,016 | py | Python | setup.py | Willd14469/cj8-patient-panthers | b977091c19cd0e7299f91ebd94ce25c086661fd7 | [
"MIT"
] | 1 | 2021-10-04T09:42:58.000Z | 2021-10-04T09:42:58.000Z | setup.py | Willd14469/cj8-patient-panthers | b977091c19cd0e7299f91ebd94ce25c086661fd7 | [
"MIT"
] | 5 | 2021-07-17T13:24:42.000Z | 2021-07-17T13:35:32.000Z | setup.py | Willd14469/cj8-patient-panthers | b977091c19cd0e7299f91ebd94ce25c086661fd7 | [
"MIT"
] | null | null | null | import sys
from setuptools import setup
# Base runtime dependencies installed on every platform.
required_packages = ["boombox", "Pillow", "PyYAML", "rich"]
win_packages = ["keyboard"]
unix_packages = ["pynput"]

WIN = "win32"
LINUX = "linux"
MACOS = "darwin"

# Pick the keyboard backend that matches the current platform; platforms not
# listed here get only the base dependencies.
_platform_extras = {
    WIN: win_packages,
    LINUX: unix_packages,
    MACOS: unix_packages,
}
required_packages += _platform_extras.get(sys.platform, [])

setup(
    name="pantheras_box",
    version="0.1.0",
    packages=[
        "pantheras_box",
        "pantheras_box.story",
        "pantheras_box.sounds",
        "pantheras_box.backend",
        "pantheras_box.frontend",
        "pantheras_box.keyboard_handlers",
    ],
    url="",
    license="MIT",
    author="Patient Panthers",
    author_email="",
    description="Pantheras box TUI game.",
    install_requires=required_packages,
    entry_points={
        "console_scripts": [
            "pantheras-box = pantheras_box.run:run_game",
        ],
    },
    package_data={"": ["**/*.txt", "**/*.yaml", "**/*.png", "**/*.wav"]},
    include_package_data=True,
)
| 23.627907 | 73 | 0.616142 |
67dc3420f8889bf1e85452c17cc2bb0c45148c0c | 2,609 | py | Python | lunch_handler.py | wimo7083/Wheel-Of-Lunch-Slack-Bot | 7bcb8cc6a4ccd1b6034a9e3a60b470a1934962ef | [
"MIT"
] | 1 | 2018-03-27T04:01:19.000Z | 2018-03-27T04:01:19.000Z | lunch_handler.py | wimo7083/Wheel-Of-Lunch-Slack-Bot | 7bcb8cc6a4ccd1b6034a9e3a60b470a1934962ef | [
"MIT"
] | 2 | 2018-04-22T22:25:44.000Z | 2018-05-26T03:10:08.000Z | lunch_handler.py | wimo7083/Wheel-Of-Lunch-Slack-Bot | 7bcb8cc6a4ccd1b6034a9e3a60b470a1934962ef | [
"MIT"
] | null | null | null | from zipcodes import is_valid
from random import randint
from all_lunch_locs import call_lunch_api
# Fallback parameters used when the Slack command text omits them.
default_max = 30
default_range = 20
if __name__ == '__main__':
    # Smoke test with hand-built Slack slash-command payloads. A real payload
    # is a CombinedMultiDict carrying token, team_id, team_domain, channel_id,
    # channel_name, user_id, user_name, command ('/lunch'), text (the
    # user-supplied args, e.g. '80233'), response_url and trigger_id —
    # only the 'text' field is consumed here.
    # NOTE(review): create_lunch_event is not defined in the visible portion
    # of this file; confirm it is defined above before running as a script.
    print(create_lunch_event({'text': '80020 20'}))
    print(create_lunch_event({'text': '20'}))
| 31.817073 | 156 | 0.690303 |
67e03d999e85af82b3115a02553d48dddb7a3aa2 | 1,414 | py | Python | py-insta/__init__.py | ItsTrakos/Py-insta | 483725f13b7c7eab0261b461c7ec507d1109a9f4 | [
"Unlicense"
] | null | null | null | py-insta/__init__.py | ItsTrakos/Py-insta | 483725f13b7c7eab0261b461c7ec507d1109a9f4 | [
"Unlicense"
] | null | null | null | py-insta/__init__.py | ItsTrakos/Py-insta | 483725f13b7c7eab0261b461c7ec507d1109a9f4 | [
"Unlicense"
] | null | null | null |
"""
# -*- coding: utf-8 -*-
__author__ = "Trakos"
__email__ = "mhdeiimhdeiika@gmail.com"
__version__ = 1.0.0"
__copyright__ = "Copyright (c) 2019 -2021 Leonard Richardson"
# Use of this source code is governed by the MIT license.
__license__ = "MIT"
Description:
py-Insta Is A Python Library
Scrape Instagram Data
And Print It Or You Can Define It Into A Variable...
#####
__version__ = 1.0
import requests
from bs4 import BeautifulSoup
__url__ = "https://www.instagram.com/{}/"
def Insta(username):
    """Scrape public profile stats for an Instagram username.

    Returns a dict with 'Followers', 'Following', 'Posts' and 'Name' on
    success, or an error string ('No Such Username' / 'No InterNet
    Connection') on failure.
    """
    try:
        # strip a leading '@' in case someone types @UserName
        response = requests.get(__url__.format(username.replace('@', '')), timeout=5)
        if '404' in str(response):  # the profile page does not exist
            return 'No Such Username'
        else:
            soup = BeautifulSoup(response.text, "html.parser")
            meta = soup.find("meta", property="og:description")
            try:
                # NOTE(review): the fixed indices (s[0]/s[2]/s[4]/s[13]) depend
                # on Instagram's exact og:description wording — confirm against
                # a live profile page.
                s = meta.attrs['content'].split(' ')
                return {
                    'Followers': s[0],
                    'Following': s[2],
                    'Posts': s[4],
                    'Name': s[13]
                }
            # Re-attached: the original except was mis-indented and did not
            # match this inner try (SyntaxError). AttributeError/IndexError/
            # KeyError cover a missing og:description tag or an unexpected
            # description format (meta is None, or too few tokens).
            except (requests.exceptions.InvalidURL, AttributeError, IndexError, KeyError):
                return 'No Such Username'
    except (requests.ConnectionError, requests.Timeout):
        return 'No InterNet Connection'
67e244309b1b3c160456702586e33422cb197d21 | 1,182 | py | Python | pyopenproject/business/services/command/membership/create.py | webu/pyopenproject | 40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966 | [
"MIT"
] | 5 | 2021-02-25T15:54:28.000Z | 2021-04-22T15:43:36.000Z | pyopenproject/business/services/command/membership/create.py | webu/pyopenproject | 40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966 | [
"MIT"
] | 7 | 2021-03-15T16:26:23.000Z | 2022-03-16T13:45:18.000Z | pyopenproject/business/services/command/membership/create.py | webu/pyopenproject | 40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966 | [
"MIT"
] | 6 | 2021-06-18T18:59:11.000Z | 2022-03-27T04:58:52.000Z | from pyopenproject.api_connection.exceptions.request_exception import RequestError
from pyopenproject.api_connection.requests.post_request import PostRequest
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.business.services.command.membership.membership_command import MembershipCommand
from pyopenproject.model import membership as mem
| 43.777778 | 99 | 0.685279 |
67e2f36fcb3cfb98bcd8a0637b9a6793dd11a7cc | 5,783 | py | Python | lottery/branch/singular_values.py | NogaBar/open_lth | 09bcea21e69708549ecff2659690162a6c45f9ca | [
"MIT"
] | null | null | null | lottery/branch/singular_values.py | NogaBar/open_lth | 09bcea21e69708549ecff2659690162a6c45f9ca | [
"MIT"
] | null | null | null | lottery/branch/singular_values.py | NogaBar/open_lth | 09bcea21e69708549ecff2659690162a6c45f9ca | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from lottery.branch import base
import models.registry
from pruning.mask import Mask
from pruning.pruned_model import PrunedModel
from training import train
from utils.tensor_utils import shuffle_state_dict, weight_erank, feature_erank, activation, generate_mask_active, features_spectral, features_frobenius, features_spectral_fro_ratio, erank
from platforms.platform import get_platform
from foundations import paths
import json
import os
import datasets.registry
import copy
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import tqdm
import seaborn as sns
import pandas as pd
import numpy as np
from utils.tensor_utils import generate_mask_active, erank, shuffle_tensor, mutual_coherence
sns.set_style("whitegrid")
| 45.896825 | 187 | 0.649144 |
67e342235525736d0490c23bf879ad0c51964c88 | 6,400 | py | Python | parser.py | Saevon/DMP-Career-Share | e3486080d1e17b93b6676bdf59e0dc89c524c9f6 | [
"MIT"
] | null | null | null | parser.py | Saevon/DMP-Career-Share | e3486080d1e17b93b6676bdf59e0dc89c524c9f6 | [
"MIT"
] | null | null | null | parser.py | Saevon/DMP-Career-Share | e3486080d1e17b93b6676bdf59e0dc89c524c9f6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from collections import OrderedDict
from decimal import Decimal
from parser_data import InlineList, DuplicationList
from state import State, StateMachine
from type_check import is_int, is_float, is_sci_notation
from format import format
from error import DMPException
def load(fp, options=None):
config = {
# 'verbose': True,
}
if options is not None:
config.update(options)
machine = ParserStateMachine(config)
try:
machine.runAll(fp)
except State.Error as err:
raise DMPException.wraps(err)
return PostProcessor.run(machine.get_data())
def dump(data, options=None):
config = {
# 'verbose': True,
}
if options is not None:
config.update(options)
lines = []
for key, val in data.iteritems():
lines += format(key, val)
# Adds Trailing newline
lines.append('')
return '\n'.join(lines)
def _test(infile, outfile):
with open(infile, 'r') as fp:
data = load(fp)
with open(infile, 'r') as fp:
raw = fp.read()
# print json.dumps(data, indent=4)
out = dump(data)
with open(outfile, 'w') as fp:
fp.write(out)
import subprocess
subprocess.call(['diff', infile, outfile])
subprocess.call(['rm', outfile])
if __name__ == "__main__":
ALL_DATA = [
"ContractSystem.txt",
"Funding.txt",
"PCScenario.txt",
"ProgressTracking.txt",
"Reputation.txt",
"ResearchAndDevelopment.txt",
"ResourceScenario.txt",
"ScenarioDestructibles.txt",
"ScenarioNewGameIntro.txt",
"ScenarioUpgradeableFacilities.txt",
"StrategySystem.txt",
"VesselRecovery.txt",
]
outfile = './tmp.txt'
import os.path
for filename in ALL_DATA:
infile = os.path.join('../Universe/Scenarios/Saevon/', filename)
_test(infile, outfile)
| 26.122449 | 90 | 0.569688 |
67e41af80998f84e9f552dffe5a9fc7f2b6c4124 | 1,795 | py | Python | scripts/redact_cli_py/redact/io/blob_reader.py | jhapran/OCR-Form-Tools | 77e80227f7285c419f72b12edbbc8c316b973874 | [
"MIT"
] | 412 | 2020-03-02T21:43:17.000Z | 2022-03-24T17:20:33.000Z | scripts/redact_cli_py/redact/io/blob_reader.py | jhapran/OCR-Form-Tools | 77e80227f7285c419f72b12edbbc8c316b973874 | [
"MIT"
] | 388 | 2020-03-05T14:08:31.000Z | 2022-03-25T19:07:05.000Z | scripts/redact_cli_py/redact/io/blob_reader.py | jhapran/OCR-Form-Tools | 77e80227f7285c419f72b12edbbc8c316b973874 | [
"MIT"
] | 150 | 2020-03-03T17:29:11.000Z | 2022-03-16T23:55:27.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project
# root for license information.
from typing import List
from pathlib import Path
from azure.storage.blob import ContainerClient
from redact.types.file_bundle import FileBundle
| 37.395833 | 78 | 0.640111 |
67e4a190f4b21b618d8a69e714cec31032c3687f | 8,111 | py | Python | layers/util/mapping_functions.py | meder411/spherical-package | 73d51a25da5891d12e4c04d8ad2e6f1854ffa121 | [
"BSD-3-Clause"
] | 8 | 2020-06-13T19:49:06.000Z | 2022-02-24T07:16:02.000Z | layers/util/mapping_functions.py | meder411/spherical-package | 73d51a25da5891d12e4c04d8ad2e6f1854ffa121 | [
"BSD-3-Clause"
] | 4 | 2020-07-03T08:44:13.000Z | 2021-09-17T12:18:57.000Z | layers/util/mapping_functions.py | meder411/spherical-package | 73d51a25da5891d12e4c04d8ad2e6f1854ffa121 | [
"BSD-3-Clause"
] | 3 | 2020-06-10T23:30:20.000Z | 2020-12-29T13:50:01.000Z | import torch
import math
from .grids import *
from .conversions import *
# =============================================================================
# Equirectangular mapping functions
# =============================================================================
#
# Note that there is no concept of padding for spherical images because there
# are no image boundaries.
# #
def equirectangular_kernel(shape, kernel_size, dilation=1):
"""
Returns a kernel sampling grid with angular spacing according to the provided shape (and associated computed angular resolution) of an equirectangular image
shape: (H, W)
kernel_size: (kh, kw)
"""
# For convenience
kh, kw = kernel_size
# Get equirectangular grid resolution
res_lon, res_lat = get_equirectangular_grid_resolution(shape)
# Build the kernel according to the angular resolution of the equirectangular image
dlon = torch.zeros(kernel_size)
dlat = torch.zeros(kernel_size)
for i in range(kh):
cur_i = i - (kh // 2)
for j in range(kw):
cur_j = j - (kw // 2)
dlon[i, j] = cur_j * dilation * res_lon
# Flip sign is because +Y is down
dlat[i, j] = cur_i * dilation * -res_lat
# Returns the kernel differentials as kh x kw
return dlon, dlat
# =============================================================================
# Cube map mapping functions
# =============================================================================
def cube_kernel(cube_dim, kernel_size, dilation=1):
"""
Returns a kernel sampling grid with angular spacing according to the provided cube dimension (and associated computed angular resolution) of a cube map
cube_dim: length of side of square face of cube map
kernel_size: (kh, kw)
"""
# For convenience
kh, kw = kernel_size
cube_res = 1 / cube_dim
# Build the kernel according to the angular resolution of the cube face
dx = torch.zeros(kernel_size)
dy = torch.zeros(kernel_size)
for i in range(kh):
cur_i = i - (kh // 2)
for j in range(kw):
cur_j = j - (kw // 2)
dx[i, j] = cur_j * dilation * cube_res
# Flip sign is because +Y is down
dy[i, j] = cur_i * dilation * -cube_res
# Returns the kernel differentials as kh x kw
return dx, dy
def inverse_cube_face_projection_map(cube_dim,
kernel_size,
stride=1,
dilation=1,
polar=False):
"""
Creates a sampling map which models each face of the cube as an gnomonic projection (equatorial aspect) of the sphere. Warps the kernel according to the inverse gnomonic projection for the face.
"""
# For convenience
kh, kw = kernel_size
# Get a meshgrid of a cube face in terms of spherical coordinates
face_lon, face_lat = cube_face_spherical_meshgrid(cube_dim, polar)
# Get the kernel differentials
dx, dy = cube_kernel(cube_dim, kernel_size, dilation)
# Equalize views
face_lat = face_lat.view(cube_dim, cube_dim, 1)
face_lon = face_lon.view(cube_dim, cube_dim, 1)
dx = dx.view(1, 1, kh * kw)
dy = dy.view(1, 1, kh * kw)
# Compute the inverse gnomonic projection of each tangent grid (the kernel) back onto sphere at each pixel of the cube face
rho = (dx**2 + dy**2).sqrt()
nu = rho.atan()
map_lat = (nu.cos() * face_lat.sin() +
dy * nu.sin() * face_lat.cos() / rho).asin()
map_lon = face_lon + torch.atan2(
dx * nu.sin(),
rho * face_lat.cos() * nu.cos() - dy * face_lat.sin() * nu.sin())
# Handle the (0,0) case
map_lat[..., [kh * kw // 2]] = face_lat
map_lon[..., [kh * kw // 2]] = face_lon
# Create the sample map in terms of spherical coordinates
map_face = torch.stack((map_lon, map_lat), -1)
# Convert the cube coordinates on the sphere to pixels in the cube map
map_pixels = convert_spherical_to_cube_face(map_face, cube_dim)
# Adjust the stride of the map accordingly
map_pixels = map_pixels[::stride, ::stride, ...].contiguous()
# Return the pixel sampling map
# cube_dime x cube_dim x KH*KW x 2
return map_pixels | 33.378601 | 198 | 0.601159 |
67e4a6a4b62a36140c3ec2606810cde8cf6567ae | 8,164 | py | Python | src/lambda_router/routers.py | jpaidoussi/lambda-router | c7909e6667f2fc837f34f54ccffcc409e33cebb6 | [
"BSD-3-Clause"
] | null | null | null | src/lambda_router/routers.py | jpaidoussi/lambda-router | c7909e6667f2fc837f34f54ccffcc409e33cebb6 | [
"BSD-3-Clause"
] | null | null | null | src/lambda_router/routers.py | jpaidoussi/lambda-router | c7909e6667f2fc837f34f54ccffcc409e33cebb6 | [
"BSD-3-Clause"
] | 1 | 2021-03-05T06:50:26.000Z | 2021-03-05T06:50:26.000Z | import json
from typing import Any, Callable, Dict, Optional
import attr
from .interfaces import Event, Router
| 32.268775 | 114 | 0.614037 |
67e5a6a6c74d4339ea14061f1806e706d149cac0 | 6,026 | py | Python | Modules/ego_planner/ego-planner-swarm/src/uav_simulator/Utils/multi_map_server/src/multi_map_server/msg/_VerticalOccupancyGridList.py | 473867143/Prometheus | df1e1b0d861490223ac8b94d8cc4796537172292 | [
"BSD-3-Clause"
] | 1,217 | 2020-07-02T13:15:18.000Z | 2022-03-31T06:17:44.000Z | Modules/ego_planner/ego-planner-swarm/src/uav_simulator/Utils/multi_map_server/src/multi_map_server/msg/_VerticalOccupancyGridList.py | 473867143/Prometheus | df1e1b0d861490223ac8b94d8cc4796537172292 | [
"BSD-3-Clause"
] | 167 | 2020-07-12T15:35:43.000Z | 2022-03-31T11:57:40.000Z | Modules/ego_planner/ego-planner-swarm/src/uav_simulator/Utils/multi_map_server/src/multi_map_server/msg/_VerticalOccupancyGridList.py | 473867143/Prometheus | df1e1b0d861490223ac8b94d8cc4796537172292 | [
"BSD-3-Clause"
] | 270 | 2020-07-02T13:28:00.000Z | 2022-03-28T05:43:08.000Z | """autogenerated by genpy from multi_map_server/VerticalOccupancyGridList.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
_struct_I = genpy.struct_I
_struct_2f = struct.Struct("<2f")
| 32.397849 | 123 | 0.623963 |
67e63c84e17221da6f00d66f3c8761be24cd93e2 | 2,718 | py | Python | examples/plot_benchmark.py | MrNuggelz/glvq | 1eba279a07fd7abe2ee18ccdba27fba22755f877 | [
"BSD-3-Clause"
] | 27 | 2018-04-11T06:46:07.000Z | 2022-03-24T06:15:31.000Z | examples/plot_benchmark.py | MrNuggelz/glvq | 1eba279a07fd7abe2ee18ccdba27fba22755f877 | [
"BSD-3-Clause"
] | 11 | 2018-04-13T02:04:06.000Z | 2021-09-26T21:32:50.000Z | examples/plot_benchmark.py | MrNuggelz/glvq | 1eba279a07fd7abe2ee18ccdba27fba22755f877 | [
"BSD-3-Clause"
] | 17 | 2018-04-05T13:46:06.000Z | 2022-03-24T06:15:35.000Z | """
==============
GLVQ Benchmark
==============
This example shows the differences between the 4 different GLVQ implementations and LMNN.
The Image Segmentation dataset is used for training and test. Each plot shows the projection
and classification from each implementation. Because Glvq can't project the data on its own
a PCA is used.
"""
from __future__ import with_statement
import numpy as np
import matplotlib.pyplot as plt
from metric_learn import LMNN
from sklearn.decomposition import PCA
from sklearn_lvq import GlvqModel, GrlvqModel, LgmlvqModel, GmlvqModel
from sklearn_lvq.utils import _to_tango_colors, _tango_color
print(__doc__)
y = []
x = []
with open('segmentation.data') as f:
for line in f:
v = line.split(',')
y.append(v[0])
x.append(v[1:])
x = np.asarray(x, dtype='float64')
y = np.asarray(y)
lmnn = LMNN(k=5, learn_rate=1e-6)
lmnn.fit(x, y)
x_t = lmnn.transform(x)
p1 = plt.subplot(231)
p1.scatter(x_t[:, 0], x_t[:, 1], c=_to_tango_colors(y, 0))
p1.axis('equal')
p1.set_title('LMNN')
# GLVQ
glvq = GlvqModel()
glvq.fit(x, y)
p2 = plt.subplot(232)
p2.set_title('GLVQ')
plot(PCA().fit_transform(x), y, glvq.predict(x), glvq.w_, glvq.c_w_, p2)
# GRLVQ
grlvq = GrlvqModel()
grlvq.fit(x, y)
p3 = plt.subplot(233)
p3.set_title('GRLVQ')
plot(grlvq.project(x, 2),
y, grlvq.predict(x), grlvq.project(grlvq.w_, 2),
grlvq.c_w_, p3)
# GMLVQ
gmlvq = GmlvqModel()
gmlvq.fit(x, y)
p4 = plt.subplot(234)
p4.set_title('GMLVQ')
plot(gmlvq.project(x, 2),
y, gmlvq.predict(x), gmlvq.project(gmlvq.w_, 2),
gmlvq.c_w_, p4)
# LGMLVQ
lgmlvq = LgmlvqModel()
lgmlvq.fit(x, y)
p5 = plt.subplot(235)
elem_set = list(set(lgmlvq.c_w_))
p5.set_title('LGMLVQ 1')
plot(lgmlvq.project(x, 1, 2, True),
y, lgmlvq.predict(x), lgmlvq.project(np.array([lgmlvq.w_[1]]), 1, 2),
elem_set.index(lgmlvq.c_w_[1]), p5)
p6 = plt.subplot(236)
p6.set_title('LGMLVQ 2')
plot(lgmlvq.project(x, 6, 2, True),
y, lgmlvq.predict(x), lgmlvq.project(np.array([lgmlvq.w_[6]]), 6, 2),
elem_set.index(lgmlvq.c_w_[6]), p6)
plt.show()
| 27.734694 | 92 | 0.654893 |