hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7182cc9e1a275d7846a31a780b10f6ed97021067 | 1,440 | py | Python | microcosm_pubsub/context.py | Sinon/microcosm-pubsub | c98a188fcd5b3f358c7171dae0c39a33c5774a4e | [
"Apache-2.0"
] | 5 | 2016-07-23T21:20:50.000Z | 2021-07-15T00:27:47.000Z | microcosm_pubsub/context.py | Sinon/microcosm-pubsub | c98a188fcd5b3f358c7171dae0c39a33c5774a4e | [
"Apache-2.0"
] | 76 | 2016-03-22T23:41:21.000Z | 2020-07-27T17:35:36.000Z | microcosm_pubsub/context.py | Sinon/microcosm-pubsub | c98a188fcd5b3f358c7171dae0c39a33c5774a4e | [
"Apache-2.0"
] | 8 | 2016-06-01T18:43:41.000Z | 2021-04-27T20:22:15.000Z | """
Message context.
"""
from typing import Dict
from microcosm.api import defaults, typed
from microcosm.config.types import boolean
from microcosm_logging.decorators import logger
from microcosm_pubsub.constants import TTL_KEY, URI_KEY
from microcosm_pubsub.message import SQSMessage
| 26.181818 | 78 | 0.6625 |
71860bda1bd4506337b0b07e0b43aaca3e5c2511 | 2,185 | py | Python | azure_ml/pytorch_classifier/train_parameterized.py | murdockcrc/python-tricks | 57f7ad9c00a045c1f9f18f89bed6e73be6c85b69 | [
"MIT"
] | null | null | null | azure_ml/pytorch_classifier/train_parameterized.py | murdockcrc/python-tricks | 57f7ad9c00a045c1f9f18f89bed6e73be6c85b69 | [
"MIT"
] | null | null | null | azure_ml/pytorch_classifier/train_parameterized.py | murdockcrc/python-tricks | 57f7ad9c00a045c1f9f18f89bed6e73be6c85b69 | [
"MIT"
] | null | null | null | import os
import argparse
import torch
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from model import Net
from azureml.core import Run
run = Run.get_context()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_path',
type=str,
help='Path to the training data'
)
parser.add_argument(
'--learning_rate',
type=float,
default=0.001,
help='Learning rate for SGD'
)
parser.add_argument(
'--momentum',
type=float,
default=0.9,
help='Momentum for SGD'
)
args = parser.parse_args()
print("===== DATA =====")
print("DATA PATH: " + args.data_path)
print("LIST FILES IN DATA PATH...")
print(os.listdir(args.data_path))
print("================")
# prepare DataLoader for CIFAR10 data
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
trainset = torchvision.datasets.CIFAR10(
root=args.data_path,
train=True,
download=False,
transform=transform,
)
trainloader = torch.utils.data.DataLoader(
trainset,
batch_size=4,
shuffle=True,
num_workers=2
)
# define convolutional network
net = Net()
# set up pytorch loss / optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(
net.parameters(),
lr=args.learning_rate,
momentum=args.momentum,
)
# train the network
for epoch in range(2):
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# unpack the data
inputs, labels = data
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999:
loss = running_loss / 2000
run.log('loss', loss) # log loss metric to AML
print(f'epoch={epoch + 1}, batch={i + 1:5}: loss {loss:.2f}')
running_loss = 0.0
print('Finished Training') | 23 | 69 | 0.622426 |
71866e54c9be9ceced231705351ad07d4dec3246 | 244 | py | Python | src/tests/test_app_db.py | kazqvaizer/arq-sqlalchemy-boilerplate | c14596ed358a061e6eb2a380f4bd962242b123f3 | [
"MIT"
] | 6 | 2021-12-20T14:49:14.000Z | 2022-03-21T14:32:49.000Z | src/tests/test_app_db.py | kazqvaizer/arq-sqlalchemy-boilerplate | c14596ed358a061e6eb2a380f4bd962242b123f3 | [
"MIT"
] | null | null | null | src/tests/test_app_db.py | kazqvaizer/arq-sqlalchemy-boilerplate | c14596ed358a061e6eb2a380f4bd962242b123f3 | [
"MIT"
] | null | null | null | import pytest
from app.db import session_scope
pytestmark = pytest.mark.asyncio
| 22.181818 | 77 | 0.762295 |
7187ac8a1ef00393974831033262a38cc227b4e0 | 3,063 | py | Python | catalyst/core/callbacks/formatters.py | cgarciae/catalyst | 391ff89ab0d9a1961b88719e894f917ac0fb7fc3 | [
"Apache-2.0"
] | 1 | 2019-11-26T06:41:33.000Z | 2019-11-26T06:41:33.000Z | catalyst/core/callbacks/formatters.py | cgarciae/catalyst | 391ff89ab0d9a1961b88719e894f917ac0fb7fc3 | [
"Apache-2.0"
] | null | null | null | catalyst/core/callbacks/formatters.py | cgarciae/catalyst | 391ff89ab0d9a1961b88719e894f917ac0fb7fc3 | [
"Apache-2.0"
] | null | null | null | from abc import ABC, abstractmethod
from datetime import datetime
import json
import logging
from catalyst import utils
from catalyst.core import _State
__all__ = ["MetricsFormatter", "TxtMetricsFormatter", "JsonMetricsFormatter"]
| 27.845455 | 77 | 0.615083 |
7187ff57f53912dbb2c2ffb581f78542068a9ec6 | 7,612 | py | Python | fuzzy/fuzzy.py | Suraj1127/fuzzy-matcher | a3a6ecc6954d79ca65e2517f93db44cc432e7a90 | [
"MIT"
] | null | null | null | fuzzy/fuzzy.py | Suraj1127/fuzzy-matcher | a3a6ecc6954d79ca65e2517f93db44cc432e7a90 | [
"MIT"
] | null | null | null | fuzzy/fuzzy.py | Suraj1127/fuzzy-matcher | a3a6ecc6954d79ca65e2517f93db44cc432e7a90 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Description: Python script to append the common columns in one sheet from another sheet using fuzzy matching.
"""
import pip
import os
import sys
import argparse
import_or_install('numpy')
import_or_install('pandas')
import_or_install('fuzzywuzzy')
import numpy as np
import pandas as pd
from fuzzywuzzy import process, fuzz
def parse_args(parser):
"""
Parsing and configuration of the command line arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--firstcsv', type=str, required=True, help='CSV file for first table.')
parser.add_argument('--secondcsv', type=str, required=True, help='CSV file for second table.')
parser.add_argument('--destination', type=str, default='output.csv', help='Destination filename.')
parser.add_argument('--commoncolumns1', type=str, required=True, help='Common columns for first table.')
parser.add_argument('--commoncolumns2', type=str, required=True, help='Common columns for second table in the same order.')
parser.add_argument("--in", dest="_in", default='second', choices=['second', 'first'], help='Table to append the columns. ')
return check_args(parser.parse_args())
def check_args(args):
"""
Checking the arguments if they are entered properly.
Validations performed:
1. Compulsory arguments are entered.
2. The entered filenames are present in the current folder.
3. The entered column names are present in the corresponding files.
4. If the destination filename is already present in the directory, ask the user if it can be overwritten.
"""
# for --firstcsv and --secondcsv
for filename in [args.firstcsv, args.secondcsv]:
if not os.path.isfile(filename):
raise Exception("File {} is not present in the currrent folder.".format(filename))
# --commoncolumns1
commoncolumns1 = [i.strip().lower() for i in args.commoncolumns1.split(',')]
temp = set(commoncolumns1) - set(pd.read_csv(args.firstcsv, nrows=1).columns.str.lower().str.strip())
if temp:
raise Exception("The following columns are not present in the file, {}:\n{}".format(args.firstcsv, temp))
# --commoncolumns2
commoncolumns2 = [i.strip().lower() for i in args.commoncolumns2.split(',')]
temp = set(commoncolumns2) - set(pd.read_csv(args.secondcsv, nrows=1).columns.str.lower().str.strip())
if temp:
raise Exception("The following columns are not present in the file, {}:\n{}".format(args.secondcsv, temp))
# --destination
if os.path.isfile(args.destination):
print("The file {} already exists. Do you want to overwrite it? y/n".format(args.destination))
ans = input().strip().lower()
if ans == 'n':
print("Please enter different destination filename and run the script again.")
sys.exit()
return args
if __name__ == "__main__":
# instantiate the ArgumentParser class and parse the arguments
parser = argparse.ArgumentParser()
arguments = parse_args(parser)
# save the arguments as some variables which later would be passed to FuzzyMatcher class
filename_1 = arguments.firstcsv
filename_2 = arguments.secondcsv
result_filename = arguments.destination
# clean and lowercase-ize the columns names
common_columns_1 = [i.strip().lower() for i in arguments.commoncolumns1.split(',')]
common_columns_2 = [i.strip().lower() for i in arguments.commoncolumns2.split(',')]
# instantiate the FuzzyMatcher object, perform the fuzzy match, and save the result to the destination CSV file
fuzzy_matcher = FuzzyMatcher(filename_1, filename_2, common_columns_1, common_columns_2, append_in=arguments._in)
fuzzy_matcher.fuzzy_match
fuzzy_matcher.save(result_filename)
| 35.078341 | 128 | 0.626379 |
718a2a5b0f6feb828e1a124e9a30a273db18a144 | 9,770 | py | Python | exoatlas/visualizations/panels/BubblePanel.py | zkbt/exopop | 5e8b9d391fe9e2d39c623d7ccd7eca8fd0f0f3f8 | [
"MIT"
] | 4 | 2020-06-24T16:38:27.000Z | 2022-01-23T01:57:19.000Z | exoatlas/visualizations/panels/BubblePanel.py | zkbt/exopop | 5e8b9d391fe9e2d39c623d7ccd7eca8fd0f0f3f8 | [
"MIT"
] | 4 | 2018-09-20T23:12:30.000Z | 2019-05-15T15:31:58.000Z | exoatlas/visualizations/panels/BubblePanel.py | zkbt/exopop | 5e8b9d391fe9e2d39c623d7ccd7eca8fd0f0f3f8 | [
"MIT"
] | null | null | null | from .Panel import *
__all__ = ['BubblePanel']
default_size = plt.matplotlib.rcParams['lines.markersize']**2
| 35.787546 | 91 | 0.570624 |
718a929c80bd8d634b1687ba5560ac7c6a4f6fe7 | 264 | py | Python | venv/lib/python2.7/dist-packages/landscape/sysinfo/load.py | pengwu/scapy_env | 3db9c5dea2e219048a2387649d6d89be342903d9 | [
"MIT"
] | null | null | null | venv/lib/python2.7/dist-packages/landscape/sysinfo/load.py | pengwu/scapy_env | 3db9c5dea2e219048a2387649d6d89be342903d9 | [
"MIT"
] | null | null | null | venv/lib/python2.7/dist-packages/landscape/sysinfo/load.py | pengwu/scapy_env | 3db9c5dea2e219048a2387649d6d89be342903d9 | [
"MIT"
] | null | null | null | import os
from twisted.internet.defer import succeed
| 18.857143 | 72 | 0.666667 |
718c6a96017a844d29bf1f77cede2d377a4c970c | 675 | py | Python | src/boh_api/viewsets.py | dougmorato/bag-of-holding | 8a7bc45ced8837bdb00da60dcfb496bb0271f161 | [
"Apache-2.0"
] | null | null | null | src/boh_api/viewsets.py | dougmorato/bag-of-holding | 8a7bc45ced8837bdb00da60dcfb496bb0271f161 | [
"Apache-2.0"
] | 1 | 2021-06-10T23:58:45.000Z | 2021-06-10T23:58:45.000Z | src/boh_api/viewsets.py | dougmorato/bag-of-holding | 8a7bc45ced8837bdb00da60dcfb496bb0271f161 | [
"Apache-2.0"
] | null | null | null | from rest_framework import viewsets
from boh import models
from . import serializers
| 25.961538 | 57 | 0.8 |
718d447c90c45e89882aa6196cb3c3ab761ce174 | 2,207 | py | Python | githubintro-fe2d832af2bad7d6b27d036c205cc9d8414b2183/CommunicationAnimation.py | TatendaNoreen/Python | df9799bbea84af03c1fb3b29fada1e16c04bab80 | [
"MIT"
] | null | null | null | githubintro-fe2d832af2bad7d6b27d036c205cc9d8414b2183/CommunicationAnimation.py | TatendaNoreen/Python | df9799bbea84af03c1fb3b29fada1e16c04bab80 | [
"MIT"
] | null | null | null | githubintro-fe2d832af2bad7d6b27d036c205cc9d8414b2183/CommunicationAnimation.py | TatendaNoreen/Python | df9799bbea84af03c1fb3b29fada1e16c04bab80 | [
"MIT"
] | null | null | null | import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot
import agentframework
import csv
import matplotlib.animation
#create environment in which agents will operate
environment=[]
#read csv downloaded file
f = open('in.txt', newline='')
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
for row in reader:
rowlist=[] # A list of rows
environment.append(rowlist)
for value in row: # A list of value
#print(value) # Floats
rowlist.append(value)
f.close() # Don't close until you are done with the reader;
# the data is read on request.
#def distance_between(agents_row_a, agents_row_b):
# return (((agents_row_a.x - agents_row_b.x)**2) +
# ((agents_row_a.y - agents_row_b.y)**2))**0.5
num_of_agents = 10
num_of_iterations = 10
neighbourhood = 20
fig = matplotlib.pyplot.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
# Make the agents and connecting with the environment.
agents = []
animation = matplotlib.animation.FuncAnimation(fig, update, interval=1)
matplotlib.pyplot.show()
| 24.797753 | 71 | 0.645673 |
718e41b1051f8c81e49363a47885bbfedb81564d | 2,027 | py | Python | external/model-preparation-algorithm/tests/conftest.py | opencv/openvino_training_extensions | f5d809741e192a2345558efc75899a475019cf98 | [
"Apache-2.0"
] | 775 | 2019-03-01T02:13:33.000Z | 2020-09-07T22:49:15.000Z | external/model-preparation-algorithm/tests/conftest.py | opencv/openvino_training_extensions | f5d809741e192a2345558efc75899a475019cf98 | [
"Apache-2.0"
] | 229 | 2019-02-28T21:37:08.000Z | 2020-09-07T15:11:49.000Z | external/model-preparation-algorithm/tests/conftest.py | opencv/openvino_training_extensions | f5d809741e192a2345558efc75899a475019cf98 | [
"Apache-2.0"
] | 290 | 2019-02-28T20:32:11.000Z | 2020-09-07T05:51:41.000Z | # Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
try:
import e2e.fixtures
from e2e.conftest_utils import * # noqa
from e2e.conftest_utils import pytest_addoption as _e2e_pytest_addoption # noqa
from e2e import config # noqa
from e2e.utils import get_plugins_from_packages
pytest_plugins = get_plugins_from_packages([e2e])
except ImportError:
_e2e_pytest_addoption = None
pass
import config
import pytest
from ote_sdk.test_suite.pytest_insertions import *
from ote_sdk.test_suite.training_tests_common import REALLIFE_USECASE_CONSTANT
pytest_plugins = get_pytest_plugins_from_ote()
ote_conftest_insertion(default_repository_name='ote/training_extensions/external/model-preparation-algorithm')
# pytest magic
def pytest_generate_tests(metafunc):
ote_pytest_generate_tests_insertion(metafunc)
def pytest_addoption(parser):
ote_pytest_addoption_insertion(parser)
| 32.174603 | 111 | 0.750863 |
718e43027722775db4c64b0811dfc59a1835349b | 2,418 | py | Python | ibis/udf/validate.py | rtpsw/ibis | d7318fdf87121cd8fadbcf0369a2b217aab3053a | [
"Apache-2.0"
] | 986 | 2017-06-07T07:33:01.000Z | 2022-03-31T13:00:46.000Z | ibis/udf/validate.py | marlenezw/ibis | 14b9baf3e1021e8698e7f0ae3c0ae5747543431c | [
"Apache-2.0"
] | 2,623 | 2017-06-07T18:29:11.000Z | 2022-03-31T20:27:31.000Z | ibis/udf/validate.py | marlenezw/ibis | 14b9baf3e1021e8698e7f0ae3c0ae5747543431c | [
"Apache-2.0"
] | 238 | 2017-06-26T19:02:58.000Z | 2022-03-31T15:18:29.000Z | """Validation for UDFs.
Warning: This is an experimental module and API here can change without notice.
DO NOT USE DIRECTLY.
"""
from inspect import Parameter, Signature, signature
from typing import Any, Callable, List
import ibis.common.exceptions as com
from ibis.expr.datatypes import DataType
def _parameter_count(funcsig: Signature) -> int:
"""Get the number of positional-or-keyword or position-only parameters in a
function signature.
Parameters
----------
funcsig : inspect.Signature
A UDF signature
Returns
-------
int
The number of parameters
"""
return sum(
param.kind in {param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY}
for param in funcsig.parameters.values()
if param.default is Parameter.empty
)
def validate_input_type(
input_type: List[DataType], func: Callable
) -> Signature:
"""Check that the declared number of inputs (the length of `input_type`)
and the number of inputs to `func` are equal.
If the signature of `func` uses *args, then no check is done (since no
check can be done).
Parameters
----------
input_type : List[DataType]
func : callable
Returns
-------
inspect.Signature
"""
funcsig = signature(func)
params = funcsig.parameters.values()
# We can only do validation if all the positional arguments are explicit
# (i.e. no *args)
if not any(param.kind is Parameter.VAR_POSITIONAL for param in params):
declared_parameter_count = len(input_type)
function_parameter_count = _parameter_count(funcsig)
if declared_parameter_count != function_parameter_count:
raise TypeError(
'Function signature {!r} has {:d} parameters, '
'input_type has {:d}. These must match. Non-column '
'parameters must be defined as keyword only, i.e., '
'def foo(col, *, function_param).'.format(
func.__name__,
function_parameter_count,
declared_parameter_count,
)
)
return funcsig
def validate_output_type(output_type: Any) -> None:
"""Check that the output type is a single datatype."""
if isinstance(output_type, list):
raise com.IbisTypeError(
'The output type of a UDF must be a single datatype.'
)
| 28.447059 | 79 | 0.639371 |
71915f8963ebf873674df05ecd7d2ac82cadfb43 | 5,629 | py | Python | packages/stattik/stattik/schema/schema.py | stattikcms/stattik | 5c96d600d105461edb95a11d8050dee3c32edd1e | [
"MIT"
] | 1 | 2021-11-05T06:24:28.000Z | 2021-11-05T06:24:28.000Z | packages/stattik/stattik/schema/schema.py | stattikcms/stattik | 5c96d600d105461edb95a11d8050dee3c32edd1e | [
"MIT"
] | null | null | null | packages/stattik/stattik/schema/schema.py | stattikcms/stattik | 5c96d600d105461edb95a11d8050dee3c32edd1e | [
"MIT"
] | null | null | null | import inspect
from ariadne import make_executable_schema, QueryType, MutationType, SubscriptionType
from .resolver import *
#
# Schema
#
keywords = ['query', 'mutation', 'subscription', 'source']
# This is for testing or in case you don't want a database as the root schema | 28.004975 | 107 | 0.607035 |
7193df3e00cf1bbbc7e779239b2adfcf9b4f4173 | 78,616 | py | Python | toontown/battle/DistributedBattleBaseAI.py | DankMickey/Project-Altis-Educational-Source | 0a74999fb52d4e690a41b984703119f63c372d20 | [
"Apache-2.0"
] | 1 | 2021-06-25T02:56:32.000Z | 2021-06-25T02:56:32.000Z | toontown/battle/DistributedBattleBaseAI.py | kool601/Project-Altis-Educational-Source | 0a74999fb52d4e690a41b984703119f63c372d20 | [
"Apache-2.0"
] | null | null | null | toontown/battle/DistributedBattleBaseAI.py | kool601/Project-Altis-Educational-Source | 0a74999fb52d4e690a41b984703119f63c372d20 | [
"Apache-2.0"
] | 2 | 2017-12-20T17:46:56.000Z | 2021-06-25T02:56:36.000Z | import random
from otp.ai.AIBase import *
from direct.distributed.ClockDelta import *
from toontown.battle.BattleBase import *
from toontown.battle.BattleCalculatorAI import *
from toontown.toonbase.ToontownBattleGlobals import *
from toontown.battle.SuitBattleGlobals import *
from pandac.PandaModules import *
from toontown.battle import BattleExperienceAI
from direct.distributed import DistributedObjectAI
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.task import Task
from direct.directnotify import DirectNotifyGlobal
from toontown.ai import DatabaseObject
from toontown.toon import DistributedToonAI
from toontown.toon import InventoryBase
from toontown.toonbase import ToontownGlobals
from toontown.toon import NPCToons
from otp.ai.MagicWordGlobal import *
from toontown.pets import DistributedPetProxyAI
| 42.221267 | 279 | 0.551567 |
719475f300d53be54d446d8d9cab1b9a95946543 | 371 | py | Python | tracking_test.py | HsunGong/Augmented-Advertisement | ae9d0f5796c13e837a1a547d888647aeb61f0b04 | [
"MIT"
] | 5 | 2020-07-10T03:16:24.000Z | 2022-01-14T01:12:23.000Z | tracking_test.py | HsunGong/Augmented-Advertisement | ae9d0f5796c13e837a1a547d888647aeb61f0b04 | [
"MIT"
] | 4 | 2021-08-25T16:13:24.000Z | 2022-02-10T03:34:06.000Z | tracking_test.py | HsunGong/Augmented-Advertisement | ae9d0f5796c13e837a1a547d888647aeb61f0b04 | [
"MIT"
] | 1 | 2021-10-22T02:53:39.000Z | 2021-10-22T02:53:39.000Z | # Copyright (c) Group Three-Forest SJTU. All Rights Reserved.
from tracking.tracking import *
# a = tracking_video_rectangle("video/","1.mp4",[[273,352],[266,616],[412,620],[416,369]])
a = tracking_video_rectangle_tovideo("video/","1.mp4", "1.png", [[273,352],[266,616],[412,620],[416,369]], result = 'result__.avi', method_num = 5, edge = 4, middle_halt = 250)
| 53 | 177 | 0.668464 |
7195924eb07d641386ea892a9ee9a4835feb2275 | 11,102 | py | Python | gym_flock/envs/old/flocking_position.py | katetolstaya/gym-flock | 3236d1dafcb1b9be0cf78b471672e8becb2d37af | [
"MIT"
] | 19 | 2019-07-29T22:19:58.000Z | 2022-01-27T04:38:38.000Z | gym_flock/envs/old/flocking_position.py | henghenghahei849/gym-flock | b09bdfbbe4a96fe052958d1f9e1e9dd314f58419 | [
"MIT"
] | null | null | null | gym_flock/envs/old/flocking_position.py | henghenghahei849/gym-flock | b09bdfbbe4a96fe052958d1f9e1e9dd314f58419 | [
"MIT"
] | 5 | 2019-10-03T14:44:49.000Z | 2021-12-09T20:39:39.000Z | import gym
from gym import spaces, error, utils
from gym.utils import seeding
import numpy as np
from scipy.spatial.distance import pdist, squareform
import configparser
from os import path
import matplotlib.pyplot as plt
from matplotlib.pyplot import gca
font = {'family' : 'sans-serif',
'weight' : 'bold',
'size' : 14}
| 38.682927 | 126 | 0.582418 |
719665fcbb1b48dc2e95347865f8f0d20166bbd8 | 2,127 | py | Python | conf/constants.py | codingwangfeng/GoodGoodName | 02bfeb3ae65fd9ba0354f5b67237fcad4c0e11cb | [
"MIT"
] | null | null | null | conf/constants.py | codingwangfeng/GoodGoodName | 02bfeb3ae65fd9ba0354f5b67237fcad4c0e11cb | [
"MIT"
] | null | null | null | conf/constants.py | codingwangfeng/GoodGoodName | 02bfeb3ae65fd9ba0354f5b67237fcad4c0e11cb | [
"MIT"
] | null | null | null | # -*-coding:utf-8-*-
# from functools import reduce
from functools import reduce
SANCAI_jixiang = [1, 3, 5, 7, 8, 11, 13, 15, 16, 18, 21, 23, 24, 25, 31, 32, 33, 35, 37, 39, 41, 45, 47, 48, 52, 57, 61,
63,
65, 67, 68, 81] # ,,
SANCAI_xiaoji = [6, 17, 26, 27, 29, 30, 38, 49, 51, 55, 58, 71, 73, 75] #
SANCAI_xiong = [2, 4, 9, 10, 12, 14, 19, 20, 22, 28, 34, 36, 40, 42, 43, 44, 46, 50, 53, 54, 56, 59, 60, 62, 64, 66, 69,
70,
72, 74, 76, 77, 78, 79, 80] # ,,,,,
SANCAI_wise = [3, 13, 16, 21, 23, 29, 31, 37, 39, 41, 45, 47] # ,,
SANCAI_wealth = [15, 16, 24, 29, 32, 33, 41, 52] # ,,
SANCAI_artist = [13, 14, 18, 26, 29, 33, 35, 38, 48] # ,,,
SANCAI_goodwife = [5, 6, 11, 13, 15, 16, 24, 32, 35] #
SANCAI_death = [21, 23, 26, 28, 29, 33, 39] #
SANCAI_alone = [4, 10, 12, 14, 22, 28, 34] #
SANCAI_merry = [5, 6, 15, 16, 32, 39, 41] #
SANCAI_stubbon = [7, 17, 18, 25, 27, 28, 37, 47] # ,
SANCAI_gentle = [5, 6, 11, 15, 16, 24, 31, 32, 35] # ,
#
#
refer_good_num_list = [SANCAI_jixiang, SANCAI_xiaoji, SANCAI_wise, SANCAI_wealth, SANCAI_artist, SANCAI_goodwife,
SANCAI_merry, SANCAI_gentle]
#
good_num_list = [SANCAI_jixiang, SANCAI_xiaoji, SANCAI_wise, SANCAI_wealth, SANCAI_artist, SANCAI_goodwife,
SANCAI_merry, SANCAI_gentle]
#
refer_bad_num_list = [SANCAI_xiong, SANCAI_death, SANCAI_alone, SANCAI_stubbon]
#
bad_num_list = [SANCAI_xiong, SANCAI_death, SANCAI_alone]
good_num_set = set(reduce((lambda x, y: x + y), good_num_list, []))
bad_num_set = set(reduce((lambda x, y: x + y), bad_num_list, []))
print(':', good_num_set)
print(':', bad_num_set)
#
best_num_set = [x for x in good_num_set if x not in bad_num_set]
print(':', best_num_set)
RESULT_UNKNOWN = ''
| 49.465116 | 120 | 0.640809 |
7196a7afa44165b6070e17839c160c5651229421 | 406 | py | Python | main/migrations/0006_labourer_allproj.py | kevinmuturi5/farm-Management-system | 61929d7998d92d56daac67c2f8ace3cc76b6ee8b | [
"MIT"
] | 1 | 2020-11-24T14:39:54.000Z | 2020-11-24T14:39:54.000Z | main/migrations/0006_labourer_allproj.py | kevinmuturi5/farm-Management-system | 61929d7998d92d56daac67c2f8ace3cc76b6ee8b | [
"MIT"
] | null | null | null | main/migrations/0006_labourer_allproj.py | kevinmuturi5/farm-Management-system | 61929d7998d92d56daac67c2f8ace3cc76b6ee8b | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-10-18 16:07
from django.db import migrations, models
| 21.368421 | 72 | 0.603448 |
7196e863b7922259efe8d454892b5eb76fb7593e | 27,897 | py | Python | bzt/modules/blazemeter/blazemeter_reporter.py | beachwood23/taurus | 698ac747bae5d4940a879a8526add67c11ef42da | [
"Apache-2.0"
] | null | null | null | bzt/modules/blazemeter/blazemeter_reporter.py | beachwood23/taurus | 698ac747bae5d4940a879a8526add67c11ef42da | [
"Apache-2.0"
] | 34 | 2017-08-31T22:54:12.000Z | 2022-03-16T00:39:48.000Z | bzt/modules/blazemeter/blazemeter_reporter.py | beachwood23/taurus | 698ac747bae5d4940a879a8526add67c11ef42da | [
"Apache-2.0"
] | null | null | null | """
Module for reporting into http://www.blazemeter.com/ service
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import logging
import os
import platform
import sys
import time
import traceback
import zipfile
from collections import defaultdict, OrderedDict
from io import BytesIO
from urllib.error import HTTPError
import requests
from bzt import TaurusInternalException, TaurusConfigError, TaurusNetworkError
from bzt.bza import User, Session, Test
from bzt.engine import Reporter, Singletone
from bzt.utils import b, humanize_bytes, iteritems, open_browser, BetterDict, to_json, dehumanize_time
from bzt.modules.aggregator import AggregatorListener, DataPoint, KPISet, ResultsProvider, ConsolidatingAggregator
from bzt.modules.monitoring import Monitoring, MonitoringListener
from bzt.modules.blazemeter.project_finder import ProjectFinder
from bzt.modules.blazemeter.const import NOTE_SIZE_LIMIT
| 40.547965 | 120 | 0.566979 |
71975dd9b4598f0884460876d889b91d528834d3 | 20,434 | py | Python | nitorch/nn/losses/_spatial.py | wyli/nitorch | 3ecd18944cf45fb9193c4c6ffc32953c4d1c71ac | [
"MIT"
] | 1 | 2021-04-09T21:24:47.000Z | 2021-04-09T21:24:47.000Z | nitorch/nn/losses/_spatial.py | wyli/nitorch | 3ecd18944cf45fb9193c4c6ffc32953c4d1c71ac | [
"MIT"
] | null | null | null | nitorch/nn/losses/_spatial.py | wyli/nitorch | 3ecd18944cf45fb9193c4c6ffc32953c4d1c71ac | [
"MIT"
] | null | null | null | """
Losses that assume an underlying spatial organization
(gradients, curvature, etc.)
"""
import torch
import torch.nn as tnn
from nitorch.core.pyutils import make_list, prod
from nitorch.core.utils import slice_tensor
from nitorch.spatial import diff1d
from ._base import Loss
| 35.414211 | 90 | 0.553049 |
7197c87f66af380e5e98dd30c64711ce25f12d71 | 607 | py | Python | items/models.py | roberthtamayose/digitalmenu | 19c6633844934fd95f861674946da386411a19c9 | [
"MIT"
] | null | null | null | items/models.py | roberthtamayose/digitalmenu | 19c6633844934fd95f861674946da386411a19c9 | [
"MIT"
] | null | null | null | items/models.py | roberthtamayose/digitalmenu | 19c6633844934fd95f861674946da386411a19c9 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils import timezone
| 27.590909 | 73 | 0.726524 |
719810055bee113941d00e469e5cff1dcf6bfa92 | 114 | py | Python | app/services/__init__.py | zeroday0619/XenXenXenSe | 5af079e5edde3a6e4a1f5868052480d7b140d87c | [
"MIT"
] | 1 | 2021-04-23T08:56:05.000Z | 2021-04-23T08:56:05.000Z | app/services/__init__.py | Alex4386/XenXenXenSe | c60e50f26a7c3b306ee3cbb140b3ad7f39c21d93 | [
"MIT"
] | null | null | null | app/services/__init__.py | Alex4386/XenXenXenSe | c60e50f26a7c3b306ee3cbb140b3ad7f39c21d93 | [
"MIT"
] | null | null | null | from app.services.console import Console
from app.services.server import Server
__main__ = ["server", "console"]
| 22.8 | 40 | 0.780702 |
719876b6e33d3caa67b41082a88c72293d4411b5 | 2,801 | py | Python | launch/twist_mux_launch.py | nuclearsandwich-ros/twist_mux-release | d92dcda0255e727b899d3bac62ef3d89c19cb38e | [
"Apache-2.0"
] | 31 | 2017-11-25T17:13:00.000Z | 2022-01-20T14:39:12.000Z | launch/twist_mux_launch.py | nuclearsandwich-ros/twist_mux-release | d92dcda0255e727b899d3bac62ef3d89c19cb38e | [
"Apache-2.0"
] | 27 | 2015-05-22T13:35:04.000Z | 2021-12-29T07:26:02.000Z | launch/twist_mux_launch.py | nuclearsandwich-ros/twist_mux-release | d92dcda0255e727b899d3bac62ef3d89c19cb38e | [
"Apache-2.0"
] | 51 | 2015-10-16T11:41:24.000Z | 2022-03-28T07:33:24.000Z | #!/usr/bin/env python3
# Copyright 2020 Gaitech Korea Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: Brighten Lee
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
| 38.902778 | 84 | 0.63513 |
7199385be37350560f528085cc7c3bcbd212b172 | 5,298 | py | Python | Tests/testLiveService.py | psu-capstone-teamD/ElementalAuth | d896efad5a3e4cb453c324afc456aa82f82da239 | [
"MIT"
] | 2 | 2017-08-21T00:52:35.000Z | 2018-10-31T17:38:42.000Z | Tests/testLiveService.py | psu-capstone-teamD/ElementalAuth | d896efad5a3e4cb453c324afc456aa82f82da239 | [
"MIT"
] | 27 | 2017-07-27T21:10:35.000Z | 2017-08-24T21:19:23.000Z | Tests/testLiveService.py | psu-capstone-teamD/ElementalAuth | d896efad5a3e4cb453c324afc456aa82f82da239 | [
"MIT"
] | 2 | 2017-07-08T00:57:08.000Z | 2017-07-24T19:21:12.000Z | import sys
import unittest
import requests_mock
from mock import patch
sys.path.append('services/LiveService')
from LiveService import LiveService
L = LiveService()
baseURL = "https://yanexx65s8e1.live.elementalclouddev.com/api"
if __name__ == '__main__':
unittest.main()
| 35.557047 | 85 | 0.634957 |
719a07f87262fe8ff8cbef8ec2795807ff5db531 | 10,005 | py | Python | tests/models/test_stacking.py | LionelMassoulard/aikit | 98b2abaa3bf47ab46f2fd3c270010293de06dba9 | [
"BSD-2-Clause"
] | null | null | null | tests/models/test_stacking.py | LionelMassoulard/aikit | 98b2abaa3bf47ab46f2fd3c270010293de06dba9 | [
"BSD-2-Clause"
] | null | null | null | tests/models/test_stacking.py | LionelMassoulard/aikit | 98b2abaa3bf47ab46f2fd3c270010293de06dba9 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 14 11:49:10 2018
@author: Lionel Massoulard
"""
import pytest
import numpy as np
import pandas as pd
from sklearn.base import is_regressor, is_classifier
from sklearn.exceptions import NotFittedError
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.dummy import DummyRegressor
from aikit.models.stacking import OutSamplerTransformer, StackerClassifier, StackerRegressor
| 29.254386 | 172 | 0.687856 |
719a305b1e0f6ee4015df4fc0e1d42b61d553b49 | 1,611 | py | Python | employee/views/check_rental.py | odrolliv13/Hex-Photos | d1b42b63394783164f843fe6343491f04fe11e0c | [
"Apache-2.0"
] | null | null | null | employee/views/check_rental.py | odrolliv13/Hex-Photos | d1b42b63394783164f843fe6343491f04fe11e0c | [
"Apache-2.0"
] | null | null | null | employee/views/check_rental.py | odrolliv13/Hex-Photos | d1b42b63394783164f843fe6343491f04fe11e0c | [
"Apache-2.0"
] | null | null | null | from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from manager import models as pmod
from . import templater
from django.conf import settings
import decimal, datetime
# This view will display all users and then on a new page display all the current rentals for a given user
| 30.980769 | 150 | 0.703911 |
719bca03a01e24f7c868ad83a281e40679838ca7 | 1,521 | py | Python | jupyter/settings.py | nguyenngtt/GSE---TEAM-A | 4f78c1ace051d4f2ff30a039aa481aa9b79d3242 | [
"MIT"
] | 3 | 2021-11-21T08:47:18.000Z | 2021-11-28T10:35:10.000Z | jupyter/settings.py | nguyenngtt/GSE---TEAM-A | 4f78c1ace051d4f2ff30a039aa481aa9b79d3242 | [
"MIT"
] | 6 | 2021-11-29T02:00:49.000Z | 2022-02-08T09:21:38.000Z | jupyter/settings.py | nguyenngtt/GSE---TEAM-A | 4f78c1ace051d4f2ff30a039aa481aa9b79d3242 | [
"MIT"
] | 3 | 2021-12-11T08:11:08.000Z | 2022-01-10T12:51:48.000Z | import pandas as pd
import numpy as np
import os
import logging
# suppress warnings
import warnings;
warnings.filterwarnings('ignore');
from tqdm.autonotebook import tqdm
# register `pandas.progress_apply` and `pandas.Series.map_apply` with `tqdm`
tqdm.pandas()
# https://pandas.pydata.org/pandas-docs/stable/user_guide/options.html#available-options
# adjust pandas display
pd.options.display.max_columns = 30  # default 20
pd.options.display.max_rows = 200  # default 60
pd.options.display.float_format = '{:.2f}'.format  # two decimals for floats
# pd.options.display.precision = 2
pd.options.display.max_colwidth = 200  # default 50; None = all
# Number of array items in summary at beginning and end of each dimension
# np.set_printoptions(edgeitems=3) # default 3
np.set_printoptions(suppress=True)  # no scientific notation for small numbers
# IPython (Jupyter) setting:
# Print out every value instead of just "last_expr" (default)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import matplotlib as mpl
from matplotlib import pyplot as plt
# defaults: mpl.rcParamsDefault
# Shared plot defaults applied below via mpl.rcParams.update.
rc_params = {'figure.figsize': (8, 4),
             'axes.labelsize': 'large',
             'axes.titlesize': 'large',
             'xtick.labelsize': 'large',
             'ytick.labelsize': 'large',
             'savefig.dpi': 100,
             'figure.dpi': 100 }
# adjust matplotlib defaults
mpl.rcParams.update(rc_params)
import seaborn as sns
sns.set_style("darkgrid")
# sns.set()
| 30.42 | 88 | 0.724523 |
719d88c236122420bab454b120302ded66f22838 | 828 | py | Python | var/spack/repos/builtin/packages/py-cyvcf2/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/py-cyvcf2/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/py-cyvcf2/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
| 31.846154 | 96 | 0.689614 |
719e5a0939a4c90bfd66956e7385e51aac9d612e | 340 | py | Python | pset_functions/db_search/p1.py | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | [
"MIT"
] | 5 | 2019-04-08T20:05:37.000Z | 2019-12-04T20:48:45.000Z | pset_functions/db_search/p1.py | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | [
"MIT"
] | 8 | 2019-04-15T15:16:05.000Z | 2022-02-12T10:33:32.000Z | pset_functions/db_search/p1.py | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | [
"MIT"
] | 2 | 2019-04-10T00:14:42.000Z | 2020-02-26T20:35:21.000Z | """
GPA Calculator
"""
# Write a function called "simple_gpa" to find GPA when student enters a letter grade as a string. Assign the result to a variable called "gpa".
"""
Use these conversions:
A+ --> 4.0
A --> 4.0
A- --> 3.7
B+ --> 3.3
B --> 3.0
B- --> 2.7
C+ --> 2.3
C --> 2.0
C- --> 1.7
D+ --> 1.3
D --> 1.0
D- --> 0.7
F --> 0.0
"""
| 14.166667 | 144 | 0.538235 |
719e7932fde71fc017391588fcca49763cf61208 | 5,283 | py | Python | test_soundcard.py | flying-sheep/SoundCard | b476c8142b460fc8161d374b282fe846d72a0780 | [
"BSD-3-Clause"
] | 1 | 2020-01-27T00:59:12.000Z | 2020-01-27T00:59:12.000Z | test_soundcard.py | flying-sheep/SoundCard | b476c8142b460fc8161d374b282fe846d72a0780 | [
"BSD-3-Clause"
] | null | null | null | test_soundcard.py | flying-sheep/SoundCard | b476c8142b460fc8161d374b282fe846d72a0780 | [
"BSD-3-Clause"
] | null | null | null | import sys
import soundcard
import numpy
import pytest
# Stereo test fixture: shape (1024, 2); column 0 (left) is all +1,
# column 1 (right) is all -1, so each channel is identifiable after a loopback.
ones = numpy.ones(1024)
signal = numpy.concatenate([[ones], [-ones]]).T
def test_loopback_playback(loopback_player, loopback_recorder):
    """Round-trip the stereo test signal and check both channels survive."""
    loopback_player.play(signal)
    captured = loopback_recorder.record(1024 * 10)
    assert captured.shape[1] == 2
    ch_left, ch_right = captured.T
    # Left carries the +1 pulses, right the -1 pulses (see the module-level signal).
    assert ch_left.mean() > 0
    assert ch_right.mean() < 0
    # Every sample of the pulse train must show up in the capture.
    assert (ch_left > 0.5).sum() == len(signal)
    assert (ch_right < -0.5).sum() == len(signal)
def test_loopback_reverse_recorder_channelmap(loopback_player, loopback_microphone):
    """A recorder channel map of [1, 0] must deliver the channels swapped."""
    rec_ctx = loopback_microphone.recorder(48000, channels=[1, 0], blocksize=512)
    with rec_ctx as loopback_recorder:
        loopback_player.play(signal)
        captured = loopback_recorder.record(1024 * 12)
    assert captured.shape[1] == 2
    first, second = captured.T
    # With the swapped map, the positive pulses arrive on the second column.
    assert second.mean() > 0
    assert first.mean() < 0
    assert (second > 0.5).sum() == len(signal)
    assert (first < -0.5).sum() == len(signal)
def test_loopback_reverse_player_channelmap(loopback_speaker, loopback_recorder):
    """A player channel map of [1, 0] must emit the channels swapped."""
    play_ctx = loopback_speaker.player(48000, channels=[1, 0], blocksize=512)
    with play_ctx as loopback_player:
        loopback_player.play(signal)
        captured = loopback_recorder.record(1024 * 12)
    assert captured.shape[1] == 2
    first, second = captured.T
    # Swapped on the playback side: positives land on the second column.
    assert second.mean() > 0
    assert first.mean() < 0
    assert (second > 0.5).sum() == len(signal)
    assert (first < -0.5).sum() == len(signal)
def test_loopback_mono_player_channelmap(loopback_speaker, loopback_recorder):
    """Playing one mono channel through channels=[0] must reach only the left side."""
    with loopback_speaker.player(48000, channels=[0], blocksize=512) as loopback_player:
        loopback_player.play(signal[:, 0])
        captured = loopback_recorder.record(1024 * 12)
    assert captured.shape[1] == 2
    ch_left, ch_right = captured.T
    assert ch_left.mean() > 0
    if sys.platform == 'linux':
        # On Linux, unmapped channels are filled with the mean of the mapped ones.
        assert ch_right.mean() < ch_left.mean()
    else:
        # Elsewhere the unmapped channel stays essentially silent.
        assert abs(ch_right.mean()) < 0.01
    assert (ch_left > 0.5).sum() == len(signal)
def test_loopback_mono_recorder_channelmap(loopback_player, loopback_microphone):
    """Recording through channels=[0] must yield a single, positive channel."""
    mono_rec = loopback_microphone.recorder(48000, channels=[0], blocksize=512)
    with mono_rec as loopback_recorder:
        loopback_player.play(signal)
        captured = loopback_recorder.record(1024 * 12)
    # Depending on the backend the result is 1-D or an (N, 1) column vector.
    assert len(captured.shape) == 1 or captured.shape[1] == 1
    assert captured.mean() > 0
    assert (captured > 0.5).sum() == len(signal)
def test_loopback_multichannel_channelmap(loopback_speaker, loopback_microphone):
    """Matching [2, 0] maps on player and recorder must round-trip both channels."""
    with loopback_speaker.player(48000, channels=[2, 0], blocksize=512) as loopback_player, \
            loopback_microphone.recorder(48000, channels=[2, 0], blocksize=512) as loopback_recorder:
        loopback_player.play(signal)
        captured = loopback_recorder.record(1024 * 12)
    assert len(captured.shape) == 2
    first, second = captured.T
    # Identical maps on both ends cancel out: original channel order returns.
    assert first.mean() > 0
    assert second.mean() < 0
    assert (first > 0.5).sum() == len(signal)
    assert (second < -0.5).sum() == len(signal)
719fb32d418ed1529b6d751555ff2385cebf2266 | 623 | py | Python | Last 3 digits of 11^x.py | jaiveergill/Last-Three-Digits-of-11-x | def4519b9b46e41b4c4f2b3a5dbe5566316dd83e | [
"MIT"
] | null | null | null | Last 3 digits of 11^x.py | jaiveergill/Last-Three-Digits-of-11-x | def4519b9b46e41b4c4f2b3a5dbe5566316dd83e | [
"MIT"
] | null | null | null | Last 3 digits of 11^x.py | jaiveergill/Last-Three-Digits-of-11-x | def4519b9b46e41b4c4f2b3a5dbe5566316dd83e | [
"MIT"
] | null | null | null | # This is a simple program to find the last three digits of 11 raised to any given number.
# The main algorithm that does the work is on line 10
# To use it, simply copy the code and run the function
| 44.5 | 179 | 0.662921 |
719fd87192b7b49949a8b70a475fd96677b03575 | 6,137 | py | Python | osr_odometry/scripts/osr_odom_ackerman2.py | ljb2208/osr-rover-code | f4791d835cd760446777a226d37bb3114256affd | [
"Apache-2.0"
] | null | null | null | osr_odometry/scripts/osr_odom_ackerman2.py | ljb2208/osr-rover-code | f4791d835cd760446777a226d37bb3114256affd | [
"Apache-2.0"
] | null | null | null | osr_odometry/scripts/osr_odom_ackerman2.py | ljb2208/osr-rover-code | f4791d835cd760446777a226d37bb3114256affd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import time
from osr_msgs.msg import Joystick, Commands, Encoder, RunStop
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point, Pose, Quaternion, Twist, Vector3
import rospy
import tf
import math
import numpy
if __name__ == '__main__':
    # Entry point: configure the ROS node, build the odometry estimator, and spin.
    rospy.init_node('osr_odometry2')
    rospy.loginfo("Starting the osr odometry2 node")
    # Frame id the odometry is published relative to.
    baseFrame = rospy.get_param("/odometry/base_frame_id", "base_link")
    # mpt = rospy.get_param("/odometry/mpt", 0.000026322)
    # mpt: presumably meters per encoder tick — TODO confirm against Odometry2.
    mpt = rospy.get_param("/odometry/mpt", 0.000100708)
    # Distance between left and right wheels (meters).
    wheelTrack = rospy.get_param("/odometry/wheel_track", 0.455)
    # d4: rover geometry constant used by the Ackermann model — see Odometry2.
    d4 = rospy.get_param("/odometry/d4", 0.2559)
    # Encoder readings faster than this are treated as invalid.
    maxTickPerSec = rospy.get_param("/odometry/maxTickPerSec", 8000)
    # Whether to also broadcast a TF transform alongside the odometry topic.
    publishTF = rospy.get_param("~publishTF", False)
    odom = Odometry2(baseFrame, wheelTrack, mpt, d4, maxTickPerSec, pubTF=publishTF)
    # Odometry is updated from wheel-encoder messages via the callback.
    encSub = rospy.Subscriber("/encoder", Encoder, odom.onEncoderMessage)
    # Keep the process alive at 20 Hz until ROS shuts the node down.
    rate = rospy.Rate(20)
    while not rospy.is_shutdown():
        rate.sleep()
| 30.532338 | 96 | 0.579599 |
71a0b40b2d964c1cdacc2a99529ad40612493ff0 | 4,199 | py | Python | src/simulation-conditioning/utilities/data-generation-scripts/Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_A_train.py | alisiahkoohi/importance-of-transfer-learning | bb4c7943f4ff64a2f1785503328b4cbb4f5111aa | [
"MIT"
] | null | null | null | src/simulation-conditioning/utilities/data-generation-scripts/Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_A_train.py | alisiahkoohi/importance-of-transfer-learning | bb4c7943f4ff64a2f1785503328b4cbb4f5111aa | [
"MIT"
] | 4 | 2020-09-25T22:32:41.000Z | 2022-02-09T23:36:02.000Z | src/simulation-conditioning/utilities/data-generation-scripts/Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_A_train.py | slimgroup/Software.siahkoohi2019itl | bb4c7943f4ff64a2f1785503328b4cbb4f5111aa | [
"MIT"
] | null | null | null | import numpy as np
import h5py
import os
from devito.logger import info
from devito import TimeFunction, clear_cache
from examples.seismic.acoustic import AcousticWaveSolver
from examples.seismic import Model, RickerSource, Receiver, TimeAxis
from math import floor
from scipy.interpolate import griddata
import argparse
# Command-line interface: where to read the raw velocity model and write HDF5 output.
parser = argparse.ArgumentParser(description='')
parser.add_argument('--data_path', dest='data_path', type=str, default='/home/ec2-user/data', help='raw data path')
parser.add_argument('--save_dir', dest='save_dir', type=str, default='/home/ec2-user/data', help='saving directory')
args = parser.parse_args()
data_path = args.data_path
save_dir = args.save_dir
# Physical model setup: grid origin, spacing (m), final time (ms), PML width.
origin = (0., 0.)
spacing=(7.5, 7.5)
tn=1100.
nbpml=40
# Define your vp in km/sec (x, z)
# Raw Marmousi velocity model stored as flat float32 binary, shape (1601, 401).
vp = np.fromfile(os.path.join(data_path, 'vp_marmousi_bi'),
                 dtype='float32', sep="")
vp = np.reshape(vp, (1601, 401))
# vp = vp[400:1401, 0:401]
shape=[401, 301]
# Flatten the model into (index, value) pairs so it can be re-gridded below.
values = np.zeros([vp.shape[0]*vp.shape[1], ])
points = np.zeros([vp.shape[0]*vp.shape[1], 2])
k = 0
for indx in range(0, vp.shape[0]):
    for indy in range(0, vp.shape[1]):
        values[k] = vp[indx, indy]
        points[k, 0] = indx
        points[k, 1] = indy
        k = k + 1
# nx, ny = shape[0], shape[1]
# Cubic interpolation of the sub-window [1000, 1287] x [120, 232] onto the 401x301 grid.
X, Y = np.meshgrid(np.array(np.linspace(1000, 1287, shape[0])), np.array(np.linspace(120, 232, shape[1])))
int_vp = griddata(points, values, (X, Y), method='cubic')
int_vp = np.transpose(int_vp)
vp = int_vp
# create model
model = Model(origin, spacing, shape, 2, vp, nbpml=nbpml)
# Derive timestepping from model spacing
dt = model.critical_dt
t0 = 0.0
nt = int(1 + (tn-t0) / dt)  # Number of timesteps
time = np.linspace(t0, tn, nt)  # Discretized time axis
# One source every 4 grid points; one wavefield snapshot every 20 steps from step 100.
datasize0 = int(np.shape(range(0, shape[0], 4))[0])
datasize1 = int(np.shape(range(100, nt, 20))[0])
datasize = datasize0*datasize1
# Output pair: A = accurate (high-order) wavefields, B = cheap (low-order) wavefields.
strTrainA = os.path.join(save_dir, 'Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_A_train.hdf5')
strTrainB = os.path.join(save_dir, 'Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_B_train.hdf5')
dataset_train = "train_dataset"
file_trainA = h5py.File(strTrainA, 'w-')
datasetA = file_trainA.create_dataset(dataset_train, (datasize, shape[0]+2*nbpml, shape[1]+2*nbpml))
file_trainB = h5py.File(strTrainB, 'w-')
datasetB = file_trainB.create_dataset(dataset_train, (datasize, shape[0]+2*nbpml, shape[1]+2*nbpml))
# Receiver line across the top of the model.
num_rec = 601
rec_samp = np.linspace(0., model.domain_size[0], num=num_rec);
rec_samp = rec_samp[1]-rec_samp[0]
time_range = TimeAxis(start=t0, stop=tn, step=dt)
src = RickerSource(name='src', grid=model.grid, f0=0.025, time_range=time_range, space_order=1, npoint=1)
src.coordinates.data[0, :] = np.array([1*spacing[0], 2*spacing[1]]).astype(np.float32)
rec = Receiver(name='rec', grid=model.grid, time_range=time_range, npoint=num_rec)
rec.coordinates.data[:, 0] = np.linspace(0., model.domain_size[0], num=num_rec)
rec.coordinates.data[:, 1:] = src.coordinates.data[0, 1:]
# Two solvers over the same model: space_order=2 ("bad") vs space_order=20 ("good").
solverbad = AcousticWaveSolver(model, source=src, receiver=rec, kernel='OT2', isic=True,
                               space_order=2, freesurface=False)
solvergood = AcousticWaveSolver(model, source=src, receiver=rec, kernel='OT2', isic=True,
                                space_order=20, freesurface=False)
ulocgood = TimeFunction(name="u", grid=model.grid, time_order=2, space_order=20, save=nt)
ulocbad = TimeFunction(name="u", grid=model.grid, time_order=2, space_order=2, save=nt)
kk = 0
# Sweep the source along x; save paired good/bad snapshots for each shot.
for xsrc in range(0, shape[0], 4):
    clear_cache()
    ulocgood.data.fill(0.)
    ulocbad.data.fill(0.)
    src.coordinates.data[0, :] = np.array([xsrc*spacing[0], 2*spacing[1]]).astype(np.float32)
    rec.coordinates.data[:, 0] = np.linspace(0., model.domain_size[0], num=num_rec)
    rec.coordinates.data[:, 1:] = src.coordinates.data[0, 1:]
    _, ulocgood, _ = solvergood.forward(m=model.m, src=src, time=nt-1, save=True)
    _, ulocbad, _ = solverbad.forward(m=model.m, src=src, time=nt-1, save=True)
    # Subsample in time (every 20th step from step 100) before writing to disk.
    datasetA[kk:(kk+datasize1), :, :] = np.array(ulocgood.data[range(100, nt, 20), :, :])
    datasetB[kk:(kk+datasize1), :, :] = np.array(ulocbad.data[range(100, nt, 20), :, :])
    kk = kk + datasize1
file_trainA.close()
file_trainB.close()
| 34.702479 | 116 | 0.700881 |
71a155a137fa83ef0306a441e11bd003d9b6a750 | 154 | py | Python | facto.py | divine-coder/CODECHEF-PYTHON | a1e34d6f9f75cf7b9497f1ef2f937cb4f64f1543 | [
"MIT"
] | null | null | null | facto.py | divine-coder/CODECHEF-PYTHON | a1e34d6f9f75cf7b9497f1ef2f937cb4f64f1543 | [
"MIT"
] | 4 | 2020-10-04T07:49:30.000Z | 2021-10-02T05:24:40.000Z | facto.py | divine-coder/CODECHEF-PYTHON | a1e34d6f9f75cf7b9497f1ef2f937cb4f64f1543 | [
"MIT"
] | 7 | 2020-10-04T07:46:55.000Z | 2021-11-05T14:30:00.000Z | import math
if __name__ == '__main__':
    # Read the number of test cases, then print the factorial of each value.
    # Fixed for Python 3: the original used the Python 2 statement form
    # "print math.factorial(t)", which is a SyntaxError under Python 3.
    n = int(input())
    for _ in range(n):
        t = int(input())
        print(math.factorial(t))
| 17.111111 | 31 | 0.512987 |
71a1f9b966a655c142f90e8f1814eebae105ba9e | 373 | py | Python | setup.py | johnmartingodo/pyKinematicsKineticsToolbox | 4ffc99885f3c637b8c33914a4e50ccb4595fc844 | [
"MIT"
] | null | null | null | setup.py | johnmartingodo/pyKinematicsKineticsToolbox | 4ffc99885f3c637b8c33914a4e50ccb4595fc844 | [
"MIT"
] | null | null | null | setup.py | johnmartingodo/pyKinematicsKineticsToolbox | 4ffc99885f3c637b8c33914a4e50ccb4595fc844 | [
"MIT"
] | null | null | null | from setuptools import setup
# Package metadata consumed by pip/setuptools when installing this project.
setup(name="pykinematicskineticstoolbox",
      version="0.0",
      description="Installable python package which collects useful kinematics and kinetics functions",
      author="John Martin K. God",
      author_email="john.martin.kleven.godo@gmail.com",
      license="MIT",
      packages=["pykinematicskineticstoolbox"],
      install_requires=["numpy"],
      )
| 31.083333 | 100 | 0.753351 |
71a33e281903173f09972e5b14ecf88c5dd711ba | 1,251 | py | Python | summary/summary_avail.py | bit0fun/plugins | 1f6f701bf1e60882b8fa61cb735e7033c8c29e3c | [
"BSD-3-Clause"
] | 173 | 2019-01-17T12:40:47.000Z | 2022-03-27T12:14:00.000Z | summary/summary_avail.py | bit0fun/plugins | 1f6f701bf1e60882b8fa61cb735e7033c8c29e3c | [
"BSD-3-Clause"
] | 284 | 2019-03-01T17:54:14.000Z | 2022-03-29T13:27:51.000Z | summary/summary_avail.py | bit0fun/plugins | 1f6f701bf1e60882b8fa61cb735e7033c8c29e3c | [
"BSD-3-Clause"
] | 92 | 2019-02-26T03:45:40.000Z | 2022-03-28T03:23:50.000Z | from datetime import datetime
# ensure an rpc peer is added
# exponetially smooth online/offline states of peers
| 36.794118 | 108 | 0.60032 |
71a38554040095f344a4dbd4dbed0540a3d29b06 | 505 | py | Python | terrascript/dns/r.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | 4 | 2022-02-07T21:08:14.000Z | 2022-03-03T04:41:28.000Z | terrascript/dns/r.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | null | null | null | terrascript/dns/r.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | 2 | 2022-02-06T01:49:42.000Z | 2022-02-08T14:15:00.000Z | # terrascript/dns/r.py
import terrascript
| 14.428571 | 48 | 0.778218 |
71a3ec3949c4d0b824f364cf880c163e7d4093ec | 749 | py | Python | JumpscaleCore/clients/tcprouter/TCPRouterFactory.py | gneumann333/jumpscaleX_core | 777d249fa3668c6e802c2f765f4b82fb39c3e5fa | [
"Apache-2.0"
] | 1 | 2020-06-21T11:18:52.000Z | 2020-06-21T11:18:52.000Z | JumpscaleCore/clients/tcprouter/TCPRouterFactory.py | gneumann333/jumpscaleX_core | 777d249fa3668c6e802c2f765f4b82fb39c3e5fa | [
"Apache-2.0"
] | 644 | 2019-08-25T10:19:56.000Z | 2020-12-23T09:41:04.000Z | JumpscaleCore/clients/tcprouter/TCPRouterFactory.py | gneumann333/jumpscaleX_core | 777d249fa3668c6e802c2f765f4b82fb39c3e5fa | [
"Apache-2.0"
] | 11 | 2019-08-29T21:38:50.000Z | 2020-06-21T11:18:55.000Z | from Jumpscale import j
from .TCPRouterClient import TCPRouterClient
JSConfigs = j.baseclasses.object_config_collection
| 22.029412 | 81 | 0.580774 |
71a54794818c1c14503bf2853a8ad157b14a963f | 8,837 | py | Python | nmrglue/fileio/spinsolve.py | miguelarbesu/nmrglue | 6ca36de7af1a2cf109f40bf5afe9c1ce73c9dcdc | [
"BSD-3-Clause"
] | null | null | null | nmrglue/fileio/spinsolve.py | miguelarbesu/nmrglue | 6ca36de7af1a2cf109f40bf5afe9c1ce73c9dcdc | [
"BSD-3-Clause"
] | null | null | null | nmrglue/fileio/spinsolve.py | miguelarbesu/nmrglue | 6ca36de7af1a2cf109f40bf5afe9c1ce73c9dcdc | [
"BSD-3-Clause"
] | null | null | null | """
Functions for reading Magritek Spinsolve binary (dx/1d) files and
parameter (acqu.par/proc.par) files.
"""
import os
from warnings import warn
import numpy as np
from . import fileiobase
from . import jcampdx
__developer_info__ = """
Spinsolve is the software used on the Magritek benchtop NMR devices.
A spectrum is saved in a folder with several files. The spectral data is
stored in these files: 'data.1d' (FID), 'spectrum.1d' (Fourier transformed)
and 'spectrum_processed.1d' (FT + processed by spinsolve)
Optional spectral data (System->Prefs->Setup->Global data storage):
'nmr_fid.dx' (FID stored in `JCAMP-DX standard <http://www.jcamp-dx.org/>`),
'spectrum.csv' and 'spectrum_processed.csv' (FT + processed by Spinsovle with ppm for each
point and intensity delimited by ';')
Other files:
'acqu.par' - all parameters that are used for acquisition
'Protocol.par' - text file used to reload data back into the Spinsolve software
'processing.script' - text file to transfer Spinsolve software protocol settings
into MNOVA
The Spinsolve Expert software has a slightly different output:
[Needs to be double checked as I do not have access to this software -LCageman]
- Output into JCAMP-DX is not possible
- 'spectrum_processed.1d' is not generated
- (new) 'fid.1d' - seems to be the same as 'data.1d'
- (new) 'proc.par' - contains processing parameters in the same style as 'acqu.par'
- (new) .pt1 files - seem to be plot files specific for the expert software, cannot
be read by NMRglue
"""
def _read_par_file(filepath):
    """Parse a Spinsolve parameter file into a dict of stripped strings.

    Each line has the form ``key = value``.  Returns an empty dict when the
    file does not exist, so missing acqu.par/proc.par are not an error.
    """
    params = {}
    if os.path.isfile(filepath):
        with open(filepath, "r") as f:
            for line in f:
                key, value = line.replace("\n", "").split("=")
                params[key.strip()] = value.strip()
    return params


def read(dir='.', specfile=None, acqupar="acqu.par", procpar="proc.par"):
    """
    Reads spinsolve files from a directory
    When no spectrum filename is given (specfile), the following list is tried, in
    that specific order
    ["nmr_fid.dx", "data.1d", "fid.1d", "spectrum.1d", "spectrum_processed.1d"]
    To use the resolution enhanced spectrum use the './Enhanced' folder as input.
    Note that spectrum.1d and spectrum_processed.1d contain only data in the
    frequency domain, so no Fourier transformation is needed. Also, use
    dic["spectrum"]["xaxis"] to plot the x-axis

    Parameters
    ----------
    dir : str
        Directory to read from
    specfile : str, optional
        Filename to import spectral data from. None uses standard filename from:
        ["nmr_fid.dx", "data.1d", "fid.1d", "spectrum.1d", "spectrum_processed.1d"]
    acqupar : str, optional
        Filename for acquisition parameters. None uses standard name.
    procpar : str, optional
        Filename for processing parameters. None uses standard name.

    Returns
    -------
    dic : dict
        All parameters that can be present in the data folder:
        dic["spectrum"] - First bytes of spectrum(_processed).1d
        dic["acqu"] - Parameters present in acqu.par
        dic["proc"] - Parameters present in proc.par
        dic["dx"] - - Parameters present in the header of nmr_fid.dx
    data : ndarray
        Array of NMR data
    """
    if not os.path.isdir(dir):
        raise IOError("directory %s does not exist" % (dir))

    # Create empty dic; acqu.par / proc.par are optional and parsed identically.
    dic = {"spectrum": {}, "acqu": {}, "proc": {}, "dx": {}}
    dic["acqu"] = _read_par_file(os.path.join(dir, acqupar))
    dic["proc"] = _read_par_file(os.path.join(dir, procpar))

    # Define which spectrumfile to take, using 'specfile' when defined, otherwise
    # the files in 'priority_list' are tried, in that particular order
    priority_list = ["nmr_fid.dx", "data.1d", "fid.1d", "spectrum.1d", "spectrum_processed.1d", None]
    if specfile:
        inputfile = os.path.join(dir, specfile)
        if not os.path.isfile(inputfile):
            raise IOError("File %s does not exist" % (inputfile))
    else:
        for priority in priority_list:
            if priority is None:
                raise IOError("directory %s does not contain spectral data" % (dir))
            inputfile = os.path.join(dir, priority)
            if os.path.isfile(inputfile):
                break

    # Detect which file we are dealing with from the extension and read in the
    # spectral data.
    if inputfile.split('.')[-1] == "dx":
        # JCAMP-DX via nmrglue.fileio.jcampdx: real/imaginary come back as two
        # separate arrays.  (The original pre-allocated an np.empty from $TD and
        # immediately overwrote it; that dead store has been removed.)
        dic["dx"], raw_data = jcampdx.read(inputfile)
        data = raw_data[0][:] + 1j * raw_data[1][:]
    elif inputfile.split('.')[-1] == "1d":
        with open(inputfile, "rb") as f:
            raw_data = f.read()
        # First 32 bytes are eight little-endian int32 header fields.
        keys = ["owner", "format", "version", "dataType", "xDim", "yDim", "zDim", "qDim"]
        for i, k in enumerate(keys):
            start = i * 4
            end = start + 4
            dic["spectrum"][k] = int.from_bytes(raw_data[start:end], "little")
        # ``end`` is 32 here: the float payload starts right after the header.
        data = np.frombuffer(raw_data[end:], "<f")
        # The first 1/3 of the payload is x-axis data (s or ppm); the rest is
        # real and imaginary data points interleaved.
        split = data.shape[-1] // 3
        dic["spectrum"]["xaxis"] = data[0:split]
        data = data[split::2] + 1j * data[split + 1::2]
    else:
        raise IOError("File %s cannot be interpreted, use .dx or .1d instead" % (inputfile))

    return dic, data
def guess_udic(dic,data):
    """
    Guess parameters of universal dictionary from dic, data pair.

    Parameters
    ----------
    dic : dict
        Dictionary of JCAMP-DX, acqu, proc and spectrum parameters.
    data : ndarray
        Array of NMR data.

    Returns
    -------
    udic : dict
        Universal dictionary of spectral parameters.
    """
    # Create an empty universal dictionary
    udic = fileiobase.create_blank_udic(1)
    # Update default parameters: first acqu.par parameters in dic are tried,
    # then JCAMP-DX header parameters, then values derived from the spectrum
    # block.  Each fallback chain relies on KeyError from a missing dict key.
    # size
    if data is not None:
        udic[0]["size"] = len(data)
    else:
        warn('No data, cannot set udic size')
    # sw (spectral width, Hz)
    try:
        udic[0]['sw'] = float(dic['acqu']['bandwidth']) * 1000
    except KeyError:
        try:
            udic[0]['sw'] = float(dic['dx']['$SW'][0]) * float(dic['dx']['$BF1'][0])
        except KeyError:
            try:
                # NOTE(review): read() in this module never stores a "freqdata"
                # key in dic["spectrum"], so this lookup raises KeyError and
                # falls through to the warning below — confirm intended.
                if dic["spectrum"]["freqdata"]:
                    udic[0]['sw'] = dic["spectrum"]["xaxis"][-1] - dic["spectrum"]["xaxis"][0]
                elif data is not None:
                    udic[0]['sw'] = len(data) / dic["spectrum"]["xaxis"][-1]
                else:
                    warn("Cannot set spectral width - set manually using: 'udic[0]['sw'] = x' where x is the spectral width in Hz")
            except KeyError:
                warn("Cannot set spectral width - set manually using: 'udic[0]['sw'] = x' where x is the spectral width in Hz")
    # obs (observation frequency, MHz)
    try:
        udic[0]['obs'] = float(dic['acqu']['b1Freq'])
    except KeyError:
        try:
            udic[0]['obs'] = float(dic['dx']['$BF1'][0])
        except KeyError:
            warn("Cannot set observe frequency - set manually using: 'udic[0]['obs'] = x' where x is magnetic field in MHz")
    # car (carrier frequency, Hz); the second fallback uses udic[0]['obs'] set above
    try:
        udic[0]['car'] = float(dic['acqu']['lowestFrequency']) + (float(dic['acqu']['bandwidth']) * 1000 / 2)
    except KeyError:
        try:
            udic[0]['car'] = (float(dic['dx']['$REFERENCEPOINT'][0]) * -1 ) + (float(dic['dx']['$SW'][0]) * udic[0]['obs'] / 2)
        except KeyError:
            try:
                udic[0]['car'] = (float(dic['dx']['$BF1'][0]) - float(dic['dx']['$SF'][0])) * 1000000
            except KeyError:
                warn("Cannot set carrier - try: 'udic[0]['car'] = x * udic[0]['obs']' where x is the center of the spectrum in ppm")
    # label (observed nucleus, e.g. "1H")
    try:
        udic[0]['label'] = dic['acqu']['rxChannel']
    except KeyError:
        try:
            label_value = dic['dx'][".OBSERVENUCLEUS"][0].replace("^", "")
            udic[0]["label"] = label_value
        except KeyError:
            warn("Cannot set observed nucleus label")
    # keys left to default
    # udic[0]['complex']
    # udic[0]['encoding']
    # udic[0]['time'] = True
    # udic[0]['freq'] = False
    return udic
| 37.764957 | 132 | 0.593188 |
71a6a1b4c00b5723fdf1d5cebd6d02a67810c5fb | 21,781 | py | Python | src/navigation_analytics/navigation_data.py | mielgosez/navigation_analytics | 3c382e8200afe4d37fa0880f155bf1bb2f48b83f | [
"MIT"
] | null | null | null | src/navigation_analytics/navigation_data.py | mielgosez/navigation_analytics | 3c382e8200afe4d37fa0880f155bf1bb2f48b83f | [
"MIT"
] | null | null | null | src/navigation_analytics/navigation_data.py | mielgosez/navigation_analytics | 3c382e8200afe4d37fa0880f155bf1bb2f48b83f | [
"MIT"
] | null | null | null | import logging
import copy
import pickle
import pandas as pd
| 40.186347 | 120 | 0.620724 |
71a73e1712465a4bec511db6faf72a21ab1c2e2c | 946 | py | Python | openskill/statistics.py | CalColson/openskill.py | ab61ca57fa6e60140d0a292c73440f22ceabd9a2 | [
"MIT"
] | 120 | 2021-09-03T03:06:11.000Z | 2022-03-28T05:54:54.000Z | openskill/statistics.py | CalColson/openskill.py | ab61ca57fa6e60140d0a292c73440f22ceabd9a2 | [
"MIT"
] | 48 | 2021-09-23T07:15:13.000Z | 2022-03-31T14:47:25.000Z | openskill/statistics.py | CalColson/openskill.py | ab61ca57fa6e60140d0a292c73440f22ceabd9a2 | [
"MIT"
] | 6 | 2022-01-20T16:45:28.000Z | 2022-03-28T23:48:07.000Z | import sys
import scipy.stats
normal = scipy.stats.norm(0, 1)
| 19.306122 | 83 | 0.516913 |
71abaaff24dc05f9c229f77e4b27cc8d68a5b7f5 | 14,189 | py | Python | src/openalea/container/graph.py | revesansparole/oacontainer | 066a15b8b1b22f857bf25ed443c5f39f4cbefb3e | [
"MIT"
] | null | null | null | src/openalea/container/graph.py | revesansparole/oacontainer | 066a15b8b1b22f857bf25ed443c5f39f4cbefb3e | [
"MIT"
] | null | null | null | src/openalea/container/graph.py | revesansparole/oacontainer | 066a15b8b1b22f857bf25ed443c5f39f4cbefb3e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Graph : graph package
#
# Copyright or Copr. 2006 INRIA - CIRAD - INRA
#
# File author(s): Jerome Chopard <jerome.chopard@sophia.inria.fr>
#
# Distributed under the Cecill-C License.
# See accompanying file LICENSE.txt or copy at
# http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html
#
# VPlants WebSite : https://gforge.inria.fr/projects/vplants/
#
"""This module provide a simple pure python implementation
for a graph interface
does not implement copy concept
"""
from id_dict import IdDict
| 26.571161 | 78 | 0.512651 |
71acaf064514ffdbe1a52492a693bd272d32dbf5 | 8,439 | py | Python | nets/mobilenet_v2_ssd.py | GT-AcerZhang/PaddlePaddle-SSD | 3833afe3470b7dc811409b3d8111b98dc31c6d0e | [
"Apache-2.0"
] | 47 | 2020-03-25T01:42:45.000Z | 2022-03-23T12:03:46.000Z | nets/mobilenet_v2_ssd.py | tianxiehu/PaddlePaddle-SSD | ae2ec69b65cc181fdb4275b295f145dc22e71ddb | [
"Apache-2.0"
] | 1 | 2021-06-30T13:02:59.000Z | 2022-01-13T09:48:07.000Z | nets/mobilenet_v2_ssd.py | tianxiehu/PaddlePaddle-SSD | ae2ec69b65cc181fdb4275b295f145dc22e71ddb | [
"Apache-2.0"
] | 9 | 2020-06-01T13:28:44.000Z | 2021-06-17T02:42:55.000Z | import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
if __name__ == '__main__':
    # Smoke test: build the SSD network on a 300x300 RGB input with 21 classes
    # (presumably 20 + background, Pascal VOC — confirm against training config).
    data = fluid.data(name='data', shape=[None, 3, 300, 300])
    build_ssd(data, 21, img_shape=[3, 300, 300])
| 44.650794 | 114 | 0.422799 |
71ad91d94d2021895fed2197ad1e1027179c068d | 5,844 | py | Python | oneflow/python/test/ops/test_object_bbox_scale.py | caishenghang/oneflow | db239cc9f98e551823bf6ce2d4395bd5c339b1c5 | [
"Apache-2.0"
] | 2 | 2021-09-10T00:19:49.000Z | 2021-11-16T11:27:20.000Z | oneflow/python/test/ops/test_object_bbox_scale.py | duijiudanggecl/oneflow | d2096ae14cf847509394a3b717021e2bd1d72f62 | [
"Apache-2.0"
] | null | null | null | oneflow/python/test/ops/test_object_bbox_scale.py | duijiudanggecl/oneflow | d2096ae14cf847509394a3b717021e2bd1d72f62 | [
"Apache-2.0"
] | 1 | 2021-11-10T07:57:01.000Z | 2021-11-10T07:57:01.000Z | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
import random
import cv2
import numpy as np
import oneflow as flow
import oneflow.typing as oft
if __name__ == "__main__":
unittest.main()
| 32.287293 | 88 | 0.688912 |
71ae6ca7d57af38b1b86f8540325942204357879 | 1,767 | py | Python | vagrant/kafka/bin/init.py | BertRaeymaekers/scrapbook | 3c8483d4594356fbc84deb8d6496db3d856492c1 | [
"MIT"
] | null | null | null | vagrant/kafka/bin/init.py | BertRaeymaekers/scrapbook | 3c8483d4594356fbc84deb8d6496db3d856492c1 | [
"MIT"
] | null | null | null | vagrant/kafka/bin/init.py | BertRaeymaekers/scrapbook | 3c8483d4594356fbc84deb8d6496db3d856492c1 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
import json
import os.path
import jinja2
DEFAULT_PARAMS = {
"ansible_user": "vagrant"
}
if __name__ == "__main__":
# Reading configuration
here = os.path.dirname(os.path.realpath(__file__ + "/../"))
with open(here + "/config.json", "r") as rf:
config = json.load(rf)
print(json.dumps(config, sort_keys=True, indent=4))
# Generating an inventory file
with open(here + "/playbook/inventory/hosts", "w") as inventory:
inventory.write("[kafka]\n")
for host in config["hosts"]:
# Setting default values and updating them when more specific.
params = dict()
params.update(DEFAULT_PARAMS)
params.update(config["params"])
params.update(config["hosts"][host])
# Setting some extra ansible paramters.
params["ansible_ssh_host"] = params["ip"]
inventory.write("%s\t%s\n" % (host, " ".join(("%s=%s" % (k,v) for k,v in params.items()))))
# Generating the Vagrantfile
env = jinja2.Environment(loader=jinja2.FileSystemLoader(here + "/templates/"))
template = env.get_template('Vagrantfile.j2')
template.stream(**config).dump(here + '/vagrant/Vagrantfile')
# Generating group vars for kafka
with open(here + "/playbook/group_vars/kafka.yml", "w") as gv:
gv.write("---\n")
gv.write("hosts:\n")
for (host, params) in config["hosts"].items():
gv.write(" %s: '%s.%s'\n" % (params["ip"], params["hostname"], config["params"]["domain" ]))
gv.write("kafka:\n")
gv.write(" hosts:\n")
for (host, params) in config["hosts"].items():
gv.write(" - %s.%s\n" % (params["hostname"], config["params"]["domain" ]))
| 35.34 | 107 | 0.589134 |
71aed94e4374b265d7146087fcd15cb6a8415441 | 883 | py | Python | harvest/models/beastsimulator.py | lmaurits/harvest | df6b549096da8ae2f4ed38aa2be19c7e82fa60e3 | [
"BSD-2-Clause"
] | 1 | 2016-10-23T13:24:44.000Z | 2016-10-23T13:24:44.000Z | harvest/models/beastsimulator.py | lmaurits/harvest | df6b549096da8ae2f4ed38aa2be19c7e82fa60e3 | [
"BSD-2-Clause"
] | null | null | null | harvest/models/beastsimulator.py | lmaurits/harvest | df6b549096da8ae2f4ed38aa2be19c7e82fa60e3 | [
"BSD-2-Clause"
] | null | null | null | import os
import harvest.dataframe
from harvest.models.simulator import Simulator
| 30.448276 | 73 | 0.673839 |
71af526fe8ec36b7ab5df62ce53a7484137b158f | 770 | py | Python | assimilator.py | DutChen18/slime-clusters-cuda | 186d198665a017cf0eacde33765b6cb3cb4aecb5 | [
"MIT"
] | null | null | null | assimilator.py | DutChen18/slime-clusters-cuda | 186d198665a017cf0eacde33765b6cb3cb4aecb5 | [
"MIT"
] | null | null | null | assimilator.py | DutChen18/slime-clusters-cuda | 186d198665a017cf0eacde33765b6cb3cb4aecb5 | [
"MIT"
] | null | null | null | # pylint: skip-file
import os
from assimilator import *
from Boinc import boinc_project_path
if __name__ == "__main__":
SlimeClustersAssimilator().run() | 29.615385 | 68 | 0.661039 |
71af9d8ca1143528cfcbc75651debdacf07e53c4 | 12,343 | py | Python | modin/core/execution/ray/implementations/cudf_on_ray/dataframe/dataframe.py | Rubtsowa/modin | 6550939753c76e896ef2bfd65bb9468d6ad161d7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | modin/core/execution/ray/implementations/cudf_on_ray/dataframe/dataframe.py | Rubtsowa/modin | 6550939753c76e896ef2bfd65bb9468d6ad161d7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | modin/core/execution/ray/implementations/cudf_on_ray/dataframe/dataframe.py | Rubtsowa/modin | 6550939753c76e896ef2bfd65bb9468d6ad161d7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Module houses class that implements ``PandasOnRayDataframe`` class using cuDF."""
import numpy as np
import ray
from ..partitioning.partition import cuDFOnRayDataframePartition
from ..partitioning.partition_manager import cuDFOnRayDataframePartitionManager
from modin.core.execution.ray.implementations.pandas_on_ray.dataframe.dataframe import (
PandasOnRayDataframe,
)
from modin.error_message import ErrorMessage
| 41.006645 | 101 | 0.573767 |
71afcdef0e0e86f29155c36a2d10beb1ffdab1ce | 1,527 | py | Python | Exoplanet_Population.py | mw5868/University | 076c9b001dbfe3765607877be4f89ccf86a88331 | [
"MIT"
] | null | null | null | Exoplanet_Population.py | mw5868/University | 076c9b001dbfe3765607877be4f89ccf86a88331 | [
"MIT"
] | null | null | null | Exoplanet_Population.py | mw5868/University | 076c9b001dbfe3765607877be4f89ccf86a88331 | [
"MIT"
] | null | null | null | from astropy.table import Table, Column
import matplotlib.pyplot as plt
#url = "https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=exoplanets&select=pl_hostname,ra,dec&order=dec&format=csv"
url = "https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=exoplanets"
# This API returns Hostname, RA and Dec
t = Table.read(url, format="csv")
# One (letter, colour, legend label) entry per planet-count group.  The
# original labelled letter "b" as "2 Planets", "c" as "3 Planets", etc. —
# presumably the planet letter encodes how many planets the system hosts
# (letters start at "b"); TODO confirm against the exoplanet archive docs.
PLANET_STYLES = (
    ("b", "Black", "2 Planets"),
    ("c", "red", "3 Planets"),
    ("d", "blue", "4 Planets"),
    ("e", "green", "5 Planets"),
    ("f", "yellow", "6 Planets"),
    ("g", "purple", "7 Planets"),
    ("h", "orange", "8 Planets"),
    ("i", "cyan", "9 Planets"),
)

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, aspect="equal")
# One scatter layer per group, replacing eight copy-pasted filter/scatter
# pairs (t_b .. t_i) from the original.
for letter, colour, label in PLANET_STYLES:
    subset = t[t["pl_letter"] == letter]
    ax.scatter(subset["ra"], subset["dec"], color=colour, label=label)

# RA axis runs 360 -> 0 so the sky reads as on a conventional chart.
ax.set_xlim(360, 0)
ax.set_ylim(-90, 90)
ax.set_ylabel("DEC")
ax.set_xlabel("RA")
# Typo fix: "Explanets" -> "Exoplanets".
ax.set_title("Positions of Exoplanets by number of planets in system")
# The original called legend() twice (ax.legend then plt.legend on the same
# current axes); a single call at the end produces the same figure.
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show() | 42.416667 | 144 | 0.668631 |
71b199d12891c79153389fe28f6188e598ac7c21 | 792 | py | Python | src/pe_problem74.py | henrimitte/Project-Euler | 77fd9f5b076d1ca2e5ed4ef94bf8d32d9ed611eb | [
"MIT"
] | null | null | null | src/pe_problem74.py | henrimitte/Project-Euler | 77fd9f5b076d1ca2e5ed4ef94bf8d32d9ed611eb | [
"MIT"
] | null | null | null | src/pe_problem74.py | henrimitte/Project-Euler | 77fd9f5b076d1ca2e5ed4ef94bf8d32d9ed611eb | [
"MIT"
] | null | null | null | from tools import factorial
if __name__ == '__main__':
solve()
| 22.628571 | 55 | 0.474747 |
71b28ef18b75d4bcb886bea855f0ba76dd2bc9f2 | 27,966 | py | Python | thingsboard_gateway/connectors/modbus/modbus_connector.py | ferguscan/thingsboard-gateway | bc20fdb8e46f840b8538a010db2714ec6071fa5b | [
"Apache-2.0"
] | null | null | null | thingsboard_gateway/connectors/modbus/modbus_connector.py | ferguscan/thingsboard-gateway | bc20fdb8e46f840b8538a010db2714ec6071fa5b | [
"Apache-2.0"
] | null | null | null | thingsboard_gateway/connectors/modbus/modbus_connector.py | ferguscan/thingsboard-gateway | bc20fdb8e46f840b8538a010db2714ec6071fa5b | [
"Apache-2.0"
] | null | null | null | # Copyright 2022. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread
from time import sleep, time
from queue import Queue
from random import choice
from string import ascii_lowercase
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
# Try import Pymodbus library or install it and import
try:
from pymodbus.constants import Defaults
except ImportError:
print("Modbus library not found - installing...")
TBUtility.install_package("pymodbus", ">=2.3.0")
TBUtility.install_package('pyserial')
from pymodbus.constants import Defaults
try:
from twisted.internet import reactor
except ImportError:
TBUtility.install_package('twisted')
from twisted.internet import reactor
from twisted.internet import reactor
from pymodbus.bit_write_message import WriteSingleCoilResponse, WriteMultipleCoilsResponse
from pymodbus.register_write_message import WriteMultipleRegistersResponse, WriteSingleRegisterResponse
from pymodbus.register_read_message import ReadRegistersResponseBase
from pymodbus.bit_read_message import ReadBitsResponseBase
from pymodbus.client.sync import ModbusTcpClient, ModbusUdpClient, ModbusSerialClient
from pymodbus.client.sync import ModbusRtuFramer, ModbusSocketFramer, ModbusAsciiFramer
from pymodbus.exceptions import ConnectionException
from pymodbus.server.asynchronous import StartTcpServer, StartUdpServer, StartSerialServer, StopServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.version import version
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from pymodbus.datastore import ModbusSparseDataBlock
from thingsboard_gateway.connectors.connector import Connector, log
from thingsboard_gateway.connectors.modbus.constants import *
from thingsboard_gateway.connectors.modbus.slave import Slave
from thingsboard_gateway.connectors.modbus.backward_compability_adapter import BackwardCompatibilityAdapter
from thingsboard_gateway.connectors.modbus.bytes_modbus_downlink_converter import BytesModbusDownlinkConverter
# Sections of a converted message that are forwarded to the platform.
CONVERTED_DATA_SECTIONS = [ATTRIBUTES_PARAMETER, TELEMETRY_PARAMETER]
# Pymodbus framer class for each configured framing mode.
FRAMER_TYPE = {
    'rtu': ModbusRtuFramer,
    'socket': ModbusSocketFramer,
    'ascii': ModbusAsciiFramer
}
# Pymodbus asynchronous server entry point per slave transport type.
SLAVE_TYPE = {
    'tcp': StartTcpServer,
    'udp': StartUdpServer,
    'serial': StartSerialServer
}
# Short register-block codes used by the pymodbus slave context
# (co = coils, hr = holding registers, ir = input registers,
# di = discrete inputs).
FUNCTION_TYPE = {
    'coils_initializer': 'co',
    'holding_registers': 'hr',
    'input_registers': 'ir',
    'discrete_inputs': 'di'
}
# Modbus write function codes per section: (write single, write multiple).
FUNCTION_CODE_WRITE = {
    'holding_registers': (6, 16),
    'coils_initializer': (5, 15)
}
# Modbus read function codes per section.
FUNCTION_CODE_READ = {
    'holding_registers': 3,
    'coils_initializer': 1,
    'input_registers': 4,
    'discrete_inputs': 2
}
| 50.389189 | 121 | 0.567582 |
71b2acdd2d92ff5dd5a3e30aa5f776064be270a0 | 966 | py | Python | specs/test_gru_on_flat_babyai.py | xwu20/wmg_agent | 25378c8fc54eb6e0e8c9d969760a72e843572f09 | [
"MIT"
] | 23 | 2020-07-08T15:58:51.000Z | 2022-01-13T04:22:03.000Z | specs/test_gru_on_flat_babyai.py | xwu20/wmg_agent | 25378c8fc54eb6e0e8c9d969760a72e843572f09 | [
"MIT"
] | 3 | 2021-06-08T21:58:37.000Z | 2022-01-13T03:00:32.000Z | specs/test_gru_on_flat_babyai.py | xwu20/wmg_agent | 25378c8fc54eb6e0e8c9d969760a72e843572f09 | [
"MIT"
] | 11 | 2020-07-31T11:13:29.000Z | 2021-11-10T08:37:12.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
### CONTROLS (non-tunable) ###
# general
TYPE_OF_RUN = test_episodes # train, test, test_episodes, render
NUM_EPISODES_TO_TEST = 1000
MIN_FINAL_REWARD_FOR_SUCCESS = 1.0
LOAD_MODEL_FROM = models/gru_flat_babyai.pth
SAVE_MODELS_TO = None
# worker.py
ENV = BabyAI_Env
ENV_RANDOM_SEED = 1
AGENT_RANDOM_SEED = 1
REPORTING_INTERVAL = 1
TOTAL_STEPS = 1
ANNEAL_LR = False
# A3cAgent
AGENT_NET = GRU_Network
# BabyAI_Env
BABYAI_ENV_LEVEL = BabyAI-GoToLocal-v0
USE_SUCCESS_RATE = True
SUCCESS_RATE_THRESHOLD = 0.99
HELDOUT_TESTING = False
NUM_TEST_EPISODES = 10000
OBS_ENCODER = Flat
BINARY_REWARD = True
### HYPERPARAMETERS (tunable) ###
# A3cAgent
A3C_T_MAX = 4
LEARNING_RATE = 4e-05
DISCOUNT_FACTOR = 0.9
GRADIENT_CLIP = 512.0
ENTROPY_TERM_STRENGTH = 0.02
ADAM_EPS = 1e-12
REWARD_SCALE = 2.0
WEIGHT_DECAY = 0.
# RNNs
NUM_RNN_UNITS = 96
OBS_EMBED_SIZE = 512
AC_HIDDEN_LAYER_SIZE = 4096
| 19.714286 | 65 | 0.774327 |
71b31d76fcd9783bbf00ab94b135126e5908e931 | 3,474 | bzl | Python | haskell/private/actions/runghc.bzl | meisterT/rules_haskell | 7c0a867fc23da104ea8cbff26864894abcf137bc | [
"Apache-2.0"
] | null | null | null | haskell/private/actions/runghc.bzl | meisterT/rules_haskell | 7c0a867fc23da104ea8cbff26864894abcf137bc | [
"Apache-2.0"
] | null | null | null | haskell/private/actions/runghc.bzl | meisterT/rules_haskell | 7c0a867fc23da104ea8cbff26864894abcf137bc | [
"Apache-2.0"
] | null | null | null | """runghc support"""
load(":private/context.bzl", "render_env")
load(":private/packages.bzl", "expose_packages", "pkg_info_to_compile_flags")
load(
":private/path_utils.bzl",
"link_libraries",
"ln",
"target_unique_name",
)
load(
":private/set.bzl",
"set",
)
load(":providers.bzl", "get_ghci_extra_libs")
load("@bazel_skylib//lib:shell.bzl", "shell")
def build_haskell_runghc(
        hs,
        runghc_wrapper,
        user_compile_flags,
        extra_args,
        hs_info,
        cc_info,
        output,
        package_databases,
        version,
        lib_info = None):
    """Build runghc script.

    Expands the wrapper template into an executable runghc script and
    symlinks it to `output`.

    Args:
      hs: Haskell context.
      runghc_wrapper: File, template expanded into the final script.
      user_compile_flags: Compiler flags specified on the rule.
      extra_args: Extra runghc arguments, appended after all compiler
        flags so they take priority in GHC.
      hs_info: HaskellInfo.
      cc_info: CcInfo, used to collect the GHCi shared libraries and the
        environment rendered into the script.
      output: File, the executable output the generated script is linked to.
      package_databases: package caches excluding the cache file of the package
        we're creating a runghc for.
      version: Package version forwarded to expose_packages.
      lib_info: If we're building runghc for a library target, pass
        HaskellLibraryInfo here, otherwise it should be None.
    Returns:
      None.
    """
    # Compile flags and input files that expose the required package DBs.
    (pkg_info_inputs, args) = pkg_info_to_compile_flags(
        hs,
        pkg_info = expose_packages(
            package_ids = hs.package_ids,
            package_databases = package_databases,
            version = version,
        ),
        prefix = "runghc-",
    )
    # For library targets, make the source import directories visible.
    if lib_info != None:
        for idir in set.to_list(hs_info.import_dirs):
            args += ["-i{0}".format(idir)]
    # Shared libraries runghc must be able to load, and the environment
    # (rendered into {ENV} below) needed to locate them at run time.
    (ghci_extra_libs, ghc_env) = get_ghci_extra_libs(
        hs,
        cc_info,
        path_prefix = "$RULES_HASKELL_EXEC_ROOT",
    )
    link_libraries(ghci_extra_libs, args)
    runghc_file = hs.actions.declare_file(target_unique_name(hs, "runghc"))
    # Extra arguments.
    # `compiler flags` is the default set of arguments for runghc,
    # augmented by `extra_args`.
    # The ordering is important, first compiler flags (from toolchain
    # and local rule), then from `extra_args`. This way the more
    # specific arguments are listed last, and then have more priority in
    # GHC.
    # Note that most flags for GHCI do have their negative value, so a
    # negative flag in `extra_args` can disable a positive flag set
    # in `user_compile_flags`, such as `-XNoOverloadedStrings` will disable
    # `-XOverloadedStrings`.
    args += hs.toolchain.compiler_flags + user_compile_flags + hs.toolchain.repl_ghci_args
    # ghc args need to be wrapped up in "--ghc-arg=" when passing to runghc
    runcompile_flags = ["--ghc-arg=%s" % a for a in args]
    runcompile_flags += extra_args
    # Render the wrapper template into the actual executable script.
    hs.actions.expand_template(
        template = runghc_wrapper,
        output = runghc_file,
        substitutions = {
            "{ENV}": render_env(ghc_env),
            "{TOOL}": hs.tools.runghc.path,
            "{CC}": hs.toolchain.cc_wrapper.executable.path,
            "{ARGS}": " ".join([shell.quote(a) for a in runcompile_flags]),
        },
        is_executable = True,
    )
    # XXX We create a symlink here because we need to force
    # hs.tools.runghc and the best way to do that is
    # to use hs.actions.run. That action, in turn must produce
    # a result, so using ln seems to be the only sane choice.
    extra_inputs = depset(transitive = [
        depset([
            hs.tools.runghc,
            runghc_file,
        ]),
        package_databases,
        pkg_info_inputs,
        ghci_extra_libs,
        hs_info.source_files,
        hs.toolchain.cc_wrapper.runfiles.files,
    ])
    ln(hs, runghc_file, output, extra_inputs)
| 31.017857 | 90 | 0.627231 |
71b4b6265ccad83e3c8c7743ef9150f9f16b46b0 | 8,456 | py | Python | tests/dicom/test_header_tweaks.py | pymedphys/pymedphys-archive-2019 | 6bb7c8d0da2e93ff56469bb47e65b15ece2ea25e | [
"Apache-2.0"
] | 1 | 2020-12-20T14:13:56.000Z | 2020-12-20T14:13:56.000Z | tests/dicom/test_header_tweaks.py | pymedphys/pymedphys-archive-2019 | 6bb7c8d0da2e93ff56469bb47e65b15ece2ea25e | [
"Apache-2.0"
] | 6 | 2020-10-06T15:36:46.000Z | 2022-02-27T05:15:17.000Z | tests/dicom/test_header_tweaks.py | cpbhatt/pymedphys | 177b3db8e2a6e83c44835d0007d1d5c7a420fd99 | [
"Apache-2.0"
] | 1 | 2020-12-20T14:14:00.000Z | 2020-12-20T14:14:00.000Z | # Copyright (C) 2019 Cancer Care Associates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import uuid
import numpy as np
import pydicom
from pymedphys._dicom.create import dicom_dataset_from_dict
from pymedphys._dicom.header import (
RED_adjustment_map_from_structure_names,
adjust_machine_name,
adjust_RED_by_structure_name,
adjust_rel_elec_density,
)
from pymedphys._dicom.utilities import remove_file
HERE = os.path.dirname(__file__)
ORIGINAL_DICOM_FILENAME = os.path.join(
HERE, "scratch", "original-{}.dcm".format(str(uuid.uuid4()))
)
ADJUSTED_DICOM_FILENAME = os.path.join(
HERE, "scratch", "adjusted-{}.dcm".format(str(uuid.uuid4()))
)
| 30.637681 | 87 | 0.534532 |
71b4b95cd8eac603e64cc2b55ede32f9146ce21d | 1,929 | py | Python | tests/components/http/test_data_validator.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 23 | 2017-11-15T21:03:53.000Z | 2021-03-29T21:33:48.000Z | tests/components/http/test_data_validator.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 39 | 2016-12-16T12:40:34.000Z | 2017-02-13T17:53:42.000Z | tests/components/http/test_data_validator.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 10 | 2018-01-01T00:12:51.000Z | 2021-12-21T23:08:05.000Z | """Test data validator decorator."""
from unittest.mock import Mock
from aiohttp import web
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.data_validator import RequestDataValidator
| 27.169014 | 85 | 0.610679 |
71b4ce87227b2fcaa01e098fed2fec676e7173d5 | 7,410 | py | Python | Conversely_Frontend/app/Server/ukjp/templates.py | sam-aldis/Conversley | 1fc30d6b768cc03f727229a52e0879fac3af1e3a | [
"MIT"
] | null | null | null | Conversely_Frontend/app/Server/ukjp/templates.py | sam-aldis/Conversley | 1fc30d6b768cc03f727229a52e0879fac3af1e3a | [
"MIT"
] | null | null | null | Conversely_Frontend/app/Server/ukjp/templates.py | sam-aldis/Conversley | 1fc30d6b768cc03f727229a52e0879fac3af1e3a | [
"MIT"
] | null | null | null | import days
STAGE_INIT = 0
STAGE_CHALLENGE_INIT = 1
STAGE_BOOKED = 2
messages = [
"Hey {{first_name}}, thankyou for your enquiry to be one of our Transformation Challengers",
"We have 2 Challenges available for you:\n\nThe 8 Week Bikini Challenge which helps you shed 3-9kg of unwanted body fat, flattens your tummy and tones your arms, abs, legs and butt.\n\nOr our 9in6 Challenge which helps you drop 9+kgs of pure fat in just 6 Weeks.",
"Please choose which challenge information you would like below..."
]
callbacks = {
"INIT_8WBC" : [
{
"type": "message",
"text" : "Thank you {{first_name}},\n\
The FREE 8 Week Bikini Challenge is a done for you - step by step PROVEN program that helps you lose the 3-7kg of unwanted body fat, flatten your tummy and tone your arms, legs and butt.\n\
\n\
This is your chance to transform your body in just 8 weeks for FREE"
},
{
"type" : "message",
"text" : "In exchange for the program being FREE....we ask that you allow us to share your transformation story on our Facebook fan page for marketing purposes to help motivate and inspire the ladies of Perth. \n\
(Please note, a small refundable deposit applies to keep you motivated throughout the 8 weeks)"
},
{
"type": "message",
"text": "The challenge is starting Monday 12th of June and to start your 8 Week Bikini Challenge, we just require you to attend the upcoming information meeting at the facility to quickly go over the program in person. \n\
\n\
There is absolutely no high pressure sales or obligation to join. Simply a meet and chat.\n\
\n\
To RSVP to the meeting click a suitable date below"
},
{
"type" : "json",
"template" : "init_8wbc"
}
],
"INIT_9IN6" : [
{
"type" : "message",
"text" : "Thank you {{first_name}},\n\
The 9in6 Transformation Challenge is a done for you - step by step PROVEN program that helps you lose 9kg kilos of unwanted body fat, flatten your tummy and tone your arms, legs and butt in just 6 weeks.\n\
\
\nThis is your chance to transform your body in just 6 weeks for FREE!"
},
{
"type" : "message",
"text" : "In exchange for the program, we ask that you allow us to showcase your transformation story on our Facebook fan page for marketing purposes to help motivate and inspire the ladies of Perth. When you complete the program its FREE. \n\
Please note, a small refundable \"incentive deposit\" applies to keep you motivated throughout the 6 weeks."
},
{
"type" : "message",
"text" : "The challenge is starting Monday 12th of June and to start your 9kg 6-week challenge, we require you to attend the upcoming information meeting where we explain the program in person. \n\
\n\
There is absolutely no high pressure sales or obligation to join at the end, just an opportunity for you learn about the program and how you can lose 9kg in 6 weeks for FREE\n\
\n\
To RSVP to the meeting click a suitable date below"
},
{
"type" : "json",
"template" : "init_9in6"
}
],
"TIME_TABLE_8WBC" : [
{
"type" : "message",
"text" : "Sure here's our lesson time table.."
},
{
"type" : "file",
"url" : "http://thetransformationcentre.com.au/img/timetable.pdf"
},
{
"type" : "json",
"template" : "init_8wbc"
}
]
} | 47.197452 | 276 | 0.523752 |
71b54a23f9d4b30c276bd6f326098f146a43547e | 1,349 | py | Python | var/spack/repos/builtin/packages/pagmo2/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/pagmo2/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/pagmo2/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
| 33.725 | 96 | 0.679021 |
71b725d9d3a609a2e8415f6bcdfe99ef3f2dd580 | 4,984 | py | Python | interferogram/sentinel/fetchCalES.py | earthobservatory/ariamh-pub | f33731e127f38ff33b02e02c07b16793c07651a6 | [
"Apache-2.0"
] | 4 | 2019-11-19T03:35:35.000Z | 2020-12-07T18:43:11.000Z | interferogram/sentinel/fetchCalES.py | earthobservatory/ariamh-pub | f33731e127f38ff33b02e02c07b16793c07651a6 | [
"Apache-2.0"
] | 3 | 2019-06-05T03:35:55.000Z | 2020-04-09T14:16:08.000Z | interferogram/sentinel/fetchCalES.py | earthobservatory/ariamh-pub | f33731e127f38ff33b02e02c07b16793c07651a6 | [
"Apache-2.0"
] | 6 | 2019-08-23T22:53:11.000Z | 2021-11-06T15:15:30.000Z | #!/usr/bin/env python3
import os, sys, re, json, requests, datetime, tarfile, argparse
from pprint import pprint
import numpy as np
from utils.UrlUtils import UrlUtils
server = 'https://qc.sentinel1.eo.esa.int/'
cal_re = re.compile(r'S1\w_AUX_CAL')
def cmdLineParse(iargs=None):
    """Parse command-line options for the aux-cal fetch script.

    Args:
        iargs: Optional list of argument strings to parse instead of
            ``sys.argv[1:]``.  Defaults to None (use the real command
            line), so the original zero-argument call keeps working.

    Returns:
        argparse.Namespace with ``outdir`` (str) and ``dry_run`` (bool).
    """
    parser = argparse.ArgumentParser(
        description='Fetch calibration auxiliary files ingested into HySDS')
    parser.add_argument('-o', '--output', dest='outdir', type=str, default='.',
                        help='Path to output directory')
    parser.add_argument('-d', '--dry-run', dest='dry_run', action='store_true',
                        help="Don't download anything; just output the URLs")
    # argparse falls back to sys.argv[1:] when iargs is None.
    return parser.parse_args(iargs)
def download_file(url, outdir='.', session=None):
    """Stream *url* into ``<outdir>/<basename(url)>.tgz`` and return the path.

    A fresh ``requests`` session is created when *session* is None.
    NOTE(review): TLS verification is disabled (``verify=False``) — kept
    as-is to match the original behavior against the qc endpoint.
    """
    sess = session if session is not None else requests.session()
    path = "%s.tgz" % os.path.join(outdir, os.path.basename(url))
    print('Downloading URL: ', url)
    response = sess.get(url, stream=True, verify=False)
    response.raise_for_status()
    with open(path, 'wb') as out:
        for block in response.iter_content(chunk_size=1024):
            if not block:
                continue
            out.write(block)
            out.flush()
    return path
def untar_file(path, outdir):
    """Extract aux cal files from the archive at *path* into *outdir*.

    Raises:
        RuntimeError: if *path* is not a tar archive, or if the archive
            contains a member that would be written outside *outdir*
            (path-traversal protection; these archives are downloaded
            from a remote server).
    """
    if not tarfile.is_tarfile(path):
        raise RuntimeError("%s is not a tarfile." % path)
    base = os.path.realpath(outdir)
    with tarfile.open(path) as f:
        for member in f.getmembers():
            # Refuse members whose resolved destination escapes outdir
            # (e.g. "../evil" or absolute paths) before extracting.
            target = os.path.realpath(os.path.join(base, member.name))
            if target != base and not target.startswith(base + os.sep):
                raise RuntimeError("unsafe path in tarfile: %s" % member.name)
        f.extractall(outdir)
def get_active_ids(es_url):
    """Query the GRQ Elasticsearch index for the active calibration IDs."""
    # Match the single "active" document, newest first.
    query = {
        "query": {
            "bool": {
                "must": [
                    {"term": {"_id": "S1_AUX_CAL_ACTIVE"}},
                ]
            }
        },
        "sort": [{"starttime": {"order": "desc"}}],
    }
    es_index = "grq_*_s1-aux_cal_active"
    sep = '' if es_url.endswith('/') else '/'
    search_url = '%s%s%s/_search' % (es_url, sep, es_index)
    r = requests.post(search_url, data=json.dumps(query))
    if r.status_code != 200:
        print("Failed to query %s:\n%s" % (es_url, r.text), file=sys.stderr)
        print("query: %s" % json.dumps(query, indent=2), file=sys.stderr)
        print("returned: %s" % r.text, file=sys.stderr)
        r.raise_for_status()
        # Mirrors the original's implicit None for non-200 success codes.
        return None
    result = r.json()
    total = result['hits']['total']
    if total == 0:
        raise RuntimeError("Failed to find S1_AUX_CAL_ACTIVE at %s." % search_url)
    return result['hits']['hits'][0]['_source']['metadata']['active_ids']
def get_cal_url(id, es_url):
    """Query the GRQ Elasticsearch index for the active calibration url."""
    query = {
        "query": {
            "bool": {
                "must": [
                    {"term": {"_id": id}},
                ]
            }
        },
        "fields": ["urls", "metadata.archive_filename"],
    }
    es_index = "grq_*_s1-aux_cal"
    sep = '' if es_url.endswith('/') else '/'
    search_url = '%s%s%s/_search' % (es_url, sep, es_index)
    r = requests.post(search_url, data=json.dumps(query))
    if r.status_code != 200:
        print("Failed to query %s:\n%s" % (es_url, r.text), file=sys.stderr)
        print("query: %s" % json.dumps(query, indent=2), file=sys.stderr)
        print("returned: %s" % r.text, file=sys.stderr)
        r.raise_for_status()
        # Mirrors the original's implicit None for non-200 success codes.
        return None
    result = r.json()
    pprint(result)
    total = result['hits']['total']
    if total == 0:
        raise RuntimeError("Failed to find %s at %s." % (id, search_url))
    fields = result['hits']['hits'][0]['fields']
    archive_fname = fields['metadata.archive_filename'][0]
    # First URL served over HTTP(S); IndexError if none, as before.
    url = [x for x in fields['urls'] if x.startswith('http')][0]
    return os.path.join(url, archive_fname)
if __name__ == '__main__':
inps = cmdLineParse()
fetch(inps.outdir, inps.dry_run)
| 29.491124 | 105 | 0.573234 |
71b74d81702689c7914ede59827af8b7196bc18b | 2,590 | py | Python | www/conservancy/urls.py | stain/conservancy-website | 9e41ddff766fe517a99198d60701193e8b68415e | [
"0BSD"
] | null | null | null | www/conservancy/urls.py | stain/conservancy-website | 9e41ddff766fe517a99198d60701193e8b68415e | [
"0BSD"
] | null | null | null | www/conservancy/urls.py | stain/conservancy-website | 9e41ddff766fe517a99198d60701193e8b68415e | [
"0BSD"
] | null | null | null | # Copyright 2005-2008, James Garrison
# Copyright 2010, 2012 Bradley M. Kuhn
# This software's license gives you freedom; you can copy, convey,
# propagate, redistribute, modify and/or redistribute modified versions of
# this program under the terms of the GNU Affero General Public License
# (AGPL) as published by the Free Software Foundation (FSF), either
# version 3 of the License, or (at your option) any later version of the
# AGPL published by the FSF.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program in a file in the toplevel directory called
# "AGPLv3". If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import url, include
from django.contrib import admin, admindocs
from conservancy import feeds, frontpage, sponsors
import conservancy.apps.fundgoal.views as fundgoal_views
import conservancy.static.views as static_views
admin.autodiscover()
urlpatterns = [
    # Front page; bare /sponsors (no trailing slash) also renders it.
    url(r'^$', frontpage.view),
    url(r'^sponsors$', frontpage.view),
    url(r'^sponsors/$', sponsors.view),
    url(r'^sponsors/index.html$', sponsors.view),
    # Django admin plus its autogenerated documentation.
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    url(r'^admin/', admin.site.urls),
    # Syndication feeds: blog, press releases, combined, and an index view.
    url(r'^feeds/blog/?$', feeds.BlogFeed()),
    url(r'^feeds/news/?$', feeds.PressReleaseFeed()),
    url(r'^feeds/omnibus/?$', feeds.OmnibusFeed()),
    url(r'^feeds/?$', feeds.view),
    url(r'^news(/|$)', include('conservancy.apps.news.urls')),
    url(r'^blog(/|$)', include('conservancy.apps.blog.urls')),
    # formerly static templated things... (dirs with templates)
    url(r'^error/(40[134]|500)(?:/index\.html|/|)$', static_views.handler),
    url(r'^error', static_views.index),
    url(r'^about', static_views.index),
    url(r'^donate', static_views.index),
    # Static pages that also surface a named fundraising goal.
    url(r'^copyleft-compliance', static_views.index,
        {'fundraiser_sought' : 'vmware-match-0'}),
    url(r'^projects', static_views.index),
    url(r'^npoacct', static_views.index,
        {'fundraiser_sought' : 'npoacct'}),
    url(r'^contractpatch', include('conservancy.apps.contractpatch.urls')),
    url(r'^overview', static_views.index),
    url(r'^privacy-policy', static_views.index),
    url(r'^supporter', include('conservancy.apps.supporter.urls')),
    url(r'^fundraiser_data', fundgoal_views.view),
]
| 44.655172 | 75 | 0.699614 |
71b901299fb22334462ebfb480d8b6d820375ea4 | 1,430 | py | Python | graphene_spike_tests/acceptances/test_query.py | FabienArcellier/spike-graphene-flask | bc7bce571a21826c3da852eb1c2e1904bbab99b4 | [
"MIT"
] | 1 | 2021-03-18T00:19:53.000Z | 2021-03-18T00:19:53.000Z | graphene_spike_tests/acceptances/test_query.py | FabienArcellier/spike-graphene-flask | bc7bce571a21826c3da852eb1c2e1904bbab99b4 | [
"MIT"
] | null | null | null | graphene_spike_tests/acceptances/test_query.py | FabienArcellier/spike-graphene-flask | bc7bce571a21826c3da852eb1c2e1904bbab99b4 | [
"MIT"
] | null | null | null | import unittest
from unittest.mock import Mock
from graphene import Schema
from graphene.test import Client
from graphene_spike.query import Query
| 26.481481 | 86 | 0.630769 |
71b9373dfb805ca37a8bda9472585bd77a94fc2f | 10,028 | py | Python | clikan.py | davidventasmarin/clikan | 401fe4053a14873872bb246739d55c55f8f6dcfa | [
"MIT"
] | null | null | null | clikan.py | davidventasmarin/clikan | 401fe4053a14873872bb246739d55c55f8f6dcfa | [
"MIT"
] | null | null | null | clikan.py | davidventasmarin/clikan | 401fe4053a14873872bb246739d55c55f8f6dcfa | [
"MIT"
] | null | null | null | from rich import print
from rich.console import Console
from rich.table import Table
import click
from click_default_group import DefaultGroup
import yaml
import os
##from terminaltables import SingleTable
import sys
from textwrap import wrap
import collections
import datetime
import configparser
import pkg_resources # part of setuptools
VERSION = pkg_resources.require("clikan")[0].version
pass_config = click.make_pass_decorator(Config, ensure=True)
def read_config(ctx, param, value):
"""Callback that is used whenever --config is passed. We use this to
always load the correct config. This means that the config is loaded
even if the group itself never executes so our aliases stay always
available.
"""
cfg = ctx.ensure_object(Config)
if value is None:
value = os.path.join(os.path.dirname(__file__), 'aliases.ini')
cfg.read_config(value)
return value
def read_data(config):
"""Read the existing data from the config datasource"""
try:
with open(config["clikan_data"], 'r') as stream:
try:
return yaml.load(stream, Loader=yaml.FullLoader)
except yaml.YAMLError as exc:
print("Ensure %s exists, as you specified it "
"as the clikan data file." % config['clikan_data'])
print(exc)
except IOError:
click.echo("No data, initializing data file.")
write_data(config, {"data": {}, "deleted": {}})
with open(config["clikan_data"], 'r') as stream:
return yaml.load(stream, Loader=yaml.FullLoader)
def write_data(config, data):
"""Write the data to the config datasource"""
with open(config["clikan_data"], 'w') as outfile:
yaml.dump(data, outfile, default_flow_style=False)
def read_config_yaml():
"""Read the app config from ~/.clikan.yaml"""
try:
home = get_clikan_home()
with open(home + "/.clikan.yaml", 'r') as stream:
try:
return yaml.load(stream, Loader=yaml.FullLoader)
except yaml.YAMLError:
print("Ensure %s/.clikan.yaml is valid, expected YAML." % home)
sys.exit()
except IOError:
print("Ensure %s/.clikan.yaml exists and is valid." % home)
sys.exit()
| 33.315615 | 102 | 0.603311 |
71b98f59428322523fe15276f1dd95e05126903b | 1,330 | py | Python | social_auth_ragtag_id/backends.py | RagtagOpen/python-social-auth-ragtag-id | 8d8e005231c09535098136213347934e9da7b3f2 | [
"MIT"
] | null | null | null | social_auth_ragtag_id/backends.py | RagtagOpen/python-social-auth-ragtag-id | 8d8e005231c09535098136213347934e9da7b3f2 | [
"MIT"
] | 3 | 2020-03-24T16:26:22.000Z | 2021-02-02T21:55:45.000Z | social_auth_ragtag_id/backends.py | RagtagOpen/python-social-auth-ragtag-id | 8d8e005231c09535098136213347934e9da7b3f2 | [
"MIT"
] | null | null | null | from social_core.backends.oauth import BaseOAuth2
| 35.945946 | 75 | 0.627068 |
71b9f1ca619e6a3da629a83c1ba692653be95c14 | 409 | py | Python | panel/api/models/provider.py | angeelgarr/DCPanel | 1901a0f4b1b4273b60d3a218797fb6614d05b4c0 | [
"MIT"
] | 7 | 2016-01-06T13:28:35.000Z | 2020-11-30T07:35:59.000Z | panel/api/models/provider.py | angeelgarr/DCPanel | 1901a0f4b1b4273b60d3a218797fb6614d05b4c0 | [
"MIT"
] | null | null | null | panel/api/models/provider.py | angeelgarr/DCPanel | 1901a0f4b1b4273b60d3a218797fb6614d05b4c0 | [
"MIT"
] | 6 | 2017-07-18T06:41:56.000Z | 2022-01-17T07:04:44.000Z | from django.db import models
from django.contrib import admin
| 20.45 | 44 | 0.682152 |
71b9f7585fb3ca8d7750b533bdb679556becb780 | 853 | py | Python | trial/src/sender.py | siddharthumakarthikeyan/Cable-Driven-Parallel-Robots-CDPR-Modelling | 4e8d991d55ae7da91b3c90773c679f3369a4dafa | [
"MIT"
] | 9 | 2021-06-01T12:19:58.000Z | 2022-02-28T12:30:09.000Z | trial/src/sender.py | siddharthumakarthikeyan/Cable-Driven-Parallel-Robots-CDPR-Modelling | 4e8d991d55ae7da91b3c90773c679f3369a4dafa | [
"MIT"
] | 1 | 2021-09-27T12:24:50.000Z | 2021-09-27T12:24:50.000Z | trial/src/sender.py | siddharthumakarthikeyan/Cable-Driven-Parallel-Robots-CDPR-Modelling | 4e8d991d55ae7da91b3c90773c679f3369a4dafa | [
"MIT"
] | 1 | 2021-08-02T00:48:11.000Z | 2021-08-02T00:48:11.000Z | #!/usr/bin/env python
# license removed for brevity
import rospy
from std_msgs.msg import String
from gazebo_msgs.msg import LinkState
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
pass
| 24.371429 | 77 | 0.614302 |
71bb038e552d16449011833ef1582532136fc5b7 | 1,073 | py | Python | discriminator_dataset.py | kimmokal/CC-Art-Critics | af83762a5f22043f279c167cbd58e16737e3ec87 | [
"MIT"
] | null | null | null | discriminator_dataset.py | kimmokal/CC-Art-Critics | af83762a5f22043f279c167cbd58e16737e3ec87 | [
"MIT"
] | null | null | null | discriminator_dataset.py | kimmokal/CC-Art-Critics | af83762a5f22043f279c167cbd58e16737e3ec87 | [
"MIT"
] | null | null | null | import torch
from os import listdir, path
from PIL import Image
import torchvision
| 38.321429 | 103 | 0.692451 |
71bcf0be9208fd0fbb5c709b03c8fca5ba790724 | 951 | py | Python | emailmeld/sender.py | ionata/django-emailmeld | 28326933d22957f8737ab8a9564daa9cbfca6d06 | [
"BSD-2-Clause"
] | null | null | null | emailmeld/sender.py | ionata/django-emailmeld | 28326933d22957f8737ab8a9564daa9cbfca6d06 | [
"BSD-2-Clause"
] | 1 | 2017-11-21T22:11:04.000Z | 2017-11-22T00:37:49.000Z | emailmeld/sender.py | ionata/django-emailmeld | 28326933d22957f8737ab8a9564daa9cbfca6d06 | [
"BSD-2-Clause"
] | null | null | null | from django.core.mail.message import EmailMessage, EmailMultiAlternatives
from django.utils.translation import ugettext_lazy as _
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
| 47.55 | 126 | 0.785489 |
71be4424294b2ee2dc156eab695f7198203426e0 | 1,506 | py | Python | tests/test_hap_server.py | sander-vd/HAP-python | 991761ceadfd7796d454d61c87be7f5d4b75d432 | [
"Apache-2.0"
] | 3 | 2019-12-07T22:42:38.000Z | 2022-01-20T08:44:46.000Z | tests/test_hap_server.py | sander-vd/HAP-python | 991761ceadfd7796d454d61c87be7f5d4b75d432 | [
"Apache-2.0"
] | null | null | null | tests/test_hap_server.py | sander-vd/HAP-python | 991761ceadfd7796d454d61c87be7f5d4b75d432 | [
"Apache-2.0"
] | 1 | 2021-05-15T22:34:52.000Z | 2021-05-15T22:34:52.000Z | """Tests for the HAPServer."""
from socket import timeout
from unittest.mock import Mock, MagicMock, patch
import pytest
from pyhap import hap_server
| 32.73913 | 84 | 0.677955 |
71bf1e11839857da419f894d58ec4b485c55ada9 | 1,604 | py | Python | app/views/main.py | charlesashby/marketvault-front-end | 758cf8ba1d8486f45eac093ded78a15fc82df3dc | [
"MIT"
] | null | null | null | app/views/main.py | charlesashby/marketvault-front-end | 758cf8ba1d8486f45eac093ded78a15fc82df3dc | [
"MIT"
] | null | null | null | app/views/main.py | charlesashby/marketvault-front-end | 758cf8ba1d8486f45eac093ded78a15fc82df3dc | [
"MIT"
] | null | null | null | from flask import render_template, Blueprint, request
from app.utils.search import MySQLClient
from app.utils.preprocessor import TextPreprocessor
mainbp = Blueprint("main", __name__)
| 31.45098 | 126 | 0.663342 |
71bf83bddad54a592ea34fa0a46b33394f925a8d | 31,770 | py | Python | bag_testbenches/ckt_dsn/analog/amplifier/opamp_two_stage.py | tinapiao/Software-IC-Automation | 74b23cd94aa6e4658b110e93b5deb635e014f3a6 | [
"BSD-3-Clause"
] | null | null | null | bag_testbenches/ckt_dsn/analog/amplifier/opamp_two_stage.py | tinapiao/Software-IC-Automation | 74b23cd94aa6e4658b110e93b5deb635e014f3a6 | [
"BSD-3-Clause"
] | null | null | null | bag_testbenches/ckt_dsn/analog/amplifier/opamp_two_stage.py | tinapiao/Software-IC-Automation | 74b23cd94aa6e4658b110e93b5deb635e014f3a6 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""This module contains design algorithm for a traditional two stage operational amplifier."""
from typing import TYPE_CHECKING, List, Optional, Dict, Any, Tuple, Sequence
from copy import deepcopy
import numpy as np
import scipy.optimize as sciopt
from bag.math import gcd
from bag.data.lti import LTICircuit, get_stability_margins, get_w_crossings, get_w_3db
from bag.util.search import FloatBinaryIterator, BinaryIterator, minimize_cost_golden
from bag.simulation.core import MeasurementManager
from verification.mos.query import MOSDBDiscrete
from .components import LoadDiodePFB, InputGm
if TYPE_CHECKING:
from verification.ac.core import ACTB
| 42.53012 | 100 | 0.549292 |
71c07edf7c5c3864d451ebab890ced63f246e9c3 | 3,303 | py | Python | alipay/aop/api/domain/AlipayMerchantAuthDeleteModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayMerchantAuthDeleteModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayMerchantAuthDeleteModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
| 30.302752 | 75 | 0.578868 |
71c0cce85adc329d434c5d37b1c07b2cd22f1f21 | 410 | py | Python | test/torchaudio_unittest/models/emformer/emformer_cpu_test.py | LaudateCorpus1/audio | a007e922d34028270197c0549bf452b79499d039 | [
"BSD-2-Clause"
] | null | null | null | test/torchaudio_unittest/models/emformer/emformer_cpu_test.py | LaudateCorpus1/audio | a007e922d34028270197c0549bf452b79499d039 | [
"BSD-2-Clause"
] | null | null | null | test/torchaudio_unittest/models/emformer/emformer_cpu_test.py | LaudateCorpus1/audio | a007e922d34028270197c0549bf452b79499d039 | [
"BSD-2-Clause"
] | null | null | null | import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from torchaudio_unittest.models.emformer.emformer_test_impl import EmformerTestImpl
| 29.285714 | 83 | 0.814634 |
71c15aae1f82d17826550ce3299615cff978924d | 2,206 | py | Python | src/nba_analysis/pipelines/data_processing/pipeline.py | stanton119/nba-analysis | 79343150edaaa97472939c47b3ce521e038871b0 | [
"MIT"
] | null | null | null | src/nba_analysis/pipelines/data_processing/pipeline.py | stanton119/nba-analysis | 79343150edaaa97472939c47b3ce521e038871b0 | [
"MIT"
] | null | null | null | src/nba_analysis/pipelines/data_processing/pipeline.py | stanton119/nba-analysis | 79343150edaaa97472939c47b3ce521e038871b0 | [
"MIT"
] | 1 | 2021-12-16T01:04:09.000Z | 2021-12-16T01:04:09.000Z | """
Two pipelines:
* full history
* update latest season
* Only updates latest season year
"""
from functools import partial
import itertools
from kedro.pipeline import Pipeline, node
from nba_analysis.pipelines.data_processing import basketball_reference
from . import nodes
| 30.219178 | 131 | 0.602901 |
71c3d256540447d130560ac9efdd84ad55be2fad | 970 | py | Python | IceSpringMusicPlayer/plugins/IceSpringHelloWorldPlugin/helloWorldPlugin.py | baijifeilong/rawsteelp | 425547e6e2395bf4acb62435b18b5b3a4b7ebef4 | [
"MIT"
] | null | null | null | IceSpringMusicPlayer/plugins/IceSpringHelloWorldPlugin/helloWorldPlugin.py | baijifeilong/rawsteelp | 425547e6e2395bf4acb62435b18b5b3a4b7ebef4 | [
"MIT"
] | null | null | null | IceSpringMusicPlayer/plugins/IceSpringHelloWorldPlugin/helloWorldPlugin.py | baijifeilong/rawsteelp | 425547e6e2395bf4acb62435b18b5b3a4b7ebef4 | [
"MIT"
] | null | null | null | # Created by BaiJiFeiLong@gmail.com at 2022/1/21 17:13
import typing
from IceSpringRealOptional.typingUtils import gg
from PySide2 import QtWidgets, QtCore
from IceSpringMusicPlayer import tt
from IceSpringMusicPlayer.common.pluginMixin import PluginMixin
from IceSpringMusicPlayer.common.pluginWidgetMixin import PluginWidgetMixin
from IceSpringMusicPlayer.tt import Text
| 33.448276 | 93 | 0.760825 |
71c4e4d0291e170dbdedace4be31a3f5ab545979 | 3,259 | py | Python | SWHT/Ylm.py | 2baOrNot2ba/SWHT | 738718e90d615e624dacf7746f8a2dfa973ec9fe | [
"BSD-3-Clause"
] | null | null | null | SWHT/Ylm.py | 2baOrNot2ba/SWHT | 738718e90d615e624dacf7746f8a2dfa973ec9fe | [
"BSD-3-Clause"
] | null | null | null | SWHT/Ylm.py | 2baOrNot2ba/SWHT | 738718e90d615e624dacf7746f8a2dfa973ec9fe | [
"BSD-3-Clause"
] | null | null | null | """
An implementation on spherical harmonics in python becasue scipy.special.sph_harm in scipy<=0.13 is very slow
Originally written by Jozef Vesely
https://github.com/scipy/scipy/issues/1280
"""
import numpy as np
if __name__ == "__main__":
from scipy.special import sph_harm
from scipy.misc import factorial2, factorial
from timeit import Timer
print "Time: xfact(10)", Timer("xfact(10)",
"from __main__ import xfact, ref_xfact").timeit(100)
print "Time: ref_xfact(10)", Timer("ref_xfact(10)",
"from __main__ import xfact, ref_xfact").timeit(100)
print "Time: xfact(80)", Timer("xfact(80)",
"from __main__ import xfact, ref_xfact").timeit(100)
print "Time: ref_xfact(80)", Timer("ref_xfact(80)",
"from __main__ import xfact, ref_xfact").timeit(100)
print "m", "xfact", "ref_xfact"
for m in range(10) + range(80,90):
a = xfact(m)
b = ref_xfact(m)
print m, a, b
phi, theta = np.ogrid[0:2*np.pi:10j,-np.pi/2:np.pi/2:10j]
print "Time: Ylm(1,1,phi,theta)", Timer("Ylm(1,1,phi,theta)",
"from __main__ import Ylm, sph_harm, phi, theta").timeit(10)
print "Time: sph_harm(1,1,phi,theta)", Timer("sph_harm(1,1,phi,theta)",
"from __main__ import Ylm, sph_harm, phi, theta").timeit(10)
print "l", "m", "max|Ylm-sph_harm|"
for l in xrange(0,10):
for m in xrange(-l,l+1):
a = Ylm(l,m,phi,theta)
b = sph_harm(m,l,phi,theta)
print l,m, np.amax(np.abs(a-b))
| 32.919192 | 109 | 0.56367 |
71c7397a9aa9b39fdf9e024d5ca5dfdc737b974f | 1,820 | py | Python | 0673.GCBA-HOTEL_STAFF.py | alphacastio/connectors-gcba | d1b97fb851463694ea844b3b81402c3ea747863b | [
"MIT"
] | 1 | 2021-11-19T21:37:01.000Z | 2021-11-19T21:37:01.000Z | 0673.GCBA-HOTEL_STAFF.py | alphacastio/connectors-gcba | d1b97fb851463694ea844b3b81402c3ea747863b | [
"MIT"
] | null | null | null | 0673.GCBA-HOTEL_STAFF.py | alphacastio/connectors-gcba | d1b97fb851463694ea844b3b81402c3ea747863b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[9]:
import requests
import pandas as pd
from lxml import etree
from bs4 import BeautifulSoup
import datetime
import io
import numpy as np
from alphacast import Alphacast
from dotenv import dotenv_values
API_KEY = dotenv_values(".env").get("API_KEY")
alphacast = Alphacast(API_KEY)
# In[10]:
url1 = "https://www.estadisticaciudad.gob.ar/eyc/wp-content/uploads/2020/11/Eoh_PnoA_0811.xlsx"
df1 = pd.read_excel(url1)
df1[:2] = df1[:2].ffill(1)
df1.columns = "Personal No Asalariado - " + df1.iloc[1] + " - " + df1.iloc[2]
df1 = df1.drop(df1.columns[[1]], axis = 1)
df1 = df1.drop(index=1)
df1 = df1.drop(index=0)
df1 = df1.drop(index=2)
df1 = df1.dropna(subset = [df1.columns[3]])
#df1 = df1.iloc[2: , 3:-2]
#df1 = df1[~df1.iloc[:, 0].astype(str).str.isdigit()]
df1 = df1[df1.columns.dropna()]
df1.index = pd.date_range(start='1/1/2008', periods=len(df1), freq = "QS")
df1.index.name = "Date"
#df1 = df1[df1.columns.drop(list(df1.filter(regex='Participacin')))]
df1
# In[11]:
url2 = "https://www.estadisticaciudad.gob.ar/eyc/wp-content/uploads/2018/05/Eoh_PA_0811.xlsx"
df2 = pd.read_excel(url2)
df2[:2] = df2[:2].ffill(1)
df2.columns = "Personal Asalariado - " + df2.iloc[1] + " - " + df2.iloc[2]
df2 = df2.drop(df2.columns[[1]], axis = 1)
df2 = df2.drop(index=1)
df2 = df2.drop(index=0)
df2 = df2.drop(index=2)
df2 = df2.dropna(subset = [df2.columns[3]])
#df2 = df2.iloc[2: , 3:-2]
#df2 = df2[~df2.iloc[:, 0].astype(str).str.isdigit()]
df2 = df2[df2.columns.dropna()]
df2.index = pd.date_range(start='1/1/2008', periods=len(df2), freq = "QS")
df2.index.name = "Date"
df3 = df1.merge(df2, right_index=True, left_index=True)
alphacast.datasets.dataset(7432).upload_data_from_df(df3,
deleteMissingFromDB = True, onConflictUpdateDB = True, uploadIndex=True)
| 27.575758 | 95 | 0.686813 |
71c798c6020de830cf23434ebeb38ea555cc0bd8 | 5,572 | py | Python | simpleGmatch4py.py | aravi11/approxGed | 6c0a2ed4fd1bcc86c22169e3c96fcf4de717bf8c | [
"MIT"
] | null | null | null | simpleGmatch4py.py | aravi11/approxGed | 6c0a2ed4fd1bcc86c22169e3c96fcf4de717bf8c | [
"MIT"
] | null | null | null | simpleGmatch4py.py | aravi11/approxGed | 6c0a2ed4fd1bcc86c22169e3c96fcf4de717bf8c | [
"MIT"
] | null | null | null |
# import the GED using the munkres algorithm
import gmatch4py as gm
import networkx as nx
import collections
import csv
import pickle
from collections import OrderedDict
import json
import concurrent.futures as cf
import time
iter = 0
'''
def runParallelCode(pairList):
with cf.ProcessPoolExecutor(max_workers =2) as executor:
try:
for future in cf.as_completed((executor.map(getGraphDiff, pairList, timeout=5000000)), timeout=5000000):
print(str(type(future.result())))
if str(type(future.result())) == "<class 'NoneType'>":
pass
else:
print(future.result(timeout=5000000))
except cf._base.TimeoutError:
print("Time limit exceeded")
pass
'''
if __name__ == '__main__':
start_time = time.time()
main()
print("--- %s seconds ---" % (time.time() - start_time))
| 32.395349 | 130 | 0.644113 |
71c80c035280e16e1aaf199b5f9834181e50b2ad | 1,940 | py | Python | src/blockdiag/utils/rst/nodes.py | Dridi/blockdiag | bbb16f8a731cdf79a675a63c1ff847e70fdc4a5b | [
"Apache-2.0"
] | null | null | null | src/blockdiag/utils/rst/nodes.py | Dridi/blockdiag | bbb16f8a731cdf79a675a63c1ff847e70fdc4a5b | [
"Apache-2.0"
] | null | null | null | src/blockdiag/utils/rst/nodes.py | Dridi/blockdiag | bbb16f8a731cdf79a675a63c1ff847e70fdc4a5b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2011 Takeshi KOMIYA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from hashlib import sha1
from docutils import nodes
import blockdiag.parser
import blockdiag.builder
import blockdiag.drawer
| 35.272727 | 78 | 0.640206 |
71c81073e9fc83a90c2d12dc9cb29a2d00b1831d | 1,355 | py | Python | python-advanced/chp1/main.py | emiliachojak/bio-projects | d2e5290b48613ef6721e303b3490a98cf4cbf6c0 | [
"MIT"
] | 2 | 2019-12-11T20:55:46.000Z | 2020-06-17T14:01:07.000Z | python-advanced/chp1/main.py | emiliachojak/bio-projects | d2e5290b48613ef6721e303b3490a98cf4cbf6c0 | [
"MIT"
] | null | null | null | python-advanced/chp1/main.py | emiliachojak/bio-projects | d2e5290b48613ef6721e303b3490a98cf4cbf6c0 | [
"MIT"
] | 1 | 2019-12-11T20:58:45.000Z | 2019-12-11T20:58:45.000Z | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 19 20:00:00 2019
@author: Emilia Chojak
@e-mail: emilia.chojak@gmail.com
"""
tax_dict = {
'Pan troglodytes' : 'Hominoidea', 'Pongo abelii' : 'Hominoidea',
'Hominoidea' : 'Simiiformes', 'Simiiformes' : 'Haplorrhini',
'Tarsius tarsier' : 'Tarsiiformes', 'Haplorrhini' : 'Primates',
'Tarsiiformes' : 'Haplorrhini', 'Loris tardigradus' :
'Lorisidae',
'Lorisidae' : 'Strepsirrhini', 'Strepsirrhini' : 'Primates',
'Allocebus trichotis' : 'Lemuriformes', 'Lemuriformes' :
'Strepsirrhini',
'Galago alleni' : 'Lorisiformes', 'Lorisiformes' :
'Strepsirrhini',
'Galago moholi' : 'Lorisiformes'
}
print(last_common_ancestor(find_ancestors_for_many(["Galago alleni", "Galago moholi"]))) | 30.111111 | 88 | 0.677491 |
71c859d9d13c7c86199e6c92e91a1441fbf8c1ae | 334 | py | Python | Python/csv/1.py | LeishenKOBE/good-good-study | ac6b859f53b8b95f0746f35c5278009a5cad40a8 | [
"MIT"
] | null | null | null | Python/csv/1.py | LeishenKOBE/good-good-study | ac6b859f53b8b95f0746f35c5278009a5cad40a8 | [
"MIT"
] | null | null | null | Python/csv/1.py | LeishenKOBE/good-good-study | ac6b859f53b8b95f0746f35c5278009a5cad40a8 | [
"MIT"
] | null | null | null | import csv
# with open('./1.csv', newline='', encoding='utf-8') as f:
# reader = csv.reader(f)
# for row in reader:
# print(row)
with open('./1.csv', 'a', encoding='utf-8') as f:
writer = csv.writer(f)
writer.writerow(['4', '', '25', '1022', '886'])
writer.writerow(['5', '', '18', '2234', '3121'])
| 27.833333 | 58 | 0.535928 |
71c972e7ade9ba017ed68282dda02ffa0b10d89d | 5,110 | py | Python | src/solana/rpc/responses.py | broper2/solana-py | 146390d959f017e137238335ee6fa362ad1a1ab4 | [
"MIT"
] | 1 | 2021-12-13T05:28:52.000Z | 2021-12-13T05:28:52.000Z | src/solana/rpc/responses.py | broper2/solana-py | 146390d959f017e137238335ee6fa362ad1a1ab4 | [
"MIT"
] | 1 | 2021-08-11T10:33:16.000Z | 2021-08-11T10:33:16.000Z | src/solana/rpc/responses.py | broper2/solana-py | 146390d959f017e137238335ee6fa362ad1a1ab4 | [
"MIT"
] | 2 | 2021-06-23T15:29:56.000Z | 2022-01-29T06:24:01.000Z | """This module contains code for parsing RPC responses."""
from dataclasses import dataclass, field
from typing import Union, Tuple, Any, Dict, List, Optional, Literal
from apischema import alias
from apischema.conversions import as_str
from solana.publickey import PublicKey
from solana.transaction import TransactionSignature
as_str(PublicKey)
TransactionErrorResult = Optional[dict]
SlotsUpdatesItem = Union[FirstShredReceived, Completed, CreatedBank, Frozen, Dead, OptimisticConfirmation, Root]
SubscriptionNotification = Union[
AccountNotification,
LogsNotification,
ProgramNotification,
SignatureNotification,
SlotNotification,
RootNotification,
SlotsUpdatesNotification,
VoteNotification,
]
| 19.429658 | 112 | 0.73816 |
71c9afc9f2fd7d8896cef3ef910e93c309b9fb9f | 1,845 | py | Python | python/data_structures/binheap.py | adriennekarnoski/data-structures | 86ccf988ac02884749226236ad4ac37762873efa | [
"MIT"
] | 1 | 2017-11-05T20:59:04.000Z | 2017-11-05T20:59:04.000Z | python/data_structures/binheap.py | adriennekarnoski/data-structures | 86ccf988ac02884749226236ad4ac37762873efa | [
"MIT"
] | 5 | 2017-12-15T01:37:47.000Z | 2018-02-20T22:51:29.000Z | python/data_structures/binheap.py | adriennekarnoski/data-structures | 86ccf988ac02884749226236ad4ac37762873efa | [
"MIT"
] | null | null | null | """Build a binary min heap object."""
from math import floor
| 30.245902 | 67 | 0.505149 |
71ca38941354160e243965319c30a6e676cdeb33 | 1,547 | py | Python | vesper/archive_settings.py | RichardLitt/Vesper | 5360844f42a06942e7684121c650b08cf8616285 | [
"MIT"
] | 29 | 2017-07-10T14:49:15.000Z | 2022-02-02T23:14:38.000Z | vesper/archive_settings.py | Tubbz-alt/Vesper | 76e5931ca0c7fbe070c53b1362ec246ec9007beb | [
"MIT"
] | 167 | 2015-03-17T14:45:22.000Z | 2022-03-30T21:00:05.000Z | vesper/archive_settings.py | Tubbz-alt/Vesper | 76e5931ca0c7fbe070c53b1362ec246ec9007beb | [
"MIT"
] | 4 | 2015-02-06T03:30:27.000Z | 2020-12-27T08:38:52.000Z | """
Vesper archive settings.
The Vesper server serves the Vesper archive that is in the directory
in which the server starts. The archive settings are the composition
of a set of default settings (hard-coded in this module) and settings
(optionally) specified in the file "Archive Settings.yaml" in the
archive directory.
"""
from pathlib import Path
import os
import sys
from vesper.util.settings import Settings
from vesper.util.settings_type import SettingsType
import vesper.archive_paths as archive_paths
_DEFAULT_SETTINGS = Settings.create_from_yaml('''
database:
engine: SQLite
''')
_SETTINGS_TYPE = SettingsType('Archive Settings', _DEFAULT_SETTINGS)
_SETTINGS_FILE_NAME = 'Archive Settings.yaml'
archive_settings = _create_settings()
| 24.555556 | 75 | 0.701357 |
71ca8311a73312ae9b4e292ad1989e57d088b408 | 9,841 | py | Python | autotf/model/vgg16.py | DAIM-ML/autotf | 3f82d858f49c27d5ecb624cee555fb8fd47bf067 | [
"BSD-3-Clause"
] | 8 | 2018-03-07T06:58:16.000Z | 2019-01-30T07:49:44.000Z | autotf/model/vgg16.py | DAIM-ML/autotf | 3f82d858f49c27d5ecb624cee555fb8fd47bf067 | [
"BSD-3-Clause"
] | null | null | null | autotf/model/vgg16.py | DAIM-ML/autotf | 3f82d858f49c27d5ecb624cee555fb8fd47bf067 | [
"BSD-3-Clause"
] | 1 | 2018-03-31T09:06:12.000Z | 2018-03-31T09:06:12.000Z | #-*- coding=utf-8 -*-
from __future__ import division, print_function, absolute_import
from base_model import BaseModel
from helper import *
import tensorflow as tf
import pickle
import numpy as np
import time
| 39.681452 | 156 | 0.580124 |
71cad3858a3b017c8adbe9bb0a7f32ee389c518f | 3,226 | py | Python | LEGEND/modules/_exec.py | RAJESHSAINI2113/LEGENDX | 82c3c61062e804c3bf8b6e4ee31d1e603ab8bfd0 | [
"MIT"
] | 2 | 2021-03-01T03:50:22.000Z | 2021-03-05T07:13:19.000Z | LEGEND/modules/_exec.py | RAJESHSAINI2113/LEGENDX | 82c3c61062e804c3bf8b6e4ee31d1e603ab8bfd0 | [
"MIT"
] | null | null | null | LEGEND/modules/_exec.py | RAJESHSAINI2113/LEGENDX | 82c3c61062e804c3bf8b6e4ee31d1e603ab8bfd0 | [
"MIT"
] | 5 | 2021-03-01T08:40:31.000Z | 2021-10-01T16:32:04.000Z | import subprocess
from LEGEND import tbot as bot
from LEGEND import tbot as borg
from LEGEND.events import register
from LEGEND import OWNER_ID, SUDO_USERS
import asyncio
import traceback
import io
import os
import sys
import time
from telethon.tl import functions
from telethon.tl import types
from telethon.tl.types import *
from telethon.errors import *
| 27.810345 | 127 | 0.623993 |
71cb7166ebfc6dcb81586c67d3970300c6d339d5 | 2,850 | py | Python | src/tools/pch.py | MaxSac/build | 482c25f3a26171073c7e6c59f0427f2259a63fec | [
"BSL-1.0"
] | 11,356 | 2017-12-08T19:42:32.000Z | 2022-03-31T16:55:25.000Z | src/tools/pch.py | MaxSac/build | 482c25f3a26171073c7e6c59f0427f2259a63fec | [
"BSL-1.0"
] | 2,402 | 2017-12-08T22:31:01.000Z | 2022-03-28T19:25:52.000Z | src/tools/pch.py | MaxSac/build | 482c25f3a26171073c7e6c59f0427f2259a63fec | [
"BSL-1.0"
] | 1,343 | 2017-12-08T19:47:19.000Z | 2022-03-26T11:31:36.000Z | # Status: Being ported by Steven Watanabe
# Base revision: 47077
#
# Copyright (c) 2005 Reece H. Dunn.
# Copyright 2006 Ilya Sokolov
# Copyright (c) 2008 Steven Watanabe
#
# Use, modification and distribution is subject to the Boost Software
# License Version 1.0. (See accompanying file LICENSE_1_0.txt or
# http://www.boost.org/LICENSE_1_0.txt)
##### Using Precompiled Headers (Quick Guide) #####
#
# Make precompiled mypch.hpp:
#
# import pch ;
#
# cpp-pch mypch
# : # sources
# mypch.hpp
# : # requiremnts
# <toolset>msvc:<source>mypch.cpp
# ;
#
# Add cpp-pch to sources:
#
# exe hello
# : main.cpp hello.cpp mypch
# ;
from b2.build import type, feature, generators
from b2.tools import builtin
type.register('PCH', ['pch'])
type.register('C_PCH', [], 'PCH')
type.register('CPP_PCH', [], 'PCH')
# Control precompiled header (PCH) generation.
feature.feature('pch',
['on', 'off'],
['propagated'])
feature.feature('pch-header', [], ['free', 'dependency'])
feature.feature('pch-file', [], ['free', 'dependency'])
# NOTE: requirements are empty, default pch generator can be applied when
# pch=off.
generators.register(builtin.DummyGenerator(
"pch.default-c-pch-generator", False, [], ['C_PCH'], []))
generators.register(builtin.DummyGenerator(
"pch.default-cpp-pch-generator", False, [], ['CPP_PCH'], []))
| 33.928571 | 84 | 0.635789 |
71cba591b2f2458645ed1d92e6c6191526e0649e | 3,274 | py | Python | packages/pytest-simcore/src/pytest_simcore/helpers/utils_login.py | GitHK/osparc-simcore-forked | 5b01a28d1b8028afcf9a735e1d46a73daa13686e | [
"MIT"
] | null | null | null | packages/pytest-simcore/src/pytest_simcore/helpers/utils_login.py | GitHK/osparc-simcore-forked | 5b01a28d1b8028afcf9a735e1d46a73daa13686e | [
"MIT"
] | 17 | 2020-10-15T16:06:05.000Z | 2022-03-21T18:48:21.000Z | packages/pytest-simcore/src/pytest_simcore/helpers/utils_login.py | GitHK/osparc-simcore-forked | 5b01a28d1b8028afcf9a735e1d46a73daa13686e | [
"MIT"
] | null | null | null | import re
from typing import Dict
from aiohttp import web
from yarl import URL
from simcore_service_webserver.db_models import UserRole, UserStatus
from simcore_service_webserver.login.cfg import cfg, get_storage
from simcore_service_webserver.login.registration import create_invitation
from simcore_service_webserver.login.utils import encrypt_password, get_random_string
from .utils_assert import assert_status
# Matches review marks of the form "TEST <name>:<value>" embedded in text.
TEST_MARKS = re.compile(r"TEST (\w+):(.*)")


def parse_test_marks(text):
    """Check for marks such as

        TEST name:123123
        TEST link:some-value

    and return them as a ``{name: value}`` dict, with values whitespace-stripped.
    If the same name appears more than once, the last occurrence wins.
    """
    return {m.group(1): m.group(2).strip() for m in TEST_MARKS.finditer(text)}
class NewUser:
def __init__(self, params=None, app: web.Application = None):
self.params = params
self.user = None
self.db = get_storage(app) if app else cfg.STORAGE # FIXME:
| 28.224138 | 85 | 0.668907 |
71cc3ccb2a64dc7939c236363e16d9e1816e901e | 2,806 | py | Python | indra/tests/test_sparser.py | jmuhlich/indra | feab2c08541ea73f328579faa6a21b08082cb026 | [
"BSD-2-Clause"
] | null | null | null | indra/tests/test_sparser.py | jmuhlich/indra | feab2c08541ea73f328579faa6a21b08082cb026 | [
"BSD-2-Clause"
] | null | null | null | indra/tests/test_sparser.py | jmuhlich/indra | feab2c08541ea73f328579faa6a21b08082cb026 | [
"BSD-2-Clause"
] | null | null | null | from indra import sparser
xml_str1 = '''
<article pmid="54321">
<interpretation>
<sentence-text>MEK1 phosphorylates ERK1</sentence-text>
<sem>
<ref category="phosphorylate">
<var name="agent">
<ref category="protein">
<var name="name">MP2K1_HUMAN</var>
<var name="uid">UP:MP2K1_HUMAN</var>
</ref>
</var>
<var name="substrate">
<ref category="protein">
<var name="name">MK03_HUMAN</var>
<var name="uid">UP:MK03_HUMAN</var>
</ref>
</var>
<var name="present"><ref category="present"></ref></var>
</ref>
</sem>
</interpretation>
</article>
'''
xml_str2 = '''
<article pmid="12345">
<interpretation>
<sentence-text>Hence ASPP2 can be phosphorylated at serine 827 by MAPK1 in vitro</sentence-text>
<sem>
<ref category="phosphorylate">
<var name="subordinate-conjunction">
<ref category="subordinate-conjunction"><var name="word">hence</var></ref></var>
<var name="substrate">
<ref category="protein">
<var name="name">ASPP2_HUMAN</var>
<var name="uid">UP:ASPP2_HUMAN</var>
</ref>
</var>
<var name="agent">
<ref category="protein">
<var name="context">
<ref category="in-vitro"></ref>
</var>
<var name="uid">UP:MK01_HUMAN</var>
<var name="name">MK01_HUMAN</var>
</ref>
</var>
<var name="site">
<ref category="residue-on-protein">
<var name="amino-acid">
<ref category="amino-acid"><var name="name">serine</var></ref>
</var>
<var name="position"> 827</var>
</ref>
</var>
<var name="modal"><ref category="can"></ref></var>
</ref>
</sem>
</interpretation>
</article>
'''
| 29.229167 | 98 | 0.573414 |
71cd46d78b6a8276fbfad5958ac1ac90396f36d3 | 685 | py | Python | examples/quickstart/run_example.py | siforrer/coreali | 261e321b546192e608edf87c47719d2173ab4645 | [
"MIT"
] | null | null | null | examples/quickstart/run_example.py | siforrer/coreali | 261e321b546192e608edf87c47719d2173ab4645 | [
"MIT"
] | null | null | null | examples/quickstart/run_example.py | siforrer/coreali | 261e321b546192e608edf87c47719d2173ab4645 | [
"MIT"
] | null | null | null | """ Simple Example using coreali to access a register model. Needs no h^ardware"""
# Import dependencies and compile register model with systemrdl-compiler
from systemrdl import RDLCompiler
import coreali
import numpy as np
import os
from coreali import RegisterModel
rdlc = RDLCompiler()
rdlc.compile_file(os.path.dirname(__file__)+"/../systemrdl/logger.rdl")
root = rdlc.elaborate()
# Generate hierarchical register model
rio = coreali.registerio.RegIoNoHW(np.zeros([256], np.uint8()))
logger = RegisterModel(root, rio)
# Use the generated register model
logger.Ctrl.read()
logger.LogMem.write(0,[1,2,3])
logger.LogMem.read()
logger.LogMem[1].write(0,[11,12,13])
print(logger)
| 28.541667 | 82 | 0.769343 |
71cd65bd2b7c6ec78dfa4527145f67145398f409 | 14,872 | py | Python | src/python/pants/base/specs.py | mcguigan/pants | e085d45669b72d0c51ab8a54602306fc76e07256 | [
"Apache-2.0"
] | null | null | null | src/python/pants/base/specs.py | mcguigan/pants | e085d45669b72d0c51ab8a54602306fc76e07256 | [
"Apache-2.0"
] | null | null | null | src/python/pants/base/specs.py | mcguigan/pants | e085d45669b72d0c51ab8a54602306fc76e07256 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import re
from abc import ABC, ABCMeta, abstractmethod
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from pants.engine.fs import PathGlobs
from pants.engine.objects import Collection
from pants.option.custom_types import GlobExpansionConjunction
from pants.option.global_options import GlobMatchErrorBehavior
from pants.util.collections import assert_single_element
from pants.util.dirutil import fast_relpath_optional, recursive_dirname
from pants.util.filtering import create_filters, wrap_filters
from pants.util.memo import memoized_property
from pants.util.meta import frozen_after_init
if TYPE_CHECKING:
from pants.engine.mapper import AddressFamily, AddressMapper
# Rank of each address-spec type: lower value = more specific. `type(None)`
# ranks last so more_specific() always prefers a real spec over a missing one.
# (The spec classes are defined earlier in this file, outside this excerpt.)
_specificity = {
  SingleAddress: 0,
  SiblingAddresses: 1,
  AscendantAddresses: 2,
  DescendantAddresses: 3,
  type(None): 99
}
def more_specific(
  address_spec1: Optional[AddressSpec], address_spec2: Optional[AddressSpec]
) -> AddressSpec:
  """Return whichever of the two specs is the more specific one.

  When a target matches several specs we associate it with the most specific
  match, which is what users intuitively expect.
  """
  # Two Nones is a caller bug; a single None always loses (it ranks 99).
  if address_spec1 is None and address_spec2 is None:
    raise ValueError('internal error: both specs provided to more_specific() were None')
  rank1 = _specificity[type(address_spec1)]
  rank2 = _specificity[type(address_spec2)]
  return cast(AddressSpec, address_spec1 if rank1 < rank2 else address_spec2)
class FilesystemSpec(Spec, metaclass=ABCMeta):
  """Abstract marker base for specs that select targets by filesystem path."""
  pass
class FilesystemSpecs(Collection[FilesystemSpec]):
  # NOTE(review): `includes`, `ignores` and `_generate_path_globs` are defined
  # on parts of this class not visible in this excerpt — confirm their exact
  # semantics there before relying on these comments.
  def path_globs_for_spec(
    self, spec: Union[FilesystemLiteralSpec, FilesystemGlobSpec]
  ) -> PathGlobs:
    """Generate PathGlobs for the specific spec, automatically including the instance's
    FilesystemIgnoreSpecs.
    """
    # The instance-wide ignore specs always accompany the single include spec.
    return self._generate_path_globs(specs=(spec, *self.ignores))

  def to_path_globs(self) -> PathGlobs:
    """Generate a single PathGlobs for the instance."""
    return self._generate_path_globs(specs=(*self.includes, *self.ignores))
class AmbiguousSpecs(Exception):
  """Error raised for an ambiguous combination of specs."""
| 35.158392 | 109 | 0.739981 |
71cdfb38599df10c30320a2593f3e48d3acf2678 | 4,140 | py | Python | Mock/MockRequesterMixin.py | GordiigPinny/ApiRequesters | aeb36c7b7b5237c3a74dae6ced7c6141df729ab5 | [
"MIT"
] | null | null | null | Mock/MockRequesterMixin.py | GordiigPinny/ApiRequesters | aeb36c7b7b5237c3a74dae6ced7c6141df729ab5 | [
"MIT"
] | null | null | null | Mock/MockRequesterMixin.py | GordiigPinny/ApiRequesters | aeb36c7b7b5237c3a74dae6ced7c6141df729ab5 | [
"MIT"
] | null | null | null | import json
import requests
from enum import Enum
from typing import Dict
from ..exceptions import JsonDecodeError, UnexpectedResponse, RequestError, BaseApiRequestError
# -
# - -
# , GET/POST
    def _handle_errors(self, token):
        """Inspect the mock token and raise the matching error, if any.

        NOTE(review): the original docstring was garbled (non-ASCII text lost);
        behaviour described here is inferred from the code below — confirm.
        Raises BaseApiRequestError for the generic error token, or delegates to
        self.raise_coded_error() for the 400/401/403/404 tokens.
        """
        # get_mine_error_part() (defined elsewhere on this class) narrows the
        # token before the comparisons below.
        token = self.get_mine_error_part(token)
        if token == self.ERRORS.ERROR_TOKEN.value:
            raise BaseApiRequestError()
        elif token == self.ERRORS.BAD_CODE_400_TOKEN.value:
            self.raise_coded_error(400)
        elif token == self.ERRORS.BAD_CODE_401_TOKEN.value:
            self.raise_coded_error(401)
        elif token == self.ERRORS.BAD_CODE_403_TOKEN.value:
            self.raise_coded_error(403)
        elif token == self.ERRORS.BAD_CODE_404_TOKEN.value:
            self.raise_coded_error(404)
    def _mock_token_handler(self, token: str, list_object=False):
        """Return a (requests.Response(), payload) pair for a mocked request.

        First raises if *token* encodes one of the error cases (see
        _handle_errors); otherwise pairs a blank requests.Response with either
        the list payload or the single-object payload derived from the token.
        """
        self._handle_errors(token)
        if list_object:
            return requests.Response(), self.get_list_object_on_success(token)
        else:
            return requests.Response(), self.get_object_on_success(token)
| 34.214876 | 95 | 0.674396 |
71cfe2dcb6db7c33d02af00f0428d85a6126a273 | 313 | py | Python | tests/test_parse.py | vkleen/skidl | f09200c978a39c127e292ef71b8ff89c1a3c0f5a | [
"MIT"
] | 700 | 2016-08-16T21:12:50.000Z | 2021-10-10T02:15:18.000Z | tests/test_parse.py | 0dvictor/skidl | 458709a10b28a864d25ae2c2b44c6103d4ddb291 | [
"MIT"
] | 118 | 2016-08-16T20:51:05.000Z | 2021-10-10T08:07:18.000Z | tests/test_parse.py | 0dvictor/skidl | 458709a10b28a864d25ae2c2b44c6103d4ddb291 | [
"MIT"
] | 94 | 2016-08-25T14:02:28.000Z | 2021-09-12T05:17:08.000Z | # -*- coding: utf-8 -*-
# The MIT License (MIT) - Copyright (c) 2016-2021 Dave Vandenbout.
import pytest
from skidl import netlist_to_skidl
from .setup_teardown import get_filename, setup_function, teardown_function
| 22.357143 | 75 | 0.773163 |
71cfe70b6d19b560b2ea31dad54f5ad9cbddfef1 | 291 | py | Python | Projects/envirohat-monitor/clear-screen.py | pkbullock/RaspberryPi | 1c8e83566e97f65fe530d8d43293f4b26c015d0d | [
"MIT"
] | null | null | null | Projects/envirohat-monitor/clear-screen.py | pkbullock/RaspberryPi | 1c8e83566e97f65fe530d8d43293f4b26c015d0d | [
"MIT"
] | null | null | null | Projects/envirohat-monitor/clear-screen.py | pkbullock/RaspberryPi | 1c8e83566e97f65fe530d8d43293f4b26c015d0d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import ST7735
import sys

# Initialise the 0.96" LCD (ST7735 controller) over SPI.
st7735 = ST7735.ST7735(
    port=0,
    cs=1,
    dc=9,
    backlight=12,
    rotation=270,
    spi_speed_hz=10000000
)

# Reset the display and switch its backlight off to blank the screen.
st7735.begin()
st7735.reset()
st7735.set_backlight(0)

# Bug fix: the original used the Python 2 print statement ('print "\nDone."'),
# which is a SyntaxError under the python3 shebang of this script.
print("\nDone.")
# Exit cleanly
sys.exit(0) | 12.652174 | 25 | 0.666667 |
71d0e460dfc97542581b94a812752a1bad4c2629 | 709 | py | Python | Scripts/nominatintest.py | carlosdenner/business_atlas | 8f95bbd07384baa6c5e51776690103e418b3875e | [
"MIT"
] | null | null | null | Scripts/nominatintest.py | carlosdenner/business_atlas | 8f95bbd07384baa6c5e51776690103e418b3875e | [
"MIT"
] | 4 | 2021-04-14T19:18:46.000Z | 2021-11-02T16:11:36.000Z | Scripts/nominatintest.py | carlosdenner/business_atlas | 8f95bbd07384baa6c5e51776690103e418b3875e | [
"MIT"
] | 3 | 2021-09-01T03:05:21.000Z | 2021-11-01T16:54:26.000Z |
from geopy.geocoders import Nominatim
from requests.models import LocationParseError
# Geocoder client used below; Nominatim requires a user_agent string.
geolocator = Nominatim(user_agent="geoapiExercises")
# Sample latitude/longitude values.
Latitude = 25.594095
Longitude = 85.137566
# NOTE(review): `location` is not defined in this excerpt — presumably a helper
# defined elsewhere in this file that looks up these coordinates; confirm.
location(Latitude, Longitude)
# Display
| 22.870968 | 52 | 0.61354 |
71d0fd4625a3f29310594a80dc408cff1d45b254 | 1,196 | py | Python | gamesystem.py | cristilianojr/JOKENPOH | 604970d4f3cfbcc5f851e993af72d3bc86926ae5 | [
"MIT"
] | 1 | 2022-02-02T15:23:00.000Z | 2022-02-02T15:23:00.000Z | gamesystem.py | cristilianojr/JOKENPOH | 604970d4f3cfbcc5f851e993af72d3bc86926ae5 | [
"MIT"
] | null | null | null | gamesystem.py | cristilianojr/JOKENPOH | 604970d4f3cfbcc5f851e993af72d3bc86926ae5 | [
"MIT"
] | null | null | null | import random
from tkinter import PhotoImage
"""
Esse arquivo define os estados do game
"""
def ia_chocer():
    """Make the AI's move: return a uniformly random choice among
    'rock', 'paper' and 'scissor'.

    (Docstring translated from Portuguese: "AI makes a random choice".)
    """
    # random.choice is the idiomatic, length-safe replacement for indexing
    # with the hard-coded bound random.randint(0, 2).
    posibility = ['rock', 'paper', 'scissor']
    return random.choice(posibility)
| 23 | 50 | 0.598662 |
71d178c96b191f21134b0e3351ee139671d87fc0 | 4,710 | py | Python | train/filelocks.py | mister-bailey/MagNET | 4f75a6e2fe34eabf455d13338f318e3dc4bf0295 | [
"Apache-2.0"
] | null | null | null | train/filelocks.py | mister-bailey/MagNET | 4f75a6e2fe34eabf455d13338f318e3dc4bf0295 | [
"Apache-2.0"
] | null | null | null | train/filelocks.py | mister-bailey/MagNET | 4f75a6e2fe34eabf455d13338f318e3dc4bf0295 | [
"Apache-2.0"
] | null | null | null | from filelock import FileLock, Timeout
import os
import time
| 34.888889 | 174 | 0.56603 |
71d4a4f18765db674c76811fc711aa0406e67032 | 144 | py | Python | speedcom/tests/__init__.py | emissible/emissilbe | 5537e787ccb883a101d2d40b38d480e257ac9755 | [
"MIT"
] | 1 | 2019-02-20T05:11:16.000Z | 2019-02-20T05:11:16.000Z | speedcom/tests/__init__.py | emissible/emissilbe | 5537e787ccb883a101d2d40b38d480e257ac9755 | [
"MIT"
] | null | null | null | speedcom/tests/__init__.py | emissible/emissilbe | 5537e787ccb883a101d2d40b38d480e257ac9755 | [
"MIT"
] | null | null | null | #from . import context
#from . import test_NNModels
#from . import test_data_extract
#from . import test_speedcom
#from . import test_utilities
| 24 | 32 | 0.791667 |
71d4b733728e3fe154331308ec40f232a937aaa6 | 1,637 | py | Python | todo/management/serializers/tasks.py | Sanguet/todo-challenge | 8eabc02081e7ce6b33408558d4a4a39edee3944c | [
"MIT"
] | null | null | null | todo/management/serializers/tasks.py | Sanguet/todo-challenge | 8eabc02081e7ce6b33408558d4a4a39edee3944c | [
"MIT"
] | null | null | null | todo/management/serializers/tasks.py | Sanguet/todo-challenge | 8eabc02081e7ce6b33408558d4a4a39edee3944c | [
"MIT"
] | null | null | null | # Django REST Framework
from rest_framework import serializers
# Model
from todo.management.models import Task
# Utils
from todo.utils.tasks import TaskMetrics
from todo.utils.serializer_fields import CompleteNameUser
| 25.578125 | 80 | 0.607819 |
71d5ac8fe8a1e3e087c79c30be252f654bc0722c | 1,895 | py | Python | outlier_detector.py | Sean-Ker/data_homework | 5f289c692690724ee5973683c53e83299958b270 | [
"Apache-2.0"
] | null | null | null | outlier_detector.py | Sean-Ker/data_homework | 5f289c692690724ee5973683c53e83299958b270 | [
"Apache-2.0"
] | null | null | null | outlier_detector.py | Sean-Ker/data_homework | 5f289c692690724ee5973683c53e83299958b270 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
'''
A function that detects outliers, where k is a standard deviation threshold hyperparameter, preferably (2, 2.5, 3).
The algorithm can handle multivariate data frames with any number of features d.
To do so, it first reduces the dimensionality to 2 using PCA, makes sure that the matrix is positive definite, and calculates the Mahalanobis distance with a threshold value.
Returns a series of n rows back.
'''
# https://www.youtube.com/watch?v=spNpfmWZBmg&t=0s
# Check that matrix is positive definite
| 35.754717 | 180 | 0.713456 |
71d79b9492d4549b986121f837ee137051811f29 | 1,631 | py | Python | arc113/b.py | nishio/atcoder | 8db36537b5d8580745d5f98312162506ad7d7ab4 | [
"MIT"
] | 1 | 2021-03-09T04:28:13.000Z | 2021-03-09T04:28:13.000Z | arc113/b.py | nishio/atcoder | 8db36537b5d8580745d5f98312162506ad7d7ab4 | [
"MIT"
] | null | null | null | arc113/b.py | nishio/atcoder | 8db36537b5d8580745d5f98312162506ad7d7ab4 | [
"MIT"
] | null | null | null | # included from snippets/main.py
# tests
# NOTE(review): each Tn constant is a sample stdin payload and each TEST_Tn a
# doctest session — presumably executed by _test() (defined elsewhere in this
# file): as_input(Tn) redirects input()/read() to the string, then main() is
# expected to print the value on the final line. Confirm against _test().
T1 = """
4 3 2
"""
TEST_T1 = """
>>> as_input(T1)
>>> main()
4
"""
T2 = """
1 2 3
"""
TEST_T2 = """
>>> as_input(T2)
>>> main()
1
"""
T3 = """
3141592 6535897 9323846
"""
TEST_T3 = """
>>> as_input(T3)
>>> main()
2
"""
T4 = """
2 10 1
"""
TEST_T4 = """
>>> as_input(T4)
>>> main()
4
"""
T5 = """
2 20 1
"""
TEST_T5 = """
>>> as_input(T5)
>>> main()
6
"""
def as_input(s):
    """For tests: make this module's input()/read() consume *s* instead of stdin."""
    import io
    buf = io.StringIO(s.strip())
    module_ns = globals()
    module_ns["input"] = lambda: buf.readline().encode("ascii")
    module_ns["read"] = lambda: buf.read().encode("ascii")
if __name__ == "__main__":
    # Fast binary stdin readers (bytes, not str) — typical competitive-
    # programming I/O setup; deep recursion allowed for the solver.
    import sys
    input = sys.stdin.buffer.readline
    read = sys.stdin.buffer.read
    sys.setrecursionlimit(10 ** 6)
    # A trailing "-t" argument runs the embedded doctests via _test()
    # (defined elsewhere in this file) instead of solving from stdin.
    if sys.argv[-1] == "-t":
        print("testing")
        _test()
        sys.exit()
    main()
    sys.exit()
# end of snippets/main.py
| 14.963303 | 59 | 0.498467 |
71d7b6b5d8927503c0de7b2300ecece8268c9b0c | 892 | py | Python | pythonG/objects.py | ezan2000/Cssi_2018 | 2385e9f4557c1a2aa642e21d42dcc935e24c88c3 | [
"Apache-2.0"
] | null | null | null | pythonG/objects.py | ezan2000/Cssi_2018 | 2385e9f4557c1a2aa642e21d42dcc935e24c88c3 | [
"Apache-2.0"
] | null | null | null | pythonG/objects.py | ezan2000/Cssi_2018 | 2385e9f4557c1a2aa642e21d42dcc935e24c88c3 | [
"Apache-2.0"
] | null | null | null | ezan = {
'name': 'ezan',
'age': 18,
'hair': 'brown',
'cool': True ,
}
print(ezan)
ezan = Person( name = "ezan", age = 18, hair = "black", cool = True, hungry = False)
print(ezan.name)
print('I am hungry')
Austin = Person(name = 'austin', age = 18, hair = "Shrek", cool = False, hungry = True)
| 25.485714 | 94 | 0.56278 |
71d7fc5500dfa419709498ae6eaa8bc5f3fa5a27 | 400 | py | Python | 62/main.py | pauvrepetit/leetcode | 6ad093cf543addc4dfa52d72a8e3c0d05a23b771 | [
"MIT"
] | null | null | null | 62/main.py | pauvrepetit/leetcode | 6ad093cf543addc4dfa52d72a8e3c0d05a23b771 | [
"MIT"
] | null | null | null | 62/main.py | pauvrepetit/leetcode | 6ad093cf543addc4dfa52d72a8e3c0d05a23b771 | [
"MIT"
] | null | null | null | # 62.
# 202 x 202 table, every cell initialised to zero.
yanghui = [[0] * 202 for _ in range(202)]
| 23.529412 | 55 | 0.51 |