hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
db44a6bdcd485ae98f0bcb11e91bddb17022662f | 1,456 | py | Python | contacts/migrations_old/0006_data_status.py | I-TECH-UW/mwachx | e191755c3369208d678fceec68dbb4f5f51c453a | [
"Apache-2.0"
] | 3 | 2015-05-27T14:35:49.000Z | 2016-02-26T21:04:32.000Z | contacts/migrations/0006_data_status.py | tperrier/mwachx | 94616659dc29843e661b2ecc9a2e7f1d4e81b5a4 | [
"Apache-2.0"
] | 375 | 2015-01-31T10:08:34.000Z | 2021-06-10T19:44:21.000Z | contacts/migrations_old/0006_data_status.py | I-TECH-UW/mwachx | e191755c3369208d678fceec68dbb4f5f51c453a | [
"Apache-2.0"
] | 6 | 2016-01-10T19:52:41.000Z | 2020-06-15T22:07:24.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import itertools as it
from django.db import models, migrations
def convert_status(apps, schema_editor):
    """Forward data migration.

    Derives the new ``status`` field (pending / attended / missed) from the
    legacy ``skipped`` flag of every Visit and ScheduledPhoneCall row, and
    saves each row.
    """
    visit_model = apps.get_model("contacts", "Visit")
    call_model = apps.get_model("contacts", "ScheduledPhoneCall")
    all_rows = it.chain(visit_model.objects.all(), call_model.objects.all())
    for row in all_rows:
        if row.skipped is None:
            row.status = 'pending'
        elif row.skipped == False:
            row.status = 'attended'
        elif row.skipped == True:
            row.status = 'missed'
        row.save()
def unconvert_status(apps, schema_editor):
    """Reverse data migration: derive the legacy ``skipped`` flag from
    ``status`` (pending -> None, attended -> False, missed -> True) and
    save every Visit and ScheduledPhoneCall row.
    """
    status_to_skipped = {'pending': None, 'attended': False, 'missed': True}
    visit_model = apps.get_model("contacts", "Visit")
    call_model = apps.get_model("contacts", "ScheduledPhoneCall")
    for row in it.chain(visit_model.objects.all(), call_model.objects.all()):
        if row.status in status_to_skipped:
            row.skipped = status_to_skipped[row.status]
        row.save()
| 30.333333 | 79 | 0.643544 |
db4545f1a4dfa83103a39912add856795ff6a347 | 813 | py | Python | core/tests/test_base_time_range_controller.py | One-Green/plant-keeper-master | 67101a4cc7070d26fd1685631a710ae9a60fc5e8 | [
"CC0-1.0"
] | 2 | 2022-02-04T17:52:38.000Z | 2022-02-04T17:52:40.000Z | core/tests/test_base_time_range_controller.py | shanisma/plant-keeper | 3ca92ae2d55544a301e1398496a08a45cca6d15b | [
"CC0-1.0"
] | 4 | 2021-06-16T20:01:50.000Z | 2022-03-09T20:17:53.000Z | core/tests/test_base_time_range_controller.py | shanisma/plant-keeper | 3ca92ae2d55544a301e1398496a08a45cca6d15b | [
"CC0-1.0"
] | 1 | 2021-06-27T10:45:36.000Z | 2021-06-27T10:45:36.000Z | import os
import sys
from datetime import time
import unittest
# Make the project root importable when this file is executed directly.
# Fixed: the original passed the string literal "__file__" to
# os.path.dirname, which always returns '' and so never pointed at this
# module's actual location; use the real __file__ instead.
sys.path.append(
    os.path.dirname(
        os.path.dirname(os.path.join("..", "..", "..", os.path.dirname(__file__)))
    )
)
from core.controller import BaseTimeRangeController
if __name__ == "__main__":
    unittest.main()
| 26.225806 | 84 | 0.688807 |
db45d8bc1a8d49e33721d418ba06b6f827c48c0b | 4,098 | py | Python | generator_code/mp3_generator.py | jurganson/spingen | f8421a26356d0cd1d94a0692846791eb45fce6f5 | [
"MIT"
] | null | null | null | generator_code/mp3_generator.py | jurganson/spingen | f8421a26356d0cd1d94a0692846791eb45fce6f5 | [
"MIT"
] | null | null | null | generator_code/mp3_generator.py | jurganson/spingen | f8421a26356d0cd1d94a0692846791eb45fce6f5 | [
"MIT"
] | null | null | null | from gtts import gTTS as ttos
from pydub import AudioSegment
import os
| 54.64 | 175 | 0.708394 |
db4793142f42cba39f558b2249770456a14a7e8a | 600 | py | Python | relaax/algorithms/ddpg/parameter_server.py | deeplearninc/relaax | a0cf280486dc74dca3857c85ec0e4c34e88d6b2b | [
"MIT"
] | 71 | 2017-01-25T00:26:20.000Z | 2021-02-17T12:39:20.000Z | relaax/algorithms/ddpg/parameter_server.py | deeplearninc/relaax | a0cf280486dc74dca3857c85ec0e4c34e88d6b2b | [
"MIT"
] | 69 | 2017-01-23T19:29:23.000Z | 2018-08-21T13:26:39.000Z | relaax/algorithms/ddpg/parameter_server.py | deeplearninc/relaax | a0cf280486dc74dca3857c85ec0e4c34e88d6b2b | [
"MIT"
] | 13 | 2017-01-23T21:18:09.000Z | 2019-01-29T23:48:30.000Z | from __future__ import absolute_import
from relaax.server.parameter_server import parameter_server_base
from relaax.server.common import session
from . import ddpg_model
| 27.272727 | 69 | 0.745 |
db49fa274fd584b7dd27d84ca85b94655d65a8a2 | 6,946 | py | Python | scripts/make_VFS.py | nvoron23/brython | b1ce5fa39b5d38c0dde138b4e75723fbb3e574ab | [
"BSD-3-Clause"
] | 1 | 2015-11-06T09:32:34.000Z | 2015-11-06T09:32:34.000Z | scripts/make_VFS.py | nvoron23/brython | b1ce5fa39b5d38c0dde138b4e75723fbb3e574ab | [
"BSD-3-Clause"
] | null | null | null | scripts/make_VFS.py | nvoron23/brython | b1ce5fa39b5d38c0dde138b4e75723fbb3e574ab | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import os
import pyminifier
try:
import io as StringIO
except ImportError:
import cStringIO as StringIO # lint:ok
# Check to see if slimit or some other minification library is installed and
# Set minify equal to slimit's minify function.
try:
import slimit
js_minify = slimit.minify
except ImportError as error:
print(error)
js_minify = slimit = None
###############################################################################
def process_unittest(filename):
    """Generate a Brython VFS bundle containing the unittest modules.

    Walks the ``Lib`` tree next to *filename*, collects every ``*.py`` file
    living under a ``unittest`` directory (minified with pyminifier), and
    writes a JavaScript file that stores the sources in
    ``__BRYTHON__.libs['unittest']`` together with a custom import hook.

    filename -- path of the JavaScript file to write; its parent directory
                is taken as the Brython source root.
    """
    print("Generating {}".format(filename))
    nb = 0       # number of Python files bundled
    nb_err = 0   # number of files whose minification failed
    _main_root = os.path.dirname(filename)
    _VFS = {}
    for _mydir in ("Lib",):
        for _root, _dir, _files in os.walk(os.path.join(_main_root, _mydir)):
            if 'unittest' not in _root:
                continue
            if '__pycache__' in _root:
                continue
            for _file in _files:
                _ext = os.path.splitext(_file)[1]
                # Only plain Python sources belong in this bundle.
                # Fixed: the original test `_ext not in ('.py')` was an
                # accidental substring check against the string '.py'.
                if _ext != '.py':
                    continue
                nb += 1
                file_name = os.path.join(_root, _file)
                try:  # python 3
                    with open(file_name, encoding="utf-8") as file_with_data:
                        _data = file_with_data.read()
                except Exception as reason:  # python 2: open() lacks `encoding`
                    with open(file_name, "r") as file_with_data:
                        _data = str(file_with_data.read()).decode("utf-8")
                if not len(_data):
                    print("No data for {} ({}).".format(_file, type(_data)))
                if _ext.lower() == '.py' and _data:
                    try:
                        _data = pyminifier.remove_comments_and_docstrings(
                            _data)
                        _data = pyminifier.dedent(_data)
                    except Exception as error:
                        print(error)
                        nb_err += 1
                # Module path relative to the source root,
                # e.g. /Lib/unittest/result.py -> unittest.result
                _vfs_filename = os.path.join(
                    _root, _file).replace(_main_root, '')
                _vfs_filename = _vfs_filename.replace("\\", "/")
                mod_name = _vfs_filename[len(_mydir) + 2:].replace('/', '.')
                mod_name, ext = os.path.splitext(mod_name)
                is_package = mod_name.endswith('__init__')
                if is_package:
                    mod_name = mod_name[:-9]
                    _VFS[mod_name] = [_data, 1]
                else:
                    _VFS[mod_name] = [_data]
                print(("Adding %s %s" % (mod_name, _vfs_filename)))
    print('%s files, %s errors' % (nb, nb_err))
    with open(filename, "w") as file_to_write_VFS:
        file_to_write_VFS.write('__BRYTHON__.libs = __BRYTHON__.libs || {};\n')
        # Fixed: the original emitted "__BRYTHON__.=libs['unittest']=..."
        # which is a JavaScript syntax error.
        file_to_write_VFS.write("__BRYTHON__.libs['unittest']=%s;\n\n" % json.dumps(_VFS))
        # Fixed: the original emitted
        # "__BRYTHON__.import_from_unittest function(...)" (missing `=`),
        # also a JavaScript syntax error.
        file_to_write_VFS.write("""
__BRYTHON__.import_from_unittest = function(mod_name){
    var stored = __BRYTHON__.libs['unittest'][mod_name]
    if(stored!==undefined){
        var module_contents = stored[0]
        var is_package = stored[1]
        var path = 'py_unittest'
        var module = {name:mod_name,__class__:$B.$ModuleDict,is_package:is_package}
        if(is_package){var package=mod_name}
        else{
            var elts = mod_name.split('.')
            elts.pop()
            var package = elts.join('.')
        }
        $B.modules[mod_name].$package = is_package
        $B.modules[mod_name].__package__ = package
        run_py(module,path,module_contents)
        return true
    }
    return null
}
// add this import function to brython by doing the following:
// <body onload="brython({custom_import_funcs:[__BRYTHON__.import_from_unittest]})">
// this will allow us to import unittest modules.
""")
def process(filename, exclude_dirs=('unittest',)):
    """Generate the main Brython VFS bundle (``py_VFS.js``).

    Walks the ``libs`` and ``Lib`` trees next to *filename*, minifying
    Python sources with pyminifier and JavaScript sources with slimit
    (when available), and writes the result as ``__BRYTHON__.VFS``.

    filename     -- path of the JavaScript file to write.
    exclude_dirs -- directory name fragments to skip (e.g. ``unittest``,
                    which is bundled separately by process_unittest).
                    Changed from a mutable list default to a tuple.
    """
    print("Generating {}".format(filename))
    nb = 0       # number of files bundled (the original also incremented
                 # this once per directory, inflating the reported count)
    nb_err = 0   # number of files whose minification failed
    _main_root = os.path.dirname(filename)
    _VFS = {}
    for _mydir in ("libs", "Lib"):
        for _root, _dir, _files in os.walk(os.path.join(_main_root, _mydir)):
            # skip excluded directory trees (e.g. unittest)
            _flag = False
            for _exclude in exclude_dirs:
                if _exclude in _root:
                    _flag = True
                    break
            if _flag:
                continue  # skip these modules
            if '__pycache__' in _root:
                continue
            for _file in _files:
                _ext = os.path.splitext(_file)[1]
                if _ext not in ('.js', '.py'):
                    continue
                nb += 1
                file_name = os.path.join(_root, _file)
                # Fixed: the original unconditionally called
                # _data.decode('utf-8') / unicode(''), which fails on
                # Python 3; read the same way process_unittest does.
                try:  # python 3
                    with open(file_name, encoding="utf-8") as file_with_data:
                        _data = file_with_data.read()
                except TypeError:  # python 2: open() lacks `encoding`
                    with open(file_name, "r") as file_with_data:
                        _data = str(file_with_data.read()).decode("utf-8")
                if not _data:
                    print('no data for %s' % _file)
                # Fixed: `_ext in '.js'` was an accidental substring test.
                if _ext == '.js':
                    if js_minify is not None:
                        try:
                            _data = js_minify(_data)
                        except Exception as error:
                            print(error)
                elif _ext == '.py' and len(_data) > 0:
                    try:
                        _data = pyminifier.remove_comments_and_docstrings(_data)
                        _data = pyminifier.dedent(_data)
                    except Exception as error:
                        print(error)
                        nb_err += 1
                _vfs_filename = os.path.join(_root, _file).replace(_main_root, '')
                _vfs_filename = _vfs_filename.replace("\\", "/")
                # only ship a curated subset of the crypto_js rollups
                if _vfs_filename.startswith('/libs/crypto_js/rollups/'):
                    if _file not in ('md5.js', 'sha1.js', 'sha3.js',
                                     'sha224.js', 'sha384.js', 'sha512.js'):
                        continue
                mod_name = _vfs_filename[len(_mydir) + 2:].replace('/', '.')
                mod_name, ext = os.path.splitext(mod_name)
                is_package = mod_name.endswith('__init__')
                if is_package:
                    mod_name = mod_name[:-9]
                    _VFS[mod_name] = [ext, _data, 1]
                else:
                    _VFS[mod_name] = [ext, _data]
                print(("adding %s %s" % (mod_name, _vfs_filename)))
    print('%s files, %s errors' % (nb, nb_err))
    with open(filename, "w") as file_to_write_VFS:
        file_to_write_VFS.write('__BRYTHON__.use_VFS = true;\n')
        file_to_write_VFS.write('__BRYTHON__.VFS=%s;\n\n' % json.dumps(_VFS))
###############################################################################
if __name__ == '__main__':
    # Regenerate the main VFS bundle for the Brython sources in ../src
    # (relative to the current working directory).
    _main_root = os.path.join(os.getcwd(), '../src')
    process(os.path.join(_main_root, "py_VFS.js"))
| 36.177083 | 91 | 0.512093 |
db4a6abf2a3e16936115e864f7caf11878e6ba2c | 9,659 | py | Python | main.py | rcox771/spectrum_scanner | 71559d62ca9dc9f66d66b7ada4491de42c6cdd52 | [
"MIT"
] | null | null | null | main.py | rcox771/spectrum_scanner | 71559d62ca9dc9f66d66b7ada4491de42c6cdd52 | [
"MIT"
] | null | null | null | main.py | rcox771/spectrum_scanner | 71559d62ca9dc9f66d66b7ada4491de42c6cdd52 | [
"MIT"
] | null | null | null | from rtlsdr import RtlSdr
from contextlib import closing
from matplotlib import pyplot as plt
import numpy as np
from scipy.signal import spectrogram, windows
from scipy import signal
from skimage.io import imsave, imread
from datetime import datetime
import json
import os
from tqdm import tqdm
import time
from queue import Queue
import asyncio
from pathlib import Path
import warnings
# Silence noisy warning categories emitted by the scientific stack.
for cat in [RuntimeWarning, UserWarning, FutureWarning]:
    warnings.filterwarnings("ignore", category=cat)
# y -- spectrogram, nf by nt array
# dbf -- Dynamic range of the spectrum
from sklearn.preprocessing import MinMaxScaler, StandardScaler
#string_to_linspace('24M:28M:3M')
if __name__ == "__main__":
#split_images()
#plot_one()
scan(repeats=3, target_hpb=1500)
split_images()
#plot_one() | 27.997101 | 104 | 0.576871 |
db4a75d192569c27cd0ea38a505083fba87c919d | 67 | py | Python | test/__init__.py | donbowman/rdflib | c1be731c8e6bbe997cc3f25890bbaf685499c517 | [
"BSD-3-Clause"
] | 1,424 | 2015-01-04T13:10:22.000Z | 2022-03-29T15:12:38.000Z | test/__init__.py | donbowman/rdflib | c1be731c8e6bbe997cc3f25890bbaf685499c517 | [
"BSD-3-Clause"
] | 1,148 | 2015-01-01T18:26:18.000Z | 2022-03-31T21:51:53.000Z | test/__init__.py | jjon/rdflib | 4c2ab7b392b353bf3c6088017ec9351ce8ac3db6 | [
"BSD-3-Clause"
] | 459 | 2015-01-03T14:41:34.000Z | 2022-03-14T22:06:47.000Z | #
import os
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
| 13.4 | 53 | 0.746269 |
db4a91969cbd0645d892f196740aa9b468c864c7 | 7,333 | py | Python | examples/mnist1.py | braingineer/pyromancy | 7a7ab1a6835fd63b9153463dd08bb53630f15c62 | [
"MIT"
] | null | null | null | examples/mnist1.py | braingineer/pyromancy | 7a7ab1a6835fd63b9153463dd08bb53630f15c62 | [
"MIT"
] | 1 | 2021-03-25T22:13:53.000Z | 2021-03-25T22:13:53.000Z | examples/mnist1.py | braingineer/pyromancy | 7a7ab1a6835fd63b9153463dd08bb53630f15c62 | [
"MIT"
] | null | null | null | from __future__ import print_function
import argparse
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
from tqdm import tqdm
from pyromancy import pyromq
from pyromancy.losses import LossGroup, NegativeLogLikelihood
from pyromancy.metrics import MetricGroup, Accuracy
from pyromancy.subscribers import LogSubscriber
# noinspection PyCallingNonCallable,PyCallingNonCallable
def run_once(args, train_loader, test_loader):
    """Run one full training session.

    Builds the model, wires pyromq event publishing / metric and loss
    groups, trains for ``args.epochs`` epochs on *train_loader*, and
    evaluates on *test_loader* after every epoch.

    args         -- parsed CLI namespace (cuda, lr, epochs, momentum, ...).
    train_loader -- iterable of (data, target) training batches.
    test_loader  -- iterable of (data, target) evaluation batches.
    """
    broker = pyromq.Broker()
    # NOTE(review): Net is not visible in this chunk — presumably the
    # network class defined elsewhere in this file.
    model = Net()
    if args.cuda:
        model.cuda()
    training_events = pyromq.TrainingEventPublisher(broker=broker)
    # log experiment events both to a file under logs/ and (optionally) stdout
    broker.add_subscriber(LogSubscriber(experiment_uid=args.experiment_name,
                                        log_file=os.path.join('logs', args.experiment_name),
                                        to_console=args.log_to_console))
    opt = torch.optim.SGD(params=model.parameters(),
                          lr=args.lr,
                          weight_decay=args.weight_decay,
                          momentum=args.momentum)
    # Losses: NLL on the training data, with gradient clipping handled
    # by the LossGroup.
    losses = LossGroup(optimizer=opt,
                       grad_clip_norm=args.grad_clip_norm,
                       name='losses',
                       channel_name=pyromq.channels.METRIC_EVENTS,
                       broker=broker)
    losses.add(NegativeLogLikelihood(name='nll',
                                     target_name='y_target',
                                     output_name='y_pred'),
               data_target='train')
    # Metrics
    metrics = MetricGroup(name='metrics',
                          channel_name=pyromq.channels.METRIC_EVENTS,
                          broker=broker)
    metrics.add(Accuracy(name='acc',
                         target_name='y_target',
                         output_name='y_pred'),
                data_target='*')
    metrics.add(NegativeLogLikelihood(name='nll',
                                      target_name='y_target',
                                      output_name='y_pred'),
                data_target='val')
    training_events.training_start()
    for _ in tqdm(range(args.epochs), total=args.epochs):
        training_events.epoch_start()
        model.train(True)
        for data, target in train_loader:
            # From the original example
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            # Variable() is the legacy (pre-0.4) PyTorch autograd wrapper
            data, target = Variable(data), Variable(target)
            # put the incoming batch data into a dictionary
            batch_dict = {'x_data': data, 'y_target': target}
            # Training Event
            training_events.batch_start()
            # Get model outputs
            predictions = {'y_pred': model(batch_dict['x_data'])}
            # Compute Metrics
            metrics.compute(in_dict=batch_dict, out_dict=predictions,
                            data_type='train')
            # Compute Losses
            losses.compute(in_dict=batch_dict, out_dict=predictions,
                           data_type='train')
            losses.step()
            # Training Event
            training_events.batch_end()
        # evaluation pass: no parameter updates, only 'val' metrics
        model.train(False)
        for data, target in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            data, target = Variable(data, volatile=True), Variable(target)
            batch_dict = {'x_data': data, 'y_target': target}
            # Training Event
            training_events.batch_start()
            predictions = {'y_pred': model(batch_dict['x_data'])}
            metrics.compute(in_dict=batch_dict,
                            out_dict=predictions,
                            data_type='val')
            training_events.batch_end()
        training_events.epoch_end()
if __name__ == "__main__":
main()
| 33.949074 | 92 | 0.571799 |
db4b0c2266fee61af6dfa6c16082c9e18c028c39 | 4,345 | py | Python | selfdrive/locationd/calibrationd.py | matthewklinko/openpilot | b0563a59684d0901f99abbb58ac1fbd729ded1f9 | [
"MIT"
] | 3 | 2019-06-29T08:32:58.000Z | 2019-09-06T15:58:03.000Z | selfdrive/locationd/calibrationd.py | matthewklinko/openpilot | b0563a59684d0901f99abbb58ac1fbd729ded1f9 | [
"MIT"
] | 1 | 2019-09-22T06:44:10.000Z | 2019-09-22T06:44:10.000Z | selfdrive/locationd/calibrationd.py | matthewklinko/openpilot | b0563a59684d0901f99abbb58ac1fbd729ded1f9 | [
"MIT"
] | 2 | 2020-03-18T02:56:23.000Z | 2020-05-12T16:22:31.000Z | #!/usr/bin/env python
import os
import copy
import json
import numpy as np
import selfdrive.messaging as messaging
from selfdrive.locationd.calibration_helpers import Calibration
from selfdrive.swaglog import cloudlog
from common.params import Params
from common.transformations.model import model_height
from common.transformations.camera import view_frame_from_device_frame, get_view_frame_from_road_frame, \
eon_intrinsics, get_calib_from_vp, H, W
# Thresholds and tuning constants for the vanishing-point calibration.
MPH_TO_MS = 0.44704  # miles per hour -> meters per second
MIN_SPEED_FILTER = 15 * MPH_TO_MS  # ignore frames while driving slower than this
MAX_YAW_RATE_FILTER = np.radians(2)  # per second
INPUTS_NEEDED = 300   # allow to update VP every so many frames
INPUTS_WANTED = 600   # We want a little bit more than we need for stability
WRITE_CYCLES = 400  # write every 400 cycles
VP_INIT = np.array([W/2., H/2.])  # initial vanishing-point estimate: image center
# These validity corners were chosen by looking at 1000
# and taking most extreme cases with some margin.
VP_VALIDITY_CORNERS = np.array([[W//2 - 150, 280], [W//2 + 150, 540]])
DEBUG = os.getenv("DEBUG") is not None  # enable debug output via the DEBUG env var
if __name__ == "__main__":
    # NOTE(review): main() is not visible in this chunk — presumably
    # defined elsewhere in this file.
    main()
| 35.325203 | 105 | 0.711853 |
db4b58f91ffeef6d5055943e105969fe3018f79e | 24,453 | py | Python | hunter/main.py | datastax-labs/hunter | 3631cc3fa529991297a8b631bbae15b138cce307 | [
"Apache-2.0"
] | 17 | 2021-09-03T07:32:40.000Z | 2022-03-24T21:56:22.000Z | hunter/main.py | datastax-labs/hunter | 3631cc3fa529991297a8b631bbae15b138cce307 | [
"Apache-2.0"
] | 1 | 2021-12-02T14:05:07.000Z | 2021-12-02T14:05:07.000Z | hunter/main.py | datastax-labs/hunter | 3631cc3fa529991297a8b631bbae15b138cce307 | [
"Apache-2.0"
] | 2 | 2022-01-18T18:40:41.000Z | 2022-03-11T15:33:25.000Z | import argparse
import copy
import logging
import sys
from dataclasses import dataclass
from datetime import datetime, timedelta
from slack_sdk import WebClient
from typing import Dict, Optional, List
import pytz
from hunter import config
from hunter.attributes import get_back_links
from hunter.config import ConfigError, Config
from hunter.data_selector import DataSelector
from hunter.grafana import GrafanaError, Grafana, Annotation
from hunter.graphite import GraphiteError
from hunter.importer import DataImportError, Importers
from hunter.report import Report
from hunter.series import (
AnalysisOptions,
ChangePointGroup,
SeriesComparison,
compare,
AnalyzedSeries,
)
from hunter.slack import SlackNotifier, NotificationError
from hunter.test_config import TestConfigError, TestConfig, GraphiteTestConfig
from hunter.util import parse_datetime, DateFormatError, interpolate
def setup_data_selector_parser(parser: argparse.ArgumentParser):
    """Register the data-selection command line options on *parser*.

    Adds --branch, --metrics, --attrs, --last, plus two mutually
    exclusive groups bounding the analyzed time span: the lower bound
    (--since-commit / --since-version / --since) and the upper bound
    (--until-commit / --until-version / --until).
    """
    parser.add_argument(
        "--branch", metavar="STRING", dest="branch", help="name of the branch", nargs="?"
    )
    parser.add_argument(
        "--metrics",
        metavar="LIST",
        dest="metrics",
        help="a comma-separated list of metrics to analyze",
    )
    parser.add_argument(
        "--attrs",
        metavar="LIST",
        dest="attributes",
        help="a comma-separated list of attribute names associated with the runs "
        "(e.g. commit, branch, version); "
        "if not specified, it will be automatically filled based on available information",
    )
    # Each bound of the time span may be given as a commit, a version,
    # or a date -- exactly one form per bound.
    span_bounds = (
        (
            ("--since-commit", "STRING", "since_commit",
             "the commit at the start of the time span to analyze"),
            ("--since-version", "STRING", "since_version",
             "the version at the start of the time span to analyze"),
            ("--since", "DATE", "since_time",
             "the start of the time span to analyze; "
             "accepts ISO, and human-readable dates like '10 weeks ago'"),
        ),
        (
            ("--until-commit", "STRING", "until_commit",
             "the commit at the end of the time span to analyze"),
            ("--until-version", "STRING", "until_version",
             "the version at the end of the time span to analyze"),
            ("--until", "DATE", "until_time",
             "the end of the time span to analyze; same syntax as --since"),
        ),
    )
    for bound_options in span_bounds:
        exclusive_group = parser.add_mutually_exclusive_group()
        for flag, metavar, dest, help_text in bound_options:
            exclusive_group.add_argument(flag, metavar=metavar, dest=dest, help=help_text)
    parser.add_argument(
        "--last",
        type=int,
        metavar="COUNT",
        dest="last_n_points",
        help="the number of data points to take from the end of the series"
    )
def data_selector_from_args(args: argparse.Namespace) -> DataSelector:
    """Translate the parsed data-selection CLI options into a DataSelector.

    Only options actually supplied on the command line are copied onto the
    selector; everything else keeps the DataSelector defaults.
    """
    selector = DataSelector()
    if args.branch:
        selector.branch = args.branch
    if args.metrics is not None:
        selector.metrics = list(args.metrics.split(","))
    if args.attributes is not None:
        selector.attributes = list(args.attributes.split(","))
    # options copied verbatim when present
    for attr_name in ("since_commit", "since_version",
                      "until_commit", "until_version", "last_n_points"):
        value = getattr(args, attr_name)
        if value is not None:
            setattr(selector, attr_name, value)
    # date options are parsed from their textual form
    if args.since_time is not None:
        selector.since_time = parse_datetime(args.since_time)
    if args.until_time is not None:
        selector.until_time = parse_datetime(args.until_time)
    return selector
def setup_analysis_options_parser(parser: argparse.ArgumentParser):
    """Register the change-point analysis options on *parser*.

    Adds -P/--p-value, -M/--magnitude and --window.

    Fixed: the p-value option was registered with the single option
    string "-P, --p-value", which argparse treats as one literal option
    name, making --p-value unusable; it is now registered as the short
    alias "-P" plus the long option "--p-value".
    """
    parser.add_argument(
        "-P",
        "--p-value",
        dest="pvalue",
        type=float,
        default=0.001,
        help="maximum accepted P-value of a change-point; "
        "P denotes the probability that the change-point has "
        "been found by a random coincidence, rather than a real "
        "difference between the data distributions",
    )
    parser.add_argument(
        "-M",
        "--magnitude",
        dest="magnitude",
        type=float,
        default=0.0,
        help="minimum accepted magnitude of a change-point "
        "computed as abs(new_mean / old_mean - 1.0); use it "
        "to filter out stupidly small changes like < 0.01",
    )
    parser.add_argument(
        "--window",
        default=50,
        type=int,
        dest="window",
        help="the number of data points analyzed at once; "
        "the window size affects the discriminative "
        "power of the change point detection algorithm; "
        "large windows are less susceptible to noise; "
        "however, a very large window may cause dismissing short regressions "
        "as noise so it is best to keep it short enough to include not more "
        "than a few change points (optimally at most 1)",
    )
def analysis_options_from_args(args: argparse.Namespace) -> AnalysisOptions:
    """Translate the parsed analysis CLI options into AnalysisOptions.

    Options left unset on the command line keep the AnalysisOptions
    defaults.
    """
    options = AnalysisOptions()
    for arg_name, option_name in (
        ("pvalue", "max_pvalue"),
        ("magnitude", "min_magnitude"),
        ("window", "window_len"),
    ):
        value = getattr(args, arg_name)
        if value is not None:
            setattr(options, option_name, value)
    return options
def main():
    """Command line entry point: build the argument parser, then dispatch
    one of the subcommands (list-tests, list-metrics, list-groups,
    analyze, regressions, remove-annotations, validate).

    Known failure types (config, import, Graphite/Grafana, Slack, date
    parsing) are logged and mapped to exit code 1.
    """
    logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
    parser = argparse.ArgumentParser(description="Hunts performance regressions in Fallout results")
    subparsers = parser.add_subparsers(dest="command")
    # --- subcommand definitions -------------------------------------
    list_tests_parser = subparsers.add_parser("list-tests", help="list available tests")
    list_tests_parser.add_argument("group", help="name of the group of the tests", nargs="*")
    list_metrics_parser = subparsers.add_parser(
        "list-metrics", help="list available metrics for a test"
    )
    list_metrics_parser.add_argument("test", help="name of the test")
    subparsers.add_parser("list-groups", help="list available groups of tests")
    analyze_parser = subparsers.add_parser(
        "analyze",
        help="analyze performance test results",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    analyze_parser.add_argument("tests", help="name of the test or group of the tests", nargs="+")
    analyze_parser.add_argument(
        "--update-grafana",
        help="Update Grafana dashboards with appropriate annotations of change points",
        action="store_true",
    )
    analyze_parser.add_argument(
        "--notify-slack",
        help="Send notification containing a summary of change points to given Slack channels",
        nargs="+",
    )
    analyze_parser.add_argument(
        "--cph-report-since",
        help="Sets a limit on the date range of the Change Point History reported to Slack. Same syntax as --since.",
        metavar="DATE",
        dest="cph_report_since",
    )
    setup_data_selector_parser(analyze_parser)
    setup_analysis_options_parser(analyze_parser)
    regressions_parser = subparsers.add_parser("regressions", help="find performance regressions")
    regressions_parser.add_argument(
        "tests", help="name of the test or group of the tests", nargs="+"
    )
    setup_data_selector_parser(regressions_parser)
    setup_analysis_options_parser(regressions_parser)
    remove_annotations_parser = subparsers.add_parser("remove-annotations")
    remove_annotations_parser.add_argument(
        "tests", help="name of the test or test group", nargs="*"
    )
    remove_annotations_parser.add_argument(
        "--force", help="don't ask questions, just do it", dest="force", action="store_true"
    )
    validate_parser = subparsers.add_parser("validate",
                        help="validates the tests and metrics defined in the configuration")
    # --- dispatch ---------------------------------------------------
    try:
        args = parser.parse_args()
        conf = config.load_config()
        # NOTE(review): Hunter/HunterError are not visible in this chunk —
        # presumably defined elsewhere in this file.
        hunter = Hunter(conf)
        if args.command == "list-groups":
            hunter.list_test_groups()
        if args.command == "list-tests":
            group_names = args.group if args.group else None
            hunter.list_tests(group_names)
        if args.command == "list-metrics":
            test = hunter.get_test(args.test)
            hunter.list_metrics(test)
        if args.command == "analyze":
            update_grafana_flag = args.update_grafana
            slack_notification_channels = args.notify_slack
            # NOTE(review): called even when --cph-report-since is omitted;
            # presumably parse_datetime tolerates None — confirm.
            slack_cph_since = parse_datetime(args.cph_report_since)
            data_selector = data_selector_from_args(args)
            options = analysis_options_from_args(args)
            tests = hunter.get_tests(*args.tests)
            tests_analyzed_series = {test.name: None for test in tests}
            # per-test analysis; failures of one test don't stop the rest
            for test in tests:
                try:
                    analyzed_series = hunter.analyze(test, selector=data_selector, options=options)
                    if update_grafana_flag:
                        if not isinstance(test, GraphiteTestConfig):
                            raise GrafanaError(f"Not a Graphite test")
                        hunter.update_grafana_annotations(test, analyzed_series)
                    if slack_notification_channels:
                        tests_analyzed_series[test.name] = analyzed_series
                except DataImportError as err:
                    logging.error(err.message)
                except GrafanaError as err:
                    logging.error(
                        f"Failed to update grafana dashboards for {test.name}: {err.message}"
                    )
            if slack_notification_channels:
                hunter.notify_slack(
                    tests_analyzed_series,
                    selector=data_selector,
                    channels=slack_notification_channels,
                    since=slack_cph_since,
                )
        if args.command == "regressions":
            data_selector = data_selector_from_args(args)
            options = analysis_options_from_args(args)
            tests = hunter.get_tests(*args.tests)
            regressing_test_count = 0
            errors = 0
            for test in tests:
                try:
                    regressions = hunter.regressions(
                        test, selector=data_selector, options=options
                    )
                    if regressions:
                        regressing_test_count += 1
                except HunterError as err:
                    logging.error(err.message)
                    errors += 1
                except DataImportError as err:
                    logging.error(err.message)
                    errors += 1
            if regressing_test_count == 0:
                print("No regressions found!")
            elif regressing_test_count == 1:
                print("Regressions in 1 test found")
            else:
                print(f"Regressions in {regressing_test_count} tests found")
            if errors > 0:
                print(f"Some tests were skipped due to import / analyze errors. Consult error log.")
        if args.command == "remove-annotations":
            if args.tests:
                tests = hunter.get_tests(*args.tests)
                for test in tests:
                    hunter.remove_grafana_annotations(test, args.force)
            else:
                # no tests given: remove annotations for everything
                hunter.remove_grafana_annotations(None, args.force)
        if args.command == "validate":
            hunter.validate()
        if args.command is None:
            parser.print_usage()
    # --- error handling: known failures exit with code 1 -------------
    except ConfigError as err:
        logging.error(err.message)
        exit(1)
    except TestConfigError as err:
        logging.error(err.message)
        exit(1)
    except GraphiteError as err:
        logging.error(err.message)
        exit(1)
    except GrafanaError as err:
        logging.error(err.message)
        exit(1)
    except DataImportError as err:
        logging.error(err.message)
        exit(1)
    except HunterError as err:
        logging.error(err.message)
        exit(1)
    except DateFormatError as err:
        logging.error(err.message)
        exit(1)
    except NotificationError as err:
        logging.error(err.message)
        exit(1)
if __name__ == "__main__":
main()
| 38.630332 | 117 | 0.613953 |
db4c9fb7ae81031adc5833740cfc20ab17a83afb | 3,036 | py | Python | docs/python/conf.py | jun-yoon/onnxruntime | 806e24d5c69693533ed4b6fa56b84095efa5df70 | [
"MIT"
] | 2 | 2019-01-29T03:48:42.000Z | 2019-01-29T07:51:31.000Z | docs/python/conf.py | jun-yoon/onnxruntime | 806e24d5c69693533ed4b6fa56b84095efa5df70 | [
"MIT"
] | 2 | 2019-01-09T16:03:17.000Z | 2019-02-13T13:58:28.000Z | docs/python/conf.py | jun-yoon/onnxruntime | 806e24d5c69693533ed4b6fa56b84095efa5df70 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
import os
import sys
import shutil
# Check these extensions were installed.
import sphinx_gallery.gen_gallery
# The package should be installed in a virtual environment.
import onnxruntime
# The documentation requires two extensions available at:
# https://github.com/xadupre/sphinx-docfx-yaml
# https://github.com/xadupre/sphinx-docfx-markdown
import sphinx_modern_theme
# -- Project information -----------------------------------------------------
project = 'ONNX Runtime'
copyright = '2018, Microsoft'
author = 'Microsoft'
version = onnxruntime.__version__
release = version
# -- General configuration ---------------------------------------------------
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.imgmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
"sphinx.ext.autodoc",
'sphinx.ext.githubpages',
"sphinx_gallery.gen_gallery",
'sphinx.ext.autodoc',
"docfx_yaml.extension",
"docfx_markdown",
"pyquickhelper.sphinxext.sphinx_runpython_extension",
]
templates_path = ['_templates']
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
source_suffix = ['.rst', '.md']
master_doc = 'intro'
language = "en"
exclude_patterns = []
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
html_theme = "sphinx_modern_theme"
html_theme_path = [sphinx_modern_theme.get_html_theme_path()]
html_logo = "../MSFT-Onnx-Runtime-11282019-Logo.png"
html_static_path = ['_static']
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for Sphinx Gallery ----------------------------------------------
sphinx_gallery_conf = {
'examples_dirs': 'examples',
'gallery_dirs': 'auto_examples',
}
# -- markdown options -----------------------------------------------------------
md_image_dest = "media"
md_link_replace = {
'#onnxruntimesessionoptionsenable-profiling)': '#class-onnxruntimesessionoptions)',
}
# -- Setup actions -----------------------------------------------------------
| 29.192308 | 118 | 0.635705 |
db4cd73478117d82a5229f15076b8071351fd162 | 586 | py | Python | traffic_sim/__main__.py | ngngardner/toc_project | 15a111a2731b583f82e65c622d16d32af4fe3ae0 | [
"MIT"
] | null | null | null | traffic_sim/__main__.py | ngngardner/toc_project | 15a111a2731b583f82e65c622d16d32af4fe3ae0 | [
"MIT"
] | null | null | null | traffic_sim/__main__.py | ngngardner/toc_project | 15a111a2731b583f82e65c622d16d32af4fe3ae0 | [
"MIT"
] | null | null | null | """Traffic simulator code."""
import sys
from os import path
from traffic_sim.analysis import TrafficExperiment
from traffic_sim.console import console
# When executed directly (not imported as part of a package), add the
# grandparent of this file's directory to the front of sys.path.
if not __package__:
    _path = path.realpath(path.abspath(__file__))
    sys.path.insert(0, path.dirname(path.dirname(_path)))
def main():
"""Run code from CLI."""
console.log('traffic sim')
num_trials = 30
ex = TrafficExperiment(
experiments=100,
trials=num_trials,
rows=10,
cols=10,
epochs=10,
)
ex.run()
ex.analyze()
if __name__ == '__main__':
main()
| 18.903226 | 57 | 0.643345 |
db4d954b047874012d94933f5000302aa9b31037 | 1,500 | py | Python | TSFpy/debug/sample_fibonacci.py | ooblog/TSF1KEV | f7d4b4ff88f52ba00b46eb53ed98f8ea62ec2f6d | [
"MIT"
] | null | null | null | TSFpy/debug/sample_fibonacci.py | ooblog/TSF1KEV | f7d4b4ff88f52ba00b46eb53ed98f8ea62ec2f6d | [
"MIT"
] | null | null | null | TSFpy/debug/sample_fibonacci.py | ooblog/TSF1KEV | f7d4b4ff88f52ba00b46eb53ed98f8ea62ec2f6d | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import division,print_function,absolute_import,unicode_literals
import sys
import os
os.chdir(sys.path[0])
sys.path.append('/mnt/sda2/github/TSF1KEV/TSFpy')
from TSF_io import *
#from TSF_Forth import *
from TSF_shuffle import *
from TSF_match import *
from TSF_calc import *
from TSF_time import *
TSF_Forth_init(TSF_io_argvs(),[TSF_shuffle_Initwords,TSF_match_Initwords,TSF_calc_Initwords,TSF_time_Initwords])
TSF_Forth_setTSF("TSF_Tab-Separated-Forth:",
"\t".join(["UTF-8","#TSF_encoding","200","#TSF_calcPR","N-Fibonacci:","#TSF_this","0","#TSF_fin."]),
TSF_style="T")
TSF_Forth_setTSF("N-Fibonacci:",
"\t".join(["TSF_argvs:","#TSF_cloneargvs","TSF_argvs:","#TSF_lenthe","[0]Z[Fibcount:0]~[TSF_argvs:0]","#TSF_calcDC","Fibcount:","0","#TSF_pokethe","Fibonacci:","#TSF_this"]),
TSF_style="T")
TSF_Forth_setTSF("Fibonacci:",
"\t".join(["[Fibcount:1]Z1~[Fibcount:1]","#TSF_calcDC","((2&(([0]+3)*[0]+2)^)/((2&(2*[0]+2)^)-(2&([0]+1)^)-1)\\1)#(2&([0]+1)^)","#TSF_calcDC","1","#TSF_echoN","[Fibcount:1]+1","#TSF_calcDC","Fibcount:","1","#TSF_pokethe","Fibjump:","[Fibcount:0]-([Fibcount:1]+1)o0~1","#TSF_calcDC","#TSF_peekthe","#TSF_this"]),
TSF_style="T")
TSF_Forth_setTSF("Fibcount:",
"\t".join(["20","-1"]),
TSF_style="T")
TSF_Forth_setTSF("Fibjump:",
"\t".join(["Fibonacci:","#exit"]),
TSF_style="T")
TSF_Forth_addfin(TSF_io_argvs())
TSF_Forth_argvsleftcut(TSF_io_argvs(),1)
TSF_Forth_run()
| 39.473684 | 315 | 0.675333 |
db4dff7ffc5831999b457d95fed00095a9bee6b8 | 6,545 | py | Python | Tomboy2Evernote.py | rguptan/Tomboy2Evernote | 2bee66537d080c13856811b806613ca6aaef8833 | [
"MIT"
] | null | null | null | Tomboy2Evernote.py | rguptan/Tomboy2Evernote | 2bee66537d080c13856811b806613ca6aaef8833 | [
"MIT"
] | null | null | null | Tomboy2Evernote.py | rguptan/Tomboy2Evernote | 2bee66537d080c13856811b806613ca6aaef8833 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import re
import sys, getopt
import glob
import os
if __name__ == "__main__":
main(sys.argv[1:])
| 34.088542 | 119 | 0.60382 |
db4e4eef4ddc738259fac8554c6c1cde5bc457e8 | 1,873 | py | Python | demo.py | williamfzc/pyat | 4e9792d4bfdc119d910eb88cf8a13a0ab7848518 | [
"MIT"
] | 20 | 2018-11-01T03:49:56.000Z | 2020-07-23T12:19:20.000Z | demo.py | williamfzc/pyat | 4e9792d4bfdc119d910eb88cf8a13a0ab7848518 | [
"MIT"
] | 2 | 2018-12-28T05:40:47.000Z | 2019-05-20T02:23:29.000Z | demo.py | williamfzc/pyat | 4e9792d4bfdc119d910eb88cf8a13a0ab7848518 | [
"MIT"
] | 14 | 2018-11-01T09:01:38.000Z | 2021-06-09T07:40:45.000Z | from pyatool import PYAToolkit
# toolkit
# adb
PYAToolkit.bind_cmd(func_name='test_a', command='shell pm list package | grep google')
#
PYAToolkit.bind_func(real_func=test_b)
# log
PYAToolkit.switch_logger(True)
#
d = PYAToolkit('123456F')
assert d.is_connected()
#
# d = PYAToolkit('123456F', mode='remote')
#
result = d.test_a()
#
# package:com.google.android.webview
#
result = d.test_b()
# i am test_b, running on 123456F
# `std` `standard_func`
#
d.std.get_current_activity(toolkit=d)
#
all_functions = d.current_function()
print(all_functions)
#
# id
d.hello_world()
#
installed_package = d.show_package()
#
current_activity_name = d.get_current_activity()
# apkurlpathgithub
d.install_from(url=r'https://github.com/williamfzc/simhand2/releases/download/v0.1.2/app-debug.apk')
# d.install_from(path=r'/Users/admin/some_path/some_apk.apk')
#
target_package_name = 'com.github.williamfzc.simhand2'
is_installed = d.is_installed(package_name=target_package_name)
#
d.clean_cache(target_package_name)
if is_installed:
d.uninstall(target_package_name)
# ip
local_address = d.get_ip_address()
print(local_address)
# wifi
d.switch_wifi(False)
#
d.switch_airplane(True)
d.switch_airplane(False)
d.switch_wifi(True)
#
d.set_ime('com.sohu.inputmethod.sogouoem/.SogouIME')
# push and pull
d.push('./README.md', '/sdcard/')
d.pull('/sdcard/README.md', './haha.md')
# send keyevent
d.input_key_event(26)
d.input_key_event(26)
# swipe
d.swipe(500, 1200, 500, 200)
# click
d.click(200, 200)
| 20.811111 | 100 | 0.767218 |
db4f84187c639afbc8e53e791899d9a207e520b3 | 1,791 | py | Python | nnlab/nn/graph.py | nlab-mpg/nnlab | 56aabb53fa7b86601b35c7b8c9e890d50e19d9af | [
"MIT"
] | null | null | null | nnlab/nn/graph.py | nlab-mpg/nnlab | 56aabb53fa7b86601b35c7b8c9e890d50e19d9af | [
"MIT"
] | null | null | null | nnlab/nn/graph.py | nlab-mpg/nnlab | 56aabb53fa7b86601b35c7b8c9e890d50e19d9af | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
from six.moves import xrange, zip
import tensorflow as tf
from .tensor import Tensor
| 33.166667 | 90 | 0.641541 |
db5061768015e77516d7fdac7ebe34947ba071f8 | 18,798 | py | Python | local-rotations.py | katiekruzan/masters-thesis | c9b89a0995957b5b50442b86ae8a38388f1fb720 | [
"MIT"
] | null | null | null | local-rotations.py | katiekruzan/masters-thesis | c9b89a0995957b5b50442b86ae8a38388f1fb720 | [
"MIT"
] | null | null | null | local-rotations.py | katiekruzan/masters-thesis | c9b89a0995957b5b50442b86ae8a38388f1fb720 | [
"MIT"
] | null | null | null | """
Here we're going to code for the local rotations. We're doing an object oriented approach
Left and right are in reference to the origin
"""
__version__ = 1.0
__author__ = 'Katie Kruzan'
import string # just to get the alphabet easily iterable
import sys # This just helps us in our printing
from typing import Dict # This helps us in our documentation
# Getting the structure for the classes we're putting together
def standardCircle(num_verts: int) -> (Dict[str, Segment], Dict[str, Outer], Dict[str, Inner]):
"""
This will go through and initialize our standard starting circle
:param num_verts: the number of outer nodes we will have
:returns: tuple(segs, outs, inns)
-segs - dictionary of str: Segment objects in the circle \\
-outs - dictionary of str: Outer objects in the circle \\
-inns - dictionary of str: Inner objects in the circle
"""
# Initializing our dictionaries
segs = dict()
outs = dict()
inns = dict()
# Running through the number of vertices we will be edning up with
for i in range(num_verts):
# start with an inner node - labeling with lowercase letters
inn = Inner(string.ascii_letters[i])
# If we aren't on the first one, connect it to the previous one.
if i != 0:
inn.setLeftInner(inns[string.ascii_letters[i - 1]])
# If we've hit the end of the line, go ahead and close up the circle.
if i == num_verts - 1:
inn.setRightInner(inns[string.ascii_letters[0]])
# then make the outer
out = Outer(str(i + 1))
# Go ahead and connect the inner we just made with this outer node
out.setAdjInner(inn)
# If we aren't on the first one, go ahead and connect it to the previous segment
if i != 0:
out.setLeftSegment(segs[str(-i)])
# Now time to make the segment
seg = Segment(str(-i - 1))
# Go ahead and connect the outer node we just made with this segment
seg.setLeftOuter(out)
# If we're at the end of the circle, then we close it up. Otherwise, move on
if i == num_verts - 1:
seg.setRightOuter(outs[str(1)])
# add them to our dictionaries
segs[seg.getName()] = seg
outs[out.getName()] = out
inns[inn.getName()] = inn
# If we've made it here, then we've made the full circle and are ready to return it
return segs, outs, inns
def findTheFace(source_in: Inner) -> list:
"""
This will take an inner node and use the algorithm to walk the face that it is on.
The order of the face will be i, o, s, o, i repeat
:param source_in: Inner node object we are starting from.
:return: face: a list representing the face. This list is of inner, outer, and segment objects in the
order i, o, s, o, i, repeat.
"""
# initialize the list
face = list()
# starting the face with the source inner node.
face.append(source_in)
# initialize the ending inner node we will be using for comparison
end_in = None
# As long as we haven't looped back around, go through the following process.
while source_in != end_in:
# inner: find adjacent outer
face.append(face[-1].getAdjOuter())
# outer: go to right seg
face.append(face[-1].getRightSegment())
# segment: go to right outer
face.append(face[-1].getRightOuter())
# outer: then adj inner
face.append(face[-1].getAdjInner())
# then left inner and repeat.
# set this inner node as our node to compare to our starting node.
end_in = face[-1].getLeftInner()
face.append(end_in)
return face
def faceCannonOrder(face: list) -> list:
"""
Just list the face with the face elements in order.
We will do it with the first numerical face, and then go right before it for an order that will be consistent.
:param face: a list representing the face. This list is of inner, outer, and segment objects in the
order i, o, s, o, i, repeat.
:return: ordered face in canonical order
"""
# find the first numerical face then go right before it
# initialize face num as a relatively high number we won't encounter
facenum = 333
# initialize the int for where we will split the list
start_ind = 0
# loop through and find the face we want to find
for i in range(len(face)):
try:
if int(face[i].getName()) < facenum:
# To get here, we must have found a lower face
# keep track of where this is located in the list
start_ind = i - 1
# make our current lowest face the new lowest face to keep comparing to.
facenum = int(face[i].getName())
# if we try casting a letter to a number, python will get upset, but that also means we're looking at
# an inner node, which we don't want for this anyways.
except ValueError:
continue
# make our ordered face getting from the starting index to the end, then wrapping around and getting the rest of
# the face
ord_face = face[start_ind:] + face[:start_ind]
# go through and make sure we don't have any duplicate elements right by each other. If we do, then drop them.
for i in range(len(ord_face) - 1):
if ord_face[i].toString() == ord_face[i + 1].toString():
ord_face.pop(i)
break
# return the ordered face
return ord_face
def grabAllTheFaces(inns: Dict[str, Inner]) -> list:
"""
Function to get the list of unique faces for our circle.
:param inns: dictionary of Inner objects. We will loop through these to get the faces
:return: faces: List of distinct faces in canonical order.
"""
# initialize the list of faces
faces = list()
# a set of all the elements we have covered by the faces. Will use this for a completeness check
covered = set()
# run through every inner node we've been given
for inn in inns:
# Generate the face that inner node lies on
face = findTheFace(inns[inn])
# put the face we've gotten in canonical order
face = faceCannonOrder(face)
# Check if we've already captured it.
if face not in faces:
# If not, then add it to our list of faces
faces.append(face)
# Go ahead and add the elements in this face to our covered set
covered.update(face)
# check we've gotten all the elements
if len(covered) == (3 * len(inns)):
print('We got em!!!')
# Now return a list of all the faces we have.
return faces
def printCircleStatus(segs: Dict[str, Segment], outs: Dict[str, Outer], inns: Dict[str, Inner]):
"""
Helper function that prints the status of the circle to the console
:param segs: dictionary of str: Segment objects in the circle
:param outs: dictionary of str: Outer objects in the circle
:param inns: dictionary of str: Inner objects in the circle
:return: None
"""
# Run through the segments
print('\nSegments:')
for k in segs:
print()
print(k)
print(segs[k].toString())
# Run through the Outer nodes
print('\nOuters:')
for k in outs:
print()
print(k)
print(outs[k].toString())
# Run through the Inner nodes
print('\nInners:')
for k in inns:
print()
print(k)
print(inns[k].toString())
if __name__ == '__main__':
# This is where you change the variables.
# must be a positive integer > 2
verts = 12
# Must be a string with spaces between each element. If you want to denote multiple cycles, you must add a |
switch_txt = '2 3 4 5 | 12 7'
# we're going to make a list of all the switches and all the cycles
switches = list()
# first, we get the cycles, split by '|'
cycles = switch_txt.split('|')
for c in cycles:
# We're going to split the switch into a list split by the whitespace
s = c.strip().split()
# Then we're going to append the switches in the cycle to the new list
switches.append(s)
# Go ahead and make the standard circle given the number of vertices we want to use.
segments, outers, inners = standardCircle(verts)
# Go through and grab the faces for our standard circle
facs = grabAllTheFaces(inners)
print('\nPrinting the faces')
for f in facs:
print()
for p in f:
sys.stdout.write(p.getName() + ' ')
# Go through and do the switches for each cycle
for switch in switches:
for num in range(len(switch)):
# store the current part of the switch we're working on
cs = switch[num]
# store the next part of the switch we're working on, looping to the beginning if we're at the end
ns = switch[(num + 1) % len(switch)]
# Do the actual switch
# Getting the new inner and outer validly switched up
inners[string.ascii_letters[int(cs) - 1]].setAdjOuter(outers[ns])
outers[ns].setAdjInner(inners[string.ascii_letters[int(cs) - 1]])
# print how the final rotation sits
printCircleStatus(segments, outers, inners)
# Go through and generate and print the new faces
new_facs = grabAllTheFaces(inners)
print('\nPrinting the new faces')
for f in new_facs:
print()
for p in f:
sys.stdout.write(p.getName() + ' ')
| 36.500971 | 123 | 0.6223 |
db51ec07d8e04f942c3e7a0e0c331ea715cd23c8 | 19,075 | py | Python | PT-FROST/frost.py | EtienneDavid/FROST | 1cea124d69f07e3ac7e3ad074059d29c0849254c | [
"MIT"
] | 2 | 2020-12-21T12:46:06.000Z | 2021-03-02T08:28:15.000Z | PT-FROST/frost.py | yogsin/FROST | 1cea124d69f07e3ac7e3ad074059d29c0849254c | [
"MIT"
] | null | null | null | PT-FROST/frost.py | yogsin/FROST | 1cea124d69f07e3ac7e3ad074059d29c0849254c | [
"MIT"
] | 2 | 2020-12-20T15:04:24.000Z | 2021-11-21T12:29:02.000Z | import random
import argparse
import numpy as np
import pandas as pd
import os
import time
import string
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from model import WideResnet
from cifar import get_train_loader, get_val_loader
from label_guessor import LabelGuessor
from lr_scheduler import WarmupCosineLrScheduler
from ema import EMA
import utils
## args
parser = argparse.ArgumentParser(description=' FixMatch Training')
parser.add_argument('--wresnet-k', default=2, type=int, help='width factor of wide resnet')
parser.add_argument('--wresnet-n', default=28, type=int, help='depth of wide resnet')
parser.add_argument('--n-classes', type=int, default=10, help='number of classes in dataset')
parser.add_argument('--n-labeled', type=int, default=10, help='number of labeled samples for training')
parser.add_argument('--n-epochs', type=int, default=256, help='number of training epochs')
parser.add_argument('--batchsize', type=int, default=64, help='train batch size of labeled samples')
parser.add_argument('--mu', type=int, default=7, help='factor of train batch size of unlabeled samples')
parser.add_argument('--mu-c', type=int, default=1, help='factor of train batch size of contrastive learing samples')
parser.add_argument('--thr', type=float, default=0.95, help='pseudo label threshold')
parser.add_argument('--n-imgs-per-epoch', type=int, default=50000, help='number of training images for each epoch')
parser.add_argument('--lam-x', type=float, default=1., help='coefficient of labeled loss')
parser.add_argument('--lam-u', type=float, default=1., help='coefficient of unlabeled loss')
parser.add_argument('--lam-clr', type=float, default=1., help='coefficient of contrastive loss')
parser.add_argument('--ema-alpha', type=float, default=0.999, help='decay rate for ema module')
parser.add_argument('--lr', type=float, default=0.03, help='learning rate for training')
parser.add_argument('--weight-decay', type=float, default=5e-4, help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum for optimizer')
parser.add_argument('--seed', type=int, default=-1, help='seed for random behaviors, no seed if negtive')
parser.add_argument('--feature_dim', default=128, type=int, help='Feature dim for latent vector')
parser.add_argument('--temperature', default=0.5, type=float, help='Temperature used in softmax')
parser.add_argument('--k', default=200, type=int, help='Top k most similar images used to predict the label')
parser.add_argument('--test', default=0, type=int, help='0 is softmax test function, 1 is similarity test function')
parser.add_argument('--bootstrap', type=int, default=16, help='Bootstrapping factor (default=16)')
parser.add_argument('--boot-schedule', type=int, default=1, help='Bootstrapping schedule (default=1)')
parser.add_argument('--balance', type=int, default=0, help='Balance class methods to use (default=0 None)')
parser.add_argument('--delT', type=float, default=0.2, help='Class balance threshold delta (default=0.2)')
args = parser.parse_args()
print(args)
# save results
save_name_pre = '{}_E{}_B{}_LX{}_LU{}_LCLR{}_THR{}_LR{}_WD{}'.format(args.n_labeled, args.n_epochs, args.batchsize,
args.lam_x, args.lam_u, args.lam_clr, args.thr, args.lr, args.weight_decay)
ticks = time.time()
result_dir = 'results/' + save_name_pre + '.' + str(ticks)
if not os.path.exists(result_dir):
os.mkdir(result_dir)
if __name__ == '__main__':
train()
| 46.074879 | 131 | 0.618244 |
db52db9f4875bf2abe871f56389adc2f255c93ca | 8,456 | py | Python | Logistic Regression/main.py | Frightera/LR-and-NN-for-Cancer-Data | 54f8c9455af529c512efe012d8b3ed3f6b594a57 | [
"MIT"
] | 4 | 2021-03-10T22:18:35.000Z | 2022-03-06T15:37:23.000Z | Logistic Regression/main.py | Frightera/LR-From-Scratch | 54f8c9455af529c512efe012d8b3ed3f6b594a57 | [
"MIT"
] | null | null | null | Logistic Regression/main.py | Frightera/LR-From-Scratch | 54f8c9455af529c512efe012d8b3ed3f6b594a57 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
data = pd.read_csv("data.csv")
data.info()
"""
Data columns (total 33 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 id 569 non-null int64
.
.
.
32 Unnamed: 32 0 non-null float64
"""
data.drop(["Unnamed: 32", "id"], axis = 1, inplace = True)
# data.head(10)
data.diagnosis = [1 if each == "M" else 0 for each in data.diagnosis]
y = data.diagnosis.values
x_data = data.drop(["diagnosis"], axis = 1)
# %% Normalization
x_normalized = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data)).values
x_data.head()
"""
x_data.head()
Out[9]:
radius_mean texture_mean ... symmetry_worst fractal_dimension_worst
0 17.99 10.38 ... 0.4601 0.11890
1 20.57 17.77 ... 0.2750 0.08902
2 19.69 21.25 ... 0.3613 0.08758
3 11.42 20.38 ... 0.6638 0.17300
4 20.29 14.34 ... 0.2364 0.07678
"""
x_normalized.head()
"""
x_normalized.head()
Out[10]:
radius_mean texture_mean ... symmetry_worst fractal_dimension_worst
0 0.521037 0.022658 ... 0.598462 0.418864
1 0.643144 0.272574 ... 0.233590 0.222878
2 0.601496 0.390260 ... 0.403706 0.213433
3 0.210090 0.360839 ... 1.000000 0.773711
4 0.629893 0.156578 ... 0.157500 0.142595
"""
# %% train test split
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x_normalized,y,test_size = 0.25, random_state = 42)
# test size & random state can be changed, test size can be choosen as 0.2 or 0.18
# sklearn randomly splits, with given state data will be splitted with same random pattern.
# rows as features
x_train = x_train.T
x_test = x_test.T
y_train = y_train.T
y_test = y_test.T
# %% Parameter Initialize
"""
If all the weights were initialized to zero,
backpropagation will not work as expected because the gradient for the intermediate neurons
and starting neurons will die out(become zero) and will not update ever.
"""
# %%
# Updating(learning) parameters
# prediction
#implementing logistic regression
# %% Hyperparameter tuning
logistic_regression(x_train, y_train, x_test, y_test,learning_rate = 3, num_iterations = 1500)
"""
Cost after iteration 0: 0.693035
Cost after iteration 100: 0.153169
Cost after iteration 200: 0.121662
Cost after iteration 300: 0.107146
Cost after iteration 400: 0.098404
Cost after iteration 500: 0.092401
Cost after iteration 600: 0.087937
Cost after iteration 700: 0.084435
Cost after iteration 800: 0.081582
Cost after iteration 900: 0.079191
Cost after iteration 1000: 0.077143
Cost after iteration 1100: 0.075359
Cost after iteration 1200: 0.073784
Cost after iteration 1300: 0.072378
Cost after iteration 1400: 0.071111
No handles with labels found to put in legend.
test accuracy: 98.6013986013986 %
train accuracy: 98.35680751173709 %
"""
logistic_regression(x_train, y_train, x_test, y_test,learning_rate = 1, num_iterations = 1500)
"""
Cost after iteration 0: 0.693035
Cost after iteration 100: 0.226383
Cost after iteration 200: 0.176670
Cost after iteration 300: 0.153585
Cost after iteration 400: 0.139306
Cost after iteration 500: 0.129319
Cost after iteration 600: 0.121835
Cost after iteration 700: 0.115963
Cost after iteration 800: 0.111204
Cost after iteration 900: 0.107248
No handles with labels found to put in legend.
Cost after iteration 1000: 0.103893
Cost after iteration 1100: 0.101001
Cost after iteration 1200: 0.098474
Cost after iteration 1300: 0.096240
Cost after iteration 1400: 0.094247
test accuracy: 97.9020979020979 %
train accuracy: 98.12206572769954 %
"""
logistic_regression(x_train, y_train, x_test, y_test,learning_rate = 0.3, num_iterations = 1500)
"""
Cost after iteration 0: 0.693035
Cost after iteration 100: 0.357455
Cost after iteration 200: 0.274917
Cost after iteration 300: 0.235865
Cost after iteration 400: 0.212165
Cost after iteration 500: 0.195780
Cost after iteration 600: 0.183524
Cost after iteration 700: 0.173868
Cost after iteration 800: 0.165980
Cost after iteration 900: 0.159363
Cost after iteration 1000: 0.153700
Cost after iteration 1100: 0.148775
Cost after iteration 1200: 0.144439
Cost after iteration 1300: 0.140581
Cost after iteration 1400: 0.137119
No handles with labels found to put in legend.
test accuracy: 97.9020979020979 %
train accuracy: 96.94835680751174 %
"""
# %% Sklearn
from sklearn.linear_model import LogisticRegression
x_train = x_train.T
x_test = x_test.T
y_train = y_train.T
y_test = y_test.T
logreg = LogisticRegression(random_state = 42,max_iter= 1500)
print("test accuracy: {} ".format(logreg.fit(x_train, y_train).score(x_test, y_test)))
print("train accuracy: {} ".format(logreg.fit(x_train, y_train).score(x_train, y_train)))
"""
test accuracy: 0.986013986013986
train accuracy: 0.9671361502347418
"""
# %%
| 35.830508 | 113 | 0.674078 |
db5470b1f6ebd8cb49e975c2e7b8774a4d607820 | 2,446 | py | Python | fine-tune/inference_embedding.py | LinHuiqing/nonparaSeq2seqVC_code | d40a0cb9dc11c77b8af56b8510e4ab041f2f2b25 | [
"MIT"
] | 199 | 2019-12-13T03:11:21.000Z | 2022-03-29T15:44:49.000Z | fine-tune/inference_embedding.py | LinHuiqing/nonparaSeq2seqVC_code | d40a0cb9dc11c77b8af56b8510e4ab041f2f2b25 | [
"MIT"
] | 39 | 2019-12-16T20:08:45.000Z | 2022-02-10T00:36:40.000Z | fine-tune/inference_embedding.py | LinHuiqing/nonparaSeq2seqVC_code | d40a0cb9dc11c77b8af56b8510e4ab041f2f2b25 | [
"MIT"
] | 57 | 2019-12-16T23:25:25.000Z | 2022-03-28T18:04:16.000Z | import os
import numpy as np
import torch
import argparse
from hparams import create_hparams
from model import lcm
from train import load_model
from torch.utils.data import DataLoader
from reader import TextMelIDLoader, TextMelIDCollate, id2sp
from inference_utils import plot_data
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--checkpoint_path', type=str,
help='directory to save checkpoints')
parser.add_argument('--hparams', type=str,
required=False, help='comma separated name=value pairs')
args = parser.parse_args()
checkpoint_path=args.checkpoint_path
hparams = create_hparams(args.hparams)
model = load_model(hparams)
model.load_state_dict(torch.load(checkpoint_path)['state_dict'], strict=False)
_ = model.eval()
print('Generating embedding of %s ...'%hparams.speaker_A)
gen_embedding(hparams.speaker_A)
print('Generating embedding of %s ...'%hparams.speaker_B)
gen_embedding(hparams.speaker_B)
| 33.054054 | 89 | 0.688062 |
db555bcdcf43aa3bbda4391fd627c19482dc0997 | 68,250 | py | Python | dalme_app/migrations/0001_initial.py | DALME/dalme | 46f9a0011fdb75c5098b552104fc73b1062e16e9 | [
"BSD-3-Clause"
] | 6 | 2019-05-07T01:06:04.000Z | 2021-02-19T20:45:09.000Z | dalme_app/migrations/0001_initial.py | DALME/dalme | 46f9a0011fdb75c5098b552104fc73b1062e16e9 | [
"BSD-3-Clause"
] | 23 | 2018-09-14T18:01:42.000Z | 2021-12-29T17:25:18.000Z | dalme_app/migrations/0001_initial.py | DALME/dalme | 46f9a0011fdb75c5098b552104fc73b1062e16e9 | [
"BSD-3-Clause"
] | 1 | 2020-02-10T16:20:57.000Z | 2020-02-10T16:20:57.000Z | # Generated by Django 3.1.2 on 2020-11-29 13:25
import dalme_app.models._templates
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_currentuser.middleware
import uuid
import wagtail.search.index
| 72.761194 | 257 | 0.636674 |
db55b37705e1ee35cb592342b49dbe69963ce12a | 105 | py | Python | django_app/DataEntrySystem/apps.py | Hezepeng/Financial-Acquisition-And-Editing-System | 0781101e596a31d90bcfa3d67622472c04c6149f | [
"MIT"
] | null | null | null | django_app/DataEntrySystem/apps.py | Hezepeng/Financial-Acquisition-And-Editing-System | 0781101e596a31d90bcfa3d67622472c04c6149f | [
"MIT"
] | null | null | null | django_app/DataEntrySystem/apps.py | Hezepeng/Financial-Acquisition-And-Editing-System | 0781101e596a31d90bcfa3d67622472c04c6149f | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 17.5 | 39 | 0.790476 |
db569a6325c560b769cb648e074b4a8fea4a1b00 | 3,954 | py | Python | bombgame/recursive_bt_maze.py | JeFaProductions/bombgame2 | fc2ca7c6606aecd2bec013ed307aa344a0adffc7 | [
"MIT"
] | null | null | null | bombgame/recursive_bt_maze.py | JeFaProductions/bombgame2 | fc2ca7c6606aecd2bec013ed307aa344a0adffc7 | [
"MIT"
] | 2 | 2019-04-04T13:53:11.000Z | 2019-11-28T17:02:00.000Z | bombgame/recursive_bt_maze.py | JeFaProductions/bombgame2 | fc2ca7c6606aecd2bec013ed307aa344a0adffc7 | [
"MIT"
] | null | null | null | # recursive_bt_maze.py
#
# Author: Jens Gansloser
# Created On: 16 Feb 2019
import os
import random
import numpy as np
| 28.861314 | 107 | 0.508346 |
db579a2c18ea2f40634d5108f68e0bca010002d0 | 5,608 | py | Python | KV_Reader.py | Nibuja05/KVConverter | 74f810df4ac82358f405eac9c2f56dce13b69302 | [
"MIT"
] | 2 | 2020-07-06T00:24:27.000Z | 2021-09-20T20:16:36.000Z | KV_Reader.py | Nibuja05/KVConverter | 74f810df4ac82358f405eac9c2f56dce13b69302 | [
"MIT"
] | null | null | null | KV_Reader.py | Nibuja05/KVConverter | 74f810df4ac82358f405eac9c2f56dce13b69302 | [
"MIT"
] | null | null | null |
import re
import math
def read_file(path):
#path = input("Please enter the path of the KV File:")
#path = "C:\\Steam\\steamapps\\common\\dota 2 beta\\game\\dota_addons\\heataria\\scripts\\npc\\abilities\\heataria_blaze_path.txt"
try:
file = open(path, "r")
text = file.read()
except FileNotFoundError:
text = read_file()
finally:
master = KVPart("master")
master.set_master(True)
progress_text(text, master)
return master
#processes a KV textfile into a KV_Part structure
| 26.704762 | 131 | 0.684736 |
db587b6771666fcfb06093ced1689bf5fcf21ace | 3,476 | py | Python | scripts/updatetestsuiterefimages.py | PaulDoessel/appleseed | 142908e05609cd802b3ab937ff27ef2b73dd3088 | [
"MIT"
] | null | null | null | scripts/updatetestsuiterefimages.py | PaulDoessel/appleseed | 142908e05609cd802b3ab937ff27ef2b73dd3088 | [
"MIT"
] | null | null | null | scripts/updatetestsuiterefimages.py | PaulDoessel/appleseed | 142908e05609cd802b3ab937ff27ef2b73dd3088 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2014-2016 Francois Beaune, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import print_function
import argparse
import os
import shutil
#--------------------------------------------------------------------------------------------------
# Utility functions.
#--------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
# Update reference images in a given test suite directory.
#--------------------------------------------------------------------------------------------------
def update_ref_images(parent_dir):
renders_dir = os.path.join(parent_dir, "renders")
ref_dir = os.path.join(parent_dir, "ref")
safe_mkdir(ref_dir)
for filename in os.listdir(renders_dir):
if os.path.splitext(filename)[1] == ".png":
src_path = os.path.join(renders_dir, filename)
dst_path = os.path.join(ref_dir, filename)
print(" copying {0} to {1}...".format(src_path, dst_path))
shutil.copyfile(src_path, dst_path)
#--------------------------------------------------------------------------------------------------
# Entry point.
#--------------------------------------------------------------------------------------------------
if __name__ == '__main__':
main()
| 39.954023 | 99 | 0.592923 |
db58e1a129781006da344d7eb154b8ae346ffb44 | 4,244 | py | Python | raidquaza/poll/polls.py | Breee/raidquaza | 308d643e71eddf6f6dc432c01322a02d604ac70e | [
"MIT"
] | 2 | 2019-03-12T16:44:24.000Z | 2020-04-13T21:06:20.000Z | raidquaza/poll/polls.py | Breee/raidquaza | 308d643e71eddf6f6dc432c01322a02d604ac70e | [
"MIT"
] | 5 | 2019-07-13T00:11:42.000Z | 2021-07-29T11:55:39.000Z | raidquaza/poll/polls.py | Breee/raidquaza | 308d643e71eddf6f6dc432c01322a02d604ac70e | [
"MIT"
] | null | null | null | from typing import List, Any
import time
from discord import Embed, Reaction
from utils import uniquify
# EMOJIS regional_indicator_A to regional_indicator_T
reaction_emojies = ['\U0001F1E6',
'\U0001F1E7',
'\U0001F1E8',
'\U0001F1E9',
'\U0001F1EA',
'\U0001F1EB',
'\U0001F1EC',
'\U0001F1ED',
'\U0001F1EE',
'\U0001F1EF',
'\U0001F1F0',
'\U0001F1F1',
'\U0001F1F2',
'\U0001F1F3',
'\U0001F1F4',
'\U0001F1F5',
'\U0001F1F6',
'\U0001F1F7',
'\U0001F1F8',
'\U0001F1F9']
number_emojies = {'rq_plus_one': 1, 'rq_plus_two': 2, 'rq_plus_three': 3, 'rq_plus_four': 4}
| 41.203883 | 119 | 0.583176 |
db59947574fede70d491b2341a72a67a1fae3994 | 387 | py | Python | Python/Regex and Parsing/Validating and Parsing Email Addresses.py | pavstar619/HackerRank | 697ee46b6e621ad884a064047461d7707b1413cd | [
"MIT"
] | 61 | 2017-04-27T13:45:12.000Z | 2022-01-27T11:40:15.000Z | Python/Regex and Parsing/Validating and Parsing Email Addresses.py | fahad0193/HackerRank | eb6c95e16688c02921c1df6b6ea613667a251457 | [
"MIT"
] | 1 | 2017-06-24T14:16:06.000Z | 2017-06-24T14:16:28.000Z | Python/Regex and Parsing/Validating and Parsing Email Addresses.py | fahad0193/HackerRank | eb6c95e16688c02921c1df6b6ea613667a251457 | [
"MIT"
] | 78 | 2017-07-05T11:48:20.000Z | 2022-02-08T08:04:22.000Z | import email.utils as em
import re
if __name__ == '__main__':
obj = Main()
| 24.1875 | 87 | 0.4677 |
db5e2687d797299a53905ef091a13e9ae1079979 | 2,814 | py | Python | chatbot/train.py | codingsoo/virtaul_girlfriend | 7343cb95cc8ab345b735fdb07cfac8176cc41f76 | [
"Apache-2.0"
] | 4 | 2017-02-04T04:51:23.000Z | 2017-09-07T08:30:36.000Z | chatbot/train.py | HyungKen/Fake_love | 21397e346c933cbbace59a9bd26c06789ff5c172 | [
"MIT"
] | 11 | 2017-02-03T06:23:27.000Z | 2017-02-04T02:57:35.000Z | chatbot/train.py | HyungKen/Fake_love | 21397e346c933cbbace59a9bd26c06789ff5c172 | [
"MIT"
] | 7 | 2017-02-03T04:16:48.000Z | 2020-03-20T15:23:34.000Z | # -*- coding: utf-8 -*-
import tensorflow as tf
import random
import math
import os
from config import FLAGS
from model import Seq2Seq
from dialog import Dialog
if __name__ == "__main__":
tf.app.run()
| 31.617978 | 84 | 0.638237 |
db626314c3f603e0417951997ccb255cc99fda86 | 2,900 | py | Python | evaluation/dmp_behavior.py | rock-learning/approxik | 877d50d4d045457593a2fafefd267339a11de20f | [
"BSD-3-Clause"
] | 1 | 2020-03-27T01:53:57.000Z | 2020-03-27T01:53:57.000Z | evaluation/dmp_behavior.py | rock-learning/approxik | 877d50d4d045457593a2fafefd267339a11de20f | [
"BSD-3-Clause"
] | null | null | null | evaluation/dmp_behavior.py | rock-learning/approxik | 877d50d4d045457593a2fafefd267339a11de20f | [
"BSD-3-Clause"
] | 1 | 2020-12-18T02:09:21.000Z | 2020-12-18T02:09:21.000Z | # Author: Alexander Fabisch <Alexander.Fabisch@dfki.de>
import numpy as np
from bolero.representation import BlackBoxBehavior
from bolero.representation import DMPBehavior as DMPBehaviorImpl
| 28.712871 | 79 | 0.633103 |
db63fcffdf47984065f99dc88667ff4cd4c8ed3b | 489 | py | Python | logger.py | oxsoftdev/bitstampws-logger | 5597010cad53cd55e949235fbc191f8b1aad344d | [
"MIT"
] | null | null | null | logger.py | oxsoftdev/bitstampws-logger | 5597010cad53cd55e949235fbc191f8b1aad344d | [
"MIT"
] | null | null | null | logger.py | oxsoftdev/bitstampws-logger | 5597010cad53cd55e949235fbc191f8b1aad344d | [
"MIT"
] | null | null | null | import logging.config
import tornado
from bitstampws import Client as Websocket
import lib.configs.logging
from lib.subscribers import SimpleLoggerSubscriber
logging.config.dictConfig(lib.configs.logging.d)
if __name__ == '__main__':
with Websocket() as client:
with SimpleLoggerSubscriber(client):
client.connect()
try:
tornado.ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
client.close()
| 23.285714 | 56 | 0.678937 |
db643ae984ce9c0d8dd5236851af05c04998a27b | 6,746 | py | Python | engine/tree.py | dougsc/gp | d144dd1f483150b26483077e6e5032f4f21a6d4e | [
"Apache-2.0"
] | null | null | null | engine/tree.py | dougsc/gp | d144dd1f483150b26483077e6e5032f4f21a6d4e | [
"Apache-2.0"
] | null | null | null | engine/tree.py | dougsc/gp | d144dd1f483150b26483077e6e5032f4f21a6d4e | [
"Apache-2.0"
] | null | null | null | import random
from pprint import pformat
from copy import deepcopy
from utils.logger import GP_Logger
from terminal_set import TerminalSet
| 40.154762 | 141 | 0.67225 |
db64a112c82f9adeb1221b9eb9fef389c1ea9873 | 276 | py | Python | src/pyrin/packaging/__init__.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | [
"BSD-3-Clause"
] | null | null | null | src/pyrin/packaging/__init__.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | [
"BSD-3-Clause"
] | null | null | null | src/pyrin/packaging/__init__.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
packaging package.
"""
from pyrin.packaging.base import Package
| 16.235294 | 42 | 0.655797 |
db64c7127d561a8ba836f248730b0617bfb376eb | 368 | py | Python | chap7/heapq_merge.py | marble-git/python-laoqi | 74c4bb5459113e54ce64443e5da5a9c6a3052d6a | [
"MIT"
] | null | null | null | chap7/heapq_merge.py | marble-git/python-laoqi | 74c4bb5459113e54ce64443e5da5a9c6a3052d6a | [
"MIT"
] | null | null | null | chap7/heapq_merge.py | marble-git/python-laoqi | 74c4bb5459113e54ce64443e5da5a9c6a3052d6a | [
"MIT"
] | null | null | null | #coding:utf-8
'''
filename:heapq_merge.py
chap:7
subject:4-2
conditions:heapq.merge,sorted_list:lst1,lst2
lst3=merged_list(lst1,lst2) is sorted
solution:heapq.merge
'''
import heapq
lst1 = [1,3,5,7,9]
lst2 = [2,4,6,8]
if __name__ == '__main__':
lst3 = heapq.merge(lst1,lst2)
print('lst3',lst3)
print(list(lst3))
| 14.72 | 49 | 0.616848 |
db64f80a8ca557a291741dc4fd34c7d58b0c51f0 | 7,181 | py | Python | lib/googlecloudsdk/third_party/apis/serviceuser/v1/serviceuser_v1_client.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/third_party/apis/serviceuser/v1/serviceuser_v1_client.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 11 | 2020-02-29T02:51:12.000Z | 2022-03-30T23:20:08.000Z | lib/googlecloudsdk/third_party/apis/serviceuser/v1/serviceuser_v1_client.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 1 | 2020-07-25T18:17:57.000Z | 2020-07-25T18:17:57.000Z | """Generated client library for serviceuser version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.serviceuser.v1 import serviceuser_v1_messages as messages
| 38.40107 | 179 | 0.709372 |
db654a453fae8398e895160a150ba86dbbcc20b1 | 1,966 | py | Python | bindings/python/examples/feature_example.py | lithathampan/wav2letter | 8abf8431d99da147cc4aefc289ad33626e13de6f | [
"BSD-3-Clause"
] | 1 | 2020-07-27T20:51:32.000Z | 2020-07-27T20:51:32.000Z | bindings/python/examples/feature_example.py | lithathampan/wav2letter | 8abf8431d99da147cc4aefc289ad33626e13de6f | [
"BSD-3-Clause"
] | null | null | null | bindings/python/examples/feature_example.py | lithathampan/wav2letter | 8abf8431d99da147cc4aefc289ad33626e13de6f | [
"BSD-3-Clause"
] | 1 | 2021-09-27T16:18:20.000Z | 2021-09-27T16:18:20.000Z | #!/usr/bin/env python3
# adapted from wav2letter/src/feature/test/MfccTest.cpp
import itertools as it
import os
import sys
from wav2letter.feature import FeatureParams, Mfcc
if __name__ == "__main__":
if len(sys.argv) != 2:
print(f"usage: {sys.argv[0]} feature_test_data_path", file=sys.stderr)
print(" (usually: <wav2letter_root>/src/feature/test/data)", file=sys.stderr)
sys.exit(1)
data_path = sys.argv[1]
wavinput = load_data("sa1.dat")
# golden features to compare
htkfeatures = load_data("sa1-mfcc.htk")
assert len(wavinput) > 0
assert len(htkfeatures) > 0
params = FeatureParams()
# define parameters of the featurization
params.sampling_freq = 16000
params.low_freq_filterbank = 0
params.high_freq_filterbank = 8000
params.num_filterbank_chans = 20
params.num_cepstral_coeffs = 13
params.use_energy = False
params.zero_mean_frame = False
params.use_power = False
# apply MFCC featurization
mfcc = Mfcc(params)
features = mfcc.apply(wavinput)
# check that obtained features are the same as golden one
assert len(features) == len(htkfeatures)
assert len(features) % 39 == 0
numframes = len(features) // 39
featurescopy = features.copy()
for f in range(numframes):
for i in range(1, 39):
features[f * 39 + i - 1] = features[f * 39 + i]
features[f * 39 + 12] = featurescopy[f * 39 + 0]
features[f * 39 + 25] = featurescopy[f * 39 + 13]
features[f * 39 + 38] = featurescopy[f * 39 + 26]
differences = [abs(x[0] - x[1]) for x in zip(features, htkfeatures)]
print(f"max_diff={max(differences)}")
print(f"avg_diff={sum(differences)/len(differences)}")
| 30.71875 | 86 | 0.657172 |
db65bd23cd7117025faa3493e9ff0bcdc4419ed0 | 3,227 | py | Python | app.py | shreyashack/PY_Message_Decryption | 251a82ee26c529ff63668328230c9d494f4c9cfa | [
"MIT"
] | 1 | 2020-11-18T10:01:13.000Z | 2020-11-18T10:01:13.000Z | app.py | shreyashack/PY_Message_Decryption | 251a82ee26c529ff63668328230c9d494f4c9cfa | [
"MIT"
] | null | null | null | app.py | shreyashack/PY_Message_Decryption | 251a82ee26c529ff63668328230c9d494f4c9cfa | [
"MIT"
] | null | null | null | from tkinter import *
import onetimepad
if __name__ == "__main__":
root=Tk()
Message_Decrypt(root)
root.mainloop()
| 34.329787 | 154 | 0.577007 |
db66779a2882ba639d36d1d562ab73945afc92fc | 1,317 | py | Python | examples/rrbot_p2p_low_energy.py | abcamiletto/urdf2optcontrol | 39b3f761a4685cc7d50b48793b6b2906c89b1694 | [
"MIT"
] | null | null | null | examples/rrbot_p2p_low_energy.py | abcamiletto/urdf2optcontrol | 39b3f761a4685cc7d50b48793b6b2906c89b1694 | [
"MIT"
] | null | null | null | examples/rrbot_p2p_low_energy.py | abcamiletto/urdf2optcontrol | 39b3f761a4685cc7d50b48793b6b2906c89b1694 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from urdf2optcontrol import optimizer
from matplotlib import pyplot as plt
import pathlib
# URDF options
urdf_path = pathlib.Path(__file__).parent.joinpath('urdf', 'rrbot.urdf').absolute()
root = "link1"
end = "link3"
in_cond = [0] * 4
my_constraints = [my_constraint1, my_constraint2]
my_final_constraints = [my_final_constraint1, my_final_constraint2]
time_horizon = 2.0
steps = 40
# Load the urdf and calculate the differential equations
optimizer.load_robot(urdf_path, root, end)
# Loading the problem conditions
optimizer.load_problem(
my_cost_func,
steps,
in_cond,
time_horizon=time_horizon,
constraints=my_constraints,
final_constraints=my_final_constraints,
max_iter=500
)
# Solving the non linear problem
res = optimizer.solve()
print('u = ', res['u'][0])
print('q = ', res['q'][0])
# Print the results!
fig = optimizer.plot_result(show=True)
| 21.241935 | 83 | 0.688686 |
db66ffcc00192c85b05965750638c6febdb95b51 | 15,803 | py | Python | SocketServer/apps/django-db-pool-master/dbpool/db/backends/postgresql_psycopg2/base.py | fqc/SocketSample_Mina_Socket | f5a7bb9bcd6052fe9e2a419c877073b32be4dc3d | [
"MIT"
] | 23 | 2015-01-28T13:31:24.000Z | 2020-03-11T18:11:45.000Z | SocketServer/apps/django-db-pool-master/dbpool/db/backends/postgresql_psycopg2/base.py | fqc/SocketSample_Mina_Socket | f5a7bb9bcd6052fe9e2a419c877073b32be4dc3d | [
"MIT"
] | 1 | 2015-04-30T12:01:00.000Z | 2015-04-30T13:33:38.000Z | SocketServer/apps/django-db-pool-master/dbpool/db/backends/postgresql_psycopg2/base.py | fqc/SocketSample_Mina_Socket | f5a7bb9bcd6052fe9e2a419c877073b32be4dc3d | [
"MIT"
] | 10 | 2015-05-27T12:52:19.000Z | 2021-01-13T13:35:11.000Z | """
Pooled PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
from django import get_version as get_django_version
from django.db.backends.postgresql_psycopg2.base import \
DatabaseWrapper as OriginalDatabaseWrapper
from django.db.backends.signals import connection_created
from threading import Lock
import logging
import sys
try:
import psycopg2 as Database
import psycopg2.extensions
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
logger = logging.getLogger(__name__)
'''
This holds our connection pool instances (for each alias in settings.DATABASES that
uses our PooledDatabaseWrapper.)
'''
connection_pools = {}
connection_pools_lock = Lock()
pool_config_defaults = {
'MIN_CONNS': None,
'MAX_CONNS': 1,
'TEST_ON_BORROW': False,
'TEST_ON_BORROW_QUERY': 'SELECT 1'
}
def _set_up_pool_config(self):
'''
Helper to configure pool options during DatabaseWrapper initialization.
'''
self._max_conns = self.settings_dict['OPTIONS'].get('MAX_CONNS', pool_config_defaults['MAX_CONNS'])
self._min_conns = self.settings_dict['OPTIONS'].get('MIN_CONNS', self._max_conns)
self._test_on_borrow = self.settings_dict["OPTIONS"].get('TEST_ON_BORROW',
pool_config_defaults['TEST_ON_BORROW'])
if self._test_on_borrow:
self._test_on_borrow_query = self.settings_dict["OPTIONS"].get('TEST_ON_BORROW_QUERY',
pool_config_defaults['TEST_ON_BORROW_QUERY'])
else:
self._test_on_borrow_query = None
def _create_connection_pool(self, conn_params):
'''
Helper to initialize the connection pool.
'''
connection_pools_lock.acquire()
try:
# One more read to prevent a read/write race condition (We do this
# here to avoid the overhead of locking each time we get a connection.)
if (self.alias not in connection_pools or
connection_pools[self.alias]['settings'] != self.settings_dict):
logger.info("Creating connection pool for db alias %s" % self.alias)
logger.info(" using MIN_CONNS = %s, MAX_CONNS = %s, TEST_ON_BORROW = %s" % (self._min_conns,
self._max_conns,
self._test_on_borrow))
from psycopg2 import pool
connection_pools[self.alias] = {
'pool': pool.ThreadedConnectionPool(self._min_conns, self._max_conns, **conn_params),
'settings': dict(self.settings_dict),
}
finally:
connection_pools_lock.release()
'''
Simple Postgres pooled connection that uses psycopg2's built-in ThreadedConnectionPool
implementation. In Django, use this by specifying MAX_CONNS and (optionally) MIN_CONNS
in the OPTIONS dictionary for the given db entry in settings.DATABASES.
MAX_CONNS should be equal to the maximum number of threads your app server is configured
for. For example, if you are running Gunicorn or Apache/mod_wsgi (in a multiple *process*
configuration) MAX_CONNS should be set to 1, since you'll have a dedicated python
interpreter per process/worker. If you're running Apache/mod_wsgi in a multiple *thread*
configuration set MAX_CONNS to the number of threads you have configured for each process.
By default MIN_CONNS will be set to MAX_CONNS, which prevents connections from being closed.
If your load is spikey and you want to recycle connections, set MIN_CONNS to something lower
than MAX_CONNS. I suggest it should be no lower than your 95th percentile concurrency for
your app server.
If you wish to validate connections on each check out, specify TEST_ON_BORROW (set to True)
in the OPTIONS dictionary for the given db entry. You can also provide an optional
TEST_ON_BORROW_QUERY, which is "SELECT 1" by default.
'''
'''
Choose a version of the DatabaseWrapper class to use based on the Django
version. This is a bit hacky, what's a more elegant way?
'''
django_version = get_django_version()
if django_version.startswith('1.3'):
from django.db.backends.postgresql_psycopg2.base import CursorWrapper
elif django_version.startswith('1.4') or django_version.startswith('1.5'):
from django.conf import settings
from django.db.backends.postgresql_psycopg2.base import utc_tzinfo_factory, \
CursorWrapper
# The force_str call around the password seems to be the only change from
# 1.4 to 1.5, so we'll use the same DatabaseWrapper class and make
# force_str a no-op.
try:
from django.utils.encoding import force_str
except ImportError:
force_str = lambda x: x
elif django_version.startswith('1.6'):
else:
raise ImportError("Unsupported Django version %s" % django_version)
| 44.767705 | 124 | 0.627729 |
db682583f2b418b3755329c159971a743aab45f6 | 589 | py | Python | backend/tests/test_api/test_api_auth.py | abodacs/fastapi-ml-skeleton | fa9a013d06e70cbaff9b9469db32246e41ce7e0f | [
"Apache-2.0"
] | null | null | null | backend/tests/test_api/test_api_auth.py | abodacs/fastapi-ml-skeleton | fa9a013d06e70cbaff9b9469db32246e41ce7e0f | [
"Apache-2.0"
] | 3 | 2020-03-16T22:07:31.000Z | 2021-06-25T15:33:38.000Z | backend/tests/test_api/test_api_auth.py | abodacs/fastapi-ml-skeleton | fa9a013d06e70cbaff9b9469db32246e41ce7e0f | [
"Apache-2.0"
] | null | null | null | # Skeleton
from fastapi_skeleton.core import messages
| 34.647059 | 86 | 0.728353 |
db68dcb7ad2aa62124559726780ed4b83d08a974 | 2,510 | py | Python | docker/cleanup_generators.py | hashnfv/hashnfv-nfvbench | 8da439b932537748d379c7bd3bdf560ef739b203 | [
"Apache-2.0"
] | null | null | null | docker/cleanup_generators.py | hashnfv/hashnfv-nfvbench | 8da439b932537748d379c7bd3bdf560ef739b203 | [
"Apache-2.0"
] | null | null | null | docker/cleanup_generators.py | hashnfv/hashnfv-nfvbench | 8da439b932537748d379c7bd3bdf560ef739b203 | [
"Apache-2.0"
] | 1 | 2019-07-14T14:54:15.000Z | 2019-07-14T14:54:15.000Z | # Copyright 2016 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
TREX_OPT = '/opt/trex'
TREX_UNUSED = [
'_t-rex-64-debug', '_t-rex-64-debug-o', 'bp-sim-64', 'bp-sim-64-debug',
't-rex-64-debug', 't-rex-64-debug-o', 'automation/__init__.py',
'automation/graph_template.html',
'automation/config', 'automation/h_avc.py', 'automation/phantom',
'automation/readme.txt', 'automation/regression', 'automation/report_template.html',
'automation/sshpass.exp', 'automation/trex_perf.py', 'wkhtmltopdf-amd64'
]
def remove_unused_libs(path, files):
"""
Remove files not used by traffic generator.
"""
for f in files:
f = os.path.join(path, f)
try:
if os.path.isdir(f):
shutil.rmtree(f)
else:
os.remove(f)
except OSError:
print "Skipped file:"
print f
continue
def get_dir_size(start_path='.'):
"""
Computes size of directory.
:return: size of directory with subdirectiories
"""
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
try:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
except OSError:
continue
return total_size
if __name__ == "__main__":
versions = os.listdir(TREX_OPT)
for version in versions:
trex_path = os.path.join(TREX_OPT, version)
print 'Cleaning TRex', version
try:
size_before = get_dir_size(trex_path)
remove_unused_libs(trex_path, TREX_UNUSED)
size_after = get_dir_size(trex_path)
print '==== Saved Space ===='
print size_before - size_after
except OSError:
import traceback
print traceback.print_exc()
print 'Cleanup was not finished.'
| 31.772152 | 88 | 0.622709 |
db693358ac60e6cb090422f46492eb2fca4b02bf | 2,434 | py | Python | object_detection/box_coders/mean_stddev_box_coder.py | ophirSarusi/TF_Object_Detection | e08ccd18c6f14586e048048a445cf5a10dbc7c4d | [
"MIT"
] | 59 | 2018-09-23T09:34:24.000Z | 2020-03-10T04:31:27.000Z | object_detection/box_coders/mean_stddev_box_coder.py | ophirSarusi/TF_Object_Detection | e08ccd18c6f14586e048048a445cf5a10dbc7c4d | [
"MIT"
] | 46 | 2018-07-10T23:53:15.000Z | 2022-02-06T03:31:47.000Z | object_detection/box_coders/mean_stddev_box_coder.py | ophirSarusi/TF_Object_Detection | e08ccd18c6f14586e048048a445cf5a10dbc7c4d | [
"MIT"
] | 58 | 2018-09-23T10:31:47.000Z | 2021-11-08T11:34:40.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mean stddev box coder.
This box coder use the following coding schema to encode boxes:
rel_code = (box_corner - anchor_corner_mean) / anchor_corner_stddev.
"""
from object_detection.core import box_coder
from object_detection.core import box_list
| 34.28169 | 81 | 0.671323 |
db6a7657f91ac9f80bc299ba273000b77ee1c28c | 490 | py | Python | storage/aug_buffer.py | nsortur/equi_rl | 83bd2ee9dfaab715e51b71ffff90ab990aaed5f8 | [
"MIT"
] | 9 | 2022-02-20T18:18:51.000Z | 2022-03-24T03:04:44.000Z | storage/aug_buffer.py | nsortur/equi_rl | 83bd2ee9dfaab715e51b71ffff90ab990aaed5f8 | [
"MIT"
] | null | null | null | storage/aug_buffer.py | nsortur/equi_rl | 83bd2ee9dfaab715e51b71ffff90ab990aaed5f8 | [
"MIT"
] | 2 | 2022-02-19T05:17:06.000Z | 2022-02-21T20:53:26.000Z | from storage.buffer import QLearningBuffer
from utils.torch_utils import ExpertTransition, augmentTransition
from utils.parameters import buffer_aug_type
| 25.789474 | 71 | 0.72449 |
db6aa256e7b60e45c5a9fbde4a14ff7a63101137 | 3,544 | py | Python | hlrl/torch/agents/wrappers/agent.py | Chainso/HLRL | 584f4ed2fa4d8b311a21dbd862ec9434833dd7cd | [
"MIT"
] | null | null | null | hlrl/torch/agents/wrappers/agent.py | Chainso/HLRL | 584f4ed2fa4d8b311a21dbd862ec9434833dd7cd | [
"MIT"
] | null | null | null | hlrl/torch/agents/wrappers/agent.py | Chainso/HLRL | 584f4ed2fa4d8b311a21dbd862ec9434833dd7cd | [
"MIT"
] | null | null | null | import torch
from typing import Any, Dict, List, OrderedDict, Tuple
from hlrl.core.agents import RLAgent
from hlrl.core.common.wrappers import MethodWrapper
| 27.905512 | 79 | 0.577596 |
db6b5bcc7b8379dc6e51f6670d5ff0c0d562417c | 649 | py | Python | PixivConstant.py | NHOrus/PixivUtil2 | facd6b1a21e4adf5edf1de4d4809e94e834246b6 | [
"BSD-2-Clause"
] | null | null | null | PixivConstant.py | NHOrus/PixivUtil2 | facd6b1a21e4adf5edf1de4d4809e94e834246b6 | [
"BSD-2-Clause"
] | null | null | null | PixivConstant.py | NHOrus/PixivUtil2 | facd6b1a21e4adf5edf1de4d4809e94e834246b6 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
PIXIVUTIL_VERSION = '20191220-beta1'
PIXIVUTIL_LINK = 'https://github.com/Nandaka/PixivUtil2/releases'
PIXIVUTIL_DONATE = 'https://bit.ly/PixivUtilDonation'
# Log Settings
PIXIVUTIL_LOG_FILE = 'pixivutil.log'
PIXIVUTIL_LOG_SIZE = 10485760
PIXIVUTIL_LOG_COUNT = 10
PIXIVUTIL_LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
# Download Results
PIXIVUTIL_NOT_OK = -1
PIXIVUTIL_OK = 0
PIXIVUTIL_SKIP_OLDER = 1
PIXIVUTIL_SKIP_BLACKLIST = 2
PIXIVUTIL_KEYBOARD_INTERRUPT = 3
PIXIVUTIL_SKIP_DUPLICATE = 4
PIXIVUTIL_SKIP_LOCAL_LARGER = 5
PIXIVUTIL_CHECK_DOWNLOAD = 6
PIXIVUTIL_ABORTED = 9999
BUFFER_SIZE = 8192
| 25.96 | 77 | 0.784284 |
db6b74f1fcb56888f5ba09963ca5bb5ed146122f | 8,906 | py | Python | dynamic_schemas/views.py | Threemusketeerz/DSystems | cd03ad2fa6b55872d57bfd01a4ac781aa5cbed8c | [
"BSD-2-Clause"
] | 1 | 2018-01-23T12:23:48.000Z | 2018-01-23T12:23:48.000Z | dynamic_schemas/views.py | Threemusketeerz/DSystems | cd03ad2fa6b55872d57bfd01a4ac781aa5cbed8c | [
"BSD-2-Clause"
] | 1 | 2018-01-19T08:43:59.000Z | 2018-01-23T12:20:43.000Z | dynamic_schemas/views.py | Threemusketeerz/DSystems | cd03ad2fa6b55872d57bfd01a4ac781aa5cbed8c | [
"BSD-2-Clause"
] | null | null | null | from django.http import Http404
from django.shortcuts import render, redirect, reverse
from django.views.generic import ListView
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.renderers import TemplateHTMLRenderer
from .models import Schema, SchemaColumn, SchemaResponse, SchemaUrl
from .forms import SchemaResponseForm, ResponseUpdateForm
from .serializers import SchemaResponseSerializer
from .prepare_data import getcolumns
import pytz
""" API Views """
| 32.50365 | 79 | 0.613631 |
db6d31174807080316cb8c996b05fcc9ce69a5b7 | 40 | py | Python | my_classes/.history/ModulesPackages_PackageNamespaces/example3a/main_20210725220637.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/.history/ModulesPackages_PackageNamespaces/example3a/main_20210725220637.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/.history/ModulesPackages_PackageNamespaces/example3a/main_20210725220637.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null |
import os.path
import types
import sys
| 8 | 14 | 0.8 |
db6ec26c39a9f24fdd4d35e11407f85831432a46 | 24,215 | py | Python | api/views.py | conscience99/lyriko | 0ecc9e4d5ec8e3d746fcb286209a1e7993548a66 | [
"MIT"
] | null | null | null | api/views.py | conscience99/lyriko | 0ecc9e4d5ec8e3d746fcb286209a1e7993548a66 | [
"MIT"
] | null | null | null | api/views.py | conscience99/lyriko | 0ecc9e4d5ec8e3d746fcb286209a1e7993548a66 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from rest_framework import response
from rest_framework.serializers import Serializer
from . import serializers
from rest_framework.response import Response
from rest_framework.views import APIView
from django.views import View
from rest_framework import status
from . models import SaveList, User, Lyrics, SearchHistory, VerificationCode, SubmitLyrics
from rest_framework.permissions import BasePermission, IsAuthenticated, SAFE_METHODS, IsAdminUser
from rest_framework.authtoken.models import Token
from django.contrib.auth.hashers import make_password, check_password
from django.contrib.auth import login, authenticate
import requests
from django.db.models import Q
from bs4 import BeautifulSoup
import json
from datetime import datetime
import random
from django.core.mail import EmailMessage, EmailMultiAlternatives
from django.conf import settings
from django.template.loader import get_template
from django.urls import reverse
import jwt
from django.utils.encoding import force_bytes, force_text, DjangoUnicodeDecodeError
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from django.contrib.sites.shortcuts import get_current_site
from .utils import Util
from rest_framework_simplejwt.tokens import RefreshToken
from django.template import Context
from django.http import HttpResponse, HttpResponseNotFound
import os
import re
import urllib
from datetime import datetime
import random
import time
now = datetime.now()
import json
''' class EditLyricsView(APIView):
def post(self, request, pk, *args, **kwargs ):
data=request.data
lyrics=Lyrics.objects.get(pk=pk)
lyrics.title=request.POST['title']
lyrics.artist=request.POST['artist']
lyrics.body=request.POST['body']
Lyrics.objects.get(pk=pk)
lyrics.save()
lyrics_item=Lyrics.objects.get(pk=pk)
serializer=serializers.LyricsSerializer(lyrics_item,many=False)
response={'lyrics':serializer.data}
return Response(response,status=status.HTTP_200_OK ) '''
""" class SignupView(APIView):
def post(self, request, *args, **kwargs):
user=User()
serializer=serializers.UserSerializer(data=request.data)
print(request.data)
if serializer.is_valid():
password=make_password(request.data['password'])
username=request.data['username']
user.username=username
user.first_name=request.data['first_name']
user.last_name=request.data['last_name']
user.email=request.data['email']
user.email_username=request.data['email']
user.password=password
user.save()
new_user=User.objects.get(username=username)
print(new_user)
token=Token.objects.create(user=new_user)
response={'token':token.key, 'user':serializer.data}
return Response(response, status=status.HTTP_200_OK)
else:
return Response(serializer.errors) """
""" data = requests.get(f"https://api.lyrics.ovh/v1/{artistSlug}/{titleSlug}/")
lyric = data.json()
if data.status_code == 200:
lyrics.title=title
lyrics.artist=artist
lyrics.title_slug=titleSlug
lyrics.artist_slug=artistSlug
lyrics.body=lyric['lyrics']
lyrics.save()
lyrics_item=Lyrics.objects.get(title_slug=title_slug, artist_slug=artist_slug)
searchHistory.lyrics_id = lyrics_item.id
searchHistory.searcher_username = request.user.username
searchHistory.moment=now.strftime('%Y-%m-%d %H:%M:%S')
searchHistory.save()
serializer=serializers.LyricsSerializer(lyrics_item, many=False)
response={'lyrics':serializer.data}
return Response(response,status=status.HTTP_200_OK ) """
| 38.436508 | 356 | 0.617097 |
db7042284fa2b7f2b0d11816372b28c2a0aa4dd3 | 1,755 | py | Python | __dm__.py | AbhilashDatta/InstagramBot | 21916fcfc621ae3185df8494b12aa35743c165f8 | [
"MIT"
] | 12 | 2021-07-17T09:19:07.000Z | 2022-01-18T18:49:43.000Z | __dm__.py | kumarankm/InstagramBot | db08f0ae12f22b76d31f844a9ff7f037622e534f | [
"MIT"
] | 1 | 2021-08-12T22:04:07.000Z | 2021-08-13T14:14:10.000Z | __dm__.py | kumarankm/InstagramBot | db08f0ae12f22b76d31f844a9ff7f037622e534f | [
"MIT"
] | 8 | 2021-07-17T09:19:19.000Z | 2021-09-13T19:15:04.000Z | from selenium import webdriver
from time import sleep
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
def Dm(driver,user,message):
''' This function is used to direct message a single user/group '''
driver.get('https://www.instagram.com/direct/inbox/')
send_message_button = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div/div[3]/div/button'))).click()
search_user = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[5]/div/div/div[2]/div[1]/div/div[2]/input')))
search_user.send_keys(user)
selector = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[5]/div/div/div[2]/div[2]/div/div/div[3]/button/span'))).click()
next_button = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[5]/div/div/div[1]/div/div[2]/div/button/div'))).click()
try:
text = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[2]/textarea')))
text.send_keys(message)
send = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[3]/button'))).click()
driver.get('https://www.instagram.com/direct/inbox/')
except:
print('No message sent to '+user)
driver.get('https://www.instagram.com/direct/inbox/') | 56.612903 | 193 | 0.699145 |
db7052a530fb46c3cf9935b4a0d738b78df5d9c6 | 11,060 | py | Python | mashov.py | Yotamefr/BeitBiram | 84bd6abddf6ac865b502e0692561ee48d510ef7c | [
"MIT"
] | 1 | 2020-12-31T07:32:28.000Z | 2020-12-31T07:32:28.000Z | mashov.py | Yotamefr/BeitBiram | 84bd6abddf6ac865b502e0692561ee48d510ef7c | [
"MIT"
] | null | null | null | mashov.py | Yotamefr/BeitBiram | 84bd6abddf6ac865b502e0692561ee48d510ef7c | [
"MIT"
] | null | null | null | import requests
from datetime import datetime
import json
from extras import Day, Lesson
def send(self, url, method="get", params={}, files={}):
"""
Parameters
------------
url -> Represents the url to go to
method -> Represents the method to use. Can be either `get` or `post`
params -> Represents the parameters to send to the website. Only use it on `post`
files -> Pretty much the same as for the params
------------
"""
return getattr(self.session, str(method).strip().lower())(self.url.format(url), data=json.dumps(params),
files=files)
def __str__(self):
return json.dumps({
"MashovAPI": {
"url": self.url,
"sessionH": dict(self.session.headers),
"sessionC": self.session.cookies.get_dict(),
"username": self.username,
"password": self.password,
"schoolData": self.school_data,
"schoolID": self.school_ID,
"currentYear": self.current_year,
"loginData": self.login_data,
"isLoggedIn": self.is_logged_in,
"authID": self.auth_ID,
"userID": self.user_ID,
"uid": self.uid,
"uID": self.uID,
"guid": self.guid,
"guID": self.guID,
"schoolSite": self.school_site,
"moodleSite": self.moodle_site,
"schoolName": self.school_name,
"lastName": self.last_name,
"firstName": self.first_name,
"className": self.class_name,
"lastPass": self.last_pass,
"lastLogin": self.last_login,
"schoolYears": self.school_years,
"csrfToken": self.csrf_token,
"userChildren": self.user_children
}})
def get_day(self, day_num: int):
    """Build the ordered lesson list for one day of the timetable.

    Parameters
    ------------
    day_num -> Represents the day number (1 = Sunday ... 7 = Saturday)
    ------------
    """
    entries = [entry for entry in self.timetable
               if entry["timeTable"]["day"] == day_num]
    # Sort chronologically by lesson slot; replaces the old O(n^2)
    # compare-and-swap loop with the standard library sort.
    entries.sort(key=lambda entry: entry["timeTable"]["lesson"])
    lessons = []
    for entry in entries:
        if "'" in entry["groupDetails"]["subjectName"]:
            continue  # technical/internal groups — not real lessons
        if lessons:
            # Pad skipped slots with empty placeholder lessons so numbers
            # stay consecutive. Gaps before the first lesson of the day are
            # intentionally left unpadded (original behaviour).
            while entry["timeTable"]["lesson"] > lessons[-1].number + 1:
                lessons.append(Lesson(
                    lesson="",
                    lesson_number=lessons[-1].number + 1,
                    lesson_time="",
                    classroom="",
                    teacher="",
                )
                )
        teachers = entry["groupDetails"]["groupTeachers"]
        # Teacher names arrive as "First-Last"; display with a space.
        teachers[0]["teacherName"] = teachers[0]["teacherName"].replace("-", " ")
        lessons.append(Lesson(
            lesson=entry["groupDetails"]["subjectName"],
            lesson_number=entry["timeTable"]["lesson"],
            lesson_time="",
            classroom=entry["timeTable"]["roomNum"],
            teacher=teachers[0]["teacherName"]
        )
        )
    return Day(day_num, lessons)
def get_today(self):
    """Return today's timetable.

    Maps Python weekdays (Monday == 0 ... Sunday == 6) onto the school's
    day numbering (Sunday == 1 ... Saturday == 7) and delegates to
    ``get_day``.
    """
    school_day = (datetime.now().weekday() + 1) % 7 + 1
    return self.get_day(school_day)
| 34.88959 | 168 | 0.499458 |
db705bf281d4e51af41d8edd5763fe3fe1cf7124 | 3,936 | py | Python | lab6.py | jschmidtnj/CS115 | fa2374f1ae9c9b63e572850a97af6086112d7a36 | [
"MIT"
] | null | null | null | lab6.py | jschmidtnj/CS115 | fa2374f1ae9c9b63e572850a97af6086112d7a36 | [
"MIT"
] | null | null | null | lab6.py | jschmidtnj/CS115 | fa2374f1ae9c9b63e572850a97af6086112d7a36 | [
"MIT"
] | 1 | 2022-01-03T01:44:39.000Z | 2022-01-03T01:44:39.000Z | '''
Created on 10/11/2017
@author: jschmid3@stevens.edu
Pledge: I pledge my honor that I have abided by the Stevens Honor System -Joshua Schmidt
CS115 - Lab 6
'''
def isOdd(n):
    '''Returns whether or not the integer argument is odd.'''
    # question 1: base_2 of 42: 101010
    # A single parity check replaces the old three-branch version: in Python
    # n % 2 is nonzero exactly for odd n (including n == 0 -> False and
    # negative odd numbers -> True).
    return n % 2 != 0
#question 2: if given an odd base-10 number, the least-significant bit of its base-2 representation will be a 1.
#question 3: if given an even base-10 number, the least-significant bit of its base-2 representation will be a 0.
#This is because 2^0 = 1, and that is the only way to make an odd number, by having a 1 in the least significant bit.
#question 4: By eliminating the least significant bit, the original number decreases by a factor of 2, if the bit is a 0.
#if the least significant bit is a 1, the original number is decreased by a factor of 2, - 1.
#question 5: If N is odd, the base-2 of N is Y + "1". If N is even, the base-2 of N is Y + "0".
#This is because to get from N base-10 to N base-2 you do successive division by 2, keeping the remainder, so given
#the base-2 of all of the division except for the first, one must put that remainder in front, hence the answer given.
def numToBinary(n):
    '''Precondition: integer argument is non-negative.
    Returns the string with the binary representation of non-negative integer n.
    If n is 0, the empty string is returned.'''
    if n == 0:
        return ""
    # n % 2 *is* the least-significant bit, so the separate isOdd() branch
    # duplication is unnecessary: recurse on the higher bits and append it.
    return numToBinary(n // 2) + str(n % 2)
#print(numToBinary(15))
def binaryToNum(s):
    '''Precondition: s is a string of 0s and 1s.
    Returns the integer corresponding to the binary representation in s.
    Note: the empty string represents 0.'''
    # Horner's rule: scan from the most-significant digit, doubling the
    # accumulator at each step. Iterative twin of the original recursion.
    value = 0
    for digit in s:
        value = value * 2 + int(digit)
    return value
#print(binaryToNum("1111"))
def addBin(s, numAdd, carry = 0):
    """adds 2 binary numbers"""
    # Recursive ripple-carry addition consuming both strings from the
    # rightmost (least-significant) bit. `place` holds the result bit for the
    # current position; `carry` is passed into the next recursion.
    if s == "" or numAdd == "":
        if carry == 0:
            # One operand is exhausted and no carry is pending: the remaining
            # prefix of the other operand is copied through unchanged.
            # (Leading zeros of the longer operand are preserved.)
            return s + numAdd
    place = carry
    carry = 0
    # Fold in s's current bit: adding 1 flips `place` and carries out the
    # old value of `place`.
    if s != "" and s[-1] == "1":
        carry = place
        place = 1 - place
    # Fold in numAdd's current bit the same way; carries accumulate.
    if numAdd != "" and numAdd[-1] == "1":
        carry += place
        place = 1 - place
    # Recurse on the remaining prefixes, then append this position's bit.
    return addBin(s[:-1], numAdd[:-1], carry) + str(place)
#print(addBin("100", "001", 0))
def makeEightBit(a):
    """makes a binary number 8 bit

    Normalizes a bit-string to exactly 8 characters: too-long inputs keep
    only the low (rightmost) 8 bits, too-short inputs are left-padded with
    zeros. The base case also prints the result (original behaviour).
    """
    if len(a) == 8:
        print(str(a))
        return str(a)
    if len(a) > 8:
        # BUG FIX: the recursive results were discarded and "" was returned;
        # the normalized 8-bit string is now propagated to the caller.
        return makeEightBit(a[len(a) - 8:])
    return makeEightBit("0" + a)
def increment(s):
    '''Precondition: s is a string of 8 bits.
    Returns the binary representation of binaryToNum(s) + 1,
    wrapping around to "00000000" after "11111111".'''
    # Self-contained stdlib version (no helper functions needed):
    # int(s, 2) parses the bit-string ("" counts as 0, like binaryToNum),
    # % 256 reproduces the old truncate-to-low-8-bits step, and
    # format(..., "08b") reproduces the old zero padding.
    value = (int(s, 2) if s else 0) + 1
    return format(value % 256, "08b")
#print(increment("1110100000"))
def count(s, n):
    '''Precondition: s is an 8-bit string and n >= 0.
    Prints s and its n successors.'''
    # Iterative form of the original recursion: n + 1 lines are printed in
    # total and the empty string is returned, mirroring the old base case.
    current = s
    for _ in range(n):
        print(current)
        current = increment(current)
    print(current)
    return ""
#print(count("11111110", 5))
#print("a")
def numToTernary(n):
    '''Precondition: integer argument is non-negative.
    Returns the string with the ternary representation of non-negative integer
    n. If n is 0, the empty string is returned.'''
    # Collect base-3 digits least-significant first, then reverse — the
    # iterative twin of the original recursion.
    digits = []
    while n > 0:
        digits.append(str(n % 3))
        n //= 3
    return "".join(reversed(digits))
#print(numToTernary(42))
def ternaryToNum(s):
    '''Precondition: s is a string of 0s, 1s, and 2s.
    Returns the integer corresponding to the ternary representation in s.
    Note: the empty string represents 0.'''
    # Horner's rule over base 3, scanning from the most-significant digit.
    value = 0
    for digit in s:
        value = value * 3 + int(digit)
    return value
#print(ternaryToNum('12211010'))
| 33.641026 | 121 | 0.621697 |
db713485817468ad0752428e7966eefdca79459b | 4,233 | py | Python | clover.py | imyz/25000 | 909b6ceaf326138b0684e6600f347a38fe68f9f0 | [
"MIT"
] | 8 | 2015-08-10T03:43:06.000Z | 2022-01-18T21:23:31.000Z | clover.py | jcrocholl/25000 | 0607a9c2f5f16f0776d88e56460c6479921616cb | [
"MIT"
] | null | null | null | clover.py | jcrocholl/25000 | 0607a9c2f5f16f0776d88e56460c6479921616cb | [
"MIT"
] | 6 | 2015-06-28T20:02:01.000Z | 2018-01-06T17:37:38.000Z | #!/usr/bin/env python
from math import *
import sys
# --- Physical dimensions (millimetres) ---
frame_width = 200
frame_height = 75
drill = 1.6 # 1/16 inch radius.
extrusion = 15
motor_screw_grid = 31
motor_cutout_diameter = 22
motor_width = 42.2
motor_offset = 35 # Motor face to extrusion.
# Project the motor offset onto the 30-degree frame edge (rotate() is
# defined elsewhere in this file).
motor_side, motor_bend = rotate(0, motor_offset + extrusion, 30)
motor_side += extrusion/2
motor_side += extrusion/cos(pi/6)
mc = motor_cutout_diameter/2 + drill
#nema23 = 47.14 # Mounting screws center-to-center
clover = 6
thickness = 0.0478 * 25.4 # 18 gauge steel.
enable_perimeter = False
# Echo the derived dimensions on stderr so stdout stays clean for G-code.
print >> sys.stderr, 'thickness', thickness
print >> sys.stderr, 'motor_bend', motor_bend
print >> sys.stderr, 'motor_side', motor_side
print >> sys.stderr, 'mc', mc
print >> sys.stderr, 'extrusion-to-extrusion', frame_width
print >> sys.stderr, 'edge-to-edge', frame_width + 2*extrusion
# --- Derived X coordinates ---
xa = motor_side - drill # Outside wings start
xb = motor_side + motor_bend + drill
xs1 = xa + extrusion/2 # Extrusion screws
xs2 = xb - extrusion/2
# xe = frame_width/2 # Extrusion corner
xt = motor_width/2
xms = motor_screw_grid/sqrt(2)
xgs = 19
# --- Derived Y coordinates ---
ya = frame_height/2 + drill # Top without flange
yb = frame_height/2 + drill - extrusion
ys = frame_height/2 - extrusion/2 # Extrusion screws
yt = motor_width/2
yt2 = yt + 4
yms = xms
ygs = xgs
s2 = sqrt(2)
# --- Emit G-code on stdout (move/jump/linear/up are defined elsewhere) ---
print 'G17 ; Select XY plane for arcs'
print 'G90 ; Absolute coordinates'
move('G92', x=0, y=0, z=0)
linear(x=0, y=0, z=0)
print '; Gasket screw holes'
for x in (-xgs, xgs):
    for y in (-x, x):
        jump(x=x, y=y)
        # clockwise(i=1)
if enable_perimeter:
    print '; Horizontal extrusion screw holes'
    for x in (xs1, xs2):
        jump(x=x, y=ys)
    for x in (xs2, xs1, -xs1, -xs2):
        jump(x=x, y=-ys)
    for x in (-xs2, -xs1):
        jump(x=x, y=ys)
#print '; 22mm dia cutout for reference'
#jump(x=0, y=11)
#clockwise(j=-11)
#print '; NEMA17 square for reference'
#jump(x=0, y=yt*s2)
#linear(x=xt*s2, y=0)
#linear(x=0, y=-yt*s2)
#linear(x=-xt*s2, y=0)
#linear(x=0, y=yt*s2)
# Cut the clover profile in two depth passes (-1 mm, then -2.5 mm).
for z in (-1, -2.5):
    clovercut(z)
if enable_perimeter:
    for z in (-1, -2.5):
        perimeter(z)
print '; All done'
up()
| 25.196429 | 64 | 0.632412 |
db71abd1961c2779351e3978214beab6ac4916f7 | 915 | py | Python | scripts/mnist_inference.py | asiakaczmar/noise2self | 75daaf188c49bff0da22c235540da20f4eca9614 | [
"MIT"
] | null | null | null | scripts/mnist_inference.py | asiakaczmar/noise2self | 75daaf188c49bff0da22c235540da20f4eca9614 | [
"MIT"
] | null | null | null | scripts/mnist_inference.py | asiakaczmar/noise2self | 75daaf188c49bff0da22c235540da20f4eca9614 | [
"MIT"
] | null | null | null | import torch
from torchvision.datasets import MNIST
from torchvision import transforms
from torch.utils.data import DataLoader
from scripts.utils import SyntheticNoiseDataset
from models.babyunet import BabyUnet
# Directory holding the per-epoch model checkpoints saved during training.
CHECKPOINTS_PATH = '../checkpoints/'
# MNIST test split converted to tensors; SyntheticNoiseDataset wraps it —
# presumably adding the synthetic noise the models were trained to remove
# (TODO confirm against scripts/utils.py).
mnist_test = MNIST('../inferred_data/MNIST', download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                   ]), train=False)
noisy_mnist_test = SyntheticNoiseDataset(mnist_test, 'test')
data_loader = DataLoader(noisy_mnist_test, batch_size=256, shuffle=True)
import numpy as np  # BUG FIX: np.save was used below but numpy was never imported

# Run every 10th checkpoint over one batch and save the denoised output.
for x in range(0, 200, 10):
    trained_model = BabyUnet()
    # BUG FIX: load_state_dict() expects a state dict, not a path string;
    # deserialize the checkpoint with torch.load first.
    trained_model.load_state_dict(torch.load(CHECKPOINTS_PATH + 'model' + str(x)))
    trained_model.eval()
    denoised = None
    for i, batch in enumerate(data_loader):
        # NOTE(review): `batch` is passed to the model as-is; confirm the
        # dataset yields a bare tensor rather than an (input, target) pair.
        with torch.no_grad():  # BUG FIX: .numpy() fails on grad-tracking tensors
            denoised = trained_model(batch)
        break  # BUG FIX: was `break()` — a SyntaxError; only the first batch is used
    # BUG FIX: np.save takes (file, array) in that order, and it writes the
    # .npy format (".npz" is what np.savez produces).
    np.save('../inferred_data/model' + str(x) + '.npy', denoised.numpy())
| 31.551724 | 73 | 0.696175 |
db73a20804b8cf971455500dd1ae60cb3137e6bf | 4,321 | py | Python | src/processing/augmentation.py | sdcubber/kaggle_carvana | 44f6c7f1e80be2caa3c7ad4c7fb69067af45fe8f | [
"MIT"
] | null | null | null | src/processing/augmentation.py | sdcubber/kaggle_carvana | 44f6c7f1e80be2caa3c7ad4c7fb69067af45fe8f | [
"MIT"
] | null | null | null | src/processing/augmentation.py | sdcubber/kaggle_carvana | 44f6c7f1e80be2caa3c7ad4c7fb69067af45fe8f | [
"MIT"
] | null | null | null | # Script for data augmentation functions
import numpy as np
from collections import deque
from PIL import Image
import cv2
from data.config import *
def imread_cv2(image_path):
    """
    Read image_path with cv2 format (H, W, C)
    if image is '.gif' outputs is a numpy array of {0,1}
    """
    # Dispatch on the file extension — assumes a 3-character suffix.
    image_format = image_path[-3:]
    if image_format == 'jpg':
        # cv2.imread returns an HxWxC uint8 array (BGR channel order).
        image = cv2.imread(image_path)
    else:
        # Anything else (e.g. .gif masks) is read via PIL; presumably this
        # yields an HxW label array for paletted mask GIFs — TODO confirm.
        image = np.array(Image.open(image_path))
    return image
def image_to_tensor(image, mean=0, std=1.):
    """Convert an HWC (or HW) numpy image into a normalized CHW float tensor."""
    if len(image.shape) == 2:
        # Promote grayscale HW to HW1 so the channel transpose below works.
        image = image.reshape(image.shape[0], image.shape[1], 1)
    normalized = (image.astype(np.float32) - mean) / std
    chw = normalized.transpose((2, 0, 1))
    return torch.from_numpy(chw)
# --- Data Augmentation functions --- #
# A lot of functions can be found here:
# https://github.com/fchollet/keras/blob/master/keras/preprocessing/image.py#L223
# transform image and label
def randomHorizontalFlip(image, mask, p=0.5):
    """Do a random horizontal flip with probability p"""
    # Flip image and mask together so they stay aligned.
    if np.random.random() < p:
        return np.fliplr(image), np.fliplr(mask)
    return image, mask
def randomVerticalFlip(image, mask, p=0.5):
    """Do a random vertical flip with probability p"""
    # Flip image and mask together so they stay aligned.
    if np.random.random() < p:
        return np.flipud(image), np.flipud(mask)
    return image, mask
def randomHorizontalShift(image, mask, max_shift=0.05, p=0.5):
    """Do random horizontal shift with max proportion shift and with probability p
    Elements that roll beyond the last position are re-introduced at the first."""
    # The shift amount is drawn before the probability test (consuming the
    # RNG either way), exactly as in the original implementation.
    limit = int(max_shift * image.shape[1])
    shift = np.random.choice(np.arange(-limit, limit + 1))
    if np.random.random() < p:
        return np.roll(image, shift, axis=1), np.roll(mask, shift, axis=1)
    return image, mask
def randomVerticalShift(image, mask, max_shift=0.05, p=0.5):
    """Do random vertical shift with max proportion shift and probability p
    Elements that roll beyond the last position are re-introduced at the first."""
    # The shift amount is drawn before the probability test (consuming the
    # RNG either way), exactly as in the original implementation.
    limit = int(max_shift * image.shape[0])
    shift = np.random.choice(np.arange(-limit, limit + 1))
    if np.random.random() < p:
        return np.roll(image, shift, axis=0), np.roll(mask, shift, axis=0)
    return image, mask
def randomInvert(image, mask, p=0.5):
    """Randomly invert image with probability p"""
    # Only the image is inverted; the mask is returned untouched.
    if np.random.random() < p:
        return 255 - image, mask
    return image, mask
def randomBrightness(image, mask, p=0.75):
    """With probability p, randomly increase or decrease brightness.
    See https://stackoverflow.com/questions/37822375/python-opencv-increasing-image-brightness-without-overflowing-uint8-array"""
    if np.random.random() < p:
        # Cap the shift by the 25th percentile of per-pixel headroom
        # (255 - pixel) so bright/white cars are not fully saturated.
        max_value = np.percentile(255-image, q=25) # avoid burning out white cars, so take image-specific maximum
        # NOTE(review): if the image is almost entirely 255, max_value can be
        # 0 and np.random.choice on the empty range would raise — confirm.
        value = np.random.choice(np.arange(-max_value, max_value))
        if value > 0:
            # Brighten: pixels that would overflow are clamped to 255.
            image = np.where((255 - image) < value,255,image+value).astype(np.uint8)
        else:
            # Darken: pixels that would underflow are clamped to 0.
            image = np.where(image < -value,0,image+value).astype(np.uint8)
    return image, mask
def randomHue(image, mask, p=0.25, max_value=75):
    """With probability p, randomly increase or decrease hue.
    See https://stackoverflow.com/questions/32609098/how-to-fast-change-image-brightness-with-python-opencv"""
    if np.random.random() < p:
        value = np.random.choice(np.arange(-max_value, max_value))
        # Shift the hue channel in HSV space, then convert back to BGR.
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        hsv[:,:,0] = hsv[:,:,0] + value
        # NOTE(review): the hue is clipped to [0, 255] rather than wrapped;
        # OpenCV's 8-bit hue range is [0, 179] — confirm this is intended.
        hsv = np.clip(hsv, a_min=0, a_max=255).astype(np.uint8)
        image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    return image, mask
def GaussianBlur(image, mask, kernel=(1, 1),sigma=1, p=0.5):
    """With probability p, apply Gaussian blur"""
    # TODO: not implemented yet — `kernel`, `sigma` and `p` are currently
    # unused and the inputs are returned unchanged.
    return image, mask
def randomRotate(image, mask, max_angle, p=0.5):
    """Perform random rotation with max_angle and probability p"""
    # TODO: not implemented yet — `max_angle` and `p` are currently unused
    # and the inputs are returned unchanged.
    return(image, mask)
| 36.931624 | 129 | 0.669058 |
db73bf308ebc49eac8469a2ce4652a4342c9902b | 295 | py | Python | substitute_finder/templatetags/substitute_finder_extra.py | tohugaby/pur_beurre_web | c3bdacee50907eea79821e7a8b3fe0f349719d88 | [
"MIT"
] | 1 | 2020-01-05T18:58:51.000Z | 2020-01-05T18:58:51.000Z | substitute_finder/templatetags/substitute_finder_extra.py | tohugaby/pur_beurre_web | c3bdacee50907eea79821e7a8b3fe0f349719d88 | [
"MIT"
] | 3 | 2020-06-05T18:35:47.000Z | 2021-06-10T20:32:44.000Z | substitute_finder/templatetags/substitute_finder_extra.py | tomlemeuch/pur_beurre_web | c3bdacee50907eea79821e7a8b3fe0f349719d88 | [
"MIT"
] | null | null | null | """
substitute_finder app custom templatetags module
"""
from django import template
# Library instance that this module's custom template tags/filters attach to.
register = template.Library()
| 17.352941 | 48 | 0.688136 |
db74905cc0d77c3c1aff987d3c4f57d66e26cc16 | 1,905 | py | Python | terrafirma/core/views/env.py | AlexandraAlter/django-terrafirma | afce5946f173aded2b4bfea78cf1b1034ec32272 | [
"MIT"
] | null | null | null | terrafirma/core/views/env.py | AlexandraAlter/django-terrafirma | afce5946f173aded2b4bfea78cf1b1034ec32272 | [
"MIT"
] | null | null | null | terrafirma/core/views/env.py | AlexandraAlter/django-terrafirma | afce5946f173aded2b4bfea78cf1b1034ec32272 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse_lazy
from django import views
from django.views import generic as g_views
from django.views.generic import base as b_views, edit as e_views
from .. import forms, models
| 30.238095 | 85 | 0.67979 |
db752d631ccf3257bd962fe18b4682f3220a6fa6 | 178 | py | Python | geoviz/__init__.py | JustinGOSSES/geoviz | 159b0665d9efcffe46061313c15ad09ced840d2d | [
"MIT"
] | 6 | 2018-10-16T16:38:15.000Z | 2018-10-22T13:56:13.000Z | geoviz/__init__.py | JustinGOSSES/geoviz | 159b0665d9efcffe46061313c15ad09ced840d2d | [
"MIT"
] | 5 | 2018-10-14T21:49:00.000Z | 2018-11-12T18:59:48.000Z | geoviz/__init__.py | nathangeology/geoviz | 5643e8880b4ecc241d4f8806743bf0441dd435c1 | [
"MIT"
] | 1 | 2019-05-30T23:36:29.000Z | 2019-05-30T23:36:29.000Z | from load_las_data import LoadLasData
from altair_log_plot import AltAirLogPlot
from load_shapefile_data import LoadShpData
from alitair_well_location_map import WellLocationMap
| 35.6 | 53 | 0.910112 |
db76b4e07eb1879ec4babded5e9e5a77166fce6b | 424 | py | Python | core/data/utils.py | ahmad-PH/auto_lcc | 55a6ac0e92994f4eed9951a27b7aa0d834f9d804 | [
"MIT"
] | 2 | 2022-01-01T22:09:05.000Z | 2022-01-01T23:00:43.000Z | core/data/utils.py | ahmad-PH/auto_lcc | 55a6ac0e92994f4eed9951a27b7aa0d834f9d804 | [
"MIT"
] | null | null | null | core/data/utils.py | ahmad-PH/auto_lcc | 55a6ac0e92994f4eed9951a27b7aa0d834f9d804 | [
"MIT"
] | null | null | null | import pickle
import pandas as pd
from typing import List, Tuple
| 28.266667 | 79 | 0.660377 |
db7701392b667ccf9ad8bc520bcd09b9ef9711c5 | 608 | py | Python | apps/users/adminx.py | hhdMrLion/mxshop-api | 1472ad0d959439ea80c1f8d8bfd3629c15d3017d | [
"Apache-2.0"
] | null | null | null | apps/users/adminx.py | hhdMrLion/mxshop-api | 1472ad0d959439ea80c1f8d8bfd3629c15d3017d | [
"Apache-2.0"
] | null | null | null | apps/users/adminx.py | hhdMrLion/mxshop-api | 1472ad0d959439ea80c1f8d8bfd3629c15d3017d | [
"Apache-2.0"
] | null | null | null | import xadmin
from users.models import VerifyCode
from xadmin import views
# Register the SMS verification-code model and the xadmin theme/global site
# settings with the admin site (the admin option classes are defined
# elsewhere in this module).
xadmin.site.register(VerifyCode, VerifyCodeAdmin)
xadmin.site.register(views.BaseAdminView, BaseSetting)
xadmin.site.register(views.CommAdminView, GlobalSettings)
| 22.518519 | 58 | 0.708882 |
db777f4b56a68caa06eca0c2b86f08c668527cb4 | 2,717 | py | Python | Archive/train_cnn.py | Yeok-c/Urban-Sound-Classification | 98c46eb54266ef7b859d192e9bebe8a5d48e1708 | [
"Apache-2.0"
] | null | null | null | Archive/train_cnn.py | Yeok-c/Urban-Sound-Classification | 98c46eb54266ef7b859d192e9bebe8a5d48e1708 | [
"Apache-2.0"
] | null | null | null | Archive/train_cnn.py | Yeok-c/Urban-Sound-Classification | 98c46eb54266ef7b859d192e9bebe8a5d48e1708 | [
"Apache-2.0"
] | null | null | null | ### Load necessary libraries ###
import numpy as np
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
import tensorflow as tf
from tensorflow import keras
from sklearn.metrics import ConfusionMatrixDisplay
# Build the network once up front only to print its architecture; a fresh
# instance is trained per fold inside the loop below.
model = get_network()
model.summary()
### Train and evaluate via 10-Folds cross-validation ###
accuracies = []
folds = np.array(['fold1','fold2','fold3','fold4',
                  'fold5','fold6','fold7','fold8',
                  'fold9','fold10'])
load_dir = "UrbanSounds8K/processed/"
kf = KFold(n_splits=10)
# Each split holds out exactly one fold for testing.
for train_index, test_index in kf.split(folds):
    x_train, y_train = [], []
    for ind in train_index:
        # read features or segments of an audio file
        train_data = np.load("{0}/{1}.npz".format(load_dir,folds[ind]),
                             allow_pickle=True)
        # for training stack all the segments so that they are treated as an example/instance
        features = np.concatenate(train_data["features"], axis=0)
        labels = np.concatenate(train_data["labels"], axis=0)
        x_train.append(features)
        y_train.append(labels)
    # stack x,y pairs of all training folds
    x_train = np.concatenate(x_train, axis = 0).astype(np.float32)
    y_train = np.concatenate(y_train, axis = 0).astype(np.float32)
    # for testing we will make predictions on each segment and average them to
    # produce single label for an entire sound clip.
    test_data = np.load("{0}/{1}.npz".format(load_dir,
                        folds[test_index][0]), allow_pickle=True)
    x_test = test_data["features"]
    y_test = test_data["labels"]
    # One TensorBoard log directory per held-out fold.
    log_dir="logs/fit/" + folds[test_index][0]
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
    model = get_network()
    model.fit(x_train, y_train, epochs = 20, batch_size = 64, verbose = 1, validation_split=0.2,
              use_multiprocessing=True, workers=8, callbacks=[tensorboard_callback])
    # evaluate on test set/fold
    y_true, y_pred = [], []
    for x, y in zip(x_test, y_test):
        # average predictions over segments of a sound clip
        avg_p = np.argmax(np.mean(model.predict(x), axis = 0))
        y_pred.append(avg_p)
        # pick single label via np.unique for a sound clip
        y_true.append(np.unique(y)[0])
    accuracies.append(accuracy_score(y_true, y_pred))
    print("Fold n accuracy: {0}".format(accuracy_score(y_true, y_pred)))
    cm = ConfusionMatrixDisplay.from_predictions(y_true, y_pred)
    cm.figure_.savefig('conf_mat_' + str(test_index) + '_acc_' + str(accuracy_score(y_true, y_pred)) + '.png',dpi=1000)
print("Average 10 Folds Accuracy: {0}".format(np.mean(accuracies)))
| 40.552239 | 123 | 0.670225 |
db77b07e8a875d39eb972f8b432c0f0db96a2c4f | 6,105 | py | Python | metaflow/plugins/kfp/tests/flows/resources_flow.py | zillow/metaflow | a42dc9eab04695f2b0a429874e607ed67d5a2b45 | [
"Apache-2.0"
] | 7 | 2020-07-24T17:07:58.000Z | 2021-05-19T21:47:12.000Z | metaflow/plugins/kfp/tests/flows/resources_flow.py | zillow/metaflow | a42dc9eab04695f2b0a429874e607ed67d5a2b45 | [
"Apache-2.0"
] | 55 | 2020-07-20T16:56:27.000Z | 2022-03-28T12:51:15.000Z | metaflow/plugins/kfp/tests/flows/resources_flow.py | zillow/metaflow | a42dc9eab04695f2b0a429874e607ed67d5a2b45 | [
"Apache-2.0"
] | 6 | 2020-10-15T18:38:35.000Z | 2021-06-20T03:05:43.000Z | import os
import pprint
import subprocess
import time
from typing import Dict, List
from kubernetes.client import (
V1EnvVar,
V1EnvVarSource,
V1ObjectFieldSelector,
V1ResourceFieldSelector,
)
from metaflow import FlowSpec, step, environment, resources, current
# Expose the pod's resource requests/limits to the step code as environment
# variables via the Kubernetes downward API (get_env_vars builds the
# V1EnvVar list from resourceFieldRef selectors).
kubernetes_vars = get_env_vars(
    {
        "LOCAL_STORAGE": "requests.ephemeral-storage",
        "LOCAL_STORAGE_LIMIT": "limits.ephemeral-storage",
        "CPU": "requests.cpu",
        "CPU_LIMIT": "limits.cpu",
        "MEMORY": "requests.memory",
        "MEMORY_LIMIT": "limits.memory",
    }
)
# The pod name comes from a fieldRef (downward API) rather than a resource ref.
kubernetes_vars.append(
    V1EnvVar(
        name="MY_POD_NAME",
        value_from=V1EnvVarSource(
            field_ref=V1ObjectFieldSelector(field_path="metadata.name")
        ),
    )
)
# Surface selected pod annotations as environment variables (annotation key
# -> env var name) so the flow can assert on them at run time.
annotations = {
    "metaflow.org/flow_name": "MF_NAME",
    "metaflow.org/step": "MF_STEP",
    "metaflow.org/run_id": "MF_RUN_ID",
    "metaflow.org/experiment": "MF_EXPERIMENT",
    "metaflow.org/tag_metaflow_test": "MF_TAG_METAFLOW_TEST",
    "metaflow.org/tag_test_t1": "MF_TAG_TEST_T1",
}
for annotation, env_name in annotations.items():
    kubernetes_vars.append(
        V1EnvVar(
            name=env_name,
            value_from=V1EnvVarSource(
                field_ref=V1ObjectFieldSelector(
                    field_path=f"metadata.annotations['{annotation}']"
                )
            ),
        )
    )
# Same mechanism for selected pod labels.
labels = {
    "aip.zillowgroup.net/kfp-pod-default": "KF_POD_DEFAULT",
    "tags.ledger.zgtools.net/ai-flow-name": "AI_FLOW_NAME",
    "tags.ledger.zgtools.net/ai-step-name": "AI_STEP_NAME",
    "tags.ledger.zgtools.net/ai-experiment-name": "AI_EXPERIMENT_NAME",
}
for label, env_name in labels.items():
    kubernetes_vars.append(
        V1EnvVar(
            name=env_name,
            value_from=V1EnvVarSource(
                field_ref=V1ObjectFieldSelector(
                    field_path=f"metadata.labels['{label}']"
                )
            ),
        )
    )
if __name__ == "__main__":
    # Run the flow (class defined elsewhere in this file).
    ResourcesFlow()
| 30.073892 | 71 | 0.591646 |
db79520622b9fcae917edbc819e1d1c2cae17bf8 | 5,951 | py | Python | src/nb_utils/general.py | redfrexx/osm_association_rules | 33975ce25047f9ab3b21e890bc5ed9bab59a0a2f | [
"BSD-3-Clause"
] | null | null | null | src/nb_utils/general.py | redfrexx/osm_association_rules | 33975ce25047f9ab3b21e890bc5ed9bab59a0a2f | [
"BSD-3-Clause"
] | null | null | null | src/nb_utils/general.py | redfrexx/osm_association_rules | 33975ce25047f9ab3b21e890bc5ed9bab59a0a2f | [
"BSD-3-Clause"
] | 2 | 2021-05-10T10:19:13.000Z | 2021-09-15T10:32:10.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Functions used for data handling
"""
__author__ = "Christina Ludwig, GIScience Research Group, Heidelberg University"
__email__ = "christina.ludwig@uni-heidelberg.de"
import os
import yaml
from shapely.geometry import box
import numpy as np
import pandas as pd
import geopandas as gpd
import json
from nb_utils.utils import create_bbox, reproject_to_utm
# Human-readable display names for the context variables (used in plots).
CONTEXT_NAMES = {"area": "Area", "building_density": "Building density", "age": "Days since creation",
                 "n_tags": "Number of tags", "changes": "Number of changes", "max_version": "Version number",
                 "user_count_inner": "Inner user count", "user_density_inner": "Inner user density",
                 "user_count_outer": "Outer user count", "user_density_outer": "Outer user density",
                 "feature_count": "Feature count", "random": "Random"}
# Column layout of an association-rule result row.
rules_colnames = ['antecedents', 'consequents', 'antecedent support',
                  'consequent support', 'support', 'confidence', 'lift', 'leverage',
                  'conviction', "context", "context_min", "context_max", "context_p_min", "context_p_max", "nfeatures", "rule"]
# Display names including units. NOTE(review): largely duplicates
# CONTEXT_NAMES — consider consolidating the two mappings.
pretty_names_units = {"area": "Area [ha]", "building_density": "Building density", "feature_count": "Feature count", "age": "Days since creation", "n_tags": "Number of tags", "changes": "Number of changes", "max_version": "Version number", "user_count_inner": "Inner user count", "user_density_inner": "Inner user density", "user_count_outer": "Outer user count",
                      "user_density_outer": "Outer user density", "random": "Random"}
def load_config(config_file, cities):
    """
    Read the YAML configuration and keep only the requested cities.

    :param config_file: path to the YAML configuration file
    :param cities: city names to keep from the "locations" section
    :return: dict mapping city name -> its configuration, or None when the
        file does not exist (an error message is printed in that case)
    """
    if not os.path.exists(config_file):
        print("ERROR: Config file {} does not exist.".format(config_file))
        return None
    with open(config_file, 'r') as src:
        config = yaml.load(src, Loader=yaml.FullLoader)
    locations = config["locations"]
    return {city: locations[city] for city in cities}
def load_data(cities, data_dir):
    """
    Load per-city tag and context files from disk and join them into one
    feature table.

    :param cities: iterable of city names; each city needs
        ``<data_dir>/<city>/<city>_tags.json`` and
        ``<data_dir>/<city>/<city>_context.geojson``
    :param data_dir: root directory holding one sub-directory per city
    :return: joined GeoDataFrame of context + tag columns; returns the tuple
        ``(None, None, None)`` when an input file is missing (NOTE(review):
        inconsistent with the single-value success return — confirm callers)
    """
    loaded_tags_dfs = []
    loaded_context_dfs = []
    for city in cities:
        print("Loading {}...".format(city))
        # Check paths
        tags_file = os.path.join(data_dir, city, "{}_tags.json".format(city))
        context_file = os.path.join(data_dir, city, "{}_context.geojson".format(city))
        if (not os.path.exists(tags_file)) or (not os.path.exists(context_file)):
            print("{}: Input files not found.".format(city))
            return None, None, None
        # Read data and set index (features keyed by OSM id)
        tags_df = pd.read_json(tags_file).set_index("@osmId")
        context_df = gpd.read_file(context_file).set_index("@osmId")
        # Calculate area (should be moved to data_extraction)
        # NOTE(review): the /10000 "ha" conversion is commented out, so the
        # values are in square metres even where labels say [ha] — confirm.
        context_df["area"] = reproject_to_utm(context_df).area #/ 10000. # conversion to ha
        # Add column holding the city name
        context_df["city"] = city
        loaded_tags_dfs.append(tags_df)
        loaded_context_dfs.append(context_df)
    # Convert list of dataframes to dataframe
    all_tags_df = pd.concat(loaded_tags_dfs, axis=0)
    all_tags_df = all_tags_df.fillna(False)
    all_context_df = pd.concat(loaded_context_dfs, axis=0)
    all_features = all_context_df.join(all_tags_df, sort=False)
    # Add dummy columns for "no antecedent" and random context variable
    all_features["none"] = True
    all_features["random"] = np.random.rand(len(all_features))
    # The park iteself is always counted as an objects inside of it. Therefore, subtract 1.
    all_features["feature_count"] = all_features["feature_count"] - 1
    # Delete unnecessary columns (ground-truth columns and the target tag)
    unnecessary_cols = list(filter(lambda x: x.startswith("gt:"), all_features.columns)) + ["leisure=park"]
    all_features.drop(unnecessary_cols, axis=1, inplace=True)
    return all_features
def create_city_bboxes(config_cities):
    """
    Build a GeoDataFrame with one bounding-box polygon per city.

    :param config_cities: dict mapping city name -> config dict containing
        "center" and "width" entries
    :return: GeoDataFrame indexed by city name with a "geometry" column
    """
    bboxes = {
        name: box(*create_bbox(cfg["center"], cfg["width"]))
        for name, cfg in config_cities.items()
    }
    frame = pd.DataFrame.from_dict(bboxes, orient="index", columns=["geometry"])
    return gpd.GeoDataFrame(frame)
def dump_city_rules(city_rules, interim_dir):
    """
    Write the per-city association-rule results to disk.

    :param city_rules: dict mapping city name -> {"heatmap", "valid_rules",
        "sel_features"} as produced by the rule analysis
    :param interim_dir: directory in which a "city_rules" folder is created
    :return: None
    """
    out_dir = os.path.join(interim_dir, "city_rules")
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    for city, result in city_rules.items():
        print(city)
        result["heatmap"].to_json(os.path.join(out_dir, "{}_heatmap.json".format(city)))
        result["valid_rules"].reset_index().to_json(os.path.join(out_dir, "{}_valid_rules.json".format(city)))
        with open(os.path.join(out_dir, "{}_sel_features.json".format(city)), "w") as dst:
            json.dump(list(result["sel_features"].index), dst)
def load_city_rules(cities, interim_dir, all_features):
    """
    Load the per-city association-rule results written by
    ``dump_city_rules`` back from disk.

    :param cities: city names to load
    :param interim_dir: directory containing the "city_rules" folder
    :param all_features: full feature table; the stored feature ids select
        each city's subset from it
    :return: dict mapping city name -> {"heatmap", "valid_rules",
        "sel_features"}
    """
    city_rules = {}
    rules_dir = os.path.join(interim_dir, "city_rules")
    for city in cities:
        with open(os.path.join(rules_dir, "{}_sel_features.json".format(city))) as src:
            selected_ids = json.load(src)
        # BUG FIX: removed the dead statement `selected_osmids = json`, which
        # accidentally rebound a local name to the json module and was unused.
        city_rules[city] = {
            "heatmap": pd.read_json(os.path.join(rules_dir, "{}_heatmap.json".format(city))),
            "valid_rules": pd.read_json(
                os.path.join(rules_dir, "{}_valid_rules.json".format(city))).set_index("index"),
            "sel_features": all_features.loc[selected_ids]}
    return city_rules
| 40.482993 | 363 | 0.668627 |
db7ad2b92a14b73e461a5d252d3a7ab245920c9f | 3,922 | py | Python | keystoneclient/auth/identity/v3/federated.py | darren-wang/ksc | fd096540e8e57b6bd7c923f4cb4ad6616d103cc8 | [
"Apache-1.1"
] | 1 | 2019-09-11T11:56:19.000Z | 2019-09-11T11:56:19.000Z | tools/dockerize/webportal/usr/lib/python2.7/site-packages/keystoneclient/auth/identity/v3/federated.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | null | null | null | tools/dockerize/webportal/usr/lib/python2.7/site-packages/keystoneclient/auth/identity/v3/federated.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_config import cfg
import six
from keystoneclient.auth.identity.v3 import base
from keystoneclient.auth.identity.v3 import token
__all__ = ['FederatedBaseAuth']
| 35.017857 | 77 | 0.63284 |
db7bb396855ddfa537f07ed9527e3bc449422f2a | 274 | py | Python | bin/Python27/Lib/site-packages/tables/utilsExtension.py | lefevre-fraser/openmeta-mms | 08f3115e76498df1f8d70641d71f5c52cab4ce5f | [
"MIT"
] | null | null | null | bin/Python27/Lib/site-packages/tables/utilsExtension.py | lefevre-fraser/openmeta-mms | 08f3115e76498df1f8d70641d71f5c52cab4ce5f | [
"MIT"
] | null | null | null | bin/Python27/Lib/site-packages/tables/utilsExtension.py | lefevre-fraser/openmeta-mms | 08f3115e76498df1f8d70641d71f5c52cab4ce5f | [
"MIT"
] | null | null | null | from warnings import warn
from tables.utilsextension import *
_warnmsg = ("utilsExtension is pending deprecation, import utilsextension instead. "
"You may use the pt2to3 tool to update your source code.")
warn(_warnmsg, DeprecationWarning, stacklevel=2)
| 39.142857 | 85 | 0.751825 |
db7cbd4afe84d62fa37ba5ff4602788af4116b50 | 802 | py | Python | config.py | iDevHank/i18n | ec731b5d6fab330a868ebb9f9e11ff1caef629ef | [
"MIT"
] | null | null | null | config.py | iDevHank/i18n | ec731b5d6fab330a868ebb9f9e11ff1caef629ef | [
"MIT"
] | null | null | null | config.py | iDevHank/i18n | ec731b5d6fab330a868ebb9f9e11ff1caef629ef | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# The format of your own localizable method.
# This is an example of '"string".localized'
SUFFIX = '.localized'
KEY = r'"(?:\\.|[^"\\])*"'
LOCALIZABLE_RE = r'%s%s' % (KEY, SUFFIX)
# Specify the path of localizable files in project.
LOCALIZABLE_FILE_PATH = ''
LOCALIZABLE_FILE_NAMES = ['Localizable']
LOCALIZABLE_FILE_TYPES = ['strings']
# File types of source file.
SEARCH_TYPES = ['swift', 'm', 'json']
SOURCE_FILE_EXCLUSIVE_PATHS = [
'Assets.xcassets', 'Carthage', 'ThirdParty',
'Pods', 'Media.xcassets', 'Framework', 'bin']
LOCALIZABLE_FILE_EXCLUSIVE_PATHS = ['Carthage', 'ThirdParty',
'Pods', 'Framework', 'bin']
LOCALIZABLE_FORMAT_RE = r'"(?:\\.|[^"\\])*"\s*=\s*"(?:\\.|[^"\\])*";\n'
DEFAULT_TARGET_PATH = 'generated.strings'
| 33.416667 | 71 | 0.634663 |
db7ce31c6a43ef5813c6d71caa6c1ea9655847e6 | 188 | py | Python | dashboard_analytics/tasks/transaction_processor.py | Astewart1510/pvt-algoranddashboard | 6fb6cf37b483339f24cc86f0a95fb2245be492ca | [
"MIT"
] | null | null | null | dashboard_analytics/tasks/transaction_processor.py | Astewart1510/pvt-algoranddashboard | 6fb6cf37b483339f24cc86f0a95fb2245be492ca | [
"MIT"
] | null | null | null | dashboard_analytics/tasks/transaction_processor.py | Astewart1510/pvt-algoranddashboard | 6fb6cf37b483339f24cc86f0a95fb2245be492ca | [
"MIT"
] | null | null | null | from dashboard_analytics.models import AccountType, InstrumentType, Account, Transaction | 37.6 | 88 | 0.787234 |
db7dbf749958b5f62cb5ff7deb97ed3b8e66afdf | 1,771 | py | Python | MuonGun/resources/scripts/histreduce.py | hschwane/offline_production | e14a6493782f613b8bbe64217559765d5213dc1e | [
"MIT"
] | 1 | 2020-12-24T22:00:01.000Z | 2020-12-24T22:00:01.000Z | MuonGun/resources/scripts/histreduce.py | hschwane/offline_production | e14a6493782f613b8bbe64217559765d5213dc1e | [
"MIT"
] | null | null | null | MuonGun/resources/scripts/histreduce.py | hschwane/offline_production | e14a6493782f613b8bbe64217559765d5213dc1e | [
"MIT"
] | 3 | 2020-07-17T09:20:29.000Z | 2021-03-30T16:44:18.000Z | #!/usr/bin/env python
"""
Add all (potentially gigantic) histograms in a group of files.
"""
import dashi
import tables
import os, sys, operator, shutil
from optparse import OptionParser
parser = OptionParser(usage="%prog [OPTIONS] infiles outfile", description=__doc__)
parser.add_option("--blocksize", dest="blocksize", type=int, default=2048)
opts, args = parser.parse_args()
if len(args) < 2:
parser.error("You must specify at least one output and one input file")
infiles, outfile = args[:-1], args[-1]
if os.path.exists(outfile):
parser.error("%s already exists!" % outfile)
shutil.copy(infiles[0], outfile)
from collections import defaultdict
paths = defaultdict(list)
for fname in infiles[1:]:
with tables.openFile(fname) as hdf:
for group in hdf.walkNodes(where='/', classname='Group'):
if 'ndim' in group._v_attrs: # a dashi histogram
path = group._v_pathname
paths[path].append(fname)
def histadd(sourceGroup, destGroup, blocksize=1):
    """
    Add dashi histograms stored in HDF5 groups.

    Accumulates the bin contents and squared weights of *sourceGroup* into
    *destGroup* in place, streaming in chunk-aligned slices so that
    arbitrarily large histograms never have to fit in memory, then sums the
    bookkeeping attributes.

    :param sourceGroup: HDF5 group holding the histogram to read from
    :param destGroup: HDF5 group holding the histogram to accumulate into
    :param blocksize: operate on blocksize I/O chunks at a time
    """
    # Bug fix: `reduce` is a builtin in Python 2 only; under Python 3 the
    # original code raised NameError. functools.reduce exists on both.
    from functools import reduce
    for arr in '_h_bincontent', '_h_squaredweights':
        source = sourceGroup._v_children[arr]
        dest = destGroup._v_children[arr]
        # Read/write in multiples of the on-disk chunk shape for efficient I/O.
        chunksize = blocksize*reduce(operator.mul, dest.chunkshape)
        size = reduce(operator.mul, dest.shape)
        for i in range(0, size, chunksize):
            dest[i:i+chunksize] += source[i:i+chunksize]
    # Entry counts and NaN bookkeeping add linearly.
    for prop in 'nentries', 'nans', 'nans_wgt', 'nans_sqwgt':
        destGroup._v_attrs[prop] += sourceGroup._v_attrs[prop]
# Accumulate each remaining input file into the copy of the first input
# that now lives at `outfile`, one histogram group at a time.
with tables.openFile(outfile, 'a') as ohdf:
    # Bug fix: dict.iteritems() exists only in Python 2; .items() behaves
    # the same on both Python 2 and 3 for this iterate-only use.
    for path, fnames in paths.items():
        print(path)
        destGroup = ohdf.getNode(path)
        for fname in fnames:
            with tables.openFile(fname) as hdf:
                histadd(hdf.getNode(path), destGroup, opts.blocksize)
| 31.070175 | 83 | 0.728967 |
db7e13c9886abafe9915d05b01539badc566a636 | 2,108 | py | Python | procrastinate/exceptions.py | ignaciocabeza/procrastinate | 95ba8c7acdf39aa7a1216c19903802b4f65b65d1 | [
"MIT"
] | null | null | null | procrastinate/exceptions.py | ignaciocabeza/procrastinate | 95ba8c7acdf39aa7a1216c19903802b4f65b65d1 | [
"MIT"
] | null | null | null | procrastinate/exceptions.py | ignaciocabeza/procrastinate | 95ba8c7acdf39aa7a1216c19903802b4f65b65d1 | [
"MIT"
] | null | null | null | import datetime
| 22.913043 | 84 | 0.675047 |
db7edea364132ddeeca859f58229a42b6ea2f0ae | 534 | py | Python | config/settings/local.py | vyshakTs/STORE_MANAGEMENT_SYSTEM | b6b82a02c0b512083c35a8656e191436552569a9 | [
"CC0-1.0"
] | null | null | null | config/settings/local.py | vyshakTs/STORE_MANAGEMENT_SYSTEM | b6b82a02c0b512083c35a8656e191436552569a9 | [
"CC0-1.0"
] | null | null | null | config/settings/local.py | vyshakTs/STORE_MANAGEMENT_SYSTEM | b6b82a02c0b512083c35a8656e191436552569a9 | [
"CC0-1.0"
] | null | null | null | from .base import *
# Local-development settings: verbose error pages, console-only email.
DEBUG = True
# Print outgoing mail to stdout instead of actually sending it.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Local PostgreSQL instance. Dev-only credentials -- never reuse in production.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'SMS',
        'USER': 'postgres',
        'PASSWORD': 'password',
        'HOST': 'localhost',
        'PORT': '',  # left empty to use the driver's default port
    }
}
# Development-only helper apps added on top of the base INSTALLED_APPS.
INSTALLED_APPS += [
    'debug_toolbar.apps.DebugToolbarConfig',
    'django_extensions',
]
ALLOWED_HOSTS += ['.herokuapp.com']
# Loads SECRET_KEY from .env file
# SECRET_KEY = get_env_variable('SECRET_KEY')
| 19.777778 | 64 | 0.617978 |
db7efcd3ba8afeab68792a36832e16d7660931cd | 1,097 | py | Python | question3.py | haojunsng/foodpanda-dataeng | b1b9a5c615113a1b8727c9c7dfe7ad3e50059428 | [
"MIT"
] | null | null | null | question3.py | haojunsng/foodpanda-dataeng | b1b9a5c615113a1b8727c9c7dfe7ad3e50059428 | [
"MIT"
] | null | null | null | question3.py | haojunsng/foodpanda-dataeng | b1b9a5c615113a1b8727c9c7dfe7ad3e50059428 | [
"MIT"
] | null | null | null | from functions import get_df, write_df
import geopy
from geopy import distance
"""
The function question3 takes in the latitude and longitude of potential distress locations,
and returns the nearest port with essential provisions such as water, fuel_oil and diesel.
"""
if __name__ == "__main__":
question3("foodpanda_tables", 32.610982, -38.706256)
| 36.566667 | 143 | 0.71103 |
db808d5da5102b2f6086cfb47bc515cc8e85e1ce | 6,587 | py | Python | plugins/aea-cli-benchmark/aea_cli_benchmark/case_acn_communication/case.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | null | null | null | plugins/aea-cli-benchmark/aea_cli_benchmark/case_acn_communication/case.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | null | null | null | plugins/aea-cli-benchmark/aea_cli_benchmark/case_acn_communication/case.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Check amount of time for acn connection communications."""
import asyncio
import logging
import os
import time
from contextlib import contextmanager
from tempfile import TemporaryDirectory
from typing import Callable, List, Tuple, Union
from aea_cli_benchmark.case_acn_communication.utils import (
DEFAULT_DELEGATE_PORT,
DEFAULT_MAILBOX_PORT,
DEFAULT_NODE_PORT,
_make_libp2p_client_connection,
_make_libp2p_connection,
_make_libp2p_mailbox_connection,
)
from aea.connections.base import Connection
from aea.mail.base import Envelope
from packages.fetchai.protocols.default.message import DefaultMessage
def make_envelope(from_addr: str, to_addr: str) -> Envelope:
    """Build an envelope from *from_addr* to *to_addr* carrying a fixed
    DefaultMessage BYTES payload (b"hello")."""
    payload = DefaultMessage(
        dialogue_reference=("", ""),
        message_id=1,
        target=0,
        performative=DefaultMessage.Performative.BYTES,
        content=b"hello",
    )
    return Envelope(to=to_addr, sender=from_addr, message=payload)
def run(connection: str, run_times: int = 10) -> List[Tuple[str, Union[int, float]]]:
"""Check construction time and memory usage."""
logging.basicConfig(level=logging.CRITICAL)
cwd = os.getcwd()
try:
if connection == "p2pnode":
elif connection == "client":
elif connection == "mailbox":
else:
raise ValueError(f"Unsupported connection: {connection}")
with TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
coro = _run(con_maker)
first_time, second_time = asyncio.get_event_loop().run_until_complete(coro)
return [
("first time (seconds)", first_time),
("second time (seconds)", second_time),
]
finally:
os.chdir(cwd)
| 30.780374 | 93 | 0.591316 |
db80f4198878eb7bd4645b74c2bea6781e993672 | 4,663 | py | Python | examples/pybullet/vr_kuka_setup.py | q4a/bullet3 | b077f74f5675fb9ca7bafd238f097f87bf6c0367 | [
"Zlib"
] | 12 | 2017-08-24T05:58:53.000Z | 2021-07-15T17:32:26.000Z | examples/pybullet/vr_kuka_setup.py | mofed8461/BulletPhysics-EarthQuakeSimulation | d411684d0293a18039d4180f5bc8dab33d063fce | [
"Zlib"
] | null | null | null | examples/pybullet/vr_kuka_setup.py | mofed8461/BulletPhysics-EarthQuakeSimulation | d411684d0293a18039d4180f5bc8dab33d063fce | [
"Zlib"
] | 2 | 2018-01-13T07:49:58.000Z | 2020-10-21T02:48:25.000Z | import pybullet as p
# Attach to an already-running physics server (shared-memory transport);
# the UDP variant is kept commented out for remote setups.
#p.connect(p.UDP,"192.168.86.100")
p.connect(p.SHARED_MEMORY)
p.resetSimulation()
# Static environment. loadURDF positional args after the file name are the
# base position (x, y, z) and base orientation quaternion (x, y, z, w).
objects = [p.loadURDF("plane.urdf", 0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("samurai.urdf", 0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000)]
# PR2 gripper: pose its joints, then pin its base in space with a fixed constraint.
objects = [p.loadURDF("pr2_gripper.urdf", 0.500000,0.300006,0.700000,-0.000000,-0.000000,-0.000031,1.000000)]
pr2_gripper = objects[0]
print ("pr2_gripper=")
print (pr2_gripper)
jointPositions=[ 0.550569, 0.000000, 0.549657, 0.000000 ]
for jointIndex in range (p.getNumJoints(pr2_gripper)):
    p.resetJointState(pr2_gripper,jointIndex,jointPositions[jointIndex])
pr2_cid = p.createConstraint(pr2_gripper,-1,-1,-1,p.JOINT_FIXED,[0,0,0],[0.2,0,0],[0.500000,0.300006,0.700000])
print ("pr2_cid")
print (pr2_cid)
# KUKA iiwa arm: pose the joints and hold them there with position control.
objects = [p.loadURDF("kuka_iiwa/model_vr_limits.urdf", 1.400000,-0.200000,0.600000,0.000000,0.000000,0.000000,1.000000)]
kuka = objects[0]
jointPositions=[ -0.000000, -0.000000, 0.000000, 1.570793, 0.000000, -1.036725, 0.000001 ]
for jointIndex in range (p.getNumJoints(kuka)):
    p.resetJointState(kuka,jointIndex,jointPositions[jointIndex])
    p.setJointMotorControl2(kuka,jointIndex,p.POSITION_CONTROL,jointPositions[jointIndex],0)
# A small stack of lego bricks to manipulate.
objects = [p.loadURDF("lego/lego.urdf", 1.000000,-0.200000,0.700000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("lego/lego.urdf", 1.000000,-0.200000,0.800000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("lego/lego.urdf", 1.000000,-0.200000,0.900000,0.000000,0.000000,0.000000,1.000000)]
# WSG-50 gripper, posed and then rigidly attached to the KUKA's link 6.
objects = p.loadSDF("gripper/wsg50_one_motor_gripper_new_free_base.sdf")
kuka_gripper = objects[0]
print ("kuka gripper=")
print(kuka_gripper)
p.resetBasePositionAndOrientation(kuka_gripper,[0.923103,-0.200000,1.250036],[-0.000000,0.964531,-0.000002,-0.263970])
jointPositions=[ 0.000000, -0.011130, -0.206421, 0.205143, -0.009999, 0.000000, -0.010055, 0.000000 ]
for jointIndex in range (p.getNumJoints(kuka_gripper)):
    p.resetJointState(kuka_gripper,jointIndex,jointPositions[jointIndex])
    p.setJointMotorControl2(kuka_gripper,jointIndex,p.POSITION_CONTROL,jointPositions[jointIndex],0)
kuka_cid = p.createConstraint(kuka, 6, kuka_gripper,0,p.JOINT_FIXED, [0,0,0], [0,0,0.05],[0,0,0])
# Props: a row of jenga blocks and a table with small objects on it.
objects = [p.loadURDF("jenga/jenga.urdf", 1.300000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("jenga/jenga.urdf", 1.200000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("jenga/jenga.urdf", 1.100000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("jenga/jenga.urdf", 1.000000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("jenga/jenga.urdf", 0.900000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("jenga/jenga.urdf", 0.800000,-0.700000,0.750000,0.000000,0.707107,0.000000,0.707107)]
objects = [p.loadURDF("table/table.urdf", 1.000000,-0.200000,0.000000,0.000000,0.000000,0.707107,0.707107)]
objects = [p.loadURDF("teddy_vhacd.urdf", 1.050000,-0.500000,0.700000,0.000000,0.000000,0.707107,0.707107)]
objects = [p.loadURDF("cube_small.urdf", 0.950000,-0.100000,0.700000,0.000000,0.000000,0.707107,0.707107)]
objects = [p.loadURDF("sphere_small.urdf", 0.850000,-0.400000,0.700000,0.000000,0.000000,0.707107,0.707107)]
objects = [p.loadURDF("duck_vhacd.urdf", 0.850000,-0.400000,0.900000,0.000000,0.000000,0.707107,0.707107)]
# Shelf and a second cluster of small objects.
objects = p.loadSDF("kiva_shelf/model.sdf")
ob = objects[0]
p.resetBasePositionAndOrientation(ob,[0.000000,1.000000,1.204500],[0.000000,0.000000,0.000000,1.000000])
objects = [p.loadURDF("teddy_vhacd.urdf", -0.100000,0.600000,0.850000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("sphere_small.urdf", -0.100000,0.955006,1.169706,0.633232,-0.000000,-0.000000,0.773962)]
objects = [p.loadURDF("cube_small.urdf", 0.300000,0.600000,0.850000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("table_square/table_square.urdf", -1.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000)]
ob = objects[0]
jointPositions=[ 0.000000 ]
for jointIndex in range (p.getNumJoints(ob)):
    p.resetJointState(ob,jointIndex,jointPositions[jointIndex])
# Husky mobile base with all wheel joints zeroed.
objects = [p.loadURDF("husky/husky.urdf", 2.000000,-5.000000,1.000000,0.000000,0.000000,0.000000,1.000000)]
ob = objects[0]
jointPositions=[ 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000 ]
for jointIndex in range (p.getNumJoints(ob)):
    p.resetJointState(ob,jointIndex,jointPositions[jointIndex])
# Enable gravity (the first call zeroes it, the second sets -10 m/s^2 in z),
# take one simulation step, then detach from the shared server.
p.setGravity(0.000000,0.000000,0.000000)
p.setGravity(0,0,-10)
p.stepSimulation()
p.disconnect()
| 58.2875 | 121 | 0.749517 |
db83659c6d0ac1aa7ea69a87f18b5fd2867e5ddc | 3,651 | py | Python | genomics_algo/utilities/string_cmp.py | SvoONs/genomics_algo | 3174c1e9e685db12c5849ce5c7e3411f1922a4be | [
"MIT"
] | null | null | null | genomics_algo/utilities/string_cmp.py | SvoONs/genomics_algo | 3174c1e9e685db12c5849ce5c7e3411f1922a4be | [
"MIT"
] | 38 | 2020-11-11T21:26:56.000Z | 2021-03-20T23:25:49.000Z | genomics_algo/utilities/string_cmp.py | SvoONs/genomics_algo | 3174c1e9e685db12c5849ce5c7e3411f1922a4be | [
"MIT"
def longest_common_prefix(s1: str, s2: str) -> str:
    """
    Return the longest substring that both strings start with.

    s1: First string to compare
    s2: Second string to compare
    Returns:
        Longest common prefix between s1 and s2
    >>> longest_common_prefix("ACTA", "GCCT")
    ''
    >>> longest_common_prefix("ACTA", "ACT")
    'ACT'
    >>> longest_common_prefix("ACT", "ACTA")
    'ACT'
    >>> longest_common_prefix("GATA", "GAAT")
    'GA'
    >>> longest_common_prefix("ATGA", "")
    ''
    >>> longest_common_prefix("", "GCCT")
    ''
    >>> longest_common_prefix("GCCT", "GCCT")
    'GCCT'
    """
    # Walk both strings in lockstep; zip stops at the shorter one.
    match_len = 0
    for ch1, ch2 in zip(s1, s2):
        if ch1 != ch2:
            break
        match_len += 1
    return s1[:match_len]
def longest_common_suffix(s1: str, s2: str) -> str:
    """
    Return the longest substring that both strings end with.

    s1: First string to compare
    s2: Second string to compare
    Returns:
        Longest common suffix between s1 and s2
    >>> longest_common_suffix("ACTA", "GCCT")
    ''
    >>> longest_common_suffix("ACTA", "CTA")
    'CTA'
    >>> longest_common_suffix("CTA", "ACTA")
    'CTA'
    >>> longest_common_suffix("GATAT", "GAATAT")
    'ATAT'
    >>> longest_common_suffix("ACTA", "")
    ''
    >>> longest_common_suffix("", "GCCT")
    ''
    >>> longest_common_suffix("GCCT", "GCCT")
    'GCCT'
    """
    # Compare characters from the right end inward until they diverge.
    matched = 0
    limit = min(len(s1), len(s2))
    while matched < limit and s1[len(s1) - 1 - matched] == s2[len(s2) - 1 - matched]:
        matched += 1
    # matched == 0 yields s1[len(s1):], i.e. the empty string.
    return s1[len(s1) - matched:]
def find_hamming_distance(s1: str, s2: str) -> int:
    """Compute the Hamming distance between two strings of equal length
    >>> find_hamming_distance("ATG", "ATC")
    1
    >>> find_hamming_distance("ATG", "TGA")
    3
    >>> find_hamming_distance("A", "A")
    0
    >>> find_hamming_distance("ATG", "ATG")
    0
    >>> find_hamming_distance("", "")
    0
    >>> find_hamming_distance("GAGGTAGCGGCGTTTAAC", "GTGGTAACGGGGTTTAAC")
    3
    """
    assert len(s1) == len(s2)
    # Count positions where the paired characters differ (bools sum as 0/1).
    return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))
def find_levenshtein_distance(s1: str, s2: str) -> int:
    """Compute the Levenshtein distance between two strings (i.e., minimum number
    of edits including substitution, insertion and deletion needed in a string to
    turn it into another)
    >>> find_levenshtein_distance("AT", "")
    2
    >>> find_levenshtein_distance("AT", "ATC")
    1
    >>> find_levenshtein_distance("ATG", "ATC")
    1
    >>> find_levenshtein_distance("ATG", "TGA")
    2
    >>> find_levenshtein_distance("ATG", "ATG")
    0
    >>> find_levenshtein_distance("", "")
    0
    >>> find_levenshtein_distance("GAGGTAGCGGCGTTTAAC", "GTGGTAACGGGGTTTAAC")
    3
    >>> find_levenshtein_distance("TGGCCGCGCAAAAACAGC", "TGACCGCGCAAAACAGC")
    2
    >>> find_levenshtein_distance("GCGTATGCGGCTAACGC", "GCTATGCGGCTATACGC")
    2
    """
    rows, cols = len(s1) + 1, len(s2) + 1
    # dist[r][c] == edit distance between the prefixes s1[:r] and s2[:c].
    dist = [[0] * cols for _ in range(rows)]
    # Base cases: transforming a prefix into the empty string (deletions)
    # or the empty string into a prefix (insertions).
    for r in range(rows):
        dist[r][0] = r
    for c in range(cols):
        dist[0][c] = c
    for r in range(1, rows):
        for c in range(1, cols):
            substitution_cost = int(s1[r - 1] != s2[c - 1])
            dist[r][c] = min(
                dist[r][c - 1] + 1,                       # deletion in pattern
                dist[r - 1][c] + 1,                       # insertion in pattern
                dist[r - 1][c - 1] + substitution_cost,   # (mis)match
            )
    # Bottom-right cell holds the distance between the full strings.
    return dist[rows - 1][cols - 1]
| 28.97619 | 81 | 0.586962 |
db836b59bf5fd8d655aefd6e4020d61dca742b2c | 11,906 | py | Python | whyqd/parsers/wrangling_parser.py | whythawk/whyqd | 8ee41768d6788318458d41831200594b61777ccc | [
"BSD-3-Clause"
] | 17 | 2020-02-21T14:41:24.000Z | 2022-01-31T20:25:53.000Z | whyqd/parsers/wrangling_parser.py | whythawk/whyqd | 8ee41768d6788318458d41831200594b61777ccc | [
"BSD-3-Clause"
] | null | null | null | whyqd/parsers/wrangling_parser.py | whythawk/whyqd | 8ee41768d6788318458d41831200594b61777ccc | [
"BSD-3-Clause"
] | null | null | null | from __future__ import annotations
from typing import Optional, Dict, List, Union, Type, TYPE_CHECKING
from datetime import date, datetime
import pandas as pd
import numpy as np
import re
import locale
# Force a UTF-8 US-English locale, presumably so locale-aware parsing later
# in this module behaves consistently -- TODO confirm against the full file.
try:
    locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
except locale.Error:
    # Readthedocs has a problem, but difficult to replicate: the en_US.UTF-8
    # locale is unavailable there, so fall back to the environment default.
    locale.setlocale(locale.LC_ALL, "")
from . import CoreScript
from ..models import ColumnModel
from ..types import MimeType
if TYPE_CHECKING:
from ..schema import Schema
from ..models import DataSourceModel
| 38.160256 | 128 | 0.534184 |
db83c7d51feb9c6d2d6569094bc6e9a0eb64b2ce | 432 | py | Python | 0x02-python-import_modules/2-args.py | FatChicken277/holbertonschool-higher_level_programming | 520d6310a5e2a874f8c5f5185d0fb769b6412e7c | [
"CNRI-Python"
] | null | null | null | 0x02-python-import_modules/2-args.py | FatChicken277/holbertonschool-higher_level_programming | 520d6310a5e2a874f8c5f5185d0fb769b6412e7c | [
"CNRI-Python"
] | null | null | null | 0x02-python-import_modules/2-args.py | FatChicken277/holbertonschool-higher_level_programming | 520d6310a5e2a874f8c5f5185d0fb769b6412e7c | [
"CNRI-Python"
] | null | null | null | #!/usr/bin/python3
if __name__ == "__main__":
import sys
args(sys.argv)
| 25.411765 | 52 | 0.518519 |
db847e24bb7a6401d0b23e464a5ea391ad69edb8 | 89 | py | Python | taurex/data/profiles/__init__.py | rychallener/TauREx3_public | eb0eeeeca8f47e5e7d64d8d70b43a3af370b7677 | [
"BSD-3-Clause"
] | 10 | 2019-12-18T09:19:16.000Z | 2021-06-21T11:02:06.000Z | taurex/data/profiles/__init__.py | rychallener/TauREx3_public | eb0eeeeca8f47e5e7d64d8d70b43a3af370b7677 | [
"BSD-3-Clause"
] | 10 | 2020-03-24T18:02:15.000Z | 2021-08-23T20:32:09.000Z | taurex/data/profiles/__init__.py | rychallener/TauREx3_public | eb0eeeeca8f47e5e7d64d8d70b43a3af370b7677 | [
"BSD-3-Clause"
] | 8 | 2020-03-26T14:16:42.000Z | 2021-12-18T22:11:25.000Z | """
These modules contain sub-modules related to defining various profiles in a model
""" | 29.666667 | 81 | 0.775281 |
db8615ff95bbb42756435769fd0cc3b6f45c202c | 503 | py | Python | day-2/part_b.py | yuetsin/AoC | a7c5aea245ee6e77312352907fc4d1ac8eac2d3a | [
"CC0-1.0"
] | null | null | null | day-2/part_b.py | yuetsin/AoC | a7c5aea245ee6e77312352907fc4d1ac8eac2d3a | [
"CC0-1.0"
] | null | null | null | day-2/part_b.py | yuetsin/AoC | a7c5aea245ee6e77312352907fc4d1ac8eac2d3a | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
import re

# AoC day 2, part B: a policy line like "1-3 a: abcde" is valid when exactly
# one of the two (1-based) positions in the password holds the given character.
lines = get_input()
count = 0
for line in lines:
    lower, upper, char, password = re.split(r'-|: | ', line)
    lower, upper = int(lower) - 1, int(upper) - 1  # convert to 0-based indices
    try:
        # XOR: exactly one of the two positions may match.
        if (password[lower] == char) ^ (password[upper] == char):
            count += 1
    except IndexError:
        # A policy index past the end of the password: skip the line, exactly
        # as before. Was a bare `except:`, which would also have swallowed
        # KeyboardInterrupt/SystemExit and masked real bugs.
        pass
print(count)
| 20.12 | 67 | 0.554672 |
db867f235dd317f96cc49c5953d0a91169b52a4a | 3,977 | py | Python | src/tone.py | devanshslnk/HelpOff | bbeddc8bbb9d26bbc85f572d4769fc9fc92d5c4a | [
"MIT"
] | 2 | 2018-10-08T06:01:42.000Z | 2021-06-22T08:35:11.000Z | src/tone.py | devanshslnk/HelpOff | bbeddc8bbb9d26bbc85f572d4769fc9fc92d5c4a | [
"MIT"
] | null | null | null | src/tone.py | devanshslnk/HelpOff | bbeddc8bbb9d26bbc85f572d4769fc9fc92d5c4a | [
"MIT"
] | 3 | 2018-10-09T19:04:14.000Z | 2019-01-22T11:59:28.000Z | from __future__ import print_function
import json
from os.path import join, dirname
from watson_developer_cloud import ToneAnalyzerV3
from watson_developer_cloud.tone_analyzer_v3 import ToneInput
from pprint import pprint
# If service instance provides API key authentication
# service = ToneAnalyzerV3(
# ## url is optional, and defaults to the URL below. Use the correct URL for your region.
# url='https://gateway.watsonplatform.net/tone-analyzer/api',
# version='2017-09-21',
# iam_apikey='your_apikey')
# NOTE(review): live service credentials are hard-coded and committed to
# source control below -- they should be revoked and loaded from the
# environment or a config file instead.
service = ToneAnalyzerV3(
    ## url is optional, and defaults to the URL below. Use the correct URL for your region.
    # url='https://gateway.watsonplatform.net/tone-analyzer/api',
    username='f0ec47cc-5191-4421-8fca-2395917e1640',
    password='q7JOpjOabiY5',
    version='2017-09-21')
# print("\ntone_chat() example 1:\n")
# utterances = [{
# 'text': 'I am very happy.',
# 'user': 'glenn'
# }, {
# 'text': 'It is a good day.',
# 'user': 'glenn'
# }]
# tone_chat = service.tone_chat(utterances).get_result()
# print(json.dumps(tone_chat, indent=2))
# print("\ntone() example 1:\n")
# print(
# json.dumps(
# service.tone(
# tone_input='I am very happy. It is a good day.',
# content_type="text/plain").get_result(),
# indent=2))
# print("\ntone() example 2:\n")
# with open(join(dirname(__file__),
# '../resources/tone-example.json')) as tone_json:
# tone = service.tone(json.load(tone_json)['text'], "text/plain").get_result()
# print(json.dumps(tone, indent=2))
# print("\ntone() example 3:\n")
# with open(join(dirname(__file__),
# '../resources/tone-example.json')) as tone_json:
# tone = service.tone(
# tone_input=json.load(tone_json)['text'],
# content_type='text/plain',
# sentences=True).get_result()
# print(json.dumps(tone, indent=2))
# print("\ntone() example 4:\n")
# with open(join(dirname(__file__),
# '../resources/tone-example.json')) as tone_json:
# tone = service.tone(
# tone_input=json.load(tone_json),
# content_type='application/json').get_result()
# print(json.dumps(tone, indent=2))
# print("\ntone() example 5:\n")
# with open(join(dirname(__file__),
# '../resources/tone-example-html.json')) as tone_html:
# tone = service.tone(
# json.load(tone_html)['text'], content_type='text/html').get_result()
# print(json.dumps(tone, indent=2))
# print("\ntone() example 6 with GDPR support:\n")
# service.set_detailed_response(True)
# with open(join(dirname(__file__),
# '../resources/tone-example-html.json')) as tone_html:
# tone = service.tone(
# json.load(tone_html)['text'],
# content_type='text/html',
# headers={
# 'Custom-Header': 'custom_value'
# })
# print(tone)
# print(tone.get_headers())
# print(tone.get_result())
# print(tone.get_status_code())
# service.set_detailed_response(False)
# print("\ntone() example 7:\n")
# Analyse one hard-coded sample document with the general-purpose tone
# endpoint and pretty-print the JSON result to stdout.
test_tone="Hi Team, The times are difficult! Our sales have been disappointing for the past three quarters for our data analytics product suite. We have a competitive data analytics product suite in the industry. However, we are not doing a good job at selling it, and this is really frustrating.We are missing critical sales opportunities. We cannot blame the economy for our lack of execution. Our clients need analytical tools to change their current business outcomes. In fact, it is in times such as this, our clients want to get the insights they need to turn their businesses around. It is disheartening to see that we are failing at closing deals, in such a hungry market. Let's buckle up and execute.Jennifer BakerSales Leader, North-East region"
tone_input = ToneInput(test_tone)
result = service.tone(tone_input=tone_input, content_type="application/json").get_result()
# print(type(json.dumps(tone, indent=2)))
pprint(result)
db8707b6679e39765f15056eb4cf61c517a7c762 | 9,435 | py | Python | hcloud/servers/domain.py | usmannasir/hcloud-python | 2a90551fb1c4d9d8a6aea5d8b6601a7c1360494d | [
"MIT"
] | 1 | 2019-10-23T01:00:08.000Z | 2019-10-23T01:00:08.000Z | hcloud/servers/domain.py | usmannasir/hcloud-python | 2a90551fb1c4d9d8a6aea5d8b6601a7c1360494d | [
"MIT"
] | null | null | null | hcloud/servers/domain.py | usmannasir/hcloud-python | 2a90551fb1c4d9d8a6aea5d8b6601a7c1360494d | [
"MIT"
] | 1 | 2019-06-19T17:53:10.000Z | 2019-06-19T17:53:10.000Z | # -*- coding: utf-8 -*-
from hcloud.core.domain import BaseDomain
from hcloud.helpers.descriptors import ISODateTime
| 30.337621 | 147 | 0.598728 |
db874da91d4a01e76e9bd18e99b073b83ddddd62 | 6,050 | py | Python | AutomationFramework/tests/interfaces/test_if_subif.py | sbarguil/Testing-framework | f3ef69f1c4f0aeafd02e222d846162c711783b15 | [
"Apache-2.0"
] | 1 | 2020-04-23T15:22:16.000Z | 2020-04-23T15:22:16.000Z | AutomationFramework/tests/interfaces/test_if_subif.py | sbarguil/Testing-framework | f3ef69f1c4f0aeafd02e222d846162c711783b15 | [
"Apache-2.0"
] | 44 | 2020-08-13T19:35:41.000Z | 2021-03-01T09:08:00.000Z | AutomationFramework/tests/interfaces/test_if_subif.py | sbarguil/Testing-framework | f3ef69f1c4f0aeafd02e222d846162c711783b15 | [
"Apache-2.0"
] | 6 | 2020-04-23T15:29:38.000Z | 2022-03-03T14:23:38.000Z | import pytest
from AutomationFramework.page_objects.interfaces.interfaces import Interfaces
from AutomationFramework.tests.base_test import BaseTest
| 72.02381 | 120 | 0.615372 |
db8779ff5f2f1e236cb5f3cfe96c63ab0de64f28 | 5,766 | py | Python | keystone/common/sql/migrate_repo/versions/001_add_initial_tables.py | sanket4373/keystone | 7cf7e7497729803f0470167315af9349b88fe0ec | [
"Apache-2.0"
] | null | null | null | keystone/common/sql/migrate_repo/versions/001_add_initial_tables.py | sanket4373/keystone | 7cf7e7497729803f0470167315af9349b88fe0ec | [
"Apache-2.0"
] | null | null | null | keystone/common/sql/migrate_repo/versions/001_add_initial_tables.py | sanket4373/keystone | 7cf7e7497729803f0470167315af9349b88fe0ec | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
| 36.961538 | 79 | 0.620534 |
db885085ce16df342f9eaff7d4d323eb7dc1a85c | 15,984 | py | Python | boa3_test/examples/ico.py | DanPopa46/neo3-boa | e4ef340744b5bd25ade26f847eac50789b97f3e9 | [
"Apache-2.0"
] | null | null | null | boa3_test/examples/ico.py | DanPopa46/neo3-boa | e4ef340744b5bd25ade26f847eac50789b97f3e9 | [
"Apache-2.0"
] | null | null | null | boa3_test/examples/ico.py | DanPopa46/neo3-boa | e4ef340744b5bd25ade26f847eac50789b97f3e9 | [
"Apache-2.0"
] | null | null | null | from typing import Any, List, Union
from boa3.builtin import NeoMetadata, metadata, public
from boa3.builtin.contract import Nep17TransferEvent
from boa3.builtin.interop.blockchain import get_contract
from boa3.builtin.interop.contract import GAS, NEO, call_contract
from boa3.builtin.interop.runtime import calling_script_hash, check_witness
from boa3.builtin.interop.storage import delete, get, put
from boa3.builtin.type import UInt160
# -------------------------------------------
# METADATA
# -------------------------------------------
# -------------------------------------------
# Storage Key Prefixes
# -------------------------------------------
KYC_WHITELIST_PREFIX = b'KYCWhitelistApproved'
TOKEN_TOTAL_SUPPLY_PREFIX = b'TokenTotalSupply'
TRANSFER_ALLOWANCE_PREFIX = b'TransferAllowancePrefix_'
# -------------------------------------------
# TOKEN SETTINGS
# -------------------------------------------
# Script hash of the contract owner
TOKEN_OWNER = UInt160()
# Symbol of the Token
TOKEN_SYMBOL = 'ICO'
# Number of decimal places
TOKEN_DECIMALS = 8
# Initial Supply of tokens in the system
TOKEN_INITIAL_SUPPLY = 10_000_000 * 100_000_000 # 10m total supply * 10^8 (decimals)
# -------------------------------------------
# Events
# -------------------------------------------
on_transfer = Nep17TransferEvent
# -------------------------------------------
# Methods
# -------------------------------------------
def is_administrator() -> bool:
    """
    Validates if the invoker has administrative rights

    :return: whether the contract's invoker is an administrator
    """
    # The contract owner is the sole administrator; check_witness verifies
    # that TOKEN_OWNER signed (witnessed) the current transaction.
    return check_witness(TOKEN_OWNER)
def is_valid_address(address: UInt160) -> bool:
    """
    Validates if the address passed through the kyc.

    :return: whether the given address is validated by kyc
    """
    # A positive integer stored under KYC_WHITELIST_PREFIX + address marks
    # the address as KYC-approved; a missing key reads as zero.
    return get(KYC_WHITELIST_PREFIX + address).to_int() > 0
# -------------------------------------------
# Public methods from NEP5.1
# -------------------------------------------
def post_transfer(from_address: Union[UInt160, None], to_address: Union[UInt160, None], amount: int, data: Any):
    """
    Checks if the one receiving NEP17 tokens is a smart contract and if it's one the onPayment method will be called

    :param from_address: the address of the sender
    :type from_address: UInt160
    :param to_address: the address of the receiver
    :type to_address: UInt160
    :param amount: the amount of cryptocurrency that is being sent
    :type amount: int
    :param data: any pertinent data that might validate the transaction
    :type data: Any
    """
    # NOTE: `isinstance(x, None)` is a neo3-boa compiler idiom for a None
    # check (the `is` operator is not supported yet -- see the TODOs below);
    # this is not valid standard CPython. Do not "fix" it without the boa
    # compiler supporting `is None`.
    if not isinstance(to_address, None):  # TODO: change to 'is not None' when `is` semantic is implemented
        contract = get_contract(to_address)
        if not isinstance(contract, None):  # TODO: change to 'is not None' when `is` semantic is implemented
            call_contract(to_address, 'onPayment', [from_address, amount, data])
# -------------------------------------------
# Public methods from KYC
# -------------------------------------------
| 33.509434 | 118 | 0.673486 |
db8859ce66203d2b7d494162105376778915c59d | 20,640 | py | Python | emotion_recognition.py | Partaourides/SERN | e6cc0a9a0cc3ac4b9a87e3ccdf5781792f85d718 | [
"MIT"
] | 10 | 2019-05-07T02:20:02.000Z | 2020-10-09T02:20:31.000Z | emotion_recognition.py | Partaourides/SERN | e6cc0a9a0cc3ac4b9a87e3ccdf5781792f85d718 | [
"MIT"
] | 2 | 2020-06-27T13:09:03.000Z | 2021-07-28T04:55:38.000Z | emotion_recognition.py | Partaourides/SERN | e6cc0a9a0cc3ac4b9a87e3ccdf5781792f85d718 | [
"MIT"
] | 1 | 2019-07-18T00:28:13.000Z | 2019-07-18T00:28:13.000Z | import os
# Restrict the script to run on CPU
os.environ ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# Import Keras Tensoflow Backend
# from keras import backend as K
import tensorflow as tf
# Configure it to use only specific CPU Cores
config = tf.ConfigProto(intra_op_parallelism_threads=4,
inter_op_parallelism_threads=4,
device_count={"CPU": 1, "GPU": 0},
allow_soft_placement=True)
# import tensorflow as tf
import numpy as np
from IEOMAP_dataset_AC import dataset, IeomapSentenceIterator
from sklearn.metrics import confusion_matrix
from models_AC import SentenceModel
import json
import os
| 58.971429 | 128 | 0.463275 |
db889090b0a80e5b1926c1a844e99f3562167374 | 1,779 | py | Python | dashboard/rpc/alias.py | flaree/Toxic-Cogs | e33c3fe3a81c86ef3c89928b0a977fae13b916a9 | [
"MIT"
] | null | null | null | dashboard/rpc/alias.py | flaree/Toxic-Cogs | e33c3fe3a81c86ef3c89928b0a977fae13b916a9 | [
"MIT"
] | null | null | null | dashboard/rpc/alias.py | flaree/Toxic-Cogs | e33c3fe3a81c86ef3c89928b0a977fae13b916a9 | [
"MIT"
] | null | null | null | import discord
from redbot.core.bot import Red
from redbot.core.commands import commands
from redbot.core.utils.chat_formatting import humanize_list
from .utils import permcheck, rpccheck
| 32.345455 | 81 | 0.540191 |
db88f0e02537c3b3ec61c4fbd738d9a4605bd04a | 6,939 | py | Python | train.py | hafezgh/music_classification | 68fa398b7d4455475d07ae17c3b6b94459a96ac7 | [
"MIT"
] | 1 | 2021-07-15T18:47:02.000Z | 2021-07-15T18:47:02.000Z | train.py | hafezgh/music_classification | 68fa398b7d4455475d07ae17c3b6b94459a96ac7 | [
"MIT"
] | null | null | null | train.py | hafezgh/music_classification | 68fa398b7d4455475d07ae17c3b6b94459a96ac7 | [
"MIT"
] | null | null | null | import torch
DEVICE = 'cuda'
import math
import torch.optim as optim
from model import *
import os
import copy, gzip, pickle, time
data_dir = './drive/MyDrive/music_classification/Data'
classes = os.listdir(data_dir+'/images_original')
| 40.343023 | 118 | 0.607292 |
db8a89f5042414f5dbf4f47067a5e2131c5f76b8 | 1,881 | py | Python | dlk/core/schedulers/__init__.py | cstsunfu/dlkit | 69e0efd372fa5c0ae5313124d0ba1ef55b535196 | [
"Apache-2.0"
] | null | null | null | dlk/core/schedulers/__init__.py | cstsunfu/dlkit | 69e0efd372fa5c0ae5313124d0ba1ef55b535196 | [
"Apache-2.0"
] | null | null | null | dlk/core/schedulers/__init__.py | cstsunfu/dlkit | 69e0efd372fa5c0ae5313124d0ba1ef55b535196 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 cstsunfu. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""schedulers"""
import importlib
import os
from dlk.utils.register import Register
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
import math
scheduler_config_register = Register("Schedule config register.")
scheduler_register = Register("Schedule register.")
# automatically import any Python files in the schedulers directory
schedulers_dir = os.path.dirname(__file__)
import_schedulers(schedulers_dir, "dlk.core.schedulers")
| 30.836066 | 87 | 0.701223 |
db8c048cea31b2b7400108b7a16a198179252811 | 24,553 | py | Python | projectq/backends/_qracksim/_simulator_test.py | vm6502q/ProjectQ | 1eac4b1f529551dfc1668443eba0c68dee54120b | [
"Apache-2.0"
] | 1 | 2019-08-29T19:04:27.000Z | 2019-08-29T19:04:27.000Z | projectq/backends/_qracksim/_simulator_test.py | vm6502q/ProjectQ | 1eac4b1f529551dfc1668443eba0c68dee54120b | [
"Apache-2.0"
] | 6 | 2019-01-27T17:05:25.000Z | 2020-02-24T00:15:59.000Z | projectq/backends/_qracksim/_simulator_test.py | vm6502q/ProjectQ | 1eac4b1f529551dfc1668443eba0c68dee54120b | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for projectq.backends._sim._simulator.py, using both the Python
and the C++ simulator as backends.
"""
import copy
import math
import cmath
import numpy
import pytest
import random
import scipy
import scipy.sparse
import scipy.sparse.linalg
from projectq import MainEngine
from projectq.cengines import (BasicEngine, BasicMapperEngine, DummyEngine,
LocalOptimizer, NotYetMeasuredError)
from projectq.ops import (All, Allocate, BasicGate, BasicMathGate, CNOT, C,
Command, H, Measure, QubitOperator, Rx, Ry, Rz, S,
TimeEvolution, Toffoli, X, Y, Z, Swap, SqrtSwap,
UniformlyControlledRy, UniformlyControlledRz)
from projectq.libs.math import (AddConstant,
AddConstantModN,
SubConstant,
SubConstantModN,
MultiplyByConstantModN)
from projectq.meta import Compute, Uncompute, Control, Dagger, LogicalQubitIDTag
from projectq.types import WeakQubitRef
from projectq.backends import Simulator
tolerance = 1e-6
class Mock1QubitGate(BasicGate):
def test_simulator_is_available(sim):
backend = DummyEngine(save_commands=True)
eng = MainEngine(backend, [])
qubit = eng.allocate_qubit()
Measure | qubit
qubit[0].__del__()
assert len(backend.received_commands) == 3
# Test that allocate, measure, basic math, and deallocate are available.
for cmd in backend.received_commands:
assert sim.is_available(cmd)
new_cmd = backend.received_commands[-1]
new_cmd.gate = Mock6QubitGate()
assert not sim.is_available(new_cmd)
new_cmd.gate = MockNoMatrixGate()
assert not sim.is_available(new_cmd)
new_cmd.gate = Mock1QubitGate()
assert sim.is_available(new_cmd)
new_cmd = backend.received_commands[-2]
assert len(new_cmd.qubits) == 1
new_cmd.gate = AddConstantModN(1, 2)
assert sim.is_available(new_cmd)
new_cmd.gate = MultiplyByConstantModN(1, 2)
assert sim.is_available(new_cmd)
#new_cmd.gate = DivideByConstantModN(1, 2)
#assert sim.is_available(new_cmd)
def test_simulator_cheat(sim):
# cheat function should return a tuple
assert isinstance(sim.cheat(), tuple)
# first entry is the qubit mapping.
# should be empty:
assert len(sim.cheat()[0]) == 0
# state vector should only have 1 entry:
assert len(sim.cheat()[1]) == 1
eng = MainEngine(sim, [])
qubit = eng.allocate_qubit()
# one qubit has been allocated
assert len(sim.cheat()[0]) == 1
assert sim.cheat()[0][0] == 0
assert len(sim.cheat()[1]) == 2
assert 1. == pytest.approx(abs(sim.cheat()[1][0]))
qubit[0].__del__()
# should be empty:
assert len(sim.cheat()[0]) == 0
# state vector should only have 1 entry:
assert len(sim.cheat()[1]) == 1
def test_simulator_functional_measurement(sim):
eng = MainEngine(sim, [])
qubits = eng.allocate_qureg(5)
# entangle all qubits:
H | qubits[0]
for qb in qubits[1:]:
CNOT | (qubits[0], qb)
All(Measure) | qubits
bit_value_sum = sum([int(qubit) for qubit in qubits])
assert bit_value_sum == 0 or bit_value_sum == 5
def test_simulator_measure_mapped_qubit(sim):
eng = MainEngine(sim, [])
qb1 = WeakQubitRef(engine=eng, idx=1)
qb2 = WeakQubitRef(engine=eng, idx=2)
cmd0 = Command(engine=eng, gate=Allocate, qubits=([qb1],))
cmd1 = Command(engine=eng, gate=X, qubits=([qb1],))
cmd2 = Command(engine=eng, gate=Measure, qubits=([qb1],), controls=[],
tags=[LogicalQubitIDTag(2)])
with pytest.raises(NotYetMeasuredError):
int(qb1)
with pytest.raises(NotYetMeasuredError):
int(qb2)
eng.send([cmd0, cmd1, cmd2])
eng.flush()
with pytest.raises(NotYetMeasuredError):
int(qb1)
assert int(qb2) == 1
def test_simulator_kqubit_exception(sim):
m1 = Rx(0.3).matrix
m2 = Rx(0.8).matrix
m3 = Ry(0.1).matrix
m4 = Rz(0.9).matrix.dot(Ry(-0.1).matrix)
m = numpy.kron(m4, numpy.kron(m3, numpy.kron(m2, m1)))
eng = MainEngine(sim, [])
qureg = eng.allocate_qureg(3)
with pytest.raises(Exception):
KQubitGate() | qureg
with pytest.raises(Exception):
H | qureg
def test_simulator_swap(sim):
eng = MainEngine(sim, [])
qubits1 = eng.allocate_qureg(1)
qubits2 = eng.allocate_qureg(1)
X | qubits1
Swap | (qubits1, qubits2)
All(Measure) | qubits1
All(Measure) | qubits2
assert (int(qubits1[0]) == 0) and (int(qubits2[0]) == 1)
SqrtSwap | (qubits1, qubits2)
SqrtSwap | (qubits1, qubits2)
All(Measure) | qubits1
All(Measure) | qubits2
assert (int(qubits1[0]) == 1) and (int(qubits2[0]) == 0)
def test_simulator_math(sim):
eng = MainEngine(sim, [])
qubits = eng.allocate_qureg(8)
AddConstant(1) | qubits;
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 1
AddConstantModN(10, 256) | qubits;
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 11
controls = eng.allocate_qureg(1)
# Control is off
C(AddConstantModN(10, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 11
# Turn control on
X | controls
C(AddConstantModN(10, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 21
SubConstant(5) | qubits;
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 16
C(SubConstantModN(10, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 6
# Turn control off
X | controls
C(SubConstantModN(10, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 6
MultiplyByConstantModN(2, 256) | qubits;
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 12
# Control is off
C(MultiplyByConstantModN(2, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 12
# Turn control on
X | controls
C(MultiplyByConstantModN(10, 256)) | (controls, qubits)
All(Measure) | qubits
value = 0
for i in range(len(qubits)):
value += int(qubits[i]) << i
assert value == 120
def test_simulator_probability(sim, mapper):
engine_list = [LocalOptimizer()]
if mapper is not None:
engine_list.append(mapper)
eng = MainEngine(sim, engine_list=engine_list)
qubits = eng.allocate_qureg(6)
All(H) | qubits
eng.flush()
bits = [0, 0, 1, 0, 1, 0]
for i in range(6):
assert (eng.backend.get_probability(bits[:i], qubits[:i]) ==
pytest.approx(0.5**i))
extra_qubit = eng.allocate_qubit()
with pytest.raises(RuntimeError):
eng.backend.get_probability([0], extra_qubit)
del extra_qubit
All(H) | qubits
Ry(2 * math.acos(math.sqrt(0.3))) | qubits[0]
eng.flush()
assert eng.backend.get_probability([0], [qubits[0]]) == pytest.approx(0.3)
Ry(2 * math.acos(math.sqrt(0.4))) | qubits[2]
eng.flush()
assert eng.backend.get_probability([0], [qubits[2]]) == pytest.approx(0.4)
assert (numpy.isclose(0.12, eng.backend.get_probability([0, 0], qubits[:3:2]), rtol=tolerance, atol=tolerance))
assert (numpy.isclose(0.18, eng.backend.get_probability([0, 1], qubits[:3:2]), rtol=tolerance, atol=tolerance))
assert (numpy.isclose(0.28, eng.backend.get_probability([1, 0], qubits[:3:2]), rtol=tolerance, atol=tolerance))
All(Measure) | qubits
def test_simulator_amplitude(sim, mapper):
engine_list = [LocalOptimizer()]
if mapper is not None:
engine_list.append(mapper)
eng = MainEngine(sim, engine_list=engine_list)
qubits = eng.allocate_qureg(6)
All(X) | qubits
All(H) | qubits
eng.flush()
bits = [0, 0, 1, 0, 1, 0]
polR, polPhi = cmath.polar(eng.backend.get_amplitude(bits, qubits))
while polPhi < 0:
polPhi += 2 * math.pi
assert polR == pytest.approx(1. / 8.)
bits = [0, 0, 0, 0, 1, 0]
polR2, polPhi2 = cmath.polar(eng.backend.get_amplitude(bits, qubits))
while polPhi2 < math.pi:
polPhi2 += 2 * math.pi
assert polR2 == pytest.approx(polR)
assert (polPhi2 - math.pi) == pytest.approx(polPhi)
bits = [0, 1, 1, 0, 1, 0]
polR3, polPhi3 = cmath.polar(eng.backend.get_amplitude(bits, qubits))
while polPhi3 < math.pi:
polPhi3 += 2 * math.pi
assert polR3 == pytest.approx(polR)
assert (polPhi3 - math.pi) == pytest.approx(polPhi)
All(H) | qubits
All(X) | qubits
Ry(2 * math.acos(0.3)) | qubits[0]
eng.flush()
bits = [0] * 6
polR, polPhi = cmath.polar(eng.backend.get_amplitude(bits, qubits))
assert polR == pytest.approx(0.3)
bits[0] = 1
polR, polPhi = cmath.polar(eng.backend.get_amplitude(bits, qubits))
assert (polR ==
pytest.approx(math.sqrt(0.91)))
All(Measure) | qubits
# raises if not all qubits are in the list:
with pytest.raises(RuntimeError):
eng.backend.get_amplitude(bits, qubits[:-1])
# doesn't just check for length:
with pytest.raises(RuntimeError):
eng.backend.get_amplitude(bits, qubits[:-1] + [qubits[0]])
extra_qubit = eng.allocate_qubit()
eng.flush()
# there is a new qubit now!
with pytest.raises(RuntimeError):
eng.backend.get_amplitude(bits, qubits)
def test_simulator_set_wavefunction(sim, mapper):
engine_list = [LocalOptimizer()]
if mapper is not None:
engine_list.append(mapper)
eng = MainEngine(sim, engine_list=engine_list)
qubits = eng.allocate_qureg(2)
wf = [0., 0., math.sqrt(0.2), math.sqrt(0.8)]
with pytest.raises(RuntimeError):
eng.backend.set_wavefunction(wf, qubits)
eng.flush()
eng.backend.set_wavefunction(wf, qubits)
assert pytest.approx(eng.backend.get_probability('1', [qubits[0]])) == .8
assert pytest.approx(eng.backend.get_probability('01', qubits)) == .2
assert pytest.approx(eng.backend.get_probability('1', [qubits[1]])) == 1.
All(Measure) | qubits
def test_simulator_set_wavefunction_always_complex(sim):
""" Checks that wavefunction is always complex """
eng = MainEngine(sim)
qubit = eng.allocate_qubit()
eng.flush()
wf = [1., 0]
eng.backend.set_wavefunction(wf, qubit)
Y | qubit
eng.flush()
amplitude = eng.backend.get_amplitude('1', qubit)
assert amplitude == pytest.approx(1j) or amplitude == pytest.approx(-1j)
def test_simulator_collapse_wavefunction(sim, mapper):
engine_list = [LocalOptimizer()]
if mapper is not None:
engine_list.append(mapper)
eng = MainEngine(sim, engine_list=engine_list)
qubits = eng.allocate_qureg(4)
# unknown qubits: raises
with pytest.raises(RuntimeError):
eng.backend.collapse_wavefunction(qubits, [0] * 4)
eng.flush()
eng.backend.collapse_wavefunction(qubits, [0] * 4)
assert pytest.approx(eng.backend.get_probability([0] * 4, qubits)) == 1.
All(H) | qubits[1:]
eng.flush()
assert pytest.approx(eng.backend.get_probability([0] * 4, qubits)) == .125
# impossible outcome: raises
with pytest.raises(RuntimeError):
eng.backend.collapse_wavefunction(qubits, [1] + [0] * 3)
eng.backend.collapse_wavefunction(qubits[:-1], [0, 1, 0])
probability = eng.backend.get_probability([0, 1, 0, 1], qubits)
assert probability == pytest.approx(.5)
eng.backend.set_wavefunction([1.] + [0.] * 15, qubits)
H | qubits[0]
CNOT | (qubits[0], qubits[1])
eng.flush()
eng.backend.collapse_wavefunction([qubits[0]], [1])
probability = eng.backend.get_probability([1, 1], qubits[0:2])
assert probability == pytest.approx(1.)
def test_simulator_no_uncompute_exception(sim):
eng = MainEngine(sim, [])
qubit = eng.allocate_qubit()
H | qubit
with pytest.raises(RuntimeError):
qubit[0].__del__()
# If you wanted to keep using the qubit, you shouldn't have deleted it.
assert qubit[0].id == -1
def test_simulator_functional_entangle(sim):
eng = MainEngine(sim, [])
qubits = eng.allocate_qureg(5)
# entangle all qubits:
H | qubits[0]
for qb in qubits[1:]:
CNOT | (qubits[0], qb)
# check the state vector:
assert .5 == pytest.approx(abs(sim.cheat()[1][0])**2, rel=tolerance, abs=tolerance)
assert .5 == pytest.approx(abs(sim.cheat()[1][31])**2, rel=tolerance, abs=tolerance)
for i in range(1, 31):
assert 0. == pytest.approx(abs(sim.cheat()[1][i]), rel=tolerance, abs=tolerance)
# unentangle all except the first 2
for qb in qubits[2:]:
CNOT | (qubits[0], qb)
# entangle using Toffolis
for qb in qubits[2:]:
Toffoli | (qubits[0], qubits[1], qb)
# check the state vector:
assert .5 == pytest.approx(abs(sim.cheat()[1][0])**2, rel=tolerance, abs=tolerance)
assert .5 == pytest.approx(abs(sim.cheat()[1][31])**2, rel=tolerance, abs=tolerance)
for i in range(1, 31):
assert 0. == pytest.approx(abs(sim.cheat()[1][i]), rel=tolerance, abs=tolerance)
# uncompute using multi-controlled NOTs
with Control(eng, qubits[0:-1]):
X | qubits[-1]
with Control(eng, qubits[0:-2]):
X | qubits[-2]
with Control(eng, qubits[0:-3]):
X | qubits[-3]
CNOT | (qubits[0], qubits[1])
H | qubits[0]
# check the state vector:
assert 1. == pytest.approx(abs(sim.cheat()[1][0])**2, rel=tolerance, abs=tolerance)
for i in range(1, 32):
assert 0. == pytest.approx(abs(sim.cheat()[1][i]), rel=tolerance, abs=tolerance)
All(Measure) | qubits
def test_simulator_convert_logical_to_mapped_qubits(sim):
mapper = BasicMapperEngine()
mapper.receive = receive
eng = MainEngine(sim, [mapper])
qubit0 = eng.allocate_qubit()
qubit1 = eng.allocate_qubit()
mapper.current_mapping = {qubit0[0].id: qubit1[0].id,
qubit1[0].id: qubit0[0].id}
assert (sim._convert_logical_to_mapped_qureg(qubit0 + qubit1) ==
qubit1 + qubit0)
def slow_implementation(angles, control_qubits, target_qubit, eng, gate_class):
"""
Assumption is that control_qubits[0] is lowest order bit
We apply angles[0] to state |0>
"""
assert len(angles) == 2**len(control_qubits)
for index in range(2**len(control_qubits)):
with Compute(eng):
for bit_pos in range(len(control_qubits)):
if not (index >> bit_pos) & 1:
X | control_qubits[bit_pos]
with Control(eng, control_qubits):
gate_class(angles[index]) | target_qubit
Uncompute(eng)
| 33.680384 | 115 | 0.629455 |
db8d61593765031987787c7a317fdb992cec34a2 | 779 | py | Python | app/deps.py | jshwi/jss | b9f29d47c63cd57d0efc1abec37152e97a92049f | [
"MIT"
] | 1 | 2021-11-07T14:50:00.000Z | 2021-11-07T14:50:00.000Z | app/deps.py | jshwi/jss | b9f29d47c63cd57d0efc1abec37152e97a92049f | [
"MIT"
] | 75 | 2021-09-30T03:33:57.000Z | 2022-03-29T08:42:07.000Z | app/deps.py | jshwi/jss | b9f29d47c63cd57d0efc1abec37152e97a92049f | [
"MIT"
] | null | null | null | """
app.deps
========
Register dependencies that are not part of a ``Flask`` extension.
"""
from flask import Flask
from redis import Redis
from rq import Queue
def init_app(app: Flask) -> None:
"""Register application helpers that are not ``Flask-`` extensions.
As these are not ``Flask`` extensions they do not have an
``init_app`` method, and so can be attached to the app by declaring
them as instance attributes.
.. todo:: These are not declared in ``__init__`` and are a bit of a
code-smell. Using ``flask.g`` may be more appropriate...
:param app: Application factory object.
"""
app.redis = Redis.from_url(app.config["REDIS_URL"]) # type: ignore
app.task_queue = Queue("jss-tasks", connection=app.redis) # type: ignore
| 29.961538 | 77 | 0.677792 |
db8f2e8178561d3e8ad8161722df05fc9f1febff | 55 | py | Python | uncertainty/util/__init__.py | sangdon/intern2020_cocal | 2f434b76fbf3426c6685fb92c5bbc2d32fcba7ba | [
"Apache-2.0"
] | null | null | null | uncertainty/util/__init__.py | sangdon/intern2020_cocal | 2f434b76fbf3426c6685fb92c5bbc2d32fcba7ba | [
"Apache-2.0"
] | 4 | 2020-09-02T04:20:06.000Z | 2022-02-10T02:13:35.000Z | uncertainty/util/__init__.py | sangdon/intern2020_cocal | 2f434b76fbf3426c6685fb92c5bbc2d32fcba7ba | [
"Apache-2.0"
] | 1 | 2020-08-31T16:17:28.000Z | 2020-08-31T16:17:28.000Z | from util.args import *
from util.logger import Logger
| 18.333333 | 30 | 0.8 |
db8f8bf38af010e37a76dcb939676a34f09f75d2 | 1,693 | py | Python | com_reader.py | plusterm/plusterm | 45e9382accdaae7d51c65cab77e571bc6d264936 | [
"MIT"
] | 2 | 2018-01-10T16:20:45.000Z | 2018-01-16T12:04:13.000Z | com_reader.py | plusterm/plusterm | 45e9382accdaae7d51c65cab77e571bc6d264936 | [
"MIT"
] | 14 | 2018-01-10T12:56:43.000Z | 2018-05-11T16:28:31.000Z | com_reader.py | plusterm/plusterm | 45e9382accdaae7d51c65cab77e571bc6d264936 | [
"MIT"
] | null | null | null | # from wx.lib.pubsub import pub
from pubsub import pub
import serial
import threading
import queue
import time
| 29.701754 | 74 | 0.517425 |
db8ff673815400dfc9c26d89afa7b79ffbf19f2f | 1,032 | py | Python | docker/app/app.py | ganeshkumarsv/datadog-cloudfoundry-buildpack | 7c622dfc7990da83e5dfa4f474878a642fd40fd3 | [
"Apache-2.0"
] | 5 | 2018-04-19T18:33:06.000Z | 2021-05-13T03:19:31.000Z | docker/app/app.py | ganeshkumarsv/datadog-cloudfoundry-buildpack | 7c622dfc7990da83e5dfa4f474878a642fd40fd3 | [
"Apache-2.0"
] | 24 | 2018-05-04T13:42:24.000Z | 2021-12-13T12:18:53.000Z | docker/app/app.py | ganeshkumarsv/datadog-cloudfoundry-buildpack | 7c622dfc7990da83e5dfa4f474878a642fd40fd3 | [
"Apache-2.0"
] | 14 | 2018-05-04T13:29:34.000Z | 2022-02-22T17:41:20.000Z | from flask import Flask
from datadog import statsd
import logging
import os
# This is a small example application
# It uses tracing and dogstatsd on a sample flask application
log = logging.getLogger("app")
app = Flask(__name__)
# The app has two routes, a basic endpoint and an exception endpoint
# This is meant to be run directly, instead of executed through flask run
if __name__ == '__main__':
# It grabs the host and port from the environment
port = 5001
host = '0.0.0.0'
if os.environ.get('HOST'):
host = os.environ.get('HOST')
if os.environ.get('PORT'):
port = os.environ.get('PORT')
app.run(debug=True, host=host, port=port)
| 27.891892 | 83 | 0.670543 |
db9205da95b6b253dd5132561aaf107f9a429836 | 1,135 | py | Python | Data Structure using Python/Linked_List/2linked_list1.py | shubhamsah/OpenEDU | a4c68d05f67e7ce6d2305f4ca1567b8f4e95b835 | [
"MIT"
] | 1 | 2020-05-29T05:19:37.000Z | 2020-05-29T05:19:37.000Z | Data Structure using Python/Linked_List/2linked_list1.py | Mithilesh1609/OpenEDU | 85fa1f29285ab1e079e93f6ede3b0d5196ed9cd9 | [
"MIT"
] | null | null | null | Data Structure using Python/Linked_List/2linked_list1.py | Mithilesh1609/OpenEDU | 85fa1f29285ab1e079e93f6ede3b0d5196ed9cd9 | [
"MIT"
] | 1 | 2020-05-09T07:09:11.000Z | 2020-05-09T07:09:11.000Z | # Lets create a linked list that has the following elements
'''
1. FE
2. SE
3. TE
4. BE
'''
# Creating a Node class to create individual Nodes
number_list= LinkedList()
number_list.add("FE")
number_list.add("SE")
number_list.add("TE")
number_list.add("BE")
| 19.237288 | 61 | 0.572687 |
db92111d1426d48f852fa3b382344c31b99bb952 | 2,446 | py | Python | monai/networks/blocks/selfattention.py | function2-llx/MONAI | 4cddaa830b61b88ec78e089bb5f21e05bb1a78f4 | [
"Apache-2.0"
] | 1 | 2022-03-16T01:18:43.000Z | 2022-03-16T01:18:43.000Z | monai/networks/blocks/selfattention.py | function2-llx/MONAI | 4cddaa830b61b88ec78e089bb5f21e05bb1a78f4 | [
"Apache-2.0"
] | null | null | null | monai/networks/blocks/selfattention.py | function2-llx/MONAI | 4cddaa830b61b88ec78e089bb5f21e05bb1a78f4 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from monai.utils import optional_import
Rearrange, _ = optional_import("einops.layers.torch", name="Rearrange")
| 38.825397 | 114 | 0.656582 |
db933a7c4e56e24f7c3bf21ad73b25c489317eb1 | 1,642 | py | Python | api/tests/opentrons/commands/test_protocol_commands.py | mrakitin/opentrons | d9c7ed23d13cdb62bd1bc397dc2871d4bd5b77e9 | [
"Apache-2.0"
] | null | null | null | api/tests/opentrons/commands/test_protocol_commands.py | mrakitin/opentrons | d9c7ed23d13cdb62bd1bc397dc2871d4bd5b77e9 | [
"Apache-2.0"
] | null | null | null | api/tests/opentrons/commands/test_protocol_commands.py | mrakitin/opentrons | d9c7ed23d13cdb62bd1bc397dc2871d4bd5b77e9 | [
"Apache-2.0"
] | null | null | null | import pytest
from opentrons.commands import protocol_commands
def test_delay_with_message():
"""It should allow a message to be appended to the delay text."""
command = protocol_commands.delay(seconds=1, minutes=1, msg="Waiting...")
assert command["payload"]["text"] == (
"Delaying for 1 minutes and 1.0 seconds. Waiting..."
)
| 35.695652 | 77 | 0.596224 |
db9557d7a7cbb9a18b934e17eeb9d696dbc28b20 | 1,467 | py | Python | tests/test_histogram_source.py | ess-dmsc/just-bin-it | 8fcd03337a8a88087f25c510c589d482bdd9e4ad | [
"BSD-2-Clause"
] | null | null | null | tests/test_histogram_source.py | ess-dmsc/just-bin-it | 8fcd03337a8a88087f25c510c589d482bdd9e4ad | [
"BSD-2-Clause"
] | 23 | 2018-12-04T11:50:37.000Z | 2022-03-17T11:30:39.000Z | tests/test_histogram_source.py | ess-dmsc/just-bin-it | 8fcd03337a8a88087f25c510c589d482bdd9e4ad | [
"BSD-2-Clause"
] | 2 | 2019-07-24T11:13:41.000Z | 2020-08-04T18:33:22.000Z | from unittest.mock import patch
import pytest
from just_bin_it.endpoints.sources import HistogramSource
from tests.doubles.consumer import StubConsumer
TEST_MESSAGE = b"this is a byte message"
INVALID_FB = b"this is an invalid fb message"
| 30.5625 | 87 | 0.69666 |
db95ca16068801b73d6de76c353700c64c6cc5f8 | 3,558 | py | Python | lctools/shortcuts.py | novel/lc-tools | 1b9032357e2e87aebd76d87664077caa5747c220 | [
"Apache-2.0"
] | 5 | 2015-03-24T11:04:18.000Z | 2021-07-11T00:06:44.000Z | lctools/shortcuts.py | novel/lc-tools | 1b9032357e2e87aebd76d87664077caa5747c220 | [
"Apache-2.0"
] | null | null | null | lctools/shortcuts.py | novel/lc-tools | 1b9032357e2e87aebd76d87664077caa5747c220 | [
"Apache-2.0"
] | null | null | null | import getopt
import sys
from libcloud.compute.types import NodeState
from lc import get_lc
from printer import Printer
def lister_main(what, resource=None,
extension=False, supports_location=False, **kwargs):
"""Shortcut for main() routine for lister
tools, e.g. lc-SOMETHING-list
@param what: what we are listing, e.g. 'nodes'
@param extension: is it an extension of core libcloud functionality?
@param kwargs: additional arguments for the call
@type what: C{string}
@param supports_location: tells that objects we
listing could be filtered by location
@type supports_location: C{bool}
"""
list_method = "%slist_%s" % ({True: 'ex_', False: ''}[extension], what)
profile = "default"
format = location = None
options = "f:p:"
if supports_location:
options += "l:"
try:
opts, args = getopt.getopt(sys.argv[1:], options)
except getopt.GetoptError, err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
for o, a in opts:
if o == "-f":
format = a
if o == "-p":
profile = a
if o == "-l":
location = a
try:
conn = get_lc(profile, resource=resource)
list_kwargs = kwargs
if supports_location and location is not None:
nodelocation = filter(lambda loc: str(loc.id) == location,
conn.list_locations())[0]
list_kwargs["location"] = nodelocation
for node in getattr(conn, list_method)(**list_kwargs):
Printer.do(node, format)
except Exception, err:
sys.stderr.write("Error: %s\n" % str(err))
def save_image_main():
"""Shortcut for main() routine for provider
specific image save tools.
"""
profile = 'default'
name = node_id = None
try:
opts, args = getopt.getopt(sys.argv[1:], "i:n:p:")
except getopt.GetoptError, err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
for o, a in opts:
if o == "-i":
node_id = a
if o == "-n":
name = a
if o == "-p":
profile = a
if node_id is None or name is None:
usage(sys.argv[0])
sys.exit(1)
conn = get_lc(profile)
node = get_node_or_fail(conn, node_id, print_error_and_exit,
("Error: cannot find node with id '%s'." % node_id,))
Printer.do(conn.ex_save_image(node, name))
def get_node_or_fail(conn, node_id, coroutine=None, cargs=(), ckwargs={}):
"""Shortcut to get a single node by its id. In case when
such node could not be found, coroutine could be called
to handle such case. Typically coroutine will output an
error message and exit from application.
@param conn: libcloud connection handle
@param node_id: id of the node to search for
@param coroutine: a callable object to handle case
when node cannot be found
@param cargs: positional arguments for coroutine
@param kwargs: keyword arguments for coroutine
@return: node object if found, None otherwise"""
try:
node = [node for node in conn.list_nodes()
if str(node.id) == str(node_id)][0]
return node
except IndexError:
if callable(coroutine):
coroutine(*cargs, **ckwargs)
return None
| 28.693548 | 88 | 0.606239 |
db97ce46b14abaf409f42c8462a567f6cfb0edfc | 31,396 | py | Python | tests/test_flash_vl.py | andr1976/thermo | 42d10b3702373aacc88167d4046ea9af92abd570 | [
"MIT"
] | 380 | 2016-07-04T09:45:20.000Z | 2022-03-20T18:09:45.000Z | tests/test_flash_vl.py | andr1976/thermo | 42d10b3702373aacc88167d4046ea9af92abd570 | [
"MIT"
] | 104 | 2016-07-10T20:47:12.000Z | 2022-03-22T20:43:39.000Z | tests/test_flash_vl.py | andr1976/thermo | 42d10b3702373aacc88167d4046ea9af92abd570 | [
"MIT"
] | 96 | 2016-07-05T20:54:05.000Z | 2022-02-23T03:06:02.000Z | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2020, Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
import pytest
from fluids.core import C2K
import thermo
from chemicals.utils import *
from thermo import *
from fluids.numerics import *
from math import *
import json
import os
import numpy as np
| 79.483544 | 862 | 0.701968 |
db991c0b9d90667e802fd9ff394fd81d65368331 | 624 | py | Python | ex38.py | YunMeMeThaw/python_exercises | 151d5d3695d578059611ac09c94b3677442197d7 | [
"MIT"
] | null | null | null | ex38.py | YunMeMeThaw/python_exercises | 151d5d3695d578059611ac09c94b3677442197d7 | [
"MIT"
] | null | null | null | ex38.py | YunMeMeThaw/python_exercises | 151d5d3695d578059611ac09c94b3677442197d7 | [
"MIT"
] | null | null | null | ten_things = "Apples Oranges cows Telephone Light Sugar"
print ("Wait there are not 10 things in that list. Let's fix")
stuff = ten_things.split(' ')
more_stuff = {"Day", "Night", "Song", "Firebee",
"Corn", "Banana", "Girl", "Boy"}
while len(stuff) !=10:
next_one = more_stuff.pop()
print("Adding: ", next_one)
stuff.append(next_one)
print (f"There are {len(stuff)} items n ow.")
print ("There we go : ", stuff)
print ("Let's do some things with stuff.")
print (stuff[1])
print (stuff[-1]) # whoa! cool!
print (stuff.pop())
print (' '.join(stuff)) # what? cool !
print ('#'.join(stuff[3:5])) #super stealler!
| 27.130435 | 62 | 0.647436 |
db99d0c184b26e85aa45a341b38434f288a19023 | 700 | py | Python | var/spack/repos/builtin/packages/diffmark/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2015-10-04T02:17:46.000Z | 2018-02-07T18:23:00.000Z | var/spack/repos/builtin/packages/diffmark/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22 | 2017-08-01T22:45:10.000Z | 2022-03-10T07:46:31.000Z | var/spack/repos/builtin/packages/diffmark/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2016-06-10T17:57:39.000Z | 2018-09-11T04:59:38.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
| 30.434783 | 73 | 0.688571 |
db9a10e90482e634cd4e39a1baf5cb649420edce | 10,817 | py | Python | bbp/comps/irikura_gen_srf.py | ZhangHCFJEA/bbp | 33bd999cf8d719c49f9a904872c62f02eb5850d1 | [
"BSD-3-Clause"
] | 28 | 2017-10-31T09:16:30.000Z | 2022-02-28T23:44:29.000Z | bbp/comps/irikura_gen_srf.py | ZhangHCFJEA/bbp | 33bd999cf8d719c49f9a904872c62f02eb5850d1 | [
"BSD-3-Clause"
] | 37 | 2017-05-23T15:15:35.000Z | 2022-02-05T09:13:18.000Z | bbp/comps/irikura_gen_srf.py | ZhangHCFJEA/bbp | 33bd999cf8d719c49f9a904872c62f02eb5850d1 | [
"BSD-3-Clause"
] | 26 | 2017-09-21T17:43:33.000Z | 2021-11-29T06:34:30.000Z | #!/usr/bin/env python
"""
Copyright 2010-2019 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division, print_function
# Import Python modules
import os
import sys
import math
import shutil
# Import Broadband modules
import plot_srf
import bband_utils
from irikura_gen_srf_cfg import IrikuraGenSrfCfg
from install_cfg import InstallCfg
if __name__ == "__main__":
print("Testing Module: %s" % os.path.basename((sys.argv[0])))
ME = IrikuraGenSrf(sys.argv[1], sys.argv[2], sys.argv[3],
sys.argv[4], sim_id=int(sys.argv[5]))
ME.run()
| 41.764479 | 104 | 0.524637 |
db9c6841dad833eb81be4efbbef24d978326ad58 | 11,120 | py | Python | core/tests/test_models.py | EthanMarrs/digit2 | 207569a3b7a61282a2d0bd5f354a837ad81ef55d | [
"BSD-2-Clause"
] | null | null | null | core/tests/test_models.py | EthanMarrs/digit2 | 207569a3b7a61282a2d0bd5f354a837ad81ef55d | [
"BSD-2-Clause"
] | null | null | null | core/tests/test_models.py | EthanMarrs/digit2 | 207569a3b7a61282a2d0bd5f354a837ad81ef55d | [
"BSD-2-Clause"
] | null | null | null | """test_models.py: runs tests on the models for digit."""
import pytest
from core.models import (Grade,
Subject,
Question,
Comment,
Option,
Topic,
Block,
Syllabus,
StateException,
)
from django.test import TestCase
from django.contrib.auth.models import User
| 38.082192 | 95 | 0.629496 |