| max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
scripts/processing/shared_processing_functions.py
|
andreasfloros/Project-EMILY
| 0
|
12783251
|
<reponame>andreasfloros/Project-EMILY
from imports import *
'''The following functions are used by both the streamlined and non-streamlined versions of the pipeline'''
def print_processing_selections(app):
# get input values from user, note that window_size and window_stride are now in samples
processing_method = (app.selected_processing_method.get()).lower()
expected_duration = float(app.expected_duration_scale.get())
sample_rate = int(app.selected_sample_rate.get())
window_size = int((2 ** float(app.window_size_spinbox.get())))
window_stride = int(sample_rate * float(app.window_stride_scale.get()))
# expected duration is in seconds
expected_num_samples_per_track = int(expected_duration * sample_rate)
print('\n')
print('|-------------------Processing Selections-------------------|')
print('| processing_method: {}'.format(processing_method))
print('| expected_duration: {}'.format(expected_duration))
print('| sample_rate: {}'.format(sample_rate))
print('| window_size: {}'.format(window_size))
print('| window_stride: {}'.format(window_stride))
print('| expected_num_samples_per_track: {}'.format(expected_num_samples_per_track))
print('|-----------------------------------------------------------|')
def make_track_correct_size(signal, expected_num_samples_per_track):
# if track is shorter than expected, append it with zeros
if len(signal) < expected_num_samples_per_track:
num_zeros_to_pad = expected_num_samples_per_track - len(signal)
zeros = num_zeros_to_pad * [0.]
extended_signal = np.append(signal, zeros)
return extended_signal
# if track is longer than expected, truncate it
elif len(signal) > expected_num_samples_per_track:
return signal[:expected_num_samples_per_track]
# else return the original track
else:
return signal
def fast_RMS(signal, window_size, window_stride):
# initializations / declarations
signal_len = len(signal)
RMS_len = int((signal_len - window_size) / window_stride + 1)
RMS = []
squared_signal = [el * el for el in signal]
window_gap = window_size - window_stride
if window_gap > 0:
old_squares_sum = 0
extra_squares_idx = window_stride - window_gap - 1
# initialize old_squares_sum and get the partial value for the first RMS
tmp = 0
for i in range(min(window_size, signal_len)):
if i < window_stride:
old_squares_sum += squared_signal[i]
else:
tmp += squared_signal[i]
# get first RMS and initialize the indices
RMS.append(tmp + old_squares_sum)
old_squares_idx = window_stride # only need this in the case of extra_squares_idx < 0
new_squares_idx = window_stride + window_gap
# main program for window_size > window_stride (works for equality too)
# this is optimal for small window_stride values
for i in range(1, RMS_len):
RMS.append(RMS[i - 1] - old_squares_sum) # remove squares that aren't in the new window
# get new old squares
if extra_squares_idx < 0:
if extra_squares_idx == -1: # in this case, the new old_squares_sum value is readily available
old_squares_sum = RMS[i]
else: # tmp contains more squares than required, adding from the beginning is optimal (window_stride is small)
old_squares_sum = 0
for _ in range(min(window_stride, signal_len - old_squares_idx)):
old_squares_sum += squared_signal[old_squares_idx]
old_squares_idx += 1
# add new squares
for j in range(min(window_stride, signal_len - new_squares_idx)):
RMS[i] += squared_signal[new_squares_idx]
if j == extra_squares_idx: # this can only trigger if extra_squares_idx > -1 i.e. tmp did not contain enough squares initially
old_squares_sum = RMS[i]
new_squares_idx += 1
if extra_squares_idx > signal_len:
old_squares_sum = RMS[i]
else:
start = 0
for i in range(RMS_len):
RMS.append(0)
for j in range(start, min(start + window_size, signal_len)):
RMS[i] += squared_signal[j]
start += window_stride
# dividing and square rooting
for i in range(RMS_len):
        RMS[i] = math.sqrt(abs(RMS[i] / window_size)) # wrap in abs to catch any math domain errors due to floating point error
RMS = np.array(RMS)
RMS = np.expand_dims(RMS, axis=0)
RMS = np.expand_dims(RMS, axis=2)
return RMS
def audio_track_to_features(signal, processing_method, window_size, window_stride):
if processing_method == 'none':
        # if no processing method is selected we only average the signal every 32 samples
# pad signal to be divisible by 32
if len(signal) % 32 != 0:
num_zeros_to_pad = 32 - (len(signal) % 32)
signal = np.append(signal, num_zeros_to_pad * [0.])
averaged = np.mean(signal.reshape(-1, 32), axis=1)
averaged = np.expand_dims(averaged, axis=0)
averaged = np.expand_dims(averaged, axis=2)
return averaged, averaged.shape
elif processing_method == 'stft':
# perform Short Time Fourier Transform (STFT)
stft = librosa.stft(y = signal,
n_fft = window_size,
hop_length = window_stride,
window = scipy.signal.windows.boxcar(window_size),
center = False)
# calculate abs values on complex numbers to get magnitude
spectrogram = np.abs(stft)
# transpose and return the spectrogram matrix
transposed_spectrogram = spectrogram.transpose()
transposed_spectrogram = np.expand_dims(transposed_spectrogram, axis=2)
return transposed_spectrogram, transposed_spectrogram.shape
else: # RMS
RMS = fast_RMS(signal, window_size, window_stride)
return RMS, RMS.shape
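# --- Hypothetical usage sketch (illustrative only, not part of the original pipeline) ---
# fast_RMS should agree with a naive window-by-window RMS; the signal values below are
# made up, and np/math are assumed to come from the `from imports import *` above.
if __name__ == '__main__':
    example_signal = [0.1, -0.2, 0.3, 0.4, -0.5, 0.6, 0.7, -0.8]
    rms = fast_RMS(example_signal, window_size=4, window_stride=2)  # shape (1, 3, 1)
    naive = [math.sqrt(sum(x * x for x in example_signal[i:i + 4]) / 4) for i in (0, 2, 4)]
    assert np.allclose(rms[0, :, 0], naive)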
| 2.546875
| 3
|
pyscreenshot/plugins/xwd.py
|
ponty/pyscreenshot
| 416
|
12783252
|
import logging
from easyprocess import EasyProcess
from pyscreenshot.plugins.backend import CBackend
from pyscreenshot.tempexport import RunProgError, read_func_img
from pyscreenshot.util import extract_version
log = logging.getLogger(__name__)
PROGRAM = "xwd"
# wikipedia: https://en.wikipedia.org/wiki/Xwd
# xwd | xwdtopnm | pnmtopng > Screenshot.png
# xwdtopnm is buggy: https://bugs.launchpad.net/ubuntu/+source/netpbm-free/+bug/1379480
# solution : imagemagick convert
# xwd -root -display :0 | convert xwd:- file.png
# TODO: xwd sometimes grabs the wrong window, so this backend will not be added for now
def read_xwd_img():
def run_prog(fpng, bbox=None):
fxwd = fpng + ".xwd"
pxwd = EasyProcess([PROGRAM, "-root", "-out", fxwd])
pxwd.call()
if pxwd.return_code != 0:
raise RunProgError(pxwd.stderr)
pconvert = EasyProcess(["convert", "xwd:" + fxwd, fpng])
pconvert.call()
if pconvert.return_code != 0:
raise RunProgError(pconvert.stderr)
im = read_func_img(run_prog)
return im
class XwdWrapper(CBackend):
name = "xwd"
is_subprocess = True
def grab(self, bbox=None):
im = read_xwd_img()
if bbox:
im = im.crop(bbox)
return im
def backend_version(self):
return extract_version(EasyProcess([PROGRAM, "-version"]).call().stdout)
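# Hypothetical usage sketch (illustrative only): grabbing and cropping a screenshot with
# this backend, assuming an X display plus the xwd and convert binaries are available.
if __name__ == "__main__":
    im = XwdWrapper().grab(bbox=(0, 0, 200, 200))
    im.save("/tmp/xwd_example.png")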
| 2.25
| 2
|
core/meta/method.py
|
ponyatov/metaLpy
| 0
|
12783253
|
<gh_stars>0
## @file
from core.active import *
from .meta import *
## @ingroup meta
class Method(Meta, Fn):
pass
| 1.164063
| 1
|
lv1/matrix.py
|
mrbartrns/programmers-algorithm
| 0
|
12783254
|
<reponame>mrbartrns/programmers-algorithm<filename>lv1/matrix.py<gh_stars>0
def solution(arr1, arr2):
answer = []
for i in range(len(arr1)):
temp = []
for j in range(len(arr1[0])):
            tot = arr1[i][j] + arr2[i][j]
temp.append(tot)
answer.append(temp)
return answer
print(solution([[1, 2], [2, 3]], [[3, 4], [5, 6]]))
| 3.71875
| 4
|
checkov/version.py
|
acdha/checkov
| 0
|
12783255
|
version = '1.0.290'
| 1.101563
| 1
|
third_party/Sparkle/Sparkle_custom.gyp
|
leiferikb/bitpop-private
| 1
|
12783256
|
<gh_stars>1-10
# Copyright (c) 2011 House of Life Property ltd.
# Copyright (c) 2011 Crystalnix <<EMAIL>>
{
'conditions': [
['OS=="mac"', {
'targets': [
{
'target_name': 'Sparkle',
'type': 'shared_library',
'dependencies': [
'relaunch_tool',
],
'configurations': {
'Debug': {
'xcode_config_file': 'Configurations/ConfigFrameworkDebug.xcconfig',
},
'Release': {
'xcode_config_file': 'Configurations/ConfigFrameworkRelease.xcconfig',
'xcode_settings': {
'GCC_GENERATE_DEBUGGING_SYMBOLS': 'YES',
},
},
},
'product_name': 'Sparkle',
'mac_bundle': 1,
'xcode_settings': {
'DYLIB_INSTALL_NAME_BASE': '@loader_path/Frameworks',
'LD_DYLIB_INSTALL_NAME':
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(WRAPPER_NAME)/$(PRODUCT_NAME)',
'GCC_WARN_64_TO_32_BIT_CONVERSION': 'NO',
'GCC_PREFIX_HEADER': 'Sparkle.pch',
'GCC_PRECOMPILE_PREFIX_HEADER': 'YES',
},
'variables': {
'sparkle_public_headers': [
'Sparkle.h',
'SUAppcast.h',
'SUAppcastItem.h',
'SUUpdater.h',
'SUVersionComparisonProtocol.h',
],
'sparkle_private_headers': [
'SUPlainInstallerInternals.h',
'SUUpdateAlert.h',
'SUStatusController.h',
'SUDSAVerifier.h',
'SUConstants.h',
'SUUnarchiver.h',
'SUAutomaticUpdateAlert.h',
'NTSynchronousTask.h',
'SUStandardVersionComparator.h',
'SUSystemProfiler.h',
'SUUpdatePermissionPrompt.h',
'SUWindowController.h',
'SUInstaller.h',
'SUPlainInstaller.h',
'SUPackageInstaller.h',
'SUBasicUpdateDriver.h',
'SUUIBasedUpdateDriver.h',
'SUAutomaticUpdateDriver.h',
'SUScheduledUpdateDriver.h',
'SUUpdateDriver.h',
'SUProbingUpdateDriver.h',
'SUUserInitiatedUpdateDriver.h',
'SUDiskImageUnarchiver.h',
'SUUnarchiver_Private.h',
'SUPipedUnarchiver.h',
'SUHost.h',
'bspatch.h',
'SUBinaryDeltaUnarchiver.h',
'SUBinaryDeltaApply.h',
'SUBinaryDeltaCommon.h',
],
},
'sources': [
'<@(sparkle_public_headers)',
'<@(sparkle_private_headers)',
'SUUpdater.m',
'SUPlainInstallerInternals.m',
'SUAppcast.m',
'SUAppcastItem.m',
'SUUpdateAlert.m',
'SUStatusController.m',
'SUDSAVerifier.m',
'SUConstants.m',
'SUUnarchiver.m',
'SUAutomaticUpdateAlert.m',
'NTSynchronousTask.m',
'SUStandardVersionComparator.m',
'SUSystemProfiler.m',
'SUUpdatePermissionPrompt.m',
'SUWindowController.m',
'SUInstaller.m',
'SUPlainInstaller.m',
'SUPackageInstaller.m',
'SUBasicUpdateDriver.m',
'SUUIBasedUpdateDriver.m',
'SUAutomaticUpdateDriver.m',
'SUScheduledUpdateDriver.m',
'SUUpdateDriver.m',
'SUProbingUpdateDriver.m',
'SUUserInitiatedUpdateDriver.m',
'SUDiskImageUnarchiver.m',
'SUUnarchiver_Private.m',
'SUPipedUnarchiver.m',
'SUHost.m',
'bspatch.c',
'SUBinaryDeltaApply.m',
'SUBinaryDeltaCommon.m',
'SUBinaryDeltaUnarchiver.m',
],
'mac_framework_headers': [
'<@(sparkle_public_headers)',
],
'mac_bundle_resources': [
'License.txt',
'Info.plist',
'SUModelTranslation.plist',
'SUStatus.nib',
'cs.lproj',
'da.lproj',
'de.lproj',
'en.lproj',
'es.lproj',
'fr.lproj',
'is.lproj',
'it.lproj',
'ja.lproj',
'nl.lproj',
'pl.lproj',
'pt_BR.lproj',
'pt_PT.lproj',
'ru.lproj',
'sv.lproj',
'tr.lproj',
'zh_CN.lproj',
'zh_TW.lproj',
'<(PRODUCT_DIR)/relaunch',
],
'include_dirs': [
'.', '..',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Security.framework',
'$(SDKROOT)/System/Library/Frameworks/WebKit.framework',
'$(SDKROOT)/System/Library/Frameworks/IOKit.framework',
'$(SDKROOT)/usr/lib/libbz2.dylib',
'$(SDKROOT)/usr/lib/libxar.1.dylib',
'$(SDKROOT)/usr/lib/libz.dylib',
'$(SDKROOT)/usr/lib/libcrypto.dylib',
'$(SDKROOT)/System/Library/Frameworks/Cocoa.framework',
],
},
'postbuilds': [
{
'postbuild_name': 'Link fr_CA to fr',
'action': [
'/usr/bin/env', 'ruby',
'-e', 'resources = "#{ENV["BUILT_PRODUCTS_DIR"]}/#{ENV["WRAPPER_NAME"]}/Resources"',
'-e', '`ln -sfh "fr.lproj" "#{resources}/fr_CA.lproj"`',
],
},
{
'postbuild_name': 'Link pt to pt_BR',
'action': [
'/usr/bin/env', 'ruby',
'-e', 'resources = "#{ENV["BUILT_PRODUCTS_DIR"]}/#{ENV["WRAPPER_NAME"]}/Resources"',
'-e', '`ln -sfh "pt_BR.lproj" "#{resources}/pt.lproj"`',
],
},
{
'postbuild_name': 'Create public Sparkle Framework headers dir',
'action': [
'mkdir', '-p', '$BUILT_PRODUCTS_DIR/$WRAPPER_NAME/Headers',
],
},
{
'postbuild_name': 'Copy public Sparkle Framework Headers',
'action': [
'cp', '<@(sparkle_public_headers)', '$BUILT_PRODUCTS_DIR/$WRAPPER_NAME/Headers/',
],
},
{
'postbuild_name': 'Fix permissions on relaunch tool for ninja builds',
'action': [
'chmod', '755', '$BUILT_PRODUCTS_DIR/$WRAPPER_NAME/Resources/relaunch',
],
},
],
},
{
'target_name': 'relaunch_tool',
'type': 'executable',
'product_name': 'relaunch',
'configurations': {
'Debug': {
'xcode_config_file': 'Configurations/ConfigRelaunchDebug.xcconfig',
},
'Release': {
'xcode_config_file': 'Configurations/ConfigRelaunchRelease.xcconfig',
},
},
'sources': [
'relaunch.m',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Cocoa.framework',
],
},
},
],
}, ],
],
}
| 1.070313
| 1
|
dsaii/forms.py
|
khushi0205/DSAII
| 0
|
12783257
|
from django import forms
from .models import Comments
class CommentForm(forms.ModelForm):
class Meta:
model = Comments
fields = ('name', 'body')
widgets = {
'name': forms.TextInput(attrs={'class' : 'form-control'}),
'body' : forms.Textarea(attrs={'class': 'form-control' })
}
class CF(forms.ModelForm):
class Meta:
model = Comments
fields = ('name', 'body')
widgets = {
'name': forms.TextInput(attrs={'class' : 'form-control'}),
'body' : forms.Textarea(attrs={'class': 'form-control' })
}
| 2.28125
| 2
|
helpers/allocation.py
|
hugombarreto/credibility_allocation
| 0
|
12783258
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from os import listdir, path
from helpers.file_name import FileName
class Allocation(object):
def __init__(self, data_path):
files = sorted(listdir(data_path))
files = filter(lambda x: x[-4:] == '.csv', files)
alloc_files = filter(lambda x: x.startswith('alloc'), files)
user_needs_files = filter(lambda x: x.startswith('user_needs'), files)
user_needs_files = map(FileName, user_needs_files)
user_needs_files = {n.attributes['resource_type']: path.join(
data_path, n.name) for n in user_needs_files}
self._allocations = defaultdict(list)
for f in alloc_files:
file_name = FileName(f)
params_dict = file_name.attributes
resource_type = params_dict['resource_type']
params_dict['file_name'] = path.join(data_path, f)
if resource_type in user_needs_files:
params_dict['types_file_name'] = user_needs_files[resource_type]
self._allocations[resource_type].append(params_dict)
self.user_type_files = user_needs_files
def resource_types(self):
return self._allocations.keys()
def __getattr__(self, name):
return self._allocations[name]
def iteritems(self):
return self._allocations.iteritems()
| 2.84375
| 3
|
Python3/Listas/ListaEx3.py
|
arthursiq5/programacao-progressiva
| 0
|
12783259
|
<filename>Python3/Listas/ListaEx3.py
numeros = list(range(5))
print(numeros)
| 3.140625
| 3
|
nevergrad/optimization/test_special.py
|
risto-trajanov/nevergrad
| 3,217
|
12783260
|
<reponame>risto-trajanov/nevergrad
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
import collections
import typing as tp
import pytest
import numpy as np
from .optimizerlib import registry
from . import test_optimizerlib
KEY = "NEVERGRAD_SPECIAL_TESTS"
if not os.environ.get(KEY, ""):
pytest.skip(f"These tests only run if {KEY} is set in the environment", allow_module_level=True)
@pytest.mark.parametrize("dimension", (2, 4, 7, 77))
@pytest.mark.parametrize("num_workers", (1,))
@pytest.mark.parametrize("scale", (4.0,))
@pytest.mark.parametrize("baseline", ["MetaModel", "CMA", "ECMA"])
@pytest.mark.parametrize("budget", [400, 4000])
@pytest.mark.parametrize("ellipsoid", [True, False])
def test_metamodel_sqp_chaining(
dimension: int, num_workers: int, scale: float, budget: int, ellipsoid: bool, baseline: str
) -> None:
"""The test can operate on the sphere or on an elliptic funciton."""
target = test_optimizerlib.QuadFunction(scale=scale, ellipse=ellipsoid)
baseline = baseline if dimension > 1 else "OnePlusOne"
chaining = "ChainMetaModelSQP"
# In both cases we compare MetaModel and CMA for a same given budget.
# But we expect MetaModel to be clearly better only for a larger budget in the ellipsoid case.
contextual_budget = budget if ellipsoid else 3 * budget
contextual_budget *= 5 * int(max(1, np.sqrt(scale)))
num_trials = 27
successes = 0.0
durations: tp.Dict[str, float] = collections.defaultdict(int)
for _ in range(num_trials):
if successes >= num_trials / 2:
break
# Let us run the comparison.
recoms: tp.Dict[str, np.ndarray] = {}
for name in (chaining, baseline):
opt = registry[name](dimension, contextual_budget, num_workers=num_workers)
t0 = time.time()
recoms[name] = opt.minimize(target).value
durations[name] += time.time() - t0
if target(recoms[baseline]) < target(recoms[chaining]):
successes += 1
if target(recoms[baseline]) == target(recoms[chaining]):
successes += 0.5
if successes <= num_trials // 2:
print(
f"ChainMetaModelSQP fails ({successes}/{num_trials}) for d={dimension}, scale={scale}, "
f"num_workers={num_workers}, ellipsoid={ellipsoid}, budget={budget}, vs {baseline}"
)
raise AssertionError("ChaingMetaModelSQP fails by performance.")
print(
f"ChainMetaModelSQP wins for d={dimension}, scale={scale}, num_workers={num_workers}, "
f"ellipsoid={ellipsoid}, budget={budget}, vs {baseline}"
)
assert durations[chaining] < 7 * durations[baseline], "Computationally more than 7x more expensive."
@pytest.mark.parametrize("args", test_optimizerlib.get_metamodel_test_settings(special=True))
@pytest.mark.parametrize("baseline", ("CMA", "ECMA"))
def test_metamodel_special(baseline: str, args: tp.Tuple[tp.Any, ...]) -> None:
"""The test can operate on the sphere or on an elliptic funciton."""
kwargs = dict(zip(test_optimizerlib.META_TEST_ARGS, args))
test_optimizerlib.check_metamodel(baseline=baseline, **kwargs)
| 2.125
| 2
|
fastreid/modeling/losses/triplet_loss.py
|
weleen/MGH.pytorch
| 4
|
12783261
|
<filename>fastreid/modeling/losses/triplet_loss.py
# encoding: utf-8
"""
@author: liaoxingyu
@contact: <EMAIL>
"""
import torch
import torch.nn.functional as F
from fastreid.utils import comm, euclidean_dist
def softmax_weights(dist, mask):
max_v = torch.max(dist * mask, dim=1, keepdim=True)[0]
diff = dist - max_v
Z = torch.sum(torch.exp(diff) * mask, dim=1, keepdim=True) + 1e-6 # avoid division by zero
W = torch.exp(diff) * mask / Z
return W
def hard_example_mining(dist_mat, is_pos, return_indices=False, bound=9999.0):
"""For each anchor, find the hardest positive and negative sample.
Args:
dist_mat: pair wise distance between samples, shape [N, M]
is_pos: positive index with shape [N, M]
Returns:
dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
dist_an: pytorch Variable, distance(anchor, negative); shape [N]
p_inds: pytorch LongTensor, with shape [N];
indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1
n_inds: pytorch LongTensor, with shape [N];
indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1
"""
assert dist_mat.abs().max() < bound, 'dist_mat should be bounded in [-{}, {}]'.format(bound, bound)
sorted_dist_mat, positive_indices = torch.sort(
dist_mat + -bound * (1 - is_pos), dim=1, descending=True
)
dist_ap = sorted_dist_mat[:, 0]
relative_p_inds = positive_indices[:, 0]
sorted_dist_mat, negative_indices = torch.sort(
dist_mat + bound * (is_pos), dim=1, descending=False
)
dist_an = sorted_dist_mat[:, 0]
relative_n_inds = negative_indices[:, 0]
if return_indices:
return dist_ap, dist_an, relative_p_inds, relative_n_inds
return dist_ap, dist_an
def weighted_example_mining(dist_mat, is_pos, weight_mining=False):
"""For each anchor, find the weighted positive and negative sample.
Args:
dist_mat: pytorch Variable, pair wise distance between samples, shape [N, M]
is_pos: positive index with shape [N, M]
weight_mining: enable weighted mining or not
Returns:
dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
dist_an: pytorch Variable, distance(anchor, negative); shape [N]
"""
assert len(dist_mat.size()) == 2
is_pos = is_pos
is_neg = 1 - is_pos
dist_ap = dist_mat * is_pos
dist_an = dist_mat * is_neg
weights_ap = softmax_weights(dist_ap, is_pos)
weights_an = softmax_weights(-dist_an, is_neg)
if weight_mining:
dist_ap = torch.sum(dist_ap * weights_ap, dim=1)
dist_an = torch.sum(dist_an * weights_an, dim=1)
else:
dist_ap = torch.sum(dist_ap, dim=1)
dist_an = torch.sum(dist_an, dim=1)
return dist_ap, dist_an
class TripletLoss(object):
"""Modified from Tong Xiao's open-reid (https://github.com/Cysu/open-reid).
Related Triplet Loss theory can be found in paper 'In Defense of the Triplet
Loss for Person Re-Identification'."""
def __init__(self, cfg):
self._margin = cfg.MODEL.LOSSES.TRI.MARGIN
self._normalize_feature = cfg.MODEL.LOSSES.TRI.NORM_FEAT
self._scale = cfg.MODEL.LOSSES.TRI.SCALE
self._hard_mining = cfg.MODEL.LOSSES.TRI.HARD_MINING
self._weight_mining = cfg.MODEL.LOSSES.TRI.WEIGHT_MINING
def __call__(self, _, embedding, targets, **kwargs):
if self._normalize_feature: embedding = F.normalize(embedding, dim=1)
# For distributed training, gather all features from different process.
if comm.get_world_size() > 1:
all_embedding = comm.concat_all_gather(embedding)
all_targets = comm.concat_all_gather(targets)
else:
all_embedding = embedding
all_targets = targets
dist_mat = euclidean_dist(embedding, all_embedding)
N, N = dist_mat.size()
is_pos = targets.view(N, 1).expand(N, N).eq(all_targets.view(N, 1).expand(N, N).t())
if self._hard_mining:
dist_ap, dist_an = hard_example_mining(dist_mat, is_pos)
else:
dist_ap, dist_an = weighted_example_mining(dist_mat, is_pos, self._weight_mining)
y = dist_an.new().resize_as_(dist_an).fill_(1)
if self._margin > 0:
loss = F.margin_ranking_loss(dist_an, dist_ap, y, margin=self._margin)
else:
loss = F.soft_margin_loss(dist_an - dist_ap, y)
if loss == float('Inf'): loss = F.margin_ranking_loss(dist_an, dist_ap, y, margin=0.3)
return {
"loss_triplet": loss * self._scale,
}
class SoftmaxTripletLoss(object):
def __init__(self, cfg):
self._margin = cfg.MODEL.LOSSES.STRI.MARGIN
self._normalize_feature = cfg.MODEL.LOSSES.STRI.NORM_FEAT
self._scale = cfg.MODEL.LOSSES.STRI.SCALE
self._tau = cfg.MODEL.LOSSES.STRI.TAU
def __call__(self, _, embedding, targets, **kwargs):
assert 'outs_mean' in kwargs, 'outs_mean not found in input, only {}'.format(kwargs.keys())
if self._normalize_feature:
embedding = F.normalize(embedding, dim=1)
# For distributed training, gather all features from different process.
if comm.get_world_size() > 1:
all_embedding = comm.concat_all_gather(embedding)
all_targets = comm.concat_all_gather(targets)
else:
all_embedding = embedding
all_targets = targets
dist_mat = euclidean_dist(embedding, all_embedding)
N, M = dist_mat.size()
is_pos = targets.view(N, 1).expand(N, M).eq(all_targets.view(M, 1).expand(M, N).t()).float()
dist_ap, dist_an = hard_example_mining(dist_mat, is_pos)
triplet_dist = F.log_softmax(torch.stack((dist_ap, dist_an), dim=1) / self._tau, dim=1)
loss = (-self._margin * triplet_dist[:, 0] - (1 - self._margin) * triplet_dist[:, 1]).mean()
return {
"loss_softmax_triplet": loss * self._scale,
}
class SoftSoftmaxTripletLoss(object):
def __init__(self, cfg):
self._margin = cfg.MODEL.LOSSES.SSTRI.MARGIN
self._normalize_feature = cfg.MODEL.LOSSES.SSTRI.NORM_FEAT
self._scale = cfg.MODEL.LOSSES.SSTRI.SCALE
self._tau = cfg.MODEL.LOSSES.SSTRI.TAU
def __call__(self, _, embedding, targets, **kwargs):
assert 'outs_mean' in kwargs, 'outs_mean not found in input, only {}'.format(kwargs.keys())
results_mean = kwargs['outs_mean']
embedding_mean = results_mean['outputs']['features']
if self._normalize_feature:
embedding = F.normalize(embedding, dim=1)
embedding_mean = F.normalize(embedding_mean, dim=1)
# For distributed training, gather all features from different process.
if comm.get_world_size() > 1:
all_embedding = comm.concat_all_gather(embedding)
all_targets = comm.concat_all_gather(targets)
all_embedding_mean = comm.concat_all_gather(embedding_mean)
else:
all_embedding = embedding
all_targets = targets
all_embedding_mean = embedding_mean
dist_mat = euclidean_dist(embedding, all_embedding)
N, M = dist_mat.size()
is_pos = targets.view(N, 1).expand(N, M).eq(all_targets.view(M, 1).expand(M, N).t()).float()
dist_ap, dist_an, ap_idx, an_idx = hard_example_mining(dist_mat, is_pos, return_indices=True)
triplet_dist = F.log_softmax(torch.stack((dist_ap, dist_an), dim=1) / self._tau, dim=1)
# reference from mean_net
dist_mat_ref = euclidean_dist(embedding_mean, all_embedding_mean)
dist_ap_ref = torch.gather(dist_mat_ref, 1, ap_idx.view(N, 1).expand(N, M))[:, 0]
dist_an_ref = torch.gather(dist_mat_ref, 1, an_idx.view(N, 1).expand(N, M))[:, 0]
triplet_dist_ref = F.softmax(torch.stack((dist_ap_ref, dist_an_ref), dim=1) / self._tau, dim=1).detach()
loss = (-triplet_dist_ref * triplet_dist).mean(0).sum()
return {
"loss_soft_softmax_triplet": loss * self._scale,
}
class ActiveTripletLoss(object):
def __init__(self, cfg):
# TODO: add in default config
self._margin = 0.3
self._scale = 1.0
def __call__(self, _, global_features, targets):
_, dim = global_features.size()
global_features = global_features.view(-1, 3, dim)
anchors = global_features[:, 0]
positive = global_features[:, 1]
negative = global_features[:, 2]
loss = F.triplet_margin_loss(anchors, positive, negative, margin=self._margin)
return {
"loss_triplet_a": loss * self._scale,
}
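# --- Hypothetical usage sketch (illustrative only, tensor values are made up) ---
# hard_example_mining picks, per anchor, the largest positive distance and the
# smallest negative distance from the pairwise distance matrix.
if __name__ == '__main__':
    example_dist = torch.tensor([[0.1, 0.9, 0.4],
                                 [0.8, 0.2, 0.3]])
    example_is_pos = torch.tensor([[1., 0., 1.],
                                   [0., 1., 1.]])
    d_ap, d_an = hard_example_mining(example_dist, example_is_pos)
    # expected: d_ap = [0.4, 0.3], d_an = [0.9, 0.8]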
| 2.03125
| 2
|
src/examples/run_all.py
|
RaedanWulfe/oddimorf
| 0
|
12783262
|
#!/usr/bin/python3.8
# -*- coding: utf-8 -*-
"""
Runs all example scripts.
"""
import os
import subprocess
from flask import Flask, request
from flask_restful import Resource, Api
from subprocess import PIPE
python_command = "python"
processes = []
supervisor = None
supervisor_url = 'http://localhost:8070'
dirs = [
"control",
"data_feeder",
"emulator",
"processor",
"recorder",
"spoofer",
"tracker_matlab"
]
class Shutdown(Resource):
def get(self):
try:
for process in processes:
process.terminate()
print(f"supervisor halted, terminating all associated processes...")
finally:
            # the Werkzeug dev server exposes its shutdown hook under this environ key
            shutdown_hook = request.environ.get('werkzeug.server.shutdown')
if shutdown_hook is not None:
shutdown_hook()
return "terminate received"
# -----------------------------------------------------------------------------
# Execute requisite logic
if __name__ == '__main__':
supervisor = Flask(__name__)
api = Api(supervisor)
api.add_resource(Shutdown, '/shutdown')
print(f"initializing associated processes...")
for dir in dirs:
processes.append(subprocess.Popen([python_command, os.path.join(
os.path.dirname(__file__), dir + "/main.py")], stdout=PIPE, stderr=PIPE))
print(f"initialized {len(processes)} processes to supervisor (terminate by accessing {supervisor_url})...")
supervisor.run(port=8070)
print(f"supervisor launched")
| 2.296875
| 2
|
collector/query_sky_and_obsy_conditions.py
|
d33psky/rolloffroof
| 4
|
12783263
|
#!/usr/bin/env python3
import datetime
import json
from pathlib import Path
import requests
import MySQLdb
db = MySQLdb.connect(
host="lxc-rrd",
port=3306,
user='sens',
passwd='<PASSWORD>',
db="observatory1")
db_cursor = db.cursor()
def check_db(minutes):
sql = """
SELECT sensors_id
,create_time
FROM sensors
ORDER BY create_time DESC
LIMIT 1;
"""
db_cursor.execute(sql)
db_result_tuple = db_cursor.fetchone()
try:
sensors_id = db_result_tuple[0]
db_date = db_result_tuple[1]
except:
raise
if db_date < datetime.datetime.utcnow() - datetime.timedelta(minutes=minutes) :
print("DB last timestamp {db_date} is More than {minutes} minutes ago -> close".format(db_date=db_date, minutes=minutes))
print("Stop_Imaging (do not wait). Park (wait), Close_Roof")
quit(1)
else:
print("DB last timestamp {db_date} is Less than {minutes} minutes ago -> open".format(db_date=db_date, minutes=minutes))
return(sensors_id)
def check_sqm(sensors_id, sqm_min):
sql = """
SELECT sqm1_sqm
FROM sensors
WHERE sensors_id = {sensors_id}
LIMIT 1;
""".format(sensors_id=sensors_id)
db_cursor.execute(sql)
db_result_tuple = db_cursor.fetchone()
#print(db_result_tuple)
try:
sqm = db_result_tuple[0]
except:
raise
if sqm >= sqm_min:
print("SQM {sqm} >= minimum {sqm_min} -> open".format(sqm=sqm, sqm_min=sqm_min))
return(True)
else:
print("SQM {sqm} < minimum {sqm_min} -> close".format(sqm=sqm, sqm_min=sqm_min))
return(False)
def check_sqm_past(sqm_min, seconds, outlier_count_max):
sql = """
SELECT COUNT(*)
FROM sensors
WHERE create_time > DATE_SUB(UTC_TIMESTAMP(), INTERVAL {seconds} second)
AND sqm1_sqm < {sqm_min};
""".format(seconds=seconds, sqm_min=sqm_min)
#print(sql)
db_cursor.execute(sql)
db_result_tuple = db_cursor.fetchone()
#print(db_result_tuple)
try:
count = db_result_tuple[0]
except:
raise
if count <= outlier_count_max:
print("SQM < minimum {sqm_min} count over the last {seconds} seconds is {count} <= {outlier_count_max} -> open".format(sqm_min=sqm_min, seconds=seconds, count=count, outlier_count_max=outlier_count_max))
return(True)
else:
print("SQM < minimum {sqm_min} count over the last {seconds} seconds is {count} > {outlier_count_max} -> close".format(sqm_min=sqm_min, seconds=seconds, count=count, outlier_count_max=outlier_count_max))
return(False)
def check_rain(sensors_id, drops_min):
sql = """
SELECT rainsensor1_drops
FROM sensors
WHERE sensors_id = {sensors_id}
LIMIT 1;
""".format(sensors_id=sensors_id)
db_cursor.execute(sql)
db_result_tuple = db_cursor.fetchone()
#print(db_result_tuple)
try:
drops = db_result_tuple[0]
except:
raise
if drops <= drops_min:
print("Rain drops {drops} <= minimum {drops_min} -> open".format(drops=drops, drops_min=drops_min))
return(True)
else:
print("Rain drops {drops} > minimum {drops_min} -> close".format(drops=drops, drops_min=drops_min))
return(False)
def check_rain_past(drops_min, seconds, outlier_count_max):
sql = """
SELECT COUNT(*)
FROM sensors
WHERE create_time > DATE_SUB(UTC_TIMESTAMP(), INTERVAL {seconds} second)
AND rainsensor1_drops > {drops_min};
""".format(seconds=seconds, drops_min=drops_min)
db_cursor.execute(sql)
db_result_tuple = db_cursor.fetchone()
#print(db_result_tuple)
try:
count = db_result_tuple[0]
except:
raise
if count <= outlier_count_max:
print("Rain drops <= minimum {drops_min} count over the last {seconds} seconds is {count} <= {outlier_count_max} -> open".format(drops_min=drops_min, seconds=seconds, count=count, outlier_count_max=outlier_count_max))
return(True)
else:
print("Rain drops > minimum {drops_min} count over the last {seconds} seconds is {count} > {outlier_count_max} -> close".format(drops_min=drops_min, seconds=seconds, count=count, outlier_count_max=outlier_count_max))
return(False)
def check_ups_is_on_mains(sensors_id, min_ups_bcharge):
sql = """
SELECT ups1_status, ups1_bcharge
FROM sensors
WHERE sensors_id = {sensors_id}
LIMIT 1;
""".format(sensors_id=sensors_id)
db_cursor.execute(sql)
db_result_tuple = db_cursor.fetchone()
# print(db_result_tuple)
try:
ups_status = db_result_tuple[0]
ups_bcharge = db_result_tuple[1]
except:
raise
if ups_status == 1 and ups_bcharge >= min_ups_bcharge:
print("UPS is powered and battery charge {bcharge} >= {min_ups_bcharge} -> open".format(bcharge=ups_bcharge, min_ups_bcharge=min_ups_bcharge))
return True
else:
if ups_status != 1:
print("UPS is on battery (and battery charge is {bcharge}) -> close".format(bcharge=ups_bcharge))
else:
print("UPS is powered but battery charge {bcharge} < {min_ups_bcharge} -> open".format(bcharge=ups_bcharge, min_ups_bcharge=min_ups_bcharge))
return False
def check_infrared(sensors_id, sensor, minimum_delta_t):
sql = """
SELECT {sensor}_temperature_sensor
, {sensor}_temperature_sky
, {sensor}_temperature_sensor - {sensor}_temperature_sky
FROM sensors
WHERE sensors_id = {sensors_id}
LIMIT 1;
""".format(sensor=sensor, sensors_id=sensors_id)
db_cursor.execute(sql)
db_result_tuple = db_cursor.fetchone()
#print(db_result_tuple)
try:
temperature_sensor = db_result_tuple[0]
temperature_sky = db_result_tuple[1]
delta_t = db_result_tuple[2]
except:
raise
if delta_t >= minimum_delta_t:
print("Sensor {sensor} sky temperature delta ({temperature_sensor} - {temperature_sky} = {delta_t}) >= {minimum_delta_t} -> open".format(sensor=sensor, temperature_sensor=temperature_sensor, temperature_sky=temperature_sky, delta_t=delta_t, minimum_delta_t=minimum_delta_t))
return(True)
else:
print("Sensor {sensor} sky temperature delta ({temperature_sensor} - {temperature_sky} = {delta_t}) < {minimum_delta_t} -> close".format(sensor=sensor, temperature_sensor=temperature_sensor, temperature_sky=temperature_sky, delta_t=delta_t, minimum_delta_t=minimum_delta_t))
return(False)
def check_infrared_past(sensor, minimum_delta_t, seconds, outlier_count_max):
sql = """
SELECT COUNT(*)
FROM sensors
WHERE create_time > DATE_SUB(UTC_TIMESTAMP(), INTERVAL {seconds} second)
AND {sensor}_temperature_sensor - {sensor}_temperature_sky < {minimum_delta_t};
""".format(sensor=sensor, seconds=seconds, minimum_delta_t=minimum_delta_t)
db_cursor.execute(sql)
db_result_tuple = db_cursor.fetchone()
#print(db_result_tuple)
try:
count = db_result_tuple[0]
except:
raise
if count < outlier_count_max:
print("Sensor {sensor} sky temperature delta < {minimum_delta_t} count over the last {seconds} seconds is {count} <= {outlier_count_max} -> open".format(sensor=sensor, minimum_delta_t=minimum_delta_t, seconds=seconds, count=count, outlier_count_max=outlier_count_max))
return(True)
else:
print("Sensor {sensor} sky temperature delta < {minimum_delta_t} count over the last {seconds} seconds is {count} > {outlier_count_max} -> close".format(sensor=sensor, minimum_delta_t=minimum_delta_t, seconds=seconds, count=count, outlier_count_max=outlier_count_max))
return(False)
def last_event_long_enough_ago(event, seconds, outlier_count_max):
sql = """
SELECT COUNT(*)
FROM events
WHERE create_time > DATE_SUB(UTC_TIMESTAMP(), INTERVAL {seconds} second)
AND event = '{event}';
""".format(event=event, seconds=seconds)
db_cursor.execute(sql)
db_result_tuple = db_cursor.fetchone()
#print(db_result_tuple)
try:
count = db_result_tuple[0]
except:
raise
if count < outlier_count_max:
print("Event {event} count over the last {seconds} seconds is {count} < {outlier_count_max} -> open".format(event=event, seconds=seconds, count=count, outlier_count_max=outlier_count_max))
return(True)
else:
print("Event {event} count over the last {seconds} seconds is {count} >= {outlier_count_max} -> close".format(event=event, seconds=seconds, count=count, outlier_count_max=outlier_count_max))
return(False)
def retrieve_previous_open_ok():
sql = """
SELECT create_time,open_ok
FROM roof
ORDER BY roof_id DESC
LIMIT 1;
"""
db_cursor.execute(sql)
db_result_tuple = db_cursor.fetchone()
last_open_ok = bool(db_result_tuple[1])
print("Roof open status is {}".format(last_open_ok))
return(last_open_ok)
#def retrieve_previous_open(sensors_id):
# sql = """
# SELECT open
# FROM roof
# WHERE sensors_id = {}
# LIMIT 1;
# """.format(sensors_id)
# sql = """
# SELECT create_time, open_ok
# FROM roof
# ORDER BY roof_id DESC
# LIMIT 1;
# """
# db_cursor.execute(sql)
# db_result_tuple = db_cursor.fetchone()
# open = db_result_tuple[1]
# return(bool(open))
def store_roof_status(utcnow, sensors_id, open_ok, reasons):
sql_keys = []
sql_values = []
sql_keys.append("create_time")
sql_values.append('"' + str(utcnow) + '"')
sql_keys.append("sensors_id")
sql_values.append(str(sensors_id))
sql_keys.append("open_ok")
sql_values.append(str(open_ok))
sql_keys.append("reasons")
sql_values.append('"' + reasons + '"')
sql = """
INSERT INTO observatory1.roof ({keys})
VALUES ({values});
""".format(keys = ','.join(sql_keys),
values = ','.join(sql_values))
#print("{}".format(sql.lstrip().rstrip()))
try:
db_cursor.execute(sql)
db.commit()
#print(db_cursor.rowcount, "record inserted.")
except:
db.rollback()
raise
#def get_roof_status(minutes):
# sql = """
# SELECT open_ok
# ,create_time
# FROM roof
# ORDER BY roof_id DESC
# LIMIT 1;
# """
# db_cursor.execute(sql)
# db_result_tuple = db_cursor.fetchone()
# try:
# last_open_ok = db_result_tuple[0]
# db_date = db_result_tuple[1]
# except:
# raise
# if db_date < datetime.datetime.utcnow() - datetime.timedelta(minutes=minutes) :
# print("DB last timestamp {db_date} is More than {minutes} minutes ago -> close".format(db_date=db_date, minutes=minutes))
# print("Stop_Imaging (do not wait). Park (wait), Close_Roof")
# quit(1)
# else:
# return(last_open_ok)
def store_event(utcnow, event, reason = None):
sql_keys = []
sql_values = []
sql_keys.append("create_time")
sql_values.append('"' + str(utcnow) + '"')
sql_keys.append("event")
sql_values.append('"' + event + '"')
if reason:
sql_keys.append("reason")
sql_values.append('"' + reason + '"')
sql = """
INSERT INTO observatory1.events ({keys})
VALUES ({values});
""".format(keys = ','.join(sql_keys),
values = ','.join(sql_values))
#print("{}".format(sql.lstrip().rstrip()))
try:
db_cursor.execute(sql)
db.commit()
#print(db_cursor.rowcount, "record inserted.")
except:
db.rollback()
raise
def sendToMattermost(url, message):
print("Send to mattermost: {}".format(message))
payload = {}
payload['text'] = message
r = requests.post(url, data={'payload': json.dumps(payload, sort_keys=True, indent=4)})
if r.status_code != 200:
try:
r = json.loads(r.text)
except ValueError:
r = {'message': r.text, 'status_code': r.status_code}
raise RuntimeError("{} ({})".format(r['message'], r['status_code']))
def main():
home = str(Path.home())
mattermost_url_file = open(home + "/.mattermosturl", 'r')
url = mattermost_url_file.read().rstrip('\n')
mattermost_url_file.close()
sensors_id = check_db(minutes=2)
# roof_status = get_roof_status(minutes=2)
# if roof_status == 1:
last_open_ok = retrieve_previous_open_ok()
if last_open_ok is True:
sqm_min_hysterese = 6
minimum_delta_t_hysterese = 7
else:
sqm_min_hysterese = 0
minimum_delta_t_hysterese = 0
sqm_now_ok = check_sqm(sensors_id, sqm_min=17.5 - sqm_min_hysterese)
rain_now_ok = check_rain(sensors_id, drops_min=1)
ups_now_ok1 = check_ups_is_on_mains(sensors_id, 99.0)
if ups_now_ok1 is False:
# might be self-test. check previous minute
ups_now_ok2 = check_ups_is_on_mains(sensors_id - 1, 99.0)
infrared1_now_ok = check_infrared(sensors_id, sensor='BAA1', minimum_delta_t=20 - minimum_delta_t_hysterese)
infrared2_now_ok = check_infrared(sensors_id, sensor='BCC1', minimum_delta_t=20 - minimum_delta_t_hysterese)
sqm_past_ok = check_sqm_past(sqm_min=17.5 - sqm_min_hysterese, seconds=3600, outlier_count_max=5)
rain_past_ok = check_rain_past(drops_min=1, seconds=3600, outlier_count_max=2)
infrared1_past_ok = check_infrared_past(sensor='BAA1', minimum_delta_t=20 - minimum_delta_t_hysterese, seconds=3600, outlier_count_max=5)
infrared2_past_ok = check_infrared_past(sensor='BCC1', minimum_delta_t=20 - minimum_delta_t_hysterese, seconds=3600, outlier_count_max=5)
closing_event_past_ok = last_event_long_enough_ago(event="closing", seconds=3600, outlier_count_max=1)
reason_open = []
reason_close = []
if sensors_id:
reason_open.append("DB ok")
else:
reason_close.append("DB not ok")
if sqm_now_ok:
if sqm_past_ok:
reason_open.append("Dark long enough")
else:
reason_close.append("Not dark long enough")
else:
if sqm_past_ok:
reason_close.append("Not dark enough anymore")
else:
reason_close.append("Still not dark enough")
if rain_now_ok:
if rain_past_ok:
reason_open.append("Dry long enough")
else:
reason_close.append("Not dry long enough")
else:
if rain_past_ok:
reason_close.append("Started raining")
else:
reason_close.append("Still raining")
if ups_now_ok1:
reason_open.append("UPS works")
ups_now_ok = True
else:
if ups_now_ok2:
reason_open.append("UPS selftest or on battery")
ups_now_ok = True
else:
reason_close.append("UPS on battery")
ups_now_ok = False
if infrared1_now_ok or infrared2_now_ok:
if infrared1_past_ok or infrared2_past_ok:
reason_open.append("Clear long enough")
else:
reason_close.append("Not clear long enough")
else:
if infrared1_past_ok or infrared2_past_ok:
reason_close.append("Too cloudy")
else:
reason_close.append("Still too cloudy")
if closing_event_past_ok:
reason_open.append("Roof has been closed long enough")
else:
reason_close.append("Roof was just closed")
#print(reason_open)
#print(reason_close)
if sensors_id and sqm_now_ok and sqm_past_ok and rain_now_ok and rain_past_ok and ups_now_ok and (infrared1_now_ok or infrared2_now_ok) and (infrared1_past_ok or infrared2_past_ok) and closing_event_past_ok:
open_ok = True
reasons = "All sensors are go"
#reasons = "{}".format(', '.join(reason_open))
# print("roof open ok, {}".format(', '.join(reason_open)))
else:
open_ok = False
reasons = "{}".format(', '.join(reason_close))
# print("roof open not ok: {}".format(', '.join(reason_close)))
# print(reasons)
utcnow = datetime.datetime.utcnow()
# last_open_ok = retrieve_previous_open_ok()
event = ''
roof_change = False
if last_open_ok is False:
if open_ok is True:
roof_change = True
event = "opening"
else:
event = "stays closed"
else:
if open_ok is False:
roof_change = True
event = "closing"
else:
event = "stays open"
print("Roof {}, {}".format(event, reasons))
if roof_change is True:
sendToMattermost(url, event + ", " + reasons)
store_event(utcnow, event, reasons)
# last_open_ok = retrieve_previous_open_ok()
# if last_open_ok != open_ok:
# sendToMattermost(url, open_ok_str + reasons)
store_roof_status(utcnow, sensors_id, open_ok, reasons)
print("")
if __name__ == "__main__":
main()
| 2.796875
| 3
|
UserProfile/models.py
|
Hudeh/Hubmart
| 0
|
12783264
|
from django.db import models
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser
# from safedelete.models import SafeDeleteModel
# from safedelete.models import SOFT_DELETE_CASCADE
# from safedelete.managers import SafeDeleteAllManager
class UserManager(BaseUserManager):
def create_user(self, email, password=<PASSWORD>, active=True, staff=False, admin=False, **extra_fields):
""" usermanger for creating user profile"""
if not email:
raise ValueError("please provide username or email")
        # normalize before constructing the user so the stored email is canonical
        email = self.normalize_email(email)
        user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
""" creating superuser """
user = self.create_user(email, password)
user.admin = True
user.staff = True
user.active = True
user.superuser = True
user.save(using=self._db)
return user
class MyUser(AbstractBaseUser):
""" user model """
active = models.BooleanField(default=True)
is_verified = models.BooleanField(default=False)
staff = models.BooleanField(default=False)
admin = models.BooleanField(default=False)
first_name = models.CharField(verbose_name='First Name',max_length=254,blank=True, null=True)
phone = models.CharField(verbose_name='Phone',max_length=254,blank=True, null=True)
last_name = models.CharField(verbose_name='Last Name',max_length=254,blank=True, null=True)
username = models.CharField(verbose_name='Username',max_length=254, blank=True, null=True)
email = models.EmailField(verbose_name='Email',max_length=255,unique=True,blank=True, null=True)
timestamp = models.DateTimeField(auto_now=True)
objects = UserManager()
USERNAME_FIELD = 'email'
def __str__(self):
return self.email
def has_perm(self, perm, obj=None):
return True
def has_module_perms(self, app_label):
return True
def tokens():
return ''
@property
def is_staff(self):
return self.staff
def is_admin(self):
return self.admin
@property
def is_active(self):
return self.active
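# Hypothetical usage sketch (illustrative only, e.g. from a configured Django shell):
#   user = MyUser.objects.create_user(email='user@example.com', password='secret')
#   admin = MyUser.objects.create_superuser('admin@example.com', 'secret')
#   admin.is_staff  # -> True, via the staff flag set in create_superuser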
| 2.453125
| 2
|
Easy/350. Intersection of Two Arrays II/solution (2).py
|
czs108/LeetCode-Solutions
| 3
|
12783265
|
<gh_stars>1-10
# 350. Intersection of Two Arrays II
# Runtime: 48 ms, faster than 67.57% of Python3 online submissions for Intersection of Two Arrays II.
# Memory Usage: 14.1 MB, less than 99.75% of Python3 online submissions for Intersection of Two Arrays II.
class Solution:
def intersect(self, nums1: list[int], nums2: list[int]) -> list[int]:
nums1.sort()
nums2.sort()
ins = 0
p1, p2 = 0, 0
while p1 < len(nums1) and p2 < len(nums2):
if nums1[p1] > nums2[p2]:
p2 += 1
elif nums1[p1] < nums2[p2]:
p1 += 1
else:
nums1[ins] = nums1[p1]
ins += 1
p1 += 1
p2 += 1
return nums1[:ins]
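# Hypothetical usage sketch (illustrative only):
if __name__ == "__main__":
    print(Solution().intersect([4, 9, 5], [9, 4, 9, 8, 4]))  # -> [4, 9]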
| 3.546875
| 4
|
pyfos/utils/layer2/lldp_global_tlv_add.py
|
brocade/pyfos
| 44
|
12783266
|
<filename>pyfos/utils/layer2/lldp_global_tlv_add.py
#!/usr/bin/env python3
# Copyright © 2019 Broadcom. All rights reserved.
# The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# lldp_global_tlv_add.py(pyGen v1.0.0)
"""
:mod:`lldp_global_tlv_add` - PyFOS util to create for lldp_global
*******************************************************************************
The :mod:`lldp_global_tlv_add` PyFOS util to create for lldp_global
The LLDP switch level configuration and operational parameters.
lldp_global_tlv_add : usage
* Infrastructure Options:
* -i,--ipaddr=IPADDR: The IP address of the FOS switch.
* -L,--login=LOGIN: The login name.
* -P,--password=PASSWORD: The password.
* -f,--vfid=VFID: The VFID to which the request is directed.
* -s,--secured=MODE: The HTTPS mode "self" or "CA" [Optional].
* -v,--verbose: Verbose mode [Optional].
* Util Script Options:
* --tlv=TLV The list of optional TLVs enabled on the switch. The dcbx,\
fcoe-app, fcoe-lls, sys-name and port-desc TLVs are enabled by default\
and user can disable them if required. The dcbx TLV should be enabled\
beforehand to enable fcoe-app and fcoe-lls TLVs.
* Output:
* Python dictionary content with RESTCONF response data.
.. function:: lldp_global_tlv_add.create_lldp_global(session,\
optional_tlvs_tlv)
*Create lldp_global*
Example Usage of the Method::
ret = lldp_global_tlv_add.create_lldp_global(session,\
optional_tlvs_tlv)
print (ret)
Details::
lldp_globalObj = lldp_global()
lldp_globalObj.set_optional_tlvs_tlv(optional_tlvs_tlv)
print (ret)
* Input::
:param session: The session returned by the login.
:param optional_tlvs_tlv: The list of optional TLVs enabled on the\
switch. The dcbx, fcoe-app, fcoe-lls, sys-name and port-desc\
TLVs are enabled by default and user can disable them if\
required. The dcbx TLV should be enabled beforehand to enable\
fcoe-app and fcoe-lls TLVs.
* Output:
:rtype: Dictionary of response
"""
# Start utils imports
import sys
from pyfos import pyfos_auth
from pyfos import pyfos_util
from pyfos.pyfos_brocade_lldp import lldp_global
from pyfos.utils import brcd_util
# End module imports
def _create_lldp_global(session, lldp_globalObj):
return lldp_globalObj.post(session)
def create_lldp_global(session, optional_tlvs_tlv=None):
lldp_globalObj = lldp_global()
lldp_globalObj.set_optional_tlvs_tlv(optional_tlvs_tlv)
return _create_lldp_global(session, lldp_globalObj)
def validate(lldp_globalObj):
if lldp_globalObj.peek_optional_tlvs_tlv() == "[]":
return 1
return 0
def main(argv):
filters = ["optional_tlvs_tlv"]
inputs = brcd_util.parse(argv, lldp_global, filters, validate)
session = brcd_util.getsession(inputs)
result = _create_lldp_global(session, inputs['utilobject'])
pyfos_util.response_print(result)
pyfos_auth.logout(session)
if __name__ == "__main__":
main(sys.argv[1:])
| 1.664063
| 2
|
main_evaluate.py
|
ehoogeboom/convolution_exponential_and_sylvester
| 21
|
12783267
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import time
import numpy as np
import torch
import torch.utils.data
from optimization.training import evaluate, plot_samples
from utils.load_data import load_dataset
from os.path import join
parser = argparse.ArgumentParser(description='PyTorch Discrete Normalizing flows')
parser.add_argument('-d', '--dataset', type=str, default='cifar10',
choices=['cifar10', 'imagenet32', 'imagenet64', 'svhn'],
metavar='DATASET',
help='Dataset choice.')
parser.add_argument('-bs', '--batch_size', type=int, default=1000, metavar='BATCH_SIZE',
                    help='input batch size for evaluation (default: 1000)')
parser.add_argument('--snap_dir', type=str, default='')
def main():
args = parser.parse_args()
args.cuda = torch.cuda.is_available()
args.break_epoch = False
snap_dir = args.snap_dir = join('snapshots', args.snap_dir) + '/'
train_loader, val_loader, test_loader, args = load_dataset(args)
final_model = torch.load(snap_dir + 'a.model', map_location='cpu')
if args.cuda:
final_model = final_model.cuda()
# Just for timing at the moment.
with torch.no_grad():
final_model.eval()
timing_results = []
for i in range(100):
torch.cuda.synchronize()
start = time.time()
x_sample = final_model.sample(n_samples=100)
torch.cuda.synchronize()
duration = time.time() - start
timing_results.append(duration)
print('Timings: ', timing_results)
print('Mean time:', np.mean(timing_results))
plot_samples(final_model, args, epoch=9999, bpd=0.0)
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
final_model = torch.nn.DataParallel(final_model, dim=0)
test_bpd = evaluate(test_loader, final_model, args)
with open(snap_dir + 'log.txt', 'a') as ff:
msg = 'FINAL \ttest negative elbo bpd {:.4f}'.format(
test_bpd)
print(msg)
print(msg, file=ff)
test_bpd = evaluate(test_loader, final_model, args, iw_samples=1000)
with open(snap_dir + 'log.txt', 'a') as ff:
msg = 'FINAL \ttest negative log_px bpd {:.4f}'.format(
test_bpd)
print(msg)
print(msg, file=ff)
if __name__ == '__main__':
main()
| 2.171875
| 2
|
cryptomonsters/mining/views.py
|
kryptokardz/cryptocards
| 1
|
12783268
|
<filename>cryptomonsters/mining/views.py
"""Base views for cryptomonsters."""
import json
from django.conf import settings
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render
from django.views.generic import ListView
from mining.scripts.blockchain import BlockChain
from monsters.models import Monster
from django.urls import reverse_lazy
import redis
blockchain = BlockChain()
conn = redis.Redis('localhost')
class MiningHomeView(LoginRequiredMixin, ListView):
"""View to start mining."""
model = Monster
template_name = 'mining/mining.html'
redirect_field_name = '/accounts/login'
class MiningStart(LoginRequiredMixin, ListView): # pragma: no cover
"""View to show goblin gif while mining is happening."""
model = Monster
template_name = 'mining/mining_start.html'
redirect_field_name = '/accounts/login'
def get_context_data(self, **kwargs):
"""Return the context id which is key for redis."""
context = super(MiningStart, self).get_context_data(**kwargs)
user = context['view'].request.user
async_id = blockchain.new_block(user)
context['async_id'] = async_id
context['ready'] = False
return context
class MiningNewBlock(LoginRequiredMixin, ListView): # pragma: no cover
"""Check if monster is done being mined for."""
model = Monster
template_name = 'mining/mining_start.html'
redirect_field_name = '/accounts/login'
def get_context_data(self, **kwargs):
"""If the monster is ready then send it to view else keep waiting."""
context = super(MiningNewBlock, self).get_context_data(**kwargs)
async_id = self.request.GET['id']
try:
json.loads(conn.get("celery-task-meta-{}".format(async_id)).decode('utf8'))
context['ready'] = True
user = context['view'].request.user
monster = user.monsters.last()
context['data'] = monster
return context
except AttributeError:
context['async_id'] = async_id
context['wait'] = True
return context
def blockchain_view(request):
"""View the blockchain."""
with open('cryptomonsters/static/blockchain/blockchain.json') as file:
chain = json.load(file)
page = request.GET.get('page', 1)
paginator = Paginator(chain[::-1], 5)
try:
blocks = paginator.page(page)
except PageNotAnInteger: # pragma: no cover
blocks = paginator.page(1)
except EmptyPage: # pragma: no cover
blocks = paginator.page(paginator.num_pages)
return render(request, 'mining/blockchain.html', {'blockchain': blocks})
| 2.390625
| 2
|
pysql/main_c.py
|
Devansh3712/PySQL
| 12
|
12783269
|
<gh_stars>10-100
"""
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import sys
# create relative path for importing modules
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import stdiomask
import time
import platform
import urllib.request
from colorama import init, Fore, Style
import pysql.packages.auth as auth
import pysql.packages.ddl_commands as ddl
import pysql.packages.dml_commands as dml
import pysql.data.info as info
import pysql.data.export as export
import pysql.data.imports as imports
import pysql.utils.update as update
import pysql.utils.user as user
__version__ = "1.1.2"
if platform.system() == "Windows":
init(convert = True)
print(f"{Fore.LIGHTRED_EX}{info.ascii_art}{reset}")
time.sleep(1)
# check if a default user exists
check = user.User().check_default_user()
# colors
green = Fore.GREEN
red = Fore.RED
cyan = Fore.CYAN
yellow = Fore.LIGHTYELLOW_EX
magenta = Fore.LIGHTMAGENTA_EX
reset = Style.RESET_ALL
if check is True:
# get default user's name
def_user = user.User().get_default_user()
print(f"\n[{green}+{reset}]{green} Default user {reset}{cyan}{def_user[0]}{reset}{green} authenticated{reset}")
uname = def_user[0]
passwd = def_user[1]
else:
sys.stdout.write(f"{cyan}Username: {reset}")
uname = input()
passwd = stdiomask.getpass(prompt = f"{cyan}Password: {reset}")
# authenticate user credentials
authenticate = auth.Database(uname, passwd).authenticate()
if authenticate is False:
print(f"\n[{red}-{reset}]{red} User could not be authenticated{reset}")
exit()
print(f"\n[{green}+{reset}]{green} User authenticated{reset}")
time.sleep(1)
print(info.menu)
# package class instances as objects for calling functions
ddl_obj = ddl.DDL(uname, passwd)
dml_obj = dml.DML(uname, passwd)
exp_obj = export.Export(uname, passwd)
imp_obj = imports.Import(uname, passwd)
# information of database in use
current_db = ""
db_use = False
while (True):
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> ")
user_input = input()
else:
user_input = input("pysql> ")
try:
if user_input.lower() in ["-a", "about"]:
print(info.about)
elif user_input.lower() in ["-h", "help"]:
print(info.menu)
elif user_input.lower() in ["-q", "quit", "exit"]:
print(f"{magenta}Goodbye{reset}")
break
elif user_input.lower() in ["-c", "commands"]:
print(info.commands)
elif user_input.lower() in ["-v", "version"]:
print(__version__ + "\n")
elif user_input.lower() in ["-d", "def user"]:
print(info.default_user)
elif user_input.lower() in ["-u", "updates"]:
gh_version = urllib.request.urlopen("https://raw.githubusercontent.com/Devansh3712/PySQL/main/pysql/__version__.py")
gh_version = gh_version.read().decode("utf-8")
if gh_version > __version__:
result = update.update_pysql()
if result is True:
print(f"[{green}+{reset}]{green} PySQL updated to v{gh_version} succesfully{reset}\n")
break
else:
print(f"[{red}-{reset}]{red} Unable to update PySQL{reset}\n")
else:
print(f"[{green}+{reset}]{green} PySQL is up-to-date{reset}\n")
elif user_input.lower() == "adduser":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter username: ")
_uname = input()
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter password: ")
_passwd = stdiomask.getpass(prompt = "")
else:
_uname = input("pysql> Enter username: ")
_passwd = stdiomask.getpass(prompt = "pysql> Enter password: ")
result = user.User().add_default_user(_uname, _passwd)
if result is True:
print(f"[{green}+{reset}]{green} Default user {reset}{cyan}{_uname}{reset}{green} created{reset}\n")
else:
print(f"[{red}-{reset}]{red} Unable to create default user{reset}\n")
elif user_input.lower() == "removeuser":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter password: ")
_passwd = stdiomask.getpass(prompt = "")
else:
_passwd = stdiomask.getpass(prompt = "pysql> Enter password: ")
def_user = user.User().get_default_user()
if _passwd == def_user[1]:
result = user.User().remove_default_user()
if result is True:
print(f"[{green}+{reset}]{green} Default user {reset}{cyan}{def_user[0]}{reset}{green} removed{reset}\n")
else:
print(f"[{red}-{reset}]{red} Unable to remove default user{reset}\n")
else:
print(f"[{red}-{reset}]{red} Unable to authenticate{reset}\n")
elif user_input.lower() == "ddl":
print(info.data_definition_language)
elif user_input.lower() == "showdb":
result = ddl_obj.show_databases()
if result:
print(result + "\n")
else:
print(f"[{red}-{reset}]{red} Unable to show databases{reset}\n")
elif user_input.lower() == "createdb":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter database name: ")
db_name = input()
else:
db_name = input("pysql> Enter database name: ")
result = ddl_obj.create_database(db_name)
if result is True:
print(f"[{green}+{reset}]{green} Created database {reset}{db_name}\n")
else:
print(f"[{red}-{reset}]{red} Unable to create database {reset}{db_name}\n")
elif user_input.lower() == "usedb":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter database name: ")
db_name = input()
else:
db_name = input("pysql> Enter database name: ")
result = ddl_obj.use_database(db_name)
if result is True:
current_db = db_name
db_use = True
print(f"[{green}+{reset}]{green} Connected to database {reset}{db_name}\n")
else:
print(f"[{red}-{reset}]{red} Unable to connect to database {reset}{db_name}\n")
elif user_input.lower() == "dropdb":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter database name: ")
db_name = input()
else:
db_name = input("pysql> Enter database name: ")
result = ddl_obj.drop_database(db_name)
if result is True:
print(f"[{green}+{reset}]{green} Deleted database {reset}{db_name}\n")
current_db = ""
db_use = False
else:
print(f"[{red}-{reset}]{red} Unable to delete database {reset}{db_name}\n")
elif user_input.lower() == "showtb":
if db_use is True:
result = ddl_obj.show_tables(current_db)
if result:
print(result + "\n")
else:
print(f"[{red}-{reset}]{red} Unable to show tables{reset}\n")
else:
print(f"[{red}-{reset}]{red} No database in use{reset}\n")
elif user_input.lower() == "createtb":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter table name: ")
tb_name = input()
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter table details: ")
args = input()
args = args.split(",")
result = ddl_obj.create_table(current_db, tb_name, args)
if result is True:
print(f"[{green}+{reset}]{green} Created table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} Unable to create table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} No database in use{reset}\n")
elif user_input.lower() == "droptb":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter table name: ")
tb_name = input()
result = ddl_obj.drop_table(current_db, tb_name)
if result is True:
print(f"[{green}+{reset}]{green} Deleted table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} Unable to delete table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} No database in use{reset}\n")
elif user_input.lower() == "trunctb":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter table name: ")
tb_name = input()
result = ddl_obj.truncate_table(current_db, tb_name)
if result is True:
print(f"[{green}+{reset}]{green} Truncated table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} Unable to truncate table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} No database in use{reset}\n")
elif user_input.lower() == "desctb":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter table name: ")
tb_name = input()
result = ddl_obj.desc_table(current_db, tb_name)
if result:
print(result + "\n")
else:
print(f"[{red}-{reset}]{red} Unable to display table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} No database in use{reset}\n")
elif user_input.lower() == "altertb":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter table name: ")
tb_name = input()
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter arguments: ")
args = input()
args = args.split(",")
result = ddl_obj.alter_table(current_db, tb_name, args)
if result is True:
print(f"[{green}+{reset}]{green} Altered table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} Unable to alter table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} No database in use{reset}\n")
elif user_input.lower() == "dml":
print(info.data_manipulation_language)
elif user_input.lower() == "select":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter table name: ")
tb_name = input()
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter selection columns: ")
columns = input()
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter table details: ")
args = input()
result = dml_obj.select(current_db, tb_name, columns, args)
if result:
print(result + "\n")
else:
print(f"[{red}-{reset}]{red} Unable to show selected values{reset}\n")
else:
print(f"[{red}-{reset}]{red} No database in use{reset}\n")
elif user_input.lower() in ["insert -s", "insert"]:
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter table name: ")
tb_name = input()
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter values: ")
args = input()
result = dml_obj.insert(current_db, tb_name, args)
if result is True:
print(f"[{green}+{reset}]{green} Inserted values in table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} Unable to insert value in table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} No database in use{reset}\n")
elif user_input.lower() == "insert -m":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter table name: ")
tb_name = input()
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter number of records: ")
num = int(input())
flag = True
for records in range (num):
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter values: ")
args = input()
result = dml_obj.insert(current_db, tb_name, args)
if result is False:
print(f"[{red}-{reset}]{red} Unable to insert value in table {reset}{tb_name}\n")
flag = False
break
if flag is True:
print(f"[{green}+{reset}]{green} Inserted values in table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} No database in use{reset}\n")
elif user_input.lower() == "insert -f":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter table name: ")
tb_name = input()
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter path to CSV file: ")
path = input()
result = dml_obj.insert_file(current_db, tb_name, path)
if result is True:
print(f"[{green}+{reset}]{green} Inserted values in table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} Unable to insert value in table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} No database in use{reset}\n")
elif user_input.lower() == "update":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter table name: ")
tb_name = input()
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter columns to update: ")
columns = input()
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter arguments: ")
args = input()
result = dml_obj.update(current_db, tb_name, columns, args)
if result is True:
print(f"[{green}+{reset}]{green} Updated values in table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} Unable to update values in table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} No database in use{reset}\n")
elif user_input.lower() == "delete":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter table name: ")
tb_name = input()
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter columns to delete: ")
columns = input()
result = dml_obj.delete(current_db, tb_name, columns)
if result is True:
print(f"[{green}+{reset}]{green} Deleted values from table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} Unable to delete values from table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} No database in use{reset}\n")
elif user_input.lower() == "all":
print(info.all_commands)
elif user_input.lower() == "export":
print(info.export)
elif user_input.lower() == "import":
print(info.import_)
elif user_input.lower() == "exportdb":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter database name: ")
db_name = input()
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter path to export: ")
path = input()
else:
db_name = input("pysql> Enter database name: ")
path = input("pysql> Enter path to export: ")
result = exp_obj.export_database(db_name, path)
if result is True:
print(f"[{green}+{reset}]{green} Exported database {reset}{db_name}\n")
else:
print(f"[{red}-{reset}]{red} Unable to export database {reset}{db_name}\n")
elif user_input.lower() == "exporttb -json":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter table name: ")
tb_name = input()
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter path to export: ")
path = input()
result = exp_obj.export_table_json(current_db, tb_name, path)
if result is True:
print(f"[{green}+{reset}]{green} Exported table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} Unable to export table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} No database in use{reset}\n")
elif user_input.lower() == "exporttb -csv":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter table name: ")
tb_name = input()
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter path to export: ")
path = input()
result = exp_obj.export_table_csv(current_db, tb_name, path)
if result is True:
print(f"[{green}+{reset}]{green} Exported table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} Unable to export table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} No database in use{reset}\n")
elif user_input.lower() == "exporttb -sql":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter table name: ")
tb_name = input()
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter path to export: ")
path = input()
result = exp_obj.export_table_sql(current_db, tb_name, path)
if result is True:
print(f"[{green}+{reset}]{green} Exported table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} Unable to export table {reset}{tb_name}\n")
else:
print(f"[{red}-{reset}]{red} No database in use{reset}\n")
elif user_input.lower() == "exportall -json":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter path to export: ")
path = input()
result = exp_obj.export_all_json(current_db, path)
if result is True:
print(f"[{green}+{reset}]{green} Exported all tables in {reset}{current_db}\n")
else:
print(f"[{red}-{reset}]{red} Unable to export tables in {reset}{current_db}\n")
else:
print(f"[{red}-{reset}]{red} No database in use{reset}\n")
elif user_input.lower() == "exportall -csv":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter path to export: ")
path = input()
result = exp_obj.export_all_csv(current_db, path)
if result is True:
print(f"[{green}+{reset}]{green} Exported all tables in {reset}{current_db}\n")
else:
print(f"[{red}-{reset}]{red} Unable to export tables in {reset}{current_db}\n")
else:
print(f"[{red}-{reset}]{red} No database in use{reset}\n")
elif user_input.lower() == "exportall -sql":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter path to export: ")
path = input()
result = exp_obj.export_all_sql(current_db, path)
if result is True:
print(f"[{green}+{reset}]{green} Exported all tables in {reset}{current_db}\n")
else:
print(f"[{red}-{reset}]{red} Unable to export tables in {reset}{current_db}\n")
else:
print(f"[{red}-{reset}]{red} No database in use{reset}\n")
elif user_input.lower() == "importdb":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter database name: ")
db_name = input()
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter path of file: ")
path = input()
else:
db_name = input("pysql> Enter database name: ")
path = input("pysql> Enter path of file: ")
result = imp_obj.import_database(db_name, path)
if result is True:
print(f"[{green}+{reset}]{green} Imported to database {reset}{db_name}\n")
else:
print(f"[{red}-{reset}]{red} Unable to import to database {reset}{db_name}\n")
elif user_input.lower() == "importtb":
if db_use is True:
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter table name: ")
tb_name = input()
sys.stdout.write(f"{yellow}({current_db}){reset} pysql> Enter path of file: ")
path = input()
else:
tb_name = input("pysql> Enter table name: ")
path = input("pysql> Enter path of file: ")
result = imp_obj.import_table(tb_name, path)
if result is True:
print(f"[{green}+{reset}]{green} Imported table to database {reset}{db_name}\n")
else:
print(f"[{red}-{reset}]{red} Unable to import table to database {reset}{db_name}\n")
else:
print(f"[{red}-{reset}]{red} Choose a valid option{reset}\n")
    except Exception:
print(f"[{red}-{reset}]{red} Unable to execute command{reset}\n")
# entry point for running PySQL CLI
def cli():
pass
| 1.6875
| 2
|
UNIVESPalgortimo_1/testeparagit.py
|
joaorobsonR/algoritmo1
| 0
|
12783270
|
def tabuada(n):
for i in range(0, 11):
res = n * i
print(n ,'x', i, '=', res)
tabuada(5)
| 3.390625
| 3
|
DOS_DOG/flow_dump.py
|
Network-Hub/Dos_Dog
| 0
|
12783271
|
# encoding: utf-8
import queue
from flow_detector import *
# dictionary recording how many packets have been sniffed for each flow
flow_recorder = {}
# queue used to buffer captured packets
q = queue.Queue()
def put_pkt_to_queue(pkts):
for pkt in pkts:
q.put(pkt)
def get_pkt_id(pkt):
if pkt.haslayer('IP'):
src_ip = pkt["IP"].src
dst_ip = pkt["IP"].dst
if pkt.haslayer('TCP'):
src_port = pkt['TCP'].sport
dst_port = pkt['TCP'].dport
protocol = 'TCP'
elif pkt.haslayer('UDP'):
src_port = pkt['UDP'].sport
dst_port = pkt['UDP'].dport
protocol = 'UDP'
        elif pkt.haslayer('ICMP'):
src_port = "NULL"
dst_port = "NULL"
protocol = 'ICMP'
else:
src_port = "NULL"
dst_port = "NULL"
protocol = 'OTHERS'
else:
return ""
pkt_id = str(src_ip) + "_" + str(src_port) + "_" + str(dst_ip) + "_" + str(dst_port) + "_" + str(protocol)
return pkt_id
def make_dir(new_dir_path):
"""
create the new directory if it not exist
:param new_dir_path: absolutely path , the new directory
:return: absolutely path
"""
if os.path.exists(new_dir_path):
pass
else:
os.makedirs(new_dir_path)
return new_dir_path
# per-packet handler: dump the packet to its flow folder and update the flow counter
def put_pkt_to_folder(pkt, pkt_id):
save_folder = make_dir(folder_root + os.sep + "flow" + os.sep + str(pkt_id))
wrpcap(save_folder + os.sep + str(random.randint(0, 1000)) + ".pcap", pkt)
# print(pkt_id, " dumped to ", save_folder, " success!")
    # update the packet count for this flow
if pkt_id in flow_recorder:
flow_recorder[pkt_id] = flow_recorder[pkt_id] + 1
else:
flow_recorder[pkt_id] = 1
if is_full(pkt_id):
# flow_recorder.pop(pkt_id)
print(flow_recorder[pkt_id])
        flow_recorder[pkt_id] = 0  # reset the count
return 1
else:
return 0
def start_detect(pkt_id):
flow_array = transform_main(pkt_id)
label = detector_main(flow_array)
print(pkt_id, "is:", label)
return label
def dump_pkt_from_queue():
if q.empty():
# print("queue is empty!")
return "NULL", -1
else:
pkt = q.get()
pkt_id = get_pkt_id(pkt)
if pkt_id == "":
return "NULL", -1
else:
flag = put_pkt_to_folder(pkt, pkt_id)
return pkt_id, flag
# a dedicated thread is started just to sniff packets
def sniff_pkt():
while True:
        pkts = sniff(filter=sniff_filter, iface=sniff_iface, count=sniff_count)  # capture packets  # prn=lambda x: x.show()
put_pkt_to_queue(pkts)
def sniff_main(save_path):
    # create a thread dedicated to packet sniffing
thread1 = threading.Thread(target=sniff_pkt, name="thread1")
    # start sniffing
thread1.start()
    # keep pulling packets from the queue
i = 0
while True:
pkt_id, flag = dump_pkt_from_queue()
if flag == 1:
label = start_detect(pkt_id)
save_label(save_path, label, i)
i += 1
def is_full(pkt_id):
if flow_recorder[pkt_id] >= pkt_num:
return True
else:
return False
if __name__ == '__main__':
    sniff_main(save_path="labels.txt")  # placeholder path: sniff_main() requires a save path for the labels
| 2.765625
| 3
|
didyoumean-discordpy/main.py
|
daima3629/didyoumean-discordpy
| 1
|
12783272
|
from discord.ext import commands
import difflib
from .message_generator import DefaultMessageGenerator, MessageGenerator
from typing import Optional, Mapping, Set, List
class DidYouMean(commands.Cog):
"""
Core class of this library.
Attributes
----------
bot
The bot object.
matcher_dict : Mapping[str, difflib.SequenceMatcher]
A dict for storing matchers.
max_suggest : int
Maximum number of suggestions.
"""
def __init__(self, bot) -> None:
self.bot = bot
self.matcher_dict: Mapping[str, difflib.SequenceMatcher] = {}
self.max_suggest = 3
self._command_names: Set[str] = set()
self._listup_commands(self.bot)
self._max_command_length = max((len(c) for c in self._command_names))
self._message_generator = DefaultMessageGenerator
def set_message_generator(self, generator) -> None:
"""
The function to set message generator.
Parameters
----------
generator
This class inherits from the `MessageGenerator` class.
Raises
------
TypeError
If the class does not inherit from `MessageGenerator`.
"""
        # the generator is stored as a class and instantiated later, so validate with issubclass
        if not (isinstance(generator, type) and issubclass(generator, MessageGenerator)):
            raise TypeError("Message generator must extend 'MessageGenerator'.")
self._message_generator = generator
def create_matcher(self, command_name: str) -> difflib.SequenceMatcher:
matcher = difflib.SequenceMatcher(None, command_name)
self.matcher_dict[command_name] = matcher
return matcher
def similar_factor_extraction(self, command: str) -> Optional[List[str]]:
matcher = self.matcher_dict.get(command) or self.create_matcher(command)
similar_cmd_list = []
for name in self._command_names:
matcher.set_seq2(name)
ratio = matcher.ratio()
if ratio > 0.6:
similar_cmd_list.append((name, ratio))
similar_cmd_list.sort(key=lambda c: c[1], reverse=True)
if not similar_cmd_list:
return
return [c[0] for c in similar_cmd_list][:self.max_suggest]
def _listup_commands(self, group, prefix=None) -> None:
if prefix is None:
prefix = []
prefix_str = ' '.join(prefix) + ' ' if len(prefix) > 0 else ''
for command in group.commands:
if command.hidden:
continue
elif isinstance(command, commands.Group):
names = [command.name] + list(command.aliases)
for name in names:
self._command_names.add(prefix_str + name)
prefix.append(command.name)
self._listup_commands(command, prefix)
prefix.pop()
elif isinstance(command, commands.Command):
names = [command.name] + list(command.aliases)
for name in names:
self._command_names.add(prefix_str + name)
@commands.Cog.listener()
async def on_ready(self):
self._listup_commands(self.bot)
@commands.Cog.listener()
async def on_command_error(self, ctx, err) -> None:
if not isinstance(err, commands.CommandNotFound):
return
        # str.lstrip strips a set of characters rather than a prefix, so slice the prefix off instead
        invalid_command = ctx.message.content[len(ctx.prefix):][:self._max_command_length]
similar_list = self.similar_factor_extraction(invalid_command)
if similar_list is None:
return
await self._message_generator(invalid_command[:len(similar_list[0])], similar_list).send(ctx)
def setup(bot):
bot.add_cog(DidYouMean(bot))
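# Usage sketch (illustrative only; names are assumptions, not part of the original module):
#   bot = commands.Bot(command_prefix="!")
#   bot.add_cog(DidYouMean(bot))   # or load this file as an extension so setup() is called
# After that, a mistyped command triggers a "did you mean ...?" suggestion from on_command_error.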
| 2.421875
| 2
|
hypernets/core/trial.py
|
lyhue1991/Hypernets
| 3
|
12783273
|
<filename>hypernets/core/trial.py
# -*- coding:utf-8 -*-
"""
"""
import datetime
import os
import pickle
import shutil
from collections import OrderedDict
import pandas as pd
from hypernets.utils.common import isnotebook, to_repr
from ..core.searcher import OptimizeDirection
class Trial():
def __init__(self, space_sample, trial_no, reward, elapsed, model_file=None, succeeded=True):
self.space_sample = space_sample
self.trial_no = trial_no
self.reward = reward
self.elapsed = elapsed
self.model_file = model_file
self.succeeded = succeeded
self.memo = {}
self.iteration_scores = {}
def __repr__(self):
return to_repr(self)
def _repr_html_(self):
html = f'<div><h>Trial:</h>'
html += '''<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th>key</th>
<th>value</th>
</tr>
</thead>
<tbody>'''
html += f'''<tr>
<td>Trial No.</td>
<td>{self.trial_no}</td>
</tr>
<tr>
<td>Reward</td>
<td>{self.reward}</td>
</tr>
<tr>
<td>Elapsed</td>
<td>{self.elapsed}</td>
</tr>
<tr>
<td>space.signature</td>
<td>{self.space_sample.signature}</td>
</tr>
<tr>
<td>space.vectors</td>
<td>{self.space_sample.vectors}</td>
</tr>'''
params = self.space_sample.get_assigned_params()
for i, hp in enumerate(params):
html += f'''<tr>
<td>{i}-{hp.alias}</td>
<td>{hp.value}</td>
</tr>
<tr>'''
html += ''' </tbody>
</table>
</div>'''
return html
def __getstate__(self):
try:
state = super().__getstate__()
except AttributeError:
state = self.__dict__
state = {k: v for k, v in state.items() if k != 'memo'}
return state
def to_df(self, include_params=False):
out = OrderedDict(trial_no=self.trial_no, succeeded=self.succeeded, reward=self.reward, elapsed=self.elapsed)
if include_params:
for p in self.space_sample.get_assigned_params():
out[p.alias] = p.value
return pd.DataFrame({k: [v] for k, v in out.items()})
class TrialHistory():
def __init__(self, optimize_direction):
self.trials = []
self.optimize_direction = optimize_direction
def append(self, trial):
old_best = self.get_best()
self.trials.append(trial)
new_best = self.get_best()
improved = old_best != new_best
return improved
def is_existed(self, space_sample):
return space_sample.vectors in [t.space_sample.vectors for t in self.trials]
def get_trial(self, space_sample):
all_vectors = [t.space_sample.vectors for t in self.trials]
index = all_vectors.index(space_sample.vectors)
if index >= 0:
return self.trials[index]
else:
return None
def get_best(self):
top1 = self.get_top(1)
if len(top1) <= 0:
return None
else:
return top1[0]
def get_worst(self):
topn = self.get_top()
return topn[-1] if len(topn) > 0 else None
def get_top(self, n=None):
assert n is None or isinstance(n, int)
valid_trials = [t for t in self.trials if t.succeeded]
if len(valid_trials) <= 0:
return []
sorted_trials = sorted(valid_trials, key=lambda t: t.reward,
reverse=self.optimize_direction in ['max', OptimizeDirection.Maximize])
if isinstance(n, int) and n < len(sorted_trials):
sorted_trials = sorted_trials[:n]
return sorted_trials
def get_space_signatures(self):
signatures = set()
for s in [t.space_sample for t in self.trials]:
signatures.add(s.signature)
return signatures
def diff(self, trials):
signatures = set()
for s in [t.space_sample for t in trials]:
signatures.add(s.signature)
diffs = {}
for sign in signatures:
ts = [t for t in trials if t.space_sample.signature == sign]
pv_dict = {}
for t in ts:
for p in t.space_sample.get_assigned_params():
k = p.alias
v = str(p.value)
if pv_dict.get(k) is None:
pv_dict[k] = {}
if pv_dict[k].get(v) is None:
pv_dict[k][v] = [t.reward]
else:
pv_dict[k][v].append(t.reward)
diffs[sign] = pv_dict
return diffs
def get_trajectories(self):
times, best_rewards, rewards = [0.0], [0.0], [0.0]
his = sorted(self.trials, key=lambda t: t.trial_no)
best_trial_no = 0
best_elapsed = 0
for t in his:
rewards.append(t.reward)
times.append(t.elapsed + times[-1])
if t.reward > best_rewards[-1]:
best_rewards.append(t.reward)
best_trial_no = t.trial_no
best_elapsed = times[-1]
else:
best_rewards.append(best_rewards[-1])
return times, best_rewards, rewards, best_trial_no, best_elapsed
def save(self, filepath):
if filepath.endswith('.pkl'):
with open(filepath, 'wb') as output:
pickle.dump(self, output, protocol=pickle.HIGHEST_PROTOCOL)
return
with open(filepath, 'w') as output:
output.write(f'{self.optimize_direction}\r\n')
for trial in self.trials:
data = f'{trial.trial_no}|{trial.space_sample.vectors}|{trial.reward}|{trial.elapsed}' + \
f'|{trial.model_file if trial.model_file else ""}|{trial.succeeded}\r\n'
output.write(data)
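    # Plain-text format written by save(): the first line is the optimize direction, then one
    # pipe-separated line per trial (hypothetical values shown):
    #   3|[0, 1, 0.5]|0.87|12.4|/tmp/model_3.pkl|True
    # load_history() below parses these fields back into Trial objects.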
@staticmethod
def load_history(space_fn, filepath):
if filepath.endswith('.pkl'):
with open(filepath, 'rb') as input:
history = pickle.load(input)
return history
with open(filepath, 'r') as input:
line = input.readline()
history = TrialHistory(line.strip())
while line is not None and line != '':
line = input.readline()
if line is None or line.strip() == '':
continue
fields = line.strip().split('|')
assert len(fields) >= 4, f'Trial format is not correct. \r\nline:[{line}]'
sample = space_fn()
vector = [float(n) if n.__contains__('.') else int(n) for n in
fields[1].replace('[', '').replace(']', '').split(',')]
sample.assign_by_vectors(vector)
if len(fields) > 4:
model_file = fields[4]
else:
model_file = None
trial = Trial(space_sample=sample, trial_no=int(fields[0]), reward=float(fields[2]),
elapsed=float(fields[3]), model_file=model_file)
if len(fields) > 5:
                    # bool() of any non-empty string is True, so compare against the literal 'True' instead
                    trial.succeeded = fields[5].strip() == 'True'
history.append(trial)
return history
def __repr__(self):
out = OrderedDict(direction=self.optimize_direction)
if len(self.trials) > 0:
tops = self.get_top()
out['size'] = len(self.trials)
out['succeeded'] = len(tops)
if len(tops) > 0:
out['best_reward'] = tops[0].reward
out['worst_reward'] = tops[-1].reward
repr_ = ', '.join('%s=%r' % (k, v) for k, v in out.items())
return f'{type(self).__name__}({repr_})'
def to_df(self, include_params=False):
if len(self.trials) > 0:
df = pd.concat([t.to_df(include_params) for t in self.trials], axis=0)
df.reset_index(drop=True, inplace=True)
else:
df = pd.DataFrame()
return df
def plot_hyperparams(self, destination='notebook', output='hyperparams.html'):
"""Plot hyperparams in a parallel line chart
Args:
destination: one of notebook, html
output: the html file path
Returns:
"""
try:
import pyecharts
except Exception as e:
raise Exception("You may not install 'pyecharts',"
"please refer to https://github.com/pyecharts/pyecharts and install it.")
import pyecharts.options as opts
from pyecharts.charts import Parallel
if destination == 'notebook' and not isnotebook():
raise Exception("You may not running in a notebook,"
" try to set 'destination' to 'html' or run it in notebook ")
if self.trials is None or len(self.trials) < 1:
raise Exception("Trials is empty ")
REWARD_METRIC_COL = 'Reward metric'
def get_space_params(trial):
space = trial.space_sample
params_dict = {}
for hyper_param in space.get_all_params():
references = list(hyper_param.references)
if len(references) > 0:
param_name = hyper_param.alias[len(list(hyper_param.references)[0].name) + 1:]
param_value = hyper_param.value
if isinstance(param_value, int) or isinstance(param_value, float):
if not isinstance(param_value, bool):
params_dict[param_name] = param_value
params_dict[REWARD_METRIC_COL] = trial.reward
return params_dict
def make_dims(df_params):
parallel_axis = []
for i, col in enumerate(df_params.columns):
if df_params.dtypes[col].kind == 'O':
parallel_axis.append({
"dim": i,
"name": col,
"type": "category",
"data": df_params[col].unique().tolist(),
})
else:
parallel_axis.append({'dim': i, 'name': col})
return parallel_axis
trials_params = [get_space_params(trial) for trial in self.trials]
param_names = list(set([v for ps in trials_params for v in ps.keys()]))
param_names.remove(REWARD_METRIC_COL)
param_names.insert(len(param_names), REWARD_METRIC_COL)
trial_params_values = []
for t in trials_params:
param_values = [t.get(n) for n in param_names]
trial_params_values.append(param_values)
df_train_params = pd.DataFrame(data=trial_params_values, columns=param_names)
# remove if all is None
df_train_params.dropna(axis=1, how='all', inplace=True)
parallel_axis = make_dims(df_train_params)
chart = \
Parallel(init_opts=opts.InitOpts(width="%dpx" % (len(param_names) * 100), height="400px")) \
.add_schema(schema=parallel_axis).add(series_name="",
data=df_train_params.values.tolist(),
linestyle_opts=opts.LineStyleOpts(width=1, opacity=0.5),
).set_global_opts(
visualmap_opts=[
opts.VisualMapOpts(
type_="color",
is_calculable=True,
precision=2,
# dimension=0,
pos_left="-10",
pos_bottom="30",
max_=df_train_params[REWARD_METRIC_COL].max().tolist(),
min_=df_train_params[REWARD_METRIC_COL].min().tolist()
)
]
)
if destination == 'notebook':
return chart.render_notebook()
else:
return chart.render(output)
class TrialStore(object):
def __init__(self):
self.reset()
self.load()
def put(self, dataset_id, trial):
self.put_to_cache(dataset_id, trial)
self._put(dataset_id, trial)
def get_from_cache(self, dataset_id, space_sample):
if self._cache.get(dataset_id) is None:
self._cache[dataset_id] = {}
dataset = self._cache[dataset_id]
if dataset.get(space_sample.signature) is None:
dataset[space_sample.signature] = {}
trial = dataset[space_sample.signature].get(self.sample2key(space_sample))
return trial
def put_to_cache(self, dataset_id, trial):
if self._cache.get(dataset_id) is None:
self._cache[dataset_id] = {}
dataset = self._cache[dataset_id]
if dataset.get(trial.space_sample.signature) is None:
dataset[trial.space_sample.signature] = {}
dataset[trial.space_sample.signature][self.sample2key(trial.space_sample)] = trial
def get(self, dataset_id, space_sample):
trial = self.get_from_cache(dataset_id, space_sample)
if trial is None:
trial = self._get(dataset_id, space_sample)
if trial is not None:
self.put_to_cache(dataset_id, trial)
return trial
def _get(self, dataset_id, space_sample):
raise NotImplementedError
def _put(self, dataset_id, trial):
raise NotImplementedError
def load(self):
raise NotImplementedError
def get_all(self, dataset_id, space_signature):
raise NotImplementedError
def reset(self):
raise NotImplementedError
def persist(self):
raise NotImplementedError
def sample2key(self, space_sample):
key = ','.join([str(f) for f in space_sample.vectors])
return key
def check_trial(self, trial):
pass
class DiskTrialStore(TrialStore):
def __init__(self, home_dir=None):
self.home_dir = self.prepare_home_dir(home_dir)
TrialStore.__init__(self)
def prepare_home_dir(self, home_dir):
if home_dir is None:
home_dir = 'trial_store'
if home_dir[-1] == '/':
home_dir = home_dir[:-1]
home_dir = os.path.expanduser(home_dir)
if not os.path.exists(home_dir):
os.makedirs(home_dir)
return home_dir
def _prepare_output_dir(self, log_dir, searcher):
if log_dir is None:
log_dir = 'log'
if log_dir[-1] == '/':
log_dir = log_dir[:-1]
running_dir = f'exp_{searcher.__class__.__name__}_{datetime.datetime.now().__format__("%m%d-%H%M%S")}'
output_path = os.path.expanduser(f'{log_dir}/{running_dir}/')
if not os.path.exists(output_path):
os.makedirs(output_path)
return output_path
def load(self):
pass
def clear_history(self):
shutil.rmtree(self.home_dir)
self.prepare_home_dir(self.home_dir)
self.reset()
def reset(self):
self._cache = {}
def _get(self, dataset_id, space_sample):
path = self.get_trial_path(dataset_id, space_sample)
trial = self._load_trial(path)
if trial is not None:
trial.space_sample = space_sample
return trial
def _load_trial(self, path):
if not os.path.exists(path):
return None
else:
with open(path, 'rb') as f:
trial = pickle.load(f)
self.check_trial(trial)
return trial
def _put(self, dataset_id, trial):
path = self.get_trial_path(dataset_id, trial.space_sample)
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
with open(path, 'wb') as f:
temp = Trial(space_sample=None,
trial_no=trial.trial_no,
reward=trial.reward,
elapsed=trial.elapsed,
model_file=trial.model_file)
temp.space_sample_vectors = trial.space_sample.vectors
pickle.dump(temp, f)
def persist(self):
pass
def get_trial_path(self, dataset_id, space_sample):
path = f'{self.home_dir}/{dataset_id}/{space_sample.signature}/{self.sample2key(space_sample)}.pkl'
return path
def get_all(self, dataset_id, space_signature):
path = f'{self.home_dir}/{dataset_id}/{space_signature}'
trials = []
if not os.path.exists(path):
return trials
files = os.listdir(path)
for f in files:
if f.endswith('.pkl'):
f = path + '/' + f
trial = self._load_trial(f)
trials.append(trial)
return trials
default_trial_store = None
def get_default_trial_store():
return default_trial_store
def set_default_trial_store(trial_store):
global default_trial_store
default_trial_store = trial_store
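# Usage sketch (illustrative only): persist trials on disk and register the store as the default.
#   store = DiskTrialStore(home_dir='~/.hypernets/trial_store')
#   set_default_trial_store(store)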
| 2.140625
| 2
|
ST7735fb.py
|
boochow/FBConsole
| 35
|
12783274
|
# ST7735S driver wrapper for FBConsole
# ST7735 driver: https://github.com/boochow/MicroPython-ST7735
# FBConsole: https://github.com/boochow/FBConsole
class TFTfb(object):
def __init__(self, tft, font):
self.height = 160
self.width = 128
self.tft = tft
self.font = font
tft.setvscroll(tft.tfa, tft.bfa)
tft.fill(0)
tft.vscroll(0)
self.voffset = 0
def _abs2tft(self, v) :
''' convert screen y coord to LCD address'''
return (self.voffset + v) % self.height
def fill(self, color) :
self.tft.fill(color)
def fill_rect(self, x, y, w, h, color) :
top = self._abs2tft(y)
bottom = self._abs2tft(y + h)
if (bottom > top):
self.tft.fillrect((x, top), (w, h), color)
else:
self.tft.fillrect((x, top), (w, self.height - top), color)
self.tft.fillrect((x, 0), (w, bottom), color)
def scroll(self, dx, dy) :
self.voffset = (self.voffset - dy + self.height) % self.height
self.tft.vscroll(self.voffset)
def hline(self, x, y, w, color) :
self.tft.hline((x, self._abs2tft(y)), w, color)
def text(self, c, x, y, color) :
self.tft.char((x, self._abs2tft(y)), c, color, self.font, (1, 1))
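# Usage sketch (illustrative; the exact constructor arguments depend on the boochow ST7735 driver
# and FBConsole versions, so treat the names below as hypothetical):
#   tft = TFT(spi, aDC, aReset, aCS)   # driver instance wired to the display
#   fb = TFTfb(tft, sysfont)           # wrap it with this framebuffer adapter
#   os.dupterm(FBConsole(fb))          # redirect the MicroPython REPL to the LCD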
| 2.390625
| 2
|
demo/ner/money/money.py
|
zhengxin2016/corpus
| 0
|
12783275
|
#!/usr/bin/env python3
import os,sys
import re
class Money():
def __init__(self):
self.resource_path = 'resource'
self.money_set = set()
self.digital_set = set()
self.get_money()
self.mon_r = self.get_money_r()
def get_money(self):
with open(self.resource_path+'/money_cn.txt') as f:
for line in f:
self.money_set.add(line.strip())
with open(self.resource_path+'/money_en.txt') as f:
for line in f:
self.money_set.add(line.strip())
with open(self.resource_path+'/digital.txt') as f:
for line in f:
self.digital_set.add(line.strip())
def get_money_r(self):
money_1 = u'([0-90-9两零一二三四五六七八九十百千万亿兆几壹贰叁肆伍陆柒捌玖拾]{1}[0-90-9,,两零一二三四五六七八九十百千万亿兆几壹贰叁肆伍陆柒捌玖拾\.]'
money_2 = u')){1,3}[0-90-9两零一二三四五六七八九]{0,1}'
mon_r = u'{0,30}(元|块|分|人民币|角|毛|RMB){1}(?!儿|去|形|钟|'
suf_error = []
with open(self.resource_path+'/suf_error.txt') as f:
for line in f:
suf_error.append(line.strip())
return money_1 + mon_r + '|'.join(suf_error) + money_2
def find_money(self, string):
T_list = ['O'] * len(string)
ite = re.finditer(self.mon_r, string)
if ite:
for _ in ite:
T_list[_.start()] = 'B-MNY'
for i in range(_.start() + 1, _.end() - 1):
T_list[i] = 'I-MNY'
T_list[_.end() - 1] = 'E-MNY'
stop = 0
for i in range(len(string)):
if i >= stop:
for j in range(len(string), i, -1):
if string[i:j] in self.money_set:
if i > 0 and string[i-1] in self.digital_set:
for k in range(i-1, -1, -1):
if string[k] in self.digital_set and k != 0:
T_list[k] = 'I-MNY'
elif string[k] in self.digital_set and k == 0:
T_list[k] = 'B-MNY'
else:
T_list[k+1] = 'B-MNY'
break
T_list[j-1] = 'E-MNY'
for k in range(i, j-1):
T_list[k] = 'I-MNY'
stop = j
break
return T_list
if __name__ == '__main__':
mny = Money()
string = '我要取1000块钱,存两万美元'
print(string)
print(mny.find_money(string))
while 1:
string = input('input:')
print(mny.find_money(string.strip()))
| 3.484375
| 3
|
cowsay/lib/cows/kitty.py
|
Ovlic/cowsay_py
| 0
|
12783276
|
def Kitty(thoughts, eyes, eye, tongue):
return f"""
{thoughts}
{thoughts}
("\`-' '-/") .___..--' ' "\`-._
\` {eye}_ {eye} ) \`-. ( ) .\`-.__. \`)
(_Y_.) ' ._ ) \`._\` ; \`\` -. .-'
_.. \`--'_..-_/ /--' _ .' ,4
( i l ),-'' ( l i),' ( ( ! .-'
"""
| 2.671875
| 3
|
tests/test_adfgvx.py
|
SF-11/hist-crypt
| 0
|
12783277
|
<reponame>SF-11/hist-crypt<gh_stars>0
import ciphers.adfgvx
alpha_5x5 = [
["b", "t", "a", "l", "p"],
["d", "h", "o", "z", "k"],
["q", "f", "v", "s", "n"],
["g", "ij", "c", "u", "x"],
["m", "r", "e", "w", "y"]
]
alpha_6x6 = [
["n", "a", "1", "c", "3", "h"],
["8", "t", "b", "2", "o", "m"],
["e", "5", "w", "r", "p", "d"],
["4", "f", "6", "g", "7", "i"],
["9", "j", "0", "k", "l", "q"],
["s", "u", "v", "x", "y", "z"]
]
def test_encrypt_5x5():
plaintext = "ATTACKATONCE"
trans_key = "CARGO"
assert ciphers.adfgvx.encrypt(plaintext, trans_key, alpha_5x5) == "FAXDFADDDGDGFFFAFAXAFAFX"
def test_decrypt_5x5():
ciphertext = "FAXDFADDDGDGFFFAFAXAFAFX"
key = "CARGO"
assert ciphers.adfgvx.decrypt(ciphertext, key, alpha_5x5) == "ATTACKATONCE"
def test_encrypt_6x6():
plaintext = "ATTACKAT1200AM"
trans_key = "PRIVACY"
assert ciphers.adfgvx.encrypt(plaintext, trans_key, alpha_6x6) == "DGDDDAGDDGAFADDFDADVDVFAADVX"
def test_decrypt_6x6():
ciphertext = "DGDDDAGDDGAFADDFDADVDVFAADVX"
trans_key = "PRIVACY"
assert ciphers.adfgvx.decrypt(ciphertext, trans_key, alpha_6x6) == "ATTACKAT1200AM"
def test_bad_alpha_square():
alpha_5x5 = [
["b", "b", "a", "l", "p"],
["d", "h", "o", "z", "k"],
["q", "f", "v", "s", "n"],
["g", "i", "c", "u", "x"],
["m", "r", "e", "w", "y"]
]
plaintext = "<PASSWORD>"
trans_key = "CARGO"
try:
        ciphers.adfgvx.encrypt(plaintext, trans_key, alpha_5x5)
assert False
except ValueError:
assert True
def test_load_alpha_square5x5():
expected = alpha_5x5
actual = ciphers.adfgvx.load_alpha_square(open("tests/adfgvx_square5x5", "r"))
assert actual == expected
def test_load_alpha_square6x6():
expected = alpha_6x6
actual = ciphers.adfgvx.load_alpha_square(open("tests/adfgvx_square6x6", "r"))
assert actual == expected
def test_load_alpha_square_bad_shape():
try:
ciphers.adfgvx.load_alpha_square(open("tests/adfgvx_square6x6_bad_shape", "r"))
assert False
except ValueError:
assert True
| 2.609375
| 3
|
bfro/bfro_weather_join.py
|
timothyrenner/bfro_sightings_data
| 7
|
12783278
|
import click
import json
import csv
from toolz import get, get_in
@click.command()
@click.argument('report_file', type=click.File('r'))
@click.argument('weather_file', type=click.File('r'))
@click.argument('weather_join_file', type=click.File('w'))
def main(report_file, weather_file, weather_join_file):
weather_reader = csv.reader(weather_file)
# Load the weather into a dictionary.
weather_cache = {
# Extract the dict with the weather information.
(r[0], r[1]): get_in(["daily", "data", 0], json.loads(r[-1]), {})
for r in weather_reader
}
report_reader = csv.DictReader(report_file)
fieldnames = report_reader.fieldnames + [
"temperature_high",
"temperature_mid",
"temperature_low",
"dew_point",
"humidity",
"cloud_cover",
"moon_phase",
"precip_intensity",
"precip_probability",
"precip_type",
"pressure",
"summary",
"uv_index",
"visibility",
"wind_bearing",
"wind_speed"
]
writer = csv.DictWriter(weather_join_file, fieldnames=fieldnames)
writer.writeheader()
for line in report_reader:
weather = get((line["geohash"], line["date"]), weather_cache, {})
temperature_high = get("temperatureHigh", weather, None)
temperature_low = get("temperatureLow", weather, None)
line["temperature_high"] = temperature_high
line["temperature_mid"] = (
temperature_low + (temperature_high - temperature_low)/2
) if temperature_high and temperature_low else None
line["temperature_low"] = temperature_low
line["dew_point"] = get("dewPoint", weather, None)
line["humidity"] = get("humidity", weather, None)
line["cloud_cover"] = get("cloudCover", weather, None)
line["moon_phase"] = get("moonPhase", weather, None)
line["precip_intensity"] = get("precipIntensity", weather, None)
line["precip_probability"] = get("precipProbability", weather, None)
line["precip_type"] = get("precipType", weather, None)
line["pressure"] = get("pressure", weather, None)
line["summary"] = get("summary", weather, None)
line["uv_index"] = get("uvIndex", weather, None)
line["visibility"] = get("visibility", weather, None)
line["wind_bearing"] = get("windBearing", weather, None)
line["wind_speed"] = get("windSpeed", weather, None)
writer.writerow(line)
if __name__ == "__main__":
main()
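# Usage sketch (hypothetical file names): the weather file is expected to be a headerless CSV whose
# rows start with (geohash, date) and end with a JSON blob of daily weather data, e.g.
#   python bfro_weather_join.py bfro_reports.csv weather_cache.csv bfro_reports_weather.csv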
| 3.328125
| 3
|
alethioclock/config.py
|
A-Somma/alethioclock
| 0
|
12783279
|
<filename>alethioclock/config.py
#firebase service account
SERVICE_ACCOUNT_PATH = '../.firebase/service-account.json'
SERVICE_ADDRESS = '532'
STATUSES = ['AUTOMATIC', 'HOME', 'LOST', 'UNKNOWN']
| 1.046875
| 1
|
src/waldur_openstack/openstack_tenant/tests/test_snapshots.py
|
geant-multicloud/MCMS-mastermind
| 26
|
12783280
|
<gh_stars>10-100
from ddt import data, ddt
from rest_framework import status, test
from waldur_openstack.openstack_tenant import models
from . import factories, fixtures
@ddt
class SnapshotRestoreTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.OpenStackTenantFixture()
def _make_restore_request(self):
url = factories.SnapshotFactory.get_url(
snapshot=self.fixture.snapshot, action='restore'
)
request_data = {
'name': '/dev/sdb1',
}
response = self.client.post(url, request_data)
return response
@data('global_support', 'customer_support', 'member')
def test_user_cannot_restore_snapshot_if_he_has_not_admin_access(self, user):
self.client.force_authenticate(user=getattr(self.fixture, user))
response = self._make_restore_request()
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@data('staff', 'owner', 'admin', 'manager')
def test_user_can_restore_snapshot_only_if_he_has_admin_access(self, user):
self.client.force_authenticate(user=getattr(self.fixture, user))
response = self._make_restore_request()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
@data('user')
def test_user_cannot_restore_snapshot_if_he_has_no_project_level_permissions(
self, user
):
self.client.force_authenticate(user=getattr(self.fixture, user))
response = self._make_restore_request()
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_snapshot_restore_creates_volume(self):
self.client.force_authenticate(self.fixture.owner)
response = self._make_restore_request()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(models.SnapshotRestoration.objects.count(), 1)
restoration = models.SnapshotRestoration.objects.first()
restored_volume = models.Volume.objects.exclude(
pk=self.fixture.snapshot.source_volume.pk
).first()
self.assertEqual(self.fixture.snapshot, restoration.snapshot)
self.assertEqual(restored_volume, restoration.volume)
def test_user_is_able_to_specify_a_name_of_the_restored_volume(self):
self.client.force_authenticate(self.fixture.owner)
url = factories.SnapshotFactory.get_url(
snapshot=self.fixture.snapshot, action='restore'
)
expected_name = 'C:/ Drive'
request_data = {
'name': expected_name,
}
response = self.client.post(url, request_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
created_volume = models.SnapshotRestoration.objects.first().volume
self.assertIn(expected_name, created_volume.name)
self.assertEqual(response.data['uuid'], created_volume.uuid.hex)
self.assertEqual(response.data['name'], created_volume.name)
def test_user_is_able_to_specify_a_description_of_the_restored_volume(self):
self.client.force_authenticate(self.fixture.owner)
url = factories.SnapshotFactory.get_url(
snapshot=self.fixture.snapshot, action='restore'
)
expected_description = 'Restored after blue screen.'
request_data = {
'name': '/dev/sdb2',
'description': expected_description,
}
response = self.client.post(url, request_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
created_volume = models.SnapshotRestoration.objects.first().volume
self.assertIn(expected_description, created_volume.description)
self.assertEqual(response.data['uuid'], created_volume.uuid.hex)
self.assertEqual(response.data['description'], created_volume.description)
def test_restore_is_not_available_if_snapshot_is_not_in_OK_state(self):
self.client.force_authenticate(self.fixture.owner)
snapshot = factories.SnapshotFactory(
service_settings=self.fixture.openstack_tenant_service_settings,
project=self.fixture.project,
source_volume=self.fixture.volume,
state=models.Snapshot.States.ERRED,
)
url = factories.SnapshotFactory.get_url(snapshot=snapshot, action='restore')
response = self.client.post(url)
self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
def test_restore_cannot_be_made_if_volume_exceeds_quota(self):
self.client.force_authenticate(self.fixture.owner)
quota = self.fixture.openstack_tenant_service_settings.quotas.get(
name='volumes'
)
quota.limit = quota.usage
quota.save()
snapshot = self.fixture.snapshot
expected_volumes_amount = models.Volume.objects.count()
url = factories.SnapshotFactory.get_url(snapshot=snapshot, action='restore')
response = self.client.post(url)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
snapshot.refresh_from_db()
self.assertEqual(snapshot.state, snapshot.States.OK)
self.assertEqual(expected_volumes_amount, models.Volume.objects.count())
def test_restore_cannot_be_made_if_volume_exceeds_volume_type_quota(self):
self.client.force_authenticate(self.fixture.owner)
snapshot = self.fixture.snapshot
snapshot.service_settings.set_quota_limit(
f'gigabytes_{snapshot.source_volume.type.name}', 0
)
expected_volumes_amount = models.Volume.objects.count()
url = factories.SnapshotFactory.get_url(snapshot=snapshot, action='restore')
response = self.client.post(url, {'name': 'My Volume'})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
snapshot.refresh_from_db()
self.assertEqual(snapshot.state, snapshot.States.OK)
self.assertEqual(expected_volumes_amount, models.Volume.objects.count())
@ddt
class SnapshotRetrieveTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.OpenStackTenantFixture()
@data('staff', 'owner', 'admin', 'manager', 'global_support')
def test_a_list_of_restored_volumes_are_displayed_if_user_has_project_level_permissions(
self, user
):
self.client.force_authenticate(user=getattr(self.fixture, user))
snapshot_restoration = factories.SnapshotRestorationFactory(
snapshot=self.fixture.snapshot
)
url = factories.SnapshotFactory.get_url(snapshot=snapshot_restoration.snapshot)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['uuid'], snapshot_restoration.snapshot.uuid.hex)
self.assertIn('restorations', response.data)
        self.assertEqual(len(response.data['restorations']), 1)
@data('user')
def test_user_cannot_see_snapshot_restoration_if_has_no_project_level_permissions(
self, user
):
self.client.force_authenticate(user=getattr(self.fixture, user))
self.fixture.snapshot
url = factories.SnapshotFactory.get_list_url()
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 0)
| 1.992188
| 2
|
notebooks/myfuncts.py
|
bradleysawler/blog
| 0
|
12783281
|
<reponame>bradleysawler/blog<filename>notebooks/myfuncts.py
import pandas as pd
import numpy as np
import re
from fractions import Fraction
# import plot libraries
import seaborn as sns
sns.set_palette('Set2')
import matplotlib.pyplot as plt
# list number of files
import os, os.path
from pathlib import Path
import datetime
# General functions defined
def fn_drop_columns(cols_todrop, df):
"""drop unwanted columns. inplace=False for raw data"""
df = df.drop(df.columns[cols_todrop], axis=1, inplace=False)
return df
def fn_check_missing_data(df):
"""check for any missing data in the df (display in descending order)"""
return df.isnull().sum().sort_values(ascending=False)
def fn_remove_col_white_space(df,col):
"""remove white space at the beginning of string"""
df[col] = df[col].str.lstrip()
def fn_convert_str_datetime(df, str_date_col_name, converted_name_column):
"""
Convert a string based date column to a datetime64[ns] timestamp without the timezone.
To include the UTC timezone remove the .apply section.
"""
df[converted_name_column] = pd.to_datetime(df[str_date_col_name],
format='%Y-%m-%d %H:%M:%S').apply(lambda x: x.tz_localize(None))
def fn_col_mapping(df):
"""Return a list of column names and their associated column index number."""
return [f'{c[0]}:{c[1]}' for c in enumerate(df.columns)]
def fn_col_mapping_dict(df):
"""Return a column mapping dictionary with column name and associated column index number.
This can be used to assign new column names"""
return {c[0]:c[1] for c in enumerate(df.columns)}
def fn_col_mapping_dict_for_rename(df):
"""Return a column mapping dictionary with column name and associated column index number.
This can be used to assign new column names"""
return {c[1]:c[1] for c in enumerate(df.columns)}
def fn_modification_date(filename):
"""Return the filename and the modification data.
Parameters
---------
filename: Pass in Path(filepath variable)
"""
fn = os.path.split(filename)[1]
t = os.path.getmtime(filename)
t = datetime.datetime.fromtimestamp(t)
print(f'Source file: {fn} was modified on: {t}')
def fn_floatformat(decimals):
dec = '{:.'+str(decimals)+'f}'
pd.options.display.float_format = dec.format
def fn_col_value_from_key(df, cols):
"""
    Provide a dataframe and a list of column integer(s) to display. fn_col_mapping_dict(df) is called to get a dictionary
    of the dataframe columns. The column keys are iterated and their values are returned. These values can be used
    in iloc slicing.
"""
df_cols_dict = fn_col_mapping_dict(df)
col_names = [df_cols_dict[x] for x in cols]
return col_names
def fn_df_unique_2cols(df, columns):
"""Returns a unique list in the last column of the dataframe based on the first 2 columns.
Inputs:
df: Dataframe of interest
    columns: List of 2 column names. First is used for grouping and the second is the column to collect unique values for.
    Notes: 2 column names must be passed in. The unique values are separated by ' / ' """
    # Rename the unique-values column so it is not confused with the original column name
rename_unique_col = columns[1]+'_uq'
df_unique = df[[columns[0], columns[1]]]\
.drop_duplicates()\
.groupby([columns[0]])[columns[1]]\
.apply(list)\
.reset_index()\
.rename(columns={columns[1]: rename_unique_col})
df_unique[rename_unique_col] = df_unique.apply(lambda x:(' / '.join([str(s) for s in x[rename_unique_col]])), axis=1)
return df_unique
def fn_df_unique_3cols(df, columns):
"""Returns a unique list in the last column of the dataframe based on the first 2 columns.
Inputs:
df: Dataframe of interest
    columns: List of 3 column names. First 2 are used for grouping and the last is the column to collect unique values for.
    Notes: 3 column names must be passed in. The unique values are separated by ' / ' """
    # Rename the unique-values column so it is not confused with the original column name
rename_unique_col = columns[2]+''
df_unique = df[[columns[0], columns[1], columns[2]]]\
.drop_duplicates()\
.groupby([columns[0],columns[1]])[columns[2]]\
.apply(list)\
.reset_index()\
.rename(columns={columns[2]: rename_unique_col})
df_unique[rename_unique_col] = df_unique.apply(lambda x:(' / '.join([str(s) for s in x[rename_unique_col]])),
axis=1)
return df_unique
def fn_pd_options_display(max_columns = 20, max_rows=50, max_colwidth=200):
"""Set Pandas Display options"""
pd.options.display.max_columns = max_columns # None -> No Restrictions
pd.options.display.max_rows = max_rows # None -> Be careful with this
pd.options.display.max_colwidth = max_colwidth
def fn_filter_column_all_search_words(df, column_to_search, search_words, cols_to_display):
"""Search a specified column for ALL search words passed in as a list.
Inputs:
df: dataframe
column_to_search: 1 column name in the dataframe as a string
search_words: words as string within a list
    cols_to_display: pass in a list of integers for the dataframe columns to display
"""
return df.loc[df[column_to_search].apply(lambda lookin: all(word.upper() in lookin for word in search_words))].iloc[
:, cols_to_display]
def fn_df_drop_blank_cols_row(df):
"""Input a dataframe and drop all null columns / rows and return a copy"""
return df.dropna(how='all').dropna(axis=1, how='all').copy()
def fn_clean_header(df):
"""Pass in df and amend replacments accordingly"""
    # Note: the replacements are applied in no fixed order, so '__' is collapsed to '_' at the end to clean up double underscores
header_replace = { (' ', '_'), ('\n', ''), ('/', ''), ('.', ''), ('(', '_'), (')', ''), ('#', 'num')}
for k, v in header_replace:
df.columns = df.columns.str.strip().str.lower().str.replace(k, v).str.replace('__', '_')
def fn_filter_words_in_column(df, col_to_search, search_words, any_words=True):
"""Filter for any (default) or all words in a dataframe column. Case insensitive.
df: dataframe to search
col_to_search: column name to search as a str
search_words: list of search words
any_words: Default True to search for any word. False will search to match all words.
"""
if any_words == True:
df_f = df.loc[df[col_to_search].str.lower().apply(
lambda x: any(word.lower() in x for word in search_words))]
else:
df_f = df.loc[df[col_to_search].str.lower().apply(
lambda x: all(word.lower() in x for word in search_words))]
return df_f
def fn_pivot_table(data, index_cols, agg_dict, margins=True, margins_name='TOTAL QTY'):
"""Pivot a dataframe.
data: dataframe
index_cols = list of columns names
agg_dict: dictionary of aggregate values
margins: True or False
margins_name: str
values: set as keys of the agg_dict
"""
return pd.pivot_table(data, index=index_cols, aggfunc=agg_dict, values=agg_dict.keys(),
margins=margins, margins_name=margins_name)
def fn_keep_columns(df, cols_tokeep):
"""Return dataframe with columns passed in as a list of string names. """
return df.filter(df.columns[cols_tokeep])
def fn_local_time_zone():
"""Return the local timezone"""
return datetime.datetime.now(datetime.timezone(datetime.timedelta(0))).astimezone().tzinfo
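# Minimal usage sketch (not part of the original module; the dataframe below is hypothetical and
# only illustrates fn_col_mapping and fn_df_unique_2cols):
if __name__ == "__main__":
    _demo = pd.DataFrame({"order_id": [1, 1, 2], "item": ["a", "b", "a"]})
    print(fn_col_mapping(_demo))                             # ['0:order_id', '1:item']
    print(fn_df_unique_2cols(_demo, ["order_id", "item"]))   # unique items per order joined by ' / '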
| 2.921875
| 3
|
scrapy_compose/cmdline.py
|
Sphynx-HenryAY/scrapy-compose
| 0
|
12783282
|
import sys
import optparse
from inspect import isclass
from scrapy.cmdline import (
_run_print_help,
_run_command,
_print_commands,
_print_unknown_command
)
class EntryPoint:
name = "scrapy-compose"
from scrapy.commands import ScrapyCommand as BaseCommand
_action = None
_cmd = None
_cmds = None
_parser = None
@staticmethod
def iscommand( obj ):
BaseCommand = EntryPoint.BaseCommand
return (
isclass( obj ) and
issubclass( obj, BaseCommand ) and
obj != BaseCommand
)
@property
def action( self ):
if not self._action:
argv = self.argv
if argv and not argv[0].startswith( "-" ):
self._action = argv.pop( 0 )
return self._action
@property
def commands( self ):
if not self._cmds:
from scrapy_compose.utils.load import package as load_package
cmds = {}
iscommand = self.iscommand
inproject = self.inproject
load_package(
"scrapy_compose.commands",
key = lambda c: (
iscommand( c ) and
( inproject or not c.requires_project ) and
cmds.update(
{ c.__module__.split(".")[-1]: c() }
)
)
)
self._cmds = cmds
return self._cmds
@property
def parser( self ):
if not self._parser:
self._parser = optparse.OptionParser(
conflict_handler = 'resolve',
formatter = optparse.TitledHelpFormatter(),
)
return self._parser
@property
def cmd( self ):
if not self._cmd:
from scrapy.crawler import CrawlerProcess
cmd = self.commands[ self.action ]
settings = self.settings
settings.setdict( cmd.default_settings, priority = "command" )
parser = self.parser
parser.usage = " ".join([ self.name, self.action, cmd.syntax() ])
parser.description = cmd.long_desc()
cmd.settings = settings
cmd.add_options( parser )
cmd.crawler_process = CrawlerProcess(settings)
self._cmd = cmd
return self._cmd
def __init__( self, argv = None, settings = None ):
from scrapy.utils.project import inside_project, get_project_settings
self.argv = ( sys.argv if argv is None else argv )[1:]
self.inproject = inside_project()
self.settings = get_project_settings() if settings is None else settings
self.settings.setmodule(
"scrapy_compose.compose_settings"
, priority = "default"
)
def print_header( self ):
import scrapy
p_str = "Scrapy " + scrapy.__version__ + " - "
if self.inproject:
p_str += "project : " + self.settings['BOT_NAME']
else:
p_str += "no active project"
print( "" )
def print_commands( self ):
self.print_header()
print("Usage:")
print(" " + self.name + " <command> [options] [args]\n")
print("Available commands:")
for c_name, cmd in sorted( self.commands.items() ):
print( " %-13s %s" % ( c_name, cmd.short_desc() ) )
if not self.inproject:
print( "" )
print( " [ more ] More commands available when run from project directory" )
print( "" )
print( 'Use "scrapy <command> -h" to see more info about a command' )
def print_unknown_command( self ):
self.print_header()
print( "Unknown or unavailable command: " + self.action )
print( 'Use "scrapy-compose" to see available commands' )
def __call__( self ):
action = self.action
if not action:
self.print_commands()
sys.exit(0)
elif (
action not in self.commands or
not self.inproject and self.cmd.requires_project
):
self.print_unknown_command()
sys.exit(2)
settings = self.settings
cmd = self.cmd
parser = self.parser
opts, args = parser.parse_args( args = self.argv )
_run_print_help(parser, cmd.process_options, args, opts)
_run_print_help(parser, _run_command, cmd, args, opts)
sys.exit(cmd.exitcode)
main = EntryPoint()
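# Usage sketch (illustrative): a console-script entry point can simply invoke the callable instance.
#   if __name__ == "__main__":
#       main()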
| 2.21875
| 2
|
src/position.py
|
hideraldus13/loro
| 0
|
12783283
|
<reponame>hideraldus13/loro<gh_stars>0
import pyautogui
import time
for i in range(10):
print(pyautogui.position())
time.sleep(0.5)
| 2.15625
| 2
|
test/svid/jwtsvid/test_jwt_svid.py
|
LaudateCorpus1/py-spiffe
| 5
|
12783284
|
import pytest
import datetime
from calendar import timegm
import jwt
from cryptography.hazmat.primitives.asymmetric import rsa, ec
from cryptography.hazmat.backends import default_backend
from pyspiffe.svid import INVALID_INPUT_ERROR
from pyspiffe.svid.jwt_svid import JwtSvid
from pyspiffe.bundle.jwt_bundle.jwt_bundle import JwtBundle
from pyspiffe.exceptions import ArgumentError
from pyspiffe.svid.exceptions import (
TokenExpiredError,
JwtSvidError,
InvalidTokenError,
MissingClaimError,
)
from pyspiffe.bundle.jwt_bundle.exceptions import AuthorityNotFoundError
from test.svid.test_utils import (
get_keys_pems,
create_jwt,
DEFAULT_SPIFFE_ID,
DEFAULT_AUDIENCE,
DEFAULT_KEY,
DEFAULT_TRUST_DOMAIN,
DEFAULT_EXPIRY,
)
JWT_BUNDLE = JwtBundle(DEFAULT_TRUST_DOMAIN, {'kid1': DEFAULT_KEY.public_key()})
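# Single-authority bundle (one RSA public key under kid 'kid1') shared by the parse_and_validate tests below.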
"""
parse_insecure tests
"""
@pytest.mark.parametrize(
'test_input_token,test_input_audience, expected',
[
('', [], INVALID_INPUT_ERROR.format('token cannot be empty.')),
('', None, INVALID_INPUT_ERROR.format('token cannot be empty.')),
(None, [], INVALID_INPUT_ERROR.format('token cannot be empty.')),
(None, None, INVALID_INPUT_ERROR.format('token cannot be empty.')),
],
)
def test_parse_insecure_invalid_input(test_input_token, test_input_audience, expected):
with pytest.raises(ArgumentError) as exception:
JwtSvid.parse_insecure(test_input_token, test_input_audience)
assert str(exception.value) == expected
@pytest.mark.parametrize(
'test_input_token,test_input_audience, expected',
[
(
jwt.encode(
{
'sub': 'spiffeid://somewhere.over.the',
'exp': timegm(
(
datetime.datetime.utcnow() + datetime.timedelta(hours=72)
).utctimetuple()
),
},
'secret',
headers={'alg': 'RS256', 'typ': 'JOSE'},
),
["spire"],
str(MissingClaimError('aud')),
), # no aud
(
jwt.encode(
{
'aud': ['spire'],
'sub': 'spiffeid://somewhere.over.the',
},
'secret',
headers={'alg': 'ES384', 'typ': 'JWT'},
),
["spire"],
str(MissingClaimError('exp')),
), # no exp
(
jwt.encode(
{
'aud': ['spire'],
'exp': timegm(
(
datetime.datetime.utcnow() - datetime.timedelta(hours=1)
).utctimetuple()
),
},
'secret',
headers={'alg': 'RS512', 'typ': 'JWT'},
),
["spire"],
str(MissingClaimError('sub')),
), # no sub
(
jwt.encode(
{
'aud': ['spire'],
'sub': 'spiffeid://somewhere.over.the',
'exp': timegm(
(
datetime.datetime.utcnow() - datetime.timedelta(hours=1)
).utctimetuple()
),
},
'secret',
headers={'alg': 'PS512', 'typ': 'JOSE'},
),
['spire'],
str(TokenExpiredError()),
), # expired token
],
)
def test_parse_insecure_invalid_claims(test_input_token, test_input_audience, expected):
with pytest.raises(JwtSvidError) as exception:
JwtSvid.parse_insecure(test_input_token, test_input_audience)
assert str(exception.value) == expected
@pytest.mark.parametrize(
'test_input_token,test_input_audience',
[
(
'<KEY>',
['spire'],
), # middle
(
'<KEY>',
['spire'],
), # first
],
)
def test_parse_insecure_invalid_token(test_input_token, test_input_audience):
with pytest.raises(InvalidTokenError):
JwtSvid.parse_insecure(test_input_token, test_input_audience)
@pytest.mark.parametrize(
'test_input_token,test_input_audience, expected',
[
(
jwt.encode(
{
'aud': ['spire'],
'sub': 'spiffe://test.org',
'exp': timegm(
(
datetime.datetime.utcnow() + datetime.timedelta(hours=100)
).utctimetuple()
),
},
'secret',
headers={'alg': 'RS256', 'typ': 'JWT'},
),
['spire'],
'spiffe://test.org',
),
(
jwt.encode(
{
'aud': ['spire', 'test', 'valid'],
'sub': 'spiffe://test.com.br',
'exp': timegm(
(
datetime.datetime.utcnow() + datetime.timedelta(hours=1)
).utctimetuple()
),
},
'secret key',
headers={'alg': 'PS384', 'typ': 'JOSE'},
),
{'spire', 'test'},
"spiffe://test.com.br",
),
],
)
def test_parse_insecure_valid(test_input_token, test_input_audience, expected):
result = JwtSvid.parse_insecure(test_input_token, test_input_audience)
assert result.token == test_input_token
assert str(result.spiffe_id) == expected
"""
parse_and_validate tests
"""
@pytest.mark.parametrize(
'test_input_token,test_input_jwt_bundle, test_input_audience, expected',
[
(
'',
None,
['spire'],
INVALID_INPUT_ERROR.format('token cannot be empty.'),
),
(
'eyJhbGciOiJFUzI1NiIsImtpZCI6Imd1eTdsOWZSQzhkQW1IUmFtaFpQbktRa3lId2FHQzR0IiwidHlwIjoiSldUIn0.eyJhdWQiOlsib3RoZXItc2VydmljZSJdLCJleHAiOjE2MTIyOTAxODMsImlhdCI6MTYxMjI4OTg4Mywic3ViIjoic3hthrtmZlOi8vZXhhbXBsZS5vcmcvc2VydmljZSJ9.W7CLQvYVBQ8Zg3ELcuB1K9hE4I9wyCMB_8PJTZXbjnlMBcgd0VDbSm5OjoqcGQF975eaVl_AdkryJ_lzxsEQ4A',
None,
['spire'],
INVALID_INPUT_ERROR.format('jwt_bundle cannot be empty.'),
),
],
)
def test_parse_and_validate_invalid_parameters(
test_input_token, test_input_jwt_bundle, test_input_audience, expected
):
with pytest.raises(ArgumentError) as exception:
JwtSvid.parse_and_validate(
test_input_token, test_input_jwt_bundle, test_input_audience
)
assert str(exception.value) == expected
def test_parse_and_validate_invalid_missing_kid_header():
token = create_jwt(kid='')
with pytest.raises(InvalidTokenError) as exception:
JwtSvid.parse_and_validate(token, JWT_BUNDLE, ['spire'])
assert str(exception.value) == 'key_id cannot be empty.'
def test_parse_and_validate_invalid_missing_sub():
token = create_jwt(spiffe_id='')
with pytest.raises(InvalidTokenError) as exception:
JwtSvid.parse_and_validate(token, JWT_BUNDLE, ['spire'])
assert str(exception.value) == 'SPIFFE ID cannot be empty.'
def test_parse_and_validate_invalid_missing_kid():
key_id = 'kid10'
token = create_jwt(kid=key_id)
with pytest.raises(AuthorityNotFoundError) as exception:
JwtSvid.parse_and_validate(token, JWT_BUNDLE, ['spire'])
assert str(exception.value) == 'Key (' + key_id + ') not found in authorities.'
def test_parse_and_validate_invalid_kid_mismatch():
rsa_key2 = rsa.generate_private_key(public_exponent=65537, key_size=2048)
jwt_bundle = JwtBundle(
DEFAULT_TRUST_DOMAIN,
{'kid1': DEFAULT_KEY.public_key(), 'kid10': rsa_key2.public_key()},
)
token = create_jwt(kid='kid10')
with pytest.raises(InvalidTokenError) as exception:
JwtSvid.parse_and_validate(token, jwt_bundle, ['spire'])
assert str(exception.value) == 'Signature verification failed.'
def test_parse_and_validate_valid_token_RSA():
token = create_jwt()
jwt_svid = JwtSvid.parse_and_validate(token, JWT_BUNDLE, ['spire'])
assert jwt_svid.audience == DEFAULT_AUDIENCE
assert str(jwt_svid.spiffe_id) == DEFAULT_SPIFFE_ID
assert jwt_svid.expiry == DEFAULT_EXPIRY
assert jwt_svid.token == token
def test_parse_and_validate_valid_token_EC():
ec_key = ec.generate_private_key(ec.SECP384R1(), default_backend())
jwt_bundle = JwtBundle(DEFAULT_TRUST_DOMAIN, {'kid_ec': ec_key.public_key()})
ec_key_pem, _ = get_keys_pems(ec_key)
token = create_jwt(ec_key_pem, 'kid_ec', alg='ES512')
jwt_svid = JwtSvid.parse_and_validate(token, jwt_bundle, ['spire'])
assert jwt_svid.audience == DEFAULT_AUDIENCE
assert str(jwt_svid.spiffe_id) == DEFAULT_SPIFFE_ID
assert jwt_svid.expiry == DEFAULT_EXPIRY
assert jwt_svid.token == token
def test_parse_and_validate_valid_token_multiple_keys_bundle():
ec_key = ec.generate_private_key(ec.SECP521R1(), default_backend())
jwt_bundle = JwtBundle(
DEFAULT_TRUST_DOMAIN,
{'kid_rsa': DEFAULT_KEY.public_key(), 'kid_ec': ec_key.public_key()},
)
ec_key_pem, _ = get_keys_pems(ec_key)
token = create_jwt(ec_key_pem, kid='kid_ec', alg='ES512')
jwt_svid1 = JwtSvid.parse_and_validate(token, jwt_bundle, ['spire'])
assert jwt_svid1.audience == DEFAULT_AUDIENCE
assert str(jwt_svid1.spiffe_id) == DEFAULT_SPIFFE_ID
assert jwt_svid1.expiry == DEFAULT_EXPIRY
assert jwt_svid1.token == token
token2 = create_jwt(kid='kid_rsa')
jwt_svid2 = JwtSvid.parse_and_validate(token2, jwt_bundle, ['spire'])
assert jwt_svid2.audience == DEFAULT_AUDIENCE
assert str(jwt_svid2.spiffe_id) == DEFAULT_SPIFFE_ID
assert jwt_svid2.expiry == DEFAULT_EXPIRY
assert jwt_svid2.token == token2
| 2.078125
| 2
|
auto-generated-sdk/setup.py
|
afernandes85/sampleapi-python-sdk
| 0
|
12783285
|
<reponame>afernandes85/sampleapi-python-sdk<filename>auto-generated-sdk/setup.py
# coding: utf-8
"""
Swagger Petstore
This is a sample server Petstore server. You can find out more about Swagger at [http://swagger.io](http://swagger.io) or on [irc.freenode.net, #swagger](http://swagger.io/irc/). For this sample, you can use the api key `special-key` to test the authorization filters. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
from setuptools import setup, find_packages # noqa: H301
NAME = "fds.sampleapi"
VERSION = "0.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"]
setup(
name=NAME,
version=VERSION,
description="Swagger Petstore",
author="OpenAPI Generator community",
author_email="<EMAIL>",
url="https://github.factset.com/factset/sampleapi-python-sdk",
keywords=["OpenAPI", "OpenAPI-Generator", "Swagger Petstore"],
install_requires=REQUIRES,
packages=find_packages(exclude=["test", "tests"]),
include_package_data=True,
license="Apache 2.0",
long_description="""\
This is a sample server Petstore server. You can find out more about Swagger at [http://swagger.io](http://swagger.io) or on [irc.freenode.net, #swagger](http://swagger.io/irc/). For this sample, you can use the api key `special-key` to test the authorization filters. # noqa: E501
"""
)
| 1.28125
| 1
|
app.py
|
cibinsb/EMBL_task
| 0
|
12783286
|
import falcon
import json
import pathlib
from helper.log import logger
from src.middleware import HttpMethodValidator
from helper.utils import Constants
from src.query_builder import RunQuery
from src.db import DataBase
from falcon_swagger_ui import register_swaggerui_app
try:
logger.info("Connecting to Database")
database = DataBase(True)
logger.info(database)
logger.info("Connection successful")
except Exception as ex:
logger.info("Error " + str(ex))
raise Exception("Error Couldn't connect to %s Database" % (Constants.database.value))
class Home:
def on_get(self, req, resp):
logger.info("Sending response")
resp.status = falcon.HTTP_200
resp.body = json.dumps([{Constants.message.value: "server works"}], ensure_ascii=False)
SWAGGERUI_URL = '/swagger'
SCHEMA_URL = '/static/v1/swagger.json'
STATIC_PATH = pathlib.Path(__file__).parent / 'static'
home = Home()
search = RunQuery(database)
api = falcon.API(middleware=[HttpMethodValidator()])
api.add_static_route('/static', str(STATIC_PATH))
api.add_route('/', home)
api.add_route('/api/v1/embl/search', search)
page_title = 'EMBL search API doc'
register_swaggerui_app(
api, SWAGGERUI_URL, SCHEMA_URL,
page_title=page_title,
config={'supportedSubmitMethods': ['get'], }
)
| 2.3125
| 2
|
test/test3.py
|
LUSHDigital/lrpi_display
| 0
|
12783287
|
#!/usr/bin/env python
import pygame
from pygame.locals import *
import os
from time import sleep
print(dir(pygame))
DIM = [480,320] # screen framebuffer dimensions
WHITE = (255, 255, 255)
BLACK = ( 0, 0, 0)
BLUE = ( 0, 0, 255)
GREEN = ( 0, 255, 0)
RED = (255, 0, 0)
ORANGE = (255, 165, 0)
GREY = (128, 128, 128)
YELLOW = (255, 255, 0)
PINK = (255, 192, 203)
LBLUE = (191, 238, 244)
os.putenv('SDL_DIRECTFB_LINUX_INPUT', '1')
os.putenv('SDL_VIDEODRIVER', 'fbcon')
os.putenv('SDL_FBDEV', '/dev/fb0')
os.putenv('SDL_MOUSEDRV', 'TSLIB')
os.putenv('SDL_MOUSEDEV', '/dev/input/event0')
#os.putenv('SDL_MOUSEDEV', '/devices/virtual/input/input0')
#os.environ["SDL_FBDEV"] = "/dev/fb0"
#os.environ["SDL_MOUSEDRV"] = "TSLIB"
#os.environ["SDL_MOUSEDEV"] = "/dev/input/event0"
pygame.init()
pygame.mouse.set_visible(False)
lcd = pygame.display.set_mode(DIM)
lcd.fill((0,0,0))
pygame.display.update()
font_big = pygame.font.Font(None, 50)
touch_buttons = {'1':(DIM[0]/3,DIM[1]/3), '2':(DIM[0]/3*2,DIM[1]/3), '3':(DIM[0]/3,DIM[1]/3*2), '4':(DIM[0]/3*2,DIM[1]/3*2)}
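# The four labels are centred at one third / two thirds of the screen in each
# axis, giving four touch targets roughly in the four quadrants.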
def draw_background():
lcd.fill((0,0,0))
pygame.display.update()
for k,v in touch_buttons.items():
text_surface = font_big.render('%s'%k, True, WHITE)
rect = text_surface.get_rect(center=v)
lcd.blit(text_surface, rect)
pygame.display.update()
draw_background()
while True:
# Scan touchscreen events
for event in pygame.event.get():
print(dir(event))
print('type:', event.type)
if(event.type == MOUSEBUTTONDOWN or event.type == MOUSEMOTION):
#if(event.type == MOUSEMOTION):
pass
draw_background()
pos = pygame.mouse.get_pos()
print(pos)
pygame.draw.circle(lcd, RED, (pos[0],pos[1]), 50)
pygame.display.update()
elif(event.type == MOUSEBUTTONUP):
pass
#pos = pygame.mouse.get_pos()
#print pos
#Find which quarter of the screen we're in
#x,y = pos
#if y < 120:
# if x < 160:
# GPIO.output(17, False)
# else:
# GPIO.output(4, False)
#else:
# if x < 160:
# GPIO.output(17, True)
# else:
# GPIO.output(4, True)
sleep(0.1)
| 2.703125
| 3
|
manim_express/math/quaternion.py
|
hkeliang/manim-kunyuan
| 0
|
12783288
|
import numpy as np
from manimlib import *
class Quaternion:
def __init__(self, x=None, y=0, z=0, w=1):
"""Quaternion style [x, y, z, w]"""
if issubclass(type(x), (np.ndarray, list, tuple)):
self._x = x[0]
self._y = x[1]
self._z = x[2]
self._w = x[3]
else:
if x is None:
x = 0
self._x = x
self._y = y
self._z = z
self._w = w
self._vec = np.array([self._x, self._y, self._z])
self._q = np.array([*self._vec, self._w])
def _set_q(self):
self._vec = np.array([self._x, self._y, self._z])
self._q = np.array([*self._vec, self._w])
def to_array(self):
return self._q
def normalise(self):
L = np.linalg.norm(self._q)  # normalise by the full 4-component quaternion norm
# self._q /= L
self._x /= L
self._y /= L
self._z /= L
self._w /= L
self._set_q()
def slerp(self):
"""TODO"""
pass
def multi(self, *quats):
q = self
for qi in quats:
q = Quaternion.multiply_quat_2(q, qi)
self._vec = q._vec
self._q = q._q
# self._set_q()
return q
@staticmethod
def multiply_quat(q1, q2):
"""reference http://www.euclideanspace.com/maths/algebra/realNormedAlgebra/quaternions/code/index.htm"""
x = q1.x * q2.w + q1.y * q2.z - q1.z * q2.y + q1.w * q2.x
y = -q1.x * q2.z + q1.y * q2.w + q1.z * q2.x + q1.w * q2.y
z = q1.x * q2.y - q1.y * q2.x + q1.z * q2.w + q1.w * q2.z
w = -q1.x * q2.x - q1.y * q2.y - q1.z * q2.z + q1.w * q2.w
new_q = object.__new__(Quaternion)
new_q.__init__(x, y, z, w)
return new_q
@staticmethod
def multiply_quat_2(q1, q2):
"""Graßmann Product"""
v1 = q1._vec
v2 = q2._vec
w1 = q1._w
w2 = q2._w
vec = w1 * v2 + w2 * v1 + np.cross(v1, v2)
w = w1 * w2 - v1.dot(v2)
new_q = object.__new__(Quaternion)
new_q.__init__([*vec, w])
return new_q
def __new__(cls, *args, **kwargs):
return object.__new__(cls)
def copy(self):
obj = object.__new__(Quaternion)
obj.__init__(*self._q)
return obj
def set_x(self, value):
self._x = value
self._set_q()
def set_y(self, value):
self._y = value
self._set_q()
def set_z(self, value):
self._z = value
self._set_q()
def set_w(self, value):
self._w = value
self._set_q()
def set_from_euler(self):
"""TODO"""
pass
def set_from_axis_angle(self, axis: np.ndarray, angle):
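# Standard axis-angle conversion: q = (axis * sin(angle/2), cos(angle/2)) with a normalised axis.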
axis = normalize(np.array(axis))
half_angle = angle / 2
s = np.sin(half_angle)
self._x = axis[0] * s
self._y = axis[1] * s
self._z = axis[2] * s
self._w = np.cos(half_angle)
self._set_q()
return self
def conjugate(self, in_place=True):
if in_place:
# negate the scalar components so _set_q() rebuilds the negated vector part
self._x, self._y, self._z = -self._x, -self._y, -self._z
self._set_q()
return self
else:
q = self.copy()
q._x, q._y, q._z = -q._x, -q._y, -q._z
q._set_q()
return q
def invert(self):
return self.conjugate()
def dot(self, v):
return self._q.dot(v)
def __str__(self):
return self._q.__str__()
@property
def x(self):
return self._vec[0]
@property
def y(self):
return self._vec[1]
@property
def z(self):
return self._vec[2]
@property
def w(self):
return self._w
if __name__ == "__main__":
axis = np.array([1, 1, 1])
q1 = Quaternion().set_from_axis_angle(axis, 20 * DEGREES)
q2 = Quaternion().set_from_axis_angle(axis, 30 * DEGREES)
print(Quaternion.multiply_quat(q1, q2))
print(Quaternion.multiply_quat_2(q1, q2))
| 3.265625
| 3
|
detect_car.py
|
marty331/license-plate-recognition
| 12
|
12783289
|
import matplotlib.pyplot as plt
import cv2
#import imutils
import requests
import base64
import json
import numpy as np
from PIL import Image
from PIL import ImageEnhance
from skimage import color, data, restoration
from scipy.signal import convolve2d
import pytesseract
import PIL.ImageOps
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files (x86)\Tesseract-OCR\tesseract.exe'
plate=None
def main (img):
img = cv2.imread(img,cv2.IMREAD_COLOR)
img = cv2.resize(img, (600,400) )
threshold = 180 # to be determined
_, img_binarized = cv2.threshold(img, threshold, 255, cv2.THRESH_BINARY)
pil_img = Image.fromarray(img_binarized)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.bilateralFilter(gray, 13, 15, 15)
edged = cv2.Canny(gray, 30, 200)
thresh = cv2.adaptiveThreshold(gray, 255, 1, 1, 11, 2)
#contours = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
#contours = imutils.grab_contours(contours)
contours = sorted(contours, key = cv2.contourArea, reverse = True)[:30]
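# Keep only the 30 largest contours; the plate is expected to show up among them
# as a large four-point (roughly rectangular) contour, located in the loop below.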
screenCnt = None
gaussian_blur_license_plate = cv2.GaussianBlur(
img, (5, 5), 0)
for c in contours:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.02 * peri, True)
if len(approx) == 4:
screenCnt = approx
break
if screenCnt is None:
detected = 0
print ("No contour detected")
else:
detected = 1
if detected == 1:
cv2.drawContours(img, [screenCnt], -1, (0, 0, 255), 3)
mask = np.zeros(gray.shape,np.uint8)
new_image = cv2.drawContours(mask,[screenCnt],0,255,-1,)
new_image = cv2.bitwise_and(img,img,mask=mask)
(x, y) = np.where(mask == 255)
(topx, topy) = (np.min(x), np.min(y))
(bottomx, bottomy) = (np.max(x), np.max(y))
Cropped = gray[topx:bottomx+1, topy:bottomy+1]
text = pytesseract.image_to_string(Cropped, lang='eng')
print("programming_fever's License Plate Recognition\n")
print("Detected license plate Number is:",text)
img = cv2.resize(img,(500,300))
Cropped = cv2.resize(Cropped,(400,200))
im = Image.fromarray(Cropped)
im.save('test.png')
image = Image.open('test.png')
enh_bri = ImageEnhance.Brightness(image )
brightness = 1.0
image_brightened = enh_bri.enhance(brightness)
imwhole = np.array(image_brightened)
cv2.imshow('car',img)
cv2.imshow('Cropped',imwhole)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 2.625
| 3
|
_unittests/ut_garden/test_poulet.py
|
Pandinosaurus/mlstatpy
| 9
|
12783290
|
<filename>_unittests/ut_garden/test_poulet.py
# -*- coding: utf-8 -*-
"""
@brief test log(time=4s)
"""
import unittest
from mlstatpy.garden.poulet import maximum, find_maximum, histogramme_poisson_melange, proba_poisson_melange
class TestPoulet(unittest.TestCase):
def test_poulet1(self):
res = maximum(2, 5, 1, 80)
m = find_maximum(res)
self.assertEqual(m, (86, 228.50205712688214))
self.assertEqual(
res[:3], [(0, 0.0), (1, 2.9999999999999942), (2, 5.9999999999999885)])
def test_poulet2(self):
h = histogramme_poisson_melange([48, 10, 4], [1, 2, 3])
self.assertTrue(max(h) > 0.01)
self.assertEqual(h[:4], [0.0, 0.0, 0.0, 0.0])
def test_poulet3(self):
h = proba_poisson_melange([48, 10, 4], [1, 2, 3], 20)
self.assertEqual(h, 0)
h = proba_poisson_melange([48, 10, 4], [1, 2, 3], 40)
self.assertTrue(h < 0.1)
if __name__ == "__main__":
unittest.main()
| 2.671875
| 3
|
explorer/services/deal.py
|
AthenaExplorer/xm_s_explorer_v2
| 0
|
12783291
|
<gh_stars>0
import datetime
from mongoengine import Q
from base.utils.fil import _d, height_to_datetime, datetime_to_height, bson_to_decimal
from explorer.models.deal import Deal, Dealinfo,DealDay,DealStat
from explorer.models.miner import MinerDealPriceHistory
from base.utils.paginator import mongo_paginator
from mongoengine.connection import get_db
from bson.decimal128 import Decimal128
class DealService(object):
"""
Deal (order) service
"""
@classmethod
def get_deal_list(cls, key_words=None, is_verified=0, start_time_height=None, end_time_height=None, page_index=1,
page_size=20):
"""
Fetch the list of deals
:param key_words:
:param is_verified:
:param start_time_height:
:param end_time_height:
:param page_index:
:param page_size:
:return:
"""
query = Q()
if key_words:
if key_words.isdigit():
query = Q(deal_id=key_words)
else:
query = (Q(client=key_words) | Q(provider=key_words))
if is_verified == 1:
query &= Q(is_verified=True)
if is_verified == 0:
query &= Q(is_verified=False)
if start_time_height:
query &= Q(height__gte=start_time_height)
if end_time_height:
query &= Q(height__lte=end_time_height)
query = Deal.objects(query).order_by("-deal_id")
result = mongo_paginator(query, page_index, page_size)
result['objects'] = [info.to_dict(only_fields=("deal_id", "height_time", "client", "provider", "piece_size",
"is_verified", "storage_price_per_epoch"))
for info in result['objects']]
return result
@classmethod
def get_deal_info(cls, deal_id):
"""
Fetch details of a single deal
:param deal_id:
:return:
"""
wallet = Deal.objects(deal_id=deal_id).first()
data = wallet.to_dict()
data["start_time"] = height_to_datetime(data["start_epoch"], need_format=True)
data["end_time"] = height_to_datetime(data["end_epoch"], need_format=True)
return data
@classmethod
def get_deal_info_list(cls, key_words=None, is_verified=0, start_time_height=None, end_time_height=None,
page_index=1, page_size=20):
"""
Fetch the list of deal info records
:param key_words:
:param is_verified:
:param start_time_height:
:param end_time_height:
:param page_index:
:param page_size:
:return:
"""
query = Q()
if key_words:
if key_words.isdigit():
query = Q(deal_id=key_words)
else:
query = (Q(client=key_words) | Q(provider=key_words))
if is_verified == 1:
query &= Q(verified_deal=True)
if is_verified == 0:
query &= Q(verified_deal=False)
if start_time_height:
query &= Q(sector_start_epoch__gte=start_time_height)
if end_time_height:
query &= Q(sector_start_epoch__lte=end_time_height)
query = Dealinfo.objects(query).order_by("-sector_start_epoch")
result = mongo_paginator(query, page_index, page_size)
result['objects'] = [info.to_dict() for info in result['objects']]
return result
class DealStatService(object):
@classmethod
def sync_deal_stat(cls):
"""
24-hour deal statistics
:return:
"""
obj_dict = {}
now_height = datetime_to_height(datetime.datetime.now())
height = now_height - 2880
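# Look back 2880 heights, i.e. roughly one day assuming Filecoin's 30-second epoch interval.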
# all deals
deal_data = Deal.objects(height__gte=height).aggregate([
{"$group": {"_id": 0,
"data_size": {"$sum": "$piece_size"},
"count": {"$sum": 1}}},
])
deal_data = list(deal_data)
deal_data = deal_data[0] if deal_data else {}
obj_dict["deal_count"] = deal_data.get("count", 0)
obj_dict["data_size"] = bson_to_decimal(deal_data.get("data_size", Decimal128("0")))
# verified deals
verified_deal_data = Deal.objects(height__gte=height, is_verified=True).aggregate([
{"$group": {"_id": 0,
"data_size": {"$sum": "$piece_size"},
"count": {"$sum": 1}}},
])
verified_deal_data = list(verified_deal_data)
verified_deal_data = verified_deal_data[0] if verified_deal_data else {}
obj_dict["verified_deal_count"] = verified_deal_data.get("count", 0)
obj_dict["verified_data_size"] = bson_to_decimal(verified_deal_data.get("data_size", Decimal128("0")))
# active clients
client_data = Deal.objects(height__gte=height).aggregate([
{"$project": {"client": 1}},
{"$group": {"_id": "$client"}},
{"$group": {"_id": 0,
"count": {"$sum": 1}}},
])
client_data = list(client_data)
client_data = client_data[0] if client_data else {}
obj_dict["client_count"] = client_data.get("count", 0)
# number of distinct files (piece CIDs)
piece_cid_data = Deal.objects(height__gte=height).aggregate([
{"$project": {"piece_cid": 1}},
{"$group": {"_id": "$piece_cid"}},
{"$group": {"_id": 0,
"count": {"$sum": 1}}},
])
piece_cid_data = list(piece_cid_data)
piece_cid_data = piece_cid_data[0] if piece_cid_data else {}
obj_dict["piece_cid_count"] = piece_cid_data.get("count", 0)
# gas cost
total_gas = _d(0)
pipeline = [
{"$match": {"height": {"$gte": height}, "msg_method_name": "PublishStorageDeals"}},
{"$group": {"_id": 0,
"gascost_total_cost": {"$sum": "$gascost_total_cost"}}},
]
table_names = set()
table_names.add(height_to_datetime(height).strftime("%Y%m"))
table_names.add(height_to_datetime(now_height).strftime("%Y%m"))
for table_name in table_names:
for per in get_db("base")["messages@zone_" + table_name].aggregate(pipeline):
total_gas += bson_to_decimal(per.get("gascost_total_cost"))
deal_gas_by_t = total_gas / (obj_dict["data_size"] / _d(1024 ** 4))
obj_dict["deal_gas_by_t"] = deal_gas_by_t
DealStat.objects(height=height).upsert_one(**obj_dict)
# delete data older than 7 days
DealStat.objects(height__lte=height-(7*2880)).delete()
@classmethod
def sync_deal_day(cls, date_str=None):
now_date = datetime.datetime.now().date()
if date_str:
date = datetime.datetime.strptime(date_str, "%Y-%m-%d").date()
else:
day_date = "2020-10-15"
deal_day = DealDay.objects().order_by("-date").first()
if deal_day:
day_date = deal_day.date
date = (datetime.datetime.strptime(day_date, "%Y-%m-%d") + datetime.timedelta(days=1)).date()
if date > now_date - datetime.timedelta(days=1):
return
date_str = date.strftime("%Y-%m-%d")
# stats over that day's height range
obj_dict={}
height = datetime_to_height(date)
client_count = len(Deal.objects(height__gte=height, height__lt=height + 2880).distinct(
"client"))
obj_dict["client_count"]=client_count
# all deals
deal_data = Deal.objects(height__gte=height, height__lt=height + 2880).aggregate([
{"$group": {"_id": 0,
"data_size": {"$sum": "$piece_size"},
"count": {"$sum": 1}}},
])
deal_data = list(deal_data)
deal_data = deal_data[0] if deal_data else {}
obj_dict["deal_count"] = deal_data.get("count", 0)
obj_dict["data_size"] = bson_to_decimal(deal_data.get("data_size", Decimal128("0")))
# verified deals
verified_deal_data = Deal.objects(height__gte=height, height__lt=height + 2880, is_verified=True).aggregate([
{"$group": {"_id": 0,
"data_size": {"$sum": "$piece_size"},
"count": {"$sum": 1}}},
])
verified_deal_data = list(verified_deal_data)
verified_deal_data = verified_deal_data[0] if verified_deal_data else {}
obj_dict["verified_deal_count"] = verified_deal_data.get("count", 0)
obj_dict["verified_data_size"] = bson_to_decimal(verified_deal_data.get("data_size", Decimal128("0")))
# gas cost
total_gas = _d(0)
pipeline = [
{"$match": {"height": {"$gte": height, "$lt": height + 2880}, "msg_method_name": "PublishStorageDeals"}},
{"$group": {"_id": 0,
"gascost_total_cost": {"$sum": "$gascost_total_cost"}}},
]
table_names = set()
table_names.add(height_to_datetime(height).strftime("%Y%m"))
table_names.add(height_to_datetime(height + 2880).strftime("%Y%m"))
for table_name in table_names:
for per in get_db("base")["messages@zone_" + table_name].aggregate(pipeline):
total_gas += bson_to_decimal(per.get("gascost_total_cost"))
deal_gas_by_t = total_gas / (obj_dict["data_size"] / _d(1024 ** 4))
obj_dict["deal_gas_by_t"] = deal_gas_by_t
return DealDay.objects(date=date_str).upsert_one(**obj_dict).id
@classmethod
def get_deal_stat(cls):
"""
Deal data for the last 24 hours
:return:
"""
deal_stat = DealStat.objects().order_by("-height").first()
result = deal_stat.to_dict()
# result["avg_price"] = bson_to_decimal(MinerDealPriceHistory.objects(price__gt=0).average("price"))
return result
@classmethod
def get_deal_day(cls, stats_type):
"""
Fetch deal data grouped by day
:param stats_type:
:return:
"""
limit = int(stats_type[0:stats_type.find("d")])
days = DealDay.objects().order_by("-date").limit(limit)
data = []
for info in days:
tmp = info.to_dict()
data.append(tmp)
if limit == 90:
data = data[::3]
if limit == 180:
data = data[::6]
if limit == 360:
data = data[::120]
return data
| 2.28125
| 2
|
opensea.py
|
btproghornbt/scripts
| 0
|
12783292
|
<gh_stars>0
#!/bin/env python3
# importing os module
import os
import requests
import shutil # to save it locally
from tenacity import retry, wait_exponential, TryAgain
def mkdir_p(path):
try:
# similar to `mkdir -p` in bash
# makedirs for recursion; exist_ok = True for no error if dir exists
os.makedirs(path, exist_ok = True)
except OSError as error:
print("Directory '%s' can not be created" % directory)
@retry(wait=wait_exponential(multiplier=1, min=4, max=10))
def get_addresses_from_opensea_collection(collection="degenimals", offset=0, limit=1):
try:
url = f"https://api.opensea.io/api/v1/assets?order_direction=desc&collection={collection}&offset={offset}&limit={limit}"
response = requests.get(url)
rj = response.json()
except Exception as e:
raise TryAgain
return rj
@retry(wait=wait_exponential(multiplier=1, min=4, max=10))
def get_image_from_url(image_url,image_pathname, ext="jpg"):
try:
# Open the url image, set stream to True, this will return the stream content.
r = requests.get(image_url, stream = True)
# Check if the image was retrieved successfully
if r.status_code == 200:
# Set decode_content value to True, otherwise the downloaded image file's size will be zero.
r.raw.decode_content = True
# Open a local file with wb ( write binary ) permission.
with open(image_pathname + f".{ext}",'wb') as f:
shutil.copyfileobj(r.raw, f)
print("Avatar image saved")
f.close
except Exception as e:
raise TryAgain
def main():
mkdir_p("assets/image")
mkdir_p("assets/metadata")
for i in range(10):
json = get_addresses_from_opensea_collection(collection="degenimals", offset=i, limit=1)
name = json["assets"][0]["name"]
print(f"Found Degenimal: {name}!")
metadata_path = os.path.join("assets/metadata",name + ".json")
# if metadata exists, skip
if not os.path.exists(metadata_path):
with open(metadata_path,"w") as f:
f.write(str(json))
f.close()
print("Metadata saved")
else:
print("Metadata exists, skipped...")
image_path = os.path.join("assets/image",name)
# if asset image already exists, skip
if not os.path.exists(image_path):
image_url = json["assets"][0]["image_url"]
get_image_from_url(image_url,image_path)
else:
print("Avatar image exists, skipped...")
print()
if __name__ == "__main__":
main()
| 2.875
| 3
|
chp1/dictAttack.py
|
farhan3/violent-python
| 0
|
12783293
|
<gh_stars>0
#!/usr/bin/env python
"""
Perform Dictionary Attack using the provided password file and dictionary,
along with the algorithm, e.g. DES, SHA512.
Usage:
dictAttack.py dictAttack [-v] <passwdFileName> <dictionary> <algorithm>
dictAttack.py dictAttackExample [-v]
dictAttack.py -h | --help
dictAttack.py --version
Options:
-v verbose
-h --help Show this screen.
--version Show version.
Examples:
dictAttack.py dictAttack passwords.txt dictionary.txt DES
"""
import sys
sys.path.insert(0, '../')
from docopt import docopt
from utils import *
import crypt
def checkDict(dictFileName, passwdHash, algorithm='DES'):
"""
Generate hashes for the words in the dictionary file, using the first
two bytes of the passwdHash as the salt.
If a matching word is found in the dictionary, return it, otherwise return
None.
"""
salt = passwdHash[0:2]
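# Classic crypt() hashes embed the two-character salt as the first two
# characters of the stored hash, so reusing it makes the dictionary hashes comparable.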
dictFile = open(dictFileName, 'r')
for word in dictFile.readlines():
word = word.strip()
hashValue = crypt.crypt(word, salt)
if hashValue == passwdHash:
ok('Found password: ' + word)
return word
warn('Password not found.')
return None
def dictAttack(passwdFileName, dictFileName, algorithm='DES'):
"""
Try to crack the passwords in the password file by using a dictionary
attack. The words to use are provide dby the dictionary file.
"""
passwdFile = open(passwdFileName)
for line in passwdFile.readlines():
if verbose:
info('Processing line in ' + passwdFileName + ': ' + line)
if ':' in line:
splits = line.split(':')
uid = splits[0].strip()
passwdHash = splits[1].strip()
info('Cracking password of user: ' + uid)
checkDict(dictFileName, passwdHash, algorithm)
def dictAttackExample():
dictAttack('res/passwords.txt', 'res/dictionary.txt', algorithm='DES')
def main():
if args['dictAttack']:
print(dictAttack(args['<passwdFileName>'],
args['<dictionary>'], args['<algorithm>']))
elif args['dictAttackExample']:
dictAttackExample()
if __name__ == '__main__':
args = docopt(__doc__, version='0.1')
verbose = args['-v']
main()
| 4.125
| 4
|
flex_nav_create_flexbe_behaviors/src/flex_nav_create_flexbe_behaviors/create_move_base_sm.py
|
CNURobotics/chris_create_flexible_navigation
| 0
|
12783294
|
#!/usr/bin/env python
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from flex_nav_flexbe_states.get_pose_state import GetPoseState
from flex_nav_flexbe_states.get_path_state import GetPathState
from flex_nav_flexbe_states.follow_path_state import FollowPathState
from flex_nav_flexbe_states.clear_costmaps_state import ClearCostmapsState
from flex_nav_flexbe_states.rotate_angle_state import RotateAngleState
from flexbe_states.operator_decision_state import OperatorDecisionState
from flexbe_states.log_state import LogState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Fri Aug 19 2016
@author: <NAME>
'''
class CreateMoveBaseSM(Behavior):
'''
A drop in replacement for move_base that works with CHRISLab Flexible Navigation system and iRobot Create
'''
def __init__(self):
super(CreateMoveBaseSM, self).__init__()
self.name = 'Create Move Base'
# parameters of this behavior
# references to used behaviors
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:175 y:115, x:629 y:210
_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
with _state_machine:
# x:56 y:20
OperatableStateMachine.add('Get Pose',
GetPoseState(topic='/move_base_simple/goal'),
transitions={'done': 'Global Planner'},
autonomy={'done': Autonomy.Off},
remapping={'goal': 'goal'})
# x:52 y:155
OperatableStateMachine.add('Global Planner',
GetPathState(planner_topic='global_planner'),
transitions={'planned': 'Local Planner', 'empty': 'Log Fail', 'failed': 'Log Fail'},
autonomy={'planned': Autonomy.Off, 'empty': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'goal': 'goal', 'plan': 'plan'})
# x:251 y:155
OperatableStateMachine.add('Local Planner',
FollowPathState(topic='local_planner'),
transitions={'done': 'Continue', 'failed': 'Log Fail', 'preempted': 'Continue'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off, 'preempted': Autonomy.Off},
remapping={'plan': 'plan'})
# x:441 y:128
OperatableStateMachine.add('Clear Costmaps',
ClearCostmapsState(costmap_topics=['global_planner/clear_costmap','local_planner/clear_costmap']),
transitions={'done': 'Rotate Recovery', 'failed': 'failed'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off})
# x:448 y:28
OperatableStateMachine.add('Rotate Recovery',
RotateAngleState(target_time=5.0, cmd_topic='/create_node/cmd_vel', odometry_topic='/create_node/odom'),
transitions={'done': 'Get Pose'},
autonomy={'done': Autonomy.Off})
# x:237 y:59
OperatableStateMachine.add('Continue',
OperatorDecisionState(outcomes=['yes', 'no'], hint=None, suggestion=None),
transitions={'yes': 'Get Pose', 'no': 'finished'},
autonomy={'yes': Autonomy.Off, 'no': Autonomy.Off})
# x:167 y:270
OperatableStateMachine.add('Log Fail',
LogState(text='Navigation failed!', severity=Logger.logerr),
transitions={'done': 'Autonomy'},
autonomy={'done': Autonomy.Off})
# x:437 y:272
OperatableStateMachine.add('Autonomy',
OperatorDecisionState(outcomes=['yes', 'no'], hint=None, suggestion='yes'),
transitions={'yes': 'Clear Costmaps', 'no': 'failed'},
autonomy={'yes': Autonomy.High, 'no': Autonomy.Off})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
| 1.75
| 2
|
fluo_timelapse.py
|
simonepignotti/fluo_timelapse
| 0
|
12783295
|
#!/usr/bin/env python3
import os
import sys
import time
import json
import argparse
# filter wheel
import hid
# relay switch
import serial
# camera
import gphoto2 as gp
DEBUG = True
config_dict = {
'camera': {
'aWHITE': {
'exp': '1/4',
'iso': '200',
'f_val': '2',
},
'bGFP': {
'exp': '1/8',
'iso': '200',
'f_val': '1',
},
'cCFP': {
'exp': '1/8',
'iso': '400',
'f_val': '2',
},
'mCherry': {
'exp': '1/4',
'iso': '400',
'f_val': '1',
},
},
'wheel': {
'ven_id': 0x1278,
'pro_id': 0x0920,
'filters': ['aWHITE', 'bGFP', 'cCFP', 'mCherry'],
},
'relay': {
'path': '/dev/tty.usbmodem1421',
},
'interval': 5,
'work_dir': '.',
'out_fmt': 'arw',
}
# class CameraException(Exception):
# pass
#
# class WheelException(Exception):
# pass
#
# class RelayException(Exception):
# pass
def clean_env(camera, wheel, relay):
if DEBUG:
print('Closing camera connection...', file=sys.stderr)
gp.check_result(gp.gp_camera_exit(camera))
if DEBUG:
print('Camera connection closed', file=sys.stderr)
print('Closing filter wheel connection...', file=sys.stderr)
wheel.close()
if DEBUG:
print('Filter wheel connection closed', file=sys.stderr)
print('Closing relay switch connection...', file=sys.stderr)
relay.close()
if DEBUG:
print('Relay switch connection closed', file=sys.stderr)
def set_camera_config(camera, exp, iso, f_val):
if DEBUG:
print('Getting previous camera configuration...', file=sys.stderr)
camera_config = camera.get_config()
error, exp_conf = gp.gp_widget_get_child_by_name(camera_config, 'shutterspeed')
assert error == 0, "ERROR while retrieving current exposure"
error, iso_conf = gp.gp_widget_get_child_by_name(camera_config, 'iso')
assert error == 0, "ERROR while retrieving current ISO"
error, f_conf = gp.gp_widget_get_child_by_name(camera_config, 'f-number')
assert error == 0, "ERROR while retrieving current aperture"
error = gp.check_result(gp.gp_widget_set_value(exp_conf, exp))
assert error == 0, "ERROR while setting exposure to {}".format(exp)
error = gp.check_result(gp.gp_widget_set_value(iso_conf, iso))
assert error == 0, "ERROR while setting ISO to {}".format(iso)
error = gp.check_result(gp.gp_widget_set_value(f_conf, f_val))
assert error == 0, "ERROR while setting aperture to {}".format(f_val)
if DEBUG:
print("Setting new camera configuration (exp {}, iso {}, f {})...".format(exp, iso, f_val), file=sys.stderr)
error = gp.check_result(gp.gp_camera_set_config(camera, camera_config))
assert error == 0, "ERROR while setting camera configuration"
if DEBUG:
print('New camera configuration set', file=sys.stderr)
def timelapse(camera, wheel, relay, config_dict):
ch_idx = 0
# TODO: detect pictures already present in work_dir and continue numbering
capture_idx = -1
work_dir = config_dict['work_dir']
interval = config_dict['interval']
out_fmt = config_dict['out_fmt']
channels = config_dict['wheel']['filters']
assert len(config_dict['camera']) == len(channels), "ERROR: Different number of channels for camera and filter wheel"
ch = channels[0]
# INIT
exp = str(config_dict['camera'][ch]['exp'])
iso = str(config_dict['camera'][ch]['iso'])
f_val = float(config_dict['camera'][ch]['f_val'])
set_camera_config(camera, exp, iso, f_val)
relay.write("reset\n\r".encode('utf-8'))
wheel.write([1, 0])
try:
while True:
if ch_idx == 0:
capture_idx += 1
if DEBUG:
print("CHANNEL {} (ch) [IT {}]".format(ch_idx, capture_idx), file=sys.stderr)
# LIGHTS UP AND CAPTURE
if DEBUG:
print("Lights up...", file=sys.stderr)
relay_cmd = "relay on {}\n\r".format(ch_idx).encode('utf-8')
relay.write(relay_cmd)
if DEBUG:
print("Lights up! Relay status:", file=sys.stderr)
relay_cmd = "relay readall\n\r".encode('utf-8')
relay.write(relay_cmd)
print(relay.readlines(), file=sys.stderr)
print("Shoot...", file=sys.stderr)
camera_fn = gp.check_result(gp.gp_camera_capture(camera, gp.GP_CAPTURE_IMAGE))
if DEBUG:
print("Shoot!", file=sys.stderr)
print("Lights down...", file=sys.stderr)
relay_cmd = "relay off {}\n\r".format(ch_idx).encode('utf-8')
relay.write(relay_cmd)
if DEBUG:
print("Lights down! Relay status:", file=sys.stderr)
relay_cmd = "relay readall\n\r".encode('utf-8')
relay.write(relay_cmd)
print(relay.readlines(), file=sys.stderr)
# SAVE PICTURE
# TODO: save all channels' pictures during sleep
if DEBUG:
print('Saving picture...', file=sys.stderr)
camera_f = gp.check_result(gp.gp_camera_file_get(camera, camera_fn.folder, camera_fn.name, gp.GP_FILE_TYPE_NORMAL))
out_fn = os.path.join(work_dir, "{}_{}.{}".format(str(capture_idx).zfill(10), ch, out_fmt))
gp.check_result(gp.gp_file_save(camera_f, out_fn))
if DEBUG:
print('Picture saved', file=sys.stderr)
if ch_idx == 0:
time_first_shot = time.time()
# GET READY FOR NEXT SHOT
ch_idx = (ch_idx+1) % len(channels)
ch = channels[ch_idx]
# TODO: multithreaded/asynchronous config
exp = str(config_dict['camera'][ch]['exp'])
iso = str(config_dict['camera'][ch]['iso'])
f_val = float(config_dict['camera'][ch]['f_val'])
set_camera_config(camera, exp, iso, f_val)
# TODO: check that the wheel is on the right position
if DEBUG:
print('Rotating filter wheel...', file=sys.stderr)
wheel.write([ch_idx+1, 0])
time.sleep(1)
if DEBUG:
print('Filter wheel rotated', file=sys.stderr)
if ch_idx == 0:
# just to be sure...if relay off command lost, screw up only one shot
relay.write("reset\n\r".encode('utf-8'))
if DEBUG:
print("Relay switch reset! Relay status:", file=sys.stderr)
relay_cmd = "relay readall\n\r".encode('utf-8')
relay.write(relay_cmd)
print(relay.readlines(), file=sys.stderr)
# TODO: sleep the diff between time of first shot and now
# (so that same channel has ~ interval)
print("Going to sleep", file=sys.stderr)
time.sleep(interval)
except KeyboardInterrupt:
clean_env(camera, wheel, relay)
def init_camera(**kwd_args):
context = gp.gp_context_new()
error, camera = gp.gp_camera_new()
error = gp.gp_camera_init(camera, context)
if DEBUG:
error, summary = gp.gp_camera_get_summary(camera, context)
print('Summary', file=sys.stderr)
print('=======', file=sys.stderr)
print(summary.text, file=sys.stderr)
return camera
def init_wheel(ven_id, pro_id, **kwd_args):
wheel = hid.device()
wheel.open(ven_id, pro_id)
if DEBUG:
# TODO: check filter total positions
if not wheel:
print("Error", file=sys.stderr)
return wheel
def init_relay(path, **kwd_args):
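# Assumption based on its usage elsewhere in this script: the relay board speaks a
# simple text protocol ("reset", "relay on N", "relay off N", "relay readall")
# over a 19200-baud serial line.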
relay = serial.Serial(path, 19200, timeout=1)
relay.write(b'reset\n\r')
if DEBUG:
relay.readlines()
relay.write(b'relay readall\n\r')
res = relay.readlines()
# TODO: check that all relays are off
if not res:
print("Error", file=sys.stderr)
return relay
def parse_args():
desc = "Script for running fluorescent timelapses"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'config_fn',
metavar='conf.json',
type=str,
help='json file containing the channel configurations'
)
args = parser.parse_args()
return args
def main(config_dict):
if DEBUG:
print('Initializing camera connection...', file=sys.stderr)
camera = init_camera(**config_dict['camera'])
if DEBUG:
print('Camera connection initialized', file=sys.stderr)
print('Initializing filter wheel connection...', file=sys.stderr)
wheel = init_wheel(**config_dict['wheel'])
if DEBUG:
print('Filter wheel connection initialized', file=sys.stderr)
print('Initializing relay switch connection...', file=sys.stderr)
relay = init_relay(**config_dict['relay'])
if DEBUG:
print('Relay switch connection initialized', file=sys.stderr)
print('Starting timelapse', file=sys.stderr)
timelapse(camera, wheel, relay, config_dict)
if __name__ == '__main__':
if len(sys.argv) > 1:
args = parse_args()
with open(args.config_fn, 'r') as config_f:
config_dict = json.load(config_f)
if DEBUG:
print(config_dict, file=sys.stderr)
main(config_dict)
| 2.296875
| 2
|
Climate_API.py
|
wdawn9618/sqlalchemy-challenge
| 0
|
12783296
|
<filename>Climate_API.py
from flask import Flask, jsonify
import numpy as np
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from sqlalchemy.pool import StaticPool
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
Measure = Base.classes.measurement
Stat = Base.classes.station
session = Session(engine)
app = Flask(__name__)
#Home Page
@app.route("/")
def home():
"""List All Available API Routes"""
return(
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/2017-06-20<br/>"
f"/api/v1.0/2017-06-20/2017-06-28"
)
#Precipitation Route
@app.route("/api/v1.0/precipitation")
def precipitation():
prior_year = dt.date(2017,8,23) - dt.timedelta(days=365)
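# Look back one year from 2017-08-23, treated here as the dataset's most recent measurement date.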
precip_data = session.query(Measure.date, Measure.prcp).\
filter(Measure.date >= prior_year).order_by(Measure.date).all()
precip_dict = dict(precip_data)
return jsonify(precip_dict)
#Station Route
@app.route("/api/v1.0/stations")
def stations():
stat_list = session.query(Stat.station, Stat.name).all()
stations_list = list(stat_list)
return jsonify(stations_list)
#TOBS Route
@app.route("/api/v1.0/tobs")
def tobs():
prior_year = dt.date(2017,8,23) - dt.timedelta(days=365)
tobs = session.query(Measure.date, Measure.tobs).\
filter(Measure.date >= prior_year).\
order_by(Measure.date).all()
tobs_list = list(tobs)
return jsonify(tobs_list)
#Start Routes
@app.route("/api/v1.0/<start>/<end>")
def end_route(start, end):
end_route = session.query(Measure.date, func.min(Measure.tobs), func.avg(Measure.tobs), func.max(Measure.tobs)).\
filter(Measure.date >= start).\
filter(Measure.date <= end).\
group_by(Measure.date).all()
end_list = list(end_route)
return jsonify(end_list)
if __name__ == '__main__':
app.run(debug=True)
| 2.90625
| 3
|
reference/regexercise_solutions/literals.py
|
JaDogg/__py_playground
| 1
|
12783297
|
def search(strings, chars):
"""Given a sequence of strings and an iterator of chars, return True
if any of the strings would be a prefix of ''.join(chars); but
only consume chars up to the end of the match."""
if not all(strings):
return True
tails = strings
for ch in chars:
tails = [tail[1:] for tail in tails if tail[0] == ch]
if not all(tails):
return True
return False
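# Illustrative usage (not part of the original solution): the match consumes
# only the characters it needs from the stream.
# >>> it = iter("dogma")
# >>> search(["cat", "dog"], it)
# True
# >>> next(it)   # only 'd', 'o', 'g' were consumed by the match
# 'm'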
| 3.90625
| 4
|
python/schuelerpraktika/joshua_neubeck/mein_programm.py
|
maximilianharr/code_snippets
| 0
|
12783298
|
<reponame>maximilianharr/code_snippets<gh_stars>0
# -*- coding: utf-8 -*-
import time
import random
import sys
print('*********************************'); time.sleep(0.5)
print('*** SCHERE ** STEIN ** PAPIER ***'); time.sleep(0.5)
print('*********************************\n'); time.sleep(0.5)
print('Dabei ist die Schere die Nummer 1,'); time.sleep(0.5)
print(' der Stein die Nummer 2,'); time.sleep(0.5)
print(' das Papier die Nummer 3,'); time.sleep(3.5)
regeln = { 2 : 1, 1 : 3, 3 : 2 }
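# regeln maps each choice to the choice it beats: rock (2) beats scissors (1),
# scissors (1) beats paper (3), paper (3) beats rock (2).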
print('Die Regeln sind simpel: Papier schlägt Stein, Stein schlägt Schere und Schere schlägt Papier')
time.sleep(0.0)
while 1:
x = int(input("Wähle nun und gib die Zahl des Gegenstandes ein: "))
random.seed()
computer = random.randint(1,3)
if x == computer: print('Unentschieden')
elif regeln[x] == computer: print('Du hast gewonnen') # blink (player wins)
else: print('Ich der Computer habe gewonnen') # light up red for a while (computer wins)
a = int(input("Noch eine Runde? ;) JA=1, NEIN=0 "))
if a == 0: sys.exit()
| 3.5
| 4
|
hw1/test.py
|
pannag/cs294
| 1
|
12783299
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
def get_data():
base_cond=[[18,20,19,18,13,4,1],
[20,17,12,9,3,0,0],
[20,20,20,12,5,3,0]]
cond1=[[18,19,18,19,20,15,14],
[19,20,18,16,20,15,9],
[19,20,20,20,17,10,0],
[20,20,20,20,7,9,1]]
cond2=[[20,20,20,20,19,17,4],
[20,20,20,20,20,19,7],
[19,20,20,19,19,15,2]]
cond3=[[20,20,20,20,19,17,12],
[18,20,19,18,13,4,1],
[20,19,18,17,13,2,0],
[19,18,20,20,15,6,0]]
return base_cond,cond1,cond2,cond3
def main():
# load the data
results=get_data()
print(results[0], len(results[0]), len(results[0][0]))
fig=plt.figure()
xdata = np.array(range(0, 7))
sns.tsplot(time=xdata, data=results[0], color='r', linestyle='-')
sns.tsplot(time=xdata, data=results[1], color='b', linestyle='--')
sns.tsplot(time=xdata, data=results[2], color='g', linestyle='-.')
sns.tsplot(time=xdata, data=results[3], color='k', linestyle=':')
plt.ylabel('Success rate', fontsize=25)
plt.xlabel('Iteration num', fontsize=25, labelpad=-4)
plt.title('Robot performance', fontsize=25)
plt.legend(loc='lower left')
plt.show()
if __name__=='__main__':
main()
| 2.53125
| 3
|
src/audio_utils/mel/__init__.py
|
stefantaubert/audio-utils
| 0
|
12783300
|
from audio_utils.mel.main import (get_wav_tensor_segment, mel_to_numpy,
wav_to_float32_tensor)
from audio_utils.mel.mel_plot import (concatenate_mels, plot_melspec,
plot_melspec_np)
from audio_utils.mel.stft import STFT
from audio_utils.mel.taco_stft import STFTHParams, TacotronSTFT, TSTFTHParams
from audio_utils.mel.msd import align_mels_with_dtw, get_msd
| 1.4375
| 1
|
app/api/src/services/test_services.py
|
Greyman-Seu/AiServer
| 0
|
12783301
|
<filename>app/api/src/services/test_services.py
# @File : test_services.py
# @Date : 2020/08/17
# @Author: zhuyangkun
# @Email:<EMAIL>
from app.core.logging_core.logging_fastapi import fastapi_logger
def print_name(name):
fastapi_logger.info(f"name is {name}")
if __name__ == "__main__":
print_name("sbo")
| 1.695313
| 2
|
examples_and_tutorial/dD_examples.py
|
timotheehornek/sparsetorch
| 0
|
12783302
|
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import torch
from sparsetorch.dD_basis_functions import Tensorprod, Elemprod, Sparse
from sparsetorch.oneD_basis_functions import Hat, Gauss, Fourier, Chebyshev, Legendre
from sparsetorch.plotter import plot_3D_all
from sparsetorch.utils import get_equidist_coord, get_rand_coord
from sparsetorch.solver import Model, Solver
def f_dD(x):
"""Simple example function defined on interval `[0, 1]`
Parameters
----------
x : torch.Tensor
coordinates for evaluation
Returns
-------
torch.Tensor
function evaluations
"""
result = 4 * x[0] * (x[0] - 1)
for x_i in x[1:]:
result *= 4 * x_i * (x_i - 1)
result *= torch.exp(2 * torch.prod(x, dim=0))
return result
def g_dD(x):
"""Complicated example function defined on interval `[0, 6]`
Parameters
----------
x : torch.Tensor
coordinates for evaluation
Returns
-------
torch.Tensor
function evaluations
"""
result = x[0] * (x[0] - 6) / 9
for x_i in x[1:]:
result *= x_i * (x_i - 6) / 9
result *= torch.exp(torch.sin(torch.prod(x, dim=0)))
return result
def step_dD(x):
"""Another example function defined on interval `[0, 1]`, discontinuous
Parameters
----------
x : torch.Tensor
coordinates for evaluation
Returns
-------
torch.Tensor
function evaluations
"""
result = 1.0
for x_i in x:
result *= torch.round(2 * x_i)
return result
def example_1():
"""Example with same equidistant basis functions in 2D and tensorprod combination"""
#############
# settings: #
#############
# basis function settings
basis = Gauss # Hat or Gauss
bf_num = 30 # number of basis functions in one dimension
BF_dD = Tensorprod # Tensorprod, Elemprod, or Sparse
# evaluation coordinates
eval_num = 100 # number of function evaluations in one dimension
input = get_equidist_coord(torch.zeros(2), torch.ones(2),
torch.ones(2) * eval_num)
# function evaluations
target = f_dD(input)
#############
# create 1D basis with equidistant basis functions
bf_1D = basis.equidist(bf_num)
bfs_1D = [bf_1D] * 2
# create dD basis with above declared 1D basis functions
bf_dD = BF_dD(bfs_1D)
# create model
model = Model(bf_dD, bf_dD.bf_num)
# create solver
solver = Solver(model, input, target)
# solve linear equation / least squares
solver.le()
# plot
plot_3D_all(model, f_dD, "Example 1")
def example_2():
"""Example with different equidistant basis functions in 2D,
tensorprod combination and different number of basis functions
in different dimensions"""
#############
# settings: #
#############
# basis function settings
basis_x = Hat # Hat or Gauss
basis_y = Gauss # Hat or Gauss
bf_num_x = 7 # number of basis functions in x direction
bf_num_y = 3 # number of basis functions in y direction
BF_dD = Tensorprod # Tensorprod, Elemprod, or Sparse
# evaluation coordinates
eval_num_x = 50 # number of function evaluations in x direction
eval_num_y = 60 # number of function evaluations in y direction
input = get_equidist_coord(torch.zeros(2), torch.ones(2),
torch.tensor([eval_num_x, eval_num_y]))
# function evaluations
target = f_dD(input)
#############
# create 1D basis with equidistant basis functions
bf_1D_x = basis_x.equidist(bf_num_x)
bf_1D_y = basis_y.equidist(bf_num_y)
bfs_1D = [bf_1D_x, bf_1D_y]
# create dD basis with above declared 1D basis functions
bf_dD = BF_dD(bfs_1D)
# create model
model = Model(bf_dD, bf_dD.bf_num)
# create solver
solver = Solver(model, input, target)
# solve linear equation / least squares
solver.le()
# plot
plot_3D_all(model, f_dD, "Example 2")
def example_3():
"""Example with custom basis functions and elemprod combination"""
#############
# settings: #
#############
# basis function settings
basis_x = Hat # Hat or Gauss
basis_y = Gauss # Hat or Gauss
bf_num = 50 # number of basis functions
torch.manual_seed(332)
# position and width parameters of basis functions
mu_x = torch.rand(bf_num)
h_x = torch.rand(bf_num)
mu_y = torch.rand(bf_num)
h_y = torch.rand(bf_num)
BF_dD = Elemprod # Tensorprod, Elemprod, or Sparse
# evaluation coordinates
eval_num = 60 # number of function evaluations in one dimension
input = get_equidist_coord(torch.zeros(2), torch.ones(2),
torch.ones(2) * eval_num)
# function evaluations
target = f_dD(input)
#############
# create 1D basis with equidistant basis functions
bf_1D_x = basis_x(mu_x, h_x)
bf_1D_y = basis_y(mu_y, h_y)
bfs_1D = [bf_1D_x, bf_1D_y]
# create dD basis with above declared 1D basis functions
bf_dD = BF_dD(bfs_1D)
# create model
model = Model(bf_dD, bf_dD.bf_num)
# create solver
solver = Solver(model, input, target)
# solve linear equation / least squares
solver.le()
# plot
plot_3D_all(model, f_dD, "Example 3")
def example_4():
"""Example with same hierarchical basis functions in 2D, sparse combination
and approximated function nonzero on boundary
"""
#
#############
# settings: #
#############
# basis function settings
basis = Hat # Hat or Gauss
level = 5 # highest level of basis functions in one dimension
BF_dD = Sparse # Tensorprod, Elemprod, or Sparse
# evaluation coordinates
eval_num = 100 # number of function evaluations in one dimension
input = get_equidist_coord(torch.zeros(2), torch.ones(2),
torch.ones(2) * eval_num)
# function evaluations
target = step_dD(input)
#############
# create 1D basis with hierarchical basis functions
bf_1D = basis.hierarchical(level, boundary=True)
bfs_1D = [bf_1D] * 2
# create dD basis with above declared 1D basis functions
bf_dD = BF_dD(bfs_1D)
# create model
model = Model(bf_dD, bf_dD.bf_num)
# create solver
solver = Solver(model, input, target)
# solve linear equation / least squares
solver.le()
# plot
plot_3D_all(model, step_dD, "Example 4")
def example_5():
"""Example with hierarchical basis functions in 2D, sparse combination
and approximated function nonzero on boundary
"""
#############
# settings: #
#############
# basis function settings
basis = Hat # Hat or Gauss
level_x = 4 # highest level of basis functions in x direction
level_y = 5 # highest level of basis functions in y direction
BF_dD = Sparse # Tensorprod, Elemprod, or Sparse
# evaluation coordinates
eval_num = 100 # number of function evaluations in one dimension
input = get_equidist_coord(torch.zeros(2), torch.ones(2),
torch.ones(2) * eval_num)
# function evaluations
target = step_dD(input)
#############
# create 1D basis with hierarchical basis functions
bf_1D_x = basis.hierarchical(level_x, boundary=True)
bf_1D_y = basis.hierarchical(level_y, boundary=True)
bfs_1D = [bf_1D_x, bf_1D_y]
# create dD basis with above declared 1D basis functions
bf_dD = BF_dD(bfs_1D)
# create model
model = Model(bf_dD, bf_dD.bf_num)
# create solver
solver = Solver(model, input, target)
# solve linear equation / least squares
solver.le()
# plot
plot_3D_all(model, step_dD, "Example 5")
def example_6():
"""Example with orthogonal basis functionsin 2D, sparse combination
and approximated function nonzero on boundary
"""
#
#############
# settings: #
#############
# basis function settings
basis = Chebyshev # Fourier, Chebyshev, or Legendre
n_max = 40 # maximum level of basis functions
BF_dD = Sparse # Tensorprod, Elemprod, or Sparse
# evaluation coordinates
eval_num = 100 # number of function evaluations in one dimension
input = get_equidist_coord(torch.zeros(2), torch.ones(2),
torch.ones(2) * eval_num)
# function evaluations
target = step_dD(input)
#############
# create 1D basis with orthogonal basis functions
bfs_1D = [basis(n_max)] * 2
# create dD basis with above declared 1D basis functions
bf_dD = BF_dD(bfs_1D)
# create model
model = Model(bf_dD, bf_dD.bf_num)
# create solver
solver = Solver(model, input, target)
# solve linear equation / least squares
solver.le()
# plot
plot_3D_all(model, step_dD, "Example 6")
def example_7():
"""Example with challenging function, orthogonal basis functions,
sparse combination and approximated function nonzero on boundary
"""
#############
# settings: #
#############
# basis function settings
basis = Fourier # Fourier, Chebyshev, or Legendre
n_max = 16 # maximum level of basis functions
BF_dD = Sparse # Tensorprod, Elemprod, or Sparse
# evaluation coordinates
eval_num = 100 # number of function evaluations in one dimension
input = get_equidist_coord(torch.zeros(2), 6 * torch.ones(2),
torch.ones(2) * eval_num)
# function evaluations
target = g_dD(input)
#############
# create 1D basis with orthogonal basis functions
bfs_1D = [basis(n_max, a=0.0, b=6.0)] * 2
# create dD basis with above declared 1D basis functions
bf_dD = BF_dD(bfs_1D)
# create model
model = Model(bf_dD, bf_dD.bf_num)
# create solver
solver = Solver(model, input, target)
# solve linear equation / least squares with regularization
solver.le()
# plot
plot_3D_all(
model,
g_dD,
"Example 7",
x_min=0,
x_max=6,
y_min=0,
y_max=6,
steps=2 * eval_num,
)
def example_8():
"""Example with challenging function, hierarchical basis functions,
sparse combination and approximated function nonzero on boundary
"""
#############
# settings: #
#############
# basis function settings
basis = Hat # Hat or Gauss
level = 8 # highest level of basis functions in one dimension
BF_dD = Sparse # Tensorprod, Elemprod, or Sparse
# evaluation coordinates
eval_num = 150 # number of function evaluations in one dimension
input = get_equidist_coord(torch.zeros(2), 6 * torch.ones(2),
torch.ones(2) * eval_num)
# function evaluations
target = g_dD(input)
# create 1D basis with hierarchical basis functions
bf_1D = basis.hierarchical(level, boundary=False, a=0, b=6)
bfs_1D = [bf_1D] * 2
# create dD basis with above declared 1D basis functions
bf_dD = BF_dD(bfs_1D)
# create model
model = Model(bf_dD, bf_dD.bf_num)
# create solver
solver = Solver(model, input, target)
# solve linear equation / least squares with regularization
solver.le()
# plot
plot_3D_all(
model,
g_dD,
"Example 8",
x_min=0,
x_max=6,
y_min=0,
y_max=6,
steps=2 * eval_num,
)
if __name__ == "__main__":
example_1()
example_2()
    example_3()
    example_4()
example_5()
example_6()
example_7()
example_8()
| 2.234375
| 2
|
util/device.py
|
SENERGY-Platform/mgw-kasa-dc
| 0
|
12783303
|
<reponame>SENERGY-Platform/mgw-kasa-dc<filename>util/device.py
"""
Copyright 2021 InfAI (CC SES)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import typing
import mgw_dc.dm
from kasa import SmartDevice
__all__ = ("KasaDevice",)
class KasaDevice(mgw_dc.dm.Device):
def __init__(self, id: str, name: str, type: str, kasa_device: SmartDevice, state: typing.Optional[str] = None,
attributes=None):
super().__init__(id, name, type, state, attributes)
self._kasa_device = kasa_device
def get_kasa(self) -> SmartDevice:
return self._kasa_device
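# Hedged usage sketch (not part of the original module): the host address, id,
# name and type strings below are placeholders; only the constructor signature
# and get_kasa() defined above are relied upon.
def _kasa_device_usage_sketch():
    kasa_dev = SmartDevice("192.168.0.42")  # hypothetical device address
    device = KasaDevice(id="kasa-plug-1", name="Living room plug",
                        type="smart-plug", kasa_device=kasa_dev)
    return device.get_kasa() is kasa_dev  # True: the wrapper returns the wrapped device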
| 2.046875
| 2
|
avgn/custom_parsing/stowell_birds.py
|
xingjeffrey/avgn_paper
| 0
|
12783304
|
<reponame>xingjeffrey/avgn_paper
import pandas as pd
from avgn.utils.audio import get_samplerate
import librosa
from avgn.utils.json import NoIndentEncoder
import json
import avgn
from avgn.utils.paths import DATA_DIR
def parse_csv(csvrow, DSLOC):
wav_df = pd.DataFrame(
columns=[
"species",
"year",
"fgbg",
"trntst",
"indv",
"cutted",
"groundx",
"wavnum",
"wavloc",
]
)
csv = pd.read_csv(csvrow.csvloc)
for idx, wavrow in csv.iterrows():
cutted, bgx, indv, wavnum = wavrow.wavfilename[:-4].split("_")
wf = list(DSLOC.glob("wav/*/" + wavrow.wavfilename))
if len(wf) > 0:
wf = wf[0]
        else:
            print("No wav available, skipping")
            continue
wav_df.loc[len(wav_df)] = [
csvrow.species,
csvrow.withinacross,
csvrow.fgbg,
csvrow.traintest,
indv,
cutted,
bgx,
wavnum,
wf,
]
return wav_df
def generate_json(row, DT_ID, noise_indv_df):
""" generate a json from available wav information for stowell dataset
"""
DATASET_ID = "stowell_" + row.species
sr = get_samplerate(row.wavloc.as_posix())
wav_duration = librosa.get_duration(filename=row.wavloc)
# make json dictionary
json_dict = {}
json_dict["indvs"] = {row.indv: {}}
    # add species (mapped to its scientific name below)
species = {
"chiffchaff": "Phylloscopus collybita",
"littleowl" : "Athene noctua",
"pipit" : "Anthus trivialis",
}
json_dict["species"] = species[row.species]
json_dict["common_name"] = row.species
# add year information
json_dict["year"] = row.year
# add train/test split
json_dict["train"] = row.trntst
# add wav number
json_dict["wav_num"] = int(row.wavnum)
# add wav location
json_dict["wav_loc"] = row.wavloc.as_posix()
# rate and length
json_dict["samplerate_hz"] = sr
json_dict["length_s"] = wav_duration
# get noise loc
noise_indv_df = noise_indv_df[
(noise_indv_df.species == row.species)]
noise_indv_df = noise_indv_df[
(noise_indv_df.year == row.year)]
noise_indv_df = noise_indv_df[
(noise_indv_df.groundx == row.groundx)]
noise_indv_df = noise_indv_df[
(noise_indv_df.fgbg == 'bg')]
if len(noise_indv_df[noise_indv_df.wavnum == row.wavnum]) > 0:
noise_loc = (
noise_indv_df[noise_indv_df.wavnum == row.wavnum].iloc[0].wavloc.as_posix()
)
else:
if len(noise_indv_df) > 0:
noise_loc = noise_indv_df.iloc[0].wavloc.as_posix()
else:
            # no matching background-noise recording exists; skip JSON generation for this wav
            return
json_dict["noise_loc"] = noise_loc
# dump json
json_txt = json.dumps(json_dict, cls=NoIndentEncoder, indent=2)
# save information
json_out = (
DATA_DIR / "processed" / DATASET_ID / DT_ID / "JSON" / (row.wavloc.stem + ".JSON")
)
# save json
avgn.utils.paths.ensure_dir(json_out.as_posix())
print(json_txt, file=open(json_out.as_posix(), "w"))
| 2.734375
| 3
|
coding/learn_gevent/gevent_14_spawn.py
|
yatao91/learning_road
| 3
|
12783305
|
# -*- coding: utf-8 -*-
import gevent
from gevent import Greenlet
def foo(message, n):
gevent.sleep(n)
print(message)
thread1 = Greenlet.spawn(foo, "Hello", 1)
thread2 = gevent.spawn(foo, "I live!", 2)
thread3 = gevent.spawn(lambda x: (x + 1), 2)
threads = [thread1, thread2, thread3]
gevent.joinall(threads)
| 3.15625
| 3
|
pikuli/uia/control_wrappers/data_grid.py
|
NVoronchev/pikuli
| 0
|
12783306
|
<gh_stars>0
# -*- coding: utf-8 -*-
from pikuli import FindFailed, logger
from pikuli.uia.control_wrappers.data_item import DataItem
from .uia_control import UIAControl
class DataGrid(UIAControl):
CONTROL_TYPE = 'DataGrid'
def __init__(self, *args, **kwargs):
super(DataGrid, self).__init__(*args, **kwargs)
self._last_tree = []
def find_row(self, row_name, force_expand=False):
        ''' If row_name is:
            a) a str or unicode, it is simply the name of a table row;
            b) a list, it gives the nesting of table rows, and the last entry is the row to find.
            force_expand -- whether to expand collapsed rows if they are encountered during the search and act as grouping rows for the target row.
        '''
def _find_row_precisely(obj, nested_name, exact_level):
rows = [DataItem(e) for e in obj.find_all(Name=nested_name, exact_level=exact_level) if e.CONTROL_TYPE in ["DataItem"]]
            if len(rows) > 1:
                raise Exception('ANPropGrid_Table.find_row._find_row_precisely(...): len(rows) > 1\n\tlen(rows) = %i\n\trows = %s' % (len(rows), str(rows)))
elif len(rows) == 0:
raise FindFailed('pikuli.ANPropGrid_Table: row \'%s\' not found.\nSearch arguments:\n\trow_name = %s\n\tforce_expand = %s' % (str(nested_name), str(row_name), str(force_expand)))
return rows[0]
logger.debug('pikuli.ANPropGrid_Table.find_row: searching by criteria item_name = \'%s\'' % str(row_name))
if isinstance(row_name, list):
row = _find_row_precisely(self, row_name[0], 1)
for nested_name in row_name[1:]:
if not row.is_expanded() and not force_expand:
raise FindFailed('pikuli.ANPropGrid_Table: row \'%s\' was found, but it is collapsed. Try to set force_expand = True.\nSearch arguments:\n\trow_name = %s\n\tforce_expand = %s' % (str(nested_name), str(row_name), str(force_expand)))
row.expand()
row = _find_row_precisely(self, row_name[0], 1)
                row = _find_row_precisely(row, nested_name, 0)  # Previously: this way the function searches Next first and only then Previous, giving maximum speed (if the row is not found the test fails anyway, so spending time on the previous-search is acceptable)
found_elem = row
else:
found_elem = _find_row_precisely(self, row_name, 1)
return found_elem
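# Hedged usage sketch (not from the original project): the row names and flag
# below are illustrative only; the sketch just shows the two accepted forms of
# row_name described in find_row's docstring.
def _find_row_usage_sketch(grid):
    # a) plain string: look up a top-level row by name
    row = grid.find_row('General')
    # b) list of nested names: descend through grouping rows, expanding collapsed ones
    nested = grid.find_row(['Appearance', 'Colors', 'Background'], force_expand=True)
    return row, nested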
| 2.046875
| 2
|
interlinks/scripts/JudaicaLink-interlink-04.py
|
judaicalink/judaicalink-generators
| 1
|
12783307
|
# <NAME>
#This code extracts further information from GND for the authors of Freimann collection who have an GND-ID assigned to them.
#15/01/2018
#Ver. 01
import rdflib
from rdflib import Namespace, URIRef, Graph , Literal
from SPARQLWrapper import SPARQLWrapper2, XML , RDF , JSON
from rdflib.namespace import RDF, FOAF , SKOS ,RDFS
import os
os.chdir(r'C:\Users\Maral\Desktop')
sparql = SPARQLWrapper2("http://localhost:3030/Datasets/sparql")
foaf = Namespace("http://xmlns.com/foaf/0.1/")
skos = Namespace("http://www.w3.org/2004/02/skos/core#")
gndo = Namespace("http://d-nb.info/standards/elementset/gnd#")
jl = Namespace("http://data.judaicalink.org/ontology/")
owl = Namespace ("http://www.w3.org/2002/07/owl#")
graph = Graph()
#graph.parse('C:\Users\Maral\Desktop\interlinks-04.ttl', format="turtle")
#graph.parse('C:\Users\Maral\Desktop\interlinks-04-enriched-01.ttl', format="turtle")
#graph.parse('C:\Users\Maral\Desktop\interlinks-04-enriched-02.ttl', format="turtle")
#graph.parse('C:\Users\Maral\Desktop\interlinks-04-enriched-03.ttl', format="turtle")
#graph.parse('C:\Users\Maral\Desktop\interlinks-04-enriched-04.ttl', format="turtle")
#graph.parse('C:\Users\Maral\Desktop\interlinks-04-enriched-05.ttl', format="turtle")
graph.parse(r'C:\Users\Maral\Desktop\interlinks-04-enriched-06.ttl', format="turtle")
sparql.setQuery("""
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX gndo: <http://d-nb.info/standards/elementset/gnd#>
PREFIX pro: <http://purl.org/hpi/patchr#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX edm: <http://www.europeana.eu/schemas/edm/>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dblp: <http://dblp.org/rdf/schema-2015-01-26#>
PREFIX dcterms: <http://purl.org/dc/terms/>
PREFIX bibtex: <http://data.bibbase.org/ontology/#>
Select ?x ?same ?same2
{
GRAPH <http://maral.wisslab.org/graphs/interlinks> {
?x owl:sameAs ?same
}
#GRAPH <http://maral.wisslab.org/graphs/gnd_persons> {
#GRAPH <http://maral.wisslab.org/graphs/bhr> {
#GRAPH <http://maral.wisslab.org/graphs/dbpedia_persons> {
#GRAPH <http://maral.wisslab.org/graphs/rujen> {
GRAPH <http://maral.wisslab.org/graphs/freimann-gnd> {
?same a foaf:Person.
?same owl:sameAs ?same2
}
}
""")
sparql.setReturnFormat(XML)
results = sparql.query().convert()
graph.bind('foaf',foaf)
graph.bind('skos',skos)
graph.bind('gndo',gndo)
graph.bind('jl',jl)
graph.bind('owl',owl)
if (u"x",u"same2") in results:
bindings = results[u"x",u"same2"]
for b in bindings:
uri = b['x'].value
same = b['same2'].value
if uri != same:
graph.add( (URIRef(uri), RDF.type , foaf.Person ) )
            graph.add( (URIRef(uri), owl.sameAs, URIRef(same)) )
#graph.serialize(destination='interlinks-04-enriched-01.ttl', format="turtle")
#graph.serialize(destination='interlinks-04-enriched-02.ttl', format="turtle")
#graph.serialize(destination='interlinks-04-enriched-03.ttl', format="turtle")
#graph.serialize(destination='interlinks-04-enriched-04.ttl', format="turtle")
#graph.serialize(destination='interlinks-04-enriched-05.ttl', format="turtle")
#graph.serialize(destination='interlinks-04-enriched-06.ttl', format="turtle")
graph.serialize(destination='interlinks-04-enriched-07.ttl', format="turtle")
| 2.28125
| 2
|
Python/PyinstallerUI.py
|
ClericPy/somethings
| 4
|
12783308
|
<reponame>ClericPy/somethings
# -*- coding: utf-8 -*-
# pip install pyinstallerui
# > python -m pyinstallerui
from pyinstallerui.core import main
main()
| 1.109375
| 1
|
backup_initialize/checksum.py
|
hwwilliams/backup-tarfile-s3
| 0
|
12783309
|
import hashlib
import json
import os
import logging
import re
logger = logging.getLogger(__name__)
def get_hash(backup_name, backup_sources):
sha256_hash = hashlib.sha256()
for source in backup_sources:
source_path = source['Path']
        exclusions = '(?:%s)' % '|'.join(source['Exclude'])
        file_object = None
        try:
for root, dirs, files in os.walk(source_path):
for name in files:
filepath = os.path.join(root, name)
if not re.search(exclusions, filepath):
logger.debug(
f'Calculating checksum: {json.dumps({"File": filepath})}')
                        # note: only the first 8192 bytes of each file are fed into the checksum
                        file_object = open(filepath, 'rb')
                        sha256_hash.update(file_object.read(8192))
                        file_object.close()
except IOError as error:
if file_object:
file_object.close()
if 'Permission denied' in error.strerror:
logger.error(
f'Failed to calculate checksum. Permission Denied: {json.dumps({"Backup": backup_name, "Source": source})}')
raise error
return sha256_hash.hexdigest()
class CalculateChecksum():
def __init__(self, backup_config):
self.name = backup_config['Name']
self.sources = backup_config['Sources']
logger.debug(
f'Attempting to calculate checksum of backup: {json.dumps({"Backup": self.name})}')
self.hash = get_hash(self.name, self.sources)
logger.debug(
f'Successfully calculated checksum of backup: {json.dumps({"Backup": self.name, "Checksum": self.hash})}')
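# Hedged usage sketch (not part of the original module): the backup
# configuration below is hypothetical and only uses the keys that get_hash()
# and CalculateChecksum actually read ("Name", "Sources", and each source's
# "Path" and "Exclude" regex list).
def _checksum_usage_sketch():
    config = {
        "Name": "home-dir",
        "Sources": [{"Path": "/home/user", "Exclude": [r"\.cache", r"\.tmp$"]}],
    }
    backup = CalculateChecksum(config)
    return backup.hash  # hex digest built from the walked, non-excluded files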
| 2.640625
| 3
|
tests/experiments/panda-physics.py
|
LuCeHe/home-platform
| 1
|
12783310
|
<reponame>LuCeHe/home-platform
from panda3d.core import Vec3, ClockObject
from panda3d.bullet import BulletWorld
from panda3d.bullet import BulletPlaneShape
from panda3d.bullet import BulletRigidBodyNode
from panda3d.bullet import BulletBoxShape
from direct.showbase import ShowBase
base = ShowBase.ShowBase()
base.cam.setPos(10, -30, 20)
base.cam.lookAt(0, 0, 5)
# World
world = BulletWorld()
world.setGravity(Vec3(0, 0, -9.81))
# Plane
shape = BulletPlaneShape(Vec3(0, 0, 1), 1)
node = BulletRigidBodyNode('Ground')
node.addShape(shape)
np = base.render.attachNewNode(node)
np.setPos(0, 0, -2)
world.attachRigidBody(node)
# Boxes
model = base.loader.loadModel('models/box.egg')
model.setPos(-0.5, -0.5, -0.5)
model.flattenLight()
shape = BulletBoxShape(Vec3(0.5, 0.5, 0.5))
for i in range(10):
node = BulletRigidBodyNode('Box')
node.setMass(1.0)
node.addShape(shape)
np = base.render.attachNewNode(node)
np.setPos(0, 0, 2 + i * 2)
world.attachRigidBody(node)
model.copyTo(np)
globalClock = ClockObject.getGlobalClock()
# Update
def update(task):
dt = globalClock.getDt()
world.doPhysics(dt)
return task.cont
base.taskMgr.add(update, 'update')
base.run()
| 2.046875
| 2
|
helper_functions.py
|
zywkloo/Insomnia-Dungeon
| 0
|
12783311
|
#### ====================================================================================================================== ####
############# IMPORTS #############
#### ====================================================================================================================== ####
import csv
#### ====================================================================================================================== ####
############# CSV_LOADER #############
#### ====================================================================================================================== ####
def csv_loader(filename, readall=False):
    ''' Helper function that reads in a CSV file. Optional flag for including the header row.
        Input: filename (string), readall (optional bool, default False)
        Output: list of rows (comma separated); the header row is dropped unless readall is True
    '''
returnList = []
with open(filename) as csvfile:
for row in csv.reader(csvfile):
returnList.append(row)
if readall:
return returnList
else:
return returnList[1:]
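# Small usage sketch (not in the original file): 'scores.csv' is a hypothetical
# path; by default the header row is dropped, with readall=True it is kept.
def _csv_loader_usage_sketch():
    data_rows = csv_loader('scores.csv')               # rows without the header
    all_rows = csv_loader('scores.csv', readall=True)  # header row included
    return data_rows, all_rows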
| 1.9375
| 2
|
test/unit/test_noise_gates.py
|
stjordanis/pyquil
| 677
|
12783312
|
import numpy as np
from pyquil.gates import RZ, RX, I, CZ, ISWAP, CPHASE
from pyquil.noise_gates import _get_qvm_noise_supported_gates, THETA
def test_get_qvm_noise_supported_gates_from_compiler_isa(compiler_isa):
gates = _get_qvm_noise_supported_gates(compiler_isa)
for q in [0, 1, 2]:
for g in [
I(q),
RX(np.pi / 2, q),
RX(-np.pi / 2, q),
RX(np.pi, q),
RX(-np.pi, q),
RZ(THETA, q),
]:
assert g in gates
assert CZ(0, 1) in gates
assert CZ(1, 0) in gates
assert ISWAP(1, 2) in gates
assert ISWAP(2, 1) in gates
assert CPHASE(THETA, 2, 0) in gates
assert CPHASE(THETA, 0, 2) in gates
ASPEN_8_QUBITS_NO_RX = {8, 9, 10, 18, 19, 28, 29, 31}
ASPEN_8_QUBITS_NO_RZ = {8, 9, 10, 18, 19, 28, 29, 31}
ASPEN_8_EDGES_NO_CZ = {(0, 1), (10, 11), (1, 2), (21, 22), (17, 10), (12, 25)}
def test_get_qvm_noise_supported_gates_from_aspen8_isa(qcs_aspen8_quantum_processor, noise_model_dict):
gates = _get_qvm_noise_supported_gates(qcs_aspen8_quantum_processor.to_compiler_isa())
for q in range(len(qcs_aspen8_quantum_processor._isa.architecture.nodes)):
if q not in ASPEN_8_QUBITS_NO_RX:
for g in [
RX(np.pi / 2, q),
RX(-np.pi / 2, q),
RX(np.pi, q),
RX(-np.pi, q),
]:
assert g in gates
if q not in ASPEN_8_QUBITS_NO_RZ:
assert RZ(THETA, q) in gates
for edge in qcs_aspen8_quantum_processor._isa.architecture.edges:
if (
edge.node_ids[0],
edge.node_ids[1],
) in ASPEN_8_EDGES_NO_CZ:
continue
assert CZ(edge.node_ids[0], edge.node_ids[1]) in gates
assert CZ(edge.node_ids[1], edge.node_ids[0]) in gates
| 2.015625
| 2
|
tools/eslint/internal/toolchain.bzl
|
jmillikin/rules_javascript
| 2
|
12783313
|
<gh_stars>1-10
# Copyright 2019 the rules_javascript authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
load(
"//javascript/node:node.bzl",
_node_common = "node_common",
)
TOOLCHAIN_TYPE = "@rules_javascript//tools/eslint:toolchain_type"
EslintToolchainInfo = provider(fields = ["files", "vars", "eslint_executable"])
def _eslint_toolchain_info(ctx):
node_toolchain = ctx.attr._node_toolchain[_node_common.ToolchainInfo]
runfiles = ctx.attr.eslint[DefaultInfo].default_runfiles.files
toolchain = EslintToolchainInfo(
eslint_executable = ctx.executable.eslint,
files = depset(
direct = [ctx.executable.eslint],
transitive = [
runfiles,
node_toolchain.files,
],
),
vars = {"ESLINT": ctx.executable.eslint.path},
)
return [
platform_common.ToolchainInfo(eslint_toolchain = toolchain),
platform_common.TemplateVariableInfo(toolchain.vars),
]
eslint_toolchain_info = rule(
_eslint_toolchain_info,
attrs = {
"eslint": attr.label(
mandatory = True,
executable = True,
cfg = "host",
),
"_node_toolchain": attr.label(
default = "//javascript/node:toolchain",
),
},
provides = [
platform_common.ToolchainInfo,
platform_common.TemplateVariableInfo,
],
)
def _eslint_toolchain_alias(ctx):
toolchain = ctx.toolchains[TOOLCHAIN_TYPE].eslint_toolchain
return [
DefaultInfo(files = toolchain.files),
toolchain,
platform_common.TemplateVariableInfo(toolchain.vars),
]
eslint_toolchain_alias = rule(
_eslint_toolchain_alias,
toolchains = [TOOLCHAIN_TYPE],
provides = [
DefaultInfo,
EslintToolchainInfo,
platform_common.TemplateVariableInfo,
],
)
| 1.867188
| 2
|
applanga.py
|
applanga/applanga-cli
| 2
|
12783314
|
<reponame>applanga/applanga-cli
#!/usr/bin/env python
import click
from lib import constants
import commands
@click.group()
@click.version_option(constants.VERSION_NUMBER)
@click.option('--debug/--no-debug', default=False)
@click.pass_context
def cli(ctx, debug):
ctx.obj['DEBUG'] = debug
# Add all the commands we support
cli.add_command(commands.config.config)
cli.add_command(commands.init.init)
cli.add_command(commands.pull.pull)
cli.add_command(commands.push.push)
cli.add_command(commands.pullSource.pullSource)
cli.add_command(commands.pushTarget.pushTarget)
# Initialize the command line tool
if __name__ == '__main__':
cli(obj={})
| 1.867188
| 2
|
RIFF/riffedit.py
|
mirabilos/ev-useful
| 0
|
12783315
|
#!/usr/bin/python3
# coding: UTF-8
#-
# Copyright © 2018, 2020 mirabilos <<EMAIL>>
#
# Provided that these terms and disclaimer and all copyright notices
# are retained or reproduced in an accompanying document, permission
# is granted to deal in this work without restriction, including un‐
# limited rights to use, publicly perform, distribute, sell, modify,
# merge, give away, or sublicence.
#
# This work is provided “AS IS” and WITHOUT WARRANTY of any kind, to
# the utmost extent permitted by applicable law, neither express nor
# implied; without malicious intent or gross negligence. In no event
# may a licensor, author or contributor be held liable for indirect,
# direct, other damage, loss, or other issues arising in any way out
# of dealing in the work, even if advised of the possibility of such
# damage or existence of a defect, except proven that it results out
# of said person’s immediate fault when using the work as intended.
#-
# python3 riffedit.py -d src.sf2 # dump info only
# python3 riffedit.py -i src.sf2 # identify metadata (see below)
# python3 riffedit.py src.sf2 dst.sf2 { [-az] 'chnk' 'content' } ...
# where -a means to align with NULs and -z to NUL-terminate
# chnk means the RIFF chunk, LIST<chnk>/chnk is also supported
# Chunks currently need to exist in the input, insertion and deletion
# is missing for some later version to add.
# The comment field is limited to 65535 ASCII bytes, the others to 255.
#
# Metadata from a soundfont only includes chunks useful in copyright
# tracking. It outputs the INFO chunks, using input ordering, in the
# format “chunk_name \xFE chunk_body \xFF”, where both name and body
# (properly UTF-8 encoded) have all characters not valid for XML re‐
# moved or replaced with the OPTU-16 value or U+FFFD.
#
# You may also use this under the same terms as the Fluid (R3) soundfont.
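# Hypothetical invocation sketch (the soundfont name and chunk value are
# placeholders chosen for illustration; ICMT is the standard RIFF INFO comment
# chunk, addressed here with the LIST<chnk>/chnk syntax described above):
#   python3 riffedit.py in.sf2 out.sf2 -z 'LIST<INFO>/ICMT' 'Edited with riffedit'
# rewrites the INFO comment and NUL-terminates the new value before writing out.sf2.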
from io import SEEK_SET, SEEK_CUR
import os
import struct
import sys
assert(sys.version_info[0] >= 3)
class RIFFChunk(object):
def __init__(self, parent):
self.parent = parent
self.file = parent
while isinstance(self.file, RIFFChunk):
self.file = self.file.parent
cn = self.file.read(4)
cs = self.file.read(4)
ct = None
cf = cn
if (len(cn) != 4) or (len(cs) != 4):
raise EOFError
co = self.file.tell()
try:
cs = struct.unpack_from('<L', cs)[0]
except struct.error:
raise EOFError
if cn in (b'RIFF', b'LIST'):
ct = self.file.read(4)
if len(ct) != 4:
raise EOFError
cf = cn + b'<' + ct + b'>'
self.chunkname = cn
self.chunksize = cs
self.chunk_pad = cs & 1
self.container = ct
self.children = []
self.chunkfmt = cf
self.data_ofs = co
self.data_mem = None
self.justpast = self.data_ofs + self.chunksize + self.chunk_pad
if isinstance(self.parent, RIFFChunk) and \
self.justpast > self.parent.justpast:
raise IndexError('End of this %s chunk %d > end of parent %s chunk %d' % \
(self.chunkfmt, self.justpast, self.parent.chunkfmt, self.parent.justpast))
if self.container is not None:
while True:
try:
child = RIFFChunk(self)
except EOFError:
break
self.children.append(child)
if child.skip_past():
break
def __str__(self):
s = '<RIFFChunk(%s)' % self.chunkfmt
if self.container is not None:
q = '['
for child in self.children:
s += q + str(child)
q = ', '
s += ']'
return s + '>'
def skip_past(self):
self.file.seek(self.justpast, SEEK_SET)
return isinstance(self.parent, RIFFChunk) and \
self.justpast == self.parent.justpast
def __getitem__(self, key):
if self.container is None:
raise IndexError('Chunk %s is not of a container type' % self.chunkname)
for child in self.children:
if child.chunkfmt == key:
return child
raise IndexError('Chunk %s does not have a child %s' % (self.chunkname, key))
def print(self):
if self.container is not None:
raise IndexError('Chunk %s is of a container type' % self.chunkname)
if self.data_mem is not None:
return self.data_mem
self.file.seek(self.data_ofs, SEEK_SET)
s = self.file.read(self.chunksize)
if len(s) != self.chunksize:
raise IOError('Could not read %d data bytes (got %d)' % (self.chunksize, len(s)))
return s
def write(self, file):
if not isinstance(self.chunkname, bytes):
raise ValueError('Chunk name %s is not of type bytes' % self.chunkname)
if len(self.chunkname) != 4:
            raise ValueError('Chunk name %s is not of length 4' % self.chunkname)
if file.write(self.chunkname + struct.pack('<L', self.chunksize)) != 8:
raise IOError('Could not write header bytes to destination file at chunk %s' % \
self.chunkfmt)
if self.container is not None:
cld = file.tell()
if not isinstance(self.container, bytes):
raise ValueError('Container type %s is not of type bytes' % self.container)
if len(self.container) != 4:
                raise ValueError('Container type %s is not of length 4' % self.container)
if file.write(self.container) != 4:
raise IOError('Could not write container bytes to destination file at chunk %s' % \
self.chunkfmt)
for child in self.children:
child.write(file)
cld = file.tell() - cld
if cld != self.chunksize:
                raise ValueError('Children wrote %d bytes (expected %d) to file at chunk %s' % \
                    (cld, self.chunksize, self.chunkfmt))
else:
if self.data_mem is not None:
if file.write(self.data_mem) != self.chunksize:
raise IOError('Could not write %d data bytes to destination file at chunk %s' % \
(self.chunksize, self.chunkfmt))
else:
self.file.seek(self.data_ofs, SEEK_SET)
total = self.chunksize
while total > 0:
n = 65536
if n > total:
n = total
buf = self.file.read(n)
n = len(buf)
total -= n
if file.write(buf) != n:
raise IOError('Could not write %d data bytes to destination file at chunk %s' % \
(n, self.chunkfmt))
if self.chunk_pad > 0:
file.write(b'\0')
if file.tell() & 1:
raise ValueError('Misaligned file after chunk %s' % self.chunkfmt)
def set_length(self, newlen):
old = self.chunksize + self.chunk_pad
self.chunksize = newlen
self.chunk_pad = self.chunksize & 1
new = self.chunksize + self.chunk_pad
if isinstance(self.parent, RIFFChunk):
self.parent.adjust_length(new - old)
def set_content(self, content, nul_pad=False):
if self.container is not None:
raise ValueError('Cannot set content of container type %s' % self.chunkfmt)
if isinstance(content, str):
content = content.encode('UTF-8')
if not isinstance(content, bytes):
raise ValueError('New content is not of type bytes')
if nul_pad and (len(content) & 1):
content += b'\0'
self.data_mem = content
self.set_length(len(content))
def adjust_length(self, delta):
self.set_length(self.chunksize + delta)
class RIFFFile(RIFFChunk):
def __init__(self, file):
self.file = file
self.container = True
self.children = []
child = None
while True:
try:
                child = RIFFChunk(file)
except EOFError:
break
self.children.append(child)
if child is None:
raise IndexError('No RIFF chunks found')
self.justpast = child.justpast
def __str__(self):
s = '<RIFFFile'
q = '['
for child in self.children:
s += q + str(child)
q = ', '
return s + ']>'
def __getitem__(self, key):
return self.children[key]
def write(self, file):
for child in self.children:
child.write(file)
def dumpriff(container, level=0, isinfo=False):
indent = ('%s%ds' % ('%', 2*level)) % ''
print(indent + 'BEGIN level=%d' % level)
for chunk in container.children:
#print(indent + ' CHUNK %s of size %d, data at %d, next at %d' % (chunk.chunkfmt, chunk.chunksize, chunk.data_ofs, chunk.justpast))
if isinfo:
print(indent + ' CHUNK %s(%d): %s' % (chunk.chunkfmt, chunk.chunksize, chunk.print()))
else:
print(indent + ' CHUNK %s of size %d' % (chunk.chunkfmt, chunk.chunksize))
if chunk.container is not None:
dumpriff(chunk, level+1, chunk.chunkfmt == b'LIST<INFO>')
print(indent + 'END level=%d' % level)
if sys.argv[1] == '-i':
encode_table = {}
# bad characters in XML
for i in range(0, 32):
if i not in (0x09, 0x0A, 0x0D):
encode_table[i] = None
encode_table[0x7F] = 0xFFFD
for i in range(0x80, 0xA0):
encode_table[i] = 0xEF00 + i
for i in range(0xD800, 0xE000):
encode_table[i] = 0xFFFD
for i in range(0, 0x110000, 0x10000):
encode_table[i + 0xFFFE] = 0xFFFD
encode_table[i + 0xFFFF] = 0xFFFD
for i in range(0xFDD0, 0xFDF0):
encode_table[i] = 0xFFFD
# surrogateescape to OPTU-16
for i in range(128, 256):
encode_table[0xDC00 + i] = 0xEF00 + i
ident_encode_table = str.maketrans(encode_table)
del encode_table
def ident_encode(s):
return s.rstrip(b'\x00').\
decode(encoding='utf-8', errors='surrogateescape').\
translate(ident_encode_table).\
encode(encoding='utf-8', errors='replace')
if sys.argv[2] == '-':
f = sys.stdin.buffer
else:
f = open(sys.argv[2], 'rb')
riff = RIFFFile(f)
for chunk in riff[0][b'LIST<INFO>'].children:
if chunk.chunkname not in (b'ifil', b'isng', b'IPRD', b'ISFT'):
for x in (ident_encode(chunk.chunkname), b'\xFE',
ident_encode(chunk.print()), b'\xFF'):
sys.stdout.buffer.write(x)
sys.exit(0)
print('START')
if sys.argv[1] == '-d':
with open(sys.argv[2], 'rb') as f:
riff = RIFFFile(f)
dumpriff(riff)
else:
with open(sys.argv[1], 'rb') as f, open(sys.argv[2], 'wb', buffering=65536) as dst:
riff = RIFFFile(f)
dumpriff(riff)
i = 3
_flags = { '-a': 1, '-z': 2, '-az': 3 }
while i < len(sys.argv):
flags = 0
if sys.argv[i] in _flags:
flags = _flags[sys.argv[i]]
i += 1
if i >= len(sys.argv):
break
chunks = sys.argv[i].split('/')
if chunks[0].isnumeric():
chnk = riff
else:
chnk = riff[0]
            for cur in chunks:
                if cur.isnumeric():
                    chnk = chnk[int(cur)]
                else:
                    chnk = chnk[os.fsencode(cur)]
val = os.fsencode(sys.argv[i + 1])
if flags & 2:
val += b'\0'
chnk.set_content(val, bool(flags & 1))
i += 2
print("=> after processing:")
dumpriff(riff)
riff.write(dst)
print('OUT')
| 1.804688
| 2
|
digital-curling/myenv_keras/env.py
|
km-t/dcpython
| 0
|
12783316
|
import sys
import gym
import numpy as np
import gym.spaces
import math
import pandas as pd
df = pd.read_csv('./logs.csv', sep=',')
df = df.sample(frac=1)
def getData(line, keyNum):
if keyNum == 0: # vec
vec = str(df.iloc[line, 0])
v = np.zeros(11, dtype=np.float32)
for i in range(11):
v[i] = float(vec[i+1])
return v
else: # where, angle, power, reward
if keyNum==4:
ans=df.iloc[line+1, keyNum]
else:
ans = df.iloc[line, keyNum]
return ans
class MyEnv(gym.core.Env):
def __init__(self):
self.board = np.zeros(11, dtype=np.float32)
self.action_space = gym.spaces.Discrete(30)
low_bound = 0
high_bound = 1
self.observation_space = gym.spaces.Box(
low=low_bound, high=high_bound, shape=self.board.shape, dtype=np.float32)
self.time = 0
self.obs = getData(0,0)
    def step(self, action):
        st = "9"
        for i in range(len(self.obs)):
            st += str(int(self.obs[i]))
power = math.floor(action/6)
action = action-power*6
angle = math.floor(action/3)
action = action-angle*3
where = action
df2 = df[(df['vec']==st)&(df['where']==where)&(df['angle']==angle)&(df['power']==power)]
df2 = df2.sample(frac=1)
reward = float(df2.iloc[0,4])
self.time+=1
observation = getData(self.time, 0)
done = True
return observation, reward, done, {}
    def reset(self):
        self.obs = getData(self.time, 0)
        return self.obs
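# Hedged usage sketch (not part of the original file): it assumes logs.csv is
# present (the module reads it at import time) and just runs one step of the
# standard gym loop with a randomly sampled action.
def _myenv_usage_sketch():
    env = MyEnv()
    obs = env.reset()
    obs, reward, done, info = env.step(env.action_space.sample())
    return obs, reward, done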
| 2.890625
| 3
|
setup.py
|
fdvty/open-box
| 184
|
12783317
|
<gh_stars>100-1000
#!/usr/bin/env python
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import importlib.util
from pathlib import Path
from distutils.core import setup
from setuptools import find_packages
requirements = dict()
for extra in ["dev", "main"]:
# Skip `package @ git+[repo_url]` because not supported by pypi
if (3, 6) <= sys.version_info < (3, 7):
requirements[extra] = [r
for r in Path("requirements/%s_py36.txt" % extra).read_text().splitlines()
if '@' not in r
]
else:
requirements[extra] = [r
for r in Path("requirements/%s.txt" % extra).read_text().splitlines()
if '@' not in r
]
# Find version number
spec = importlib.util.spec_from_file_location("openbox.pkginfo", str(Path(__file__).parent / "openbox" / "pkginfo.py"))
pkginfo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(pkginfo)
version = pkginfo.version
package_name = pkginfo.package_name
# Get the platform info.
def get_platform():
platforms = {
'linux': 'Linux',
'linux1': 'Linux',
'linux2': 'Linux',
'darwin': 'OSX',
'win32': 'Windows'
}
if sys.platform not in platforms:
raise ValueError('Unsupported platform - %s.' % sys.platform)
return platforms[sys.platform]
platform = get_platform()
# Get readme strings.
def readme() -> str:
return open("README.md", encoding='utf-8').read()
setup(
name=package_name,
version=version,
description="Efficient and generalized blackbox optimization (BBO) system",
long_description=readme(),
long_description_content_type="text/markdown",
url='https://github.com/PKU-DAIR/open-box',
author="<NAME> from <EMAIL> <EMAIL>",
packages=find_packages(),
license="MIT",
install_requires=requirements["main"],
extras_require={"dev": requirements["dev"]},
package_data={"open-box": ["py.typed"]},
include_package_data=True,
python_requires='>=3.6.0',
entry_points={
"console_scripts": [
"openbox = openbox.__main__:main",
]
}
)
| 1.96875
| 2
|
setup.py
|
matt-carr/cloudstorage
| 0
|
12783318
|
import io
import re
from glob import glob
from os.path import basename, dirname, join, splitext
from setuptools import find_packages, setup
def read(*names, **kwargs):
with io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
) as fh:
return fh.read()
setup(
name='cloudstorage',
version='0.10.0',
license='MIT',
description='Unified cloud storage API for storage services.',
long_description='%s\n%s' % (
re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub(
'', read('README.rst')),
re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/scottwernervt/cloudstorage/',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords=' '.join([
'storage',
'amazon',
'aws',
's3',
'azure',
'rackspace',
'cloudfiles',
'google',
'cloudstorage',
'gcs',
'minio',
]),
install_requires=[
'inflection>=0.3.1', # MIT
'python-dateutil>=2.7.3', # Simplified BSD
'python-magic>=0.4.15', # MIT
# Python 3.4 needs backports
'typing;python_version<"3.5"', # PSF
'httpstatus35;python_version<"3.5"', # PSF
],
extras_require={
'amazon': [
'boto3>=1.8.00', # Apache 2.0
],
'google': [
'google-cloud-storage>=1.18.0', # Apache 2.0
'requests>=2.19.1', # Apache 2.0
],
'local': [
'filelock>=3.0.0', # Public Domain
'itsdangerous>=1.1.0', # BSD License
'xattr>=0.9.6', # MIT
],
'microsoft': [
'azure>=4.0.0', # MIT
],
'minio': [
'minio>=4.0.0', # Apache 2.0
],
'rackspace': [
'openstacksdk<=0.17.2', # Apache 2.0
'rackspacesdk>=0.7.5', # Apache 2.0
'requests>=2.19.1', # Apache 2.0
],
'docs': [
'sphinx', # BSD
'sphinx_rtd_theme', # MIT
'sphinx_autodoc_typehints', # MIT
'Pygments', # BSD
],
},
setup_requires=[
'pytest-runner', # MIT
],
tests_require=[
'flake8', # MIT
'pytest', # MIT
'prettyconf', # MIT
'requests>=2.19.1',
'tox', # MIT
],
test_suite='tests',
)
| 2.03125
| 2
|
utils/own_utils.py
|
arslan-chaudhry/orthog_subspace
| 17
|
12783319
|
<filename>utils/own_utils.py<gh_stars>10-100
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import tensorflow as tf
def OWNNorm(W):
"""
Implements the Orthogonalize Weight Normalization
Args:
W [in X out], [h, w, in_channels, out_channels]
Returns
W = VP, where P = UD^{-1/2}U^T.
"""
shape = W.get_shape().as_list()
V = tf.reshape(W, [-1, shape[-1]])
Vc = V - tf.reduce_mean(V, axis=0, keepdims=True) # Zero center the vectors
S = tf.matmul(tf.transpose(Vc), Vc)
s, u, _ = tf.linalg.svd(S)
D = tf.linalg.diag(tf.math.rsqrt(s))
P = tf.linalg.matmul(tf.linalg.matmul(u, D), tf.transpose(u))
W_hat = tf.matmul(Vc, P)
return tf.reshape(W_hat, shape)
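# Minimal usage sketch (not from the original repository): applies OWNNorm to a
# randomly initialised 3x3 conv kernel; the shape is arbitrary and eager
# execution (TF 2.x) is assumed.
def _ownnorm_usage_sketch():
    w = tf.random.normal([3, 3, 16, 32])  # [h, w, in_channels, out_channels]
    return OWNNorm(w)  # same shape, with orthogonalised (whitened) columns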
| 2.09375
| 2
|
06-appexecute/app_execute.py
|
AppTestBot/AppTestBot
| 0
|
12783320
|
import time
import pyaudio
import wave
from google_speech import Speech
class Call_APP():
def __init__(self, appname):
self.chunk = 1024
self.appname = appname
self.f = wave.open(r"/home/kimsoohyun/00-Research/02-Graph/06-appexecute/하이빅스비_2.wav","rb")
self.p = pyaudio.PyAudio()
self.stream = self.p.open(format =self.p.get_format_from_width(self.f.getsampwidth()),
channels = self.f.getnchannels(),
rate = self.f.getframerate(),
output = True)
self.lang = 'ko'
self.sox_effects = ("speed", "1.1")
def call_bixby(self):
data = self.f.readframes(self.chunk)
while data:
self.stream.write(data)
data = self.f.readframes(self.chunk)
self.stream.stop_stream()
self.p.terminate()
    def call_appname(self):
        # "실행" means "launch"; the command is spoken in Korean for the Korean-language assistant
        text = f'{self.appname}실행'
        speech = Speech(text, self.lang)
        speech.play(self.sox_effects)
    def exit_appname(self):
        # "종료" means "quit"
        text = f'{self.appname}종료'
        speech = Speech(text, self.lang)
        speech.play(self.sox_effects)
def start_main(self):
#self.call_bixby()
#time.sleep(0.5)
self.call_appname()
def end_main(self):
self.call_bixby()
time.sleep(0.5)
self.exit_appname()
def main(self, startend):
if startend == 'start':
self.start_main()
elif startend == "end":
self.end_main()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--appname','-a',
type=str,
required=True,
help='input appname')
parser.add_argument('--startend','-s',
type=str,
required=True,
help='input start or end message')
args = parser.parse_args()
c = Call_APP(args.appname)
c.main(args.startend)
| 3.015625
| 3
|
2021/day12/day12.py
|
ChrisCh7/advent-of-code
| 3
|
12783321
|
<filename>2021/day12/day12.py
from collections import defaultdict
paths = []
def part1(connections: dict[str, list[str]]):
traverse('start', connections, [])
print('Part 1:', len(paths))
def part2(connections: dict[str, list[str]]):
counter = traverse_p2('start', connections, [])
print('Part 2:', counter)
def traverse(from_node: str, connections: dict[str, list[str]], path: list[str]):
global paths
if from_node == 'end':
path.append(from_node)
paths.append(path)
return
next_nodes = connections[from_node]
for node in next_nodes:
if node == 'start':
continue
if node.islower() and node in path:
continue
traverse(node, connections, path + [from_node])
def traverse_p2(from_node: str, connections: dict[str, list[str]], path: list[str], counter=0):
if from_node == 'end':
return 1
next_nodes = connections[from_node]
for node in next_nodes:
if node == 'start':
continue
lower_nodes = [node for node in path if node.islower()]
double_exists = len(lower_nodes) == len(set(lower_nodes)) + 1
if node.islower() and node in path and double_exists:
continue
counter += traverse_p2(node, connections, path + [node])
return counter
if __name__ == '__main__':
with open('in.txt') as file:
lines = file.read().splitlines()
connections = defaultdict(list)
for line in lines:
from_node, to_node = line.split('-')
connections[from_node].append(to_node)
connections[to_node].append(from_node)
part1(connections)
part2(connections)
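# Illustrative sketch (not part of the original solution): builds a tiny cave
# graph in the same adjacency-list shape that the __main__ block derives from
# in.txt, so the traversal helpers can be exercised without the puzzle input.
def _day12_usage_sketch():
    tiny = defaultdict(list)
    for a, b in [('start', 'A'), ('A', 'b'), ('A', 'end'), ('b', 'end')]:
        tiny[a].append(b)
        tiny[b].append(a)
    return traverse_p2('start', tiny, [])  # number of paths under part-2 rules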
| 3.703125
| 4
|
Chapter04/references/musicvae.py
|
loftwah/hands-on-music-generation-with-magenta
| 0
|
12783322
|
<filename>Chapter04/references/musicvae.py<gh_stars>0
# -*- coding: utf-8 -*-
"""MusicVAE.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/notebooks/magenta/music_vae/music_vae.ipynb
Copyright 2017 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
# MusicVAE: A Hierarchical Latent Vector Model for Learning Long-Term Structure in Music.
### ___<NAME>, <NAME>, <NAME>, <NAME>, and <NAME>___
[MusicVAE](https://g.co/magenta/music-vae) learns a latent space of musical scores, providing different modes
of interactive musical creation, including:
* Random sampling from the prior distribution.
* Interpolation between existing sequences.
* Manipulation of existing sequences via attribute vectors.
Examples of these interactions can be generated below, and selections can be heard in our
[YouTube playlist](https://www.youtube.com/playlist?list=PLBUMAYA6kvGU8Cgqh709o5SUvo-zHGTxr).
For short sequences (e.g., 2-bar "loops"), we use a bidirectional LSTM encoder
and LSTM decoder. For longer sequences, we use a novel hierarchical LSTM
decoder, which helps the model learn longer-term structures.
We also model the interdependencies between instruments by training multiple
decoders on the lowest-level embeddings of the hierarchical decoder.
For additional details, check out our [blog post](https://g.co/magenta/music-vae) and [paper](https://goo.gl/magenta/musicvae-paper).
___
This colab notebook is self-contained and should run natively on google cloud. The [code](https://github.com/tensorflow/magenta/tree/master/magenta/models/music_vae) and [checkpoints](http://download.magenta.tensorflow.org/models/music_vae/checkpoints.tar.gz) can be downloaded separately and run locally, which is required if you want to train your own model.
# Basic Instructions
1. Double click on the hidden cells to make them visible, or select "View > Expand Sections" in the menu at the top.
2. Hover over the "`[ ]`" in the top-left corner of each cell and click on the "Play" button to run it, in order.
3. Listen to the generated samples.
4. Make it your own: copy the notebook, modify the code, train your own models, upload your own MIDI, etc.!
# Environment Setup
Includes package installation for sequence synthesis. Will take a few minutes.
"""
#@title Setup Environment
#@test {"output": "ignore"}
import glob
print 'Copying checkpoints and example MIDI from GCS. This will take a few minutes...'
!gsutil -q -m cp -R gs://download.magenta.tensorflow.org/models/music_vae/colab2/* /content/
print 'Installing dependencies...'
!apt-get update -qq && apt-get install -qq libfluidsynth1 fluid-soundfont-gm build-essential libasound2-dev libjack-dev
!pip install -q pyfluidsynth
!pip install -qU magenta
# Hack to allow python to pick up the newly-installed fluidsynth lib.
# This is only needed for the hosted Colab environment.
import ctypes.util
orig_ctypes_util_find_library = ctypes.util.find_library
def proxy_find_library(lib):
if lib == 'fluidsynth':
return 'libfluidsynth.so.1'
else:
return orig_ctypes_util_find_library(lib)
ctypes.util.find_library = proxy_find_library
print 'Importing libraries and defining some helper functions...'
from google.colab import files
import magenta.music as mm
from magenta.models.music_vae import configs
from magenta.models.music_vae.trained_model import TrainedModel
import numpy as np
import os
import tensorflow as tf
# Necessary until pyfluidsynth is updated (>1.2.5).
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
def play(note_sequence):
mm.play_sequence(note_sequence, synth=mm.fluidsynth)
def interpolate(model, start_seq, end_seq, num_steps, max_length=32,
assert_same_length=True, temperature=0.5,
individual_duration=4.0):
"""Interpolates between a start and end sequence."""
note_sequences = model.interpolate(
start_seq, end_seq,num_steps=num_steps, length=max_length,
temperature=temperature,
assert_same_length=assert_same_length)
print 'Start Seq Reconstruction'
play(note_sequences[0])
print 'End Seq Reconstruction'
play(note_sequences[-1])
print 'Mean Sequence'
play(note_sequences[num_steps // 2])
print 'Start -> End Interpolation'
interp_seq = mm.sequences_lib.concatenate_sequences(
note_sequences, [individual_duration] * len(note_sequences))
play(interp_seq)
mm.plot_sequence(interp_seq)
return interp_seq if num_steps > 3 else note_sequences[num_steps // 2]
def download(note_sequence, filename):
mm.sequence_proto_to_midi_file(note_sequence, filename)
files.download(filename)
print 'Done'
"""# 2-Bar Drums Model
Below are 4 pre-trained models to experiment with. The first 3 map the 61 MIDI drum "pitches" to a reduced set of 9 classes (bass, snare, closed hi-hat, open hi-hat, low tom, mid tom, high tom, crash cymbal, ride cymbal) for a simplified but less expressive output space. The last model uses a [NADE](http://homepages.inf.ed.ac.uk/imurray2/pub/11nade/) to represent all possible MIDI drum "pitches".
* **drums_2bar_oh_lokl**: This *low* KL model was trained for more *realistic* sampling. The output is a one-hot encoding of 2^9 combinations of hits. It has a single-layer bidirectional LSTM encoder with 512 nodes in each direction, a 2-layer LSTM decoder with 256 nodes in each layer, and a Z with 256 dimensions. During training it was given 0 free bits, and had a fixed beta value of 0.8. After 300k steps, the final accuracy is 0.73 and KL divergence is 11 bits.
* **drums_2bar_oh_hikl**: This *high* KL model was trained for *better reconstruction and interpolation*. The output is a one-hot encoding of 2^9 combinations of hits. It has a single-layer bidirectional LSTM encoder with 512 nodes in each direction, a 2-layer LSTM decoder with 256 nodes in each layer, and a Z with 256 dimensions. During training it was given 96 free bits and had a fixed beta value of 0.2. It was trained with scheduled sampling with an inverse sigmoid schedule and a rate of 1000. After 300k, steps the final accuracy is 0.97 and KL divergence is 107 bits.
* **drums_2bar_nade_reduced**: This model outputs a multi-label "pianoroll" with 9 classes. It has a single-layer bidirectional LSTM encoder with 512 nodes in each direction, a 2-layer LSTM-NADE decoder with 512 nodes in each layer and 9-dimensional NADE with 128 hidden units, and a Z with 256 dimensions. During training it was given 96 free bits and has a fixed beta value of 0.2. It was trained with scheduled sampling with an inverse sigmoid schedule and a rate of 1000. After 300k steps, the final accuracy is 0.98 and KL divergence is 110 bits.
* **drums_2bar_nade_full**: The output is a multi-label "pianoroll" with 61 classes. A single-layer bidirectional LSTM encoder with 512 nodes in each direction, a 2-layer LSTM-NADE decoder with 512 nodes in each layer and 61-dimensional NADE with 128 hidden units, and a Z with 256 dimensions. During training it was given 0 free bits and has a fixed beta value of 0.2. It was trained with scheduled sampling with an inverse sigmoid schedule and a rate of 1000. After 300k steps, the final accuracy is 0.90 and KL divergence is 116 bits.
"""
#@title Load Pretrained Models
drums_models = {}
# One-hot encoded.
drums_config = configs.CONFIG_MAP['cat-drums_2bar_small']
drums_models['drums_2bar_oh_lokl'] = TrainedModel(drums_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/drums_2bar_small.lokl.ckpt')
drums_models['drums_2bar_oh_hikl'] = TrainedModel(drums_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/drums_2bar_small.hikl.ckpt')
# Multi-label NADE.
drums_nade_reduced_config = configs.CONFIG_MAP['nade-drums_2bar_reduced']
drums_models['drums_2bar_nade_reduced'] = TrainedModel(drums_nade_reduced_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/drums_2bar_nade.reduced.ckpt')
drums_nade_full_config = configs.CONFIG_MAP['nade-drums_2bar_full']
drums_models['drums_2bar_nade_full'] = TrainedModel(drums_nade_full_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/drums_2bar_nade.full.ckpt')
"""## Generate Samples"""
#@title Generate 4 samples from the prior of one of the models listed above.
drums_sample_model = "drums_2bar_oh_lokl" #@param ["drums_2bar_oh_lokl", "drums_2bar_oh_hikl", "drums_2bar_nade_reduced", "drums_2bar_nade_full"]
temperature = 0.5 #@param {type:"slider", min:0.1, max:1.5, step:0.1}
drums_samples = drums_models[drums_sample_model].sample(n=4, length=32, temperature=temperature)
for ns in drums_samples:
play(ns)
#@title Optionally download generated MIDI samples.
for i, ns in enumerate(drums_samples):
download(ns, '%s_sample_%d.mid' % (drums_sample_model, i))
"""## Generate Interpolations"""
#@title Option 1: Use example MIDI files for interpolation endpoints.
input_drums_midi_data = [
tf.gfile.Open(fn).read()
for fn in sorted(tf.gfile.Glob('/content/midi/drums_2bar*.mid'))]
#@title Option 2: upload your own MIDI files to use for interpolation endpoints instead of those provided.
input_drums_midi_data = files.upload().values() or input_drums_midi_data
#@title Extract drums from MIDI files. This will extract all unique 2-bar drum beats using a sliding window with a stride of 1 bar.
drums_input_seqs = [mm.midi_to_sequence_proto(m) for m in input_drums_midi_data]
extracted_beats = []
for ns in drums_input_seqs:
extracted_beats.extend(drums_nade_full_config.data_converter.to_notesequences(
drums_nade_full_config.data_converter.to_tensors(ns)[1]))
for i, ns in enumerate(extracted_beats):
print "Beat", i
play(ns)
#@title Interpolate between 2 beats, selected from those in the previous cell.
drums_interp_model = "drums_2bar_oh_hikl" #@param ["drums_2bar_oh_lokl", "drums_2bar_oh_hikl", "drums_2bar_nade_reduced", "drums_2bar_nade_full"]
start_beat = 0 #@param {type:"integer"}
end_beat = 1 #@param {type:"integer"}
start_beat = extracted_beats[start_beat]
end_beat = extracted_beats[end_beat]
temperature = 0.5 #@param {type:"slider", min:0.1, max:1.5, step:0.1}
num_steps = 13 #@param {type:"integer"}
drums_interp = interpolate(drums_models[drums_interp_model], start_beat, end_beat, num_steps=num_steps, temperature=temperature)
#@title Optionally download interpolation MIDI file.
download(drums_interp, '%s_interp.mid' % drums_interp_model)
"""# 2-Bar Melody Model
The pre-trained model consists of a single-layer bidirectional LSTM encoder with 2048 nodes in each direction, a 3-layer LSTM decoder with 2048 nodes in each layer, and Z with 512 dimensions. The model was given 0 free bits, and had its beta valued annealed at an exponential rate of 0.99999 from 0 to 0.43 over 200k steps. It was trained with scheduled sampling with an inverse sigmoid schedule and a rate of 1000. The final accuracy is 0.95 and KL divergence is 58 bits.
"""
#@title Load the pre-trained model.
mel_2bar_config = configs.CONFIG_MAP['cat-mel_2bar_big']
mel_2bar = TrainedModel(mel_2bar_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/mel_2bar_big.ckpt')
"""## Generate Samples"""
#@title Generate 4 samples from the prior.
temperature = 0.5 #@param {type:"slider", min:0.1, max:1.5, step:0.1}
mel_2_samples = mel_2bar.sample(n=4, length=32, temperature=temperature)
for ns in mel_2_samples:
play(ns)
#@title Optionally download samples.
for i, ns in enumerate(mel_2_samples):
download(ns, 'mel_2bar_sample_%d.mid' % i)
"""## Generate Interpolations"""
#@title Option 1: Use example MIDI files for interpolation endpoints.
input_mel_midi_data = [
tf.gfile.Open(fn).read()
for fn in sorted(tf.gfile.Glob('/content/midi/mel_2bar*.mid'))]
#@title Option 2: Upload your own MIDI files to use for interpolation endpoints instead of those provided.
input_mel_midi_data = files.upload().values() or input_mel_midi_data
#@title Extract melodies from MIDI files. This will extract all unique 2-bar melodies using a sliding window with a stride of 1 bar.
mel_input_seqs = [mm.midi_to_sequence_proto(m) for m in input_mel_midi_data]
extracted_mels = []
for ns in mel_input_seqs:
extracted_mels.extend(
mel_2bar_config.data_converter.to_notesequences(
mel_2bar_config.data_converter.to_tensors(ns)[1]))
for i, ns in enumerate(extracted_mels):
print "Melody", i
play(ns)
#@title Interpolate between 2 melodies, selected from those in the previous cell.
start_melody = 0 #@param {type:"integer"}
end_melody = 1 #@param {type:"integer"}
start_mel = extracted_mels[start_melody]
end_mel = extracted_mels[end_melody]
temperature = 0.5 #@param {type:"slider", min:0.1, max:1.5, step:0.1}
num_steps = 13 #@param {type:"integer"}
mel_2bar_interp = interpolate(mel_2bar, start_mel, end_mel, num_steps=num_steps, temperature=temperature)
#@title Optionally download interpolation MIDI file.
download(mel_2bar_interp, 'mel_2bar_interp.mid')
"""# 16-bar Melody Models
The pre-trained hierarchical model consists of a 2-layer stacked bidirectional LSTM encoder with 2048 nodes in each direction for each layer, a 16-step 2-layer LSTM "conductor" decoder with 1024 nodes in each layer, a 2-layer LSTM core decoder with 1024 nodes in each layer, and a Z with 512 dimensions. It was given 256 free bits, and had a fixed beta value of 0.2. After 25k steps, the final accuracy is 0.90 and KL divergence is 277 bits.
"""
#@title Load the pre-trained models.
mel_16bar_models = {}
hierdec_mel_16bar_config = configs.CONFIG_MAP['hierdec-mel_16bar']
mel_16bar_models['hierdec_mel_16bar'] = TrainedModel(hierdec_mel_16bar_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/mel_16bar_hierdec.ckpt')
flat_mel_16bar_config = configs.CONFIG_MAP['flat-mel_16bar']
mel_16bar_models['baseline_flat_mel_16bar'] = TrainedModel(flat_mel_16bar_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/mel_16bar_flat.ckpt')
"""## Generate Samples"""
#@title Generate 4 samples from the selected model prior.
mel_sample_model = "hierdec_mel_16bar" #@param ["hierdec_mel_16bar", "baseline_flat_mel_16bar"]
temperature = 0.5 #@param {type:"slider", min:0.1, max:1.5, step:0.1}
mel_16_samples = mel_16bar_models[mel_sample_model].sample(n=4, length=256, temperature=temperature)
for ns in mel_16_samples:
play(ns)
#@title Optionally download MIDI samples.
for i, ns in enumerate(mel_16_samples):
download(ns, '%s_sample_%d.mid' % (mel_sample_model, i))
"""## Generate Means"""
#@title Option 1: Use example MIDI files for interpolation endpoints.
input_mel_16_midi_data = [
tf.gfile.Open(fn).read()
for fn in sorted(tf.gfile.Glob('/content/midi/mel_16bar*.mid'))]
#@title Option 2: upload your own MIDI files to use for interpolation endpoints instead of those provided.
input_mel_16_midi_data = files.upload().values() or input_mel_16_midi_data
#@title Extract melodies from MIDI files. This will extract all unique 16-bar melodies using a sliding window with a stride of 1 bar.
mel_input_seqs = [mm.midi_to_sequence_proto(m) for m in input_mel_16_midi_data]
extracted_16_mels = []
for ns in mel_input_seqs:
extracted_16_mels.extend(
hierdec_mel_16bar_config.data_converter.to_notesequences(
hierdec_mel_16bar_config.data_converter.to_tensors(ns)[1]))
for i, ns in enumerate(extracted_16_mels):
print "Melody", i
play(ns)
#@title Compute the reconstructions and mean of the two melodies, selected from the previous cell.
mel_interp_model = "hierdec_mel_16bar" #@param ["hierdec_mel_16bar", "baseline_flat_mel_16bar"]
start_melody = 0 #@param {type:"integer"}
end_melody = 1 #@param {type:"integer"}
start_mel = extracted_16_mels[start_melody]
end_mel = extracted_16_mels[end_melody]
temperature = 0.5 #@param {type:"slider", min:0.1, max:1.5, step:0.1}
mel_16bar_mean = interpolate(mel_16bar_models[mel_interp_model], start_mel, end_mel, num_steps=3, max_length=256, individual_duration=32, temperature=temperature)
#@title Optionally download mean MIDI file.
download(mel_16bar_mean, '%s_mean.mid' % mel_interp_model)
"""#16-bar "Trio" Models (lead, bass, drums)
We present two pre-trained models for 16-bar trios: a hierarchical model and a flat (baseline) model.
The pre-trained hierarchical model consists of a 2-layer stacked bidirectional LSTM encoder with 2048 nodes in each direction for each layer, a 16-step 2-layer LSTM "conductor" decoder with 1024 nodes in each layer, 3 (lead, bass, drums) 2-layer LSTM core decoders with 1024 nodes in each layer, and a Z with 512 dimensions. It was given 1024 free bits, and had a fixed beta value of 0.1. It was trained with scheduled sampling with an inverse sigmoid schedule and a rate of 1000. After 50k steps, the final accuracy is 0.82 for lead, 0.87 for bass, and 0.90 for drums, and the KL divergence is 1027 bits.
The pre-trained flat model consists of a 2-layer stacked bidirectional LSTM encoder with 2048 nodes in each direction for each layer, a 3-layer LSTM decoder with 2048 nodes in each layer, and a Z with 512 dimensions. It was given 1024 free bits, and had a fixed beta value of 0.1. It was trained with scheduled sampling with an inverse sigmoid schedule and a rate of 1000. After 50k steps, the final accuracy is 0.67 for lead, 0.66 for bass, and 0.79 for drums, and the KL divergence is 1016 bits.
"""
#@title Load the pre-trained models.
trio_models = {}
hierdec_trio_16bar_config = configs.CONFIG_MAP['hierdec-trio_16bar']
trio_models['hierdec_trio_16bar'] = TrainedModel(hierdec_trio_16bar_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/trio_16bar_hierdec.ckpt')
flat_trio_16bar_config = configs.CONFIG_MAP['flat-trio_16bar']
trio_models['baseline_flat_trio_16bar'] = TrainedModel(flat_trio_16bar_config, batch_size=4, checkpoint_dir_or_path='/content/checkpoints/trio_16bar_flat.ckpt')
"""## Generate Samples"""
#@title Generate 4 samples from the selected model prior.
trio_sample_model = "hierdec_trio_16bar" #@param ["hierdec_trio_16bar", "baseline_flat_trio_16bar"]
temperature = 0.5 #@param {type:"slider", min:0.1, max:1.5, step:0.1}
trio_16_samples = trio_models[trio_sample_model].sample(n=4, length=256, temperature=temperature)
for ns in trio_16_samples:
play(ns)
#@title Optionally download MIDI samples.
for i, ns in enumerate(trio_16_samples):
download(ns, '%s_sample_%d.mid' % (trio_sample_model, i))
"""## Generate Means"""
#@title Option 1: Use example MIDI files for interpolation endpoints.
input_trio_midi_data = [
tf.gfile.Open(fn).read()
for fn in sorted(tf.gfile.Glob('/content/midi/trio_16bar*.mid'))]
#@title Option 2: Upload your own MIDI files to use for interpolation endpoints instead of those provided.
input_trio_midi_data = files.upload().values() or input_trio_midi_data
#@title Extract trios from MIDI files. This will extract all unique 16-bar trios using a sliding window with a stride of 1 bar.
trio_input_seqs = [mm.midi_to_sequence_proto(m) for m in input_trio_midi_data]
extracted_trios = []
for ns in trio_input_seqs:
extracted_trios.extend(
hierdec_trio_16bar_config.data_converter.to_notesequences(
hierdec_trio_16bar_config.data_converter.to_tensors(ns)[1]))
for i, ns in enumerate(extracted_trios):
    print("Trio", i)
play(ns)
#@title Compute the reconstructions and mean of the two trios, selected from the previous cell.
trio_interp_model = "hierdec_trio_16bar" #@param ["hierdec_trio_16bar", "baseline_flat_trio_16bar"]
start_trio = 0 #@param {type:"integer"}
end_trio = 1 #@param {type:"integer"}
start_trio = extracted_trios[start_trio]
end_trio = extracted_trios[end_trio]
temperature = 0.5 #@param {type:"slider", min:0.1, max:1.5, step:0.1}
trio_16bar_mean = interpolate(trio_models[trio_interp_model], start_trio, end_trio, num_steps=3, max_length=256, individual_duration=32, temperature=temperature)
#@title Optionally download mean MIDI file.
download(trio_16bar_mean, '%s_mean.mid' % trio_interp_model)
| 1.546875
| 2
|
Properties/analysis/complexity metrics/complexity/PMSmetrics.py
|
NazaninBayati/SCA
| 0
|
12783323
|
<filename>Properties/analysis/complexity metrics/complexity/PMSmetrics.py
# Read the metrics summary, print its header line, then drop the first two lines
# and the trailing line before writing the rest to the report file.
with open("Project Metrics Summary.txt", "r") as db_file:
    db_st = db_file.read().split("\n")
print(db_st[0])
db_st = db_st[2:len(db_st) - 1]
with open('Project Metrics Summary Report.txt', 'w') as filehandle:
    for listitem in db_st:
        filehandle.write('%s\n' % listitem)
| 2.875
| 3
|
Data science/Analise de dados/exercicio python/ler csv.py
|
Andrelirao/aulas-graduacao
| 10
|
12783324
|
<filename>Data science/Analise de dados/exercicio python/ler csv.py
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 16 23:28:44 2020
@author: luisc
"""
import csv
with open('titanic.csv') as arquivo:
    linhas = csv.reader(arquivo)
    for linha in linhas:
        print(linha)
| 3.171875
| 3
|
Rendering/Core/Testing/Python/TexturedSphere.py
|
jasper-yeh/VtkDotNet
| 3
|
12783325
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
#
# Texture a sphere.
#
# renderer and interactor
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# read the volume
reader = vtk.vtkJPEGReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/beach.jpg")
#---------------------------------------------------------
# Do the surface rendering
sphereSource = vtk.vtkSphereSource()
sphereSource.SetRadius(100)
textureSphere = vtk.vtkTextureMapToSphere()
textureSphere.SetInputConnection(sphereSource.GetOutputPort())
sphereStripper = vtk.vtkStripper()
sphereStripper.SetInputConnection(textureSphere.GetOutputPort())
sphereStripper.SetMaximumLength(5)
sphereMapper = vtk.vtkPolyDataMapper()
sphereMapper.SetInputConnection(sphereStripper.GetOutputPort())
sphereMapper.ScalarVisibilityOff()
sphereTexture = vtk.vtkTexture()
sphereTexture.SetInputConnection(reader.GetOutputPort())
sphereProperty = vtk.vtkProperty()
# sphereProperty.BackfaceCullingOn()
sphere = vtk.vtkActor()
sphere.SetMapper(sphereMapper)
sphere.SetTexture(sphereTexture)
sphere.SetProperty(sphereProperty)
#---------------------------------------------------------
ren.AddViewProp(sphere)
camera = ren.GetActiveCamera()
camera.SetFocalPoint(0, 0, 0)
camera.SetPosition(100, 400, -100)
camera.SetViewUp(0, 0, -1)
ren.ResetCameraClippingRange()
renWin.Render()
#---------------------------------------------------------
# test-related code
def TkCheckAbort (object_binding, event_name):
foo = renWin.GetEventPending()
if (foo != 0):
renWin.SetAbortRender(1)
iren.Initialize()
#iren.Start()
| 2.265625
| 2
|
app/Main/SYS/UniqueOperators.py
|
fineans/Vython
| 0
|
12783326
|
<gh_stars>0
from rply.token import BaseBox
import sys
from Main.Errors import error, errors
class UniqueOp(BaseBox):
def __init__(self, var):
self.var = var
class Increment(UniqueOp):
def eval(self):
try:
self.var.value = self.var.value + 1
return self.var.value
except:
try:
self.var.value = self.var.expression().increment()
return self.var.value
except:
error(errors.IMPOSSIBLEOPERATION, "", {
"type": "operationtype, var",
"operationtype": "Increase",
"var": self.var
})
sys.exit(1)
class Decrement(UniqueOp):
def eval(self):
try:
self.var.value = self.var.value - 1
return self.var.value
except:
try:
self.var.value = self.var.expression().decrement()
return self.var.value
except:
error(errors.IMPOSSIBLEOPERATION, "", {
"type": "operationtype, var",
"operationtype": "Decrease",
"var": self.var
})
sys.exit(1)
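# Illustrative usage (not part of the original module): Increment and Decrement expect
# a variable-like object exposing a numeric .value attribute.
#     class Var: value = 5
#     Increment(Var()).eval()   # -> 6
#     Decrement(Var()).eval()   # -> 4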
| 2.421875
| 2
|
tests/test_staroid.py
|
staroids/staroid-python
| 2
|
12783327
|
import unittest
import tempfile
import os
import pathlib
import shutil
from staroid import Staroid
def integration_test_ready():
return "STAROID_ACCESS_TOKEN" in os.environ and "STAROID_ACCOUNT" in os.environ
class TestStaroid(unittest.TestCase):
def test_initialize(self):
s = Staroid()
def test_read_config(self):
# unset env
at = None
ac = None
if "STAROID_ACCESS_TOKEN" in os.environ:
at = os.environ["STAROID_ACCESS_TOKEN"]
del os.environ["STAROID_ACCESS_TOKEN"]
if "STAROID_ACCOUNT" in os.environ:
ac = os.environ["STAROID_ACCOUNT"]
del os.environ["STAROID_ACCOUNT"]
# given
fp = tempfile.NamedTemporaryFile()
fp.write(b"access_token: abc\naccount: GITHUB/user1")
fp.flush()
# when
s = Staroid(config_path=fp.name)
# then
self.assertEqual("abc", s.get_access_token())
self.assertEqual("GITHUB/user1", s.get_account())
# restore env
        if at is not None:
            os.environ["STAROID_ACCESS_TOKEN"] = at
        if ac is not None:
            os.environ["STAROID_ACCOUNT"] = ac
def test_download_chisel(self):
# given
tmp_dir = tempfile.mkdtemp()
s = Staroid(cache_dir=tmp_dir)
# when
chisel_path = s.get_chisel_path()
# then
self.assertIsNotNone(chisel_path)
self.assertTrue(os.path.isfile(chisel_path))
# clean up
shutil.rmtree(pathlib.Path(tmp_dir))
@unittest.skipUnless(integration_test_ready(), "Integration test environment is not configured")
def test_read_default_account(self):
# given access_token is set but account is not set
ac = None
if "STAROID_ACCOUNT" in os.environ:
ac = os.environ["STAROID_ACCOUNT"]
del os.environ["STAROID_ACCOUNT"]
# when
s = Staroid()
# then
self.assertNotEqual(None, s.get_account())
# restore env
        if ac is not None:
            os.environ["STAROID_ACCOUNT"] = ac
| 2.46875
| 2
|
configuration/defaults.py
|
Wildertrek/gamechanger-data
| 18
|
12783328
|
<reponame>Wildertrek/gamechanger-data
from . import RENDERED_DIR
from pathlib import Path
DEFAULT_APP_CONFIG_NAME = "local"
DEFAULT_ES_CONFIG_NAME = "local"
RENDERED_DEFAULTS_PATH = Path(RENDERED_DIR, "defaults.json")
TEMPLATE_FILENAME_SUFFIX = '.template'
| 1.3125
| 1
|
src/wavegrad/random.py
|
JeremyCCHsu/test-debug
| 0
|
12783329
|
from scipy.stats import truncnorm
import numpy as np
import torch
def truncnorm_like(x):
size = [int(s) for s in x.shape]
eps = truncnorm.rvs(-3.001, 3.001, size=size) / 3.
return torch.from_numpy(eps)
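# Illustrative usage (not part of the original file): draw truncated-normal noise with
# the same shape as an existing tensor; values fall roughly in (-1, 1) after the /3 scaling.
#     x = torch.zeros(2, 80, 100)
#     eps = truncnorm_like(x)
#     assert eps.shape == x.shape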
| 2.625
| 3
|
iconic/iconic/roller.py
|
mikkoJJ/iconic-roller
| 0
|
12783330
|
<reponame>mikkoJJ/iconic-roller
""" Functions for randomly rolling the relationships.
"""
from .defaults import ICONS, RELATIONSHIP_TYPES
from .icon_relationships import IconRelationship
import random
from typing import List
def roll_relationships(relationship_points: int, min_icons: int) -> List[IconRelationship]:
"""
:param relationship_points:
        How many points are spent on relationships.
:param min_icons:
Minimum number of different icons to have a relationship to.
:return:
A list of icon relationship objects representing the rolled relationships.
"""
relationships = []
for i in range(0, relationship_points):
if len(relationships) < min_icons or random.randint(0, 1) == 0:
relationships.append(roll_new_icon())
else:
add_to_relationships(relationships)
return relationships
def roll_new_icon() -> IconRelationship:
icon = random.choice(ICONS)
ICONS.remove(icon)
relation_type = random.choice(RELATIONSHIP_TYPES)
relationship = IconRelationship(icon, relation_type)
return relationship
def add_to_relationships(relationships: list):
random.choice(relationships).points += 1
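# Illustrative usage (not part of the original module): spend 8 relationship points
# across at least 3 different icons. Note that roll_new_icon removes each chosen icon
# from the module-level ICONS list, so the pool shrinks across repeated calls.
#     rels = roll_relationships(relationship_points=8, min_icons=3)
#     assert len(rels) >= 3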
| 3.75
| 4
|
lspeas/tests/_test_centerline.py
|
almarklein/stentseg
| 1
|
12783331
|
<filename>lspeas/tests/_test_centerline.py
import numpy as np
import visvis as vv
import os
from stentseg.utils import PointSet
from stentseg.utils.centerline import find_centerline, points_from_mesh, smooth_centerline, pp_to_graph
from stentseg.utils.datahandling import select_dir, loadvol, loadmodel
from stentseg.utils.visualization import show_ctvolume
from stentseg.stentdirect import stentgraph
TEST = 14
if TEST == 1:
import imageio
    im = imageio.imread(os.path.expanduser('~/Desktop/test_centerline.png'))[:,:,0]
y, x = np.where(im < 200)
pp = PointSet(np.column_stack([x, y]))
start = (260, 60)
ends = [(230, 510), (260, 510), (360, 510)]
centerline = find_centerline(pp, start, ends, 8, ndist=20, regfactor=0.2, regsteps=10)
vv.figure(1); vv.clf()
a1 = vv.subplot(111)
vv.plot(pp, ms='.', ls='')
vv.plot(start[0], start[1], ms='.', ls='', mc='g', mw=15)
vv.plot([e[0] for e in ends], [e[1] for e in ends], ms='.', ls='', mc='r', mw=15)
vv.plot(centerline, ms='x', ls='', mw=15, mc='y')
a1.daspectAuto = False
elif TEST > 10:
# Select the ssdf basedir
basedir = select_dir(r'D:\LSPEAS\LSPEAS_ssdf',
r'F:\LSPEAS_ssdf_backup', r'G:\LSPEAS_ssdf_backup')
basedirstl = r'D:\Profiles\koenradesma\SURFdrive\UTdrive\MedDataMimics\LSPEAS_Mimics\Tests'
ptcode = 'LSPEAS_002'
ctcode = '12months'
cropname = 'stent'
showAxis = False # True or False
showVol = 'MIP' # MIP or ISO or 2D or None
clim0 = (0,2500)
# clim0 = -550,500
isoTh = 250
s = loadvol(basedir, ptcode, ctcode, cropname, 'avgreg')
if TEST == 11:
fname = os.path.join(basedirstl, ptcode, 'LSPEAS_002_12M_MGK Smoothed_Wrapped_DRGseEditRG 2_001.stl')
start1 = (110, 100, 70) # x,y,z ; dist
start2 = (87, 106, 40) # branch right
ends = [(110, 120, 15)] # prox
elif TEST == 12:
fname = os.path.join(basedirstl, ptcode, 'LSPEAS_003_12M_MGK Smoothed_Wrapped_DRGseEditRG 2_001.stl')
start1 = (190, 165, 60)
start2 = (207, 184, 34)
ends = [(179, 169, 17)]
elif TEST == 13:
fname = os.path.join(basedirstl, ptcode, 'LSPEAS_004_D_stent-l-th500.stl')
start1 = (146.1, 105.3, 69.3) # x,y,z
ends = [(112.98, 100.08, 62.03)]
elif TEST == 14:
from stentseg.apps._3DPointSelector import select3dpoints
points = select3dpoints(s.vol,nr_of_stents = 1)
start1 = points[0][0]
ends = points[1]
print('Get Endpoints: done')
else:
raise RuntimeError('Invalid test')
    if TEST == 14:
        # NOTE: this branch references points_from_nodes_in_graph and sd, which are not
        # defined or imported in this script, so it will fail as written.
        pp = points_from_nodes_in_graph(sd._nodes1)
else:
# Get pointset from STL, remove duplicates
pp = points_from_mesh(fname, invertZ = True)
# Find main centerline
    # regsteps = distance from the start/end points beyond which they have no effect on centerline finding
centerline1 = find_centerline(pp, start1, ends, 0.5, ndist=20, regfactor=0.2, regsteps=10, verbose=True)
# centerline1 = find_centerline(pp, start1, ends, 2, ndist=100, regfactor=0.2, regsteps=10, verbose=True)
# Find centerline of branch, using (last part of) main centerline as an end.
centerline2 = find_centerline(pp, start2, centerline1[-100:], 0.5, ndist=20, regfactor=0.2, regsteps=10, verbose=True)
# centerline2 = smooth_centerline(centerline2, 3)
centerline_nodes = pp_to_graph(centerline1)
f = vv.figure(1); vv.clf()
f.position = 709.00, 30.00, 1203.00, 1008.00
a1 = vv.subplot(121)
vv.plot(pp, ms='.', ls='', alpha=0.2, mw = 7) # vessel
vv.plot(PointSet(list(start1)), ms='.', ls='', mc='g', mw=18) # start1
vv.plot(PointSet(list(start2)), ms='.', ls='', mc='m', mw=16) # start2
vv.plot([e[0] for e in ends], [e[1] for e in ends], [e[2] for e in ends], ms='.', ls='', mc='r', mw=18) # ends
vv.plot(centerline1, ms='.', ls='', mw=8, mc='y')
vv.plot(centerline2, ms='.', ls='', mw=8, mc='c')
a1.axis.visible = showAxis
a1.daspect = 1,1,-1
a2 = vv.subplot(122)
show_ctvolume(s.vol, None, showVol=showVol, clim=clim0, isoTh=isoTh, removeStent=False)
centerline_nodes.Draw(mc='y', mw=8, lc='g', lw=2)
a2.axis.visible = showAxis
a2.daspect = 1,1,-1
a1.camera = a2.camera
| 2.09375
| 2
|
javatools/change.py
|
sokoslee/jvm.py
| 0
|
12783332
|
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see
# <http://www.gnu.org/licenses/>.
"""
Some abstraction of changes. Useful for the classdiff and jardiff
modules.
:author: <NAME> <<EMAIL>>
:license: LGPL
"""
from functools import wraps
__all__ = (
"squash",
"collect_by_typename", "collect_by_type",
"iterate_by_type", "yield_sorted_by_type",
"Change", "Addition", "Removal",
"GenericChange", "SuperChange",
"SquashedChange", "SquashedAddition", "SquashedRemoval", )
def collect_by_typename(obj_sequence, cache=None):
"""
collects objects from obj_sequence and stores them into buckets by
type name. cache is an optional dict into which we collect the
results.
"""
if cache is None:
cache = {}
for val in obj_sequence:
key = type(val).__name__
bucket = cache.get(key, None)
if bucket is not None:
bucket.append(val)
else:
cache[key] = [val]
return cache
def collect_by_type(obj_sequence, cache=None):
"""
collects objects from obj_sequence and stores them into buckets by
type. cache is an optional dict into which we collect the results.
"""
if cache is None:
cache = {}
for val in obj_sequence:
key = type(val)
bucket = cache.get(key, None)
if bucket is not None:
bucket.append(val)
else:
cache[key] = [val]
return cache
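# Illustrative example (not from the original source):
#     collect_by_typename([1, 2.0, 3])  ->  {"int": [1, 3], "float": [2.0]}
#     collect_by_type([1, 2.0, 3])      ->  {int: [1, 3], float: [2.0]}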
def iterate_by_type(objs, typelist):
"""
collects a sequence of objs into buckets by type, then re-emits
objs from the buckets, sorting through the buckets in the order
specified by typelist. Any objects of a type not specified in
typelist will be emitted last in no guaranteed order (but still
grouped by type).
"""
cache = collect_by_type(objs)
for t in typelist:
for val in cache.pop(t, tuple()):
yield val
for tl in cache.values():
for val in tl:
yield val
def yield_sorted_by_type(*typelist):
"""
a useful decorator for the collect_impl method of SuperChange
subclasses. Caches the yielded changes, and re-emits them
collected by their type. The order of the types can be specified
by listing the types as arguments to this decorator. Unlisted
types will be yielded last in no guaranteed order.
Grouping happens by exact type match only. Inheritance is not
taken into consideration for grouping.
"""
def decorate(fun):
@wraps(fun)
def decorated(*args, **kwds):
return iterate_by_type(fun(*args, **kwds), typelist)
return decorated
return decorate
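# Illustrative usage (not from the original source): a SuperChange subclass can decorate
# its collect_impl so that, for example, removals are always reported before additions.
#     class MyChange(SuperChange):
#         @yield_sorted_by_type(Removal, Addition)
#         def collect_impl(self):
#             ...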
class Change(object):
"""
Base class for representing a specific change between two objects
"""
label = "Change"
def __init__(self, ldata, rdata):
self.ldata = ldata
self.rdata = rdata
self.description = None
self.changed = False
self.entry = None
def __del__(self):
self.clear()
def clear(self):
self.ldata = None
self.rdata = None
self.description = None
self.changed = False
self.entry = None
def check(self):
pass
def get_ldata(self):
return self.ldata
def get_rdata(self):
return self.rdata
def is_change(self):
return self.changed
def is_ignored(self, options):
"""
is this change ignorable, given parameters on the options
object.
"""
return False
def get_description(self):
return self.description or \
(self.label + (" changed" if self.is_change() else " unchanged"))
def collect(self, force=False):
return tuple()
def simplify(self, options=None):
"""
returns a dict describing a simple snapshot of this change, and
its children if any.
"""
simple = {
"class": type(self).__name__,
"is_change": self.is_change(),
"description": self.get_description(),
"label": self.label,
}
if options:
simple["is_ignored"] = self.is_ignored(options)
if isinstance(self, Addition):
simple["is_addition"] = True
if isinstance(self, Removal):
simple["is_removal"] = True
if self.entry:
simple["entry"] = self.entry
return simple
class Removal(Change):
"""
A type of change indicating that something was removed
"""
label = "Removal"
def is_change(self):
return True
class Addition(Change):
"""
A type of change indicating that something was added
"""
label = "Addition"
def is_change(self):
return True
class GenericChange(Change):
"""
A generalized test for a single change on two objects: a left and
a right. Subclasses should override the label and the check_impl
method at a minimum.
"""
label = "Generic Change"
def fn_data(self, side_data):
"""
Get the data to be used in fn_differ from side_data. By default,
this method is the identity
"""
return side_data
def fn_pretty(self, side_data):
"""
override to provide a way to show the pretty version of the left
or right data. Defaults to fn_data
"""
return self.fn_data(side_data)
def fn_pretty_desc(self, side_data):
"""
override to provide a way to describe the data left or right
data. Defaults to fn_pretty
"""
return self.fn_pretty(side_data)
def fn_differ(self, left_data, right_data):
"""
override to provide the check for whether get_ldata() and
get_rdata() differ. defaults to an inequality (!=) check
"""
return left_data != right_data
def get_ldata(self):
"""
returns fn_data of ldata
"""
return self.fn_data(self.ldata)
def get_rdata(self):
"""
returns fn_data of rdata
"""
return self.fn_data(self.rdata)
def pretty_ldata(self):
"""
returns fn_pretty of ldata (NOT the fn_pretty of get_ldata)
"""
return self.fn_pretty(self.ldata)
def pretty_rdata(self):
"""
returns fn_pretty of rdata (NOT the fn_pretty of get_rdata)
"""
return self.fn_pretty(self.rdata)
def pretty_ldata_desc(self):
"""
returns fn_pretty_desc of ldata (NOT the fn_pretty_desc of
get_ldata)
"""
return self.fn_pretty_desc(self.ldata)
def pretty_rdata_desc(self):
"""
returns fn_pretty_desc of rdata (NOT the fn_pretty_desc of
get_rdata)
"""
return self.fn_pretty_desc(self.rdata)
def check_impl(self):
"""
returns a tuple of (is_change,description) which are then stored
in self.changed and self.description
The default implementation will get the data from the left and
right sides by calling self.fn_data, then compare them via
self.fn_differ. If they do differ, a message will be
constructed using self.fn_pretty to create human-readable
versions of the data that changed.
"""
if self.fn_differ(self.get_ldata(), self.get_rdata()):
left = self.pretty_ldata_desc()
right = self.pretty_rdata_desc()
msg = "%s changed: %s to %s" % (self.label, left, right)
return True, msg
else:
return False, None
def check(self):
"""
if necessary, override check_impl to change the behaviour of
subclasses of GenericChange.
"""
self.changed, self.description = self.check_impl()
def simplify(self, options=None):
"""
provide a simple representation of this change as a dictionary
"""
# TODO: we might want to get rid of this method and just move
# it into the JSONEncoder in report.py
simple = super(GenericChange, self).simplify(options)
ld = self.pretty_ldata()
if ld is not None:
simple["old_data"] = ld
rd = self.pretty_rdata()
if rd is not None:
simple["new_data"] = rd
return simple
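# Illustrative subclass (not part of the original module): compare a single attribute of
# two objects by overriding fn_data and the label; old_obj and new_obj are placeholders.
#     class NameChange(GenericChange):
#         label = "Name"
#         def fn_data(self, side_data):
#             return getattr(side_data, "name", None)
#     nc = NameChange(old_obj, new_obj)
#     nc.check()
#     print(nc.is_change(), nc.get_description())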
class SuperChange(GenericChange):
"""
A collection of changes.
For simplest use, override the change_types class field with a
list of Change subclasses. When the default collect_impl is called
from collect, an instance of each type will be created with the
same left and right data as the SuperChange instance was created
with. The check_impl (called from check) will iterate over the
instances and call their check method in-turn.
An instance of SuperChange is considered unchanged if all of its
sub-changes are also unchanged (or if there were no sub-changes).
An instance of SuperChange is considered ignored if it was a
change and all of its changed children were also ignored.
"""
label = "Super Change"
# override with change classes
change_types = tuple()
def __init__(self, ldata, rdata):
super(SuperChange, self).__init__(ldata, rdata)
self.changes = tuple()
def fn_pretty(self, c):
return None
def clear(self):
"""
clears all child changes and drops the reference to them
"""
super(SuperChange, self).clear()
for c in self.changes:
c.clear()
self.changes = tuple()
def collect_impl(self):
"""
        instantiates each of the entries in the overridden change_types
        field with the left and right data
"""
ldata = self.get_ldata()
rdata = self.get_rdata()
for change_type in self.change_types:
yield change_type(ldata, rdata)
def collect(self, force=False):
"""
calls collect_impl and stores the results as the child changes of
this super-change. Returns a tuple of the data generated from
collect_impl. Caches the result rather than re-computing each
time, unless force is True
"""
if force or not self.changes:
self.changes = tuple(self.collect_impl())
return self.changes
def check_impl(self):
"""
sets self.changes to the result of self.changes_impl, then if any
member of those checks shows as a change, will return
True,None
"""
c = False
for change in self.collect():
change.check()
c = c or change.is_change()
return c, None
def is_ignored(self, options):
"""
If we have changed children and all the children which are changes
are ignored, then we are ignored. Otherwise, we are not
ignored
"""
if not self.is_change():
return False
changes = self.collect()
if not changes:
return False
for change in changes:
if change.is_change() and not change.is_ignored(options):
return False
return True
def simplify(self, options=None):
"""
generate a simple dict representing this change data, and
collecting all of the sub-change instances (which are NOT
immediately simplified themselves)
"""
data = super(SuperChange, self).simplify(options)
show_ignored = False
show_unchanged = False
if options:
show_ignored = getattr(options, "show_ignored", show_ignored)
show_unchanged = getattr(options, "show_unchanged", show_unchanged)
# build a list of sub-changes honoring show-ignored and
# show-unchanged
subs = list()
for s in self.collect():
if s.is_change():
if show_ignored or not s.is_ignored(options):
subs.append(s)
elif show_unchanged:
subs.append(s)
data["children"] = subs
return data
def squash_children(self, options):
"""
reduces the memory footprint of this super-change by converting
all child changes into squashed changes
"""
oldsubs = self.collect()
self.changes = tuple(squash(c, options=options) for c in oldsubs)
for change in oldsubs:
change.clear()
class SquashedChange(Change):
"""
For when you want to keep just the overall data from a change,
including whether it was ignored, but want to discard the more
in-depth information.
"""
label = "SquashedChange"
def __init__(self, change, is_ignored=False):
super(SquashedChange, self).__init__(None, None)
self.label = change.label
self.description = change.get_description()
self.changed = change.is_change()
self.ignored = is_ignored
self.origclass = type(change)
self.entry = getattr(change, "entry", None)
def is_ignored(self, options):
return self.ignored
def is_change(self):
return self.changed
def simplify(self, options=None):
simple = super(SquashedChange, self).simplify(options)
simple["original_class"] = self.origclass.__name__
return simple
def clear(self):
pass
class SquashedRemoval(SquashedChange, Removal):
"""
Squashed change indicating something was removed
"""
label = "SquashedRemoval"
class SquashedAddition(SquashedChange, Addition):
"""
Squashed change indicating something was added
"""
label = "SquashedAddition"
def squash(change, is_ignored=False, options=None):
"""
squashes the in-depth information of a change to a simplified (and
less memory-intensive) form
"""
if options:
is_ignored = change.is_ignored(options)
result = None
if isinstance(change, Removal):
result = SquashedRemoval(change, is_ignored)
elif isinstance(change, Addition):
result = SquashedAddition(change, is_ignored)
else:
result = SquashedChange(change, is_ignored)
return result
#
# The end.
| 2.3125
| 2
|
pywatts/callbacks/debug_callback.py
|
zyxsachin/pyWATTS
| 30
|
12783333
|
<gh_stars>10-100
import xarray as xr
from typing import Dict, Optional
from pywatts.callbacks.base_callback import BaseCallback
class PrintCallback(BaseCallback):
"""
Print callback class to print out result data into terminal for debugging.
:param BaseCallback: Base callback class.
:type BaseCallback: BaseCallback
"""
    def __call__(self, data_dict: Dict[str, xr.DataArray]):
"""
Implementation of abstract base method to print out
pipeline result data into terminal.
:param data_dict: Dict of DataArrays that should be printed out into terminal.
:type data_dict: Dict[str, xr.DataArray]
"""
        # NOTE: printing the data as pandas objects makes the output easier to read.
print("\n# Print Callback")
for key in data_dict:
print(f"## {key}")
print(data_dict[key].to_pandas())
class StatisticCallback(BaseCallback):
"""
Statistic callback class to print out statistical information about the results
into terminal for better understanding and debugging.
:param BaseCallback: Base callback class.
:type BaseCallback: BaseCallback
"""
def __init__(self, prefix: str, use_filemanager: Optional[bool] = None):
"""
Initialise Statistical callback object given a filename and
optional use_filemanager flag.
:param prefix: Prefix to use for the line plot output file.
:type prefix: str
:param use_filemanager: Flag to denote if the filemanager of the pipeline should be used.
:type use_filemanager: Optional[bool]
"""
if use_filemanager is None:
# use base class default if use_filemanager is not set
super().__init__()
else:
super().__init__(use_filemanager)
self.prefix = prefix
def __call__(self, data_dict: Dict[str, xr.DataArray]):
"""
Implementation of abstract base method to print out
pipeline statistical information of step results into terminal.
:param data_dict: Dict of DataArrays that statistical information should be printed out.
:type data_dict: Dict[str, xr.DataArray]
"""
result_string = "\n# Statistical Callback\n"
print("\n# Statistical Callback")
for key in data_dict:
print(f"## {key}")
print(data_dict[key].to_pandas().describe())
result_string +=f"## {key}\n {data_dict[key].to_pandas().describe()} \n"
with open(self.get_path(f"{self.prefix}_Statistics.md"), "w") as file:
file.write(result_string)
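# Illustrative direct invocation (not part of the original module): the callback takes a
# dict of DataArrays; writing <prefix>_Statistics.md assumes the base callback can resolve
# an output path (for example via the pipeline's filemanager).
#     cb = StatisticCallback(prefix="debug")
#     cb({"result": xr.DataArray([1.0, 2.0, 3.0])})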
| 2.96875
| 3
|
#074 - Maior e menor valores em Tupla.py
|
EronBruce/Exercicios_Python
| 0
|
12783334
|
<reponame>EronBruce/Exercicios_Python<gh_stars>0
from random import randint
numeros = (randint(0,10),randint(0,10),randint(0,10),randint(0,10),randint(0,10))
print('Os valores sorteados foram: ', end='')
for n in numeros:
print(f'{n} ', end='')
print('\nO maior valor sorteado foi {}'.format(max(numeros)))
print('O menor valor sorteado foi {}'.format(min(numeros)))
| 3.6875
| 4
|
redis/_compat.py
|
yujinrobot/redis-py
| 0
|
12783335
|
<reponame>yujinrobot/redis-py
"""Internal module for Python 2 backwards compatibility."""
import sys
if sys.version_info[0] < 3:
from urlparse import urlparse
from itertools import imap, izip
from string import letters as ascii_letters
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
iteritems = lambda x: x.iteritems()
dictkeys = lambda x: x.keys()
dictvalues = lambda x: x.values()
nativestr = lambda x: \
x if isinstance(x, str) else x.encode('utf-8', 'replace')
u = lambda x: x.decode()
b = lambda x: x
next = lambda x: x.next()
byte_to_chr = lambda x: x
unichr = unichr
xrange = xrange
basestring = basestring
unicode = unicode
bytes = str
long = long
else:
from urllib.parse import urlparse
from io import BytesIO
from string import ascii_letters
iteritems = lambda x: x.items()
dictkeys = lambda x: list(x.keys())
dictvalues = lambda x: list(x.values())
byte_to_chr = lambda x: chr(x)
nativestr = lambda x: \
x if isinstance(x, str) else x.decode('utf-8', 'replace')
u = lambda x: x
b = lambda x: x.encode('iso-8859-1')
next = next
unichr = chr
imap = map
izip = zip
xrange = range
basestring = str
unicode = str
bytes = bytes
long = int
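# Illustrative example (not part of the original module): the aliases give one spelling
# that works on both interpreters.
#     nativestr(b"abc")          # -> "abc" on both Python 2 and Python 3
#     list(iteritems({"a": 1}))  # -> [("a", 1)] regardless of interpreter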
| 2.515625
| 3
|
model.py
|
RistoranteRist/FastFlow
| 0
|
12783336
|
<gh_stars>0
import warnings  # used by OwnGraphINN.forward for the deprecated x= argument
import torch
import torch.nn as nn
import numpy as np
import FrEIA.modules as Fm
import FrEIA.framework as Ff
from FrEIA.framework import *
from FrEIA.framework import topological_order
from typing import List, Tuple, Iterable, Union, Optional
from torch import Tensor
from FrEIA.modules.base import InvertibleModule
# Hidden channel count used by the coupling-layer CNN subnets
HIDDEN_SIZE = 128
class Identity(nn.Module):
def __init__(self, return_value=None):
super(Identity, self).__init__()
self.return_value = return_value
def forward(self, x, *args, **kwargs):
return x
class FeatureExtractor:
def __init__(self, backbone):
self.clear()
self.bb = backbone
def __call__(self, module, module_in, module_out):
self.saved_feature = module_out.detach()
if self.bb == "deit_base_distilled_patch16_384":
self.saved_feature = self.saved_feature[:, 2:]
def clear(self):
self.saved_feature = None
class OwnGraphINN(InvertibleModule):
def __init__(self, node_list, force_tuple_output=False, verbose=False):
# Gather lists of input, output and condition nodes
in_nodes = [node_list[i] for i in range(len(node_list))
if isinstance(node_list[i], InputNode)]
out_nodes = [node_list[i] for i in range(len(node_list))
if isinstance(node_list[i], OutputNode)]
condition_nodes = [node_list[i] for i in range(len(node_list)) if
isinstance(node_list[i], ConditionNode)]
# Check that all nodes are in the list
for node in node_list:
for in_node, idx in node.inputs:
if in_node not in node_list:
raise ValueError(f"{node} gets input from {in_node}, "
f"but the latter is not in the node_list "
f"passed to GraphINN.")
for out_node, idx in node.outputs:
if out_node not in node_list:
                    raise ValueError(f"{out_node} receives output from {node}, "
                                     f"but is not in the node_list "
                                     f"passed to GraphINN.")
# Build the graph and tell nodes about their dimensions so that they can
# build the modules
node_list = topological_order(node_list, in_nodes, out_nodes)
global_in_shapes = [node.output_dims[0] for node in in_nodes]
global_out_shapes = [node.input_dims[0] for node in out_nodes]
global_cond_shapes = [node.output_dims[0] for node in condition_nodes]
# Only now we can set out shapes
super().__init__(global_in_shapes, global_cond_shapes)
self.node_list = node_list
# Now we can store everything -- before calling super constructor,
# nn.Module doesn't allow assigning anything
self.in_nodes = in_nodes
self.condition_nodes = condition_nodes
self.out_nodes = out_nodes
self.global_out_shapes = global_out_shapes
self.force_tuple_output = force_tuple_output
self.module_list = nn.ModuleList([n.module for n in node_list
if n.module is not None])
if verbose:
print(self)
def output_dims(self, input_dims: List[Tuple[int]]) -> List[Tuple[int]]:
if len(self.global_out_shapes) == 1 and not self.force_tuple_output:
raise ValueError("You can only call output_dims on a "
"GraphINN with more than one output "
"or when setting force_tuple_output=True.")
return self.global_out_shapes
def forward(self, x_or_z: Union[Tensor, Iterable[Tensor]],
c: Iterable[Tensor] = None, rev: bool = False, jac: bool = True,
intermediate_outputs: bool = False, x: None = None) \
-> Tuple[Tuple[Tensor], Tensor]:
"""
Forward or backward computation of the whole net.
"""
if x is not None:
x_or_z = x
warnings.warn("You called GraphINN(x=...). x is now called x_or_z, "
"please pass input as positional argument.")
if torch.is_tensor(x_or_z):
x_or_z = x_or_z,
if torch.is_tensor(c):
c = c,
jacobian = torch.zeros((x_or_z[0].shape[0], 1, *x_or_z[0].shape[2:])).to(x_or_z[0])
outs = {}
jacobian_dict = {} if jac else None
# Explicitly set conditions and starts
start_nodes = self.out_nodes if rev else self.in_nodes
if len(x_or_z) != len(start_nodes):
raise ValueError(f"Got {len(x_or_z)} inputs, but expected "
f"{len(start_nodes)}.")
for tensor, start_node in zip(x_or_z, start_nodes):
outs[start_node, 0] = tensor
if c is None:
c = []
if len(c) != len(self.condition_nodes):
raise ValueError(f"Got {len(c)} conditions, but expected "
f"{len(self.condition_nodes)}.")
for tensor, condition_node in zip(c, self.condition_nodes):
outs[condition_node, 0] = tensor
# Go backwards through nodes if rev=True
for node in self.node_list[::-1 if rev else 1]:
# Skip all special nodes
if node in self.in_nodes + self.out_nodes + self.condition_nodes:
continue
has_condition = len(node.conditions) > 0
mod_in = []
mod_c = []
for prev_node, channel in (node.outputs if rev else node.inputs):
mod_in.append(outs[prev_node, channel])
for cond_node in node.conditions:
mod_c.append(outs[cond_node, 0])
mod_in = tuple(mod_in)
mod_c = tuple(mod_c)
try:
if has_condition:
mod_out = node.module(mod_in, c=mod_c, rev=rev, jac=jac)
else:
mod_out = node.module(mod_in, rev=rev, jac=jac)
except Exception as e:
raise RuntimeError(f"{node} encountered an error.") from e
out, mod_jac = self._check_output(node, mod_out, jac, rev)
for out_idx, out_value in enumerate(out):
outs[node, out_idx] = out_value
if jac:
jacobian = jacobian + mod_jac
jacobian_dict[node] = mod_jac
for out_node in (self.in_nodes if rev else self.out_nodes):
# This copies the one input of the out node
outs[out_node, 0] = outs[(out_node.outputs if rev
else out_node.inputs)[0]]
if intermediate_outputs:
return outs, jacobian_dict
else:
out_list = [outs[out_node, 0] for out_node
in (self.in_nodes if rev else self.out_nodes)]
if len(out_list) == 1 and not self.force_tuple_output:
return out_list[0], jacobian
else:
return tuple(out_list), jacobian
def _check_output(self, node, mod_out, jac, rev):
if torch.is_tensor(mod_out):
raise ValueError(
f"The node {node}'s module returned a tensor only. This "
f"is deprecated without fallback. Please follow the "
f"signature of InvertibleOperator#forward in your module "
f"if you want to use it in a GraphINN.")
if len(mod_out) != 2:
raise ValueError(
f"The node {node}'s module returned a tuple of length "
f"{len(mod_out)}, but should return a tuple `z_or_x, jac`.")
out, mod_jac = mod_out
if torch.is_tensor(out):
raise ValueError(f"The node {node}'s module returns a tensor. "
f"This is deprecated.")
if len(out) != len(node.inputs if rev else node.outputs):
raise ValueError(
f"The node {node}'s module returned {len(out)} output "
f"variables, but should return "
f"{len(node.inputs if rev else node.outputs)}.")
if not torch.is_tensor(mod_jac):
if isinstance(mod_jac, (float, int)):
mod_jac = torch.zeros((out[0].shape[0], 1, *out[0].shape[2:])).to(out[0].device) \
+ mod_jac
elif jac:
raise ValueError(
f"The node {node}'s module returned a non-tensor as "
f"Jacobian: {mod_jac}")
elif not jac and mod_jac is not None:
raise ValueError(
f"The node {node}'s module returned neither None nor a "
f"Jacobian: {mod_jac}")
return out, mod_jac
def log_jacobian_numerical(self, x, c=None, rev=False, h=1e-04):
"""
Approximate log Jacobian determinant via finite differences.
"""
if isinstance(x, (list, tuple)):
batch_size = x[0].shape[0]
ndim_x_separate = [np.prod(x_i.shape[1:]) for x_i in x]
ndim_x_total = sum(ndim_x_separate)
x_flat = torch.cat([x_i.view(batch_size, -1) for x_i in x], dim=1)
else:
batch_size = x.shape[0]
ndim_x_total = np.prod(x.shape[1:])
x_flat = x.reshape(batch_size, -1)
J_num = torch.zeros(batch_size, ndim_x_total, ndim_x_total)
for i in range(ndim_x_total):
offset = x[0].new_zeros(batch_size, ndim_x_total)
offset[:, i] = h
if isinstance(x, (list, tuple)):
x_upper = torch.split(x_flat + offset, ndim_x_separate, dim=1)
x_upper = [x_upper[i].view(*x[i].shape) for i in range(len(x))]
x_lower = torch.split(x_flat - offset, ndim_x_separate, dim=1)
x_lower = [x_lower[i].view(*x[i].shape) for i in range(len(x))]
else:
x_upper = (x_flat + offset).view(*x.shape)
x_lower = (x_flat - offset).view(*x.shape)
y_upper, _ = self.forward(x_upper, c=c, rev=rev, jac=False)
y_lower, _ = self.forward(x_lower, c=c, rev=rev, jac=False)
if isinstance(y_upper, (list, tuple)):
y_upper = torch.cat(
[y_i.view(batch_size, -1) for y_i in y_upper], dim=1)
y_lower = torch.cat(
[y_i.view(batch_size, -1) for y_i in y_lower], dim=1)
J_num[:, :, i] = (y_upper - y_lower).view(batch_size, -1) / (2 * h)
logdet_num = x[0].new_zeros(batch_size)
for i in range(batch_size):
logdet_num[i] = torch.slogdet(J_num[i])[1]
return logdet_num
def get_node_by_name(self, name) -> Optional[Node]:
"""
Return the first node in the graph with the provided name.
"""
for node in self.node_list:
if node.name == name:
return node
return None
def get_module_by_name(self, name) -> Optional[nn.Module]:
"""
Return module of the first node in the graph with the provided name.
"""
node = self.get_node_by_name(name)
try:
return node.module
except AttributeError:
return None
class OwnActNorm(InvertibleModule):
def __init__(self, dims_in, dims_c=None, init_data=None):
super().__init__(dims_in, dims_c)
self.dims_in = dims_in[0]
param_dims = [1, self.dims_in[0]] + [1 for i in range(len(self.dims_in) - 1)]
self.scale = nn.Parameter(torch.zeros(*param_dims))
self.bias = nn.Parameter(torch.zeros(*param_dims))
if init_data:
self.initialize_with_data(init_data)
else:
self.init_on_next_batch = True
def on_load_state_dict(*args):
# when this module is loading state dict, we SHOULDN'T init with data,
# because that will reset the trained parameters. Registering a hook
            # that disables this initialisation.
self.init_on_next_batch = False
self._register_load_state_dict_pre_hook(on_load_state_dict)
def initialize_with_data(self, data):
# Initialize to mean 0 and std 1 with sample batch
# 'data' expected to be of shape (batch, channels[, ...])
assert all([data.shape[i+1] == self.dims_in[i] for i in range(len(self.dims_in))]),\
"Can't initialize ActNorm layer, provided data don't match input dimensions."
self.scale.data.view(-1)[:] \
= torch.log(1 / data.transpose(0,1).contiguous().view(self.dims_in[0], -1).std(dim=-1))
data = data * self.scale.exp()
self.bias.data.view(-1)[:] \
= -data.transpose(0,1).contiguous().view(self.dims_in[0], -1).mean(dim=-1)
self.init_on_next_batch = False
def forward(self, x, rev=False, jac=True):
if self.init_on_next_batch:
self.initialize_with_data(x[0])
#jac = (self.scale.sum() * np.prod(self.dims_in[1:])).repeat(x[0].shape[0])
jac = self.scale.sum(dim=1, keepdim=True).repeat(x[0].shape[0], 1, *self.dims_in[1:])
if rev:
jac = -jac
if not rev:
return [x[0] * self.scale.exp() + self.bias], jac
else:
return [(x[0] - self.bias) / self.scale.exp()], jac
def output_dims(self, input_dims):
assert len(input_dims) == 1, "Can only use 1 input"
return input_dims
class FastFlowBlock(Fm.coupling_layers.GLOWCouplingBlock):
def __init__(self, dims_in, dims_c=[], subnet_constructor=None, clamp=0.15, clamp_activation="ATAN"):
super().__init__(dims_in, dims_c=dims_c, subnet_constructor=subnet_constructor, clamp=clamp, clamp_activation=clamp_activation)
self.subnet1.apply(init_with_xavier)
self.subnet2.apply(init_with_xavier)
def _coupling1(self, x1, u2, rev=False):
a2 = self.subnet2(u2)
s2, t2 = a2[:, :self.split_len1], a2[:, self.split_len1:]
s2 = self.clamp * self.f_clamp(s2)
j1 = s2
if rev:
y1 = (x1 - t2) * torch.exp(-s2)
return y1, -j1
else:
y1 = torch.exp(s2) * x1 + t2
return y1, j1
def _coupling2(self, x2, u1, rev=False):
a1 = self.subnet1(u1)
s1, t1 = a1[:, :self.split_len2], a1[:, self.split_len2:]
s1 = self.clamp * self.f_clamp(s1)
j2 = s1
if rev:
y2 = (x2 - t1) * torch.exp(-s1)
return y2, -j2
else:
y2 = torch.exp(s1) * x2 + t1
return y2, j2
def subnet_conv_3x3(c_in, c_out):
return nn.Sequential(nn.Conv2d(c_in, HIDDEN_SIZE, 3, padding=1), nn.ReLU(),
nn.Conv2d(HIDDEN_SIZE, c_out, 3, padding=1))
def subnet_conv_1x1(c_in, c_out):
return nn.Sequential(nn.Conv2d(c_in, HIDDEN_SIZE, 1), nn.ReLU(),
nn.Conv2d(HIDDEN_SIZE, c_out, 1))
def init_with_xavier(module):
#gain = nn.init.calculate_gain('relu')
gain = 1/50.0
if isinstance(module, nn.Conv2d):
nn.init.xavier_uniform_(module.weight, gain=gain)
def init_last_conv_with_zeros(module):
if isinstance(module[-1], nn.Conv2d):
nn.init.zeros_(module[-1].weight)
nn.init.zeros_(module[-1].bias)
def build_fast_flow(clamp, clamp_activation, encoded_shape=(768, 28, 28)):
nodes = [Ff.InputNode(*encoded_shape, name='Input')]
for i in range(20):
nodes.append(Ff.Node(nodes[-1], OwnActNorm, {}, name='ActNorm'))
nodes.append(Ff.Node(nodes[-1], Fm.PermuteRandom, {}, name="ChannelPermute"))
if i % 2 == 0:
nodes.append(Ff.Node(
nodes[-1],
FastFlowBlock,
{
'subnet_constructor': subnet_conv_3x3,
'clamp': clamp,
'clamp_activation': clamp_activation
},
name='FastFlowStep_{}_3x3'.format(i)
))
else:
nodes.append(Ff.Node(
nodes[-1],
FastFlowBlock,
{
'subnet_constructor': subnet_conv_1x1,
'clamp': clamp,
'clamp_activation': clamp_activation
},
name='FastFlowStep_{}_1x1'.format(i)
))
nodes.append(Ff.OutputNode(nodes[-1], name='output'))
conv_inn = OwnGraphINN(nodes)
return conv_inn
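# Minimal usage sketch (not part of the original file; the batch size, clamp value and
# 768x28x28 feature shape are placeholders): forward() returns the transformed output and
# a log-Jacobian term; rev=True runs the inverse direction.
#     flow = build_fast_flow(clamp=1.9, clamp_activation="ATAN", encoded_shape=(768, 28, 28))
#     feats = torch.randn(2, 768, 28, 28)
#     z, log_jac = flow(feats)
#     feats_rec, _ = flow(z, rev=True)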
| 1.945313
| 2
|
Data_Science/Neural Networks from Scratch/P.1 Intro and Neuron Code.py
|
maledicente/cursos
| 1
|
12783337
|
import sys
import numpy as np
import matplotlib
"""print("Python:", sys.version)
print("Numpy", np.__version__)
print("Matplotlib", matplotlib.__version__)"""
inputs = [1.2,5.1,2.1]
weights = [3.1,2.1,8.7]
bias = 3
output = (inputs[0] * weights[0] +
inputs[1] * weights[1] +
inputs[2] * weights[2] + bias)
print(output)
| 2.984375
| 3
|
research/cv/retinanet_resnet152/train.py
|
mindspore-ai/models
| 77
|
12783338
|
<gh_stars>10-100
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Train retinanet and get checkpoint files."""
import os
import mindspore
import mindspore.nn as nn
from mindspore import context, Tensor
from mindspore.communication.management import init, get_rank, get_group_size
from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, LossMonitor, TimeMonitor, Callback
from mindspore.train import Model
from mindspore.context import ParallelMode
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.common import set_seed
from src.retinahead import retinanetWithLossCell, TrainingWrapper, retinahead
from src.backbone import resnet152
from src.model_utils.config import config
from src.model_utils.moxing_adapter import moxing_wrapper
from src.model_utils.device_adapter import get_device_num, get_device_id
from src.dataset import create_retinanet_dataset, create_mindrecord
from src.lr_schedule import get_lr
from src.init_params import init_net_param, filter_checkpoint_parameter
set_seed(1)
class Monitor(Callback):
"""
Monitor loss and time.
Args:
lr_init (numpy array): train lr
Returns:
None
Examples:
        >>> Monitor(lr_init=Tensor([0.05]*100).asnumpy())
"""
def __init__(self, lr_init=None):
super(Monitor, self).__init__()
self.lr_init = lr_init
self.lr_init_len = len(lr_init)
def step_end(self, run_context):
cb_params = run_context.original_args()
print("lr:[{:8.6f}]".format(self.lr_init[cb_params.cur_step_num-1]))
def modelarts_process():
config.save_checkpoint_path = os.path.join(config.output_path, str(get_device_id()), config.save_checkpoint_path)
if config.need_modelarts_dataset_unzip:
config.coco_root = os.path.join(config.coco_root, config.modelarts_dataset_unzip_name)
print(os.listdir(os.path.join(config.data_path, config.modelarts_dataset_unzip_name)))
@moxing_wrapper(pre_process=modelarts_process)
def train_retinanet_resnet152():
""" train_retinanet_resnet152 """
context.set_context(mode=context.GRAPH_MODE, device_target=config.run_platform)
if config.run_platform == "Ascend":
if config.distribute:
if os.getenv("DEVICE_ID", "not_set").isdigit():
context.set_context(device_id=int(os.getenv("DEVICE_ID")))
init()
device_num = get_device_num()
rank = get_rank()
context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
device_num=device_num)
else:
rank = 0
device_num = 1
context.set_context(device_id=get_device_id())
elif config.run_platform == "GPU":
rank = config.device_id
device_num = config.device_num
if config.distribute:
init()
rank = get_rank()
device_num = get_group_size()
context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
device_num=device_num)
else:
raise ValueError("Unsupported platform.")
mindrecord_file = create_mindrecord(config.dataset, "retina6402.mindrecord", True)
if not config.only_create_dataset:
loss_scale = float(config.loss_scale)
        # When creating the MindDataset, use the first mindrecord file, such as retinanet.mindrecord0.
dataset = create_retinanet_dataset(mindrecord_file, repeat_num=1,
batch_size=config.batch_size, device_num=device_num, rank=rank)
dataset_size = dataset.get_dataset_size()
print("Create dataset done!")
backbone = resnet152(config.num_classes)
retinanet = retinahead(backbone, config)
net = retinanetWithLossCell(retinanet, config)
net.to_float(mindspore.float32)
init_net_param(net)
if config.pre_trained:
if config.pre_trained_epoch_size <= 0:
raise KeyError("pre_trained_epoch_size must be greater than 0.")
param_dict = load_checkpoint(config.pre_trained)
if config.filter_weight:
filter_checkpoint_parameter(param_dict)
load_param_into_net(net, param_dict)
lr = Tensor(get_lr(global_step=config.global_step,
lr_init=config.lr_init, lr_end=config.lr_end_rate * config.lr, lr_max=config.lr,
warmup_epochs1=config.warmup_epochs1, warmup_epochs2=config.warmup_epochs2,
warmup_epochs3=config.warmup_epochs3, warmup_epochs4=config.warmup_epochs4,
warmup_epochs5=config.warmup_epochs5, total_epochs=config.epoch_size,
steps_per_epoch=dataset_size))
opt = nn.Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr,
config.momentum, config.weight_decay, loss_scale)
net = TrainingWrapper(net, opt, loss_scale)
model = Model(net)
print("Start train retinanet, the first epoch will be slower because of the graph compilation.")
cb = [TimeMonitor(), LossMonitor()]
cb += [Monitor(lr_init=lr.asnumpy())]
config_ck = CheckpointConfig(save_checkpoint_steps=dataset_size * config.save_checkpoint_epochs,
keep_checkpoint_max=config.keep_checkpoint_max)
ckpt_cb = ModelCheckpoint(prefix="retinanet", directory=config.save_checkpoint_path, config=config_ck)
if config.distribute:
if rank == 0:
cb += [ckpt_cb]
model.train(config.epoch_size, dataset, callbacks=cb, dataset_sink_mode=True)
else:
cb += [ckpt_cb]
model.train(config.epoch_size, dataset, callbacks=cb, dataset_sink_mode=True)
if __name__ == '__main__':
train_retinanet_resnet152()
| 1.453125
| 1
|
Physics250-ME30/energydensityofwire.py
|
illusion173/Physics250
| 0
|
12783339
|
<filename>Physics250-ME30/energydensityofwire.py
import numpy as np
import math
# mu_0, the permeability of free space, in T*m/A
extraNumber = 4 * math.pi * pow(10,-7)
def energydensityofWire():
    length = float(input('Input length: '))  # distance from the wire, assumed to be in cm
    amps = float(input('Input Amps: '))
    length = length/100  # convert cm to m
    part1 = (extraNumber * amps)/(2*math.pi*length)  # B = mu_0*I / (2*pi*r)
    part2 = ((pow(part1,2))/(2*extraNumber)) * pow(10,6)  # u = B^2 / (2*mu_0), scaled to micro-J/m^3
    print(part2)
energydensityofWire()
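# Worked example (illustrative, not part of the original file): at 2.0 cm from a wire
# carrying 5.0 A, B = mu_0*I/(2*pi*r) is about 5.0e-5 T, so u = B^2/(2*mu_0) is about
# 1.0e-3 J/m^3; the function prints roughly 995 because of the 1e6 (micro-J/m^3) scaling.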
| 3.53125
| 4
|
pixelpuncher/game/models.py
|
ej2/pixelpuncher
| 0
|
12783340
|
from django.db import models
from django_extensions.db import fields
from pixelpuncher.player.models import Player
class GameState(object):
PLAY = 'PLAY'
GAME_OVER = 'OVER'
CLOSED = 'CLSD'
STATE = (
('PLAY', 'Playing',),
('OVER', 'Game Over',),
('CLSD', 'Closed',),
)
class CardType(object):
BOMB = 'B'
ONE = '1'
TWO = '2'
FOUR = '4'
EIGHT = '8'
SPECIAL = 'S'
CARDS = (
('B', 'Bomb'),
('1', 'One'),
('2', 'Two'),
('4', 'Four'),
('8', 'Eight'),
('S', 'Special'),
)
class GameMessage(models.Model):
player = models.ForeignKey(Player)
message = models.TextField()
shown = models.BooleanField(default=False)
date_created = fields.CreationDateTimeField(editable=True)
def __unicode__(self):
return self.message
class MatchGame(models.Model):
player = models.ForeignKey(Player)
date_created = fields.CreationDateTimeField(editable=True)
state = models.CharField(max_length=4, choices=GameState.STATE, default='PLAY')
points = models.IntegerField(default=0)
multiplier = models.IntegerField(default=1)
class MatchCard(models.Model):
game = models.ForeignKey(MatchGame, related_name='cards')
card_type = models.CharField(max_length=1, choices=CardType.CARDS)
flipped = models.BooleanField(default=False)
position = models.IntegerField(default=0)
@property
def image(self):
if self.flipped:
return "images/cards/{}.png".format(self.card_type)
else:
return "images/cards/card_back.png"
class CheatCode(models.Model):
code = models.CharField(max_length=64, unique=True)
menu_text = models.CharField(max_length=32)
cheat_class = models.CharField(max_length=128)
description = models.TextField(null=True, blank=True)
date_created = fields.CreationDateTimeField(editable=True)
players = models.ManyToManyField(Player, related_name="cheatcodes", blank=True)
admin_only = models.BooleanField(default=False)
show_on_menu = models.BooleanField(default=True)
def __unicode__(self):
return self.code
| 2.171875
| 2
|
test/mx/test_mx_rolling.py
|
RingoIngo/gluon-ts
| 7
|
12783341
|
<filename>test/mx/test_mx_rolling.py
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
This example shows how to fit a model and evaluate its predictions.
"""
# third party imports
import pandas as pd
import pytest
# first party imports
from gluonts.dataset.common import ListDataset
from gluonts.dataset.field_names import FieldName
from gluonts.dataset.rolling_dataset import (
StepStrategy,
generate_rolling_dataset,
)
from gluonts.evaluation import Evaluator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.model.deepar import DeepAREstimator
from gluonts.mx.trainer import Trainer
def create_dynamic_dataset(
start: str, length: int, num_dynamic: int
) -> ListDataset:
"""Create a ListDataset with dynamic values equal to the target."""
return ListDataset(
[
{
FieldName.TARGET: list(range(length)),
FieldName.START: pd.Timestamp(start),
FieldName.FEAT_DYNAMIC_REAL: [list(range(length))]
* num_dynamic,
FieldName.FEAT_DYNAMIC_CAT: [list(range(length))]
* num_dynamic,
}
],
freq="D",
)
@pytest.mark.parametrize(
"train_length, test_length, prediction_length, target_start, rolling_start, num_dynamic_feat",
[
(10, 15, 2, "01-01-2019", "01-13-2019", 1),
(10, 15, 2, "01-01-2019", "01-11-2019", 2),
],
)
def test_dynamic_integration(
train_length: int,
test_length: int,
prediction_length: int,
target_start: str,
rolling_start: str,
num_dynamic_feat: int,
):
"""
Trains an estimator on a rolled dataset with dynamic features.
Tests https://github.com/awslabs/gluon-ts/issues/1390
"""
train_ds = create_dynamic_dataset(
target_start, train_length, num_dynamic_feat
)
rolled_ds = generate_rolling_dataset(
dataset=create_dynamic_dataset(
target_start, test_length, num_dynamic_feat
),
strategy=StepStrategy(prediction_length=prediction_length),
start_time=pd.Timestamp(rolling_start),
)
estimator = DeepAREstimator(
freq="D",
prediction_length=prediction_length,
context_length=2 * prediction_length,
use_feat_dynamic_real=True,
trainer=Trainer(epochs=1),
)
predictor = estimator.train(training_data=train_ds)
forecast_it, ts_it = make_evaluation_predictions(
rolled_ds, predictor=predictor, num_samples=100
)
training_agg_metrics, _ = Evaluator(num_workers=0)(ts_it, forecast_it)
# it should have failed by this point if the dynamic features were wrong
assert training_agg_metrics
| 2.359375
| 2
|
cottonformation/res/lookoutequipment.py
|
MacHu-GWU/cottonformation-project
| 5
|
12783342
|
# -*- coding: utf-8 -*-
"""
This module declares the AWS::LookoutEquipment resources and their properties.
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
#--- Resource declaration ---
@attr.s
class InferenceScheduler(Resource):
"""
AWS Object Type = "AWS::LookoutEquipment::InferenceScheduler"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutequipment-inferencescheduler.html
Property Document:
- ``rp_DataInputConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutequipment-inferencescheduler.html#cfn-lookoutequipment-inferencescheduler-datainputconfiguration
- ``rp_DataOutputConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutequipment-inferencescheduler.html#cfn-lookoutequipment-inferencescheduler-dataoutputconfiguration
- ``rp_DataUploadFrequency``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutequipment-inferencescheduler.html#cfn-lookoutequipment-inferencescheduler-datauploadfrequency
- ``rp_ModelName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutequipment-inferencescheduler.html#cfn-lookoutequipment-inferencescheduler-modelname
- ``rp_RoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutequipment-inferencescheduler.html#cfn-lookoutequipment-inferencescheduler-rolearn
- ``p_DataDelayOffsetInMinutes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutequipment-inferencescheduler.html#cfn-lookoutequipment-inferencescheduler-datadelayoffsetinminutes
- ``p_InferenceSchedulerName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutequipment-inferencescheduler.html#cfn-lookoutequipment-inferencescheduler-inferenceschedulername
- ``p_ServerSideKmsKeyId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutequipment-inferencescheduler.html#cfn-lookoutequipment-inferencescheduler-serversidekmskeyid
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutequipment-inferencescheduler.html#cfn-lookoutequipment-inferencescheduler-tags
"""
AWS_OBJECT_TYPE = "AWS::LookoutEquipment::InferenceScheduler"
rp_DataInputConfiguration: dict = attr.ib(
default=None,
validator=attr.validators.instance_of(dict),
metadata={AttrMeta.PROPERTY_NAME: "DataInputConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutequipment-inferencescheduler.html#cfn-lookoutequipment-inferencescheduler-datainputconfiguration"""
rp_DataOutputConfiguration: dict = attr.ib(
default=None,
validator=attr.validators.instance_of(dict),
metadata={AttrMeta.PROPERTY_NAME: "DataOutputConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutequipment-inferencescheduler.html#cfn-lookoutequipment-inferencescheduler-dataoutputconfiguration"""
rp_DataUploadFrequency: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "DataUploadFrequency"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutequipment-inferencescheduler.html#cfn-lookoutequipment-inferencescheduler-datauploadfrequency"""
rp_ModelName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ModelName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutequipment-inferencescheduler.html#cfn-lookoutequipment-inferencescheduler-modelname"""
rp_RoleArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "RoleArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutequipment-inferencescheduler.html#cfn-lookoutequipment-inferencescheduler-rolearn"""
p_DataDelayOffsetInMinutes: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "DataDelayOffsetInMinutes"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutequipment-inferencescheduler.html#cfn-lookoutequipment-inferencescheduler-datadelayoffsetinminutes"""
p_InferenceSchedulerName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "InferenceSchedulerName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutequipment-inferencescheduler.html#cfn-lookoutequipment-inferencescheduler-inferenceschedulername"""
p_ServerSideKmsKeyId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ServerSideKmsKeyId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutequipment-inferencescheduler.html#cfn-lookoutequipment-inferencescheduler-serversidekmskeyid"""
p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib(
default=None,
converter=Tag.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutequipment-inferencescheduler.html#cfn-lookoutequipment-inferencescheduler-tags"""
@property
def rv_InferenceSchedulerArn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutequipment-inferencescheduler.html#aws-resource-lookoutequipment-inferencescheduler-return-values"""
return GetAtt(resource=self, attr_name="InferenceSchedulerArn")
| 2.078125
| 2
|
firmware_password_manager.py
|
jwrn3/firmware_password_manager
| 1
|
12783343
|
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
"""
Firmware Password Manager: helps Macintosh administrators manage the firmware passwords of their computers.
"""
# Copyright (c) 2020 University of Utah Student Computing Labs. ################
# All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appears in all copies and
# that both that copyright notice and this permission notice appear
# in supporting documentation, and that the name of The University
# of Utah not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission. This software is supplied as is without expressed or
# implied warranties of any kind.
################################################################################
# firmware_password_manager.py #################################################
#
# A Python script to help Macintosh administrators manage the firmware passwords
# of their computers.
#
#
# 2.0.0 2015.11.05 Initial python rewrite. tjm
#
# 2.1.0 2016.03.07 "Now with spinning rims"
# bug fixes, obfuscation features,
# additional tools and examples. tjm
#
# 2.1.1 2016.03.16 slack identifier customization,
# logic clarifications. tjm
#
# 2.1.2 2016.03.16 cleaned up argparse. tjm
#
# 2.1.3 2016.04.04 remove obsolete flag logic. tjm
#
# 2.1.4 2017.10.23 using rm -P for secure delete,
# added additional alerting, additional pylint cleanup. tjm
#
# 2.5.0 2017.11.14 removed flags, uses configuration file,
# reintroduced setregproptool functionality,
# removed management_tools, ported to
#                      python3, added testing functionality. tjm
#
# 2.5.0 2020.01.23 2.5 actually finished and committed. tjm
#
#
#
# keyfile format:
#
# | comment:passwords <-- comments are ignored, except for new.
# | new:newpassword <-- the new password to be installed.
#
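# example keyfile (hypothetical values, illustrative only):
#
# | spring2019:oldPassword19      <-- a previously deployed password
# | new:fall2020Password          <-- the password FWPM will set on this run
#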
################################################################################
# notes: #######################################################################
#
# ./firmware_password_manager_cfg_v2.5b3.py -c private.INI -t
#
#
# sudo pyinstaller --onefile firmware_password_manager.py
#
#
#
################################################################################
# external tool documentation ##################################################
#
# firmwarepasswd v 1.0
# Copyright (C) 2014 Apple Inc. All Rights Reserved.
#
#
# Usage: firmwarepasswd [OPTION]
#
# ? Show usage
# -h Show usage
#   -setpasswd       Set a firmware password. You will be prompted for passwords as needed.
# NOTE: if this is the first password set, and no mode is
# in place, the mode will automatically be set to "command"
# -setmode [mode] Set mode to:
# "command" - password required to change boot disk
# "full" - password required on all startups
# NOTE: cannot set a mode without having set a password
# -mode Prints out the current mode setting
# -check Prints out whether there is / isn't a firmware password is set
# -delete Delete current firmware password and mode setting
# -verify Verify current firmware password
# -unlockseed Generates a firmware password recovery key
# NOTE: Machine must be stable for this command to generate
# a valid seed. No pending changes that need a restart.
# NOTE: Seed is only valid until the next time a firmware password
# command occurs.
#
#
#
# setregproptool v 2.0 (9) Aug 24 2013
# Copyright (C) 2001-2010 Apple Inc.
# All Rights Reserved.
#
# Usage: setregproptool [-c] [-d [-o <old password>]] [[-m <mode> -p <password>] -o <old password>]
#
# -c Check whether password is enabled.
# Sets return status of 0 if set, 1 otherwise.
# -d Delete current password/mode.
# Requires current password on some machines.
# -p Set password.
# Requires current password on some machines.
# -m Set security mode.
# Requires current password on some machines.
# Mode can be either "full" or "command".
# Full mode requires entry of the password on
# every boot, command mode only requires entry
# of the password if the boot picker is invoked
# to select a different boot device.
#
# When enabling the Firmware Password for the first
# time, both the password and mode must be provided.
# Once the firmware password has been enabled, providing
# the mode or password alone will change that parameter
# only.
#
# -o Old password.
# Only required on certain machines to disable
# or change password or mode. Optional, if not
# provided the tool will prompt for the password.
#
################################################################################
#
# imports
from argparse import RawTextHelpFormatter
import argparse
import base64
import configparser
import hashlib
import inspect
import json
import logging
import os
import platform
import plistlib
import re
import socket
import subprocess
import sys
import pexpect
import requests
class FWPM_Object(object):
"""
    Drives the firmware password management workflow: configuration, keyfile, firmware tool interaction, nvram hash and Slack reporting.
"""
def __init__(self, args, logger, master_version):
"""
        Record arguments, detect the available firmware tools, ingest the configuration and keyfile, then run master_control().
"""
self.args = args
self.logger = logger
self.master_version = master_version
self.srp_path = None
self.fwpwd_path = None
self.config_options = {}
self.local_identifier = None
self.passwords_raw = None
self.fwpw_managed_string = None
self.new_password = None
self.other_password_list = []
self.current_fwpw_state = False
self.current_fwpm_hash = None
self.clean_exit = False
self.read_config = False
self.read_keyfile = False
self.modify_fwpw = False
self.modify_nvram = False
self.matching_hashes = False
self.matching_passwords = False
self.configuration_path = None
self.system_version = platform.mac_ver()[0].split(".")
self.srp_check()
self.fwpwd_check()
if self.fwpwd_path:
self.current_fwpw_state = self.fwpwd_current_state()
elif self.srp_path:
self.current_fwpw_state = self.srp_current_state()
self.injest_config()
if self.config_options["slack"]["use_slack"]:
self.slack_optionator()
self.injest_keyfile()
self.hash_current_state()
self.hash_incoming()
#
# What if the string isn't a hash?!?
if (self.current_fwpm_hash == self.fwpw_managed_string) and self.config_options["flags"]["management_string_type"] == 'hash':
self.matching_hashes = True
self.master_control()
def master_control(self):
"""
        Compare the stored and incoming hashes and, if they differ, change the firmware password and update nvram.
"""
if self.logger:
self.logger.info("%s: activated" % inspect.stack()[0][3])
if self.current_fwpm_hash == self.fwpw_managed_string:
if self.logger:
self.logger.info("Hashes match. No change required.")
else:
if self.logger:
self.logger.info("Hashes DO NOT match. Change required.")
if self.fwpwd_path:
self.fwpwd_change()
self.secure_delete()
elif self.srp_path:
self.srp_change()
self.secure_delete()
else:
print("No FW tool found.")
quit()
#
# nvram maintenance
#
self.nvram_manager()
#
# some kind of post action reporting.
# handle reboot flag here?
#
self.exit_manager()
def hash_current_state(self):
"""
        Read the existing fwpw-hash entry from nvram, if any.
"""
if self.logger:
self.logger.info("%s: activated" % inspect.stack()[0][3])
existing_keyfile_hash = None
if self.logger:
self.logger.info("Checking existing hash.")
        try:
            nvram_output = subprocess.check_output(["/usr/sbin/nvram", "-p"]).decode('utf-8')
            for item in nvram_output.split('\n'):
                if "fwpw-hash" in item:
                    existing_keyfile_hash = item
                    break
            if existing_keyfile_hash is not None:
                self.current_fwpm_hash = existing_keyfile_hash.split("\t")[1]
            if self.args.testmode:
                print("Existing hash: %s" % self.current_fwpm_hash)
        except Exception:
            pass
def hash_incoming(self):
"""
        Build the management string (custom string or prefixed SHA-256 hash of the keyfile passwords) to store in nvram.
"""
if self.logger:
self.logger.info("%s: activated" % inspect.stack()[0][3])
if self.logger:
self.logger.info("Checking incoming hash.")
if self.config_options["flags"]["management_string_type"] == "custom":
#
# ?!?!?!?!?!?!?
#
self.fwpw_managed_string = self.config_options["flags"]["management_string_type"]
elif self.config_options["flags"]["management_string_type"] == "hash":
hashed_key = hashlib.new('sha256')
# hashed_key.update(self.passwords_raw.encode('utf-8'))
hashed_key.update(self.new_password.encode('utf-8'))
for entry in sorted(self.other_password_list):
hashed_key.update(entry.encode('utf-8'))
self.fwpw_managed_string = hashed_key.hexdigest()
# prepend '2:' to denote hash created with v2 of script, will force a password change from v1
self.fwpw_managed_string = '2:' + self.fwpw_managed_string
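            # the stored value is "2:" followed by the 64-character sha256 hex digest of the keyfile passwords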
else:
self.fwpw_managed_string = None
if self.args.testmode:
print("Incoming hash: %s" % self.fwpw_managed_string)
def secure_delete(self):
"""
attempts to securely delete the keyfile with medium overwrite and zeroing settings
"""
if self.logger:
self.logger.info("%s: activated" % inspect.stack()[0][3])
if self.logger:
self.logger.info("Deleting keyfile")
use_srm = bool(os.path.exists("/usr/bin/srm"))
if self.args.testmode:
if self.logger:
self.logger.info("Test mode, keyfile not deleted.")
return
if use_srm:
try:
subprocess.call(["/usr/bin/srm", "-mz", self.config_options["keyfile"]["path"]])
if self.logger:
self.logger.info("keyfile deleted successfuly.")
except Exception as exception_message:
if self.logger:
self.logger.critical("Issue with attempt to remove keyfile. %s" % exception_message)
else:
try:
deleted_keyfile = subprocess.call(["/bin/rm", "-Pf", self.config_options["keyfile"]["path"]])
print("return: %r" % deleted_keyfile)
if self.logger:
self.logger.info("keyfile deleted successfuly.")
except Exception as exception_message:
if self.logger:
self.logger.critical("Issue with attempt to remove keyfile. %s" % exception_message)
# is this really needed?
if os.path.exists(self.config_options["keyfile"]["path"]):
if self.logger:
self.logger.critical("Failure to remove keyfile.")
else:
if self.logger:
self.logger.info("Keyfile removed.")
return
def injest_config(self):
"""
attempts to consume and format configuration file
"""
# handle parsing errors in cfg?!?
# where to handle looking for cfg in specific locations?!?
if self.logger:
self.logger.info("%s: activated" % inspect.stack()[0][3])
try:
if os.path.exists(self.args.configfile):
# firmware_password_manager_cfg_v2.5b8.py:434: DeprecationWarning: The SafeConfigParser class has been renamed to ConfigParser in Python 3.2. This alias will be removed in future versions. Use ConfigParser directly instead.
config = configparser.ConfigParser(allow_no_value=True)
config.read(self.args.configfile)
self.config_options["flags"] = {}
self.config_options["keyfile"] = {}
self.config_options["logging"] = {}
self.config_options["slack"] = {}
self.config_options["os"] = {}
self.config_options["fwpm"] = {}
for section in ["flags", "keyfile", "logging", "slack"]:
for item in config.options(section):
if "use_" in item:
try:
self.config_options[section][item] = config.getboolean(section, item)
except:
self.config_options[section][item] = False
elif "path" in item:
self.config_options[section][item] = config.get(section, item)
else:
self.config_options[section][item] = config.get(section, item)
if self.args.testmode:
print("Configuration file variables:")
for key, value in self.config_options.items():
print(key)
for sub_key, sub_value in value.items():
print("\t%s %r" % (sub_key, sub_value))
else:
if self.logger:
self.logger.critical("Issue locating configuration file, exiting.")
sys.exit()
except Exception as exception_message:
if self.logger:
self.logger.critical("Issue reading configuration file, exiting. %s" % exception_message)
sys.exit()
self.read_config = True
def sanity_check(self):
"""
        Reserved for future sanity checks; currently only logs activation.
"""
if self.logger:
self.logger.info("%s: activated" % inspect.stack()[0][3])
def srp_check(self):
"""
full setregproptool support later, if ever.
"""
if self.logger:
self.logger.info("%s: activated" % inspect.stack()[0][3])
if os.path.exists('/usr/local/bin/setregproptool'):
self.srp_path = '/usr/local/bin/setregproptool'
elif os.path.exists(os.path.dirname(os.path.abspath(__file__)) + '/setregproptool'):
self.srp_path = os.path.dirname(os.path.abspath(__file__)) + '/setregproptool'
else:
print("SRP #3a")
if self.logger:
self.logger.info("SRP path: %s" % self.srp_path)
def srp_current_state(self):
"""
full setregproptool support later, if ever.
"""
if self.logger:
self.logger.info("%s: activated" % inspect.stack()[0][3])
try:
existing_fw_pw = subprocess.call([self.srp_path, "-c"])
if self.logger:
self.logger.info("srp says %r" % existing_fw_pw)
if existing_fw_pw:
return False
# it's weird, I know. Blame Apple.
else:
return True
        except Exception as exception_message:
            if self.logger:
                self.logger.info("ERROR running setregproptool: %s" % exception_message)
            return False
#
# # E:451,15: Undefined variable 'CalledProcessError' (undefined-variable)
# except CalledProcessError:
# if self.logger:
# self.logger.info("ERROR srp says %r" % existing_fw_pw)
# return False
def srp_change(self):
"""
full setregproptool support later, if ever.
"""
if self.logger:
self.logger.info("%s: activated" % inspect.stack()[0][3])
print("Using srp tool!")
print("%r" % self.current_fwpw_state)
def fwpwd_check(self):
"""
        Locate the firmwarepasswd tool and record its path.
"""
if self.logger:
self.logger.info("%s: activated" % inspect.stack()[0][3])
if os.path.exists('/usr/sbin/firmwarepasswd'):
self.fwpwd_path = '/usr/sbin/firmwarepasswd'
else:
print("FWPWD #2b")
if self.logger:
self.logger.info("FWPWD path: %s" % self.fwpwd_path)
def fwpwd_current_state(self):
"""
        Return True if firmwarepasswd reports that a firmware password is currently set.
"""
if self.logger:
self.logger.info("%s: activated" % inspect.stack()[0][3])
        existing_fw_pw = subprocess.check_output([self.fwpwd_path, "-check"])
        return b'Yes' in existing_fw_pw
def fwpwd_change(self):
"""
        Verify the current firmware password against the keyfile, then delete, update or set it as requested.
"""
if self.logger:
self.logger.info("%s: activated" % inspect.stack()[0][3])
known_current_password = False
current_password = ''
# is this really needed?!?
new_fw_tool_cmd = [self.fwpwd_path, '-verify']
if self.current_fwpw_state:
if self.logger:
self.logger.info("Verifying current FW password")
for index in reversed(range(len(self.other_password_list))):
child = pexpect.spawn(' '.join(new_fw_tool_cmd))
child.expect('Enter password:')
child.sendline(self.other_password_list[index])
result = child.expect(['Correct', 'Incorrect'])
if result == 0:
#
# correct password, exit loop
current_password = self.other_password_list[index]
known_current_password = True
break
else:
#
# wrong password, keep going
continue
#
# We've discovered the currently set firmware password
if known_current_password:
#
# Deleting firmware password
if not self.config_options["flags"]["use_fwpw"]:
if self.logger:
self.logger.info("Deleting FW password")
new_fw_tool_cmd = [self.fwpwd_path, '-delete']
if self.logger:
self.logger.info(' '.join(new_fw_tool_cmd))
child = pexpect.spawn(' '.join(new_fw_tool_cmd))
child.expect('Enter password:')
child.sendline(current_password)
result = child.expect(['removed', 'incorrect'])
if result == 0:
#
# password accepted, log result and exit
if self.logger:
self.logger.info("Finished. Password should be removed. Restart required. [%i]" % (index + 1))
self.clean_exit = True
else:
if self.logger:
self.logger.critical("Asked to delete, current password not accepted. Exiting.")
# secure_delete_keyfile(logger, args, config_options)
if self.config_options["slack"]["use_slack"]:
self.slack_message("_*" + self.local_identifier + "*_ :no_entry:\n" + "Asked to delete, current password not accepted.", '', 'error')
# self.error_bot.send_message("_*" + self.local_identifier + "*_ :no_entry:\n" + "Asked to delete, current password not accepted.")
sys.exit(1)
#
# Current and new password are identical
#
#
# WAIT. How (is/would) this possible, clearly the hashes don't match!!! What if they aren't using hashes?
#
#
elif current_password == self.new_password:
self.matching_passwords = True
self.clean_exit = True
#
# Change current firmware password to new password
else:
if self.logger:
self.logger.info("Updating FW password")
new_fw_tool_cmd = [self.fwpwd_path, '-setpasswd']
if self.logger:
self.logger.info(' '.join(new_fw_tool_cmd))
child = pexpect.spawn(' '.join(new_fw_tool_cmd))
result = child.expect('Enter password:')
if result == 0:
pass
else:
if self.logger:
self.logger.error("bad response from firmwarepasswd. Exiting.")
self.secure_delete()
if self.config_options["slack"]["use_slack"]:
self.slack_message("_*" + self.local_identifier + "*_ :no_entry:\n" + "Bad response from firmwarepasswd.", '', 'error')
sys.exit(1)
child.sendline(current_password)
result = child.expect('Enter new password:')
if result == 0:
pass
else:
if self.logger:
self.logger.error("bad response from firmwarepasswd. Exiting.")
self.secure_delete()
if self.config_options["slack"]["use_slack"]:
self.slack_message("_*" + self.local_identifier + "*_ :no_entry:\n" + "Bad response from firmwarepasswd.", '', 'error')
sys.exit(1)
child.sendline(self.new_password)
result = child.expect('Re-enter new password:')
if result == 0:
pass
else:
if self.logger:
self.logger.error("bad response from firmwarepasswd. Exiting.")
self.secure_delete()
if self.config_options["slack"]["use_slack"]:
self.slack_message("_*" + self.local_identifier + "*_ :no_entry:\n" + "Bad response from firmwarepasswd.", '', 'error')
sys.exit(1)
child.sendline(self.new_password)
child.expect(pexpect.EOF)
child.close()
if self.logger:
self.logger.info("Updated FW Password.")
self.clean_exit = True
#
# Unable to match current password with contents of keyfile
else:
if self.logger:
self.logger.critical("Current FW password not in keyfile. Quitting.")
if self.config_options["slack"]["use_slack"]:
self.slack_message("_*" + self.local_identifier + "*_ :no_entry:\n" + "Current FW password not in keyfile.", '', 'error')
self.secure_delete()
sys.exit(1)
#
# No current firmware password, setting it
else:
new_fw_tool_cmd = [self.fwpwd_path, '-setpasswd']
if self.logger:
self.logger.info(' '.join(new_fw_tool_cmd))
child = pexpect.spawn(' '.join(new_fw_tool_cmd))
result = child.expect('Enter new password:')
print(child.before)
if result == 0:
pass
else:
if self.logger:
self.logger.error("bad response from firmwarepasswd. Exiting.")
self.secure_delete()
if self.config_options["slack"]["use_slack"]:
self.slack_message("_*" + self.local_identifier + "*_ :no_entry:\n" + "Bad response from firmwarepasswd.", '', 'error')
sys.exit(1)
child.sendline(self.new_password)
result = child.expect('Re-enter new password:')
if result == 0:
pass
else:
if self.logger:
self.logger.error("bad response from firmwarepasswd. Exiting.")
self.secure_delete()
if self.config_options["slack"]["use_slack"]:
self.slack_message("_*" + self.local_identifier + "*_ :no_entry:\n" + "Bad response from firmwarepasswd.", '', 'error')
sys.exit(1)
child.sendline(self.new_password)
child.expect(pexpect.EOF)
child.close()
if self.logger:
self.logger.info("Added FW Password.")
self.clean_exit = True
def slack_optionator(self):
"""
        Determine the identifier reported in Slack messages: ip, mac, hostname, computername or serial.
"""
if self.logger:
self.logger.info("%s: activated" % inspect.stack()[0][3])
if self.verify_network():
try:
full_ioreg = subprocess.check_output(['ioreg', '-l']).decode('utf-8')
serial_number_raw = re.findall('\"IOPlatformSerialNumber\" = \"(.*)\"', full_ioreg)
serial_number = serial_number_raw[0]
if self.args.testmode:
print("Serial number: %r" % serial_number)
if self.config_options["slack"]["slack_identifier"].lower() == 'ip' or self.config_options["slack"]["slack_identifier"].lower() == 'mac' or self.config_options["slack"]["slack_identifier"].lower() == 'hostname':
processed_device_list = []
# Get ordered list of network devices
base_network_list = subprocess.check_output(["/usr/sbin/networksetup", "-listnetworkserviceorder"]).decode('utf-8')
network_device_list = re.findall(r'\) (.*)\n\(.*Device: (.*)\)', base_network_list)
ether_up_list = subprocess.check_output(["/sbin/ifconfig", "-au", "ether"]).decode('utf-8')
for device in network_device_list:
device_name = device[0]
port_name = device[1]
try:
if self.args.testmode:
print(device_name, port_name)
if port_name in ether_up_list:
device_info_raw = subprocess.check_output(["/sbin/ifconfig", port_name]).decode('utf-8')
mac_address = re.findall('ether (.*) \n', device_info_raw)
if self.args.testmode:
print("%r" % mac_address)
ether_address = re.findall('inet (.*) netmask', device_info_raw)
if self.args.testmode:
print("%r" % ether_address)
if len(ether_address) and len(mac_address):
processed_device_list.append([device_name, port_name, ether_address[0], mac_address[0]])
except Exception as this_exception:
print(this_exception)
if processed_device_list:
if self.logger:
self.logger.info("1 or more active IP addresses. Choosing primary.")
if self.args.testmode:
print("Processed devices: ", processed_device_list)
if self.config_options["slack"]["slack_identifier"].lower() == 'ip':
self.local_identifier = processed_device_list[0][2] + " (" + processed_device_list[0][0] + ":" + processed_device_list[0][1] + ")"
elif self.config_options["slack"]["slack_identifier"].lower() == 'mac':
self.local_identifier = processed_device_list[0][3] + " (" + processed_device_list[0][0] + ":" + processed_device_list[0][1] + ")"
elif self.config_options["slack"]["slack_identifier"].lower() == 'hostname':
try:
self.local_identifier = socket.getfqdn()
except:
if self.logger:
self.logger.error("error discovering hostname info.")
self.local_identifier = serial_number
else:
if self.logger:
self.logger.error("error discovering IP info.")
self.local_identifier = serial_number
elif self.config_options["slack"]["slack_identifier"].lower() == 'computername':
try:
                    cname_identifier_raw = subprocess.check_output(['/usr/sbin/scutil', '--get', 'ComputerName']).decode('utf-8')
                    self.local_identifier = cname_identifier_raw.split('\n')[0]
if self.logger:
self.logger.info("Computername: %r" % self.local_identifier)
except:
if self.logger:
self.logger.info("error discovering computername.")
self.local_identifier = serial_number
elif self.config_options["slack"]["slack_identifier"].lower() == 'serial':
self.local_identifier = serial_number
if self.logger:
self.logger.info("Serial number: %r" % self.local_identifier)
else:
if self.logger:
self.logger.info("bad or no identifier flag, defaulting to serial number.")
self.local_identifier = serial_number
if self.args.testmode:
print("Local identifier: %r" % self.local_identifier)
except Exception as this_exception:
print(this_exception)
self.config_options["slack"]["use_slack"] = False
else:
self.config_options["slack"]["use_slack"] = False
if self.logger:
self.logger.info("No network detected.")
def slack_message(self, message, icon, type):
"""
        Post a message to the configured Slack info or error webhook.
"""
if self.logger:
self.logger.info("%s: activated" % inspect.stack()[0][3])
slack_info_channel = False
slack_error_channel = False
if self.config_options["slack"]["use_slack"] and self.config_options["slack"]["slack_info_url"]:
slack_info_channel = True
if self.config_options["slack"]["use_slack"] and self.config_options["slack"]["slack_error_url"]:
slack_error_channel = True
if slack_error_channel and type == 'error':
slack_url = self.config_options["slack"]["slack_error_url"]
elif slack_info_channel:
slack_url = self.config_options["slack"]["slack_info_url"]
else:
return
payload = {'text': message, 'username': 'FWPM ' + self.master_version, 'icon_emoji': ':key:'}
response = requests.post(slack_url, data=json.dumps(payload), headers={'Content-Type': 'application/json'})
self.logger.info('Response: ' + str(response.text))
self.logger.info('Response code: ' + str(response.status_code))
def reboot_exit(self):
"""
        Reserved for future reboot handling; currently only logs activation.
"""
if self.logger:
self.logger.info("%s: activated" % inspect.stack()[0][3])
def injest_keyfile(self):
"""
        Read and parse the keyfile (plain text or obfuscated plist) into the new password and the list of previous passwords.
"""
if self.logger:
self.logger.info("%s: activated" % inspect.stack()[0][3])
path_to_keyfile_exists = os.path.exists(self.config_options["keyfile"]["path"])
if not path_to_keyfile_exists:
if self.logger:
self.logger.critical("%r does not exist. Exiting." % self.config_options["keyfile"]["path"])
if self.config_options["slack"]["use_slack"]:
self.slack_message("_*" + self.local_identifier + "*_ :no_entry:\n" + "Keyfile does not exist.", '', 'error')
sys.exit(2)
if self.logger:
self.logger.info("Reading password file")
if self.config_options["keyfile"]["use_obfuscation"]:
#
# unobfuscate plist
if self.logger:
self.logger.info("Reading plist")
passwords = []
if "plist" in self.config_options["keyfile"]["path"]:
try:
                    with open(self.config_options["keyfile"]["path"], 'rb') as plist_file:
                        keyfile_plist = plistlib.load(plist_file)
                    content_raw = keyfile_plist["data"]
except:
if self.logger:
self.logger.critical("Error reading plist. Exiting.")
if self.config_options["slack"]["use_slack"]:
self.slack_message("_*" + self.local_identifier + "*_ :no_entry:\n" + "Error reading plist.", '', 'error')
sys.exit(1)
else:
try:
with open(self.config_options["keyfile"]["path"], 'r') as reader:
content_raw = reader.read()
except:
if self.logger:
self.logger.critical("Error reading plist. Exiting.")
if self.config_options["slack"]["use_slack"]:
self.slack_message("_*" + self.local_identifier + "*_ :no_entry:\n" + "Error reading plist.", '', 'error')
sys.exit(1)
content_raw = base64.b64decode(content_raw)
content_raw = content_raw.decode('utf-8').split(",")
content_raw = [x for x in content_raw if x]
output_string = ""
for item in content_raw:
label, pword = item.split(':')
pword = base64.b64decode(pword)
try:
commented = label.split('#')[1]
commented = base64.b64decode(commented)
is_commented = True
except:
is_commented = False
if is_commented:
output_string = "#" + commented.decode('utf-8') + ":" + pword.decode('utf-8')
passwords.append(output_string)
else:
uncommented = base64.b64decode(label)
output_string = uncommented.decode('utf-8') + ":" + pword.decode('utf-8')
passwords.append(output_string)
else:
#
# read keyfile
if self.logger:
self.logger.info("Reading plain text")
try:
with open(self.config_options["keyfile"]["path"], "r") as keyfile:
self.passwords_raw = keyfile.read()
passwords = self.passwords_raw.splitlines()
except:
if self.logger:
self.logger.critical("Error reading keyfile. Exiting.")
if self.config_options["slack"]["use_slack"]:
self.slack_message("_*" + self.local_identifier + "*_ :no_entry:\n" + "Error reading keyfile.", '', 'error')
sys.exit(1)
if self.logger:
self.logger.info("Closed password file")
# new_password = <PASSWORD>
# other_password_list = []
#
# parse data from keyfile and build list of passwords
for entry in passwords:
try:
key, value = entry.split(":", 1)
except Exception as this_exception:
if self.logger:
self.logger.critical("Malformed keyfile, key:value format required. %r. Quitting." % this_exception)
self.secure_delete()
if self.config_options["slack"]["use_slack"]:
self.slack_message("_*" + self.local_identifier + "*_ :no_entry:\n" + "Malformed keyfile.", '', 'error')
sys.exit(1)
if key.lower() == 'new':
if self.new_password is not None:
if self.logger:
self.logger.critical("Malformed keyfile, multiple new keys. Quitting.")
self.secure_delete()
if self.config_options["slack"]["use_slack"]:
self.slack_message("_*" + self.local_identifier + "*_ :no_entry:\n" + "Malformed keyfile.", '', 'error')
sys.exit(1)
else:
self.new_password = value
self.other_password_list.append(value)
else:
self.other_password_list.append(value)
if self.logger:
self.logger.info("Sanity checking password file contents")
if self.new_password is None and self.config_options["flags"]["use_fwpw"]:
if self.logger:
self.logger.critical("Malformed keyfile, no \'new\' key. Quitting.")
self.secure_delete()
if self.config_options["slack"]["use_slack"]:
self.slack_message("_*" + self.local_identifier + "*_ :no_entry:\n" + "Malformed keyfile.", '', 'error')
sys.exit(1)
self.read_keyfile = True
try:
self.other_password_list.remove(self.new_password)
except:
pass
def nvram_manager(self):
"""
        Update, correct or remove the fwpw-hash entry in nvram based on the outcome of the password change.
"""
if self.logger:
self.logger.info("%s: activated" % inspect.stack()[0][3])
if self.clean_exit:
if not self.config_options["flags"]["use_fwpw"]:
try:
subprocess.call(["/usr/sbin/nvram", "-d", "fwpw-hash"])
if self.logger:
self.logger.info("nvram entry pruned.")
if self.config_options["slack"]["use_slack"]:
self.slack_message("_*" + self.local_identifier + "*_ :unlock:\n" + "FWPW and nvram entry removed.", '', 'info')
#
# Should we return here?
#
except Exception as exception_message:
if self.logger:
self.logger.warning("nvram reported error attempting to remove hash. Exiting. %s" % exception_message)
#
# Slack?
#
sys.exit(1)
if self.config_options["flags"]["management_string_type"] == "None":
try:
# ?
# existing_keyfile_hash = subprocess.check_output(["/usr/sbin/nvram", "fwpw-hash"])
try:
subprocess.call(["/usr/sbin/nvram", "-d", "fwpw-hash"])
if self.logger:
self.logger.info("nvram entry pruned.")
if self.config_options["slack"]["use_slack"]:
self.slack_message("_*" + self.local_identifier + "*_ :closed_lock_with_key:\n" + "FWPW updated.", '', 'info')
except Exception as exception_message:
if self.logger:
self.logger.warning("nvram reported error attempting to remove hash. Exiting. %s" % exception_message)
sys.exit(1)
except:
# assuming hash doesn't exist.
if self.logger:
self.logger.info("Assuming nvram entry doesn't exist.")
if self.config_options["slack"]["use_slack"]:
self.slack_message("_*" + self.local_identifier + "*_ :closed_lock_with_key:\n" + "FWPW updated.", '', 'info')
elif self.config_options["flags"]["management_string_type"] == "custom" or self.config_options["flags"]["management_string_type"] == "hash":
if self.matching_hashes:
if self.matching_passwords:
if self.logger:
self.logger.info("Hashes and Passwords match. No changes needed.")
if self.config_options["slack"]["use_slack"]:
self.slack_message("_*" + self.local_identifier + "*_ :white_check_mark::white_check_mark:\n" + "FWPM hashes and FW passwords match.", '', 'info')
else:
if self.logger:
self.logger.info("Hashes match, password modified.")
if self.config_options["slack"]["use_slack"]:
self.slack_message("_*" + self.local_identifier + "*_ :white_check_mark::heavy_exclamation_mark:\n" + "FWPM hashes and FW passwords match.", '', 'info')
else:
try:
subprocess.call(["/usr/sbin/nvram", "fwpw-hash=" + self.fwpw_managed_string])
if self.logger:
self.logger.info("nvram modified.")
except Exception as exception_message:
if self.logger:
self.logger.warning("nvram modification failed. nvram reported error. %s" % exception_message)
#
# slack error message?
#
sys.exit(1)
if self.matching_passwords:
if self.logger:
self.logger.info("Hash mismatch, Passwords match. Correcting hash.")
if self.config_options["slack"]["use_slack"]:
self.slack_message("_*" + self.local_identifier + "*_ :heavy_exclamation_mark: :white_check_mark:\n" + "Hash mismatch, Passwords match. Correcting hash.", '', 'info')
else:
if self.config_options["slack"]["use_slack"]:
self.slack_message("_*" + self.local_identifier + "*_ :closed_lock_with_key:\n" + "FWPW and hash updated.", '', 'info')
else:
if self.logger:
self.logger.critical("An error occured. Failed to modify firmware password.")
if self.config_options["slack"]["use_slack"]:
                self.slack_message("_*" + self.local_identifier + "*_ :no_entry:\n" + "An error occurred. Failed to modify firmware password.", '', 'error')
sys.exit(1)
def exit_manager(self):
"""
        Report completion and reboot if configured to do so.
"""
if self.logger:
self.logger.info("%s: activated" % inspect.stack()[0][3])
#
# check the new booleans, etc to find out what we accomplished...
#
# self.clean_exit = False
#
# self.read_config = False
# self.read_keyfile = False
# self.modify_fwpw = False
# self.modify_nvram = False
#
if self.config_options["flags"]["use_reboot_on_exit"]:
if self.args.testmode:
if self.logger:
self.logger.info("Test mode, cancelling reboot.")
else:
if self.logger:
self.logger.warning("Normal completion. Rebooting.")
os.system('reboot')
else:
if self.logger:
self.logger.info("FWPM exiting normally.")
sys.exit(0)
def verify_network(self):
"""
        Check for a working network connection by requesting https://8.8.8.8 (Google public DNS) with a short timeout.
"""
try:
_ = requests.get("https://8.8.8.8", timeout=3)
return True
except requests.ConnectionError as exception_message:
print(exception_message)
return False
def main():
"""
    Parse arguments, require root, set up logging and run the Firmware Password Manager.
"""
master_version = "2.5"
logo = """
/_ _/ /_ _/ University of Utah
_/ _/ Marriott Library
_/ _/ Mac Group
_/ _/ https://apple.lib.utah.edu/
_/_/ https://github.com/univ-of-utah-marriott-library-apple
"""
desc = "Manages the firmware password on Apple Macintosh computers."
#
# require root to run.
if os.geteuid():
print("Must be root to run script.")
sys.exit(2)
#
# parse option definitions
parser = argparse.ArgumentParser(description=logo+desc, formatter_class=RawTextHelpFormatter)
#
# required, mutually exclusive commands
prime_group = parser.add_argument_group('Required management settings', 'Choosing one of these options is required to run FWPM. They tell FWPM how you want to manage the firmware password.')
subprime = prime_group.add_mutually_exclusive_group(required=True)
subprime.add_argument('-c', '--configfile', help='Read configuration file')
parser.add_argument('-b', '--reboot', action="store_true", default=False, help='Reboots the computer after the script completes successfully.')
parser.add_argument('-t', '--testmode', action="store_true", default=False, help='Test mode. Verbose logging, will not delete keyfile.')
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + master_version)
args = parser.parse_args()
if args.testmode:
print(args)
#
# Open log file
try:
log_path = '/var/log/' + 'FWPW_Manager_' + master_version
logging.basicConfig(filename=log_path, level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
logger.info("Running Firmware Password Manager " + master_version)
except:
logger = None
FWPM_Object(args, logger, master_version)
if __name__ == '__main__':
main()
| 2.03125
| 2
|
glance/__init__.py
|
GlanceApps/Glance-Website
| 1
|
12783344
|
<filename>glance/__init__.py
""" glance """
| 0.859375
| 1
|
app/views_info.py
|
fossabot/stream_vod_indexer
| 0
|
12783345
|
from django.shortcuts import render
from django.http import JsonResponse
NotImplemented = JsonResponse({"error":"NotImplemented"})
# Create your views here.
def slugs(request):
return NotImplemented
def last_updated(request):
return NotImplemented
def streamer(request):
return NotImplemented
def endpoints(request):
return NotImplemented
| 2.03125
| 2
|
src/ensae_projects/hackathon/random_answers.py
|
sdpython/ensae_projects
| 1
|
12783346
|
# -*- coding: utf-8 -*-
"""
@file
@brief Generates random answers for challenges.
"""
import os
import numpy
import pandas
def random_answers_2020_images():
"""
    Generates random answers for the deep learning challenge of
hackathons :ref:`l-hackathon-2020`.
"""
name = os.path.join(os.path.split(__file__)[0], "labels_2020_random.csv")
df = pandas.read_csv(name)[['file_name']]
df['label'] = numpy.random.randint(low=0, high=2, size=(df.shape[0], ))
df['score'] = numpy.random.random((df.shape[0], ))
return df
def random_answers_2020_ml():
"""
    Generates random answers for the machine learning challenge of
hackathons :ref:`l-hackathon-2020`.
"""
df = pandas.DataFrame({"index": numpy.arange(473333)})
df['label'] = numpy.random.randint(low=0, high=2, size=(df.shape[0], ))
df['score'] = numpy.random.random((df.shape[0], ))
return df
| 3.0625
| 3
|
maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_predictors.py
|
JunhoPark0314/DCNet
| 93
|
12783347
|
<reponame>JunhoPark0314/DCNet
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from maskrcnn_benchmark.modeling import registry
from torch import nn
import torch
@registry.ROI_BOX_PREDICTOR.register("FastRCNNPredictor")
class FastRCNNPredictor(nn.Module):
def __init__(self, config, in_channels):
super(FastRCNNPredictor, self).__init__()
assert in_channels is not None
num_inputs = in_channels
num_classes = config.MODEL.ROI_BOX_HEAD.NUM_CLASSES
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.cls_score = nn.Linear(num_inputs, num_classes)
num_bbox_reg_classes = 2 if config.MODEL.CLS_AGNOSTIC_BBOX_REG else num_classes
self.bbox_pred = nn.Linear(num_inputs, num_bbox_reg_classes * 4)
nn.init.normal_(self.cls_score.weight, mean=0, std=0.01)
nn.init.constant_(self.cls_score.bias, 0)
nn.init.normal_(self.bbox_pred.weight, mean=0, std=0.001)
nn.init.constant_(self.bbox_pred.bias, 0)
def forward(self, x):
x = self.avgpool(x)
x = x.view(x.size(0), -1)
cls_logit = self.cls_score(x)
bbox_pred = self.bbox_pred(x)
return cls_logit, bbox_pred
@registry.ROI_BOX_PREDICTOR.register("FPNPredictor")
class FPNPredictor(nn.Module):
def __init__(self, cfg, in_channels):
super(FPNPredictor, self).__init__()
num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
representation_size = in_channels
self.cls_score = nn.Linear(representation_size, num_classes)
self.num_bbox_reg_classes = 2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else num_classes
#self.bbox_pred = nn.Linear(representation_size, num_bbox_reg_classes * 4)
self.bbox_pred = nn.Linear(representation_size, 4)
nn.init.normal_(self.cls_score.weight, std=0.01)
nn.init.normal_(self.bbox_pred.weight, std=0.001)
for l in [self.cls_score, self.bbox_pred]:
nn.init.constant_(l.bias, 0)
def forward(self, xc, xr=None):
if xr is not None:
scores = self.cls_score(xc)
bbox_deltas = self.bbox_pred(xr).repeat(1, self.num_bbox_reg_classes)
return scores, bbox_deltas
else:
xcs = []
for feature in xc:
xcs.append(self.cls_score(feature))
return xcs
@registry.ROI_BOX_PREDICTOR.register("FPNCosinePredictor")
class FPNCosinePredictor(nn.Module):
def __init__(self, cfg, in_channels):
super(FPNCosinePredictor, self).__init__()
num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
representation_size = in_channels
self.cls_score = nn.Linear(representation_size, num_classes)
self.num_bbox_reg_classes = 2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else num_classes
#self.bbox_pred = nn.Linear(representation_size, num_bbox_reg_classes * 4)
self.bbox_pred = nn.Linear(representation_size, 4)
self.scale = cfg.MODEL.ROI_BOX_HEAD.COSINE_SCALE
nn.init.normal_(self.cls_score.weight, std=0.01)
nn.init.normal_(self.bbox_pred.weight, std=0.001)
for l in [self.cls_score, self.bbox_pred]:
nn.init.constant_(l.bias, 0)
def forward(self, xc, xr=None):
#if x.ndimension() == 4:
# assert list(x.shape[2:]) == [1, 1]
# x = x.view(x.size(0), -1)
if xr is not None:
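            # L2-normalize the pooled features and the classifier weights so the linear layer computes a cosine similarity, then scale it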
xc_norm = torch.norm(xc, p=2, dim=1).unsqueeze(1).expand_as(xc)
xc_normalized = xc.div(xc_norm + 1e-5)
temp_norm = torch.norm(self.cls_score.weight.data,p=2, dim=1).unsqueeze(1).expand_as(self.cls_score.weight.data)
self.cls_score.weight.data = self.cls_score.weight.data.div(temp_norm + 1e-5)
cos_dist = self.cls_score(xc_normalized)
scores = self.scale * cos_dist
bbox_deltas = self.bbox_pred(xr).repeat(1, self.num_bbox_reg_classes)
return scores, bbox_deltas
else:
xcs = []
for feature in xc:
xc_norm = torch.norm(feature, p=2, dim=1).unsqueeze(1).expand_as(feature)
                xc_normalized = feature.div(xc_norm + 1e-5)
temp_norm = torch.norm(self.cls_score.weight.data,p=2, dim=1).unsqueeze(1).expand_as(self.cls_score.weight.data)
self.cls_score.weight.data = self.cls_score.weight.data.div(temp_norm + 1e-5)
cos_dist = self.cls_score(xc_normalized)
scores = self.scale * cos_dist
xcs.append(scores)
return xcs
@registry.ROI_BOX_PREDICTOR.register("FPNPredictor2")
class FPNPredictor2(nn.Module):
def __init__(self, cfg, in_channels):
super(FPNPredictor2, self).__init__()
num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
num_classes2 = cfg.MODEL.ROI_BOX_HEAD.NUM_ALL_CLASSES
representation_size = in_channels
self.cls_score = nn.Linear(representation_size, num_classes)
self.cls_score2 = nn.Linear(num_classes,num_classes2)
self.num_bbox_reg_classes = 2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else num_classes2
#self.bbox_pred = nn.Linear(representation_size, num_bbox_reg_classes * 4)
self.bbox_pred = nn.Linear(representation_size, 4)
#nn.init.normal_(self.cls_score.weight, std=0.01)
nn.init.normal_(self.cls_score2.weight, std=0.01)
nn.init.normal_(self.bbox_pred.weight, std=0.001)
for l in [self.cls_score2, self.bbox_pred]:
nn.init.constant_(l.bias, 0)
def forward(self, xc, xr=None):
#if x.ndimension() == 4:
# assert list(x.shape[2:]) == [1, 1]
# x = x.view(x.size(0), -1)
if xr is not None:
scores = self.cls_score(xc)
scores = self.cls_score2(scores)
bbox_deltas = self.bbox_pred(xr).repeat(1, self.num_bbox_reg_classes)
return scores, bbox_deltas
else:
xcs = []
for feature in xc:
xcs.append(self.cls_score2(self.cls_score(feature)))
return xcs
def make_roi_box_predictor(cfg, in_channels):
func = registry.ROI_BOX_PREDICTOR[cfg.MODEL.ROI_BOX_HEAD.PREDICTOR]
return func(cfg, in_channels)
| 1.921875
| 2
|
check_deps.py
|
racitup/ddrescue_used
| 2
|
12783348
|
<gh_stars>1-10
"""
Checks for tool package dependencies.
##License:
Original work Copyright 2016 <NAME>
Everyone is permitted to copy, distribute and modify this software,
subject to this statement and the copyright notice above being included.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM.
"""
import shutil
import sys
import os, re
import helpers
from pkg_resources import parse_version
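# Each entry maps a Debian package name to (minimum version, executables that package must provide),
# e.g. 'gddrescue': ('1.17-1', 'ddrescue', 'ddrescuelog').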
deps_mandatory = {
'blktrace': ('1.0.5-1', 'blktrace', 'blkparse'),
'testdisk': ('6.14-2', 'testdisk'),
    # NOTE: there are a lot of bugs in ddrescuelog before 1.19.1
'gddrescue': ('1.17-1', 'ddrescue', 'ddrescuelog'),
'mount': ('2.20.1-5.1ubuntu20.7', 'losetup', 'mount', 'umount'),
'util-linux': ('2.20.1-5.1ubuntu20.7', 'blkid', 'blockdev'),
'parted': ('2.3-19ubuntu1', 'partprobe'),
'coreutils': ('8.21-1ubuntu5.4', 'truncate'),
'hdparm': ('9.43-1ubuntu3', 'hdparm'),
'e2fsprogs': ('1.43~WIP.2016.03.15-2', 'filefrag', 'e2image', 'e2fsck'),
'diffutils': ('1:3.3-1', 'diff') }
deps_optional = {
'dosfstools': ('3.0.26-1', 'fsck.fat'),
'hfsprogs': ('332.25-11', 'fsck.hfsplus'),
'ntfs-3g': ('1:2013.1.13AR.1-2ubuntu2', 'ntfsfix', 'ntfsclone'),
'btrfs-tools': ('4.1', 'btrfs', 'btrfstune', 'btrfs-image'),
'xfsprogs': ('3.2.1ubuntu1', 'xfs_repair', 'xfs_db'),
'ddrescueview': ('0.4~alpha2-1~ubuntu14.04.1', 'ddrescueview') }
def checkroot():
"Checks for running as root."
if not os.geteuid() == 0:
sys.exit('Must be run as root (sudo)')
def processdeps(deps):
"Checks the versions of installed dependencies. Returns list of missing progs."
missing = []
for package in deps:
version = deps[package][0]
vernum = parse_version(version)
cmd = ['dpkg', '-s', package]
proc, text = helpers.get_procoutput(cmd, log=False)
if proc.returncode == 0:
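            # dpkg -s output includes a line like "Version: 1.0.5-1" for installed packages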
pkgver = re.search(r"^Version: (.+)$", text, re.MULTILINE).group(1)
pkgnum = parse_version(pkgver)
if vernum > pkgnum:
print('ERROR: Package dependency: {} >= {} required, {} installed.'
.format(package, version, pkgver))
missing += deps[package][1:]
else:
print('ERROR: Package dependency: {} >= {} required but not installed.'
.format(package, version))
missing += deps[package][1:]
return missing
def check():
"""Checks application dependencies.
Returns list of optional progs if not installed
Requires python 3.3 for which()
"""
if not shutil.which('dpkg'):
sys.exit('Cannot check dependencies; dpkg not installed')
if processdeps(deps_mandatory):
sys.exit('Mandatory dependency errors.')
not_installed = processdeps(deps_optional)
if not_installed:
print('\nWARNING: Old or missing programs may cause unexpected results:\n{}\n'
.format(not_installed))
return not_installed
| 1.640625
| 2
|
img2dataset/logger.py
|
rom1504/img2dataset
| 482
|
12783349
|
"""logging utils for the downloader"""
import wandb
import time
from collections import Counter
import fsspec
import json
from multiprocessing import Process, Queue
import queue
class CappedCounter:
"""Maintain a counter with a capping to avoid memory issues"""
def __init__(self, max_size=10 ** 5):
self.max_size = max_size
self.counter = Counter()
def increment(self, key):
if len(self.counter) >= self.max_size:
self._keep_most_frequent()
self.counter[key] += 1
def _keep_most_frequent(self):
self.counter = Counter(dict(self.counter.most_common(int(self.max_size / 2))))
def most_common(self, k):
return self.counter.most_common(k)
def update(self, counter):
self.counter.update(counter.counter)
if len(self.counter) >= self.max_size:
self._keep_most_frequent()
def dump(self):
return self.counter
@classmethod
def load(cls, d, max_size=10 ** 5):
c = CappedCounter(max_size)
c.counter = Counter(d)
return c
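# Minimal usage sketch for CappedCounter (illustrative only):
#   status = CappedCounter(max_size=1000)
#   status.increment("success")
#   status.increment("timeout")
#   status.most_common(2)  # -> [('success', 1), ('timeout', 1)]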
class Logger:
"""logger which logs when number of calls reaches a value or a time interval has passed"""
def __init__(self, processes_count=1, min_interval=0):
"""Log only every processes_count and if min_interval (seconds) have elapsed since last log"""
# wait for all processes to return
self.processes_count = processes_count
self.processes_returned = 0
# min time (in seconds) before logging a new table (avoids too many logs)
self.min_interval = min_interval
self.last = time.perf_counter()
# keep track of whether we logged the last call
self.last_call_logged = False
self.last_args = None
self.last_kwargs = None
def __call__(self, *args, **kwargs):
self.processes_returned += 1
if self.processes_returned % self.processes_count == 0 and time.perf_counter() - self.last > self.min_interval:
self.do_log(*args, **kwargs)
self.last = time.perf_counter()
self.last_call_logged = True
else:
self.last_call_logged = False
self.last_args = args
self.last_kwargs = kwargs
def do_log(self, *args, **kwargs):
raise NotImplementedError()
def sync(self):
"""Ensure last call is logged"""
if not self.last_call_logged:
self.do_log(*self.last_args, **self.last_kwargs)
# reset for next file
self.processes_returned = 0
class SpeedLogger(Logger):
"""Log performance metrics"""
def __init__(self, prefix, enable_wandb, **logger_args):
super().__init__(**logger_args)
self.prefix = prefix
self.start = time.perf_counter()
self.count = 0
self.success = 0
self.failed_to_download = 0
self.failed_to_resize = 0
self.enable_wandb = enable_wandb
def __call__(
self, duration, count, success, failed_to_download, failed_to_resize
): # pylint: disable=arguments-differ
self.count += count
self.success += success
self.failed_to_download += failed_to_download
self.failed_to_resize += failed_to_resize
super().__call__(duration, self.count, self.success, self.failed_to_download, self.failed_to_resize)
def do_log(
self, duration, count, success, failed_to_download, failed_to_resize
): # pylint: disable=arguments-differ
img_per_sec = count / duration
success_ratio = 1.0 * success / count
failed_to_download_ratio = 1.0 * failed_to_download / count
failed_to_resize_ratio = 1.0 * failed_to_resize / count
print(
" - ".join(
[
f"{self.prefix:<7}",
f"success: {success_ratio:.3f}",
f"failed to download: {failed_to_download_ratio:.3f}",
f"failed to resize: {failed_to_resize_ratio:.3f}",
f"images per sec: {img_per_sec:.0f}",
f"count: {count}",
]
)
)
if self.enable_wandb:
wandb.log(
{
f"{self.prefix}/img_per_sec": img_per_sec,
f"{self.prefix}/success": success_ratio,
f"{self.prefix}/failed_to_download": failed_to_download_ratio,
f"{self.prefix}/failed_to_resize": failed_to_resize_ratio,
f"{self.prefix}/count": count,
}
)
class StatusTableLogger(Logger):
"""Log status table to W&B, up to `max_status` most frequent items"""
def __init__(self, max_status=100, min_interval=60, enable_wandb=False, **logger_args):
super().__init__(min_interval=min_interval, **logger_args)
# avoids too many errors unique to a specific website (SSL certificates, etc)
self.max_status = max_status
self.enable_wandb = enable_wandb
def do_log(self, status_dict, count): # pylint: disable=arguments-differ
if self.enable_wandb:
status_table = wandb.Table(
columns=["status", "frequency", "count"],
data=[[k, 1.0 * v / count, v] for k, v in status_dict.most_common(self.max_status)],
)
wandb.run.log({"status": status_table})
def write_stats(
output_folder,
shard_id,
count,
successes,
failed_to_download,
failed_to_resize,
start_time,
end_time,
status_dict,
oom_shard_count,
):
"""Write stats to disk"""
stats = {
"count": count,
"successes": successes,
"failed_to_download": failed_to_download,
"failed_to_resize": failed_to_resize,
"duration": end_time - start_time,
"status_dict": status_dict.dump(),
}
fs, output_path = fsspec.core.url_to_fs(output_folder)
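    # zero-pad the shard id to oom_shard_count digits, e.g. shard_id=5, oom_shard_count=5 -> "00005"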
shard_name = "{shard_id:0{oom_shard_count}d}".format(shard_id=shard_id, oom_shard_count=oom_shard_count)
json_file = f"{output_path}/{shard_name}_stats.json"
with fs.open(json_file, "w") as f:
json.dump(stats, f, indent=4)
# https://docs.python.org/3/library/multiprocessing.html
# logger process that reads stats files regularly, aggregates and sends to wandb / prints to terminal
class LoggerProcess(Process):
"""Logger process that reads stats files regularly, aggregates and send to wandb / print to terminal"""
def __init__(self, output_folder, enable_wandb, wandb_project, config_parameters, processes_count, log_interval=60):
super().__init__()
self.log_interval = log_interval
self.enable_wandb = enable_wandb
self.fs, self.output_path = fsspec.core.url_to_fs(output_folder)
self.stats_files = set()
self.wandb_project = wandb_project
self.config_parameters = config_parameters
self.processes_count = processes_count
self.q = Queue()
def run(self):
"""Run logger process"""
if self.enable_wandb:
self.current_run = wandb.init(project=self.wandb_project, config=self.config_parameters, anonymous="allow")
else:
self.current_run = None
self.total_speed_logger = SpeedLogger(
"total", processes_count=self.processes_count, enable_wandb=self.enable_wandb
)
self.status_table_logger = StatusTableLogger(
processes_count=self.processes_count, enable_wandb=self.enable_wandb
)
start_time = time.perf_counter()
last_check = 0
total_status_dict = CappedCounter()
while True:
time.sleep(0.1)
try:
self.q.get(False)
last_one = True
except queue.Empty as _:
last_one = False
if not last_one and time.perf_counter() - last_check < self.log_interval:
continue
try:
# read stats files
stats_files = self.fs.glob(self.output_path + "/*.json")
# get new stats files
new_stats_files = set(stats_files) - self.stats_files
if len(new_stats_files) == 0:
if last_one:
self.finish()
return
# read new stats files
for stats_file in new_stats_files:
with self.fs.open(stats_file, "r") as f:
stats = json.load(f)
SpeedLogger("worker", enable_wandb=self.enable_wandb)(
duration=stats["duration"],
count=stats["count"],
success=stats["successes"],
failed_to_download=stats["failed_to_download"],
failed_to_resize=stats["failed_to_resize"],
)
self.total_speed_logger(
duration=time.perf_counter() - start_time,
count=stats["count"],
success=stats["successes"],
failed_to_download=stats["failed_to_download"],
failed_to_resize=stats["failed_to_resize"],
)
status_dict = CappedCounter.load(stats["status_dict"])
total_status_dict.update(status_dict)
self.status_table_logger(total_status_dict, self.total_speed_logger.count)
self.stats_files.add(stats_file)
last_check = time.perf_counter()
if last_one:
self.finish()
return
except Exception as e: # pylint: disable=broad-except
print(e)
self.finish()
return
def finish(self):
"""Finish logger process"""
self.total_speed_logger.sync()
self.status_table_logger.sync()
if self.current_run is not None:
self.current_run.finish()
def join(self, timeout=None):
"""Stop logger process"""
self.q.put("stop")
super().join()
self.q.close()
| 2.84375
| 3
|
materials/migrations/0055_auto_20190327_2305.py
|
mgovoni-devel/MatD3
| 7
|
12783350
|
# Generated by Django 2.1.7 on 2019-03-28 03:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('materials', '0054_auto_20190318_1704'),
]
operations = [
migrations.AlterField(
model_name='dataset',
name='dimensionality',
field=models.PositiveSmallIntegerField(choices=[(3, 3), (2, 2)]),
),
]
| 1.632813
| 2
|