Dataset schema (one record per source file; ⌀ marks a nullable column):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40–40 |
| size | int64 | 5 – 2.06M |
| ext | string | 10 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 3 – 248 |
| max_stars_repo_name | string | length 5 – 125 |
| max_stars_repo_head_hexsha | string | length 40 – 78 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count ⌀ | int64 | 1 – 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | length 24–24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | length 24–24 |
| max_issues_repo_path | string | length 3 – 248 |
| max_issues_repo_name | string | length 5 – 125 |
| max_issues_repo_head_hexsha | string | length 40 – 78 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count ⌀ | int64 | 1 – 67k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | length 24–24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | length 24–24 |
| max_forks_repo_path | string | length 3 – 248 |
| max_forks_repo_name | string | length 5 – 125 |
| max_forks_repo_head_hexsha | string | length 40 – 78 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count ⌀ | int64 | 1 – 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | length 24–24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | length 24–24 |
| content | string | length 5 – 2.06M |
| avg_line_length | float64 | 1 – 1.02M |
| max_line_length | int64 | 3 – 1.03M |
| alphanum_fraction | float64 | 0 – 1 |
| count_classes | int64 | 0 – 1.6M |
| score_classes | float64 | 0 – 1 |
| count_generators | int64 | 0 – 651k |
| score_generators | float64 | 0 – 1 |
| count_decorators | int64 | 0 – 990k |
| score_decorators | float64 | 0 – 1 |
| count_async_functions | int64 | 0 – 235k |
| score_async_functions | float64 | 0 – 1 |
| count_documentation | int64 | 0 – 1.04M |
| score_documentation | float64 | 0 – 1 |

Record layout used below: a `hexsha | size | ext | lang` line; `stars:`, `issues:`, and `forks:` lines, each holding path | repo | head hexsha | licenses | count | first event | last event; the file content; and a `stats:` line giving the line-length statistics and the feature counts as `count (score)`.
hexsha: f5553f600d9e51ffdced6978931c7ede4d5b363d | size: 7,458 | ext: py | lang: Python
stars:  src/extract_features.py | AymericBebert/MusicLearning | 8fbc931330029baa8ae9cfcfa20c79e41b5eca8f | ["MIT"] | null | null | null
issues: src/extract_features.py | AymericBebert/MusicLearning | 8fbc931330029baa8ae9cfcfa20c79e41b5eca8f | ["MIT"] | null | null | null
forks:  src/extract_features.py | AymericBebert/MusicLearning | 8fbc931330029baa8ae9cfcfa20c79e41b5eca8f | ["MIT"] | null | null | null
#!/usr/bin/env python3
# -*-coding:utf-8-*-
"""
This module is used to extract features from the data
"""
import numpy as np
from scipy.fftpack import fft, dct  # dct was imported from scipy.fftpack.realtransforms, a path newer SciPy no longer exposes
import python_speech_features
eps = 0.00000001
def file_length(soundParams):
"""Returns the file length, in seconds"""
return soundParams[3] / soundParams[2]
def zcr(frame):
"""Computes zero crossing rate of frame"""
count = len(frame)
countZ = np.sum(np.abs(np.diff(np.sign(frame)))) / 2
return countZ / (count - 1)
def energy(frame):
"""Computes signal energy of frame"""
return np.sum(frame ** 2) / len(frame)
def energy_entropy(frame, numOfShortBlocks=10):
"""Computes entropy of energy"""
tfe = np.sum(frame ** 2) # total frame energy
L = len(frame)
subWinLength = int(np.floor(L / numOfShortBlocks))
if L != subWinLength * numOfShortBlocks:
frame = frame[0:subWinLength * numOfShortBlocks]
    # subWindows has shape [subWinLength x numOfShortBlocks] (the original comment said [numOfShortBlocks x L])
    subWindows = frame.reshape(subWinLength, numOfShortBlocks, order='F').copy()
# Compute normalized sub-frame energies:
s = np.sum(subWindows ** 2, axis=0) / (tfe + eps)
# Compute entropy of the normalized sub-frame energies:
entropy = -1 * np.sum(s * np.log2(s + eps))
return entropy
def spectral_centroid_and_spread(X, fs):
"""Computes spectral centroid of frame (given abs(FFT))"""
ind = (np.arange(1, len(X) + 1)) * (fs/(2.0 * len(X)))
Xt = X.copy()
Xt = Xt / Xt.max()
NUM = np.sum(ind * Xt)
DEN = np.sum(Xt) + eps
C = (NUM / DEN) # Centroid
S = np.sqrt(np.sum(((ind - C) ** 2) * Xt) / DEN) # Spread
# Normalize:
C = C / (fs / 2.0)
S = S / (fs / 2.0)
return (C, S)
def avg_mfcc(sound_obj, avg=True):
"""Extract the MFCC from the sound object"""
soundD = sound_obj["sound"] # raw data
sr = sound_obj["params"][2] # samplerate
# nf = sound_obj["params"][3] # nframes
all_mfcc = python_speech_features.mfcc(soundD, samplerate=sr, winlen=0.025, winstep=1)
if avg:
return np.mean(all_mfcc, axis=0)
return all_mfcc
def mfcc_init_filter_banks(fs, nfft):
"""Computes the triangular filterbank for MFCC computation"""
# filter bank params:
lowfreq = 133.33
linsc = 200/3.
logsc = 1.0711703
numLinFiltTotal = 13
numLogFilt = 27
# Total number of filters
nFiltTotal = numLinFiltTotal + numLogFilt
# Compute frequency points of the triangle:
freqs = np.zeros(nFiltTotal+2)
freqs[:numLinFiltTotal] = lowfreq + np.arange(numLinFiltTotal) * linsc
freqs[numLinFiltTotal:] = freqs[numLinFiltTotal-1] * logsc ** np.arange(1, numLogFilt + 3)
heights = 2./(freqs[2:] - freqs[0:-2])
# Compute filterbank coeff (in fft domain, in bins)
fbank = np.zeros((nFiltTotal, nfft))
nfreqs = np.arange(nfft) / (1. * nfft) * fs
for i in range(nFiltTotal):
lowTrFreq = freqs[i]
cenTrFreq = freqs[i+1]
highTrFreq = freqs[i+2]
        lid = np.arange(np.floor(lowTrFreq * nfft / fs) + 1, np.floor(cenTrFreq * nfft / fs) + 1, dtype=int)  # np.int was deprecated in NumPy 1.20 and later removed
        lslope = heights[i] / (cenTrFreq - lowTrFreq)
        rid = np.arange(np.floor(cenTrFreq * nfft / fs) + 1, np.floor(highTrFreq * nfft / fs) + 1, dtype=int)
        rslope = heights[i] / (highTrFreq - cenTrFreq)
fbank[i][lid] = lslope * (nfreqs[lid] - lowTrFreq)
fbank[i][rid] = rslope * (highTrFreq - nfreqs[rid])
return fbank, freqs
def mfcc(X, fbank, nceps=13):
"""Computes the MFCCs of a frame, given the fft mag"""
mspec = np.log10(np.dot(X, fbank.T)+eps)
ceps = dct(mspec, type=2, norm='ortho', axis=-1)[:nceps]
return ceps
def extract_all_features0(sound_obj):
"""Extract the features from the sound object"""
# fl = file_length(sound_obj["params"])
test_mfcc_avg = avg_mfcc(sound_obj)
# return np.concatenate(([fl], test_mfcc_avg))
return test_mfcc_avg
def features_labels0():
"""Give a name to each feature"""
return ["mfcc{}".format(i) for i in range(13)]
def extract_all_features(sound_obj, wins=None, steps=None):
"""Extract the features from the sound object"""
sr = sound_obj["params"][2] # samplerate
nbs = sound_obj["params"][3] # number of samples
if wins is None:
wins = int(0.050 * sr)
if steps is None:
steps = int(nbs/15 - wins)
# Signal normalization
signal = sound_obj["sound"]
signal = signal / (2.0 ** 15)
DC = signal.mean()
MAX = (np.abs(signal)).max()
signal = (signal - DC) / (MAX + 0.0000000001)
N = len(signal) # total number of samples
curPos = steps // 2 # skip the very beginning
nFFT = wins // 2
# compute the triangular filter banks used in the mfcc calculation
#[fbank, _] = mfcc_init_filter_banks(sr, nFFT)
totalNumOfFeatures = 5 + 13
stFeatures = []
while curPos + wins - 1 < N: # for each short-term window until the end of signal
x = signal[curPos:curPos+wins] # get current window
curPos = curPos + steps # update window position
X = abs(fft(x)) # get fft magnitude
X = X[0:nFFT] # normalize fft
X = X / len(X)
curFV = np.zeros(totalNumOfFeatures)
curFV[0] = zcr(x) # zero crossing rate
curFV[1] = energy(x) # short-term energy
curFV[2] = energy_entropy(x) # short-term entropy of energy
[curFV[3], curFV[4]] = spectral_centroid_and_spread(X, sr) # spectral centroid and spread
# curFV[5] = stSpectralEntropy(X) # spectral entropy
# curFV[6] = stSpectralFlux(X, Xprev) # spectral flux
# curFV[7] = stSpectralRollOff(X, 0.90, sr) # spectral rolloff
# curFV[numOfTimeSpectralFeatures:numOfTimeSpectralFeatures+nceps, 0] = stMFCC(X, fbank, nceps).copy() # MFCCs
#
# chromaNames, chromaF = stChromaFeatures(X, sr, nChroma, nFreqsPerChroma)
# curFV[numOfTimeSpectralFeatures + nceps: numOfTimeSpectralFeatures + nceps + numOfChromaFeatures - 1] = chromaF
# curFV[numOfTimeSpectralFeatures + nceps + numOfChromaFeatures - 1] = chromaF.std()
#curFV[5:18] = mfcc(X, fbank, 13)
#curFV[0:13] = mfcc(X, fbank, 13)
curFV[5:18] = python_speech_features.mfcc(x, samplerate=sr, winlen=wins/sr, winstep=steps/sr)
# TEMP
#curFV = python_speech_features.mfcc(signal, samplerate=sr, winlen=wins, winstep=steps).T
stFeatures.append(curFV)
# stFeatures = np.array(stFeatures)
stFeatures = np.concatenate(stFeatures, 0).flatten()
#stFeatures = np.mean(stFeatures, axis=0)
# stFeatures = python_speech_features.mfcc(signal, samplerate=sr, winlen=wins/sr, winstep=steps/sr)
# stFeatures = np.mean(stFeatures, axis=0)
return stFeatures
# sound_obj2 = sound_obj.copy()
# sound_obj2["sound"] = signal
#
# # fl = file_length(sound_obj["params"])
# test_mfcc_avg = avg_mfcc(sound_obj2)
# # return np.concatenate(([fl], test_mfcc_avg))
# return test_mfcc_avg
def features_labels():
"""Give a name to each feature"""
return ["zrc", "energy", "en_ent", "centr", "spread"] + ["mfcc{}".format(i) for i in range(13)]
stats: avg_line_length 34.850467 | max_line_length 121 | alphanum_fraction 0.616519 | classes 0 (0) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 2,834 (0.379995)
hexsha: f558d1166458b00b259c7deac962e45e929e8c73 | size: 255 | ext: py | lang: Python
stars:  src/gme/estimate/__init__.py | USITC-Gravity-Group/GME | 640e1cd6a571e6802a62b5fdcb00544f3b8c0b32 | ["CC0-1.0"] | 10 | 2018-10-17T18:50:08.000Z | 2021-11-05T22:27:45.000Z
issues: src/gme/estimate/__init__.py | USITC-Gravity-Group/GME | 640e1cd6a571e6802a62b5fdcb00544f3b8c0b32 | ["CC0-1.0"] | 7 | 2020-06-03T20:04:10.000Z | 2021-03-31T13:59:01.000Z
forks:  src/gme/estimate/__init__.py | USITC-Gravity-Group/GME | 640e1cd6a571e6802a62b5fdcb00544f3b8c0b32 | ["CC0-1.0"] | 6 | 2020-05-12T12:43:55.000Z | 2022-02-25T08:47:17.000Z
from .combine_sector_results import *
from .DiagnosticsLog import *
from .EstimationModel import *
from .format_regression_table import *
from .save_and_load import *
from .SlimResults import *
from .Specification import *
from .visualize_results import *
stats: avg_line_length 31.875 | max_line_length 38 | alphanum_fraction 0.815686 | classes 0 (0) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 0 (0)
hexsha: f5592d87345b5a481da2afaed4ea4665c57dc09d | size: 2,435 | ext: py | lang: Python
stars:  tools/blender/io_export_curve.py | waskosky/patches | f80a33eb6fd029b905aca55894ec7a7526b89042 | ["MIT"] | 187 | 2015-09-21T15:08:57.000Z | 2017-07-31T08:01:22.000Z
issues: tools/blender/io_export_curve.py | waskosky/patches | f80a33eb6fd029b905aca55894ec7a7526b89042 | ["MIT"] | 1,533 | 2015-09-15T23:49:33.000Z | 2017-08-01T08:52:00.000Z
forks:  tools/blender/io_export_curve.py | waskosky/patches | f80a33eb6fd029b905aca55894ec7a7526b89042 | ["MIT"] | 52 | 2015-10-11T10:42:50.000Z | 2017-07-16T22:31:42.000Z
# Part of the Engi-WebGL suite.
from bpy.props import StringProperty
from bpy_extras.io_utils import ExportHelper
import os.path
import bpy
# (unused imports removed: mathutils, functools.reduce, sys, bmesh, math, struct, base64, itertools)
bl_info = {
'name': 'Curve Export (.json)',
'author': 'Lasse Nielsen',
'version': (0, 2),
'blender': (2, 72, 0),
'location': 'File > Export > Curve (.json)',
'description': 'Curve Export (.json)',
'category': 'Import-Export'
}
# Compress number representation to save as much space as possible.
def cnr(n):
s = '%.4f' % n
while s[-1] == '0':
s = s[:-1]
if s[-1] == '.':
s = s[:-1]
return s
def format_stream(ident, id, s):
return '%s%s: [%s]' % (ident, id, ','.join(map(cnr, s)))
class EngiCurveExporter(bpy.types.Operator, ExportHelper):
bl_idname = 'curve.json'
bl_label = 'Export Curve (.json)'
bl_options = {'PRESET'}
filename_ext = ".json"
filter_glob = StringProperty(default="*.json", options={'HIDDEN'})
#filepath = StringProperty()
filename = StringProperty()
directory = StringProperty()
# Black Magic...
check_extension = True
def execute(self, context):
filename = os.path.splitext(self.filename)[0]
filename = filename + '.json'
# Check for a valid selection. We expect a single object of type 'CURVE'.
        if bpy.context.active_object is None or bpy.context.active_object.type != 'CURVE':
            self.report({'ERROR'}, 'The current selection is invalid. Please select a single curve to export.')
            return {'CANCELLED'}
spline = bpy.context.active_object.data.splines[0]
points = spline.points
json = '{\n'
json += '\t"count": ' + str(len(points)) + ',\n'
x_stream = []
y_stream = []
z_stream = []
for point in points:
x_stream.append(point.co[0])
y_stream.append(point.co[1])
z_stream.append(point.co[2])
json += format_stream('\t', '"x"', x_stream) + ',\n'
json += format_stream('\t', '"y"', y_stream) + ',\n'
json += format_stream('\t', '"z"', z_stream) + '\n'
json += '}'
        with open(os.path.join(self.directory, filename), 'w') as out:
out.write(json)
return {'FINISHED'}
def menu_func(self, context):
self.layout.operator(EngiCurveExporter.bl_idname, text="Curve (.json)")
def register():
bpy.utils.register_class(EngiCurveExporter)
bpy.types.INFO_MT_file_export.append(menu_func)
def unregister():
bpy.utils.unregister_class(EngiCurveExporter)
bpy.types.INFO_MT_file_export.remove(menu_func)
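# Example of the JSON this exporter writes for a 3-point spline
# (illustrative values; the number streams are compacted by cnr() above):
# {
#     "count": 3,
#     "x": [0,0.5,1],
#     "y": [0,1,0],
#     "z": [0,0.25,0.5]
# }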
if __name__ == '__main__':
register()
stats: avg_line_length 24.59596 | max_line_length 85 | alphanum_fraction 0.657906 | classes 1,324 (0.543737) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 681 (0.279671)
hexsha: f5599fb599f6ac244f63777232a27937cf321454 | size: 2,678 | ext: py | lang: Python
stars:  organize/filters/mimetype.py | tank0226/organize | d5595a52f06ea6c805fe421dcc2429a3ccd03b09 | ["MIT"] | 1,231 | 2018-01-13T17:06:24.000Z | 2022-03-31T22:14:36.000Z
issues: organize/filters/mimetype.py | tank0226/organize | d5595a52f06ea6c805fe421dcc2429a3ccd03b09 | ["MIT"] | 170 | 2018-03-13T19:15:17.000Z | 2022-03-31T10:14:15.000Z
forks:  organize/filters/mimetype.py | tank0226/organize | d5595a52f06ea6c805fe421dcc2429a3ccd03b09 | ["MIT"] | 86 | 2018-03-14T02:12:49.000Z | 2022-03-27T00:16:07.000Z
import mimetypes
from pathlib import Path
from organize.utils import DotDict, flatten
from .filter import Filter
class MimeType(Filter):
"""
Filter by MIME type associated with the file extension.
Supports a single string or list of MIME type strings as argument.
The types don't need to be fully specified, for example "audio" matches everything
from "audio/midi" to "audio/quicktime".
    You can see a list of known MIME types on your system by running this one-liner:

    .. code-block:: bash

        python3 -c "import mimetypes as m; print('\\n'.join(sorted(set(m.common_types.values()) | set(m.types_map.values()))))"
Examples:
- Show MIME types:
.. code-block:: yaml
:caption: config.yaml
rules:
- folders: '~/Downloads'
filters:
- mimetype
actions:
- echo: '{mimetype}'
- Filter by "image" mimetype:
.. code-block:: yaml
:caption: config.yaml
rules:
- folders: '~/Downloads'
filters:
- mimetype: image
actions:
            - echo: 'This file is an image: {mimetype}'
- Filter by specific MIME type:
.. code-block:: yaml
:caption: config.yaml
rules:
- folders: '~/Desktop'
filters:
- mimetype: application/pdf
actions:
- echo: 'Found a PDF file'
- Filter by multiple specific MIME types:
.. code-block:: yaml
:caption: config.yaml
rules:
- folders: '~/Music'
filters:
- mimetype:
- application/pdf
- audio/midi
actions:
- echo: 'Found Midi or PDF.'
"""
def __init__(self, *mimetypes):
self.mimetypes = list(map(str.lower, flatten(list(mimetypes))))
@staticmethod
def mimetype(path):
type_, _ = mimetypes.guess_type(path, strict=False)
return type_
def matches(self, path: Path):
mimetype = self.mimetype(path)
if mimetype is None:
return False
if not self.mimetypes:
return True
return any(mimetype.startswith(x) for x in self.mimetypes)
def pipeline(self, args: DotDict):
if self.matches(args.path):
result = self.mimetype(args.path)
return {"mimetype": result}
return None
def __str__(self):
return "MimeType(%s)" % ", ".join(self.mimetypes)
stats: avg_line_length 26.514851 | max_line_length 125 | alphanum_fraction 0.5295 | classes 2,560 (0.955937) | generators 0 (0) | decorators 118 (0.044063) | async_functions 0 (0) | documentation 1,811 (0.676251)
hexsha: f55a03a501c8713245dc76b3760e3ffdd100d23e | size: 1,857 | ext: py | lang: Python
stars:  third_party/conan/recipes/libprotobuf-mutator/conanfile.py | tufeigunchu/orbit | 407354cf7c9159ff7e3177c603a6850b95509e3a | ["BSD-2-Clause"] | 1,847 | 2020-03-24T19:01:42.000Z | 2022-03-31T13:18:57.000Z
issues: third_party/conan/recipes/libprotobuf-mutator/conanfile.py | tufeigunchu/orbit | 407354cf7c9159ff7e3177c603a6850b95509e3a | ["BSD-2-Clause"] | 1,100 | 2020-03-24T19:41:13.000Z | 2022-03-31T14:27:09.000Z
forks:  third_party/conan/recipes/libprotobuf-mutator/conanfile.py | tufeigunchu/orbit | 407354cf7c9159ff7e3177c603a6850b95509e3a | ["BSD-2-Clause"] | 228 | 2020-03-25T05:32:08.000Z | 2022-03-31T11:27:39.000Z
from conans import ConanFile, CMake, tools
class LibprotobufMutatorConan(ConanFile):
name = "libprotobuf-mutator"
version = "20200506"
license = "Apache-2.0"
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
exports_sources = "patches/*",
build_requires = "protoc_installer/3.9.1@bincrafters/stable",
options = { "fPIC" : [True, False] }
default_options = { "fPIC" : True }
short_paths = True
def configure(self):
if self.settings.os == "Windows":
del self.options.fPIC
def source(self):
tools.get(**self.conan_data["sources"][self.version])
for patch in self.conan_data["patches"][self.version]:
tools.patch(**patch)
def requirements(self):
self.requires("lzma_sdk/19.00@orbitdeps/stable")
self.requires("zlib/1.2.11")
self.requires("protobuf/3.9.1@bincrafters/stable")
def build(self):
self._source_subfolder = self.conan_data["source_subfolder"][self.version]
cmake = CMake(self)
cmake.definitions["LIB_PROTO_MUTATOR_TESTING"] = False
cmake.definitions["CMAKE_CXX_FLAGS"] = "-fPIE"
cmake.definitions["CMAKE_C_FLAGS"] = "-fPIE"
cmake.configure(source_folder=self._source_subfolder)
cmake.build()
def package(self):
self.copy("*.h", dst="include",
src="{}/src".format(self._source_subfolder))
self.copy("*.h", dst="include/port",
src="{}/port".format(self._source_subfolder))
self.copy("*.lib", dst="lib", keep_path=False)
self.copy("*.pdb", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libdirs = ["lib"]
self.cpp_info.libs = ["protobuf-mutator-libfuzzer", "protobuf-mutator"]
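# --- Illustrative sketch of the conandata.yml this recipe reads via
# self.conan_data (the real entries live next to the recipe and are not shown
# in this file; the URLs and patch names below are placeholders) ---
#   sources:
#     "20200506":
#       url: "..."
#       sha256: "..."
#   patches:
#     "20200506":
#       - patch_file: "patches/..."
#   source_subfolder:
#     "20200506": "..."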
stats: avg_line_length 36.411765 | max_line_length 82 | alphanum_fraction 0.622509 | classes 1,811 (0.975229) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 482 (0.259558)
hexsha: f55c8c9f40e1cf4319ff4ee1c9422d7c3883f725 | size: 524 | ext: py | lang: Python
stars:  animation/common.py | codyly/locomotion-by-mann | 89139466829ef7802bf645f865e335d4cda444e4 | ["MIT"] | null | null | null
issues: animation/common.py | codyly/locomotion-by-mann | 89139466829ef7802bf645f865e335d4cda444e4 | ["MIT"] | null | null | null
forks:  animation/common.py | codyly/locomotion-by-mann | 89139466829ef7802bf645f865e335d4cda444e4 | ["MIT"] | null | null | null
import numpy as np
VEC_FORWARD = np.array([0, 0, 1])
VEC_UP = np.array([0, 1, 0])
VEC_RIGHT = np.array([1, 0, 0])
STYLE_NOMOVE = np.array([1, 0, 0, 0, 0, 0])
STYLE_TROT = np.array([0, 1, 0, 0, 0, 0])
STYLE_JUMP = np.array([0, 0, 1, 0, 0, 0])
STYLE_SIT = np.array([0, 0, 0, 1, 0, 0])
STYLE_STAND = np.array([0, 0, 0, 0, 1, 0])
STYLE_LAY = np.array([0, 0, 0, 0, 0, 1])
NUM_STYLES = 6
SYS_FREQ = 60
DURATION = 9
NUM_QUERIES = SYS_FREQ * DURATION
MOCAP_SAMPLE_PATH = "animation/data/mocap-sample.txt"
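# Note (added): the STYLE_* vectors above are one-hot encodings over the
# NUM_STYLES = 6 gait styles, and NUM_QUERIES works out to
# SYS_FREQ * DURATION = 60 * 9 = 540 queries.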
stats: avg_line_length 23.818182 | max_line_length 54 | alphanum_fraction 0.593511 | classes 0 (0) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 33 (0.062977)
hexsha: f55da49181d53035411252526f6236de7beb9882 | size: 2,222 | ext: py | lang: Python
stars:  codes3d/build_gene_index.py | Genome3d/codes3d-v1 | fe4897cb07bd8b2c10cfc29defe8570d447b69e0 | ["MIT"] | null | null | null
issues: codes3d/build_gene_index.py | Genome3d/codes3d-v1 | fe4897cb07bd8b2c10cfc29defe8570d447b69e0 | ["MIT"] | 4 | 2018-10-25T02:09:37.000Z | 2019-06-27T20:50:27.000Z
forks:  codes3d/build_gene_index.py | Genome3d/codes3d-v1 | fe4897cb07bd8b2c10cfc29defe8570d447b69e0 | ["MIT"] | null | null | null
#!/usr/bin/python
import argparse, codes3d, configparser, os
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create a BED file detailing the locations of genes in the genome, and a database containing additional gene information. Note: If a file in .gtf format is supplied, no other arguments are required.")
parser.add_argument("-i","--gene_files",required=True,nargs='+',help="The gene file/s to be indexed; either in tabular format, or, by default, the .gtf file format, as supplied by the GTEx project.")
parser.add_argument("-g","--symbol_col",type=int,help="The index of the column containing the gene symbol (non-zero based; default: ).")
parser.add_argument("-c","--chr_col",type=int,help="The index of the column containing the chromosome name (non-zero based; default: ).")
parser.add_argument("-s","--start_col",type=int,help="The index of the column containing the gene start site (non-zero based; default: ).")
parser.add_argument("-e","--end_col",type=int,help="The index of the column containing the gene end site (non-zero based; default: ).")
parser.add_argument("-p","--p_threshold_col",type=int,help="The index of the column containing the GTEx p-threshold for this gene (optional; non-zero based; default: ).")
parser.add_argument("-H","--no_header",action="store_true",help="Use this option if the table has no header.")
parser.add_argument("-b","--output_bed_fp",help="The path to which to output the resultant BED file of gene locations (default: the input file name with the extension \".bed\").")
parser.add_argument("-o","--output_db",help="The path to which to output the resultant gene index database (default: the input file name with the extension \".db\").")
parser.add_argument("-C","--config_file",default=os.path.join(os.path.dirname(__file__),"../docs/codes3d.conf"),help="The configuration file specifying the location of the CoDeS3D library (default: docs/codes3d.conf).")
args = parser.parse_args()
config = configparser.ConfigParser()
config.read(args.config_file)
codes3d.build_gene_index(args.gene_files,args.output_bed_fp,args.output_db,config,args.symbol_col,args.chr_col,args.start_col,args.end_col,args.p_threshold_col,args.no_header)
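# Example invocation (the file name is hypothetical; per the description above,
# a .gtf input needs no column flags):
#   python build_gene_index.py -i gencode_annotation.gtf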
stats: avg_line_length 96.608696 | max_line_length 246 | alphanum_fraction 0.756976 | classes 0 (0) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 1,414 (0.636364)
hexsha: f55e3e29a41fea6104e2a766525f7a160ac34c13 | size: 5,900 | ext: py | lang: Python
stars:  Kinematic/forward.py | DDDong2666/tum-adlr-ws20-02 | 2e439886e0287777589cd276d614fd03bea4ed0c | ["MIT"] | null | null | null
issues: Kinematic/forward.py | DDDong2666/tum-adlr-ws20-02 | 2e439886e0287777589cd276d614fd03bea4ed0c | ["MIT"] | null | null | null
forks:  Kinematic/forward.py | DDDong2666/tum-adlr-ws20-02 | 2e439886e0287777589cd276d614fd03bea4ed0c | ["MIT"] | null | null | null
import numpy as np
from Optimizer.path import get_x_substeps
from Kinematic import frames, chain as kc
def initialize_frames(shape, robot, mode='hm'):
return frames.initialize_frames(shape=shape + (robot.n_frames,), n_dim=robot.n_dim, mode=mode)
def initialize_frames_jac(shape, robot, mode='hm'):
f = initialize_frames(shape=shape, robot=robot, mode=mode)
j = frames.initialize_frames(shape=shape + (robot.n_dof, robot.n_frames), n_dim=robot.n_dim, mode='zero')
return f, j
# General
def get_frames(q, robot):
return robot.get_frames(q)
def get_frames_jac(*, q, robot):
return robot.get_frames_jacs(q=q)
def get_x_frames(*, q, robot):
return robot.get_frames(q=q)[..., :-1, -1]
def frames2pos(f, frame_idx, rel_pos):
return (f[:, :, frame_idx, :, :] @ rel_pos[:, :, np.newaxis])[..., :-1, 0]
def frames2spheres(f, robot):
"""
x_spheres (n_samples, n_wp, n_links, n_dim)
"""
return frames2pos(f, frame_idx=robot.spheres_frame_idx, rel_pos=robot.spheres_position)
def frames2spheres_jac(f, j, robot):
"""
x_spheres (n_samples, n_wp, n_spheres, n_dim)
dx_dq (n_samples, n_wp, n_dof, n_spheres, n_dim)
"""
x_spheres = frames2spheres(f=f, robot=robot)
dx_dq = (j[:, :, :, robot.spheres_frame_idx, :, :] @ robot.spheres_position[:, :, np.newaxis])[..., :-1, 0]
return x_spheres, dx_dq
def get_x_spheres(q, robot, return_frames2=False):
f = robot.get_frames(q=q)
x_spheres = frames2spheres(f=f, robot=robot)
if return_frames2:
return f, x_spheres
else:
return x_spheres
def get_x_spheres_jac(*, q, robot, return_frames2=False):
f, j = robot.get_frames_jac(q=q)
x_spheres, dx_dq = frames2spheres_jac(f=f, j=j, robot=robot)
if return_frames2:
return (f, j), (x_spheres, dx_dq)
else:
return x_spheres, dx_dq
def get_x_spheres_substeps(*, q, robot, n_substeps, return_frames2=False):
q_ss = get_x_substeps(x=q, n_substeps=n_substeps, infinity_joints=robot.infinity_joints, include_end_point=True)
return get_x_spheres(q=q_ss, robot=robot, return_frames2=return_frames2)
def get_x_spheres_substeps_jac(*, q, robot, n_substeps, return_frames2=False):
q_ss = get_x_substeps(x=q, n_substeps=n_substeps, infinity_joints=robot.infinity_joints, include_end_point=True)
return get_x_spheres_jac(q=q_ss, robot=robot, return_frames2=return_frames2)
def get_frames_substeps(*, q, robot, n_substeps):
q_ss = get_x_substeps(x=q, n_substeps=n_substeps, infinity_joints=robot.infinity_joints, include_end_point=True)
return get_frames(q=q_ss, robot=robot)
def get_frames_substeps_jac(*, q, robot, n_substeps):
q_ss = get_x_substeps(x=q, n_substeps=n_substeps, infinity_joints=robot.infinity_joints, include_end_point=True)
return get_frames_jac(q=q_ss, robot=robot)
# nfi - next frame index
# iff - influence frame frame
# Helper
# Combine fun
def create_frames_dict(f, nfi):
"""
Create a dict to minimize the calculation of unnecessary transformations between the frames
The value to the key 0 holds all transformations form the origin to the whole chain.
Each next field holds the transformation from the current frame to all frames to come.
The calculation happens from back to front, to save some steps
# 0 1 2 3 4
# F01
# F02 F12
# F03 F13 F23
# F04 F14 F24 F34
# F05 F15 F25 F35 F45
"""
n_frames = f.shape[-3]
d = {}
for i in range(n_frames - 1, -1, -1):
nfi_i = nfi[i]
if nfi_i == -1:
d[i] = f[..., i:i + 1, :, :]
elif isinstance(nfi_i, (list, tuple)):
d[i] = np.concatenate([
f[..., i:i + 1, :, :],
f[..., i:i + 1, :, :] @ np.concatenate([d[j] for j in nfi_i], axis=-3)],
axis=-3)
else:
d[i] = np.concatenate([f[..., i:i + 1, :, :],
f[..., i:i + 1, :, :] @ d[nfi_i]], axis=-3)
return d
def combine_frames(f, prev_frame_idx):
for i, pfi in enumerate(prev_frame_idx[1:], start=1):
f[..., i, :, :] = f[..., pfi, :, :] @ f[..., i, :, :]
def combine_frames_jac(j, d, robot):
jf_all, jf_first, jf_last = kc.__get_joint_frame_indices_first_last(jfi=robot.joint_frame_idx)
pfi_ = robot.prev_frame_idx[jf_first]
joints_ = np.arange(robot.n_dof)[pfi_ != -1]
jf_first_ = jf_first[pfi_ != -1]
pfi_ = pfi_[pfi_ != -1]
# Previous to joint frame
# j(b)__a_b = f__a_b * j__b
j[..., joints_, jf_first_, :, :] = (d[0][..., pfi_, :, :] @ j[..., joints_, jf_first_, :, :])
# After
for i in range(robot.n_dof):
jf_inf_i = robot.joint_frame_influence[i, :]
jf_inf_i[:jf_last[i] + 1] = False
nfi_i = robot.next_frame_idx[jf_last[i]]
# Handle joints which act on multiple frames
if jf_first[i] != jf_last[i]:
for kk, fj_cur in enumerate(jf_all[i][:-1]):
jf_next = jf_all[i][kk + 1]
jf_next1 = jf_next - 1
if jf_next - fj_cur > 1:
j[..., i, fj_cur + 1:jf_next, :, :] = (j[..., i, fj_cur:fj_cur + 1, :, :] @
d[robot.next_frame_idx[fj_cur]][..., :jf_next - fj_cur - 1, :, :])
j[..., i, jf_next, :, :] = ((j[..., i, jf_next1, :, :] @ d[robot.next_frame_idx[jf_next1]][..., 0, :, :]) +
(d[0][..., jf_next1, :, :] @ j[..., i, jf_next, :, :]))
# j(b)__a_c = j__a_b * f__b_c
if isinstance(nfi_i, (list, tuple)):
            j[..., i, jf_inf_i, :, :] = (j[..., i, jf_last[i]:jf_last[i] + 1, :, :] @ np.concatenate([d[k] for k in nfi_i], axis=-3))  # fixed: the original indexed `[..., :, ]` (one axis short) and reused `j` as the comprehension variable
elif nfi_i != -1:
j[..., i, jf_inf_i, :, :] = (j[..., i, jf_last[i]:jf_last[i] + 1, :, :] @ d[nfi_i])
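# --- Minimal sketch (illustrative; assumes the package imports at the top of
# this module resolve): build the frame dict for a 3-frame serial chain of
# identity transforms, where nfi[i] is the next-frame index and -1 marks a leaf ---
if __name__ == '__main__':
    f_demo = np.tile(np.eye(4), (1, 1, 3, 1, 1))  # (n_samples, n_wp, n_frames, 4, 4)
    d_demo = create_frames_dict(f_demo, nfi=[1, 2, -1])
    print(d_demo[0].shape)  # (1, 1, 3, 4, 4): origin-to-frame transforms F01, F02, F03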
stats: avg_line_length 33.908046 | max_line_length 132 | alphanum_fraction 0.597119 | classes 0 (0) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 901 (0.152712)
hexsha: f560897ff46b99cf1a7890d1251f2fa26c8a2e3a | size: 977 | ext: py | lang: Python
stars:  dnslookup.py | r1nzler/dnslookup | 74613614b694602244582bfd555ffd8a5dea8bff | ["MIT"] | null | null | null
issues: dnslookup.py | r1nzler/dnslookup | 74613614b694602244582bfd555ffd8a5dea8bff | ["MIT"] | null | null | null
forks:  dnslookup.py | r1nzler/dnslookup | 74613614b694602244582bfd555ffd8a5dea8bff | ["MIT"] | null | null | null
import argparse

import dns.resolver

parser = argparse.ArgumentParser()
parser.add_argument('-l', "--list", help="List of dns names you want IP's for")
parser.add_argument('-o', "--output", help="Output file to save list")
args = parser.parse_args()
ip_list = []  # the original wrote `[...]`, which puts a literal Ellipsis into the list
if args.list:
    # Open the file only after checking the argument; the original opened it
    # unconditionally and crashed when --list was omitted.
    with open(args.list, 'r', newline='') as subs:
        for host in subs:
            host = host.strip('\n')
            # str.strip() removes a *set of characters* from both ends, so
            # strip('https://') also mangles names starting/ending in
            # h/t/p/s/:/ characters; remove the scheme prefix explicitly.
            for prefix in ('https://', 'http://'):
                if host.startswith(prefix):
                    host = host[len(prefix):]
            try:
                answer = dns.resolver.query(host, 'A')
                for item in answer:
                    if item not in ip_list:
                        ip_list.append(item)
                        print(item)
            except Exception:
                pass  # unresolvable hosts are skipped
if args.output:
    with open(args.output, "w") as out:
        for p in ip_list:
            out.write(str(p))
            out.write("\n")
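# Example (hypothetical file names; one host per line in hosts.txt):
#   python dnslookup.py -l hosts.txt -o ips.txt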
stats: avg_line_length 27.914286 | max_line_length 79 | alphanum_fraction 0.518936 | classes 0 (0) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 164 (0.167861)
hexsha: f5609c24bd958aa1dc8093dff8643942d2269130 | size: 8,416 | ext: py | lang: Python
stars:  eval/report.py | DBCobra/CobraBench | d48697248948decc206cfba0a6e40fea8a772ff9 | ["MIT"] | 1 | 2021-03-03T06:52:50.000Z | 2021-03-03T06:52:50.000Z
issues: eval/report.py | DBCobra/CobraBench | d48697248948decc206cfba0a6e40fea8a772ff9 | ["MIT"] | 1 | 2021-03-05T09:36:50.000Z | 2021-03-08T12:02:53.000Z
forks:  eval/report.py | DBCobra/CobraBench | d48697248948decc206cfba0a6e40fea8a772ff9 | ["MIT"] | 1 | 2021-03-03T06:57:02.000Z | 2021-03-03T06:57:02.000Z
import pandas
import numpy as np
import math
import os
import sys
import re
from utils import *
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
percentiles = [ 10, 25, 50, 75, 90, 95, 99, 99.9 ]
DATA_FOLDER = DIR_PATH + '/data'
def getResult(trial_string, thread, client_num=2):
print("thread: {}".format(thread))
datas = []
end_times = []
for i in range(1, client_num+1):
first_start_time = math.inf
last_start_time = 0
first_end_time = math.inf
last_end_time = 0
trial_name = DIR_PATH + '/./trials/{}-{}-{}'.format(i, trial_string, thread)
lats_folder = trial_name + '/cobra/lats'
if not os.path.exists(lats_folder):
continue
files = os.listdir(lats_folder)
for fname in files:
fpath = lats_folder + '/' + fname
data = pandas.read_csv(fpath, sep=' ').values
start_time = np.min(data[:, 0])
end_time = np.max(data[:, 1])
first_start_time = min(first_start_time, start_time)
last_start_time = max(last_start_time, start_time)
first_end_time = min(first_end_time, end_time)
last_end_time = max(last_end_time, end_time)
end_times.append(first_end_time - first_start_time)
data -= first_start_time
datas.append(data)
print("{}: start time gap: {}, end time gap: {}".format(i, (last_start_time - first_start_time) / 1e9,
(last_end_time - first_end_time) / 1e9))
print("total end time gap of all clients: {}s".format((max(end_times)-min(end_times))/1e9))
    count_start = 0
    count_end = min(end_times)
    count_time = count_end - count_start
print("total time: {}s".format((last_end_time - first_start_time)/1e9))
print("counted time: {}s".format(count_time/1e9))
res = []
res.append(thread)
lats = []
before_trimming = 0
for data in datas:
before_trimming += data.shape[0]
data = data[np.where(data[:,1] > count_start)]
data = data[np.where(data[:,1] < count_end)]
lats += (data[:,1]-data[:,0]).tolist()
print("Data size before trimming: {}, after trimming: {}".format(before_trimming, len(lats)))
tps = len(lats)/count_time*1e9
res.append(tps)
print('TPS: {}'.format(tps))
lats = np.array(lats)
lats.sort()
print('Latencies:')
for per in percentiles:
latency_value = np.percentile(lats, per)/1e6
print('{}%(ms) : {}'.format(per, latency_value))
res.append(latency_value)
# plt.hist(lats[:-int(0.001*len(lats))], bins="auto")
# plt.show()
return res
def get_report(trial_string, client_num):
thread_tps_lats = []
threads = {}
dir_names = os.listdir('trials')
for s in dir_names:
if '-'+trial_string+'-' in s:
threads[int(s.split('-')[-1])] = True
if len(threads.keys()) == 0:
return
for thread in sorted(threads.keys()):
res = getResult(trial_string, thread, client_num)
thread_tps_lats.append(res)
df = pandas.DataFrame(thread_tps_lats)
if not os.path.exists(DATA_FOLDER):
os.makedirs(DATA_FOLDER)
fname = DATA_FOLDER + '/{}.data'.format(trial_string)
df.to_csv(fname, sep=' ', header=['#thread', 'tps']+percentiles, index=False, float_format="%.5f")
printG("FINISHED: " + trial_string)
def get_network_old(fname):
net_thpt_rx = []
net_thpt_tx = []
with open(fname) as f:
for sline in f:
line = sline.split()
net_thpt_tx.append(float(line[1]))
net_thpt_rx.append(float(line[2]))
net_thpt_rx = np.array(net_thpt_rx)
net_thpt_tx = np.array(net_thpt_tx)
net_thpt_rx.sort()
net_thpt_tx.sort()
# print('receive peak: {}, send peak: {}'.format(net_thpt_rx[-1], net_thpt_tx[-1]))
    # NOTE: despite the name, len(net_thpt_rx) * 100 / 30 is about 3.3x the
    # array length, so the slices below effectively average everything up to
    # the peak; kept as in the original because the intended fraction is unclear.
    top10p = int(len(net_thpt_rx) * 100 / 30)
avg_rx = net_thpt_rx[-top10p: -1].mean()
avg_tx = net_thpt_tx[-top10p: -1].mean()
# print('avg of top 10% rx: {}'.format(avg_rx))
# print('avg of top 10% tx: {}'.format(avg_tx))
return avg_rx, avg_tx
def get_num_op(trial_string):
threads = {}
dir_names = os.listdir('trials')
for s in dir_names:
if trial_string in s:
threads[int(s.split('-')[-1])] = True
if len(threads.keys()) == 0:
printB('not found: ' + trial_string)
return
thread = 24
trial_name = DIR_PATH + '/./trials/{}-{}-{}/client.txt'.format(1, trial_string, thread)
result = ''
with open(trial_name) as f:
for line in f:
if re.search(r'NumOp: [0-9]+', line):
result = line
break
result = result.split()[1]
return result
def get_network(fname):
lines = []
with open(fname) as f:
for sline in f:
line = sline.split()
lines.append(line)
rx = int(lines[2][4]) - int(lines[0][4])
tx = int(lines[3][4]) - int(lines[1][4])
return (rx, tx)
def get_trace_size(trial_string):
threads = {}
dir_names = os.listdir('trials')
for s in dir_names:
if trial_string in s:
threads[int(s.split('-')[-1])] = True
if len(threads.keys()) == 0:
printB('not found: ' + trial_string)
return
thread = 24
trial_name = DIR_PATH + '/./trials/{}-{}-{}/client.txt'.format(1, trial_string, thread)
result = ''
with open(trial_name) as f:
for line in f:
if re.search(r'SizeOfTrace: [0-9]+', line):
result = line
break
result = result.split()[1]
return result
def main():
if len(sys.argv) == 1:
databases = ['rocksdb', 'postgres', 'google']
workload = 'cheng'
inst_level = 'cloud'
for database in databases:
for contention in ['low', 'high']:
for workload in ['cheng', 'tpcc', 'twitter', 'ycsb', 'rubis']:
for inst_level in ['no', 'ww', 'cloud', 'cloudnovnofz', 'cloudnofz', 'local']:
trial_string = '{}-{}-{}-{}'.format(database, workload, contention, inst_level)
get_report(trial_string, 10 if database == 'postgres' else 1)
elif sys.argv[1] == 'net':
database = 'postgres'
workloads = ['cheng', 'ycsb', 'twitter', 'rubis', 'tpcc']
inst_levels = ['no', 'local']
result_str = 'workload ' + ' '.join(inst_levels) + '\n'
for contention in ['low']:
for workload in workloads:
result_row = workload
for inst_level in inst_levels:
trial_string = '{}-{}-{}-{}'.format(database, workload, contention, inst_level)
thread = 24
rx, tx = get_network('netstats/netstats-'+trial_string + '-{}.log'.format(thread))
print('{}-{}: {}, {}'.format(workload, inst_level, rx, tx))
result_row += ' {}'.format(tx)
result_str += result_row + '\n'
print(result_str)
return
    elif sys.argv[1] == 'numop':
        # NOTE: the original never assigned `database` in this branch, which
        # raised a NameError below; 'rocksdb' is an assumed default here.
        database = 'rocksdb'
        inst_levels = ['no', 'cloud', 'ww']
result_str = 'workload ' + ' '.join(inst_levels) + '\n'
for contention in ['low']:
for workload in ['cheng', 'tpcc', 'twitter', 'ycsb', 'rubis']:
result_str += workload
for inst_level in inst_levels:
trial_string = '{}-{}-{}-{}'.format(database, workload, contention, inst_level)
numop = get_num_op(trial_string)
result_str += ' {}'.format(numop)
result_str += '\n'
print(result_str)
elif sys.argv[1] == 'tracesize':
database = 'rocksdb'
inst_levels = ['cloud', 'ww']
result_str = 'workload ' + ' '.join(inst_levels) + '\n'
for contention in ['low']:
for workload in ['cheng', 'ycsb', 'twitter', 'rubis', 'tpcc']:
result_str += workload
for inst_level in inst_levels:
trial_string = '{}-{}-{}-{}'.format(database, workload, contention, inst_level)
numop = get_trace_size(trial_string)
result_str += ' {}'.format(numop)
result_str += '\n'
print(result_str)
if __name__ == "__main__":
main()
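# Usage (run from the directory that contains `trials/`):
#   python report.py              # aggregate TPS / latency tables into eval/data
#   python report.py net          # network throughput per workload
#   python report.py numop        # operation counts
#   python report.py tracesize    # trace sizes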
stats: avg_line_length 33.52988 | max_line_length 110 | alphanum_fraction 0.557153 | classes 0 (0) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 1,248 (0.148289)
hexsha: f560efe52fd0d8fc1e6638e6bf52578a71fd2927 | size: 1,821 | ext: py | lang: Python
stars:  platypush/backend/foursquare.py | RichardChiang/platypush | 1777ebb0516118cdef20046a92caab496fa7c6cb | ["MIT"] | 228 | 2018-01-30T11:17:09.000Z | 2022-03-24T11:22:26.000Z
issues: platypush/backend/foursquare.py | RichardChiang/platypush | 1777ebb0516118cdef20046a92caab496fa7c6cb | ["MIT"] | 167 | 2017-12-11T19:35:38.000Z | 2022-03-27T14:45:30.000Z
forks:  platypush/backend/foursquare/__init__.py | BlackLight/runbullet | 8d26c8634d2677b4402f0a21b9ab8244b44640db | ["MIT"] | 16 | 2018-05-03T07:31:56.000Z | 2021-12-05T19:27:37.000Z
from typing import Optional
from platypush.backend import Backend
from platypush.context import get_plugin
from platypush.message.event.foursquare import FoursquareCheckinEvent
class FoursquareBackend(Backend):
"""
This backend polls for new check-ins on the user's Foursquare account and triggers an event when a new check-in
occurs.
Requires:
* The :class:`platypush.plugins.foursquare.FoursquarePlugin` plugin configured and enabled.
Triggers:
- :class:`platypush.message.event.foursquare.FoursquareCheckinEvent` when a new check-in occurs.
"""
_last_created_at_varname = '_foursquare_checkin_last_created_at'
def __init__(self, poll_seconds: Optional[float] = 60.0, *args, **kwargs):
"""
:param poll_seconds: How often the backend should check for new check-ins (default: one minute).
"""
super().__init__(*args, poll_seconds=poll_seconds, **kwargs)
self._last_created_at = None
def __enter__(self):
self._last_created_at = int(get_plugin('variable').get(self._last_created_at_varname).
output.get(self._last_created_at_varname) or 0)
self.logger.info('Started Foursquare backend')
def loop(self):
checkins = get_plugin('foursquare').get_checkins().output
if not checkins:
return
last_checkin = checkins[0]
last_checkin_created_at = last_checkin.get('createdAt', 0)
if self._last_created_at and last_checkin_created_at <= self._last_created_at:
return
self.bus.post(FoursquareCheckinEvent(checkin=last_checkin))
self._last_created_at = last_checkin_created_at
get_plugin('variable').set(**{self._last_created_at_varname: self._last_created_at})
# vim:sw=4:ts=4:et:
stats: avg_line_length 34.358491 | max_line_length 115 | alphanum_fraction 0.697968 | classes 1,618 (0.888523) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 624 (0.342669)
hexsha: f560ffe95556ccc11b3d6d39837b76f47f81ba08 | size: 2,980 | ext: py | lang: Python
stars:  src/data/make_dataset.py | karsti11/caffe_bar_sales_analysis | f7001bbf2d09c1ceeb8aef35322652a8495949ed | ["MIT"] | null | null | null
issues: src/data/make_dataset.py | karsti11/caffe_bar_sales_analysis | f7001bbf2d09c1ceeb8aef35322652a8495949ed | ["MIT"] | null | null | null
forks:  src/data/make_dataset.py | karsti11/caffe_bar_sales_analysis | f7001bbf2d09c1ceeb8aef35322652a8495949ed | ["MIT"] | null | null | null
import os
import time
import pandas as pd
from src.utils import get_project_root
from src.data.item_names_replacement import REPLACE_DICT1  # the original imported REPLACE_DICT1 twice; a second mapping may have been intended
YEARS = [str(x) for x in list(range(2013,2021))]
ROOT_DIR = get_project_root()
def string_to_float(number):
#Custom function for converting 'sales_value' column to float
#because of faulty data. 28 rows have eg. '400.200.000.000.000.000'
try:
return float(number)
except:
return 0.5
def load_data(data_abs_path: str) -> pd.DataFrame:
"""Load raw data
Parameters:
-----------
data_abs_path: absolute path of csv data
Returns:
--------
data_df: raw data dataframe
"""
data_df = pd.read_csv(data_abs_path)
data_df.sales_datetime = pd.to_datetime(data_df.sales_datetime, format='%Y-%m-%d', utc=True)
data_df.set_index('sales_datetime', inplace=True)
return data_df
def arrange_data(data_df):
# Drop unnecessary columns -> no known meaning
data_df.drop(labels=[4,10,11], axis=1, inplace=True)
data_df.columns = ['bar_name', 'number2', 'feature1', 'sales_datetime', 'feature2',
'item_name', 'item_class', 'sales_qty', 'feature3', 'sales_value']
#data_df.sales_value=data_df.sales_value.apply(lambda x: string_to_float(x))
data_df.sales_datetime = pd.to_datetime(data_df.sales_datetime, utc=True)
data_df.set_index('sales_datetime', inplace=True)
data_df['item_price'] = abs(data_df['sales_value']/data_df['sales_qty'])
return data_df
def load_dataset():
columns_to_keep = ['item_name', 'sales_qty', 'sales_value', 'item_price']
all_data_df = pd.DataFrame(columns = columns_to_keep)
for year in YEARS:
start_time = time.time()
filename = os.path.join(ROOT_DIR, f'data/raw/{year}_eKasa_RECEIPT_ENTRIES.csv')
df = pd.read_csv(filename,
delimiter=';',
header=None,
converters={12: string_to_float})
data_df = arrange_data(df)
        all_data_df = pd.concat([all_data_df, data_df[columns_to_keep]])  # DataFrame.append was removed in pandas 2.0
print("Dataframe shape: ",df.shape)
#print("Dataframe head: ",df.head())
end_time = time.time()
print("Time (s): ", end_time-start_time)
print(f"{year} done.")
all_data_df.sales_qty = all_data_df.sales_qty.astype('int64')
    all_data_df.item_name.replace(to_replace=REPLACE_DICT1, inplace=True)  # applied once; the original ran the identical replacement twice
all_data_df.index.name = 'sales_date'
all_data_daily_sales = all_data_df.groupby(['item_name', pd.Grouper(freq='D')]).agg({'sales_qty':'sum',
'item_price': 'mean',
'sales_value': 'sum'}).reset_index()
print(all_data_daily_sales)
return all_data_daily_sales
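# Usage sketch (illustrative; expects data/raw/{year}_eKasa_RECEIPT_ENTRIES.csv
# files under the project root returned by get_project_root()):
#   from src.data.make_dataset import load_dataset
#   daily_sales = load_dataset()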
stats: avg_line_length 40.27027 | max_line_length 125 | alphanum_fraction 0.632215 | classes 0 (0) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 868 (0.291275)
hexsha: f5617dd2284793a4d37b296ffc5aba3ca5a2e5d9 | size: 1,143 | ext: py | lang: Python
stars:  aiml_bot/utilities.py | hosford42/pyaiml | 42bb344d5f1d75c136e512bd05a44945d506f490 | ["BSD-2-Clause"] | 9 | 2017-08-17T08:34:44.000Z | 2021-01-06T16:08:09.000Z
issues: aiml_bot/utilities.py | hosford42/pyaiml | 42bb344d5f1d75c136e512bd05a44945d506f490 | ["BSD-2-Clause"] | 2 | 2017-08-17T19:53:41.000Z | 2020-01-22T23:19:44.000Z
forks:  aiml_bot/utilities.py | hosford42/pyaiml | 42bb344d5f1d75c136e512bd05a44945d506f490 | ["BSD-2-Clause"] | 1 | 2018-07-29T19:16:14.000Z | 2018-07-29T19:16:14.000Z
"""
This file contains assorted general utility functions used by other
modules in the aiml_bot package.
"""
# TODO: Correctly handle abbreviations.
def split_sentences(text: str) -> list:
"""Split the string s into a list of sentences."""
if not isinstance(text, str):
raise TypeError(text)
position = 0
results = []
length = len(text)
while position < length:
try:
period = text.index('.', position)
except ValueError:
period = length + 1
try:
question = text.index('?', position)
except ValueError:
question = length + 1
try:
exclamation = text.index('!', position)
except ValueError:
exclamation = length + 1
end = min(period, question, exclamation)
sentence = text[position:end].strip()
if sentence:
results.append(sentence)
position = end + 1
# If no sentences were found, return a one-item list containing
# the entire input string.
if not results:
results.append(text.strip())
return results
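# --- Quick demonstration (illustrative) ---
if __name__ == '__main__':
    print(split_sentences("Hello there. How are you? Fine!"))
    # -> ['Hello there', 'How are you', 'Fine']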
stats: avg_line_length 29.307692 | max_line_length 67 | alphanum_fraction 0.594051 | classes 0 (0) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 311 (0.272091)
hexsha: f565e620ce2b4fec57d532c3907bb966211865f1 | size: 5,858 | ext: py | lang: Python
stars:  hard-gists/5181631/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | ["Apache-2.0"] | 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z
issues: hard-gists/5181631/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | ["Apache-2.0"] | 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z
forks:  hard-gists/5181631/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | ["Apache-2.0"] | 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z
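# NOTE (added): this gist targets IronPython/.NET (it imports System.Console
# and System.Threading), so it will not run under CPython; launch it with
# `ipy snippet.py`.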
import os, time, random
from collections import defaultdict
from System import Console, ConsoleColor, ConsoleKey
from System.Threading import Thread, ThreadStart
class Screen(object):
red = ConsoleColor.Red; green = ConsoleColor.Green; blue = ConsoleColor.Blue;black = ConsoleColor.Black
dimension = (21,39)
def __update_input(self):
mapping = defaultdict(lambda: None,
{ConsoleKey.A:Snake.left,ConsoleKey.J:Snake.left, ConsoleKey.LeftArrow:Snake.left,
ConsoleKey.D:Snake.right,ConsoleKey.L:Snake.right,ConsoleKey.RightArrow:Snake.right,
ConsoleKey.W:Snake.up,ConsoleKey.I:Snake.up,ConsoleKey.UpArrow:Snake.up,
ConsoleKey.S:Snake.down,ConsoleKey.K:Snake.down,ConsoleKey.DownArrow:Snake.down})
while True: self.last_input = mapping[Console.ReadKey(True).Key]
def __init__(self):
self.last_input = None; self.__input_update_thread = Thread(ThreadStart(self.__update_input)); self.__input_update_thread.Start()
os.system("cls") # os.system("clear")
Console.Title = "Snake by LuYU426"
# The next line needed to be commented out on Unix-like systems. However before running, the console needs to be adjusted accordingly
Console.CursorVisible = False; Console.WindowWidth = 80; Console.WindowHeight = 25;Console.BufferHeight = Console.WindowHeight; Console.BufferWidth = Console.WindowWidth
for i in range(0,24):
for j in range(0, 80):
if i == 0 or j == 0: self.__show(j, i, Screen.black, "#")
elif i == 22 or j == 79: self.__show(j, i, Screen.black,"#")
else: self.__show(j, i, Screen.black," ")
def __show(self,left,top,color,content): Console.CursorLeft = left; Console.CursorTop = top; Console.BackgroundColor = color; Console.Write(content)
def show_score(self,score): self.__show(3,23,Screen.black,"Score: {0}".format(score))
def color(self, position, width, height, color):
for row in range(position[0], position[0] + height):
for col in range(position[1], position[1] + width):
self.__show(col * 2 + 1,row + 1,color," ")
class GameLogic(object):
def update(self, screen, snake, fruit, stats):
stats.increase_score()
screen.show_score(stats.current_score)
update_result = snake.update(screen.last_input,fruit.current_position)
if update_result[0] == False: return True
if update_result[1] == True: return False
if update_result[2][0] < 0 or update_result[2][1] < 0: return False
if update_result[2][0] >= Screen.dimension[0] or update_result[2][1] >= Screen.dimension[1]: return False
screen.color(update_result[2],1,1,screen.green)
if update_result[3] is None:
fruit.reset_position()
while snake.position_in_buffer(fruit.current_position): fruit.reset_position()
screen.color(fruit.current_position,1,1,screen.red)
stats.increase_level()
else: screen.color(update_result[3],1,1,screen.black)
return True
def end(self): screen.color((0,0),39,21,Screen.blue)
class Snake(object):
up = 0x00; down = 0x01; left = 0x10; right = 0x11
def __init__(self):
self.__buffer = list(); self.__current_time_slice = 0
self.__buffer = [[Screen.dimension[0]/2 + 1,Screen.dimension[1]/2 + 1]]
self.__current_direction = Snake.up
def __current_speed(self):
_s = 8 - len(self.__buffer)/2
return 1 if _s < 1 else _s
def position_in_buffer(self, fruit_pos):
for item in self.__buffer:
if item == fruit_pos:
return True
return False
# returns [whether_need_update_screen(bool), whether_fail(bool), head_pos_to_draw(x,y), tail_pos_to_remove(x,y)]
def update(self, direction, fruit_pos):
self.__current_time_slice += 1
self.__current_time_slice %= self.__current_speed()
if self.__current_time_slice != 0: return [False, False]
if direction is None: direction = self.__current_direction
if direction ^ self.__current_direction == 0x01: direction = self.__current_direction
self.__current_direction = direction; candidate = [0, 0]; head = self.__buffer[len(self.__buffer) - 1]
candidate[0] = head[0] + 1 if self.__current_direction == Snake.down else head[0] - 1 if self.__current_direction == Snake.up else head[0]
candidate[1] = head[1] + 1 if self.__current_direction == Snake.right else head[1] - 1 if self.__current_direction == Snake.left else head[1]
if self.position_in_buffer(candidate): return [True, True]
if candidate == fruit_pos: self.__buffer.append(candidate); return [True, False, candidate, None]
else:
self.__buffer.append(candidate); tail = self.__buffer[0]; self.__buffer.remove(tail)
return [True, False, candidate, tail]
class Fruit(object):
def __init__(self): self.reset_position()
@property
def current_position(self): return self.__position
def reset_position(self): self.__position = [random.randint(0,Screen.dimension[0]-1),random.randint(0,Screen.dimension[1]-1)]
class Statistics(object):  # renamed from "Stastics" (typo in the original)
def __init__(self): self.current_score = 0; self.__level = 0
def increase_score(self): self.current_score += 1
def increase_level(self): self.__level += 1; self.current_score += pow(2,self.__level-1)
if __name__ == "__main__":
    screen = Screen(); logic = GameLogic(); stats = Statistics(); fruit = Fruit(); snake = Snake()
while snake.position_in_buffer(fruit.current_position): fruit.reset_position()
screen.color(fruit.current_position,1,1,screen.red)
while logic.update(screen, snake, fruit, stats): time.sleep(0.05)
logic.end()
stats: avg_line_length 59.171717 | max_line_length 177 | alphanum_fraction 0.669 | classes 5,337 (0.911062) | generators 0 (0) | decorators 64 (0.010925) | async_functions 0 (0) | documentation 325 (0.05548)
hexsha: f566d3437e302ac56089e454a2ea9560ed781683 | size: 14,376 | ext: py | lang: Python
stars:  dttpy/dttdata.py | neouniverse/dttpy | c5ff8870d796d84b39c4e6f82ec4eefe523cc3e7 | ["MIT"] | null | null | null
issues: dttpy/dttdata.py | neouniverse/dttpy | c5ff8870d796d84b39c4e6f82ec4eefe523cc3e7 | ["MIT"] | null | null | null
forks:  dttpy/dttdata.py | neouniverse/dttpy | c5ff8870d796d84b39c4e6f82ec4eefe523cc3e7 | ["MIT"] | null | null | null
#
#! coding:utf-8
import xml.etree.ElementTree as ET
from xml.etree import ElementTree
import base64
import binascii
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
SubType = {'1':'ASD','2':'CSD','3':'TF','4':'???','5':'COH'}
average_type = {'0':'Fixed','1':'Exponential','2':'Accumulative'} # not confirmed
window_type = {'0':'Uniform','1':'Hanning','2':'Flat-top',
               '3':'Welch','4':'Bartlet','5':'BMH'} # not confirmed
class DttXMLSpectrum():
def __init__(self,child):
self.Name = child.attrib["Name"]
self._getAttribute(child)
self._getStream(child)
def _getAttribute(self,child):
self.dt = child.find("./Param[@Name='dt']").text
self.t0 = child.find("./Time[@Type='GPS']").text
self.BW = child.find("./Param[@Name='BW']").text
self.f0 = child.find("./Param[@Name='f0']").text
self.df = child.find("./Param[@Name='df']").text
self.N = int(child.find("./Param[@Name='N']").text)
self.Window = child.find("./Param[@Name='Window']").text
self.AveType = child.find("./Param[@Name='AverageType']").text
self.Averages = child.find("./Param[@Name='Averages']").text
self.Flag = child.find("./Param[@Name='Flag']").text
self.Subtype = SubType[child.find("./Param[@Name='Subtype']").text]
self.M = int(child.find("./Param[@Name='M']").text)
self.dim = child.find('./Array/Dim').text
channel = child.findall("./Param[@Unit='channel']")
self.Channel = list(map(lambda x:{x.attrib['Name']:x.text},channel))
Channel = OrderedDict(self.Channel[0])
for c in self.Channel:
Channel.update(OrderedDict(c))
self.Channel = Channel
def showInfo(self):
fmt = 'dt [s]\t:{dt:2.10f}\n'+\
't0(GPS)\t:{t0:10.1f}\n'+\
'BW [Hz]\t:{bw:2.10f} \n'+\
'f0 [Hz]\t:{f0:2.10f} \n'+\
'df [Hz]\t:{df:2.10f} \n'+\
'average\t:{average:12d} \n'+\
'Points\t:{n:12d} \n'+\
'window\t:{window:12s} \n'+\
'type\t:{aveType:12s}\n'+\
'flag\t:{flag:12s}'
text = fmt.format(dt=float(self.dt),
t0=float(self.t0),
bw=float(self.BW),
f0=float(self.f0),
df=float(self.df),
n=int(self.N),
window=self.Window,
aveType=self.AveType,
average=int(self.Averages),
flag=self.Flag
)
print(text)
def _getStream(self,child):
stream_str = child.find('./Array/Stream').text
stream_bin = binascii.a2b_base64(stream_str)
if self.Subtype == 'ASD': # float : asd
self.spectrum = np.frombuffer(stream_bin, dtype=np.float32)
self.f = np.arange(len(self.spectrum))*float(self.df)
elif self.Subtype == 'CSD': # floatcomplex : cross spectrum
self.spectrum = np.frombuffer(stream_bin, dtype=np.float32)
real = self.spectrum[0::2]
real = real.reshape(self.M,self.N)
imag = self.spectrum[1::2]
imag = imag.reshape(self.M,self.N)
imag = 1j*imag
c = real+imag
#print c[0,:5]
# Cxy
# x:ChannelA
# y:ChannelB[0-]
self.csd = np.absolute(c)
self.deg = np.rad2deg(np.angle(c))
self.f = np.arange(len(self.csd[0]))*float(self.df)
elif self.Subtype == '???': # float : coherence?
self.spectrum = np.frombuffer(stream_bin, dtype=np.float32)
self.f = np.arange(len(self.spectrum))*float(self.df)
#print len(self.spectrum),len(self.f)
class DttXMLTransferFunction():
def __init__(self,child):
self.Name = child.attrib["Name"]
self._getAttribute(child)
self._getStream(child)
def _getAttribute(self,child):
#self.dt = child.find("./Param[@Name='dt']").text
#self.t0 = child.find("./Time[@Type='GPS']").text
self.BW = child.find("./Param[@Name='BW']").text
self.f0 = child.find("./Param[@Name='f0']").text
self.df = child.find("./Param[@Name='df']").text
self.N = int(child.find("./Param[@Name='N']").text)
self.Window = child.find("./Param[@Name='Window']").text
self.AveType = child.find("./Param[@Name='AverageType']").text
self.Averages = child.find("./Param[@Name='Averages']").text
self.Flag = child.find("./Param[@Name='Flag']").text
self.Subtype = SubType[child.find("./Param[@Name='Subtype']").text]
self.M = int(child.find("./Param[@Name='M']").text)
self.dim = child.find('./Array/Dim').text
channel = child.findall("./Param[@Unit='channel']")
self.Channel = list(map(lambda x:{x.attrib['Name']:x.text},channel))
Channel = OrderedDict(self.Channel[0])
for c in self.Channel:
Channel.update(OrderedDict(c))
self.Channel = Channel
    def showInfo(self):
        # dt and t0 are not parsed for transfer functions (they are commented
        # out in _getAttribute above); the original format string referenced
        # self.dt and self.t0 and would raise AttributeError.
        fmt = 'BW [Hz]\t:{bw:2.10f} \n'+\
              'f0 [Hz]\t:{f0:2.10f} \n'+\
              'df [Hz]\t:{df:2.10f} \n'+\
              'average\t:{average:12d} \n'+\
              'Points\t:{n:12d} \n'+\
              'window\t:{window:12s} \n'+\
              'type\t:{aveType:12s}\n'+\
              'flag\t:{flag:12s}'
        text = fmt.format(bw=float(self.BW),
                          f0=float(self.f0),
                          df=float(self.df),
                          n=int(self.N),
                          window=self.Window,
                          aveType=self.AveType,
                          average=int(self.Averages),
                          flag=self.Flag)
        print(text)
def _getStream(self,child):
stream_str = child.find('./Array/Stream').text
stream_bin = binascii.a2b_base64(stream_str)
#print(stream_bin)
#print(self.Subtype)
#
if self.Subtype == 'ASD': # float : asd
self.spectrum = np.frombuffer(stream_bin, dtype=np.float32)
#self.f = np.arange(len(self.spectrum))*float(self.df)
elif self.Subtype == 'CSD': # floatcomplex : cross spectrum
self.spectrum = np.frombuffer(stream_bin, dtype=np.float32)
real = self.spectrum[0::2]
real = real.reshape(self.M,self.N)
imag = self.spectrum[1::2]
imag = imag.reshape(self.M,self.N)
imag = 1j*imag
c = real+imag
#print c[0,:5]
# Cxy
# x:ChannelA
# y:ChannelB[0-]
self.csd = np.absolute(c)
self.deg = np.rad2deg(np.angle(c))
#self.f = np.arange(len(self.csd[0]))*float(self.df)
elif self.Subtype == 'TF':
self.spectrum = np.frombuffer(stream_bin, dtype=np.float32)
real = self.spectrum[0::2]
real = real.reshape(self.M+1,self.N)
imag = self.spectrum[1::2]
imag = imag.reshape(self.M+1,self.N)
imag = 1j*imag
c = real+imag
self.mag = np.absolute(c)
self.deg = np.rad2deg(np.angle(c))
#print('mag',self.mag)
#print('deg',self.deg)
#self.f = np.arange(len(self.mag[0]))*float(self.df)
#print('###',self.f,self.df)
#exit()
elif self.Subtype == 'COH':
self.spectrum = np.frombuffer(stream_bin, dtype=np.float32)
self.spectrum = self.spectrum.reshape(self.M+1,self.N)
self.mag = self.spectrum
else:
raise ValueError('!')
class DttXMLTestParameter():
def __init__(self,child):
self.Name = child.attrib["Name"]
self._getAttribute(child)
def _getAttribute(self,child):
#self.dt = child.find("./Param[@Name='dt']").text
#self.t0 = child.find("./Time[@Type='GPS']").text
self.sp = child.find("./Param[@Name='SweepPoints']").text
#print(self.sp)
self.sp = list(map(float,self.sp.split()))[0::2]
class DttData():
def __init__(self,xmlname):
'''
'''
tree = ElementTree.parse(xmlname)
root = tree.getroot()
        self.tfmode = False  # fixed: the original set tfmode only in the branch below, so spectrum files hit an AttributeError in getCoherence/getTF
        self.spect = [DttXMLSpectrum(child) for child in
                      root.findall("./LIGO_LW[@Type='Spectrum']")]
        if not self.spect:
            self.tfmode = True
            self.spect = [DttXMLTransferFunction(child) for child in
                          root.findall("./LIGO_LW[@Type='TransferFunction']")]
            test_params = root.findall("./LIGO_LW[@Type='TestParameter']")  # renamed from 'huge'/'hoge'
            self.f = DttXMLTestParameter(test_params[0]).sp
def getAllSpectrumName(self):
'''
'''
for s in self.spect:
print(s.Name,s.Subtype,s.Channel['ChannelA'])
def getASDInfo(self,chname,ref=False):
'''
'''
asd = filter(lambda x:x.Subtype=="ASD", self.spect)
asd = filter(lambda x:x.Channel['ChannelA']==chname, asd)
asd = list(asd)
if len(asd)==1:
asd = asd[0]
else:
raise ValueError('Error!')
asd.showInfo()
def getASD(self,chname,ref=False):
'''
'''
asdlist = filter(lambda x:x.Subtype=="ASD", self.spect)
asdlist = filter(lambda x:x.Channel['ChannelA']==chname, asdlist)
asdlist = list(asdlist)
if len(asdlist)==0:
raise ValueError('No ASD with : {0}'.format(chname))
for asd in asdlist:
print(asd.Name,asd.Subtype)
if ref==False:
if 'Result' in asd.Name:
return asd.f,asd.spectrum
else:
raise ValueError('No name')
elif ref==True:
if 'Reference' in asd.Name:
return asd.f,asd.spectrum
else:
raise ValueError('No reference')
else:
print('!')
return None
print('!')
def getResultNum(self,chname,ref=False):
'''
'''
asd = list(filter(lambda x:x.Subtype=="ASD", self.spect))
asd = list(filter(lambda x:x.Channel['ChannelA']==chname, asd))
num = asd[0].Name
return int(num.split('[')[1][0])
def getCSD(self,chnameA,chnameB,ref=False,**kwargs):
'''
'''
import re
csd = list(filter(lambda x:x.Subtype=="CSD", self.spect))
csd = list(filter(lambda x:x.Channel['ChannelA']==chnameA, csd))
if not ref:
csd = list(filter(lambda x: 'Reference' not in x.Name , csd))
numA = self.getResultNum(chnameA,**kwargs)
        for c in csd[0].Channel.keys():
            if csd[0].Channel[c] == chnameB:
                num = int(c[:-1].split('[')[1])
                if num >= numA:  # channel A occupies slot numA, so later channels shift down by one
                    num = num - 1
                return csd[0].f, csd[0].csd[num], csd[0].deg[num]
def getCoherence(self,chnameA,chnameB,ref=False):
        '''
        Return (frequency, coherence) between two channels.
        '''
        if not self.tfmode:
            freq,CSD_AB,deg = self.getCSD(chnameA,chnameB)
            freq,ASD_A = self.getASD(chnameA)
            freq,ASD_B = self.getASD(chnameB)
            # coherence = |CSD|^2 / (PSD_A * PSD_B)
            mag = (CSD_AB/(ASD_A*ASD_B))**2
        else:
            csd = list(filter(lambda x:x.Subtype=="COH", self.spect))
csd = list(filter(lambda x:x.Channel['ChannelA']==chnameA, csd))
if not ref:
csd = list(filter(lambda x: 'Reference' not in x.Name , csd))
else:
csd = list(filter(lambda x: 'Reference' in x.Name , csd))
if len(csd)==1:
csd = csd[0]
else:
                raise ValueError('Expected exactly one COH block, found {0}'.format(len(csd)))
chnames = list(csd.Channel.values())
label = list(csd.Channel.keys())
print(chnameA,chnames)
num = chnames.index(chnameB)
if ref:
freq = csd.mag[0]
else:
freq = self.f
mag = csd.mag[num]
return freq,mag
def getTF(self,chnameA,chnameB,ref=False,db=True):
        '''
        Return (frequency, magnitude, phase in degrees) of the transfer
        function between two channels. Magnitude is in dB when db is True.
        '''
        if not self.tfmode:
            f,CSD_AB,deg = self.getCSD(chnameA,chnameB)
            f,ASD_A = self.getASD(chnameA)
            f,ASD_B = self.getASD(chnameB)
            # note: this normalises the CSD by the PSD of channel B
            mag = CSD_AB/(ASD_B*ASD_B)
            return f,mag,deg
        else:
            csd = list(filter(lambda x:x.Subtype=="TF", self.spect))
csd = list(filter(lambda x:x.Channel['ChannelA']==chnameA, csd))
if not ref:
csd = list(filter(lambda x: 'Reference' not in x.Name , csd))
else:
csd = list(filter(lambda x: 'Reference' in x.Name , csd))
if len(csd)==1:
csd = csd[0]
else:
                raise ValueError('Expected exactly one TF block, found {0}'.format(len(csd)))
chnames = list(csd.Channel.values())
label = list(csd.Channel.keys())
print(chnameA,chnames)
num = chnames.index(chnameB)
if ref:
freq = csd.mag[0]
else:
freq = self.f
mag = csd.mag[num]
deg = csd.deg[num]
if db:
mag = 20*np.log10(mag)
return freq,mag,deg
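# Usage sketch (illustrative; 'measurement.xml' and the channel name below are
# hypothetical placeholders, not part of this module):
#   data = DttData('measurement.xml')
#   data.getAllSpectrumName()
#   f, asd = data.getASD('K1:PEM-SEIS_EXV_GND_X_OUT_DQ')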
| 38.438503
| 81
| 0.483097
| 13,866
| 0.964524
| 0
| 0
| 0
| 0
| 0
| 0
| 2,710
| 0.188509
|
f56710ff85a90ed722496b29dbe8a6afdffc8f9d
| 2,291
|
py
|
Python
|
neural_structured_learning/tools/graph_builder.py
|
eustomaqua/neural-structured-learning
|
e63a9e7ef435caaf6d70c04b6529e830bf47239d
|
[
"Apache-2.0"
] | null | null | null |
neural_structured_learning/tools/graph_builder.py
|
eustomaqua/neural-structured-learning
|
e63a9e7ef435caaf6d70c04b6529e830bf47239d
|
[
"Apache-2.0"
] | null | null | null |
neural_structured_learning/tools/graph_builder.py
|
eustomaqua/neural-structured-learning
|
e63a9e7ef435caaf6d70c04b6529e830bf47239d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Program to build a graph based on dense input features (embeddings).
This is a wrapper around the `nsl.tools.build_graph` API. See its documentation
for more details.
USAGE:
`python graph_builder.py` [*flags*] *input_features.tfr... output_graph.tsv*
For details about this program's flags, run `python graph_builder.py --help`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from neural_structured_learning.tools import graph_builder_lib
import tensorflow as tf
def _main(argv):
"""Main function for running the graph_builder program."""
flag = flags.FLAGS
flag.showprefixforinfo = False
if len(argv) < 3:
raise app.UsageError(
'Invalid number of arguments; expected 2 or more, got %d' %
(len(argv) - 1))
graph_builder_lib.build_graph(argv[1:-1], argv[-1], flag.similarity_threshold,
flag.id_feature_name,
flag.embedding_feature_name)
if __name__ == '__main__':
flags.DEFINE_string(
'id_feature_name', 'id',
"""Name of the singleton bytes_list feature in each input Example
whose value is the Example's ID.""")
flags.DEFINE_string(
'embedding_feature_name', 'embedding',
"""Name of the float_list feature in each input Example
whose value is the Example's (dense) embedding.""")
flags.DEFINE_float(
'similarity_threshold', 0.8,
"""Lower bound on the cosine similarity required for an edge
to be created between two nodes.""")
# Ensure TF 2.0 behavior even if TF 1.X is installed.
tf.compat.v1.enable_v2_behavior()
app.run(_main)
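# Example invocation (illustrative; the .tfr and .tsv file names below are
# placeholders):
#   python graph_builder.py --similarity_threshold=0.9 \
#       embeddings.tfr output_graph.tsv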
| 34.19403
| 80
| 0.717154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,479
| 0.64557
|
f56a3c3291794639e68ab580cfe7cfde7175ba0c
| 11,672
|
py
|
Python
|
main/dataset.py
|
MarcSerraPeralta/rec-flows
|
d05c3eca944f2228cffa575698ee5b010e83f167
|
[
"MIT"
] | null | null | null |
main/dataset.py
|
MarcSerraPeralta/rec-flows
|
d05c3eca944f2228cffa575698ee5b010e83f167
|
[
"MIT"
] | null | null | null |
main/dataset.py
|
MarcSerraPeralta/rec-flows
|
d05c3eca944f2228cffa575698ee5b010e83f167
|
[
"MIT"
] | null | null | null |
import torch
from torch.utils import data
import sys
from sklearn.utils import shuffle
import numpy as np
import argparse
class UserSet(data.Dataset):
def __init__(self, path, tsplit, idim=100, seed=0, Nsongs=180198, pc_split=0.1, tag2vector_path=""):
"""
path : str
path + fname of the user-playcounts list
the file has the index of the songs listened by each user
idim : int
maximum number of songs per user in items
>95% of users have listened less than 100 songs
tsplit : str
type of dataset: 'train', 'val', 'test'
loss : str
Name of the loss function used
seed : int
Seed used for the pcounts splitting
Nsongs : int
Number of different songs
pc_split : float
Percentage of the val and test set
(pc_split=1 corresponds to 100%)
"""
# LOAD DATA
self.path = path
self.pcounts = torch.load(self.path) #list
self.tsplit = tsplit
self.pc_split = pc_split
self.idim = idim
self.len = len(self.pcounts)
self.index1 = int(self.len*(1 - 2*pc_split))
self.index2 = int(self.len*(1 - pc_split))
self.seed = seed
self.Nsongs = Nsongs
# SPLIT DATASET
if self.tsplit == "train":
self.pcounts = shuffle(self.pcounts, random_state=self.seed)[:self.index1]
self.len = len(self.pcounts)
elif self.tsplit == "val":
self.pcounts = shuffle(self.pcounts, random_state=self.seed)[self.index1:self.index2]
self.len = len(self.pcounts)
elif self.tsplit == "test":
self.pcounts = shuffle(self.pcounts, random_state=self.seed)[self.index2:]
self.len = len(self.pcounts)
else:
print("ERROR: split options = 'train', 'val', 'test'. \n", self.tsplit)
self.len = None
self.pcounts = None
			sys.exit(1)
return
def __len__(self):
return self.len
	def __getitem__(self, idx): #given a user index, returns two vectors of the listened songs
user = shuffle(self.pcounts[idx])
idx_inp = np.random.randint(1, min(len(user)-1, self.idim))
idx_out = np.random.randint(idx_inp + 1, min(len(user) + 1, idx_inp + self.idim))
#INP PER EMBEDDING (song ID and -1)
inp = -torch.ones(self.idim, dtype=torch.long)
inp[range(idx_inp)] = torch.LongTensor(user[:idx_inp])
#OUT (one-hot vector)
out = torch.zeros(self.Nsongs, dtype=torch.long)
out[user[idx_inp:idx_out]] = torch.ones(len(user[idx_inp:idx_out]), dtype=torch.long)
return inp, out
def get_tags(self, Nusers=0, Ntags=1):
return torch.randint(Ntags, (Nusers, 1)).squeeze(1)
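# Usage sketch (illustrative; the pcounts path below is a placeholder):
#   train_set = UserSet("results/metadata/opt_pcounts", "train")
#   loader = data.DataLoader(train_set, batch_size=64, shuffle=True)
#   inp, out = next(iter(loader))  # inp: (64, idim), out: (64, Nsongs)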
class EmbSet(data.Dataset):
def __init__(self, path, tsplit, idim=100, seed=0, Nsongs=180198, pc_split=0.1):
"""
See UserSet
This dataset is for flows.
"""
self.path = path
self.pcounts = torch.load(self.path) #list
self.tsplit = tsplit
self.pc_split = pc_split
self.idim = idim
self.len = len(self.pcounts)
self.index1 = int(self.len*(1 - 2*pc_split))
self.index2 = int(self.len*(1 - pc_split))
self.seed = seed
self.Nsongs = Nsongs
# SPLIT DATASET
if self.tsplit == "train":
self.pcounts = shuffle(self.pcounts, random_state=self.seed)[:self.index1]
self.len = len(self.pcounts)
elif self.tsplit == "val":
self.pcounts = shuffle(self.pcounts, random_state=self.seed)[self.index1:self.index2]
self.len = len(self.pcounts)
elif self.tsplit == "test":
self.pcounts = shuffle(self.pcounts, random_state=self.seed)[self.index2:]
self.len = len(self.pcounts)
else:
print("ERROR: split options = 'train', 'val', 'test'. \n", self.tsplit)
self.len = None
self.pcounts = None
			sys.exit(1)
return
def __len__(self):
return self.len
	def __getitem__(self, idx): #given a user index, returns two vectors of the listened songs
user = shuffle(self.pcounts[idx])
idx_inp = np.random.randint(1, min(len(user)-1, self.idim))
idx_out = np.random.randint(idx_inp + 1, min(len(user) + 1, idx_inp + self.idim))
#INP
inp_idim = -torch.ones(self.idim, dtype=torch.long)
inp_idim[range(idx_inp)] = torch.LongTensor(user[:idx_inp])
inp_idx = torch.zeros(self.Nsongs, dtype=torch.long)
inp_idx[user[:idx_inp]] = torch.ones(len(user[:idx_inp]), dtype=torch.long)
#OUT
out_idim = -torch.ones(self.idim, dtype=torch.long)
out_idim[range(idx_out - idx_inp)] = torch.LongTensor(user[idx_inp:idx_out])
out_idx = torch.zeros(self.Nsongs, dtype=torch.long)
out_idx[user[idx_inp:idx_out]] = torch.ones(len(user[idx_inp:idx_out]), dtype=torch.long)
return inp_idim, inp_idx, out_idim, out_idx
class PostSet(data.Dataset):
"""
	Loads the prediction dataset created by get_PostSet().
"""
def __init__(self, calculate=False, metadata_path="results/metadata", metadata_name="opt_tags", bias_top=1, bias_normal=1):
if calculate:
			get_PostSet(metadata_path=metadata_path, metadata_name=metadata_name, bias_top=bias_top, bias_normal=bias_normal)
self.data = torch.load(metadata_path + "/postset_{}_t{}_n{}".format(metadata_name, bias_top, bias_normal))
self.len = len(self.data)
self.path = metadata_path + "/postset_{}_t{}_n{}".format(metadata_name, bias_top, bias_normal)
return
def __len__(self):
return self.len
def __getitem__(self, idx):
return self.data[idx]
def get_PostSet(pcounts_name = "opt_pcounts", pcounts_path = "results/metadata",
pc_split=0.1, seed = 0,
metadata_name = "opt_tags", metadata_path = "results/metadata",
bias_top=1, bias_normal=1):
"""
ONLY VALID FOR METADATA THAT IS A LIST FOR EACH SONG
"""
# LOAD PCOUNTS AND METADATA
pcounts = torch.load(pcounts_path + "/" + pcounts_name) #list
index2 = int(len(pcounts)*(1 - pc_split))
pcounts = shuffle(pcounts, random_state=seed)[index2:] # Test partition
metadata, meta = torch.load(metadata_path + "/" + metadata_name)
Nclasses = len(meta)
meta2idx = {meta[i]:i for i in range(Nclasses)}
idx2meta = {i:meta[i] for i in range(Nclasses)}
# CHANGE METADATA
print("Metadata2num and opt_pcounts to dict...")
idx_metadata = {} # same as metadata but using the index of meta2idx
for i in range(len(metadata)):
if metadata[i] == -1:
idx_metadata[i] = -1
else:
idx_metadata[i] = [meta2idx[m] for m in metadata[i]]
dict_pcounts = {}
for i in range(len(pcounts)):
dict_pcounts[i] = pcounts[i]
# USER META COUNT
print("Before filtering users without metadata,", len(pcounts))
user2class_counts = {}
total = len(dict_pcounts)
for b, user in enumerate(list(dict_pcounts.keys())):
print(" {0:0.3f}% \r".format((b+1.)*100./total), end="")
class_counts = torch.zeros(Nclasses)
for song in dict_pcounts[user]:
if idx_metadata[song] != -1:
class_counts[idx_metadata[song]] += 1
if (class_counts != 0).any():
user2class_counts[user] = class_counts.data.tolist()
else:
del dict_pcounts[user]
# GET TOP CLASS
print("After filtering users without metadata,", len(user2class_counts), len(dict_pcounts))
user2topclass = {}
for user in user2class_counts.keys():
user2topclass[user] = idx2meta[torch.argmax(torch.tensor(user2class_counts[user])).data.tolist()]
# SPLIT INTO [SONGS, TOP CLASS SONGS, TOP TAG]
user2topsongs = {}
user2normalsongs = {}
total = len(dict_pcounts)
for b, user in enumerate(dict_pcounts.keys()):
print(" {0:0.3f}%\r".format((b+1.)/total*100), end="")
top = []
normal = []
Ntop = 0
for song in dict_pcounts[user]:
if metadata[song] != -1:
if (user2topclass[user] in metadata[song]) and Ntop<100:
top += [song]
Ntop += 1
else:
normal += [song]
else:
normal += [song]
user2topsongs[user] = top
user2normalsongs[user] = normal
# DELETE USERS (BIAS_TOP, BIAS_NORMAL)
predict_dataset = []
for b, user in enumerate(dict_pcounts.keys()):
print(" {0:0.3f}%\r".format((b+1.)/total*100), end="")
if len(user2topsongs[user]) >= bias_top and len(user2normalsongs[user]) >= bias_normal:
predict_dataset += [[user2normalsongs[user], user2topsongs[user], user2topclass[user]]]
print("# Users (after deleting top<{}, inp<{}): ".format(bias_top, bias_normal), len(predict_dataset))
torch.save(predict_dataset, metadata_path + "/postset_{}_t{}_n{}".format(metadata_name, bias_top, bias_normal))
return
def get_topclass2Ntopclass(bias_top=1, bias_normal=1, metadata_path="results/metadata", metadata_name="opt_tags"):
print("Calculating topclass2Ntopclass...")
PostSet = torch.load(metadata_path + "/postset_{}_t{}_n{}".format(metadata_name, bias_top, bias_normal))
topclass2Ntopclass = {}
for b, (inp, out, c) in enumerate(PostSet):
if c not in list(topclass2Ntopclass.keys()): topclass2Ntopclass[c] = 0
topclass2Ntopclass[c] += 1
torch.save(topclass2Ntopclass, metadata_path + "/topclass2Ntopclass_{}_t{}_n{}".format(metadata_name, bias_top, bias_normal))
return
def get_class2song(metadata_path="results/metadata", metadata_name="opt_tags"):
print("Calculating class2song...")
metadata, meta = torch.load(metadata_path + "/" + metadata_name)
class2song = {c:[] for c in meta}
total = len(metadata)
for i in range(total):
print(" {0:0.3f}%\r".format((i+1.)/total*100), end="")
if metadata[i] == -1: continue
for c in metadata[i]:
class2song[c] += [i]
torch.save(class2song, metadata_path + "/{}2song".format(metadata_name))
return
def get_class2vector(metadata_path="results/metadata", metadata_name="opt_tags", Nsongs=180198):
print("Calculating get_class2vector...")
class2song = torch.load(metadata_path + "/{}2song".format(metadata_name))
_, meta = torch.load(metadata_path + "/" + metadata_name) # for idx2meta
Nclasses = len(meta)
meta2idx = {meta[i]:i for i in range(Nclasses)}
idx2meta = {i:meta[i] for i in range(Nclasses)}
total = len(class2song)
class2vector = torch.zeros(total,Nsongs).long()
for i in range(total):
print(" {0:0.3f}%\r".format((i+1.)/total*100), end="")
class2vector[i][class2song[idx2meta[i]]] = 1
torch.save(class2vector, metadata_path + "/{}2vector".format(metadata_name))
return
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--bias_top', type=int, default=1, help="Minimum number of songs in user_topsongs to be taken in care")
parser.add_argument('--bias_normal', type=int, default=1, help="Minimum number of songs in user_normalsongs to be taken in care")
parser.add_argument('--Nsongs', type=int, default=180198, help="Number of different songs")
parser.add_argument('--metadata_name', type=str, default="opt_tags", help="Name of the metadata to use")
parser.add_argument('--metadata_path', type=str, default="results/metadata", help="Path of the metadata to use")
parser.add_argument('--pcounts_name', type=str, default="opt_pcounts", help="Name of the pcounts to use")
parser.add_argument('--pcounts_path', type=str, default="results/metadata", help="Path of the pcounts to use")
parser.add_argument('--TODO', nargs='+', type=str, default=["all"], help="Things to calculate")
args = parser.parse_args()
if args.TODO == ["all"]: args.TODO = ["postset", "topclass2Ntopclass", "class2song", "class2vector"]
print("METADATA: {}\nBIAS TOP: {}\nBIAS NORMAL: {}\n".format(args.metadata_name, args.bias_top, args.bias_normal))
if "postset" in args.TODO:
get_PostSet(bias_normal=args.bias_normal, bias_top=args.bias_top, metadata_name=args.metadata_name, metadata_path=args.metadata_path, pcounts_name=args.pcounts_name, pcounts_path=args.pcounts_path)
if "topclass2Ntopclass" in args.TODO:
get_topclass2Ntopclass(bias_normal=args.bias_normal, bias_top=args.bias_top, metadata_name=args.metadata_name, metadata_path=args.metadata_path)
if "class2song" in args.TODO:
get_class2song(metadata_name=args.metadata_name, metadata_path=args.metadata_path)
if "class2vector" in args.TODO:
get_class2vector(metadata_name=args.metadata_name, metadata_path=args.metadata_path, Nsongs=args.Nsongs)
| 36.936709
| 199
| 0.706306
| 4,920
| 0.421522
| 0
| 0
| 0
| 0
| 0
| 0
| 2,692
| 0.230637
|
f56aef37015ae46f5772b8eb36d680a12e113fe7
| 892
|
py
|
Python
|
back/LocationParser.py
|
DimaYurchenko/postdata-hackathon-app
|
f688491b27db991946fd104102a7912c1b104ea4
|
[
"MIT"
] | null | null | null |
back/LocationParser.py
|
DimaYurchenko/postdata-hackathon-app
|
f688491b27db991946fd104102a7912c1b104ea4
|
[
"MIT"
] | null | null | null |
back/LocationParser.py
|
DimaYurchenko/postdata-hackathon-app
|
f688491b27db991946fd104102a7912c1b104ea4
|
[
"MIT"
] | null | null | null |
import json
from typing import List
from LocationObject import LocationObject
def parse(file_path: str) -> List[LocationObject]:
    with open(file_path, "r") as file:
        data = json.load(file)
    locations: List[LocationObject] = []
    for entry in data:
        city = entry["City"]
        code = entry["PostalCode"]
        street = entry["Street"]
        street_num = str(entry["StreetNumber"])
        open_time = entry["OpenTime"]
        close_time = entry["CloseTime"]
        locations.append(LocationObject(city, code, street, street_num, open_time, close_time))
    # keep only the first occurrence of each location
    unique_locations = []
    for location in locations:
        if location not in unique_locations:
            unique_locations.append(location)
    return unique_locations
# add geocoding for each location
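# Usage sketch (illustrative; 'locations.json' is a placeholder path whose
# entries carry the keys read above):
#   [{"City": "Kyiv", "PostalCode": "01001", "Street": "Khreshchatyk",
#     "StreetNumber": 1, "OpenTime": "08:00", "CloseTime": "20:00"}]
#   locations = parse("locations.json")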
| 27.030303
| 89
| 0.602018
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 103
| 0.115471
|
f56b8e9802094da8814e591262fd9b96c9698428
| 736
|
py
|
Python
|
data/train/python/f56b8e9802094da8814e591262fd9b96c9698428manage.py
|
harshp8l/deep-learning-lang-detection
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
[
"MIT"
] | 84
|
2017-10-25T15:49:21.000Z
|
2021-11-28T21:25:54.000Z
|
data/train/python/f56b8e9802094da8814e591262fd9b96c9698428manage.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 5
|
2018-03-29T11:50:46.000Z
|
2021-04-26T13:33:18.000Z
|
data/train/python/f56b8e9802094da8814e591262fd9b96c9698428manage.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 24
|
2017-11-22T08:31:00.000Z
|
2022-03-27T01:22:31.000Z
|
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
'''
Demonstrates asynchronous queue tasks with django + celery + redis.
The accompanying article is too brief to be of much interest; once you are at
the right level you can just read the code directly.
python manage.py migrate -- looks at the INSTALLED_APPS setting and creates any necessary database tables according to the database settings in your mysite/settings.py file and the database migrations shipped with the app
python manage.py runserver -- start the development server
python manage.py startapp app_name -- create an app
python manage.py makemigrations app_name -- preview the migrations
python manage.py sqlmigrate app_name 0001 -- do it for real
'''
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "picha.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 33.454545
| 221
| 0.777174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 653
| 0.784856
|
f56b9c719e339cbfa0c390fd236dda0208636e27
| 7,786
|
py
|
Python
|
nfp/servicos/controles/controle_execucao.py
|
FranciscoACLima/Robo_NFP_Selenium
|
7702854f94355fee8d78a4c04fc134cf099db5f0
|
[
"MIT"
] | null | null | null |
nfp/servicos/controles/controle_execucao.py
|
FranciscoACLima/Robo_NFP_Selenium
|
7702854f94355fee8d78a4c04fc134cf099db5f0
|
[
"MIT"
] | 16
|
2020-09-05T16:03:40.000Z
|
2022-03-19T17:42:05.000Z
|
nfp/servicos/controles/controle_execucao.py
|
FranciscoACLima/Robo_NFP_Selenium
|
7702854f94355fee8d78a4c04fc134cf099db5f0
|
[
"MIT"
] | null | null | null |
import os
from datetime import datetime
from sqlalchemy.orm import sessionmaker
import nfp.servicos.model as tables
from nfp import CONEXAO
class ControleExecucao(object):
uri = ''
tarefa = None
tarefa_nova = False
engine = CONEXAO
def configurar_base_de_dados(self):
self.DBSession = sessionmaker(bind=self.engine)
if os.path.isfile(self.uri):
if not self.engine.dialect.has_table(self.engine, self.table_name):
                print('Table {} does not exist yet. Creating table...'.format(self.table_name))
base = tables.Base
base.metadata.create_all(self.engine)
return
        print('Database does not exist yet. Creating...')
base = tables.Base
base.metadata.create_all(self.engine)
        print('using database: ' + self.uri)
def get_tarefa(self, tarefa_id):
session = self.DBSession()
tarefa = tables.Tarefa
query = session.query(tarefa).filter(
tarefa.id == tarefa_id,
)
registro = query.first()
return registro
def atualizar_colunas_tabela(self):
colunas = self.localizar_colunas_faltantes()
if not colunas:
return
session = self.DBSession()
for coluna, tipo in colunas.items():
session.execute('ALTER TABLE %s ADD COLUMN %s %s' % (self.table_name, coluna, tipo))
session.commit()
def localizar_colunas_faltantes(self):
tabela = self.table_name
session = self.DBSession()
result = session.execute("SELECT name FROM PRAGMA_TABLE_INFO('%s')" % (tabela))
colunas_bd = set()
for coluna in result.fetchall():
colunas_bd.add(coluna[0])
mapper = self.model.__mapper__.columns
colunas = set()
colunas_dic = {}
for column in mapper:
colunas.add(column.name)
colunas_dic[column.name] = str(column.type)
diferencas = list(colunas - colunas_bd)
if diferencas:
retorno = {}
for diferenca in diferencas:
retorno[diferenca] = colunas_dic[diferenca]
return retorno
return None
def extrair_dados_tarefa(self, tarefa_id):
session = self.DBSession()
execucao = self.model
        # look for a started task
filtro = [execucao.tarefa_id == tarefa_id]
query = session.query(execucao).filter(
*filtro,
)
registros = query.all()
if not registros:
return None
colunas = [column.name for column in self.model.__mapper__.columns]
remover = ['id', 'tarefa_id', 'inicio', 'fim']
for item in remover:
try:
colunas.remove(item)
except Exception:
pass
linhas = [
[getattr(valor, column.name)
for column in self.model.__mapper__.columns
if not(column.name in remover)]
for valor in registros]
return [colunas] + linhas
def contador_processos_tarefa(self, tarefa_id):
session = self.DBSession()
execucao = self.model
query = session.query(execucao).filter(
execucao.tarefa_id == tarefa_id
)
registros = query.all()
executadas = [reg.fim for reg in registros if reg.fim is not None]
ex = len(executadas)
# ex += 1
tot = len(registros)
return ex, tot
def finalizar_tarefa(self):
session = self.DBSession()
tarefa = tables.Tarefa
robo = self.table_name
        # look for the started task
query = session.query(tarefa).filter(
tarefa.inicio.isnot(None),
tarefa.fim.is_(None),
tarefa.robo == robo,
)
registro = query.first()
if not registro:
return None
        # record the task as finished
        # registro = tarefa()
registro.robo = robo
registro.fim = datetime.now()
session.add(registro)
session.commit()
return registro
def limpar_tabela(self, tabela):
session = self.DBSession()
session.execute('''DELETE FROM {}'''.format(tabela))
session.commit()
def reativar_tarefa(self, tarefa_id):
session = self.DBSession()
tarefa = tables.Tarefa
query = session.query(tarefa).filter(
tarefa.id == tarefa_id,
)
registro = query.first()
registro.fim = None
session.commit()
return True
def selecionar_execucao(self, tarefa_id):
session = self.DBSession()
execucao = self.model
tarefa = tables.Tarefa
        # look for a started task
query = session.query(execucao).filter(
execucao.inicio.isnot(None),
execucao.fim.is_(None),
execucao.tarefa_id == tarefa_id
).join(tarefa).filter(tarefa.fim.is_(None))
registro = query.first()
if registro:
return registro
        # look for the first free task
query = session.query(execucao).filter(
execucao.inicio.is_(None),
execucao.tarefa_id == tarefa_id
).join(tarefa).filter(tarefa.fim.is_(None))
registro = query.first()
        # if none is free, return empty
if not registro:
return None
        # mark the free task found as started
registro.inicio = datetime.now()
session.add(registro)
session.commit()
return registro
def selecionar_tarefa_ativa(self, criar_nova=False):
session = self.DBSession()
tarefa = tables.Tarefa
robo = self.table_name
        # look for a started task
query = session.query(tarefa).filter(
tarefa.inicio.isnot(None),
tarefa.fim.is_(None),
tarefa.robo == robo,
)
registro = query.first()
if registro:
self.tarefa_nova = False
return registro
        # register the task entry, marking it as started
if criar_nova:
registro = tarefa()
registro.robo = robo
registro.inicio = datetime.now()
session.add(registro)
session.commit()
self.tarefa_nova = True
return registro
return None
def selecionar_ultima_tarefa_finalizada(self):
session = self.DBSession()
tarefa = tables.Tarefa
robo = self.table_name
        # look for the last finished task
query = session.query(tarefa).filter(
tarefa.inicio.isnot(None),
tarefa.fim.isnot(None),
tarefa.robo == robo,
).order_by(tarefa.fim.desc())
return query.first()
def __del__(self):
del self.DBSession
# ---------------- Module functions ------
def selecionar_ultima_tarefa_remota_finalizada(tarefa_remota_id):
ctrl = ControleExecucao()
ctrl.table_name = 'tarefas'
ctrl.configurar_base_de_dados()
return ctrl.selecionar_ultima_tarefa_remota_finalizada(tarefa_remota_id)
def get_id_tarefa_remota(tarefa_id):
ctrl = ControleExecucao()
ctrl.table_name = 'tarefas'
ctrl.configurar_base_de_dados()
return ctrl.get_id_tarefa_remota(tarefa_id)
def get_tarefa(tarefa_id):
ctrl = ControleExecucao()
ctrl.table_name = 'tarefas'
ctrl.configurar_base_de_dados()
return ctrl.get_tarefa(tarefa_id)
def reativar_tarefa(tarefa_id):
ctrl = ControleExecucao()
ctrl.table_name = 'tarefas'
ctrl.configurar_base_de_dados()
return ctrl.reativar_tarefa(tarefa_id)
# ----------------------------------------
if __name__ == "__main__":
pass
| 32.041152
| 96
| 0.595171
| 6,755
| 0.866692
| 0
| 0
| 0
| 0
| 0
| 0
| 761
| 0.097639
|
f56c33ff6b67b94fb127b4ea54ac62ad0efe9506
| 2,458
|
py
|
Python
|
artifactory/repository/repository.py
|
VeritasOS/py-artifactory
|
a54bde5cf31f02a1f836bb013ac17a78859b3370
|
[
"Apache-2.0"
] | 6
|
2017-08-11T23:53:43.000Z
|
2019-10-25T17:34:11.000Z
|
artifactory/repository/repository.py
|
VeritasOS/py-artifactory
|
a54bde5cf31f02a1f836bb013ac17a78859b3370
|
[
"Apache-2.0"
] | 2
|
2019-05-07T01:36:08.000Z
|
2021-03-31T18:40:11.000Z
|
artifactory/repository/repository.py
|
VeritasOS/py-artifactory
|
a54bde5cf31f02a1f836bb013ac17a78859b3370
|
[
"Apache-2.0"
] | 2
|
2018-12-11T09:43:15.000Z
|
2019-10-25T18:19:05.000Z
|
# -*- coding: utf-8 -*-
"""
Artifactory repository endpoint
"""
__copyright__ = "Copyright (C) 2016 Veritas Technologies LLC. All rights reserved."
# project imports
from ..http import HTTP
from .repotype import RepositoryType
from .virtual import Virtual
from .local import Local
from .remote import Remote
# define all repo types
REPO_TYPE = {
"local": Local,
"remote": Remote,
"virtual": Virtual,
}
class Repository(HTTP):
endpoint = "repositories"
_required = [
("key", "key", ""),
("type", "type", ""),
("url", "url", ""),
]
_optional = [
("description", "description", ""),
]
def __init__(self, api):
self.api = api
super(Repository, self).__init__(self.api)
def virtual(self):
return Virtual(self.api)
def local(self):
return Local(self.api)
def remote(self):
return Remote(self.api)
def list(self, type=None):
"""
Repository types - (local|remote|virtual)
"""
if type:
endpoint = "{0}/?type={1}".format(self.endpoint, type)
return self.get(endpoint=endpoint, instance_class=Repository)
else:
return self.get(instance_class=Repository)
def fetch(self, name=""):
if not name:
name = getattr(self, "key", "")
if not name:
message = "Repository name is required"
self.log.error(message)
raise Exception(message)
endpoint = "{0}/{1}".format(self.endpoint, name)
return self.get(endpoint=endpoint, instance_class=RepositoryType)
    def _get_instance(self, data, instance):
        # TODO: this is a hack; it needs improvement in the future
        if instance not in [RepositoryType]:
            return super(Repository, self)._get_instance(data, instance)
else:
self.log.debug("Instance RepositoryType found with type {0}".format(
data.get("rclass")))
repo_instance = REPO_TYPE.get(data.get("rclass"))
if not repo_instance:
message = "Repository type {0} not supported".format(
data.get("rclass"))
self.log.error(message)
raise Exception(message)
self.log.debug("Returning RepositoryType instance {0}".format(
repo_instance))
return repo_instance(self.api)
| 27.311111
| 83
| 0.579333
| 2,019
| 0.8214
| 0
| 0
| 0
| 0
| 0
| 0
| 606
| 0.246542
|
f56e6fbda99325c6509cd93be29f620a11819e74
| 2,887
|
py
|
Python
|
app.py
|
PrismaPhonic/PetFinder-Exercise
|
a4d2c6293873299f9d6632158bca837a830fac98
|
[
"MIT"
] | null | null | null |
app.py
|
PrismaPhonic/PetFinder-Exercise
|
a4d2c6293873299f9d6632158bca837a830fac98
|
[
"MIT"
] | null | null | null |
app.py
|
PrismaPhonic/PetFinder-Exercise
|
a4d2c6293873299f9d6632158bca837a830fac98
|
[
"MIT"
] | null | null | null |
"""Adoption application."""
from flask import Flask, request, redirect, render_template
from models import db, connect_db, Pets
from wtforms import StringField, IntegerField, TextAreaField, BooleanField
from wtforms.validators import InputRequired, AnyOf, URL, NumberRange
from flask_wtf import FlaskForm
from petfunctions import get_random_pet
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///adopt'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = True
connect_db(app)
db.create_all()
from flask_debugtoolbar import DebugToolbarExtension
app.config['SECRET_KEY'] = 'SOSECRET'
debug = DebugToolbarExtension(app)
class AddPetForm(FlaskForm):
"""Form class for adding a pet"""
name = StringField('Pet Name')
#make this a dropdown (species)
species = StringField('Pet Species',validators=[InputRequired(),AnyOf(['dog','cat','porcupine','pickle'])])
photo_url = StringField('Pet Photo Url',validators=[InputRequired(),URL()])
age = IntegerField('Pet Age',validators=[InputRequired(), NumberRange(0, 30, "Age must be between 0 and 30")])
notes = TextAreaField('Notes')
class EditPetForm(FlaskForm):
""""Form class for editing pets"""
photo_url = StringField('Pet Photo Url',validators=[InputRequired(),URL()])
notes = TextAreaField('Notes')
available = BooleanField('Available')
@app.route('/')
def pet_list():
"""Display a homepage of pets we can adopt"""
pets = Pets.query.all()
pet_name,pet_age,pet_url = get_random_pet()
return render_template('index.html',pets=pets,pet_name=pet_name,pet_age=pet_age,pet_url=pet_url)
@app.route('/add', methods=['GET','POST'])
def add_pet_form():
"""Add pet to adoption database form"""
form = AddPetForm()
if form.validate_on_submit():
name = form.data['name']
species = form.data['species']
photo_url = form.data['photo_url']
age = form.data['age']
notes = form.data['notes']
pet = Pets(name=name,
species=species,
photo_url=photo_url,
age=age,
notes=notes,
)
db.session.add(pet)
db.session.commit()
return redirect('/')
else:
return render_template('add_pet_form.html',form=form)
@app.route('/<int:pet_id>', methods=['GET','POST'])
def pet_page(pet_id):
"""Display pet details and a form to edit pet"""
pet = Pets.query.get_or_404(pet_id)
form = EditPetForm(obj=pet)
if form.validate_on_submit():
pet.photo_url = form.data['photo_url']
pet.notes = form.data['notes']
pet.available = form.data['available']
db.session.commit()
return redirect(f'/{pet_id}')
else:
return render_template('pet_details.html',pet=pet, form=form)
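# To run locally (illustrative; assumes a local 'adopt' postgres database and
# the dependencies above are installed):
#   FLASK_APP=app.py flask run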
| 29.459184
| 114
| 0.66505
| 710
| 0.24593
| 0
| 0
| 1,476
| 0.511257
| 0
| 0
| 697
| 0.241427
|
f56f1c7317138379cc46e4bc9738fe0615922706
| 17,810
|
py
|
Python
|
pyrolite/util/resampling.py
|
bomtuckle/pyrolite
|
c0af0ade14ff26b4e9fdd5a033b27e73df085c55
|
[
"BSD-3-Clause"
] | 69
|
2019-02-25T00:17:53.000Z
|
2022-03-31T17:26:48.000Z
|
pyrolite/util/resampling.py
|
bomtuckle/pyrolite
|
c0af0ade14ff26b4e9fdd5a033b27e73df085c55
|
[
"BSD-3-Clause"
] | 68
|
2018-07-20T09:01:01.000Z
|
2022-03-31T16:28:36.000Z
|
pyrolite/util/resampling.py
|
bomtuckle/pyrolite
|
c0af0ade14ff26b4e9fdd5a033b27e73df085c55
|
[
"BSD-3-Clause"
] | 24
|
2018-10-02T04:32:10.000Z
|
2021-11-10T08:24:17.000Z
|
"""
Utilities for (weighted) bootstrap resampling applied to geoscientific point-data.
"""
import numpy as np
import pandas as pd
from .meta import subkwargs
from .spatial import great_circle_distance, _get_sqare_grid_segment_indicies
from .log import Handle
logger = Handle(__name__)
try:
import sklearn
HAVE_SKLEARN = True
except ImportError:
msg = "scikit-learn not installed"
logger.warning(msg)
HAVE_SKLEARN = False
def _segmented_univariate_distance_matrix(
A, B, distance_metric, dtype="float32", segs=10
):
"""
A method to generate a point-to-point distance matrix in segments to be softer
on memory requirements yet retain precision (e.g. beyond a few thousand points).
Parameters
-----------
A, B : :class:`numpy.ndarray`
Numpy arrays with positions of points.
distance_metric
Callable function f(a, b) from which to derive a distance metric.
dtype : :class:`str` | :class:`numpy.dtype`
Data type to use for the matrix.
segs : :class:`int`
Number of segments to split the matrix into (note that this will effectively
squared - i.e. 10 -> 100 individual segments).
Returns
-------
dist : :class:`numpy.ndarray`
2D point-to-point distance matrix.
"""
max_size = np.max([a.shape[0] for a in [A, B]])
dist = np.zeros((max_size, max_size), dtype=dtype) # full matrix
    # note that this could be parallelized; the calculations are independent
for ix_s, ix_e, iy_s, iy_e in _get_sqare_grid_segment_indicies(max_size, segs):
dist[ix_s:ix_e, iy_s:iy_e] = distance_metric(
A[ix_s:ix_e][:, np.newaxis], B[iy_s:iy_e][np.newaxis, :],
)
return dist
def univariate_distance_matrix(a, b=None, distance_metric=None):
"""
Get a distance matrix for a single column or array of values (here used for ages).
Parameters
-----------
a, b : :class:`numpy.ndarray`
Points or arrays to calculate distance between. If only one array is
specified, a full distance matrix (i.e. calculate a point-to-point distance
for every combination of points) will be returned.
distance_metric
Callable function f(a, b) from which to derive a distance metric.
Returns
-------
:class:`numpy.ndarray`
2D distance matrix.
"""
if distance_metric is None:
distance_metric = lambda a, b: np.abs(a - b)
    a = np.atleast_1d(np.array(a).astype(float))
full_matrix = False
if b is not None:
# a second set of points is specified; the return result will be 1D
        b = np.atleast_1d(np.array(b).astype(float))
else:
# generate a full point-to-point matrix for a single set of points
full_matrix = True
b = a.copy()
return _segmented_univariate_distance_matrix(a, b, distance_metric)
def get_spatiotemporal_resampling_weights(
df,
spatial_norm=1.8,
temporal_norm=38,
latlong_names=["Latitude", "Longitude"],
age_name="Age",
max_memory_fraction=0.25,
normalized_weights=True,
**kwargs
):
"""
    Takes a dataframe with lat, long and age and returns a sampling weight for each
    sample which is essentially the inverse of the mean distance to other samples.
Parameters
-----------
df : :class:`pandas.DataFrame`
Dataframe to calculate weights for.
spatial_norm : :class:`float`
Normalising constant for spatial measures (1.8 arc degrees).
temporal_norm : :class:`float`
Normalising constant for temporal measures (38 Mya).
latlong_names : :class:`list`
List of column names referring to latitude and longitude.
age_name : :class:`str`
Column name corresponding to geological age or time.
max_memory_fraction : :class:`float`
Constraint to switch to calculating mean distances where :code:`matrix=True`
and the distance matrix requires greater than a specified fraction of total
        available physical memory. This is passed on to
:func:`~pyrolite.util.spatial.great_circle_distance`.
normalized_weights : :class:`bool`
Whether to renormalise weights to unity.
Returns
--------
weights : :class:`numpy.ndarray`
Sampling weights.
Notes
------
    This function is equivalent to Eq(1) from Keller and Schoene:
.. math::
W_i \\propto 1 \\Big / \\sum_{j=1}^{n} \\Big ( \\frac{1}{((z_i - z_j)/a)^2 + 1} + \\frac{1}{((t_i - t_j)/b)^2 + 1} \\Big )
"""
weights = pd.Series(index=df.index, dtype="float")
z = great_circle_distance(
df[[*latlong_names]],
absolute=False,
max_memory_fraction=max_memory_fraction,
**subkwargs(kwargs, great_circle_distance)
) # angular distances
_invnormdistances = np.zeros_like(z)
# where the distances are zero, these weights will go to inf
# instead we replace with the smallest non-zero distance/largest non-inf
# inverse weight
norm_inverse_distances = 1.0 / ((z / spatial_norm) ** 2 + 1)
norm_inverse_distances[~np.isfinite(norm_inverse_distances)] = 1
_invnormdistances += norm_inverse_distances
# ages - might want to split this out as optional for spatial resampling only?
t = univariate_distance_matrix(df[age_name])
norm_inverse_time = 1.0 / ((t / temporal_norm) ** 2 + 1)
norm_inverse_time[~np.isfinite(norm_inverse_time)] = 1
_invnormdistances += norm_inverse_time
weights = 1.0 / np.sum(_invnormdistances, axis=0)
if normalized_weights:
weights = weights / weights.sum()
return weights
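# Usage sketch (illustrative; assumes a dataframe 'df' with 'Latitude',
# 'Longitude' and 'Age' columns):
#   weights = get_spatiotemporal_resampling_weights(df)
#   resampled = df.sample(weights=weights, frac=1, replace=True)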
def add_age_noise(
df,
min_sigma=50,
noise_level=1.0,
age_name="Age",
age_uncertainty_name="AgeUncertainty",
min_age_name="MinAge",
max_age_name="MaxAge",
):
"""
Add gaussian noise to a series of geological ages based on specified uncertainties
or age ranges.
Parameters
-----------
df : :class:`pandas.DataFrame`
Dataframe with age data within which to look up the age name and add noise.
min_sigma : :class:`float`
Minimum uncertainty to be considered for adding age noise.
noise_level : :class:`float`
        Scaling of the noise added to the ages. By default the uncertainties are unscaled,
        but where age uncertainties are specified at the one standard deviation level
        this can be used to expand the range of noise added (e.g. to 2SD).
age_name : :class:`str`
Column name for absolute ages.
age_uncertainty_name : :class:`str`
        Name of the column specifying absolute age uncertainties.
min_age_name : :class:`str`
Name of the column specifying minimum absolute ages (used where uncertainties
are otherwise unspecified).
max_age_name : :class:`str`
Name of the column specifying maximum absolute ages (used where uncertainties
are otherwise unspecified).
Returns
--------
df : :class:`pandas.DataFrame`
Dataframe with noise-modified ages.
Notes
------
This modifies the dataframe which is input - be aware of this if using outside
of the bootstrap resampling for which this was designed.
"""
# try and get age uncertainty
try:
age_uncertainty = df[age_uncertainty_name]
except KeyError:
# otherwise get age min age max
# get age uncertainties
age_uncertainty = (
np.abs(df[max_age_name] - df[min_age_name]) / 2
) # half bin width
    age_uncertainty[
        ~np.isfinite(age_uncertainty) | (age_uncertainty < min_sigma)
    ] = min_sigma
# generate gaussian age noise
age_noise = np.random.randn(df.index.size) * age_uncertainty.values
age_noise *= noise_level # scale the noise
# add noise to ages
df[age_name] += age_noise
return df
def spatiotemporal_bootstrap_resample(
df,
columns=None,
uncert=None,
weights=None,
niter=100,
categories=None,
transform=None,
boostrap_method="smooth",
add_gaussian_age_noise=True,
metrics=["mean", "var"],
default_uncertainty=0.02,
relative_uncertainties=True,
noise_level=1,
age_name="Age",
latlong_names=["Latitude", "Longitude"],
**kwargs
):
"""
Resample and aggregate metrics from a dataframe, optionally aggregating by a given
set of categories. Formulated specifically for dealing with resampling to address
uneven sampling density in space and particularly geological time.
Parameters
-----------
df : :class:`pandas.DataFrame`
Dataframe to resample.
columns : :class:`list`
Columns to provide bootstrap resampled estimates for.
uncert : :class:`float` | :class:`numpy.ndarray` | :class:`pandas.Series` | :class:`pandas.DataFrame`
Fractional uncertainties for the dataset.
weights : :class:`numpy.ndarray` | :class:`pandas.Series`
Array of weights for resampling, if precomputed.
niter : :class:`int`
Number of resampling iterations. This will be the minimum index size of the output
metric dataframes.
    categories : :class:`list` | :class:`numpy.ndarray` | :class:`pandas.Series`
        List of sample categories to group the outputs by, which has the same size as the
        dataframe index.
transform
Callable function to transform input data prior to aggregation functions. Note
that the outputs will need to be inverse-transformed.
boostrap_method : :class:`str`
Which method to use to add gaussian noise to the input dataset parameters.
add_gaussian_age_noise : :class:`bool`
        Whether to add gaussian noise to the input dataset ages, where present.
metrics : :class:`list`
List of metrics to use for dataframe aggregation.
default_uncertainty : :class:`float`
Default (fractional) uncertainty where uncertainties are not given.
relative_uncertainties : :class:`bool`
Whether uncertainties are relative (:code:`True`, i.e. fractional proportions
of parameter values), or absolute (:code:`False`)
noise_level : :class:`float`
Multiplier for the random gaussian noise added to the dataset and ages.
age_name : :class:`str`
Column name for geological age.
latlong_names : :class:`list`
        Column names for latitude and longitude, or equivalent orthogonal spherical
spatial measures.
Returns
--------
:class:`dict`
Dictionary of aggregated Dataframe(s) indexed by statistical metrics. If
categories are specified, the dataframe(s) will have a hierarchical index of
:code:`categories, iteration`.
"""
    # uncertainty management ###########################################################
uncertainty_type = None
if uncert is not None:
if isinstance(uncert, float):
uncertainty_type = "0D" # e.g. 2%
elif isinstance(uncert, (list, pd.Series)) or (
isinstance(uncert, np.ndarray) and np.array(uncert).ndim < 2
):
uncertainty_type = "1D" # e.g. [0.5%, 1%, 2%]
# shape should be equal to parameter column number
elif isinstance(uncert, (pd.DataFrame)) or (
isinstance(uncert, np.ndarray) and np.array(uncert).ndim >= 2
):
uncertainty_type = "2D" # e.g. [[0.5%, 1%, 2%], [1.5%, 0.6%, 1.7%]]
# shape should be equal to parameter column number by rows
else:
raise NotImplementedError("Unknown format for uncertainties.")
# weighting ########################################################################
# generate some weights for resampling - here addressing specifically spatial
# and temporal resampling
if weights is None:
weights = get_spatiotemporal_resampling_weights(
df,
age_name=age_name,
latlong_names=latlong_names,
**subkwargs(kwargs, get_spatiotemporal_resampling_weights)
)
# to efficiently manage categories we can make sure we have an iterable here
if categories is not None:
if isinstance(categories, (list, tuple, pd.Series, np.ndarray)):
pass
elif isinstance(categories, str) and categories in df.columns:
categories = df[categories]
else:
msg = "Categories unrecognized"
raise NotImplementedError(msg)
# column selection #################################################################
# get the subset of parameters to be resampled, removing spatial and age names
# and only taking numeric data
subset = columns or [
c
for c in df.columns
if c not in [[i for i in df.columns if age_name in i], *latlong_names]
and np.issubdtype(df.dtypes[c], np.number)
]
# resampling #######################################################################
def _metric_name(metric):
return repr(metric).replace("'", "")
metric_data = {_metric_name(metric): [] for metric in metrics}
# samples are independent, so this could be processed in parallel
for repeat in range(niter):
# take a new sample with replacement equal in size to the original dataframe
smpl = df.sample(weights=weights, frac=1, replace=True)
        # whether to specifically add noise to the geological ages
# note that the metadata around age names are passed through to this function
# TODO: Update to have external disambiguation of ages/min-max ages,
# and just pass an age series to this function.
if add_gaussian_age_noise:
smpl = add_age_noise(
smpl,
min_sigma=50,
age_name=age_name,
noise_level=noise_level,
**subkwargs(kwargs, add_age_noise)
)
# transform the parameters to be estimated before adding parameter noise?
if transform is not None:
smpl[subset] = smpl[subset].apply(transform, axis="index")
# whether to add parameter noise, and if so which method to use?
# TODO: Update the naming of this? this is only one part of the bootstrap process
if boostrap_method is not None:
# try to get uncertainties for the data, otherwise use standard deviations?
if boostrap_method.lower() == "smooth":
# add random noise within uncertainty bounds
# this is essentially smoothing
# consider modulating the noise model using the covariance structure?
# this could be done by individual group to preserve varying covariances
# between groups?
if uncert is None:
noise = (
smpl[subset].values
* default_uncertainty
* np.random.randn(*smpl[subset].shape)
) * noise_level
else:
noise = np.random.randn(*smpl[subset].shape) * noise_level
if uncertainty_type in ["0D", "1D"]:
# this should work if a float or series is passed
noise *= uncert
else:
# need to get indexes of the sample to look up uncertainties
# need to extract indexes for the uncertainties, which might be arrays
arr_idxs = df.index.take(smpl.index).values
noise *= uncert[arr_idxs, :]
if relative_uncertainties:
noise *= smpl[subset].values
smpl[subset] += noise
elif (boostrap_method.upper() == "GP") or (
"process" in bootstrap_method.lower()
):
# gaussian process regression to adapt to covariance matrix
msg = "Gaussian Process boostrapping not yet implemented."
raise NotImplementedError(msg)
else:
msg = "Bootstrap method {} not recognised.".format(boostrap_method)
raise NotImplementedError(msg)
# whether to independently estimate metric values for individual categories?
        # TODO: Should the categories argument be used to generate individual
# bootstrap resampling processes?
if categories is not None:
for metric in metrics:
metric_data[_metric_name(metric)].append(
smpl[subset].groupby(categories).agg(metric)
)
else: # generate the metric summaries for the overall dataset
for metric in metrics:
metric_data[_metric_name(metric)].append(smpl[subset].agg(metric))
    # collate the aggregated metrics
    if categories is not None:
        # the dataframe will be indexed by category and iteration
        return {
            metric: pd.concat(data, keys=range(niter), names=["Iteration"])
            .swaplevel(0, 1)
            .sort_index()
            for metric, data in metric_data.items()
        }
    else:
        # the dataframe will be indexed by iteration of the bootstrap
        # TODO: add iteration level to this index?
        return {metric: pd.DataFrame(data) for metric, data in metric_data.items()}
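# Usage sketch (illustrative; the column name 'MgO' and the category column
# 'Province' are placeholders for whatever the dataframe actually contains):
#   results = spatiotemporal_bootstrap_resample(
#       df, columns=['MgO'], niter=100, categories='Province', uncert=0.02
#   )
#   mean_estimates = results['mean']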
| 40.022472
| 131
| 0.614935
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10,230
| 0.574396
|
f56fadc40b0f5bac091cf8c15f4a134f11cb883f
| 49
|
py
|
Python
|
sfrmaker/test/test_nhdplus_utils.py
|
mnfienen/sfrmaker
|
f3ee175c67c80df15bff509235d9a6218bfc6b0b
|
[
"CC0-1.0"
] | 17
|
2015-08-15T02:20:04.000Z
|
2020-04-30T17:36:21.000Z
|
sfrmaker/test/test_nhdplus_utils.py
|
rfrederiksen/sfrmaker
|
7e66d67d6cb0ad84fbb9994402f0baaf5b3fcd01
|
[
"CC0-1.0"
] | 15
|
2015-03-04T16:57:13.000Z
|
2020-01-14T16:29:18.000Z
|
sfrmaker/test/test_nhdplus_utils.py
|
rfrederiksen/sfrmaker
|
7e66d67d6cb0ad84fbb9994402f0baaf5b3fcd01
|
[
"CC0-1.0"
] | 9
|
2015-08-18T14:15:07.000Z
|
2020-04-28T18:45:21.000Z
|
# TODO: add unit tests for test_nhdplus_utils.py
| 24.5
| 48
| 0.795918
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 48
| 0.979592
|
f570043bcd7ec43faf876327124a5a21c6d01798
| 1,809
|
py
|
Python
|
src/examples/stimuli-representation.py
|
cwardell97/learn-hippo-1
|
90280c614fb94aea82a60c2ed071db8068a37d5c
|
[
"MIT"
] | null | null | null |
src/examples/stimuli-representation.py
|
cwardell97/learn-hippo-1
|
90280c614fb94aea82a60c2ed071db8068a37d5c
|
[
"MIT"
] | null | null | null |
src/examples/stimuli-representation.py
|
cwardell97/learn-hippo-1
|
90280c614fb94aea82a60c2ed071db8068a37d5c
|
[
"MIT"
] | null | null | null |
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from task import SequenceLearning
sns.set(style='white', palette='colorblind', context='poster')
np.random.seed(0)
'''how to use'''
# init
n_param, n_branch = 16, 4
pad_len = 0
n_parts = 2
n_samples = 256
p_rm_ob_enc = 0
p_rm_ob_rcl = 0
n_rm_fixed = False
task = SequenceLearning(
n_param, n_branch, pad_len=pad_len,
p_rm_ob_enc=p_rm_ob_enc,
p_rm_ob_rcl=p_rm_ob_rcl,
n_rm_fixed=n_rm_fixed,
)
# take sample
X, Y = task.sample(n_samples, to_torch=False)
print(f'X shape = {np.shape(X)}, n_example x time x x-dim')
print(f'Y shape = {np.shape(Y)}, n_example x time x y-dim')
'''visualize the sample'''
# pick a sample
i = 0
x, y = X[i], Y[i]
cmap = 'bone'
x_split = np.split(x, (n_param, n_param + n_branch), axis=1)
mat_list = x_split + [y]
f, axes = plt.subplots(
2, 4, figsize=(14, 11), sharey=True,
gridspec_kw={
'width_ratios': [n_param, n_branch, n_param, n_branch],
'height_ratios': [n_param, n_param]
},
)
title_list = ['Observed feature', 'Observed value',
'Queried feature', 'Queried value']
ylabel_list = ['Part one', 'Part two']
for i, mat in enumerate(mat_list):
axes[0, i].imshow(mat[:n_param, :], cmap=cmap)
axes[1, i].imshow(mat[n_param:, :], cmap=cmap)
axes[0, i].set_title(title_list[i], fontname='Helvetica')
axes[0, i].set_xticks([])
for i in [1, 3]:
axes[1, i].set_xticks(range(n_branch))
    axes[1, i].set_xticklabels(np.arange(n_branch) + 1)
for i in range(2):
axes[i, 0].set_yticks(np.arange(0, n_param, 5))
axes[i, 0].set_ylabel(ylabel_list[i], fontname='Helvetica')
f.tight_layout()
f.savefig('examples/figs/stimulus-rep.png', dpi=100, bbox_inches='tight')
| 28.265625
| 74
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 391
| 0.216142
|
f570cddcfd6909cf11435d43423d70ccb8d64b16
| 133
|
py
|
Python
|
WEEKS/wk17/CodeSignal-Solutions/Core_007_-_lateRide.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | null | null | null |
WEEKS/wk17/CodeSignal-Solutions/Core_007_-_lateRide.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | null | null | null |
WEEKS/wk17/CodeSignal-Solutions/Core_007_-_lateRide.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | null | null | null |
def lateRide(n):
    """Given n minutes after midnight, return the digit sum of the time
    shown on a 24-hour clock (HH:MM)."""
    hours = n // 60
    minutes = n % 60
    return (hours // 10) + (hours % 10) + (minutes // 10) + (minutes % 10)
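# Example (illustrative): 808 minutes past midnight is 13:28,
# and 1 + 3 + 2 + 8 = 14.
assert lateRide(808) == 14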
| 26.6
| 74
| 0.511278
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f571719391b271f64aa33623e91452b85398b280
| 704
|
py
|
Python
|
eventbusk/exceptions.py
|
Airbase/eventbusk
|
704d50a4c9c1f7d332dba93ee04ab07afa59d216
|
[
"BSD-3-Clause"
] | null | null | null |
eventbusk/exceptions.py
|
Airbase/eventbusk
|
704d50a4c9c1f7d332dba93ee04ab07afa59d216
|
[
"BSD-3-Clause"
] | 1
|
2021-06-13T18:08:50.000Z
|
2021-06-13T18:08:50.000Z
|
eventbusk/exceptions.py
|
Airbase/eventbusk
|
704d50a4c9c1f7d332dba93ee04ab07afa59d216
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Custom exceptions
"""
from __future__ import annotations
__all__ = [
    "AlreadyRegistered",
    "ConsumerError",
    "EventBusError",
    "ProducerError",
    "UnknownEvent",
]
class EventBusError(Exception):
"""
Base of exceptions raised by the bus.
"""
class UnknownEvent(EventBusError):
"""
    Raised when a receiver is created for an event the bus does not recognize.
"""
class AlreadyRegistered(EventBusError):
"""
Raised when an event is registered more than once to the bus.
"""
class ProducerError(EventBusError):
"""
Raised during production of an event.
"""
class ConsumerError(EventBusError):
"""
    Raised during consumption of an event.
"""
| 16.761905
| 79
| 0.661932
| 526
| 0.747159
| 0
| 0
| 0
| 0
| 0
| 0
| 415
| 0.589489
|
f572b933b1b5aed70aca3d4ac6ade4a2e8fe1e58
| 9,580
|
py
|
Python
|
sparse_ct/example/dgr_example.py
|
mozanunal/SparseCT
|
97d7f06c0414f934c7fa36023adcf9fe4c071eaf
|
[
"MIT"
] | 11
|
2020-11-01T11:35:30.000Z
|
2022-03-30T02:19:52.000Z
|
sparse_ct/example/dgr_example.py
|
mozanunal/SparseCT
|
97d7f06c0414f934c7fa36023adcf9fe4c071eaf
|
[
"MIT"
] | 8
|
2020-12-13T12:17:38.000Z
|
2021-12-21T21:04:27.000Z
|
sparse_ct/example/dgr_example.py
|
mozanunal/SparseCT
|
97d7f06c0414f934c7fa36023adcf9fe4c071eaf
|
[
"MIT"
] | null | null | null |
from sparse_ct.tool import plot_grid
from sparse_ct.data import image_to_sparse_sinogram
from sparse_ct.reconstructor_2d import (
IRadonReconstructor,
SartReconstructor,
SartTVReconstructor,
DgrReconstructor,
SartBM3DReconstructor)
import logging
logging.basicConfig(
filename='dgr_example_32_35.log',
filemode='a',
format='%(asctime)s - %(levelname)s - %(message)s',
level=logging.DEBUG
)
def test(fname, label, n_proj=32, noise_pow=25.0):
dgr_iter = 4000
lr = 0.01
net = 'skip'
noise_std = 1./100
gt, sinogram, theta, FOCUS = image_to_sparse_sinogram(fname,
channel=1, n_proj=n_proj, size=512,
angle1=0.0, angle2=180.0, noise_pow=noise_pow)
logging.warning('Starting')
logging.warning('fname: %s %s',label, fname)
logging.warning('n_proj: %s', n_proj)
logging.warning('noise_pow: %s', noise_pow)
logging.warning('dgr_n_iter: %s', dgr_iter)
logging.warning('dgr_lr: %s', lr)
logging.warning('dgr_net: %s', net)
logging.warning('dgr_noise_std: %s', noise_std)
recons = [
IRadonReconstructor('FBP'),
SartReconstructor('SART', sart_n_iter=40, sart_relaxation=0.15),
SartTVReconstructor('SART+TV',
sart_n_iter=40, sart_relaxation=0.15,
tv_weight=0.5, tv_n_iter=100),
SartBM3DReconstructor('SART+BM3D',
sart_n_iter=40, sart_relaxation=0.15,
bm3d_sigma=0.5),
DgrReconstructor('DIP_1.00_0.00_0.00_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=1.0,
w_perceptual_loss=0.0,
w_tv_loss=0.0
),
DgrReconstructor('DIP_0.99_0.01_0.00_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.99,
w_perceptual_loss=0.01,
w_tv_loss=0.0
),
DgrReconstructor('DIP_0.90_0.10_0.00_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.90,
w_perceptual_loss=0.10,
w_tv_loss=0.0
),
DgrReconstructor('DIP_0.50_0.50_0.00_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.5,
w_perceptual_loss=0.5,
w_tv_loss=0.0
),
DgrReconstructor('DIP_0.10_0.90_0.00_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.10,
w_perceptual_loss=0.90,
w_tv_loss=0.0
),
DgrReconstructor('DIP_0.01_0.99_0.00_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.01,
w_perceptual_loss=0.99,
w_tv_loss=0.0
),
DgrReconstructor('DIP_0.00_1.00_0.00_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.0,
w_perceptual_loss=1.0,
w_tv_loss=0.0
),
DgrReconstructor('DIP_0.99_0.00_0.01_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.99,
w_perceptual_loss=0.0,
w_tv_loss=0.01
),
DgrReconstructor('DIP_0.90_0.00_0.10_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.9,
w_perceptual_loss=0.0,
w_tv_loss=0.1
),
DgrReconstructor('DIP_0.50_0.00_0.50_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.5,
w_perceptual_loss=0.0,
w_tv_loss=0.5
),
DgrReconstructor('DIP_0.10_0.00_0.90_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.1,
w_perceptual_loss=0.0,
w_tv_loss=0.9
),
DgrReconstructor('DIP_0.01_0.00_0.99_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.01,
w_perceptual_loss=0.0,
w_tv_loss=0.99
),
DgrReconstructor('DIP_0.00_0.00_1.0_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.00,
w_perceptual_loss=0.0,
w_tv_loss=1.0
),
DgrReconstructor('DIP_0.33_0.33_0.33_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.33,
w_perceptual_loss=0.33,
w_tv_loss=0.33
),
DgrReconstructor('DIP_0.8_0.10_0.10_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.8,
w_perceptual_loss=0.1,
w_tv_loss=0.1
),
DgrReconstructor('DIP_0.98_0.01_0.01_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.98,
w_perceptual_loss=0.01,
w_tv_loss=0.01
),
DgrReconstructor('DIP_0.10_0.80_0.10_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.10,
w_perceptual_loss=0.80,
w_tv_loss=0.10
),
DgrReconstructor('DIP_0.01_0.98_0.01_0.00',
dip_n_iter=dgr_iter,
net=net,
lr=lr,
reg_std=noise_std,
w_proj_loss=0.01,
w_perceptual_loss=0.98,
w_tv_loss=0.01
),
]
    # use the SART+BM3D reconstruction (recons[3]) as the comparison image for DIP metrics
    img_sart_bm3d = recons[3].calc(sinogram, theta)
imgs = []
for recon in recons:
if type(recon) == DgrReconstructor:
recon.set_for_metric(gt, img_sart_bm3d, FOCUS=FOCUS, log_dir='../log/dip')
imgs.append(recon.calc(sinogram))
mse, psnr, ssim = recon.eval(gt)
recon.save_result()
logstr = "{}: MSE:{:.5f} PSNR:{:.5f} SSIM:{:.5f}".format(
recon.name, mse, psnr, ssim
)
logging.info(logstr)
plot_grid([gt] + imgs,
FOCUS=FOCUS, save_name=label+'.png', dpi=500)
logging.warning('Done. Results saved as %s', label+'.png')
if __name__ == "__main__":
test("../data/shepp_logan.jpg", "shepp_logan_32_35", n_proj=32, noise_pow=35.0)
test("../data/ct2.jpg", "ct2_32_35", n_proj=32, noise_pow=35.0)
test("../data/ct1.jpg", "ct1_32_35", n_proj=32, noise_pow=35.0)
test("../data/LoDoPaB/004013_02_01_119.png", "LoDoPaB1_32_35", n_proj=32, noise_pow=35.0)
test("../data/LoDoPaB/004017_01_01_151.png", "LoDoPaB2_32_35", n_proj=32, noise_pow=35.0)
test("../data/LoDoPaB/004028_01_04_109.png", "LoDoPaB3_32_35", n_proj=32, noise_pow=35.0)
test("../data/LoDoPaB/004043_01_01_169.png", "LoDoPaB4_32_35", n_proj=32, noise_pow=35.0)
test("../data/LoDoPaB/004049_04_01_062.png", "LoDoPaB5_32_35", n_proj=32, noise_pow=35.0)
| 38.167331
| 93
| 0.423173
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,138
| 0.118789
|
f5735cd931c6cb22e8fa362f8340421fcf372c3d
| 1,340
|
py
|
Python
|
backend/app/settings/globals.py
|
alldevic/base-fastapi-postgresql
|
7e3a2916910155cd83b10cd7fec42eba7b1d3a95
|
[
"MIT"
] | 3
|
2021-06-17T00:06:15.000Z
|
2022-01-26T03:53:51.000Z
|
backend/app/settings/globals.py
|
alldevic/base-fastapi-postgresql
|
7e3a2916910155cd83b10cd7fec42eba7b1d3a95
|
[
"MIT"
] | null | null | null |
backend/app/settings/globals.py
|
alldevic/base-fastapi-postgresql
|
7e3a2916910155cd83b10cd7fec42eba7b1d3a95
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from typing import Optional
from starlette.config import Config
from starlette.datastructures import CommaSeparatedStrings
from ..models.pydantic.database import DatabaseURL
p: Path = Path(__file__).parents[2] / ".env"
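# Read from the .env file when it exists; otherwise fall back to plain process environment variables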
config: Config = Config(p if p.exists() else None)
DATABASE: str = config("POSTGRES_DB", cast=str)
DB_USER: Optional[str] = config("POSTGRES_USER", cast=str, default=None)
DB_PASSWORD: Optional[str] = config(
"POSTGRES_PASSWORD", cast=str, default=None
)
DB_HOST: str = config("DB_HOST", cast=str, default="postgres_db")
DB_PORT: int = config("DB_PORT", cast=int, default=5432)
DATABASE_CONFIG: DatabaseURL = DatabaseURL(
drivername="asyncpg",
username=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=DB_PORT,
database=DATABASE,
)
ALEMBIC_CONFIG: DatabaseURL = DatabaseURL(
drivername="postgresql+psycopg2",
username=DB_USER,
password=DB_PASSWORD,
host=DB_HOST,
port=DB_PORT,
database=DATABASE,
)
REDIS_IP: str = config("REDIS_IP", cast=str, default="redis")
REDIS_PORT: int = config("REDIS_PORT", cast=int, default=6379)
REDIS_PASSWORD: Optional[str] = config("REDIS_PASSWORD", cast=str, default=None)
ARQ_BACKGROUND_FUNCTIONS: Optional[CommaSeparatedStrings] = config(
"ARQ_BACKGROUND_FUNCTIONS", cast=CommaSeparatedStrings, default=None
)
| 31.162791
| 72
| 0.750746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 185
| 0.13806
|
f5738865aace2f3446a95a35c7f51b460031ae67
| 1,607
|
py
|
Python
|
03. Advanced (Nested) Conditional Statements/P09 Fruit Shop #.py
|
KrisBestTech/Python-Basics
|
10bd961bf16d15ddb94bbea53327b4fc5bfdba4c
|
[
"MIT"
] | null | null | null |
03. Advanced (Nested) Conditional Statements/P09 Fruit Shop #.py
|
KrisBestTech/Python-Basics
|
10bd961bf16d15ddb94bbea53327b4fc5bfdba4c
|
[
"MIT"
] | null | null | null |
03. Advanced (Nested) Conditional Statements/P09 Fruit Shop #.py
|
KrisBestTech/Python-Basics
|
10bd961bf16d15ddb94bbea53327b4fc5bfdba4c
|
[
"MIT"
] | null | null | null |
fruit = str(input())
day_of_the_week = str(input())
quantity = float(input())
price = 0
if fruit == 'banana' or \
fruit == 'apple' or \
fruit == 'orange' or \
fruit == 'grapefruit' or \
fruit == 'kiwi' or \
fruit == 'pineapple' or \
fruit == 'grapes':
if day_of_the_week == 'Monday' or day_of_the_week == 'Tuesday' or \
day_of_the_week == 'Wednesday' or \
day_of_the_week == 'Thursday' or \
day_of_the_week == 'Friday':
if fruit == 'banana':
price = 2.50
elif fruit == 'apple':
price = 1.20
elif fruit == 'orange':
price = 0.85
elif fruit == 'grapefruit':
price = 1.45
elif fruit == 'kiwi':
price = 2.70
elif fruit == 'pineapple':
price = 5.50
elif fruit == 'grapes':
price = 3.85
total_price = quantity * price
print(f'{total_price:.2f}')
elif day_of_the_week == 'Saturday' or day_of_the_week == 'Sunday':
if fruit == 'banana':
price = 2.70
elif fruit == 'apple':
price = 1.25
elif fruit == 'orange':
price = 0.90
elif fruit == 'grapefruit':
price = 1.60
elif fruit == 'kiwi':
price = 3
elif fruit == 'pineapple':
price = 5.60
elif fruit == 'grapes':
price = 4.20
total_price = quantity * price
print(f'{total_price:.2f}')
else:
print('error')
else:
print('error')
| 21.716216
| 71
| 0.47542
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 298
| 0.185439
|
f573e98c3617ee161a5bc2f46171d1b7f2905fc3
| 1,368
|
py
|
Python
|
trajectories/tests/test_DTW.py
|
donsheehy/geomcps
|
b4ef5dbf0fed21927485b01580b724272f84d9ed
|
[
"MIT"
] | null | null | null |
trajectories/tests/test_DTW.py
|
donsheehy/geomcps
|
b4ef5dbf0fed21927485b01580b724272f84d9ed
|
[
"MIT"
] | null | null | null |
trajectories/tests/test_DTW.py
|
donsheehy/geomcps
|
b4ef5dbf0fed21927485b01580b724272f84d9ed
|
[
"MIT"
] | null | null | null |
import unittest
from trajectories.dynamic_time_warper import *
from trajectories.trajectory import Trajectory
from trajectories.point import Point
class TestDTW(unittest.TestCase):
def test_1D_DTW(self):
t1 = [1,2,2,10,2,1]
t2 = [3,3,5,5,2]
self.assertEqual(45, dtw(t1, t2, -1, metricI))
self.assertEqual(0, dtw(t1, t1, -1, metricI))
t1 = Trajectory([Point([1]),Point([2]),Point([2]),Point([10]),Point([2]),Point([1])])
t2 = Trajectory([Point([3]),Point([3]),Point([5]),Point([5]),Point([2])])
self.assertEqual(45, dtw(t1, t2, -1, metricD))
self.assertEqual(0, dtw(t1, t1, -1, metricD))
def test_DTWI(self):
p1 = Point([-7, -4])
p2 = Point([5, 6])
p3 = Point([3, 4])
p4 = Point([-3, 5])
t1 = Trajectory([p1, p2])
t2 = Trajectory([p3, p4])
self.assertEqual(45, dtwI(t1, t2))
t1 = Trajectory([p1, p2, p3, p4])
self.assertEqual(0, dtwI(t1, t1))
def test_ITWD(self):
p1 = Point([-7, -4])
p2 = Point([5, 6])
p3 = Point([3, 4])
p4 = Point([-3, 5])
t1 = Trajectory([p1, p2])
t2 = Trajectory([p3, p4])
self.assertEqual(45, dtwD(t1, t2))
t1 = Trajectory([p1, p2, p3, p4])
self.assertEqual(0, dtwD(t1, t1))
if __name__ == '__main__':
unittest.main()
| 33.365854
| 93
| 0.54386
| 1,171
| 0.855994
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.00731
|
f57687a33470d7205bc87af05ce7973af384b2a0
| 235
|
py
|
Python
|
1103.py
|
gabzin/uri
|
177bdf3f87bacfd924bd031a973b8db877379fe5
|
[
"MIT"
] | 3
|
2021-09-21T18:50:20.000Z
|
2021-12-14T13:07:31.000Z
|
1103.py
|
gabzin/uri
|
177bdf3f87bacfd924bd031a973b8db877379fe5
|
[
"MIT"
] | null | null | null |
1103.py
|
gabzin/uri
|
177bdf3f87bacfd924bd031a973b8db877379fe5
|
[
"MIT"
] | null | null | null |
while True:
h1,m1,h2,m2=map(int,input().split())
i=f=0
if m1+m2+h1+h2==0:break
if h1==0:i=(24*60)+m1
else:i=(h1*60)+m1
if h2==0:f=(24*60)+m2
else:f=(h2*60)+m2
print(f-i) if f>i else print((24*60)-(i-f))
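# A minimal equivalent of the branching above using modular arithmetic
# (a sketch that mirrors the behavior of the original loop body; the
# function name duration_minutes is illustrative, not from the original):
def duration_minutes(h1, m1, h2, m2):
    diff = ((h2 * 60 + m2) - (h1 * 60 + m1)) % (24 * 60)  # wrap across midnight
    return diff if diff else 24 * 60  # zero elapsed time counts as a full day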
| 23.5
| 47
| 0.52766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f578b94c9410c26b42768750abeeeeadcdf0cd8f
| 4,820
|
py
|
Python
|
portfolio_pj/portfolio_app/views.py
|
duynb92/portfolio_site_py
|
b69be83a11d9adafae664bf08e506273f541ff53
|
[
"MIT"
] | null | null | null |
portfolio_pj/portfolio_app/views.py
|
duynb92/portfolio_site_py
|
b69be83a11d9adafae664bf08e506273f541ff53
|
[
"MIT"
] | 4
|
2020-02-11T23:47:01.000Z
|
2021-06-10T21:12:36.000Z
|
portfolio_pj/portfolio_app/views.py
|
duynb92/portfolio_site_py
|
b69be83a11d9adafae664bf08e506273f541ff53
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Count
from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponseRedirect
from .models import *
from .forms import *
import os
from django.http import HttpResponse
from portfolio_pj import settings
# Create your views here.
def index(req):
context = HomeContext("Home", Facade.getSkills(), Facade.getHobbies())
return render(req, 'index.html', {"context": context})
def profile(req):
profile_context = ProfileContext("Profile", Facade.getProfiles())
return render(req, 'profile.html', {"context": profile_context})
def portfolio(req):
portfolio_context = PortfolioContext("Portfolio", Facade.getProjects())
return render(req, 'portfolio-gird-3.html', {"context": portfolio_context})
def service(req):
service_context = ServiceContext("Services", Facade.getServices())
return render(req, 'services.html', {"context": service_context,})
def contact(req):
context = BaseContext("Contact")
return render(req, 'contact-3.html', {"context": context,})
def blog(req):
blogs = getBlogsWithPaging(req, Blog.objects.all().order_by('-pub_date'))
blog_context = BlogsContext("Blog", blogs, getRecentBlogs(), getCategories(), getTags(), getArchives())
return render(req, 'blog-list.html', {"context": blog_context})
def blogWithSlug(req, blog_year, blog_month, blog_day, blog_slug):
blog = Blog.objects.get(slug=blog_slug)
blog_context = BlogContext("Blog", blog, getRecentBlogs(), getCategories(), getTags(), getArchives())
# if this is a POST request we need to process the form data
if req.method == 'POST':
# create a form instance and populate it with data from the request:
form = CommentForm(req.POST)
# check whether it's valid:
if form.is_valid():
# process the data in form.cleaned_data as required
# save the data
comment = form.save(commit=False)
if (form.cleaned_data['parent'] is not None):
comment.parent_id = form.cleaned_data['parent'].id
comment.blog_id = blog.id
comment.save()
form = CommentForm()
return HttpResponseRedirect("/blog/%s/%s/%s/%s" % (blog_year,blog_month,blog_day,blog_slug))
# if a GET (or any other method) we'll create a blank form
else:
form = CommentForm()
return render(req, 'blog-details.html', {"context": blog_context, "form" : form})
def blogArchive(req, blog_year, blog_month):
blogs = getBlogsWithPaging(req, Blog.objects.filter(pub_date__year=blog_year, pub_date__month=blog_month))
blog_context = BlogsContext("Blog", blogs, getRecentBlogs(), getCategories(), getTags(), getArchives())
return render(req, 'blog-list.html', {"context": blog_context})
def blogWithTag(req, tag_slug):
blogs = getBlogsWithPaging(req,Tag.objects.get(slug=tag_slug).blog_set.all())
blog_context = BlogsContext("Blog", blogs, getRecentBlogs(), getCategories(), getTags(), getArchives())
return render(req, 'blog-list.html', {"context":blog_context})
def blogWithCategory(req, category_slug):
blogs = getBlogsWithPaging(req,Category.objects.get(slug=category_slug).blog_set.all())
blog_context = BlogsContext("Blog", blogs, getRecentBlogs(), getCategories(), getTags(), getArchives())
return render(req, 'blog-list.html', {"context":blog_context})
def cv(req):
file_path = os.path.join(settings.STATIC_ROOT, 'CV_NBD.pdf')
if os.path.exists(file_path):
with open(file_path, 'rb') as fh:
response = HttpResponse(fh.read(), content_type="application/pdf")
response['Content-Disposition'] = 'inline; filename=' + os.path.basename(file_path)
return response
else:
return render(req, 'eror-404.html')
# Private Methods
def getCategories():
return Category.objects.annotate(blog_count=Count('blog')).filter(blog_count__gt=0).order_by('-blog_count')
def getTags():
return Tag.objects.annotate(blog_count=Count('blog')).filter(blog_count__gt=0).order_by('-blog_count')
def getRecentBlogs():
return Blog.objects.all().order_by('-pub_date')[:7]
def getArchives():
return Blog.getArchives()
def getBlogsWithPaging(req, blog_list):
max_paging = 5
page_no = req.GET.get('page')
blogs_paginator = Paginator(blog_list, max_paging)
try:
blogs = blogs_paginator.page(page_no)
except PageNotAnInteger:
blogs = blogs_paginator.page(1)
except EmptyPage:
blogs = blogs_paginator.page(blogs_paginator.num_pages)
return blogs
| 41.551724
| 111
| 0.700415
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 873
| 0.18112
|
f5792851b55e8b741f344366679574e04969bc93
| 1,022
|
py
|
Python
|
backend/repositories/bookmark_repository.py
|
heshikirihasebe/fastapi-instagram-clone
|
7bc265a62160171c5c5c1b2f18b3c86833cb64e7
|
[
"MIT"
] | 1
|
2022-02-08T19:35:22.000Z
|
2022-02-08T19:35:22.000Z
|
backend/repositories/bookmark_repository.py
|
heshikirihasebe/fastapi-instagram-clone
|
7bc265a62160171c5c5c1b2f18b3c86833cb64e7
|
[
"MIT"
] | null | null | null |
backend/repositories/bookmark_repository.py
|
heshikirihasebe/fastapi-instagram-clone
|
7bc265a62160171c5c5c1b2f18b3c86833cb64e7
|
[
"MIT"
] | null | null | null |
import datetime
from ..databases.postgresql import session
from ..models.bookmark_model import Bookmark
# Select one
async def select_one(user_id: int, post_id: int):
bookmark = session.query(Bookmark).filter(Bookmark.user_id == user_id, Bookmark.post_id == post_id).first()
return bookmark
# Insert
async def insert(user_id: int, post_id: int):
bookmark = Bookmark(
user_id=user_id,
post_id=post_id,
)
session.add(bookmark)
session.commit()
session.close()
# Update
async def update(user_id: int, post_id: int, deleted_at: str):
bookmark = session.query(Bookmark).filter(Bookmark.user_id == user_id, Bookmark.post_id == post_id).first()
bookmark.updated_at = datetime.datetime.now()
bookmark.deleted_at = deleted_at
session.commit()
session.close()
# Count by post id
async def countByPostId(post_id: int):
num_bookmarks = session.query(Bookmark).filter(Bookmark.post_id == post_id, Bookmark.deleted_at == None).count()
return num_bookmarks
| 31.9375
| 116
| 0.720157
| 0
| 0
| 0
| 0
| 0
| 0
| 856
| 0.837573
| 46
| 0.04501
|
f57b07d03e45e8f7fc9d99adb6fc72590a4d7edd
| 3,326
|
py
|
Python
|
D3_cgi/support/uman.py
|
slzjw26/learn_Pthon
|
9c4053ec1ea4c32a01fa2658499d8e53a4a532f3
|
[
"MIT"
] | null | null | null |
D3_cgi/support/uman.py
|
slzjw26/learn_Pthon
|
9c4053ec1ea4c32a01fa2658499d8e53a4a532f3
|
[
"MIT"
] | null | null | null |
D3_cgi/support/uman.py
|
slzjw26/learn_Pthon
|
9c4053ec1ea4c32a01fa2658499d8e53a4a532f3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
#
# User management application
#
"""
六、用python写一个cgi程序,功能如下:
1. 查询用户 (get)
2. 创建用户 (post)
3. 修改用户 (post)
4. 删除用户 (post)
要点:
1. 通过变量 REQUEST_METHOD 来判断是get还是post
2. 通过变量 QUERY_STRING 来判断是创建还是修改还是删除
3. 通过subprocess.getoutput, 或者os.system 来运行shell命令
4. 相关命令如下:
查用户:grep ^root /etc/passwd
加用户:useradd user-name
改用户:usermod user-name
删用户:userdel user-name
"""
import os
import sys
import subprocess as sub
def response(headers, body):
for h in headers:
print(h)
print()
for b in body:
sys.stdout.write(b)
def get_user_info(params_str, headers):
if params_str:
params = dict(p.split('=') for p in params_str.split('&'))
else:
params = {}
name = params.get('name')
if not name:
headers.append('Status: 400 BAD_REQUEST')
return response(headers, ['name is required'])
info = read_user_info(name)
if not info:
headers.append('Status: 200 OK')
return response(headers, ['name %s not exists' % name])
body = []
body.append('name: %s\n' % info['name'])
body.append('uid: %s\n' % info['uid'])
body.append('gid: %s\n' % info['gid'])
body.append('comment: %s\n' % info['comment'])
body.append('home: %s\n' % info['home'])
body.append('shell: %s\n' % info['shell'])
return response(headers, body)
def read_user_info(name):
"""从系统的用户数据库 /etc/passwd 中读取指定用户的基本信息,返回字典"""
db = '/etc/passwd'
info = [line.split(':') for line in open(db).read().splitlines()]
user_info = [i for i in info if i[0] == name]
    if not user_info:  # user not found
return
user_info = user_info[0]
colnames = ('name', 'password', 'uid', 'gid', 'comment', 'home', 'shell')
return dict(zip(colnames, user_info))
def alter_user(headers):
data = sys.stdin.read().strip()
if data:
params = dict(p.split('=') for p in data.split('&'))
else:
headers.append('Status: 400 BAD_REQUEST')
return response(headers, ['invalid parameters'])
kind = params['kind'] # add? delete? modify?
if kind == 'add':
cmd = ['useradd', params['name']]
elif kind == 'delete':
cmd = ['userdel', '-r', params['name']]
elif kind == 'mod':
        # currently only the user's comment field can be modified; extend later if needed
name = params['name']
comment = params['comment']
cmd = ['usermod', '-c', comment, name]
else:
headers.append('Status: 400 BAD_REQUEST')
return response(headers, ['operation %s not supported' % kind])
    # Run the external user-management command
    # Temporary workaround: run the command through sudo
cmd.insert(0, 'sudo')
cmd = ' '.join(cmd)
code, out = sub.getstatusoutput(cmd)
if code == 0:
headers.append('Status: 200 OK')
return response(headers, ['operation success'])
else:
headers.append('Status: 200 OK')
return response(headers, ['failed: %s' % out])
if __name__ == '__main__':
headers = []
headers.append('Content-Type: text/plain')
if os.getenv('REQUEST_METHOD') == 'GET':
params = os.getenv('QUERY_STRING', '')
get_user_info(params, headers)
elif os.getenv('REQUEST_METHOD') == 'POST':
alter_user(headers)
else:
headers.append('Status: 405 METHOD_NOT_ALLOWED')
response(headers, [])
| 27.262295
| 77
| 0.591401
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,553
| 0.425014
|
f57b96c36aa134214c43fa41994f0ddf57c913f2
| 1,979
|
py
|
Python
|
main.py
|
TheGreatRambler/blender-universal-exporter
|
996191a787b36aa6ea007b82a36b9a752b9f50ee
|
[
"MIT"
] | 2
|
2018-10-11T17:34:51.000Z
|
2021-04-26T20:51:45.000Z
|
main.py
|
TheGreatRambler/blender-universal-exporter
|
996191a787b36aa6ea007b82a36b9a752b9f50ee
|
[
"MIT"
] | null | null | null |
main.py
|
TheGreatRambler/blender-universal-exporter
|
996191a787b36aa6ea007b82a36b9a752b9f50ee
|
[
"MIT"
] | null | null | null |
bl_info = {
"name": "Universal Exporter",
"category": "Import & Export",
}
import bpy
class Export(bpy.types.Operator):
"""Export blender project"""
bl_idname = "object.export_scene"
bl_label = "Export Blender Scene"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
scene = context.scene
cursor = scene.cursor_location
obj = scene.objects.active
# Add stuff here
        # This script converts all objects to .obj files
        for obj in bpy.data.objects:
            bpy.ops.object.select_name(name=obj.name)
            # file_path was undefined in the original; derive one per object
            # (assumed convention: write an .obj next to the .blend file)
            file_path = bpy.path.abspath("//" + obj.name + ".obj")
            bpy.ops.export_scene.obj(filepath=file_path,
check_existing=True,
filter_glob="*.obj;*.mtl",
use_selection=True,
use_all_scenes=False,
use_animation=False,
use_modifiers=True,
use_rotate_x90=True,
use_edges=True,
use_normals=False,
use_hq_normals=True,
use_uvs=True,
use_materials=True,
copy_images=False,
use_triangles=False,
use_vertex_groups=False,
use_nurbs=False,
use_blen_objects=True,
group_by_object=False,
group_by_material=False,
                             keep_vertex_order=False,
                             global_scale=1)
return {'FINISHED'}
def register():
bpy.utils.register_class(Export)
def unregister():
bpy.utils.unregister_class(Export)
if __name__ == "__main__":
register()
| 32.983333
| 69
| 0.458312
| 1,724
| 0.871147
| 0
| 0
| 0
| 0
| 0
| 0
| 252
| 0.127337
|
f57c524ea058c9eaac99f335f5d9b80e94762f25
| 2,024
|
py
|
Python
|
chmm_files/chmm_gen.py
|
IvanTyulyandin/Lin_alg_Viterbi
|
0359c33ed67f8748cd51e8852555ea2fa35b9365
|
[
"Apache-2.0"
] | null | null | null |
chmm_files/chmm_gen.py
|
IvanTyulyandin/Lin_alg_Viterbi
|
0359c33ed67f8748cd51e8852555ea2fa35b9365
|
[
"Apache-2.0"
] | null | null | null |
chmm_files/chmm_gen.py
|
IvanTyulyandin/Lin_alg_Viterbi
|
0359c33ed67f8748cd51e8852555ea2fa35b9365
|
[
"Apache-2.0"
] | null | null | null |
import random
# Parameters
states_num: int = 900
trans_per_state: int = 3
transitions_num: int = trans_per_state * states_num
num_non_zero_start_probs: int = 2
emit_range: int = 20
file_name: str = "random_" + \
str(states_num) + "_" + str(transitions_num) + "_" + \
str(emit_range) + "_" + str(num_non_zero_start_probs) + ".chmm"
# Implicit parameter for probabilities generation
rng_range: int = 100
def generate_probability_list(length: int) -> list:
# Fill list with random values, then divide all elements to sum of probs,
# so sum(probs) == 1
probs: list = []
for _ in range(length):
probs.append(random.randrange(rng_range))
sum_of_list: int = sum(probs)
    # Render as strings truncated to 6 characters (roughly 4 decimal places)
probs = list(
map(lambda x: str(float(x) / sum_of_list)[:6], probs))
return probs
# Generation
with open(file_name, 'w') as f:
f.write(str(states_num) + '\n')
# Start probabilities pairs info
start_probs: list = generate_probability_list(num_non_zero_start_probs)
f.write(str(num_non_zero_start_probs) + '\n')
for i in range(num_non_zero_start_probs):
f.write(str(i) + ' ' + start_probs[i] + '\n')
# Emissions probabilities for each state
f.write(str(emit_range) + '\n')
for _ in range(states_num):
emit_probs: list = generate_probability_list(emit_range)
emit_str: str = ' '.join(emit_probs) + '\n'
f.write(emit_str)
# Transitions info
f.write(str(transitions_num) + '\n')
for src in range(states_num):
used_dst: list = []
for _ in range(trans_per_state):
dst: int = random.randrange(states_num)
while (dst in used_dst):
dst = random.randrange(states_num)
used_dst.append(dst)
trans_probs: list = generate_probability_list(trans_per_state)
for i in range(trans_per_state):
f.write(str(src) + ' ' + str(used_dst[i]) +
' ' + trans_probs[i] + '\n')
| 32.126984
| 77
| 0.64081
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 378
| 0.186759
|
f57e7874385469f7089b57659f20f37fe7da9980
| 1,764
|
py
|
Python
|
solutions/0409-longest-palindrome/longest-palindrome.py
|
iFun/Project-G
|
d33b3b3c7bcee64f93dc2539fd9955a27f321d96
|
[
"MIT"
] | null | null | null |
solutions/0409-longest-palindrome/longest-palindrome.py
|
iFun/Project-G
|
d33b3b3c7bcee64f93dc2539fd9955a27f321d96
|
[
"MIT"
] | null | null | null |
solutions/0409-longest-palindrome/longest-palindrome.py
|
iFun/Project-G
|
d33b3b3c7bcee64f93dc2539fd9955a27f321d96
|
[
"MIT"
] | null | null | null |
# Given a string which consists of lowercase or uppercase letters, find the length of the longest palindromes that can be built with those letters.
#
# This is case sensitive, for example "Aa" is not considered a palindrome here.
#
# Note:
# Assume the length of given string will not exceed 1,010.
#
#
# Example:
#
# Input:
# "abccccdd"
#
# Output:
# 7
#
# Explanation:
# One longest palindrome that can be built is "dccaccd", whose length is 7.
#
#
#
# @lc app=leetcode id=409 lang=python3
#
# [409] Longest Palindrome
#
# https://leetcode.com/problems/longest-palindrome/description/
#
# algorithms
# Easy (48.27%)
# Likes: 547
# Dislikes: 56
# Total Accepted: 100.6K
# Total Submissions: 208.5K
# Testcase Example: '"abccccdd"'
#
class Solution:
def longestPalindrome(self, s: str) -> int:
if not s:
return 0
        if len(s) == 1:
return 1
hash_table = {}
result = 0
for char in s:
if char not in hash_table:
hash_table[char] = 1
else:
hash_table[char] = hash_table[char] + 1
if hash_table[char] % 2 == 0:
result += 1
if result * 2 != len(s):
return result * 2 + 1
else:
return result * 2
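# An equivalent formulation of the same counting idea using
# collections.Counter (added as an illustrative sketch, not part of the
# original submission):
from collections import Counter

def longest_palindrome_length(s: str) -> int:
    counts = Counter(s)
    pairs = sum(c // 2 for c in counts.values())    # characters usable as mirrored pairs
    has_odd = any(c % 2 for c in counts.values())   # one odd character may sit in the middle
    return 2 * pairs + (1 if has_odd else 0)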
| 20.275862
| 148
| 0.616213
| 548
| 0.310658
| 0
| 0
| 0
| 0
| 0
| 0
| 1,152
| 0.653061
|
f57f22bf388fb4aa2a7b99663c5c1b62f0a9da4f
| 108
|
py
|
Python
|
getpaid/rest_framework/apps.py
|
wuuuduu/django-getpaid
|
d864de53bc947e2d1ab4f2d3879a803cab1216d3
|
[
"MIT"
] | 6
|
2020-05-26T08:49:10.000Z
|
2022-01-03T17:44:19.000Z
|
getpaid/rest_framework/apps.py
|
wuuuduu/django-getpaid
|
d864de53bc947e2d1ab4f2d3879a803cab1216d3
|
[
"MIT"
] | null | null | null |
getpaid/rest_framework/apps.py
|
wuuuduu/django-getpaid
|
d864de53bc947e2d1ab4f2d3879a803cab1216d3
|
[
"MIT"
] | 1
|
2021-08-23T06:59:05.000Z
|
2021-08-23T06:59:05.000Z
|
from django.apps import AppConfig
class GetpaidRestConfig(AppConfig):
name = "getpaid_rest_framework"
| 18
| 35
| 0.796296
| 71
| 0.657407
| 0
| 0
| 0
| 0
| 0
| 0
| 24
| 0.222222
|
f580e360a82ba7dad75ab77286f0111cf9d43ab3
| 392
|
py
|
Python
|
new_server.py
|
19bcs2410/flask_updated-web-chat
|
c72644a2b1feb2c6ba3b6c1c8d0ec53817e6d05e
|
[
"MIT"
] | null | null | null |
new_server.py
|
19bcs2410/flask_updated-web-chat
|
c72644a2b1feb2c6ba3b6c1c8d0ec53817e6d05e
|
[
"MIT"
] | null | null | null |
new_server.py
|
19bcs2410/flask_updated-web-chat
|
c72644a2b1feb2c6ba3b6c1c8d0ec53817e6d05e
|
[
"MIT"
] | null | null | null |
import socketio
sio = socketio.Client()
@sio.event
def connect():
print('connection established')
@sio.event
def my_message(data):
print('message received with ', data)
sio.emit('my response', {'response': 'my response'})
@sio.event
def disconnect():
print('disconnected from server')
sio.connect('http://localhost:5000')
sio.wait()
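# For reference, a minimal server this client could talk to, assuming the
# python-socketio server package plus eventlet are installed (a sketch, not
# part of the original file):
#
#   import eventlet
#   import socketio
#
#   sio = socketio.Server()
#   app = socketio.WSGIApp(sio)
#
#   @sio.event
#   def connect(sid, environ):
#       sio.emit('my_message', {'data': 'hello'}, room=sid)
#
#   eventlet.wsgi.server(eventlet.listen(('', 5000)), app)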
| 17.818182
| 57
| 0.660714
| 0
| 0
| 0
| 0
| 265
| 0.67602
| 0
| 0
| 133
| 0.339286
|
f582dcf3f1bc8baf921c638fcb41729df76ff930
| 2,042
|
py
|
Python
|
ricecooker/utils/libstudio.py
|
elaeon/ricecooker
|
e5ef13478625b6996775ae7690e027140bc63373
|
[
"MIT"
] | null | null | null |
ricecooker/utils/libstudio.py
|
elaeon/ricecooker
|
e5ef13478625b6996775ae7690e027140bc63373
|
[
"MIT"
] | 1
|
2019-04-20T07:14:04.000Z
|
2019-04-20T07:14:04.000Z
|
ricecooker/utils/libstudio.py
|
nucleogenesis/ricecooker
|
7525f842e34f5bbb37d1f2d3c85872faa32724ff
|
[
"MIT"
] | null | null | null |
import requests
import requests_cache
requests_cache.install_cache()
from ricecooker.config import LOGGER
STUDIO_URL = 'https://studio.learningequality.org'
NODES_ENDPOINT = STUDIO_URL + '/api/get_nodes_by_ids_complete/'
LICENSES_LIST_ENDPOINT = STUDIO_URL + '/api/license'
# TODO https://studio.learningequality.org/api/get_node_path/ca8f380/18932/41b2549
# TODO http://develop.studio.learningequality.org/api/channel/094097ce6f395ec0b50aabd04943c6b3
class StudioApi(object):
def __init__(self, token):
self.token = token
self.licenses_by_id = self.get_licenses()
def get_licenses(self):
headers = {"Authorization": "Token {0}".format(self.token)}
response = requests.get(LICENSES_LIST_ENDPOINT, headers=headers)
licenses_list = response.json()
licenses_dict = {}
for license in licenses_list:
licenses_dict[license['id']] = license
return licenses_dict
def get_nodes_by_ids_complete(self, studio_id):
headers = {"Authorization": "Token {0}".format(self.token)}
url = NODES_ENDPOINT + studio_id
LOGGER.info(' GET ' + url)
response = requests.get(url, headers=headers)
studio_node = response.json()[0]
return studio_node
def get_tree_for_studio_id(self, studio_id):
"""
        Returns the full json tree (recursive calls to /api/get_nodes_by_ids_complete)
"""
        channel_parent = {'children': []}  # sentinel root; the real channel ends up in its children
def _build_subtree(parent, studio_id):
subtree = self.get_nodes_by_ids_complete(studio_id)
if 'children' in subtree:
children_refs = subtree['children']
subtree['children'] = []
for child_studio_id in children_refs:
_build_subtree(subtree, child_studio_id)
parent['children'].append(subtree)
_build_subtree(channel_parent, studio_id)
channel = channel_parent['children'][0]
return channel
| 37.127273
| 94
| 0.664055
| 1,574
| 0.770813
| 0
| 0
| 0
| 0
| 0
| 0
| 515
| 0.252204
|
f583aafa3eab4133dcbce8cce69eba93bfd77474
| 2,163
|
py
|
Python
|
Python/kruskal.py
|
AtilioA/algoritmos-teoria-dos-grafos
|
287234d9d4c5c16707dfe71629f5c237e1759826
|
[
"Unlicense"
] | 2
|
2020-05-14T14:12:45.000Z
|
2020-09-07T20:44:23.000Z
|
Python/kruskal.py
|
AtilioA/teoria-dos-grafos
|
287234d9d4c5c16707dfe71629f5c237e1759826
|
[
"Unlicense"
] | null | null | null |
Python/kruskal.py
|
AtilioA/teoria-dos-grafos
|
287234d9d4c5c16707dfe71629f5c237e1759826
|
[
"Unlicense"
] | null | null | null |
# Supposedly doesn't work
from aresta import Aresta
from insert import insert_sort
from collections import defaultdict
def kruskal(arestas):
    arestas, vertices = insert_sort(arestas, defaultdict())
    # Initialize the actual tree
    arvore = list()
    # tamanhoArvore is the number of keys in the dict returned by insert_sort
    tamanhoArvore = len(vertices.keys())
    i = 0
    # While the tree is smaller than the vertex dictionary,
    while len(arvore) < tamanhoArvore - 1:
        # go through the sorted edges one by one
        aresta = arestas[i]
        i += 1
        # and check each endpoint's degree in the dictionary (at most 2)
        if vertices[aresta.first] < 2 and vertices[aresta.second] < 2:
            vertices[aresta.first] += 1
            vertices[aresta.second] += 1
            arvore.append(aresta)
    # Not every edge is needed: the tree size breaks the while loop first
    return arvore
if __name__ == "__main__":
arestas = list()
# arestas.append(Aresta(1, 'a', 'b'))
# arestas.append(Aresta(8, 'a', 'c'))
# arestas.append(Aresta(3, 'c', 'b'))
# arestas.append(Aresta(4, 'b', 'd'))
# arestas.append(Aresta(2, 'd', 'e'))
# arestas.append(Aresta(3, 'b', 'e'))
# arestas.append(Aresta(-1, 'c', 'd'))
# arestas.append(Aresta(13, '0', '3'))
# arestas.append(Aresta(24, '0', '1'))
# arestas.append(Aresta(13, '0', '2'))
# arestas.append(Aresta(22, '0', '4'))
# arestas.append(Aresta(13, '1', '3'))
# arestas.append(Aresta(22, '1', '2'))
# arestas.append(Aresta(13, '1', '4'))
# arestas.append(Aresta(19, '2', '3'))
# arestas.append(Aresta(14, '2', '4'))
# arestas.append(Aresta(19, '3', '4'))
arestas.append(Aresta(2, "0", "1"))
arestas.append(Aresta(-10, "0", "3"))
arestas.append(Aresta(3, "0", "2"))
arestas.append(Aresta(5, "1", "2"))
arestas.append(Aresta(0, "1", "3"))
arestas.append(Aresta(4, "2", "3"))
grafo = kruskal(arestas)
print("Imprimindo árvore geradora mínima:")
for aresta in grafo:
print(f"Peso {aresta.peso:2}: {aresta.first:1} para {aresta.second:2}")
| 33.796875
| 90
| 0.609801
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,181
| 0.542241
|
f58515c1c70e7d555b4680ca39bcd04616159789
| 1,983
|
py
|
Python
|
metodoDePaulo.py
|
paulossa/MSN
|
e80b4b82ae865ea50f69619712f7e73dc1eac95d
|
[
"MIT"
] | null | null | null |
metodoDePaulo.py
|
paulossa/MSN
|
e80b4b82ae865ea50f69619712f7e73dc1eac95d
|
[
"MIT"
] | null | null | null |
metodoDePaulo.py
|
paulossa/MSN
|
e80b4b82ae865ea50f69619712f7e73dc1eac95d
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import sys
__author__ = "Paulo Sérgio dos Santos Araujo"
__license__ = "MIT"
__version__ = "1.0.0"
__email__ = "paulo.araujo [at] splab.ufcg.edu.br"
class Msn:
"""
Essa classe feita para disciplina de Métodos de Software Númericos - UFCG 2018.2
se propõe a encontrar as raízes de uma certa equação definida por um usuário.
Parameters
----------
eq : str Equação a ser avaliada deve ser expressa em termos de 'x', pode usar funções de python.
tol : float Tolerância da precisão do MSN.
alg: string String contendo qual algoritmo deve ser executado 'bisection' ou 'false_position'
"""
def __init__(self, eq, tol, alg="false_position"):
self.eq = eq
self.tol = tol
self.alg = alg
def f(self, x):
return eval(self.eq)
def findRoots(self, a, b):
"""
        Finds the roots of the function on the interval [a, b]
"""
if abs(b-a) >= self.tol and (self.f(a) * self.f(b) > 0):
mid = (a + b) * .5
self.findRoots(a, mid)
self.findRoots(mid, b)
else:
iterNum = 1
while abs(b - a) > self.tol :
if self.alg == "bisection": estimate = (a + b) * .5
elif self.alg == "false_position": estimate = (a*(self.f(b)) - b * (self.f(a))) / (self.f(b) - self.f(a))
else:
                    print('Algorithm not defined')
exit(0)
if (self.f(a) * self.f(estimate) > 0): a = estimate
else: b = estimate
iterNum += 1
print(estimate)
print(iterNum)
if __name__ == "__main__":
msn = Msn(eq="-x**2 + 3", tol=0.01, alg="false_position")
msn.findRoots(-2, 3) # -1.7320508075688774 e 1.7320508075688776
msn2 = Msn(eq="-x**2 + 3", tol=0.01, alg="bisection")
msn2.findRoots(-2, 3) # -1.736328125 e 1.740234375
| 36.054545
| 122
| 0.534039
| 1,559
| 0.77911
| 0
| 0
| 0
| 0
| 0
| 0
| 853
| 0.426287
|
f585cb72a0db164994f5a14aac9910a31f98b2a9
| 1,096
|
py
|
Python
|
unit1/data-types.py
|
mmunozz/merit-notes
|
66c04939eb2aa9f63efe4ef947c291aafbc1ce0a
|
[
"MIT"
] | null | null | null |
unit1/data-types.py
|
mmunozz/merit-notes
|
66c04939eb2aa9f63efe4ef947c291aafbc1ce0a
|
[
"MIT"
] | null | null | null |
unit1/data-types.py
|
mmunozz/merit-notes
|
66c04939eb2aa9f63efe4ef947c291aafbc1ce0a
|
[
"MIT"
] | null | null | null |
"""
Project: Data Types Notes
Author: Mr. Buckley
Last update: 8/25/2018
Description: Goes over comments, int, float, str, and type casting
"""
# *** COMMENTS ***
# This is a comment (with a "#")
# Comments are only for the user's eyes, the program doesn't read them.
# Describe what sections of code do with a comment.
"""
This is a
multiline comment
"""
# *** DATA TYPE: INTEGER ***
# TODO: An integer number (no decimal)
integer = 5
print (integer)
print (type(integer))
# *** DATA TYPE: FLOAT ***
# TODO: A decimal number
decimal = 4.85
print (decimal)
print (type(decimal))
# *** DATA TYPE: STRING ***
# TODO: A string of characters enclosed in quotes
word = "these are my words"
print (word)
print (type(word))
# *** TYPE CASTING ***
# This converts one type to another
# TODO: Cast float to int
decimal = 55.55
dec_to_int = int(decimal)
print(dec_to_int)
# TODO: Cast string to int
number = "8"
print (int(number)+2)
# TODO: Cast input string to float
print ("give me a number and I'll add 1 to it")
number = float (input())
print (number + 1)
# TODO: Input demo (str to float)
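# (completing the TODO above: input() returns a string, so cast it to float)
print ("enter a decimal number")
value = float (input())
print (value)
print (type(value))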
| 20.296296
| 71
| 0.671533
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 780
| 0.711679
|
f5869e041f8cfc604cdaeae8bc529488e18f09e4
| 3,812
|
py
|
Python
|
zarr-dataset/test_anime_faces.py
|
tinon224/experiments
|
cbe066fb9eec20f290eaff5bb19131616af61bee
|
[
"MIT"
] | 103
|
2015-03-28T14:32:44.000Z
|
2021-03-31T08:20:24.000Z
|
zarr-dataset/test_anime_faces.py
|
tinon224/experiments
|
cbe066fb9eec20f290eaff5bb19131616af61bee
|
[
"MIT"
] | 6
|
2016-05-17T13:31:56.000Z
|
2020-11-13T17:19:19.000Z
|
zarr-dataset/test_anime_faces.py
|
tinon224/experiments
|
cbe066fb9eec20f290eaff5bb19131616af61bee
|
[
"MIT"
] | 106
|
2015-05-10T14:29:06.000Z
|
2021-07-13T08:19:19.000Z
|
import os
import zarr
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from tqdm import tqdm, trange
class FaceDataset(Dataset):
def __init__(self, path, transforms=None):
self.path = path
self.keys = ('images', 'labels')
        assert os.path.exists(path), 'file `{}` does not exist!'.format(path)
with zarr.LMDBStore(path) as store:
zarr_db = zarr.group(store=store)
self.num_examples = zarr_db['labels'].shape[0]
self.datasets = None
if transforms is None:
transforms = {
'labels': lambda v: torch.tensor(v, dtype=torch.long),
'images': lambda v: torch.tensor((v - 127.5)/127.5, dtype=torch.float32)
}
self.transforms = transforms
def __len__(self):
return self.num_examples
def __getitem__(self, idx):
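        # Open the LMDB store lazily on first access so that each DataLoader
        # worker process gets its own handle (LMDB environments are generally
        # not safe to share across fork()).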
if self.datasets is None:
store = zarr.LMDBStore(self.path)
zarr_db = zarr.group(store=store)
self.datasets = {key: zarr_db[key] for key in self.keys}
items = []
for key in self.keys:
item = self.datasets[key][idx]
if key in self.transforms:
item = self.transforms[key](item)
items.append(item)
return items
class Model(nn.Module):
def __init__(self, input_size=96 * 96 * 3, output_size=126,
hidden_size=25):
super().__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(3, 16, kernel_size=6, stride=2, padding=2),
nn.BatchNorm2d(16), nn.ReLU(), nn.MaxPool2d(
kernel_size=2, stride=2))
self.layer2 = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=6, stride=2, padding=2),
nn.BatchNorm2d(32), nn.ReLU(), nn.MaxPool2d(
kernel_size=2, stride=2))
self.fc = nn.Linear(6 * 6 * 32, output_size)
self.criteria = nn.CrossEntropyLoss()
def forward(self, inputs):
outputs = self.layer1(inputs)
outputs = self.layer2(outputs)
outputs = outputs.reshape(outputs.size(0), -1)
outputs = self.fc(outputs)
return outputs
def main(batch_size=64, epochs=50):
data_train = FaceDataset('data/anime_faces/train.lmdb')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
loader = DataLoader(data_train, batch_size=batch_size, num_workers=10)
model = Model()
model.to(device)
model.train()
optim = torch.optim.Adam(model.parameters(), lr=0.001)
for epoch in trange(epochs):
t = tqdm(loader)
for i, (images, labels) in enumerate(t):
images = images.to(device)
labels = labels.to(device)
optim.zero_grad()
logits = model(images)
loss = model.criteria(logits, labels)
loss.backward()
optim.step()
predicts = torch.argmax(F.softmax(logits, dim=1), dim=1)
accuracy = (predicts == labels).to(torch.float32).mean()
t.set_postfix(
epoch=epoch, i=i, loss=loss.item(), accuracy=accuracy.item())
data_val = FaceDataset('data/anime_faces/val.lmdb')
val_loader = DataLoader(data_val, batch_size=batch_size, num_workers=0)
total = len(data_val)
total_correct = 0
model.eval()
for images, labels in val_loader:
images = images.to(device)
labels = labels.to(device)
logits = model(images)
predicts = torch.argmax(F.softmax(logits, dim=1), dim=1)
correct = (predicts == labels).sum()
total_correct += correct.item()
print('Val accuracy = {}'.format(total_correct / total))
if __name__ == '__main__':
main()
| 33.147826
| 88
| 0.597587
| 2,057
| 0.539612
| 0
| 0
| 0
| 0
| 0
| 0
| 159
| 0.04171
|
f586db857714c3a406cc8d011335a90b361a86d4
| 1,066
|
py
|
Python
|
pepper/spiders/pepper.py
|
Guilehm/dr-pepper-crawler
|
0cc02f8b9bf9a739cb1644d4ef4c0c566428f6a2
|
[
"MIT"
] | null | null | null |
pepper/spiders/pepper.py
|
Guilehm/dr-pepper-crawler
|
0cc02f8b9bf9a739cb1644d4ef4c0c566428f6a2
|
[
"MIT"
] | 2
|
2021-03-31T19:47:28.000Z
|
2021-06-08T20:39:41.000Z
|
pepper/spiders/pepper.py
|
Guilehm/dr-pepper-crawler
|
0cc02f8b9bf9a739cb1644d4ef4c0c566428f6a2
|
[
"MIT"
] | null | null | null |
import os
import scrapy
from pepper.items import PepperItem
class PepperSpider(scrapy.Spider):
name = 'pepper'
start_urls = ['https://blog.drpepper.com.br']
def parse(self, response):
images = response.xpath(
'.//img[contains(@class,"size-full")]'
)
images += response.xpath(
'.//img[contains(@class,"alignnone")]'
)
images += response.xpath(
'.//img[contains(@src,"/tirinhas/")]'
)
images = set(images)
for img in images:
link = img.xpath('./@src').get()
yield PepperItem(
name=os.path.basename(link),
description=img.xpath('./parent::p/text()').get(),
link=link,
image_urls=[link],
)
current_page = response.xpath('//span[@class="page-numbers current"]')
next_page = current_page.xpath('./parent::li/following-sibling::li[1]/a/@href').get()
if next_page:
yield scrapy.Request(next_page, callback=self.parse)
| 28.052632
| 93
| 0.541276
| 1,001
| 0.939024
| 891
| 0.835835
| 0
| 0
| 0
| 0
| 265
| 0.248593
|
f5885ba233a8e2203989f8de45355db074bbea32
| 4,334
|
py
|
Python
|
spotseeker_server/test/search/uw_noise_level.py
|
uw-it-aca/spotseeker_server
|
1d8a5bf98b76fdcb807ed4cd32f939bb7e9aa66c
|
[
"Apache-2.0"
] | 5
|
2015-03-12T00:36:33.000Z
|
2022-02-24T16:41:25.000Z
|
spotseeker_server/test/search/uw_noise_level.py
|
uw-it-aca/spotseeker_server
|
1d8a5bf98b76fdcb807ed4cd32f939bb7e9aa66c
|
[
"Apache-2.0"
] | 133
|
2016-02-03T23:54:45.000Z
|
2022-03-30T21:33:58.000Z
|
spotseeker_server/test/search/uw_noise_level.py
|
uw-it-aca/spotseeker_server
|
1d8a5bf98b76fdcb807ed4cd32f939bb7e9aa66c
|
[
"Apache-2.0"
] | 6
|
2015-01-07T23:21:15.000Z
|
2017-12-07T08:26:33.000Z
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
import simplejson as json
from spotseeker_server.models import Spot, SpotExtendedInfo
from spotseeker_server.org_filters import SearchFilterChain
def spot_with_noise_level(name, noise_level):
"""Create a spot with the given noise level"""
spot = Spot.objects.create(name=name)
spot.spotextendedinfo_set.create(key='noise_level',
value=noise_level)
return spot
@override_settings(SPOTSEEKER_AUTH_MODULE='spotseeker_server.auth.all_ok',
SPOTSEEKER_SEARCH_FILTERS=(
'spotseeker_server.org_filters.uw_search.Filter',))
class UWNoiseLevelTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.silent_spot = spot_with_noise_level('Silent Spot', 'silent')
cls.quiet_spot = spot_with_noise_level('Quiet Spot', 'quiet')
cls.moderate_spot = spot_with_noise_level('Moderate', 'moderate')
cls.variable_spot = spot_with_noise_level('Var Spot', 'variable')
@classmethod
def tearDownClass(cls):
Spot.objects.all().delete()
def get_spots_for_noise_levels(self, levels):
"""Do a search for spots with particular noise levels"""
c = self.client
response = c.get('/api/v1/spot',
{'extended_info:noise_level': levels},
content_type='application/json')
return json.loads(response.content)
def assertResponseSpaces(self, res_json, spaces):
"""
Assert that a particular decoded response contains exactly the same
spaces as 'spaces'.
"""
def sortfunc(spot_dict):
return spot_dict['id']
expected_json = [spot.json_data_structure() for spot in spaces]
expected_json.sort(key=sortfunc)
res_json.sort(key=sortfunc)
self.assertEqual(expected_json, res_json)
def test_only_silent(self):
"""Searching for silent should return only silent"""
SearchFilterChain._load_filters() # make sure the uw filters is loaded
res_json = self.get_spots_for_noise_levels(['silent'])
self.assertResponseSpaces(res_json, [self.silent_spot])
def test_uw_only_quiet(self):
"""Quiet should return both a quiet spot and variable"""
SearchFilterChain._load_filters() # make sure the uw filters is loaded
res_json = self.get_spots_for_noise_levels(['quiet'])
expected = [self.quiet_spot, self.variable_spot]
self.assertResponseSpaces(res_json, expected)
def test_uw_only_moderate(self):
"""Moderate should return moderate and variable"""
SearchFilterChain._load_filters() # make sure the uw filters is loaded
res_json = self.get_spots_for_noise_levels(['moderate'])
expected = [self.moderate_spot, self.variable_spot]
self.assertResponseSpaces(res_json, expected)
def test_uw_silent_and_quiet(self):
"""Silent+quiet should give everything but moderate"""
SearchFilterChain._load_filters() # make sure the uw filters is loaded
res_json = self.get_spots_for_noise_levels(['silent', 'quiet'])
expected = [self.quiet_spot, self.silent_spot, self.variable_spot]
self.assertResponseSpaces(res_json, expected)
def test_uw_silent_and_moderate(self):
"""Silent+moderate should give everything but quiet"""
SearchFilterChain._load_filters() # make sure the uw filters is loaded
res_json = self.get_spots_for_noise_levels(['silent', 'moderate'])
expected = [self.silent_spot, self.moderate_spot, self.variable_spot]
self.assertResponseSpaces(res_json, expected)
def test_uw_all_three(self):
"""All 3 should give everything"""
SearchFilterChain._load_filters() # make sure the uw filters is loaded
query = ['silent', 'quiet', 'moderate']
res_json = self.get_spots_for_noise_levels(query)
expected = [self.silent_spot,
self.quiet_spot,
self.moderate_spot,
self.variable_spot]
self.assertResponseSpaces(res_json, expected)
| 42.910891
| 79
| 0.682972
| 3,512
| 0.810337
| 0
| 0
| 3,709
| 0.855791
| 0
| 0
| 1,140
| 0.263036
|
f58a63fcbad7aec0c720d44005782e265c314a57
| 17
|
py
|
Python
|
inventoryanalytics/simulation/deterministic/__init__.py
|
vishalbelsare/inventoryanalytics
|
85feff8f1abaf2c29414e066eed096ac3a74973b
|
[
"MIT"
] | 7
|
2018-06-17T02:45:33.000Z
|
2021-06-11T09:13:06.000Z
|
inventoryanalytics/simulation/deterministic/__init__.py
|
vishalbelsare/inventoryanalytics
|
85feff8f1abaf2c29414e066eed096ac3a74973b
|
[
"MIT"
] | 1
|
2021-02-07T03:33:22.000Z
|
2021-06-02T21:11:59.000Z
|
inventoryanalytics/simulation/deterministic/__init__.py
|
vishalbelsare/inventoryanalytics
|
85feff8f1abaf2c29414e066eed096ac3a74973b
|
[
"MIT"
] | 7
|
2018-07-14T19:45:43.000Z
|
2021-10-12T09:45:04.000Z
|
__all__ = ["des"]
| 17
| 17
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 0.294118
|
f58b58dad3bb3dc21147ea1fd781e8e5e4ef8b49
| 185
|
py
|
Python
|
spotdl/metadata/providers/__init__.py
|
khjxiaogu/spotify-downloader
|
a8dcb8d998da0769bbe210f2808d16b346453c23
|
[
"MIT"
] | 4,698
|
2017-06-20T22:37:10.000Z
|
2022-03-28T13:38:07.000Z
|
spotdl/metadata/providers/__init__.py
|
Delgan/spotify-downloader
|
8adf3e8d6b98269b1538dd91c9a44ed345c77545
|
[
"MIT"
] | 690
|
2017-06-20T20:08:42.000Z
|
2022-02-26T23:36:07.000Z
|
spotdl/metadata/providers/__init__.py
|
Delgan/spotify-downloader
|
8adf3e8d6b98269b1538dd91c9a44ed345c77545
|
[
"MIT"
] | 741
|
2017-06-21T23:32:51.000Z
|
2022-03-07T12:11:54.000Z
|
from spotdl.metadata.providers.spotify import ProviderSpotify
from spotdl.metadata.providers.youtube import ProviderYouTube
from spotdl.metadata.providers.youtube import YouTubeSearch
| 37
| 61
| 0.881081
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f58db2e3a8108081fdad6ca36c2b07a1f84d614d
| 1,476
|
py
|
Python
|
_03_AttributesAndMethodsLab/_02_Integer.py
|
Andrey-V-Georgiev/PythonOOP
|
73aabdccace5ce7183c39e2f5674f7e17475b1cc
|
[
"MIT"
] | 1
|
2021-06-30T10:34:38.000Z
|
2021-06-30T10:34:38.000Z
|
_03_AttributesAndMethodsLab/_02_Integer.py
|
Andrey-V-Georgiev/PythonOOP
|
73aabdccace5ce7183c39e2f5674f7e17475b1cc
|
[
"MIT"
] | null | null | null |
_03_AttributesAndMethodsLab/_02_Integer.py
|
Andrey-V-Georgiev/PythonOOP
|
73aabdccace5ce7183c39e2f5674f7e17475b1cc
|
[
"MIT"
] | null | null | null |
from math import floor
class Integer:
def __init__(self, value):
self.value = value
@classmethod
def from_float(cls, float_value):
if isinstance(float_value, float):
return cls(floor(float_value))
else:
return 'value is not a float'
@classmethod
def from_roman(cls, value):
try:
roman_nums = list(value)
translate = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
arabic_nums = [translate[r] for r in roman_nums]
arabic_sum = sum(
val if val >= next_val else -val
for val, next_val in zip(arabic_nums[:-1], arabic_nums[1:])
) + arabic_nums[-1]
return cls(int(arabic_sum))
except Exception:
pass
@classmethod
def from_string(cls, value):
if isinstance(value, str):
try:
return cls(int(value))
except Exception:
return 'wrong type'
else:
return 'wrong type'
def add(self, num):
if isinstance(num, Integer):
return self.value + getattr(num, 'value')
else:
return 'number should be an Integer instance'
def __repr__(self):
return self.value
first_num = Integer(10)
second_num = Integer.from_roman("IV")
print(Integer.from_float("2.6"))
print(Integer.from_string(2.6))
print(first_num.add(second_num))
| 25.448276
| 89
| 0.554201
| 1,287
| 0.871951
| 0
| 0
| 958
| 0.649051
| 0
| 0
| 121
| 0.081978
|
f58e292660fbb4b40d7f4326ad34ea03b891aa42
| 324
|
py
|
Python
|
app/app/schemas/token.py
|
Tall-Programacion-FIME/backend
|
95b6934fd57086ffc2be3d9135732df3d240f694
|
[
"Apache-2.0"
] | null | null | null |
app/app/schemas/token.py
|
Tall-Programacion-FIME/backend
|
95b6934fd57086ffc2be3d9135732df3d240f694
|
[
"Apache-2.0"
] | 13
|
2021-03-04T22:59:54.000Z
|
2021-05-16T23:24:22.000Z
|
app/app/schemas/token.py
|
Tall-Programacion-FIME/backend
|
95b6934fd57086ffc2be3d9135732df3d240f694
|
[
"Apache-2.0"
] | 1
|
2021-04-20T14:51:43.000Z
|
2021-04-20T14:51:43.000Z
|
import datetime
from typing import Optional
from pydantic import BaseModel
class TokenBase(BaseModel):
access_token: str
class Token(TokenBase):
refresh_token: str
class TokenData(BaseModel):
username: Optional[str] = None
class VerificationToken(BaseModel):
user_id: int
exp: datetime.datetime
| 14.727273
| 35
| 0.753086
| 236
| 0.728395
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f58e82435946520f98ad569c02443f0eda8332d6
| 1,988
|
py
|
Python
|
bot/finance.py
|
kianhean/ShiokBot
|
948417ead579d7476350592f0a960c2c0ea8b757
|
[
"BSD-2-Clause"
] | 6
|
2017-04-06T02:55:16.000Z
|
2020-01-27T05:14:12.000Z
|
bot/finance.py
|
kianhean/ShiokBot
|
948417ead579d7476350592f0a960c2c0ea8b757
|
[
"BSD-2-Clause"
] | 13
|
2016-09-12T14:24:22.000Z
|
2021-10-22T01:19:43.000Z
|
bot/finance.py
|
kianhean/ShiokBot
|
948417ead579d7476350592f0a960c2c0ea8b757
|
[
"BSD-2-Clause"
] | 1
|
2016-09-12T14:01:49.000Z
|
2016-09-12T14:01:49.000Z
|
import json
from urllib.request import urlopen
import requests
from bs4 import BeautifulSoup
def get_sti():
# https://github.com/hongtaocai/googlefinance
return '<a href="https://chart.finance.yahoo.com/t?s=%5eSTI&lang=en-SG®ion=SG&width=300&height=180" >'
def get_fx():
url = 'https://eservices.mas.gov.sg/api/action/datastore/search.json?resource_id=95932927-c8bc-4e7a-b484-68a66a24edfe&limit=1&sort=end_of_day%20desc'
request = requests.get(url)
data = json.loads(request.text)
result_today = data['result']['records'][0]
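    # MAS quotes SGD per unit of foreign currency (per 100 units for the
    # *_100 series); invert to get foreign-currency amounts per 1 SGD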
AUD = 1/float(result_today['aud_sgd'])*1
CNY = 1/float(result_today['cny_sgd_100'])*100
HKD = 1/float(result_today['hkd_sgd_100'])*100
EUR = 1/float(result_today['eur_sgd'])*1
JPY = 1/float(result_today['jpy_sgd_100'])*100
MYR = 1/float(result_today['myr_sgd_100'])*100
THB = 1/float(result_today['thb_sgd_100'])*100
TWD = 1/float(result_today['twd_sgd_100'])*100
USD = 1/float(result_today['usd_sgd'])*1
VND = 1/float(result_today['vnd_sgd_100'])*100
list_curr = {'AUD': AUD, 'CNY':CNY, 'HKD':HKD, 'EUR':EUR, 'JPY':JPY,
'MYR':MYR, 'THB':THB, 'TWD':TWD, 'USD':USD, 'VND':VND}
text_final = '<b>Latest SGD End of Day Rates ' + result_today['end_of_day'] + '</b>\n\n'
for key in sorted(list_curr.keys()):
text_final += key + " " + str(round(list_curr[key], 3)) + " = 1 SGD \n"
return text_final
def get_sibor():
# Connect to Source
url = 'http://www.moneysmart.sg/home-loan/sibor-trend'
data = urlopen(url)
soup = BeautifulSoup(data, 'html.parser')
# Find latest Result
result = soup.findAll("div", {"class" : "sibor-sor-table"})
result = result[0].findAll("td")
result = result[1:]
text_final = '<b>Latest SIBOR Rates</b>\n\n'
name = result[0:][::2]
rate = result[1:][::2]
for i in range(0, 4):
text_final += name[i].get_text() + " - " + rate[i].get_text() + "\n"
return text_final
| 33.694915
| 153
| 0.639336
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 715
| 0.359658
|
f5915199b7c0be4872a450c1503f4eb928f9e20f
| 637
|
py
|
Python
|
dependencyinjection/internal/callsite_resolvers.py
|
Cologler/dependencyinjection-python
|
dc05c61571f10652d82929ebec4b255f109b840b
|
[
"MIT"
] | null | null | null |
dependencyinjection/internal/callsite_resolvers.py
|
Cologler/dependencyinjection-python
|
dc05c61571f10652d82929ebec4b255f109b840b
|
[
"MIT"
] | null | null | null |
dependencyinjection/internal/callsite_resolvers.py
|
Cologler/dependencyinjection-python
|
dc05c61571f10652d82929ebec4b255f109b840b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017~2999 - cologler <skyoflw@gmail.com>
# ----------
#
# ----------
from .common import LifeTime, IServiceProvider, ICallSiteResolver
from .descriptors import CallableDescriptor
class CallSiteResolver(ICallSiteResolver):
def __init__(self, service_provider: IServiceProvider):
self._service_provider = service_provider
def resolve(self, service_type: type, depend_chain):
descriptor = CallableDescriptor.try_create(service_type, service_type, LifeTime.transient)
if descriptor:
return self._service_provider.get_callsite(descriptor, depend_chain)
| 33.526316
| 98
| 0.726845
| 413
| 0.648352
| 0
| 0
| 0
| 0
| 0
| 0
| 105
| 0.164835
|
f5931d77f9a036d1b90d5e9b889749394d2eff5e
| 1,124
|
py
|
Python
|
pipeline/filterstories.py
|
Xirider/BookGen
|
6eaffa936aea3215944dbfbf7ec92398b6e44587
|
[
"MIT"
] | 1
|
2021-05-31T09:40:19.000Z
|
2021-05-31T09:40:19.000Z
|
pipeline/filterstories.py
|
Xirider/BookGen
|
6eaffa936aea3215944dbfbf7ec92398b6e44587
|
[
"MIT"
] | 1
|
2021-06-30T14:35:22.000Z
|
2021-06-30T14:35:22.000Z
|
pipeline/filterstories.py
|
Xirider/BookGen
|
6eaffa936aea3215944dbfbf7ec92398b6e44587
|
[
"MIT"
] | null | null | null |
from joblib import Memory
cachedir = "cache"
memory = Memory(cachedir, verbose=10)
# @memory.cache
def filter_ff_stories(books, max_rating, min_words, max_words, min_chapters, max_chapters, max_books):
print("filtering ff stories")
ratings = {"K":1, "K+":2, "T":3, "M":4, "MA":5 }
rating_number = ratings[max_rating]
delete_ids = []
for bookid, book in enumerate(books):
if bookid % 1000 == 0:
print(f"filtering book {bookid} now")
removal = False
if book["Language"] != "English":
removal = True
if ratings[book["Rating"]] > rating_number:
removal = True
words = int(book["Words"].replace(",",""))
if not (min_words <= words <= max_words):
removal = True
chapters = int(book["Chapters"].replace(",",""))
if not (min_chapters <= chapters <= max_chapters):
removal = True
if removal:
delete_ids.append(bookid)
for bookid in reversed(delete_ids):
del books[bookid]
books = books[:max_books]
return books
| 25.545455
| 102
| 0.572954
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 145
| 0.129004
|
f5936c772afa998f17b6206beacedcb9d549bb50
| 2,167
|
py
|
Python
|
GoAround/goaround.py
|
AJunque9/GoAround
|
1f432a6a80e057e818e2c2073d3bafebba49de48
|
[
"MIT"
] | 6
|
2021-10-03T10:42:59.000Z
|
2022-01-05T05:25:57.000Z
|
GoAround/goaround.py
|
AJunque9/GoAround
|
1f432a6a80e057e818e2c2073d3bafebba49de48
|
[
"MIT"
] | null | null | null |
GoAround/goaround.py
|
AJunque9/GoAround
|
1f432a6a80e057e818e2c2073d3bafebba49de48
|
[
"MIT"
] | 1
|
2021-10-13T08:38:58.000Z
|
2021-10-13T08:38:58.000Z
|
import sys
import helpers.printer
import helpers.parser
import helpers.config
import program.obfuscation
import program.bypass
modes = helpers.config.Modes
bypass_methods = helpers.config.BypassMethods
obfuscation_methods = helpers.config.ObfuscationMethods
printer = helpers.printer.Printer()
parser = helpers.parser.Parser(
printer, modes, bypass_methods, obfuscation_methods)
bypass = program.bypass.Bypass()
obfuscation = program.obfuscation.Obfuscation()
def execute_program(options):
try:
print(options)
mode = options[0]
bypass_type = options[1]
obfuscation_type = options[2]
input = options[3]
output = options[4]
if mode == modes.bypass:
code = bypass.execute_bypass(bypass_type)
obfuscation.execute_ofuscation_code(
obfuscation_type, code, output)
elif mode == modes.obfuscate:
obfuscation.execute_obfuscation_file(
obfuscation_type, input, output)
else:
raise SystemExit("Not supported mode")
print("The file " + output + " has been created succesfully")
except:
raise SystemError("Options are not valid")
def main(args=None):
# Get command line arguments
if args is None:
try:
args = sys.argv[1:]
except IndexError:
printer.print_help()
if ("-h" in args or "--help" in args):
if len(args) == 1:
printer.print_help()
else:
raise SystemExit(
"Help option (-h or --help) must be run without arguments")
elif ("-v" in args or "--version" in args):
if len(args) == 1:
printer.print_version()
else:
raise SystemExit(
"Version option (-v or --version) must be run without arguments")
else:
if (len(args) == 0):
printer.print_help()
else:
# Mode - Bypass type - Obfuscation type - Input file to obfuscate - Output file to generate
options = parser.parse_command_line(args)
execute_program(options)
if __name__ == "__main__":
main()
| 29.283784
| 103
| 0.616059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 363
| 0.167513
|
f594558a69e840af8885fc68a994d40b44b65eaf
| 1,169
|
py
|
Python
|
src/data/CIFAR10_utils.py
|
namanwahi/Transfer-Learning
|
93b9f664fd727a93e0b09b859a20d863602ec743
|
[
"MIT"
] | null | null | null |
src/data/CIFAR10_utils.py
|
namanwahi/Transfer-Learning
|
93b9f664fd727a93e0b09b859a20d863602ec743
|
[
"MIT"
] | null | null | null |
src/data/CIFAR10_utils.py
|
namanwahi/Transfer-Learning
|
93b9f664fd727a93e0b09b859a20d863602ec743
|
[
"MIT"
] | null | null | null |
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import DataLoader
import os
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
resnet_18_default = 224
def _get_dataset(resize=resnet_18_default):
transform = transforms.Compose(
[transforms.Resize(resize),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root=dir_path, train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root=dir_path, train=False, download=True, transform=transform)
return trainset, testset
def _get_classes():
return ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
def get_dataloader(train=True, batch_size=16):
animal_indices = [2, 3, 4, 5, 6, 7]
#animal_sampler = SubsetRandomSampler(animal_indices)
if train:
return DataLoader(_get_dataset()[0], batch_size)
else:
return DataLoader(_get_dataset()[1], batch_size)
| 33.4
| 107
| 0.718563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 113
| 0.096664
|
f5970041908938ed814405d6c8377946dc2070bf
| 3,680
|
py
|
Python
|
SVHN/svhn.py
|
Tenant/Densenet-Tensorflow
|
27dca5a3f1a18ae070a8a6387c8a36b2a4be197e
|
[
"MIT"
] | null | null | null |
SVHN/svhn.py
|
Tenant/Densenet-Tensorflow
|
27dca5a3f1a18ae070a8a6387c8a36b2a4be197e
|
[
"MIT"
] | null | null | null |
SVHN/svhn.py
|
Tenant/Densenet-Tensorflow
|
27dca5a3f1a18ae070a8a6387c8a36b2a4be197e
|
[
"MIT"
] | null | null | null |
from scipy import io
import numpy as np
import random
import tensorflow as tf
class_num = 10
image_size = 32
img_channels = 3
def OneHot(label,n_classes):
label=np.array(label).reshape(-1)
label=np.eye(n_classes)[label]
return label
def prepare_data():
classes = 10
data1 = io.loadmat('./data/train_32x32.mat')
data2 = io.loadmat('./data/test_32x32.mat')
data3 = io.loadmat('./data/extra_32x32.mat')
train_data = data1['X']
train_labels = data1['y']
test_data = data2['X']
test_labels = data2['y']
extra_data = data3['X']
extra_labels = data3['y']
train_data = train_data.astype('float32')
test_data = test_data.astype('float32')
extra_data = extra_data.astype('float32')
train_data = np.transpose(train_data, (3, 0, 1, 2))
test_data = np.transpose(test_data, (3, 0, 1, 2))
extra_data = np.transpose(extra_data, (3, 0, 1, 2))
train_labels[train_labels == 10] = 0
test_labels[test_labels == 10] = 0
extra_labels[extra_labels == 10] = 0
train_labels = train_labels[:, 0]
test_labels = test_labels[:, 0]
extra_labels = extra_labels[:, 0]
train_labels = OneHot(train_labels, classes)
test_labels = OneHot(test_labels, classes)
extra_labels = OneHot(extra_labels, classes)
# truncate the train data and test data
train_data = train_data[0:50000,:,:,:]
train_labels = train_labels[0:50000,:]
test_data = test_data[0:10000,:,:,:]
test_labels = test_labels[0:10000,:]
# train_data = np.concatenate((train_data,extra_data),axis=0)
# train_labels = np.concatenate((train_labels,extra_labels),axis=0)
print('Train data:', train_data.shape, ', Train labels:', train_labels.shape)
print('Test data:', test_data.shape, ', Test labels:', test_labels.shape)
return train_data, train_labels, test_data, test_labels
def _random_crop(batch, crop_shape, padding=None):
oshape = np.shape(batch[0])
if padding:
oshape = (oshape[0] + 2 * padding, oshape[1] + 2 * padding)
new_batch = []
npad = ((padding, padding), (padding, padding), (0, 0))
for i in range(len(batch)):
new_batch.append(batch[i])
if padding:
new_batch[i] = np.lib.pad(batch[i], pad_width=npad,
mode='constant', constant_values=0)
nh = random.randint(0, oshape[0] - crop_shape[0])
nw = random.randint(0, oshape[1] - crop_shape[1])
new_batch[i] = new_batch[i][nh:nh + crop_shape[0],
nw:nw + crop_shape[1]]
return new_batch
def _random_flip_leftright(batch):
for i in range(len(batch)):
if bool(random.getrandbits(1)):
batch[i] = np.fliplr(batch[i])
return batch
def color_preprocessing(x_train, x_test):
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train[:, :, :, 0] = (x_train[:, :, :, 0] - np.mean(x_train[:, :, :, 0])) / np.std(x_train[:, :, :, 0])
x_train[:, :, :, 1] = (x_train[:, :, :, 1] - np.mean(x_train[:, :, :, 1])) / np.std(x_train[:, :, :, 1])
x_train[:, :, :, 2] = (x_train[:, :, :, 2] - np.mean(x_train[:, :, :, 2])) / np.std(x_train[:, :, :, 2])
x_test[:, :, :, 0] = (x_test[:, :, :, 0] - np.mean(x_test[:, :, :, 0])) / np.std(x_test[:, :, :, 0])
x_test[:, :, :, 1] = (x_test[:, :, :, 1] - np.mean(x_test[:, :, :, 1])) / np.std(x_test[:, :, :, 1])
x_test[:, :, :, 2] = (x_test[:, :, :, 2] - np.mean(x_test[:, :, :, 2])) / np.std(x_test[:, :, :, 2])
return x_train, x_test
def data_augmentation(batch):
batch = _random_flip_leftright(batch)
batch = _random_crop(batch, [32, 32], 4)
return batch
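# Usage sketch (an assumption, not in the original file): OneHot maps integer
# labels to one-hot rows, e.g. label 3 of 10 classes becomes a row with a 1 at
# column 3.
if __name__ == '__main__':
    print(OneHot([0, 3, 9], 10))  # 3x10 array with ones at columns 0, 3 and 9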
f59759138fa73fcc525ff95be2e388e6c99396f6 | 530 | py | Python | app/models/user.py | johnshumon/fastapi-boilerplate | f0cb31e74ab773b8ce044149b17ce24c2e7fa4fc | [MIT]
"""
User models module
"""
from sqlalchemy import Column, Integer, String
from app.models import Base
class User(Base):
"""User class"""
id: int = Column(Integer, primary_key=True, index=True)
firstname: str = Column(String(50), nullable=False, index=True)
lastname: str = Column(String(50), nullable=False, index=True)
email: str = Column(String(100), nullable=False)
username: str = Column(String(50), nullable=False, unique=True, index=True)
password: str = Column(String(100), nullable=False)
f5987f75714e27b31f8445c59b2b8df50b29383c | 782 | py | Python | oops_fhir/r4/value_set/parent_relationship_codes.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [MIT]
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.v3_role_code import v3RoleCode
__all__ = ["ParentRelationshipCodes"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class ParentRelationshipCodes(ValueSet):
"""
Parent Relationship Codes
The value set includes the v3 RoleCode PRN (parent), TWIN (twin) and all
of their specializations. It covers the relationships needed to
establish genetic pedigree relationships between family members.
Status: draft - Version: 4.0.1
http://hl7.org/fhir/ValueSet/parent-relationship-codes
"""
# TODO: fix this template issue1
pass
class Meta:
resource = _resource
f59c2ee308a2569240f5244e69e497fec60a9ffe | 253 | py | Python | setting.py | JamesPerlman/Dain-App | f589abdca8309cfdb6dd106da7c7c4613d152c72 | [MIT] | stars: 688, issues: 29, forks: 88
counter = 0
interpolations = None
padding = None
def AddCounter(tag):
global counter
#print("\nSetting:\n" +tag +": " + str(counter) + "\n")
counter += 1
def SetPad(pad):
global padding
padding = pad
def GetPad():
global padding
return padding
f59da655c4bae4aa2a9b07d54d040a1c00439910 | 1,540 | py | Python | bgjobs/plugins.py | holtgrewe/sodar_core | 116c5c8abc1dea483a640ba68af6d5cf4d27c8d7 | [MIT]
"""Code related to ``django-plugins``.
First, it creates a ``ProjectAppPluginPoint`` for the ``bgjobs`` app.
Second, it creates a new plugin point for the registering ``BackgroundJob``
specializations.
"""
from djangoplugins.point import PluginPoint
from projectroles.plugins import ProjectAppPluginPoint
from .urls import urlpatterns
class ProjectAppPlugin(ProjectAppPluginPoint):
"""Plugin for registering app with the ``ProjectAppPluginPoint`` from the
``projectroles`` app."""
name = 'bgjobs'
title = 'Background Jobs'
urls = urlpatterns
icon = 'tasks'
entry_point_url_id = 'bgjobs:list'
description = 'Jobs executed in the background'
#: Required permission for accessing the app
app_permission = 'bgjobs.view_data'
#: Enable or disable general search from project title bar
search_enable = False
#: List of search object types for the app
search_types = []
#: Search results template
search_template = None
#: App card template for the project details page
details_template = 'bgjobs/_details_card.html'
#: App card title for the project details page
details_title = 'Background Jobs App Overview'
#: Position in plugin ordering
plugin_ordering = 100
class BackgroundJobsPluginPoint(PluginPoint):
"""Definition of a plugin point for registering background job types with
the ``bgjobs`` app."""
#: Mapping from job specialization name to specialization class
# (OneToOneField "inheritance").
job_specs = {}
f5a27b850295f14cce9d9e2cff15b6524fbbecf8 | 4,562 | py | Python | cogs/automod.py | ZeroTwo36/midna | f78591baacdd32386d9155cb7728de7384016361 | [MIT] | stars: 1
import discord as nextcord
import asyncio
from discord.ext import commands
import json
import time
import typing
def log(*,text):
...
class AutoMod(commands.Cog):
def __init__(self,bot):
self.bot=bot
self._cd = commands.CooldownMapping.from_cooldown(5, 5, commands.BucketType.member) # Change accordingly
def get_ratelimit(self, message: nextcord.Message) -> typing.Optional[int]:
"""Returns the ratelimit left"""
bucket = self._cd.get_bucket(message)
return bucket.update_rate_limit()
@commands.Cog.listener()
async def on_message(self,message):
if message.author.bot:return
with open("config.json") as f:
config = json.load(f)
if message.content == message.content.upper():
print("ALL CAPS")
if config[str(message.guild.id)]["ancap"] == True and not str(message.channel.id) in config[str(message.guild.id)]["whitelists"]:
await message.delete()
await message.author.send("Please don't spam Capital letters")
ratelimit = self.get_ratelimit(message)
if ratelimit is None:
...
else:
role = nextcord.utils.get(message.guild.roles,name="MUTED (By Midna)")
if not role:
role = await message.guild.create_role(name="MUTED (By Midna)",permissions=nextcord.Permissions(send_messages=False,read_messages=True))
await role.edit(position=2)
for c in message.guild.categories:
await c.set_permissions(role,send_messages=False)
await message.author.add_roles(role)
embed = nextcord.Embed(title="🔇 Member silenced | 2m")
embed.add_field(name="Reason",value="Message Spam")
embed.set_footer(text=f'{message.author} | {message.author.id}')
await message.channel.send(embed=embed)
await asyncio.sleep(120)
await message.author.remove_roles(role)
@commands.command()
async def anticaps(self,ctx,enabled:bool=False):
with open("config.json") as f:
config = json.load(f)
config[str(ctx.guild.id)]["ancap"] = enabled
with open("config.json","w+") as f:
json.dump(config,f)
embed = nextcord.Embed(color=nextcord.Color.green())
embed.description = f':white_check_mark: Anti Caps is now set to {enabled}!'
await ctx.send(embed=embed)
@commands.command(help="Open the Lockdown")
@commands.has_permissions(manage_channels=True)
async def openlockdown(self,ctx):
with open("config.json") as f:
config = json.load(f)
await ctx.channel.edit(slowmode_delay=0)
await ctx.send("This channel is no longer under lockdown")
@commands.command(help="Starts a Lockdown in the current channel")
@commands.has_permissions(manage_channels=True)
async def lockdown(self,ctx):
with open("config.json") as f:
config = json.load(f)
await ctx.channel.edit(slowmode_delay=config[str(ctx.guild.id)]["emergencyLock"])
await ctx.send("This channel is now under lockdown")
@commands.command(help="Set the Rate Limit, a channel will be put into upon being spammed")
@commands.has_permissions(manage_channels=True)
async def emratelimit(self,ctx,rate=60):
with open("config.json") as f:
config = json.load(f)
config[str(ctx.guild.id)]["emergencyLock"] = rate
with open("config.json","w+") as f:
json.dump(config,f)
embed = nextcord.Embed(color=nextcord.Color.green())
embed.description = f':white_check_mark: Emergency Member Rate limit is now set to {rate}!'
await ctx.send(embed=embed)
@commands.command(help="The Threshold of how many messages a user can send before its detected as spam")
@commands.has_permissions(manage_channels=True)
async def empanicrate(self,ctx,rate=5):
with open("config.json") as f:
config = json.load(f)
config[str(ctx.guild.id)]["panicRate"] = rate
with open("config.json","w+") as f:
json.dump(config,f)
embed = nextcord.Embed(color=nextcord.Color.green())
embed.description = f':white_check_mark: Emergency Member Rate limit is now set to {rate}!'
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(AutoMod(bot))
f5a40afb92b821bdbd1bca8cea58ac0b9702d2e6 | 960 | py | Python | task07.py | G00387867/pands-problems | 01db5fd26eb0327f6f61da7e06dfe1f2b9f0333c | [MIT]
# Adam
# A program that reads in a text
# file and outputs the number of e's it contains
# The program takes the filename from
# an argument on the command line.
# I found information on this website:
# https://www.sanfoundry.com/python-program-read-file-counts-number/
#fname = input("Enter file name: ")
#l = input("Enter letter to be searched: ")
#e = 0
#with open(fname, "r") as f:
#for line in f:
#words = line.split()
#for i in words:
#for letter in i:
#if(letter == e):
#e = e+1
#print("Occurences of the letter: ")
#print(e)
# Requirement for this assignmnet is to only print
# The occurence of letter E.
fname = input("Enter file name: ")
e = 0
with open(fname, "r") as f:
for line in f:
words = line.split()
for i in words:
for letter in i:
if(letter == "e"):
e = e+1
print(e)
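# A shorter, equivalent variant (a sketch, not part of the original exercise):
# str.count tallies a single character directly, and whitespace contains no
# letter e, so the result matches the word-splitting loop above.
#   with open(fname) as f:
#       print(f.read().count("e"))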
f5a59287ceaf7b3b0006e335abd2aae06f9ad302 | 3,936 | py | Python | texext/tests/test_tinypages.py | effigies/texext | 545ecf3715ab43bfb95859861fbb17af1fef512d | [BSD-2-Clause]
""" Tests for tinypages build using sphinx extensions """
from os.path import (join as pjoin, dirname, isdir)
import sphinx
SPHINX_ge_1p5 = sphinx.version_info[:2] >= (1, 5)
from sphinxtesters import PageBuilder
HERE = dirname(__file__)
PAGES = pjoin(HERE, 'tinypages')
from texext.tests.test_plotdirective import format_math_block
def _pdiff(str1, str2):
# For debugging
from difflib import ndiff
print(''.join(ndiff(str1.splitlines(True), str2.splitlines(True))))
class TestTinyPages(PageBuilder):
# Test build and output of tinypages project
page_source_template = PAGES
def test_some_math(self):
assert isdir(self.out_dir)
assert isdir(self.doctree_dir)
doctree = self.get_doctree('some_math')
assert len(doctree.document) == 1
tree_str = self.doctree2str(doctree)
if SPHINX_ge_1p5:
back_ref = (
'<paragraph>Refers to equation at '
'<pending_xref refdoc="some_math" refdomain="math" '
'refexplicit="False" reftarget="some-label" '
'reftype="eq" refwarn="True">'
'<literal classes="xref eq">some-label</literal>'
'</pending_xref>')
else:
back_ref=(
'<paragraph>Refers to equation at '
'<eqref docname="some_math" '
'target="some-label">(?)</eqref>')
expected = (
'<title>Some math</title>\n'
'<paragraph>Here <math latex="a = 1"/>, except '
'<title_reference>$b = 2$</title_reference>.</paragraph>\n'
'<paragraph>Here <math latex="c = 3"/>, except '
'<literal>$d = 4$</literal>.</paragraph>\n'
'<literal_block xml:space="preserve">'
'Here $e = 5$</literal_block>\n'
'<bullet_list bullet="*">'
'<list_item>'
'<paragraph>'
'A list item containing\n'
'<math latex="f = 6"/> some mathematics.'
'</paragraph>'
'</list_item>'
'<list_item>'
'<paragraph>'
'A list item containing '
'<literal>a literal across\nlines</literal> '
'and also <math latex="g = 7"/> some mathematics.'
'</paragraph>'
'</list_item>'
'</bullet_list>\n'
+ format_math_block('some_math', "10 a + 2 b + q") +
'\n<paragraph>More text</paragraph>\n'
'<target refid="equation-some-label"/>\n'
+ format_math_block(
'some_math', "5 a + 3 b",
label='some-label',
number='1',
ids='equation-some-label') +
'\n<paragraph>Yet more text</paragraph>\n'
+ format_math_block(
"some_math", latex="5 w + 3 x") + '\n' +
r'<paragraph>Math with <math latex="\beta"/> a backslash.'
'</paragraph>\n'
'<paragraph>' # What happens to backslashes?
'A protected whitespace with <math latex="dollars"/>.'
'</paragraph>\n'
'<paragraph>'
'Some * asterisks *. <math latex="dollars"/>. '
r'A line break. Protected \ backslash. '
'Protected n in <math latex="a"/> line.</paragraph>\n'
# Do labels get set as targets?
+ back_ref +
'.</paragraph>')
assert tree_str == expected
class TestTopLevel(TestTinyPages):
# Test we can import math_dollar with just `texext`
@classmethod
def modify_source(cls):
conf_fname = pjoin(cls.page_source, 'conf.py')
with open(conf_fname, 'rt') as fobj:
contents = fobj.read()
contents = contents.replace("'texext.mathcode',\n", "")
contents = contents.replace("'texext.math_dollar'", "'texext'")
with open(conf_fname, 'wt') as fobj:
fobj.write(contents)
f5a8efb033fff75dd7f358a028f0ce20386e8ec9 | 3,708 | py | Python | core.py | marcolcl/django-toolkit | f425cccb6f55f3afce4326e7e79770e5c36c9646 | [MIT] | stars: 1, issues: 5
import logging
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.http import HttpRequest
from rest_framework.exceptions import NotFound
from rest_framework.test import APIRequestFactory
from rest_framework.views import exception_handler, APIView
from typing import List, TypeVar
logger = logging.getLogger(__name__)
T = TypeVar('T')
NON_CLONEABLE_MODELS: List[str] = [
'User',
]
@transaction.atomic
def clone_instance(instance: T) -> T:
"""
Clone any django model instance and its related instances recursively
Ignore many-to-many or one-to-many relationship (reverse foreign key)
Also ignore user model
ref:
https://docs.djangoproject.com/en/2.2/ref/models/fields/#attributes-for-fields-with-relations
https://github.com/jackton1/django-clone/blob/master/model_clone/mixins/clone.py
"""
# initialize a new instance
cloned_instance = instance.__class__()
fields = instance._meta.get_fields()
for field in fields:
# only clone one-to-one or forward foreign key relationship
# ignore many-to-many or reverse foreign key relationship
if field.one_to_one or field.many_to_one:
_related = getattr(instance, field.name)
# skip if related instance is None
if _related is None:
continue
# use the same reference for non-cloneable related models
if field.related_model.__name__ in NON_CLONEABLE_MODELS:
setattr(cloned_instance, field.name, _related)
else:
_cloned_related = clone_instance(_related)
setattr(cloned_instance, field.name, _cloned_related)
# simply copy the value for those non-relation fields
if not field.is_relation:
_value = getattr(instance, field.name)
setattr(cloned_instance, field.name, _value)
# set primary key as None to save a new record in DB
cloned_instance.pk = None
cloned_instance.save()
return cloned_instance
def exception_logging_handler(exc: Exception, context: dict):
"""
Intercept DRF error handler to log the error message
Update the REST_FRAMEWORK setting in settings.py to use this handler
REST_FRAMEWORK = {
'EXCEPTION_HANDLER': 'core.exception_logging_handler',
}
"""
logger.warning(exc)
# translate uncaught Django ObjectDoesNotExist exception to NotFound
if isinstance(exc, ObjectDoesNotExist):
logger.error(f'uncaught ObjectDoesNotExist error: {exc} - {context}')
exc = NotFound(str(exc))
# follow DRF default exception handler
response = exception_handler(exc, context)
return response
def make_drf_request(request: HttpRequest = None, headers: dict = None):
"""
The request object made by APIRequestFactory is `WSGIRequest` which
doesn't have `.query_params` or `.data` method as recommended by DRF.
It only gets "upgraded" to DRF `Request` class after passing through
the `APIView`, which invokes `.initialize_request` internally.
This helper method uses a dummy API view to return a DRF `Request`
object for testing purpose.
Ref:
https://stackoverflow.com/questions/28421797/django-rest-framework-apirequestfactory-request-object-has-no-attribute-query-p
https://github.com/encode/django-rest-framework/issues/3608
"""
class DummyView(APIView):
pass
if request is None:
# use a default request
request = APIRequestFactory().get('/')
drf_request = DummyView().initialize_request(request)
if headers:
drf_request.headers = headers
return drf_request
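# Usage sketch (an assumption, not in the original module): building a DRF
# request with custom headers inside a unit test; this needs configured Django
# settings to actually run, hence shown as a comment.
#   request = make_drf_request(headers={"X-Request-Id": "test-123"})
#   assert request.headers["X-Request-Id"] == "test-123"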
f5aa196ccf6037cd4fcdad669c9f9252c8778f6e | 436 | py | Python | atcoder/past/past201912_f.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [MIT] | stars: 1
S = input()
arr = []
now = []
counter = 0
for s in S:
now.append(s.lower())
if s.isupper():
if counter == 0:
counter += 1
else:
arr.append(''.join(now))
now = []
counter = 0
arr.sort()
for word in arr:
for i, s in enumerate(word):
if i == 0 or i == len(word) - 1:
print(s.upper(), end='')
else:
print(s, end='')
print()
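# Worked example (an assumption, not from the original solution): each word in
# S starts and ends with an uppercase letter. For S = "EfgHAbcD" the loop
# collects "efgh" and "abcd", sorts them, and prints one word per line with
# the first and last letters re-capitalized:
#   AbcD
#   EfgH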
f5aa1cc085abe91e5e9d7da0530662e853080933 | 738 | py | Python | lesson13n2/states/out.py | muzudho/py-state-machine-practice | e31c066f4cf142b6b6c5ff273b56a0f89428c59e | [MIT]
from lesson12_projects.house3.data.const import E_TURNED_KNOB, MSG_TURN_KNOB, E_FAILED
class OutState:
def update(self, req):
self.on_entry(req)
        # Input
msg = self.on_trigger(req)
        # We are outside the house; sending the message 'Turn knob' is the correct move
if msg == MSG_TURN_KNOB:
self.on_turned_knob(req)
return E_TURNED_KNOB
else:
self.on_failed(req)
return E_FAILED
def on_entry(self, req):
req.c_sock.send(
"""You can see the house.
You can see the close knob.""".encode()
)
def on_trigger(self, req):
return req.pull_trigger()
def on_turned_knob(self, req):
pass
def on_failed(self, req):
pass
190f3d0f2aa0d41a590c2d4d36fe77e3833762f3 | 2,171 | py | Python | setup.py | biodatageeks/pysequila | 2fb3b83f008e6b7f874648ea02e7ca307d8519d3 | [Apache-2.0] | stars: 1, issues: 9, forks: 1
# -*- coding: utf-8 -*-
"""setup.py"""
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class Tox(TestCommand):
user_options = [('tox-args=', 'a', 'Arguments to pass to tox')]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = None
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import tox
import shlex
if self.tox_args:
errno = tox.cmdline(args=shlex.split(self.tox_args))
else:
errno = tox.cmdline(self.test_args)
sys.exit(errno)
def read_content(filepath):
with open(filepath) as fobj:
return fobj.read()
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
long_description = (
read_content("README.rst") +
read_content(os.path.join("docs/source", "CHANGELOG.rst")))
requires = ['setuptools', 'typeguard==2.5.0', 'pyspark==3.0.1', 'findspark']
extras_require = {
'reST': ['Sphinx'],
}
if os.environ.get('READTHEDOCS', None):
extras_require['reST'].append('recommonmark')
setup(name='pysequila',
version=os.getenv('VERSION', '0.1.0'),
description='An SQL-based solution for large-scale genomic analysis',
long_description=long_description,
long_description_content_type='text/x-rst',
author='biodatageeks',
author_email='team@biodatageeks.org',
url='https://pysequila.biodatageeks.org',
classifiers=classifiers,
packages=['pysequila'],
data_files=[],
install_requires=requires,
include_package_data=True,
extras_require=extras_require,
tests_require=['tox'],
cmdclass={'test': Tox},)
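# Usage sketch (not part of the original file): the Tox command class above
# forwards extra arguments to tox via its tox-args option, e.g.
#   python setup.py test --tox-args="-e py37"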
1910c0a5070edd02fb3f35021e3104c2486a91bb | 2,266 | py | Python | binaryblob.py | rikusalminen/trimuncher | bdf534fdf382c750e0ec7a6031433de88014e656 | [Zlib] | stars: 1
from sys import stdin, stdout
from struct import pack, unpack
def float2half(float_val):
f = unpack('I', pack('f', float_val))[0]
if f == 0: return 0
if f == 0x80000000: return 0x8000
return ((f>>16)&0x8000) | ((((f&0x7f800000)-0x38000000)>>13)&0x7c00) | ((f>>13)&0x03ff)
def half2float(h):
if h == 0: return 0
if h == 0x8000: return 0x80000000
f = ((h&0x8000)<<16) | (((h&0x7c00)+0x1C000)<<13) | ((h&0x03FF)<<13)
return unpack('f', pack('I', f))[0]
def blob_pack_vertex_attr(attr, v, little_endian = True):
    # Python 3 removed tuple parameters from function signatures
    count, typ, signed, bits, padding = attr
assert typ is int or typ is float
assert len(padding) == count
assert len(v) <= count
pad = v + padding[len(v):]
fmt = None
if typ is float:
float_fmts = {16: 'H', 32: 'f', 64: 'd'}
if bits == 16:
pad = map(float2half, pad)
fmt = float_fmts[bits]
else:
int_fmts = {8: 'b', 16: 'h', 32: 'i', 64: 'q'}
uint_fmts = {8: 'B', 16: 'H', 32: 'I', 64: 'Q'}
fmt = int_fmts[bits] if signed else uint_fmts[bits]
return pack(('<' if little_endian else '>') + fmt * count, *pad)
def blob_vertices(attrs, verts, little_endian = True):
for v in verts:
for (attr, data) in zip(attrs, v):
yield blob_pack_vertex_attr(attr, data, little_endian)
def blob_indices(indices, restart = None, little_endian = True):
fmt = ('<' if little_endian else '>') + 'H'
for primitive in indices:
for index in primitive:
yield pack(fmt, index)
if restart is not None:
yield pack(fmt, restart)
def blob_vertex_write(attrs, verts, out=stdout, little_endian = True):
for blob in blob_vertices(attrs, verts, little_endian):
out.write(blob)
def blob_vertex_save(filename, attrs, verts, little_endian = True):
    with open(filename, 'wb', 0) as f:  # unbuffered binary write ('wb0' is not a valid mode)
blob_vertex_write(attrs, verts, f, little_endian)
def blob_index_write(indices, out=stdout, restart = None, little_endian = True):
for blob in blob_indices(indices, restart, little_endian):
out.write(blob)
def blob_index_save(filename, indices, restart = None, little_endian = True):
    with open(filename, 'wb', 0) as f:  # unbuffered binary write ('wb0' is not a valid mode)
blob_index_write(indices, f, restart, little_endian)
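# Round-trip sketch (an assumption, not in the original module): half precision
# keeps about 11 significant bits, so exactly representable values survive the
# float -> half -> float conversion intact.
if __name__ == '__main__':
    for value in (0.0, 1.0, -2.5, 3.140625):
        print(value, half2float(float2half(value)))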
1910e99d0b7143a24de2db38d697e59e51df210d | 2,234 | py | Python | lib/piservices/remote/integration.py | creative-workflow/pi-setup | d6d28cb8d34ef71b1e8ac95dd94099bfad08837a | [MIT] | stars: 1, issues: 4
from config import *
from template import *
from dictasobject import DictAsObject
class RemoteFileHelper:
def __init__(self, service):
self.service = service
self.config = DictAsObject({
'ini' : self.config_ini,
'parser' : self.config_parser,
'shellvars' : self.config_shellvars,
'whitespace' : self.config_whitespace
})
def build_local_lpath(self, path):
if not path:
return path
if path and path.startswith('/'):
return path
if os.path.isfile(self.service.local_path+'/'+path):
return self.service.local_path+'/'+path
return path
def abstract(self, remote_file=None):
return AbstractRemoteLoader(self.service,
self.build_remote_path(remote_file))
def template(self, local_path, remote_path=None, *args, **kwargs):
return RemoteConfigFileWithTemplate(self.service,
self.build_local_lpath(local_path),
remote_path,
*args, **kwargs)
def partial(self, local_path, remote_path=None, *args, **kwargs):
return RemoteConfigFileWithPartial( self.service,
self.build_local_lpath(local_path),
remote_path, *args, **kwargs)
def config_ini(self, remote_file = None, *args, **kwargs):
if remote_file: remote_file = self.service.normalize_path(remote_file)
return RemoteConfigIniLoader(self.service, remote_file, *args, **kwargs)
def config_parser(self, remote_file = None, *args, **kwargs):
if remote_file: remote_file = self.service.normalize_path(remote_file)
return RemoteConfigParser(self.service, remote_file, *args, **kwargs)
def config_shellvars(self, remote_file = None, *args, **kwargs):
if remote_file: remote_file = self.service.normalize_path(remote_file)
return RemoteShellVarsLoader(self.service, remote_file, *args, **kwargs)
def config_whitespace(self, remote_file = None, *args, **kwargs):
if remote_file: remote_file = self.service.normalize_path(remote_file)
return RemoteWhitespaceConfigLoader(self.service, remote_file, *args, **kwargs)
191159a3e3b8327371261a5ae76fdabd0024bab8 | 4,400 | py | Python | src/architecture/cartpole_target.py | ginevracoal/adversarialGAN | 7a38e037a5ddbbe0bb4daed35fcb0e6fbf9b311e | [CC-BY-4.0] | stars: 1, forks: 1
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import matplotlib.pyplot as plt
import architecture.default
from architecture.default import Defender
DEBUG=False
BATCH_SIZE=32
FIXED_POLICY=False
NORMALIZE=False
K=10
PENALTY=10
MAX_TARGET_POS=10
torch.set_default_tensor_type(torch.DoubleTensor)
class Attacker(architecture.default.Attacker):
""" NN architecture for the attacker """
def __init__(self, model, n_hidden_layers, layer_size, n_coeff, noise_size):
super(Attacker, self).__init__(model, n_hidden_layers, layer_size, n_coeff, noise_size)
def forward(self, x):
output = self.nn(x)
eps = output[1]
mu = torch.sigmoid(output[1])
return eps, mu
class Trainer(architecture.default.Trainer):
""" The class contains the training logic """
def __init__(self, world_model, robustness_computer, \
attacker_nn, defender_nn, lr, logging_dir=None):
super(Trainer, self).__init__(world_model, robustness_computer, attacker_nn, defender_nn, lr, logging_dir)
def train_attacker_step(self, timesteps, dt, atk_static):
self.attacker_optimizer.zero_grad()
if FIXED_POLICY is True:
z = torch.rand(self.attacker.noise_size)
oe = torch.tensor(self.model.environment.status)
oa = torch.tensor(self.model.agent.status)
x_target = oe[3]
atk_policy = self.attacker(torch.cat((z, oe)))
with torch.no_grad():
def_policy = self.defender(oa)
cumloss = 0.
if PENALTY:
previous_def_policy = torch.zeros_like(self.defender(torch.tensor(self.model.agent.status)))
for t in range(timesteps):
if FIXED_POLICY is False:
z = torch.rand(self.attacker.noise_size)
oe = torch.tensor(self.model.environment.status)
oa = torch.tensor(self.model.agent.status)
x_target = oe[3]
atk_policy = self.attacker(torch.cat((z, oe)))
with torch.no_grad():
def_policy = self.defender(oa)
self.model.step(atk_policy, def_policy, dt)
if t>K:
rho = self.robustness_computer.compute(self.model)
cumloss += self.attacker_loss_fn(rho)
if torch.abs(x_target) >= MAX_TARGET_POS:
cumloss += PENALTY
cumloss.backward()
self.attacker_optimizer.step()
if DEBUG:
print(self.attacker.state_dict()["nn.0.bias"])
return cumloss.detach() / timesteps
def train_defender_step(self, timesteps, dt, atk_static):
self.defender_optimizer.zero_grad()
if FIXED_POLICY is True:
z = torch.rand(self.attacker.noise_size)
oe = torch.tensor(self.model.environment.status)
oa = torch.tensor(self.model.agent.status)
x_target = oa[3]
with torch.no_grad():
atk_policy = self.attacker(torch.cat((z, oe)))
def_policy = self.defender(oa)
cumloss = 0.
if PENALTY:
previous_def_policy = torch.zeros_like(self.defender(torch.tensor(self.model.agent.status)))
for t in range(timesteps):
if FIXED_POLICY is False:
z = torch.rand(self.attacker.noise_size)
oe = torch.tensor(self.model.environment.status)
oa = torch.tensor(self.model.agent.status)
x_target = oa[3]
with torch.no_grad():
atk_policy = self.attacker(torch.cat((z, oe)))
def_policy = self.defender(oa)
self.model.step(atk_policy, def_policy, dt)
if t>K:
rho = self.robustness_computer.compute(self.model)
cumloss += self.defender_loss_fn(rho)
if torch.abs(x_target) >= MAX_TARGET_POS:
cumloss += PENALTY
cumloss.backward()
self.defender_optimizer.step()
if DEBUG:
print(self.defender.state_dict()["nn.0.bias"])
# make_dot(def_input, self.defender.named_parameters(), path=self.logging_dir)
return cumloss.detach() / timesteps
1911d18a99f00abe9dd822c30eace393500445cb | 7,785 | py | Python | tictactoe.py | smrsassa/tic-tac-toe-pygame | 36f738fb94a3a138ef2aa21d409558e3d1680526 | [MIT] | stars: 1
import pygame
import random
from time import sleep
white = (255, 255, 255)
black = (0, 0, 0)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
pygame.init()
largura = 320
altura = 320
fundo = pygame.display.set_mode((largura, altura))
pygame.display.set_caption("TicTacToe")
def texto(msg, cor, tam, x, y):
fonte = pygame.font.SysFont(None, tam)
texto1 = fonte.render(msg, True, cor)
fundo.blit(texto1, [x, y])
def circulo(centro):
if centro == 0 or centro == 1 or centro == 2:
if centro == 0:
centro = 53*(centro+1)
if centro == 1:
centro = 53*(centro+2)
if centro == 2:
centro = 53*(centro+3)
pos_circulo = (centro, 53)
if centro == 3 or centro == 4 or centro == 5:
if centro == 3:
centro = 53*(centro-2)
if centro == 4:
centro = 53*(centro-1)
if centro == 5:
centro = 53*centro
pos_circulo = (centro, 160)
if centro == 6 or centro == 7 or centro == 8:
if centro == 6:
centro = 53*(centro-5)
if centro == 7:
centro = 53*(centro-4)
if centro == 8:
centro = 53*(centro-3)
pos_circulo = (centro, 266)
pygame.draw.circle(fundo, black, pos_circulo, 30)
def cruz(cruzx, cruzy):
pygame.draw.line(fundo, black, (cruzx, cruzy), (cruzx+35, cruzy+35))
pygame.draw.line(fundo, black, (cruzx+35, cruzy), ( cruzx, cruzy+35))
def cerca():
pygame.draw.line(fundo, black,(106, 0), (106, altura))
pygame.draw.line(fundo, black,(212, 0), (212, altura))
pygame.draw.line(fundo, black,(0, 106), (largura, 106))
pygame.draw.line(fundo, black,(0, 212), (largura, 212))
def endgame():
global fimdejogo
global resultado
global trava
if matriz[0] == 1 and matriz[1] == 1 and matriz[2] == 1 or matriz[0] == 2 and matriz[1] == 2 and matriz[2] == 2:
fimdejogo = True
trava = False
if matriz[0] == 1:
resultado = 1
else:
resultado = 2
if matriz[3] == 1 and matriz[4] == 1 and matriz[5] == 1 or matriz[3] == 2 and matriz[4] == 2 and matriz[5] == 2:
fimdejogo = True
trava = False
if matriz[3] == 1:
resultado = 1
else:
resultado = 2
if matriz[6] == 1 and matriz[7] == 1 and matriz[8] == 1 or matriz[6] == 2 and matriz[7] == 2 and matriz[8] == 2:
fimdejogo = True
trava = False
if matriz[6] == 1:
resultado = 1
else:
resultado = 2
if matriz[0] == 1 and matriz[3] == 1 and matriz[6] == 1 or matriz[0] == 2 and matriz[3] == 2 and matriz[6] == 2:
fimdejogo = True
trava = False
if matriz[6] == 1:
resultado = 1
else:
resultado = 2
if matriz[1] == 1 and matriz[4] == 1 and matriz[7] == 1 or matriz[1] == 2 and matriz[4] == 2 and matriz[7] == 2:
fimdejogo = True
trava = False
if matriz[1] == 1:
resultado = 1
else:
resultado = 2
if matriz[2] == 1 and matriz[5] == 1 and matriz[8] == 1 or matriz[2] == 2 and matriz[5] == 2 and matriz[8] == 2:
fimdejogo = True
trava = False
if matriz[2] == 1:
resultado = 1
else:
resultado = 2
if matriz[0] == 1 and matriz[4] == 1 and matriz[8] == 1 or matriz[0] == 2 and matriz[4] == 2 and matriz[8] == 2:
fimdejogo = True
trava = False
if matriz[0] == 1:
resultado = 1
else:
resultado = 2
if matriz[2] == 1 and matriz[4] == 1 and matriz[6] == 1 or matriz[2] == 2 and matriz[4] == 2 and matriz[6] == 2:
fimdejogo = True
trava = False
if matriz[2] == 1:
resultado = 1
else:
resultado = 2
vaziu = 0
for c in range(0, len(matriz)):
if matriz[c] == 0:
vaziu +=1
if vaziu == 0:
if resultado != 1 and resultado != 2:
fimdejogo = True
resultado = 3
vaziu = 0
game = True
fimdejogo = False
evento = True
trava = True
resultado = 0
mousex = -1
mousey = 0
fundo.fill(white)
cerca()
matriz = [0, 0, 0, 0, 0, 0, 0, 0, 0]
pygame.display.update()
while game:
while fimdejogo:
sleep(0.5)
fundo.fill(white)
        texto('Game over', red, 50, 65, 30)
        if resultado == 1:
            texto('Victory!!!', black, 30, 70, 80)
        if resultado == 3:
            texto('Draw', black, 30, 70, 80)
        if resultado == 2:
            texto('Defeat!!', black, 30, 70, 80)
        pygame.draw.rect(fundo, black, [45, 120, 135, 27])
        texto('Continue(C)', white, 30, 50, 125)
        pygame.draw.rect(fundo, black, [190, 120, 75, 27])
        texto('Quit(S)', white, 30, 195, 125)
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
game = False
fimdejogo = False
trava = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_c:
game = True
fimdejogo = False
evento = True
trava = True
resultado = 0
mousex = -1
mousey = 0
fundo.fill(white)
cerca()
matriz = [0, 0, 0, 0, 0, 0, 0, 0, 0]
pygame.display.update()
if event.key == pygame.K_s:
game = False
fimdejogo = False
evento = False
trava = False
while evento:
for event in pygame.event.get():
if event.type == pygame.QUIT:
game = False
evento = False
trava = False
if event.type == pygame.MOUSEBUTTONDOWN:
mousex = pygame.mouse.get_pos()[0]
mousey = pygame.mouse.get_pos()[1]
evento = False
evento = True
if mousex < 106 and mousey < 106 and mousex != -1 and matriz[0] == 0:
cruz(35, 35)
matriz[0] = 1
if mousex < 212 and mousex > 106 and mousey < 106 and matriz[1] == 0:
cruz(141, 35)
matriz[1] = 1
if mousex < 320 and mousex > 212 and mousey < 106 and matriz[2] == 0:
cruz(247, 35)
matriz[2] = 1
if mousex < 106 and mousey > 106 and mousey < 212 and matriz[3] == 0:
cruz(35, 141)
matriz[3] = 1
if mousex < 212 and mousex > 106 and mousey < 212 and mousey > 106 and matriz[4] == 0:
cruz(141, 141)
matriz[4] = 1
if mousex < 320 and mousex > 212 and mousey < 212 and mousey > 106 and matriz[5] == 0:
cruz(247, 141)
matriz[5] = 1
if mousex < 106 and mousey < 320 and mousey > 212 and matriz[6] == 0:
cruz(35, 247)
matriz[6] = 1
if mousex < 212 and mousex > 106 and mousey < 320 and mousey > 212 and matriz[7] == 0:
cruz(141, 247)
matriz[7] = 1
if mousex < 320 and mousex > 212 and mousey < 320 and mousey > 212 and matriz[8] == 0:
cruz(247, 247)
matriz[8] = 1
endgame()
pygame.display.update()
sleep(0.5)
if trava:
while True:
jogada = random.randint(0, 8)
if matriz[jogada] == 0:
circulo(jogada)
matriz[jogada] = 2
break
else:
if 0 in matriz:
jogada = random.randint(0, 8)
else:
break
endgame()
pygame.display.update()
pygame.display.update()
19128f435521b2a41c0130fc202af247adfc091d | 2,332 | py | Python | src/plugins/notice.py | napalmpiri/irkotr0id | 8125c0119038ddccdf6f0a587fa9eb4a0f66821d | [Beerware] | stars: 7, issues: 1, forks: 7
#!/usr/bin/env python
# -*- coding: Utf8 -*-
import event
class Plugin:
def __init__(self, client):
self.client = client
self.notices = {}
#:nisay!~nisay@53.ip-192-99-70.net PRIVMSG #testbobot :!notice user message
@event.privmsg()
def get_notice(self, e):
target = e.values['target']
msg = e.values['msg'][1:]
nick = e.values['nick']
if nick == self.client.nick_name:
return
if msg == '!notice':
self.help(target)
if target == self.client.nick_name:
message = "You can only send a notice on a channel !"
self.client.priv_msg(nick, message)
return
elif msg[0:7] == '!notice':
try:
(cmd, user, message) = msg.split(' ', 2)
            except ValueError:  # avoid shadowing the handler's event argument `e`
self.help(target)
return
if user in self.client.channels[target.lower()].users:
message = nick + ": Can\'t you really do that by yourself ? ._."
self.client.priv_msg(target, message)
else:
message = message.strip()
notice = 'From ' + nick + ': ' + message
if not self.notices.has_key(target):
self.notices[target] = {}
if not self.notices[target].has_key(user):
self.notices[target][user] = []
self.notices[target][user].append(notice)
@event.join()
def send_notice(self, e):
nick = e.values['nick']
if nick[0] in ('&', '~', '+', '@'):
nick = nick[1:]
chan = e.values['chan']
if nick == self.client.nick_name:
return
if self.notices.has_key(chan):
if self.notices[chan].has_key(nick):
message = nick + ": While you were away"
self.client.priv_msg(chan, message)
for notice in self.notices[chan][nick]:
self.client.priv_msg(chan, notice)
self.notices[chan].pop(nick)
def help(self, target):
message = "Notify a user with a message when (s)he reconnects."
self.client.priv_msg(target, message)
message = "!notice user message"
self.client.priv_msg(target, message)
191359000d3e32159cc42150dd476b64da855e66 | 5,794 | py | Python | pyec/distribution/bayes/parser.py | hypernicon/pyec | 7072835c97d476fc45ffc3b34f5c3ec607988e6d | [MIT] | stars: 2
"""
Copyright (C) 2012 Alan J Lockett
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Parse .net format for Bayes nets and return a bayes net
"""
from pyec.config import Config
from pyec.distribution.bayes.net import *
from pyec.distribution.bayes.structure.proposal import StructureProposal
class BayesParser(object):
def __init__(self):
self.variables = {}
self.indexMap = {}
self.revIndexMap = {}
self.index = 0
def processLine(self, line):
line = line.strip()
if line == "":
return None
line = line[1:-1]
parts = line.split(" ")
if parts[0] == "var":
name = parts[1].strip("' ")
vals = " ".join(parts[2:])
vals = vals.strip("'()").split(" ")
vals = [v.strip("() \t\r\n") for v in vals]
vals = [v for v in vals if v != ""]
self.variables[name] = {'vals':vals, 'parents':None, 'cpt':None}
self.indexMap[name] = self.index
self.revIndexMap[self.index] = name
self.index += 1
elif parts[0] == "parents":
name = parts[1].strip("'").strip()
parts2 = line.split("'(")
if len(parts2) == 1:
parts2 = line.split("(")
parstr = parts2[1]
cptstr = "(".join(parts2[2:])
else:
parstr = parts2[1]
cptstr = parts2[2]
parents = parstr.strip(") \n").split(" ")
parents = [parent for parent in parents if parent != ""]
sortedParents = sorted(parents, key=lambda x: self.indexMap[x])
self.variables[name]['parents'] = sortedParents
cpt = {}
if len(parents) == 0:
vals = cptstr[:-2].strip("( )\r\n\t").split(" ")
vals = array([float(v) for v in vals][:-1])
cpt[""] = vals
else:
rows = cptstr[:-2].split("((")
for row in rows:
row = row.strip(") \r\n\t")
if row == "": continue
cfg, vals = row.split(")")
keys = [c for c in cfg.split(" ") if c != ""]
keyStr = [[]] * len(parents)
for j, key in enumerate(keys):
options = self.variables[parents[j]]['vals']
idx = options.index(key) + 1
keyStr[sortedParents.index(parents[j])] = idx
keyStr = ",".join([str(i) for i in array(keyStr)])
vals = vals.strip().split(" ")
vals = array([float(v) for v in vals][:-1])
cpt[keyStr] = vals
self.variables[name]['cpt'] = cpt
else:
return False
def parse(self, fname):
f = open(fname)
totalLine = ""
done = False
for line in f:
totalLine += line
lefts = len(totalLine.split("("))
rights = len(totalLine.split(")"))
if lefts == rights:
self.processLine(totalLine)
totalLine = ""
categories = [[]] * self.index
        for name, idx in self.indexMap.items():  # Python 3: dict.iteritems() was removed
categories[idx] = self.variables[name]['vals']
cfg = Config()
cfg.numVariables = len(self.variables)
cfg.variableGenerator = MultinomialVariableGenerator(categories)
cfg.randomizer = MultinomialRandomizer()
cfg.sampler = DAGSampler()
cfg.structureGenerator = StructureProposal(cfg)
net = BayesNet(cfg)
for variable in net.variables:
variable.tables = self.variables[self.revIndexMap[variable.index]]['cpt']
#print names[variable.index], self.variables[self.revIndexMap[variable.index]]['parents']
variable.known = [self.indexMap[parent] for parent in self.variables[self.revIndexMap[variable.index]]['parents']]
variable.known = sorted(variable.known)
variable.parents = dict([(i, net.variables[i]) for i in variable.known])
net.dirty = True
net.computeEdgeStatistics()
"""
for variable in net.variables:
print "(var ", self.revIndexMap[variable.index], " (", " ".join(variable.categories[variable.index]), "))"
for variable in net.variables:
print "(parents ", self.revIndexMap[variable.index], " (", " ".join([self.revIndexMap[i] for i in variable.known]), ") "
for key, val in variable.tables.iteritems():
if key == "":
expanded = ""
else:
cfg = array([int(num) for num in key.split(",")])
expanded = " ".join(self.variables[self.revIndexMap[variable.known[k]]]['vals'][c-1] for k,c in enumerate(cfg))
total = val.sum()
vals = " ".join([str(i) for i in val])
print "((", expanded, ") ", vals, (1. - total), ")"
print ")"
"""
return net
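# Usage sketch (an assumption, not in the original module; "alarm.net" is a
# hypothetical file in the lisp-like .net format this parser expects):
#   net = BayesParser().parse("alarm.net")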
1916221a240fca8e366955bf9b55225db064e9c4 | 169 | py | Python | Desafio25.py | sergioboff/Desafios-Curso-em-Video | f876396635b12c00bdd9523758364bbebfd70ae0 | [MIT]
nome = str(input('Enter your full name: ')).strip()
if 'silva' in nome.lower():
    print('Yes, your name contains Silva.')
else:
    print('No, your name does not contain Silva.')
19179562beab192ca5fca3ff7c055101546a8163 | 1,527 | py | Python | migrations/versions/8b664608a7c7_.py | wangyuan02605/webcloud | e57a2713125b751ee8bb8da29b789e2044e789aa | [MIT] | stars: 5, forks: 1
"""empty message
Revision ID: 8b664608a7c7
Revises: ec21e19825ff
Create Date: 2021-06-01 14:37:20.327189
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8b664608a7c7'
down_revision = 'ec21e19825ff'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('admin_dept',
sa.Column('id', sa.Integer(), nullable=False, comment='部门ID'),
sa.Column('parent_id', sa.Integer(), nullable=True, comment='父级编号'),
sa.Column('dept_name', sa.String(length=50), nullable=True, comment='部门名称'),
sa.Column('sort', sa.Integer(), nullable=True, comment='排序'),
sa.Column('leader', sa.String(length=50), nullable=True, comment='负责人'),
sa.Column('phone', sa.String(length=20), nullable=True, comment='联系方式'),
sa.Column('email', sa.String(length=50), nullable=True, comment='邮箱'),
sa.Column('status', sa.Integer(), nullable=True, comment='状态(1开启,0关闭)'),
sa.Column('remark', sa.Text(), nullable=True, comment='备注'),
sa.Column('address', sa.String(length=255), nullable=True, comment='详细地址'),
sa.Column('create_at', sa.DateTime(), nullable=True, comment='创建时间'),
    sa.Column('update_at', sa.DateTime(), nullable=True, comment='更新时间'),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('admin_dept')
# ### end Alembic commands ###
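# Usage sketch (not part of the generated file): apply or revert this revision
# with Alembic's command line:
#   alembic upgrade 8b664608a7c7
#   alembic downgrade ec21e19825ff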
191840622ba4f376a7f93c8724514c6d2f52d3bb | 1,393 | py | Python | africa/views.py | Mutugiii/Pinstagram | 40436facfb068eea135c6dffcdaf85028ff803c1 | [MIT] | issues: 6
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
from .models import Location,Category,Image
def index(request):
'''Main view function for the start page'''
images = Image.get_images()
template = loader.get_template('index.html')
context = {
'images': images,
}
return HttpResponse(template.render(context,request))
def search(request):
'''View function to search by category'''
template = loader.get_template('search.html')
if 'image' in request.GET and request.GET['image']:
search_category = request.GET['image']
searched_images = Image.search_images(search_category)
message = f'{search_category}'
context = {
'message': message,
'images': searched_images,
}
return HttpResponse(template.render(context,request))
else:
message = 'The category does not exist!!'
context = {
'message': message,
}
return render(request, 'search.html', {'message': message})
def locations(request, region):
'''View Function to sort based on location'''
template = loader.get_template('location.html')
region_images = Image.filter_by_location(region)
context = {
'images': region_images,
}
return HttpResponse(template.render(context,request))
1918493233bb0f6b63771c2685165671159e3808 | 509 | py | Python | src/chapter4/exercise6.py | group6BCS1/BCS-2021 | 272b1117922163cde03901cfdd82f8e0cfab9a67 | [MIT]
x = (input("enters hours"))
y = (input("enters rate"))
def compute_pay(hours, rate):
"""The try block ensures that the user enters a
value between from 0-1 otherwise an error message pops up"""
try:
hours = float(x)
rate = float(y)
if hours <= 40:
pay= float(hours * rate)
else:
pay = float(40 * rate + (hours - 40) * 1.5 * rate)
return pay
except ValueError:
return "INVALID ENTRY"
pay = compute_pay(x, y)
print(pay)
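# Worked example (an assumption, not in the original exercise): 45 hours at a
# rate of 10 pays 40 * 10 + (45 - 40) * 1.5 * 10 = 475.0.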
1918ab57423bec266c18fa1bdfea020adfc67165 | 628 | py | Python | front_end/create_data_natalia_icassp.py | adiyoss/DeepVOT | 6bbda01fc5a4f93c92fccba2b9dacee511533244 | [MIT] | stars: 6, issues: 3, forks: 8
# import argparse
#
#
# def main(audio_path, textgrid_path, output_path):
# data = list()
# for
# print(1)
#
# if __name__ == "__main__":
# # -------------MENU-------------- #
# # command line arguments
# parser = argparse.ArgumentParser()
# parser.add_argument("audio_path", help="The path to the audio directory")
# parser.add_argument("labels_path", help="The path to the relevant textgrids")
# parser.add_argument("output_path", help="The path to output directory")
# args = parser.parse_args()
#
# # main function
# main(args.audio_path, args.textgrid_path, args.output_path)
1918ecc1cb7ed0d73d2876e4710c8c0ffca95358 | 557 | py | Python | phone_numbers.py | EdilOndong/beginner_code | 13b05afb25ec2ba4396f5fbe751febe7cb4bdabb | [Unlicense] | stars: 1
import phonenumbers
from phonenumbers import geocoder, carrier
def get_information_about_number(phone_numbers):
number = phonenumbers.parse(phone_numbers, "en")
phone_location = geocoder.description_for_number(number, "en")
phone_carrier = carrier.name_for_number(number, "en")
print("The Location Of This Phone Number is " + str(phone_location) + " " + "And The Phone Carrier is " + phone_carrier)
if __name__ == '__main__':
numbers = input("Please Enter The Target Number : ")
get_information_about_number(numbers)
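# Usage sketch (an assumption, not in the original script): numbers supplied in
# E.164 form carry their own country code, so the default region hint is not
# needed, e.g.
#   get_information_about_number("+14155552671")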
191b02340ae1fb3a92d5b7d4ecfd3b82e78caed3 | 3,494 | py | Python | src/templates/camera.py | coherentsolutionsinc/issoft-insights-2019-sdc-carla-ros | f6d3e162888bd79d59b771c82ff028df0f70ae11 | [MIT] | stars: 8, forks: 1
# TODO: 1. Add indicator that node should be run by python
# line above indicates that python is responsible for running this node
import os
import csv
import rospy
import numpy as np
import pygame
from utilities import pipline
import cv2
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
# set image resolution
RESOLUTION_X = 640
RESOLUTION_Y = 480
# python class definition
class CameraTester(object):
# python constructor definition
def __init__(self):
self.start_time = None
self.image = None
self.got_image = False
self.init_pygame()
self.bridge = CvBridge()
        # TODO: 2. Init node - give the node a unique name - overwritten from launch file
        # wait until the master node is initialized and record the start time
self.wait_master_initialization()
# TODO: 3. Subscribe to the ROS bridge camera topic and provide callback
        # wait until we get the first image
self.wait_initialization()
# run node infinite loop
self.loop()
# TODO: 4. Write callback method for the subscriber
# init pygame window to display images
def init_pygame(self):
pygame.init()
pygame.display.set_caption("Camera images")
self.screen = pygame.display.set_mode([RESOLUTION_X, RESOLUTION_Y])
    # wait until the master node is initialized and record the start time
def wait_master_initialization(self):
while not self.start_time and not rospy.is_shutdown():
self.start_time = rospy.Time.now().to_nsec()
if not rospy.is_shutdown():
rospy.loginfo('CameraTester: Ros master initialized.')
    # wait until we get the first image
def wait_initialization(self):
        # define sleep rate for the loop
rate = rospy.Rate(10)
        # wait until the image is initialized
while not rospy.is_shutdown() and not self.got_image:
rate.sleep()
if not rospy.is_shutdown():
rospy.loginfo('CameraTester: Connected to vehicle - got camera images')
# main node loop
def loop(self):
# define loop rate in Hz
rate = rospy.Rate(20)
while not rospy.is_shutdown():
if self.image is not None:
# process stored image and display it in pygame window
self.process_frame()
# update pygame window
pygame.display.flip()
# wait 1/20 sec
rate.sleep()
# convert open cv image to pygame image and display
def process_frame(self):
        # we need to convert the image as it uses the BGR color scheme and is flipped
frame = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
frame = np.rot90(frame)
frame = np.flip(frame, 0)
# TODO: 5. Add sample image processing - for example filters useful to lanes detection - uncomment line below
#frame = pipline(frame)
frame = pygame.surfarray.make_surface(frame)
        self.screen.blit(frame, (0, 0))
return
# python way to indicate what to do if this file is run as an executable rather than imported as a library
if __name__ == '__main__':
try:
# create CameraTester instance and initiate loop sequence
CameraTester()
except rospy.ROSInterruptException:
# catch and log ROS errors
rospy.logerr('Could not start camera tester node.')
pass
finally:
pygame.quit()
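# Hedged sketch of the subscriber callback that TODOs 3-4 ask for (an
# assumption for illustration, not part of the original template):
#
# def image_callback(self, msg):
#     try:
#         self.image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
#         self.got_image = True
#     except CvBridgeError as e:
#         rospy.logerr('CameraTester: CvBridge error: %s', e)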
| 30.920354
| 121
| 0.642244
| 2,670
| 0.764167
| 0
| 0
| 0
| 0
| 0
| 0
| 1,447
| 0.414139
|
191e560813185fd516419069f86e8ed6f0ccdaf4
| 393
|
py
|
Python
|
Server APP/epl/rest/migrations/0012_auto_20180201_2304.py
|
seanjohn85/Premier-League
|
61c1406733e9c6fb282f8a4544a2f16cd6ff80a0
|
[
"MIT"
] | null | null | null |
Server APP/epl/rest/migrations/0012_auto_20180201_2304.py
|
seanjohn85/Premier-League
|
61c1406733e9c6fb282f8a4544a2f16cd6ff80a0
|
[
"MIT"
] | null | null | null |
Server APP/epl/rest/migrations/0012_auto_20180201_2304.py
|
seanjohn85/Premier-League
|
61c1406733e9c6fb282f8a4544a2f16cd6ff80a0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-02-01 23:04
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('rest', '0011_auto_20180201_2256'),
]
operations = [
migrations.RenameModel(
old_name='nextFix',
new_name='Fixtures',
),
]
| 19.65
| 46
| 0.615776
| 245
| 0.62341
| 0
| 0
| 0
| 0
| 0
| 0
| 119
| 0.302799
|
191ea83d06729e5bde9055413df2bd0a44ff8fe7
| 2,669
|
py
|
Python
|
plugins/beacon/alerta_beacon.py
|
ernadhalilovic/alerta-contrib
|
e12b5cf1e7f5913f641758032ca0d426c7eb8a08
|
[
"MIT"
] | null | null | null |
plugins/beacon/alerta_beacon.py
|
ernadhalilovic/alerta-contrib
|
e12b5cf1e7f5913f641758032ca0d426c7eb8a08
|
[
"MIT"
] | null | null | null |
plugins/beacon/alerta_beacon.py
|
ernadhalilovic/alerta-contrib
|
e12b5cf1e7f5913f641758032ca0d426c7eb8a08
|
[
"MIT"
] | null | null | null |
import logging
import os
import json
import traceback
import requests
try:
from alerta.plugins import app # alerta >= 5.0
except ImportError:
from alerta.app import app # alerta < 5.0
from alerta.plugins import PluginBase
LOG = logging.getLogger('alerta.plugins.beacon')
BEACON_HEADERS = {
'Content-Type': 'application/json'
}
BEACON_SEND_ON_ACK = os.environ.get('BEACON_SEND_ON_ACK') or app.config.get('BEACON_SEND_ON_ACK', False)
BEACON_SEVERITY_MAP = app.config.get('BEACON_SEVERITY_MAP', {})
BEACON_DEFAULT_SEVERITY_MAP = {'security': '#000000', # black
'critical': '#FF0000', # red
'major': '#FFA500', # orange
'minor': '#FFFF00', # yellow
                               'warning': '#1E90FF',  # blue
                               'informational': '#808080',  # gray
'debug': '#808080', # gray
'trace': '#808080', # gray
'ok': '#00CC00'} # green
class ServiceIntegration(PluginBase):
def __init__(self, name=None):
# override user-defined severities
self._severities = BEACON_DEFAULT_SEVERITY_MAP
self._severities.update(BEACON_SEVERITY_MAP)
super(ServiceIntegration, self).__init__(name)
def pre_receive(self, alert):
return alert
def post_receive(self, alert):
return
def status_change(self, alert, status, text, **kwargs):
BEACON_WEBHOOK_URL = self.get_config('BEACON_WEBHOOK_URL', type=str, **kwargs)
#if BEACON_SEND_ON_ACK == False or status not in ['ack', 'assign']:
#return
LOG.debug('Beacon alert: %s', alert)
LOG.debug('Beacon status: %s', status)
LOG.debug('Beacon text: %s', text)
LOG.debug('Beacon kwargs: %s', kwargs)
payload = dict()
try:
payload['severity'] = alert.severity
payload['status'] = status
payload['environment'] = alert.environment
payload['event'] = alert.event
payload['id'] = alert.id
payload['tags'] = alert.tags
LOG.debug('Beacon payload: %s', payload)
except Exception as e:
LOG.error('Exception formatting payload: %s\n%s' % (e, traceback.format_exc()))
return
try:
r = requests.post(BEACON_WEBHOOK_URL,
data=json.dumps(payload), headers=BEACON_HEADERS, timeout=2)
except Exception as e:
            raise RuntimeError("Beacon connection error: %s" % e)
LOG.debug('Beacon response: %s\n%s' % (r.status_code, r.text))
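# Hedged configuration sketch (all values below are placeholders): in
# alertad.conf this plugin would typically be enabled with something like:
#
# PLUGINS = ['beacon']
# BEACON_WEBHOOK_URL = 'https://hooks.example.com/beacon'
# BEACON_SEVERITY_MAP = {'critical': '#FF0000'}
# BEACON_SEND_ON_ACK = True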
| 35.118421
| 104
| 0.573248
| 1,637
| 0.613338
| 0
| 0
| 0
| 0
| 0
| 0
| 720
| 0.269764
|
1923272d7be3c0492ef9a0ede3d5a0e3b7f1d272
| 5,542
|
py
|
Python
|
pyscriptman/hosts/host.py
|
reap2sow1/pyscriptman
|
2020111097e846c3fb6c4669d67de05ee3ef979a
|
[
"MIT"
] | null | null | null |
pyscriptman/hosts/host.py
|
reap2sow1/pyscriptman
|
2020111097e846c3fb6c4669d67de05ee3ef979a
|
[
"MIT"
] | null | null | null |
pyscriptman/hosts/host.py
|
reap2sow1/pyscriptman
|
2020111097e846c3fb6c4669d67de05ee3ef979a
|
[
"MIT"
] | null | null | null |
"""The abstract class module for all hosts."""
# Standard Library Imports
from abc import ABC, abstractmethod, abstractclassmethod
# Third Party Imports
# Local Application Imports
from util.helpers import get_typeof_repo_names
from util.message import Message
class Host(ABC):
"""The abstract class for all hosts.
Host subclasses are created under the assumption
that their abstract methods are defined and `HELP_DESC`
is also defined in class scope (see __init_subclass__).
Attributes
----------
HELP_DESC : NotImplemented
Parser description provided of a host
when using -h/--help (See Also).
HOST_KEY : str
Chosen host is stored under this name.
Methods
----------
add_parser : argparse._SubParsersAction
How hosts are added to the command line to be used.
Used to enforce consistent structure.
See Also
----------
pyscriptman.LocalHost.HELP_DESC
pyscriptman.RemoteHost.HELP_DESC
pyscriptman.GitHub.HELP_DESC
Notes
----------
_modify_parser : argparse.ArgumentParser
To be implemented, allows the host parser to
take custom arguments.
"""
HELP_DESC = NotImplemented
HOST_KEY = "host"
@property
def repo_names_and_locations(self):
"""Getter for returning repo names and locations
Returns
-------
dict
repo names and locations are returned,
where repo names are the keys with locations
as the values.
"""
return self._repo_names_and_locations
@property
def repo_names(self):
"""Getter for repo names"""
return self._repo_names_and_locations.keys()
def __init__(self):
self._repo_names_and_locations = dict()
def __init_subclass__(cls, *args, **kwargs):
"""Specifications required by future host subclasses."""
super().__init_subclass__(*args, **kwargs)
if cls.HELP_DESC is NotImplemented and cls.__name__ != "WebHost":
raise NotImplementedError(
Message.construct_helpdesc_notimplemented_msg({cls.__name__})
)
@staticmethod
def _get_bare_repo_names_from_path(dir_path):
"""Retrieve's bare Git repos from a given directory path.
Parameters
----------
dir_path : str
A directory path.
"""
return get_typeof_repo_names(dir_path, barerepo=True)
@classmethod
def add_parser(cls, subparser_container):
"""How hosts are added to the command line.
Parameters
----------
subparser_container : argparse._SubParsersAction
The 'container' that the host subparser is added to
(see notes).
Notes
----------
        It should be noted that subparser_container is
        technically not a container, but
        a 'special action object' (see the argparse documentation).
"""
subcommand = cls._get_host_name()
parser = subparser_container.add_parser(
subcommand, help=cls.HELP_DESC, allow_abbrev=False
)
parser = cls._modify_parser(parser)
parser.set_defaults(**{cls.HOST_KEY: subcommand})
return parser
@classmethod
def _get_host_name(cls):
"""How the host name is returned."""
return cls.__name__.lower()
def add_repo_name_and_location(self, repo_name, location):
"""How to add repo name and location to host's repos names and locations.
Parameters
----------
repo_name : str
The name of the Git repo to store.
location : str
A url to the Git repo.
"""
self.repo_names_and_locations[repo_name] = location
def get_location_from_repo_name(self, repo_name):
"""How to get the host's repo location from the repo name.
Parameters
----------
repo_name : str
The name of the Git repo to store.
"""
return self.repo_names_and_locations[repo_name]
@abstractclassmethod
def is_host_type(cls, chosen_host, configholder):
"""How the host type is determined.
Parameters
----------
chosen_host : str
Input received from the command line.
configholder : util.configholder.ConfigHolder
An instantiation of ConfigHolder, used to hold program
configurations (see notes).
Notes
-----
The signature implemented in each host subclass
does not have to be exact according to the base
method and may not contain `configholder`.
"""
NotImplemented
@abstractclassmethod
def _modify_parser(cls, parser):
"""To be implemented, allows the host parser to take custom arguments.
Parameters
----------
parser : argparse.ArgumentParser
            A normal argparse.ArgumentParser parser that
            can accept additional positional/optional arguments.
"""
NotImplemented
@abstractmethod
def get_user_repo_names_and_locations(self):
"""To be implemented.
Depending on the type of host, this
method is the 'how' in getting the repo names
and locations.
See Also
--------
pyscriptman.hosts.host.add_repo_name_and_location : For location definition
"""
NotImplemented
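# Hedged sketch of a minimal concrete subclass (illustrative only; the
# names and behaviour below are assumptions, not part of pyscriptman):
class _DemoHost(Host):
    HELP_DESC = "a stub host used purely for illustration"
    @classmethod
    def is_host_type(cls, chosen_host, configholder):
        return chosen_host == cls._get_host_name()
    @classmethod
    def _modify_parser(cls, parser):
        return parser
    def get_user_repo_names_and_locations(self):
        self.add_repo_name_and_location("demo", "https://example.com/demo.git")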
| 28.27551
| 83
| 0.616925
| 5,276
| 0.952003
| 0
| 0
| 3,074
| 0.554673
| 0
| 0
| 3,578
| 0.645615
|
192369f557f40b35dc6e1a446089e36a7716438d
| 488
|
py
|
Python
|
discovery-provider/src/models/reward_manager.py
|
AudiusProject/audius-protocol
|
0315c31402121b24faa039e93cea8869d5b80743
|
[
"Apache-2.0"
] | 429
|
2019-08-14T01:34:07.000Z
|
2022-03-30T06:31:38.000Z
|
discovery-provider/src/models/reward_manager.py
|
AudiusProject/audius-protocol
|
0315c31402121b24faa039e93cea8869d5b80743
|
[
"Apache-2.0"
] | 998
|
2019-08-14T01:52:37.000Z
|
2022-03-31T23:17:22.000Z
|
discovery-provider/src/models/reward_manager.py
|
AudiusProject/audius-protocol
|
0315c31402121b24faa039e93cea8869d5b80743
|
[
"Apache-2.0"
] | 73
|
2019-10-04T04:24:16.000Z
|
2022-03-24T16:27:30.000Z
|
from sqlalchemy import (
Column,
Integer,
String,
DateTime,
)
from .models import Base
class RewardManagerTransaction(Base):
__tablename__ = "reward_manager_txs"
signature = Column(String, nullable=False, primary_key=True)
slot = Column(Integer, nullable=False)
created_at = Column(DateTime, nullable=False)
def __repr__(self):
return f"<RewardManagerTransaction\
signature={self.signature},\
slot={self.slot}\
created_at={self.created_at}\
>"
| 25.684211
| 64
| 0.719262
| 384
| 0.786885
| 0
| 0
| 0
| 0
| 0
| 0
| 128
| 0.262295
|
1924e772ac06a1b05910f40c7a40911d19ba34ea
| 2,326
|
py
|
Python
|
plugins/roll.py
|
Cyame/OkayuTweetBot
|
5ca257f2faa622f5b88cecc95522f2114e5717fc
|
[
"MIT"
] | 3
|
2020-04-10T16:47:25.000Z
|
2020-05-17T14:44:47.000Z
|
plugins/roll.py
|
Cyame/OkayuTweetBot
|
5ca257f2faa622f5b88cecc95522f2114e5717fc
|
[
"MIT"
] | null | null | null |
plugins/roll.py
|
Cyame/OkayuTweetBot
|
5ca257f2faa622f5b88cecc95522f2114e5717fc
|
[
"MIT"
] | 1
|
2020-04-12T09:38:22.000Z
|
2020-04-12T09:38:22.000Z
|
from nonebot import on_command, CommandSession,permission as perm
import asyncio
import traceback
from helper import getlogger,msgSendToBot,CQsessionToStr,data_read,data_save
from module.roll import match_roll
logger = getlogger(__name__)
__plugin_name__ = 'ROLL Dice'
__plugin_usage__ = r"""
roll command
"""
# preprocessing
def headdeal(session: CommandSession):
if session.event['message_type'] == "group" and session.event.sub_type != 'normal':
return False
return True
# the on_command decorator declares this function as a command handler
@on_command('roll',aliases=['掷骰','掷骰子','骰子'],only_to_me = False)
async def roll(session: CommandSession):
if not headdeal(session):
return
stripped_arg = session.current_arg_text.strip()
logger.info(CQsessionToStr(session))
event = session.event
nick = event['user_id']
if hasattr(event,'sender'):
if 'card' in event.sender and event['sender']['card'] != '':
nick = event['sender']['card']
elif 'nickname' in event.sender and event['sender']['nickname'] != '':
nick = event['sender']['nickname']
    # formula
res = stripped_arg.split('#',1)
    # build the comment prefix
addmsg = ''
if len(res) == 2:
stripped_arg = res[1]
if len(res[0]) > 25:
addmsg = "---{0}---\n".format(res[0])
else:
addmsg = res[0] + '#'
#Default
if stripped_arg == '':
stripped_arg = '1d100<50'
elif stripped_arg[:1] in ('<','>','!'):
stripped_arg = '1d100' + stripped_arg
elif stripped_arg.isdecimal():
stripped_arg = '1d100<' + stripped_arg
try:
msg = match_roll(nick,stripped_arg)
if msg == '':
            await session.send('Invalid arguments')
return
except:
s = traceback.format_exc(limit=10)
logger.error(s)
await session.send("内部错误!")
return
await session.send(addmsg + msg)
@on_command('rollhelp',aliases=['掷骰帮助','掷骰子帮助','骰子帮助','骰娘帮助'],only_to_me = False)
async def rollhelp(session: CommandSession):
if not headdeal(session):
return
    msg = '--Dice Roll Help--' + "\n"
    msg = msg + '!roll <args>' + "\n"
    msg = msg + 'With no arguments, defaults to 1d100<50' + "\n"
    msg = msg + 'For 1d100, 1-5 is always a critical success and 96-100 a critical failure' + "\n"
    msg = msg + 'Supported operators: >,<,>=,<=,!=,=,+,-,*,/' + "\n"
    msg = msg + "Core code from: https://github.com/akrisrn/dice"
await session.send(msg)
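# Examples of the argument normalisation in roll() above (illustrative):
#   ""         -> "1d100<50"   (default)
#   "<30"      -> "1d100<30"
#   "70"       -> "1d100<70"
#   "luck#2d6" -> comment prefix "luck#", expression "2d6"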
| 32.305556
| 87
| 0.602322
| 0
| 0
| 0
| 0
| 1,964
| 0.78247
| 1,769
| 0.704781
| 661
| 0.263347
|
1925e952156fee34defbc265e2d492362c1470a4
| 384
|
py
|
Python
|
wip/ray/serve/archive/serve-dag-client.py
|
nitish-raj/data-science-on-aws
|
b760805d28f8375094ce83aee849de8b9d3382a2
|
[
"Apache-2.0"
] | 42
|
2022-02-27T16:16:57.000Z
|
2022-03-30T20:11:05.000Z
|
wip/ray/serve/archive/serve-dag-client.py
|
nitish-raj/data-science-on-aws
|
b760805d28f8375094ce83aee849de8b9d3382a2
|
[
"Apache-2.0"
] | null | null | null |
wip/ray/serve/archive/serve-dag-client.py
|
nitish-raj/data-science-on-aws
|
b760805d28f8375094ce83aee849de8b9d3382a2
|
[
"Apache-2.0"
] | 26
|
2022-02-28T18:28:11.000Z
|
2022-03-29T13:15:22.000Z
|
import time
import requests
# Http endpoint
cur = time.time()
print(requests.post("http://127.0.0.1:8000/my-dag", json=["5", [1, 2], "sum"]).text)
print(f"Time spent: {round(time.time() - cur, 2)} secs.")
# Http endpoint
cur = time.time()
print(requests.post("http://127.0.0.1:8000/my-dag", json=["1", [0, 2], "max"]).text)
print(f"Time spent: {round(time.time() - cur, 2)} secs.")
| 29.538462
| 84
| 0.630208
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 206
| 0.536458
|
192684c2fc33a3cff3429a446241736d19388fbe
| 697
|
py
|
Python
|
apps/careeropportunity/migrations/0015_auto_20200426_1109.py
|
Kpaubert/onlineweb4
|
9ac79f163bc3a816db57ffa8477ea88770d97807
|
[
"MIT"
] | 32
|
2017-02-22T13:38:38.000Z
|
2022-03-31T23:29:54.000Z
|
apps/careeropportunity/migrations/0015_auto_20200426_1109.py
|
Kpaubert/onlineweb4
|
9ac79f163bc3a816db57ffa8477ea88770d97807
|
[
"MIT"
] | 694
|
2017-02-15T23:09:52.000Z
|
2022-03-31T23:16:07.000Z
|
apps/careeropportunity/migrations/0015_auto_20200426_1109.py
|
Kpaubert/onlineweb4
|
9ac79f163bc3a816db57ffa8477ea88770d97807
|
[
"MIT"
] | 35
|
2017-09-02T21:13:09.000Z
|
2022-02-21T11:30:30.000Z
|
# Generated by Django 3.0.5 on 2020-04-26 09:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("companyprofile", "0007_company_created_date"),
("careeropportunity", "0014_auto_20191031_1239"),
]
operations = [
migrations.AlterField(
model_name="careeropportunity",
name="company",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="career_opportunities",
to="companyprofile.Company",
verbose_name="Bedrift",
),
)
]
| 26.807692
| 60
| 0.604017
| 571
| 0.819225
| 0
| 0
| 0
| 0
| 0
| 0
| 217
| 0.311334
|
1928b4ff5a13f2e891c264d668791fc8b69f1469
| 790
|
py
|
Python
|
Businesslayer/Email_Validate.py
|
rohitgs28/FindMyEmployer
|
d4b369eb488f44e40ef371ac09847f8ccc39994c
|
[
"MIT"
] | null | null | null |
Businesslayer/Email_Validate.py
|
rohitgs28/FindMyEmployer
|
d4b369eb488f44e40ef371ac09847f8ccc39994c
|
[
"MIT"
] | null | null | null |
Businesslayer/Email_Validate.py
|
rohitgs28/FindMyEmployer
|
d4b369eb488f44e40ef371ac09847f8ccc39994c
|
[
"MIT"
] | null | null | null |
import os
import os.path
import logging
import sys
import IValidator
import re
sys.path.append(os.path.abspath(os.path.join('0', '../extensions')))
from extensions_logging import logmyerror
class Email_Validate(IValidator.IValidator):
def formValidate_BSL(self,email):
try:
            regex_emailCheck = re.compile(r"^\S+@\S+$")
if (regex_emailCheck.match(email)):
return email
else:
msg = "Email address not valid"
return msg
except Exception as e:
excep_msg = "Error occured in method formValidate_BSL method"
level = logging.getLogger().getEffectiveLevel()
logmyerror.loadMyExceptionInDb(level,excep_msg,e)
logging.info(excep_msg, exc_info=True)
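# Example behaviour of the regex check above (illustrative):
#   "user@example.com" matches ^\S+@\S+$ and is returned unchanged;
#   "not-an-email" does not match and yields "Email address not valid".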
| 32.916667
| 73
| 0.640506
| 598
| 0.756962
| 0
| 0
| 0
| 0
| 0
| 0
| 103
| 0.13038
|
192a1f5991a077d1bd0f6488cd823461b384cac5
| 188
|
py
|
Python
|
psmate/apps/blog/admin.py
|
vgrivtsov/psmate
|
10e0279b995d36518e0867e8c5d5125c355a2f00
|
[
"MIT"
] | null | null | null |
psmate/apps/blog/admin.py
|
vgrivtsov/psmate
|
10e0279b995d36518e0867e8c5d5125c355a2f00
|
[
"MIT"
] | null | null | null |
psmate/apps/blog/admin.py
|
vgrivtsov/psmate
|
10e0279b995d36518e0867e8c5d5125c355a2f00
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from psmate.models import News
class BlogAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('title',)}
admin.site.register(News, BlogAdmin)
| 17.090909
| 46
| 0.75
| 82
| 0.43617
| 0
| 0
| 0
| 0
| 0
| 0
| 13
| 0.069149
|
192a9867b561e4cc653889667cda0bafef034b8e
| 4,706
|
py
|
Python
|
Main/APIUsagePatternSearcher.py
|
SMAT-Lab/APIMatchmaker
|
0cc5c68f7f2aba570ad4c583bbc5ec757158c676
|
[
"MIT"
] | null | null | null |
Main/APIUsagePatternSearcher.py
|
SMAT-Lab/APIMatchmaker
|
0cc5c68f7f2aba570ad4c583bbc5ec757158c676
|
[
"MIT"
] | null | null | null |
Main/APIUsagePatternSearcher.py
|
SMAT-Lab/APIMatchmaker
|
0cc5c68f7f2aba570ad4c583bbc5ec757158c676
|
[
"MIT"
] | null | null | null |
# coding:utf-8
import csv
import os
import re
from Helper.common import *
class APIUsagePatternSearcher:
def __init__(self, OPTIONS, custom_args, numOfRecs):
self.OPTIONS = OPTIONS
self.custom_args = custom_args
self.numOfRecs = numOfRecs
def searchAPIUsagePatterns(self):
# Collect in allProjects the method invocations for every training project
allProjects = {} # Map<String, Map<String, Set<String>>>
        # NOTE: only the most similar projects are considered
trainingProjects = getFileList_from_txt(self.custom_args['Training_Set'])
testingProjects = self.getProjectNames(self.custom_args['Training_Set_filtered'])
for trainingProject in trainingProjects:
# projectMIs - Map<String, Set<String>> projectMIs
projectMIs = self.getProjectDetails(self.OPTIONS.presolve, trainingProject)
allProjects[trainingProject] = projectMIs
# For every testingPro, collect the Jaccard distance
# between the recommendations and the actual invocations
for testingPro in testingProjects:
results = {} # Map<String, Float>
# ordered lists
recommendations = []
testingInvocations = self.getTestingInvocations(self.custom_args['Test_Set'], testingPro)
# Searching API usage pattern for testingPro
# add also the testing invocation(s)
for invocation in testingInvocations:
recommendations.append(invocation)
recommendations.extend(self.readRecommendationFile(self.custom_args['RECOMMENDATION_PATH'], testingPro))
for project in allProjects:
methodInvocations = allProjects[project]
for declaration in methodInvocations:
invocations = methodInvocations[declaration]
allMIs = set()
                    # invocations of this declaration in the training project
s_train = len(invocations)
                    # recommendations in the test project
s_test = len(recommendations)
short_len = min(s_train, s_test)
for i in range(short_len):
allMIs.add(recommendations[i])
size1 = len(invocations.intersection(allMIs))
size2 = len(invocations.union(allMIs))
if size1:
jaccard = (1.0 * size1) / size2
results[project + "#" + declaration] = jaccard
jaccard_sim_list = dict2sortedlist(results)
numOfRecs = self.numOfRecs
if len(jaccard_sim_list) > numOfRecs:
jaccard_sim_list = jaccard_sim_list[:numOfRecs]
headings = ["Project#Declaration", "Jaccard Similarity"]
writeScores(self.custom_args['OUTPUT_PATH'], testingPro, jaccard_sim_list, headings)
def readRecommendationFile(self, path, project):
ret = []
filename = os.path.join(path, project + ".csv")
with open(filename, "r") as fr:
reader = csv.reader(fr)
headings = next(reader)
for line in reader:
mi = line[0]
ret.append(mi)
return ret
def getTestingInvocations(self, path, project):
ret = []
filename = os.path.join(path, project + ".csv")
with open(filename, "r") as fr:
reader = csv.reader(fr)
headings = next(reader)
for line in reader:
md = line[0].strip('\"[] ')
string = line[1].strip('\"[] ')
pattern = r'(<.*?>)'
mi = re.findall(pattern, string)
ret = mi
return ret
def getProjectDetails(self, path, project):
# return a Map<String, Set<String>>
methodInvocations = {}
filename = os.path.join(path, project + ".csv")
with open(filename, "r") as fr:
reader = csv.reader(fr)
headings = next(reader)
for line in reader:
md = line[0].strip('\"[] ')
string = line[1].strip('\"[] ')
pattern = r'(<.*?>)'
mi = re.findall(pattern, string)
mi = set(mi)
if md in methodInvocations:
methodInvocations[md] = methodInvocations[md].union(mi)
else:
methodInvocations[md] = mi
return methodInvocations
def getProjectNames(self, path):
names = []
files = getFileList(path, ".csv")
for file in files:
names.append(os.path.split(file)[-1][:-4])
return names
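# A tiny worked example of the Jaccard similarity computed in
# searchAPIUsagePatterns above (illustration only):
_train_mis = {"<a()>", "<b()>", "<c()>"}
_test_recs = {"<b()>", "<c()>", "<d()>"}
# |intersection| = 2 and |union| = 4, so the Jaccard similarity is 0.5
assert len(_train_mis & _test_recs) / len(_train_mis | _test_recs) == 0.5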
| 35.923664
| 116
| 0.563536
| 4,651
| 0.987054
| 0
| 0
| 0
| 0
| 0
| 0
| 757
| 0.160654
|
192b17d903cd1b6ec6531cfdd00faa10c1ae8213
| 873
|
py
|
Python
|
tools/leetcode.093.Restore IP Addresses/leetcode.093.Restore IP Addresses.submission2.py
|
tedye/leetcode
|
975d7e3b8cb9b6be9e80e07febf4bcf6414acd46
|
[
"MIT"
] | 4
|
2015-10-10T00:30:55.000Z
|
2020-07-27T19:45:54.000Z
|
tools/leetcode.093.Restore IP Addresses/leetcode.093.Restore IP Addresses.submission2.py
|
tedye/leetcode
|
975d7e3b8cb9b6be9e80e07febf4bcf6414acd46
|
[
"MIT"
] | null | null | null |
tools/leetcode.093.Restore IP Addresses/leetcode.093.Restore IP Addresses.submission2.py
|
tedye/leetcode
|
975d7e3b8cb9b6be9e80e07febf4bcf6414acd46
|
[
"MIT"
] | null | null | null |
class Solution:
# @param {string} s
# @return {string[]}
def restoreIpAddresses(self, s):
if not s or len(s) < 4: return []
res = []
cur = []
self.helper(s, res, cur, 0)
return res
def helper(self, s, res, cur, level):
if level == 4:
if not s:
res.append('.'.join(cur))
return
if len(s) == 0:
return
if len(s) >= 1:
cur.append(s[0])
self.helper(s[1:],res,cur,level+1)
cur.pop(-1)
if len(s) >= 2 and 10 <= int(s[:2]):
cur.append(s[:2])
self.helper(s[2:],res,cur,level+1)
cur.pop(-1)
if len(s) >= 3 and 100 <= int(s[:3]) < 256:
cur.append(s[:3])
self.helper(s[3:],res,cur,level+1)
cur.pop(-1)
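# Quick self-check using the classic example for this problem
# (added for illustration):
assert sorted(Solution().restoreIpAddresses("25525511135")) == \
    ["255.255.11.135", "255.255.111.35"]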
| 873
| 873
| 0.412371
| 873
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 853
| 0.97709
|
192b90d17689e6aeda21369042966d2de1a7f460
| 335
|
py
|
Python
|
Beginner/Ambiguous Permutations (PERMUT2)/permutation.py
|
anishsingh42/CodeChef
|
50f5c0438516210895e513bc4ee959b9d99ef647
|
[
"Apache-2.0"
] | 127
|
2020-10-13T18:04:35.000Z
|
2022-02-17T10:56:27.000Z
|
Beginner/Ambiguous Permutations (PERMUT2)/permutation.py
|
anishsingh42/CodeChef
|
50f5c0438516210895e513bc4ee959b9d99ef647
|
[
"Apache-2.0"
] | 132
|
2020-10-13T18:06:53.000Z
|
2021-10-17T18:44:26.000Z
|
Beginner/Ambiguous Permutations (PERMUT2)/permutation.py
|
anishsingh42/CodeChef
|
50f5c0438516210895e513bc4ee959b9d99ef647
|
[
"Apache-2.0"
] | 364
|
2020-10-13T18:04:52.000Z
|
2022-03-04T14:34:53.000Z
|
while True :
n = int(input())
if n == 0 :
break
else :
arr = input().split()
check = True
for i in range(n) :
if int(arr[int(arr[i]) - 1]) != i + 1 :
check = False
if check :
print('ambiguous')
else :
print('not ambiguous')
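# Worked example (illustration): the permutation 1 4 3 2 is its own
# inverse (arr[arr[i]-1] == i+1 for every i), so it is "ambiguous";
# 2 3 1 has inverse 3 1 2, so it is "not ambiguous".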
| 23.928571
| 51
| 0.402985
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 26
| 0.077612
|
192bad6eff2c66e4ca11db59cd7ea795ca554716
| 2,140
|
py
|
Python
|
src/voiceassistant/integrations/respeaker.py
|
vadimtitov/voice-assistant
|
9ed6a799f44d5a546eb712195e3e84e6ff10d2fa
|
[
"Apache-2.0"
] | 1
|
2021-12-19T14:59:31.000Z
|
2021-12-19T14:59:31.000Z
|
src/voiceassistant/integrations/respeaker.py
|
vadimtitov/voice-assistant
|
9ed6a799f44d5a546eb712195e3e84e6ff10d2fa
|
[
"Apache-2.0"
] | 3
|
2021-09-16T20:47:58.000Z
|
2021-12-19T02:45:59.000Z
|
src/voiceassistant/integrations/respeaker.py
|
vadimtitov/voice-assistant
|
9ed6a799f44d5a546eb712195e3e84e6ff10d2fa
|
[
"Apache-2.0"
] | null | null | null |
"""Add-On functions for speech interface."""
from __future__ import annotations
from typing import TYPE_CHECKING, List
from voiceassistant.addons.create import Addon, CoreAttribute, addon_begin, addon_end
from voiceassistant.exceptions import IntegrationError
from .base import Integration
if TYPE_CHECKING:
from voiceassistant.core import VoiceAssistant
try:
from pixel_ring import pixel_ring
from pixel_ring import apa102_pixel_ring
if isinstance(pixel_ring, apa102_pixel_ring.PixelRing):
print("Found ReSpeaker 4 Mic Array")
from gpiozero import LED
power = LED(5)
power.on()
pixel_ring.change_pattern("echo")
class PixelRingState:
"""Host pixel ring states."""
off = 0
speak = 1
think = 2
pixel_ring.off()
ring_state = PixelRingState.off
except Exception as e:
raise IntegrationError(f"No ReSpeaker Microphone detected or not able to connect: {e}") from e
class RespeakerMicrophoneArray(Integration):
"""Respeaker Microphone Array integration."""
name = "respeaker"
def __init__(self, vass: VoiceAssistant) -> None:
"""Init."""
pass
@property
def addons(self) -> List[Addon]:
"""Get addons."""
return [processing_starts, processing_ends, tts_starts, tts_ends]
@addon_begin(CoreAttribute.SPEECH_PROCESSING)
def processing_starts(vass: VoiceAssistant) -> None:
"""Do before NLP starts."""
pixel_ring.speak()
global ring_state
ring_state = PixelRingState.speak
@addon_end(CoreAttribute.SPEECH_PROCESSING)
def processing_ends(vass: VoiceAssistant) -> None:
"""Do when NLP ends."""
pixel_ring.off()
global ring_state
ring_state = PixelRingState.off
@addon_begin(CoreAttribute.SPEECH_OUTPUT)
def tts_starts(vass: VoiceAssistant) -> None:
"""Do before voice output starts."""
pixel_ring.think()
@addon_end(CoreAttribute.SPEECH_OUTPUT)
def tts_ends(vass: VoiceAssistant) -> None:
"""Do when voice output ends."""
if ring_state == PixelRingState.speak:
pixel_ring.speak()
else:
pixel_ring.off()
| 24.883721
| 98
| 0.700935
| 470
| 0.219626
| 0
| 0
| 936
| 0.437383
| 0
| 0
| 373
| 0.174299
|
192bfb70b6700b39e9f6c097fb207ffc155ff246
| 4,602
|
py
|
Python
|
src/driving_curriculum/agents/neural_networks/tf/tf_novelty_detector.py
|
takeitallsource/pac-simulator
|
2c00d878047ec4a0247167e8a7de5aec8b474086
|
[
"MIT"
] | 1
|
2018-07-14T07:09:23.000Z
|
2018-07-14T07:09:23.000Z
|
src/driving_curriculum/agents/neural_networks/tf/tf_novelty_detector.py
|
takeitallsource/pac-simulator
|
2c00d878047ec4a0247167e8a7de5aec8b474086
|
[
"MIT"
] | null | null | null |
src/driving_curriculum/agents/neural_networks/tf/tf_novelty_detector.py
|
takeitallsource/pac-simulator
|
2c00d878047ec4a0247167e8a7de5aec8b474086
|
[
"MIT"
] | null | null | null |
from math import cos, sin
import numpy as np
import tensorflow as tf
from .....simulator import Agent
# from simulator import Agent
tf.set_random_seed(1234)
class TensorflowNoveltyDetector(Agent):
def execute(self, action):
raise NotImplementedError()
def __init__(self, world, learning=True, x=0.0, y=0.0, theta=0.0, v=0.0, checkpoint_file=None):
Agent.__init__(self, world, x, y, theta, v)
self.state_tensor = None
self.action_tensor = None
self.encoder_model = None
self.optimization_algorithm = None
self.loss_function = None
self.last_loss = None
self.tf_session = tf.InteractiveSession()
self.tf_checkpoint = checkpoint_file
self.tf_saver = None
self.summary_merge = None
self.summary_writer = None
self.global_step = tf.Variable(0, trainable=False, name='global_step')
self.learning_tensor = tf.placeholder(dtype=tf.bool, name='learning')
self.learning = learning
def is_learning(self):
return self.learning
def exploit(self, state, action, horizon=1):
feed_dict = dict()
feed_dict[self.state_tensor] = [state]
if action is not None:
feed_dict[self.action_tensor] = [action]
model, loss = self.tf_session.run(
fetches=[
self.encoder_model,
self.loss_function
],
feed_dict=feed_dict
)
return model, loss
def explore(self, state, horizon=1):
pass
def learn(self, state, action):
feed_dict = dict()
feed_dict[self.state_tensor] = [state]
feed_dict[self.learning_tensor] = self.learning
if action is not None:
feed_dict[self.action_tensor] = [action]
summary, step, _, learning_loss, _ = self.tf_session.run(
fetches=[
self.summary_merge,
self.global_step,
self.optimization_algorithm,
self.loss_function,
self.encoder_model
],
feed_dict=feed_dict
)
self.summary_writer.add_summary(summary, step)
self.last_loss = learning_loss
return learning_loss
def commit(self):
self.tf_saver.save(self.tf_session, self.tf_checkpoint, global_step=self.global_step)
def architecture(self):
raise NotImplementedError()
def train(self, state_dims, action_dims, storage_location):
if not self.encoder_model:
self._state_action_tensors(state_dims, action_dims)
self.encoder_model, self.loss_function = self.architecture()
self.optimization_algorithm = self.get_optimizer(self.loss_function)
self.tf_session.run(tf.global_variables_initializer())
tf.train.global_step(self.tf_session, self.global_step)
self.summary_merge = tf.summary.merge_all()
self.last_loss = float('inf')
self.tf_checkpoint = tf.train.latest_checkpoint(storage_location)
self.tf_saver = tf.train.Saver(filename='model')
if self.tf_checkpoint:
self.tf_saver.restore(self.tf_session, self.tf_checkpoint)
else:
self.tf_checkpoint = storage_location + 'model'
self.summary_writer = tf.summary.FileWriter(storage_location, self.tf_session.graph)
def test(self, state_dims, action_dims, storage_location):
if not self.encoder_model:
self._state_action_tensors(state_dims, action_dims)
self.encoder_model, self.loss_function = self.architecture()
self.tf_session.run(tf.global_variables_initializer())
self.tf_checkpoint = tf.train.latest_checkpoint(storage_location)
self.tf_saver = tf.train.Saver()
if self.tf_checkpoint:
self.tf_saver.restore(self.tf_session, self.tf_checkpoint)
else:
print("NO TRAINING!")
def _state_action_tensors(self, input_shape=(None, 1), output_shape=(1, 1)):
if len(input_shape) == 3:
input_shape = (1, input_shape[0], input_shape[1], input_shape[2])
with tf.name_scope('data'):
self.state_tensor = tf.placeholder(dtype=tf.float32, shape=input_shape, name='state')
if output_shape:
self.action_tensor = tf.placeholder(dtype=tf.float32, shape=output_shape, name='action')
tf.summary.image('state', self.state_tensor, 1)
def get_optimizer(self, loss):
raise NotImplementedError()
| 37.112903
| 100
| 0.634941
| 4,440
| 0.964798
| 0
| 0
| 0
| 0
| 0
| 0
| 113
| 0.024555
|
192c65ff044acb45e1b0a8921920efeebef0c02a
| 4,093
|
py
|
Python
|
setup.py
|
opalmer/pycffiwin32
|
39210182a92e93c37a9f1c644fd5fcc1aa32f6d1
|
[
"MIT"
] | 10
|
2015-11-19T12:39:50.000Z
|
2021-02-21T20:15:29.000Z
|
setup.py
|
opalmer/pycffiwin32
|
39210182a92e93c37a9f1c644fd5fcc1aa32f6d1
|
[
"MIT"
] | 109
|
2015-06-15T05:03:33.000Z
|
2018-01-14T10:18:48.000Z
|
setup.py
|
opalmer/pycffiwin32
|
39210182a92e93c37a9f1c644fd5fcc1aa32f6d1
|
[
"MIT"
] | 8
|
2015-07-29T04:18:27.000Z
|
2018-11-02T17:15:40.000Z
|
from __future__ import print_function
import os
import sys
from errno import ENOENT
from os.path import dirname, abspath, join, isdir
from setuptools import setup, find_packages
from distutils.command.upload import upload
from pywincffi import __version__
try:
WindowsError
except NameError:
WindowsError = OSError
try:
with open("README.rst") as readme:
long_description = readme.read()
except (OSError, IOError, WindowsError) as error:
if error.errno == ENOENT:
long_description = ""
else:
raise
requirements = [
"cffi>=1.6.0",
"six"
]
ROOT = dirname(abspath(__file__))
DISTS = join(ROOT, "dist")
class AppVeyorArtifactUpload(upload):
"""
    A subclass of the normal upload command which uploads
    artifacts downloaded from AppVeyor instead of locally built files.
"""
def run(self):
if not isdir(DISTS):
print("%s does not exist" % DISTS, file=sys.stderr)
sys.exit(1)
# Clean out everything in dist/* first. This ensures that
# if we have local files they'll be replaced by the artifacts
# that we're downloading.
for root, dirs, files in os.walk(DISTS):
for name in files:
os.remove(join(root, name))
from pywincffi.dev.release import AppVeyor
appveyor = AppVeyor()
for artifact in appveyor.artifacts(directory=DISTS):
extension = artifact.path.split(".")[-1]
if extension not in ("whl", "zip", "msi", "exe"):
continue
for root, dirs, files in os.walk(DISTS):
for filename in files:
if filename.endswith(".zip"):
command = "sdist"
pyversion = "source"
elif filename.endswith(".whl"):
command = "bdist_wheel"
                    _, _, pyversion, _, _ = filename[:-len(".whl")].split("-")
pyversion = ".".join(list(pyversion.lstrip("cp")))
elif filename.endswith(".msi"):
command = "bdist_msi"
                    pyversion = \
                        filename[:-len(".msi")].split("-")[-1].lstrip("py")
elif filename.endswith(".exe"):
command = "bdist_wininst"
raise NotImplementedError(
"Don't have `pyversion` implemented for %r" % filename)
else:
print(
"Unknown file type: %r" % filename.split(".")[-1],
file=sys.stderr)
sys.exit(1)
filename = join(root, filename)
self.upload_file(command, pyversion, filename)
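# Worked example of the wheel-filename parsing above (illustrative):
#   "pywincffi-0.5.0-cp27-cp27m-win32.whl" -> pyversion "cp27" -> "2.7"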
setup_keywords = dict(
name="pywincffi",
version=".".join(map(str, __version__)),
cmdclass={
"upload_from_appveyor": AppVeyorArtifactUpload
},
packages=find_packages(
include=("pywincffi*", )
),
include_package_data=True,
author="Oliver Palmer",
author_email="oliverpalmer@opalmer.com",
url="http://github.com/opalmer/pywincffi",
description="A Python library which wraps Windows functions using CFFI",
long_description=long_description,
setup_requires=requirements,
install_requires=requirements,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Environment :: Win32 (MS Windows)",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries"
]
)
# Only add cffi_modules if we're running on Windows. Otherwise
# things like the documentation build, which can run on Linux, may
# not work.
if os.name == "nt":
setup_keywords.update(
cffi_modules=["pywincffi/core/dist.py:_ffi"]
)
setup(**setup_keywords)
| 31.728682
| 79
| 0.583435
| 2,001
| 0.488883
| 0
| 0
| 0
| 0
| 0
| 0
| 1,269
| 0.310042
|
192d556dfe9b06a1468a98d96f04aa0a6fb881ce
| 331
|
py
|
Python
|
data/split.py
|
manhcuongk55/gender-classification-by-hand
|
cafc5781f9c0a6476e848239f13d3ddf3a55de59
|
[
"MIT"
] | null | null | null |
data/split.py
|
manhcuongk55/gender-classification-by-hand
|
cafc5781f9c0a6476e848239f13d3ddf3a55de59
|
[
"MIT"
] | null | null | null |
data/split.py
|
manhcuongk55/gender-classification-by-hand
|
cafc5781f9c0a6476e848239f13d3ddf3a55de59
|
[
"MIT"
] | 1
|
2020-12-07T06:38:16.000Z
|
2020-12-07T06:38:16.000Z
|
import shutil
with open('test.csv', 'r') as f:
lines = f.readlines()
for line in lines:
image, gender = line.split(",")
print(image)
if 'female' in gender:
shutil.move("test/" + image, "test/female/" + image)
else:
shutil.move("test/" + image, "test/male/" + image)
| 27.583333
| 64
| 0.531722
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 64
| 0.193353
|
192f1d7e5401a66f3ca654feee18cca382797d01
| 2,941
|
py
|
Python
|
generate.py
|
fnrcum/dungeon_generator
|
7f5d1bd1b612f66e39f2782eac6fcd40abe7f7f0
|
[
"MIT"
] | null | null | null |
generate.py
|
fnrcum/dungeon_generator
|
7f5d1bd1b612f66e39f2782eac6fcd40abe7f7f0
|
[
"MIT"
] | null | null | null |
generate.py
|
fnrcum/dungeon_generator
|
7f5d1bd1b612f66e39f2782eac6fcd40abe7f7f0
|
[
"MIT"
] | null | null | null |
import random
from helpers import Leaf, Rect, RoomList
from renderer import MapRenderer
from typing import List, Any
class BSPTree:
def __init__(self):
self.level: List = []
self.room: object = None
self._leafs: List = []
self.MAX_LEAF_SIZE: int = 32
self.ROOM_MAX_SIZE: int = 20
self.ROOM_MIN_SIZE: int = 6
def generateLevel(self, map_width: int, map_height: int, room_list: RoomList):
# Creates an empty 2D array or clears existing array
self.level = [["#"
for y in range(map_height)]
for x in range(map_width)]
rootLeaf = Leaf(0, 0, map_width, map_height)
self._leafs.append(rootLeaf)
split_successfully = True
# loop through all leaves until they can no longer split successfully
while split_successfully:
split_successfully = False
for l in self._leafs:
if (l.child_1 is None) and (l.child_2 is None):
if (l.width > self.MAX_LEAF_SIZE or
(l.height > self.MAX_LEAF_SIZE) or
(random.random() > 0.7)):
if l.split_leaf(): # try to split the leaf
self._leafs.append(l.child_1)
self._leafs.append(l.child_2)
split_successfully = True
rootLeaf.createRooms(self, room_list)
return self.level
def createRoom(self, room: Rect):
# set all tiles within a rectangle to 0
for x in range(room.x1 + 1, room.x2):
for y in range(room.y1 + 1, room.y2):
self.level[x][y] = " "
def createHall(self, room1: Rect, room2: Rect):
# connect two rooms by hallways
x1, y1 = room1.get_wall()
x2, y2 = room2.get_wall()
# 50% chance that a tunnel will start horizontally
if random.randint(0, 1) == 1:
self.createHorTunnel(x1, x2, y1)
self.createVirTunnel(y1, y2, x2)
        else:  # else it starts vertically
self.createVirTunnel(y1, y2, x1)
self.createHorTunnel(x1, x2, y2)
def createHorTunnel(self, x1: int, x2: int, y: int):
_x1, _x2, _y = int(x1), int(x2), int(y)
for x in range(min(_x1, _x2), max(_x1, _x2) + 1):
            if self.level[x][_y] != " ":
                self.level[x][_y] = "c"
def createVirTunnel(self, y1: int, y2: int, x: int):
_y1, _y2, _x = int(y1), int(y2), int(x)
for y in range(min(_y1, _y2), max(_y1, _y2) + 1):
            if self.level[_x][y] != " ":
                self.level[_x][y] = "c"
room_list = RoomList()
tree = BSPTree().generateLevel(64, 128, room_list)
MapRenderer(tree).render_map()
print(room_list.get_rooms()[5].get_random_point_in_room())
| 35.011905
| 82
| 0.550493
| 2,653
| 0.902074
| 0
| 0
| 0
| 0
| 0
| 0
| 359
| 0.122067
|
19308256c02abbb6b9f77d5d12549a9bf4a3e01e
| 1,382
|
py
|
Python
|
src/runner/exporter.py
|
leechunghwan/YCSB-runner
|
f33d0fed30b41797864d95b7dbc8a6c2430f0bd8
|
[
"Apache-2.0"
] | null | null | null |
src/runner/exporter.py
|
leechunghwan/YCSB-runner
|
f33d0fed30b41797864d95b7dbc8a6c2430f0bd8
|
[
"Apache-2.0"
] | null | null | null |
src/runner/exporter.py
|
leechunghwan/YCSB-runner
|
f33d0fed30b41797864d95b7dbc8a6c2430f0bd8
|
[
"Apache-2.0"
] | null | null | null |
class Exporter:
# Extensions for output files
FILE_EXT = ".txt"
PLOTS_FILE_EXT = ".pdf"
"""Exporter: Exports statistical data captured from YCSB output to a file."""
def __init__(self, stats_set):
"""__init__
:param stats_set: StatisticsSet object containing data to be exported
"""
self.stats_set = stats_set
def export(self, filename, key, *fields):
"""export
Exports the given fields to the given CSV file.
:param filename: Filename and path for the export output
:param key: Key to use as index column
:param *fields: Fields to be exported
"""
raise NotImplementedError
def export_averages(self, filename, key, *fields):
"""export_averages
Exports the averages of the given fields, grouped by the given key, to
the given CSV file.
:param filename: Filename and path for export output
:param key: Key to group by
:param *fields: Fields to average
"""
raise NotImplementedError
def export_averages_plot(self, filename, key, *fields):
"""export_plot
Automatically generates and saves a plot of the given fields
:param filename: Filename and path for the plot output
:param *fields: Fields to be plotted
"""
raise NotImplementedError
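# Hedged sketch of a concrete exporter (illustrative only; a real
# implementation would write rows from self.stats_set to disk):
class _StdoutExporter(Exporter):
    def export(self, filename, key, *fields):
        print(filename + Exporter.FILE_EXT, key, fields)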
| 32.139535
| 81
| 0.636035
| 1,381
| 0.999276
| 0
| 0
| 0
| 0
| 0
| 0
| 939
| 0.67945
|
1930fdebe216693651fdbce0248a0cc3fa01cfe6
| 4,387
|
py
|
Python
|
src/sima/riflex/crsaxialfrictionmodel.py
|
SINTEF/simapy
|
650b8c2f15503dad98e2bfc0d0788509593822c7
|
[
"MIT"
] | null | null | null |
src/sima/riflex/crsaxialfrictionmodel.py
|
SINTEF/simapy
|
650b8c2f15503dad98e2bfc0d0788509593822c7
|
[
"MIT"
] | null | null | null |
src/sima/riflex/crsaxialfrictionmodel.py
|
SINTEF/simapy
|
650b8c2f15503dad98e2bfc0d0788509593822c7
|
[
"MIT"
] | null | null | null |
# This is an autogenerated file
#
# Generated with CRSAxialFrictionModel
from typing import Dict,Sequence,List
from dmt.entity import Entity
from dmt.blueprint import Blueprint
from .blueprints.crsaxialfrictionmodel import CRSAxialFrictionModelBlueprint
from typing import Dict
from sima.sima.moao import MOAO
from sima.sima.scriptablevalue import ScriptableValue
class CRSAxialFrictionModel(MOAO):
"""
Keyword arguments
-----------------
name : str
(default "")
description : str
(default "")
_id : str
(default "")
scriptableValues : List[ScriptableValue]
staticFriction : float
Static friction force corresponding to elongation(default 0.0)
staticElongation : float
Relative elongation(default 0.0)
dynamicFriction : float
Dynamic friction force corresponding to elongation(default 0.0)
dynamicElongation : float
Relative elongation(default 0.0)
axialFriction : bool
Local axial friction model(default False)
"""
def __init__(self , name="", description="", _id="", staticFriction=0.0, staticElongation=0.0, dynamicFriction=0.0, dynamicElongation=0.0, axialFriction=False, **kwargs):
super().__init__(**kwargs)
self.name = name
self.description = description
self._id = _id
self.scriptableValues = list()
self.staticFriction = staticFriction
self.staticElongation = staticElongation
self.dynamicFriction = dynamicFriction
self.dynamicElongation = dynamicElongation
self.axialFriction = axialFriction
for key, value in kwargs.items():
if not isinstance(value, Dict):
setattr(self, key, value)
@property
def blueprint(self) -> Blueprint:
"""Return blueprint that this entity represents"""
return CRSAxialFrictionModelBlueprint()
@property
def name(self) -> str:
""""""
return self.__name
@name.setter
def name(self, value: str):
"""Set name"""
self.__name = str(value)
@property
def description(self) -> str:
""""""
return self.__description
@description.setter
def description(self, value: str):
"""Set description"""
self.__description = str(value)
@property
def _id(self) -> str:
""""""
return self.___id
@_id.setter
def _id(self, value: str):
"""Set _id"""
self.___id = str(value)
@property
def scriptableValues(self) -> List[ScriptableValue]:
""""""
return self.__scriptableValues
@scriptableValues.setter
def scriptableValues(self, value: List[ScriptableValue]):
"""Set scriptableValues"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__scriptableValues = value
@property
def staticFriction(self) -> float:
"""Static friction force corresponding to elongation"""
return self.__staticFriction
@staticFriction.setter
def staticFriction(self, value: float):
"""Set staticFriction"""
self.__staticFriction = float(value)
@property
def staticElongation(self) -> float:
"""Relative elongation"""
return self.__staticElongation
@staticElongation.setter
def staticElongation(self, value: float):
"""Set staticElongation"""
self.__staticElongation = float(value)
@property
def dynamicFriction(self) -> float:
"""Dynamic friction force corresponding to elongation"""
return self.__dynamicFriction
@dynamicFriction.setter
def dynamicFriction(self, value: float):
"""Set dynamicFriction"""
self.__dynamicFriction = float(value)
@property
def dynamicElongation(self) -> float:
"""Relative elongation"""
return self.__dynamicElongation
@dynamicElongation.setter
def dynamicElongation(self, value: float):
"""Set dynamicElongation"""
self.__dynamicElongation = float(value)
@property
def axialFriction(self) -> bool:
"""Local axial friction model"""
return self.__axialFriction
@axialFriction.setter
def axialFriction(self, value: bool):
"""Set axialFriction"""
self.__axialFriction = bool(value)
| 29.641892
| 174
| 0.647367
| 4,023
| 0.917028
| 0
| 0
| 2,532
| 0.57716
| 0
| 0
| 1,205
| 0.274675
|