| blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
457db9cf31f17eff66a91f40c6afac4e33165f21
|
c2e2d1a80850017b95931752801024eb3f03b66a
|
/revision/repeated_word.py
|
2be1c4bb117045c352ee5c281760dcd177c55f98
|
[] |
no_license
|
shreyasvinaya/School_assignments
|
61e7c909f1f0764d0978455d137a962e05e34c5e
|
2c2dbd328fc896b70555176802c1c2166c974353
|
refs/heads/master
| 2020-07-08T18:58:15.001500
| 2019-11-19T05:01:12
| 2019-11-19T05:01:12
| 203,750,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
# most repeated word
a = input('enter a sentence')
b = a.split()
print(b)
d = {}
for word in b:
    if word not in d:
        d[word] = 1
    else:
        d[word] += 1
highest = 0
item = ''
for q in d:
    if d[q] > highest:
        highest = d[q]
        item = q
print(item, highest)
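# An equivalent lookup with the standard library (illustrative sketch):
#   from collections import Counter
#   item, highest = Counter(b).most_common(1)[0]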
|
[
"noreply@github.com"
] |
shreyasvinaya.noreply@github.com
|
9a985c189a5328bcc149258b1faf5e97c967615f
|
ab66cdd15bb1ad964e21ce236e3e524b1eebb58a
|
/build/toolchain/win/setup_toolchain.py
|
66e840bb9bee4e29ff2d61f9afdf05d8d839e3f3
|
[
"BSD-3-Clause"
] |
permissive
|
tainyiPeter/build-gn
|
729659f3af90318d8ca80caa0a2f72d9bbfc595e
|
59376ea32237f28525173e25fe1ce4a5c19ad659
|
refs/heads/master
| 2020-04-19T09:52:23.499802
| 2018-12-18T10:43:26
| 2018-12-18T10:56:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,687
|
py
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Copies the given "win tool" (which the toolchain uses to wrap compiler
# invocations) and the environment blocks for the 32-bit and 64-bit builds on
# Windows to the build directory.
#
# The arguments are the visual studio install location and the location of the
# win tool. The script assumes that the root build directory is the current dir
# and the files will be written to the current directory.
import errno
import json
import os
import re
import subprocess
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
import gn_helpers
SCRIPT_DIR = os.path.dirname(__file__)
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
# This occasionally happens and leads to misleading SYSTEMROOT error messages
# if not caught here.
if output_of_set.count('=') == 0:
raise Exception('Invalid output_of_set. Value is:\n%s' % output_of_set)
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules and actions in Chromium rely on python being in the
# path. Add the path to this python here so that if it's not in the
# path when ninja is run later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting.lower()
break
if sys.platform in ('win32', 'cygwin'):
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
def _DetectVisualStudioPath():
"""Return path to the GYP_MSVS_VERSION of Visual Studio.
"""
# Use the code in build/vs_toolchain.py to avoid duplicating code.
chromium_dir = os.path.abspath(os.path.join(SCRIPT_DIR, '..', '..', '..'))
sys.path.append(os.path.join(chromium_dir, 'build'))
import vs_toolchain
return vs_toolchain.DetectVisualStudioPath()
def _LoadEnvFromBat(args):
"""Given a bat command, runs it and returns env vars set by it."""
args = args[:]
args.extend(('&&', 'set'))
popen = subprocess.Popen(
args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
variables, _ = popen.communicate()
if popen.returncode != 0:
raise Exception('"%s" failed with error %d' % (args, popen.returncode))
return variables
def _LoadToolchainEnv(cpu, sdk_dir):
"""Returns a dictionary with environment variables that must be set while
running binaries from the toolchain (e.g. INCLUDE and PATH for cl.exe)."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |cpu| should be either
# 'x86' or 'x64'.
assert cpu in ('x86', 'x64')
# PATCH(build-gn): Do not assume depot_tools by default.
if bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', 0))) and sdk_dir:
# Load environment from json file.
env = os.path.normpath(os.path.join(sdk_dir, 'bin/SetEnv.%s.json' % cpu))
env = json.load(open(env))['env']
for k in env:
entries = [os.path.join(*([os.path.join(sdk_dir, 'bin')] + e))
for e in env[k]]
# clang-cl wants INCLUDE to be ;-separated even on non-Windows,
# lld-link wants LIB to be ;-separated even on non-Windows. Path gets :.
# The separator for INCLUDE here must match the one used in main() below.
sep = os.pathsep if k == 'PATH' else ';'
env[k] = sep.join(entries)
# PATH is a bit of a special case, it's in addition to the current PATH.
env['PATH'] = env['PATH'] + os.pathsep + os.environ['PATH']
# Augment with the current env to pick up TEMP and friends.
for k in os.environ:
if k not in env:
env[k] = os.environ[k]
varlines = []
for k in sorted(env.keys()):
varlines.append('%s=%s' % (str(k), str(env[k])))
variables = '\n'.join(varlines)
# Check that the json file contained the same environment as the .cmd file.
if sys.platform in ('win32', 'cygwin'):
script = os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.cmd'))
assert _ExtractImportantEnvironment(variables) == \
_ExtractImportantEnvironment(_LoadEnvFromBat([script, '/' + cpu]))
else:
if 'GYP_MSVS_OVERRIDE_PATH' not in os.environ:
os.environ['GYP_MSVS_OVERRIDE_PATH'] = _DetectVisualStudioPath()
# We only support x64-hosted tools.
script_path = os.path.normpath(os.path.join(
os.environ['GYP_MSVS_OVERRIDE_PATH'],
'VC/vcvarsall.bat'))
if not os.path.exists(script_path):
# vcvarsall.bat for VS 2017 fails if run after running vcvarsall.bat from
# VS 2013 or VS 2015. Fix this by clearing the vsinstalldir environment
# variable.
if 'VSINSTALLDIR' in os.environ:
del os.environ['VSINSTALLDIR']
other_path = os.path.normpath(os.path.join(
os.environ['GYP_MSVS_OVERRIDE_PATH'],
'VC/Auxiliary/Build/vcvarsall.bat'))
if not os.path.exists(other_path):
raise Exception('%s is missing - make sure VC++ tools are installed.' %
script_path)
script_path = other_path
# Chromium requires the 10.0.15063.468 SDK - previous versions don't have
# all of the required declarations and 10.0.16299.0 has some
# incompatibilities (crbug.com/773476).
args = [script_path, 'amd64_x86' if cpu == 'x86' else 'amd64',
'10.0.15063.0']
variables = _LoadEnvFromBat(args)
return _ExtractImportantEnvironment(variables)
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
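# For example (illustrative): {'A': '1', 'B': '2'} is formatted as
# 'A=1\0B=2\0\0', a block CreateProcess can consume directly.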
def main():
if len(sys.argv) != 6:
print('Usage setup_toolchain.py '
'<visual studio path> <win sdk path> '
'<runtime dirs> <target_cpu> <goma_disabled>')
sys.exit(2)
win_sdk_path = sys.argv[2]
runtime_dirs = sys.argv[3]
target_cpu = sys.argv[4]
goma_disabled = sys.argv[5]
cpus = ('x86', 'x64')
assert target_cpu in cpus
vc_bin_dir = ''
include = ''
# TODO(scottmg|goma): Do we need an equivalent of
# ninja_use_custom_environment_files?
for cpu in cpus:
# Extract environment variables for subprocesses.
env = _LoadToolchainEnv(cpu, win_sdk_path)
env['PATH'] = runtime_dirs + os.pathsep + env['PATH']
env['GOMA_DISABLED'] = goma_disabled
if cpu == target_cpu:
for path in env['PATH'].split(os.pathsep):
if os.path.exists(os.path.join(path, 'cl.exe')):
vc_bin_dir = os.path.realpath(path)
break
# The separator for INCLUDE here must match the one used in
# _LoadToolchainEnv() above.
include = [p.replace('"', r'\"') for p in env['INCLUDE'].split(';') if p]
include_I = ' '.join(['"/I' + i + '"' for i in include])
include_imsvc = ' '.join(['"-imsvc' + i + '"' for i in include])
env_block = _FormatAsEnvironmentBlock(env)
with open('environment.' + cpu, 'wb') as f:
f.write(env_block)
# Create a store app version of the environment.
if 'LIB' in env:
env['LIB'] = env['LIB'] .replace(r'\VC\LIB', r'\VC\LIB\STORE')
if 'LIBPATH' in env:
env['LIBPATH'] = env['LIBPATH'].replace(r'\VC\LIB', r'\VC\LIB\STORE')
env_block = _FormatAsEnvironmentBlock(env)
with open('environment.winrt_' + cpu, 'wb') as f:
f.write(env_block)
assert vc_bin_dir
print 'vc_bin_dir = ' + gn_helpers.ToGNString(vc_bin_dir)
assert include_I
print 'include_flags_I = ' + gn_helpers.ToGNString(include_I)
assert include_imsvc
print 'include_flags_imsvc = ' + gn_helpers.ToGNString(include_imsvc)
if __name__ == '__main__':
main()
|
[
"zcbenz@gmail.com"
] |
zcbenz@gmail.com
|
4918810498af75369329a2204c7cccbe0e40efb1
|
40dd8330e5f78c4348bbddc2c5acfd59d793dd51
|
/tools/model_converters/twins2mmseg.py
|
647d41784aa07468be4b3f2e183064ad55266ad1
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmsegmentation
|
0d12092312e2c465ede1fd7dd9847b6f2b37049c
|
30a3f94f3e2916e27fa38c67cc3b8c69c1893fe8
|
refs/heads/main
| 2023-09-04T10:54:52.299711
| 2023-07-24T07:28:21
| 2023-07-24T07:28:21
| 272,133,018
| 6,534
| 2,375
|
Apache-2.0
| 2023-09-14T01:22:32
| 2020-06-14T04:32:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,764
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from collections import OrderedDict
import mmengine
import torch
from mmengine.runner import CheckpointLoader
def convert_twins(args, ckpt):
new_ckpt = OrderedDict()
for k, v in list(ckpt.items()):
new_v = v
if k.startswith('head'):
continue
elif k.startswith('patch_embeds'):
if 'proj.' in k:
new_k = k.replace('proj.', 'projection.')
else:
new_k = k
elif k.startswith('blocks'):
# Union
if 'attn.q.' in k:
new_k = k.replace('q.', 'attn.in_proj_')
new_v = torch.cat([v, ckpt[k.replace('attn.q.', 'attn.kv.')]],
dim=0)
elif 'mlp.fc1' in k:
new_k = k.replace('mlp.fc1', 'ffn.layers.0.0')
elif 'mlp.fc2' in k:
new_k = k.replace('mlp.fc2', 'ffn.layers.1')
# Only pcpvt
elif args.model == 'pcpvt':
if 'attn.proj.' in k:
new_k = k.replace('proj.', 'attn.out_proj.')
else:
new_k = k
# Only svt
else:
if 'attn.proj.' in k:
k_lst = k.split('.')
if int(k_lst[2]) % 2 == 1:
new_k = k.replace('proj.', 'attn.out_proj.')
else:
new_k = k
else:
new_k = k
new_k = new_k.replace('blocks.', 'layers.')
elif k.startswith('pos_block'):
new_k = k.replace('pos_block', 'position_encodings')
if 'proj.0.' in new_k:
new_k = new_k.replace('proj.0.', 'proj.')
else:
new_k = k
if 'attn.kv.' not in k:
new_ckpt[new_k] = new_v
return new_ckpt
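# Illustrative renames performed above (key names are assumed examples):
#   'patch_embeds.0.proj.weight' -> 'patch_embeds.0.projection.weight'
#   'blocks.0.0.mlp.fc1.weight'  -> 'layers.0.0.ffn.layers.0.0.weight'
#   'pos_block.0.proj.0.weight'  -> 'position_encodings.0.proj.weight'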
def main():
parser = argparse.ArgumentParser(
description='Convert keys in timm pretrained vit models to '
'MMSegmentation style.')
parser.add_argument('src', help='src model path or url')
# The dst path must be a full path of the new checkpoint.
parser.add_argument('dst', help='save path')
parser.add_argument('model', help='model: pcpvt or svt')
args = parser.parse_args()
checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu')
if 'state_dict' in checkpoint:
# timm checkpoint
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
weight = convert_twins(args, state_dict)
mmengine.mkdir_or_exist(osp.dirname(args.dst))
torch.save(weight, args.dst)
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
open-mmlab.noreply@github.com
|
668a48a3ce0d405c8471c0a654011de3bee895c6
|
96facbfa0cd5a2ca63819d67f46af3c3456631c1
|
/sound4bats/plot_metrics.py
|
9e4acaa3c4f6e31ed3e7ecbc9ab6650ece023e7c
|
[
"MIT"
] |
permissive
|
cloudedbats/cloudedbats_sound
|
114a79f88ea0dd789304b88ecf451acc4196bb3e
|
e9c263ed86665f2f2a0b8bed300bca0a5c5b61f6
|
refs/heads/master
| 2021-07-14T03:36:12.777907
| 2020-07-21T07:27:54
| 2020-07-21T07:27:54
| 158,961,160
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Project: http://cloudedbats.org
# Copyright (c) 2019 Arnold Andreasson
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import numpy
import pathlib
import dsp4bats
class PlotMetrics():
""" """
def __init__(self, debug=False):
""" """
|
[
"parallels@parallels-Parallels-Virtual-Platform"
] |
parallels@parallels-Parallels-Virtual-Platform
|
2e7a4bd79b06aac3719043d6611fdd4744d9fe10
|
51065aaa884c855fb00d65abbebd6d33f99b289c
|
/Nenotal PDA monitoring/Software/HR_Unit_Tests.py
|
5617303e8bb7e476765ed366ba2bcf0ad1bbe6df
|
[] |
no_license
|
johnsonhit/Neonatal-cardiac-monitoring
|
e5d78ae8772f0366a764760a804c33293ee015bc
|
efd2b896e8ef7272d61b5f2b98e49ed6ad6e4e1a
|
refs/heads/master
| 2020-07-25T16:43:31.984399
| 2019-03-26T02:03:23
| 2019-03-26T02:03:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,552
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 23 18:04:46 2015
@author: Moseph
"""
import unittest
class runTests(unittest.TestCase): #all of the below unit tests test the accuracy and correctness of functions used in HR_Monitor.py
# Status: All tests Run OK in ~0.03 seconds
def test_findTHRZone(self):
"""
checks that accurate target heart zone is calculated based off of input data
"""
from HR_Monitor import findTHRZone
input1=22
output1, output2, output3, output4=findTHRZone(input1)
self.assertAlmostEqual(output3,191.6, msg='ironically the max is not actually a max')
self.assertEqual(output1,134, msg='70% of what???, definitely not the max!')
self.assertEqual(output2,163, msg='85% of what???, certainly not the max!')
def test_findsafeRHR(self):
"""
checks that accurate safe resting heart rate (RHR) is calculated based off of all combinations of input data
"""
from HR_Monitor import findsafeRHR
input1='Male'
input2='Female'
input3='High Level Athlete'
input4='Relatively Fit'
input5='Couch Potato'
output1=findsafeRHR(input1,input3)
output2=findsafeRHR(input1,input4)
output3=findsafeRHR(input1,input5)
output4=findsafeRHR(input2,input3)
output5=findsafeRHR(input2,input4)
output6=findsafeRHR(input2,input5)
self.assertEqual(output1,48, msg='Lie detector is telling me this is a lie')
self.assertEqual(output2,60, msg='Something smells fishy...tuna maybe?')
self.assertEqual(output3,72, msg='You lie about as well as you pass unit tests')
self.assertEqual(output4,52, msg='Lie detector is telling me this is a lie')
self.assertEqual(output5,64, msg='Something smells fishy...is it salmon?')
self.assertEqual(output6,76, msg='You lie about as well as you pass unit tests')
def test_importandseparate(self):
"""
checks for if the input vectors are the same length
"""
from HR_Monitor import importandseparate
from random import randrange
input1 = randrange(1,21,1)
t, v=importandseparate(input1)
self.assertEqual(len(t),len(v), msg='different lengths! abort abort abort!')
def test_makenparray(self):
"""
checks to make sure input vectors are output as numpy arrays
"""
from HR_Monitor import makenparray
from numpy import ndarray
a=[1,3,-5,2,7,5,9,-1,3,-4]
a_new=makenparray(a)
self.assertIsInstance(a_new,ndarray, msg='not an np array!!!')
def test_removeDCoffset(self):
"""
checks that the input and output vectors are the same length
"""
from HR_Monitor import removeDCoffset
from numpy import array, int8
input1=array([1,2,3,4,5,10,9,8,7,6], dtype=int8)
output1=removeDCoffset(input1)
self.assertEqual(len(input1),len(output1), msg='output length is not quite right')
def test_normalizeData(self):
"""
checks that an input array is properly normalized by ensuring all resulting
values are between -1 and 1, that either -1 or 1 is a value, and the correct
value is chosen to normalize data against
"""
from HR_Monitor import normalizeData
from numpy import array, float32, absolute
input1=array([-5.6,-9.4,-3.5,-1.4,4.3,4.8,0.9,-2.8,-3.9,6.9], dtype=float32)
output1=normalizeData(input1)
self.assertLessEqual(max(output1), 1, msg='values are abnormal...get it? :)')
self.assertGreaterEqual(min(output1), -1, msg='values are abnormal...get it? :)')
self.assertEqual(max(abs(output1)),1, msg='values are abnormal...get it? :)')
self.assertEqual(abs(output1[1]), 1, msg='normalized against wrong value')
self.assertEqual(len(input1),len(output1), msg='output length is not quite right')
def test_elapsedtime(self):
"""
checks that (1) elapsed time is result of final index value in a vector
minus the initial index value (2) elapsed time is a single value (3) elapsed
time is positive
"""
from HR_Monitor import elapsedtime
from numpy import array, float32
b=[1,2,3,4,5,6,7,8,9,10]
a=array(b, dtype=float32)
a_new=elapsedtime(a)
self.assertEqual(a_new.size,1, msg='WRONG, this should be a single value, not a vector')
self.assertEqual(a_new,9, msg='Nope, this elapsed time is not correct')
self.assertGreater(a_new,0, msg='Eh eh, time elapsed should be greater than 0')
def test_crosscorrviafor(self):
"""
checks that cross correlation of kernel with measured signal via for loop
is working properly by ensuring the resulting numpy array of correlation
coefficients is the appropriate length (which equals number of loops) and
each index value is correct, using known values
"""
from HR_Monitor import crosscorrviafor
from numpy import array, float32
a=array([3,1,2,0,2], dtype=float32) #known measured signal array
b=array([1,1], dtype=float32) #kernel array
c=len(b)
x=crosscorrviafor(a,c,b)
self.assertEqual(len(x), len(a)-1, msg='Coefficient vector wrong length dude')
self.assertEqual(x[0], 4, msg='Cross correlation went south bro')
self.assertEqual(x[1], 3, msg='Cross correlation went south bro')
self.assertEqual(x[2], 2, msg='Cross correlation went south bro')
self.assertEqual(x[3], 2, msg='Cross correlation went south bro')
def test_sectominconvert(self):
"""
checks that conversion from seconds to minutes of a given time numpy array
is done correctly using known values
"""
from HR_Monitor import sectomin
from numpy import array, float32
a=array([120], dtype=float32)
b=sectomin(a)
self.assertEqual(b, 2, msg='Ummm, this is not in minutes man!')
def test_simpledivision(self):
"""
checks for correct division of two numpy arrays using known values
"""
from HR_Monitor import calcheartrate
from numpy import array, float32
a=array([50], dtype=float32)
b=array([20], dtype=float32)
output1, output2 = calcheartrate(a,b)
self.assertEqual(output1, 2.5, msg='Go back to 3rd grade and learn to divide...in Python')
def test_lessthan(self):
"""
checks that the less than condition is properly assessed
"""
from HR_Monitor import detectbradycardia
input1=50
input2=85
output1=detectbradycardia(input1, input2)
output2=detectbradycardia(input2, input1)
self.assertEqual(output1, 0, msg='this would be true...if it were opposite day')
self.assertEqual(output2, 1, msg='opposite day only exists in cartoons')
def test_greaterthan(self):
"""
checks that the greater than condition is properly assessed
"""
from HR_Monitor import detecttachycardia
input1=27
input2=43
output1=detecttachycardia(input1, input2)
output2=detecttachycardia(input2, input1)
self.assertEqual(output1, 1, msg='this would not be false...if it were opposite day')
self.assertEqual(output2, 0, msg='opposite day or not, this is wrong')
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
johnsonhit.noreply@github.com
|
440882081229f4907b7d73f358e1e957fd47ae98
|
9660ada2f477f0d89f60cfd2e979824c7c97ea31
|
/src/experiments/no_trading_data.py
|
6903b582da442d6a8ab9f56f8ff7138f8a02fa9c
|
[] |
no_license
|
JonasLaksen/deeptsa
|
941464fa499ac8022f7980e59265d48d83d078b9
|
1e8f9faf6272e9720bafcb7a0269ad7301fd3cdd
|
refs/heads/master
| 2022-12-20T21:46:39.246314
| 2020-07-19T21:28:36
| 2020-07-19T21:28:36
| 170,122,906
| 0
| 0
| null | 2022-11-21T22:47:57
| 2019-02-11T12:11:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,705
|
py
|
import json
import os
import pathlib
import random
import sys
from datetime import datetime
from functools import reduce
import numpy as np
import tensorflow as tf
from src.lstm_one_output import LSTMOneOutput
from src.models.stacked_lstm import StackedLSTM
from src.utils import load_data, get_features, plot_one, get_feature_list_lags
seed = int(sys.argv[1]) if sys.argv[1] else 0
type_search = sys.argv[2] if sys.argv[2] else 'hyper'
layer_sizes = list(map(int, sys.argv[3].split(","))) if sys.argv[3] else [999]
model_type = sys.argv[4] if sys.argv[4] else 'stacked'
os.environ['PYTHONHASHSEED'] = str(seed)
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
def calculate_n_features_and_batch_size(X_train):
return X_train.shape[2], X_train.shape[0]
def experiment_no_trading_data(epochs, y_type='next_price', feature_list=[], experiment_name="no_trading_data"):
experiment_timestamp = datetime.now()
description = 'Analyse all stocks without trading data and evaluate'
X, y, y_dir, X_stocks, scaler_y = load_data(feature_list, y_type)
X, y, y_dir, X_stocks = X, y, y_dir, X_stocks
training_size = int(.9 * len(X[0]))
X_train, y_train = X[:, :training_size], y[:, :training_size]
X_val, y_val = X[:, training_size:], y[:, training_size:]
n_features, batch_size = calculate_n_features_and_batch_size(X_train)
stock_list = [np.arange(len(X)).reshape((len(X), 1, 1))]
lstm = LSTMOneOutput(**{
'X_stocks': X_stocks,
'X_train': X_train,
'y_train': y_train,
'X_val': X_val,
'y_val': y_val,
'feature_list': feature_list,
'dropout': .1,
'optimizer': tf.keras.optimizers.Adam(.001),
'loss': 'MSE',
'model_generator': StackedLSTM,
'layer_sizes': layer_sizes,
'seed': seed,
'n_features': n_features,
'batch_size': batch_size,
'stock_list': stock_list
})
losses = lstm.train(
gen_epochs=epochs,
spech_epochs=0,
copy_weights_from_gen_to_spec=False,
load_spec=False,
load_gen=False,
train_general=True,
train_specialized=False)
evaluation = lstm.generate_general_model_results(
scaler_y=scaler_y, y_type=y_type
)
filename_midfix = f'{os.path.basename(__file__)}/{experiment_timestamp}'
directory = f'results/{filename_midfix}/aksje-{i}-{X_stocks[i]}'
if not os.path.exists(directory):
os.makedirs(directory)
evaluation = lstm.generate_general_model_results(
scaler_y=scaler_y, y_type=y_type, title="No trading data", filename=f'{directory}/plot'
)
with open(
f'results/{os.path.basename(__file__)}/{experiment_timestamp}/{experiment_name}/loss_history.txt',
'a+') as f:
f.write(str(losses['general_loss']))
f.write(str(losses['general_val_loss']))
with open(
f'results/{os.path.basename(__file__)}/{experiment_timestamp}/{experiment_name}/evaluation.json',
'a+') as f:
f.write(json.dumps(evaluation, indent=4));
with open(f'results/{os.path.basename(__file__)}/{experiment_timestamp}/{experiment_name}/meta.txt',
'a+') as f:
f.write(lstm.meta(description, epochs))
plot_one('Loss history', [losses['general_loss'], losses['general_val_loss']], ['Training loss', 'Validation loss'], ['Epoch', 'Loss'])
feature_list = get_features(trading=False)
experiment_no_trading_data(5000, y_type="next_price", feature_list=feature_list)
|
[
"pthoang94@gmail.com"
] |
pthoang94@gmail.com
|
c436a852bf1b29fdd43c22fec676f7de2348174a
|
da7a165522daea7c346693c5f32850017c482967
|
/abc51-100/abc066/c.py
|
09ed9795009df321637516a4eee2dcfb604ef0b8
|
[] |
no_license
|
SShayashi/ABC
|
19f8750919208c5ff8935638dbaab941c255f914
|
3cbfee0c5251c1bb0df6306166d8d4b33bf7bb2c
|
refs/heads/master
| 2021-05-04T21:06:10.720367
| 2020-07-11T13:59:16
| 2020-07-11T13:59:29
| 119,886,572
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
def main():
n = int(input())
a = list(map(int, input().split()))
even = a[1::2]
odd = a[::2]
ans = []
if n % 2 == 0:
even.reverse()
ans = even + odd
else:
odd.reverse()
ans = odd + even
to_str = map(str, ans)
return " ".join(to_str)
print(main())
|
[
"sshayashi0208@gmail.com"
] |
sshayashi0208@gmail.com
|
03df94b728472d4265b8096044658913498868d3
|
e669f692892b23cdc5829bf82e3b81ab016ec973
|
/active_learning/query_strategies/core_set.py
|
3a52ff7357f36990feff47e505bf04ec60b9916d
|
[] |
no_license
|
JackYANG19/A-Benchmark-and-Empirical-Analysis-for-Replay-Strategies-in-Continual-Learning
|
848610f94eb38de0148ef6d05af0ec4144feecce
|
a6be52884576d56135fd05181df747ea2323f79f
|
refs/heads/main
| 2023-05-29T00:37:49.633513
| 2021-06-11T07:44:02
| 2021-06-11T07:44:02
| 375,931,888
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,406
|
py
|
import numpy as np
from .strategy import Strategy
from sklearn.neighbors import NearestNeighbors
import pickle
from datetime import datetime
class CoreSet(Strategy):
def __init__(self, X, Y, idxs_lb, net, handler, args, tor=1e-4):
super(CoreSet, self).__init__(X, Y, idxs_lb, net, handler, args)
self.tor = tor
def query(self, n):
lb_flag = self.idxs_lb.copy()
embedding = self.get_embedding(self.X, self.Y)
embedding = embedding.numpy()
print('calculate distance matrix')
t_start = datetime.now()
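# The block below turns the Gram matrix embedding @ embedding.T into
# pairwise Euclidean distances via ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2.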
dist_mat = np.matmul(embedding, embedding.transpose())
sq = np.array(dist_mat.diagonal()).reshape(len(self.X), 1)
dist_mat *= -2
dist_mat += sq
dist_mat += sq.transpose()
dist_mat = np.sqrt(dist_mat)
print(datetime.now() - t_start)
print('calculate greedy solution')
t_start = datetime.now()
mat = dist_mat[~lb_flag, :][:, lb_flag]
for i in range(n):
if i % 10 == 0:
print('greedy solution {}/{}'.format(i, n))
mat_min = mat.min(axis=1)
q_idx_ = mat_min.argmax()
q_idx = np.arange(self.n_pool)[~lb_flag][q_idx_]
lb_flag[q_idx] = True
mat = np.delete(mat, q_idx_, 0)
mat = np.append(mat, dist_mat[~lb_flag, q_idx][:, None], axis=1)
print(datetime.now() - t_start)
opt = mat.min(axis=1).max()
bound_u = opt
bound_l = opt / 2.0
delta = opt
xx, yy = np.where(dist_mat <= opt)
dd = dist_mat[xx, yy]
lb_flag_ = self.idxs_lb.copy()
subset = np.where(lb_flag_ == True)[0].tolist()
SEED = 5
pickle.dump((xx.tolist(), yy.tolist(), dd.tolist(), subset, float(opt), n, self.n_pool),
open('mip{}.pkl'.format(SEED), 'wb'), 2)
import ipdb
ipdb.set_trace()
# solving MIP
# download Gurobi software from http://www.gurobi.com/
# sh {GUROBI_HOME}/linux64/bin/gurobi.sh < core_set_sovle_solve.py
sols = pickle.load(open('sols{}.pkl'.format(SEED), 'rb'))
if sols is None:
q_idxs = lb_flag
else:
lb_flag_[sols] = True
q_idxs = lb_flag_
print('sum q_idxs = {}'.format(q_idxs.sum()))
return np.arange(self.n_pool)[(self.idxs_lb ^ q_idxs)]
|
[
"noreply@github.com"
] |
JackYANG19.noreply@github.com
|
16f56f1f208c469a2d92b843ac849f98a7111d08
|
7087a5dd1772c9456f098bc024a894dcaeef5432
|
/backup/build/new-calkube/kubernetes-6.0.0_snapshot-py2.7.egg/kubernetes/client/models/v1_delete_options.py
|
575bcf443ee81e1402ef6bba7a8c440f8590df0a
|
[] |
no_license
|
santhoshchami/kubecctl-python
|
5be7a5a17cc6f08ec717b3eb1c11719ef7653aba
|
cd45af465e25b0799d65c573e841e2acb983ee68
|
refs/heads/master
| 2021-06-23T11:00:43.615062
| 2019-07-10T16:57:06
| 2019-07-10T16:57:06
| 145,669,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,530
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.10.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1DeleteOptions(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'grace_period_seconds': 'int',
'kind': 'str',
'orphan_dependents': 'bool',
'preconditions': 'V1Preconditions',
'propagation_policy': 'str'
}
attribute_map = {
'api_version': 'apiVersion',
'grace_period_seconds': 'gracePeriodSeconds',
'kind': 'kind',
'orphan_dependents': 'orphanDependents',
'preconditions': 'preconditions',
'propagation_policy': 'propagationPolicy'
}
def __init__(self, api_version=None, grace_period_seconds=None, kind=None, orphan_dependents=None, preconditions=None, propagation_policy=None):
"""
V1DeleteOptions - a model defined in Swagger
"""
self._api_version = None
self._grace_period_seconds = None
self._kind = None
self._orphan_dependents = None
self._preconditions = None
self._propagation_policy = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if grace_period_seconds is not None:
self.grace_period_seconds = grace_period_seconds
if kind is not None:
self.kind = kind
if orphan_dependents is not None:
self.orphan_dependents = orphan_dependents
if preconditions is not None:
self.preconditions = preconditions
if propagation_policy is not None:
self.propagation_policy = propagation_policy
@property
def api_version(self):
"""
Gets the api_version of this V1DeleteOptions.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1DeleteOptions.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1DeleteOptions.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1DeleteOptions.
:type: str
"""
self._api_version = api_version
@property
def grace_period_seconds(self):
"""
Gets the grace_period_seconds of this V1DeleteOptions.
The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:return: The grace_period_seconds of this V1DeleteOptions.
:rtype: int
"""
return self._grace_period_seconds
@grace_period_seconds.setter
def grace_period_seconds(self, grace_period_seconds):
"""
Sets the grace_period_seconds of this V1DeleteOptions.
The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param grace_period_seconds: The grace_period_seconds of this V1DeleteOptions.
:type: int
"""
self._grace_period_seconds = grace_period_seconds
@property
def kind(self):
"""
Gets the kind of this V1DeleteOptions.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1DeleteOptions.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1DeleteOptions.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1DeleteOptions.
:type: str
"""
self._kind = kind
@property
def orphan_dependents(self):
"""
Gets the orphan_dependents of this V1DeleteOptions.
Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:return: The orphan_dependents of this V1DeleteOptions.
:rtype: bool
"""
return self._orphan_dependents
@orphan_dependents.setter
def orphan_dependents(self, orphan_dependents):
"""
Sets the orphan_dependents of this V1DeleteOptions.
Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param orphan_dependents: The orphan_dependents of this V1DeleteOptions.
:type: bool
"""
self._orphan_dependents = orphan_dependents
@property
def preconditions(self):
"""
Gets the preconditions of this V1DeleteOptions.
Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.
:return: The preconditions of this V1DeleteOptions.
:rtype: V1Preconditions
"""
return self._preconditions
@preconditions.setter
def preconditions(self, preconditions):
"""
Sets the preconditions of this V1DeleteOptions.
Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.
:param preconditions: The preconditions of this V1DeleteOptions.
:type: V1Preconditions
"""
self._preconditions = preconditions
@property
def propagation_policy(self):
"""
Gets the propagation_policy of this V1DeleteOptions.
Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: The propagation_policy of this V1DeleteOptions.
:rtype: str
"""
return self._propagation_policy
@propagation_policy.setter
def propagation_policy(self, propagation_policy):
"""
Sets the propagation_policy of this V1DeleteOptions.
Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param propagation_policy: The propagation_policy of this V1DeleteOptions.
:type: str
"""
self._propagation_policy = propagation_policy
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1DeleteOptions):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
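# Illustrative use of this generated model (field values taken from the
# docstrings above):
#   opts = V1DeleteOptions(grace_period_seconds=0,
#                          propagation_policy='Foreground')
#   opts.to_dict()   # plain dict ready for JSON serialization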
|
[
"root@kube-node02.local"
] |
root@kube-node02.local
|
e00966eccc36c07429403e8283952fe96a7dc0c6
|
f398d8a101679f02c86d056a7027664559d59f3d
|
/mediaserverV1/common/app.py
|
5dbbb1bdc00586acd75186a2e07f615d8a9a8e95
|
[] |
no_license
|
sasanyi/mediaserver-client
|
5c5ffcef438eb54947a7fe73a66f2fef4c52b9f1
|
bd2eaadd2aa0412dd80d3fedfee9438331b48923
|
refs/heads/main
| 2023-05-09T19:21:03.170112
| 2021-04-20T11:59:58
| 2021-04-20T11:59:58
| 359,217,955
| 0
| 1
| null | 2021-06-02T06:58:31
| 2021-04-18T18:05:05
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 646
|
py
|
from sqlalchemy.orm import sessionmaker
from .models import Base
class App:
__db = None
@staticmethod
def setDb(db):
App.__db = db
@staticmethod
def getDb():
if App.__db is not None:
return App.__db
else:
raise RuntimeError("Server not configured")
class Database:
engine = None
_db = None
def __init__(self, engine):
self.engine = engine
Base.metadata.create_all(bind=engine)
Session = sessionmaker(bind=engine)
self._db = Session()
@property
def db(self):
return self._db
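# Typical wiring (illustrative sketch; create_engine is sqlalchemy's):
#   App.setDb(Database(create_engine('sqlite:///media.db')))
#   session = App.getDb().db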
|
[
"savanya.sandor.jozsef@gmail.com"
] |
savanya.sandor.jozsef@gmail.com
|
d240a545d7bf8d3761c56d2353c1b9191d5c5c8d
|
fcc0e1f62924c6e64e3197f2c39df8c85fd91e72
|
/The Millionth Fibonacci Kata/main.py
|
cf1f70b1f573d180a09349164f3a4529306b25cd
|
[] |
no_license
|
T-800cs101/Codewars-solutions
|
9c9a9f3178401081867dbacb8c5473197de1039c
|
704e4514d01225586fffb77499956115f7deba24
|
refs/heads/main
| 2023-02-08T01:02:14.442862
| 2021-01-03T18:39:53
| 2021-01-03T18:39:53
| 324,001,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 558
|
py
|
def fib(n):
"""Calculates the nth Fibonacci number"""
a=1
b=1
c=1
rc=0
d=0
rd=1
if n >= 0:
while n>0:
if n%2!=0: #multiply matrix a with vector r
tc = rc
rc = rc*a + rd*c
rd = tc*b + rd*d
ta = a
tb = b
tc = c
a = a*a + b*c
b = ta*b + b*d
c = c*ta + d*c
d = tc*tb + d*d
n >>=1
return rc
else:
return ((-1)**((n*(-1))+1))*fib(n*(-1))
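# The loop above is matrix exponentiation: it applies [[1, 1], [1, 0]]^n
# to the vector (0, 1) in O(log n) squarings, so e.g. fib(10) == 55 and,
# via the negafibonacci identity F(-n) = (-1)**(n+1) * F(n), fib(-6) == -8.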
|
[
"noreply@github.com"
] |
T-800cs101.noreply@github.com
|
6332935611c8fc4d545043236f06b43121fcb9a3
|
555319a2f344a789e6971a877824111b13004c6d
|
/transfer_points.py
|
65a0b2040789bb1378783a424f0548a72d95a945
|
[] |
no_license
|
Kairobo/Learn_Python
|
66e792054e70b72039a610cb8159693352b3092e
|
903f5d1d8614f4b3a81dc005362efb0ee50e8778
|
refs/heads/master
| 2021-01-25T09:45:00.162914
| 2018-09-12T04:53:07
| 2018-09-12T04:53:07
| 123,317,004
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,917
|
py
|
from copy import deepcopy
import numpy as np
def vector_norm(vec):
return np.sqrt(np.power(vec[0],2)+np.power(vec[1],2))
def vector_divide(vec,divider=1):
assert(len(vec)==2)
vec_new = deepcopy(vec)
vec_new[0] = vec_new[0]/divider
vec_new[1] = vec_new[1]/divider
return vec_new
def vector_add(add_list):
vec_new = [0,0]
if len(add_list) == 0:
return vec_new
for i in range(len(add_list)):
vec_new[0] = vec_new[0] + add_list[i][0]
vec_new[1] = vec_new[1] + add_list[i][1]
return vec_new
def vector_subtract_2(sub_list):
vec_new = [0,0]
assert len(sub_list) == 2
vec_new[0] = sub_list[0][0] - sub_list[1][0]
vec_new[1] = sub_list[0][1] - sub_list[1][1]
return vec_new
# Effect: given four geo point locations and their pixel locations,
# and a list of polygon positions in pixel coordinates, transfer the
# polygon positions to geo points.
# Require:
# the sequence of the geo_locations corresponds to the pix_locations
# 1 2
#
# 4 3
def polygon_pix2geo(polygon_indexes, pix_locations, geo_locations):
    polygon_new = deepcopy(polygon_indexes)
    num_points = len(polygon_indexes)
    gO_0 = geo_locations[3]  # geo origin at corner 4
    pO_0 = pix_locations[3]  # pixel origin at corner 4
    # 4->1 axis in geo space
    gx_0 = vector_subtract_2([geo_locations[0], geo_locations[3]])
    # 4->3 axis in geo space
    gy_0 = vector_subtract_2([geo_locations[2], geo_locations[3]])
    for i in range(num_points):
        # fractional position along each pixel axis (assumes the pixel
        # corners form an axis-aligned rectangle, as in the data below)
        fx = (polygon_indexes[i][0] - pO_0[0]) / (pix_locations[0][0] - pO_0[0])
        fy = (polygon_indexes[i][1] - pO_0[1]) / (pix_locations[2][1] - pO_0[1])
        polygon_new[i] = [gO_0[0] + fx * gx_0[0] + fy * gy_0[0],
                          gO_0[1] + fx * gx_0[1] + fy * gy_0[1]]
    return polygon_new
pix_locations =[[10,10],[10,240],[240,240],[240,10]]
geo_locations = [[99,3],[100,3],[100,4],[99,4]]#la,long
polygon_indexes = [[0,0],[100,50],[30,70]]
polygon_new = polygon_pix2geo(polygon_indexes=polygon_indexes,pix_locations=pix_locations,geo_locations=geo_locations)
print(polygon_new)
|
[
"kajia@umich.edu"
] |
kajia@umich.edu
|
ccc0c33067aa23f9329f7727f8ce57f7f5cf29b1
|
fff24c6c6123e5e90ac2fae26536150449140c6d
|
/setup.py
|
0b74facb61fe0ac8600893175528a1d17392e7ab
|
[
"ISC"
] |
permissive
|
binaryf/demosys-py
|
83da9f9ddd8d1672413f89153012ab6bb7fae6ab
|
f11b09cb6502adfaa437c8cbe780039c49b72524
|
refs/heads/master
| 2020-03-22T16:30:16.767030
| 2018-07-24T11:19:22
| 2018-07-24T11:19:22
| 140,331,208
| 1
| 0
| null | 2018-07-09T19:12:49
| 2018-07-09T19:12:48
| null |
UTF-8
|
Python
| false
| false
| 1,375
|
py
|
from setuptools import setup
setup(
name="demosys-py",
version="1.0.4",
description="Modern OpenGL 3.3+ Framework inspired by Django",
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
url="https://github.com/Contraz/demosys-py",
author="Einar Forselv",
author_email="eforselv@gmail.com",
maintainer="Einar Forselv",
maintainer_email="eforselv@gmail.com",
packages=['demosys'],
include_package_data=True,
keywords = ['opengl', 'framework', 'demoscene'],
classifiers=[
'Programming Language :: Python',
'Environment :: MacOS X',
'Environment :: X11 Applications',
'Intended Audience :: Developers',
'Topic :: Multimedia :: Graphics',
'License :: OSI Approved :: ISC License (ISCL)',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Application Frameworks',
],
install_requires=[
'PyOpenGL==3.1.0',
'glfw==1.6.0',
'moderngl==5.3.0',
'pyrr==0.9.2',
'Pillow==5.1.0',
'pyrocket==0.2.7',
'PyWavefront==0.3.2',
# 'pygame==1.9.3',
],
entry_points={'console_scripts': [
'demosys-admin = demosys.core.management:execute_from_command_line',
]},
)
|
[
"eforselv@gmail.com"
] |
eforselv@gmail.com
|
bfdd2ab6551b6a573aaa983869d46156ea4d2fb7
|
eba98519fc6a54c2960618d0f51cbf065c2507b6
|
/Praktikum/Praktikum 7/QDateTime.py
|
caef0b9a04aa7dd98ed6fc36a357f4f5b2292d17
|
[] |
no_license
|
novianromadon/19104011-Novian_Dwi_Romadon-Pemrograman_GUI
|
b110c5be31ebf7ccc709d3cc2098fd78f97be21f
|
c056728c241a06bccd1e051e3dc437cccbf49515
|
refs/heads/main
| 2023-05-31T10:29:52.672493
| 2021-07-09T07:59:31
| 2021-07-09T07:59:31
| 358,150,225
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,998
|
py
|
import sys
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class MainForm(QWidget):
def __init__(self):
super().__init__()
self.setupUi()
def setupUi(self):
self.resize(400, 100)
self.move(300, 300)
self.setWindowTitle('Demo QDateTimeEdit')
self.dateLabel = QLabel('Tanggal')
self.dateEdit = QDateEdit()
self.dateEdit.setDisplayFormat('dddd dd/MM/yyyy')
self.dateEdit.setDate(QDate.currentDate())
self.timeLabel = QLabel('Waktu')
self.timeEdit = QTimeEdit()
self.timeEdit.setDisplayFormat('hh:mm')
self.timeEdit.setTime(QTime.currentTime())
self.dateTimeLabel = QLabel('Tanggal dan Waktu')
self.dateTimeEdit = QDateTimeEdit()
self.dateTimeEdit.setDisplayFormat('dddd dd/MM/yyyy hh:mm')
self.dateTimeEdit.setDateTime(QDateTime.currentDateTime())
self.okButton = QPushButton('&OK')
hbox = QHBoxLayout()
hbox.addStretch()
hbox.addWidget(self.okButton)
layout = QGridLayout()
layout.addWidget(self.dateLabel, 0, 0)
layout.addWidget(self.dateEdit, 0, 1)
layout.addWidget(self.timeLabel, 1, 0)
layout.addWidget(self.timeEdit, 1, 1)
layout.addWidget(self.dateTimeLabel, 2, 0)
layout.addWidget(self.dateTimeEdit, 2, 1)
layout.addLayout(hbox, 3, 0, 1, 2)
self.setLayout(layout)
self.okButton.clicked.connect(self.okButtonClick)
def okButtonClick(self):
QMessageBox.information(self, 'Informasi',
'Date: ' + self.dateEdit.date().toString() +
'\n' + 'Time: ' + self.timeEdit.time().toString() + '\n' +'Datetime: ' +
self.dateTimeEdit.dateTime().toString() + '\n')
if __name__ == '__main__':
a = QApplication(sys.argv)
form = MainForm()
form.show()
a.exec_()
|
[
"noreply@github.com"
] |
novianromadon.noreply@github.com
|
5a1800a557704e33d4f51badeae781b4ae00bcca
|
c3a01f8bcece48f94a347b92694f90227708f507
|
/pyvisa/testsuite/test_constants.py
|
8c5add8034b3b0c9c0686b60af1742adea537ea8
|
[
"MIT"
] |
permissive
|
panlun/pyvisa
|
e16a6cdaae47bc69d932538f14c62015d17be7ab
|
124c46bd2ad89e49031339d6181255c2808fecbc
|
refs/heads/master
| 2022-11-21T13:07:29.280849
| 2020-06-24T22:23:27
| 2020-06-24T22:23:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 649
|
py
|
# -*- coding: utf-8 -*-
"""Test objects from constants.
This file is part of PyVISA.
:copyright: 2019-2020 by PyVISA Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from pyvisa.constants import DataWidth
from . import BaseTestCase
class TestDataWidth(BaseTestCase):
def test_conversion_from_literal(self):
for v, e in zip(
(8, 16, 32, 64),
(DataWidth.bit_8, DataWidth.bit_16, DataWidth.bit_32, DataWidth.bit_64),
):
self.assertEqual(DataWidth.from_literal(v), e)
with self.assertRaises(ValueError):
DataWidth.from_literal(0)
|
[
"marul@laposte.net"
] |
marul@laposte.net
|
b066397fb9bc946e9a69c27740fce629d38265b4
|
6e3a397cea008e6d103c3df0d8ecebd73e625f5e
|
/857MinimumCostToHireKWorkers.py
|
b3fb07d05afd9ed4e2744cf7f3f7c5dbb7447aa1
|
[] |
no_license
|
mz-jobs/LeetCode
|
21aef007e44debb80baaba6b122cb9d7784476da
|
eae9e977106a136952cd21cf0de2662f00b6bd4c
|
refs/heads/master
| 2020-11-26T18:07:42.901772
| 2019-12-20T02:11:56
| 2019-12-20T02:11:56
| 229,168,713
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,951
|
py
|
import heapq
# quality = [3, 1, 10, 10, 1]
# wage = [4, 8, 2, 2, 7]
# K = 3
quality = [10, 20, 5]
wage = [70, 50, 30]
K = 2
# calculate wage/quality for each worker
# calculate proposed wage for every other worker
# select those that will take this wage
# if more or equal than K will take than return by sorted
def minCostToHire0(quality, wage, K):
h = [(w/q, i) for i, (q, w) in enumerate(zip(quality, wage))]
heapq.heapify(h)
print(h)
minCost = float('inf')
while h:
ratio = heapq.heappop(h)
proposed = [ratio[0]*q for q in quality]
selected = [p for i, p in enumerate(proposed) if p >= wage[i]]
print(ratio)
print('Proposed: ', proposed)
print('Selected: ', selected)
if(len(selected) >= K):
print('Cost: ', sum(sorted(selected)[:K]))
minCost = min(minCost, sum(sorted(selected)[:K]))
return minCost
def minCostToHire1(quality, wage, K):
minCost = float('inf')
for ratio in [w/q for q, w in zip(quality, wage)]:
proposed = [ratio*q for q in quality]
selected = [p for i, p in enumerate(proposed) if p >= wage[i]]
# print(ratio)
# print('Proposed: ', proposed)
# print('Selected: ', selected)
if(len(selected) >= K):
cost = sum(heapq.nsmallest(K, selected))
# print('Cost: ', cost)
if cost < minCost:
minCost = cost
return minCost
def minCostToHire(quality, wage, K):
cost = float('inf')
workers = [(w/q, q) for q, w in zip(quality, wage)]
heap = [] # heap will keep minimal K values
sumq = 0 # will keep a sum of heap
for ratio, q in sorted(workers):
heapq.heappush(heap, -q)
sumq += q
if len(heap) > K:
# will subtract because q was negated in insertion
sumq += heapq.heappop(heap)
if len(heap) == K:
cost = min(cost, sumq*ratio)
return cost
print('Result: ', minCostToHire(quality, wage, K))
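# Sanity check for the inputs above: quality=[10, 20, 5], wage=[70, 50, 30],
# K=2 gives 105.0 -- worker 0's wage/quality ratio of 7 is the paid rate and
# workers 0 and 2 (total quality 15) are hired.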
|
[
"mateusz.zarkowski@starburstdata.com"
] |
mateusz.zarkowski@starburstdata.com
|
dddef1b6727b5842f1ec2e9af0d392ed0c77866a
|
1c18dd1a1ffaa107d7b7539989d2855a205621be
|
/tables.py
|
76c1f15c44e3aa5f8d4455b763eb2c0a9a482acc
|
[] |
no_license
|
mertse7en/kafka-to-bigquery
|
a104bef210baeb1c52f224555fa8615b13f9f9b1
|
e5d9ecf11eb98de2973c779f3133fcaef2a141a4
|
refs/heads/master
| 2023-03-29T17:04:36.076720
| 2021-03-31T10:49:20
| 2021-03-31T10:49:20
| 353,313,912
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,064
|
py
|
# table_schema = [
# {'name':'Attributed Touch Type', 'type':'STRING'},
# {'name':'Attributed Touch Time', 'type':'STRING'},
# {'name':'Install Time', 'type':'TIMESTAMP'},
# {'name':'Event Time', 'type':'TIMESTAMP'},
# {'name':'Event Name', 'type':'STRING'},
# {'name':'Event Value', 'type':'STRING'},
# {'name':'Event Revenue', 'type':'STRING'},
# {'name':'Event Revenue Currency', 'type':'STRING'},
# {'name':'Attributed', 'type':'STRING'},
# {'name':'Attributed', 'type':'STRING'},
# {'name':'Attributed', 'type':'STRING'},
# {'name':'Attributed', 'type':'STRING'},
# {'name':'Attributed', 'type':'STRING'},
# {'name':'Attributed', 'type':'STRING'},
# {'name':'Attributed', 'type':'STRING'},
# {'name':'Attributed', 'type':'STRING'},
# {'name':'Attributed', 'type':'STRING'},
# {'name':'Attributed', 'type':'STRING'},
# ]
schema = [
{'name':'name','type':'STRING'},
{'name':'surname', 'type':'STRING'},
{'name':'age', 'type':'INT64'},
{'name':'date', 'type':'DATE'},
]
|
[
"mertse7en@hotmail.com"
] |
mertse7en@hotmail.com
|
9c966306cb14e900ddb0fe333aef456dc990cd86
|
20854911d21af4061fb8508998f9e6221805fa11
|
/compasssudoku/util.py
|
0350f8e270cc274ec463fce0a507bdc1235c9e05
|
[
"MIT"
] |
permissive
|
corrodedHash/compass-sudoku-z3
|
4df43bb79e5b8143b6efae8a7b09e353d6cbc428
|
62105ce76e1965da48944d98eb7eaac16ac313b0
|
refs/heads/master
| 2020-09-11T08:15:29.459647
| 2020-02-27T14:12:12
| 2020-02-27T14:12:12
| 222,002,117
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,663
|
py
|
"""Contains utility"""
import enum
from typing import Tuple, List, Optional
Coord2D = Tuple[int, int]
class CardinalDirection(enum.Enum):
"""Cardinal directions"""
north = enum.auto()
east = enum.auto()
south = enum.auto()
west = enum.auto()
DIRECTION_MAP = {
CardinalDirection.north: (0, -1),
CardinalDirection.east: (1, 0),
CardinalDirection.south: (0, 1),
CardinalDirection.west: (-1, 0),
}
ORTHOGONAL_DIRECTION_MAP = {
CardinalDirection.north: (1, 0),
CardinalDirection.east: (0, 1),
CardinalDirection.south: (-1, 0),
CardinalDirection.west: (0, -1),
}
def in_bounds(cell: Coord2D, bounds: Coord2D) -> bool:
"""Check that coordinate is in [0, bounds)"""
return cell[0] >= 0 and cell[0] < bounds[0] and cell[1] >= 0 and cell[1] < bounds[1]
def add_coords(summand1: Coord2D, summand2: Coord2D, bounds: Optional[Coord2D] = None) -> Coord2D:
"""Add two tuples"""
result = (summand1[0] + summand2[0], summand1[1] + summand2[1])
if bounds is not None:
result = (result[0] % bounds[0], result[1] % bounds[1])
return result
def get_direction_cells(
origin: Coord2D, dimensions: Coord2D, direction: CardinalDirection
) -> List[Coord2D]:
"""List all cells in the given direction"""
result = []
current = add_coords(origin, DIRECTION_MAP[direction])
while in_bounds(current, dimensions):
result.append(current)
current = add_coords(current, ORTHOGONAL_DIRECTION_MAP[direction], dimensions)
if current[0] == origin[0] or current[1] == origin[1]:
current = add_coords(current, DIRECTION_MAP[direction])
return result
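# Example (illustrative): from origin (1, 1) on a 3x3 grid,
# get_direction_cells((1, 1), (3, 3), CardinalDirection.north)
# sweeps the row above the origin and returns [(1, 0), (2, 0), (0, 0)].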
|
[
"lukas.woelfer@rwth-aachen.de"
] |
lukas.woelfer@rwth-aachen.de
|
de359021284051a02d242a0b4f474b162f719d09
|
b2f6d5c7ecc1f38e6ba72e6888377a390adbff2a
|
/mysite/settings.py
|
25e38aee41cee359d023b9bbfbfa0c53561e9aa6
|
[] |
no_license
|
DanielDeLeon5/Final
|
2402bf892bd0e918de4e767af4550417dffaf575
|
9db84c2436ab53e55e59586579c0cfcfbfc3bb65
|
refs/heads/master
| 2020-09-12T20:41:12.156660
| 2019-11-18T21:26:28
| 2019-11-18T21:26:28
| 222,547,912
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,184
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mtuj+ry=@vi&h&9l8y*77^h2h)e*8d(kz@l_myk$1c##uin!1&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'colegio',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'es-es'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"danieldleon5@outlook.com"
] |
danieldleon5@outlook.com
|
c15c664c78d7bb3445a5100274c4956b98fa0822
|
992fdae00c35edfc55b7b9217d3416e23a41e32f
|
/django課程/urexpenses/urexpenses/asgi.py
|
0c441247f979b289639046f618ad83ab34e36949
|
[] |
no_license
|
kinqston34/django_web
|
de5fb44126894b8154b2a579c926a02521aab9ea
|
c9f7bb9465d130bcebae6e53ba163e6ae2b44e3e
|
refs/heads/main
| 2023-08-06T19:19:38.751897
| 2021-09-21T07:49:20
| 2021-09-21T07:49:20
| 408,726,221
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
ASGI config for urexpenses project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'urexpenses.settings')
application = get_asgi_application()
|
[
"aa37741867@gmail.com"
] |
aa37741867@gmail.com
|
fde7b2f385fccb968d25eec83d170caed70787a3
|
1c2402d3e736dc250a0a26b3f293d0f4b61656b2
|
/src/msgvan/msgvan.py
|
fd2ecabe1ceb99e6d85ebd0a8aeb40bdfe21b53b
|
[
"Apache-2.0"
] |
permissive
|
sabaini/msgvan
|
31ca002b1790720a5fd3c25107ef2b2244fc51d2
|
e921b993654a03ed671296ed3e88eb9f3f24ff79
|
refs/heads/main
| 2022-12-28T21:31:26.290011
| 2020-10-10T18:14:17
| 2020-10-10T18:32:07
| 301,180,081
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,525
|
py
|
"""The msgvan module
Has a factory function for message vans and a base class for messages to be sent
"""
import logging
import logging.handlers
import attr
from msgvan.thespmessages import (
AddSubMessage,
DelSubMessage,
HandlerMessage,
MessageWrapper,
StatusRequest,
)
from thespian.actors import ActorSystem
ACTORSYS = "multiprocTCPBase"
log = logging.getLogger(__name__)
log.addHandler(logging.handlers.SysLogHandler(address="/dev/log"))
logcfg = {"version": 1, "loggers": {__name__: {"level": logging.WARNING}}}
def van(name="default", addr=None, capabilities=None, leader=False, verbose=False):
"""Construct a message van
:param name: van name
:param addr: ipaddr:port where we want to connect to, defaults to local
:param capabilities: additional capabilities
:param leader: Make this msgvan system the leader
:return: MessageVan object
"""
if leader:
caps = {"msgvan-leader": leader}
else:
caps = {}
if addr is not None:
caps["Convention Address.IPv4"] = (addr, 1900)
if capabilities is not None:
caps.update(capabilities)
if verbose:
logcfg["loggers"][__name__]["level"] = logging.DEBUG
log.setLevel(logging.DEBUG)
asys = ActorSystem(ACTORSYS, capabilities=caps, logDefs=logcfg)
van = MessageVan(name, asys)
log.debug("Started van %s, actor sys %s, caps %s", van, asys, caps)
return van
class MessageVan:
"""MessageVan: entry point for message vans
Creates a thespian actorsys, and a MessageVanActor
Handles subscriptions and receives messages for publication
"""
def __init__(self, name, actor_sys):
self.name = name
self.actor_sys = actor_sys
self.van_actor = actor_sys.createActor(
"msgvan.actors.MessageVanActor",
globalName="msgvan-{}".format(name),
)
def publish(self, msg):
"""Publish a message
:param msg:
:return: None
"""
log.debug("Publish %s", msg)
self.actor_sys.tell(self.van_actor, MessageWrapper(msg))
def subscribe_handler(self, name, handler, topic_pat):
"""Subscribe a function to topic
:param handler: A handler function which will receive matching messages
:param topic_pat: regexp topic pattern
:return: a subscription identifier (actor address)
"""
log.debug("Subscribe handler to %s", topic_pat)
act = self.actor_sys.createActor("msgvan.actors.SubscriptionActor")
self.actor_sys.tell(act, HandlerMessage(handler))
self.subscribe_actor(name, act, topic_pat)
def subscribe_actor(self, name, actor, topic_pat):
"""Subscribe an actor to topic
:param actor: Thespian actor
:param topic_pat: regexp pattern
:return: None
"""
log.debug("Subscribe actor %s to %s", actor, topic_pat)
self.actor_sys.tell(self.van_actor, AddSubMessage(name, topic_pat, actor))
def unsubscribe(self, name):
"""Unsubscribe the named actor or function"""
log.debug("Unsubscribe %s", name)
self.actor_sys.tell(self.van_actor, DelSubMessage(name))
def status(self):
"""Return the subscribers registered"""
log.debug("Statusrequest")
return self.actor_sys.ask(self.van_actor, StatusRequest())
@attr.s
class Message:
"""Message to be published
Consists of a payload and a topic
"""
payload = attr.ib()
topic = attr.ib(factory=set)
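# Illustrative usage sketch (an assumption, not shipped with the module): it
# presumes the thespian actor system can start on this host and uses a made-up
# "orders.*" topic, with print as a trivial handler.
if __name__ == "__main__":
    demo_van = van(name="demo", verbose=True)
    demo_van.subscribe_handler("printer", print, r"orders.*")
    demo_van.publish(Message(payload={"id": 1}, topic={"orders.new"}))
    print(demo_van.status())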
|
[
"peter.sabaini@canonical.com"
] |
peter.sabaini@canonical.com
|
4f8b1b66f46d84dc3ff522025003878e745fb9f0
|
e71bc93bab1d97959bf97998ee349e431f9c18f3
|
/dag/weather_feeds_dag.py
|
2159c28ba3686ac38491bb51df49e49e3b2cf335
|
[] |
no_license
|
Techfitlab/weather_feeds
|
7bb83f280a6563051d4772dc091fee91d21b47bf
|
98dc6b96cd113e2e32fee40ae664628d8b34fed6
|
refs/heads/master
| 2022-12-20T22:10:48.394560
| 2020-09-26T16:24:43
| 2020-09-26T16:24:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,740
|
py
|
import datetime as dt
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
def notify_script1():
print('Writing in file')
with open('/path/to/your/repo/weather_feeds/log/notification.txt', 'a+', encoding='utf8') as f:
now = dt.datetime.now()
t = now.strftime("%Y-%m-%d %H:%M")
f.write(str(t) + '\n')
f.write(str('Weather Script 1 Data is Updated') + '\n')
return 'Data is updated'
def notify_script2():
print('Writing in file')
with open('/path/to/your/repo/weather_feeds/log/notification.txt', 'a+', encoding='utf8') as f:
now = dt.datetime.now()
t = now.strftime("%Y-%m-%d %H:%M")
f.write(str(t) + '\n')
f.write(str('Weather Script 2 Data is Updated') + '\n')
return 'Data is updated'
default_args = {
'owner': 'airflow',
'start_date': dt.datetime(2020, 9, 19, 10, 00, 00),
'concurrency': 1,
'retries': 0
}
with DAG('weather_data_feeds_dag',
default_args=default_args,
schedule_interval='@hourly',
) as dag:
opr_script1 = BashOperator(task_id='run_report1',
bash_command='/path/to/your/repo/weather_feeds/bin/run-script1.sh ')
opr_notify1 = PythonOperator(task_id='notify_script1',
python_callable=notify_script1)
opr_script2 = BashOperator(task_id='run_report2',
bash_command='/path/to/your/repo/weather_feeds/bin/run-script2.sh ')
opr_notify2 = PythonOperator(task_id='notify_script2',
python_callable=notify_script2)
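    # Linear task chain: report 1 -> its notification -> report 2 -> its notification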
opr_script1 >> opr_notify1 >> opr_script2 >> opr_notify2
|
[
"noreply@github.com"
] |
Techfitlab.noreply@github.com
|
82bf956dd3c287298797333382139077cbb943dd
|
b09e6cbd019c4f2002ba0c34caaa4506132c1d6b
|
/FINAL/3. Sentiment Analysis/SA_Ensemble_Models_Training_Olga.py
|
ae2ad7c3950eba41e5766d4e0258e8e2dc0013fb
|
[
"MIT"
] |
permissive
|
o0oBluePhoenixo0o/AbbVie2017
|
935be251170722443a13602e5ae635b0128bf286
|
356583487455ba2616457f8a59ca741321c0b154
|
refs/heads/master
| 2021-09-24T17:32:02.676494
| 2018-10-12T12:49:04
| 2018-10-12T12:49:04
| 83,968,797
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,132
|
py
|
# coding: utf-8
# In[3]:
#Download the libraries
import nltk
import re
from sklearn.metrics import confusion_matrix
import pandas as pd
import numpy as np
import csv
import os
import matplotlib.pyplot as plt
import sys
import time
import random
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import svm
from sklearn.metrics import classification_report
from sklearn import tree
from sklearn.naive_bayes import MultinomialNB
from sklearn import model_selection
from mlxtend.classifier import StackingClassifier
# In[4]:
#Set the directory
os.chdir("/Users/imacair/Desktop/Products3/")
# In[5]:
#Read the file
data= pd.read_csv('Final_Manual_3006.csv',
encoding='latin-1',delimiter=',')
# In[6]:
#Converts text into ASCII
data.message = data.message.str.encode('ascii','replace')
# In[7]:
sample_data= data
# In[8]:
data_t=sample_data["message"]
# In[9]:
#lowercase
data_t = data_t.str.lower()
# In[10]:
data_s=sample_data["sentiment"]
# In[11]:
np.unique(data_s)
# In[12]:
#Recode sentiment labels: 1/2 -> -1 (negative), 3/4 -> 1 (positive), N/n -> 0 (neutral)
final = data
res5= pd.DataFrame( index=range(0,len(data_t)),columns = {'new_sent'} )
res5[(final.sentiment==u'2')] = '-1'
res5[(final.sentiment==u'1')] = '-1'
res5[(final['sentiment']==u'3')] = '1'
res5[(final['sentiment']==u'4')] = '1'
res5[(final['sentiment']==u'N')] = '0'
res5[(final['sentiment']==u"n")] = '0'
final = pd.concat([final, res5], axis=1)  # axis=1 appends the new_sent column (axis=2 is invalid for DataFrames)
# In[13]:
np.unique(final.new_sent)
# In[14]:
#Abbriviation translation
with open('abbrev.csv', mode='r') as infile:
reader = csv.reader(infile)
replacement = {rows[0].lower():rows[1].lower() for rows in reader
}
# In[15]:
result = pd.DataFrame()
result = final
# In[16]:
for i in range(len(result)):
data_t.values[i]=' '.join([replacement.get(w, w) for w in data_t.values[i].split()])
# In[17]:
#lowercase
data_t = data_t.str.lower()
#Remove urls
data_t= data_t.str.replace(r'(http.*) |(http.*)$|\n', "",)
#Remove twitter handles
data_t = data_t.str.replace(r"@\\w+", "")
#remove htmls
data_t = data_t.str.replace(r'<.*?>', "")
#Remove citations
data_t = data_t.str.replace(r'@[a-zA-Z0-9]*', "")
# In[18]:
#Spliting the data
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.model_selection import KFold
# In[19]:
data_train, data_test, label_train, label_test = train_test_split(data_t, final.new_sent, test_size=0.03, random_state=2340)
# In[21]:
#Vectorization
vectorizer = TfidfVectorizer( sublinear_tf=True,
use_idf=True,stop_words = 'english')
train_vectors = vectorizer.fit_transform(data_train)
test_vectors = vectorizer.transform(data_test)
# In[424]:
#Save vectorizer
from sklearn.externals import joblib
joblib.dump(vectorizer, 'vec.pkl')
# In[22]:
# Perform classification with SVM, kernel=linear
classifier_linear = svm.SVC(C=0.6, kernel='linear')
t0 = time.time()
classifier_linear.fit(train_vectors, label_train)
t1 = time.time()
prediction_linear = classifier_linear.predict(test_vectors)
t2 = time.time()
time_linear_train = t1-t0
time_linear_predict = t2-t1
# In[23]:
print("Results for SVC(kernel=linear)")
print("Training time: %fs; Prediction time: %fs" % (time_linear_train, time_linear_predict))
print(classification_report(label_test, prediction_linear))
confusion_matrix(label_test, prediction_linear)
# In[24]:
# Perform classification with LinearSVC (liblinear)
classifier_liblinear = svm.LinearSVC(C=0.8)
t0 = time.time()
classifier_liblinear.fit(train_vectors, label_train)
t1 = time.time()
prediction_liblinear = classifier_liblinear.predict(test_vectors)
t2 = time.time()
time_liblinear_train = t1-t0
time_liblinear_predict = t2-t1
# In[25]:
# Perform classification with Multinomial Naïve Bayes.
classifier_nb = MultinomialNB(alpha=0.9)
t0 = time.time()
classifier_nb.fit(train_vectors, label_train)
t1 = time.time()
prediction_nb = classifier_nb.predict(test_vectors)
t2 = time.time()
time_nb_train = t1-t0
time_nb_predict = t2-t1
# In[26]:
print("Results for SVC(kernel=linear)")
print("Training time: %fs; Prediction time: %fs" % (time_linear_train, time_linear_predict))
print(classification_report(label_test, prediction_linear))
print("Results for LinearSVC()")
print("Training time: %fs; Prediction time: %fs" % (time_liblinear_train, time_liblinear_predict))
print(classification_report(label_test, prediction_liblinear))
print("Results for MultinomialNB()")
print("Training time: %fs; Prediction time: %fs" % (time_nb_train, time_nb_predict))
print(classification_report(label_test, prediction_nb))
# In[27]:
label_tests = np.asarray(label_test)
# In[28]:
df=[prediction_linear, prediction_liblinear, prediction_nb,label_tests]
# In[29]:
df = pd.DataFrame(df)
df = df.transpose()
df.columns = ['prediction_linear', 'prediction_liblinear', 'prediction_nb','label_tests']
#df
# In[30]:
from sklearn import model_selection
from mlxtend.classifier import StackingClassifier
#Stacking ensembling
lr = classifier_linear
sclf = StackingClassifier(classifiers=[classifier_liblinear, classifier_nb],
meta_classifier=lr)
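# mlxtend's StackingClassifier first fits the base classifiers, then trains the
# meta-classifier (here the linear-kernel SVC) on their predicted labels.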
print('3-fold cross validation:\n')
for clf, label in zip([classifier_liblinear,classifier_nb,sclf],
['linear_svm',
'multi_naive',
'StackingClassifier']):
scores = model_selection.cross_val_score(clf, train_vectors, label_train,
cv=10, scoring='accuracy')
print("Accuracy: %0.2f (+/- %0.2f) [%s]"
% (scores.mean(), scores.std(), label))
# In[31]:
sclf.fit(train_vectors, label_train)
# In[446]:
#Save the model
from sklearn.externals import joblib
joblib.dump(sclf, 'stacking.pkl')
# In[32]:
#Predictions of the stacking model
prediction_sclf =sclf.predict(test_vectors)
# In[33]:
#Convert to np arrays
label_tests = np.asarray(label_test)
#Create a Data Frame
df=[ prediction_linear, prediction_liblinear,prediction_nb, prediction_sclf]
df = pd.DataFrame(df)
df = df.transpose()
df.columns = ['prediction_linear', 'prediction_liblinear','prediction_nb','staking']
df
# In[269]:
#Convert to np arrays
label_tests = np.asarray(label_test)
#Create a Data Frame
df=[ prediction_linear, prediction_liblinear,prediction_nb]
df = pd.DataFrame(df)
df = df.transpose()
df.columns = ['prediction_linear', 'prediction_liblinear','prediction_nb']
df
# In[34]:
# ROC curve
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import LabelBinarizer
def multiclass_roc_auc_score(truth, pred, average="macro"):
lb = LabelBinarizer()
lb.fit(truth)
truth = lb.transform(truth)
pred = lb.transform(pred)
return roc_auc_score(truth, pred, average=average)
# In[35]:
multiclass_roc_auc_score(label_test, prediction_linear, average="macro")
# In[36]:
multiclass_roc_auc_score(label_test, prediction_liblinear, average="macro")
# In[37]:
multiclass_roc_auc_score(label_test, prediction_nb, average="macro")
# In[38]:
multiclass_roc_auc_score(label_test, prediction_sclf, average="macro")
# In[ ]:
|
[
"noreply@github.com"
] |
o0oBluePhoenixo0o.noreply@github.com
|
dfc119e744be40778ca5cd17e33454a7d7001076
|
fd18ce27b66746f932a65488aad04494202e2e0d
|
/day34/farms/categories/categories/categories/pipelines.py
|
dba029c921f259cfcbba84dba0b24d192b7fa697
|
[] |
no_license
|
daofeng123/ClassCodes
|
1acbd843836e550c9cebf67ef21dfca9f6b9fc87
|
fbcd1f24d79b8bb56ad0669b07ad118064609612
|
refs/heads/master
| 2020-06-24T12:34:28.148197
| 2019-08-15T03:56:40
| 2019-08-15T03:56:40
| 198,963,469
| 3
| 0
| null | 2019-07-26T06:53:45
| 2019-07-26T06:53:44
| null |
UTF-8
|
Python
| false
| false
| 558
|
py
|
# -*- coding: utf-8 -*-
import json
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from categories.dbs.redismq import RedisMQ
class CategoriesPipeline(object):
redis_mq = RedisMQ()
def process_item(self, item, spider):
        # Do some simple JSON formatting
        content = json.dumps(dict(item), ensure_ascii=False)
        # Push the crawl task onto the queue
self.redis_mq.push_task(content)
return item
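# To activate this pipeline, register it in the project's settings.py, e.g.:
# ITEM_PIPELINES = {'categories.pipelines.CategoriesPipeline': 300}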
|
[
"38395870@qq.com"
] |
38395870@qq.com
|
e63fced98376b8b111bf2c2ad5eed5d9dad939a1
|
c17bf882ae045d38afd4d5092c0acaff43440f08
|
/src/python/structbox.py
|
ce4a3dc795ec10a994e972794b9abf870bfef0f5
|
[
"Apache-2.0"
] |
permissive
|
jstnlef/structbox
|
9a50a01e0d1ea8baa6cdeb3554e48d14703c094f
|
6d751a616a6c3c3a5ad69855d43473239e2b8475
|
refs/heads/master
| 2020-04-02T07:23:38.501747
| 2016-07-07T01:18:56
| 2016-07-07T01:18:56
| 60,471,965
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 700
|
py
|
from sys import platform
from cffi import FFI
def initialize_dynamic_lib():
if platform.startswith('darwin'):
prefix = 'lib'
ext = 'dylib'
elif platform.startswith('win32'):
prefix = ''
ext = 'dll'
elif platform.startswith('linux'):
prefix = 'lib'
ext = 'so'
else:
raise RuntimeError("OS Platform not supported.")
ffi = FFI()
ffi.cdef('''
size_t lru_cache(size_t);
bool LruCache_contains_key(int);
void* LruCache_new(size_t);
size_t LruCache_len(void*);
''')
return ffi.dlopen("../../target/release/{}structbox.{}".format(prefix, ext))
structbox = initialize_dynamic_lib()
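# Minimal usage sketch under the assumption that the Rust cdylib above has been
# built; the cache semantics live in the library, so the values shown are only
# what one would expect from a fresh LRU cache.
if __name__ == "__main__":
    cache = structbox.LruCache_new(10)    # create a cache with capacity 10
    print(structbox.LruCache_len(cache))  # presumably 0 for a fresh cache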
|
[
"jstnlefebvre@gmail.com"
] |
jstnlefebvre@gmail.com
|
6b8ed011ec4b222f6da0db0cb561594982347b23
|
97de77d0fffb2aa3e52e74685b4b0037a32d8ae7
|
/mp4_renamer.py
|
fe61976d9954989e082d3e897875c93028776d84
|
[] |
no_license
|
mpagels/mp4_renamer
|
a71ca5f945faebce3742d45e18a5c956f0ff39fe
|
599feff276eed2376b23302faf50b3a2eb492813
|
refs/heads/master
| 2020-08-09T06:46:01.050859
| 2019-10-09T23:18:12
| 2019-10-09T23:18:12
| 214,025,388
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,551
|
py
|
import os
import shutil
def start():
    # Config
    # List holding the question prompts
frage = [
"[1] - Vorne was hinzufügen - XXXXDateiName.mp4" ,
"[2] - Hinten was angängen - DateiNameXXXX.mp4",
"[3] - Vorne und hinten was anhängen - XXXFDateiNameXXX.mp4"
]
frage2 = "Eingabe 1 / 2 / 3 : "
    # Output: show the questions to the user
print("Wo soll der Dateiname erweitert werden? : ")
print()
for zeile in frage:
print(zeile)
print()
    # Read the user's choice of task based on the question above
x = True
while x:
try:
user_eingabe = int(input(frage2))
if (user_eingabe > 0 and user_eingabe < 4):
x = False
else:
print("Fehler! Bitte 1 / 2 / 3 eingeben!")
except ValueError:
print("Fehler! Keine Zahl eingeben.")
    # Return the user's choice
return user_eingabe
def eingabe(zahl):
    # Config for the eingabe() function
frage_vorne = "Was soll vorne angehängt werden? : "
frage_hinten = "Was soll hinten angehängt werden? : "
    # If the user entered 1:
if zahl == 1:
        # Ask the user what should be prepended
vorne = input(frage_vorne)
        # Walk the directory this script was started from
        for datei in os.listdir():
            # Guard: if the user added files after a previous run, make sure
            # already-renamed files are not renamed again
if not datei.startswith(vorne):
                # Make sure only .mp4 files are renamed
if datei.endswith(".mp4"):
                    # Rename the file
shutil.move(datei, vorne + datei)
elif zahl == 2:
hinten = input(frage_hinten)
for datei in os.listdir():
if not datei[:-4].endswith(hinten):
if datei.endswith(".mp4"):
shutil.move(datei, datei[:-4] + hinten + datei[-4:])
elif zahl == 3:
vorne = input(frage_vorne)
hinten = input(frage_hinten)
for datei in os.listdir():
if datei.endswith(".mp4"):
shutil.move(datei, vorne + datei[:-4] + hinten + datei[-4:])
user_eingabe = start()
eingabe(user_eingabe)
|
[
"noreply@github.com"
] |
mpagels.noreply@github.com
|
06623f252070f66e9280fe42ee2ffbe9322fc85e
|
28b4243e031772102433bc951a75ff04b2d6e51f
|
/src/main/dimention_reduction.py
|
5fb87c3e9eaaa4ee0ac077e991e3cdcb39cef8f8
|
[] |
no_license
|
mana-ysh/gaussian-embedding
|
6ffb8389e58cc21dc580da0b642a506b0963b961
|
1bfa3ad1d132074a8a4903a333c4d009e760f26c
|
refs/heads/master
| 2020-03-11T14:27:46.106834
| 2018-04-23T12:34:01
| 2018-04-23T12:34:01
| 130,055,158
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,689
|
py
|
import argparse
import bhtsne
import matplotlib.pylab as plt
import numpy as np
import sklearn.base
from sklearn.decomposition import PCA
import sys
sys.path.append('../')
from models.gaussian_bilinear_model import GaussianBilinearModel
from utils.vocab import Vocab
class BHTSNE(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):
def __init__(self, dimensions=2, perplexity=30.0, theta=0.5, rand_seed=-1):
self.dimensions = dimensions
self.perplexity = perplexity
self.theta = theta
self.rand_seed = rand_seed
def fit_transform(self, x):
return bhtsne.tsne(
x.astype(np.float64), dimensions=self.dimensions, perplexity=self.perplexity, theta=self.theta,
rand_seed=self.rand_seed)
def visualize(embeds, id2word):
assert len(id2word) == embeds.shape[0]
print('converting vectors into 2-dimension...')
embeds_2d = tsne(embeds)
print('plotting...')
plt.scatter(embeds_2d[:, 0], embeds_2d[:, 1])
for w, x, y in zip(id2word, embeds_2d[:, 0], embeds_2d[:, 1]):
plt.annotate(w, xy=(x, y), xytext=(0, 0), textcoords='offset points')
plt.savefig('out.png')
def tsne(embeds):
tsne_model = BHTSNE()
# inter_embeds = PCA(n_components=15).fit_transform(embeds)
return tsne_model.fit_transform(embeds)
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument('--model')
p.add_argument('--out')
# p.add_argument('--entity')
args = p.parse_args()
print('preparation...')
m = GaussianBilinearModel.load_model(args.model)
# v = Vocab.load(args.entity)
embeds = tsne(m.entity_mu)
np.savetxt(args.out, embeds)
|
[
"manabe.hitoshi.me0@is.naist.jp"
] |
manabe.hitoshi.me0@is.naist.jp
|
5360d61f104aa7008b4b9781d2455b9f9f7b96ad
|
b27509662b15ab4ae8d7ee1ce274b60b82eddbb9
|
/Lab6/DDPG/ddpg_PBT.py
|
2cc00e8297705e83432a59bfbf3a5542f15550f2
|
[] |
no_license
|
kuolunwang/Deep-Learning-and-Practice
|
4bf22f73a568a079537ea19bc9bd4e4ccff35b44
|
1bd0c089e7f4075b0dd0970b1833d914f54f6ee6
|
refs/heads/main
| 2023-06-14T06:29:03.537905
| 2021-07-14T10:28:33
| 2021-07-14T10:28:33
| 351,057,326
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,235
|
py
|
'''Implement PBT in DDPG'''
__author__ = 'chengscott'
__copyright__ = 'Copyright 2020, NCTU CGI Lab'
import argparse
from collections import deque
import itertools
import random
import time
import os
import json
from functools import partial
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import ray
from ray import tune
from ray.tune.schedulers import PopulationBasedTraining
class GaussianNoise:
def __init__(self, dim, mu=None, std=None):
self.mu = mu if mu else np.zeros(dim)
self.std = std if std else np.ones(dim) * .1
def sample(self):
return np.random.normal(self.mu, self.std)
class ReplayMemory:
__slots__ = ['buffer']
def __init__(self, capacity):
self.buffer = deque(maxlen=capacity)
def __len__(self):
return len(self.buffer)
def append(self, *transition):
# (state, action, reward, next_state, done)
self.buffer.append(tuple(map(tuple, transition)))
def sample(self, batch_size, device):
'''sample a batch of transition tensors'''
transitions = random.sample(self.buffer, batch_size)
return (torch.tensor(x, dtype=torch.float, device=device)
for x in zip(*transitions))
class ActorNet(nn.Module):
def __init__(self, state_dim=8, action_dim=2, hidden_dim=(400, 300)):
super().__init__()
self.fc1 = nn.Linear(state_dim, hidden_dim[0])
self.fc2 = nn.Linear(hidden_dim[0], hidden_dim[1])
self.fc3 = nn.Linear(hidden_dim[1], action_dim)
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
out = self.tanh(self.fc3(x))
return out
class CriticNet(nn.Module):
def __init__(self, state_dim=8, action_dim=2, hidden_dim=(400, 300)):
super().__init__()
h1, h2 = hidden_dim
self.critic_head = nn.Sequential(
nn.Linear(state_dim + action_dim, h1),
nn.ReLU(),
)
self.critic = nn.Sequential(
nn.Linear(h1, h2),
nn.ReLU(),
nn.Linear(h2, 1),
)
def forward(self, x, action):
x = self.critic_head(torch.cat([x, action], dim=1))
return self.critic(x)
class DDPG:
def __init__(self, args, config, checkpoint_dir):
# behavior network
self._actor_net = ActorNet().to(args.device)
self._critic_net = CriticNet().to(args.device)
# target network
self._target_actor_net = ActorNet().to(args.device)
self._target_critic_net = CriticNet().to(args.device)
# initialize target network
self._target_actor_net.load_state_dict(self._actor_net.state_dict())
self._target_critic_net.load_state_dict(self._critic_net.state_dict())
# optimizer
self._actor_opt = optim.Adam(self._actor_net.parameters(), lr=config["lr"])
self._critic_opt = optim.Adam(self._critic_net.parameters(), lr=config["lr"])
# action noise
self._action_noise = GaussianNoise(dim=2)
# memory
self._memory = ReplayMemory(capacity=args.capacity)
self.checkpoint_dir = checkpoint_dir
if self.checkpoint_dir:
self.load(self.checkpoint_dir)
## config ##
self.device = args.device
self.batch_size = config['batch_size']
self.tau = args.tau
self.gamma = args.gamma
def select_action(self, state, noise=True):
'''based on the behavior (actor) network and exploration noise'''
with torch.no_grad():
action = self._actor_net(torch.from_numpy(state).view(1, -1).to(self.device)).cpu().numpy().squeeze()
if(noise):
action += self._action_noise.sample()
return action
def append(self, state, action, reward, next_state, done):
self._memory.append(state, action, [reward / 100], next_state,
[int(done)])
def update(self):
# update the behavior networks
self._update_behavior_network(self.gamma)
# update the target networks
self._update_target_network(self._target_actor_net, self._actor_net,
self.tau)
self._update_target_network(self._target_critic_net, self._critic_net,
self.tau)
def _update_behavior_network(self, gamma):
actor_net, critic_net, target_actor_net, target_critic_net = self._actor_net, self._critic_net, self._target_actor_net, self._target_critic_net
actor_opt, critic_opt = self._actor_opt, self._critic_opt
# sample a minibatch of transitions
state, action, reward, next_state, done = self._memory.sample(
self.batch_size, self.device)
## update critic ##
# critic loss
q_value = critic_net(state, action)
with torch.no_grad():
a_next = target_actor_net(next_state)
q_next = target_critic_net(next_state, a_next)
q_target = reward + gamma * q_next * (1 - done)
criterion = nn.MSELoss()
critic_loss = criterion(q_value, q_target)
# optimize critic
actor_net.zero_grad()
critic_net.zero_grad()
critic_loss.backward()
critic_opt.step()
## update actor ##
# actor loss
action = actor_net(state)
actor_loss = -critic_net(state, action).mean()
# optimize actor
actor_net.zero_grad()
critic_net.zero_grad()
actor_loss.backward()
actor_opt.step()
@staticmethod
def _update_target_network(target_net, net, tau):
'''update target network by _soft_ copying from behavior network'''
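        # i.e. theta_target <- tau * theta_behavior + (1 - tau) * theta_target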
for target, behavior in zip(target_net.parameters(), net.parameters()):
target.data.copy_(target.data * (1 - tau) + behavior.data * tau)
def save(self, episode):
with tune.checkpoint_dir(episode) as checkpoint_dir:
self.checkpoint_dir = os.path.join(checkpoint_dir, "checkpoint")
torch.save((
self._actor_net.state_dict(),
self._critic_net.state_dict(),
), self.checkpoint_dir)
    def save_best(self, path):
        torch.save((
            self._actor_net.state_dict(),
            self._critic_net.state_dict(),
        ), path)  # write the best model to the requested path
def load(self, path):
actor, critic = torch.load(path)
self._actor_net.load_state_dict(actor)
self._critic_net.load_state_dict(critic)
def train(args, env, agent, writer, config):
print('Start Training')
total_steps = 0
ewma_reward = 0
for episode in range(config["episode"]):
total_reward = 0
state = env.reset()
for t in itertools.count(start=1):
# select action
if total_steps < args.warmup:
action = env.action_space.sample()
else:
action = agent.select_action(state)
# execute action
next_state, reward, done, _ = env.step(action)
# store transition
agent.append(state, action, reward, next_state, done)
if total_steps >= args.warmup:
agent.update()
state = next_state
total_reward += reward
total_steps += 1
if done:
ewma_reward = 0.05 * total_reward + (1 - 0.05) * ewma_reward
writer.add_scalar('Train/Episode Reward', total_reward,
total_steps)
writer.add_scalar('Train/Ewma Reward', ewma_reward,
total_steps)
print(
'Step: {}\tEpisode: {}\tLength: {:3d}\tTotal reward: {:.2f}\tEwma reward: {:.2f}'
.format(total_steps, episode, t, total_reward,
ewma_reward))
break
if(((episode + 1) % 100 ) == 0) or ((episode + 1) == config["episode"]):
re_mean = test(args, env, agent, writer)
tune.report(reward_mean=re_mean, )
agent.save(episode)
env.close()
def test(args, env, agent, writer):
print('Start Testing')
seeds = (args.seed + i for i in range(10))
rewards = []
for n_episode, seed in enumerate(seeds):
total_reward = 0
env.seed(seed)
state = env.reset()
for t in itertools.count(start=1):
#env.render()
# select action
action = agent.select_action(state, noise=False)
# execute action
next_state, reward, done, _ = env.step(action)
state = next_state
total_reward += reward
if(done):
writer.add_scalar('Test/Episode Reward', total_reward, n_episode)
print("total reward : {0:.2f}".format(total_reward))
rewards.append(total_reward)
break
print('Average Reward', np.mean(rewards))
env.close()
return np.mean(rewards)
def DDPG_BPT(config, args, env, checkpoint_dir=None):
agent = DDPG(args, config, checkpoint_dir)
writer = SummaryWriter(args.logdir)
train(args, env, agent, writer, config)
def main():
## arguments ##
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-d', '--device', default='cuda')
parser.add_argument('-m', '--model', default='ddpg_PBT.pth')
parser.add_argument('--logdir', default='log/ddpg')
parser.add_argument('--name', default='ddpg_best_config')
# train
parser.add_argument('--warmup', default=10000, type=int)
# parser.add_argument('--episode', default=1200, type=int)
# parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--capacity', default=500000, type=int)
# parser.add_argument('--lra', default=1e-3, type=float)
# parser.add_argument('--lrc', default=1e-3, type=float)
parser.add_argument('--gamma', default=.99, type=float)
parser.add_argument('--tau', default=.005, type=float)
# test
parser.add_argument('--test_only', action='store_true')
parser.add_argument('--render', action='store_true')
parser.add_argument('--seed', default=20200519, type=int)
args = parser.parse_args()
## main ##
env = gym.make('LunarLanderContinuous-v2')
if(args.test_only):
with open(args.name + ".json") as fp:
config = json.load(fp)
writer = SummaryWriter(args.logdir)
best_model = os.path.join(os.getcwd(), args.model)
agent = DDPG(args, config, best_model)
test(args, env, agent, writer)
else:
# defined hyperparameter
        config = {
            "lr" : tune.loguniform(1e-4, 1e-3),
            "batch_size" : tune.sample_from(lambda _: 2 ** np.random.randint(4, 7)),
            # give "episode" an initial value; PBT then mutates it via
            # hyperparam_mutations below (train() reads config["episode"])
            "episode" : tune.choice(list(range(1000, 2001))),
            }
pbt = PopulationBasedTraining(
time_attr="training_iteration",
metric="reward_mean",
mode="max",
perturbation_interval=100,
hyperparam_mutations={
"episode" : tune.choice([x for x in range(1000, 2001)])
})
analysis = tune.run(
partial(DDPG_BPT, args=args, env=env),
name="ddpg_PBT",
scheduler=pbt,
stop={
"reward_mean": 290,
"training_iteration": 30,
},
config=config,
resources_per_trial={"cpu": 4, "gpu": 1},
num_samples=10,
)
best_trial = analysis.get_best_trial("reward_mean", "max", "last-5-avg")
print("Best config", best_trial.config)
with open(args.name + ".json", 'w') as fp:
json.dump(best_trial.config, fp)
best_checkpoint_dir = os.path.join(best_trial.checkpoint.value, "checkpoint")
agent = DDPG(args, best_trial.config, best_checkpoint_dir)
writer = SummaryWriter(args.logdir)
test(args, env, agent, writer)
agent.save_best(args.model)
if __name__ == '__main__':
main()
|
[
"kuolunwang.ee08@nycu.edu.tw"
] |
kuolunwang.ee08@nycu.edu.tw
|
0d34a54609bdbacefa44b936368d3ff2f9852728
|
f1d50419d5ba32938bb48c6ef503ce698f569872
|
/code/wii.py
|
e18aa5e1ad653fdc208df91ebfe9526738a6da45
|
[] |
no_license
|
DFNOsorio/Force_Platform
|
f2c0aa2e5950a69778cb50b9fa5871d7133fd334
|
a778d00c9051b613e14d43fd5987c0ce16cf4177
|
refs/heads/master
| 2021-01-22T11:36:44.984159
| 2016-09-23T16:08:04
| 2016-09-23T16:08:04
| 68,393,860
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,218
|
py
|
from WII_Config import *
def converter(rtl, rtr, rbl, rbr):
[converted_tl, converted_tr, converted_bl, converted_br] = all_2_kilo([rtl, rtr, rbl, rbr],
[TOP_LEFT, TOP_RIGHT, BOTTOM_LEFT,
BOTTOM_RIGHT],
calibration_matrix_adjusted)
[converted_tl_a, converted_tr_a, converted_bl_a, converted_br_a] = all_2_converting([converted_tl, converted_tr,
converted_bl, converted_br],
[TOP_LEFT, TOP_RIGHT,
BOTTOM_LEFT, BOTTOM_RIGHT])
return [converted_tl_a, converted_tr_a, converted_bl_a, converted_br_a]
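# raw_to_kilos below applies a two-segment linear calibration per corner: one
# line fitted between the 0 kg and 17 kg reference readings, a second between
# the 17 kg and 37.5 kg readings, with negative results clamped to 0.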
def raw_to_kilos(raw_data_point, corner, matrix):
converted = []
x_0_17 = matrix[0][corner] * 1.0
y_0_17 = 0.0
x_1_17 = matrix[1][corner] * 1.0
y_1_17 = 17.0
x_0_37 = x_1_17
y_0_37 = y_1_17
x_1_37 = matrix[2][corner] * 1.0
y_1_37 = 37.5
cte_17 = ((x_1_17 * y_0_17 - x_0_17 * y_1_17) / (x_1_17 - x_0_17))
cte_37 = ((x_1_37 * y_0_37 - x_0_37 * y_1_37) / (x_1_37 - x_0_37))
# m_17 = (17*1.0) / (matrix[1][corner] - matrix[0][corner]*1.0)
# b_17 = 17 - m_17 * matrix[1][corner] * 1.0
# m_37 = (37.5 * 1.0 - 17.0) / (matrix[2][corner] - matrix[1][corner] * 1.0)
# b_37 = 37.5 - m_37 * matrix[2][corner] * 1.0
for i in range(0, len(raw_data_point)):
if raw_data_point[i] <= matrix[1][corner]:
value = raw_data_point[i] * ((y_1_17 - y_0_17) / (x_1_17 - x_0_17)) + cte_17
else:
value = raw_data_point[i] * ((y_1_37 - y_0_37) / (x_1_37 - x_0_37)) + cte_37
if value < 0:
value = 0
converted.append(value)
return converted
def all_2_kilo(raw_vectors, corners, matrix):
output = []
for i in range(0, len(raw_vectors)):
output.append(raw_to_kilos(raw_vectors[i], corners[i], matrix))
return output
def scaler(kg_vector, corner):
x_0_17 = 0
y_0_17 = 0.0
x_1_17 = Scale_16[4]
y_1_17 = Scale_16[corner] * 1.0
x_0_37 = x_1_17
y_0_37 = y_1_17
x_1_37 = Scale_25[4]
y_1_37 = Scale_25[corner] * 1.0
cte_17 = ((x_1_17 * y_0_17 - x_0_17 * y_1_17) / (x_1_17 - x_0_17))
cte_37 = ((x_1_37 * y_0_37 - x_0_37 * y_1_37) / (x_1_37 - x_0_37))
converted = []
for i in range(0, len(kg_vector)):
if kg_vector[i] <= Scale_16[4]:
value = kg_vector[i] + kg_vector[i] * ((y_1_17 - y_0_17) / (x_1_17 - x_0_17)) + cte_17
else:
value = kg_vector[i] + kg_vector[i] * ((y_1_37 - y_0_37) / (x_1_37 - x_0_37)) + cte_37
if value < 0:
value = 0
converted.append(value)
return converted
def all_2_converting(raw_vectors, corners):
output = []
for i in range(0, len(raw_vectors)):
output.append(scaler(raw_vectors[i], corners[i]))
return output
|
[
"vdosavh@gmail.com"
] |
vdosavh@gmail.com
|
14137630f521a0b7ae73c3c12a6089944fcf3c98
|
ae0f90383fd4317e29e5ade4be50f825eeed0d71
|
/Greedy/구명보트/jungmin.py
|
b91e4d1e1c3db9ae0a7843ff18c9d3350b671348
|
[
"MIT"
] |
permissive
|
YAPP-18th/Study_Algorithm_Team_3
|
281b3641ab6cd35d7c4e809980aa1932f91de097
|
bab515c68bfb46b65611ce9fd6742824db4f2ba2
|
refs/heads/master
| 2023-04-20T23:43:14.490384
| 2021-05-07T19:50:05
| 2021-05-07T19:50:05
| 342,540,718
| 0
| 6
|
MIT
| 2021-05-07T19:42:17
| 2021-02-26T10:26:15
|
Python
|
UTF-8
|
Python
| false
| false
| 540
|
py
|
from collections import deque
def solution(people, limit):
answer = 0
people.sort()
queue = deque(people)
while queue:
if len(queue) >= 2:
            # The heaviest person boards the boat alone
if queue[0] + queue[-1] > limit:
queue.pop()
answer += 1
else:
queue.pop()
queue.popleft()
answer += 1
else:
if queue[0] <= limit:
queue.pop()
answer += 1
return answer
|
[
"gjwjdlas@sookmyung.ac.kr"
] |
gjwjdlas@sookmyung.ac.kr
|
9e47150a76fdb7ee18bebc782085fa849b35d399
|
6c5c052058262588cd665d7fb629f4a2fcd8b743
|
/src/mdscripts_dmunozg/__init__.py
|
299de9ed1b8dcad571a01da84537024d04755b39
|
[
"MIT"
] |
permissive
|
dmunozg/personal_scripts
|
e34193bb93c06d0e9733e9088be6b7415f501b9a
|
35fc8bb404ae30db86a88738ed15d712051e8738
|
refs/heads/main
| 2023-04-20T16:10:31.329636
| 2023-04-18T23:49:32
| 2023-04-18T23:49:32
| 366,151,890
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,086
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import matplotlib.pyplot as plt
import numpy as np  # used by xvg_to_dataframe (np.loadtxt)
import pandas as pd
from scipy import interpolate
class Function:
"""This class loads a function from a spread of sorted data.
It has the 'eval' method, which evaluates the function on any number as long as it is in the function domain. For now it only uses a linear interpolation to calculate the result.
It must be initialized with a tuple of X and Y data lists. e.g.:
testFunction = Function( (pointsListX, pointsListY))"""
def __init__(self, plotData, interpolator="linear"):
self.interpolator = interpolator
self.xDataPoints = []
self.yDataPoints = []
self.xDataPoints, self.yDataPoints = plotData[0], plotData[1]
        # Generate the cubic-spline representation
if interpolator == "spline":
self.splineRepresentation = interpolate.splrep(self.xDataPoints, self.yDataPoints)
def eval(self, xValue):
        # Check whether the value lies outside the domain
if not min(self.xDataPoints) < xValue < max(self.xDataPoints):
print("ERROR: trying to evaluate outside domain")
return False
        # Check whether it coincides with one of the data points
index = 0
while index < len(self.xDataPoints):
if xValue == self.xDataPoints[index]:
return self.yDataPoints[index]
index += 1
        # If no value coincides, interpolate.
if self.interpolator == "linear":
return self.linear_interpol(xValue)
elif self.interpolator == "spline":
return self.spline_interpol(xValue)
else:
print("ERROR: Unknown interpolator")
return False
def linear_interpol(self, xValue):
"""Linear interpolation method, the interpolators must be written in such a way that they only need one X value to produce one Y value."""
        # Find the values x0 and x1 closest to xValue, and their respective
        # images y0 and y1
index = 1
while index < len(self.xDataPoints):
if self.xDataPoints[index] > xValue:
x0 = self.xDataPoints[index - 1]
y0 = self.yDataPoints[index - 1]
x1 = self.xDataPoints[index]
y1 = self.yDataPoints[index]
break
else:
index += 1
continue
return y0 + (xValue - x0) * (y1 - y0) / (x1 - x0)
def spline_interpol(self, xValue):
return interpolate.splev(xValue, self.splineRepresentation)
def show(self, xLabel=None, yLabel=None):
"""Plots the contained function"""
fig, ax = plt.subplots()
ax.plot(self.xDataPoints, self.yDataPoints)
ax.set(xlabel=xLabel, ylabel=yLabel)
ax.grid()
plt.show()
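# Example with the default linear interpolator:
# Function(([0, 1, 2], [0, 1, 4])).eval(1.5) returns 2.5, halfway between the
# images of the bracketing points (1, 1) and (2, 4).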
def clean_gromacs_garbage(path=os.getcwd()):
"""Deletes backups left by Gromacs"""
garbagePattern = re.compile(r"#([\w\d.]+)#")
for file in os.listdir(path):
if garbagePattern.match(file):
os.remove(os.path.join(path, file))
print(os.path.join(path, file), "removed")
def get_overlap(function1, function2):
"""Receives two Function objects, and returns the lower and upper bounds where the two domains overlap. Useful for generating a third function from two."""
if min(function1.xDataPoints) < min(function2.xDataPoints):
xMin = min(function2.xDataPoints)
else:
xMin = min(function1.xDataPoints)
if max(function1.xDataPoints) < max(function2.xDataPoints):
xMax = max(function1.xDataPoints)
else:
xMax = max(function2.xDataPoints)
return [xMin, xMax]
def calculate_enthalpy_plot(lowTempFunc, highTempFunc, deltaTemp, nPoints=200):
"""From two free energy functions at different temperatures produce an enthalpy function for the same process."""
xLowLimit, xHighLimit = get_overlap(lowTempFunc, highTempFunc)
deltaX = (xHighLimit - xLowLimit) / nPoints
xValues = []
enthalpyValues = []
currentX = xLowLimit
while currentX <= xHighLimit:
currentX += deltaX
xValues.append(currentX)
enthalpyValues.append(
-(highTempFunc.eval(currentX) - lowTempFunc.eval(currentX)) / deltaTemp
)
return Function([xValues, enthalpyValues])
def show_umbrella_plot(profileFilename, histogramFilename):
"""Displays the profile graph and histograms on the same graph. Useful to determine if the calculation is missing windows."""
figure = plt.figure()
histogramsData = parseXVG(histogramFilename)
histoPlot = figure.add_subplot(111)
for histogramNum in range(1, len(histogramsData)):
histoPlot.fill_between(
histogramsData[0], 0, histogramsData[histogramNum], color="grey", alpha=0.35
)
histoPlot.set_xlabel("Distance from bilayer center [nm]")
histoPlot.set_ylabel("Population")
profileData = parseXVG(profileFilename)
profilePlot = figure.add_subplot(111, sharex=histoPlot, frameon=False)
profilePlot.plot(profileData[0], profileData[1])
profilePlot.yaxis.tick_right()
profilePlot.yaxis.set_label_position("right")
profilePlot.set_ylabel("Mean force potential [kj/mol]")
profilePlot.grid()
plt.show()
def generate_tpr_list_file(path, tprListFile="tpr_files.dat"):
"""Generates a tpr_files.dat file which contains every tpr file in the directory. Useful for umbrella sampling with GROMACS."""
windowsList = []
pattern = re.compile(r"umbrella([\w.]+).gro")
for file in os.listdir(path):
if pattern.match(file):
windowsList.append(pattern.findall(file)[0])
try:
os.remove(path + tprListFile)
except:
print("No previous tpr file found")
outputFile = open(path + tprListFile, "w+")
for window in windowsList:
print("umbrella" + window + ".tpr", file=outputFile)
outputFile.close()
def generate_pullf_list_file(path, pullfListFile="pullf_files.dat"):
"""Generates a pullf_files.dat file which contains every pullf file in the directory. Useful for umbrella sampling with GROMACS."""
windowsList = []
pattern = re.compile(r"umbrella([\w.]+).gro")
for file in os.listdir(path):
if pattern.match(file):
windowsList.append(pattern.findall(file)[0])
try:
os.remove(path + pullfListFile)
    except OSError:
        print("No previous pullf list found")
outputFile = open(path + pullfListFile, "w+")
for window in windowsList:
print("pullf_umbrella" + window + ".xvg", file=outputFile)
outputFile.close()
def list_finished_runs(path=os.getcwd()):
windowsList = []
pattern = re.compile(r"umbrella([\w.]+).gro")
for file in os.listdir(path):
if pattern.match(file):
windowsList.append(pattern.match(file)[1])
return windowsList
def xvg_to_dataframe(xvgFilename):
"""Returns a dataframe from a XVG file. The filename of the XVG file needs to be provided"""
# Transformar el archivo xvg en un dataFrame
xvgArray = np.loadtxt(xvgFilename, comments=["#", "@"])
xvgDataFrame = pd.DataFrame(xvgArray)
xvgDataFrame = xvgDataFrame.set_index(0)
# Buscar el nombre de las columnas en el metadato del archivo xvg
columnNames = []
if len(xvgDataFrame.columns) == 1:
columnNamePattern = re.compile(r"@[\s]+title\s\"([\w]+)")
else:
columnNamePattern = re.compile(r"@\ss\d\slegend\s\"([\w\s]+)")
xvgFileData = open(xvgFilename, "r")
while len(columnNames) < (len(xvgDataFrame.columns)):
line = xvgFileData.readline()
if line.startswith("#"):
continue
elif line.startswith("@"):
if columnNamePattern.match(line):
columnNames.append(columnNamePattern.findall(line)[0])
else:
xvgFileData.close()
columnNames = [str(i + 1) for i in range(len(xvgDataFrame.columns))]
break
xvgFileData.close()
xvgDataFrame.columns = columnNames
return xvgDataFrame
|
[
"diego.munoz.g@ug.uchile.cl"
] |
diego.munoz.g@ug.uchile.cl
|
6a3ef821de3e6230d254b961f69eee75586ab627
|
36e0d381ac6688a11c88faa6da7befd4b9f1ff4a
|
/DataDiffFull/parsers/utility.py
|
16c582307beeb62c8ac908eec5ba823f2c57c52b
|
[] |
no_license
|
r0nharley/DataDriven21
|
cdcb8d8b99512cdf3f82e6ed52a5bb91f6a9a30a
|
4f99023b927a14c2b5743cb0ed0066691d1dcb1b
|
refs/heads/master
| 2023-05-24T12:08:28.568909
| 2021-06-09T22:03:38
| 2021-06-09T22:03:38
| 375,474,993
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,652
|
py
|
def get_span_value(span_value):
"""
Return the integer value of how many rows/columns a rowspan/colspan value represents
:param span_value: the value of the rowspan/colspan html attribute
:return: integer value of the attribute; returns 1 if the value is not a number
"""
return int(span_value) if is_number(span_value) else 1
def clean_string(string):
"""
Removes all extra spaces from a string
:param string: string to be cleaned
:return: cleaned string
"""
clean = string.strip()
    while clean.find('  ') > 0:  # collapse runs of spaces, two at a time
        clean = clean.replace('  ', ' ')
return clean
def get_value(value_string):
"""
Converts a string containing a value to its numerical representation
:param value_string: String to be converted
:return: Numerical value of passed in parameter
"""
original_value_string = value_string
value_string = value_string.upper()
sequence_to_clean = ['$', '%', ',', ' ']
for s in sequence_to_clean:
value_string = value_string.replace(s, '')
multipliers = {'K': 1000, 'M': 1000000, 'B': 1000000000, 'T': 1000000000000}
multiplier = 1
for m in multipliers:
if value_string.endswith(m):
multiplier = multipliers[m]
value_string = value_string[:-1]
return float(value_string) * multiplier if is_number(value_string) else original_value_string
def is_number(value):
"""
Returns whether or not a value is a number
:param value: string to evaluate
:return: true if number otherwise false
"""
try:
float(value)
return True
except:
return False
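if __name__ == "__main__":
    # quick sanity checks for the parsing helpers
    print(get_value("$1,234"))    # 1234.0
    print(get_value("1.5M"))      # 1500000.0
    print(get_span_value("3"), get_span_value("foo"))  # 3 1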
|
[
"rharley77@gmail.com"
] |
rharley77@gmail.com
|
8569ada425a7a6d84818ba286f86eeec28fcdbaa
|
73a06c33e686f7e01055fdf8d7c69eca901fe40d
|
/round473/even_odd.py
|
5d140fcae4222105f5151464cab711124119f34e
|
[] |
no_license
|
hashkanna/codeforces
|
cb8eee2d19e354b32b84208920c8a91995da050f
|
fc56cb9caaa52aac157ba9e4b717c13d8f51ce1f
|
refs/heads/master
| 2021-06-27T10:37:01.906835
| 2019-01-16T15:55:06
| 2019-01-16T15:55:06
| 112,921,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 71
|
py
|
n=int(input())
if n%2==0:
print('Mahmoud')
else:
print('Ehab')
|
[
"hashkanna@gmail.com"
] |
hashkanna@gmail.com
|
8a6af64bda1660fee7c263541b1c3e8425df645e
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/request/AlipayOpenMiniInstantiateQueryRequest.py
|
2e24f412ecf6d93999b8fecab84c0b693832af9e
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,979
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayOpenMiniInstantiateQueryModel import AlipayOpenMiniInstantiateQueryModel
class AlipayOpenMiniInstantiateQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayOpenMiniInstantiateQueryModel):
self._biz_content = value
else:
self._biz_content = AlipayOpenMiniInstantiateQueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.open.mini.instantiate.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
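# Minimal usage sketch; the biz_content field name below is hypothetical, and
# the setter converts a plain dict via AlipayOpenMiniInstantiateQueryModel.from_alipay_dict.
if __name__ == "__main__":
    req = AlipayOpenMiniInstantiateQueryRequest()
    req.biz_content = {"mini_app_id": "2021000000000000"}  # hypothetical field name
    req.terminal_type = "APP"
    print(req.get_params())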
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
620f1406e617e247e382133e5f540ee19c5b62d4
|
d4318fc739de6cc17c74dcca3a0cd8ee9e238c34
|
/slack_bot/PythonScripts/greensheet_response.py
|
c7069a371d6525ebf44de8dd448308ee22cd9980
|
[] |
no_license
|
surgoku/CMPE273-Project
|
ac173ee551ae1b5e12a4ca4c688ab94a657cff13
|
1e4933dbc201ba43b8a91bc5be37c4843b14fda0
|
refs/heads/original_changes
| 2021-01-23T08:34:45.761778
| 2017-05-16T00:21:47
| 2017-05-16T00:21:47
| 102,538,589
| 0
| 1
| null | 2017-09-05T23:14:16
| 2017-09-05T23:14:16
| null |
UTF-8
|
Python
| false
| false
| 2,572
|
py
|
import spacy
from SpellCheck import SpellCheckResponse
import MySQLdb
import enchant
from checkGreensheetAttribute import checkSubjectCode
import getColumnandTableName
d = enchant.Dict("en_US")
nlp = spacy.load('en')
def DB_Response(command,gloBalVariable):
db = MySQLdb.connect(host="127.0.0.1", # your host, usually localhost
user="greensheet", # your username
passwd="root1234", # your password
db="slackbot") # name of the data base
cur = db.cursor()
i = 0
flag = 0
mainAttributes = []
responseAttributes = []
doc1 = nlp(command.decode("utf-8"))
for word in doc1:
if word.pos_ == "NOUN":
if i <3:
print "input mainAtrr"
mainAttributes.append(word)
elif i > 2:
print "Input response Att"
responseAttributes.append(word)
i = i + 1
checkGreensheet = checkSubjectCode(mainAttributes)
if checkGreensheet == False:
return "Hello There I cannot find the GreenSheet you are seeking for try seaching for someother GreenSheet."
for word in responseAttributes:
print "Checking spell check response"
if SpellCheckResponse(word) == False:
print "found an spelling error"
temp = str(word)
tem_str=d.suggest(temp)
return "Hey I see There Is something wrong with the Spelling you provided. Do you mean " + str(tem_str) + " instead of "+ "'"+ str(word)+"'"
    flag = 1  # every response attribute passed the spell check
    if flag == 1:
print "Finally got every thing All right"
tempColoumn, table = getColumnandTableName.getColumnAndTableName(responseAttributes)
if tempColoumn == None:
return "Hey I cannot fetch the result for your query, please try re-phrashing the question"
print tempColoumn
query = "SELECT " + str(tempColoumn) + " FROM " + str(table) +" where subject_code=" + "'" + str(mainAttributes[0]) + "' and section_name =" + "'" + str(mainAttributes[1]) + "' and subject_term=" + "'" + str(mainAttributes[2]) + "';"
print query
cur.execute(query)
response = cur.fetchall()
return str(response).replace("(","").replace(")","")
else:
print "Test from Hello"
response = 'Hello There there, Try asking me something from your greesheet For Example: cmpe273 section2 spring,2017, who is the instructor?'
return response
cur.close()
db.close()
|
[
"noreply@github.com"
] |
surgoku.noreply@github.com
|
5396d59485edcffb1060921d5fc348209d891fe0
|
b13a326c8aac68f72c71169187a4aa8d4fe1438f
|
/environment/envs/icra.py
|
eaa3aafeecc909022ff8d9a459423e63e37e2866
|
[] |
no_license
|
zy10zm/Pulsar
|
9f1d9abdf90d94e80c6dba2a02630bfe4b4e2115
|
714ee2d78577e59077af7c0f890e639879490eb8
|
refs/heads/master
| 2023-02-22T20:26:42.995175
| 2021-01-23T04:35:38
| 2021-01-23T04:35:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,882
|
py
|
import os, sys
import numpy as np
import logging
from copy import deepcopy
from mujoco_worldgen import Floor
from mujoco_worldgen import ObjFromXML
from mujoco_worldgen.util.sim_funcs import qpos_idxs_from_joint_prefix, qvel_idxs_from_joint_prefix
from environment.worldgen.battlefield import Battlefield
from environment.worldgen.builder import WorldBuilder
from environment.worldgen.core import WorldParams
from environment.worldgen.env import Env
from environment.module.agents import Agents
from environment.wrappers.util_w import DiscardMujocoExceptionEpisodes, DiscretizeActionWrapper, AddConstantObservationsWrapper, ConcatenateObsWrapper
from environment.wrappers.lidar import Lidar
from environment.wrappers.multi_agent import SplitMultiAgentActions, SplitObservations, SelectKeysWrapper
from environment.wrappers.line_of_sight import AgentAgentObsMask2D
from environment.wrappers.buff import BuffWrapper
from environment.wrappers.collision import CollisionWrapper
from environment.wrappers.health import HealthWrapper
from environment.wrappers.prep import PrepWrapper
from environment.wrappers.projectile import ProjectileWrapper
from environment.wrappers.outcome import OutcomeWrapper
from environment.wrappers.no_enter_zone import NoEnterZoneWrapper
from environment.objects.lidarsites import LidarSites
class IcraBase(Env):
'''
Icra base environment.
Args:
horizon (int): Number of steps agent gets to act
n_substeps (int): Number of mujoco simulation steps per outer environment time-step
n_agents (int): number of agents in the environment
mjco_ts (float): seconds for one mujoco simulation step
action_lims (float tuple): lower and upper limit of mujoco actions
deterministic_mode (bool): if True, seeds are incremented rather than randomly sampled.
meshdir (string): directory for meshes
texturedir (string): directory for textures
set_action (function): function for setting actions
env_no (int): number for environment file
'''
def __init__(self, horizon=250, n_substeps=3, n_agents=2, mjco_ts=0.002,
action_lims=(-200.0, 200.0), deterministic_mode=False,
meshdir="assets/stls", texturedir="assets/texture",
set_action=None,
env_no=1, **kwargs):
super().__init__(get_sim=self._get_sim,
get_obs=self._get_obs,
action_space=tuple(action_lims),
horizon=horizon,
set_action=set_action,
deterministic_mode=deterministic_mode)
self.env_no = env_no
self.mjco_ts = mjco_ts
self.n_agents = n_agents
self.metadata['n_actors'] = n_agents
self.horizon = horizon
self.n_substeps = n_substeps
self.kwargs = kwargs
self.modules = []
self.meshdir = meshdir
self.texturedir = texturedir
self.placement_size = (8080, 4480)
def add_module(self, module):
self.modules.append(module)
def _get_obs(self, sim):
'''
Loops through modules, calls their observation_step functions, and
adds the result to the observation dictionary.
'''
obs = {}
for module in self.modules:
obs.update(module.observation_step(self, self.sim))
return obs
def _get_sim(self, seed):
'''
Calls build_world_step and then modify_sim_step for each module. If
a build_world_step failed, then restarts.
'''
world_params = WorldParams(size=(self.placement_size[0], self.placement_size[1], 100),
num_substeps=self.n_substeps)
successful_placement = False
failures = 0
while not successful_placement:
if (failures + 1) % 10 == 0:
logging.warning(f"Failed {failures} times in creating environment")
builder = WorldBuilder(world_params, self.meshdir, self.texturedir, seed, env_no=self.env_no)
battlefield = Battlefield()
builder.append(battlefield)
self.placement_grid = np.zeros((self.placement_size[0], self.placement_size[1]))
successful_placement = np.all([module.build_world_step(self, battlefield, self.placement_size)
for module in self.modules])
failures += 1
sim = builder.get_sim()
for module in self.modules:
module.modify_sim_step(self, sim)
return sim
def get_ts(self):
return self.t
def get_horizon(self):
return self.horizon
def secs_to_steps(self, secs):
return int(secs / (self.mjco_ts * self.n_substeps))
def make_env(deterministic_mode=False, n_agents=4, env_no=1, add_bullets_visual=False):
'''
Response time = 0.02 seconds
Game time = 180 seconds
Decisions = 180 / 0.02 = 9000
Total steps = 9000
Seconds per simulated step = 0.002 seconds
Seconds for each run = 9000 * 0.002 = 18 seconds
'''
mjco_ts = 0.002
n_substeps = 1
horizon = 90000
# Setup action functions
motor_trans_max, motor_forw_max, motor_z_max = 2000.0, 3000.0, 47123.9
action_scale = (motor_trans_max, motor_forw_max, motor_z_max)
action_lims = (-1.0, 1.0)
def icra_ctrl_set_action(sim, action):
"""
For velocity actuators it copies the action into mujoco ctrl field.
"""
if sim.model.nmocap > 0:
_, action = np.split(action, (sim.model.nmocap * 7, ))
if sim.data.ctrl is not None:
for a_idx in range(n_agents):
for as_idx in range(3):
sim.data.ctrl[a_idx*3 + as_idx] = action[a_idx*3 + as_idx] * action_scale[as_idx]
# Create base environment for battlefield
env = IcraBase(n_agents=n_agents,
n_substeps=n_substeps,
horizon=horizon,
mjco_ts=mjco_ts,
action_lims=action_lims,
deterministic_mode=deterministic_mode,
env_no=env_no,
set_action=icra_ctrl_set_action,
meshdir=os.path.join(os.getcwd(), "environment", "assets", "stls"),
texturedir=os.path.join(os.getcwd(), "environment", "assets", "textures"))
# Add bullets just for visualization
nbullets = 25
env.add_module(Agents(n_agents, action_scale=action_scale, add_bullets_visual=add_bullets_visual, nbullets=nbullets))
env.reset()
# PrepWrapper must always be on-top
env = PrepWrapper(env)
env = BuffWrapper(env)
env = CollisionWrapper(env)
env = ProjectileWrapper(env, add_bullets_visual, nbullets)
env = NoEnterZoneWrapper(env)
# OutcomeWrapper must always be lowest, after HealthWrapper
env = HealthWrapper(env)
env = OutcomeWrapper(env)
keys_self = ['agent_qpos_qvel']
global_obs = ['F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'Agent:buff', 'colli_dmg',
'proj_dmg', 'nprojectiles', 'agents_health', 'agent_teams',
'agent_local_qvel']
keys_external = deepcopy(global_obs)
keys_copy = deepcopy(global_obs)
keys_mask_self = []
keys_mask_external = []
env = SplitMultiAgentActions(env)
#env = DiscretizeActionWrapper(env, 'action_movement')
env = SplitObservations(env, keys_self + keys_mask_self, keys_copy=keys_copy)
env = DiscardMujocoExceptionEpisodes(env)
env = SelectKeysWrapper(env, keys_self=keys_self,
keys_external=keys_external,
keys_mask=keys_mask_self + keys_mask_external,
flatten=False)
return env
|
[
"impeccableaslan@gmail.com"
] |
impeccableaslan@gmail.com
|
80e5a620a51115228c9c48f190d3fcad02c8679d
|
ed0fbabfdb575969de233d184fa8e4b9c913ef39
|
/Form_Project/Form_Project/settings.py
|
16fdd43a8a17ca422179596e87af7816cc7c40ad
|
[] |
no_license
|
sourabhjoshi220/Django_Codes
|
6ef762d168437d8122763a28c8c67006e01786bf
|
cc2b2b3960c754a25c15723495d75120def723e5
|
refs/heads/master
| 2020-07-03T09:50:47.913169
| 2019-08-30T14:00:19
| 2019-08-30T14:00:19
| 201,870,892
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,387
|
py
|
"""
Django settings for Form_Project project.
Generated by 'django-admin startproject' using Django 2.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR=os.path.join(BASE_DIR,'templates')
STATIC_DIR=os.path.join(BASE_DIR,'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'hd9q@ab$1c&910z(h4041&s5=6cpe%hrev$tf3h^(f%cgtw9qi'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'WebApp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Form_Project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Form_Project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS=[
STATIC_DIR,
]
|
[
"noreply@github.com"
] |
sourabhjoshi220.noreply@github.com
|
eecdc90449ea3bbc47e90548ca8004f0872498f7
|
ac03d9f3a8c2e6209940ae30900e9b2e32084dce
|
/main.py
|
9ef8df5eafeff0357882459573d9ee1b460c71e4
|
[
"Apache-2.0"
] |
permissive
|
cls1991/github-projects-remover
|
29f28e0a23b596a7e07b0c07b65092626b42de05
|
d924100fedccbb0fd6e20365d4f4df98bf04b292
|
refs/heads/master
| 2022-12-11T12:31:59.498180
| 2019-10-23T14:22:14
| 2019-10-23T14:22:14
| 84,054,255
| 0
| 0
|
Apache-2.0
| 2019-10-23T14:22:16
| 2017-03-06T09:25:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,042
|
py
|
# coding: utf8
import os
# Switch the working directory to the project root
project = os.path.split(os.path.realpath(__file__))[0]
os.chdir(project)
from core.github import GithubSample
if __name__ == '__main__':
gs = GithubSample('8709c9b9d01ec8e7388378c3992eff61aa7df813')
# pretty_print(gs.query_api_info())
# pretty_print(gs.query_user_info('cls1991'))
# pretty_print(gs.query_user_repos('cls1991'))
# print(gs.star_repo('torvalds', 'linux'))
"""
star all forked repos, then remove all, for personal use!
"""
user_repos = gs.query_user_repos('cls1991', page=1, per_page=50)
# pretty_print(user_repos)
for repo in user_repos:
if repo['fork']:
repo_info = gs.query_repo_info('cls1991', repo['name'])
if 'source' not in repo_info:
continue
status_code = gs.star_repo(repo_info['source']['owner']['login'], repo['name'])
print(status_code)
if status_code == 204:
gs.remove_repo('cls1991', repo['name'])
|
[
"tzk19910406@hotmail.com"
] |
tzk19910406@hotmail.com
|
5a940509727ba2a3009b1c0b9d1f9080d6eb4205
|
ea9e01715df603ff767395a9d51182d4103b1a2f
|
/pdf_generator.py
|
ec7df6d443f7853e203611fad1d21a03c6791e16
|
[] |
no_license
|
kmutreja/Creator
|
1bc5dd9eea8d265862fc2c913cd39a8a612cdf9c
|
44b4c95162dad9296a24965600122841488dd927
|
refs/heads/main
| 2023-05-07T02:33:06.721774
| 2021-06-04T14:23:46
| 2021-06-04T14:23:46
| 373,863,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,179
|
py
|
import io
from reportlab.lib.pagesizes import A4
from reportlab.pdfgen import canvas
from reportlab.platypus import Table, TableStyle
from reportlab.lib.enums import TA_CENTER
from reportlab.lib import colors
from PyPDF2 import PdfFileReader, PdfFileWriter
from reportlab.lib.units import inch
import random
import string
'''
data_dict = {
"text":["x_co","y_co","width","height","font_size","font_weight","rotation","color"]
}
'''
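# Example entry with hypothetical values (a full layout appears in the
# commented-out call at the bottom of this file):
# {"Audience": [360, 376, 2, 10, 20, "Times-Bold", 0, "black"]}
# i.e. draw "Audience" at (360, 376), 2 inches wide, height 10, font size 20,
# Times-Bold, no rotation, in black.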
def editPDF(data_dict):
packet = io.BytesIO()
can = canvas.Canvas(packet, pagesize=A4)
for key, value in data_dict.items():
can.saveState()
can.rotate( value[-2] )
table= Table([[key]], colWidths = value[2]*inch)
c = colors.black
if(value[-1]=="white"):
c = colors.white
table.setStyle(
TableStyle(
[
("ALIGN", (0,0,), (-1,-1), "CENTER"),
("FONTSIZE", (0,0), (-1,-1), value[-4]),
("TEXTCOLOR", (0,0,), (-1,-1), c),
("TEXTFONT", (0,0), (-1,-1), value[-3]),
]
)
)
table.wrapOn(can,value[2]*inch,value[3]*inch)
table.drawOn(can,value[0],value[1])
can.restoreState()
can.save()
packet.seek(0)
content_pdf = PdfFileReader(packet)
output_pdf = PdfFileWriter()
reader = PdfFileReader("./static/pdf/stratahedron.pdf","rb")
page = reader.getPage(0)
page.mergePage(content_pdf.getPage(0))
output_pdf.addPage(page)
letters = string.digits
file_name_random = ''.join(random.choice(letters) for i in range(5))
file_name = f"./static/pdf/digital_stratahedron_{file_name_random}.pdf"
outputStream = open(file_name, "wb")
output_pdf.write(outputStream)
outputStream.close()
return file_name
# editPDF(
# {
# "Audience":[360,376,2,10,20,"Times-Bold",0,'black'],
# "Be Understood":[326,315,3,10,13,"Times-Regular",0,'black'],
# "Be Seen":[440,-120,3,10,13,"Times-Regular",60,'black'],
# "Be Heard":[-220,633,3,10,13,"Times-Regular",300,'black'],
# "Method":[-507,-220,2,10,20,"Times-Bold",180,'black'],
# "Digital Keystone":[-540,-303,3,10,13,"Times-Regular",180,'black'],
# "Digital Assets":[-130,-430,3,10,13,"Times-Regular",120,'black'],
# "Digital Ecosystem":[-520,324,3,10,13,"Times-Regular",240,'black'],
# "#Topic1":[80,-442,1.2,5,10,"Times-Bold",90,'white'],
# "#Topic3":[-193,-423,1.1,5,10,"Times-Bold",150,'white'],
# "#Topic2":[-636,10,1.1,5,10,"Times-Bold",210,'white'],
# "Story":[503,153,2,10,20,"Times-Bold",0,'black'],
# "Resourceful":[455,78,3,10,13,"Times-Regular",0,'black'],
# "Adaptable":[302,-360,3,10,13,"Times-Regular",60,'black'],
# "Connected":[50,633,3,10,13,"Times-Regular",300,'black'],
# "Process":[220,153,2,10,20,"Times-Bold",0,'black'],
# "Digital Delivery":[195,78,3,10,13,"Times-Regular",0,'black'],
# "Value Creation":[178,-120,3,10,13,"Times-Regular",60,'black'],
# "Digital Experience":[-94,393,3,10,13,"Times-Regular",300,'black'],
# }
# )
|
[
"nanotech.softapp20@gmail.com"
] |
nanotech.softapp20@gmail.com
|
38cce542df0415d2d792a37b8355ec7ce0f789d3
|
9e2d467de2d665f41dc94799f0acb98479571922
|
/_error.py
|
cf399a395d6e832d683a0de18251cbd067d4a2f6
|
[] |
no_license
|
pytsite/plugin-geo_ip
|
c63ecd12c95004c05fdae76b20a9343b52fb923f
|
db71e67651eb57b6ca76136d0014eaadf2cb6ffb
|
refs/heads/master
| 2021-10-23T09:29:32.580289
| 2019-03-16T22:04:28
| 2019-03-16T22:04:28
| 112,030,619
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
"""PytSite GeoIP Errors.
"""
__author__ = 'Oleksandr Shepetko'
__email__ = 'a@shepetko.com'
__license__ = 'MIT'
class ResolveError(Exception):
pass
|
[
"a@shepetko.com"
] |
a@shepetko.com
|
f7b22c64ab658985f221cf7076cee8fc91505b98
|
a360a22af5e0b385db438b1324564ef317ff2f38
|
/idex_module/views.py
|
a846edfb5592c73af23acdf636aeb55d68b6c4af
|
[] |
no_license
|
ogglin/exchange_comparison
|
3eb2d849e731f94e67509e4ce9130e33bb37bbaf
|
f3feae64aff26b574f7ecd24e6f7aff7bb95ec65
|
refs/heads/master
| 2023-04-26T07:45:06.229584
| 2021-05-31T18:52:29
| 2021-05-31T18:52:29
| 287,036,194
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 532
|
py
|
from rest_framework.response import Response
from rest_framework.views import APIView
from .functions import idex_profits
# Create your views here.
class idex(APIView):
def get(self, request):
# hotbit_result = hotbit_profits()
idex_result = idex_profits()
results = []
# idex_init()
# exchanges_init()
# for result in hotbit_result:
# results.append(result)
for result in idex_result:
results.append(result)
return Response(results)
|
[
"server.ares@gmail.com"
] |
server.ares@gmail.com
|
33f88b3804973bb17c410c2bdf24456d89324c34
|
9bcfbdf23c9ac156e0cdf5b5b5e06f18a1ad6fae
|
/pre_code/stock/xueqiuPawer.py
|
1a9ff34bb4d43402fbd96230452d2828ff831e48
|
[] |
no_license
|
haoson888/vnpy_future
|
a7576513b7ecf50c36d730c647263c6d1e44f3a6
|
89df2d5079a2e6d3782531369675248e38b2ff00
|
refs/heads/origin
| 2020-09-04T15:04:55.368725
| 2017-12-10T10:37:47
| 2017-12-10T10:37:47
| 219,762,816
| 0
| 1
| null | 2019-11-05T15:12:32
| 2019-11-05T14:18:18
| null |
UTF-8
|
Python
| false
| false
| 6,873
|
py
|
#coding: utf-8
from selenium import webdriver
import re
from urllib import request as urllib2
import sys
import os
from datetime import datetime
from datetime import timedelta
from datetime import date
import xlrd
from xlrd import open_workbook
from xlutils.copy import copy
#import nltk
import time
description_id = 1
#windows
#browser = webdriver.Chrome(executable_path='F:\chromedriver_win32\chromedriver.exe')
#mac
browser = webdriver.Chrome(executable_path='/Users/daitechang/Documents/stock/chromedriver')
def start(url, d, today, vstock):
# try:
global description_id
global browser
url = url
try:
browser.get(url)
t = browser.page_source
pn = re.compile(r'(.*)"statuses":(.*?)}]', re.S)
match = pn.match(t)
if not match:
# browser.close()
# browser.quit()
return 0
result = match.group(2)
result = result + '}]'
decode = json.loads(result)
startDetect = time.time()
st = int(time.mktime(datetime.strptime(datetime.strftime(today, "%Y-%m-%d"), "%Y-%m-%d").timetuple()))
ed = int(time.mktime(datetime.strptime(datetime.strftime(today + timedelta(days = 1), "%Y-%m-%d"), "%Y-%m-%d").timetuple()))
st = str(st) + '000'
print(st)
ed = str(ed) + '000'
print(ed)
s_today = datetime.strftime(today, "%Y-%m-%d")
for i in range(len(vstock)):
for item in decode:
if item['mark'] == 1:
continue
#print item['created_at'], st, ed
#print item['description'].encode('utf-8'), vstock[i]._name
if str(item['created_at']) > st and str(item['created_at']) < ed:
if item['text'].find(vstock[i]._name) != -1:  # compare str to str under Python 3
ff = open('corpus/' + s_today + '_' + str(description_id) + '.txt', 'w')
ff.write(item['text'])
ff.close()
description_id += 1
#print vstock[i]._name, item['description'].encode('utf-8')
if i in d:  # dict.has_key() was removed in Python 3
d[i] = d[i] + 1
else:
d[i] = 1
elif str(item['created_at']) < st and i == len(vstock) -1:
#print 1
# browser.close()
# browser.quit()
#if i == len(vstock) -1:
return 0
#print array[0], array[1]
# print decode[0]['description'].encode('utf-8')
# browser.close()
# browser.quit()
return 1
except Exception as e:
print(e)
# browser.close()
# browser.quit()
return 0
import json
# Fetch the list of popular users
def get_id():
f = open('id.txt', 'w')
for i in range(25):
url = 'http://xueqiu.com/recommend/user/industry.json?detail=1&index=' + str(i)
#browser.get(url)
#t = browser.page_source
print(url)
# print t.encode('utf-8')
cookie = '''s=10ht15dh2y; xq_a_token=5e47e2777e3b08d99725fe0f9f78815eb1cb8374; xqat=5e47e2777e3b08d99725fe0f9f78815eb1cb8374; xq_r_token=c38fedb2680c6b923eb4c87f16ebf19f574c3eca; xq_is_login=1; u=6585534947; xq_token_expire=Sun%20Nov%2015%202015%2009%3A14%3A02%20GMT%2B0800%20(CST); bid=73fe343eeb79fd513ae47464f938acf9_ig040t46; snbim_minify=true; __utmt=1; __utma=1.2082135748.1445390046.1445497172.1445504051.8; __utmb=1.14.10.1445504051; __utmc=1; __utmz=1.1445390046.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); Hm_lvt_1db88642e346389874251b5a1eded6e3=1445390044; Hm_lpvt_1db88642e346389874251b5a1eded6e3=1445506132'''
headers = {"User-Agent":"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6","Cookie":cookie }
req = urllib2.Request( url, headers = headers)
try:
content = urllib2.urlopen(req).read()
except Exception as e:
print(e)
#return
js = json.loads(content)
#print js
industries = js['industries']
#print industries
if industries:
for industry in industries:
for user in industry['users']:
print(user['id'], user['screen_name'])
f.write(str(user['id']) + ' ' + user['screen_name'] + '\n')
#f.write(p[0].get('value').encode('utf-8') + ' ' + p[1].get('value').encode('utf-8') + '\n')
class stock:
_id = ''
_name = ''
_industry = ''
def __init__(self, id, name, industry):
self._id = id
self._name = name
self._industry = industry
def pawner(day, t2):
today = date.today()
delta = -1
os.mkdir('corpus')
while 1:
f = open('id.txt', 'r')
delta += 1
if delta >= t2:
break
yesterday1 = today - timedelta(days = day - delta)
yesterday = datetime.strftime(yesterday1, "%Y-%m-%d")
score_file = 'score' + yesterday + '.txt'
industry_file = 'industry' + yesterday + '.txt'
#ff = open('score' + yesterday + '.txt', 'r')
d = {}
print(score_file)
vstock = []
#ff = open('stock.txt', 'r')
wb = xlrd.open_workbook('stock.xls')
sh = wb.sheet_by_name('stock')
for rownum in range(sh.nrows):
if rownum < 2:
continue
s = stock(str(sh.cell(rownum, 0).value), sh.cell(rownum, 1).value, sh.cell(rownum, 2).value)
vstock.append(s)
print(len(vstock))
print(repr(vstock[0]._name))
while 1:
try:
line = f.readline()
# user = str(i)
if not line:
break
array = line[:-1].split(' ')
user = array[0]
print(array[0], array[1])
#user = "1676206424"
page = 1
while 1:
url = "http://xueqiu.com/" + user + "?page=" + str(page)
ret = start(url, d, yesterday1, vstock)
if ret == 0:
#print i
break
page = page + 1
time.sleep(2)
except Exception as e:
print(e)
continue
#break
#i = i + 1
#if i >=9999999999:
# break
f.close()
ff = open(score_file, 'w')
industry_p = open(industry_file, 'w')
rb = open_workbook('stock.xls')
rs = rb.sheet_by_name('stock')
wb = copy(rb)
ws = wb.get_sheet(0)
ncol = rs.ncols
ws.write(1, ncol, yesterday)
industry_d = {}
t = sorted(d.items(), key=lambda kv: kv[1], reverse=True)  # Python 3: use key= instead of a cmp comparator
for key in t:
print(str(vstock[key[0]]._name) + '%' + str(vstock[key[0]]._industry) + '%'+ str(key[1]) + '\n')
ff.write(str(vstock[key[0]]._name) + '%' + str(vstock[key[0]]._industry) + '%'+ str(key[1]) + '\n')
if vstock[key[0]]._industry in industry_d:
industry_d[vstock[key[0]]._industry] += 1
else:
industry_d[vstock[key[0]]._industry] = 1
ws.write(key[0] + 2, ncol, key[1])
t = sorted(industry_d.items(), key=lambda kv: kv[1], reverse=True)
for key in t:
print(str(key[0]) + '%' + str(key[1]) + '\n')
industry_p.write(str(key[0]) + '%' + str(key[1]) + '\n')
print(industry_d)
wb.save('stock.xls')
browser.close()
browser.quit()
# timer = threading.Timer(7200, pawner)
# timer.start()
if __name__ == "__main__":
#nltk.download()
#negids = movie_reviews.fileids('neg')
#posids = movie_reviews.fileids('pos')
#print 1
## timer = threading.Timer(7200, pawner)
# timer.start()
t = int(sys.argv[1])
t2 = int(sys.argv[2])
#get_id()
pawner(t, t2)
|
[
"511735184@qq.com"
] |
511735184@qq.com
|
7fa407813dc0e9f4324ea6fa68186ad55071a769
|
fda201d7cca34e216a17d97665c8457c72e66cb2
|
/register/tests/test_center_csv.py
|
5106fe3734607be0a86be712d190cf444751a602
|
[
"Apache-2.0"
] |
permissive
|
SmartElect/SmartElect
|
94ab192beb32320e9ae8ae222f90ee531037c1c6
|
d6d35f2fa8f60e756ad5247f8f0a5f05830e92f8
|
refs/heads/develop
| 2020-12-26T04:04:42.753741
| 2019-07-17T17:08:25
| 2019-07-17T17:08:25
| 44,687,036
| 24
| 12
|
Apache-2.0
| 2020-06-06T07:16:48
| 2015-10-21T15:47:07
|
Python
|
UTF-8
|
Python
| false
| false
| 28,774
|
py
|
import os
import shutil
import tempfile
from django.test import TestCase
from django.urls import reverse
from ..models import RegistrationCenter, Office, Constituency, SubConstituency
from .. import utils
from .factories import OfficeFactory, ConstituencyFactory, SubConstituencyFactory, \
RegistrationCenterFactory
from libya_elections.constants import NO_NAMEDTHING
from staff.tests.base import StaffUserMixin
def get_copy_center_base_csv():
"""Return the base CSV for copy centers as a lists of lists (rows & columns)"""
current_dir = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(current_dir, 'uploads', 'copy_center_base.csv')
with open(file_path, 'rb') as f:
lines = f.read().decode('utf-8').split('\n')
return [line.split(',') for line in lines if line]
class CSVColumnConstants(object):
"""Constants mapping CSV columns to ints"""
CENTER_ID = 0
NAME = 1
COPY_OF_ID = 2
CENTER_TYPE = 12
class CenterFileTestMixin(object):
def setUp(self):
super(CenterFileTestMixin, self).setUp()
self.url = reverse('upload-centers-csv')
RegistrationCenterFactory(name="Deleted center", deleted=True)
def tearDown(self):
if hasattr(self, 'file'):
self.file.close()
def get_csv_file(self, filename):
# generates a simple csv we can use for tests
current_dir = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(current_dir, 'uploads', filename)
self.file = open(file_path, 'rb')
return self.file
@staticmethod
def get_messages(response):
messages = response.context['messages']
return [str(msg) for msg in messages]
def upload_csv(self, filename='valid_ecc.csv', follow=True):
csv_file = self.get_csv_file(filename)
response = self.client.post(self.url, data={'csv': csv_file}, follow=follow)
return response
class CenterFileUpload(CenterFileTestMixin, StaffUserMixin, TestCase):
# tests for the ecc file upload functionality
permissions = ['add_registrationcenter']
model = RegistrationCenter
@classmethod
def setUpClass(klass): # Files only
# Create a temp dir for CSV files created on the fly.
klass.temp_dir = tempfile.mkdtemp()
@classmethod
def tearDownClass(klass): # Files only
# Clean up temp CSV files.
shutil.rmtree(klass.temp_dir)
def setUp(self):
super(CenterFileUpload, self).setUp()
# Create some things
for id in [1, NO_NAMEDTHING]:
# create one test instance and one special 'no-named-thing' instance (999)
if not Office.objects.filter(id=id).exists():
OfficeFactory(id=id)
if not Constituency.objects.filter(id=id).exists():
ConstituencyFactory(id=id)
if not SubConstituency.objects.filter(id=id).exists():
SubConstituencyFactory(id=id)
def write_csv(self, rows):
"""Given a list of lists, write them as a CSV to a temp file and return the filename.
The list of lists should be rows and columns as returned by get_copy_center_base_csv().
"""
fh, filename = tempfile.mkstemp(suffix='.csv', dir=self.temp_dir)
os.close(fh)
with open(filename, 'wb') as f:
f.write('\n'.join([','.join(row) for row in rows]).encode('utf-8'))
return filename
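# Typical use in the tests below: self.upload_csv(filename=self.write_csv(csv_rows))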
def test_upload_page_works(self):
# requesting the upload page works and the right template is used
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'register/upload_centers_csv.html')
def test_empty_upload(self):
# form does not validate if an empty form is submitted.
# the same template as the one we landed on is used and the form
# has an error.
response = self.client.post(self.url, data={})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'register/upload_centers_csv.html')
self.assertFormError(response, 'form', 'csv', 'This field is required.')
def test_success_upload_page(self):
# after successfully uploading a file we are presented with a
# success template.
response = self.upload_csv()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'register/upload_centers_csv.html')
def test_upload_new_centers(self):
# Uploading a csv file with new center information creates new entries
# in the database.
response = self.upload_csv()
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertEqual(centers.count(), 3)
messages = self.get_messages(response)
self.assertIn(
utils.STATUS_MESSAGE.format(created=3, updated=0, dupes=0, blank=0),
messages
)
def test_upload_dupes(self):
# Upload does not create or update records if they did not change.
response = self.upload_csv()
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertEqual(centers.count(), 3)
response = self.upload_csv()
messages = self.get_messages(response)
self.assertIn(
utils.STATUS_MESSAGE.format(created=0, updated=0, dupes=3, blank=0),
messages
)
def test_upload_after_delete(self):
# Upload, mark records deleted, upload again
response = self.upload_csv()
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertEqual(centers.count(), 3)
RegistrationCenter.objects.all().update(deleted=True)
response = self.upload_csv()
centers = RegistrationCenter.objects.all()
self.assertEqual(centers.count(), 3)
messages = self.get_messages(response)
self.assertIn(
utils.STATUS_MESSAGE.format(created=3, updated=0, dupes=0, blank=0),
messages
)
def test_upload_update(self):
# CSV updates a record if its attributes differ from those in the db.
RegistrationCenter.objects.create(center_id=11001, name="Center 3")
RegistrationCenter.objects.create(center_id=11001, name="Center 3", deleted=True)
response = self.upload_csv()
self.assertEqual(response.status_code, 200)
reg_center = RegistrationCenter.objects.get(center_id=11001)
self.assertNotEqual(reg_center.name, "Center 3")
messages = self.get_messages(response)
self.assertIn(
utils.STATUS_MESSAGE.format(created=2, updated=1, dupes=0, blank=0),
messages
)
def test_non_csv(self):
# A non-CSV file should generate a specific error.
response = self.upload_csv(filename='icon_clock.gif')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(utils.COULD_NOT_PARSE_ERROR, messages)
def test_bad_formatted_csv(self):
# CSV files that contain rows with the wrong number of columns are not accepted.
# Even compliant rows are not imported.
response = self.upload_csv(filename='too_many_columns.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# file contained one valid center but it should not have been imported
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.PARSING_ERROR.format(line_number=2, columns=", ".join(utils.CSV_FIELDS)),
messages[0]
)
def test_too_many_headers(self):
# If the number of headers exceeds the number of columns expected,
# fail gracefully and inform the user that their file has the wrong format
response = self.upload_csv(filename='too_many_headers.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Too many headers ==> entire file is rejected
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.PARSING_ERROR.format(line_number=1, columns=", ".join(utils.CSV_FIELDS)),
messages[0]
)
def test_too_few_headers(self):
# If the number of headers less than the number of columns expected,
# fail gracefully and inform the user that their file has the wrong format
response = self.upload_csv(filename='too_few_headers.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Too few headers ==> entire file is rejected
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.PARSING_ERROR.format(line_number=1, columns=", ".join(utils.CSV_FIELDS)),
messages[0]
)
def test_wrong_file_headers(self):
# Uploading a csv file with columns in the wrong order should fail
response = self.upload_csv(filename='wrong_headers.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# no centers were created because we encountered an error on line 1.
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.PARSING_ERROR.format(line_number=1, columns=", ".join(utils.CSV_FIELDS)),
messages
)
def test_blank_csv(self):
# Uploading a blank csv file should not create any centers
response = self.upload_csv(filename='blank.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# No records were created
self.assertEqual(centers.count(), 0)
def test_blank_inbetween_csv(self):
# Blank lines are valid in between two rows
response = self.upload_csv(filename='blank_inbetween.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertEqual(centers.count(), 5)
messages = self.get_messages(response)
self.assertIn(
utils.STATUS_MESSAGE.format(created=5, updated=0, dupes=0, blank=3),
messages
)
def test_noninteger_center_id_csv(self):
# center id should be able to be cast into an integer otherwise a
# parsing error will occur and a message indicating the line number
# where the error occurred will be presented to the user.
response = self.upload_csv(filename='noninteger_center_id.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(field_name="center_id", value="110A1", line_number=2,
error='Enter a whole number.'),
messages[0]
)
def test_wrong_length_center_id_csv(self):
response = self.upload_csv(filename='wrong_length_center_id.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(field_name="center_id", value="110001", line_number=2,
error='Ensure this value is less than or equal to'),
messages[0]
)
def test_bad_office_id_csv(self):
# office id should be able to be cast into an integer otherwise a
# parsing error will occur and a message indicating the line number
# where the error occurred will be presented to the user.
response = self.upload_csv(filename='bad_office_id.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(field_name="office_id", value="", line_number=2,
error='This field is required.'),
messages[0]
)
def test_centers_not_associated_with_office_con_subcon_csv(self):
# Some Centers are not associated with offices, cons or subcons. For this purpose,
# each of these NamedThing models has a special instance with an ID of NO_NAMEDTHING
# (999) to represent the 'Absence of an associated NamedThing'.
# https://github.com/hnec-vr/libya-elections/issues/949
response = self.upload_csv(filename='no_associated_namedthings.csv')
self.assertEqual(response.status_code, 200)
# 1 center was created
ecc = RegistrationCenter.objects.get()
self.assertEqual(NO_NAMEDTHING, ecc.office.id)
self.assertEqual(NO_NAMEDTHING, ecc.constituency.id)
self.assertEqual(NO_NAMEDTHING, ecc.subconstituency.id)
def test_bad_constituency_id_csv(self):
# constituency id should be able to be cast into an integer otherwise a
# parsing error will occur and a message indicating the line number
# where the error occurred will be presented to the user.
response = self.upload_csv(filename='bad_constituency_id.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(field_name="constituency_id", value="x", line_number=2,
error='Enter a whole number.'),
messages[0]
)
def test_bad_subconstituency_id_csv(self):
# subconstituency id should be able to be cast into an integer otherwise a
# parsing error will occur and a message indicating the line number
# where the error occurred will be presented to the user.
response = self.upload_csv(filename='bad_subconstituency_id.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(field_name="subconstituency_id", value="x", line_number=2,
error='Enter a whole number.'),
messages[0]
)
def test_just_one_latlong(self):
# Providing just one of lat, long is an error
response = self.upload_csv(filename='just_one_latlong.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_ERROR.format(line_number=2,
error='Either set both latitude and longitude or neither'),
messages[0]
)
def test_invalid_lat(self):
# Invalid latitude
response = self.upload_csv(filename='invalid_lat.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(
line_number=2, field_name='center_lat', value="1234",
error='Ensure that there are no more than 3 digits before the decimal'),
messages[0]
)
def test_nonexistent_office(self):
response = self.upload_csv(filename='nonexistent_office.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(
line_number=2, field_name='office_id', value='22',
error='Office does not exist.'),
messages[0]
)
def test_nonexistent_constituency(self):
response = self.upload_csv(filename='nonexistent_constituency.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(
line_number=2, field_name='constituency_id', value='22',
error='Constituency does not exist.'),
messages[0]
)
def test_nonexistent_subconstituency(self):
response = self.upload_csv(filename='nonexistent_subconstituency.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(
line_number=2, field_name='subconstituency_id', value='22',
error='Subconstituency does not exist.'),
messages[0]
)
def test_blank_center_name(self):
response = self.upload_csv(filename='blank_center_name.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(
line_number=2, field_name='name', value='',
error='This field is required.'),
messages[0]
)
def test_newline_in_center_name(self):
response = self.upload_csv(filename='newline_center_name.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(
line_number=2, field_name='name', value='new\nline',
error='Newlines are not allowed.'),
messages[0]
)
def test_reg_open_field_set_to_true(self):
# The 'reg_open' field is not included in the CSV file.
# We should ensure that it is set to True (the model default)
response = self.upload_csv()
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertEqual(centers.count(), 3)
for ecc in centers:
self.assertEqual(ecc.reg_open, True)
def test_simple_copy_center_ok(self):
# test that simple copy center creation works
RegistrationCenterFactory(center_id=70001)
csv = get_copy_center_base_csv()
csv[1][CSVColumnConstants.COPY_OF_ID] = '70001'
csv[1][CSVColumnConstants.CENTER_TYPE] = \
RegistrationCenter.Types.NAMES['ar'][RegistrationCenter.Types.COPY]
response = self.upload_csv(filename=self.write_csv(csv))
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertEqual(len(centers), 2)
self.assertEqual(centers[0].copy_of, centers[1])
self.assertEqual(list(centers[1].copied_by.all()), [centers[0]])
def test_copy_center_same_file_reference_ok(self):
# test that a copy center can reference an original created in the same file
csv = get_copy_center_base_csv()
# Duplicate the data row and make row the 2nd data row refer to the first.
csv.append(csv[1][::])
csv[2][CSVColumnConstants.CENTER_ID] = '70002'
csv[2][CSVColumnConstants.COPY_OF_ID] = '70000'
csv[2][CSVColumnConstants.CENTER_TYPE] = \
RegistrationCenter.Types.NAMES['ar'][RegistrationCenter.Types.COPY]
response = self.upload_csv(filename=self.write_csv(csv))
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertEqual(len(centers), 2)
self.assertEqual(centers[1].copy_of, centers[0])
self.assertEqual(list(centers[0].copied_by.all()), [centers[1]])
def test_copy_center_failed_reference(self):
# test that one can't create a copy center that refers to a non-existent center.
csv = get_copy_center_base_csv()
csv[1][CSVColumnConstants.COPY_OF_ID] = '70001'
response = self.upload_csv(filename=self.write_csv(csv))
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(
line_number=2, field_name='copy_of_id', value='70001',
error='Copy centre does not exist.'),
messages[0]
)
def test_copy_center_read_only(self):
# test that copy centers are read only
original_center = RegistrationCenterFactory(center_id=70000)
copy_center = RegistrationCenterFactory(center_id=70001)
copy_center.copy_of = original_center
copy_center.save()
csv = get_copy_center_base_csv()
csv[1][CSVColumnConstants.CENTER_ID] = '70001'
csv[1][CSVColumnConstants.NAME] = 'different_name_to_trigger_an_attempt_to_edit'
csv[1][CSVColumnConstants.COPY_OF_ID] = '70000'
response = self.upload_csv(filename=self.write_csv(csv))
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertListEqual([center.center_id for center in centers], [70000, 70001])
messages = self.get_messages(response)
self.assertIn(
utils.FORM_ERROR.format(
line_number=2, error='Copy centres are read-only.'),
messages[0]
)
def test_existing_center_cant_become_copy_center(self):
# test that an existing center can't be turned into a copy center.
RegistrationCenterFactory(center_id=70000)
RegistrationCenterFactory(center_id=70001)
csv = get_copy_center_base_csv()
csv[1][CSVColumnConstants.COPY_OF_ID] = '70001'
csv[1][CSVColumnConstants.CENTER_TYPE] = \
RegistrationCenter.Types.NAMES['en'][RegistrationCenter.Types.COPY]
response = self.upload_csv(filename=self.write_csv(csv))
self.assertEqual(response.status_code, 200)
# No new centers should have been created
centers = RegistrationCenter.objects.all()
self.assertListEqual([center.center_id for center in centers], [70000, 70001])
messages = self.get_messages(response)
self.assertIn(
utils.FORM_ERROR.format(
line_number=2, error='A centre may not be changed to a copy centre.'),
messages[0]
)
def test_existing_center_must_remain_copy_center(self):
# test that an existing copy center can't become a non-copy center.
original_center = RegistrationCenterFactory(center_id=70000)
copy_center = RegistrationCenterFactory(center_id=70001)
copy_center.copy_of = original_center
copy_center.save()
csv = get_copy_center_base_csv()
csv[1][CSVColumnConstants.CENTER_ID] = '70001'
csv[1][CSVColumnConstants.COPY_OF_ID] = ''
csv[1][CSVColumnConstants.CENTER_TYPE] = \
RegistrationCenter.Types.NAMES['en'][RegistrationCenter.Types.GENERAL]
response = self.upload_csv(filename=self.write_csv(csv))
self.assertEqual(response.status_code, 200)
# No new centers should have been created
centers = RegistrationCenter.objects.all()
self.assertListEqual([center.center_id for center in centers], [70000, 70001])
messages = self.get_messages(response)
self.assertIn(
utils.FORM_ERROR.format(
line_number=2, error='Copy centres are read-only.'),
messages[0]
)
def test_center_type_valid(self):
# In the CSV file, 'center_type' is an arabic string field. We should
# parse it and convert to a corresponding integer from RegistrationCenter.Types.CHOICES.
response = self.upload_csv(filename='valid_center_types.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertEqual(centers.count(), 13)
# The first 6 centers in the test CSV have Arabic names. (At present we don't have
# an Arabic translation for "Split" so there's no point in testing it.)
for i, center in enumerate(centers[:6]):
self.assertEqual(center.center_type, RegistrationCenter.Types.CHOICES[i][0])
# The last 7 centers in the test CSV have English names.
for i, center in enumerate(centers[6:]):
self.assertEqual(center.center_type, RegistrationCenter.Types.CHOICES[i][0])
def test_center_type_invalid(self):
# If we don't recognize the value in the 'center_type' field, then return an error.
response = self.upload_csv(filename='invalid_center_types.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(
line_number=2, field_name='center_type', value='invalid_center_type',
error='That is not a valid center_type'),
messages[0]
)
def test_center_type_copy_required_for_copy_centers(self):
# Copy centers must have the copy center type
RegistrationCenterFactory(center_id=70000)
csv = get_copy_center_base_csv()
csv[1][CSVColumnConstants.CENTER_ID] = '70001'
csv[1][CSVColumnConstants.COPY_OF_ID] = '70000'
csv[1][CSVColumnConstants.CENTER_TYPE] = \
RegistrationCenter.Types.NAMES['ar'][RegistrationCenter.Types.OIL]
response = self.upload_csv(filename=self.write_csv(csv))
self.assertEqual(response.status_code, 200)
# No new centers should have been created
centers = RegistrationCenter.objects.all()
self.assertListEqual([center.center_id for center in centers], [70000])
messages = self.get_messages(response)
self.assertIn(
utils.FORM_ERROR.format(
line_number=2, error='Copy centre type must be "copy".'),
messages[0]
)
def test_center_type_copy_rejected_for_noncopy_centers(self):
# Non-copy centers may not have the copy center type
csv = get_copy_center_base_csv()
csv[1][CSVColumnConstants.CENTER_TYPE] = \
RegistrationCenter.Types.NAMES['ar'][RegistrationCenter.Types.COPY]
response = self.upload_csv(filename=self.write_csv(csv))
self.assertEqual(response.status_code, 200)
# No new centers should have been created
centers = RegistrationCenter.objects.all()
self.assertEqual(len(centers), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_ERROR.format(
line_number=2, error='Centre type "copy" requires copy centre information.'),
messages[0]
)
|
[
"vinod@kurup.com"
] |
vinod@kurup.com
|
71997201796ce62175d0745d6fef7532a5f65d44
|
111608c4cdf1665583770bc8c47e405d010b45a6
|
/TP2/Conv_vis.py
|
eb1ea0a8a0578c9a21fd50834369e2d39dc86f0a
|
[] |
no_license
|
wangzeyao/ComputerVision
|
56184bb792382cab90d68bb59b4ef410d8750f27
|
96daec6cb5e086e312ca956a857715d89f6a6adc
|
refs/heads/master
| 2020-04-27T15:09:57.906474
| 2019-04-30T19:31:07
| 2019-04-30T19:31:07
| 173,483,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
from keras.models import Model
from keras.models import load_model
import matplotlib.pyplot as plt
# This script plots the output of a convolution layer
def conv_output(model, layer_name, img):
input_img = model.input
try:
out_conv = model.get_layer(layer_name).output
except Exception:
raise Exception('No layer named {}!'.format(layer_name))
inter_layer_model = Model(inputs=input_img, outputs=out_conv)
img = img.reshape((1, 16, 16, 1))
inter_output = inter_layer_model.predict(img)
return inter_output[0]
def showConvOutput(model, image, layer_name):
loaded_model = load_model(model)  # load the model once instead of once per layer
for name in layer_name:
out_put = conv_output(loaded_model, name, image)
for i in range(6):
show_img = out_put[:, :, i]
plt.subplot(3, 2, i + 1)
plt.imshow(show_img)
plt.axis('off')
plt.title('Conv1' + '-' + str(i))
plt.show()
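# Minimal usage sketch (file and layer names below are hypothetical):
# import numpy as np
# img = np.random.rand(16, 16)  # matches the (1, 16, 16, 1) reshape in conv_output
# showConvOutput('cnn_model.h5', img, ['conv2d_1'])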
|
[
"blithetyrell@gmail.com"
] |
blithetyrell@gmail.com
|
291f1107e0a99ce49de7bd1a42bab6e7fa9b9073
|
ffae55f50f9eb0ae028d9f46cebea565f3700585
|
/18/VAJets/PKUTreeMaker/test/CrabJobsSrc/MC/crab3_analysisWZ_v1.py
|
7d3a2cd976ff0802af00a8aafe4ae252256a8d2a
|
[] |
no_license
|
JINGFFF/test
|
57a92eb2c3143bcfa5776fc87d3ff16ff7cdc04b
|
d48c2be6387dfaff3eb37e28ff116c91c3eaf67e
|
refs/heads/master
| 2021-02-06T21:00:52.184508
| 2020-04-26T04:35:04
| 2020-04-26T04:35:04
| 243,942,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,197
|
py
|
from WMCore.Configuration import Configuration
config = Configuration()
config.section_("General")
config.General.requestName = 'WZ_v1_2'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.maxMemoryMB = 3000
config.JobType.inputFiles = ['Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFPuppi.txt']
config.JobType.psetName = 'analysis_mc.py'
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
##config.Data.outputPrimaryDataset = 'VBS_WGAMMA_94X'
config.Data.inputDataset = '/WZ_TuneCUETP8M1_13TeV-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3_ext1-v2/MINIAODSIM'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 2
config.Data.totalUnits = -1
config.Data.publication = False
config.Data.outputDatasetTag = 'WZ_v1_2'
config.section_("Site")
config.Site.storageSite = 'T2_CN_Beijing'
|
[
"15827238926@163.com"
] |
15827238926@163.com
|
be99f713491a17a0f0cb456799aca46472352110
|
3116a22fcc4426a2e034f9ad244c907b8bab0f8f
|
/core/Config.py
|
1e3674711425d30f726428fd2b67bddb58a2f91e
|
[] |
no_license
|
avjui/Homepy
|
8821986686c3808b11b1a1c4798a56a298a1ce8f
|
5c943d885edb75110db90c10ba27d20804f3f66d
|
refs/heads/master
| 2016-09-05T20:31:13.061590
| 2013-05-13T10:39:01
| 2013-05-13T10:39:01
| 8,085,378
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,189
|
py
|
import os
import sys
from lib.configobj import ConfigObj
import core
import core.Logger
class Config:
def CheckSection(self, sec):
""" Check if INI section exists, if not create it """
try:
core.CFG[sec]
return True
except:
core.CFG[sec] = {}
return False
################################################################################
# Check_setting_int #
################################################################################
def check_setting_int(self, config, cfg_name, item_name, def_val):
try:
my_val = int(config[cfg_name][item_name])
except:
my_val = def_val
try:
config[cfg_name][item_name] = my_val
except:
config[cfg_name] = {}
config[cfg_name][item_name] = my_val
#log(item_name + " -> " + str(my_val), 'debug')
return my_val
################################################################################
# Check_setting_str #
################################################################################
def check_setting_str(self, config, cfg_name, item_name, def_val, log=True):
try:
my_val = config[cfg_name][item_name]
except:
my_val = def_val
try:
config[cfg_name][item_name] = my_val
except:
config[cfg_name] = {}
config[cfg_name][item_name] = my_val
#if log:
# log(item_name + " -> " + my_val, 'debug')
#else:
# log(item_name + " -> ******", 'debug')
return my_val
def Check(self):
# Make sure all the config sections exist
self.CheckSection('General')
self.CheckSection('Syssetting')
try:
core.HTTP_PORT = self.check_setting_int(core.CFG, 'General', 'http_port', 8989)
except:
print " Port is 8989"
core.HTTP_PORT = 8989
if core.HTTP_PORT < 21 or core.HTTP_PORT > 65535:
core.HTTP_PORT = 8989
try:
core.HTTP_HOST = self.check_setting_str(core.CFG, 'General', 'http_host', '0.0.0.0')
except:
core.HTTP_HOST = '0.0.0.0'
core.HTTP_USERNAME = self.check_setting_str(core.CFG, 'General', 'http_username', '')
core.HTTP_PASSWORD = self.check_setting_str(core.CFG, 'General', 'http_password', '')
core.WEB_INTERFACE = self.check_setting_str(core.CFG, 'Syssetting', 'web_interface', '')
core.ROOMS = self.check_setting_str(core.CFG, 'Syssetting', 'rooms', '')
# read the same 'debug_log' key that Write() persists; compare the raw string
# because bool('0') would evaluate to True
core.DEBUG_LOG = str(self.check_setting_str(core.CFG, 'Syssetting', 'debug_log', 0)) in ('1', 'True')
core.HTTP_ROOT = self.check_setting_str(core.CFG, 'Syssetting', 'root', '')
def Write(self):
new_config = ConfigObj()
new_config.filename = core.CONFIG_FILE
new_config['General'] = {}
new_config['General']['http_port'] = core.HTTP_PORT
new_config['General']['http_host'] = core.HTTP_HOST
new_config['General']['http_username'] = core.HTTP_USERNAME
new_config['General']['http_password'] = core.HTTP_PASSWORD
new_config['Syssetting'] = {}
new_config['Syssetting']['web_interface'] = core.WEB_INTERFACE
new_config['Syssetting']['rooms'] = core.ROOMS
new_config['Syssetting']['debug_log'] = core.DEBUG_LOG
new_config['Syssetting']['root'] = core.HTTP_ROOT
new_config.write()
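# Minimal usage sketch (assumes core.CFG and core.CONFIG_FILE have been
# initialised by the application at startup):
# cfg = Config()
# cfg.Check()   # populate core.* settings from the INI, applying defaults
# cfg.Write()   # persist the current core.* settings back to disk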
|
[
"renej@vol.at"
] |
renej@vol.at
|
90461e000c35af10dc0b8d65f682b4731f30ca25
|
57fb9e67e5134be1d45622726fdc2c5ed7918ac8
|
/wallapi/wallapp/urls.py
|
6253e5d86f2c6f7336ceaca39ba14facb3717a1f
|
[] |
no_license
|
renatoln/WallMessages
|
b798e770a551945f5062465244771ae1742845ac
|
be0013aeda05dbe260efebcb787279f5055a4e25
|
refs/heads/master
| 2022-12-16T23:06:14.858578
| 2018-03-15T20:35:19
| 2018-03-15T20:35:19
| 124,813,272
| 0
| 0
| null | 2022-11-22T01:55:44
| 2018-03-12T00:36:04
|
Python
|
UTF-8
|
Python
| false
| false
| 678
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^usuarios/$', views.UsuarioList.as_view(), name='usuario-list'),
url(r'^usuario/(?P<pk>[0-9]+)/$', views.UsuarioDetail.as_view(), name='usuario-detail'),
url(r'^disciplinas/$', views.DisciplinaList.as_view()),
url(r'^disciplina/(?P<pk>[0-9]+)/$', views.DisciplinaDetail.as_view()),
url(r'^disciplina_alunoss/$', views.Disciplina_AlunosList.as_view()),
url(r'^disciplina_alunos/(?P<pk>[0-9]+)/$', views.Disciplina_AlunosDetail.as_view()),
url(r'^mensagens/$', views.MensagemList.as_view()),
url(r'^mensagem/(?P<pk>[0-9]+)/$', views.MensagemDetail.as_view()),
]
|
[
"renatoln@yahoo.com.br"
] |
renatoln@yahoo.com.br
|
e8a48631debbf0d92915eee050925ee446953169
|
586f71acf5f50d893674de28f12f6f0336e79ae9
|
/less3/add_files/003.py
|
2fb804047209375411572876f610439a2a5e22e3
|
[] |
no_license
|
alexphpteacher/python
|
7d1b60d40e83e72b5ad2463c6b4d866b0254978c
|
d159c578f82019df5f45c2cd88f244526e3a13cb
|
refs/heads/master
| 2020-03-21T17:44:00.591826
| 2018-07-09T16:37:41
| 2018-07-09T16:37:41
| 138,849,778
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
def f(a, b, c, d, aa, bb, cc, dd):
print(a,b,c,d)
print(aa,'',bb,'',cc,'',dd)
args = [1,2,3,4]
kwargs = {
'aa': 'aa val',
'bb': 'bb val',
'cc': 'cc val',
'dd': 'dd val',
}
f(*args, **kwargs)
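# Expected output:
# 1 2 3 4
# aa val  bb val  cc val  dd val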
|
[
"alexphpteacher@gmail.com"
] |
alexphpteacher@gmail.com
|
e4636070438d299c95e00eb5f247fb6dcd338088
|
322ffff4be00c0fa4da4fde7442393cc2d54a08b
|
/blogp1e0/apps/voicea/views.py
|
8c71ea268f836d739b67e9cff807b97c7fc1c322
|
[] |
no_license
|
aleporyadin/django-simple-blog
|
d04ef95c79553ad9060651a0e47dd02add72dacf
|
203f8d660e67c1072be998da810acf227fb9d82e
|
refs/heads/master
| 2023-02-23T17:27:42.613692
| 2021-01-27T18:33:38
| 2021-01-27T18:33:38
| 299,535,447
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
from django.shortcuts import render
import requests
def index(request):
return render(request, 'voicea/voicea.html')
|
[
"ale.poryadin@gmail.com"
] |
ale.poryadin@gmail.com
|
d64cb11c04445671cb8472f9ac553377401756df
|
1a54e43c7e277029822ad8cf0bd4644e6aa3d0f8
|
/qgis/ogrprocessing/sextanteext/ParameterDbConnection.py
|
2c8d04f41cb46a5ffe64bd11171a131702d4708b
|
[
"MIT"
] |
permissive
|
maphew/ogrtools
|
f25e5398cc45aba214ff8712c294d0d0a205a7fe
|
9fc20807038439e831cf6f0a33afef0874ffc23e
|
refs/heads/master
| 2021-01-21T16:38:26.693153
| 2015-05-29T17:21:55
| 2015-05-29T17:21:55
| 36,517,475
| 0
| 0
| null | 2015-05-29T16:58:53
| 2015-05-29T16:58:53
| null |
UTF-8
|
Python
| false
| false
| 2,242
|
py
|
from sextante.parameters.ParameterSelection import ParameterSelection
from ogrprocessing.dbconnection import DbConnection
class ParameterDbConnection(ParameterSelection):
def __init__(self, name="", description=""):
self.options = DbConnection.qgis_connections()
ParameterSelection.__init__(
self, name, description, self.options, default=0)
def getConnectionName(self):
return self.options[self.value]
def getConnectionURI(self):
return DbConnection.layer_uri(self.getConnectionName())
def getOgrConnection(self):
connoptions = {
"host": self.getHost(),
"port": self.getPort(),
"dbname": self.getDatabase(),
"user": self.getUsername(),
"password": self.getPassword()
}
connargs = []
for k, v in connoptions.items():
if len(v) > 0:
connargs.append("%s='%s'" % (k, v))
return "PG:%s" % " ".join(connargs)
def getOgrDriverName(self):
return 'PostgreSQL'
def getHost(self):
return DbConnection.connection_value(self.getConnectionName(), "host")
def getPort(self):
return DbConnection.connection_value(self.getConnectionName(), "port")
def getDatabase(self):
return DbConnection.connection_value(self.getConnectionName(), "database")
def getUsername(self):
return DbConnection.connection_value(self.getConnectionName(), "username")
def getPassword(self):
return DbConnection.connection_value(self.getConnectionName(), "password")
def getValueAsCommandLineParameter(self):
return "\"" + str(self.value) + "\""
def getAsScriptCode(self):
return "##" + self.name + "=dbconnection " + ";".join(self.options)
def deserialize(self, s):
tokens = s.split("|")
if len(tokens) == 4:
return ParameterSelection(tokens[0], tokens[1], tokens[2].split(";"), int(tokens[3]))
else:
return ParameterSelection(tokens[0], tokens[1], tokens[2].split(";"))
def serialize(self):
return self.__module__.split(".")[-1] + "|" + self.name + "|" + self.description + \
"|" + ";".join(self.options)
|
[
"pka@sourcepole.ch"
] |
pka@sourcepole.ch
|
eb3a8d5c498c7474673b63e103c93f49315218fa
|
3ff9821b1984417a83a75c7d186da9228e13ead9
|
/No_0122_Best Time to Buy and Sell Stock II/by_dynamic_programming.py
|
5874db8b00a7a87dcea7b16d8be839baf34edc99
|
[
"MIT"
] |
permissive
|
brianchiang-tw/leetcode
|
fd4df1917daef403c48cb5a3f5834579526ad0c2
|
6978acfb8cb767002cb953d02be68999845425f3
|
refs/heads/master
| 2023-06-11T00:44:01.423772
| 2023-06-01T03:52:00
| 2023-06-01T03:52:00
| 222,939,709
| 41
| 12
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,646
|
py
|
'''
Description:
Say you have an array for which the ith element is the price of a given stock on day i.
Design an algorithm to find the maximum profit. You may complete as many transactions as you like (i.e., buy one and sell one share of the stock multiple times).
Note: You may not engage in multiple transactions at the same time (i.e., you must sell the stock before you buy again).
Example 1:
Input: [7,1,5,3,6,4]
Output: 7
Explanation: Buy on day 2 (price = 1) and sell on day 3 (price = 5), profit = 5-1 = 4.
Then buy on day 4 (price = 3) and sell on day 5 (price = 6), profit = 6-3 = 3.
Example 2:
Input: [1,2,3,4,5]
Output: 4
Explanation: Buy on day 1 (price = 1) and sell on day 5 (price = 5), profit = 5-1 = 4.
Note that you cannot buy on day 1, buy on day 2 and sell them later, as you are
engaging multiple transactions at the same time. You must sell before buying again.
Example 3:
Input: [7,6,4,3,1]
Output: 0
Explanation: In this case, no transaction is done, i.e. max profit = 0.
'''
from typing import List
class Solution:
def maxProfit(self, prices: List[int]) -> int:
# Holding stock before any purchase is impossible, so initialize cur_hold to -infinity
cur_hold, cur_not_hold = -float('inf'), 0
for stock_price in prices:
prev_hold, prev_not_hold = cur_hold, cur_not_hold
# either keep hold, or buy in stock today at stock price
cur_hold = max( prev_hold, prev_not_hold - stock_price )
# either keep not-hold, or sell out stock today at stock price
cur_not_hold = max( prev_not_hold, prev_hold + stock_price )
# maximum profit must be in not-hold state
return cur_not_hold if prices else 0
# n : the length of input list, prices.
## Time Complexity: O( n )
#
# The overhead in time is the cost of for loop, which is of O( n )
## Space Complexity: O( 1 )
#
# The overhead in space is the storage for loop index and temporary variable, which is of O( 1 )
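# For comparison only (not part of the original solution above): with unlimited
# transactions, the maximum profit also equals the sum of all positive
# day-to-day price differences, which gives a compact greedy cross-check.
def max_profit_greedy(prices: List[int]) -> int:
    # every upward move is captured, equivalent to buying at each local
    # bottom and selling at each local top
    return sum(max(prices[i] - prices[i - 1], 0) for i in range(1, len(prices)))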
from collections import namedtuple
TestEntry = namedtuple('TestEntry', 'stock_sequence')
def test_bench():
test_data = [
TestEntry( stock_sequence = [7,1,5,3,6,4] ),
TestEntry( stock_sequence = [1,2,3,4,5] ),
TestEntry( stock_sequence = [7,6,4,3,1] ),
]
# expected output:
'''
7
4
0
'''
for t in test_data:
print( Solution().maxProfit( prices = t.stock_sequence) )
return
if __name__ == '__main__':
test_bench()
|
[
"brianchiang1988@icloud.com"
] |
brianchiang1988@icloud.com
|
d0dcfc939e257611608f80299b05985ddb4d0589
|
721fe60fad57f0b0ec9c9c320c24ca18cdc981cd
|
/django/rustic_cut/rustic_cut/admin.py
|
77671f3a23ef0d8229a4874fdc88dfa94ee92195
|
[] |
no_license
|
OldGareBear/rustic-cut
|
e53159c64228de9a953675c43227c79ca7b0778e
|
176b0775bb7b6db518fe25c9057682da7df1368d
|
refs/heads/master
| 2021-01-10T15:57:25.685101
| 2016-02-09T18:53:55
| 2016-02-09T18:53:55
| 48,298,553
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
import rustic_cut.models as m
from django.contrib.admin.views.main import ChangeList
from django.contrib import admin
@admin.register(m.Product)
class ProductAdmin(admin.ModelAdmin):
list_display = ('name',)
search_fields = ('name', 'categories')
@admin.register(m.Category)
class CategoryAdmin(admin.ModelAdmin):
list_display = ('name',)
search_fields = ('name',)
|
[
"betheagary@gmail.com"
] |
betheagary@gmail.com
|
4bd4bebebcee12e2cf64dd1eacd1a163512bbeee
|
78c76c8ec506080ff83edd7a3619a6b1e709a4e5
|
/apps/courses/__init__.py
|
3b3a526b16976686d4850ba0f61ebd17bc4992e1
|
[] |
no_license
|
wadayu/mxonline
|
dd0a08d21b858a49f2107974ba13b6e283a1f01f
|
58e808b3415e51935c15b1e5f7b30461c879d861
|
refs/heads/master
| 2021-01-20T23:47:55.025272
| 2018-02-08T01:41:06
| 2018-02-08T01:41:06
| 101,853,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 75
|
py
|
# Chinese display name for this app in the xadmin backend
default_app_config = 'courses.apps.CoursesConfig'
|
[
"wadayu@163.com"
] |
wadayu@163.com
|
46a639f619911ed380be15d8e19d2d7d854a4939
|
50e39231d8bea2a01a9d5db69aeb5c1a8054642b
|
/test/test_update/test_module.py
|
d0498fe86f21adf89dd240bec41836c7977f5729
|
[] |
no_license
|
leecrest/wafer
|
eb09e96d79e149cfee4d6fc40270996618bdea6c
|
58b148d03dc18dcfdf6bac1c5ed410f1fe112ad3
|
refs/heads/master
| 2020-05-18T18:16:41.566961
| 2014-07-15T13:37:31
| 2014-07-15T13:37:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 120
|
py
|
# coding=utf-8
"""
@author : leecrest
@time : 2014/6/30 20:31
@brief :
"""
#NAME = "I am NAME"
NAME2 = "I am NAME2"
|
[
"281042207@qq.com"
] |
281042207@qq.com
|
155e6f8d2612353259928900fac73b905ca32da0
|
e5d8b15cbd899283d6ead4742334e997db06d6e0
|
/web/config/settings/base.py
|
37124bc82aab5552b2646ceca937c109e33f6676
|
[] |
no_license
|
Maliaotw/dashboard-django
|
628d777d88b61dad7c3c551b72979b38c2065e15
|
cabbc3e6e8156510dd4ba91ffe1066c9cb040eac
|
refs/heads/main
| 2023-02-16T02:52:02.169754
| 2021-01-12T03:13:55
| 2021-01-12T03:13:55
| 289,612,737
| 0
| 0
| null | 2021-01-12T03:13:56
| 2020-08-23T04:05:36
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 5,390
|
py
|
"""
Django settings for web project.
Generated by 'django-admin startproject' using Django 3.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from .conf import load_user_config
from pathlib import Path
CONFIG = load_user_config()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent.parent
# vsphere_monitor/
APPS_DIR = ROOT_DIR / "web"
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# PROJECT_DIR = os.path.dirname(BASE_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = CONFIG.DEBUG
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# SECURITY WARNING: keep the secret key used in production secret!
# from django.core.management.utils import get_random_secret_key
# get_random_secret_key()
SECRET_KEY = CONFIG.SECRET_KEY
ALLOWED_HOSTS = ['*']
# Application definition
DJANGO_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
# "django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
]
THIRD_PARTY_APPS = [
'rest_framework',
'django_filters',
'widget_tweaks',
]
LOCAL_APPS = [
'app.apps.AppConfig',
'common.apps.CommonConfig',
'authentication.apps.AuthenticationConfig',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
"DIRS": [str(APPS_DIR / "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
"NAME": str(APPS_DIR / "db.sqlite3"),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
str(APPS_DIR / "static"),
)
STATIC_ROOT = str(APPS_DIR / "data" / "static")
# Media files (FileField, ImageField uploads) will be saved here
MEDIA_URL = '/media/'
# MEDIA_ROOT = os.path.join(BASE_DIR, "data", 'media')
MEDIA_ROOT = str(APPS_DIR / "data" / "media")
LOGIN_URL = "/login/"
# SESSION
SESSION_COOKIE_AGE = 60 * 60  # Session expires after 60 minutes
SESSION_EXPIRE_AT_BROWSER_CLOSE = True  # Invalidate the session when the browser closes (does not delete the session row from the database)
SESSION_SAVE_EVERY_REQUEST = True  # Re-save the session on every request to refresh its expiry
# LOGGING
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'normal': {
'format': '[%(levelname)s] %(asctime)s | %(name)s:%(lineno)d | %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler', # Default logs to stderr
'formatter': 'normal', # use the above "normal" formatter
}
},
'loggers': {
'': { # means "root logger"
'handlers': ['console'], # use the above "console" handler
'level': 'DEBUG', # logging level
},
},
}
|
[
"MaliaoTW@gmail.com"
] |
MaliaoTW@gmail.com
|
a0fb6c98577c64b357b2869ef1c3ec0ceecded1c
|
8ac2b232fff16e6c32b872dfb505e951c5e6753c
|
/samples/contrib/pytorch-samples/bert/news_dataset.py
|
a1c1f8945d390d52b54023046b07179840091ad8
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
Bobgy/pipelines
|
95a3e434ea19492f85ed8711fde566f6a5dff5c5
|
105e10090e6d00ed1910121c00e72c7cf6001633
|
refs/heads/master
| 2022-05-17T02:39:40.388972
| 2021-07-31T05:36:35
| 2021-07-31T05:36:35
| 200,167,826
| 1
| 1
|
Apache-2.0
| 2021-02-06T01:33:33
| 2019-08-02T04:57:30
|
Python
|
UTF-8
|
Python
| false
| false
| 2,500
|
py
|
# !/usr/bin/env/python3
# Copyright (c) Facebook, Inc. and its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=arguments-differ
# pylint: disable=unused-argument
# pylint: disable=abstract-method
"""News dataset script."""
import torch
from torch.utils.data import Dataset
class NewsDataset(Dataset):
"""Ag News Dataset
Args:
Dataset
"""
def __init__(self, reviews, targets, tokenizer, max_length):
"""Performs initialization of tokenizer.
Args:
reviews: AG news text
targets: labels
tokenizer: bert tokenizer
max_length: maximum length of the news text
"""
self.reviews = reviews
self.targets = targets
self.tokenizer = tokenizer
self.max_length = max_length
def __len__(self):
"""
Returns:
returns the number of datapoints in the dataframe
"""
return len(self.reviews)
def __getitem__(self, item):
"""Returns the review text and the targets of the specified item.
Args:
item: Index of sample review
Returns:
Returns the dictionary of review text, input ids, attention mask, targets
"""
review = str(self.reviews[item])
target = self.targets[item]
encoding = self.tokenizer.encode_plus(
review,
add_special_tokens=True,
max_length=self.max_length,
return_token_type_ids=False,
padding="max_length",
return_attention_mask=True,
return_tensors="pt",
truncation=True,
)
return {
"review_text": review,
"input_ids": encoding["input_ids"].flatten(),
"attention_mask": encoding["attention_mask"].flatten(), # pylint: disable=not-callable
"targets": torch.tensor(target, dtype=torch.long), # pylint: disable=no-member,not-callable
}
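if __name__ == "__main__":
    # A minimal usage sketch, assuming the Hugging Face 'transformers'
    # tokenizer API; the sample text and label below are illustrative only.
    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    dataset = NewsDataset(
        reviews=["Stocks rally as markets rebound."],
        targets=[2],  # hypothetical label index
        tokenizer=tokenizer,
        max_length=32,
    )
    sample = dataset[0]
    print(sample["input_ids"].shape, sample["targets"])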
|
[
"noreply@github.com"
] |
Bobgy.noreply@github.com
|
61dbadf241ebabf20e985be5c0ebeac9a380b5f1
|
560a1c817fe015b165a6f2885bfa0575d5c2f2ba
|
/splibrary.py
|
f6b08bae7df936a6dbe1f63e6af32b8ac6a67107
|
[] |
no_license
|
daniel2005d/SharePointTools
|
19ad23181f5611ad725c775882872c0c7468fb06
|
59658a84c0f512c6c61b317ac34238648bb18fbc
|
refs/heads/master
| 2021-01-21T15:37:39.590170
| 2020-06-16T22:45:32
| 2020-06-16T22:45:32
| 91,852,987
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,121
|
py
|
#!/usr/bin/env python3
from colored import fg, attr
import argparse
from bs4 import BeautifulSoup
from urllib.parse import urlparse  # Python 3 location of urlparse
import requests
import sys
def validaracceso(pagina):
    # Request the page and report whether it is accessible
    r = requests.get(pagina, verify=False)
    if r.status_code == 200:
        print('{} {} {} --> {} OK {}'.format(fg('yellow'), pagina, attr('reset'), fg('green'), attr('reset')))
    else:
        print('{} {} {} --> {} Error {} {}'.format(fg('red'), pagina, attr('reset'), fg('green'), r.status_code, attr('reset')))
    return r.status_code
def obtenerListas(sitio):
    paginas = []
    segmentosdominio = urlparse(sitio)
    dominio = segmentosdominio.scheme + '://' + segmentosdominio.netloc + '/'
    l = requests.get(sitio, verify=False)
    if l.status_code == 200:
        soup = BeautifulSoup(l.text, 'html.parser')
        divs = soup.findAll('div', {"class": "ms-vl-apptile"})
        for link in divs:
            vinculos = link.find_all('a')
            for href in vinculos:
                urllista = href.attrs['href']
                if urllista != 'javascript:;':
                    # paginas holds dicts, so compare against the stored URLs
                    if urllista not in (p['pagina'] for p in paginas):
                        codigo = validaracceso(dominio + urllista)
                        paginas.append({'pagina': urllista, 'status': codigo})
    return paginas
parser = argparse.ArgumentParser(description='Collects the accessible links of SharePoint libraries and lists')
parser.add_argument('-p', '--pagina', help='Address of the page that holds the library listing (viewlsts.aspx)', required=True)
parser.add_argument('-o', '--output', help='Output file for the discovered URLs', required=False)
args = parser.parse_args()
if args.pagina is not None:
    print('%s Starting discovery %s' % (fg('green'), attr('reset')))
    try:
        paginas = obtenerListas(args.pagina)
        if args.output is not None:
            with open(args.output, 'w') as f:
                for p in paginas:
                    f.write(p['pagina'] + '-->' + str(p['status']) + '\n')
    except Exception:
        print(sys.exc_info()[0])
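# Example invocation (hypothetical host and paths):
#   python splibrary.py -p https://sharepoint.example.com/_layouts/viewlsts.aspx -o urls.txt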
|
[
"daniel2005d@gmail.com"
] |
daniel2005d@gmail.com
|
7903b63e3153fe8c031f7f176fc936e66a33a123
|
41d25836fc24cd540f163235fadf07b8030199e3
|
/Sparse Auto Encoder/load_dataset.py
|
af254a576086af14b4013869c54fdddbae25500a
|
[] |
no_license
|
AchyuthaBharadwaj/Machine-Learning
|
d9828eeb22a95c5893bf902d6a8273513b00a117
|
fca3462f0aa9f1c02b520b297a9f59c00b9b4243
|
refs/heads/master
| 2020-03-15T10:28:41.773871
| 2018-05-06T01:11:17
| 2018-05-06T01:11:17
| 132,099,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,720
|
py
|
import numpy as np
import os
import pdb
datasets_dir = './data/'
def one_hot(x,n):
if type(x) == list:
x = np.array(x)
x = x.flatten()
o_h = np.zeros((len(x),n))
o_h[np.arange(len(x)),x] = 1
return o_h
def mnist(ntrain=60000,ntest=10000,onehot=False,subset=True,digit_range=[0,2],shuffle=True):
data_dir = os.path.join(datasets_dir,'mnist/')
    # IDX files are binary; open in 'rb' mode so np.fromfile reads them correctly
    fd = open(os.path.join(data_dir, 'train-images-idx3-ubyte'), 'rb')
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    trX = loaded[16:].reshape((60000, 28 * 28)).astype(float)
    fd = open(os.path.join(data_dir, 'train-labels-idx1-ubyte'), 'rb')
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    trY = loaded[8:].reshape((60000)).astype(float)
    fd = open(os.path.join(data_dir, 't10k-images-idx3-ubyte'), 'rb')
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    teX = loaded[16:].reshape((10000, 28 * 28)).astype(float)
    fd = open(os.path.join(data_dir, 't10k-labels-idx1-ubyte'), 'rb')
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    teY = loaded[8:].reshape((10000)).astype(float)
trX = trX/255.
teX = teX/255.
trX = trX[:ntrain]
trY = trY[:ntrain]
teX = teX[:ntest]
teY = teY[:ntest]
if onehot:
trY = one_hot(trY, 10)
teY = one_hot(teY, 10)
else:
trY = np.asarray(trY)
teY = np.asarray(teY)
if subset:
subset_label = np.arange(digit_range[0], digit_range[1])
train_data_sub = []
train_label_sub = []
test_data_sub = []
test_label_sub = []
for i in subset_label:
train_sub_idx = np.where(trY==i)
test_sub_idx = np.where(teY==i)
#pdb.set_trace()
A = trX[train_sub_idx[0],:]
C = teX[test_sub_idx[0],:]
if onehot:
B = trY[train_sub_idx[0],:]
D = teY[test_sub_idx[0],:]
else:
B = trY[train_sub_idx[0]]
D = teY[test_sub_idx[0]]
train_data_sub.append(A)
train_label_sub.append(B)
test_data_sub.append(C)
test_label_sub.append(D)
trX = train_data_sub[0]
trY = train_label_sub[0]
teX = test_data_sub[0]
teY = test_label_sub[0]
for i in range(digit_range[1]-digit_range[0]-1):
trX = np.concatenate((trX,train_data_sub[i+1]),axis=0)
trY = np.concatenate((trY,train_label_sub[i+1]),axis=0)
teX = np.concatenate((teX,test_data_sub[i+1]),axis=0)
teY = np.concatenate((teY,test_label_sub[i+1]),axis=0)
if shuffle:
train_idx = np.random.permutation(trX.shape[0])
test_idx = np.random.permutation(teX.shape[0])
trX = trX[train_idx,:]
teX = teX[test_idx,:]
if onehot:
trY = trY[train_idx,:]
teY = teY[test_idx,:]
else:
trY = trY[train_idx]
teY = teY[test_idx]
trX = np.squeeze(trX).T
teX = np.squeeze(teX).T
trY = trY.reshape(1,-1)
teY = teY.reshape(1,-1)
return trX, trY, teX, teY
'''
def main():
    trX, trY, teX, teY = mnist()
if __name__ == "__main__":
main()
'''
|
[
"32027107+achyu1729@users.noreply.github.com"
] |
32027107+achyu1729@users.noreply.github.com
|
6bf1a8ce698e2b0c1de939eb7ad87f03c6b43fb3
|
da10044b2248b4aac89bdc5c4886cc7486d3d938
|
/blog/models.py
|
87ea42b48156fafd97327ecca94139749c5a8481
|
[] |
no_license
|
g-akash/my-blog
|
49100d55b78886ea11423bd317b53c4c1bde7aa4
|
bb8239c7bc76aafa50ce2d124946c12535a68fce
|
refs/heads/master
| 2020-12-02T19:24:27.493143
| 2017-07-06T20:56:33
| 2017-07-06T20:56:33
| 96,337,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,006
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
# Create your models here.
class Post(models.Model):
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(default = timezone.now)
published_date = models.DateTimeField(blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
def approved_comments(self):
return self.comments.filter(approved_comment=True)
class Comment(models.Model):
    post = models.ForeignKey('blog.Post', on_delete=models.CASCADE, related_name="comments")
author = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(default = timezone.now)
approved_comment = models.BooleanField(default=False)
def approve(self):
self.approved_comment = True
self.save()
def __str__(self):
return self.text
|
[
"akash.garg2007@gmail.com"
] |
akash.garg2007@gmail.com
|
f9d8898f58752cd3781b1c1101eefbc33a20667c
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/dev/perf/CascadeMaskRCNN_iflytek_for_PyTorch/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py
|
853289e67b4a5019eddfc1bbefb0b44e53dd49e2
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 3,714
|
py
|
_base_ = [
'../_base_/models/cascade_mask_rcnn_r50_fpn.py',
'../_base_/datasets/lvis_v1_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
pretrained='torchvision://resnet101',
backbone=dict(depth=101),
roi_head=dict(
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
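                # 'tempearture' below is spelled to match the argument name
                # declared by mmdet's NormedLinear layer; keep the spelling as-is.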
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(dataset=dict(pipeline=train_pipeline)))
evaluation = dict(interval=24, metric=['bbox', 'segm'])
|
[
"zhangjunyi8@huawei.com"
] |
zhangjunyi8@huawei.com
|
a87804d2d25c07f79802384fc10580b459fae10e
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/media_source/test_local_source.py
|
bc637caab808964792012429bc925ee473d435b8
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 10,965
|
py
|
"""Test Local Media Source."""
from collections.abc import AsyncGenerator
from http import HTTPStatus
import io
from pathlib import Path
from tempfile import TemporaryDirectory
from unittest.mock import patch
import pytest
from homeassistant.components import media_source, websocket_api
from homeassistant.components.media_source import const
from homeassistant.config import async_process_ha_core_config
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from tests.common import MockUser
from tests.typing import ClientSessionGenerator, WebSocketGenerator
@pytest.fixture
async def temp_dir(hass: HomeAssistant) -> AsyncGenerator[str, None]:
"""Return a temp dir."""
with TemporaryDirectory() as tmpdirname:
target_dir = Path(tmpdirname) / "another_subdir"
target_dir.mkdir()
await async_process_ha_core_config(
hass, {"media_dirs": {"test_dir": str(target_dir)}}
)
assert await async_setup_component(hass, const.DOMAIN, {})
yield str(target_dir)
async def test_async_browse_media(hass: HomeAssistant) -> None:
"""Test browse media."""
local_media = hass.config.path("media")
await async_process_ha_core_config(
hass, {"media_dirs": {"local": local_media, "recordings": local_media}}
)
await hass.async_block_till_done()
assert await async_setup_component(hass, const.DOMAIN, {})
await hass.async_block_till_done()
# Test path not exists
with pytest.raises(media_source.BrowseError) as excinfo:
await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{const.DOMAIN}/local/test/not/exist"
)
assert str(excinfo.value) == "Path does not exist."
# Test browse file
with pytest.raises(media_source.BrowseError) as excinfo:
await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{const.DOMAIN}/local/test.mp3"
)
assert str(excinfo.value) == "Path is not a directory."
# Test invalid base
with pytest.raises(media_source.BrowseError) as excinfo:
await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{const.DOMAIN}/invalid/base"
)
assert str(excinfo.value) == "Unknown source directory."
# Test directory traversal
with pytest.raises(media_source.BrowseError) as excinfo:
await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{const.DOMAIN}/local/../configuration.yaml"
)
assert str(excinfo.value) == "Invalid path."
# Test successful listing
media = await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{const.DOMAIN}"
)
assert media
media = await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{const.DOMAIN}/local/."
)
assert media
media = await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{const.DOMAIN}/recordings/."
)
assert media
async def test_media_view(
hass: HomeAssistant, hass_client: ClientSessionGenerator
) -> None:
"""Test media view."""
local_media = hass.config.path("media")
await async_process_ha_core_config(
hass, {"media_dirs": {"local": local_media, "recordings": local_media}}
)
await hass.async_block_till_done()
assert await async_setup_component(hass, const.DOMAIN, {})
await hass.async_block_till_done()
client = await hass_client()
# Protects against non-existent files
resp = await client.get("/media/local/invalid.txt")
assert resp.status == HTTPStatus.NOT_FOUND
resp = await client.get("/media/recordings/invalid.txt")
assert resp.status == HTTPStatus.NOT_FOUND
# Protects against non-media files
resp = await client.get("/media/local/not_media.txt")
assert resp.status == HTTPStatus.NOT_FOUND
# Protects against unknown local media sources
resp = await client.get("/media/unknown_source/not_media.txt")
assert resp.status == HTTPStatus.NOT_FOUND
# Fetch available media
resp = await client.get("/media/local/test.mp3")
assert resp.status == HTTPStatus.OK
resp = await client.get("/media/local/Epic Sax Guy 10 Hours.mp4")
assert resp.status == HTTPStatus.OK
resp = await client.get("/media/recordings/test.mp3")
assert resp.status == HTTPStatus.OK
async def test_upload_view(
hass: HomeAssistant,
hass_client: ClientSessionGenerator,
temp_dir: str,
tmp_path: Path,
hass_admin_user: MockUser,
) -> None:
"""Allow uploading media."""
# We need a temp dir that's not under tempdir fixture
extra_media_dir = tmp_path
hass.config.media_dirs["another_path"] = temp_dir
img = (Path(__file__).parent.parent / "image_upload/logo.png").read_bytes()
def get_file(name):
pic = io.BytesIO(img)
pic.name = name
return pic
client = await hass_client()
# Test normal upload
res = await client.post(
"/api/media_source/local_source/upload",
data={
"media_content_id": "media-source://media_source/test_dir/.",
"file": get_file("logo.png"),
},
)
assert res.status == 200
assert (Path(temp_dir) / "logo.png").is_file()
# Test with bad media source ID
for bad_id in (
# Subdir doesn't exist
"media-source://media_source/test_dir/some-other-dir",
# Main dir doesn't exist
"media-source://media_source/test_dir2",
# Location is invalid
"media-source://media_source/test_dir/..",
# Domain != media_source
"media-source://nest/test_dir/.",
# Other directory
f"media-source://media_source/another_path///{extra_media_dir}/",
# Completely something else
"http://bla",
):
res = await client.post(
"/api/media_source/local_source/upload",
data={
"media_content_id": bad_id,
"file": get_file("bad-source-id.png"),
},
)
assert res.status == 400, bad_id
assert not (Path(temp_dir) / "bad-source-id.png").is_file()
# Test invalid POST data
res = await client.post(
"/api/media_source/local_source/upload",
data={
"media_content_id": "media-source://media_source/test_dir/.",
"file": get_file("invalid-data.png"),
"incorrect": "format",
},
)
assert res.status == 400
assert not (Path(temp_dir) / "invalid-data.png").is_file()
# Test invalid content type
text_file = io.BytesIO(b"Hello world")
text_file.name = "hello.txt"
res = await client.post(
"/api/media_source/local_source/upload",
data={
"media_content_id": "media-source://media_source/test_dir/.",
"file": text_file,
},
)
assert res.status == 400
assert not (Path(temp_dir) / "hello.txt").is_file()
# Test invalid filename
with patch(
"aiohttp.formdata.guess_filename", return_value="../invalid-filename.png"
):
res = await client.post(
"/api/media_source/local_source/upload",
data={
"media_content_id": "media-source://media_source/test_dir/.",
"file": get_file("../invalid-filename.png"),
},
)
assert res.status == 400
assert not (Path(temp_dir) / "../invalid-filename.png").is_file()
# Remove admin access
hass_admin_user.groups = []
res = await client.post(
"/api/media_source/local_source/upload",
data={
"media_content_id": "media-source://media_source/test_dir/.",
"file": get_file("no-admin-test.png"),
},
)
assert res.status == 401
assert not (Path(temp_dir) / "no-admin-test.png").is_file()
async def test_remove_file(
hass: HomeAssistant,
hass_ws_client: WebSocketGenerator,
temp_dir: str,
hass_admin_user: MockUser,
) -> None:
"""Allow uploading media."""
msg_count = 0
file_count = 0
def msgid():
nonlocal msg_count
msg_count += 1
return msg_count
def create_file():
nonlocal file_count
file_count += 1
to_delete_path = Path(temp_dir) / f"to_delete_{file_count}.txt"
to_delete_path.touch()
return to_delete_path
client = await hass_ws_client(hass)
to_delete = create_file()
await client.send_json(
{
"id": msgid(),
"type": "media_source/local_source/remove",
"media_content_id": f"media-source://media_source/test_dir/{to_delete.name}",
}
)
msg = await client.receive_json()
assert msg["success"]
assert not to_delete.exists()
# Test with bad media source ID
extra_id_file = create_file()
for bad_id, err in (
# Not exists
(
"media-source://media_source/test_dir/not_exist.txt",
websocket_api.ERR_NOT_FOUND,
),
# Only a dir
("media-source://media_source/test_dir", websocket_api.ERR_NOT_SUPPORTED),
# File with extra identifiers
(
f"media-source://media_source/test_dir/bla/../{extra_id_file.name}",
websocket_api.ERR_INVALID_FORMAT,
),
# Location is invalid
("media-source://media_source/test_dir/..", websocket_api.ERR_INVALID_FORMAT),
# Domain != media_source
("media-source://nest/test_dir/.", websocket_api.ERR_INVALID_FORMAT),
# Completely something else
("http://bla", websocket_api.ERR_INVALID_FORMAT),
):
await client.send_json(
{
"id": msgid(),
"type": "media_source/local_source/remove",
"media_content_id": bad_id,
}
)
msg = await client.receive_json()
assert not msg["success"]
assert msg["error"]["code"] == err
assert extra_id_file.exists()
# Test error deleting
to_delete_2 = create_file()
with patch("pathlib.Path.unlink", side_effect=OSError):
await client.send_json(
{
"id": msgid(),
"type": "media_source/local_source/remove",
"media_content_id": f"media-source://media_source/test_dir/{to_delete_2.name}",
}
)
msg = await client.receive_json()
assert not msg["success"]
assert msg["error"]["code"] == websocket_api.ERR_UNKNOWN_ERROR
# Test requires admin access
to_delete_3 = create_file()
hass_admin_user.groups = []
await client.send_json(
{
"id": msgid(),
"type": "media_source/local_source/remove",
"media_content_id": f"media-source://media_source/test_dir/{to_delete_3.name}",
}
)
msg = await client.receive_json()
assert not msg["success"]
assert to_delete_3.is_file()
|
[
"noreply@github.com"
] |
home-assistant.noreply@github.com
|
e8604bf04360abbcf19d34b33ca5f23bde6bf241
|
9837c961c37ef39b8658117ffd58df3bc2a57e46
|
/util/operate_global.py
|
fcb96fd8def0e49b19b13d844a2698aae00aebab
|
[] |
no_license
|
tan0833/dynamic_inventory_api
|
3012f895a44ab11b77572851f3f0b51c51341a15
|
ab1d739e939b95d44596bcc77864ce75a5189bde
|
refs/heads/master
| 2021-07-11T05:25:22.191705
| 2021-03-04T06:17:52
| 2021-03-04T06:17:52
| 232,775,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 790
|
py
|
'''
This module stores data returned by the API in a shared global dictionary,
making it easy for later calls to reuse the values.
'''
from config.Log import Log
import traceback
class GlobalDict:
    def __init__(self, shared_dict):  # renamed from 'dict' to avoid shadowing the built-in
        self.log = Log()
        self.__global_dict = shared_dict
    def set_dict(self, key, value):
        self.__global_dict[key] = value
    def get_dict(self, key):
        try:
            return self.__global_dict[key]
        except KeyError:
            self.log.error('The key [%s] does not exist\n' % key)
        except Exception as e:
            self.log.error('Unknown error:\n%s' % str(traceback.format_exc()))
            raise e
if __name__ == '__main__':
d = {}
a = GlobalDict(d)
a.set_dict('c','12')
b = a.get_dict('c')
print(b)
|
[
"tanxueqiang@chinasoftinc.com"
] |
tanxueqiang@chinasoftinc.com
|
817bef268e1d15ce277c10d89f96d2d96be5bd73
|
7fb95d6a5b4712c257c79e2a31b2c5443f895af7
|
/Python/iterative/icount/test_icount.py
|
710e78dc79ce5112e819ad5cb3eb8894843431a5
|
[
"CC0-1.0"
] |
permissive
|
AlexBuzin/Algorithms
|
e6c716cb02bb18deca56be2e954c989a90101437
|
904df4f93000a81f87f2b1a1a0029fdef29cc841
|
refs/heads/main
| 2023-07-15T06:54:04.707656
| 2021-08-31T08:00:55
| 2021-08-31T08:00:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,070
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""The icount function test module."""
__author__ = "Stanislav D. Kudriavtsev"
from itertools import count
from pytest import fixture, raises
from icount import icount
# pylint: disable=redefined-outer-name
NITERS = 10
@fixture
def numbers():
"""Numbers for (i)count."""
return [-3, 0, 1.5, 2 - 1j, 4.2j]
@fixture
def not_numbers():
"""Non-numbers for (i)count."""
return [None, [1, 2], {-3, 4}, (6, 9.7)]
def _test_mech(data):
"""The test function for (i)count functions."""
for start in data:
for step in data:
icnt = icount(start, step)
cnt = count(start, step)
for _ in range(NITERS):
assert next(cnt) == next(icnt)
def test_count_to_success(numbers):
"""Test successful counts (numbers only)."""
_test_mech(numbers)
def test_count_to_fail(not_numbers):
"""Test unsuccessful counts (not numbers)."""
for entry in not_numbers:
with raises(TypeError):
next(icount(entry, entry))
|
[
"staniskudriavtsev@yandex.ru"
] |
staniskudriavtsev@yandex.ru
|
f8043f24603be8397d9b38e57c40cd0715eb1b90
|
12746e788495a69d9a56468d0c17860064718796
|
/DjangoGalleryProject/asgi.py
|
9c75b1d66593602406125ca21c98b1ea39cb59e0
|
[
"MIT"
] |
permissive
|
Loisa-Kitakaya/django-gallery
|
aa3f1f0ac1d168dec3ad15e2b405a086759e8583
|
dfe1b816b3756ee15ccd7765346f573ffdd928ea
|
refs/heads/master
| 2021-11-27T03:04:50.269682
| 2020-03-03T13:14:43
| 2020-03-03T13:14:43
| 243,682,160
| 0
| 0
|
MIT
| 2021-09-22T18:40:20
| 2020-02-28T04:59:59
|
Python
|
UTF-8
|
Python
| false
| false
| 417
|
py
|
"""
ASGI config for DjangoGalleryProject project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoGalleryProject.settings')
application = get_asgi_application()
|
[
"loisakitakaya@gmail.com"
] |
loisakitakaya@gmail.com
|
8dd59bbc40376cb8883b20951a8e2e36ce56d2a4
|
8aae25a40542e228a9d4e934c1f9331937682aec
|
/utils/utilities.py
|
431fd67b405434171378d9d0a0c44e00e1d144cc
|
[] |
no_license
|
lijunhon/acoustic_scene
|
61acb94b4c56f31d27660bfafc83b98414af1257
|
328d0ca8a74c26b43233b46a8f5786b492ca1804
|
refs/heads/master
| 2021-06-27T15:15:08.284332
| 2020-11-14T14:13:34
| 2020-11-14T14:13:34
| 181,034,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,267
|
py
|
import numpy as np
import soundfile
import librosa
import os
from sklearn import metrics
import logging
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import config
def create_folder(fd):
if not os.path.exists(fd):
os.makedirs(fd)
def get_filename(path):
path = os.path.realpath(path)
name_ext = path.split('/')[-1]
name = os.path.splitext(name_ext)[0]
return name
def create_logging(log_dir, filemode):
create_folder(log_dir)
i1 = 0
while os.path.isfile(os.path.join(log_dir, '%04d.log' % i1)):
i1 += 1
log_path = os.path.join(log_dir, '%04d.log' % i1)
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename=log_path,
filemode=filemode)
# Print to console
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
return logging
def read_audio(path, target_fs=None):
(audio, fs) = soundfile.read(path)
if audio.ndim > 1:
audio = np.mean(audio, axis=1)
if target_fs is not None and fs != target_fs:
audio = librosa.resample(audio, orig_sr=fs, target_sr=target_fs)
fs = target_fs
return audio, fs
def calculate_scalar(x):
    if x.ndim == 2:
        axis = 0
    elif x.ndim == 3:
        axis = (0, 1)
    else:
        raise ValueError('x must be 2-d or 3-d, got ndim={}'.format(x.ndim))
    mean = np.mean(x, axis=axis)
    std = np.std(x, axis=axis)
    return mean, std
def scale(x, mean, std):
return (x - mean) / std
def inverse_scale(x, mean, std):
return x * std + mean
def calculate_accuracy(target, predict, classes_num, average=None):
"""Calculate accuracy.
Inputs:
target: integer array, (audios_num,)
predict: integer array, (audios_num,)
Outputs:
accuracy: float
"""
samples_num = len(target)
correctness = np.zeros(classes_num)
total = np.zeros(classes_num)
for n in range(samples_num):
total[target[n]] += 1
if target[n] == predict[n]:
correctness[target[n]] += 1
accuracy = correctness / total
if average is None:
return accuracy
elif average == 'macro':
return np.mean(accuracy)
else:
raise Exception('Incorrect average!')
def calculate_confusion_matrix(target, predict, classes_num):
"""Calculate confusion matrix.
Inputs:
target: integer array, (audios_num,)
predict: integer array, (audios_num,)
classes_num: int, number of classes
Outputs:
confusion_matrix: (classes_num, classes_num)
"""
confusion_matrix = np.zeros((classes_num, classes_num))
samples_num = len(target)
for n in range(samples_num):
confusion_matrix[target[n], predict[n]] += 1
return confusion_matrix
def print_accuracy(class_wise_accuracy, labels):
print('{:<30}{}'.format('Scene label', 'accuracy'))
print('------------------------------------------------')
for (n, label) in enumerate(labels):
print('{:<30}{:.3f}'.format(label, class_wise_accuracy[n]))
print('------------------------------------------------')
print('{:<30}{:.3f}'.format('Average', np.mean(class_wise_accuracy)))
def plot_confusion_matrix(confusion_matrix, title, labels, values):
"""Plot confusion matrix.
Inputs:
confusion_matrix: matrix, (classes_num, classes_num)
labels: list of labels
values: list of values to be shown in diagonal
    Outputs:
None
"""
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111)
cax = ax.matshow(confusion_matrix, cmap=plt.cm.Blues)
if labels:
ax.set_xticklabels([''] + labels, rotation=90, ha='left')
ax.set_yticklabels([''] + labels)
ax.xaxis.set_ticks_position('bottom')
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
for n in range(len(values)):
plt.text(n - 0.4, n, '{:.2f}'.format(values[n]), color='yellow')
plt.title(title)
plt.xlabel('Predicted')
plt.ylabel('Target')
plt.tight_layout()
plt.show()
def write_leaderboard_submission(submission_path, audio_names, predictions):
ix_to_lb = config.ix_to_lb
f = open(submission_path, 'w')
f.write('Id,Scene_label\n')
for n in range(len(audio_names)):
f.write('{}'.format(os.path.splitext(audio_names[n])[0]))
f.write(',')
f.write(ix_to_lb[predictions[n]])
f.write('\n')
f.close()
logging.info('Write result to {}'.format(submission_path))
def write_evaluation_submission(submission_path, audio_names, predictions):
ix_to_lb = config.ix_to_lb
f = open(submission_path, 'w')
for n in range(len(audio_names)):
f.write('audio/{}'.format(audio_names[n]))
f.write('\t')
f.write(ix_to_lb[predictions[n]])
f.write('\n')
f.close()
logging.info('Write result to {}'.format(submission_path))
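if __name__ == '__main__':
    # A minimal sanity check, assuming three classes; the targets and
    # predictions below are illustrative placeholders, not project data.
    target = np.array([0, 0, 1, 1, 2, 2])
    predict = np.array([0, 1, 1, 1, 2, 0])
    print(calculate_accuracy(target, predict, classes_num=3))  # per-class accuracy
    print(calculate_accuracy(target, predict, classes_num=3, average='macro'))
    print(calculate_confusion_matrix(target, predict, classes_num=3))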
|
[
"812143550@qq.com"
] |
812143550@qq.com
|
607394898c44755e60a85bcf201e935d2c46130a
|
20f2b05116ab1ad5b9421c52157097305d68ae66
|
/mxnet/tools/common/gluoncv_utils_export_helper.py
|
fa1febb5da4755b4a736f4719e55d35b0391787d
|
[] |
no_license
|
autumnfallenwang/pycode
|
0ac860bbbe6b64b49037652945df481f8d2264b9
|
449db37f0a93862e0610f37296ad066031eb2299
|
refs/heads/master
| 2022-12-13T23:23:49.011574
| 2020-09-14T16:25:31
| 2020-09-14T16:25:31
| 240,272,352
| 0
| 0
| null | 2022-12-07T18:30:03
| 2020-02-13T13:54:25
|
Python
|
UTF-8
|
Python
| false
| false
| 4,795
|
py
|
"""Helper utils for export HybridBlock to symbols."""
from __future__ import absolute_import
import mxnet as mx
from mxnet.base import MXNetError
from mxnet.gluon import HybridBlock
from mxnet.gluon import nn
class _DefaultPreprocess(HybridBlock):
"""Default preprocess block used by GluonCV.
The default preprocess block includes:
- mean [123.675, 116.28, 103.53]
- std [58.395, 57.12, 57.375]
- transpose to (B, 3, H, W)
It is used to transform from resized original images with shape (1, H, W, 3) or (B, H, W, 3)
in range (0, 255) and RGB color format.
"""
def __init__(self, **kwargs):
super(_DefaultPreprocess, self).__init__(**kwargs)
with self.name_scope():
mean = mx.nd.array([148.79, 88.74, 71.67]).reshape((1, 3, 1, 1))
scale = mx.nd.array([30.51, 22.25, 20.21]).reshape((1, 3, 1, 1))
self.init_mean = self.params.get_constant('init_mean', mean)
self.init_scale = self.params.get_constant('init_scale', scale)
# pylint: disable=arguments-differ
def hybrid_forward(self, F, x, init_mean, init_scale):
x = F.broadcast_minus(x, init_mean)
x = F.broadcast_div(x, init_scale)
# x = F.transpose(x, axes=(0, 3, 1, 2))
return x
class _SoftmaxProcess(HybridBlock):
def __init__(self):
super(_SoftmaxProcess, self).__init__()
def hybrid_forward(self, F, x):
return F.softmax(x, axis=1)
def export_block(path, block, data_shape=None, epoch=0, preprocess=True, layout='HWC',
ctx=mx.cpu()):
"""Helper function to export a HybridBlock to symbol JSON to be used by
    `SymbolBlock.imports`, `mxnet.mod.Module`, or the C++ interface.
Parameters
----------
path : str
Path to save model.
Two files path-symbol.json and path-xxxx.params will be created,
where xxxx is the 4 digits epoch number.
block : mxnet.gluon.HybridBlock
The hybridizable block. Note that normal gluon.Block is not supported.
data_shape : tuple of int, default is None
Fake data shape just for export purpose, in format (H, W, C).
If you don't specify ``data_shape``, `export_block` will try use some common data_shapes,
e.g., (224, 224, 3), (256, 256, 3), (299, 299, 3), (512, 512, 3)...
If any of this ``data_shape`` goes through, the export will succeed.
epoch : int
Epoch number of saved model.
    preprocess : mxnet.gluon.HybridBlock, default is True.
        Preprocess block prior to the network.
        By default (True), it subtracts the per-channel mean and divides by the
        per-channel scale hard-coded in ``_DefaultPreprocess`` (this module uses
        custom values, not the GluonCV defaults), and appends a softmax
        post-process to the network output.
        You can pass a custom preprocess HybridBlock, or disable preprocessing
        with ``preprocess=None``.
layout : str, default is 'HWC'
The layout for raw input data. By default is HWC. Supports 'HWC' and 'CHW'.
Note that image channel order is always RGB.
ctx: mx.Context, default mx.cpu()
Network context.
Returns
-------
None
"""
# input image layout
if data_shape is None:
data_shapes = [(s, s, 3) for s in (224, 256, 299, 300, 320, 416, 512, 600)]
else:
data_shapes = [data_shape]
    if preprocess:
        # add preprocess block
        if preprocess is True:
            preprocess = _DefaultPreprocess()
        elif not isinstance(preprocess, HybridBlock):
            raise TypeError("preprocess must be HybridBlock, given {}".format(type(preprocess)))
        # the softmax post-process is appended for every preprocess variant,
        # so construct it outside the True-only branch
        postprocess = _SoftmaxProcess()
        wrapper_block = nn.HybridSequential()
        preprocess.initialize()
        wrapper_block.add(preprocess)
        wrapper_block.add(block)
        wrapper_block.add(postprocess)
    else:
        wrapper_block = block
# try different data_shape if possible, until one fits the network
for dshape in data_shapes:
h, w, c = dshape
        if layout == 'HWC':
            x = mx.nd.zeros((1, h, w, c), ctx=ctx)
        elif layout == 'CHW':
            x = mx.nd.zeros((1, c, h, w), ctx=ctx)
        else:
            raise ValueError("Unsupported layout '{}'; expected 'HWC' or 'CHW'".format(layout))
# hybridize and forward once
wrapper_block.hybridize()
last_exception = None
try:
wrapper_block(x)
wrapper_block.export(path, epoch)
break
except MXNetError as e:
last_exception = e
if last_exception is not None:
raise RuntimeError(str(last_exception).splitlines()[0])
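if __name__ == '__main__':
    # Minimal usage sketch, assuming a stock mxnet model-zoo network; the
    # output prefix 'exported_resnet18' is an illustrative placeholder.
    from mxnet.gluon.model_zoo import vision
    net = vision.resnet18_v1(pretrained=True)
    export_block('exported_resnet18', net, data_shape=(224, 224, 3), layout='CHW')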
|
[
"qiushiwang0509@163.com"
] |
qiushiwang0509@163.com
|
91aa9483cabeb25ea754ababe244d944de6fe99f
|
44cfc32ae35487314cbf2ea83fbc96de9ac3e693
|
/Sorting/merge_sort.py
|
af08cfbed4e7e08386c3d5ea9da8271b1dbd8d35
|
[] |
no_license
|
Imonymous/Practice
|
738fdf2099a74127460a647fee8db97c95f322fc
|
ff6138d858777e6a669a5a387cd2f775c60c9aff
|
refs/heads/master
| 2020-03-18T16:53:15.029553
| 2019-11-09T23:29:27
| 2019-11-09T23:29:27
| 134,991,515
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 768
|
py
|
#!/usr/bin/env python
def merge(arr, start, mid, end):
n1 = mid - start + 1
n2 = end - mid
L = []
R = []
for i in range(start, end+1):
if i <= mid:
L.append(arr[i])
else:
R.append(arr[i])
i = 0 # track the Left array
j = 0 # track the Right array
k = start # track the output array
while i < n1 and j < n2:
if L[i] <= R[j]:
arr[k] = L[i]
i += 1
k += 1
else:
arr[k] = R[j]
j += 1
k += 1
while i < n1:
arr[k] = L[i]
i += 1
k += 1
while j < n2:
arr[k] = R[j]
j += 1
k += 1
def merge_sort(arr, start, end):
if start >= end:
return
mid = start + (end - start)//2
merge_sort(arr, start, mid)
merge_sort(arr, mid+1, end)
merge(arr, start, mid, end)
return arr
print(merge_sort([2,3,4,9,5,1], 0, 5))
|
[
"iman.mukherjee@gmail.com"
] |
iman.mukherjee@gmail.com
|
462ef7284be2acaf6d730c20b7d8773065520058
|
62531ac44615b1bd1e3952b7b72c6b3c345309b5
|
/programming_models/assignment2/random_test.py
|
2707d2065af5ffce25b8ba3111e7d0217baa4524
|
[] |
no_license
|
leanand/my_hws
|
8ea65945fce9070a78f954f7513b7adff110ea1e
|
cf3795df58fe79f6628b1829da19e539f217ffab
|
refs/heads/master
| 2021-03-30T16:01:40.097099
| 2017-09-22T22:28:02
| 2017-09-22T22:28:02
| 70,349,378
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 675
|
py
|
import random
radius = 200
rangeX = (0, 2500)
rangeY = (0, 2500)
qty = 100 # or however many points you want
# Generate a set of all points within 200 of the origin, to be used as offsets later
# There's probably a more efficient way to do this.
deltas = set()
for x in range(-radius, radius+1):
for y in range(-radius, radius+1):
if x*x + y*y <= radius*radius:
deltas.add((x,y))
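# Design note: this offset set costs O(radius^2) memory in exchange for O(1)
# exclusion lookups below; checking the distance from each candidate to every
# accepted point would use less memory but more time per sample.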
randPoints = []
excluded = set()
i = 0
while i<qty:
x = random.randrange(*rangeX)
y = random.randrange(*rangeY)
if (x,y) in excluded: continue
randPoints.append((x,y))
i += 1
excluded.update((x+dx, y+dy) for (dx,dy) in deltas)
print(randPoints)
|
[
"anandlovelin@gmail.com"
] |
anandlovelin@gmail.com
|
3cbb58f18a0adf960d0e16034001493bb8b40f37
|
bfe9beb3e6b7eff02dab414383705aee051cea80
|
/lib/stories/base.py
|
2b25d7f86d6e0da4c9dc369e367678439d88f7f1
|
[] |
no_license
|
windo/Theory-of-Magicity
|
33cb2690a8f156ebf4ba35aaea9a6d2d75c94d09
|
2cef1769cf01e4a76872dd0c97a1e2d28bd61f6c
|
refs/heads/master
| 2021-01-02T09:52:19.544624
| 2010-10-23T21:13:54
| 2010-10-23T21:13:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,408
|
py
|
from random import random
from lib import fields, actors
from lib.debug import dbg
from lib.resources import Resources
class StoryBook:
"""
Container for (sets of) stories
"""
tree = {}
all = []
def get_all(self):
        return self.all
    def get_elements(self, d):
        found = []  # avoid shadowing the built-in 'set'
        for v in d.values():
            if type(v) == dict:
                found += self.get_elements(v)
            else:
                found.append(v)
        return found
    def get_set(self, path):
        subtree = self.get(path)
        return self.get_elements(subtree)
def get(self, path):
path = path.split(".")
leaf = self.tree
for step in path:
if not step:
continue
leaf = leaf[step]
return leaf
def add(self, story):
path = story.storybook_path.split(".")
leaf = self.tree
for step in path:
            if step not in leaf:
leaf[step] = {}
leaf = leaf[step]
leaf[story.__name__] = story
self.all.append(story)
storybook = StoryBook()
class Story:
storybook_path = ""
themesong = "happytheme"
def __init__(self, world):
self.world = world
self.rsc = Resources()
# story state
self.game_over = False
self.game_result = None
self.exit_now = False
self.set_state("begin")
self.story_time = self.world.get_time()
self.last_narrative = 0
# stories need narrations
self.narrations = []
self.queue = []
dbg("Starting a story: %s" % (self))
def __str__(self):
return self.__class__.__name__
def debug_info(self):
return "%s: over=%s result=%s state=%s time=%.1f" % \
(self, self.game_over, self.game_result,
self.state, self.world.get_time() - self.story_time)
@classmethod
def story_name(klass):
try:
return klass.story_title
except:
return klass.__name__
@classmethod
def gen_menuitem(klass):
return { "action": klass, "txt": klass.story_name() }
def default_scenery(self):
"""
Reused a lot of times
"""
world = self.world
world.new_actor(actors.BackgroundHills, 0)
world.new_actor(actors.ForegroundGrass, 0)
world.new_actor(actors.ForegroundOldGrass, 0)
# paint some scenery
        for i in range(10):
            world.new_actor(actors.Tree, -250 + (500 / 10) * i + random() * 25)
        for i in range(3):
            world.new_actor(actors.Sun, -1200 + (2500 / 3) * i)
        for i in range(6):
            world.new_actor(actors.Cloud, -1200 + (2500 / 6) * i)
        # some ambient lifeforms
        for i in range(25):
            bird = world.new_actor(actors.FlockingBird, random() * 1000 - 500)
            bird.ypos = random() * bird.controller.ypos_upper_bound
        for i in range(2):
            bird = world.new_actor(actors.PredatorBird, random() * 1000 - 500)
            bird.ypos = random() * 10.0
# set music
self.rsc.set_music(self.themesong)
# all narrations done!
def narrated(self, delay = 5.0):
return len(self.narrations) == 0 and self.last_narrative + delay < self.world.get_time()
def narrate(self, text, showtime = 0.0, duration = 5.0, id = False):
if not id: id = text
# make sure this id is unique
if id in self.queue:
return
else:
self.queue.append(id)
now = self.world.get_time()
# render
img = self.rsc.fonts.textfont.render(text, True, (255, 255, 255))
# add to narrations list
self.narrations.append({ "showtime": now + showtime,
"cleartime": now + showtime + duration,
"img": img,
"id": id,
})
def batch_narrate(self, narrations, id = "narrative"):
"""
        process a tuple of tuples containing narrations
"""
if not id in self.queue:
showtime = 0
for narr in narrations:
showtime += narr[0]
narr = (narr[1], showtime) + narr[2:]
self.narrate(*narr)
self.queue.append(id)
else:
pass
def clear_queue(self, id):
if id in self.queue:
self.queue.pop(self.queue.index(id))
def set_state(self, state):
self.state = state
self.state_time = self.world.get_time()
self.action_times = {}
self.narrations = []
self.queue = []
def set_result(self, result, exit_now = False):
self.game_over = True
self.game_result = result
self.exit_now = exit_now
if result:
self.game_over_img = self.rsc.fonts.smallgoth.render("You Win!", True, (0, 0, 64))
else:
self.game_over_img = self.rsc.fonts.smallgoth.render("Game Over!", True, (0, 0, 64))
def time_passed(self, delay, action = "wait"):
        if action not in self.action_times:
self.action_times[action] = self.world.get_time()
return True
else:
if self.action_times[action] + delay < self.world.get_time():
self.action_times[action] = self.world.get_time()
return True
return False
def times(self):
now = self.world.get_time()
return now - self.story_time, now - self.state_time
    # subclasses must override these
    def update(self):
        raise NotImplementedError()
    def get_player(self):
        raise NotImplementedError()
def draw(self, draw_debug = False):
cam = self.world.camera
g = cam.graphics
# draw game over
if self.game_over:
g.blit(self.game_over_img,
(cam.sc_w() / 2 - self.game_over_img.get_width() / 2,
cam.sc_h() / 2 - self.game_over_img.get_height() / 2 - 100))
        # process narratives
draw_list = []
extra_offset = 0
i = 0
now = self.world.get_time()
while i < len(self.narrations):
narr = self.narrations[i]
showtime = narr["showtime"]
cleartime = narr["cleartime"]
if showtime < now:
if cleartime < now:
if cleartime + 1.0 < now:
if narr["id"]:
self.queue.pop(self.queue.index(narr["id"]))
self.narrations.pop(i)
else:
part = (cleartime + 1.0 - now)
extra_offset += int(part * (narr["img"].get_height() + 5))
i += 1
else:
draw_list.append(narr["img"])
i += 1
else:
i += 1
if draw_list:
self.last_narrative = self.world.get_time()
# draw them
line_y = 10 + extra_offset
for img in draw_list:
g.blit(img, (10, line_y))
line_y += img.get_height() + 5
|
[
"siim@p6drad-teel.net"
] |
siim@p6drad-teel.net
|
33987b4cf2353663e5b9d7a66280c65649d2691d
|
6ff95cd8a2d5f67673e378e461fbc7162af97226
|
/session06/gcd.py
|
3957847ad502e0b106f75e7d713c3f03110a1307
|
[] |
no_license
|
alastairng/mis3640
|
80c52a473afecc45d28693a6539cff490e24a789
|
3bb15fa4007a81249fb9a7181ad2ad907b4e90f4
|
refs/heads/master
| 2020-03-27T16:41:42.306924
| 2018-10-25T03:22:36
| 2018-10-25T03:22:36
| 146,800,422
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17
|
py
|
# Remainder (modulo) warm-ups
print(3 % 2)      # 1
print(25 % 10)    # 5
print(465 % 100)  # 65
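# A minimal sketch of Euclid's algorithm, which the remainder expressions
# above build toward (an assumption based on this file's name, gcd.py):
def gcd(a, b):
    # repeatedly replace (a, b) with (b, a % b) until the remainder is zero
    while b:
        a, b = b, a % b
    return a

print(gcd(465, 100))  # 5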
|
[
"36486239+alastairng@users.noreply.github.com"
] |
36486239+alastairng@users.noreply.github.com
|
e234a8079711be2c3d06150ede58ce02370a578b
|
c75ec82316ed5322c5844912ce9c528c24360b9f
|
/nsd1905/devweb/myansible/webadmin/admin.py
|
dcb81958f29fbd102b6bac0302d385ab3985e950
|
[] |
no_license
|
MrZhangzhg/nsd2019
|
a94cde22f2e4bd648bb9e56ca63827f558f3c083
|
54f6d2c7b348a69f13ad5f38f2fbdc8207528749
|
refs/heads/master
| 2021-08-22T17:38:27.697675
| 2020-02-22T08:36:21
| 2020-02-22T08:36:21
| 183,539,489
| 21
| 24
| null | 2020-05-17T12:07:55
| 2019-04-26T02:06:16
|
HTML
|
UTF-8
|
Python
| false
| false
| 167
|
py
|
from django.contrib import admin
from .models import HostGroup, Host, Module, Argument
for item in [HostGroup, Host, Module, Argument]:
admin.site.register(item)
|
[
"zhangzg@tedu.cn"
] |
zhangzg@tedu.cn
|
3d825b0e036a2c4f6a56c755ea8fe0225bc2d1f8
|
6610ebe9141f00678851a6f068ec1e5458bf050c
|
/code/graph_keyboard.py
|
19a6ffbf2f3e96351320d674a186a385b8d5dedc
|
[
"MIT"
] |
permissive
|
iamrajee/bio_medical
|
e9cec1d9e12c04d87b893d0c12c92d3a1b8fb963
|
8d91cd3838f46685faa057f93f5d22f8e6c4187b
|
refs/heads/master
| 2020-05-04T23:47:45.595827
| 2019-09-17T17:14:19
| 2019-09-17T17:14:19
| 179,555,562
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,147
|
py
|
import matplotlib.pyplot as plt
import networkx as nx
G=nx.Graph()
# G.add_nodes_from([1,2,3,4,5,6,7,8,9,0],key="A")
# # G.add_edges_from([(1,2),(2,3),(3,4),(5,8),(9,1),(2,3),(4,6),(8,2),(7,3)])
# G.add_weighted_edges_from([(1,2,1),(2,3,2),(3,4,3),(5,8,4),(9,1,5),(2,3,6),(4,6,7),(8,2,8),(7,3,9)])
# keyboard_config = [
# ('1','2','3','4','5','6','7','8','9','0'),
# ('q','w','e','r','t','y','u','i','o','p'),
# ('a','s','d','f','g','h','j','k','l'),
# ('z','x','c','v','b','n','m'),
# ('\t\tspace\t\t','backspace','enter','save')
# ]
keyboard_config = [
('1','2','3'),
('q','w','e'),
('a','s','d'),
]
for t in keyboard_config:  # iterate over the keyboard rows themselves
    G.add_nodes_from(t)
    # chain neighbouring keys in the row, plus a wrap-around edge to close the loop
    e = [(t[i], t[i + 1], 1) for i in range(len(t) - 1)]
    e.append((t[0], t[-1], 1))
    G.add_weighted_edges_from(e)
print(G.nodes(data=True))
nx.draw(G)
plt.show()
|
[
"singh.raj1997@gmail.com"
] |
singh.raj1997@gmail.com
|
e54071db5aba3ebba0b16b9606f8a8b131066a40
|
93cf12f4008b3e4514a9372c951b3bb675212f3a
|
/gmso/parameterization/topology_parameterizer.py
|
3811650150f9cb71272879acbac51d61a69952c6
|
[
"MIT"
] |
permissive
|
daico007/gmso
|
77cde76e3fc8eb9ad540062d02a2ec312b9ff8a9
|
0da111c50f35aa9036ffae92e0222e34263b43d2
|
refs/heads/master
| 2023-08-31T00:19:34.003435
| 2023-05-12T16:59:16
| 2023-05-12T16:59:16
| 243,855,307
| 1
| 0
|
MIT
| 2022-07-01T19:37:15
| 2020-02-28T21:12:36
|
Python
|
UTF-8
|
Python
| false
| false
| 19,464
|
py
|
"""The parameterizer module for a gmso Topology."""
import warnings
from typing import Dict, Union
import networkx as nx
from boltons.setutils import IndexedSet
from pydantic import Field
from gmso.abc.gmso_base import GMSOBase
from gmso.core.forcefield import ForceField
from gmso.core.topology import Topology
from gmso.exceptions import GMSOError
from gmso.parameterization.foyer_utils import (
get_atomtyping_rules_provider,
get_topology_graph,
typemap_dict,
)
from gmso.parameterization.isomorph import (
partition_isomorphic_topology_graphs,
top_node_match,
)
from gmso.parameterization.molecule_utils import (
assert_no_boundary_bonds,
molecule_angles,
molecule_bonds,
molecule_dihedrals,
molecule_impropers,
)
from gmso.parameterization.utils import POTENTIAL_GROUPS
class ParameterizationError(GMSOError):
"""Raise when parameterization fails."""
class TopologyParameterizationConfig(GMSOBase):
"""Configuration options for parameterizing a topology."""
clone_topology: bool = Field(
default=False,
description="If true, clone the topology and apply parameters to the cloned one.",
) # Unused
match_ff_by: str = Field(
default=None,
description="The site's' label used to matched with the provided dictionary.",
)
identify_connections: bool = Field(
default=False,
description="If true, add connections identified using networkx graph matching to match"
"the topology's bonding graph to smaller sub-graphs that correspond to an "
"angle, dihedral, improper etc",
)
identify_connected_components: bool = Field(
default=False,
description="A flag to determine whether or not to search the topology"
" for repeated disconnected structures, otherwise known as "
"molecules and type each molecule only once.",
)
use_molecule_info: bool = Field(
default=False,
description="A flag to determine whether or not to look at site.molecule "
"to look parameterize each molecule only once. Will only be used if "
"identify_connected_components=False",
) # Unused
assert_bond_params: bool = Field(
default=True,
description="If True, an error is raised if parameters are not found for "
"all system bonds.",
)
assert_angle_params: bool = Field(
default=True,
description="If True, an error is raised if parameters are not found for "
"all system angles",
)
    assert_dihedral_params: bool = Field(
        default=True,
        description="If True, an error is raised if parameters are not found for "
        "all system dihedrals.",
    )
assert_improper_params: bool = Field(
default=False,
description="If True, an error is raised if parameters are not found for "
"all system impropers.",
)
remove_untyped: bool = Field(
default=False,
description="If True, after the atomtyping and parameterization step, "
"remove all connection that has no connection_type",
)
fast_copy: bool = Field(
default=True,
description="If True, don't deepcopy sympy expression and sympy independent, "
"variables to save time on parameterization step.",
)
class TopologyParameterizer(GMSOBase):
"""Utility class to parameterize a topology with gmso Forcefield."""
topology: Topology = Field(..., description="The gmso topology.")
forcefields: Union[ForceField, Dict[str, ForceField]] = Field(
...,
description="The gmso forcefield/ a dictionary of gmso "
"forcefields per molecule/group, where the keys "
"should match the molecule/group names",
)
config: TopologyParameterizationConfig = Field(
..., description="The configuration options for the parameterizer."
)
def get_ff(self, key=None):
"""Return the forcefield of choice by looking up the forcefield dictionary."""
if isinstance(self.forcefields, Dict):
return self.forcefields.get(key)
else:
return self.forcefields
def _parameterize_sites(self, sites, typemap, ff, use_molecule_info=None):
"""Parameterize sites with appropriate atom-types from the forcefield."""
for j, site in enumerate(sites):
site.atom_type = ff.get_potential(
"atom_type", typemap[j]["atomtype"]
).clone(self.config.fast_copy)
assert site.atom_type, site
def _parameterize_connections(
self,
top,
ff,
label_type=None,
label=None,
):
"""Parameterize connections with appropriate potentials from the forcefield."""
        if label_type and label:
            is_group = label_type == "group"
            bonds = molecule_bonds(top, label, is_group)
            angles = molecule_angles(top, label, is_group)
            dihedrals = molecule_dihedrals(top, label, is_group)
            impropers = molecule_impropers(top, label, is_group)
else:
bonds = top.bonds
angles = top.angles
dihedrals = top.dihedrals
impropers = top.impropers
self._apply_connection_parameters(
bonds, ff, self.config.assert_bond_params
)
self._apply_connection_parameters(
angles, ff, self.config.assert_angle_params
)
self._apply_connection_parameters(
dihedrals, ff, self.config.assert_dihedral_params
)
self._apply_connection_parameters(
impropers, ff, self.config.assert_improper_params
)
def _apply_connection_parameters(
self, connections, ff, error_on_missing=True
):
"""Find and assign potentials from the forcefield for the provided connections."""
visited = dict()
for connection in connections:
group, connection_identifiers = self.connection_identifier(
connection
)
match = None
for identifier_key in connection_identifiers:
if tuple(identifier_key) in visited:
match = visited[tuple(identifier_key)]
break
match = ff.get_potential(
group=group,
key=identifier_key,
return_match_order=True,
warn=True,
)
if match:
visited[tuple(identifier_key)] = match
break
if not match and error_on_missing:
raise ParameterizationError(
f"No parameters found for connection {connection}, group: {group}, "
f"identifiers: {connection_identifiers} in the Forcefield."
)
elif match:
setattr(
connection, group, match[0].clone(self.config.fast_copy)
)
matched_order = [
connection.connection_members[i] for i in match[1]
]
# connection.connection_members = matched_order
if not match[0].member_types:
connection.connection_type.member_types = tuple(
member.atom_type.name for member in matched_order
)
if not match[0].member_classes:
connection.connection_type.member_classes = tuple(
member.atom_type.atomclass for member in matched_order
)
def _parameterize(
self, top, typemap, label_type=None, label=None, use_molecule_info=False
):
"""Parameterize a topology/subtopology based on an atomtype map."""
if label and label_type:
forcefield = self.get_ff(label)
sites = top.iter_sites(label_type, label)
else:
forcefield = self.get_ff(top.name)
sites = top.sites
self._parameterize_sites(
sites, typemap, forcefield, use_molecule_info=use_molecule_info
)
self._parameterize_connections(
top,
forcefield,
label_type,
label,
)
def _set_combining_rule(self):
"""Verify all the provided forcefields have the same combining rule and set it for the Topology."""
if isinstance(self.forcefields, dict):
all_comb_rules = set(
ff.combining_rule for ff in self.forcefields.values()
)
else:
all_comb_rules = {self.forcefields.combining_rule}
        if len(all_comb_rules) != 1:
            raise ParameterizationError(
                "Combining rules of the provided forcefields do not "
                "match, please provide forcefields with the same scaling "
                "factors that apply to a Topology"
            )
self.topology.combining_rule = all_comb_rules.pop()
def _set_scaling_factors(self):
"""Set either per-molecule or global scaling factors for the topology based on the forcefields provided."""
# ToDo: Set other scaling factors by extending the forcefield schema
# ToDo: What to do when all the scaling factors matchup? Should we promote them to be global?
# ToDo: Do we want to also parse other interaction if provided?
lj_scales = {
f"nonBonded{interaction}Scale": interaction
for interaction in ["12", "13", "14"]
}
electrostatics_scales = {
f"electrostatics{interaction}Scale": interaction
for interaction in ["12", "13", "14"]
}
if isinstance(self.forcefields, Dict):
for group_or_molecule, ff in self.forcefields.items():
for name, interaction in lj_scales.items():
if ff.scaling_factors.get(name) is not None:
self.topology.set_lj_scale(
ff.scaling_factors[name],
interaction=interaction,
molecule_id=group_or_molecule,
)
for name, interaction in electrostatics_scales.items():
if ff.scaling_factors.get(name) is not None:
self.topology.set_electrostatics_scale(
ff.scaling_factors[name],
interaction=interaction,
molecule_id=group_or_molecule,
)
else:
for name, interaction in lj_scales.items():
if self.forcefields.scaling_factors.get(name) is not None:
self.topology.set_lj_scale(
self.forcefields.scaling_factors[name],
interaction=interaction,
)
for name, interaction in electrostatics_scales.items():
if self.forcefields.scaling_factors.get(name) is not None:
self.topology.set_electrostatics_scale(
self.forcefields.scaling_factors[name],
interaction=interaction,
)
def run_parameterization(self):
"""Run parameterization of the topology with give forcefield(s) and configuration."""
if self.topology.is_typed():
raise ParameterizationError(
"Cannot parameterize a typed topology. Please provide a topology without any types"
)
self._set_combining_rule() # Fail Early if no match
if self.config.identify_connections:
"""ToDo: This mutates the topology and is agnostic to downstream
errors. So, here we should use index only option"""
self.topology.identify_connections()
if isinstance(self.forcefields, Dict):
labels = self.topology.unique_site_labels(
self.config.match_ff_by, name_only=True
)
if not labels or labels == IndexedSet([None]):
# raise ParameterizationError(
warnings.warn(
f"The provided gmso topology doesn't have any group/molecule."
f"Either use a single forcefield to apply to to whole topology "
f"or provide an appropriate topology whose molecule names are "
f"the keys of the `forcefields` dictionary. Provided Forcefields: "
f"{self.forcefields}, Topology: {self.topology}"
)
assert_no_boundary_bonds(self.topology)
for label in labels:
if label not in self.forcefields:
warnings.warn(
f"Group/molecule {label} will not be parameterized, as the forcefield to parameterize it "
f"is missing."
) # FixMe: Will warning be enough?
else:
typemap = self._get_atomtypes(
self.get_ff(label),
self.topology,
self.config.match_ff_by,
label,
self.config.use_molecule_info,
self.config.identify_connected_components,
)
self._parameterize(
self.topology,
typemap,
label_type=self.config.match_ff_by,
label=label,
use_molecule_info=self.config.use_molecule_info, # This will be removed from the future iterations
)
else:
typemap = self._get_atomtypes(
self.get_ff(),
self.topology,
use_molecule_info=self.config.use_molecule_info,
use_isomorphic_checks=self.config.identify_connected_components,
)
self._parameterize(
self.topology,
typemap,
use_molecule_info=self.config.use_molecule_info,
)
self._set_scaling_factors() # Set global or per molecule scaling factors
self.topology.update_topology()
if self.config.remove_untyped:
# TEMP CODE: copied from foyer/general_forcefield.py, will update later
for i in range(self.topology.n_bonds - 1, -1, -1):
if not self.topology.bonds[i].bond_type:
self.topology._bonds.pop(i)
for i in range(self.topology.n_angles - 1, -1, -1):
if not self.topology.angles[i].angle_type:
self.topology._angles.pop(i)
for i in range(self.topology.n_dihedrals - 1, -1, -1):
if not self.topology.dihedrals[i].dihedral_type:
self.topology._dihedrals.pop(i)
for i in range(self.topology.n_impropers - 1, -1, -1):
if not self.topology.impropers[i].improper_type:
self.topology._impropers.pop(i)
@staticmethod
def connection_identifier(
connection,
    ): # This can be extended to incorporate a pluggable object from the forcefield.
"""Return the group and list of identifiers for a connection to query the forcefield for its potential."""
group = POTENTIAL_GROUPS[type(connection)]
return group, [
list(
member.atom_type.atomclass
for member in connection.connection_members
),
list(
member.atom_type.name
for member in connection.connection_members
),
]
@staticmethod
def _get_atomtypes(
forcefield,
topology,
label_type=None,
label=None,
use_molecule_info=False,
use_isomorphic_checks=False,
):
"""Run atom-typing in foyer and return the typemap."""
atom_typing_rules_provider = get_atomtyping_rules_provider(forcefield)
foyer_topology_graph = get_topology_graph(
topology,
label_type,
label,
)
if use_molecule_info:
# Iterate through foyer_topology_graph, which is a subgraph of label_type
typemap, reference = dict(), dict()
for connected_component in nx.connected_components(
foyer_topology_graph
):
subgraph = foyer_topology_graph.subgraph(connected_component)
nodes_idx = tuple(subgraph.nodes)
molecule = subgraph.nodes[nodes_idx[0]]["atom_data"].molecule
if molecule not in reference:
reference[molecule] = {
"typemap": typemap_dict(
atomtyping_rules_provider=atom_typing_rules_provider,
topology_graph=subgraph,
),
"graph": subgraph,
}
typemap.update(reference[molecule]["typemap"])
else:
if use_isomorphic_checks:
# Check for isomorphism submatching to typemap
matcher = nx.algorithms.isomorphism.GraphMatcher(
subgraph,
reference[molecule]["graph"],
node_match=top_node_match,
)
assert matcher.is_isomorphic()
for node in subgraph.nodes:
typemap[node] = reference[molecule]["typemap"][
matcher.mapping[node]
]
else:
# Assume nodes in repeated structures are in the same order
for node, ref_node in zip(
sorted(subgraph.nodes),
sorted(reference[molecule]["typemap"]),
):
typemap[node] = reference[molecule]["typemap"][
ref_node
]
return typemap
elif use_isomorphic_checks:
# Iterate through each isomorphic connected component
isomorphic_substructures = partition_isomorphic_topology_graphs(
foyer_topology_graph
)
typemap = {}
for graph, mirrors in isomorphic_substructures.items():
typemap.update(
typemap_dict(
atomtyping_rules_provider=atom_typing_rules_provider,
topology_graph=graph,
)
)
for mirror, mapping in mirrors:
for node in mirror:
typemap[node] = typemap[mapping[node]]
return typemap
else:
return typemap_dict(
topology_graph=foyer_topology_graph,
atomtyping_rules_provider=atom_typing_rules_provider,
)
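# --- Hedged usage sketch (an illustration, not part of the original module) ---
# How the classes above fit together, assuming `untyped_top` is an untyped
# gmso Topology and `ff` a gmso ForceField; both names are placeholders:
#
# config = TopologyParameterizationConfig(
#     match_ff_by="molecule",
#     identify_connections=True,
#     identify_connected_components=True,
# )
# TopologyParameterizer(
#     topology=untyped_top, forcefields=ff, config=config
# ).run_parameterization()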
|
[
"noreply@github.com"
] |
daico007.noreply@github.com
|
86a5cccacd4985d1ceef8dafb8cb826ed19a7e6c
|
f4a060eb5d0248044e846be138ddd1e2a72b08d4
|
/Rename_File.py
|
bfac4c5c7f866c2cc4f635d4e5e742993562d4d3
|
[] |
no_license
|
NahuatlC137/PythonSnippets
|
451e6b193327c9afb696a8015179fdb6fb353ba6
|
7dfeeca1f4978ca5a2cc6a34f417178f1a1985d7
|
refs/heads/master
| 2020-04-12T13:09:28.759385
| 2018-12-20T02:32:37
| 2018-12-20T02:32:37
| 162,513,830
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
import os
import pandas as pd
os.chdir("DIR")
NameKey = pd.read_excel('WB_NAME.xlsx', 'SHEET_NAME')
path = os.getcwd()
FileNames = os.listdir(path)
DealerAccountNumber = 0
FileExtension = '.pdf'
for file in FileNames:
    if file[:1].isdigit():
        # files are expected to start with a 7-digit account number
        DealerAccountNumber = int(file[:7])
        DocumentType = file[8:]
        # look up the dealer name for this account number in the Excel key
        Name = (NameKey.loc[NameKey['AccountNumber'] == DealerAccountNumber, 'Name'].item())
        os.rename(file, Name + FileExtension)
|
[
"noreply@github.com"
] |
NahuatlC137.noreply@github.com
|
181c283e5ce2f6749514fd8d40479ebffbf64a3f
|
a03c89a6145026e22207d652003b0425a75372bb
|
/intro-python/main.py
|
4559bc4c0759db9b83aac453004f35b1191cd7ce
|
[] |
no_license
|
Jaydenmay-040/software-basics
|
abecc51eb8bae17d5279b401cc1053b3fd2fcbc2
|
71309cb05597cb3712bdb668d12ead43914a7543
|
refs/heads/master
| 2023-06-09T12:18:38.097153
| 2021-07-01T14:25:36
| 2021-07-01T14:25:36
| 356,247,841
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
# Task 2 Sample Solution
radius = input("Please enter the radius: ")
_PI = 3.14
area = _PI * int(radius)**2
print("The area of a circle is: " + str(area))
# Calculate the circumference
circumference = 2 * _PI * int(radius)
print("The circumference of a cirlce of a circle is: " + str(circumference))
|
[
"jaydenmay040@gmail.com"
] |
jaydenmay040@gmail.com
|
dc1249667df773b5dc7a28c743814ccbf70e8042
|
539c267a58cb727c5f1925b67da0bbbae0b04de2
|
/insight_api_src/manage.py
|
7be5d1abe2cf5be6543437dd1d0f80ab4ed9a2aa
|
[] |
no_license
|
jiananarthurli/insight_api
|
e228b7cbd193b4eb2a9c3ad5a9b490816c1f65ed
|
c6c46f1fa96e3fe6d182ef6b7a575deaa3d6bee9
|
refs/heads/master
| 2022-12-17T08:58:29.978049
| 2020-10-03T04:42:04
| 2020-10-03T04:42:04
| 191,235,576
| 6
| 1
| null | 2022-12-08T05:17:11
| 2019-06-10T19:49:08
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 635
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'insight_api_src.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"reudnetwork@gmail.com"
] |
reudnetwork@gmail.com
|
03fea34e3c11916cfd291a566610c03c8d3de9fc
|
51f887286aa3bd2c3dbe4c616ad306ce08976441
|
/pybind/slxos/v17r_2_00/interface/ethernet/qos/flowcontrol/link_level_flowcontrol/__init__.py
|
3960a6dbe85e3ab3c78e7385317b495bc048176a
|
[
"Apache-2.0"
] |
permissive
|
b2220333/pybind
|
a8c06460fd66a97a78c243bf144488eb88d7732a
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
refs/heads/master
| 2020-03-18T09:09:29.574226
| 2018-04-03T20:09:50
| 2018-04-03T20:09:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,751
|
py
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class link_level_flowcontrol(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/ethernet/qos/flowcontrol/link-level-flowcontrol. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__flowcontrol_tx','__flowcontrol_rx',)
_yang_name = 'link-level-flowcontrol'
_rest_name = ''
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__flowcontrol_tx = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'on': {'value': 1}, u'off': {'value': 0}},), is_leaf=True, yang_name="flowcontrol-tx", rest_name="tx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Pause generation', u'display-when': u'((/local-node/swbd-number = "3000") or (/local-node/swbd-number = "3001") or (/local-node/swbd-number = "163") or (/local-node/swbd-number = "2000") or (/local-node/swbd-number = "2001") or (/local-node/swbd-number = "4000"))', u'alt-name': u'tx', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='enumeration', is_config=True)
self.__flowcontrol_rx = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'on': {'value': 1}, u'off': {'value': 0}},), is_leaf=True, yang_name="flowcontrol-rx", rest_name="rx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Pause reception', u'alt-name': u'rx', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='enumeration', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'ethernet', u'qos', u'flowcontrol', u'link-level-flowcontrol']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'Ethernet', u'qos', u'flowcontrol']
def _get_flowcontrol_tx(self):
"""
Getter method for flowcontrol_tx, mapped from YANG variable /interface/ethernet/qos/flowcontrol/link_level_flowcontrol/flowcontrol_tx (enumeration)
"""
return self.__flowcontrol_tx
def _set_flowcontrol_tx(self, v, load=False):
"""
Setter method for flowcontrol_tx, mapped from YANG variable /interface/ethernet/qos/flowcontrol/link_level_flowcontrol/flowcontrol_tx (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_flowcontrol_tx is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_flowcontrol_tx() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'on': {'value': 1}, u'off': {'value': 0}},), is_leaf=True, yang_name="flowcontrol-tx", rest_name="tx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Pause generation', u'display-when': u'((/local-node/swbd-number = "3000") or (/local-node/swbd-number = "3001") or (/local-node/swbd-number = "163") or (/local-node/swbd-number = "2000") or (/local-node/swbd-number = "2001") or (/local-node/swbd-number = "4000"))', u'alt-name': u'tx', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """flowcontrol_tx must be of a type compatible with enumeration""",
'defined-type': "brocade-qos-mls:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'on': {'value': 1}, u'off': {'value': 0}},), is_leaf=True, yang_name="flowcontrol-tx", rest_name="tx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Pause generation', u'display-when': u'((/local-node/swbd-number = "3000") or (/local-node/swbd-number = "3001") or (/local-node/swbd-number = "163") or (/local-node/swbd-number = "2000") or (/local-node/swbd-number = "2001") or (/local-node/swbd-number = "4000"))', u'alt-name': u'tx', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='enumeration', is_config=True)""",
})
self.__flowcontrol_tx = t
if hasattr(self, '_set'):
self._set()
def _unset_flowcontrol_tx(self):
self.__flowcontrol_tx = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'on': {'value': 1}, u'off': {'value': 0}},), is_leaf=True, yang_name="flowcontrol-tx", rest_name="tx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Pause generation', u'display-when': u'((/local-node/swbd-number = "3000") or (/local-node/swbd-number = "3001") or (/local-node/swbd-number = "163") or (/local-node/swbd-number = "2000") or (/local-node/swbd-number = "2001") or (/local-node/swbd-number = "4000"))', u'alt-name': u'tx', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='enumeration', is_config=True)
def _get_flowcontrol_rx(self):
"""
Getter method for flowcontrol_rx, mapped from YANG variable /interface/ethernet/qos/flowcontrol/link_level_flowcontrol/flowcontrol_rx (enumeration)
"""
return self.__flowcontrol_rx
def _set_flowcontrol_rx(self, v, load=False):
"""
Setter method for flowcontrol_rx, mapped from YANG variable /interface/ethernet/qos/flowcontrol/link_level_flowcontrol/flowcontrol_rx (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_flowcontrol_rx is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_flowcontrol_rx() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'on': {'value': 1}, u'off': {'value': 0}},), is_leaf=True, yang_name="flowcontrol-rx", rest_name="rx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Pause reception', u'alt-name': u'rx', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """flowcontrol_rx must be of a type compatible with enumeration""",
'defined-type': "brocade-qos-mls:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'on': {'value': 1}, u'off': {'value': 0}},), is_leaf=True, yang_name="flowcontrol-rx", rest_name="rx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Pause reception', u'alt-name': u'rx', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='enumeration', is_config=True)""",
})
self.__flowcontrol_rx = t
if hasattr(self, '_set'):
self._set()
def _unset_flowcontrol_rx(self):
self.__flowcontrol_rx = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'on': {'value': 1}, u'off': {'value': 0}},), is_leaf=True, yang_name="flowcontrol-rx", rest_name="rx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Pause reception', u'alt-name': u'rx', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='enumeration', is_config=True)
flowcontrol_tx = __builtin__.property(_get_flowcontrol_tx, _set_flowcontrol_tx)
flowcontrol_rx = __builtin__.property(_get_flowcontrol_rx, _set_flowcontrol_rx)
_pyangbind_elements = {'flowcontrol_tx': flowcontrol_tx, 'flowcontrol_rx': flowcontrol_rx, }
|
[
"badaniya@brocade.com"
] |
badaniya@brocade.com
|
9729b4cf7e0c626554531e11b13d2050abb5e64c
|
e71ff28628071069952b515a5a1f00f4fc7c7ec2
|
/crm1/accounts/migrations/0001_initial.py
|
253339af7dbd1a4728bbf8e574528a26b0f8557f
|
[] |
no_license
|
Nour-Elgeziry/Django-CRM
|
55f7bded95b7ad970e21302e865e29c469c13153
|
6d64f5d2a175a573a5d1757d25f3befbfffbb433
|
refs/heads/master
| 2022-11-29T13:22:38.244655
| 2020-08-11T15:50:51
| 2020-08-11T15:50:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 727
|
py
|
# Generated by Django 3.1 on 2020-08-10 06:38
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, null=True)),
('phone', models.CharField(max_length=200, null=True)),
('email', models.CharField(max_length=200, null=True)),
('date_created', models.DateTimeField(auto_now_add=True, null=True)),
],
),
]
|
[
"nourelgeziry@Nours-MacBook-Pro.local"
] |
nourelgeziry@Nours-MacBook-Pro.local
|
f08e83052579be7c992470ebd91a4a354a9d9d5c
|
9d21e2482234ce13fafe385baea359dd5a879f59
|
/GIL_test/single_thread.py
|
4539fd6d1cb1a9cc48ecf20e093c384a9051b452
|
[] |
no_license
|
hexiaoong99/spider_data_save
|
e64ea500ce3c93b11d50493a524a8567390ae051
|
8c7e099bad4b1f8e7c2880547335c167ed3929ad
|
refs/heads/master
| 2021-01-11T02:23:17.339594
| 2016-11-06T02:52:18
| 2016-11-06T02:52:18
| 70,970,657
| 0
| 0
| null | 2016-10-15T06:22:08
| 2016-10-15T06:22:08
| null |
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
'''
The GIL is essentially a global exclusive lock, and its existence has a
considerable impact on multi-threaded efficiency; it almost makes Python a
single-threaded program.
A reader might object: as long as the global lock is released diligently,
efficiency should not suffer. If the GIL can be released during
time-consuming I/O operations, throughput can still improve, and at worst it
should be no slower than a single thread. That is the theory; in practice,
Python is worse than you think.
Below we compare Python's efficiency with multiple threads versus a single
thread: one version runs the counter twice in a single thread, the other
runs it in multiple threads.
'''
# -*- coding: utf-8 -*-
from threading import Thread
import time
def my_counter():
i = 0
for _ in range(10000000):
i = i + 1
return True
def main():
thread_array = {}
start_time = time.time()
for tid in range(2):
t = Thread(target=my_counter)
thread_array[tid] = t
    # run the two counters strictly one after another: start each thread
    # and wait for it to finish before starting the next (single-thread case)
    for i in range(2):
        thread_array[i].start()
        thread_array[i].join()
end_time = time.time()
print("Total time: {}".format(end_time - start_time))
if __name__ == '__main__':
main()
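# --- Hedged companion sketch (not part of the original file) ---
# The multi-threaded counterpart the docstring alludes to would start both
# threads before joining either, so they contend for the GIL:
#
# def main_threaded():
#     threads = [Thread(target=my_counter) for _ in range(2)]
#     start_time = time.time()
#     for t in threads:
#         t.start()
#     for t in threads:
#         t.join()
#     print("Total time: {}".format(time.time() - start_time))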
|
[
"740322560@qq.com"
] |
740322560@qq.com
|
42a3440f5055af7250f46ea5da4734991bae627f
|
b30d7e28932056d69b3a3dba4e9c0c552ac19029
|
/model_evaluation_utils.py
|
2b1870d19748d7fd2e3cd4fd9a62a61b72ecc57d
|
[] |
no_license
|
rsawin/transfer_learning
|
19b9818a9f978ddc31080354bdc3538f32b870e3
|
d2a28f7947f481f3466f425e3fd21011c0c91243
|
refs/heads/master
| 2021-07-08T23:43:55.211542
| 2020-08-03T21:33:47
| 2020-08-03T21:33:47
| 174,645,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,201
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 31 20:05:23 2017
@author: DIP
@Copyright: Dipanjan Sarkar
"""
from sklearn import metrics
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.base import clone
from sklearn.preprocessing import label_binarize
from scipy import interp
from sklearn.metrics import roc_curve, auc
def get_metrics(true_labels, predicted_labels):
print('Accuracy:', np.round(
metrics.accuracy_score(true_labels,
predicted_labels),
4))
print('Precision:', np.round(
metrics.precision_score(true_labels,
predicted_labels,
average='weighted'),
4))
print('Recall:', np.round(
metrics.recall_score(true_labels,
predicted_labels,
average='weighted'),
4))
print('F1 Score:', np.round(
metrics.f1_score(true_labels,
predicted_labels,
average='weighted'),
4))
def train_predict_model(classifier,
train_features, train_labels,
test_features, test_labels):
# build model
classifier.fit(train_features, train_labels)
# predict using model
predictions = classifier.predict(test_features)
return predictions
def display_confusion_matrix(true_labels, predicted_labels, classes=[1, 0]):
total_classes = len(classes)
level_labels = [total_classes * [0], list(range(total_classes))]
cm = metrics.confusion_matrix(y_true=true_labels, y_pred=predicted_labels,
labels=classes)
cm_frame = pd.DataFrame(data=cm,
columns=pd.MultiIndex(levels=[['Predicted:'], classes],
labels=level_labels),
index=pd.MultiIndex(levels=[['Actual:'], classes],
labels=level_labels))
print(cm_frame)
def display_classification_report(true_labels, predicted_labels, classes=[1, 0]):
report = metrics.classification_report(y_true=true_labels,
y_pred=predicted_labels,
labels=classes)
print(report)
def display_model_performance_metrics(true_labels, predicted_labels, classes=[1, 0]):
print('Model Performance metrics:')
print('-' * 30)
get_metrics(true_labels=true_labels, predicted_labels=predicted_labels)
print('\nModel Classification report:')
print('-' * 30)
display_classification_report(true_labels=true_labels, predicted_labels=predicted_labels,
classes=classes)
print('\nPrediction Confusion Matrix:')
print('-' * 30)
display_confusion_matrix(true_labels=true_labels, predicted_labels=predicted_labels,
classes=classes)
def plot_model_decision_surface(clf, train_features, train_labels,
plot_step=0.02, cmap=plt.cm.RdYlBu,
markers=None, alphas=None, colors=None):
if train_features.shape[1] != 2:
raise ValueError("X_train should have exactly 2 columnns!")
x_min, x_max = train_features[:, 0].min() - plot_step, train_features[:, 0].max() + plot_step
y_min, y_max = train_features[:, 1].min() - plot_step, train_features[:, 1].max() + plot_step
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
clf_est = clone(clf)
clf_est.fit(train_features, train_labels)
if hasattr(clf_est, 'predict_proba'):
Z = clf_est.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
else:
Z = clf_est.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
le = LabelEncoder()
y_enc = le.fit_transform(train_labels)
n_classes = len(le.classes_)
plot_colors = ''.join(colors) if colors else [None] * n_classes
label_names = le.classes_
markers = markers if markers else [None] * n_classes
alphas = alphas if alphas else [None] * n_classes
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y_enc == i)
plt.scatter(train_features[idx, 0], train_features[idx, 1], c=color,
label=label_names[i], cmap=cmap, edgecolors='black',
marker=markers[i], alpha=alphas[i])
plt.legend()
plt.show()
def plot_model_roc_curve(clf, features, true_labels, label_encoder=None, class_names=None):
## Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
if hasattr(clf, 'classes_'):
class_labels = clf.classes_
elif label_encoder:
class_labels = label_encoder.classes_
elif class_names:
class_labels = class_names
else:
raise ValueError('Unable to derive prediction classes, please specify class_names!')
n_classes = len(class_labels)
y_test = label_binarize(true_labels, classes=class_labels)
if n_classes == 2:
if hasattr(clf, 'predict_proba'):
prob = clf.predict_proba(features)
y_score = prob[:, prob.shape[1] - 1]
elif hasattr(clf, 'decision_function'):
prob = clf.decision_function(features)
y_score = prob[:, prob.shape[1] - 1]
else:
raise AttributeError("Estimator doesn't have a probability or confidence scoring system!")
fpr, tpr, _ = roc_curve(y_test, y_score)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, label='ROC curve (area = {0:0.2f})'
''.format(roc_auc),
linewidth=2.5)
elif n_classes > 2:
if hasattr(clf, 'predict_proba'):
y_score = clf.predict_proba(features)
elif hasattr(clf, 'decision_function'):
y_score = clf.decision_function(features)
else:
raise AttributeError("Estimator doesn't have a probability or confidence scoring system!")
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
## Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
## Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
        # Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
## Plot ROC curves
plt.figure(figsize=(6, 4))
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]), linewidth=3)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]), linewidth=3)
for i, label in enumerate(class_labels):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(label, roc_auc[i]),
linewidth=2, linestyle=':')
else:
        raise ValueError('Number of classes should be at least 2')
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend(loc="lower right")
plt.show()
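# --- Hedged usage sketch (not part of the original module) ---
# Driving the helpers above with scikit-learn; the classifier and the
# X_train/y_train/X_test/y_test split are assumed placeholders:
#
# from sklearn.linear_model import LogisticRegression
# clf = LogisticRegression()
# predictions = train_predict_model(clf, X_train, y_train, X_test, y_test)
# display_model_performance_metrics(y_test, predictions, classes=[0, 1])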
|
[
"you@example.com"
] |
you@example.com
|
3fd59004b8a3ada46670dc8f08e82e5d397cce55
|
b7b5f5b52f07b576a20e74839136d397f14d0566
|
/main/admin.py
|
cd3bbcc3572a48186b2724bb94ba97c49bfe0e18
|
[] |
no_license
|
Chudische/Shabashka
|
02d7e81cb2bd317b36e73620fc197868c4d65e1c
|
c3bab797601e8509439dc6538ec1f712755eb8c9
|
refs/heads/main
| 2023-07-08T07:54:04.044559
| 2021-08-18T13:40:44
| 2021-08-18T13:40:44
| 315,249,268
| 0
| 1
| null | 2021-08-10T06:42:42
| 2020-11-23T08:34:46
|
Python
|
UTF-8
|
Python
| false
| false
| 4,510
|
py
|
import datetime
from django.contrib import admin
from import_export.admin import ImportExportModelAdmin
from import_export import resources
from .models import ShaUser, SubCategory, SuperCategory, Offer, AdditionalImage, Comment, ShaUserAvatar
from .models import UserReview, ChatMessage, Location
from .utilities import send_activation_notification
from .forms import SubCategoryForm
def send_activation_notifications(modeladmin, request, queryset):
    """ Send messages with an activation notification"""
    for rec in queryset:
        if not rec.is_activated:
            send_activation_notification(rec)
    modeladmin.message_user(request, "Notification emails have been sent")
send_activation_notifications.short_description = 'Send emails with an activation notification'
class NonativatedFilter(admin.SimpleListFilter):
    title = 'Completed activation?'
    parameter_name = 'actstate'
    def lookups(self, request, model_admin):
        return (
            ("activated", "Completed activation"),
            ("threedays", "Not activated for more than 3 days"),
            ("week", "Not activated for more than a week")
)
def queryset(self, request, queryset):
if self.value() == 'activated':
return queryset.filter(is_active=True, is_activated=True)
if self.value() == 'threedays':
date = datetime.date.today() - datetime.timedelta(days=3)
return queryset.filter(is_active=False, is_activated=False, date_joined__date__lt=date)
if self.value() == 'week':
date = datetime.date.today() - datetime.timedelta(weeks=1)
return queryset.filter(is_active=False, is_activated=False, date_joined__date__lt=date)
class LocationInline(admin.TabularInline):
model = Location
class ShaUserAdmin(admin.ModelAdmin):
list_display = ('__str__', 'is_activated', 'date_joined')
search_fields = ('username', 'email', 'first_name', 'last_name')
list_filter = (NonativatedFilter, )
inlines = (LocationInline, )
fields = (('username', 'email'), ('first_name', 'last_name'), 'average_rating',
('send_message', 'is_active', 'is_activated'),
('is_staff', 'is_superuser'),
'groups', 'user_permissions',
('last_login', 'date_joined'),
'favorite')
readonly_fields = ('last_login', 'date_joined')
actions = (send_activation_notifications, )
class SubCategoryInline(admin.TabularInline):
model = SubCategory
class SuperCategoryAdmin(admin.ModelAdmin):
exclude = ('super_category',)
inlines = (SubCategoryInline,)
class SubCategoryAdmin(admin.ModelAdmin):
form = SubCategoryForm
class AdditionalImageInline(admin.TabularInline):
model = AdditionalImage
class OfferAdmin(admin.ModelAdmin):
list_display = ('category', 'title', 'content', 'winner','author', 'created', 'status')
fields = (('category', 'author', 'status', 'winner'), 'title', 'content', 'price', 'image', 'is_active')
inlines = (AdditionalImageInline, LocationInline,)
class CommentAdmin(admin.ModelAdmin):
list_display = ('offer', 'author', 'content', 'price', 'created', 'is_active')
fields = (('offer', 'author', 'created'), 'content', ('price', 'time_amount', 'measure'), 'is_active')
readonly_fields = ('created',)
class UserReviewAdmin(admin.ModelAdmin):
list_display = ('offer', 'author', 'reviewal', 'speed', 'cost', 'accuracy', 'content', 'created')
fields = (('offer', 'author', 'reviewal', 'created'), ('speed', 'cost', 'accuracy'), 'content')
readonly_fields = ('created',)
class ChatMessageAdmin(admin.ModelAdmin):
list_display = ('offer', 'author', 'receiver', 'content', 'created')
    fields = ('offer', ('author', 'receiver', 'created'), 'content')
readonly_fields = ('created',)
class LocationAdmin(admin.ModelAdmin):
list_display = ('search_id', 'name')
# Register your models here.
admin.site.register(ShaUser, ShaUserAdmin)
admin.site.register(SuperCategory, SuperCategoryAdmin)
admin.site.register(SubCategory, SubCategoryAdmin)
admin.site.register(Offer, OfferAdmin)
admin.site.register(Comment, CommentAdmin)
admin.site.register(ShaUserAvatar)
admin.site.register(UserReview, UserReviewAdmin)
admin.site.register(ChatMessage, ChatMessageAdmin)
admin.site.register(Location, LocationAdmin)
|
[
"="
] |
=
|
a8e75ce6d64bb6b07b557d26e44569c370a3a5de
|
2631dcd293772e857c457c71cde8db2f3d93c450
|
/cam_frame _copy_paste.py
|
817f41bfe4b769fd6d69a56aae4caa20dba14aa6
|
[] |
no_license
|
Keshav-Asopa/python_projects
|
fec723e9bba646737354fe856a360c32ea3156e6
|
2ee05cc98f466f421c79d795c5fb52f5f4c47054
|
refs/heads/master
| 2020-03-19T13:53:53.874915
| 2018-09-19T16:42:25
| 2018-09-19T16:42:25
| 136,600,122
| 0
| 1
| null | 2019-10-13T03:24:48
| 2018-06-08T09:47:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,091
|
py
|
# A python program to copy the part of image from webcam and put it on another image
import cv2
#loading the image on which part of another image is to be pasted
img1 = cv2.imread("dog1.jpeg")
print("Press 'c' to capture the photo")
print("press 'q' to exit")
#to start the webcam or external web camera
capture = cv2.VideoCapture(0)
#camera will switch on
while capture.isOpened():
status,frame = capture.read()
#to draw a rectangle on a frame
cv2.rectangle(frame,(100,100),(250,250),(0,0,255),2)
cv2.imshow("frame1",frame)
if cv2.waitKey(1) & 0xFF == ord("c") :
#saving and copying the part of image into another image roi(region of interest)
roi = frame[100:250 , 100 :250] #coordinates of the rectangle form are (100,100) and (250,250)
#pasting the roi on another image
img1[50:roi.shape[0]+50, 50:roi.shape[1]+50] = roi
cv2.imshow("final_result", img1)
cv2.imshow("f1", frame)
#saving the image
cv2.imwrite("final_image.jpeg",img1)
capture.release()
cv2.waitKey(0)
    #exit the program
    if cv2.waitKey(1) & 0xFF == ord("q") :
        break
#release the camera and close the windows once the loop ends
capture.release()
cv2.destroyAllWindows()
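# --- Hedged note (not part of the original script) ---
# NumPy slicing above is frame[y1:y2, x1:x2]: rows (height) first, then
# columns (width). The 150x150 ROI cut from rows/cols 100:250 is pasted
# into img1 at offset (50, 50) by img1[50:roi.shape[0]+50, 50:roi.shape[1]+50] = roi.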
|
[
"noreply@github.com"
] |
Keshav-Asopa.noreply@github.com
|
acdb55b99babfd077efdf9e844794993a1a1f1dd
|
8949accc2af8a2ab88a6bdc3f6c85dfa3a7fa16f
|
/Tolerance_test/python/torx/module_Int8/w2g.py
|
a05b6f115231095c3a4f52dbdc53b686cc687bc9
|
[] |
no_license
|
DamonAtSjtu/HAWIS
|
6f67d3473a8b78096245e0c2b1e36e282f60031d
|
9a9155f2c56411e69bbfb6cfc3d6d6f124d67f84
|
refs/heads/master
| 2023-08-28T21:41:31.219419
| 2021-10-23T09:30:04
| 2021-10-23T09:30:04
| 420,367,719
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,729
|
py
|
# Copyright 2019 The PytorX Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
from .SAF import SAF
class w2g(nn.Module):
'''
    perform the weight conversion within this function, which converts the
post-quantization fixed point weight (weight_hat) into a pair of
conductance values. output[0] is the G_pos and output[1] is the G_neg
'''
def __init__(self, delta_g, Gmin, G_SA0, G_SA1, weight_shape,
enable_rand=True, enable_SAF=False, enable_val_SAF=True):
super(w2g, self).__init__()
self.delta_g = delta_g
self.Gmin = Gmin
self.G_SA0 = G_SA0
self.G_SA1 = G_SA1
self.p_SA0 = 0.0175 ###0.1
self.p_SA1 = 0.0904 ###0.1
self.enable_rand = enable_rand
self.enable_SAF = enable_SAF
self.enable_val_SAF = enable_val_SAF
self.SAF_pos = SAF(weight_shape, p_SA0=self.p_SA0, p_SA1=self.p_SA1,
G_SA0=self.G_SA0, G_SA1=self.G_SA1)
self.SAF_neg = SAF(weight_shape, p_SA0=self.p_SA0, p_SA1=self.p_SA1,
G_SA0=self.G_SA0, G_SA1=self.G_SA1)
def forward(self, input):
# x_relu() function is Critical
self.G_pos = self.Gmin + x_relu(input) * self.delta_g
self.G_neg = self.Gmin + F.relu(-input) * self.delta_g
# the following two steps will update the SAF masking if enable_rand is True
#if self.enable_val_SAF:
if self.enable_SAF:
output = torch.cat((self.SAF_pos(self.G_pos).unsqueeze(0),
self.SAF_neg(self.G_neg).unsqueeze(0)),
0)
else:
output = torch.cat((self.G_pos.unsqueeze(0),
self.G_neg.unsqueeze(0)), 0)
return output
def error_compensation(self):
pos_SA0 = self.SAF_pos.index_SA0().float().cuda()
pos_SA1 = self.SAF_pos.index_SA1().float().cuda()
neg_SA0 = self.SAF_neg.index_SA0().float().cuda()
neg_SA1 = self.SAF_neg.index_SA1().float().cuda()
G_pos_diff = (self.G_pos-self.G_SA0)*pos_SA0 + \
(self.G_pos-self.G_SA1)*pos_SA1
G_neg_diff = (self.G_neg-self.G_SA0)*neg_SA0 + \
(self.G_neg-self.G_SA1)*neg_SA1
return G_pos_diff, G_neg_diff
def update_SAF(self, enable_SAF, p_SA0, p_SA1, new_SAF_mask=False, enable_rand=False):
self.p_SA0 = p_SA0
self.p_SA1 = p_SA1
self.enable_SAF = enable_SAF
# update the SAF_pos and SAF_neg modules
self.SAF_pos.p_SA0.data[0] = self.p_SA0
self.SAF_pos.p_SA1.data[0] = self.p_SA1
self.SAF_neg.p_SA0.data[0] = self.p_SA0
self.SAF_neg.p_SA1.data[0] = self.p_SA1
# enable the random mask, thus each forward call get a new p_state mask
self.enable_rand = enable_rand
self.SAF_pos.enable_rand = enable_rand
self.SAF_neg.enable_rand = enable_rand
if new_SAF_mask:
self.SAF_pos.p_state.data.uniform_()
self.SAF_neg.p_state.data.uniform_()
class _newrelu(torch.autograd.Function):
'''
This self-define function is used for mapping weight on positive
and negative array. It will prevent close to zero weights trapped
within the region that quantized into zero, which will never be
updated by back-propagation, thus degrades the accuracy.
'''
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
return input.clamp(min=0)
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
grad_input = grad_output.clone()
grad_input[input < 0] = 0
return grad_input
x_relu = _newrelu.apply
############################################################
# Testbenchs
############################################################
def test_w2g_module_output_conductance_range():
    '''
    ensure the w2g module has the correct output conductance range
    which is between G_min and G_max.
    '''
    # minimal sketch (assumed values, SAF disabled): with no stuck-at faults,
    # G = Gmin + relu(+/-w) * delta_g, so no output conductance may fall below Gmin
    module = w2g(delta_g=0.01, Gmin=0.1, G_SA0=1.0, G_SA1=0.05,
                 weight_shape=(4, 4), enable_SAF=False)
    assert module(torch.randn(4, 4)).min().item() >= 0.1
    return
|
[
"tangqidong@sjtu.edu.cn"
] |
tangqidong@sjtu.edu.cn
|
831d7ab7e6bcee8f13d11ea5de2934d0cea83c31
|
185bbb353bc402863b468e215fb629658a2e4e74
|
/OrderTraversal.py
|
a308f7d064b91fb45eeacf49a03a919e23903a17
|
[] |
no_license
|
conniechu929/Algos
|
d6f8e960cf90e53698a0e6ecd5176a320c30d0a4
|
f5fe4be6e0f849b43c4e4f665ee2e332a739edb0
|
refs/heads/master
| 2021-05-23T05:10:08.158744
| 2020-06-11T15:31:57
| 2020-06-11T15:31:57
| 95,244,924
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 642
|
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def levelOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if root is None:
return []
ans, level = [], [root]
while level:
ans.append([node.val for node in level])
temp = []
for node in level:
temp.extend([node.left, node.right])
level = [leaf for leaf in temp if leaf]
return ans
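# --- Hedged usage sketch (not part of the original file) ---
# Assuming the TreeNode stub above is uncommented, a quick check on the
# classic [3, 9, 20, null, null, 15, 7] tree:
#
# root = TreeNode(3)
# root.left, root.right = TreeNode(9), TreeNode(20)
# root.right.left, root.right.right = TreeNode(15), TreeNode(7)
# print(Solution().levelOrder(root))  # expected: [[3], [9, 20], [15, 7]]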
|
[
"conniechu929@gmail.com"
] |
conniechu929@gmail.com
|
fdc58293d65c23a6bd4e146624a5e89753163e19
|
8c1562c22fd52b423e5dace418910ec4fd198bc3
|
/mysite/settings.py
|
8448ba261e111a504fa41a5fd667af7446fcf1e2
|
[] |
no_license
|
UlTropy/first-Django-blog
|
7fd1047757c98a73c0545b7a9ad896e2cde3cb9f
|
738a649c6ec901a152d9eb974d8fc792eaadc024
|
refs/heads/master
| 2023-02-06T12:11:37.729032
| 2020-12-21T15:15:10
| 2020-12-21T15:15:10
| 321,672,499
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,636
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'wfmx$sa970_4x-6wa+vegveqya%@-2aetrex1k15!n+=75b9(@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/' # URL for media files in templates
# empty folder; collectstatic will gather the static files here
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/' # URL used in templates
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'assets'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
|
[
"wowan-minsk@yandex.ru"
] |
wowan-minsk@yandex.ru
|
d966df1ef50313f7c8e2b8bc6910f56eb22a0388
|
5a4d2dda4372a1ce0fbd4b272cdff90dc38a08ec
|
/_config.py
|
79b885cfc426d3d2e1b1a7ef4439f985416d526b
|
[
"BSD-2-Clause"
] |
permissive
|
a2nt/silverstripe-digitalocean-nginx-backup-scripts
|
46ed4262fe700098dde89ae223c9c4eff6af357c
|
3eae487e76213be05cbc70289ae1a12c9e055e79
|
refs/heads/master
| 2021-01-11T01:42:50.281089
| 2016-12-13T16:27:50
| 2016-12-13T16:27:50
| 70,655,646
| 14
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,487
|
py
|
import _lib
# Digital Ocean API token (to create temporary block storage / server)
token = ''
# Digital Ocean SSH Key ID (will be used to connect to temporary backing up server)
sshkeyid = ''
# your web sites root folder path (with the folders named: my.domain.com)
sitepath = '/srv'
sites = _lib.list_dirs(sitepath)
# exclude following folder names
_lib.exclude('tmp')
_lib.exclude('01virus.twma.pro')
_lib.exclude('02empty')
_lib.exclude('backups')
# your block storage mount point / local backing up folder path
backuppath = '/mnt/backup'
# framework configuration path (to store example_config folder which will be added to backup file)
configpath = '/srv-service/conf.d/php'
# server scripts path
scriptspath = '/srv-service/scripts'
# your web sites configuration nginx path (will be used to remove excessive my.domain.com.conf files)
nginxpath = '/srv-service/conf.d/nginx/sites'
# Exclude following nginx conf files
excludeconf = [
'01fastcgi_cache_zone.conf',
'02twma.pro.conf',
'03ban_ip.conf',
'04gzip.conf',
]
# MySQL host (will be used to backup database and to remove excessive my.domain.com databases)
host = 'localhost'
# MySQL user
user = 'root'
# MySQL password
password = ''
# Exclude following MySQL DB's
excludedb = [
'performance_schema',
'information_schema',
'mysql',
'user',
'sys',
]
servername = _lib.current_server_name()
server = _lib.get_region_and_id(servername)
region = server[0]
serverid = server[1]
volumeid = 0
|
[
"tony@thewebdevelopmentagency.com"
] |
tony@thewebdevelopmentagency.com
|
9bc90ac7d12e2b6a623323740464528474703da4
|
4db45f25ac41da4208d6dd9e2046e5d64daf077f
|
/Assignment10_2.py
|
0d083fb612f1aa5a36fd59511ae37508a90489a8
|
[] |
no_license
|
likair/python-programming-course-assignments
|
029ee83c4a96edb897a22efbbc245687f2eabced
|
066a5245cf55dc8c79f17c84cc1f706b6df937a2
|
refs/heads/master
| 2023-02-17T23:07:11.263592
| 2015-06-10T15:42:12
| 2015-06-10T15:42:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
'''
Created on Jun 5, 2015
A GUI application, which changes the background color of the main window as the
mouse moves above it.
@author: lebs
'''
from tkinter import *
def changeColor(Event):
root.configure(background='black')
if __name__ == '__main__':
root = Tk()
root.wm_title('Color Change')
root.geometry('500x500')
root.bind('<Motion>',changeColor)
root.mainloop()
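# --- Hedged extension sketch (not part of the original exercise) ---
# Binding '<Leave>' the same way would restore the color once the pointer
# exits the window, e.g.:
# root.bind('<Leave>', lambda event: root.configure(background='white'))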
|
[
"lebs.fi@qq.com"
] |
lebs.fi@qq.com
|
d44cd5123695d6c48fef84f95857d085ddda8775
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/2D_20200722180003.py
|
f67079f7677c6a1e5d41c5b12f694fad5f417ffe
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,400
|
py
|
def array(n,m):
    # where m is the number of rows and n is the number of columns
array = [[0 for x in range(n)] for x in range(m)]
print(array)
a = [[2, 4, 6, 8, 10], [3, 6, 9, 12, 15], [4, 8, 12, 16, 20]]
# where the first arguement reps the row and second arguement reps the column
print(a[0][3])
def hourGlass(arr):
# you have a 2d array
# get max hour glass
# var maxCount to keep record of the max count
# what do you know about an hourglass
# the indicies fall in a pattern where
# i and i+2 are not equal to 0 and i + 1 is equal to 0
    maxCount = -float("inf") # handles grids whose hourglass sums are all negative
    totalCount = 0
if arr !=[]:
for i in range(len(arr)-2):
totalCount = 0
# remember j is looping through arr[i]
for j in range(len(arr[i])-2):
                totalCount = arr[i][j] + arr[i][j+1] + arr[i][j+2] + arr[i+1][j+1] + arr[i+2][j] + arr[i+2][j+1] + arr[i+2][j+2]
if totalCount > maxCount:
maxCount = totalCount
# print(arr[i][j],arr[i][j+1],arr[i][j+2],"below",arr[i+1][j+1],"next",arr[i+2][j],arr[i+2][j+1],arr[i+2][j+2])
        return maxCount
else:
return 0
print(hourGlass([[1,1,1,0,0,0],[0,1,0,0,0,0],[1,1,1,0,0,0],[0,0,2,4,4,0],[0,0,0,2,0,0],[0,0,1,2,4,0]])) # expected: 19
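# --- Hedged illustration (not part of the original file) ---
# The hourglass summed above, anchored at (i, j):
#   arr[i][j]    arr[i][j+1]    arr[i][j+2]
#                arr[i+1][j+1]
#   arr[i+2][j]  arr[i+2][j+1]  arr[i+2][j+2]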
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
9b61c14e3c94a49aed2178fbcf9d5fb71741e03b
|
7c1a4312f0b0f8bfff26779fea1ab95cb1f94e53
|
/newfile.py
|
499caa01d847276d4155f9fe9bddd183761de035
|
[] |
no_license
|
kminwoo4/Moojae2
|
86a388606d8efe78947adbd0439f6ae7acb70d6a
|
af771b48144ce229ec3753404893f8e9937035e4
|
refs/heads/master
| 2022-11-26T03:49:23.599420
| 2020-08-03T06:07:44
| 2020-08-03T06:07:44
| 284,617,652
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 491
|
py
|
def backAndForthNumbers (x):
    number = x #Use the argument instead of a hard-coded number
    checker = number #Checker for reverse of the number
    reverse = 0
    while number > 0:
        digit = number % 10 #Extracting last digit of the number
        reverse = reverse*10 + digit #appending the digit to the reverse
        number = number // 10 #Leave behind the last digit from the number
    #Comparing reverse to original
    if checker == reverse:
        return True
    else:
        return False #Results
print(backAndForthNumbers(223))
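# --- Hedged check (not part of the original file) ---
# 223 reversed is 322, so the call above prints False; a palindrome such as
# print(backAndForthNumbers(121)) would print True.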
|
[
"yjk@gimmin-uui-MacBookPro.local"
] |
yjk@gimmin-uui-MacBookPro.local
|
3ba277c4600b290b63c75ef92a95a1015da482a4
|
b1db73e6c9df798e190ec23d170710d7f0b9f073
|
/zadanie 4.py
|
a0b9da7db60fcb2d0ff5f3e9d94670cf09649c39
|
[] |
no_license
|
andypaw03/homework
|
3f9fe1825f4a9a4e3245994b02a0520fa88f8acd
|
d0553507b5842867aaac21aaeaa7638969e307c6
|
refs/heads/master
| 2021-05-17T11:07:05.534910
| 2020-03-28T17:29:59
| 2020-03-28T17:29:59
| 250,748,906
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 91
|
py
|
a=input("")
def lena(x):
    b = 0
    for i in x: #iterate over the argument rather than the global string
        b += 1
    return b
print (lena(a))
|
[
"andypaw03@gmail.com"
] |
andypaw03@gmail.com
|
93b4ff666a4c0dbc1475a16fb53d3a864ecec53d
|
1e0ec4d34def6d1d31665551b4aecbb644323249
|
/disambig_creation_constants.py
|
2fca0360764a115a37e4d1aa2de947aad7ea4777
|
[] |
no_license
|
RheingoldRiver/leaguepedia_archive
|
e10615530846080446fa5a56ae2e570f9376f875
|
52703d4fb0fef2345353945044a78915d78688bf
|
refs/heads/master
| 2022-06-19T21:37:47.480986
| 2022-06-01T18:44:32
| 2022-06-01T18:44:32
| 242,654,649
| 1
| 1
| null | 2021-12-15T20:07:19
| 2020-02-24T05:33:07
|
Python
|
UTF-8
|
Python
| false
| false
| 928
|
py
|
originalName = 'Limit'
irlName = 'Ju Min-gyu'
newName = '{} ({})'.format(originalName,irlName)
initmove = True
blankedit = False
limit = -1
timeoutLimit = 30
listplayerTemplates = ["listplayer", "listplayer/Current"]
rosterTemplates = ["ExtendedRosterLine", "ExtendedRosterLine/MultipleRoles"]
scoreboardTemplates = ["MatchRecap/Player", "MatchRecapS4/Player",
"MatchRecapS5/Player", "MatchRecapS6/Player",
"MatchRecapS7/Player", "MatchRecapS8/Player",
"MatchRecapS6NoSMW/Player", "MatchRecapS7NoKeystones/Player"]
statTemplates = ["IPS","CareerPlayerStats","MatchHistoryPlayer"]
rosterChangeTemplates = ["RosterChangeLine","RosterRumorLine2"]
summary = "Disambiguating {} to {}".format(originalName, newName)
cssStyle = "{\n color:orange!important;\n font-weight:bold;\n}"
origNameLC = originalName[0].lower() + originalName[1:]
newNameLC = newName[0].lower() + newName[1:]
blankEditThese = []
|
[
"18037011+RheingoldRiver@users.noreply.github.com"
] |
18037011+RheingoldRiver@users.noreply.github.com
|
69a1634d445e07945fcf4295399a9402133a27b5
|
2cf543b38f17b1fc7b9c20d19d2da7fde235abca
|
/docs/conf.py
|
e41b0aa24138b21a5d5e76f11cd9ab6762e0e5b7
|
[
"MIT"
] |
permissive
|
arvimal/ceph_check
|
0de8b93462a8e1449b2cddbbe008ed256aa1a072
|
7e82b7838de247568e64ef84a0fcdaf20e6c1728
|
refs/heads/master
| 2020-04-16T02:17:36.186334
| 2017-12-25T16:45:34
| 2017-12-25T16:45:34
| 46,115,573
| 6
| 0
| null | 2017-09-07T09:20:02
| 2015-11-13T10:28:24
|
Python
|
UTF-8
|
Python
| false
| false
| 8,414
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ceph_check documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import ceph_check
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ceph_check'
copyright = u"2017, Vimal A.R"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = ceph_check.__version__
# The full version, including alpha/beta/rc tags.
release = ceph_check.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ceph_checkdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'ceph_check.tex',
u'ceph_check Documentation',
u'Vimal A.R', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ceph_check',
u'ceph_check Documentation',
[u'Vimal A.R'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ceph_check',
u'ceph_check Documentation',
u'Vimal A.R',
'ceph_check',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
[
"arvimal@yahoo.in"
] |
arvimal@yahoo.in
|
666dfa4951ff0ae8197cda16cfa2dc10be92003c
|
69d081693a6f68b54aff61fffc2ebceff65e03c4
|
/src/13_RomanToInteger.py
|
ab8595d89100356194fd010196717e739ec89163
|
[
"MIT"
] |
permissive
|
SofiaSmile/LeetCode
|
9a70deb90260e565d9c3a79ce6e3dfa36fa1f80c
|
f1aca2f35a758d6ecd98c5701755fef59394bac6
|
refs/heads/master
| 2023-01-05T01:48:02.685303
| 2020-11-14T01:46:11
| 2020-11-14T01:46:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,915
|
py
|
"""
13. Roman to Integer
Roman numerals are represented by seven different symbols: I, V, X, L, C, D and M.
Symbol Value
I 1
V 5
X 10
L 50
C 100
D 500
M 1000
For example, two is written as II in Roman numerals: just two ones added together. Twelve is written as XII, which is simply X + II. The number twenty-seven is written as XXVII, which is XX + V + II.
Roman numerals are usually written largest to smallest from left to right. However, the numeral for four is not IIII; instead, four is written as IV: because the one comes before the five, we subtract it, making four. The same principle applies to the number nine, which is written as IX. There are six instances where subtraction is used:
I can be placed before V (5) and X (10) to make 4 and 9.
X can be placed before L (50) and C (100) to make 40 and 90.
C can be placed before D (500) and M (1000) to make 400 and 900.
Given a roman numeral, convert it to an integer. Input is guaranteed to be within the range from 1 to 3999.
Example 1:
Input: "III"
Output: 3
Example 2:
Input: "IV"
Output: 4
Example 3:
Input: "IX"
Output: 9
Example 4:
Input: "LVIII"
Output: 58
Explanation: L = 50, V= 5, III = 3.
Example 5:
Input: "MCMXCIV"
Output: 1994
Explanation: M = 1000, CM = 900, XC = 90 and IV = 4.
"""
class Solution(object):
def romanToInt(self, s):
"""
:type s: str
:rtype: int
"""
dic = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
sum = 0
for i in range(len(s)):
if i > 0 and dic[s[i]] > dic[s[i - 1]]:
sum += dic[s[i]] - 2 * dic[s[i - 1]]
else:
sum += dic[s[i]]
return sum
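# Each assignment below overwrites the previous one; only the last value is tested.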
x = "III"
x = "IV"
x = "IX"
x = "LVIII"
x = "MCMXCIV"
solution = Solution()
results = solution.romanToInt(x)
print(results)
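# A minimal sanity-check harness (added for illustration) against the
# docstring examples above:
for numeral, expected in [("III", 3), ("IV", 4), ("IX", 9),
                          ("LVIII", 58), ("MCMXCIV", 1994)]:
    assert solution.romanToInt(numeral) == expected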
|
[
"481528024@qq.com"
] |
481528024@qq.com
|
cd1caaedb6fa1d12bca76e6971d6d386ec505d3b
|
a7bc6f7a55f1486c5cdd5619e6d267eee71820ed
|
/enhancement-metrics/main.py
|
b844290b1ce3089b2d62093e6e131883daedd4e9
|
[] |
no_license
|
usmanzafar/aws-experiments-data-ingestion-and-analytics
|
977d39ab2933e359668e7509a14ed8249348fc20
|
c4e9610a374daffd015a08ef0037e35e8df841f4
|
refs/heads/master
| 2022-07-29T20:18:04.920158
| 2020-05-14T15:06:52
| 2020-05-14T15:06:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,775
|
py
|
# A sample output record from Kinesis Analytics application for this function is as below
# {'recordId': '8afbe41d-db75-4cd1-8dc3-f80ca2c382e2', 'lambdaDeliveryRecordMetadata': {'retryHint': 128}, 'data': 'eyJBRCI6Ik5vdCBjbGlja2VkIiwiSU5HRVNUSU9OX1RJTUUiOjE1NjcyMDg4MTkzMDEsIk5CUiI6MTU0OH0='}
# The decoded data is {"AD":"Not clicked","INGESTION_TIME":1567208819301,"NBR":1548}
import boto3
import base64
from json import loads
from datetime import datetime
cw_client = boto3.client('cloudwatch')
namespace = 'BidRequestExperiment'
def handler(event, context):
output = []
success = 0
failure = 0
for record in event['records']:
try:
payload = loads(base64.b64decode(record['data']))
timestamp = float(payload['INGESTION_TIME']) / 1000
event_time = datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%dT%H:%M:%S')
            metricDataItem = {
                'MetricName': payload['AD'],  # e.g. "Clicked" or "Not clicked"
                'Timestamp': event_time,
                'Value': payload['NBR'],
                'Unit': 'None',
                'StorageResolution': 1
            }
print('metrics to cwc = {}'.format([metricDataItem]))
response = cw_client.put_metric_data(Namespace=namespace,MetricData=[metricDataItem])
print(response)
success += 1
output.append({'recordId': record['recordId'], 'result': 'Ok'})
except Exception as inst:
print(inst)
failure += 1
output.append({'recordId': record['recordId'], 'result': 'DeliveryFailed'})
print('Successfully delivered {0} records, failed to deliver {1} records'.format(success, failure))
return {'records': output}
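# A minimal local-test sketch (hypothetical harness; boto3 still needs valid
# AWS credentials for the put_metric_data call to succeed):
# import json
# sample = {"AD": "Not clicked", "INGESTION_TIME": 1567208819301, "NBR": 1548}
# event = {'records': [{
#     'recordId': 'test-1',
#     'data': base64.b64encode(json.dumps(sample).encode()).decode(),
# }]}
# print(handler(event, None))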
|
[
"nivonh@amazon.com"
] |
nivonh@amazon.com
|
ae7118ad89062e3af0681bd7a8c2347f6bc8e7c6
|
9a6fde3e4fbee6b8f12ed81598970991ec76a78e
|
/avaliacoes/api/viewsets.py
|
de927af97462a9043c8225c562acddfb3aa596c6
|
[] |
no_license
|
manfredimelo/pontos_turisticos
|
4c167a364f65eddd3c4a1406aadbb005fd0fb944
|
b364f9a52e37e3fea16974567913da65ec2a3d76
|
refs/heads/master
| 2022-02-19T05:24:38.429998
| 2019-09-23T18:47:46
| 2019-09-23T18:47:46
| 209,296,596
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
from rest_framework.viewsets import ModelViewSet
from avaliacoes.api.serializers import AvaliacaoSerializer
from avaliacoes.models import Avaliacao
class AvaliacaoViewSet(ModelViewSet):
queryset = Avaliacao.objects.all()
serializer_class = AvaliacaoSerializer
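# A hedged wiring sketch: registering this viewset with a DRF router
# (assumed urls.py layout, not shown in this excerpt):
# from rest_framework.routers import DefaultRouter
# router = DefaultRouter()
# router.register(r'avaliacoes', AvaliacaoViewSet, basename='avaliacao')
# urlpatterns = router.urls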
|
[
"noreply@github.com"
] |
manfredimelo.noreply@github.com
|
2b9bd0a8a39604785a9686f4ec2ef8ce078ff84b
|
2c4cbb270fd53c172f6a3f90ba75b8292d8cd88f
|
/sensitivity_analysis.py
|
2e9607fb07e09297aadaadaf6ca325bf8b0057ce
|
[] |
no_license
|
HansongN/dynamic_vehicle_network_clustering
|
be4de341788985ba089bee0b08e52ae300eedb41
|
d363db072fab998de2917f7f89f5049a804e4c22
|
refs/heads/master
| 2021-04-04T10:31:07.488315
| 2020-11-19T12:17:07
| 2020-11-19T12:17:07
| 248,448,964
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,752
|
py
|
# !/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: Hansong Nie
# @Time : 2019/10/25 15:55
import numpy as np
from utils import load_any_obj_pkl, save_dict, load_dict
import pandas as pd
from sklearn.cluster import KMeans
from clustering import Dunn_Validity_Index
import matplotlib.pyplot as plt
from sklearn.metrics import silhouette_score, davies_bouldin_score, calinski_harabaz_score
import os
def loadData2PD(filepath):
    data = load_any_obj_pkl(filepath)[-1]  # use the last element of the pickled list
X = None
car_ids = []
for key, value in data.items():
car_ids.append(key)
        if X is None:
            X = np.asarray(value).reshape(1, -1)
        else:
            X = np.vstack((X, np.asarray(value).reshape(1, -1)))
X = 1.0 * (X - X.mean()) / X.std()
return pd.DataFrame(X, index=car_ids)
def clusteringPerformance(X):
clu = KMeans(n_clusters=30, init="k-means++", n_init=10, max_iter=300, random_state=0)
clu.fit(X)
labels = clu.labels_
sil = silhouette_score(X, labels)
    db = davies_bouldin_score(X, labels)
ch = calinski_harabaz_score(X, labels)
return sil, db, ch
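# Usage sketch (hypothetical filepath; see loadData2PD above):
#   sil, db, ch = clusteringPerformance(loadData2PD(filepath))
# Higher silhouette / Calinski-Harabasz and lower Davies-Bouldin scores
# indicate better-separated clusters.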
def sensitivity(para_name, para_values, spara_name, spara_values, filepath_part1, filepath_part2, filepath_part3):
# sil = dict()
# db = dict()
# ch = dict()
# for i in range(len(spara_values)):
# silt = dict()
# dbt = dict()
# cht = dict()
# for j in range(len(para_values)):
# if para_name == "alpha" or para_name == "nwalk":
# filepath = filepath_part1 + str(para_values[j]) + filepath_part2 + str(spara_values[i]) + filepath_part3
# else:
# filepath = filepath_part1 + str(spara_values[i]) + filepath_part2 + str(para_values[j]) + filepath_part3
# X = loadData2PD(filepath)
# silt[str(para_values[j])], dbt[str(para_values[j])], cht[str(para_values[j])] = clusteringPerformance(X)
# sil[str(spara_values[i])], db[str(spara_values[i])], ch[str(spara_values[i])] = silt, dbt, cht
# save_dict(filepath="metric_result\sensitivity_analysis\DynWalks_" + para_name + "_silhouette_score.txt", mode="a", dic=sil)
# save_dict(filepath="metric_result\sensitivity_analysis\DynWalks_" + para_name + "_davies_bouldin_score.txt", mode="a", dic=db)
# save_dict(filepath="metric_result\sensitivity_analysis\DynWalks_" + para_name + "_calinski_harabaz_score.txt", mode="a", dic=ch)
p_name_notation = {"alpha": r"$\alpha$", "beta": r"$\beta$", "nwalk": r"$r$", "dim": r"$d$"}
sil = load_dict("metric_result\sensitivity_analysis\DynWalks_" + para_name +"_silhouette_score.txt")
db = load_dict("metric_result\sensitivity_analysis\DynWalks_" + para_name +"_davies_bouldin_score.txt")
ch = load_dict("metric_result\sensitivity_analysis\DynWalks_" + para_name +"_calinski_harabaz_score.txt")
# du = load_dict("metric_result\sensitivity_analysis\DynWalks_" + para_name +"_dunn_score.txt")
fig = plt.figure()
i = 0
for key, values in sil.items():
plt.plot(para_values,
[float(i) for i in list(values.values())],
label=p_name_notation[spara_name] + "=" + key,
color=colors[i],
marker=markers[i])
i += 1
plt.legend(fontsize=18)
plt.xticks(fontsize=18)
plt.xlabel(p_name_notation[para_name], fontdict={"size": 20})
plt.yticks(fontsize=18)
plt.ylabel("Silhouette Coefficient", fontdict={"size": 20})
# plt.title(para_name, fontdict={"size": 13})
plt.show()
fig.savefig("figures\sensitivity_analysis\DynWalks_" + para_name + "_Silhouette_Coefficient.eps",
dpi=400,
format='eps')
fig = plt.figure()
i = 0
for key, values in db.items():
plt.plot(para_values,
[float(i) for i in list(values.values())],
label=p_name_notation[spara_name] + "=" + key,
color=colors[i],
marker=markers[i])
i += 1
plt.legend(fontsize=18)
plt.xticks(fontsize=18)
plt.xlabel(p_name_notation[para_name], fontdict={"size": 20})
plt.yticks(fontsize=18)
plt.ylabel("Davies Bouldin Index", fontdict={"size": 20})
# plt.title(para_name, fontdict={"size": 13})
plt.show()
fig.savefig("figures\sensitivity_analysis\DynWalks_" + para_name + "_Davies_Bouldin_Index.eps",
dpi=400,
format='eps')
fig = plt.figure()
i = 0
for key, values in ch.items():
plt.plot(para_values,
[float(i) for i in list(values.values())],
label=p_name_notation[spara_name] + "=" + key,
color=colors[i],
marker=markers[i])
i += 1
plt.legend(fontsize=18)
plt.xticks(fontsize=18)
plt.xlabel(p_name_notation[para_name], fontdict={"size": 20})
plt.yticks(fontsize=18)
plt.ylabel("Calinski Harabaz Index", fontdict={"size": 20})
# plt.title(para_name, fontdict={"size": 13})
plt.show()
fig.savefig("figures\sensitivity_analysis\DynWalks_" + para_name + "_Calinski_Harabaz_Index.eps",
dpi=400,
format='eps')
# fig = plt.figure()
# plt.plot(para_values,
# [float(i) for i in list(du.values())],
# label="Dunn Validity Index",
# color="r")
# plt.xticks(fontsize=13)
# plt.xlabel(p_name_notation[para_name], fontdict={"size": 13})
# plt.yticks(fontsize=13)
# plt.ylabel("Dunn Validity Index", fontdict={"size": 13})
# plt.title(para_name, fontdict={"size": 13})
# plt.show()
# fig.savefig("figures\sensitivity_analysis\DynWalks_" + para_name + "_dunn_score.png")
def alpha_analysis():
alpha_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
beta_list = [0.1, 0.3, 0.5, 0.7, 0.9]
sensitivity(para_name="alpha",
para_values=alpha_list,
spara_name="beta",
spara_values=beta_list,
filepath_part1="DynWalks/output/DynWalks/hangzhou_20140301_MCC_a",
filepath_part2="_b",
filepath_part3="_embs.pkl")
def beta_analysis():
beta_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
alpha_list = [0.2, 0.4, 0.6, 0.8]
sensitivity(para_name="beta",
para_values=beta_list,
spara_name="alpha",
spara_values=alpha_list,
filepath_part1="DynWalks/output/DynWalks/hangzhou_20140301_MCC_a",
filepath_part2="_b",
filepath_part3="_embs.pkl")
def nwalk_analysis():
nwalk_list = [1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
dim_list = [16, 32, 64, 128, 256, 512]
sensitivity_nwalk(para_name="nwalk",
para_values=nwalk_list,
spara_name="dim",
spara_values=dim_list,
filepath_part1="DynWalks/output/DynWalks/hangzhou_20140301_MCC_nwalk",
filepath_part2="_dim",
filepath_part3="_embs.pkl")
def dim_analysis():
dim_list = [16, 32, 64, 128, 256, 512]
nwalk_list = [10, 20, 40, 80]
sensitivity_dim(para_name="dim",
para_values=dim_list,
spara_name="nwalk",
spara_values=nwalk_list,
filepath_part1="DynWalks/output/DynWalks/hangzhou_20140301_MCC_nwalk",
filepath_part2="_dim",
filepath_part3="_embs.pkl")
def sensitivity_nwalk(para_name, para_values, spara_name, spara_values, filepath_part1, filepath_part2, filepath_part3):
# sil = dict()
# db = dict()
# ch = dict()
# for i in range(len(spara_values)):
# silt = dict()
# dbt = dict()
# cht = dict()
# for j in range(len(para_values)):
# if para_name == "alpha" or para_name == "nwalk":
# filepath = filepath_part1 + str(para_values[j]) + filepath_part2 + str(spara_values[i]) + filepath_part3
# else:
# filepath = filepath_part1 + str(spara_values[i]) + filepath_part2 + str(para_values[j]) + filepath_part3
# X = loadData2PD(filepath)
# silt[str(para_values[j])], dbt[str(para_values[j])], cht[str(para_values[j])] = clusteringPerformance(X)
# sil[str(spara_values[i])], db[str(spara_values[i])], ch[str(spara_values[i])] = silt, dbt, cht
# save_dict(filepath="metric_result\sensitivity_analysis\DynWalks_" + para_name + "_silhouette_score.txt", mode="a", dic=sil)
# save_dict(filepath="metric_result\sensitivity_analysis\DynWalks_" + para_name + "_davies_bouldin_score.txt", mode="a", dic=db)
# save_dict(filepath="metric_result\sensitivity_analysis\DynWalks_" + para_name + "_calinski_harabaz_score.txt", mode="a", dic=ch)
p_name_notation = {"alpha": r"$\alpha$", "beta": r"$\beta$", "nwalk": r"$r$", "dim": r"$d$"}
sil = load_dict("metric_result\sensitivity_analysis\DynWalks_" + para_name + "_silhouette_score.txt")
db = load_dict("metric_result\sensitivity_analysis\DynWalks_" + para_name + "_davies_bouldin_score.txt")
ch = load_dict("metric_result\sensitivity_analysis\DynWalks_" + para_name + "_calinski_harabaz_score.txt")
# du = load_dict("metric_result\sensitivity_analysis\DynWalks_" + para_name +"_dunn_score.txt")
fig = plt.figure()
i = 0
for key, values in sil.items():
plt.plot([i for i in range(len(para_values))],
[float(i) for i in list(values.values())],
label=p_name_notation[spara_name] + "=" + key,
color=colors[i],
marker=markers[i])
i += 1
plt.legend(fontsize=18)
plt.xticks([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
[1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
fontsize=18)
plt.xlabel(p_name_notation[para_name], fontdict={"size": 20})
plt.yticks(fontsize=18)
plt.ylabel("Silhouette Coefficient", fontdict={"size": 20})
# plt.title(para_name, fontdict={"size": 13})
plt.show()
fig.savefig("figures\sensitivity_analysis\DynWalks_" + para_name + "_Silhouette_Coefficient.eps",
dpi=400,
format='eps')
fig = plt.figure()
i = 0
for key, values in db.items():
plt.plot([i for i in range(len(para_values))],
[float(i) for i in list(values.values())],
label=p_name_notation[spara_name] + "=" + key,
color=colors[i],
marker=markers[i])
i += 1
plt.legend(fontsize=18)
plt.xticks([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
[1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
fontsize=18)
plt.xlabel(p_name_notation[para_name], fontdict={"size": 20})
plt.yticks(fontsize=18)
plt.ylabel("Davies Bouldin Index", fontdict={"size": 20})
# plt.title(para_name, fontdict={"size": 13})
plt.show()
fig.savefig("figures\sensitivity_analysis\DynWalks_" + para_name + "_Davies_Bouldin_Index.eps",
dpi=400,
format='eps')
fig = plt.figure()
i = 0
for key, values in ch.items():
plt.plot([i for i in range(len(para_values))],
[float(i) for i in list(values.values())],
label=p_name_notation[spara_name] + "=" + key,
color=colors[i],
marker=markers[i])
i += 1
plt.legend(fontsize=18)
plt.xticks([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
[1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
fontsize=18)
plt.xlabel(p_name_notation[para_name], fontdict={"size": 20})
plt.yticks(fontsize=18)
plt.ylabel("Calinski Harabaz Index", fontdict={"size": 20})
# plt.title(para_name, fontdict={"size": 13})
plt.show()
fig.savefig("figures\sensitivity_analysis\DynWalks_" + para_name + "_Calinski_Harabaz_Index.eps",
dpi=400,
format='eps')
def sensitivity_dim(para_name, para_values, spara_name, spara_values, filepath_part1, filepath_part2, filepath_part3):
# sil = dict()
# db = dict()
# ch = dict()
# for i in range(len(spara_values)):
# silt = dict()
# dbt = dict()
# cht = dict()
# for j in range(len(para_values)):
# if para_name == "alpha" or para_name == "nwalk":
# filepath = filepath_part1 + str(para_values[j]) + filepath_part2 + str(spara_values[i]) + filepath_part3
# else:
# filepath = filepath_part1 + str(spara_values[i]) + filepath_part2 + str(para_values[j]) + filepath_part3
# X = loadData2PD(filepath)
# silt[str(para_values[j])], dbt[str(para_values[j])], cht[str(para_values[j])] = clusteringPerformance(X)
# sil[str(spara_values[i])], db[str(spara_values[i])], ch[str(spara_values[i])] = silt, dbt, cht
# save_dict(filepath="metric_result\sensitivity_analysis\DynWalks_" + para_name + "_silhouette_score.txt", mode="a", dic=sil)
# save_dict(filepath="metric_result\sensitivity_analysis\DynWalks_" + para_name + "_davies_bouldin_score.txt", mode="a", dic=db)
# save_dict(filepath="metric_result\sensitivity_analysis\DynWalks_" + para_name + "_calinski_harabaz_score.txt", mode="a", dic=ch)
p_name_notation = {"alpha": r"$\alpha$", "beta": r"$\beta$", "nwalk": r"$r$", "dim": r"$d$"}
sil = load_dict("metric_result\sensitivity_analysis\DynWalks_" + para_name + "_silhouette_score.txt")
db = load_dict("metric_result\sensitivity_analysis\DynWalks_" + para_name + "_davies_bouldin_score.txt")
ch = load_dict("metric_result\sensitivity_analysis\DynWalks_" + para_name + "_calinski_harabaz_score.txt")
# du = load_dict("metric_result\sensitivity_analysis\DynWalks_" + para_name +"_dunn_score.txt")
fig = plt.figure()
i = 0
for key, values in sil.items():
plt.plot([i for i in range(len(para_values))],
[float(i) for i in list(values.values())],
label=p_name_notation[spara_name] + "=" + key,
color=colors[i],
marker=markers[i])
i += 1
plt.legend(fontsize=18)
plt.xticks([0, 1, 2, 3, 4, 5],
[r"$2^4$", r"$2^5$", r"$2^6$", r"$2^7$", r"$2^8$", r"$2^9$"],
fontsize=18)
plt.xlabel(p_name_notation[para_name], fontdict={"size": 20})
plt.yticks(fontsize=18)
plt.ylabel("Silhouette Coefficient", fontdict={"size": 20})
# plt.title(para_name, fontdict={"size": 13})
plt.show()
fig.savefig("figures\sensitivity_analysis\DynWalks_" + para_name + "_Silhouette_Coefficient.eps",
dpi=400,
format='eps')
fig = plt.figure()
i = 0
for key, values in db.items():
plt.plot([i for i in range(len(para_values))],
[float(i) for i in list(values.values())],
label=p_name_notation[spara_name] + "=" + key,
color=colors[i],
marker=markers[i])
i += 1
plt.legend(fontsize=18)
plt.xticks([0, 1, 2, 3, 4, 5],
[r"$2^4$", r"$2^5$", r"$2^6$", r"$2^7$", r"$2^8$", r"$2^9$"],
fontsize=18)
plt.xlabel(p_name_notation[para_name], fontdict={"size": 20})
plt.yticks(fontsize=18)
plt.ylabel("Davies Bouldin Index", fontdict={"size": 20})
# plt.title(para_name, fontdict={"size": 13})
plt.show()
fig.savefig("figures\sensitivity_analysis\DynWalks_" + para_name + "_Davies_Bouldin_Index.eps",
dpi=400,
format='eps')
fig = plt.figure()
i = 0
for key, values in ch.items():
plt.plot([i for i in range(len(para_values))],
[float(i) for i in list(values.values())],
label=p_name_notation[spara_name] + "=" + key,
color=colors[i],
marker=markers[i])
i += 1
plt.legend(fontsize=18)
plt.xticks([0, 1, 2, 3, 4, 5],
[r"$2^4$", r"$2^5$", r"$2^6$", r"$2^7$", r"$2^8$", r"$2^9$"],
fontsize=18)
plt.xlabel(p_name_notation[para_name], fontdict={"size": 20})
plt.yticks(fontsize=18)
plt.ylabel("Calinski Harabaz Index", fontdict={"size": 20})
# plt.title(para_name, fontdict={"size": 13})
plt.show()
fig.savefig("figures\sensitivity_analysis\DynWalks_" + para_name + "_Calinski_Harabaz_Index.eps",
dpi=400,
format='eps')
if __name__ == '__main__':
if not os.path.exists("figures/sensitivity_analysis"):
os.makedirs("figures/sensitivity_analysis")
colors = ["r", "y", "g", "c", "b", "m"]
markers = ["o", "v", "^", "<", ">", "x"]
# alpha_analysis()
# beta_analysis()
nwalk_analysis()
# dim_analysis()
|
[
"hansong.nie@outlook.com"
] |
hansong.nie@outlook.com
|
111b0e3fdbd6beabd602738595a0fdf949089ff2
|
b65cfcda05fd72350c7b9e11e5995cc1d10fdd75
|
/shop/models.py
|
df6a76b04ed4d1f1473b36d641881c259f5e0b06
|
[] |
no_license
|
gauraviit1/pahal_project_2
|
f4e6a2cf1cfd613088ad27344279460bb72c9786
|
bc89c3848359ae0b95cb55c24d6fe24d637caabd
|
refs/heads/master
| 2021-04-28T21:20:44.614522
| 2017-01-01T09:57:25
| 2017-01-01T09:57:25
| 77,773,709
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,480
|
py
|
from django.db import models
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from decimal import Decimal
# Create your models here.
class Cateogry(models.Model):
name = models.CharField(max_length=200, db_index=True)
slug = models.SlugField(db_index=True, unique=True)
class Meta:
ordering = ['name']
verbose_name = 'cateogry'
verbose_name_plural = 'cateogries'
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('shop:product_list_by_cateogry', args=[self.slug])
def save(self, *args, **kwargs):
for field_name in ['name',]:
val = getattr(self, field_name, False)
if val:
setattr(self, field_name, val.capitalize())
super(Cateogry, self).save(*args, **kwargs)
class Product(models.Model):
cateogry = models.ForeignKey('Cateogry', related_name='products')
name = models.CharField(max_length=200, db_index=True)
slug = models.SlugField(max_length=200, db_index=True)
image = models.ImageField(upload_to="products/%Y/%m/%d", blank=True)
description = models.TextField(blank=True)
price = models.DecimalField(max_digits=10, decimal_places=2)
stock = models.PositiveIntegerField()
available = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ['name']
index_together = [('id', 'slug')]
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('shop:product_detail', args=[self.id, self.slug])
class Attribute(models.Model):
product = models.ForeignKey('Product', related_name="patt")
weight = models.DecimalField(max_digits=7, decimal_places=3, blank=True, null=True)
waist_size = models.PositiveSmallIntegerField(blank=True, null=True)
size = models.CharField(max_length=2, blank=True, null=True)
def clean(self, *args, **kwargs):
super(Attribute, self).clean(*args, **kwargs)
if self.weight == Decimal('0.350'):
raise ValidationError({'weight': _('Cannot use this value')})
class Meta:
unique_together = ('product', 'weight')
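# Illustrative shell session (hypothetical objects) showing the clean() guard above:
# >>> attr = Attribute(product=some_product, weight=Decimal('0.350'))
# >>> attr.full_clean()  # raises ValidationError on 'weight': 'Cannot use this value'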
|
[
"mcjail.shi.hp@gmail.com"
] |
mcjail.shi.hp@gmail.com
|
b6f56697fb41c5e23e58b13a4e63f3780c4b41ea
|
db338cf7720a0ecbf181f7077b0dcf22b499d822
|
/src/mobot_client/migrations/0003_auto_20210902_2106.py
|
0f96c067c7d54e446c9525c7deab02ba86fffdfe
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
isabella232/mobot
|
94a7e33755cdf3b1916b6642ee7dc9bdfdebf84d
|
8a1fc884351211b4730e7de1c0bad1e18a1b1c8f
|
refs/heads/main
| 2023-08-31T17:00:21.341368
| 2021-09-16T00:55:35
| 2021-09-16T04:49:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,800
|
py
|
# Generated by Django 3.2.7 on 2021-09-02 21:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mobot_client', '0002_auto_20210902_2053'),
]
operations = [
migrations.AlterField(
model_name='bonuscoin',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='chatbotsettings',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='customer',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='customerdroprefunds',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='customerstorepreferences',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='drop',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='dropsession',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='item',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='message',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='order',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='sku',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='store',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
|
[
"noreply@github.com"
] |
isabella232.noreply@github.com
|
2c4ab74cda2680598623c66912579b5d2540ef70
|
edf510cc5bbbe24469d8ff262c022b33b4d80a75
|
/tacotron2/model/tacotron2.py
|
fafca0078fcb2bc687a7f48b30a31e19137b81ac
|
[
"Apache-2.0"
] |
permissive
|
rheehot/Tacotron2
|
e8b8a4be614708800b10b9fa7829264407510fa8
|
ddbe55b426397d40cadd14f5040c55ba7c25615d
|
refs/heads/master
| 2022-12-26T14:13:39.966498
| 2020-10-06T18:34:57
| 2020-10-06T18:34:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,577
|
py
|
# -*- coding: utf-8 -*-
# Soohwan Kim @sooftware
# This source code is licensed under the Apache 2.0 License license found in the
# LICENSE file in the root directory of this source tree
import torch.nn as nn
from torch import Tensor
from typing import Optional
from tacotron2.model.encoder import Encoder
from tacotron2.model.decoder import Decoder
from tacotron2.model.postnet import PostNet
class Tacotron2(nn.Module):
""" Neural Speech-To-Text Models called Tacotron2 """
def __init__(self, args) -> None:
super(Tacotron2, self).__init__()
self.encoder = Encoder(
vocab_size=args.vocab_size,
embedding_dim=args.embedding_dim,
encoder_lstm_dim=args.encoder_lstm_dim,
num_lstm_layers=args.num_encoder_lstm_layers,
conv_dropout_p=args.conv_dropout_p,
num_conv_layers=args.num_encoder_conv_layers,
conv_kernel_size=args.encoder_conv_kernel_size,
lstm_bidirectional=args.encoder_lstm_bidirectional,
device=args.device
)
self.decoder = Decoder(
num_mel_bins=args.num_mel_bins,
prenet_dim=args.prenet_dim,
decoder_lstm_dim=args.decoder_lstm_dim,
attn_lstm_dim=args.attn_lstm_dim,
embedding_dim=args.embedding_dim,
attn_dim=args.attn_dim,
location_conv_filter_size=args.location_conv_filter_size,
location_conv_kernel_size=args.location_conv_kernel_size,
prenet_dropout_p=args.prenet_dropout_p,
attn_dropout_p=args.attn_dropout_p,
decoder_dropout_p=args.decoder_dropout_p,
max_decoding_step=args.max_decoding_step,
stop_threshold=args.stop_threshold
)
self.postnet = PostNet(
num_mel_bins=args.num_mel_bins,
postnet_dim=args.postnet_dim,
num_conv_layers=args.num_postnet_conv_layers,
kernel_size=args.postnet_conv_kernel_size,
dropout_p=args.postnet_dropout_p
)
def forward(
self,
inputs: Tensor,
input_lengths: Optional[Tensor] = None,
targets: Optional[Tensor] = None,
teacher_forcing_ratio: float = 1.0
):
encoder_outputs = self.encoder(inputs, input_lengths)
decoder_outputs = self.decoder(encoder_outputs, targets, teacher_forcing_ratio)
postnet_outputs = self.postnet(decoder_outputs["mel_outputs"])
decoder_outputs["mel_outputs"] += postnet_outputs
return decoder_outputs
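# A rough usage sketch (assumed shapes and an argparse-style `args` namespace
# carrying the hyperparameters named in __init__ above):
# model = Tacotron2(args)
# outputs = model(inputs, input_lengths, targets, teacher_forcing_ratio=1.0)
# mel = outputs["mel_outputs"]  # mel spectrogram refined by the post-net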
|
[
"sh951011@gmail.com"
] |
sh951011@gmail.com
|
54fa67bd86817f2351dd6f2ee7f678c36f87cbc4
|
4123f9133ddf6ce6396f7a9ef3909d0056438ec6
|
/Z-Blog/Skype.py
|
6d25ac4beb5b31d69622a5f951ff01cb74873b38
|
[] |
no_license
|
ngotiendat8795/Ngo.T.Dat-C4E28-Fundamentals
|
83af9cfde6daad0b21b97d884cc3c202e4f885f9
|
4b8893cde28084ab2f883606d13aba1153c14701
|
refs/heads/master
| 2020-04-23T22:08:46.352244
| 2020-01-05T07:49:02
| 2020-01-05T07:49:02
| 171,491,553
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
import os
from skpy import Skype
# Read credentials from the environment instead of hard-coding them.
sk = Skype(os.environ['SKYPE_USER'], os.environ['SKYPE_PASSWORD'])  # connect to Skype
sk.user  # you
sk.contacts  # your contacts
sk.chats  # your conversations
ch = sk.contacts["ngotiendat8795"].chat  # 1-to-1 conversation
ch.sendMsg('this name is probably one of a kind in all of VN')  # plain-text message
ch.getMsgs()  # retrieve recent messages
|
[
"ngotiendat8795@gmail.com"
] |
ngotiendat8795@gmail.com
|