| blob_id (stringlengths 40–40) | directory_id (stringlengths 40–40) | path (stringlengths 2–616) | content_id (stringlengths 40–40) | detected_licenses (listlengths 0–69) | license_type (stringclasses 2 values) | repo_name (stringlengths 5–118) | snapshot_id (stringlengths 40–40) | revision_id (stringlengths 40–40) | branch_name (stringlengths 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64 2.91k–686M, ⌀) | star_events_count (int64 0–209k) | fork_events_count (int64 0–110k) | gha_license_id (stringclasses 23 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses 213 values) | src_encoding (stringclasses 30 values) | language (stringclasses 1 value) | is_vendor (bool 2 classes) | is_generated (bool 2 classes) | length_bytes (int64 2–10.3M) | extension (stringclasses 246 values) | content (stringlengths 2–10.3M) | authors (listlengths 1–1) | author_id (stringlengths 0–212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
98c2b0e2a3629f3f493cfe7dfd981519a328d543
|
d245277858c8708399a3dd6b9b4bc5122687d79c
|
/petselect/views.py
|
5516153370a34a4c1cda3190ade1db699e925516
|
[] |
no_license
|
mandjo2010/DjangoPetSite
|
a86b5c27f6ef852abe1e2a63bbec9d0ebf11e71c
|
f23c666fa0fe74d67270820554ffdc6e89d8490c
|
refs/heads/main
| 2023-05-09T10:21:12.356722
| 2021-06-03T12:49:54
| 2021-06-03T12:49:54
| 373,504,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
from django.shortcuts import render, get_list_or_404
from .models import Pet

# Create your views here.
def index(request):
    return render(request, 'petselect/index.html')


def resp(request):
    pet_objects = get_list_or_404(Pet, size=request.POST['size'], hair=request.POST['length'])
    return render(request, 'petselect/resp.html', {'pets': pet_objects})
|
[
"37101160+mandjo2010@users.noreply.github.com"
] |
37101160+mandjo2010@users.noreply.github.com
|
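The `resp` view in the snippet above indexes `request.POST` directly, which raises a `KeyError` (HTTP 500) if the form omits a field; a minimal defensive sketch, assuming the same `Pet` model with `size` and `hair` fields (not the original author's code):

```python
# Hedged sketch: a defensive variant of resp(); the Pet fields are assumed
# from the snippet above, and missing POST keys become None instead of a 500.
from django.shortcuts import render, get_list_or_404
from .models import Pet

def resp(request):
    size = request.POST.get('size')      # None when the key is absent
    length = request.POST.get('length')
    pet_objects = get_list_or_404(Pet, size=size, hair=length)
    return render(request, 'petselect/resp.html', {'pets': pet_objects})
```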
070d24b806b45b302d7014e3e0647b91d9d81da1
|
7bdb0e12359162c5dd2bddc58d2ca1d234fb29d2
|
/trunk/scripts/checkelf
|
729b1da344b294ea0061299870a64f31cd7ab8db
|
[] |
no_license
|
hitaf/Pardus-2011-Svn-
|
f40776b0bba87d473aac45001c4b946211cbc7bc
|
16df30ab9c6ce6c4896826814e34cfeadad1be09
|
refs/heads/master
| 2021-01-10T19:48:33.836038
| 2012-08-13T22:57:37
| 2012-08-13T22:57:37
| 5,401,998
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,414
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2010 TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
# For more information run with '--help'
#
# Detailed documentation about checkelf can be found at:
# http://developer.pardus.org.tr/guides/packaging/checkelf.html

import os
import re
import sys
import glob
import pisi
import magic
import shutil
import fnmatch
import tempfile
import optparse
import itertools
import subprocess

INSTALLDB = pisi.db.installdb.InstallDB()
CONSTANTS = pisi.constants.Constants()


def process_ldd(objdump_needed, ldd_output, ldd_unused, ldd_undefined):
    '''Process the ldd outputs and return simple path-only lists'''
    result_unused = []
    result_undefined = []
    result_broken = []
    result_main_ldd = {}
    result_needed = []
    for line in ldd_unused.replace("\t", "").split("\n"):
        if line != "" and "Unused" not in line:
            result_unused.append(line.strip())
    for line in ldd_undefined.replace("\t", "").split("\n"):
        if line.startswith("undefined symbol:"):
            result_undefined.append(re.sub(r"^undefined symbol: (.*)\((.*)\)$", r"\1", line))
    for line in ldd_output:
        if "=>" in line:
            # Filter these special objects
            if "linux-gate" in line or \
               "ld-linux" in line or "linux-vdso" in line:
                continue
            so_name, so_path = line.split("=>")
            if "not found" in so_path:
                # One of the dynamic dependencies is missing
                result_broken.append(so_name.strip())
            else:
                result_main_ldd[so_name.strip()] = so_path.split(" (")[0].strip()
    for obj in objdump_needed:
        # Find the absolute path of libraries from their SONAMEs
        if result_main_ldd.has_key(obj):
            result_needed.append(os.popen("readlink -f %s" % result_main_ldd[obj]).read().strip())
        else:
            result_needed.append(obj)
    # result_needed = (all shared library dependencies) - (needed shared libraries gathered from objdump)
    # result_broken = broken libraries that are not available at their place
    # result_unused = unused direct dependencies
    # result_undefined = undefined symbol errors
    return (result_needed, result_broken, result_unused, result_undefined)


def check_objdump(processed_needed, package_elf_files, package_name):
    '''Check the objdump needed libraries against the ldd libraries;
    the libraries that are needed can be used for dependencies'''
    result_needed = []
    # check if the libraries are shipped with the package,
    # then associate each library (with its package_name) with the given elf_file
    for objdump_needed in processed_needed:
        if objdump_needed in package_elf_files:
            # file is shipped within this package
            dependency_name = package_name
        else:
            # search for the package name (i.e: pisi sf /usr/lib/*.so )
            # the library may not exist, thus adding an exception is welcome
            try:
                dependency_name = pisi.api.search_file(objdump_needed)[0][0]
            except IndexError:
                dependency_name = "broken"
                #print "%s (probably broken dependency)" % needed
        result_needed.append((objdump_needed, dependency_name))
    return result_needed


def check_pc_files(pc_file):
    '''Check for .pc files created by pkg-config and shipped with the package;
    these .pc files have Requires tags that can be used for dependencies'''
    result_needed = []
    requires = set(os.popen("pkg-config --print-requires --print-requires-private %s | gawk '{ print $1 }'" % \
                            os.path.basename(pc_file).replace(".pc", "")).read().split("\n")[:-1])
    for require in requires:
        require_file = "/usr/share/pkgconfig/%s.pc" % require
        if not os.path.exists(require_file):
            require_file = "/usr/lib/pkgconfig/%s.pc" % require
        try:
            dependency_name = pisi.api.search_file(require_file)[0][0]
        except IndexError:
            dependency_name = "broken"
        result_needed.append((require_file, dependency_name))
    return result_needed


def check_intersections(result_dependencies, package_deps, package_name, systembase, systemdevel):
    '''Eliminate system.base and system.devel packages and self-written deps'''
    # get system.base and system.devel packages
    systembase_packages = []
    systemdevel_packages = []
    cdb = pisi.db.componentdb.ComponentDB()
    for repo in pisi.db.repodb.RepoDB().list_repos():
        for component in cdb.list_components(repo):
            if component == "system.base":
                systembase_packages.extend(cdb.get_packages('system.base', repo))
            if component == "system.devel":
                systemdevel_packages.extend(cdb.get_packages('system.devel', repo))
    # look for packages that are system.base but are written as dependency
    # and mark them with "(base)"
    result_must_removed = list(set(package_deps) & set(systembase_packages))
    for deps in package_deps:
        if deps in result_must_removed:
            package_deps[package_deps.index(deps)] = "%s (base)" % deps
    # look for packages that are system.devel but are written as dependency
    # and mark them with "(devel)"
    result_must_removed = list(set(package_deps) & set(systemdevel_packages))
    for deps in package_deps:
        if deps in result_must_removed:
            package_deps[package_deps.index(deps)] = "%s (devel)" % deps
    # extract the dependency package names and store them in result_deps;
    # dependencies tagged as broken or the package itself are eliminated
    dependencies = set()
    result_deps = []
    for elf_files, paths_and_deps in result_dependencies.items():
        for data in paths_and_deps:
            if data[1] != "broken" and data[1] != package_name:
                result_deps.append(data[1])
    # remove packages that belong to the system.base component
    # when -s is used, systembase is set to true
    # using set also removes duplicates in result_deps
    # mark packages that are common to system.base and result_deps
    if not systembase:
        result_deps = list(set(result_deps) - set(systembase_packages))
    if not systemdevel and package_name.endswith('-devel'):
        result_deps = list(set(result_deps) - set(systemdevel_packages))
    if systemdevel or systembase:
        result_must_removed = list(set(result_deps) & set(systembase_packages))
        for deps in result_deps:
            if deps in result_must_removed:
                result_deps[result_deps.index(deps)] = "%s (base)" % deps
        result_must_removed = list(set(result_deps) & set(systemdevel_packages))
        for deps in result_deps:
            if deps in result_must_removed:
                result_deps[result_deps.index(deps)] = "%s (devel)" % deps
    result_deps = list(set(result_deps))
    # remove packages that are already written in metadata.xml (runtime dependencies written in pspec.xml)
    result_section = list(set(result_deps) - set(package_deps))
    # create a sorted iteration object of the final result variables;
    # the lists may have variable lengths, thus we fill the smaller ones with empty strings
    # so that all the lists end up the same length, which makes it easy to print them like a table
    cmp_func = lambda x, y: len(x) - len(y)
    result_lists = itertools.izip_longest(sorted(list(set(package_deps)), cmp=cmp_func),
                                          sorted(result_deps, cmp=cmp_func),
                                          sorted(result_section, cmp=cmp_func),
                                          fillvalue="")
    return result_lists


def output_result(package_name, package_dir):
    '''Execute ldd on ELF files and return the results'''
    # Initialize magic for using "file" in python
    magic_db = magic.open(magic.MAGIC_NONE)
    magic_db.load()
    package_elf_files = []
    # Two options are available: checking a pisi file or an installed package in the database
    if package_dir:
        package_files = os.popen("find %s" % package_dir).read().strip().split("\n")
        package_pc_files = glob.glob("%s/usr/*/pkgconfig/*.pc" % package_dir)
    else:
        package_files = set(["/%s" % file_name.path \
                             for file_name in INSTALLDB.get_files(package_name).list])
        package_pc_files = set([os.path.realpath("/%s" % file_name.path) \
                                for file_name in INSTALLDB.get_files(package_name).list \
                                if fnmatch.fnmatch(file_name.path, "*/pkgconfig/*.pc")])
    for package_file in package_files:
        package_file_info = magic_db.file(package_file)  # Return file type
        if "LSB shared object" in package_file_info:
            package_elf_files.append(os.path.realpath(package_file))
        elif "LSB executable" in package_file_info:
            package_elf_files.append(package_file)
    # There may be more than one ELF file; check each one
    result_dependencies = {}
    result_unused = {}
    result_undefined = {}
    result_broken = None
    result_runpath = {}
    ld_library_paths = set()
    # Add library paths for unpacked pisi files
    if package_dir:
        for elf_file in package_elf_files:
            if elf_file.endswith(".so") or ".so." in elf_file:
                ld_library_paths.add(os.path.dirname(elf_file))
        os.environ.update({'LD_LIBRARY_PATH': ":".join(ld_library_paths)})
    for elf_file in package_elf_files:
        ldd_output = subprocess.Popen(["ldd", elf_file],
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.STDOUT,
                                      env=os.environ).communicate()[0].strip().split("\n")
        ldd_unused, ldd_undefined = subprocess.Popen(["ldd", "-u", "-r", elf_file],
                                                     stdout=subprocess.PIPE,
                                                     stderr=subprocess.PIPE,
                                                     env=os.environ).communicate()
        runpath = subprocess.Popen(["chrpath", "-l", elf_file],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT,
                                   env=os.environ).communicate()[0].strip().split(": ")
        objdump_needed = [line.strip().split()[1] for line in \
                          os.popen("objdump -p \"%s\" | grep 'NEEDED'" % elf_file).readlines()]
        # Process the various ldd and objdump outputs
        processed_needed, processed_broken, processed_unused, processed_undefined = \
            process_ldd(objdump_needed, ldd_output, ldd_unused, ldd_undefined)
        # association with each single elf file
        result_unused.update(dict([(elf_file, processed_unused)]))
        result_undefined.update(dict([(elf_file, processed_undefined)]))
        result_runpath.update(dict([(elf_file, runpath)]))
        result_broken = processed_broken
        result_dependencies[elf_file] = check_objdump(processed_needed, package_elf_files, package_name)
    # Check for .pc files
    for pc_file in package_pc_files:
        result_dependencies[pc_file] = check_pc_files(pc_file)
    return (result_dependencies, result_broken, result_unused, result_undefined, result_runpath)


def colorize(msg, color, nocolor=False):
    """Colorizes the given message."""
    # nocolor shuts coloring off completely; the point is that someone may want
    # to pipe the output, where the ANSI escape characters would also be printed
    if nocolor:
        return msg
    else:
        colors = {'green': '\x1b[32;01m%s\x1b[0m',
                  'red': '\x1b[31;01m%s\x1b[0m',
                  'yellow': '\x1b[33;01m%s\x1b[0m',
                  'bold': '\x1b[1;01m%s\x1b[0m',
                  'none': '\x1b[0m%s\x1b[0m',
                  }
        return colors[color if sys.stdout.isatty() else 'none'] % msg


def main(arg):
    '''Initialize packages, gather the results and print them out'''
    # This is of type dictionary; it includes all the
    # options from the parser such as option.directory, etc.
    option = arg[0]
    # Arguments are stored here,
    # such as the path of -d, or the pisi file.
    packages = arg[1]
    # When checkelf is executed plain, no packages are
    # defined; it will look for pisi files inside the directory
    # where checkelf is executed
    if not packages:
        pisi_files = glob.glob("*.pisi")
        if pisi_files:
            packages.extend(pisi_files)
    # If directory is used as option
    if option.directory:
        if option.recursive:
            for root, dirs, files in os.walk(option.directory):
                for data in files:
                    if data.endswith(".pisi"):
                        packages.append(os.path.join(root, data))
        else:
            # No recursion is used, just get the pisi
            # files from the directory specified with -d
            pisi_files = glob.glob("%s/*.pisi" % option.directory)
            packages.extend(pisi_files)
    # check for components, like system.base, tex.language, etc.
    if option.component:
        cdb = pisi.db.componentdb.ComponentDB()
        for repo in pisi.db.repodb.RepoDB().list_repos():
            if cdb.has_component(option.component):
                packages.extend(cdb.get_packages(option.component, repo))
    # check for all packages installed on the machine
    if option.installedlist:
        packages.extend(INSTALLDB.list_installed())
    used_pisi = False  # Do not check for a pisi binary file and for a package installed on the system
    for package in packages:
        # Check loop for .pisi files
        if package.endswith(".pisi"):
            used_pisi = True
            package_pisi = pisi.package.Package(package)
            package_meta = package_pisi.get_metadata()
            package_name = package_meta.package.name
            # Gather runtime dependencies directly from the metadata.xml
            package_deps = [dep.name() for dep in package_meta.package.runtimeDependencies()]
            # Contains extracted package content
            package_tempdir = tempfile.mkdtemp(prefix=os.path.basename(sys.argv[0]) + '-')
            package_pisi.extract_install(package_tempdir)
            # Get results from objdump, ldd, etc.
            result_dependencies, result_broken, result_unused, result_undefined, result_runpath = \
                output_result(package_name, package_tempdir)
            # Look for intersections of the packages (i.e. do not include system.base packages);
            # result_lists is an iteration object which contains tuples of length 3;
            # this tuple makes it easy to print the missing dependencies, see print_results()
            result_lists = check_intersections(result_dependencies, package_deps, package_name, option.systembase, option.systemdevel)
            # Print the results in a fancy output
            print_results(result_broken, result_unused, result_undefined, result_lists, result_runpath, package_name, option)
            # Delete the created temporary directory
            if package_tempdir.startswith("/tmp/"):
                shutil.rmtree(package_tempdir)
        # Check for an installed package on the system
        elif package in INSTALLDB.list_installed():
            if used_pisi:
                print "You've checked a pisi file before\nPlease do not check an installed package and a pisi file at the same time"
                sys.exit(1)
            else:
                package_name = package
                # Gather runtime dependencies directly from the database of installed packages
                package_deps = [dep.name() for dep in INSTALLDB.get_package(package).runtimeDependencies()]
                package_tempdir = False  # No temporary directory is needed, since we look at files that are installed
                # Same functions as in the part above
                result_dependencies, result_broken, result_unused, result_undefined, result_runpath = \
                    output_result(package_name, package_tempdir)
                result_lists = check_intersections(result_dependencies, package_deps, package_name, option.systembase, option.systemdevel)
                print_results(result_broken, result_unused, result_undefined, result_lists, result_runpath, package_name, option)
        else:
            print "Error: '%s' is not a valid .pisi file or an installed package\nPlease use the -d <path> option for a directory" % package


def print_results(result_broken, result_unused, result_undefined, result_lists, result_runpath, package_name, option):
    '''Print the final results in fancy colors'''

    def print_header(title, nocolor, color):
        print colorize("\n%s" % title, color, nocolor)
        print colorize("%s" % (len(title) * '-'), color, nocolor)

    def print_unused(result_unused, result_broken):
        header_dumped = False
        # Mark broken libs
        for elf_file, libs in result_unused.items():
            result_unused[elf_file] = map(lambda x: ("%s (broken)" % x) if x in result_broken else x, libs)
        for elf_file, libs in result_unused.items():
            if libs:
                if not header_dumped:
                    print_header("Unused direct dependency analysis", option.nocolor, 'green')
                    header_dumped = True
                print colorize(elf_file, 'red', option.nocolor)
                for data in libs:
                    print " ", data

    def print_undefined(result_undefined):
        header_dumped = False
        for elf_file, libs in result_undefined.items():
            if libs:
                if not header_dumped:
                    print_header("Undefined symbol analysis", option.nocolor, 'green')
                    header_dumped = True
                print colorize(elf_file, 'red', option.nocolor)
                for data in libs:
                    print " ", data

    def print_runpath(result_runpath):
        header_dumped = False
        for elf_file, libs in result_runpath.items():
            if libs:
                if "RPATH" in libs[1]:
                    if not header_dumped:
                        print_header("Runpath analysis", option.nocolor, 'green')
                        header_dumped = True
                    print colorize(elf_file, 'red', option.nocolor)
                    print " ", libs[1]

    def print_dependencies(result_lists):
        print colorize("\nWritten dependencies           Detected dependencies          Missing dependencies", 'green', option.nocolor)
        print colorize("----------------------------------------------------------------------------------", 'green', option.nocolor)
        for tuple_deps in result_lists:
            for deps in tuple_deps:
                if "(base)" in deps:
                    print "%s" % colorize(deps.ljust(30), 'red', option.nocolor),
                elif "(devel)" in deps:
                    print "%s" % colorize(deps.ljust(30), 'yellow', option.nocolor),
                else:
                    print deps.ljust(30),
            print

    def print_missing(result_lists, plain_list):
        if not plain_list:
            print_header("Missing dependencies", option.nocolor, 'green')
        for tuple_deps in result_lists:
            pkg = tuple_deps[2]
            if "(base)" in pkg:
                print "%s" % colorize(pkg, 'red', option.nocolor)
            elif pkg:
                print pkg

    if not option.plain_list:
        print_header("Package: %s" % package_name, option.nocolor, 'bold')
    # This part implements the behavior of checkelf's argument parsing:
    # if no options are used at all, we show all analyses;
    # if any of the other options is used, only the chosen ones are shown
    if not option.unused and not option.undefined and not option.dependencies \
       and not option.runpath and not option.missing:
        print_unused(result_unused, result_broken)
        print_undefined(result_undefined)
        print_runpath(result_runpath)
        print_dependencies(result_lists)
    else:
        if option.unused:
            print_unused(result_unused, result_broken)
        if option.undefined:
            print_undefined(result_undefined)
        if option.runpath:
            print_runpath(result_runpath)
        if option.dependencies:
            print_dependencies(result_lists)
        if option.missing:
            print_missing(result_lists, option.plain_list)


def argument():
    '''Command line argument parsing'''
    # TODO: use argparse() in the future
    parser = optparse.OptionParser(usage="Usage: %prog [options] foo.pisi foo2.pisi \n \t%prog [options] -d <path>\n \t%prog [options]\n \t%prog [options] foo",
                                   version="%prog 0.1")
    # Group options
    # check_options = optparse.OptionGroup(parser, "Check Options")
    parser.add_option("-n", "--no-color",
                      action="store_true",
                      dest="nocolor",
                      default=False,
                      help="Do not colorize the output (useful for piping the output)")
    parser.add_option("-l", "--plain-list",
                      action="store_true",
                      dest="plain_list",
                      default=False,
                      help="Don't prettify the output, only a list of packages")
    parser.add_option("-u", "--unused",
                      action="store_true",
                      dest="unused",
                      default=False,
                      help="Show only unused direct dependencies")
    parser.add_option("-f", "--undefined",
                      action="store_true",
                      dest="undefined",
                      default=False,
                      help="Show only undefined symbols")
    parser.add_option("-m", "--missing",
                      action="store_true",
                      dest="missing",
                      default=False,
                      help="Show only missing dependencies")
    parser.add_option("-t", "--dependencies",
                      action="store_true",
                      dest="dependencies",
                      default=False,
                      help="Show dependencies in a table")
    parser.add_option("-p", "--runpath",
                      action="store_true",
                      dest="runpath",
                      default=False,
                      help="Show RPATH status")
    parser.add_option("-s", "--systembase",
                      action="store_true",
                      dest="systembase",
                      default=False,
                      help="Don't hide system.base dependencies")
    parser.add_option("-x", "--systemdevel",
                      action="store_true",
                      dest="systemdevel",
                      default=False,
                      help="Show (colorize) system.devel dependencies")
    parser.add_option("-c", "--component",
                      action="store",
                      dest="component",
                      type="string",
                      help="Check a whole component")
    parser.add_option("-a", "--all",
                      action="store_true",
                      dest="installedlist",
                      default=False,
                      help="Check all installed packages")
    parser.add_option("-d", "--directory",
                      action="store",
                      dest="directory",
                      type="string",
                      metavar="<path>",
                      help="Specify a folder to check")
    parser.add_option("-r", "--recursive",
                      action="store_true",
                      dest="recursive",
                      default=False,
                      help="Check recursively in the specified folder")
    return parser.parse_args()


if __name__ == "__main__":
    args = argument()
    sys.exit(main(args))
|
[
"fatih@dhcppc1.(none)"
] |
fatih@dhcppc1.(none)
|
|
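For readers tracing the `objdump -p | grep NEEDED` step inside `output_result` above, here is a minimal Python 3 sketch of the same extraction done with `subprocess` instead of `os.popen`; the binary path is only an example:

```python
# Sketch of checkelf's NEEDED-soname extraction, redone with subprocess
# (Python 3). /bin/ls is an arbitrary example binary.
import subprocess

def needed_sonames(elf_path):
    out = subprocess.run(["objdump", "-p", elf_path],
                         capture_output=True, text=True, check=True).stdout
    return [line.split()[1] for line in out.splitlines()
            if line.strip().startswith("NEEDED")]

print(needed_sonames("/bin/ls"))  # e.g. ['libselinux.so.1', 'libc.so.6']
```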
1c5d5d7bebf798ac0d2783572f1523e21e3ce33f
|
cc9987a95181509a846b181eac73c45c8f4f42f5
|
/№ 22.py
|
95923ae37fb6011104ad2357a9291e2c1eeb2473
|
[] |
no_license
|
Andrey0563/Kolocvium
|
edd3c3f6e561f12c5d3dcf7f7d9cf8184be9fb9c
|
89f6338d95ec129d611c93588427161885b60a43
|
refs/heads/master
| 2022-04-23T07:09:27.938320
| 2020-04-24T09:24:15
| 2020-04-24T09:24:15
| 258,462,151
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 512
|
py
|
'''
№22
Find the product of the array elements that are multiples of 3 and 9.
The array size is 10. Fill the array with random numbers from 5 to 500.
Andrii Duzhak, group 122-H
'''
import random
import numpy as np

a = np.zeros(10, dtype=int)
s = 1
for i in range(len(a)):
    a[i] = (random.randint(5, 500))
    if (a[i] % 3 == 0) and (a[i] % 9 == 0):  # Check the condition
        s *= a[i]
print(s)
|
[
"duzhak.a55@gmail.com"
] |
duzhak.a55@gmail.com
|
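Since `a` is already a NumPy array, the loop above can be collapsed into a vectorized one-liner; note that a multiple of both 3 and 9 is simply a multiple of 9. A small sketch (not part of the original exercise):

```python
# Vectorized sketch of the same computation.
import numpy as np

a = np.random.randint(5, 501, size=10)  # randint's upper bound is exclusive here
s = int(a[a % 9 == 0].prod())           # prod() of an empty selection is 1
print(a, s)
```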
2844bab25d2c4c18c2dbdb61312346678f2d8927
|
3bf6240364e10c7a26ca791d24604576e474ac3e
|
/datas/generate_file_list.py
|
0f0fdb9188f26914fe7601085db23e6a726b68e5
|
[
"MIT"
] |
permissive
|
anoop3495/FPCC
|
d2d5aa364d6f4f07af8bf760db0033adc5d93161
|
a9c7777f6d1d34bca1109ed6e45f533d47a2f5e2
|
refs/heads/main
| 2023-08-13T09:30:02.377067
| 2021-10-15T07:17:46
| 2021-10-15T07:17:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 195
|
py
|
import os

root_dir = 'ring_train/'
note = ''
for txt_name in os.listdir(root_dir):
    note = note + './datas/' + root_dir + txt_name + '\n'
f = open('./ring_train.txt', 'w')
f.write(note)
f.close()
|
[
"noreply@github.com"
] |
anoop3495.noreply@github.com
|
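A pathlib variant of the same list generation, as a hedged sketch (same `ring_train/` layout assumed; `sorted()` is added for a deterministic order, which `os.listdir` does not guarantee):

```python
# Sketch: the file-list generation above with pathlib and a context manager.
from pathlib import Path

root_dir = Path('ring_train')
lines = [f"./datas/{root_dir.name}/{p.name}\n" for p in sorted(root_dir.iterdir())]
with open('./ring_train.txt', 'w') as f:
    f.writelines(lines)
```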
25e6d88820fd6c1d178d7fec93d980777b6c3945
|
56e1d915b1270e38248bb0e0241622eff71d9cf9
|
/src/detection/backgroundsubtraction.py
|
602c9313924825fbe5e6e8e3dc17e72326014fef
|
[] |
no_license
|
Lichor8/evc
|
608d7c3f350257b66c2a897f5f2a3c4aaa76f108
|
53bebcef6221304b3331fa7d1420f0189061c104
|
refs/heads/master
| 2020-04-05T14:36:18.966653
| 2016-08-31T14:30:43
| 2016-08-31T14:30:43
| 57,280,849
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
# import OpenCV functions
import cv2
# import Numpy functions
import numpy as np


def backgroundsubstration(img):
    # blur image using a gaussian blur
    imgBlurred = cv2.GaussianBlur(img, (19, 19), 0)
    gray_image = cv2.cvtColor(imgBlurred, cv2.COLOR_BGR2GRAY)
    hist, bins = np.histogram(img.ravel(), 256, [0, 256])
    grayval = np.argmax(hist)
    # print(grayval)
    thresh = grayval - 60
    [thres, dst] = cv2.threshold(gray_image, thresh, 255, cv2.THRESH_BINARY)
    return dst
|
[
"r.j.m.v.d.struijk@student.tue.nl"
] |
r.j.m.v.d.struijk@student.tue.nl
|
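A short usage sketch for the function above; the image path is hypothetical, and the fixed `-60` offset below the histogram peak comes from the snippet as-is:

```python
# Usage sketch: binarize a frame with the histogram-peak threshold from
# backgroundsubstration() above. 'frame.jpg' is a placeholder path.
import cv2

img = cv2.imread('frame.jpg')        # BGR image, as the function expects
mask = backgroundsubstration(img)    # white where gray value > (peak - 60)
cv2.imwrite('mask.jpg', mask)
```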
24ed85e52192f65dabbeb56095c065db2b08ad37
|
4778739e509fbf9c15843c8e13e2610c1181cbbb
|
/LemurAptana/LemurApp/migrations/0008_auto_20170924_1611.py
|
fcbea5fb139bd479b6e078b67d5c1b9ac783de32
|
[] |
no_license
|
wiltzius/btp-lemur
|
2d37966eb2f1d6505e55a73c87e0f036e58bd75e
|
2b0807b0927cfa74f810f043822e046baca41946
|
refs/heads/master
| 2023-01-27T11:41:08.059836
| 2021-08-22T21:24:27
| 2021-08-22T21:26:09
| 48,426,943
| 0
| 0
| null | 2023-01-24T17:37:38
| 2015-12-22T10:54:55
|
Python
|
UTF-8
|
Python
| false
| false
| 716
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('LemurApp', '0007_auto_20170704_1446'),
    ]

    operations = [
        migrations.AddField(
            model_name='order',
            name='notes',
            field=models.CharField(verbose_name='Notes', max_length=2048, blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='order',
            name='status',
            field=models.CharField(verbose_name='Order status', max_length=20, default='OPEN', choices=[('SENT', 'Sent'), ('OPEN', 'Open'), ('RETURNED', 'Returned')]),
        ),
    ]
|
[
"tom.wiltzius@gmail.com"
] |
tom.wiltzius@gmail.com
|
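A hedged sketch of the `Order` model state these operations produce; the model itself is not in this row, so everything beyond the two fields is an assumption:

```python
# Sketch: the Order fields implied by the AddField/AlterField operations above.
from django.db import models

class Order(models.Model):
    notes = models.CharField(verbose_name='Notes', max_length=2048, blank=True, default='')
    status = models.CharField(verbose_name='Order status', max_length=20, default='OPEN',
                              choices=[('SENT', 'Sent'), ('OPEN', 'Open'), ('RETURNED', 'Returned')])
```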
3aab8ef592b608c26c031f3b0124134f697aa9ac
|
3573e68315d56d4f124fa57db9b13364e0d415e8
|
/artists/admin.py
|
f5c2f22daaaeb08f09cf0c68bceb203aefe29e50
|
[] |
no_license
|
LBwaks/Turn-Up
|
5993bdf5190518400e6fddc7152c51cd95acf85d
|
7c5b29be8a51e7711571b395591b7f00b68971ce
|
refs/heads/main
| 2023-08-13T03:09:07.555964
| 2021-10-09T08:27:54
| 2021-10-09T08:27:54
| 375,811,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,586
|
py
|
from django.contrib import admin
from .models import Category, Artist, Comment
from django.utils.html import format_html
from django.contrib.auth.models import User

# Register your models here.
class ArtistAdmin(admin.ModelAdmin):
    def ImageThumbnail(self, object):
        return format_html('<img src="{}" width="60" class="img-thumbnail"/>'.format(object.photo.url))
    ImageThumbnail.short_description = 'Event Poster'
    list_display = ('artist_name', 'ImageThumbnail', 'event_name',
                    # 'category',
                    'created_date', 'is_featured')
    list_editable = ('is_featured',)
    search_fields = ('artist_name', 'event_name', 'event_county', 'event_location')
    list_filter = ('event_start_date', 'created_date', 'event_charge')

    def save_model(self, request, obj, form, change):
        if not obj.user_id:
            obj.user = request.user
        obj.save()

admin.site.register(Artist, ArtistAdmin)


class CategoryAdmin(admin.ModelAdmin):
    # def ImageThumbnail(self, object):
    #     return format_html('<img src="{}" width="60" class="img-thumbnail"/>'.format(object.photo.url))
    # ImageThumbnail.short_description = 'Event Poster'
    list_display = ('name', 'description', 'created_date')
    search_fields = ('name',)
    list_filter = ('name', 'created_date')

    def save_model(self, request, obj, form, change):
        if not obj.user_id:
            obj.user = request.user
        obj.save()

admin.site.register(Category, CategoryAdmin)


class CommentAdmin(admin.ModelAdmin):
    list_display = ('artist', 'user')

admin.site.register(Comment, CommentAdmin)
|
[
"obwakuvictor@gmail.com"
] |
obwakuvictor@gmail.com
|
ac1a60a332b0c4dafbac6de937ce385aa43dde89
|
d64928a9c7ab07b04a5074b4f7bee51c3efec9cd
|
/cic/sitemaps.py
|
ef82ecb4e400fd6d97a5c544eb7db5c5bc0db71c
|
[] |
no_license
|
Disabel/cic
|
7110a8ac36c6e0c8a576c741766c5b8701bbc7b2
|
d2d67757c10f07a862769bfec9c9b4a20a9bf052
|
refs/heads/master
| 2021-01-22T21:32:03.139858
| 2015-07-28T17:08:58
| 2015-07-28T17:08:58
| 37,287,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,206
|
py
|
#-*- encoding: utf-8 -*-
from django.contrib import sitemaps
from django.core.urlresolvers import reverse

from cic.apps.calendarium.models import Event
from cic.apps.homepage.models import conferenciaslista, cursoslista


class StaticViewSitemap(sitemaps.Sitemap):
    priority = 0.5
    changefreq = 'daily'

    def items(self):
        return [
            'homepageindex',
            'homepageaboutus',
            'aboutdirectorio',
            'aboutpaises',
            'aboutcodigo',
            'homepagerecursos',
            'homepagecertificaciones',
            'homepageregistro',
            'registrocic',
            'registroinvitado',
            'homepageservicios',
            'serviciosconferencias',
            'serviciosasesorias',
            'homepagecontact',
            # put the url names here, e.g. 'homepageworks'
        ]

    def location(self, item):
        return reverse(item)


class EventSitemap(sitemaps.Sitemap):
    changefreq = 'monthly'
    priority = 0.5

    def items(self):
        return Event.objects.all()


class ConferenciasSitemap(sitemaps.Sitemap):
    changefreq = 'monthly'
    priority = 0.6

    def items(self):
        return conferenciaslista.objects.all()


class CursosSitemap(sitemaps.Sitemap):
    changefreq = 'monthly'
    priority = 0.6

    def items(self):
        return cursoslista.objects.all()
|
[
"restonjd@gmail.com"
] |
restonjd@gmail.com
|
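The sitemap classes above only take effect once they are wired into a sitemap view; a minimal sketch following Django's standard `django.contrib.sitemaps` pattern (the URL wiring is an assumption, not taken from the cic project):

```python
# Sketch: exposing the sitemap classes above at /sitemap.xml, using the
# era-appropriate url() routing. Section keys are arbitrary labels.
from django.conf.urls import url
from django.contrib.sitemaps.views import sitemap

sitemaps = {
    'static': StaticViewSitemap,
    'events': EventSitemap,
    'conferencias': ConferenciasSitemap,
    'cursos': CursosSitemap,
}

urlpatterns = [
    url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps},
        name='django.contrib.sitemaps.views.sitemap'),
]
```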
6fe16bcff5ab82547a2b9e865b8a3018ae5b6745
|
2f557f60fc609c03fbb42badf2c4f41ef2e60227
|
/Validation/Configuration/python/mtdSimValid_cff.py
|
0279b8c25a91a7aca5c19feb0e430ade01179cbe
|
[
"Apache-2.0"
] |
permissive
|
CMS-TMTT/cmssw
|
91d70fc40a7110832a2ceb2dc08c15b5a299bd3b
|
80cb3a25c0d63594fe6455b837f7c3cbe3cf42d7
|
refs/heads/TMTT_1060
| 2020-03-24T07:49:39.440996
| 2020-03-04T17:21:36
| 2020-03-04T17:21:36
| 142,576,342
| 3
| 5
|
Apache-2.0
| 2019-12-05T21:16:34
| 2018-07-27T12:48:13
|
C++
|
UTF-8
|
Python
| false
| false
| 620
|
py
|
import FWCore.ParameterSet.Config as cms
# MTD validation sequences
from Validation.MtdValidation.btlSimHits_cfi import btlSimHits
from Validation.MtdValidation.btlDigiHits_cfi import btlDigiHits
from Validation.MtdValidation.btlRecHits_cfi import btlRecHits
from Validation.MtdValidation.etlSimHits_cfi import etlSimHits
from Validation.MtdValidation.etlDigiHits_cfi import etlDigiHits
from Validation.MtdValidation.etlRecHits_cfi import etlRecHits
mtdSimValid = cms.Sequence(btlSimHits + etlSimHits )
mtdDigiValid = cms.Sequence(btlDigiHits + etlDigiHits)
mtdRecoValid = cms.Sequence(btlRecHits + etlRecHits )
|
[
"massimo.casarsa@ts.infn.it"
] |
massimo.casarsa@ts.infn.it
|
d49b62de24437009d7264a5f6e1a2eb02c3ad94f
|
fcfc39c121b9e2501ee9350f9010e61c8d07ee1c
|
/0x06-python-classes/5-main.py
|
e26d7de3e31e7289d868bc7d5faec252f106fd02
|
[] |
no_license
|
Brandixitor/holbertonschool-higher_level_programming
|
042a6a433ef045af04acb2864521fffd9725a66e
|
6bfa61eeb7915690c4a18be64635d79799589117
|
refs/heads/main
| 2023-04-19T03:13:43.208570
| 2021-05-12T11:33:43
| 2021-05-12T11:33:43
| 319,438,555
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
#!/usr/bin/python3
Square = __import__('5-square').Square
my_square = Square(3)
my_square.my_print()
print("--")
my_square.size = 10
my_square.my_print()
print("--")
my_square.size = 0
my_square.my_print()
print("--")
|
[
"noreply@github.com"
] |
Brandixitor.noreply@github.com
|
c5c30d977f4815f994c1540f77d247bd180c6649
|
1693b5ef0ca727d2d7accccd757400afca81dc12
|
/tests/conftest.py
|
94d39104ad4ce38d3b85fa8da630c335a2f7b00a
|
[
"Apache-2.0"
] |
permissive
|
ghas-results/baklava
|
8230f67fbd100842a52f7928edd1a40944d52088
|
0e029097983db6cea00a7d779b887b149975fbc4
|
refs/heads/master
| 2023-07-13T05:01:09.438209
| 2021-08-18T00:55:53
| 2021-08-18T00:55:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 581
|
py
|
import pytest


def pytest_addoption(parser):
    parser.addoption(
        "--slow", action="store_true", default=False, help="run slow tests"
    )


def pytest_configure(config):
    config.addinivalue_line("markers", "slow: mark test as slow to run")


def pytest_collection_modifyitems(config, items):
    if config.getoption("--slow"):
        # --slow given on the cli: do not skip slow tests
        return
    skip_slow = pytest.mark.skip(reason="need --slow option to run")
    for item in items:
        if "slow" in item.keywords:
            item.add_marker(skip_slow)
|
[
"code@matthewmadson.com"
] |
code@matthewmadson.com
|
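The conftest above gates collection on a `--slow` flag; a tiny illustrative test module showing how the marker is consumed:

```python
# Illustrative tests: with the conftest above, test_big_sum is skipped under
# a plain `pytest` run and executes only with `pytest --slow`.
import pytest

def test_fast():
    assert 1 + 1 == 2

@pytest.mark.slow
def test_big_sum():
    assert sum(range(10_000_000)) > 0
```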
e79e521415686e367a6d4822f711d78ae71bf53d
|
ca738d4e1d2a6585b7dcfb9362487141b47018c2
|
/WVWebsite/app/admin.py
|
432976f35ab488346c695ae60ac4f573345c6d2f
|
[] |
no_license
|
Tyler-Irving/water-valley-website
|
599a631e6ce3fd2e9b638d93891a79352a783f8b
|
c0954e959f94b56b4202fffce0a316c3d09b2ffe
|
refs/heads/master
| 2021-05-21T04:25:53.256861
| 2020-04-06T20:12:25
| 2020-04-06T20:12:25
| 252,542,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 159
|
py
|
from django.contrib import admin
from WVWebsite.app.models import Post


class PostAdmin(admin.ModelAdmin):
    pass

admin.site.register(Post, PostAdmin)
|
[
"dbooker@basecampcodingacademy.org"
] |
dbooker@basecampcodingacademy.org
|
de2510471dcc3c7505344e3a7e818cbbcf6cc2d8
|
8ef6d93635d67f01600ae97aae27709bb85035e8
|
/deepmdp/experiments/utils.py
|
f9a2d308fb60c4ed98372863585e3c5321d00482
|
[] |
no_license
|
MkuuWaUjinga/DeepMDP-SSL4RL
|
fcb44cbd570599aeee4e65ec1c9a78fe1fbfd312
|
75a555ce0a89e1c76da12ebd6a351425aae8b69c
|
refs/heads/master
| 2023-01-29T22:41:51.783948
| 2020-12-13T18:57:20
| 2020-12-13T18:57:20
| 268,063,389
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,227
|
py
|
import numpy as np
from visdom import Visdom
from collections import defaultdict
import torch
import pprint


def show_agent_playing(policy, env):
    obs = env.reset()
    env.render()
    for i in range(1000):
        action = policy.get_actions(obs)
        print(action[0])
        obs, rewards, done, env_infos = env.step(action[0])
        if done:
            break
        env.render()


device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class Visualizer:
    # TODO plot episode lengths
    # TODO plot distribution over chosen actions.

    def __init__(self, experiment_id, plot_list, port=9098):
        self.port = 9098
        self.plot_list = plot_list
        self.viz = Visdom(port=port)
        self.env = experiment_id
        self.line_plotter = VisdomLinePlotter(self.viz, env_name=experiment_id)
        self.correlation_plot_window = None
        self.aux_losses = defaultdict(list)
        self.correlation_matrix = None
        self.num_calls = 0
        self.store_every_th = 10
        self.count_correlation_matrix = 0  # Can be calculated from num_calls and store_every_th

    def publish_config(self, config):
        config_string = pprint.pformat(dict(config)).replace("\n", "<br>").replace(" ", "&nbsp;")
        self.viz.text(config_string, env=self.env)

    def visualize_episodical_stats(self, algo, num_new_episodes):
        if self.make_weights_plot():
            self.visualize_weights(algo, num_new_episodes)
        if self.visualize_aux():
            self.visualize_aux_losses(num_new_episodes, len(algo.episode_rewards))
        if self.visualize_latent_space():
            self.visualize_latent_space_correlation(num_new_episodes, len(algo.episode_rewards), algo.experiment_id)
        if self.visualize_stats():
            for i in range(len(algo.episode_rewards) - num_new_episodes, len(algo.episode_rewards)):
                self.line_plotter.plot("episode reward", "rewards", "Rewards per episode", i, algo.episode_rewards[i])
                self.line_plotter.plot("episode mean q-values", "q-values", "Mean q-values per episode", i,
                                       algo.episode_mean_q_vals[i])
                self.line_plotter.plot("episode std q-values", "q-std", "Std of q-values per episode", i,
                                       algo.episode_std_q_vals[i])
                # Plot running average of rewards
                if i > 100:
                    self.line_plotter.plot("episode reward", "avg reward", "Rewards per episode", i,
                                           np.mean(algo.episode_rewards[i - 100:i]),
                                           color=np.array([[0, 0, 128], ]))

    def visualize_module(self, head, head_name, num_episodes, num_new_episodes):
        for x, params in enumerate(head.parameters()):
            l2_norm = params.data.norm(p=2).cpu().numpy()
            min = torch.min(params.data).cpu().numpy()
            max = torch.max(params.data).cpu().numpy()
            mean = torch.mean(params.data).cpu().numpy()
            for i in range(num_new_episodes):
                self.line_plotter.plot(f"metrics {head_name} {x}", f"L2-norm", f"Weights {head_name} {list(params.shape)}",
                                       num_episodes - num_new_episodes + i, l2_norm)
                self.line_plotter.plot(f"metrics {head_name} {x}", f"min", f"Weights of {head_name} {list(params.shape)}",
                                       num_episodes - num_new_episodes + i, min, color=np.array([[0, 0, 128], ]))
                self.line_plotter.plot(f"metrics {head_name} {x}", f"max", f"Weights of {head_name} {list(params.shape)}",
                                       num_episodes - num_new_episodes + i, max, color=np.array([[128, 0, 0], ]))
                self.line_plotter.plot(f"metrics {head_name} {x}", f"mean", f"Weights of {head_name} {list(params.shape)}",
                                       num_episodes - num_new_episodes + i, mean, color=np.array([[0, 128, 0], ]))

    def visualize_weights(self, algo, num_new_episodes):
        num_episodes = len(algo.episode_rewards)
        self.line_plotter.env = algo.experiment_id + "_weights"
        self.visualize_module(algo.qf.head, "Q-head", num_episodes, num_new_episodes)
        self.visualize_module(algo.qf.encoder, "Encoder", num_episodes, num_new_episodes)
        for aux in algo.auxiliary_objectives:
            self.visualize_module(aux.net, aux.__class__.__name__, num_episodes, num_new_episodes)
        self.line_plotter.env = algo.experiment_id + "_main"

    def make_weights_plot(self):
        return "weight_plot" in self.plot_list

    def visualize_aux(self):
        return "aux_loss_plot" in self.plot_list

    def visualize_latent_space(self):
        return "latent_space_correlation_plot" in self.plot_list

    def visualize_stats(self):
        return "episodical_stats" in self.plot_list

    def save_aux_loss(self, loss, loss_type):
        if self.visualize_aux():
            self.aux_losses[loss_type].append(loss)

    def visualize_aux_losses(self, num_new_episodes, total_num_episode):
        if self.aux_losses and num_new_episodes > 0:
            for aux_loss in self.aux_losses:
                for i in range(num_new_episodes):
                    self.line_plotter.plot(aux_loss, "mean", aux_loss, total_num_episode - num_new_episodes + i,
                                           np.mean(self.aux_losses[aux_loss]))
                    self.line_plotter.plot(aux_loss, "median", aux_loss, total_num_episode - num_new_episodes + i,
                                           np.median(self.aux_losses[aux_loss]), color=np.array([[0, 0, 128], ]))
            self.aux_losses = defaultdict(list)

    def save_latent_space(self, algo, next_obs, ground_truth_embedding):
        if self.visualize_latent_space() and self.num_calls % self.store_every_th == 0:
            if ground_truth_embedding is None:
                raise ValueError("Ground truth embedding mustn't be of None type")
            ground_truth_embedding = ground_truth_embedding.to(device)
            algo.qf.eval()
            with torch.no_grad():
                _, embedding = algo.qf(next_obs, return_embedding=True)
            algo.qf.train()
            assert embedding.size() == ground_truth_embedding.size()
            if self.correlation_matrix is None:
                embedding_dim = embedding.size(1)
                self.correlation_matrix = torch.zeros((embedding_dim, embedding_dim)).to(device)
            # Calculate correlation
            self.correlation_matrix += self.calculate_correlation(embedding.t(), ground_truth_embedding.t())
            self.count_correlation_matrix += 1
        self.num_calls += 1

    def visualize_latent_space_correlation(self, num_new_episodes, total_num_episodes, experiment_id):
        if self.correlation_matrix is not None and num_new_episodes > 0:
            self.correlation_matrix = self.correlation_matrix.div(self.count_correlation_matrix)
            assert round(torch.max(self.correlation_matrix).item(), 2) <= 1.0 and round(torch.min(
                self.correlation_matrix).item(), 2) >= -1.0, "Invalid value for correlation coefficient!"
            self.line_plotter.env = experiment_id + "_latent_space"
            column_names = ["pos_x", "pos_y", "vel_x", "vel_y", "ang", "ang_vel", "leg_1", "leg_2"]
            row_names = ['l1', 'l2', 'l3', 'l4', 'l5', 'l6', 'l7', 'l8']
            self.correlation_plot_window = self.viz.heatmap(X=self.correlation_matrix,
                                                            env=self.env,
                                                            win=self.correlation_plot_window,
                                                            opts=dict(
                                                                columnnames=column_names,
                                                                rownames=row_names,
                                                                colormap='Viridis',
                                                                xmin=-1.0,
                                                                xmax=1.0,
                                                                title="Average latent space correlation per batch and episode"
                                                            ))
            for i, column_name in enumerate(column_names):
                for j, row_name in enumerate(row_names):
                    for k in range(num_new_episodes):
                        self.line_plotter.plot(column_name + "_correlation", row_name, column_name,
                                               total_num_episodes - num_new_episodes + k,
                                               self.correlation_matrix[j, i].cpu().numpy(),
                                               color=np.array([[int((255/8)*j), int((255/8)*(8-j)), 0], ]))
            self.line_plotter.env = experiment_id + "_main"
            self.correlation_matrix = None
            self.count_correlation_matrix = 0

    @staticmethod
    def calculate_correlation(x1, x2):
        """
        Takes two 2D tensors of shape (latent_space_size, sample_size) and calculates the column-wise
        correlation between the two.
        :param x1:
        :param x2:
        :return: a 2D tensor of shape (latent_space_size, latent_space_size)
        """
        with torch.no_grad():
            # Calculate covariance matrix of columns
            mean_x1 = torch.mean(x1, 1).unsqueeze(1)
            mean_x2 = torch.mean(x2, 1).unsqueeze(1)
            x1m = x1.sub(mean_x1)
            x2m = x2.sub(mean_x2)
            c = x1m.mm(x2m.t())
            c = c / (x1.size(1) - 1)
            # Normalize by standard deviations. Add epsilon for numerical stability if std is close to 0
            epsilon = 1e-9
            std_x1 = torch.std(x1, 1).unsqueeze(1) + epsilon
            std_x2 = torch.std(x2, 1).unsqueeze(1) + epsilon
            c = c.div(std_x1)
            c = c.div(std_x2.t())
            assert round(torch.max(c).item(), 2) <= 1.0 and round(torch.min(
                c).item(), 2) >= -1.0, "Invalid value for correlation coefficient!"
        return c


class VisdomLinePlotter:
    def __init__(self, viz, env_name='main', xlabel='episode number'):
        self.viz = viz
        self.env = env_name
        self.plots = {}
        self.legend = defaultdict(dict)
        self.xlabel = xlabel

    def plot(self, var_name, split_name, title_name, x, y, color=np.array([[255, 136, 0], ])):
        self.legend[var_name][split_name] = None
        if var_name not in self.plots:
            self.plots[var_name] = self.viz.line(X=np.array([x, x]), Y=np.array([y, y]), env=self.env, opts=dict(
                title=title_name,
                xlabel=self.xlabel,
                ylabel=var_name,
                linecolor=color,
                legend=list(self.legend[var_name])
            ))
        else:
            self.viz.line(X=np.array([x]), Y=np.array([y]), env=self.env, win=self.plots[var_name], name=split_name,
                          update='append', opts=dict(linecolor=color, legend=list(self.legend[var_name])))
|
[
"adrian.ziegler@student.hpi.de"
] |
adrian.ziegler@student.hpi.de
|
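`calculate_correlation` above computes an ε-regularized cross-correlation between row variables; a quick cross-check sketch against `np.corrcoef`, with shapes following the docstring, `(latent_space_size, sample_size)`:

```python
# Sketch: verify calculate_correlation against numpy. Its output should match
# the off-diagonal block of np.corrcoef up to the 1e-9 epsilon term.
import numpy as np
import torch

d, n = 4, 256
x1, x2 = torch.randn(d, n), torch.randn(d, n)
ours = Visualizer.calculate_correlation(x1, x2).numpy()
ref = np.corrcoef(np.vstack([x1.numpy(), x2.numpy()]))[:d, d:]
print(np.abs(ours - ref).max())  # tiny (epsilon-sized) difference expected
```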
e1e220fec8096178af970a98f1e7322006888673
|
ecd681443b6b053c3e9a2a9517f61ad9a3b67d84
|
/5.1 Read and Write txt files.py
|
a421a696bb682e6532b67dfcf979d9d253247f8d
|
[] |
no_license
|
EchoZen/How-to-Automate-The-Boring-Stuff
|
636f8e79b7aa2f351bb6e8cfcc24d38f40077b4f
|
22be3f3d57eac6c04f3f0a77379d340056e78d4e
|
refs/heads/master
| 2022-09-12T21:15:42.563326
| 2020-06-01T07:56:16
| 2020-06-01T07:56:16
| 258,112,007
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,223
|
py
|
# plain text files, readable to humans
# binary files stored in binary format, unreadable to humans
# meatball_file = open(r'C:\Users\zchen\OneDrive\Documents\Spaghetti\meatball.txt')
# print(meatball_file.read())
# meatball_file.close()
# Have to reopen the file to reread it
# meatball_file = open(r'C:\Users\zchen\OneDrive\Documents\Spaghetti\meatball.txt')
# print(meatball_file.readlines())  # returns output as a list
# meatball_file.close()

# Write mode will overwrite the file
meatball_file = open(r'C:\Users\zchen\OneDrive\Documents\meatball.txt', 'w')
meatball_file.write('Meatball!\n')  # Have to add the newline character ourselves
meatball_file.write('Meatball!\n')
meatball_file.write('Meatball!\n')
meatball_file.close()  # close before reopening in another mode

# Append mode adds to the file, does not overwrite it
meatball_file = open(r'C:\Users\zchen\OneDrive\Documents\meatball.txt', 'a')
meatball_file.write('Cheese!\n')
meatball_file.write('Cheese!\n')
meatball_file.close()

# Shelve module can store lists, dictionaries and other non-text data as a binary file
import shelve
shelffile = shelve.open('mydata')
shelffile['Scientists'] = ['Linus Pauling', 'Marie Curie', 'George Beadle', 'Barry Marshall']
shelffile.close()
shelffile = shelve.open('mydata')
print(shelffile['Scientists'])
print(list(shelffile.keys()))
shelffile.close()
|
[
"zoe9955@gmail.com"
] |
zoe9955@gmail.com
|
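Since Python 3.4, `shelve.open` also works as a context manager, which avoids the explicit `close()` bookkeeping the snippet needs; a small sketch:

```python
# Sketch: the same shelve round-trip with context managers (Python 3.4+),
# so the shelf is closed even if an exception occurs mid-block.
import shelve

with shelve.open('mydata') as db:
    db['Scientists'] = ['Linus Pauling', 'Marie Curie', 'George Beadle', 'Barry Marshall']

with shelve.open('mydata') as db:
    print(db['Scientists'])
    print(list(db.keys()))
```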
8c91fe451ed5c1c240a26195a0cd2ad3ca3d65aa
|
8dbe9c94291ac7ebd4630af6ec83c245d3b3ae5e
|
/captcha/seleniumAlertHandle.py
|
8d74645e9b4da15d72ea69053f0c81fdf22dd713
|
[] |
no_license
|
stooorm521/SeleniumChromium
|
2c4940f2480bb895f07092fb80e91aaf9bc66263
|
7ad118075c8aea05ad6959d09da522865bddc385
|
refs/heads/master
| 2020-03-14T05:10:06.215797
| 2018-05-03T00:02:38
| 2018-05-03T00:02:38
| 131,458,599
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 351
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Alert popups
# After you trigger some event, a popup alert may appear on the page;
# handle the alert or read its message as follows:
from selenium import webdriver
# import the ActionChains class
from selenium.webdriver import ActionChains

driver = webdriver.PhantomJS()
alert = driver.switch_to.alert  # switch_to.alert is a property, not a method
|
[
"Storm@ShijiedeMacBook-Pro.local"
] |
Storm@ShijiedeMacBook-Pro.local
|
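Once the `Alert` object above is obtained, the usual operations are reading its text and accepting or dismissing it; a hedged sketch using the standard Selenium API (note that PhantomJS itself does not support JavaScript alerts, so a real browser driver is assumed):

```python
# Sketch: typical alert handling with Selenium's documented API; assumes a
# driver on a page that actually raises an alert.
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

WebDriverWait(driver, 5).until(EC.alert_is_present())
alert = driver.switch_to.alert
print(alert.text)  # read the alert message
alert.accept()     # or alert.dismiss()
```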
013028572404cabae09a58b6d63b8a83b2a48bc5
|
0c9a1c936f45f801b6816bd34b4271e764e71d5c
|
/Site/content/urls.py
|
64ffe17c544616a2562cd5e2cec4c4e4bdcb5be0
|
[] |
no_license
|
tomwhartung/hello-django
|
7c7b89568901dd816a931d049fccb32cd6a58c34
|
6fb8ce136214282604bd960135ad00ac136ebed1
|
refs/heads/master
| 2020-04-09T21:50:43.780514
| 2018-12-07T02:26:30
| 2018-12-07T02:26:30
| 160,613,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,472
|
py
|
""" urls.py for our content app
Purpose: define the urls for this app
Author: Tom W. Hartung
Date: Fall, 2018.
Copyright: (c) 2018 Tom W. Hartung, Groja.com, and JooMoo Websites LLC.
Reference:
https://docs.djangoproject.com/en/2.1/intro/tutorial01/#write-your-first-view
"""
from django.conf.urls import *
from . import views
"""
When upgrading to django 2.0+, we can use the path method:
https://docs.djangoproject.com/en/2.0/ref/urls/#path
from django.urls import path
The routes we are using are quite simple, and we already have a
version of this file that uses the path method:
./urls-uses_path-save_for_2.0.py
More:
https://stackoverflow.com/questions/47947673/is-it-better-to-use-path-or-url-in-urls-py-for-django-2-0
"""
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^home$', views.home, name='home'),
url(r'^v$', views.versions, name='versions'),
url(r'^legal/affiliate_marketing_disclosure$',
views.affiliate_marketing_disclosure,
name='affiliate_marketing_disclosure'),
url(r'^legal/privacy_policy$',
views.privacy_policy,
name='privacy_policy'),
url(r'^legal/questionnaire_disclaimer$',
views.questionnaire_disclaimer,
name='questionnaire_disclaimer'),
url(r'^legal/terms_of_service$',
views.terms_of_service,
name='terms_of_service'),
url(r'^(?P<unknown_page>[\w\W]+)$',
views.not_found,
name='not_found'),
]
|
[
"tomwhartung@gmail.com"
] |
tomwhartung@gmail.com
|
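The docstring above mentions a saved Django 2.0 version of this file using `path()`; a sketch of what the simple routes would look like in that style (the catch-all still needs a regex, hence `re_path`):

```python
# Sketch: the simple routes above in Django 2.0+ style; the legal/ routes
# would follow the same pattern.
from django.urls import path, re_path
from . import views

urlpatterns = [
    path('', views.home, name='home'),
    path('home', views.home, name='home'),
    path('v', views.versions, name='versions'),
    re_path(r'^(?P<unknown_page>[\w\W]+)$', views.not_found, name='not_found'),
]
```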
1f6d0891079a9ec92081eec98735c407084836d5
|
f3c04b6dc0ffa9fabb51ae7c36c0b3b616f5f858
|
/Algorithms/Sorting/CountingSort2.py
|
33689236fa78d743c56ffd31d110f4b98c131c59
|
[] |
no_license
|
woosanguk/solve-at-day
|
31f49e93678986c2dd4cc8f05b2487ee7501ec49
|
f7626b939301b74d245c2ba3249fbcf468236926
|
refs/heads/master
| 2021-09-25T05:54:08.580502
| 2018-10-18T21:50:32
| 2018-10-18T21:50:32
| 107,080,716
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
"""
https://www.hackerrank.com/challenges/countingsort2/problem
"""
def counting_sort_2_solve(n, arr):
ret = sorted(arr)
for v in ret:
print(v, end=" ")
if __name__ == "__main__":
counting_sort_2_solve(6, [2, 4, 6, 8, 3, 3])
|
[
"baofree.uk@gmail.com"
] |
baofree.uk@gmail.com
|
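Despite the challenge name, the snippet above delegates to Timsort via `sorted()`; an actual counting sort, assuming the usual HackerRank bound of values below 100, could look like this sketch:

```python
# Sketch: a genuine counting sort; max_value=100 is an assumption about the
# input range, per the challenge constraints.
def counting_sort(arr, max_value=100):
    counts = [0] * max_value
    for v in arr:
        counts[v] += 1
    out = []
    for value, count in enumerate(counts):
        out.extend([value] * count)
    return out

print(*counting_sort([2, 4, 6, 8, 3, 3]))  # 2 3 3 4 6 8
```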
4096758bfd741d35031827d668c50d99f31c7233
|
a34ec07c3464369a88e68c9006fa1115f5b61e5f
|
/A_Basic/String/L0_2696_Minimum_String_Length_After_Removing_Substrings_L0.py
|
e32d35c6e1bffda7f50b78e26d421076f1c6ad3e
|
[] |
no_license
|
824zzy/Leetcode
|
9220f2fb13e03d601d2b471b5cfa0c2364dbdf41
|
93b7f4448a366a709214c271a570c3399f5fc4d3
|
refs/heads/master
| 2023-06-27T02:53:51.812177
| 2023-06-16T16:25:39
| 2023-06-16T16:25:39
| 69,733,624
| 14
| 3
| null | 2022-05-25T06:48:38
| 2016-10-01T10:56:07
|
Python
|
UTF-8
|
Python
| false
| false
| 267
|
py
|
""" https://leetcode.com/problems/minimum-string-length-after-removing-substrings/
simulation
"""
class Solution:
def minLength(self, s: str) -> int:
while "AB" in s or "CD" in s:
s = s.replace('AB', '').replace('CD', '')
return len(s)
|
[
"zhengyuan.zhu@mavs.uta.edu"
] |
zhengyuan.zhu@mavs.uta.edu
|
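The `while`/`replace` loop above can rescan the string many times (quadratic in the worst case); the standard linear-time alternative is a stack, sketched here:

```python
# Sketch: O(n) variant of minLength; pop whenever the stack top plus the
# current character forms "AB" or "CD".
def min_length(s: str) -> int:
    stack = []
    for ch in s:
        if stack and stack[-1] + ch in ("AB", "CD"):
            stack.pop()
        else:
            stack.append(ch)
    return len(stack)

assert min_length("ABFCACDB") == 2  # the LeetCode 2696 example
```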
6d647e3ce89d17c123d228ce25d537ea905ee166
|
cd541adb8fed73f65039d90f0eaa2670f290423c
|
/main.py
|
a175754b485a13516ad9833fa341ef3ff4e6e5a8
|
[] |
no_license
|
sakakendo/stamp
|
8273ba9e5a05eb34123d7c6dbb376091c3bc5dac
|
e659e748df5ab38a5a4a04776e75c36a5f7b9790
|
refs/heads/master
| 2022-12-23T21:42:16.397037
| 2019-01-07T13:48:18
| 2019-01-07T13:48:18
| 159,841,025
| 0
| 0
| null | 2022-12-08T01:29:34
| 2018-11-30T15:16:47
|
Python
|
UTF-8
|
Python
| false
| false
| 7,590
|
py
|
import dotenv
import linebot
import os
import requests
from flask import Flask, request, session, abort, render_template
from linebot.exceptions import (
    InvalidSignatureError
)
from linebot.models import (
    BeaconEvent, MessageEvent, TextMessage, TextSendMessage,
)
from icecream import ic

HOST = "https://sakak.ml"
REDIRECT_URL = HOST + "/line/login/callback"

# env
# print(dotenv.find_dotenv())
dotenv.load_dotenv(dotenv.find_dotenv())

# LINE MESSAGING API
CHANNEL_ACCESS_TOKEN = os.environ.get("CHANNEL_ACCESS_TOKEN")
CHANNEL_SECRET = os.environ.get("CHANNEL_SECRET")
print(CHANNEL_ACCESS_TOKEN, CHANNEL_SECRET)
if not CHANNEL_ACCESS_TOKEN or not CHANNEL_SECRET:
    print("env not loaded")
else:
    print("env file loaded successfully")

# LINE LOGIN API
LINE_LOGIN_CHANNEL_ID = os.environ.get("LINE_LOGIN_CHANNEL_ID")
LINE_LOGIN_CHANNEL_SECRET = os.environ.get("LINE_LOGIN_CHANNEL_SECRET")
if not LINE_LOGIN_CHANNEL_ID or not LINE_LOGIN_CHANNEL_SECRET:
    print("line login channel_id or channel_secret is not loaded")
else:
    print("line login channel_id and channel_secret were loaded successfully")

LINE_OAUTH_STATE = "QWERTY"

app = Flask(__name__, static_url_path="/line/static")
app.secret_key = "secret_key"
linebot_api = linebot.LineBotApi(CHANNEL_ACCESS_TOKEN)
handler = linebot.WebhookHandler(CHANNEL_SECRET)
access_tokens = {}


@app.route("/line/stamp")
def stamp():
    return render_template("vrview.html")


def get_user_state():
    displayName = session.get('user')
    print(displayName)
    if displayName:
        return "<p>your name : {0}</p>".format(displayName)
    else:
        return "<p>you are not logged in yet</p>"


@app.route("/line/index")
def index():
    return """
    <!doctype html>
    <html>
    <head></head>
    <body>
    <h1> hello world</h1>
    {0}
    <p>
    this is a stamp rally with the line api and google 360 media.
    <a href="/"> top page</a>
    <a href="#"> current page</a>
    <a href="/line/login">start page</a>
    <a href="/line/logout">logout</a>
    <a href="/line/stamp">stamp</a>
    </p>
    <p>developer: <a href="https://github.com/sakakendo0321/">sakakendo0321</a></p>
    </body>
    </html>
    """.format(get_user_state())


@app.route("/line/login")
def login():
    url = "https://access.line.me/oauth2/v2.1/authorize"
    response_type = "code"
    client_id = LINE_LOGIN_CHANNEL_ID
    redirect_uri = REDIRECT_URL  # HOST + "/line/login/callback"
    state = LINE_OAUTH_STATE
    scope = "profile"
    html = """
    <!doctype html>
    <html>
    <body>
    <p>{6}</p>
    <a href="{0}?response_type={1}&client_id={2}&redirect_uri={3}&state={4}&scope={5}">line login</a>
    </body>
    </html>
    """.format(url, response_type, client_id, redirect_uri, state, scope, get_user_state())
    print("login html", html)
    return html


def get_profile(access_token):
    if access_token is not None:
        bearer = "Bearer {0}".format(access_token)
        print("bearer type", type(bearer))
        headers = {"Authorization": bearer}
        print("get_profile headers", headers)
        url = "https://api.line.me/v2/profile"
        res = requests.get(url, headers=headers)
        if res.status_code == 200:  # "is 200" would compare identity, not equality
            body = res.json()
            # print("get profile succeeded", body, type(body))
            return body
        else:
            print("get_profile failed", res.status_code)
    return None


def get_line_access_token(code):
    headers = {"Content-Type": "application/x-www-form-urlencoded"}
    data = {
        "grant_type": "authorization_code",
        "code": code,
        "redirect_uri": REDIRECT_URL,
        "client_id": LINE_LOGIN_CHANNEL_ID,
        "client_secret": LINE_LOGIN_CHANNEL_SECRET
    }
    print(headers, data)
    res = requests.post("https://api.line.me/oauth2/v2.1/token", headers=headers, data=data)
    print("get access_token", res.status_code, res.headers)
    data = res.json()
    print("data:", data)
    access_token = data.get("access_token")
    if access_token is None:
        return None
    else:
        print("got access token successfully:", access_token)
        return access_token


@app.route("/line/login/callback")
def login_callback():
    print("login callback args:", request.args)
    if request.args.get('error'):
        print('login failed. error:', request.args.get('error_description'))
        return """
        <!doctype html>
        <html>
        <body>
        <h1> login failure</h1>
        <p>error : {0}</p>
        </body>
        </html>
        """.format(request.args.get('error_description'))
    # elif request.args.get("friendship_status_changed") == "true":
    code = request.args.get("code")
    state = request.args.get("state")
    print("code", code, "state", state, type(state), type(LINE_OAUTH_STATE))
    if state != LINE_OAUTH_STATE:
        print("state doesn't match", state, ":", LINE_OAUTH_STATE)
        return """
        <!doctype html>
        <html>
        <body>
        <h1> login failure</h1>
        </body>
        </html>
        """
    else:
        access_token = get_line_access_token(code)
        print("succeeded in getting access_token:", access_token)
        profile = get_profile(access_token)
        print("get_profile", profile)
        displayName = profile.get("displayName")
        print("displayName", displayName)
        session['user'] = displayName
        access_tokens[session['user']] = access_token
        ic(access_tokens)
        return """
        <!doctype html>
        <html>
        <body>
        <h1> login succeeded</h1>
        <p>user name: {0}</p>
        </body>
        </html>
        """.format(displayName)
    # return """ <!doctype html> <html> <body> <h1> login failure</h1> </body> </html> """


@app.route("/line/logout")
def logout():
    headers = {"Content-Type": "application/x-www-form-urlencoded"}
    url = "https://api.line.me/oauth2/v2.1/revoke"
    data = {
        "access_token": access_tokens[session['user']],
        "client_id": LINE_LOGIN_CHANNEL_ID,
        "client_secret": LINE_LOGIN_CHANNEL_SECRET
    }
    # token revocation is a POST with a form-encoded body, not a GET
    res = requests.post(url, headers=headers, data=data)
    if res.status_code != 200:
        print("logout failure", res.status_code)
        return """ <!doctype html> <html> <body> <h1> logout failed</h1> </body> </html> """
    return """ <!doctype html> <html> <body> <h1> logout succeeded</h1> </body> </html> """


@app.route("/callback", methods=["POST"])
@app.route("/line/message/callback", methods=["POST"])
def callback():
    print("callback url called")
    signature = request.headers['X-Line-Signature']
    body = request.get_data(as_text=True)
    app.logger.info("Request body" + body)
    try:
        print("signature : ", signature, "body :", body)
        handler.handle(body, signature)
    except InvalidSignatureError:
        print("invalidSignatureError")
        abort(400)
    except Exception as e:
        print("error : ", e)
    return 'OK'


@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    ret = linebot_api.reply_message(
        event.reply_token,
        TextSendMessage(text=event.message.text))
    print("handle_message ret", ret)


@handler.add(BeaconEvent)
def handler_beacon(event):
    print(event)
    if event.beacon.type == "":
        pass


if __name__ == "__main__":
    # app.secret_key = "super secret key"
    # app.config["SESSION_TYPE"] = "filesystem"
    # sess = Session()
    # sess.init_app(app)
    app.run(host="0.0.0.0", port=8080, debug=True)
|
[
"sakakendo0321@gmail.com"
] |
sakakendo0321@gmail.com
|
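The `login()` view above assembles the authorize URL by hand, which breaks if `redirect_uri` or `state` ever contain reserved characters; a sketch using `urlencode` (same parameter names as in the snippet):

```python
# Sketch: percent-encoded construction of the LINE authorize URL.
from urllib.parse import urlencode

params = {
    "response_type": "code",
    "client_id": LINE_LOGIN_CHANNEL_ID,
    "redirect_uri": REDIRECT_URL,
    "state": LINE_OAUTH_STATE,
    "scope": "profile",
}
login_url = "https://access.line.me/oauth2/v2.1/authorize?" + urlencode(params)
```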
5b84cdd2fb0832be94ee1cec8730e00d015a8e0e
|
6f810ca045a09f20436e3ee68198bf040e77af42
|
/download.py
|
d431e749e42dc1ac1afb2076df42f092005d125d
|
[] |
no_license
|
JF1HNL/python-google-drive
|
e138921a89fd0c57763332f5c9305daf99735695
|
cf0facf7c5c332056a41fc4bbd13f0877d99a8d3
|
refs/heads/main
| 2023-08-06T09:26:09.636901
| 2021-10-12T06:01:37
| 2021-10-12T06:01:37
| 414,626,919
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,782
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import pickle
import os.path
import io
import sys
import const
import os
import shutil

# Move anything we already have to files.bak
if os.path.exists("files.bak"):
    shutil.rmtree("files.bak")
os.rename("files", "files.bak")
os.mkdir('files')

# pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.http import MediaIoBaseDownload

SCOPES = ['https://www.googleapis.com/auth/drive']
FOLDER_NAME = const.FOLDER_NAME
os.chdir('files')


def main():
    # OAuth
    drive = None
    creds = None
    if os.path.exists('../token.pickle'):
        with open('../token.pickle', 'rb') as token:
            creds = pickle.load(token)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        elif os.path.exists('../client_secret.json'):
            flow = InstalledAppFlow.from_client_secrets_file(
                '../client_secret.json', SCOPES)
            creds = flow.run_local_server(port=0)
        with open('../token.pickle', 'wb') as token:
            pickle.dump(creds, token)
    if creds and creds.valid:
        drive = build('drive', 'v3', credentials=creds)
    if not drive:
        print('Drive auth failed.')

    # Folder list
    folders = None
    if drive:
        results = drive.files().list(
            pageSize=100,
            fields='nextPageToken, files(id, name)',
            q='name="' + FOLDER_NAME + '" and mimeType="application/vnd.google-apps.folder"'
        ).execute()
        folders = results.get('files', [])
        if not folders:
            print('No folders found.')

    # File list
    files = None
    if folders:
        query = ''
        for folder in folders:
            if query != '':
                query += ' or '
            query += '"' + folder['id'] + '" in parents'
        query = '(' + query + ')'
        results = drive.files().list(
            pageSize=100,
            fields='nextPageToken, files(id, name)',
            q=query
        ).execute()
        files = results.get('files', [])
        if not files:
            print('No files found.')

    # Download
    if files:
        for file in files:
            print(file["name"])
            request = drive.files().get_media(fileId=file['id'])
            fh = io.FileIO(file['name'], mode='wb')
            downloader = MediaIoBaseDownload(fh, request)
            done = False
            while not done:
                _, done = downloader.next_chunk()


if __name__ == '__main__':
    main()
    print("download files")
|
[
"kimossan88@gmail.com"
] |
kimossan88@gmail.com
|
572c7c49a77f8d71065050b761f16bddd06eb5cb
|
6430380f56756bf87e2effff228e6bcfe6afe7c2
|
/main.py
|
015e848fca69d6624d52e3cf77252ea6ffa7b286
|
[] |
no_license
|
isadorasophia/doenota-ocr
|
88b838bc88bf7bd7338b366acfd71b67443be31a
|
530cb79d2fa2c2e6be43aea42a9736213ec059c7
|
refs/heads/master
| 2020-12-26T11:16:21.297287
| 2015-07-27T06:21:46
| 2015-07-27T06:21:46
| 36,196,257
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,999
|
py
|
import base64
import os
import shutil
from scripts.DB import *
from scripts.extractData import *
from scripts.ocr import *
from scripts.imgProcess.process import *
rawDir = "./assets/raw/"
processedDir = "./assets/processed/"
resultsDir = "./assets/results/"
extension = ".jpg"
class ReceiptModel():
def __init__(self):
pass
# saves the raw receipt to further use
def saveReceipt (self, idDir):
rawPath = rawDir + idDir + self.data_id + extension
        with open(rawPath, "wb") as f:
            f.write(base64.decodestring(self.image))
@staticmethod
def createWorkspace(idDir):
        if not os.path.exists(rawDir + idDir):
            os.makedirs(rawDir + idDir)
        if not os.path.exists(processedDir + idDir):
            os.makedirs(processedDir + idDir)
        if not os.path.exists(resultsDir + idDir):
            os.makedirs(resultsDir + idDir)
@staticmethod
def cleanWorkspace(idDir):
shutil.rmtree(rawDir + idDir)
shutil.rmtree(processedDir + idDir)
shutil.rmtree(resultsDir + idDir)
if __name__ == "__main__":
valid = True
while valid:
database = DBop()
# build initial data
receipt = ReceiptModel()
receipt.data_id, receipt.image = database.getNextImage()
# local directory for data handling
idDir = receipt.data_id + '/'
ReceiptModel.createWorkspace(idDir)
# saves the image
# receipt.saveReceipt(idDir)
# process receipt with the respective algorithms
processor = Processor(rawDir + idDir, processedDir + idDir, receipt.data_id, extension)
processor.CRSB()
processor.CTS()
# perform ocr on the processed images
ocr = OCR(processedDir + idDir)
ocr.performOCR(resultsDir + idDir)
extractor = DataExtractor(resultsDir + idDir)
# perform data extract
receipt.COO = extractor.COO()
receipt.CNPJ = extractor.CNPJ()
receipt.date = extractor.date()
receipt.total = extractor.total()
database.save(receipt.data_id, receipt.CNPJ, receipt.date, receipt.COO,
receipt.total)
ReceiptModel.cleanWorkspace(idDir)
valid = database.check()
|
[
"isaecia@gmail.com"
] |
isaecia@gmail.com
|
e20b88fab5a94a1ec78321d0824e0826437788a3
|
c50e7eb190802d7849c0d0cea02fb4d2f0021777
|
/src/azure-firewall/azext_firewall/vendored_sdks/v2020_07_01/v2020_07_01/operations/_load_balancer_frontend_ip_configurations_operations.py
|
1a4bb634cfcdb033cf8038a3525aee6d107280e4
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azure-cli-extensions
|
c1615b19930bba7166c282918f166cd40ff6609c
|
b8c2cf97e991adf0c0a207d810316b8f4686dc29
|
refs/heads/main
| 2023-08-24T12:40:15.528432
| 2023-08-24T09:17:25
| 2023-08-24T09:17:25
| 106,580,024
| 336
| 1,226
|
MIT
| 2023-09-14T10:48:57
| 2017-10-11T16:27:31
|
Python
|
UTF-8
|
Python
| false
| false
| 9,049
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerFrontendIPConfigurationsOperations(object):
"""LoadBalancerFrontendIPConfigurationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.LoadBalancerFrontendIPConfigurationListResult"]
"""Gets all the load balancer frontend IP configurations.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerFrontendIPConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_07_01.models.LoadBalancerFrontendIPConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerFrontendIPConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerFrontendIPConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/frontendIPConfigurations'} # type: ignore
def get(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
frontend_ip_configuration_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.FrontendIPConfiguration"
"""Gets load balancer frontend IP configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param frontend_ip_configuration_name: The name of the frontend IP configuration.
:type frontend_ip_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FrontendIPConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.FrontendIPConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FrontendIPConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'frontendIPConfigurationName': self._serialize.url("frontend_ip_configuration_name", frontend_ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FrontendIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/frontendIPConfigurations/{frontendIPConfigurationName}'} # type: ignore
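# A minimal usage sketch (an assumption, not part of the generated module):
# this operations group is normally reached through NetworkManagementClient
# from azure-mgmt-network rather than instantiated directly; "my-rg" and
# "my-lb" are placeholder resource names.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.network import NetworkManagementClient
#
#     client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     for fip in client.load_balancer_frontend_ip_configurations.list("my-rg", "my-lb"):
#         print(fip.name)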
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
34cdafd1ad626fcbfa47442dca32c33c62656ae3
|
3031edd9b94418ed4f82d717d388e13ca3f7edae
|
/fairseq/trainer.py
|
de08ae06d4afeba43e157dfbc8a96e6bf00bd391
|
[
"BSD-3-Clause"
] |
permissive
|
apeterswu/fairseq_mix
|
a591dc5f9a5c288eeab00f6ab63e42ca3d1a13ec
|
0f96d323d32edfdef19773f812970a76e618aad2
|
refs/heads/master
| 2022-11-12T11:31:23.447737
| 2020-06-27T06:55:44
| 2020-06-27T06:55:44
| 273,871,786
| 12
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,817
|
py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Train a network across multiple GPUs.
"""
from collections import defaultdict, OrderedDict
import contextlib
from itertools import chain
import torch
from fairseq import distributed_utils, models, optim, utils
from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter
from fairseq.optim import lr_scheduler
class Trainer(object):
"""Main class for data parallel training.
This class supports synchronous distributed data parallel training,
where multiple workers each have a full model replica and gradients
are accumulated across workers before each update. We use
:class:`~torch.nn.parallel.DistributedDataParallel` to handle
communication of the gradients across workers.
"""
def __init__(self, args, task, model, criterion, dummy_batch):
if not torch.cuda.is_available():
raise NotImplementedError('Training on CPU is not supported')
self.args = args
self.task = task
# copy model and criterion to current device
self.criterion = criterion.cuda()
if args.fp16:
self._model = model.half().cuda()
else:
self._model = model.cuda()
# initialize meters
self.meters = OrderedDict()
self.meters['train_loss'] = AverageMeter()
self.meters['train_nll_loss'] = AverageMeter()
self.meters['train_loss_sen_piece'] = AverageMeter()
self.meters['train_nll_loss_sen_piece'] = AverageMeter()
self.meters['train_overall_loss'] = AverageMeter()
self.meters['train_overall_nll_loss'] = AverageMeter()
self.meters['valid_loss'] = AverageMeter()
self.meters['valid_nll_loss'] = AverageMeter()
self.meters['valid_loss_sen_piece'] = AverageMeter()
self.meters['valid_nll_loss_sen_piece'] = AverageMeter()
self.meters['valid_overall_loss'] = AverageMeter()
self.meters['valid_overall_nll_loss'] = AverageMeter()
self.meters['wps'] = TimeMeter() # words per second
self.meters['ups'] = TimeMeter() # updates per second
self.meters['wpb'] = AverageMeter() # words per batch
self.meters['bsz'] = AverageMeter() # sentences per batch
self.meters['gnorm'] = AverageMeter() # gradient norm
self.meters['clip'] = AverageMeter() # % of updates clipped
self.meters['oom'] = AverageMeter() # out of memory
if args.fp16:
self.meters['loss_scale'] = AverageMeter() # dynamic loss scale
self.meters['wall'] = TimeMeter() # wall time in seconds
self.meters['train_wall'] = StopwatchMeter() # train wall time in seconds
self._lr_scheduler = None
self._dummy_batch = dummy_batch
self._num_updates = 0
self._optim_history = None
self._optimizer = None
self._wrapped_model = None
@property
def model(self):
if self._wrapped_model is None:
if self.args.distributed_world_size > 1:
self._wrapped_model = models.DistributedFairseqModel(
self.args, self._model,
)
else:
self._wrapped_model = self._model
return self._wrapped_model
@property
def optimizer(self):
if self._optimizer is None:
self._build_optimizer()
return self._optimizer
@property
def lr_scheduler(self):
if self._lr_scheduler is None:
self._build_optimizer() # this will initialize self._lr_scheduler
return self._lr_scheduler
def _build_optimizer(self):
if self.args.fp16:
if torch.cuda.get_device_capability(0)[0] < 7:
print('| WARNING: your device does NOT support faster training with --fp16, '
'please switch to FP32 which is likely to be faster')
params = list(filter(lambda p: p.requires_grad, self.model.parameters()))
self._optimizer = optim.FP16Optimizer.build_optimizer(self.args, params)
else:
if torch.cuda.get_device_capability(0)[0] >= 7:
print('| NOTICE: your device may support faster training with --fp16')
self._optimizer = optim.build_optimizer(self.args, self.model.parameters())
self._lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self._optimizer)
def save_checkpoint(self, filename, extra_state):
"""Save all training state in a checkpoint file."""
if distributed_utils.is_master(self.args): # only save one checkpoint
extra_state['train_meters'] = self.meters
utils.save_state(
filename, self.args, self.get_model(), self.criterion, self.optimizer,
self.lr_scheduler, self._num_updates, self._optim_history, extra_state,
)
def load_checkpoint(self, filename, reset_optimizer=False, reset_lr_scheduler=False, optimizer_overrides=None):
"""Load all training state from a checkpoint file."""
extra_state, self._optim_history, last_optim_state = \
utils.load_model_state(filename, self.get_model())
if last_optim_state is not None and not reset_optimizer:
# rebuild optimizer after loading model, since params may have changed
self._build_optimizer()
# only reload optimizer and lr_scheduler if they match
last_optim = self._optim_history[-1]
assert last_optim['criterion_name'] == self.criterion.__class__.__name__, \
'criterion does not match; please reset the optimizer (--reset-optimizer)'
assert last_optim['optimizer_name'] == self.optimizer.__class__.__name__, \
'optimizer does not match; please reset the optimizer (--reset-optimizer)'
if not reset_lr_scheduler:
self.lr_scheduler.load_state_dict(last_optim['lr_scheduler_state'])
self.optimizer.load_state_dict(last_optim_state, optimizer_overrides)
self._num_updates = last_optim['num_updates']
if extra_state is not None and 'train_meters' in extra_state:
self.meters.update(extra_state['train_meters'])
del extra_state['train_meters']
# reset TimeMeters, since their start times don't make sense anymore
for meter in self.meters.values():
if isinstance(meter, TimeMeter):
meter.reset()
return extra_state
def unset_param(self):
for name, p in self.model.named_parameters():
if "sen_piece" not in name:
p.requires_grad = False
def unset_sen_piece_param(self):
for name, p in self.model.named_parameters():
if "sen_piece" in name:
p.requires_grad = False
def train_step(self, samples, dummy_batch=False):
"""Do forward, backward and parameter update."""
# Set seed based on args.seed and the update number so that we get
# reproducible results when resuming from checkpoints
seed = self.args.seed + self.get_num_updates()
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
self.model.train()
self.zero_grad()
if not dummy_batch:
self.meters['train_wall'].start()
# forward and backward pass
logging_outputs, sample_sizes, ooms = [], [], 0
sample_sizes_sen_piece = []
sample_sizes_overall = []
for i, sample in enumerate(samples):
sample = self._prepare_sample(sample)
if sample is None:
# when sample is None, run forward/backward on a dummy batch
# and ignore the resulting gradients
sample = self._prepare_sample(self._dummy_batch)
ignore_grad = True
else:
ignore_grad = False
try:
# forward
with torch.autograd.set_detect_anomaly(True):
loss, sample_size, loss_sen_piece, sample_size_sen_piece, overall_loss, sample_size_overall, logging_output = self.task.get_loss(
self.model, self.criterion, sample,
)
if ignore_grad:
loss *= 0
if self.args.distributed_world_size > 1:
# only all-reduce gradients in the last backwards pass
if i < len(samples) - 1:
self.model.need_reduction = False
else:
self.model.need_reduction = True
# backward
# self.optimizer.backward(loss)
# self.optimizer.backward(loss_sen_piece)
self.optimizer.backward(overall_loss) # train with overall_loss
# for name, p in self.model.named_parameters():
# if not p.requires_grad:
# p.grad = torch.cuda.FloatTensor(p.size()).fill_(0.)
if not ignore_grad:
logging_outputs.append(logging_output)
sample_sizes.append(sample_size)
sample_sizes_sen_piece.append(sample_size_sen_piece)
sample_sizes_overall.append(sample_size_overall)
except RuntimeError as e:
if 'out of memory' in str(e):
print('| WARNING: ran out of memory, skipping batch')
ooms += 1
self.zero_grad()
else:
raise e
if dummy_batch:
return None
# gather logging outputs from all replicas
if self.args.distributed_world_size > 1:
logging_outputs, sample_sizes, sample_sizes_sen_piece, sample_sizes_overall, ooms = zip(*distributed_utils.all_gather_list(
[logging_outputs, sample_sizes, sample_sizes_sen_piece, sample_sizes_overall, ooms],
))
logging_outputs = list(chain.from_iterable(logging_outputs))
sample_sizes = list(chain.from_iterable(sample_sizes))
sample_sizes_sen_piece = list(chain.from_iterable(sample_sizes_sen_piece))
sample_sizes_overall = list(chain.from_iterable(sample_sizes_overall))
ooms = sum(ooms)
if ooms == self.args.distributed_world_size:
print('| WARNING: OOM in all workers, skipping update')
self.zero_grad()
return None
# aggregate logging outputs and sample sizes
logging_output = self.criterion.__class__.aggregate_logging_outputs(logging_outputs)
sample_size = self.criterion.__class__.grad_denom(sample_sizes)
sample_size_sen_piece = self.criterion.__class__.grad_denom(sample_sizes_sen_piece)
sample_size_overall = self.criterion.__class__.grad_denom(sample_sizes_overall)
if not all(k in logging_output for k in ['ntokens', 'ntokens_sen_piece', 'nsentences']):
raise Exception((
'Please update the {}.aggregate_logging_outputs() method to '
'return ntokens, ntokens_sen_piece, and nsentences'
).format(self.criterion.__class__.__name__))
try:
# normalize grads by sample size
# self.optimizer.multiply_grads(self.args.distributed_world_size / float(sample_size))
# self.optimizer.multiply_grads(self.args.distributed_world_size / float(sample_size_sen_piece))
self.optimizer.multiply_grads(self.args.distributed_world_size / float(sample_size_overall / 2.0))
# clip grads
grad_norm = self.optimizer.clip_grad_norm(self.args.clip_norm)
# take an optimization step
self.optimizer.step()
self._num_updates += 1
# update learning rate
self.lr_scheduler.step_update(self._num_updates)
# update meters
ntokens = logging_output.get('ntokens', 0)
ntokens_sen_piece = logging_output.get('ntokens_sen_piece', 0)
nsentences = logging_output.get('nsentences', 0)
self.meters['wps'].update(ntokens)
self.meters['ups'].update(1.)
self.meters['wpb'].update(ntokens)
self.meters['bsz'].update(nsentences)
self.meters['gnorm'].update(grad_norm)
self.meters['clip'].update(
1. if grad_norm > self.args.clip_norm and self.args.clip_norm > 0 else 0.
)
self.meters['oom'].update(ooms)
self.meters['train_loss'].update(logging_output.get('loss', 0), sample_size)
self.meters['train_loss_sen_piece'].update(logging_output.get('loss_sen_piece', 0), sample_size_sen_piece)
self.meters['train_overall_loss'].update(logging_output.get('overall_loss', 0), sample_size_overall/2.0)
if 'nll_loss' in logging_output:
self.meters['train_nll_loss'].update(logging_output.get('nll_loss', 0), ntokens)
if 'nll_loss_sen_piece' in logging_output:
self.meters['train_nll_loss_sen_piece'].update(logging_output.get('nll_loss_sen_piece', 0), ntokens_sen_piece)
            if 'overall_nll_loss' in logging_output:
self.meters['train_overall_nll_loss'].update(logging_output.get('overall_nll_loss', 0), (ntokens + ntokens_sen_piece)/2.0)
except OverflowError as e:
print('| WARNING: overflow detected, ' + str(e))
self.zero_grad()
logging_output = None
if self.args.fp16:
self.meters['loss_scale'].reset()
self.meters['loss_scale'].update(self.optimizer.scaler.loss_scale)
self.meters['train_wall'].stop()
return logging_output
def valid_step(self, sample, raise_oom=False):
"""Do forward pass in evaluation mode."""
with torch.no_grad():
self.model.eval()
sample = self._prepare_sample(sample)
if sample is None:
sample = self._prepare_sample(self._dummy_batch)
ignore_results = True
else:
ignore_results = False
try:
_loss, sample_size, _loss_sen_piece, sample_size_sen_piece, _overall_loss, sample_size_overall, logging_output = self.task.get_loss(
self.model, self.criterion, sample,
)
except RuntimeError as e:
if 'out of memory' in str(e) and not raise_oom:
print('| WARNING: ran out of memory, retrying batch')
for p in self.model.parameters():
if p.grad is not None:
del p.grad # free some memory
torch.cuda.empty_cache()
return self.valid_step(sample, raise_oom=True)
else:
raise e
if ignore_results:
logging_output, sample_size, sample_size_sen_piece, sample_size_overall = {}, 0, 0, 0
# gather logging outputs from all replicas
if self.args.distributed_world_size > 1:
logging_output, sample_size, sample_size_sen_piece, sample_size_overall = zip(*distributed_utils.all_gather_list(
[logging_output, sample_size, sample_size_sen_piece, sample_size_overall],
))
logging_output = list(logging_output)
sample_size = list(sample_size)
sample_size_sen_piece = list(sample_size_sen_piece)
sample_size_overall = list(sample_size_overall)
else:
logging_output = [logging_output]
sample_size = [sample_size]
sample_size_sen_piece = [sample_size_sen_piece]
sample_size_overall = [sample_size_overall]
# aggregate logging outputs and sample sizes
logging_output = self.criterion.__class__.aggregate_logging_outputs(logging_output)
sample_size = self.criterion.__class__.grad_denom(sample_size)
sample_size_sen_piece = self.criterion.__class__.grad_denom(sample_size_sen_piece)
sample_size_overall = self.criterion.__class__.grad_denom(sample_size_overall)
# update meters for validation
ntokens = logging_output.get('ntokens', 0)
ntokens_sen_piece = logging_output.get('ntokens_sen_piece', 0)
self.meters['valid_loss'].update(logging_output.get('loss', 0), sample_size)
self.meters['valid_loss_sen_piece'].update(logging_output.get('loss_sen_piece', 0), sample_size_sen_piece)
self.meters['valid_overall_loss'].update(logging_output.get('overall_loss', 0), sample_size_overall)
if 'nll_loss' in logging_output:
self.meters['valid_nll_loss'].update(logging_output.get('nll_loss', 0), ntokens)
if 'nll_loss_sen_piece' in logging_output:
self.meters['valid_nll_loss_sen_piece'].update(logging_output.get('nll_loss_sen_piece', 0), ntokens_sen_piece)
if 'overall_nll_loss' in logging_output:
self.meters['valid_overall_nll_loss'].update(logging_output.get('overall_nll_loss', 0), (ntokens + ntokens_sen_piece)/2.0)
return logging_output
def dummy_train_step(self, dummy_batch):
"""Dummy training step for warming caching allocator."""
self.train_step(dummy_batch, dummy_batch=True)
self.zero_grad()
def zero_grad(self):
self.optimizer.zero_grad()
def lr_step(self, epoch, val_loss=None):
"""Adjust the learning rate based on the validation loss."""
return self.lr_scheduler.step(epoch, val_loss)
def lr_step_update(self, num_updates):
"""Update the learning rate after each update."""
return self.lr_scheduler.step_update(num_updates)
def get_lr(self):
"""Get the current learning rate."""
return self.optimizer.get_lr()
def get_model(self):
"""Get the (non-wrapped) model instance."""
return self._model
def get_meter(self, name):
"""Get a specific meter by name."""
if name not in self.meters:
return None
return self.meters[name]
def get_num_updates(self):
"""Get the number of parameters updates."""
return self._num_updates
def _prepare_sample(self, sample):
if sample is None or len(sample) == 0:
return None
return utils.move_to_cuda(sample)
|
[
"apeterswu@gmail.com"
] |
apeterswu@gmail.com
|
e7ed619d855ed3ca7538ebe2d7d47d1c47031add
|
cde9efb6423c41f54dafa3bf2b99ebd818e31752
|
/time_domain/conv-tasnet/main.py
|
318b2cdd0e9648800988150fb2df501c5a997520
|
[] |
no_license
|
yihliang831209/Multi-frequency-Resolution-Singing-Voice-Separation
|
2f9cbf285cf094d5bd5a4be699d8b01be9f8439e
|
6f601088b176bc3a8df2cb6dd6dcae9faf08e6d8
|
refs/heads/main
| 2023-09-05T16:49:35.050402
| 2021-11-21T12:00:14
| 2021-11-21T12:00:14
| 403,480,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,352
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 13 23:10:08 2020
@author: User
"""
from torch.utils.data import Dataset
import matplotlib.pyplot as plt
import scipy.io as sio
import torch
from torch import nn
from tasnet import ConvTasNet
from utils import sizeof_fmt
import numpy as np
import timeit
from torch.utils.tensorboard import SummaryWriter
device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
batch_size = 10
epochs_size = 100
def compute_measures(se,s):
    # SDR of estimate `se` against reference `s`: project `se` onto `s`,
    # then compare the energy of the projected (true) part to the residual.
    Rss=s.transpose().dot(s)
    this_s=s
    a=this_s.transpose().dot(se)/Rss
    e_true=a*this_s
    e_res=se-a*this_s
    Sss=np.sum((e_true)**2)
    Snn=np.sum((e_res)**2)
    SDR=10*np.log10(Sss/Snn)
    return SDR
class Wavedata(Dataset):
def __init__(self,mix,vocal_music):
self.mix = mix
self.vocal_music = vocal_music
def __len__(self):
return len(self.mix[:,0,0])
def __getitem__(self,idx):
data = self.mix[idx,:,:]
target = self.vocal_music[idx,:,:]
return torch.tensor(data).float(), torch.tensor(target).float()
#____input testing data____#
train_folder = '../../../dataset/'
data2=sio.loadmat(train_folder+'DSD100_16k_100percentVocal_pairedMix_randomMix_validation.mat')
print('Data loading finish.')
x_test = data2['x'][:,:].transpose((1,0))
y_test = data2['y'][:,:,:].transpose((2,1,0))
x_test = np.expand_dims(x_test, 1)
len_x_test =len(x_test)
test_data = Wavedata(x_test, y_test)
test_loader = torch.utils.data.DataLoader(test_data, 10, shuffle = False)
#%% release memory
del x_test
del y_test
del data2
#Original Mix
#%% model save
def save_checkpoint(checkpoint_path, model, optimizer):
# state_dict: a Python dictionary object that:
# - for a model, maps each layer to its parameter tensor;
# - for an optimizer, contains info about the optimizer’s states and hyperparameters used.
state = {
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict()}
torch.save(state, checkpoint_path)
print('model saved to %s' % checkpoint_path)
#%%
model = ConvTasNet()
print(model)
size = sizeof_fmt(4 * sum(p.numel() for p in model.parameters()))
print(f"Model size {size}")
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)
criterion = nn.L1Loss()
### =========continue training ==============
# Checkpoint = torch.load(r'./checkpoint/tasnet_55epoch.pth')
# model.load_state_dict(Checkpoint['state_dict'])
# optimizer.load_state_dict(Checkpoint['optimizer'])
### ===========================================
def validation():
model.eval() # set evaluation mode
dataloader_iterator = iter(test_loader)
L1loss_append = []
SDR_vocal_append=[]
SDR_music_append=[]
for idx in range(len_x_test//10):
x_valid, t_valid = next(dataloader_iterator)
with torch.no_grad():
y_estimate = model(x_valid.to(device))
loss = criterion(y_estimate,t_valid.to(device))
L1loss_append.append(loss.item())
if idx%3==0:
vocal_cat = t_valid[:,0,:].numpy()
music_cat = t_valid[:,1,:].numpy()
estimate_vocal_cat = y_estimate[:,0,:].cpu().detach().numpy()
estimate_music_cat = y_estimate[:,1,:].cpu().detach().numpy()
continue
estimate_vocal_cat = np.concatenate((estimate_vocal_cat,y_estimate[:,0,:].cpu().detach().numpy()),0)
estimate_music_cat = np.concatenate((estimate_music_cat,y_estimate[:,1,:].cpu().detach().numpy()),0)
vocal_cat = np.concatenate((vocal_cat,t_valid[:,0,:].numpy()),0)
music_cat = np.concatenate((music_cat,t_valid[:,1,:].numpy()),0)
if (idx+1)%3== 0:
estimate_vocal_cat = np.reshape(estimate_vocal_cat,[-1])
estimate_music_cat = np.reshape(estimate_music_cat,[-1])
vocal_cat = np.reshape(vocal_cat,[-1])
music_cat = np.reshape(music_cat,[-1])
SDR_vocal = compute_measures(estimate_vocal_cat,vocal_cat)
SDR_music = compute_measures(estimate_music_cat,music_cat)
SDR_vocal_append.append(SDR_vocal)
SDR_music_append.append(SDR_music)
    print ('Epoch [{}/{}], validation_Loss: {}'.format(epoch+1, epochs_size,np.mean(L1loss_append)) )
model.train()
return np.mean(L1loss_append),np.median(SDR_vocal_append),np.median(SDR_music_append)
#%% train
training_loss = []
testing_loss = []
validation_L1loss = []
validation_SDR_vocal = []
validation_SDR_music = []
best_vocal_SDR = 0
best_music_SDR = 0
best_mean_SDR = 0
best_epoch = 0
### initial summary writter #####################
writer = SummaryWriter('log_dir')
print('start training....')
model.train()
for epoch in range(epochs_size):
start = timeit.default_timer()
epoch_now = epoch
file_sequence = np.random.permutation(11)
train_loss_sum = []
for load_file_i in range(3): ## separate all data into several parts
print('Data loading '+str(load_file_i+1)+'/3 ....')
data_1 = sio.loadmat(train_folder+'DSD100_16k_100percentVocal_pairedMix_randomMix_'+str(file_sequence[3*load_file_i]+1)+'.mat')
data_2 = sio.loadmat(train_folder+'DSD100_16k_100percentVocal_pairedMix_randomMix_'+str(file_sequence[3*load_file_i+1]+1)+'.mat')
data_3 = sio.loadmat(train_folder+'DSD100_16k_100percentVocal_pairedMix_randomMix_'+str(file_sequence[3*load_file_i+2]+1)+'.mat')
x_1_train=data_1['x'][:,:].transpose((1,0))
y_1_train=data_1['y'][:,:,:].transpose((2,1,0))
x_2_train=data_2['x'][:,:].transpose((1,0))
y_2_train=data_2['y'][:,:,:].transpose((2,1,0))
x_3_train=data_3['x'][:,:].transpose((1,0))
y_3_train=data_3['y'][:,:,:].transpose((2,1,0))
x_train = np.concatenate([x_1_train,x_2_train,x_3_train], axis = 0)
y_train = np.concatenate([y_1_train,y_2_train,y_3_train],axis = 0)
del data_1,data_2,data_3
del x_1_train,x_2_train,x_3_train
del y_1_train,y_2_train,y_3_train
x_train = np.expand_dims(x_train, 1)
train_data = Wavedata(x_train, y_train)
train_loader = torch.utils.data.DataLoader(train_data, batch_size, shuffle = True)
total_step = len(train_loader)
for i, (x,t) in enumerate(train_loader):
# Forward pass
y_estimate = model(x.to(device))
loss = criterion(y_estimate,t.to(device))
train_loss=loss.item()
train_loss_sum.append(train_loss)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i+1) % 100 == 0:
                print ('Epoch [{}/{}], Data_segment [{}/{}], Step [{}/{}], Loss: {}'
                       .format(epoch_now+1, epochs_size, load_file_i+1, 3, i+1, total_step, loss.item()))
print('Best epoch:'+ str(best_epoch)+' Vocal: '+str(best_vocal_SDR)+' Music: '+str(best_music_SDR) )
#== validation =======
stop = timeit.default_timer()
print('Time for one epoch :'+ str(stop-start)+' seconds')
print ('Epoch [{}/{}], Step [{}/{}], Loss: {}'
.format(epoch_now+1, epochs_size, i+1, total_step, loss.item()))
[valid_L1Loss,SDR_vocal_DSD100,SDR_music_DSD100] = validation()
print('is train ? '+str(model.training))
validation_L1loss.append(valid_L1Loss)
validation_SDR_vocal.append(SDR_vocal_DSD100)
validation_SDR_music.append(SDR_music_DSD100)
training_loss.append(np.mean(train_loss_sum))
plt.plot(validation_SDR_vocal,label = "Vocal SDR")
plt.plot(validation_SDR_music,label = "Music SDR")
plt.legend()
plt.show()
writer.add_scalar('Validation/DSD100_Vocal_SDR',SDR_vocal_DSD100,epoch_now)
writer.add_scalar('Validation/DSD100_Music_SDR',SDR_music_DSD100,epoch_now)
# writer.add_scalar('Validation/ikala_Vocal_SDR',SDR_vocal_ikala,epoch_now)
# writer.add_scalar('Validation/ikala_Music_SDR',SDR_music_ikala,epoch_now)
plt.plot(validation_L1loss,label = "validation L1 Loss")
plt.legend()
plt.show()
writer.add_scalar('Validation/loss',valid_L1Loss,epoch_now)
plt.plot(training_loss,label = "training L1 loss")
plt.legend()
plt.show()
writer.add_scalar('Train/L1loss',np.mean(train_loss_sum),epoch_now)
if (SDR_vocal_DSD100>best_vocal_SDR) & (SDR_music_DSD100>best_music_SDR):
best_vocal_SDR = SDR_vocal_DSD100
best_music_SDR = SDR_music_DSD100
best_epoch = epoch_now
checkpoint_path='./checkpoint/tasnet_bestSDR_'+str(best_epoch)+'epoch.pth'
save_checkpoint(checkpoint_path,model,optimizer)
if (SDR_vocal_DSD100+SDR_music_DSD100)/2>best_mean_SDR:
best_mean_SDR = (SDR_vocal_DSD100+SDR_music_DSD100)/2
best_epoch = epoch_now
checkpoint_path='./checkpoint/tasnet_bestMeanSDR_'+str(best_epoch)+'epoch.pth'
save_checkpoint(checkpoint_path,model,optimizer)
    checkpoint_path='./checkpoint/tasnet_'+str(epoch_now)+'epoch.pth'
    save_checkpoint(checkpoint_path,model,optimizer)
|
[
"dennis831209@gmail.com"
] |
dennis831209@gmail.com
|
0064d0360a4012673cf16f4dc981aed3b3ff00fc
|
78727ed093b7100b98c240c101658abdddaa389a
|
/Sender.py
|
65fc4fd1baee255dd44fd2c9bf788e8876d3083c
|
[] |
no_license
|
jaddu14/Sending-Email-using-python
|
25310252e187adcd91115a6f302391e15d678005
|
852ffeffdda7774c78d7a9879bba0a349f1dbc09
|
refs/heads/master
| 2022-12-10T22:22:39.431592
| 2020-09-05T06:27:38
| 2020-09-05T06:27:38
| 293,016,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 814
|
py
|
import smtplib, ssl
smtp_server = 'smtp.gmail.com'
port = 465
sender = "sender's email address"
password = input("Enter your password: ")
reciever = "receiver's email address"
message = """\
Subject: Hi There!
From: {}
To: {}

This message was sent from python!
""".format(sender, reciever)
context = ssl.create_default_context()
# with port 587 check connection
'''
try:
server = smtplib.SMTP(smtp_server, port)
server.ehlo()
server.starttls(context=context)
server.ehlo()
server.login(sender,password)
print('It Worked!')
except Exception as e:
print(e)
finally:
server.quit()
'''
# for sending mail using port 465
with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
server.login(sender, password)
#send mail here
server.sendmail(sender,reciever,message)
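# A hedged alternative sketch: the stdlib email.message.EmailMessage builds
# the headers for you instead of hand-formatting the message string
# (the addresses above remain placeholders):
#
#     from email.message import EmailMessage
#     msg = EmailMessage()
#     msg['Subject'] = 'Hi There!'
#     msg['From'] = sender
#     msg['To'] = reciever
#     msg.set_content('This message was sent from python!')
#     with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
#         server.login(sender, password)
#         server.send_message(msg)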
|
[
"noreply@github.com"
] |
jaddu14.noreply@github.com
|
dd0e4f44ae509c2378d22627339e5524827ad6b0
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03417/s919026589.py
|
d737142c20505a6298fc5efd4de10b7ec76a329f
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 162
|
py
|
N, M = map(int, input().split())
# For N, M > 1 the answer is the count of interior (non-border) cells,
# N*M - (2N + 2M - 4) = (N-2)*(M-2); single-row/column grids are special-cased.
if N==1 and M==1:
    ans = 1
elif N==1 or M==1:
    ans = max(N-2,M-2)
else:
    ans = N * M - ((2 * N + 2 * M) - 4)
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
db77dc9ad45dda920738725edb3829bb32791983
|
3aa0293df2d31cdb2cfe52c82274e3f0f5c88e3e
|
/Courses/1402-craftingqualitycode-coursera/code/a1_bh.py
|
463544e0332e39cfe13318ba5e69add1cb5459de
|
[] |
no_license
|
BruceHad/notes-dev
|
4641ba7ea4d8880cd745f62d8f34afe084774418
|
ff1ebfe2294472e38edaccca981cc467c78dcbc9
|
refs/heads/master
| 2016-09-01T05:16:29.383267
| 2016-04-18T18:51:23
| 2016-04-18T18:51:23
| 54,989,173
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,888
|
py
|
import math
def num_buses(n):
""" (int) -> int
Precondition: n >= 0
Return the minimum number of buses required to transport n people.
Each bus can hold 50 people.
>>> num_buses(75)
2
>>> num_buses(0)
0
>>> num_buses(1)
1
>>> num_buses(49)
1
>>> num_buses(50)
1
>>> num_buses(51)
2
"""
if n > 0:
return int(math.ceil(n/50.))
else:
return 0
def stock_price_summary(price_changes):
""" (list of number) -> (number, number) tuple
price_changes contains a list of stock price changes. Return a 2-item
tuple where the first item is the sum of the gains in price_changes and
the second is the sum of the losses in price_changes.
>>> stock_price_summary([0.01, 0.03, -0.02, -0.14, 0, 0, 0.10, -0.01])
(0.14, -0.17)
>>> stock_price_summary([])
(0, 0)
>>> stock_price_summary([0,0,0])
(0, 0)
>>> stock_price_summary([1,2,3])
(6, 0)
>>> stock_price_summary([-1,-2,-3])
(0, -6)
"""
gains = 0
losses = 0
for i in price_changes:
if i > 0:
gains += i
else:
losses += i
return (gains,losses)
def swap_k(L, k):
""" (list, int) -> NoneType
    Precondition: 0 <= k <= len(L) // 2
Swap the first k items of L with the last k items of L.
>>> nums = [1, 2, 3, 4, 5, 6]
>>> swap_k(nums, 2)
>>> nums
[5, 6, 3, 4, 1, 2]
>>> nums = [1, 2, 3, 4, 5, 6]
>>> swap_k(nums, 0)
>>> nums
[1, 2, 3, 4, 5, 6]
>>> nums = [1, 2, 3, 4, 5, 6]
>>> swap_k(nums, 1)
>>> nums
[6, 2, 3, 4, 5, 1]
>>> nums = [1, 2, 3, 4, 5, 6]
>>> swap_k(nums, 3)
>>> nums
[4, 5, 6, 1, 2, 3]
"""
swap = L[-k:]+L[k:-k]+L[:k]
for i in range(len(L)):
L[i] = swap[i]
if __name__ == '__main__':
import doctest
doctest.testmod()
|
[
"bahadden@gmail.com"
] |
bahadden@gmail.com
|
883c00b68de3119a7cfade048fa15b921c0e8df6
|
92333037243c4c7a808d26657df8fe5e764217b0
|
/PartialFunc.py
|
23606b069450754645a3c68d7f6c98649384dc37
|
[] |
no_license
|
dragonOrTiger/pythonDemo
|
0aab11c7ce758963621d2c4675802640cdc932e0
|
26de12da050c6eadf4e62ca67d193650c3d5c648
|
refs/heads/master
| 2020-04-06T07:12:27.875226
| 2016-08-25T10:22:46
| 2016-08-25T10:22:46
| 65,807,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,512
|
py
|
# When introducing function parameters we saw that giving parameters default
# values makes a function easier to call.
# A partial function achieves the same thing.
# int() converts a string to an integer; with only a string passed,
# int() converts in base 10 by default.
print(int('12345'))
# int() also takes an extra base parameter (default 10); passing base
# performs a base-N conversion.
print(int('12345',base=8))
print(int('12345',16))
# Suppose we have to convert a large number of binary strings; passing
# int(x, base=2) every time is tedious.
# So we can define an int2() function whose base defaults to 2:
def int2(x,base=2):
    return int(x,base)
print(int2('1000000'))
print(int2('1010101'))
# functools.partial creates such a partial function for us; instead of
# defining int2() by hand, the new function int2 can be created directly:
import functools
int2 = functools.partial(int,base=2)
print(int2('1000000'))
print(int2('1010101'))
# In short, functools.partial fixes some arguments of a function (that is,
# sets default values) and returns a new function that is simpler to call.
# Note that the new int2 only changes the default of base to 2; other
# values can still be passed at call time:
print(int2('1000000',base=10))
# A partial function is actually built from a function object, *args and **kw.
max2 = functools.partial(max,10)  # 10 is automatically prepended to *args
print(max2(5,6,7))
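# partial can freeze keyword arguments in **kw the same way; a small extra
# sketch (print's sep parameter is fixed here):
print2 = functools.partial(print, sep=', ')
print2('a', 'b', 'c')  # prints: a, b, c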
|
[
"shiyongjie@neusoft.com"
] |
shiyongjie@neusoft.com
|
ee3af8353dcbf249ff2c3045064b1b52bce07d6f
|
10c42f1e6486b02997ceaeb3e70d091012b72a80
|
/scipy/integrate/odepack.py
|
7bb4c1a8c9be375df855abe6e1b30ca9711f2607
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSL-1.0",
"Qhull"
] |
permissive
|
andyfaff/scipy
|
453a9c9b6f7e6f3d0d00ead85288928df0634131
|
5e136d908a50f18a7a99d0d6645d5cae957dc737
|
refs/heads/main
| 2023-09-01T15:48:52.494964
| 2023-08-26T12:51:14
| 2023-08-26T12:51:14
| 17,390,686
| 2
| 5
|
BSD-3-Clause
| 2023-09-10T23:57:03
| 2014-03-04T04:46:50
|
Python
|
UTF-8
|
Python
| false
| false
| 545
|
py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.integrate` namespace for importing the functions
# included below.
from scipy._lib.deprecation import _sub_module_deprecation
__all__ = ['odeint', 'ODEintWarning'] # noqa: F822
def __dir__():
return __all__
def __getattr__(name):
return _sub_module_deprecation(sub_package="integrate", module="odepack",
private_modules=["_odepack_py"], all=__all__,
attribute=name)
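# A minimal sketch of the supported import path after this deprecation
# (toy ODE, solving dy/dt = -y with y(0) = 1):
#
#     import numpy as np
#     from scipy.integrate import odeint
#     t = np.linspace(0, 5, 50)
#     y = odeint(lambda y, t: -y, 1.0, t)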
|
[
"noreply@github.com"
] |
andyfaff.noreply@github.com
|
cd4915a01e980729f996c998d8289b62a7becfeb
|
db6901abe45c8b1cf3f38d9749141cc9213de2e7
|
/devel/lib/python2.7/dist-packages/mavros_msgs/srv/_FileOpen.py
|
2c90d75a855a86d367cb75e4c249be5ff2e3e364
|
[] |
no_license
|
amilearning/MAV_competition_mavros_ws
|
e95f5f66dac995538c9a682ec243deebf81b451a
|
303a5272fd47e1a0382c8f769623d7cfddb1ebe3
|
refs/heads/master
| 2023-06-24T11:25:47.592856
| 2021-07-20T16:17:34
| 2021-07-20T16:17:34
| 387,848,925
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 115
|
py
|
/home/hojin/drone_ws2/mavros_ws/devel/.private/mavros_msgs/lib/python2.7/dist-packages/mavros_msgs/srv/_FileOpen.py
|
[
"hojin.projects@gmail.com"
] |
hojin.projects@gmail.com
|
089dcf86fe7a83420b58a8349209e3c75495d1d3
|
82555a905d550196b01099f3b8d83aff29595eb5
|
/app.py
|
1e54cf82f1bae2349da38cb5510fc0c08e666d79
|
[] |
no_license
|
epickiller1011/epickiller1011.1
|
3f08842bb241049a79f32322b26a522e532a7dcf
|
97fd099f7a3125e05e6c559c5beabbff3b8f2b01
|
refs/heads/master
| 2022-07-29T13:41:02.862632
| 2020-06-20T16:59:06
| 2020-06-20T16:59:06
| 273,740,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,460
|
py
|
from __future__ import division, print_function
# coding=utf-8
import numpy as np
from keras.backend import clear_session
import keras
from keras.applications import vgg16
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from keras.preprocessing import image
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
# Flask utils
from flask import Flask, redirect, url_for, request, render_template
# Define a flask app
app = Flask(__name__)
vgg_model = vgg16.VGG16(weights='imagenet')
# Model saved with Keras model.save()
MODEL_PATH = 'models'
# Load your trained model
"""
model = load_model(MODEL_PATH)
model._make_predict_function() """ # Necessary
# print('Model loaded. Start serving...')
# You can also use pretrained model from Keras
# Check https://keras.io/applications/
#from keras.applications.resnet50 import ResNet50
#model = ResNet50(weights='imagenet')
#model.save('')
print('Model loaded. Check http://127.0.0.1:5000/')
def model_predict(img_path, model):
    img = image.load_img(img_path, target_size=(224, 224))
    clear_session()
    # Preprocessing the image: VGG16 expects its caffe-style
    # preprocessing to be applied exactly once.
    x = image.img_to_array(img)
    # x = np.true_divide(x, 255)
    x = np.expand_dims(x, axis=0)
    processed_image = vgg16.preprocess_input(x)
    label = decode_predictions(model.predict(processed_image))
    return label
@app.route('/', methods=['GET'])
def index():
# Main page
return render_template('index.html')
@app.route('/predict', methods=['GET', 'POST'])
def upload():
if request.method == 'POST':
# Get the file from post request
f = request.files['file']
        # Make prediction (model_predict already returns decoded ImageNet labels)
        preds = model_predict(f, vgg_model)
        # Process your result for human
        result = str(preds[0][0][1])  # class name of the top prediction
        return result
return None
if __name__ == '__main__':
app.run()
# -*- coding: utf-8 -*-
|
[
"noreply@github.com"
] |
epickiller1011.noreply@github.com
|
0f179f35be6cbb8dbba9f08f2848157a749982de
|
7abc9f8a20cd4de325e96bf38bd26e59da573b45
|
/commands/main_menu.py
|
3cf89e4e86fef0c1d7c685f484659a35894b5196
|
[] |
no_license
|
tamkovich/Finagram
|
38c691d27f1a556ded354b009b12b60a61c6b558
|
9d33248e0328891f2c353e8745853b8bc6dfd2f5
|
refs/heads/master
| 2020-04-03T22:47:54.034175
| 2018-12-01T07:09:12
| 2018-12-01T07:09:12
| 155,609,845
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 934
|
py
|
from logic_application.bot_talk import VARS, COMMENT_VARS
import command_system
def menu(*args, **kwargs):
    message = '*FinGram* is your personal adviser that explains how to open, ' \
              'run, and, if things do not work out, close a sole proprietorship. \nThe bot will help you ' \
              'choose a taxation system, pick the best form of registration, and much ' \
              'more. \n _Make your choice!_'
return (message, list(VARS.keys())+list(COMMENT_VARS.keys()), [], []), None, None
data_command = command_system.Command()
data_command.keys = ["Main menu", '/start']
data_command.description = "Start menu"
data_command.process = menu
|
[
"jaselnikpromise@gmail.com"
] |
jaselnikpromise@gmail.com
|
a63847d16a5659cf9848a8ac2931d97697e9e4bc
|
6d233ad2059a941e4ce4c5b5ee3857b8a4a0d212
|
/Everyday_alg/2021/06/2021_06_07/fei-bo-na-qi-shu-lie-lcof.py
|
848b7cc8f43e8e03c8af76b43b1a7696200a53d7
|
[] |
no_license
|
Alexanderklau/Algorithm
|
7c38af7debbe850dfc7b99cdadbf0f8f89141fc6
|
eac05f637a55bfcc342fa9fc4af4e2dd4156ea43
|
refs/heads/master
| 2022-06-12T21:07:23.635224
| 2022-06-12T08:12:07
| 2022-06-12T08:12:07
| 83,501,915
| 5
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 801
|
py
|
# coding: utf-8
__author__ = "lau.wenbo"
"""
写一个函数,输入 n ,求斐波那契(Fibonacci)数列的第 n 项(即 F(N))。斐波那契数列的定义如下:
F(0) = 0, F(1) = 1
F(N) = F(N - 1) + F(N - 2), 其中 N > 1.
斐波那契数列由 0 和 1 开始,之后的斐波那契数就是由之前的两数相加而得出。
答案需要取模 1e9+7(1000000007),如计算初始结果为:1000000008,请返回 1。
示例 1:
输入:n = 2
输出:1
示例 2:
输入:n = 5
输出:5
"""
class Solution(object):
def fib(self, n):
"""
:type n: int
:rtype: int
"""
dp = [0, 1]
for i in range(2, n + 1):
dp.append(dp[i - 1] + dp[i - 2])
print(dp)
return dp[n] % 1000000007
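# A hedged companion sketch (not part of the original solution): the same
# recurrence with O(1) extra space, reducing modulo 1e9+7 at every step.
class SolutionConstantSpace(object):
    def fib(self, n):
        a, b = 0, 1
        for _ in range(n):
            a, b = b, (a + b) % 1000000007
        return a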
|
[
"429095816@qq.com"
] |
429095816@qq.com
|
af0f71bfb54ec732cacdeb845dee774901d59bc0
|
7e195dcc670a4c7d55028dcd149813dcc9ecdefa
|
/Lab 3.1.py
|
2a1f82e803f3a488f4d65da26538b1b871dfa12e
|
[] |
no_license
|
1201200309-AlvinChen/DPL5211
|
ca7b3a986cd23d0ea4ed4719210bdb2933223bd5
|
cc0d34b4a859fcae409f14f93780bf51f4775069
|
refs/heads/main
| 2023-08-12T23:48:31.229792
| 2021-10-11T03:04:36
| 2021-10-11T03:04:36
| 403,466,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
# Student ID : 1201200309
# Student Name : Alvin Chen
# if (cur_balance < 0) {
# printf("Your balance is insufficient")
# }
# printf("Thank you for the payment")
cur_balance = 30.12
if cur_balance < 0:
    print("Your balance is insufficient")
print("Thank you for the payment")
|
[
"noreply@github.com"
] |
1201200309-AlvinChen.noreply@github.com
|
b531d1908021e047584bcc73db34af36af58b774
|
beac1893280e28fca6c2ca53cd1138e708512bc5
|
/project/bin/pip2.7
|
a90d97ae525c9809382e73bf8321a4789b44f7d0
|
[] |
no_license
|
istanfield09/eventful
|
9cc325d239c206a2c9b605270cd1fb2836590196
|
f10866bba0de8861e0585195b309b55b3be61865
|
refs/heads/master
| 2021-01-01T06:18:27.134407
| 2015-03-26T18:43:36
| 2015-03-26T18:43:36
| 32,719,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
7
|
#!/Users/ianstanfield/Documents/Development/eventful/project/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"ianstanfield@ians-air.attlocal.net"
] |
ianstanfield@ians-air.attlocal.net
|
b1b73b9a0bfada0331cb730c5b4dad4d2661af38
|
a343b3414127943e9c4de4e1db62858af19a059e
|
/CodeUp/CodeUp_6028.py
|
439bb0903e2c6ce50ee1b62eb4541281b215f289
|
[] |
no_license
|
axg1/Algorithm_Study
|
9b4b69906452ab17d36c89e484dc38e3c503e3ff
|
08c7fa49e322fc49fc3d26355b19d33ab0a5350a
|
refs/heads/main
| 2023-06-18T08:15:15.781372
| 2021-07-08T12:18:05
| 2021-07-08T12:18:05
| 373,360,757
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36
|
py
|
a = input()
a = int(a)
print('%X'%a)  # print a as uppercase hexadecimal
|
[
"axg1@naver.com"
] |
axg1@naver.com
|
f39c3a11e241c378954b5aae955f8025560a05d0
|
fe38e0cd229059d828dfd593baee3816fb0f334d
|
/leetcode-python/MEDIUM_739-daily_temperatures.py
|
dbd0dc89c812bf088925595ff8a99b98799deb27
|
[] |
no_license
|
shyam81295/CODING
|
4de03a9690dadd5330b2fddf7030949260a93202
|
5933e63675a21a68b07a7e852f2db5caf6169743
|
refs/heads/master
| 2021-06-03T18:51:00.708220
| 2020-12-06T23:35:15
| 2020-12-06T23:35:15
| 102,181,335
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,238
|
py
|
# calculate index of next greater element
# O(N) time, O(N) space complexity with stack
from typing import List

class Solution:
    def dailyTemperatures(self, T: List[int]) -> List[int]:
ans_list = [-1]*len(T)
stack_list = []
if T:
i = 0
stack_list.append(0)
# doStuff only when there is more than 2 elements.
if len(T) > 1:
i = 1
while i < len(T):
while stack_list and T[stack_list[-1]] < T[i]:
a = stack_list.pop()
ans_list[a] = i-a
stack_list.append(i)
i += 1
while stack_list:
a = stack_list.pop()
ans_list[a] = 0
return ans_list
# O(N) time, O(N) space complexity without stack
class Solution(object):
def dailyTemperatures(self, T):
"""
:type T: List[int]
:rtype: List[int]
"""
n = len(T)
res = [0] * n
for i in range(n - 2, -1, -1):
k = i + 1
while T[i] >= T[k] and res[k] > 0:
k += res[k]
if T[k] > T[i]:
res[i] = k - i
return res
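# A quick sanity check on the classic example; note the second Solution
# definition above shadows the first, so this exercises the stack-free version.
if __name__ == '__main__':
    temps = [73, 74, 75, 71, 69, 72, 76, 73]
    print(Solution().dailyTemperatures(temps))  # expected: [1, 1, 4, 2, 1, 1, 0, 0]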
|
[
"noreply@github.com"
] |
shyam81295.noreply@github.com
|
ac057b9330fe9a10dbaf5923c186c29de9d12341
|
0f0623da91fb2cdfc1202c744b24c24a58918b66
|
/polls/migrations/0001_initial.py
|
47f94a311aa6dbc4aa12f003c9bbb327b7ec3859
|
[] |
no_license
|
lakipiece/django
|
647e090c14b30ce0ce1e7e43071f3c7649441831
|
f89ccb285cef5795b37681c4cf2ab86930bb25f4
|
refs/heads/master
| 2023-01-24T10:53:21.299039
| 2020-11-26T02:01:15
| 2020-11-26T02:01:15
| 314,436,667
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
# Generated by Django 3.1.3 on 2020-11-20 03:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('querstion_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.question')),
],
),
]
|
[
"root@ip-172-31-47-121.ap-northeast-2.compute.internal"
] |
root@ip-172-31-47-121.ap-northeast-2.compute.internal
|
8eae46bddf63d355abb394a8602f8e03f3b9d72c
|
06da2c3905690ce7f42fdd1ed923ce29a29e6c91
|
/pathfinder.py
|
d8c550d095e8d3d45ebbe303ecd2ddb191562d12
|
[] |
no_license
|
SUJU16/scripts
|
eada4c5d399e68a8388c367514d34de939100fc6
|
5da3cd932ebb138ca5ebe9d4b8228e5846f66fce
|
refs/heads/master
| 2020-06-21T14:49:29.285086
| 2016-11-27T04:41:46
| 2016-11-27T04:41:46
| 74,783,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,981
|
py
|
import googlemaps
from datetime import datetime
import json
import sys
import time
gmaps = googlemaps.Client(key='AIzaSyD-EkE2N4ULYVKqvd3RpHiy-gQeAVT2fNo')
def log(s):
pass
#print(s)
def cost(time, count):
#print(time, count)
#print(1.0*time/(count*count))
time = time/60.0
return time/(count*count)
def timeDistance(p1, p2):
global gmaps
#from math import sqrt, pow
#return sqrt(pow(p1["latitude"] - p2["latitude"], 2) + pow(p1["longitude"] - p2["longitude"], 2))
now = datetime.now()
res = gmaps.directions("%s %s" % (p1['location']['latitude'], p1['location']['longitude']),
"%s %s" % (p2['location']['latitude'], p2['location']['longitude']),
mode="driving",
departure_time=now
)
#log(res)
#log("%s %s" % (p1['location']['latitude'], p1['location']['longitude']))
#log("%s %s" % (p2['location']['latitude'], p2['location']['longitude']))
try:
return res[0]['legs'][0]['duration_in_traffic']['value']
except:
return None
def sjuktra(stops, end):
MAX_PEOPLE = 12
TIMERANGE = 30*60
    unvisited = set(range(len(stops)))
unvisited.add("end")
prev = dict()
dist = dict()
for i in unvisited:
dist[i] = {"cost": 9999999999999999, "count": 0, "date": None}
prev[i] = None
dist['end'] = {"cost": 0, "count": 0, "date": None}
while unvisited:
# Find smallest unvisited vertex
u_idx = None
for i in unvisited:
            if u_idx is None or dist[i]['cost'] < dist[u_idx]['cost']:
u_idx = i
u = stops[u_idx] if u_idx != "end" else end
unvisited.remove(u_idx)
log("Smallest: " + str(u_idx))
for i in unvisited:
i_count = stops[i]['n_points']
i_time = stops[i]['date']
current_count = dist[u_idx]['count']
log("[%s] count: %s" % (str(i), str(i_count)))
log("[%s] time: %s" % (str(i), str(i_time)))
if dist[u_idx]['date'] != None and dist[u_idx]['date'] < i_time:
log("> time")
continue
            if current_count >= MAX_PEOPLE:
log("> count")
continue
t_time = timeDistance(u, stops[i])
if not t_time:
log("time err")
continue
log("Time to travell [%s-%s]: %i" % (str(u_idx), str(i), t_time))
if dist[u_idx]['date'] == None:
time = i_time + t_time
else:
time = dist[u_idx]['date'] + t_time
            free_space = MAX_PEOPLE - current_count
log("Free: %i" % free_space)
take_count = min(i_count, free_space)
count = current_count + take_count
log("Time: %i" % time)
log("Count: %i" % count)
i_cost = dist[u_idx]['cost']*(current_count*current_count)/(count*count) + cost(t_time, count)
#i_cost = cost(t_time, take_count)
log("Cost: %i" % i_cost)
if i_cost < dist[i]['cost'] and dist[u_idx]['count'] < MAX_PEOPLE and (abs(time - i_time) < TIMERANGE or dist[u_idx]['date'] == None):
dist[i]['cost'] = i_cost
dist[i]['count'] = count
if dist[u_idx]['date'] == None:
dist[i]['date'] = i_time
else:
dist[i]['date'] = dist[u_idx]['date'] + t_time
prev[i] = u_idx
log("Take [%s] -> [%s]" % (str(u_idx), str(i)))
log("")
#log(dist)
#log("")
log(dist)
log(prev)
return (dist, prev)
def pathfind(stops, end):
routes = []
while len(stops) > 0:
dist, prev = sjuktra(stops, end)
m = None
for i in dist:
if (m == None or dist[i]['cost'] < dist[m]['cost']) and i != 'end':
m = i
route = []
idx = []
i = m
#log(m)
while i != 'end':
route.append({
"location": {
"longitude": stops[i]['location']['longitude'],
"latitude": stops[i]['location']['latitude']
},
"count": dist[i]['count']
})
idx.append(i)
i = prev[i]
route.append({
"location": {
"longitude": end['location']['longitude'],
"latitude": end['location']['latitude']
},
"count": 0
})
routes.append(route)
stops = [i for j, i in enumerate(stops) if j not in idx]
return routes
def main():
data = sys.argv[1]
data = json.loads(data)
paths = pathfind(data['result'], data['end'])
#log(len(paths))
print(json.dumps({'paths': paths}))
main()
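# Hedged input sketch, inferred from main() above (values are placeholders):
# the script takes one JSON string argument of the form
#
#     {"result": [{"location": {"latitude": 60.2, "longitude": 24.9},
#                  "n_points": 3, "date": 1480000000}],
#      "end": {"location": {"latitude": 60.17, "longitude": 24.94}}}
#
# e.g.  python pathfinder.py '{"result": [...], "end": {...}}'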
|
[
"henri.nurmi@kapsi.fi"
] |
henri.nurmi@kapsi.fi
|
e0a234336de31c9505d7dc85a013051f71da125f
|
27f31099ad52e9f6f06f2065f93e78b6d30911e5
|
/hpcMpi/scripts/hpcMpiIntegralPar.py
|
c457988b133b3b4364ed98bb6349dbc41fa989ee
|
[] |
no_license
|
CamilleINGOUF/M2_HPC
|
2034c6bc69248d25ecaeb8a3c0cf2e7c91271676
|
2824f4c8b1f1df0fdd588d21cadce854b59559c9
|
refs/heads/master
| 2020-04-15T21:06:21.166035
| 2019-01-30T15:54:50
| 2019-01-30T15:54:50
| 165,020,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 759
|
py
|
#!/usr/bin/env python3
import hpcMpi
import sys
from mpi4py import MPI
import numpy as np
if __name__ == '__main__':
# parse command line arguments
step = 1e-3
if len(sys.argv) == 2:
step = float(sys.argv[1])
# compute
comm = MPI.COMM_WORLD
worldRank = comm.Get_rank()
worldSize = comm.Get_size()
t0 = MPI.Wtime()
node_result = np.empty(1, 'f')
node_result[0] = hpcMpi.compute(hpcMpi.fPi, worldRank/worldSize, (worldRank+1)/worldSize, step)
all_results = np.empty(1, 'f')
comm.Reduce(node_result, all_results, op=MPI.SUM)
t1 = MPI.Wtime()
time = t1 - t0
# output result
if (worldRank == 0):
print(step, worldSize, all_results[0], time)
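
# Usage sketch, assuming a standard MPI launcher (hpcMpi.compute and
# hpcMpi.fPi come from the project's own module):
#   mpirun -n 4 python3 hpcMpiIntegralPar.py 1e-4
# Each rank integrates its slice [rank/size, (rank+1)/size] of [0, 1];
# rank 0 prints: step, world size, summed integral, elapsed seconds.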
|
[
"ingouf.camille@gmail.com"
] |
ingouf.camille@gmail.com
|
b8dda302ffc3569085e0ac3b118d55dea1cf3154
|
644bcdabf35261e07c2abed75986d70f736cb414
|
/python-project/Vrac/courbe_dragon.py
|
df72ad3ecea41d031fe9489c5ff00c20c9b27aca
|
[] |
no_license
|
matcianfa/playground-X1rXTswJ
|
f967ab2c2cf3905becafb6d77e89a31414d014de
|
67859b496e407200afb2b1d2b32bba5ed0fcc3f0
|
refs/heads/master
| 2023-04-03T11:56:15.878757
| 2023-03-24T15:52:37
| 2023-03-24T15:52:37
| 122,226,979
| 5
| 20
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
def pliage(k):
    # Do not touch what comes before this.
    # Values for the input variables will be supplied automatically.
    # Write your code below, not forgetting to indent.
    pass
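
# A minimal sketch of one possible answer (an assumption, since the exercise
# statement itself is not shown here): the dragon curve's fold sequence of
# order k, built by the classic recurrence "previous folds, a new fold, then
# the reversed complement of the previous folds".
def pliage_sketch(k):
    plis = []
    for _ in range(k):
        plis = plis + [1] + [1 - p for p in reversed(plis)]
    return plis

# e.g. pliage_sketch(3) -> [1, 1, 0, 1, 1, 0, 0]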
|
[
"noreply@github.com"
] |
matcianfa.noreply@github.com
|
06ef47958aa2a2e49563063f380eaad5995c45f6
|
8f757fcec669701eb86cb155868b7c450b7eddc0
|
/tools/input_company_info.py
|
0276c247fe529a539a7b892eb56ecef14f874df0
|
[] |
no_license
|
NIT-WATER/Frank-Yue-Job-push
|
2c25a051a79658db90b996f85bcb4f41a6ad0745
|
0a79841e66e142f67d2f8881bbd85c2544be9677
|
refs/heads/master
| 2022-12-01T01:46:32.506194
| 2020-07-12T14:41:11
| 2020-07-12T14:41:11
| 279,064,382
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,431
|
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import json
import time
import sys
import os
import argparse
sys.path.append("..")
from common.test_tool import *
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument('--company_name', default = '', help = 'Company name.')
parser.add_argument('--company_url', default = 'xxx', help = 'Company url.')
parser.add_argument('--is_have_crawl_code', action='store_true', default=True,
help='If has crawl code')
parser.add_argument('--crawl_code_path', default=False, help='Crawl code file path')
args = parser.parse_args()
    json_data = None
    with open('../meta/company.json') as f:
        json_data = json.load(f)
    is_have_company = False
    for company in json_data:
        if company['name'] == args.company_name:
            # only overwrite the stored URL when a real one was passed in
            if args.company_url != 'xxx':
                company['url'] = args.company_url
            company['is_have_crawl_code'] = args.is_have_crawl_code
            is_have_company = True
    if is_have_company is False:
        company_obj = {}
        company_obj['name'] = args.company_name
        company_obj['url'] = args.company_url
        company_obj['is_have_crawl_code'] = args.is_have_crawl_code
        json_data.append(company_obj)
    with open('./company.json', 'w') as f:
        json.dump(json_data, f)
os.system(f'mkdir -p ../src/{args.company_name}')
os.system(f'cp {args.crawl_code_path} ../src/{args.company_name}/crawl.py')
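
    # Usage sketch (run from the tools/ directory, per the relative paths in
    # the open()/os.system() calls above; the URL is a made-up example):
    #   python input_company_info.py --company_name Foo \
    #       --company_url https://example.com --crawl_code_path ./crawl.py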
|
[
"715339137@qq.com"
] |
715339137@qq.com
|
66f7615f781cb69b7a6ac36da6c6ded2f25397a0
|
d7878b992d055b8b9849f68ccf2ea47e7e418995
|
/models/server_db_access.py
|
5eed3ef1bb3d586ea992d4e3aadcea53d57a5818
|
[] |
no_license
|
Rk85/Http-Request-Response-Builder
|
b455c0c59227680a54663c84c2d11f407f8ca3d9
|
6a0673c9772fd5088236a9fd2c8842bde179b7bc
|
refs/heads/master
| 2020-06-03T05:23:58.736467
| 2014-08-15T12:59:17
| 2014-08-15T12:59:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,216
|
py
|
from db_tables.db_base import session
from db_tables.http_response import HttpResponse, HttpSubResponse
from db_tables.http_request import HttpRequest, HttpSubRequest
from db_tables.http_tests import HttpTestResults, HttpServerTestFailureReason
import logging
logger = logging.getLogger()
def get_response(request_uri=None):
"""
Description: Gets the next response data for the server
input_param: request_uri - Request Uri present in client request
input_type: string
out_param: response_data - Response data as a string
out_type: String
sample output: "HTTP/1.1 200 OK\r\nConnection:Close\r\n\r\n"
"""
if not request_uri:
return {}
test_id, req_id = [ int(parts) for parts in request_uri.strip().split("/")[:3] if len(parts) ]
running_test_row = session.query(HttpTestResults)\
.filter(HttpTestResults.test_id==test_id)\
.filter(HttpTestResults.request_id==req_id)\
.filter(HttpTestResults.is_running==True).first()
if running_test_row and running_test_row.sub_response_id:
sub_response = session.query(HttpSubResponse).get(running_test_row.sub_response_id)
if not sub_response:
failure_data = "Proxy sent one extra request. The Request should have been served from cache"
server_failure_reason = HttpServerTestFailureReason(reason=failure_data)
session.add(server_failure_reason)
session.flush()
running_test_row.request_result=False
running_test_row.server_failure_id = server_failure_reason.id
session.commit()
return "HTTP/1.1 404 Not Found\r\nConnection:Close\r\nContent-Length:0\r\n\r\n"
    else:
        failure_data = "Proxy sent one extra request. The Request should have been served from cache"
        server_failure_reason = HttpServerTestFailureReason(reason=failure_data)
        session.add(server_failure_reason)
        session.flush()
        if running_test_row:  # guard: no matching row exists for unknown URIs
            running_test_row.request_result = False
            running_test_row.server_failure_id = server_failure_reason.id
        session.commit()
        return "HTTP/1.1 404 Not Found\r\nConnection:Close\r\nContent-Length:0\r\n\r\n"
response_data = "HTTP/" + sub_response.version + " "
response_data = response_data + sub_response.response_code.code_name + "\r\n"
for response_header in sub_response.response_hdrs:
response_data = response_data + response_header.header_name + ":"
value_list = eval(response_header.server_value)
if response_header.single_value_hdr:
response_data = response_data + value_list[0]+ "\r\n"
else:
            response_data = response_data + ";".join(value_list) + "\r\n"
if sub_response.data_id:
response_data = response_data + "Content-Length:" + str( len(sub_response.data.data) )
response_data = response_data + "\r\n\r\n"
response_data = response_data + sub_response.data.data
else:
response_data = response_data + "Content-Length:0\r\n\r\n"
logger.debug("Response From Server : " + response_data)
return str(response_data)
|
[
"rachandkrishnan@gmail.com"
] |
rachandkrishnan@gmail.com
|
c296acbae8c58bc200be0034fe92cec6a634c534
|
310185d697d397ed4f531d46bb1ae357886ddd80
|
/exercise8/exercise8.5.py
|
ab1ba56686ab11b41e6fb5e2bf8f745bda49e56f
|
[
"MIT"
] |
permissive
|
srfunksensei/think-python
|
70d42d4afe63164588fb83bf70d7140d801824dc
|
a17fc81b316cd2e873b169e0090eae620442df99
|
refs/heads/main
| 2023-06-09T05:21:11.815623
| 2021-07-06T06:34:33
| 2021-07-06T06:34:33
| 371,041,124
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
def count(word, search_letter):
"""Encapsulate this code in a function named count, and generalize it so that it accepts
the string and the letter as arguments"""
letter_count = 0
for letter in word:
if letter == search_letter:
letter_count = letter_count + 1
print(letter_count)
count('banana', 'a')
count('banana', 'm')
|
[
"sr.funk.sensei@gmail.com"
] |
sr.funk.sensei@gmail.com
|
9fc75b5bc0a6ff682cb73bb15f0b3b63e2544b95
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2067/60829/272898.py
|
edaae49580762e8667a986c59a9230d43349ee36
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
def r(s):
    # Greedy integer-to-Roman conversion, including the subtractive pairs
    # (IV, IX, XL, XC, CD, CM) so that e.g. 4 becomes "IV", not "IIII".
    a = [(1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
         (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
         (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I')]
    result = ""
    while s > 0:
        for value, symbol in a:
            if value <= s:
                result += symbol
                s -= value
                break
    return result
print(r(int(input())))
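# sanity checks: r(1994) == "MCMXCIV", r(58) == "LVIII"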
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
014312742a246a4737c55570b56317fca91f7717
|
f433f7c884729e697ae110fa5a795dcc04449e42
|
/Code_tianyu/AA_ddpg_iLQR - control/iRELAX.py
|
4c8314f5755f78bbd0e863ed083e4c93313f7a14
|
[] |
no_license
|
hzm2016/iLQG
|
f05fbf98543f2a016cee1ced1e6562b65748a262
|
5b3d5712ece14cbe6aefda00535c65149c27d313
|
refs/heads/master
| 2020-06-01T17:04:37.135285
| 2019-06-08T07:38:23
| 2019-06-08T07:38:23
| 190,858,977
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
import numpy as np
from iLSTM import BATCH_MANAGE
from iENV import Env_PeginHole
import time
import winsound
NUM_DEMON = 10
if __name__ == '__main__':
    # main program
    env = Env_PeginHole()  # initialize the robot environment
env.connectRobot(False)
batchm = BATCH_MANAGE(s_dim=4, a_dim=3)
process = env.reset_demonstrate()
for i in range(1000):
process, a_zip, done = env.step_demonstrate()
time.sleep(0.5)
    env.close()  # close the environment (including its worker processes)
time.sleep(5)
|
[
"houzhimin@houzhimindeMacBook-Pro.local"
] |
houzhimin@houzhimindeMacBook-Pro.local
|
746fd79298ca8f4cf4fd7b55c2319bb2bea32863
|
e2e08d7c97398a42e6554f913ee27340226994d9
|
/pyautoTest-master(ICF-7.5.0)/test_case/scg/scg_Route/test_c141124.py
|
f446a2cce896243072511568369a8b51bcdd2464
|
[] |
no_license
|
lizhuoya1111/Automated_testing_practice
|
88e7be512e831d279324ad710946232377fb4c01
|
b3a532d33ddeb8d01fff315bcd59b451befdef23
|
refs/heads/master
| 2022-12-04T08:19:29.806445
| 2020-08-14T03:51:20
| 2020-08-14T03:51:20
| 287,426,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,220
|
py
|
import pytest
import time
import sys
from page_obj.common.rail import *
from os.path import dirname, abspath
from page_obj.common.ssh import *
from page_obj.scg.scg_def_policy_route import *
from page_obj.scg.scg_def_interface import *
from page_obj.scg.scg_def_obj import *
from page_obj.scg.scg_def_ifname_OEM import *
from page_obj.scg.scg_def_multi_isp import *
sys.path.insert(0, dirname(dirname(abspath(__file__))))
test_id = "141124"
# Add a policy route that specifies a service (any, a predefined service, or a custom service).
# Only packets that match the service should match this route and take the specified gateway.
def test_c141124(browser):
try:
        # On device 81, add a policy route with service set to "any"
login_web(browser, url=dev1)
add_multi_gateway_group_wxw(browser, name='lzy2', group="1(GROUP_1)", modify='no', alias='',
device=interface_name_3, gateway='13.1.1.3',
ping_server='13.1.1.3', ping='yes',
arp='no', time_switch='7', ub="100000", db="100000")
add_multi_gateway_group_wxw(browser, name='lzy1', group="1(GROUP_1)", modify='no', alias='',
device=interface_name_2, gateway='12.1.1.2',
ping_server='12.1.1.2', ping='yes',
arp='no', time_switch='7', ub="100000", db="100000")
        # Add a multi-gateway policy route
add_policy_route_multi_wxw(browser, in_device='全部', src_ip='12.1.1.0', src_mask='24',
dst_ip='34.1.1.0', dst_mask='24', service='yes', serv='any',
service_grp='no', serv_grp='H323',
gw_group='1(GROUP_1)', grp_mem=["主用", "备份1"], enable='yes',
disable='no', desc='添加多网关策略路由', save='yes', cancel='no')
        # On 82, add routes to the 13.1.1.0 and 34.1.1.0 subnets
a82 = Shell_SSH()
a82.connect(hostip=dev2)
a82.execute('en')
a82.execute('con t')
a82.execute('ip route 13.1.1.0/24 gateway 12.1.1.1')
a82.execute('ip route 34.1.1.0/24 gateway 12.1.1.1')
a82.close()
        # On 83, add a route to the 12.1.1.0 subnet
a83 = Shell_SSH()
a83.connect(hostip=dev3)
a83.execute('en')
a83.execute('con t')
a83.execute('ip route 12.1.1.0/24 gateway 13.1.1.1')
a83.close()
# 82 ping 83
login_web(browser, url=dev2)
result1 = diag_ping(browser, ipadd="34.1.1.3", packersize="100", count="5", ping_wait_time="2",
interface=interface_name_2)
# print(result1)
        # On device 81, add a policy route with a predefined service
login_web(browser, url=dev1)
add_policy_route_multi_wxw(browser, in_device='全部', src_ip='12.1.1.0', src_mask='24',
dst_ip='34.1.1.0', dst_mask='24', service='yes', serv='PING',
service_grp='no', serv_grp='H323',
gw_group='1(GROUP_1)', grp_mem=["主用", "备份1"], enable='yes',
disable='no', desc='添加多网关策略路由', save='yes', cancel='no')
# 82 ping 83
login_web(browser, url=dev2)
result2 = diag_ping(browser, ipadd="34.1.1.3", packersize="100", count="5", ping_wait_time="2",
interface=interface_name_2)
        # On device 81, add a policy route with a custom service
login_web(browser, url=dev1)
        # Add the custom service object
add_obj_service_wxw(browser, name='lzy', desc='zhe是ge描shu',
tcp='no', src_port_from='1', src_port_to='2', dest_port_from='3', dest_port_to='4',
udp='no', src_port_from1='1', src_port_to1='2', dst_port_from1='3', dst_port_to1='4',
icmp='yes', item='ping',
ip='', number='85')
add_policy_route_multi_wxw(browser, in_device='全部', src_ip='12.1.1.0', src_mask='24',
dst_ip='34.1.1.0', dst_mask='24', service='yes', serv='lzy',
service_grp='no', serv_grp='H323',
gw_group='1(GROUP_1)', grp_mem=["主用", "备份1"], enable='yes',
disable='no', desc='添加多网关策略路由', save='yes', cancel='no')
# 82 ping 83
login_web(browser, url=dev2)
result3 = diag_ping(browser, ipadd="34.1.1.3", packersize="100", count="5", ping_wait_time="2",
interface=interface_name_2)
        # On device 81, add a policy route with a service group
login_web(browser, url=dev1)
        # Add the service group
add_obj_serv_grp_wxw(browser, name='lzy', desc='', serv_obj='C:lzy')
add_policy_route_multi_wxw(browser, in_device='全部', src_ip='12.1.1.0', src_mask='24',
dst_ip='34.1.1.0', dst_mask='24', service='no', serv='any',
service_grp='yes', serv_grp='lzy',
gw_group='1(GROUP_1)', grp_mem=["主用", "备份1"], enable='yes',
disable='no', desc='添加多网关策略路由', save='yes', cancel='no')
# 82 ping 83
login_web(browser, url=dev2)
result4 = diag_ping(browser, ipadd="34.1.1.3", packersize="100", count="5", ping_wait_time="2",
interface=interface_name_2)
        # Remove the route on 83
a83 = Shell_SSH()
a83.connect(hostip=dev3)
a83.execute('en')
a83.execute('con t')
a83.execute('no ip route 12.1.1.0/24 gateway 13.1.1.1')
a83.close()
        # Remove the routes on 82
a82 = Shell_SSH()
a82.connect(hostip=dev2)
a82.execute('en')
a82.execute('con t')
a82.execute('no ip route 13.1.1.0/24 gateway 12.1.1.1')
a82.execute('no ip route 34.1.1.0/24 gateway 12.1.1.1')
a82.close()
        # On 81, remove the policy routes
login_web(browser, url=dev1)
del_all_policy_route_lzy(browser)
        # Remove the service group
del_all_obj_serv_grp_wxw(browser)
        # Remove the custom service
del_obj_service_wxw(browser, name='lzy')
try:
assert "ms" in result1 and "ms" in result2 and "ms" in result3 and "ms" in result4
rail_pass(test_run_id, test_id)
except:
rail_fail(test_run_id, test_id)
assert "ms" in result1 and "ms" in result2 and "ms" in result3 and "ms" in result4
except Exception as err:
        # If any step above raised an error, reload the devices to restore the configuration
print(err)
rail_fail(test_run_id, test_id)
reload(hostip=[dev1, dev2, dev3])
assert False
if __name__ == '__main__':
pytest.main(["-v", "-s", "test_c"+str(test_id)+".py"])
|
[
"15501866985@163.com"
] |
15501866985@163.com
|
6f309cdf64481250d7b27e4de0d32d8524694c66
|
f3cd8af6df7f87088d7d25233bcef25cf1f9672c
|
/constant.py
|
eaeebe66e48b65bbb1209063ebd91187685fb282
|
[] |
no_license
|
cash2one/rankScript
|
9a8c16e63e53082944b3eaff2d8ec286ef5f3e65
|
ea906c069b6a8a799616e605bb8ab3464d96a71b
|
refs/heads/master
| 2021-05-06T02:01:36.125795
| 2017-12-01T06:33:28
| 2017-12-01T06:33:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,946
|
py
|
# encoding=gb18030
lib_path = "/search/odin/taoyongbo/rank/beta/scala_spark/lib"
jar_path = "/search/odin/taoyongbo/rank/beta/scala_spark/"
java_jar_path = "/search/odin/taoyongbo/rank/java_spark/"
local_featurePoi_path = '/search/odin/taoyongbo/rank/featurePoi/'
local_city_featurePoi_path = '/search/odin/taoyongbo/rank/cityFeaturePoi/'
local_featurePoi_center_path = '/search/odin/taoyongbo/rank/rank_center/'
local_structure_optimize_path = '/search/odin/taoyongbo/rank/rankResult/structureOptimizeRank'
local_structure_rank_path = '/search/odin/taoyongbo/rank/result/structureRank'
local_split_featurePoi_path = '/search/odin/taoyongbo/rank/splitfeaturePoi/'
default_rank_output = 'taoyongbo/output/multiOptimizeRank'
rank_path = "/search/odin/taoyongbo/rank/rankResult/"
root_path = '/search/odin/taoyongbo/rank/'
zeus_path = "hftp://master01.zeus.hadoop.sogou:50070"
yarn_path = "hdfs://master01.yarn.hadoop.sogou:6230"
# poi xml original files
zeus_poi_path = "/user/go2data/sdb_data/all_data/nochange_data/2016-10-20/result/POI"
yarn_poi_input_path = "/user/go2data_rank/taoyongbo/input/poiXml1"
zeus_buspoi_path = "/user/go2data/sdb_data/all_data/nochange_data/2016-10-20/result/BUSPOI"
yarn_buspoi_input_path = "/user/go2data_rank/taoyongbo/input/poiXml2"
zeus_myself_path = "/user/go2data/sdb_data/all_data/poi_data/2016-10-31/raw_data/myself"
yarn_myself_input_path = "/user/go2data_rank/taoyongbo/input/poiXml3"
# name structure original files
zeus_structure_path = "/user/go2data/huajin.shen_dev/structure_by_name/2016-10-20/name_prefix_structure_release"
yarn_structure_input_path = "/user/go2data_rank/taoyongbo/input/nameStructure"
# matchCount
zeus_matchCount_path = "/user/go2search/taoyongbo/output/caculate"
yarn_matchCount_input_path = "/user/go2data_rank/taoyongbo/input/matchCount"
yarn_matchCount_output_path = "/user/go2data_rank/taoyongbo/output/matchCount"
# gpsHot
zeus_gps_path = "/user/go2search/taoyongbo/output/gps"
yarn_gps_input_path = "/user/go2data_rank/taoyongbo/input/gps"
# polygon
zeus_polygon_path = "/user/go2data/sdb_data/all_data/nochange_data/2016-10-20/result/POLYGON/"
yarn_polygon_input_path = "/user/go2data_rank/taoyongbo/input/polygonXml"
# poiHotCount
yarn_poiHotCount_input_path = "/user/go2data_rank/taoyongbo/input/poiHotCount"
# searchCount
yarn_searchCount_input_path = "/user/go2data_rank/taoyongbo/input/searchCount"
upload_local_path = '/search/odin/taoyongbo/rank/result/'
rsync_version_path = '/search/odin/taoyongbo/rank/rsync_version/'
#back_rank
back_rank_path = '/search/odin/taoyongbo/rank/back_rank/'
default_rank_output_path = '/search/odin/taoyongbo/output/rank/multiOptimizeRank'
# poi rank
zeus_multiRank_path = "/user/go2search/taoyongbo/input/multiRank/"
zeus_hotCountRank_path = "/user/go2search/taoyongbo/input/hotCountRank/"
zeus_hitCountRank_path = "/user/go2search/taoyongbo/input/hitCountRank/"
yarn_multiRank_output_path = "/user/go2data_rank/taoyongbo/output/multiRank/"
yarn_hotCountRank_output_path = "/user/go2data_rank/taoyongbo/output/hotCountRank/"
yarn_hitCountRank_output_path = "/user/go2data_rank/taoyongbo/output/hitCountRank/"
#filter rank source
# similarQueryCount
yarn_similarQueryCount_input_path = "/user/go2data_rank/taoyongbo/input/filterRank/similarQueryCount/"
# sogouViewCount
zeus_sogouViewCount_path = "/user/go2search/taoyongbo/output/20170921sougouViewCount"
yarn_sogouViewCount_input_path = "/user/go2data_rank/taoyongbo/input/filterRank/sogouViewCount/"
# vrHitCount
zeus_vrHitCount_path = "/user/go2data_crawler/dc_log/VR_HITCOUNT"
yarn_vrHitCount_input_path = "/user/go2data_rank/taoyongbo/input/filterRank/vrHitCount/VR_HITCOUNT"
# vrViewCount
zeus_vrViewCount_path = "/user/go2data_crawler/dc_log/VR_VIEW"
yarn_vrViewCount_input_path = "/user/go2data_rank/taoyongbo/input/filterRank/vrViewCount/VR_VIEW"
#filterPoi
yarn_filterPoi_input_path = "/user/go2data_rank/taoyongbo/input/filterRank/filterPoi/"
|
[
"mvplove123@163.com"
] |
mvplove123@163.com
|
ab5bca1d1cb2f0d736bed26afdb88348a6e30d93
|
32003ce9b76266c44372d1732dd91ea961e3d745
|
/hras/wsgi.py
|
205e5a49c6c2b8f160fd44e370226f58c8cd7a5c
|
[] |
no_license
|
mansimishra007/MySpace
|
20cf1464652065d70caacc0c7cd3379af993f7e2
|
5ce502ee46b5175563050027775c5f078741649c
|
refs/heads/main
| 2023-04-04T11:33:54.564119
| 2021-04-10T17:48:43
| 2021-04-10T17:48:43
| 356,621,714
| 0
| 0
| null | 2021-04-10T15:27:05
| 2021-04-10T15:27:05
| null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
"""
WSGI config for hras project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hras.settings')
application = get_wsgi_application()
|
[
"43030745+divyanayal@users.noreply.github.com"
] |
43030745+divyanayal@users.noreply.github.com
|
436b83fd99158f794f2f21a55805f8b6fdea2b36
|
4e9e4b2aa28113e307c87cd6c777d7498fd85b0a
|
/tests/connections.py
|
5f0828fe45657e3d6e695afb80fd28f3b936e88a
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"MIT"
] |
permissive
|
kuychaco/CCF
|
b0608e4f241a1c0dfa1c3f72021b4b4d786e0e02
|
e11acde3be6a7d2213fe5b406b959bb5bb64361d
|
refs/heads/master
| 2020-12-23T05:14:45.012959
| 2020-01-29T17:49:12
| 2020-01-29T17:49:12
| 237,045,643
| 1
| 0
|
Apache-2.0
| 2020-01-29T17:45:36
| 2020-01-29T17:45:35
| null |
UTF-8
|
Python
| false
| false
| 3,246
|
py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import sys
import os
import infra.proc
import infra.e2e_args
import infra.ccf
import infra.checker
import getpass
import time
import logging
import multiprocessing
from random import seed
import json
import contextlib
import resource
import psutil
import random

from loguru import logger as LOG
def run(args):
hosts = ["localhost"]
with infra.ccf.network(
hosts, args.build_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb
) as network:
check = infra.checker.Checker()
network.start_and_join(args)
primary, others = network.find_nodes()
primary_pid = primary.remote.remote.proc.pid
num_fds = psutil.Process(primary_pid).num_fds()
max_fds = num_fds + 50
LOG.info(f"{primary_pid} has {num_fds} open file descriptors")
resource.prlimit(primary_pid, resource.RLIMIT_NOFILE, (max_fds, max_fds))
LOG.info(f"set max fds to {max_fds} on {primary_pid}")
nb_conn = (max_fds - num_fds) * 2
clients = []
with contextlib.ExitStack() as es:
for i in range(nb_conn):
try:
clients.append(es.enter_context(primary.user_client(format="json")))
LOG.info(f"Connected client {i}")
except OSError:
LOG.error(f"Failed to connect client {i}")
c = clients[int(random.random() * len(clients))]
check(c.rpc("LOG_record", {"id": 42, "msg": "foo"}), result=True)
assert (
len(clients) >= max_fds - num_fds - 1
), f"{len(clients)}, expected at least {max_fds - num_fds - 1}"
num_fds = psutil.Process(primary_pid).num_fds()
LOG.info(f"{primary_pid} has {num_fds} open file descriptors")
LOG.info(f"Disconnecting clients")
time.sleep(1)
num_fds = psutil.Process(primary_pid).num_fds()
LOG.info(f"{primary_pid} has {num_fds} open file descriptors")
clients = []
with contextlib.ExitStack() as es:
for i in range(max_fds - num_fds):
clients.append(es.enter_context(primary.user_client(format="json")))
LOG.info(f"Connected client {i}")
c = clients[int(random.random() * len(clients))]
check(c.rpc("LOG_record", {"id": 42, "msg": "foo"}), result=True)
assert (
len(clients) >= max_fds - num_fds - 1
), f"{len(clients)}, expected at least {max_fds - num_fds - 1}"
num_fds = psutil.Process(primary_pid).num_fds()
LOG.info(f"{primary_pid} has {num_fds} open file descriptors")
LOG.info(f"Disconnecting clients")
time.sleep(1)
num_fds = psutil.Process(primary_pid).num_fds()
LOG.info(f"{primary_pid} has {num_fds} open file descriptors")
if __name__ == "__main__":
def add(parser):
parser.add_argument(
"-p",
"--package",
help="The enclave package to load (e.g., liblogging)",
default="liblogging",
)
args = infra.e2e_args.cli_args(add)
run(args)
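
# Usage sketch: run under CCF's Python test infrastructure, e.g.
#   python connections.py -p liblogging
# plus the standard infra.e2e_args options (build directory, node
# configuration, and so on); their exact names come from infra.e2e_args and
# are not shown here.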
|
[
"noreply@github.com"
] |
kuychaco.noreply@github.com
|
c9da4ced89196a0e15b9976948e8446af1aa9be2
|
ef6229d281edecbea3faad37830cb1d452d03e5b
|
/ucsmsdk/mometa/config/ConfigImpact.py
|
01f09b71d0f53fa8cd618594fad9815d0d10bb79
|
[
"Apache-2.0"
] |
permissive
|
anoop1984/python_sdk
|
0809be78de32350acc40701d6207631322851010
|
c4a226bad5e10ad233eda62bc8f6d66a5a82b651
|
refs/heads/master
| 2020-12-31T00:18:57.415950
| 2016-04-26T17:39:38
| 2016-04-26T17:39:38
| 57,148,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,898
|
py
|
"""This module contains the general information for ConfigImpact ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class ConfigImpactConsts():
CONFIG_STATE_APPLIED = "applied"
CONFIG_STATE_APPLYING = "applying"
CONFIG_STATE_FAILED_TO_APPLY = "failed-to-apply"
CONFIG_STATE_NOT_APPLIED = "not-applied"
DEPLOYMENT_MODE_IMMEDIATE = "immediate"
DEPLOYMENT_MODE_TIMER_AUTOMATIC = "timer-automatic"
DEPLOYMENT_MODE_USER_ACK = "user-ack"
REBOOT_REQUIRED_FALSE = "false"
REBOOT_REQUIRED_NO = "no"
REBOOT_REQUIRED_TRUE = "true"
REBOOT_REQUIRED_YES = "yes"
class ConfigImpact(ManagedObject):
"""This is ConfigImpact class."""
consts = ConfigImpactConsts()
naming_props = set([u'name'])
mo_meta = MoMeta("ConfigImpact", "configImpact", "impact-[name]", VersionMeta.Version212a, "InputOutput", 0x3f, [], ["read-only"], [u'configManagedEpImpactResponse'], [], [None])
prop_meta = {
"affected_obj": MoPropertyMeta("affected_obj", "affectedObj", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"affected_server": MoPropertyMeta("affected_server", "affectedServer", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"changes": MoPropertyMeta("changes", "changes", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|boot-order|server-assignment|operational-policies|server-identity|storage|networking|vnic-vhba-placement),){0,7}(defaultValue|boot-order|server-assignment|operational-policies|server-identity|storage|networking|vnic-vhba-placement){0,1}""", [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version212a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"config_issues": MoPropertyMeta("config_issues", "configIssues", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|not-applicable|boot-order-pxe|wwnn-derivation-from-vhba|migration|incompat-bios-for-sriov-vnics|iscsi-initiator-ip-address|remote-policy|wwnn-assignment|processor-requirement|physical-requirement|hostimg-policy-invalid|vif-resources-overprovisioned|pinning-invalid|incompatible-number-of-local-disks|mac-derivation-virtualized-port|switch-virtual-if-capacity|invalid-wwn|missing-raid-key|board-controller-update-unsupported|insufficient-resources|compute-undiscovered|boot-configuration-invalid|incompatible-bios-image|iscsi-config|storage-path-configuration-error|resource-ownership-conflict|system-uuid-assignment|server-position-requirement|destructive-local-disk-config|imgsec-policy-invalid|pinning-vlan-mismatch|non-interrupt-fsm-running|vnic-capacity|adaptor-requirement|mac-address-assignment|qos-policy-invalid|insufficient-power-budget|boot-order-iscsi|vnic-vcon-provisioning-change|adaptor-protected-eth-capability|connection-placement|incompatible-disk-types|vnic-not-ha-ready|zone-capacity|adaptor-out-of-vifs|duplicate-address-conflict|vhba-capacity|boot-order-san-image-path|compute-unavailable|power-group-requirement|provsrv-policy-invalid|vnic-vlan-assignment-error|missing-firmware-image|wwpn-assignment|memory-requirement|vlan-port-capacity|bootip-policy-invalid|vfc-vnic-pvlan-conflict|named-vlan-inaccessible|adaptor-fcoe-capability|wwpn-derivation-virtualized-port|incompatible-raid-level|missing-primary-vlan|fcoe-capacity|dynamic-vf-vnic),){0,65}(defaultValue|not-applicable|boot-order-pxe|wwnn-derivation-from-vhba|migration|incompat-bios-for-sriov-vnics|iscsi-initiator-ip-address|remote-policy|wwnn-assignment|processor-requirement|physical-requirement|hostimg-policy-invalid|vif-resources-overprovisioned|pinning-invalid|incompatible-number-of-local-disks|mac-derivation-virtualized-port|switch-virtual-if-capacity|invalid-wwn|missing-raid-key|board-controller-update-unsupported|insufficient-resources|compute-undiscovered|boot-configuration-invalid|incompatible-bios-image|iscsi-config|storage-path-configuration-error|resource-ownership-conflict|system-uuid-assignment|server-position-requirement|destructive-local-disk-config|imgsec-policy-invalid|pinning-vlan-mismatch|non-interrupt-fsm-running|vnic-capacity|adaptor-requirement|mac-address-assignment|qos-policy-invalid|insufficient-power-budget|boot-order-iscsi|vnic-vcon-provisioning-change|adaptor-protected-eth-capability|connection-placement|incompatible-disk-types|vnic-not-ha-ready|zone-capacity|adaptor-out-of-vifs|duplicate-address-conflict|vhba-capacity|boot-order-san-image-path|compute-unavailable|power-group-requirement|provsrv-policy-invalid|vnic-vlan-assignment-error|missing-firmware-image|wwpn-assignment|memory-requirement|vlan-port-capacity|bootip-policy-invalid|vfc-vnic-pvlan-conflict|named-vlan-inaccessible|adaptor-fcoe-capability|wwpn-derivation-virtualized-port|incompatible-raid-level|missing-primary-vlan|fcoe-capacity|dynamic-vf-vnic){0,1}""", [], []),
"config_qualifier": MoPropertyMeta("config_qualifier", "configQualifier", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|not-applicable|boot-order-pxe|wwnn-derivation-from-vhba|migration|incompat-bios-for-sriov-vnics|iscsi-initiator-ip-address|remote-policy|wwnn-assignment|processor-requirement|physical-requirement|hostimg-policy-invalid|vif-resources-overprovisioned|pinning-invalid|incompatible-number-of-local-disks|mac-derivation-virtualized-port|switch-virtual-if-capacity|invalid-wwn|missing-raid-key|board-controller-update-unsupported|insufficient-resources|compute-undiscovered|boot-configuration-invalid|incompatible-bios-image|iscsi-config|storage-path-configuration-error|resource-ownership-conflict|system-uuid-assignment|server-position-requirement|destructive-local-disk-config|imgsec-policy-invalid|pinning-vlan-mismatch|non-interrupt-fsm-running|vnic-capacity|adaptor-requirement|mac-address-assignment|qos-policy-invalid|insufficient-power-budget|boot-order-iscsi|vnic-vcon-provisioning-change|adaptor-protected-eth-capability|connection-placement|incompatible-disk-types|vnic-not-ha-ready|zone-capacity|adaptor-out-of-vifs|duplicate-address-conflict|vhba-capacity|boot-order-san-image-path|compute-unavailable|power-group-requirement|provsrv-policy-invalid|vnic-vlan-assignment-error|missing-firmware-image|wwpn-assignment|memory-requirement|vlan-port-capacity|bootip-policy-invalid|vfc-vnic-pvlan-conflict|named-vlan-inaccessible|adaptor-fcoe-capability|wwpn-derivation-virtualized-port|incompatible-raid-level|missing-primary-vlan|fcoe-capacity|dynamic-vf-vnic),){0,65}(defaultValue|not-applicable|boot-order-pxe|wwnn-derivation-from-vhba|migration|incompat-bios-for-sriov-vnics|iscsi-initiator-ip-address|remote-policy|wwnn-assignment|processor-requirement|physical-requirement|hostimg-policy-invalid|vif-resources-overprovisioned|pinning-invalid|incompatible-number-of-local-disks|mac-derivation-virtualized-port|switch-virtual-if-capacity|invalid-wwn|missing-raid-key|board-controller-update-unsupported|insufficient-resources|compute-undiscovered|boot-configuration-invalid|incompatible-bios-image|iscsi-config|storage-path-configuration-error|resource-ownership-conflict|system-uuid-assignment|server-position-requirement|destructive-local-disk-config|imgsec-policy-invalid|pinning-vlan-mismatch|non-interrupt-fsm-running|vnic-capacity|adaptor-requirement|mac-address-assignment|qos-policy-invalid|insufficient-power-budget|boot-order-iscsi|vnic-vcon-provisioning-change|adaptor-protected-eth-capability|connection-placement|incompatible-disk-types|vnic-not-ha-ready|zone-capacity|adaptor-out-of-vifs|duplicate-address-conflict|vhba-capacity|boot-order-san-image-path|compute-unavailable|power-group-requirement|provsrv-policy-invalid|vnic-vlan-assignment-error|missing-firmware-image|wwpn-assignment|memory-requirement|vlan-port-capacity|bootip-policy-invalid|vfc-vnic-pvlan-conflict|named-vlan-inaccessible|adaptor-fcoe-capability|wwpn-derivation-virtualized-port|incompatible-raid-level|missing-primary-vlan|fcoe-capacity|dynamic-vf-vnic){0,1}""", [], []),
"config_state": MoPropertyMeta("config_state", "configState", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["applied", "applying", "failed-to-apply", "not-applied"], []),
"deployment_mode": MoPropertyMeta("deployment_mode", "deploymentMode", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["immediate", "timer-automatic", "user-ack"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version212a, MoPropertyMeta.NAMING, 0x8, 1, 510, None, [], []),
"reboot_required": MoPropertyMeta("reboot_required", "rebootRequired", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version212a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version212a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"affectedObj": "affected_obj",
"affectedServer": "affected_server",
"changes": "changes",
"childAction": "child_action",
"configIssues": "config_issues",
"configQualifier": "config_qualifier",
"configState": "config_state",
"deploymentMode": "deployment_mode",
"dn": "dn",
"name": "name",
"rebootRequired": "reboot_required",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, name, **kwargs):
self._dirty_mask = 0
self.name = name
self.affected_obj = None
self.affected_server = None
self.changes = None
self.child_action = None
self.config_issues = None
self.config_qualifier = None
self.config_state = None
self.deployment_mode = None
self.reboot_required = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "ConfigImpact", parent_mo_or_dn, **kwargs)
|
[
"test@cisco.com"
] |
test@cisco.com
|
463b99799fdcd59cd3be2436f5ecda106f900404
|
5d95fec0ae2757089170f1f667e14e0fe39c8a63
|
/contacts_and_people/migrations/0024_directory_plugin.py
|
2e743e60d8c1e72009d9117f2ad3e2f9043693a3
|
[
"BSD-2-Clause"
] |
permissive
|
smurp/Arkestra
|
da3d1c4955f3bbbbcc139dea617851ffc199a200
|
599a561cb5dc2296901bb67344a816ee45cd91cd
|
refs/heads/master
| 2021-01-18T10:34:14.857750
| 2012-03-14T11:23:28
| 2012-03-14T11:23:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30,869
|
py
|
from south.db import db
from django.db import models
from contacts_and_people.models import *
class Migration:
def forwards(self, orm):
# Adding field 'EntityDirectoryPluginEditor.levels'
db.add_column('cmsplugin_entitydirectoryplugineditor', 'levels', orm['contacts_and_people.entitydirectoryplugineditor:levels'])
# Deleting field 'EntityDirectoryPluginEditor.sub_levels'
db.delete_column('cmsplugin_entitydirectoryplugineditor', 'sub_levels')
def backwards(self, orm):
# Deleting field 'EntityDirectoryPluginEditor.levels'
db.delete_column('cmsplugin_entitydirectoryplugineditor', 'levels')
# Adding field 'EntityDirectoryPluginEditor.sub_levels'
db.add_column('cmsplugin_entitydirectoryplugineditor', 'sub_levels', orm['contacts_and_people.entitydirectoryplugineditor:sub_levels'])
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.CMSPlugin']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'menu_login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'page_flags': ('django.db.models.fields.TextField', [], {'null': True, 'blank': True}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'contacts_and_people.building': {
'additional_street_address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['image_filer.Image']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '9', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'contacts_and_people.entity': {
'abstract_entity': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'access_note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'auto_contacts_page': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'auto_news_page': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'auto_publications_page': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'auto_vacancies_page': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Building']", 'null': 'True', 'blank': 'True'}),
'contacts_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'Contacts & people'", 'max_length': '50'}),
'display_parent': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'entitylite_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['contacts_and_people.EntityLite']", 'unique': 'True', 'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['image_filer.Image']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'news_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'News & events'", 'max_length': '50'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['contacts_and_people.Entity']"}),
'phone_contacts': ('django.contrib.contenttypes.generic.GenericRelation', [], {'to': "orm['contacts_and_people.PhoneContact']"}),
'precise_location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'publications_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'Publications'", 'max_length': '50'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'vacancies_page_menu_title': ('django.db.models.fields.CharField', [], {'default': "'Vacancies & studentships'", 'max_length': '50'}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'entity'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"})
},
'contacts_and_people.entityautopagelinkplugineditor': {
'Meta': {'db_table': "'cmsplugin_entityautopagelinkplugineditor'"},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'auto_page_plugin'", 'null': 'True', 'to': "orm['contacts_and_people.Entity']"}),
'link_to': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'contacts_and_people.entitydirectoryplugineditor': {
'Meta': {'db_table': "'cmsplugin_entitydirectoryplugineditor'"},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'display_descriptions_to_level': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'directory_plugin'", 'null': 'True', 'to': "orm['contacts_and_people.Entity']"}),
'levels': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'link_icons': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
'contacts_and_people.entitylite': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'contacts_and_people.membership': {
'display_role': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'display_roles'", 'null': 'True', 'to': "orm['contacts_and_people.Membership']"}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['contacts_and_people.Entity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance_to_entity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'importance_to_person': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'key_contact': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_of'", 'to': "orm['contacts_and_people.Person']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'contacts_and_people.person': {
'access_note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'building': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Building']", 'null': 'True', 'blank': 'True'}),
'data_feed_locked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'entities': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['contacts_and_people.Entity']", 'null': 'True', 'blank': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
'institutional_username': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'override_entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'people_override'", 'null': 'True', 'to': "orm['contacts_and_people.Entity']"}),
'personlite_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['contacts_and_people.PersonLite']", 'unique': 'True', 'primary_key': 'True'}),
'phone_contacts': ('django.contrib.contenttypes.generic.GenericRelation', [], {'to': "orm['contacts_and_people.PhoneContact']"}),
'please_contact': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'contact_for'", 'null': 'True', 'to': "orm['contacts_and_people.Person']"}),
'precise_location': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'staff_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person_user'", 'unique': 'True', 'null': 'True', 'to': "orm['auth.User']"})
},
'contacts_and_people.personlite': {
'given_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'middle_names': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contacts_and_people.Title']", 'to_field': "'abbreviation'", 'null': 'True', 'blank': 'True'})
},
'contacts_and_people.phonecontact': {
'area_code': ('django.db.models.fields.CharField', [], {'default': "'029'", 'max_length': '5'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'country_code': ('django.db.models.fields.CharField', [], {'default': "'44'", 'max_length': '5'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_extension': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
},
'contacts_and_people.site': {
'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['image_filer.Image']", 'null': 'True', 'blank': 'True'}),
'post_town': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'site_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'contacts_and_people.teacher': {
'dummy_field_one': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'dummy_field_two': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'teacher'", 'unique': 'True', 'null': 'True', 'to': "orm['contacts_and_people.Person']"})
},
'contacts_and_people.title': {
'abbreviation': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'_file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_file_type_plugin_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'unique_together': "(('parent', 'name'),)"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'image_filer.folder': {
'Meta': {'unique_together': "(('parent', 'name'),)"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['image_filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'image_filer.image': {
'_height_field': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width_field': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'can_use_for_print': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_use_for_private_use': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_use_for_research': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_use_for_teaching': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_use_for_web': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'contact_of_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'image_files'", 'null': 'True', 'to': "orm['image_filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_images'", 'null': 'True', 'to': "orm['auth.User']"}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'usage_restriction_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'sites.site': {
'Meta': {'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['contacts_and_people']
|
[
"daniele@apple-juice.co.uk"
] |
daniele@apple-juice.co.uk
|
50cba92bdbcf88d85873297617f28e1165a12621
|
6f33381dcb19a042d916b4a452f9cb7438729798
|
/jabba/test/test_util/test_convert_path.py
|
73d2fca353a41f8c194a165553d65881bb8dbaf4
|
[
"MIT"
] |
permissive
|
puppetlabs/jabba
|
8308adf5be9ba25efb414f384bf3568854be55e2
|
71c1d008ab497020fba6ffa12a600721eb3f5ef7
|
refs/heads/master
| 2023-06-13T09:17:49.274408
| 2017-06-30T11:02:27
| 2017-06-30T11:02:27
| 185,443,592
| 0
| 1
| null | 2019-05-07T16:54:03
| 2019-05-07T16:54:02
| null |
UTF-8
|
Python
| false
| false
| 862
|
py
|
import unittest
import sys
import os
sys.path.append("../")
sys.path.append("../../")
from jabba import util
class TestConvertPath(unittest.TestCase):
def testThrowsForAbsPath(self):
with self.assertRaises(Exception) as context:
util.convert_path('/abs/path')
self.assertTrue('Cannot include' in str(context.exception))
def testKeepsFileIfAlreadyCorrect(self):
self.assertEquals('file.yml', util.convert_path('file.yml'))
self.assertEquals('dir/file.yml', util.convert_path('dir/file.yml'))
def testConvertsRelativePath(self):
self.assertEquals('file.yml', util.convert_path('./file.yml'))
self.assertEquals('dir/file.yml', util.convert_path('./dir/file.yml'))
def testRemoveDuplicateSlashes(self):
self.assertEquals('dir/file.yml', util.convert_path('dir//file.yml'))
|
[
"rebovykin@gmail.com"
] |
rebovykin@gmail.com
|
8d3721f1d76e4f50f9c62bed191c61ec0ef6f49c
|
d547e16fcea3a1dce5a935b001e2095e5708de33
|
/WTF/WTF.gypi
|
0b396fc1fb565bf4835580948a0a52587dc2ec90
|
[] |
no_license
|
rognar/webkit-wince-5-6-7-2013-mobile-
|
a9c963257a56d606fdd2a226e9001fd49e644958
|
1a7fde35a7da9eacfbfc7fd88de13a359408affc
|
refs/heads/master
| 2022-04-11T01:02:02.992600
| 2020-04-10T20:23:55
| 2020-04-10T20:23:55
| 254,726,862
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,969
|
gypi
|
{
'variables': {
'project_dir': ['.'],
'wtf_privateheader_files': [
'wtf/ASCIICType.h',
'wtf/AVLTree.h',
'wtf/Alignment.h',
'wtf/AlwaysInline.h',
'wtf/Assertions.h',
'wtf/Atomics.h',
'wtf/BitArray.h',
'wtf/BitVector.h',
'wtf/Bitmap.h',
'wtf/BlockStack.h',
'wtf/BloomFilter.h',
'wtf/BumpPointerAllocator.h',
'wtf/ByteOrder.h',
'wtf/CheckedArithmetic.h',
'wtf/CheckedBoolean.h',
'wtf/Compiler.h',
'wtf/Complex.h',
'wtf/CryptographicallyRandomNumber.h',
'wtf/CurrentTime.h',
'wtf/DataLog.h',
'wtf/DateMath.h',
'wtf/DecimalNumber.h',
'wtf/Decoder.h',
'wtf/Deque.h',
'wtf/DisallowCType.h',
'wtf/DoublyLinkedList.h',
'wtf/Encoder.h',
'wtf/FastAllocBase.h',
'wtf/FastMalloc.h',
'wtf/FeatureDefines.h',
'wtf/FilePrintStream.h',
'wtf/FixedArray.h',
'wtf/Forward.h',
'wtf/Functional.h',
'wtf/GetPtr.h',
'wtf/GregorianDateTime.h',
'wtf/HashCountedSet.h',
'wtf/HashFunctions.h',
'wtf/HashIterators.h',
'wtf/HashMap.h',
'wtf/HashSet.h',
'wtf/HashTable.h',
'wtf/HashTraits.h',
'wtf/HexNumber.h',
'wtf/ListHashSet.h',
'wtf/ListRefPtr.h',
'wtf/Locker.h',
'wtf/MD5.h',
'wtf/MainThread.h',
'wtf/MathExtras.h',
'wtf/MemoryInstrumentation.cpp',
'wtf/MemoryInstrumentation.h',
'wtf/MemoryInstrumentationArrayBufferView.h',
'wtf/MemoryInstrumentationHashCountedSet.h',
'wtf/MemoryInstrumentationHashMap.h',
'wtf/MemoryInstrumentationHashSet.h',
'wtf/MemoryInstrumentationListHashSet.h',
'wtf/MemoryInstrumentationParsedURL.h',
'wtf/MemoryInstrumentationSequence.h',
'wtf/MemoryInstrumentationString.h',
'wtf/MemoryInstrumentationVector.h',
'wtf/MemoryObjectInfo.h',
'wtf/MessageQueue.h',
'wtf/NonCopyingSort.h',
'wtf/Noncopyable.h',
'wtf/NotFound.h',
'wtf/NullPtr.h',
'wtf/OSAllocator.h',
'wtf/OwnArrayPtr.h',
'wtf/OwnPtr.h',
'wtf/OwnPtrCommon.h',
'wtf/PageAllocation.h',
'wtf/PageAllocationAligned.h',
'wtf/PageBlock.h',
'wtf/PageReservation.h',
'wtf/PassOwnArrayPtr.h',
'wtf/PassOwnPtr.h',
'wtf/PassRefPtr.h',
'wtf/PassTraits.h',
'wtf/Platform.h',
'wtf/PossiblyNull.h',
'wtf/PrintStream.h',
'wtf/RandomNumber.h',
'wtf/RawPointer.h',
'wtf/RefCounted.h',
'wtf/RefCountedLeakCounter.h',
'wtf/RefPtr.h',
'wtf/RefPtrHashMap.h',
'wtf/RetainPtr.h',
'wtf/SaturatedArithmetic.h',
'wtf/SentinelLinkedList.h',
'wtf/SinglyLinkedList.h',
'wtf/StackBounds.h',
'wtf/StaticConstructors.h',
'wtf/StdLibExtras.h',
'wtf/StringExtras.h',
'wtf/StringHasher.h',
'wtf/StringPrintStream.h',
'wtf/TemporaryChange.h',
'wtf/ThreadRestrictionVerifier.h',
'wtf/ThreadSafeRefCounted.h',
'wtf/ThreadSpecific.h',
'wtf/Threading.h',
'wtf/ThreadingPrimitives.h',
'wtf/TypeTraits.h',
'wtf/UnusedParam.h',
'wtf/VMTags.h',
'wtf/ValueCheck.h',
'wtf/Vector.h',
'wtf/VectorTraits.h',
'wtf/WTFThreadData.h',
'wtf/WeakPtr.h',
'wtf/dtoa.h',
'wtf/dtoa/bignum-dtoa.h',
'wtf/dtoa/bignum.h',
'wtf/dtoa/cached-powers.h',
'wtf/dtoa/diy-fp.h',
'wtf/dtoa/double-conversion.h',
'wtf/dtoa/double.h',
'wtf/dtoa/fast-dtoa.h',
'wtf/dtoa/fixed-dtoa.h',
'wtf/dtoa/strtod.h',
'wtf/dtoa/utils.h',
'wtf/text/ASCIIFastPath.h',
'wtf/text/AtomicString.h',
'wtf/text/AtomicStringHash.h',
'wtf/text/AtomicStringImpl.h',
'wtf/text/Base64.h',
'wtf/text/CString.h',
'wtf/text/IntegerToStringConversion.h',
'wtf/text/StringBuffer.h',
'wtf/text/StringBuilder.h',
'wtf/text/StringConcatenate.h',
'wtf/text/StringHash.h',
'wtf/text/StringImpl.h',
'wtf/text/StringOperators.h',
'wtf/text/TextPosition.h',
'wtf/text/WTFString.h',
'wtf/threads/BinarySemaphore.h',
'wtf/unicode/CharacterNames.h',
'wtf/unicode/Collator.h',
'wtf/unicode/UTF8.h',
'wtf/unicode/Unicode.h',
'wtf/unicode/icu/UnicodeIcu.h',
],
'wtf_files': [
'wtf/ArrayBuffer.cpp',
'wtf/ArrayBuffer.h',
'wtf/ArrayBufferView.cpp',
'wtf/ArrayBufferView.h',
'wtf/Assertions.cpp',
'wtf/AutodrainedPool.h',
'wtf/AutodrainedPoolMac.mm',
'wtf/BitVector.cpp',
'wtf/CryptographicallyRandomNumber.cpp',
'wtf/CurrentTime.cpp',
'wtf/DataLog.cpp',
'wtf/DateMath.cpp',
'wtf/DecimalNumber.cpp',
'wtf/DynamicAnnotations.cpp',
'wtf/DynamicAnnotations.h',
'wtf/FastMalloc.cpp',
'wtf/FilePrintStream.cpp',
'wtf/Float32Array.h',
'wtf/Float64Array.h',
'wtf/GregorianDateTime.cpp',
'wtf/HashTable.cpp',
'wtf/Int16Array.h',
'wtf/Int32Array.h',
'wtf/Int8Array.h',
'wtf/IntegralTypedArrayBase.h',
'wtf/MD5.cpp',
'wtf/MainThread.cpp',
'wtf/MallocZoneSupport.h',
'wtf/MediaTime.cpp',
'wtf/MediaTime.h',
'wtf/MetaAllocator.cpp',
'wtf/MetaAllocator.h',
'wtf/NullPtr.cpp',
'wtf/NumberOfCores.cpp',
'wtf/NumberOfCores.h',
'wtf/OSAllocatorPosix.cpp',
'wtf/OSAllocatorWin.cpp',
'wtf/OSRandomSource.cpp',
'wtf/OSRandomSource.h',
'wtf/PageAllocationAligned.cpp',
'wtf/PageBlock.cpp',
'wtf/ParallelJobs.h',
'wtf/ParallelJobsGeneric.cpp',
'wtf/ParallelJobsGeneric.h',
'wtf/ParallelJobsLibdispatch.h',
'wtf/ParallelJobsOpenMP.h',
'wtf/PrintStream.cpp',
'wtf/RAMSize.cpp',
'wtf/RAMSize.h',
'wtf/RandomNumber.cpp',
'wtf/RandomNumberSeed.h',
'wtf/RefCountedLeakCounter.cpp',
'wtf/RunLoopTimer.h',
'wtf/RunLoopTimerCF.cpp',
'wtf/SHA1.cpp',
'wtf/SHA1.h',
'wtf/SchedulePairCF.cpp',
'wtf/SchedulePairMac.mm',
'wtf/SegmentedVector.h',
'wtf/SizeLimits.cpp',
'wtf/StackBounds.cpp',
'wtf/StringPrintStream.cpp',
'wtf/TCPackedCache.h',
'wtf/TCPageMap.h',
'wtf/TCSpinLock.h',
'wtf/TCSystemAlloc.cpp',
'wtf/TCSystemAlloc.h',
'wtf/ThreadFunctionInvocation.h',
'wtf/ThreadIdentifierDataPthreads.cpp',
'wtf/ThreadIdentifierDataPthreads.h',
'wtf/ThreadSpecificWin.cpp',
'wtf/Threading.cpp',
'wtf/ThreadingPthreads.cpp',
'wtf/ThreadingWin.cpp',
'wtf/TypeTraits.cpp',
'wtf/TypedArrayBase.h',
'wtf/Uint16Array.h',
'wtf/Uint32Array.h',
'wtf/Uint8Array.h',
'wtf/WTFThreadData.cpp',
'wtf/chromium/ChromiumThreading.h',
'wtf/chromium/MainThreadChromium.cpp',
'wtf/dtoa.cpp',
'wtf/dtoa/bignum-dtoa.cc',
'wtf/dtoa/bignum.cc',
'wtf/dtoa/cached-powers.cc',
'wtf/dtoa/diy-fp.cc',
'wtf/dtoa/double-conversion.cc',
'wtf/dtoa/fast-dtoa.cc',
'wtf/dtoa/fixed-dtoa.cc',
'wtf/dtoa/strtod.cc',
'wtf/efl/MainThreadEfl.cpp',
'wtf/gobject/GOwnPtr.cpp',
'wtf/gobject/GOwnPtr.h',
'wtf/gobject/GRefPtr.cpp',
'wtf/gobject/GRefPtr.h',
'wtf/gobject/GTypedefs.h',
'wtf/gtk/MainThreadGtk.cpp',
'wtf/mac/MainThreadMac.mm',
'wtf/qt/MainThreadQt.cpp',
'wtf/qt/StringQt.cpp',
'wtf/text/AtomicString.cpp',
'wtf/text/Base64.cpp',
'wtf/text/CString.cpp',
'wtf/text/StringBuilder.cpp',
'wtf/text/StringImpl.cpp',
'wtf/text/StringStatics.cpp',
'wtf/text/WTFString.cpp',
'wtf/threads/BinarySemaphore.cpp',
'wtf/unicode/CollatorDefault.cpp',
'wtf/unicode/ScriptCodesFromICU.h',
'wtf/unicode/UTF8.cpp',
'wtf/unicode/UnicodeMacrosFromICU.h',
'wtf/unicode/icu/CollatorICU.cpp',
'wtf/unicode/wchar/UnicodeWchar.cpp',
'wtf/unicode/wchar/UnicodeWchar.h',
'wtf/win/MainThreadWin.cpp',
'wtf/win/OwnPtrWin.cpp',
'wtf/wince/FastMallocWinCE.h',
'wtf/wince/MemoryManager.cpp',
'wtf/wince/MemoryManager.h',
'wtf/wx/MainThreadWx.cpp',
'wtf/wx/StringWx.cpp',
],
}
}
|
[
"krybas@tau-technologies.com"
] |
krybas@tau-technologies.com
|
b80b9886cbd796c8119c949ece1f263ff06defb9
|
981a006a2c63082e36e67d0a273d89698c910cb9
|
/classy_vision/hooks/torchscript_hook.py
|
96956fac1078b77e4da1465e1ce4997e90c8b78b
|
[
"MIT"
] |
permissive
|
elrach/ClassyVision
|
8d3aa5e431868dc3f657efa395ec7d67a72df0d2
|
ac2993df04dcb85d1ad85cc020da1ac9cfa28066
|
refs/heads/master
| 2022-12-23T10:22:11.757461
| 2020-09-12T06:20:38
| 2020-09-12T06:23:14
| 295,558,705
| 0
| 0
|
MIT
| 2020-09-14T23:05:00
| 2020-09-14T23:04:59
| null |
UTF-8
|
Python
| false
| false
| 3,384
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
from classy_vision.generic.distributed_util import is_primary
from classy_vision.generic.util import eval_model, get_model_dummy_input
from classy_vision.hooks import register_hook
from classy_vision.hooks.classy_hook import ClassyHook
from fvcore.common.file_io import PathManager
# constants
TORCHSCRIPT_FILE = "torchscript.pt"
@register_hook("torchscript")
class TorchscriptHook(ClassyHook):
"""
Hook to convert a task model into torch script.
Saves the torch scripts in torchscript_folder.
"""
on_phase_start = ClassyHook._noop
on_phase_end = ClassyHook._noop
on_step = ClassyHook._noop
def __init__(self, torchscript_folder: str, use_trace: bool = True) -> None:
"""The constructor method of TorchscriptHook.
Args:
torchscript_folder: Folder to store torch scripts in.
use_trace: set to true for tracing and false for scripting,
"""
super().__init__()
assert isinstance(
torchscript_folder, str
), "torchscript_folder must be a string specifying the torchscript directory"
self.torchscript_folder: str = torchscript_folder
self.use_trace: bool = use_trace
def torchscript_using_trace(self, model):
input_shape = model.input_shape if hasattr(model, "input_shape") else None
if not input_shape:
            logging.warning(
                "This model doesn't implement input_shape. "
                "Cannot save torchscripted model."
            )
return
input_data = get_model_dummy_input(
model,
input_shape,
input_key=model.input_key if hasattr(model, "input_key") else None,
)
        with eval_model(model), torch.no_grad():  # comma, not 'and': 'and' enters only the last context manager
torchscript = torch.jit.trace(model, input_data)
return torchscript
def torchscript_using_script(self, model):
        with eval_model(model), torch.no_grad():
torchscript = torch.jit.script(model)
return torchscript
def save_torchscript(self, task) -> None:
model = task.base_model
torchscript = (
self.torchscript_using_trace(model)
if self.use_trace
else self.torchscript_using_script(model)
)
# save torchscript:
logging.info("Saving torchscript to '{}'...".format(self.torchscript_folder))
torchscript_name = f"{self.torchscript_folder}/{TORCHSCRIPT_FILE}"
with PathManager.open(torchscript_name, "wb") as f:
torch.jit.save(torchscript, f)
def on_start(self, task) -> None:
if not is_primary() or getattr(task, "test_only", False):
return
if not PathManager.exists(self.torchscript_folder):
err_msg = "Torchscript folder '{}' does not exist.".format(
self.torchscript_folder
)
raise FileNotFoundError(err_msg)
def on_end(self, task) -> None:
"""Save model into torchscript by the end of training.
"""
if not is_primary() or getattr(task, "test_only", False):
return
self.save_torchscript(task)
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
b766b397400af50688bf8558c7a4d5c3081c81db
|
22ac0c3ac59bdde3349ffb7cfbf820bfafa1c7c0
|
/Chapter6.py
|
d2c5320b75fa53f8fdef0e7665fdd2b823366e13
|
[] |
no_license
|
OneHandKlap/Data-Structures
|
ae3aa105b732b37b22c5be31eefc3184b6f33a06
|
8e5ed038d9f9bd1e988aada6cb90221b621ac8d6
|
refs/heads/master
| 2020-07-23T01:02:07.712731
| 2019-12-04T23:46:20
| 2019-12-04T23:46:20
| 207,393,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,146
|
py
|
def bubble_sort(arr):
for i in range(len(arr)-1):
swapped_items=False
for j in range(len(arr)-1):
if arr[j]>arr[j+1]:
swapped_items=True
arr[j],arr[j+1]=arr[j+1],arr[j]
if swapped_items==False:
return arr
return arr
def recursive_bubble_sort(arr, flag=True, count=0):
    if flag == False:
        return arr
    else:
        flag = False
        # each pass bubbles the largest remaining element to the end, so the
        # inner loop must shrink from the right (the original started the
        # range at `count`, which skipped the front of the list unsorted)
        for i in range(0, len(arr) - 1 - count):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
                flag = True
        count += 1
        return recursive_bubble_sort(arr, flag, count)
def selection_sort(arr):
for j in range(len(arr)):
swap_items=False
largest_index=0
for i in range(len(arr)-j):
if arr[i]>arr[largest_index]:
largest_index=i
swap_items=True
if swap_items:
arr[len(arr)-1-j],arr[largest_index]=arr[largest_index],arr[len(arr)-1-j]
return arr
def recursive_sel_sort(arr,flag=True,count=0):
if flag==False or count==len(arr):
return arr
else:
        flag = False  # assignment, not comparison: the original '==' was a no-op
largest=max(arr[:len(arr)-count])
index_largest=arr.index(largest)
arr[len(arr)-1-count],arr[index_largest]=arr[index_largest],arr[len(arr)-1-count]
flag=True
count+=1
return (recursive_sel_sort(arr,flag,count))
l=[3,9,3,4,6,5]
print(recursive_sel_sort(l))
def josephus(num_prisoners,num_candy,start_pos):
end_pos=(num_candy%num_prisoners)+start_pos-1
print(end_pos)
if end_pos<num_prisoners:
return end_pos
else:
return end_pos-num_prisoners-1
print(josephus(352926151 ,380324688 ,94730870))
def insertion_sort(arr):
for i in range(len(arr)):
local_min=min(arr[i:])
arr.remove(local_min)
arr.insert(i,local_min)
return arr
def recursive_insertion_sort(arr,acc=[]):
if arr==[]:
return acc
else:
acc.append(min(arr))
arr.remove(min(arr))
return(recursive_insertion_sort(arr,acc))
# l=[6,5,3,2,1]
# print(insertion_sort(l))
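# Quick sanity checks for the remaining sorts (expected output in comments):
print(bubble_sort([3, 9, 3, 4, 6, 5]))         # [3, 3, 4, 5, 6, 9]
print(recursive_bubble_sort([6, 5, 3, 2, 1]))  # [1, 2, 3, 5, 6]
print(selection_sort([3, 9, 3, 4, 6, 5]))      # [3, 3, 4, 5, 6, 9]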
|
[
"a.aboud10@gmail.com"
] |
a.aboud10@gmail.com
|
1c8de7cb1a7641f7811f665200cec89318684979
|
b883baf9a5214f97e6ebee484db5f9750f9fd864
|
/bin/gifmaker.py
|
41861e385902f8566e50763f7e7e4f031de31a0f
|
[] |
no_license
|
alsi3dy/New
|
6298c316c45349f2755b3352a6ebf7daff74f0fe
|
9f6dcba307206b9390b7003671e45eeb6b299fe5
|
refs/heads/master
| 2021-01-02T08:35:19.412607
| 2017-08-06T15:03:01
| 2017-08-06T15:03:01
| 99,021,474
| 0
| 0
| null | 2017-08-02T16:46:41
| 2017-08-01T16:24:44
|
Python
|
UTF-8
|
Python
| false
| false
| 681
|
py
|
#!/Users/soudalrashed/Development/Mah_Shed/New/bin/python3
#
# The Python Imaging Library
# $Id$
#
# convert sequence format to GIF animation
#
# history:
# 97-01-03 fl created
#
# Copyright (c) Secret Labs AB 1997. All rights reserved.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from __future__ import print_function
from PIL import Image
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
print("GIFMAKER -- create GIF animations")
print("Usage: gifmaker infile outfile")
sys.exit(1)
im = Image.open(sys.argv[1])
im.save(sys.argv[2], save_all=True)
|
[
"soud94@gmail.com"
] |
soud94@gmail.com
|
52a3708a8f2f489b24828448c92b3339daaf2c86
|
18920960453ac1113918fab9e78818850f2fe106
|
/bin/rst2html.py
|
3d7c8777005bdf778192f7ecec4e2d34e0ca90da
|
[] |
no_license
|
rinalnov/test-serverless-web
|
59bb549cbaa88809ecbd57e7f564b85b5bd3b591
|
f5838ccda079785e3ecc7f8261fb5d8a6ad30981
|
refs/heads/develop
| 2022-11-30T09:44:13.012418
| 2018-06-26T07:19:01
| 2018-06-26T07:19:01
| 138,563,002
| 0
| 1
| null | 2022-11-28T04:39:17
| 2018-06-25T08:07:08
|
Python
|
UTF-8
|
Python
| false
| false
| 664
|
py
|
#!/Users/rinalnov/Documents/Learning Project/Flask-Serverless/flask-sample/bin/python3.6
# $Id: rst2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html', description=description)
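# Typical invocation of this docutils front end:
#   rst2html.py input.rst output.html
# With no arguments it reads reStructuredText from stdin and writes HTML to stdout.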
|
[
""
] | |
cfcf58ffedee1d956c388e1a38f003912fe3ebb0
|
0c08a15045b24b56bdb42dff5cf210f9bee6827f
|
/bin/rst2latex.py
|
149ac2bb90e4c238ee73533db64356ab743a06e9
|
[
"MIT"
] |
permissive
|
squadran2003/family-album
|
205d6f4a7256e466506d796d7da37a0eeff65fe3
|
eae75987e4786255269ecee2482d715ae2229db2
|
refs/heads/master
| 2022-12-05T00:19:29.629432
| 2019-01-20T13:10:22
| 2019-01-20T13:10:22
| 165,837,569
| 0
| 0
|
MIT
| 2022-11-22T03:23:44
| 2019-01-15T11:15:38
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 829
|
py
|
#!/Users/andreascormack/Desktop/family-album/bin/python3
# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing LaTeX.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline
description = ('Generates LaTeX documents from standalone reStructuredText '
'sources. '
'Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sourceforge.net/docs/user/latex.html> for '
'the full reference.')
publish_cmdline(writer_name='latex', description=description)
|
[
"cormackandy@hotmail.com"
] |
cormackandy@hotmail.com
|
99f0f020c8daa526cd6880b5c7e6c9353f21974e
|
59f35ab81cb00b7ef38bbbcd4d910c98b9024bfc
|
/os_splitting/os_connector.py
|
fa284dda4faed735b4f4dc5bfabe737b395e01ae
|
[] |
no_license
|
apcrumbleton/Openstack_exercises
|
0a1dcaadbe094b410edb2f4b695c1a2ae632e03c
|
10253d79c42c61ef8fa7b1b03653e053b093b763
|
refs/heads/master
| 2021-06-14T06:14:42.112088
| 2017-05-07T07:23:30
| 2017-05-07T07:23:30
| 75,179,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18
|
py
|
../os_connector.py
|
[
"a.p.crumbleton@gmail.com"
] |
a.p.crumbleton@gmail.com
|
0a788d2fa281b70172a82ea0316ce99ec1723664
|
36cf8716e59832d9a53833789e928400ab007bcd
|
/tk15.py
|
86be6a47c0213bf69449e4d09219a89f85e64b6b
|
[
"MIT"
] |
permissive
|
visalpop/tkinter_sample
|
f4fc751aa4e652371b23075d2c8aef4baf3fafec
|
15474250431727e2b24b6f6aebc654c36ccf8d87
|
refs/heads/main
| 2023-02-25T16:05:50.004039
| 2021-02-03T00:34:56
| 2021-02-03T00:34:56
| 332,957,324
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
# -*- coding: utf-8 -*-
# tk15.pyw
import tkinter as tk
root = tk.Tk()
root.geometry("300x270")
ms_dict={}
for bw_int in range(1,6):
bw_str=str(bw_int)
ms_dict[bw_str]=tk.Message(text="borderwidth="+bw_str,relief="ridge",bd=bw_int)
ms_dict[bw_str].pack()
root.mainloop()
|
[
"vkkulathunga@gmail.com"
] |
vkkulathunga@gmail.com
|
c29b80a7634d228e3b483a5e45f45b0ae4c31382
|
0f05dbc813c8868d2d81989655ea73dc4b891687
|
/myenv/bin/django-admin
|
b3b5d1f8c84140fcaee2734112eb24bacf4889e6
|
[] |
no_license
|
cristianacmc/Motogirls
|
69787a67bbfd67605230c8e1504feccc3749a913
|
436d207bf8adcdf49d3425a331fae3cb29ac0983
|
refs/heads/master
| 2021-01-10T12:41:40.659670
| 2015-10-13T00:50:18
| 2015-10-13T00:50:18
| 43,902,386
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
#!/home/cristiana/moto/myenv/bin/python3.4
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
|
[
"cristianacmc@gmail.com"
] |
cristianacmc@gmail.com
|
|
29994c4e89e904cba2ed6c68dd3447b5dd75056f
|
da8422d2c8e58dfc987cf5ef27d46528940144f6
|
/clap/groups/groups/zabbix.py
|
7394d821bddc666a90f6345fbe24ef9b3ae59c8e
|
[] |
no_license
|
MatheusFerraroni/opm-mo833
|
9d5bfd4fa662b3783e8f38d7a2b47fb81dd4a760
|
1b8d1de70bfb15d1dbab25c3a2040bd08ae41858
|
refs/heads/master
| 2022-11-10T12:38:57.804758
| 2020-06-26T16:28:03
| 2020-06-26T16:28:03
| 266,851,155
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 69
|
py
|
playbook = 'roles/zabbix.yml'
actions = ['setup', 'pause', 'resume']
|
[
"matheussanches531@gmail.com"
] |
matheussanches531@gmail.com
|
292d6373b7cb870c9d07724363112b448af7861b
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_073/ch18_2019_09_03_18_23_34_599532.py
|
e7e7fc4a92c25b12a4d9cd23ac9b3be334cf68c1
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 69
|
py
|
def encontra_cateto(h, c2):
    # note: '**1/2' parses as (x**1)/2, which halves the value instead of
    # taking the square root, so use **0.5
    c1 = ((h**2) - (c2**2))**0.5
    return c1
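# e.g. the classic 3-4-5 right triangle: hypotenuse 5, one leg 3
print(encontra_cateto(5, 3))  # 4.0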
|
[
"you@example.com"
] |
you@example.com
|
72fde0b5cb7ccf36f125e4ab4d51e343281998ef
|
a20aa57231a60a0f50ce31c8d88a566b3323bf13
|
/09-About_Dictionary.py
|
97d987cc989ad3f2960d686703bed71c49826700
|
[] |
no_license
|
nirupam1sharma/Python-3-Beginner-to-advanced
|
d2f7eb43332c52b6241bbafecd5936b5a04385a1
|
1147c4698aaf3a9a71ee6f4c6122b6e139050de0
|
refs/heads/master
| 2020-04-06T10:55:37.426504
| 2018-10-07T11:35:48
| 2018-10-07T11:35:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 759
|
py
|
'''
### All about Dictionary ###
A dictionary stores values by key (key:value).
We can access a value using its key.
Dictionaries use curly braces.
'''
# 1-Create Dictionary
print('\n1-Create Dictionary')
dicts={'name':'narsi','age':20}
print(dicts)
# 2-Access Dictionary Using key
print('\n2-Access Dictionary Using key')
print(dicts['name'])
print(dicts['age'])
# 3-Update Dictionary
print('\n3-Update Dictionary')
dicts['name']='ghost'
dicts['age']=21
print(dicts)
# 4-Delete Dictionary Elements
print('\n4-Delete Dictionary Elements')
del dicts['age']
print(dicts)
# 5-Dictionary Method
print('\n5-Dictionary Method')
print(str(dicts))
print(len(dicts))
print(dicts.keys())
print(dicts.values())
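# 6-Safe access and iteration (a small extension of the methods above)
print('\n6-Safe access and iteration')
print(dicts.get('age', 'not found'))  # .get() returns a default instead of raising KeyError
for key, value in dicts.items():
    print(key, value)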
|
[
"noreply@github.com"
] |
nirupam1sharma.noreply@github.com
|
13ad4fa649a533c1adb98e8b27579c61a6e295e3
|
7813c391a020a013a9f97cb9cb150ea499175d2b
|
/demo_pointcloud_alignment.py
|
82c5a810dc665d4eac9c6127c41e62de8644c973
|
[] |
no_license
|
feixh/tensorflow_se3
|
02d19ae90d8c2cd4afac96f63692d2e9b7d3e16c
|
baa1cc40b32b076afd6f461f41cdb09dbc0222a9
|
refs/heads/master
| 2021-03-27T10:51:48.077985
| 2018-01-16T20:57:10
| 2018-01-16T20:57:10
| 117,741,015
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,188
|
py
|
import so3
import cv2
import numpy as np
import tensorflow as tf
NUM_POINTS = 100
if __name__ == '__main__':
w_gt = np.random.random(3)-0.5
Rgt, _ = cv2.Rodrigues(w_gt)
Rgt = Rgt.astype(np.float32)
src_pc = np.random.random([3, NUM_POINTS])
tgt_pc = Rgt.dot(src_pc)
# problem: given two sets of corresponding points
X_src = tf.placeholder(dtype=tf.float32, shape=(3, NUM_POINTS))
X_tgt = tf.placeholder(dtype=tf.float32, shape=(3, NUM_POINTS))
w = tf.Variable(initial_value=w_gt + 0.5 * np.random.randn(3),
dtype=tf.float32,
trainable=True)
R = so3.exp(w)
loss = tf.reduce_sum(tf.squared_difference(X_tgt, tf.matmul(R, X_src)))
err = tf.norm(so3.log(tf.matmul(R, Rgt.T)))
optimizer = tf.train.GradientDescentOptimizer(
learning_rate=0.01).minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print 'wgt=', w_gt
print 'w0=', w.eval()
for i in range(100):
print sess.run([w, loss, err], feed_dict={X_src: src_pc, X_tgt: tgt_pc})
sess.run(optimizer, feed_dict={X_src: src_pc, X_tgt: tgt_pc})
|
[
"hzhsfxh@gmail.com"
] |
hzhsfxh@gmail.com
|
13a0982b336fcf323545e33e93c902c4b1e0bd81
|
eed77d9eee4636984a5d92681e86ad1894a3e124
|
/regresi_prabowo_suara_sah.py
|
307306a0112d456e47e439aa525769cc1a250dbb
|
[] |
no_license
|
haditiyawijaya/uasdatamanajemen
|
555d4a9f8dc1a471bc4a2ec361a9fde5c82e825d
|
9a6542e689694cf7b2d13cf31720cc4a6523ac5f
|
refs/heads/master
| 2020-05-26T19:39:03.961633
| 2019-05-30T19:37:58
| 2019-05-30T19:37:58
| 188,351,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,256
|
py
|
# import libraries
from bs4 import BeautifulSoup
#import urllib.request
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import csv
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#from sklearn.model_selection import train_test_split
#from sklearn.linear_model import LinearRegression
# specify the url
url = 'https://kawalpemilu.org/#pilpres:0'
# The path to where you have your chrome webdriver stored
# (a raw string is needed: '\U' in a normal string literal is an invalid unicode escape):
webdriver_path = r'C:\Users\haditiyawijaya\Downloads\chromedriver_win32\chromedriver.exe'
# Add arguments telling Selenium to not actually open a window
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--window-size=1920x1080')
# Fire up the headless browser
browser = webdriver.Chrome(executable_path=webdriver_path,
options=chrome_options)
# Load webpage
browser.get(url)
# It can be a good idea to wait for a few seconds before trying to parse the page
# to ensure that the page has loaded completely.
time.sleep(10)
# Parse HTML, close browser
soup = BeautifulSoup(browser.page_source, 'html.parser')
# print(soup)
pretty = soup.prettify()
browser.quit()
# find results within table
results = soup.find('table',{'class':'table'})
rows = results.find_all('tr',{'class':'row'})
#array = []
#jokowi = []
prabowo = []
sah = []
# print(rows)
for r in rows:
# find all columns per result
data = r.find_all('td')
# check that columns have data
if len(data) == 0:
continue
# write columns to variables
#wilayah = data[1].find('a').getText()
#satu = data[2].find('span', attrs={'class':'abs'}).getText()
dua = data[3].find('span', attrs={'class': 'abs'}).getText()
tiga = data[4].find('span', attrs={'class': 'sah'}).getText()
# Remove decimal point
#satu = satu.replace('.','')
dua = dua.replace('.','')
tiga = tiga.replace('.','')
# Cast Data Type Integer
#satu = int(satu )
dua = int(dua)
tiga = int(tiga)
#array.append(wilayah)
#jokowi.append(satu)
prabowo.append(dua)
sah.append(tiga)
# Create Dictionary
#my_dict = {'wilayah':array,'value1':jokowi,'value2':prabowo,'value3':sah}
my_dict = {'value1':prabowo,'value2':sah}
# Create Dataframe
df = pd.DataFrame(my_dict)
#print(df)
#slicing data
X = df.iloc[:, :-1].values
y = df.iloc[:, 1].values
# Membagi data menjadi Training Set dan Test Set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0)
#Feature Scaling
"""from sklearn.preprocessing import StandardScaler
scale_X = StandardScaler()
X_train = scale_X.fit_transform(X_train)
X_test = scale_X.transform(X_test)"""
# Fitting Simple Linear Regression terhadap Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
# Memprediksi hasil Test Set
#y_pred = regressor.predict(X_test)
# Visualisasi hasil Training Set
plt.scatter(X_train, y_train, color = 'red')
plt.plot(X_train, regressor.predict(X_train), color = 'blue')
plt.title('Prabowo vs Suara Sah (Training set)')
plt.xlabel('Prabowo')
plt.ylabel('Suara Sah')
plt.show()
|
[
"noreply@github.com"
] |
haditiyawijaya.noreply@github.com
|
8479567e4fa7714fb6f47bca0f6a11fbd51f4fe9
|
20a1f277a55b9e4a49c0d5e112a1e16aa46a1fb9
|
/Legacy/Cripto/CriptoQLearning/Functions.py
|
63b499477c5e70c1b921f7a7b9714f34e2bcea55
|
[] |
no_license
|
richimf/CryptoBot
|
dec76d3ad90fdb59336966d826cc60bcb803510e
|
5ced039fef469263935e95ff78201ee69fb908b1
|
refs/heads/master
| 2021-07-16T19:39:04.682510
| 2021-07-12T21:04:43
| 2021-07-12T21:04:43
| 137,505,888
| 2
| 4
| null | 2021-07-12T21:35:01
| 2018-06-15T15:48:10
|
Python
|
UTF-8
|
Python
| false
| false
| 866
|
py
|
import numpy as np
import math
# prints formatted price
def formatPrice(n):
return ("-$" if n < 0 else "$") + "{0:.2f}".format(abs(n))
# returns the vector containing stock data from a fixed file
def getStockDataVec(key):
vec = []
lines = open("/Users/richie/Documents/GitHub/Botcito/Cripto/CriptoQLearning/data/" + key + ".csv", "r").read().splitlines()
for line in lines[1:]:
vec.append(float(line.split(",")[4])) # dame Adj Close
return vec
# returns the sigmoid
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# returns an n-day state representation ending at time t
def getState(data, t, n):
d = t - n + 1
block = data[d:t + 1] if d >= 0 else -d * [data[0]] + data[0:t + 1] # pad with t0
res = []
for i in range(n - 1):
res.append(sigmoid(block[i + 1] - block[i]))
return np.array([res])
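# Illustrative usage with synthetic prices (the CSV loader above expects a
# local file path, so it is skipped here): a 5-day state at t=10 is a
# (1, 4) array of sigmoid-squashed day-to-day differences.
if __name__ == "__main__":
    prices = [float(p) for p in range(100, 120)]
    print(getState(prices, 10, 5).shape)  # (1, 4)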
|
[
"richimf1@gmail.com"
] |
richimf1@gmail.com
|
d659cca83103609b2a36ee1ddf6450e694eef02f
|
b0fa1b5780b4b7b9f6f8a00d87b2510170869f08
|
/ejemploAPI/drf_cliente/cliente/cliente/wsgi.py
|
5c1e5e4e4d6a000c13d2dd1ad3f1c2106cb768d7
|
[] |
no_license
|
eduardoo12/Floreria
|
847862978bc5b32626182f42bf556b24dae80839
|
5faea73dda5795145a769db66abbba675eedead0
|
refs/heads/master
| 2020-11-25T02:56:48.877730
| 2019-12-16T20:13:39
| 2019-12-16T20:13:39
| 228,465,179
| 0
| 0
| null | 2019-12-16T21:02:51
| 2019-12-16T19:59:33
|
Python
|
UTF-8
|
Python
| false
| false
| 391
|
py
|
"""
WSGI config for cliente project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cliente.settings')
application = get_wsgi_application()
|
[
"eduardo@gmail.com"
] |
eduardo@gmail.com
|
8cf58fb81bf473b64f505c995d4245d0f542b663
|
219e1268f04a6d59424875d1620254b6bd2741e2
|
/LeetCode/reorderDataInLogFIle.py
|
60731bca8d8dec5a18b9d734c15335c8bee75772
|
[] |
no_license
|
dixitomkar1809/Coding-Python
|
227c68a9bdc724254a53621586126b3755803849
|
7134780687acfc2934562d8c7582fd33dfbefdf1
|
refs/heads/master
| 2023-06-21T10:29:29.022764
| 2021-07-10T06:20:40
| 2021-07-10T06:20:40
| 181,409,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 702
|
py
|
# Author: Omkar Dixit
# Email: omedxt@gmail.com
# Link: https://leetcode.com/problems/reorder-data-in-log-files/submissions/
class Solution(object):
def reorderLogFiles(self, logs):
"""
:type logs: List[str]
:rtype: List[str]
"""
digilogs = []
letterlogs = []
for log in logs:
if log.split(' ', 1)[1][0].isdigit():
digilogs.append(log)
else:
letterlogs.append(log)
def sortfunction(log):
logList= log.split(' ', 1)
return 1, logList[1], logList[0]
letterlogs.sort(key=sortfunction)
return letterlogs + digilogs
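# Illustrative call (log lines invented for the example): letter-logs are
# sorted by content, then identifier; digit-logs keep their original order.
if __name__ == "__main__":
    logs = ["dig1 8 1 5 1", "let1 art can", "dig2 3 6", "let2 own kit dig"]
    print(Solution().reorderLogFiles(logs))
    # ['let1 art can', 'let2 own kit dig', 'dig1 8 1 5 1', 'dig2 3 6']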
|
[
"omedxt@gmail.com"
] |
omedxt@gmail.com
|
b9f2613506f9f76036bddc00829fcfdfb4cafe30
|
5c2e4266abf6d2be9102d5309bf94071a1eae1db
|
/02 高级语法系列/cp 多线程/06.py
|
2bdc2499e1e3cb5239f77ca5b790e5f4c7513dbd
|
[] |
no_license
|
13834319675/python
|
8176d5da47136b9b3ec290eaa0b699c6b1e7a8ab
|
3e6f04670f6f01006f827794865488dd40bca380
|
refs/heads/master
| 2021-07-11T18:29:12.894401
| 2021-07-05T08:29:27
| 2021-07-05T08:29:27
| 171,112,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
import time
import threading
def fun():
print("start fun:")
time.sleep(2)
print("end fun")
print("main therad")
t1 = threading.Thread(target=fun,args=())
t1.start()
t1.join()
time.sleep(1)
print("main therad end")
|
[
"1134876981@qq.com"
] |
1134876981@qq.com
|
52dff1d463cb00883aea69550a2c3cac3fd31e95
|
f05e792af806f1069238aae3ce966a3268708f41
|
/PythonCodingClub/GameStructureExample/ShooterGame-move_player_and_enemy_to_modules/data/tools.py
|
a9479c2abb9f66aa8e9d6e9f1aeba37dec1fa915
|
[] |
no_license
|
MaxT2/EWPythonDevelopment
|
9de608e32f319132b85e0ea4a1548872d2c90b55
|
6154b85a0b35438bb51944eddc742684469e6b60
|
refs/heads/master
| 2020-06-18T12:10:56.381449
| 2019-07-11T18:39:04
| 2019-07-11T18:39:04
| 196,299,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,334
|
py
|
import os
import pygame as pg
def load_all_sfx(directory, accept=(".wav", ".mp3", ".ogg", ".mdi")):
"""
Load all sfx of extensions found in accept. Unfortunately it is
common to need to set sfx volume on a one-by-one basis. This must be done
manually if necessary in the setup module.
"""
effects = {}
for fx in os.listdir(directory):
name,ext = os.path.splitext(fx)
if ext.lower() in accept:
effects[name] = pg.mixer.Sound(os.path.join(directory, fx))
return effects
def load_all_gfx(directory,colorkey=(0,0,0),accept=(".png",".jpg",".bmp")):
"""
Load all graphics with extensions in the accept argument. If alpha
transparency is found in the image the image will be converted using
convert_alpha(). If no alpha transparency is detected image will be
converted using convert() and colorkey will be set to colorkey.
"""
graphics = {}
for pic in os.listdir(directory):
name,ext = os.path.splitext(pic)
if ext.lower() in accept:
img = pg.image.load(os.path.join(directory, pic))
if img.get_alpha():
img = img.convert_alpha()
else:
img = img.convert()
img.set_colorkey(colorkey)
graphics[name]=img
return graphics
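# Hedged usage sketch: the directory names are placeholders, not paths from
# this project. convert()/convert_alpha() need a display surface, so set a
# video mode before loading graphics.
if __name__ == "__main__":
    pg.init()
    pg.display.set_mode((100, 100))
    GFX = load_all_gfx("resources/graphics")
    SFX = load_all_sfx("resources/sound")
    print(sorted(GFX), sorted(SFX))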
|
[
"tristen@payne.org"
] |
tristen@payne.org
|
55e33d80c842504ade1b753e4c296116b4c26c25
|
d8f2f28c21a94d60c2b693adac350c0e46cf24b5
|
/lesson3/2.py
|
2afa289bec1966f3fc78f7eecf7a707a5bb6cc2b
|
[] |
no_license
|
tima-akulich/z25
|
fdf3c5a2053e72cfaa95bf44eb39dd5b37328180
|
3ddf8c3f1ecdb311560ae1ecf788e2ab9927f10a
|
refs/heads/master
| 2020-12-22T14:34:38.148212
| 2020-03-17T21:30:15
| 2020-03-17T21:30:15
| 236,825,874
| 0
| 18
| null | 2020-03-17T16:07:40
| 2020-01-28T19:47:25
|
Python
|
UTF-8
|
Python
| false
| false
| 140
|
py
|
size = int(input('Size? '))
for i in range(1, size + 1, 1):
for j in range(1, size + 1, 1):
print(i * j, end='\t')
print()
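# For example, entering 3 at the prompt prints the 3x3 multiplication table:
#   1   2   3
#   2   4   6
#   3   6   9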
|
[
"tima.akulich@gmail.com"
] |
tima.akulich@gmail.com
|
40db9717fb022a9b59fcac40c81d809493753a5a
|
faf50edfb415f2c5232d3279cf9b6d3684d1bb39
|
/distributed_lsh_alessandro_de_palma/python-implementation/distributed_SLSH/worker_node/tests/parallel_test.py
|
7cc52db65cb82eb7ac959db9d79db9fc26a0537d
|
[] |
no_license
|
ALFA-group/EEG_coma
|
aff66ed15f597bdde8583e11bec7d33210bab224
|
c2e65ab1d6491378c71520bc75827f8c1374715d
|
refs/heads/main
| 2023-04-23T18:42:17.519875
| 2021-05-04T21:22:09
| 2021-05-04T21:22:09
| 364,260,929
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,085
|
py
|
import unittest
import numpy as np
from worker_node.SLSH.hash_family import *
from worker_node.SLSH.lsh import *
from worker_node.SLSH.slsh import *
from worker_node.SLSH.selectors import *
from worker_node.node import *
from worker_node.query import Query
class TestParallelSLSH(unittest.TestCase):
def test_parallel_NN(self):
        # Test that a query's NN corresponds to what it should be, on 3 cores.
        cores = 3
D = 80
H_out = L1LSH([(-1, 1)] * D)
H_in = COSLSH(D)
X = np.eye(D) # The dataset is a unit matrix of size D.
m_out = 50
L_out = 50
m_in = 20
L_in = 10
k = 1
alpha = 0.01
# Create query and expected result.
x = X[21]
query = Query(x * 2)
# Execute parallel code.
temp1, queries = execute_node(cores, k, m_out, L_out, m_in, L_in, H_out, H_in, alpha, X=X, queries=[query])
print("In: {}".format(x * 2))
print("Out: {}".format(queries[0].neighbors[0]))
self.assertTrue(np.array_equal(queries[0].neighbors[0], x))
|
[
"shash@mit.edu"
] |
shash@mit.edu
|
82fa4b250629b57299d3bfdfa298bfd60a767c52
|
ec376cf212d72f3948ce4865224220d5bb6c1272
|
/virtual/bin/wheel
|
0791479aa5a0d30d177e56d25d221744f79837f1
|
[
"MIT"
] |
permissive
|
Okalll/Awwwards
|
9812909b90dc0e53e691e59aaeb49e6797b5bcf7
|
dd69fc817d38e5f31cbd07072bc03dca22e4772f
|
refs/heads/master
| 2020-04-29T10:53:00.296680
| 2019-03-22T12:33:34
| 2019-03-22T12:33:34
| 176,077,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
#!/home/moringa/Desktop/Awwards/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"vivieokall@gmail.com"
] |
vivieokall@gmail.com
|
|
506b07b13e22fc685409b43f2e14969ae5cad757
|
7af49ced7975f2d0ba0c808811317a96f90214b1
|
/test/test_add_contact.py
|
500daa243cd64c9720d806a2e92492b702ffe3ca
|
[
"Apache-2.0"
] |
permissive
|
alexanderpython/python_training
|
928d58aaa65901b05b3e86b8396cf9929fbebbf3
|
91d2983b415e72621bf993b56dcc78a8f6ee4b51
|
refs/heads/master
| 2021-01-18T21:07:45.069457
| 2016-05-19T06:29:27
| 2016-05-19T06:29:27
| 54,002,475
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
# -*- coding: utf-8 -*-
from model.contact import Contact
def test_add_contact(app, db, data_contacts, check_ui):
contact = data_contacts
old_contacts = db.get_contact_list()
app.contact.create(contact)
new_contacts = db.get_contact_list()
old_contacts.append(contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
|
[
"alexander.giderman@rtlabs.ru"
] |
alexander.giderman@rtlabs.ru
|
08c98de0c8c4d30deef7b09ca462266129253a38
|
ec8c572ea9ef5b996937c996607113bd91312b77
|
/app.py
|
66401fc9512f2f505082c4ce18f26aa2233b0669
|
[] |
no_license
|
muktadadariya/Student_Info
|
9a04aaab4477d6fcc844b789f75404567c17f6f5
|
05b87bf0571f3f6f8b4356ee3a18a3956a35be64
|
refs/heads/master
| 2021-06-12T13:25:40.929957
| 2017-03-06T22:30:43
| 2017-03-06T22:30:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,706
|
py
|
from flask import Flask
from flask import jsonify
from flask import url_for
from flask import abort
from flask import make_response
from flask import request
# Import the service which servers the data.
# This could be a service which loads the data
# from database or files or some website.
from data_provider_service import DataProviderService
DATA_PROVIDER = DataProviderService(15)
# create the Flask application
app = Flask(__name__)
# ROUTING:
# One way to configure routing is to use the @app.route() decorator
#
@app.route("/api", methods=["GET"])
def list_routes():
result = []
for rt in app.url_map.iter_rules():
result.append({
"methods": list(rt.methods),
"route": str(rt)
})
return jsonify({"routes": result, "total": len(result)})
def candidate():
    candidates = DATA_PROVIDER.get_candidates()
return jsonify({"candidates": candidates, "total": len(candidates)})
# ROUTING:
# Another way to configure routes is through the add_url_rule() method
#
# 1st parameter (/api/candidate) - route path
# 2nd parameter (candidate) - endpoint
# 3rd parameter (candidate) - function which is executed
#
app.add_url_rule('/api/candidate', 'candidate', candidate)
@app.route("/api/candidate/<string:id>", methods=["GET"])
def candidate_by_id(id):
    candidate = DATA_PROVIDER.get_candidate(id)
if candidate:
return jsonify({"candidate": candidate})
else:
#
# In case we did not find the candidate by id
# we send HTTP 404 - Not Found error to the client
#
abort(404)
# ROUTING:
# This method updates the name of a Candidate, which was found by it's ID.
# Please note the PUT HTTP verb in the methods list.
#
@app.route("/api/candidate/<string:id>/name/<string:new_name>", methods=["GET","PUT"])
def update_name(id, new_name):
nr_of_updated_items = DATA_PROVIDER.update_name(id, new_name)
if nr_of_updated_items == 0:
abort(404)
else:
return jsonify({"total_updated": nr_of_updated_items})
# ROUTING:
# One method can handle multiple routes, like in this case
# if there is no parameter after candidate, the method
# will default the nrOfItems to 1.
#
# The <int:nrOfItems> is a variable parameter and the int is a converter
# which converts the request parameter to an integer value.
#
@app.route("/api/random/candidate", defaults={"nrOfItems": 1}, methods=["GET"])
@app.route("/api/random/candidate/<int:nrOfItems>", methods=["GET"])
def random(nrOfItems):
candidates = DATA_PROVIDER.get_random_candidates(nrOfItems)
return jsonify({"candidates": candidates, "total": len(candidates)})
# ROUTING:
# This is a method which deletes the candidate with the given id.
# We use the DELETE HTTP verb for mapping this route.
#
@app.route("/api/candidate/delete/<string:id>", methods=["GET","DELETE"])
def delete(id):
if (DATA_PROVIDER.delete_candidate(id)):
return make_response("deleted")
else:
return abort(404)
# ROUTING:
# Here we use the HTTP POST verb and the Flask request object
# to get the data which was sent from the client.
#
# Please note, that the response contains the ID of the newly
# added Candidate and the URL through which this can be accessed.
#
@app.route("/api/candidate", methods=["POST"])
def add_candidate():
first_name = request.form["first_name"]
last_name = request.form["last_name"]
email = request.form["email"]
new_candidate_id = DATA_PROVIDER.add_candidate(first_name, last_name, email)
return jsonify({
"id": new_candidate_id,
"url": url_for("candidate_by_id", id=new_candidate_id)
})
if __name__ == "__main__":
app.run(debug=True)
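# Example requests against the routes above, using the Flask dev server's
# default address (assumed to be http://127.0.0.1:5000):
#   curl http://127.0.0.1:5000/api
#   curl http://127.0.0.1:5000/api/candidate
#   curl -X PUT http://127.0.0.1:5000/api/candidate/1/name/Alice
#   curl -X POST -d "first_name=Ada" -d "last_name=Lovelace" \
#        -d "email=ada@example.com" http://127.0.0.1:5000/api/candidate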
|
[
"muktadadariya17@gmail.com"
] |
muktadadariya17@gmail.com
|
7ba696c05f0211fee7f37099c1ef2ff16ce0398c
|
d05a1106b60b61d86bd65039f77b044e412ec5d9
|
/iq/card_2.py
|
26e022b1e7bfb0dfc9ecad70dc1c38d237c601f5
|
[] |
no_license
|
MagorokuYohei/hoby
|
fdbe762d24cc19dec04d7cd9caed3482f3992503
|
41cdb32e0a4d116dffa4d6f66900ad9f2763deee
|
refs/heads/master
| 2021-01-17T15:15:39.538230
| 2016-06-09T08:11:21
| 2016-06-09T08:11:21
| 47,348,411
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,731
|
py
|
#-*-coding:utf-8-*-
def f_numm(num,card,digit, target, sa, check, ans):
if digit >0:
for i in range(0, len(card)):
if card[i] == 0:
continue
numr = num + int(card[i]) * digit
sa_ = abs(target- (numr + digit-1))
if sa_ <= sa:
ppp = i/2
ppp *= 2
check.append(ppp)
ppp += 1
check.append(ppp)
numm(numr, card, (digit/10), target, sa, check, ans)
check.pop()
check.pop()
if digit == 0:
ans.append(num)
def numm(num,card,digit, target, sa, check, ans):
if digit >0:
for i in range(0, len(card)):
if i in check:
continue
numr = num + int(card[i]) * digit
sa_ = abs(target- (numr + digit-1))
if digit == 1:
sa_ -=1
if sa_ <= sa:
ppp = i/2
ppp *= 2
check.append(ppp)
ppp += 1
check.append(ppp)
numm(numr, card, (digit/10), target, sa, check, ans)
check.pop()
check.pop()
if digit == 0:
ans.append(num)
def make_ans(target, ans):
maji_ans = []
sa = 100000000
for i in range(0, len(ans)):
sa_= abs(ans[i] - target)
if sa > sa_:
sa = sa_
for i in range(0, len(ans)):
if sa == abs(ans[i] - target):
maji_ans.append(i)
return maji_ans
def bunkai(card_, card):
omote = card_/10
ura = card_ - (omote*10)
card.append(omote)
card.append(ura)
def main():
word = raw_input()
word = word.split(',')
card_ = word[2].split('/')
card = []
for i in range(0, len(card_)):
bunkai(int(card_[i]), card)
digit = 10**(int(word[0])-1)
mago = int(word[1])
digitn = digit
target = []
check = []
ans = []
while digit >0:
magorn = mago /digit
target.append(magorn)
mago -= digit*magorn
digit /=10
sa = 1000000000
if word[0] == '1':
numm(0,card,digitn, int(word[1]), sa, check, ans)
else:
f_numm(0,card,digitn, int(word[1]), sa, check, ans)
if len(ans) == 0:
print '-'
return 0
gans = ans
maji_ans = make_ans(int(word[1]), ans)
beko_ans = []
for i in range(0, len(maji_ans)):
beko_ans.append(str(gans[maji_ans[i]]))
beko_ans = list(set(beko_ans))
beko_ans.sort()
strn = ''
for i in range(0, len(beko_ans)):
strn += beko_ans[i] + ','
strn = strn.rstrip(',')
print strn
if __name__=='__main__':
main()
|
[
"magorock329@gmail.com"
] |
magorock329@gmail.com
|
db7cd87f6156047ade312c1eb7720efa2e1edf24
|
d4943cd895f644b1027396d1a6c3c432812506fd
|
/tools/hintsDatabaseInterface.py
|
642eac3b5a5d0cfb19c50f1e8dddb127c24b69ac
|
[
"Apache-2.0"
] |
permissive
|
mhaukness-ucsc/Comparative-Annotation-Toolkit
|
3b7049d09c0f9c3f3362270f0fb0ac50849226af
|
1cc2c0e4fabccc2977c15b531d86ae6cadaddfe0
|
refs/heads/master
| 2020-09-26T07:51:27.969135
| 2019-12-05T23:47:59
| 2019-12-05T23:47:59
| 226,208,115
| 0
| 0
|
Apache-2.0
| 2019-12-05T23:38:24
| 2019-12-05T23:38:24
| null |
UTF-8
|
Python
| false
| false
| 5,949
|
py
|
"""
This module interfaces with the hints database produced for Augustus, providing a SQLAlchemy ORM access to it.
"""
import sqlalchemy
from sqlalchemy.pool import NullPool
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import sessionmaker
def reflect_hints_db(db_path):
"""
Reflect the database schema of the hints database, automapping the existing tables
The NullPool is used to avoid concurrency issues with luigi. Using this activates pooling, but since sqlite doesn't
really support pooling, what effectively happens is just that it locks the database and the other connections wait.
:param db_path: path to hints sqlite database
:return: sqlalchemy.MetaData object, sqlalchemy.orm.Session object
"""
engine = sqlalchemy.create_engine('sqlite:///{}'.format(db_path), poolclass=NullPool)
metadata = sqlalchemy.MetaData()
metadata.reflect(bind=engine)
Base = automap_base(metadata=metadata)
Base.prepare()
speciesnames = Base.classes.speciesnames
seqnames = Base.classes.seqnames
hints = Base.classes.hints
featuretypes = Base.classes.featuretypes
Session = sessionmaker(bind=engine)
session = Session()
return speciesnames, seqnames, hints, featuretypes, session
def get_rnaseq_hints(genome, chromosome, start, stop, speciesnames, seqnames, hints, featuretypes, session):
"""
Extracts RNAseq hints from RNAseq hints database.
:param genome: genome (table) to query
:param chromosome: Chromosome to extract information from
:param start: start position on chromosome
:param stop: stop position in chromosome
:param speciesnames: speciesnames Table from reflect_hints_db
:param seqnames: seqnames Table from reflect_hints_db
:param hints: hints Table from reflect_hints_db
:param featuretypes: featuretypes Table from reflect_hints_db
:param session: Session object from reflect_hints_db
:return: GFF formatted string.
"""
speciesid = session.query(speciesnames.speciesid).filter_by(speciesname=genome)
seqnr = session.query(seqnames.seqnr).filter(
sqlalchemy.and_(
seqnames.speciesid.in_(speciesid),
(seqnames.seqname == chromosome)))
query = session.query(hints, featuretypes).filter(
sqlalchemy.and_(
hints.speciesid.in_(speciesid),
hints.seqnr.in_(seqnr),
hints.start >= start,
hints.end <= stop,
featuretypes.typeid == hints.type))
hints = []
for h, f in query:
tags = 'pri=3;src={};mult={}'.format(h.esource, h.mult)
# add 1 to both start and end to shift to 1-based
l = [chromosome, h.source, f.typename, h.start + 1, h.end + 1, h.score, '.', '.', tags]
hints.append('\t'.join(map(str, l)) + '\n')
return ''.join(hints)
def get_wiggle_hints(genome, speciesnames, seqnames, hints, session):
"""
Extracts all wiggle hints for a genome to a BED-like format.
:param genome: genome (table) to query
:param speciesnames: speciesnames Table from reflect_hints_db
:param seqnames: seqnames Table from reflect_hints_db
:param hints: hints Table from reflect_hints_db
:param session: Session object from reflect_hints_db
:return: iterator of BED format lists
"""
speciesid = session.query(speciesnames.speciesid).filter_by(speciesname=genome)
seqs = {x.seqnr: x.seqname for x in session.query(seqnames).filter_by(speciesid=speciesid)}
# chunk up the genome to reduce memory usage
for seqnr, seqname in seqs.iteritems():
query = session.query(hints.start, hints.end, hints.score).filter(
sqlalchemy.and_(hints.speciesid.in_(speciesid), hints.source == 'w2h', hints.seqnr == seqnr))
for start, end, score in query:
# add 1 to end to convert to half-open interval
yield seqname, start, end + 1, score
def hints_db_has_rnaseq(db_path, genome=None):
"""
Determines if the hints DB has RNAseq. Is done by querying for one b2h or w2h in hints
:param db_path: path to database
:param genome: set this to query a specific genome instead of the database in general
:return: boolean
"""
speciesnames, seqnames, hints, featuretypes, session = reflect_hints_db(db_path)
query = session.query(hints).filter(sqlalchemy.or_(hints.source == 'w2h', hints.source == 'b2h'))
if genome is not None:
speciesid = session.query(speciesnames.speciesid).filter_by(speciesname=genome)
query = query.filter(hints.speciesid == speciesid)
r = query.first() is not None
session.close()
return r
def genome_has_no_wiggle_hints(db_path, genome):
"""
Determines if the hints db for a specific genome has wiggle hints
:param db_path: path to database
:param genome: genome in question
:return: boolean
"""
speciesnames, seqnames, hints, featuretypes, session = reflect_hints_db(db_path)
query = session.query(hints).filter(hints.source == 'w2h')
speciesid = session.query(speciesnames.speciesid).filter_by(speciesname=genome)
query = query.filter(hints.speciesid == speciesid)
r = query.first() is None
session.close()
return r
def hints_db_has_annotation(db_path, genome=None):
"""
Determines if the hints DB has annotation. Is done by querying for a2h in hints
:param db_path: path to database
:param genome: set this to query a specific genome instead of the database in general
:return: boolean
"""
speciesnames, seqnames, hints, featuretypes, session = reflect_hints_db(db_path)
query = session.query(hints).filter(hints.source == 'a2h')
if genome is not None:
speciesid = session.query(speciesnames.speciesid).filter_by(speciesname=genome)
query = query.filter(hints.speciesid == speciesid)
r = query.first() is not None
session.close()
return r
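# Hedged usage sketch; 'hints.db' and 'hg38' are placeholder values, not a
# path or genome taken from this repository.
if __name__ == '__main__':
    if hints_db_has_rnaseq('hints.db', genome='hg38'):
        speciesnames, seqnames, hints, featuretypes, session = reflect_hints_db('hints.db')
        gff = get_rnaseq_hints('hg38', 'chr1', 0, 100000,
                               speciesnames, seqnames, hints, featuretypes, session)
        session.close()
        print(gff[:200])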
|
[
"ian.t.fiddes@gmail.com"
] |
ian.t.fiddes@gmail.com
|
ea0460949c6583b934516dffd930f670f4c2e055
|
cfb6cc1b52dc080db0d9dd9b5f1fac2989a8eb30
|
/CodePackages/NLTK/nltk5.py
|
dd2d7b51948693583df223170e4668eef5c5f2c4
|
[] |
no_license
|
MansMeg/TextMiningCourse
|
114c4987b65b1d18583eb75dfb68d3d2eafb2d44
|
28101c03347bd161e1a959653bcadbbcddb88f3b
|
refs/heads/master
| 2020-04-06T06:36:23.523987
| 2015-11-30T09:01:31
| 2015-11-30T09:01:31
| 46,229,834
| 0
| 1
| null | 2015-11-15T18:20:18
| 2015-11-15T18:20:18
| null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
import nltk
from nltk.book import *
text7.generate() # Generates text using trigrams
# POS
myText = 'This is my text, and I will now try to tag it'
myText = nltk.word_tokenize(myText)
nltk.pos_tag(myText)
# nltk.parse.viterbi is a module, not a callable; its ViterbiParser also
# needs a PCFG grammar, so the call is left commented out:
# nltk.parse.viterbi(myText)
nltk.corpus.brown.tagged_words()[1:10]
|
[
"mattias.villani@gmail.com"
] |
mattias.villani@gmail.com
|
40a667cbffa250f6cc7c1eaa714e24382ef94089
|
7c37351bd33a568593f7d64d8d8cc8359ce7c14e
|
/setup.py
|
0275ca5b503c4335406186df99acb4fc8473af6a
|
[
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
Vultaire/r21buddy
|
972d37bac465666a770c77d1c6bfd1d035549d8d
|
8df66abae1392aad8cecbb1200e7e221f2d0e280
|
refs/heads/master
| 2020-11-26T21:07:56.231757
| 2013-12-22T10:41:49
| 2013-12-22T10:41:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,254
|
py
|
from distutils.core import setup
import py2exe
version = "1.1a"
setup(
name="r21buddy",
version=version,
author="Paul Goins",
author_email="general@vultaire.net",
url="https://github.com/Vultaire/r21buddy",
description="Utilities for preparing StepMania songs for use with ITG R21.",
download_url="https://github.com/Vultaire/r21buddy/zipball/master",
#platforms=["nt", "posix"], # Not sure of format...
license="MIT",
packages=["r21buddy"],
# py2exe-specific
console=[
"r21buddy/r21buddy.py",
"r21buddy/oggpatch.py",
],
windows=[
"r21buddy/r21buddy_gui.py",
"r21buddy/oggpatch_gui.py",
],
options={
"py2exe": {
# Currently, the GUIs will not build if the below options
# are enabled.
#"bundle_files": 1,
"excludes": ["_socket", "_ssl", "win32api", "win32evtlog"],
"dll_excludes": [
"API-MS-Win-Core-LocalRegistry-L1-1-0.dll",
"API-MS-Win-Core-ProcessThreads-L1-1-0.dll",
"API-MS-Win-Security-Base-L1-1-0.dll",
"POWRPROF.dll",
"pywintypes27.dll",
]
},
},
)
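# Typical build invocation for this py2exe setup script:
#   python setup.py py2exe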
|
[
"general@vultaire.net"
] |
general@vultaire.net
|
a1e9785f8535dbd31030f2f419aab901bc6263f2
|
5f03110f82c52fc733155c0ef23aba201f052645
|
/Project 03/lessThanFifteen.py
|
8188a5391431e8311073e6bab507c999b7bae172
|
[] |
no_license
|
Tc-blip/SSW555
|
b7f0757182d4a8f1dfc87d1b937311b8b6dd8291
|
4d2511e5bcbf0c4d8799a189e320823fbe6644b1
|
refs/heads/master
| 2020-07-29T04:32:51.195643
| 2019-11-22T04:33:52
| 2019-11-22T04:33:52
| 209,670,726
| 0
| 0
| null | 2019-11-21T17:38:23
| 2019-09-20T00:18:31
|
Python
|
UTF-8
|
Python
| false
| false
| 415
|
py
|
# US15
# There should be fewer than 15 siblings in a family
def check_lessThanFifteen(familyList):
for key in familyList:
family = familyList[key]
if not lessThanFifteen(family.Children):
print("US15 Error: There should be fewer than 15 siblings in a family")
def lessThanFifteen(children):
if len(children) > 15:
return False
else:
return True
|
[
"kmorel@stevens.edu"
] |
kmorel@stevens.edu
|
8b2cfc5dddc6b71ee66f957ad9063e973d145ee0
|
1e6442422003a6c01cf3416bba435635afc3aefa
|
/servo.py
|
1de9f37796fed92fb36e67974cb50c0cc7051811
|
[] |
no_license
|
AdamJankowsky/Python-OpenCV-Brick
|
75844f7c18454ff8ce421b6d2ec3cffa5c117c37
|
4aa13ea5e6b2121b2efd70d957fce7ed26706ab2
|
refs/heads/master
| 2021-01-25T06:44:55.628036
| 2017-06-07T07:00:59
| 2017-06-07T07:00:59
| 93,601,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)
pwm = GPIO.PWM(18, 100)
pwm.start(5)
def update(angle):
duty = float(angle) / 18.0 + 2.5
pwm.ChangeDutyCycle(duty)
print(duty)
time.sleep(1)
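# Example sweep (an illustrative addition, not in the original script):
try:
    for angle in (0, 45, 90, 135, 180):
        update(angle)
finally:
    pwm.stop()
    GPIO.cleanup()  # release the pin when done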
|
[
"adam1jankowski@gmail.com"
] |
adam1jankowski@gmail.com
|
a7a1301f8c3edae61b48482aff4606502e7fa296
|
92f9b13b6cd1845dbcd411366028908c777a3342
|
/0x07-python-test_driven_development/100-matrix_mul.py
|
4f02fa300b7ce1225a46eb9e6e35de4fc0167eea
|
[] |
no_license
|
zamu5/holbertonschool-higher_level_programming
|
0013656f8fa25d02cffd1c3048dc7706ff4054ae
|
cde6e8ff1f91161a22af204bedefa679f266aada
|
refs/heads/master
| 2020-07-22T21:54:52.802829
| 2020-02-13T21:18:41
| 2020-02-13T21:18:41
| 207,341,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,914
|
py
|
#!/usr/bin/python3
"""
Module have function that multiplies 2 matrices
"""
def matrix_mul(m_a, m_b):
""" Function that multiplies 2 matrices"""
if not isinstance(m_a, list):
raise TypeError("m_a must be a list")
if not isinstance(m_b, list):
raise TypeError("m_b must be a list")
    # the original comprehensions here were always truthy for non-empty input,
    # so the list-of-lists check never fired; all() performs the intended test
    if not all(isinstance(row, list) for row in m_a):
        raise TypeError("m_a must be a list of lists")
    if not all(isinstance(row, list) for row in m_b):
        raise TypeError("m_b must be a list of lists")
if m_a == [] or m_a == [[]]:
raise TypeError("m_a can't be empty")
if m_b == [] or m_b == [[]]:
raise TypeError("m_b can't be empty")
for row in m_a:
if not all(isinstance(element, (int, float)) for element in row):
raise TypeError("m_a should contain only integers or floats")
for row in m_b:
if not all(isinstance(element, (int, float)) for element in row):
raise TypeError("m_b should contain only integers or floats")
if not all(len(l) == len(m_a[0]) for l in m_a):
raise TypeError("each row of m_a must be of the same size")
if not all(len(l) == len(m_b[0]) for l in m_b):
raise TypeError("each row of m_b must be of the same size")
if len(m_a[0]) != len(m_b):
raise ValueError("m_a and m_b can't be multiplied")
r1 = []
i1 = 0
for a in m_a:
r2 = []
i2 = 0
num = 0
while (i2 < len(m_b[0])):
num += a[i1] * m_b[i1][i2]
if i1 == len(m_b) - 1:
i1 = 0
i2 += 1
r2.append(num)
num = 0
else:
i1 += 1
r1.append(r2)
return r1
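# Quick sanity check (added for illustration):
# >>> matrix_mul([[1, 2], [3, 4]], [[1, 1], [1, 1]])
# [[3, 3], [7, 7]]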
|
[
"szamu5@hotmail.com"
] |
szamu5@hotmail.com
|
778ce290573ccced508c29000d2287491acc0e4f
|
6aa9deea03c2b015dcd582250b4aabdecb8800e9
|
/home/views.py
|
948bba873c2febe93e028bd68e00ca2adb5a783a
|
[] |
no_license
|
bopopescu/ajairahouse.com-jokes_sharing
|
4cab67dbebeb0fcba70160ec2299577785207bcb
|
b7c18d86b0ed7cff3e2e444bf637c10711f3f56b
|
refs/heads/master
| 2022-11-21T14:11:04.685535
| 2019-03-23T14:16:08
| 2019-03-23T14:16:08
| 281,797,551
| 0
| 0
| null | 2020-07-22T22:42:13
| 2020-07-22T22:42:12
| null |
UTF-8
|
Python
| false
| false
| 1,440
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from django.core.paginator import Paginator,EmptyPage,PageNotAnInteger
# Create your views here.
from bangla_jokes.models import Bangla_Jokes
from engineering_jokes.models import Engineering_Jokes
from fci_troll.models import Fci_Troll
from funny_videos.models import Funny_Videos
from programming_jokes.models import Programming_Jokes
from others.models import Logo
from django.db.models import Q
from itertools import chain
l=Logo.objects.last()
def home(request):
bangla_jokes=Bangla_Jokes.objects.last()
engineering_jokes=Engineering_Jokes.objects.last()
fci_troll=Fci_Troll.objects.last()
funny_videos=Funny_Videos.objects.last()
programming_jokes=Programming_Jokes.objects.last()
bangla=Bangla_Jokes.objects.order_by('?')[0]
engineer=Engineering_Jokes.objects.order_by('?')[0]
fci=Fci_Troll.objects.order_by('?')[0]
funny=Funny_Videos.objects.order_by('?')[0]
programming=Programming_Jokes.objects.order_by('?')[0]
    search=request.GET.get('q')  # read here but never used below
context={'bangla_jokes':bangla_jokes,
'engineering_jokes':engineering_jokes,
'fci_troll':fci_troll,
'funny_videos':funny_videos,
'programming_jokes':programming_jokes,
"l":l,
'bangla':bangla,
'engineer':engineer,
'fci':fci,
'funny':funny,
'programming':programming}
return render(request,'home.html',context)
|
[
"zanjarwhite@gmail.com"
] |
zanjarwhite@gmail.com
|
847a9195867cbaec223c2dfb9fa5f43d284ff356
|
526015b36e11cfd6e61dc1d97a746b50d4ac5d16
|
/django2/core/exceptions.py
|
e1a7294b1eb3309cb3f045e5a26075d7b10dd511
|
[] |
no_license
|
NAveeN4416/django2
|
e81d4b15332ff0ed2bf8923ec89ccd5c890e2302
|
ff11855931ff2e8db22fc4217f595297b651008a
|
refs/heads/master
| 2020-03-24T21:48:03.402544
| 2018-08-03T12:25:36
| 2018-08-03T12:25:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
'''
Extend every new exception class from rest_framework's APIException,
e.g.: class NewClass(APIException):
          def new_method(self):
              pass
'''
# Import Requirements Here
from rest_framework import exceptions
# Write Your Exceptional Classes here
|
[
"noreply@github.com"
] |
NAveeN4416.noreply@github.com
|
35944045acc6c2b875eb53c6576ecbe7f8eca5dc
|
4fc9753b439a67c56836b2103f5ce76c89f31be5
|
/pi_detect_drowsiness.py
|
27df8e35a682e13216dc8f126e3c923e84ed7d6a
|
[] |
no_license
|
shivanshxyz/raspberryPi_drowsiness_detector
|
b060b9badf67aba33bc4d0a6788fced0b3520b55
|
0e79f31ce2347f95b147a2f8256a388378bdfa03
|
refs/heads/master
| 2022-03-08T10:44:13.367877
| 2019-11-06T08:39:05
| 2019-11-06T08:39:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,239
|
py
|
from imutils.video import VideoStream
from imutils import face_utils
import numpy as np
import argparse
import imutils
import time
import dlib
import cv2
def eucdDistance(ptA, ptB):
return np.linalg.norm(ptA - ptB)
def eye_aspect_ratio(eye):
A = eucdDistance(eye[1], eye[5])
B = eucdDistance(eye[2], eye[4])
C = eucdDistance(eye[0], eye[3])
ear = (A + B) / (2.0 * C)
return ear
# Rebuild the missing argument parser: the original file imports argparse but
# never defines args. These option names are assumptions inferred from usage below.
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--cascade", required=True, help="path to face detector Haar cascade")
ap.add_argument("-p", "--shape-predictor", required=True, help="path to dlib landmark predictor")
ap.add_argument("-a", "--alarm", type=int, default=0, help="set > 0 to enable TrafficHat buzzer")
args = vars(ap.parse_args())
if args["alarm"] > 0:
    from gpiozero import TrafficHat
    th = TrafficHat()
EYE_THRESHOLD = 0.3    # EAR below this counts as a closed eye
EYE_ASPECT_RATIO = 16  # consecutive closed-eye frames before the alarm fires (a frame count, despite the name)
COUNTER = 0
ALARM_ON = False
detector = cv2.CascadeClassifier(args["cascade"])
predictor = dlib.shape_predictor(args["shape_predictor"])
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
vs = VideoStream(usePiCamera=True).start()
time.sleep(1.0)
while True:
frame = vs.read()
frame = imutils.resize(frame, width=450)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rects = detector.detectMultiScale(gray, scaleFactor=1.1,
minNeighbors=5, minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE)
for (x, y, w, h) in rects:
rect = dlib.rectangle(int(x), int(y), int(x + w),
int(y + h))
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
leftEye = shape[lStart:lEnd]
rightEye = shape[rStart:rEnd]
left_EAR = eye_aspect_ratio(leftEye)
right_EAR = eye_aspect_ratio(rightEye)
ear = (left_EAR + right_EAR) / 2.0
leftEyeHull = cv2.convexHull(leftEye)
rightEyeHull = cv2.convexHull(rightEye)
cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
if ear < EYE_THRESHOLD:
COUNTER += 1
if COUNTER >= EYE_ASPECT_RATIO:
if not ALARM_ON:
ALARM_ON = True
if args["alarm"] > 0:
th.buzzer.blink(0.1, 0.1, 10,
background=True)
cv2.putText(frame, "ALERT!!!! YOU ARE DROWSY", (10, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
else:
COUNTER = 0
ALARM_ON = False
cv2.putText(frame, "EAR: {:.3f}".format(ear), (300, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
cv2.destroyAllWindows()
vs.stop()
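# Example invocation (option names match the reconstructed parser above):
#   python pi_detect_drowsiness.py --cascade haarcascade_frontalface_default.xml \
#       --shape-predictor shape_predictor_68_face_landmarks.dat --alarm 1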
|
[
"noreply@github.com"
] |
shivanshxyz.noreply@github.com
|
29e68387ee087791c377c5ec97c551c1d24fc445
|
97c18798ed2bb8bf0ea8924a640a7eb018212065
|
/[0803]Algorithm/4880.tournament.py
|
0d301ab96b1cbada83e968695dace8b9f29d7d79
|
[] |
no_license
|
kanamycine/surely-kill-algorithm
|
21a8899f522f44659107f2556eea3bc48b343cc9
|
7fb2345b29dc0e2ddc3df9933c806a41ed2cd409
|
refs/heads/master
| 2022-12-20T06:12:05.442990
| 2020-09-18T11:40:11
| 2020-09-18T11:40:11
| 285,782,987
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 698
|
py
|
def win(x, y):
    # Hands: 1 = scissors, 2 = rock, 3 = paper (1 beats 3, 2 beats 1, 3 beats 2);
    # on a draw the lower-numbered player x advances.
    if (lst[x-1] == 1 and lst[y-1] == 3) or (lst[x-1] == 1 and lst[y-1] == 1):
        return x
    elif (lst[x-1] == 2 and lst[y-1] == 1) or (lst[x-1] == 2 and lst[y-1] == 2):
        return x
    elif (lst[x-1] == 3 and lst[y-1] == 2) or (lst[x-1] == 3 and lst[y-1] == 3):
        return x
    return y
def match(start, end):
if start == end:
return start
first_value = match(start, (start+end)//2)
second_value = match((start+end)//2+1, end)
return win(first_value, second_value)
TC = int(input())
for tc in range(1, TC+1):
N = int(input())
lst = list(map(int, input().split()))
start = 1
end = N
print(f'#{tc} {match(start, end)}')
|
[
"jinyeong5320@gmail.com"
] |
jinyeong5320@gmail.com
|
a466ea4b737b166d6abccb080b97565bbf2f7466
|
37688136300295e3b696ff1559ddabce9ce07ff1
|
/task1/mnist_cnn_doublefilters.py
|
5e84977a89cbf7bda77083d4b616d89e3138e5b9
|
[] |
no_license
|
singhsukhendra/dlcv04
|
94fbe113925ca1c903cefd649d4e6b6258f8b267
|
6330d3a16f0d1d311fe0f52f3581f52c2f426ddf
|
refs/heads/master
| 2021-06-01T12:33:41.618038
| 2016-07-08T11:41:00
| 2016-07-08T11:41:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,262
|
py
|
'''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
batch_size = 128
nb_classes = 10
nb_epoch = 12
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 64
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
border_mode='valid', input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
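# Quick inference sketch (added for illustration, not in the original script):
pred = model.predict(X_test[:1])
print('Predicted digit:', np.argmax(pred), '- true digit:', y_test[0])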
|
[
"miriambellver93@gmail.com"
] |
miriambellver93@gmail.com
|
d4c7c353e91b037420622af404ad3ab1c31461d8
|
496861be33a083db8c341a390f5d63eb0998b4e7
|
/python/81. string.py
|
d498af1b347ffa8c866de5b1da3e49f38625a0f7
|
[] |
no_license
|
kay30kim/Algorithm
|
95cebd21593c121d2eb6e1f219a3e94511bf31cf
|
19fcc965599860e827a30d4c026858022dcd9590
|
refs/heads/main
| 2022-07-01T18:12:28.272575
| 2022-06-15T15:42:22
| 2022-06-15T15:42:22
| 231,189,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,372
|
py
|
msg = input('임의의 문장을 입력하세요 :')
if 'a' in msg:
print('당신이 입력한 문장에는 a가 있습니다.')
else:
print('당신이 입력한 문장에는 a가 없습니다.')
print('당신의 문장 길이는 <%d>입니다.'%len(msg))
txt1 = 'A'
txt2 = '안녕'
txt3 = 'Warcraft Three'
txt4 = '1234'
ret1 = txt1.isalpha()
ret2 = txt2.isalpha()
ret3 = txt3.isalpha()
ret4 = txt4.isalpha()
print(ret1)
print(ret2)
print(ret3)
print(ret4)
ret4 = txt4.isdigit()
print(ret4)
txt1 = '안녕하세요?'
txt2 = '3피오R2D2'
ret1 = txt1.isalnum()
ret2 = txt2.isalnum()
print(ret1)
print(ret2)
txt = 'A lot of Things occur each day.'
ret1 = txt.upper()
ret2 = txt.lower()
print(ret1)
print(ret2)
txt = ' 양쪽에 공백이 있는 문자열입니다. '
ret1 = txt.lstrip()
ret2 = txt.rstrip()
ret3 = txt.strip()
print('<' + txt +'>')
print('<'+ret1+'>')
print('<'+ret2+'>')
print('<'+ret3+'>')
numstr = input('숫자를 입력하세요:')
try:
num = int(numstr)
print('당신이 입력한 숫자는 정수 <%d>입니다.' %num)
except:
try:
num = float(numstr)
print('당신이 입력한 숫자는 실수 <%f>입니다.'%num)
except:
print('+++숫자를 입력하세요+++')
num1 = 1234
num2 = 3.14
numstr1 = str(num1)
numstr2 = str(num2)
print('문자열 %s' %numstr1)
print('문자열 %s' %numstr2)
|
[
"kay30kim@gmail.com"
] |
kay30kim@gmail.com
|
9be78fa258a64498a991c13cdde69c5cc3228fef
|
25d69136cadc3755200160f9005ed17bb13409b8
|
/django_1.5/lib/python2.7/site-packages/django_extensions/management/commands/runserver_plus.py
|
8899a6b6870c2383c8d5658efe06f91fd1097d88
|
[] |
no_license
|
davidmfry/Django_Drag_and_Drop_v1
|
4cc89e767c11313451c77dee6cd206a5ee724b8d
|
5ec9c99acb127c04a241eb81d3df16533b4ce96f
|
refs/heads/master
| 2016-09-03T06:52:52.925510
| 2013-08-12T21:48:33
| 2013-08-12T21:48:33
| 12,006,953
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,983
|
py
|
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django_extensions.management.utils import setup_logger, RedirectHandler
from optparse import make_option
import os
import sys
import time
try:
from django.contrib.staticfiles.handlers import StaticFilesHandler
USE_STATICFILES = 'django.contrib.staticfiles' in settings.INSTALLED_APPS
except ImportError:
USE_STATICFILES = False
import logging
logger = logging.getLogger(__name__)
from django_extensions.management.technical_response import null_technical_500_response
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.'),
make_option('--browser', action='store_true', dest='open_browser',
help='Tells Django to open a browser.'),
make_option('--adminmedia', dest='admin_media_path', default='',
help='Specifies the directory from which to serve admin media.'),
make_option('--threaded', action='store_true', dest='threaded',
help='Run in multithreaded mode.'),
make_option('--output', dest='output_file', default=None,
help='Specifies an output file to send a copy of all messages (not flushed immediately).'),
make_option('--print-sql', action='store_true', default=False,
help="Print SQL queries as they're executed"),
make_option('--cert', dest='cert_path', action="store", type="string",
help='To use SSL, specify certificate path.'),
)
if USE_STATICFILES:
option_list += (
make_option('--nostatic', action="store_false", dest='use_static_handler', default=True,
help='Tells Django to NOT automatically serve static files at STATIC_URL.'),
make_option('--insecure', action="store_true", dest='insecure_serving', default=False,
help='Allows serving static files even if DEBUG is False.'),
)
help = "Starts a lightweight Web server for development."
args = '[optional port number, or ipaddr:port]'
# Validation is called explicitly each time the server is reloaded.
requires_model_validation = False
def handle(self, addrport='', *args, **options):
import django
setup_logger(logger, self.stderr, filename=options.get('output_file', None)) # , fmt="[%(name)s] %(message)s")
logredirect = RedirectHandler(__name__)
# Redirect werkzeug log items
werklogger = logging.getLogger('werkzeug')
werklogger.setLevel(logging.INFO)
werklogger.addHandler(logredirect)
werklogger.propagate = False
if options.get("print_sql", False):
from django.db.backends import util
try:
import sqlparse
except ImportError:
sqlparse = None # noqa
class PrintQueryWrapper(util.CursorDebugWrapper):
def execute(self, sql, params=()):
starttime = time.time()
try:
return self.cursor.execute(sql, params)
finally:
raw_sql = self.db.ops.last_executed_query(self.cursor, sql, params)
execution_time = time.time() - starttime
therest = ' -- [Execution time: %.6fs] [Database: %s]' % (execution_time, self.db.alias)
if sqlparse:
logger.info(sqlparse.format(raw_sql, reindent=True) + therest)
else:
logger.info(raw_sql + therest)
util.CursorDebugWrapper = PrintQueryWrapper
try:
from django.core.servers.basehttp import AdminMediaHandler
USE_ADMINMEDIAHANDLER = True
except ImportError:
USE_ADMINMEDIAHANDLER = False
try:
from django.core.servers.basehttp import get_internal_wsgi_application as WSGIHandler
except ImportError:
from django.core.handlers.wsgi import WSGIHandler # noqa
try:
from werkzeug import run_simple, DebuggedApplication
except ImportError:
raise CommandError("Werkzeug is required to use runserver_plus. Please visit http://werkzeug.pocoo.org/ or install via pip. (pip install Werkzeug)")
# usurp django's handler
from django.views import debug
debug.technical_500_response = null_technical_500_response
if args:
raise CommandError('Usage is runserver %s' % self.args)
if not addrport:
addr = ''
port = '8000'
else:
try:
addr, port = addrport.split(':')
except ValueError:
addr, port = '', addrport
if not addr:
addr = '127.0.0.1'
if not port.isdigit():
raise CommandError("%r is not a valid port number." % port)
threaded = options.get('threaded', False)
use_reloader = options.get('use_reloader', True)
open_browser = options.get('open_browser', False)
cert_path = options.get("cert_path")
quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C'
def inner_run():
print("Validating models...")
self.validate(display_num_errors=True)
print("\nDjango version %s, using settings %r" % (django.get_version(), settings.SETTINGS_MODULE))
print("Development server is running at http://%s:%s/" % (addr, port))
print("Using the Werkzeug debugger (http://werkzeug.pocoo.org/)")
print("Quit the server with %s." % quit_command)
path = options.get('admin_media_path', '')
if not path:
admin_media_path = os.path.join(django.__path__[0], 'contrib/admin/static/admin')
if os.path.isdir(admin_media_path):
path = admin_media_path
else:
path = os.path.join(django.__path__[0], 'contrib/admin/media')
handler = WSGIHandler()
if USE_ADMINMEDIAHANDLER:
handler = AdminMediaHandler(handler, path)
if USE_STATICFILES:
use_static_handler = options.get('use_static_handler', True)
insecure_serving = options.get('insecure_serving', False)
if use_static_handler and (settings.DEBUG or insecure_serving):
handler = StaticFilesHandler(handler)
if open_browser:
import webbrowser
url = "http://%s:%s/" % (addr, port)
webbrowser.open(url)
if cert_path:
"""
OpenSSL is needed for SSL support.
This will make flakes8 throw warning since OpenSSL is not used
directly, alas, this is the only way to show meaningful error
messages. See:
http://lucumr.pocoo.org/2011/9/21/python-import-blackbox/
for more information on python imports.
"""
try:
import OpenSSL # NOQA
except ImportError:
raise CommandError("Python OpenSSL Library is "
"required to use runserver_plus with ssl support. "
"Install via pip (pip install pyOpenSSL).")
dir_path, cert_file = os.path.split(cert_path)
if not dir_path:
dir_path = os.getcwd()
root, ext = os.path.splitext(cert_file)
certfile = os.path.join(dir_path, root + ".crt")
keyfile = os.path.join(dir_path, root + ".key")
try:
from werkzeug.serving import make_ssl_devcert
if os.path.exists(certfile) and \
os.path.exists(keyfile):
ssl_context = (certfile, keyfile)
else: # Create cert, key files ourselves.
ssl_context = make_ssl_devcert(
os.path.join(dir_path, root), host='localhost')
except ImportError:
print("Werkzeug version is less than 0.9, trying adhoc certificate.")
ssl_context = "adhoc"
else:
ssl_context = None
run_simple(
addr,
int(port),
DebuggedApplication(handler, True),
use_reloader=use_reloader,
use_debugger=True,
threaded=threaded,
ssl_context=ssl_context
)
inner_run()
|
[
"david.fry.tv@gmail.com"
] |
david.fry.tv@gmail.com
|
aa0461d343da384009a3c190199d8085afa3e58b
|
ad325aa7ca5c022d6cb1d358b3bdd86af480b962
|
/CTFEZ.py
|
73469bfa6ff9227cc5209ca2071cd850efe68008
|
[] |
no_license
|
JHCISC/CTF_Tools
|
3b30360cabee479ce390dd6010288a8a4df459e3
|
983b3adcc6603803afa65ebcfcf2ed5e169bf52a
|
refs/heads/master
| 2021-01-12T16:43:07.421318
| 2016-10-20T07:53:57
| 2016-10-20T07:53:57
| 71,437,666
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,131
|
py
|
# Filename: CTFEZ.py
import base64
import math
def asciitrun():
b = input("请输入ascii待转换: ")
bres = [int(x) for x in b.split(' ')]
res = []
for m in bres:
res.append(chr(m))
print(''.join(res))
def bintrun():
a = int(input("请输入?进制:"))
b = input("请输入待转换数字: ")
bres = [x for x in b.split(' ')]
for i in range(0, len(bres)):
print(int(bres[i], a), end="")
print('\n')
def base64trun():
b = input("请输入待转换Base64:")
print("base64解码结果是:", end='')
    print(base64.b64decode(b + '=' * (-len(b) % 4)))  # pad only as much as actually needed
def morsetrun():
CODE = {"----.": "9",
"...-": "V",
"-.": "N",
".-.": "R",
"-----": "0",
"---..": "8",
"--..": "Z",
"....-": "4",
"..---": "2",
".----": "1",
"...": "S",
".--.": "P",
"-...": "B",
"-": "T",
"..-": "U",
"...--": "3",
".--": "W",
".....": "5",
"--.-": "Q",
"..": "I",
"---": "O",
"--": "M",
"--...": "7",
"-.-": "K",
".-": "A",
"-..": "D",
"--.": "G",
"-..-": "X",
".": "E",
".---": "J",
"-....": "6",
"-.-.": "C",
"-.--": "Y",
"..-.": "F",
"....": "H",
".-..": "L",
".-.-.-": ".",
"..--..": "?",
"-.-.--": "!",
"-.--.": "(",
".--.-.": "@",
"---...": ":",
"-...-": "=",
"-....-": "-",
"-.--.-": ")",
"--..--": ",",
".----.": "'",
"..--.-": "_",
"...-..-": "$",
}
b = input('请输入Morse code: ')
b = b.strip()
bres = [x for x in b.split(' ')]
print('转换结果是:')
for i in range(0, len(bres)):
print(CODE[bres[i].upper()], end='')
print('\n')
def fencetrun():
a = input("请输入待破解的栅栏密码:")
a = a.strip()
    num = len(a)  # length of the ciphertext
error = 0
for m in range(2, num):
n = int(num / m)
if num % m == 0:
print("\n栅栏数:", n)
            for i in range(m):  # outer loop over the rail rows
                char = ''  # reset the row buffer
                for j in range(i, num, m):  # inner loop: start i, upper bound num, stride m
                    char = char + a[j]
                print(char, end="")
elif num % m != 0:
error = error + 1
if error == (num - 2):
print("该密码不是标准的栅栏密码")
print("\n")
def caesartrun():
Ciphertext = input("请输入Caesar's cipher:")
Ciphertext = Ciphertext.strip()
for i in range(1, 26):
print(chr(65 + i) + ":", end=" ")
y = []
for a in Ciphertext:
x = ord(a)
if (65 <= x <= 90):
x = x + i
if x > 90:
x = x - 90 + 65 - 1
y.append(chr(x))
else:
y.append(chr(x))
elif (97 <= x <= 122):
x = x + i
if x > 122:
x = x - 122 + 97 - 1
y.append(chr(x))
else:
y.append(chr(x))
end = ''.join(y)
print(end + "\n")
def Bacontrun():
CODE = {"aaaaa": "A",
"aaaab": "B",
"aaaba": "C",
"aaabb": "D",
"aabaa": "E",
"aabab": "F",
"aabba": "G",
"aabbb": "H",
"abaaa": "I",
"abaab": "J",
"ababa": "K",
"ababb": "L",
"abbaa": "M",
"abbab": "N",
"abbba": "O",
"abbbb": "P",
"baaaa": "Q",
"baaab": "R",
"baaba": "S",
"baabb": "T",
"babaa": "U",
"babab": "V",
"babba": "W",
"babbb": "X",
"bbaaa": "Y",
"bbaab": "Z"}
a = input("请输入Bacon's cipher:").lower()
a = ''.join([x for x in a if x != " "])
for i in range(0, len(a), 5):
print(CODE[a[i:i + 5]], end="")
print("\n")
def dangputrun():
DUPU = {'口': '0', '田': '0', '由': '1', '甲': '1', '中': '2', '申': '2', '人': '3', '工': '4',
'大': '5', '王': '6', '天': '6', '夫': '7', '井': '8', '丰': '8', '羊': '9'}
b = input('请输入当铺code: ')
b = b.strip()
print('转换结果是:')
for i in range(0, len(b)):
        if b[i] not in DUPU:
            print(b[i], '不属于当铺密码')
        else:
            print(DUPU[b[i]], end='')  # .upper() dropped: it is a no-op on these characters
print('\n')
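# Simple dispatcher (an assumed addition; the original file only defines the
# helper functions above and never calls them):
if __name__ == '__main__':
    tools = {'1': asciitrun, '2': bintrun, '3': base64trun, '4': morsetrun,
             '5': fencetrun, '6': caesartrun, '7': Bacontrun, '8': dangputrun}
    choice = input('Choose a tool (1: ascii 2: base-n 3: base64 4: morse '
                   '5: rail fence 6: caesar 7: bacon 8: dangpu): ').strip()
    if choice in tools:
        tools[choice]()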
|
[
"noreply@github.com"
] |
JHCISC.noreply@github.com
|
0760d6ed7c77bdaf8bfe485e2f157d1e310e2723
|
eb188b2a3aa86925ee0d74dab8995752cf8ff5f9
|
/brief/migrations/0001_initial.py
|
a7e1bf41b2c6c251911d480afd0f4ba60f26bd54
|
[] |
no_license
|
ecugol/kbrief
|
8ecb6e36cfda80788aa4186c770404daf06e49a2
|
b2809265cbeea6dec51a6eb23e192526f7ed0456
|
refs/heads/master
| 2020-05-20T12:23:09.011401
| 2012-01-08T13:50:45
| 2012-01-08T13:50:45
| 3,109,524
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,740
|
py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'AudienceType'
db.create_table('brief_audiencetype', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('type_of', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('brief', ['AudienceType'])
# Adding model 'SiteAim'
db.create_table('brief_siteaim', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('desc', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('brief', ['SiteAim'])
# Adding model 'SiteTask'
db.create_table('brief_sitetask', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('desc', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('brief', ['SiteTask'])
# Adding model 'SiteType'
db.create_table('brief_sitetype', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('desc', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('brief', ['SiteType'])
# Adding model 'SiteLanguage'
db.create_table('brief_sitelanguage', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal('brief', ['SiteLanguage'])
# Adding model 'SiteAccent'
db.create_table('brief_siteaccent', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('desc', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('brief', ['SiteAccent'])
# Adding model 'ScreenResolution'
db.create_table('brief_screenresolution', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('width', self.gf('django.db.models.fields.IntegerField')()),
('height', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('brief', ['ScreenResolution'])
# Adding model 'Brief'
db.create_table('brief_brief', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('contact_name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('contact_phone', self.gf('django.db.models.fields.CharField')(max_length=20)),
('contact_email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('contact_city', self.gf('django.db.models.fields.CharField')(default=u'\u0421\u0443\u0440\u0433\u0443\u0442', max_length=100)),
('contact_address', self.gf('django.db.models.fields.TextField')()),
('project_name', self.gf('django.db.models.fields.CharField')(max_length=150)),
('sphere', self.gf('django.db.models.fields.TextField')()),
('contact_site', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('contact_domain_name', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('created_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('deadline_date', self.gf('django.db.models.fields.DateField')()),
('deadline_info', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('marketing_events', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('target_audience_other', self.gf('django.db.models.fields.CharField')(max_length=200)),
('products_and_services', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('advantage', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('brand', self.gf('django.db.models.fields.BooleanField')(default=False)),
('brand_curves', self.gf('django.db.models.fields.BooleanField')(default=False)),
('brand_logo', self.gf('django.db.models.fields.BooleanField')(default=False)),
('brand_logo_curves', self.gf('django.db.models.fields.BooleanField')(default=False)),
('brand_characters', self.gf('django.db.models.fields.BooleanField')(default=False)),
('brand_characters_curves', self.gf('django.db.models.fields.BooleanField')(default=False)),
('brand_colors', self.gf('django.db.models.fields.CharField')(max_length=200)),
('site_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['brief.SiteType'])),
('site_type_other', self.gf('django.db.models.fields.CharField')(max_length=200)),
('makeup', self.gf('django.db.models.fields.CharField')(default='fixed-center', max_length=50)),
('layout', self.gf('django.db.models.fields.CharField')(default='2col-left', max_length=50)),
('layout_other', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
))
db.send_create_signal('brief', ['Brief'])
# Adding M2M table for field target_audience on 'Brief'
db.create_table('brief_brief_target_audience', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('brief', models.ForeignKey(orm['brief.brief'], null=False)),
('audiencetype', models.ForeignKey(orm['brief.audiencetype'], null=False))
))
db.create_unique('brief_brief_target_audience', ['brief_id', 'audiencetype_id'])
# Adding M2M table for field site_aims on 'Brief'
db.create_table('brief_brief_site_aims', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('brief', models.ForeignKey(orm['brief.brief'], null=False)),
('siteaim', models.ForeignKey(orm['brief.siteaim'], null=False))
))
db.create_unique('brief_brief_site_aims', ['brief_id', 'siteaim_id'])
# Adding M2M table for field site_tasks on 'Brief'
db.create_table('brief_brief_site_tasks', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('brief', models.ForeignKey(orm['brief.brief'], null=False)),
('sitetask', models.ForeignKey(orm['brief.sitetask'], null=False))
))
db.create_unique('brief_brief_site_tasks', ['brief_id', 'sitetask_id'])
# Adding M2M table for field site_languages on 'Brief'
db.create_table('brief_brief_site_languages', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('brief', models.ForeignKey(orm['brief.brief'], null=False)),
('sitelanguage', models.ForeignKey(orm['brief.sitelanguage'], null=False))
))
db.create_unique('brief_brief_site_languages', ['brief_id', 'sitelanguage_id'])
# Adding M2M table for field site_accent on 'Brief'
db.create_table('brief_brief_site_accent', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('brief', models.ForeignKey(orm['brief.brief'], null=False)),
('siteaccent', models.ForeignKey(orm['brief.siteaccent'], null=False))
))
db.create_unique('brief_brief_site_accent', ['brief_id', 'siteaccent_id'])
# Adding M2M table for field screen_resolution on 'Brief'
db.create_table('brief_brief_screen_resolution', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('brief', models.ForeignKey(orm['brief.brief'], null=False)),
('screenresolution', models.ForeignKey(orm['brief.screenresolution'], null=False))
))
db.create_unique('brief_brief_screen_resolution', ['brief_id', 'screenresolution_id'])
# Adding model 'Competitor'
db.create_table('brief_competitor', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('brief', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['brief.Brief'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('site', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
))
db.send_create_signal('brief', ['Competitor'])
# Adding model 'Companion'
db.create_table('brief_companion', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('brief', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['brief.Brief'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('site', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
))
db.send_create_signal('brief', ['Companion'])
# Adding model 'LikedSite'
db.create_table('brief_likedsite', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('brief', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['brief.Brief'])),
('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('desc', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('brief', ['LikedSite'])
def backwards(self, orm):
# Deleting model 'AudienceType'
db.delete_table('brief_audiencetype')
# Deleting model 'SiteAim'
db.delete_table('brief_siteaim')
# Deleting model 'SiteTask'
db.delete_table('brief_sitetask')
# Deleting model 'SiteType'
db.delete_table('brief_sitetype')
# Deleting model 'SiteLanguage'
db.delete_table('brief_sitelanguage')
# Deleting model 'SiteAccent'
db.delete_table('brief_siteaccent')
# Deleting model 'ScreenResolution'
db.delete_table('brief_screenresolution')
# Deleting model 'Brief'
db.delete_table('brief_brief')
# Removing M2M table for field target_audience on 'Brief'
db.delete_table('brief_brief_target_audience')
# Removing M2M table for field site_aims on 'Brief'
db.delete_table('brief_brief_site_aims')
# Removing M2M table for field site_tasks on 'Brief'
db.delete_table('brief_brief_site_tasks')
# Removing M2M table for field site_languages on 'Brief'
db.delete_table('brief_brief_site_languages')
# Removing M2M table for field site_accent on 'Brief'
db.delete_table('brief_brief_site_accent')
# Removing M2M table for field screen_resolution on 'Brief'
db.delete_table('brief_brief_screen_resolution')
# Deleting model 'Competitor'
db.delete_table('brief_competitor')
# Deleting model 'Companion'
db.delete_table('brief_companion')
# Deleting model 'LikedSite'
db.delete_table('brief_likedsite')
models = {
'brief.audiencetype': {
'Meta': {'object_name': 'AudienceType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'type_of': ('django.db.models.fields.IntegerField', [], {})
},
'brief.brief': {
'Meta': {'object_name': 'Brief'},
'advantage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'brand': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'brand_characters': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'brand_characters_curves': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'brand_colors': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'brand_curves': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'brand_logo': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'brand_logo_curves': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'contact_address': ('django.db.models.fields.TextField', [], {}),
'contact_city': ('django.db.models.fields.CharField', [], {'default': "u'\\u0421\\u0443\\u0440\\u0433\\u0443\\u0442'", 'max_length': '100'}),
'contact_domain_name': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'contact_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'contact_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'contact_site': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deadline_date': ('django.db.models.fields.DateField', [], {}),
'deadline_info': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layout': ('django.db.models.fields.CharField', [], {'default': "'2col-left'", 'max_length': '50'}),
'layout_other': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'makeup': ('django.db.models.fields.CharField', [], {'default': "'fixed-center'", 'max_length': '50'}),
'marketing_events': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'products_and_services': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'project_name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'screen_resolution': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['brief.ScreenResolution']", 'symmetrical': 'False'}),
'site_accent': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['brief.SiteAccent']", 'symmetrical': 'False'}),
'site_aims': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['brief.SiteAim']", 'symmetrical': 'False'}),
'site_languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['brief.SiteLanguage']", 'symmetrical': 'False'}),
'site_tasks': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['brief.SiteTask']", 'symmetrical': 'False'}),
'site_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['brief.SiteType']"}),
'site_type_other': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'sphere': ('django.db.models.fields.TextField', [], {}),
'target_audience': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['brief.AudienceType']", 'symmetrical': 'False'}),
'target_audience_other': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'brief.companion': {
'Meta': {'object_name': 'Companion'},
'brief': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['brief.Brief']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'brief.competitor': {
'Meta': {'object_name': 'Competitor'},
'brief': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['brief.Brief']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'brief.likedsite': {
'Meta': {'object_name': 'LikedSite'},
'brief': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['brief.Brief']"}),
'desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'brief.screenresolution': {
'Meta': {'object_name': 'ScreenResolution'},
'height': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'width': ('django.db.models.fields.IntegerField', [], {})
},
'brief.siteaccent': {
'Meta': {'object_name': 'SiteAccent'},
'desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'brief.siteaim': {
'Meta': {'object_name': 'SiteAim'},
'desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'brief.sitelanguage': {
'Meta': {'object_name': 'SiteLanguage'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'brief.sitetask': {
'Meta': {'object_name': 'SiteTask'},
'desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'brief.sitetype': {
'Meta': {'object_name': 'SiteType'},
'desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['brief']
|
[
"ivan@klice.ru"
] |
ivan@klice.ru
|
6099f8d85b4c661f98e66830fd3da213e1c8c05b
|
8d7d3f2446ff92108621e6c782f3df84bda5bff4
|
/part3_classification/classification_template.py
|
1fa77b50121fd2c7c6f1e6a2a1553e5e4fffe8aa
|
[] |
no_license
|
Jintram/machinelearning1
|
d9f3c53268b696ae10d8984212b57cd8cc202f0e
|
a249d7571e1c7066454bff4de3cb8220275d2a9b
|
refs/heads/master
| 2020-03-29T15:42:23.002960
| 2018-09-24T08:49:13
| 2018-09-24T08:49:13
| 150,076,360
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,021
|
py
|
# Classification template
# Setup location of our datafile
datapath_root = "D:/Work/Udemy/Machine_Learning_Datasets/"
datapath_section = "Part 3 - Classification/Section 16 - Support Vector Machine (SVM)/"
datafile = "Social_Network_Ads.csv"
import os
os.chdir(datapath_root+datapath_section)
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv(datafile)
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split  # moved to sklearn.model_selection in scikit-learn >= 0.20
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Perhaps convenient to add some plotting also - MW
# ===
set1Idxs = np.nonzero(y)
set2Idxs = np.nonzero(y==0)
plt.scatter(X[set1Idxs,0],X[set1Idxs,1],color='blue')
plt.scatter(X[set2Idxs,0],X[set2Idxs,1],color='red')
plt.show()
# Fitting classifier to the Training set
# Create your classifier here
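# A minimal sketch of the missing step; SVC with an RBF kernel is an assumption
# based on this section's topic (Support Vector Machine), not the course's answer.
from sklearn.svm import SVC
classifier = SVC(kernel='rbf', random_state=0)
classifier.fit(X_train, y_train)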
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Classifier (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Classifier (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
|
[
"wehrens@amolf.nl"
] |
wehrens@amolf.nl
|
9be16b69242655c2579d12cdd35bd3ded4cbaf2c
|
d1134559f971c31044868aea44a7b5ba212ab6cf
|
/lib/vkmini/exceptions.py
|
a8fd80b4a220675b60fa91244ed128b41e7b8fc2
|
[] |
no_license
|
jhauy/ICAD-Longpoll
|
5e4dcf6df0b9647957aa4d4208b52218d567b074
|
52f3f2cdc931684c17bd372ec0f31c123df5b97e
|
refs/heads/master
| 2023-07-17T19:29:49.256415
| 2021-08-29T17:42:01
| 2021-08-29T17:42:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
class VkResponseException(Exception):
def __init__(self, error: dict):
self.error_code = error.get('error_code', None)
self.error_msg = error.get('error_msg', None)
self.request_params = error.get('request_params', None)
class NetworkError(Exception):
code: int
def __init__(self, code: int):
self.code = code
class TooManyRequests(VkResponseException):
data: dict
def __init__(self, error: dict, data: dict):
super().__init__(error)
self.data = data
class TokenInvalid(VkResponseException):  # nah, seriously, the name says it all
pass
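# Illustrative use (not in the original module):
# err = {'error_code': 6, 'error_msg': 'Too many requests per second'}
# raise TooManyRequests(err, data={'retry_after': 1})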
|
[
"Elchin751@gmail.com"
] |
Elchin751@gmail.com
|
13323fbc9543c69d56f34f0da842c481b56225a4
|
86adf136169bc4ab5bdfec5a32d7532f05b80e92
|
/postfeed/migrations/0004_auto_20200408_2332.py
|
7d7b75be87601a39bdf7078324c817a6831cc1fa
|
[] |
no_license
|
idfinternship/project-c
|
57865cb80a2a929ec7525194a479d92ce74ac1af
|
a8509f4f43d91df9e7d144f2bc507e024a92918b
|
refs/heads/master
| 2022-09-21T11:27:22.618524
| 2020-06-04T14:28:14
| 2020-06-04T14:28:14
| 240,582,349
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 878
|
py
|
# Generated by Django 3.0.4 on 2020-04-08 20:32
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('postfeed', '0003_auto_20200402_1137'),
]
operations = [
migrations.AddField(
model_name='post',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='post', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='post',
name='creation_date',
            # auto-generated frozen timestamp; django.utils.timezone.now is usually what is wanted here
            field=models.DateTimeField(default=datetime.datetime(2020, 4, 8, 20, 32, 41, 989623, tzinfo=utc)),
),
]
|
[
"mantuxas001@gmail.com"
] |
mantuxas001@gmail.com
|
63cb45673d7d8aba28560d316bac8bffee48e68a
|
272b7fe305ed45aac9907810b4754ce67eacdd00
|
/proxypool/spider/crawler.py
|
3eb983f2aa57a2bd8ff9f07444e2c0cbbc0d0453
|
[] |
no_license
|
huichen90/visual-proxypool
|
1dad83931d52150a7ebf98075d2643e923c70ae9
|
53b269961246997c7fb6797b1996c4107168344f
|
refs/heads/master
| 2021-04-15T13:42:49.788361
| 2018-03-21T02:38:07
| 2018-03-21T02:38:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,399
|
py
|
import re
from proxypool.spider.utils import get_page
from pyquery import PyQuery as pq
from proxypool.log.save_log import add_spider_log
from proxypool.common.setting import *
class ProxyMetaclass(type):
def __new__(cls, name, bases, attrs):
count = 0
attrs['__CrawlFunc__'] = []
for k, v in attrs.items():
if 'crawl_' in k:
attrs['__CrawlFunc__'].append(k)
count += 1
attrs['__CrawlFuncCount__'] = count
return type.__new__(cls, name, bases, attrs)
class Crawler(object, metaclass=ProxyMetaclass):
def get_proxies(self, callback):
proxies = []
for proxy in eval("self.{}()".format(callback)):
            add_spider_log('Got proxy - %s' % proxy, LOG_INFO)
proxies.append(proxy)
return proxies
# def crawl_daxiang(self):
# url = 'http://vtp.daxiangdaili.com/ip/?tid=559363191592228&num=50&filter=on'
# html = get_page(url)
# if html:
# urls = html.split('\n')
# for url in urls:
# yield url
def crawl_daili66(self, page_count=4):
"""
        Fetch proxies from daili66
        :param page_count: number of pages to crawl
        :return: proxy address strings
"""
start_url = 'http://www.66ip.cn/{}.html'
urls = [start_url.format(page) for page in range(1, page_count + 1)]
for url in urls:
            add_spider_log('Crawling %s' % url, LOG_INFO)
html = get_page(url)
if html:
doc = pq(html)
trs = doc('.containerbox table tr:gt(0)').items()
for tr in trs:
ip = tr.find('td:nth-child(1)').text()
port = tr.find('td:nth-child(2)').text()
yield ':'.join([ip, port])
def crawl_proxy360(self):
"""
        Fetch proxies from Proxy360
        :return: proxy address strings
"""
start_url = 'http://www.proxy360.cn/Region/China'
        add_spider_log('Crawling %s' % start_url, LOG_INFO)
html = get_page(start_url)
if html:
doc = pq(html)
lines = doc('div[name="list_proxy_ip"]').items()
for line in lines:
ip = line.find('.tbBottomLine:nth-child(1)').text()
port = line.find('.tbBottomLine:nth-child(2)').text()
yield ':'.join([ip, port])
def crawl_goubanjia(self):
"""
        Fetch proxies from Goubanjia
        :return: proxy address strings
"""
start_url = 'http://www.goubanjia.com/free/gngn/index.shtml'
html = get_page(start_url)
if html:
doc = pq(html)
tds = doc('td.ip').items()
for td in tds:
td.find('p').remove()
yield td.text().replace(' ', '')
    def crawl_ip181(self):
        start_url = 'http://www.ip181.com/'
        html = get_page(start_url)
        if html:  # guard against a failed fetch, matching the other crawlers
            ip_address = re.compile('<tr.*?>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
            # \s* matches whitespace, so the pattern spans line breaks
            re_ip_address = ip_address.findall(html)
            for address, port in re_ip_address:
                result = address + ':' + port
                yield result.replace(' ', '')
def crawl_ip3366(self):
for page in range(1, 4):
start_url = 'http://www.ip3366.net/free/?stype=1&page={}'.format(page)
html = get_page(start_url)
ip_address = re.compile('<tr>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
            # \s* matches whitespace, so the pattern spans line breaks
re_ip_address = ip_address.findall(html)
for address, port in re_ip_address:
result = address+':'+ port
yield result.replace(' ', '')
def crawl_kxdaili(self):
for i in range(1, 11):
start_url = 'http://www.kxdaili.com/ipList/{}.html#ip'.format(i)
html = get_page(start_url)
ip_address = re.compile('<tr.*?>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
            # \s* matches whitespace, so the pattern spans line breaks
re_ip_address = ip_address.findall(html)
for address, port in re_ip_address:
result = address + ':' + port
yield result.replace(' ', '')
def crawl_premproxy(self):
for i in ['China-01','China-02','China-03','China-04','Taiwan-01']:
start_url = 'https://premproxy.com/proxy-by-country/{}.htm'.format(i)
html = get_page(start_url)
if html:
ip_address = re.compile('<td data-label="IP:port ">(.*?)</td>')
re_ip_address = ip_address.findall(html)
for address_port in re_ip_address:
yield address_port.replace(' ','')
def crawl_xroxy(self):
for i in ['CN','TW']:
start_url = 'http://www.xroxy.com/proxylist.php?country={}'.format(i)
html = get_page(start_url)
if html:
ip_address1 = re.compile("title='View this Proxy details'>\s*(.*).*")
re_ip_address1 = ip_address1.findall(html)
ip_address2 = re.compile("title='Select proxies with port number .*'>(.*)</a>")
re_ip_address2 = ip_address2.findall(html)
for address,port in zip(re_ip_address1,re_ip_address2):
address_port = address+':'+port
yield address_port.replace(' ','')
def crawl_kuaidaili(self):
for i in range(1, 4):
start_url = 'http://www.kuaidaili.com/free/inha/{}/'.format(i)
html = get_page(start_url)
if html:
ip_address = re.compile('<td data-title="IP">(.*?)</td>')
re_ip_address = ip_address.findall(html)
port = re.compile('<td data-title="PORT">(.*?)</td>')
re_port = port.findall(html)
for address,port in zip(re_ip_address, re_port):
address_port = address+':'+port
yield address_port.replace(' ','')
def crawl_xicidaili(self):
for i in range(1, 3):
start_url = 'http://www.xicidaili.com/nn/{}'.format(i)
headers = {
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Cookie':'_free_proxy_session=BAh7B0kiD3Nlc3Npb25faWQGOgZFVEkiJWRjYzc5MmM1MTBiMDMzYTUzNTZjNzA4NjBhNWRjZjliBjsAVEkiEF9jc3JmX3Rva2VuBjsARkkiMUp6S2tXT3g5a0FCT01ndzlmWWZqRVJNek1WanRuUDBCbTJUN21GMTBKd3M9BjsARg%3D%3D--2a69429cb2115c6a0cc9a86e0ebe2800c0d471b3',
'Host':'www.xicidaili.com',
'Referer':'http://www.xicidaili.com/nn/3',
'Upgrade-Insecure-Requests':'1',
}
html = get_page(start_url, options=headers)
if html:
find_trs = re.compile('<tr class.*?>(.*?)</tr>', re.S)
trs = find_trs.findall(html)
for tr in trs:
find_ip = re.compile('<td>(\d+\.\d+\.\d+\.\d+)</td>')
re_ip_address = find_ip.findall(tr)
find_port = re.compile('<td>(\d+)</td>')
re_port = find_port.findall(tr)
for address,port in zip(re_ip_address, re_port):
address_port = address+':'+port
yield address_port.replace(' ','')
    def crawl_ip3366(self):  # NOTE: redefines crawl_ip3366 above; only this version remains on the class
for i in range(1, 4):
start_url = 'http://www.ip3366.net/?stype=1&page={}'.format(i)
html = get_page(start_url)
if html:
find_tr = re.compile('<tr>(.*?)</tr>', re.S)
trs = find_tr.findall(html)
for s in range(1, len(trs)):
find_ip = re.compile('<td>(\d+\.\d+\.\d+\.\d+)</td>')
re_ip_address = find_ip.findall(trs[s])
find_port = re.compile('<td>(\d+)</td>')
re_port = find_port.findall(trs[s])
for address,port in zip(re_ip_address, re_port):
address_port = address+':'+port
yield address_port.replace(' ','')
def crawl_iphai(self):
start_url = 'http://www.iphai.com/'
html = get_page(start_url)
if html:
find_tr = re.compile('<tr>(.*?)</tr>', re.S)
trs = find_tr.findall(html)
for s in range(1, len(trs)):
find_ip = re.compile('<td>\s+(\d+\.\d+\.\d+\.\d+)\s+</td>', re.S)
re_ip_address = find_ip.findall(trs[s])
find_port = re.compile('<td>\s+(\d+)\s+</td>', re.S)
re_port = find_port.findall(trs[s])
for address,port in zip(re_ip_address, re_port):
address_port = address+':'+port
yield address_port.replace(' ','')
def crawl_89ip(self):
start_url = 'http://www.89ip.cn/apijk/?&tqsl=1000&sxa=&sxb=&tta=&ports=&ktip=&cf=1'
html = get_page(start_url)
if html:
find_ips = re.compile('(\d+\.\d+\.\d+\.\d+:\d+)', re.S)
ip_ports = find_ips.findall(html)
for address_port in ip_ports:
yield address_port
def crawl_data5u(self):
start_url = 'http://www.data5u.com/free/gngn/index.shtml'
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Cookie': 'JSESSIONID=47AA0C887112A2D83EE040405F837A86',
'Host': 'www.data5u.com',
'Referer': 'http://www.data5u.com/free/index.shtml',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',
}
html = get_page(start_url, options=headers)
if html:
ip_address = re.compile(r'<span><li>(\d+\.\d+\.\d+\.\d+)</li>.*?<li class="port.*?>(\d+)</li>', re.S)
re_ip_address = ip_address.findall(html)
for address, port in re_ip_address:
    result = address + ':' + port
    yield result.replace(' ', '')
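# Hedged sketch (not in the original file): every crawler above calls a
# get_page() helper that is defined elsewhere in this project. Based on how
# it is invoked here -- get_page(url) or get_page(url, options=headers) --
# a minimal requests-based stand-in might look like the following; the real
# helper may add retries, proxy support, or encoding handling.
import requests

def get_page(url, options=None):
    """Fetch url and return its body as text, or None on failure."""
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}
    if options:
        headers.update(options)  # extra per-site headers, as crawl_xicidaili passes
    try:
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.text
    except requests.RequestException:
        return None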
|
[
"Garfield_lv@163.com"
] |
Garfield_lv@163.com
|
811fbe2a011ea54189f0ce944a3b227c27f9d69e
|
b6132d5e45ae5194a85658359f04cd5d89872676
|
/LeetCode/LRU Cache.py
|
bd44b3e85a168f49be83009d9ba403c2be48de87
|
[] |
no_license
|
OscarZeng/Data_Structure_and_Algo
|
e59c42a3b82c78094dc8d78d1c439109da985f12
|
08ff466c7f01a3ab8d01b26a18591b9a8efacdb7
|
refs/heads/master
| 2021-07-25T20:33:20.685146
| 2020-09-16T13:35:51
| 2020-09-16T13:35:51
| 217,005,479
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,144
|
py
|
# My own solution: essentially the same idea as the official one below, but
# the most recently used node is kept at the tail (before the dummy tail)
# instead of at the head.
class DlinkNode:
def __init__(self, key=None, value=None):
self.prev = None
self.next = None
self.key = key
self.value = value
class LRUCache:
def __init__(self, capacity: int):
self.Cache = {}
self.dummyhead = DlinkNode()
self.dummytail = DlinkNode()
self.dummyhead.next = self.dummytail
self.dummytail.prev = self.dummyhead
self.capacity = capacity
self.size = 0
def removeNode(self, theNode):
theNode.prev.next = theNode.next
theNode.next.prev = theNode.prev
def addToLatest(self, theNode):
theNode.next = self.dummytail
theNode.prev = self.dummytail.prev
self.dummytail.prev.next = theNode
self.dummytail.prev = theNode
def moveToLatest(self, theNode):
self.removeNode(theNode)
self.addToLatest(theNode)
def deleteOldest(self):
oldestNode = self.dummyhead.next
self.removeNode(oldestNode)
return oldestNode
def get(self, key: int) -> int:
if key not in self.Cache:
return -1
else:
theNode = self.Cache[key]
self.moveToLatest(theNode)
return theNode.value
def put(self, key: int, value: int) -> None:
if (key not in self.Cache):
newNode = DlinkNode(key, value)
self.Cache[key] = newNode
self.addToLatest(newNode)
self.size += 1
if self.size > self.capacity:
oldestNode = self.deleteOldest()
self.Cache.pop(oldestNode.key)
self.size -= 1
else:
theNode = self.Cache[key]
theNode.value = value
self.moveToLatest(theNode)
return None
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
# LeetCode official solution: hash map + doubly linked list
class DLinkedNode:
def __init__(self, key=0, value=0):
self.key = key
self.value = value
self.prev = None
self.next = None
class LRUCache:
def __init__(self, capacity: int):
self.cache = dict()
# Use dummy head and dummy tail sentinel nodes
self.head = DLinkedNode()
self.tail = DLinkedNode()
self.head.next = self.tail
self.tail.prev = self.head
self.capacity = capacity
self.size = 0
def get(self, key: int) -> int:
if key not in self.cache:
return -1
# If the key exists, locate the node via the hash map, then move it to the head
node = self.cache[key]
self.moveToHead(node)
return node.value
def put(self, key: int, value: int) -> None:
if key not in self.cache:
# If the key does not exist, create a new node
node = DLinkedNode(key, value)
# Add it to the hash map
self.cache[key] = node
# Add it to the head of the doubly linked list
self.addToHead(node)
self.size += 1
if self.size > self.capacity:
# If over capacity, remove the tail node of the doubly linked list
removed = self.removeTail()
# And delete the corresponding entry from the hash map
self.cache.pop(removed.key)
self.size -= 1
else:
# If the key exists, locate the node via the hash map, update its value, then move it to the head
node = self.cache[key]
node.value = value
self.moveToHead(node)
def addToHead(self, node):
node.prev = self.head
node.next = self.head.next
self.head.next.prev = node
self.head.next = node
def removeNode(self, node):
node.prev.next = node.next
node.next.prev = node.prev
def moveToHead(self, node):
self.removeNode(node)
self.addToHead(node)
def removeTail(self):
node = self.tail.prev
self.removeNode(node)
return node
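# Hedged usage sketch (not part of either solution above): a quick check of
# the eviction behaviour both classes share -- with capacity 2, touching
# key 1 makes key 2 the least recently used, so inserting key 3 evicts key 2.
cache = LRUCache(2)
cache.put(1, 1)
cache.put(2, 2)
assert cache.get(1) == 1   # key 1 is now most recently used
cache.put(3, 3)            # capacity exceeded: key 2 is evicted
assert cache.get(2) == -1  # key 2 is gone
assert cache.get(3) == 3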
|
[
"zenghaonus@outlook.com"
] |
zenghaonus@outlook.com
|
8144e2d5a1590843bfff997bf5bbab02ae5b5b95
|
774b74e8a928def669934cd08ad6603cbe8fbefb
|
/learning_templates/basic_app/urls.py
|
d77a4e09e4f681f8853b2e2a3c0906fb2d19764b
|
[] |
no_license
|
VipulGupta910/django-deployment-example
|
c02a07f673ab5e0e70c3ff65bf85e8304c70d051
|
0555a15ac1e8b03b18e32402947c6462446d54fb
|
refs/heads/master
| 2020-03-23T00:04:42.882658
| 2018-07-13T13:07:25
| 2018-07-13T13:07:25
| 140,842,665
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
from django.urls import path
from basic_app import views

# TEMPLATE TAGGING: app_name namespaces these URLs so templates can reverse
# them as 'basic_app:relative' and 'basic_app:other'
app_name = 'basic_app'
urlpatterns = [
path('relative', views.relative, name='relative'),
path('other', views.other, name='other'),
]
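# Hedged sketch (not in the original file): the two routes above assume a
# basic_app/views.py exposing 'relative' and 'other'. A minimal version
# consistent with this urls.py might look like the following; the template
# names are placeholders, not taken from the original project.
#
# from django.shortcuts import render
#
# def relative(request):
#     return render(request, 'basic_app/relative_url_templates.html')
#
# def other(request):
#     return render(request, 'basic_app/other.html')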
|
[
"vipulgupta910@gmail.com"
] |
vipulgupta910@gmail.com
|
5638846bdd0417fd4dea73978fae51c6f38e750a
|
2d74faff3a6491e2cb26fc7b935c36b58c6989a8
|
/init.py
|
3cd2291f208134fd6c188b0677de214dca0da1a2
|
[] |
no_license
|
ivojnovic/AHP
|
d5d5570e41f07524a7fb6623266ce63c4d97dd88
|
318e518987701324cebca9f4d3b31e19075a70c9
|
refs/heads/master
| 2020-12-23T23:42:38.018098
| 2020-02-08T10:24:47
| 2020-02-08T10:24:47
| 237,312,241
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,010
|
py
|
import tkinter as tk
from tkinter import ttk
import numpy as np
from PIL import ImageTk, Image
class ERP:
def __init__(self, ime):
self.ime = ime
lista_svih_sustava = []
lista_svih_kriterija = []
lista_odabranih_sustava = []
odabrani_sustavi = {}
odabrani_kriteriji = {}
odabrani_sustavi_po_kriterijima = {}
filepath = 'popis_sustava.txt'
# Load the list of all ERP systems, one name per line (stops at the first blank line)
with open(filepath) as fp:
    line = fp.readline()
    line = line[:-1]  # strip the trailing newline
    while line:
        _erp = ERP(line)
        lista_svih_sustava.append(_erp)
        line = fp.readline()
        line = line[:-1]

filepath = 'popis_kriterija.txt'
# Load the list of all criteria, one per line (stops at the first blank line)
with open(filepath) as fp:
    line = fp.readline()
    line = line[:-1]  # strip the trailing newline
    while line:
        lista_svih_kriterija.append(line)
        line = fp.readline()
        line = line[:-1]
# Maps the 17 positions of the comparison widget onto the Saaty AHP scale (9 down to 1/9)
scale_dictionary = {
1: 9,
2: 8,
3: 7,
4: 6,
5: 5,
6: 4,
7: 3,
8: 2,
9: 1,
10: 1/2,
11: 1/3,
12: 1/4,
13: 1/5,
14: 1/6,
15: 1/7,
16: 1/8,
17: 1/9
}
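# Hedged sketch (not in the original file): one way values from
# scale_dictionary could fill a reciprocal pairwise-comparison matrix for n
# alternatives. The function name and arguments are illustrative only; np is
# already imported at the top of this file.
def build_pairwise_matrix(n, judgements):
    """judgements maps (i, j) pairs with i < j to widget positions 1..17."""
    matrix = np.ones((n, n))
    for (i, j), position in judgements.items():
        value = scale_dictionary[position]
        matrix[i][j] = value
        matrix[j][i] = 1.0 / value  # AHP comparison matrices are reciprocal
    return matrix

# e.g. build_pairwise_matrix(3, {(0, 1): 3, (0, 2): 5, (1, 2): 9})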
|
[
"noreply@github.com"
] |
ivojnovic.noreply@github.com
|