text stringlengths 8 6.05M |
|---|
"""
Author: Ben Knisley [benknisley@gmail.com]
Date: 26 March, 2021
"""
def process_message(data):
    """Decode a Manchester-encoded bit string into a hex string.

    Args:
        data: string of '0'/'1' characters; each data bit is transmitted
            twice (the first copy inverted), so only every other character
            starting at index 1 carries information.

    Returns:
        Hex string, one character per 4-bit nibble.
    """
    ## Each bit is sent twice (first copy inverted): keep every other
    ## character starting at index 1 to recover the raw data bits.
    bits = data[1::2]
    ## Split into 4-char nibbles and flip each one, since nibbles arrive
    ## least-significant-bit first.
    nibbles = [bits[i:i + 4][::-1] for i in range(0, len(bits), 4)]
    ## If the second nibble reads all-zero the polarity is inverted:
    ## flip every bit (replaces the old replace('0','x')... chain).
    if nibbles[1] == '0000':
        invert = str.maketrans('01', '10')
        nibbles = [n.translate(invert) for n in nibbles]
    ## Each 4-bit nibble maps to exactly one hex digit.
    return ''.join('{:x}'.format(int(n, 2)) for n in nibbles)
def hex2data(hexString):
    """Extract sensor fields from a decoded hex message string.

    Expected layout (0-based indices):
        [5:9]   sensor ID
        [9]     channel
        [12]    flag nibble
        [13:16] temperature digits, least-significant first, tenths of a degree
        [16]    temperature sign nibble ('0' => positive)
        [17:19] humidity digits, least-significant first

    Returns:
        (True, [ID, chan, flag, temp, hum]) on success,
        (False, []) when the string is too short or malformed.
    """
    try:
        ## Process hexString into data
        ID = hexString[5:9]
        chan = hexString[9:10]
        flag = hexString[12:13]
        ## Temperature digits arrive least-significant first, scaled by 10.
        magnitude = float(hexString[13:16][::-1]) * 0.1
        temp = magnitude if hexString[16:17] == '0' else -magnitude
        hum = int(hexString[17:19][::-1])
        ## Return success status, and data list
        return True, [ID, chan, flag, temp, hum]
    ## Slicing never raises, but float()/int() do on short or garbled input.
    ## (Narrowed from a bare `except:` which also swallowed KeyboardInterrupt.)
    except (ValueError, TypeError):
        return False, []
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import gzip
import datetime
import string
import json
import statistics
import sys
import getopt
import logging
# log_format ui_short '$remote_addr $remote_user $http_x_real_ip [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for" "$http_X_REQUEST_ID" "$http_X_RB_USER" '
# '$request_time';
# Default configuration; may be overridden per-call via a config file.
config = {
    "REPORT_SIZE": 1000,
    "REPORT_DIR": "./reports",
    "LOG_DIR": "./log"
}


def get_config(config_file=''):
    """Return the effective configuration as a dict.

    Starts from the module-level defaults and overlays any recognised
    ``KEY: value`` lines found in *config_file* (values are stripped of
    quotes and whitespace and kept as strings).

    Args:
        config_file: optional path to a config file; '' means defaults only.
    """
    # Copy the defaults so repeated calls never mutate the module-level
    # dict (the original aliased it, leaking overrides between calls).
    result_config = dict(config)
    config_items = ("REPORT_SIZE", "REPORT_DIR", "LOG_DIR")
    parse_conf = {}
    if config_file:
        with open(config_file, 'r') as cf:
            for line in cf:
                if line.startswith(config_items):
                    # partition() keeps any further ':' inside the value
                    # (the old split+join silently deleted them).
                    key, _, value = line.partition(":")
                    parse_conf[key] = value.strip('\n" ')
    result_config.update(parse_conf)
    return result_config
def get_log_files(directory):
    """Yield (path, date) tuples for nginx UI access logs, oldest first.

    Only regular files named ``nginx-access-ui.log-YYYYMMDD[.gz]`` are
    considered; names without a parseable 8-digit date are skipped instead
    of crashing (the original called ``.group(1)`` on a possible None and
    ``strptime`` on possibly-empty digits).
    """
    pattern = re.compile(r"nginx-access-ui\.log-(\d{8})(\.gz)?")
    files_with_date = []
    for name in os.listdir(directory):
        path = directory + '/' + name
        if not (os.path.isfile(path) and name.startswith('nginx-access-ui')):
            continue
        match = pattern.search(name)
        if not match:
            continue
        try:
            date = datetime.datetime.strptime(match.group(1), '%Y%m%d')
        except ValueError:
            # e.g. "nginx-access-ui.log-99999999": digits but not a date.
            continue
        files_with_date.append((path, date))
    files_with_date.sort(key=lambda r: r[1])
    if not files_with_date:
        logging.info('Files not found')
    for f in files_with_date:
        yield f
def get_message_from_file(filename):
    """Yield log records from *filename* one line at a time.

    Gzipped files are opened in binary mode and each raw line is passed
    through str(), so gzip lines come back in repr form ("b'...'") —
    callers strip the artifacts themselves.
    """
    if not filename.endswith('.gz'):
        with open(filename, 'r') as handle:
            yield from handle
    else:
        with gzip.open(filename, 'rb') as handle:
            for raw_line in handle:
                yield str(raw_line)
def parse_log_massage(message):
    """Parse one nginx access-log line into (url, request_time, error).

    url is the 8th space-separated field, request_time the last field
    rounded to 3 decimals. On any parse failure the defaults ('', 0)
    are returned with error=True.
    """
    url = ''
    request_time = 0
    error = False
    try:
        fields = message.split(' ')
        url = fields[7]
        # Strip repr artifacts (backslash, 'n', quote) left by gzip lines.
        request_time = round(float(fields[-1].rstrip("\\n\'")), 3)
    except Exception:
        error = True
    return url, request_time, error
def perc(count_all, count):
    """Return *count* as a percentage of *count_all*, rounded to 3 places."""
    return round(count * 100 / count_all, 3)


def check_error_count(all_message, error_count, threshold):
    """True when the parse-error rate is within *threshold* percent.

    Logs and returns False once the error percentage exceeds the limit.
    """
    within_limit = perc(all_message, error_count) <= threshold
    if not within_limit:
        logging.info('Errors threshold exceeded')
    return within_limit
def get_report_name(date, report_dir):
    """Build the report path for *date*: <report_dir>/report_YYYY.MM.DD.html."""
    stamp = date.strftime("%Y.%m.%d")
    return f"{report_dir}/report_{stamp}.html"
def main(argv):
    """Entry point: parse args, aggregate per-URL timings, render HTML report.

    For each nginx log found in the current directory, accumulates request
    counts/times per URL and writes a templated report into REPORT_DIR.
    """
    # Only one option is supported: --config <file>.
    opts, _ = getopt.getopt(argv, "", ["config="])
    if opts and opts[0][0] == '--config':
        conf = get_config(opts[0][1])
    else:
        conf = get_config()
    # Log to a file inside LOG_DIR when configured, otherwise to stderr.
    if conf['LOG_DIR']:
        logfile = conf['LOG_DIR'] + "/log_analyzer.log"
    else:
        logfile = None
    logging.basicConfig(filename=logfile,
                        format='[%(asctime)s] %(levelname).1s %(message)s',
                        datefmt='%Y.%m.%d %H:%M:%S',
                        level=logging.INFO)
    result_dict = {}
    NUM_REQUESTS = 0
    SUM_TIME = 0
    try:
        for i in get_log_files("."):
            error_count = 0
            # Skip logs whose report was already generated.
            if os.path.isfile(get_report_name(
                    i[1],
                    conf["REPORT_DIR"]
                    )):
                logging.info(f"work with file {i[0]} already done")
                continue
            for x in get_message_from_file(i[0]):
                url, r_time, error = parse_log_massage(x)
                # Running per-URL aggregates; time_values kept for the median.
                current = result_dict.get(url, {
                    "count": 0,
                    "time_sum": 0,
                    "time_avg": 0,
                    "time_max": 0,
                    "time_values": []
                })
                current["count"] += 1
                current["time_sum"] += r_time
                current["time_avg"] = current["time_sum"] / current["count"]
                current["time_max"] = r_time if r_time > current["time_max"] \
                    else current["time_max"]
                current["time_values"].append(r_time)
                # NOTE(review): setdefault followed by assignment is redundant —
                # the assignment alone stores `current`.
                result_dict.setdefault(url, current)
                result_dict[url] = current
                NUM_REQUESTS += 1
                SUM_TIME += r_time
                if error:
                    error_count += 1
            # Abort the run when more than 40% of lines failed to parse.
            if not check_error_count(NUM_REQUESTS,
                                     error_count,
                                     40):
                sys.exit(1)
            RESULT_LIST = []
            for k, v in result_dict.items():
                item = {
                    "count": v["count"],
                    "time_sum": round(v["time_sum"], 3),
                    "time_avg": round(v["time_avg"], 3),
                    "time_max": v["time_max"]
                }
                item["url"] = k
                item["time_med"] = round(statistics.median(v["time_values"]), 3)
                item["count_perc"] = perc(NUM_REQUESTS, v["count"])
                item["time_perc"] = perc(SUM_TIME, v["time_sum"])
                RESULT_LIST.append(item)
            # NOTE(review): `del result_dict` makes a second log file raise
            # NameError on the next iteration (silently caught below) — TODO confirm
            # whether only one log per run was ever intended.
            del result_dict
            # Keep only the REPORT_SIZE slowest URLs by total time.
            RESULT_LIST = sorted(RESULT_LIST,
                                 key=lambda r: r["time_sum"], reverse=True)
            RESULT_LIST = RESULT_LIST[:int(conf["REPORT_SIZE"]):]
            # Render the JSON table into the report template.
            with open('report.html', 'r') as f:
                s = string.Template(f.read())
                result_string = s.safe_substitute(
                    table_json=json.dumps(RESULT_LIST))
            with open(get_report_name(i[1], conf["REPORT_DIR"]),
                      'w') as save_f:
                save_f.write(result_string)
    except Exception as e:
        # Top-level boundary: log the traceback instead of crashing.
        logging.exception(f"Exception occurred {e}")
if __name__ == "__main__":
    main(sys.argv[1:])
#/usr/bin/ipython
from mpi4py import MPI
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy.linalg as la
from mpl_toolkits.mplot3d import Axes3D
import GreenF
import Impurity
def line(g, x, E, theta):
    """Sample g.Roh along a radial line rotated by *theta* (radians).

    Each entry of *x* is treated as a distance along the rotated x-axis;
    returns an array of the same shape with the sampled densities.
    (E is accepted for interface compatibility but unused.)
    """
    rotation = np.array([[np.cos(theta), -np.sin(theta)],
                         [np.sin(theta), np.cos(theta)]])
    densities = np.zeros(x.shape)
    for idx in range(x.shape[0]):
        point = rotation.dot(np.array([x[idx], 0.0]))
        densities[idx] = g.Roh(point)
    return densities
def plot_k(g, ax, E0):
    """Plot the real parts of both momenta k1, k2 over an energy sweep.

    Sweeps E in [-10, 10], queries g.find_ks at E + g.eta (small imaginary
    shift), draws both branches plus a dotted vertical marker at *E0*.
    """
    E = np.linspace(-10, 10, 100)
    # `np.complex_` was removed in NumPy 2.0; the builtin `complex`
    # maps to complex128 and is portable across versions.
    k1 = np.zeros(E.shape, dtype=complex)
    k2 = np.zeros(E.shape, dtype=complex)
    for i in range(E.shape[0]):
        k1[i], k2[i] = g.find_ks(E[i] + g.eta)
    ax.plot(E, np.real(k1))
    ax.plot(E, np.real(k2))
    # Dotted vertical line marking the working energy E0.
    ax.plot([E0, E0], ax.get_ylim(), 'k:')
    ax.set_xlabel("E")
    ax.set_ylabel("k")
def plot_line(ax, g, E, theta):
    """Plot the density change along a radial line at angle *theta*.

    Samples r in [0.2, 3.0] and labels the curve with theta in degrees.
    """
    radii = np.linspace(0.2, 3.0, 300)
    curve_label = r"$\theta$ = %0.1f" % (theta / np.pi * 180)
    ax.plot(radii, line(g, radii, E, theta), label=curve_label)
    ax.set_xlabel("r")
    ax.set_ylabel(r"$\Delta \rho$")
def latex(i, mag):
    """Print LaTeX figure environments for the Bx and Bz plots of set *i*.

    *i* is the 0-based set index (captions/filenames use i+1); *mag* is
    embedded verbatim in the image filename and caption.
    """
    set_no = str(i + 1)
    for axis in ("Bx", "Bz"):
        print(r"\begin{figure}")
        print(r"\centering")
        print(r"\includegraphics[width=0.7\textwidth]{img/Set_"
              + set_no + "_" + axis + "_mag_" + str(mag) + ".pdf}")
        print(r"\caption{Set %d , magnetic " % (i + 1) + str(mag) + " " + axis + "}")
        print(r"\end{figure}")
# MPI context: every rank runs this script; GreenF.GF is given the rank and
# communicator so the Green's-function evaluation can be parallelised.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
nprocs = comm.Get_size()

# Single impurity at the origin with potential V and field B along z.
N = 1
V = 0.23 * np.ones(N)
R = np.array([[0.0, 0.0]])
B = np.zeros((N, 3))
B[:, 2] = V[0]
I = Impurity.Imp(R, V, B)

# Five parameter sets: mass m, Rashba alpha, Dresselhaus beta, field B0.
m = 10.0 * np.ones(5)
alpha = np.array([1E-3, 1.0, 1E-3, 2.0, 1E-3])
beta = np.array([1E-3, 1E-3, 1.0, 1E-3, 1.0])
B0 = np.array([1.0, 0.0, 0.0, 1.0, 2.0])
mag = False
# Three angles in [0, 2*pi): endpoint dropped since 2*pi repeats 0.
t = np.linspace(0, 2 * np.pi, 4)[:-1]
E = 2.5
for i in range(5):
    f, ax1 = plt.subplots(1, 1)
    g = GreenF.GF(m[i], alpha[i], beta[i], B0[i], I, E, mag, nprocs, rank, comm)
    #plot_k(g, ax1, E)
    # One density-profile curve per angle on the same axes.
    for theta in t:
        plot_line(ax1, g, E, theta)
    #f.title("Set = %d, E = %f"%(i+1, E))
    plt.legend()
    plt.tight_layout()
    # NOTE(review): filename says "Bz" regardless of parameter set — confirm intended.
    plt.savefig("plots/Set_%d_Bz_mag_" % (i + 1) + str(mag) + ".pdf")
    plt.clf()
    #plt.show()
    # Emit the matching LaTeX figure blocks to stdout.
    latex(i, mag)
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import logging
import os
from dataclasses import dataclass
from typing import Any
from pants.backend.helm.dependency_inference.unittest import rules as dependency_rules
from pants.backend.helm.subsystems.unittest import HelmUnitTestSubsystem
from pants.backend.helm.subsystems.unittest import rules as subsystem_rules
from pants.backend.helm.target_types import (
HelmChartFieldSet,
HelmChartMetaSourceField,
HelmChartTarget,
HelmUnitTestDependenciesField,
HelmUnitTestSourceField,
HelmUnitTestStrictField,
HelmUnitTestTestsGeneratorTarget,
HelmUnitTestTestTarget,
HelmUnitTestTimeoutField,
)
from pants.backend.helm.util_rules import tool
from pants.backend.helm.util_rules.chart import HelmChart, HelmChartRequest
from pants.backend.helm.util_rules.sources import HelmChartRoot, HelmChartRootRequest
from pants.backend.helm.util_rules.tool import HelmProcess
from pants.base.deprecated import warn_or_error
from pants.core.goals.generate_snapshots import GenerateSnapshotsFieldSet, GenerateSnapshotsResult
from pants.core.goals.test import TestFieldSet, TestRequest, TestResult, TestSubsystem
from pants.core.target_types import FileSourceField, ResourceSourceField
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.core.util_rules.stripped_source_files import StrippedSourceFiles
from pants.engine.addresses import Address
from pants.engine.fs import (
AddPrefix,
Digest,
DigestSubset,
MergeDigests,
PathGlobs,
RemovePrefix,
Snapshot,
)
from pants.engine.process import FallibleProcessResult, ProcessCacheScope, ProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import (
DependenciesRequest,
SourcesField,
Targets,
TransitiveTargets,
TransitiveTargetsRequest,
)
from pants.engine.unions import UnionRule
from pants.util.logging import LogLevel
logger = logging.getLogger(__name__)
class MissingUnitTestChartDependency(Exception):
    """Indicates that no Helm chart was found among the dependencies of a
    Helm unittest target."""

    # NOTE: the original used an f-string here; Python does not treat an
    # f-string as a docstring (it is evaluated at class-creation time and
    # discarded), so `__doc__` was silently None.

    def __init__(self, address: Address) -> None:
        super().__init__(
            f"No valid `{HelmChartTarget.alias}` target has been found as a dependency for target at '{address.spec}'."
        )
@dataclass(frozen=True)
class HelmUnitTestFieldSet(TestFieldSet, GenerateSnapshotsFieldSet):
    """Fields consumed when running a Helm unittest suite or generating
    its snapshots; doubles as the field set for both goals."""

    required_fields = (HelmUnitTestSourceField,)

    # The test-suite source plus the per-target knobs of a single run.
    source: HelmUnitTestSourceField
    dependencies: HelmUnitTestDependenciesField
    strict: HelmUnitTestStrictField
    timeout: HelmUnitTestTimeoutField
class HelmUnitTestRequest(TestRequest):
    """Test-goal request wiring `HelmUnitTestFieldSet` to the
    `helm-unittest` subsystem."""

    tool_subsystem = HelmUnitTestSubsystem
    field_set_type = HelmUnitTestFieldSet
@dataclass(frozen=True)
class HelmUnitTestSetup:
    """Everything needed to execute one Helm unittest process."""

    chart: HelmChart
    chart_root: HelmChartRoot
    process: HelmProcess
    # Directory (inside the sandbox) where the XML report is written.
    reports_output_directory: str
    # `__snapshot__` directories captured when snapshots are being updated.
    snapshot_output_directories: tuple[str, ...]
@dataclass(frozen=True)
class HelmUnitTestSetupRequest:
    """Parameters for building a `HelmUnitTestSetup`."""

    field_set: HelmUnitTestFieldSet
    # Excluded from equality/hash so cache keys ignore the human-readable text.
    description: str = dataclasses.field(compare=False)
    # When True the process is cached per-session only (mirrors --test-force).
    force: bool
    update_snapshots: bool
    timeout_seconds: int | None
@rule
async def setup_helm_unittest(
    request: HelmUnitTestSetupRequest, unittest_subsystem: HelmUnitTestSubsystem
) -> HelmUnitTestSetup:
    """Assemble the sandbox and `HelmProcess` for one unittest suite.

    Resolves the chart the suite depends on, merges chart sources, test
    files and extra resource/file dependencies into one digest, and builds
    the `helm unittest` invocation.

    Raises:
        MissingUnitTestChartDependency: when no chart target is a direct
            dependency of the test target.
    """
    field_set = request.field_set
    direct_dep_targets, transitive_targets = await MultiGet(
        Get(Targets, DependenciesRequest(field_set.dependencies)),
        Get(
            TransitiveTargets,
            TransitiveTargetsRequest([field_set.address]),
        ),
    )
    # Exactly one chart is expected among direct dependencies; only the
    # first one found is used.
    chart_targets = [tgt for tgt in direct_dep_targets if HelmChartFieldSet.is_applicable(tgt)]
    if len(chart_targets) == 0:
        raise MissingUnitTestChartDependency(field_set.address)
    chart_target = chart_targets[0]
    chart, chart_root, test_files, extra_files = await MultiGet(
        Get(HelmChart, HelmChartRequest, HelmChartRequest.from_target(chart_target)),
        Get(HelmChartRoot, HelmChartRootRequest(chart_target[HelmChartMetaSourceField])),
        Get(
            SourceFiles,
            SourceFilesRequest(sources_fields=[field_set.source]),
        ),
        # Resource/file dependencies (e.g. fixture values files), stripped
        # of their source roots and with codegen enabled.
        Get(
            StrippedSourceFiles,
            SourceFilesRequest(
                sources_fields=[tgt.get(SourcesField) for tgt in transitive_targets.dependencies],
                for_sources_types=(ResourceSourceField, FileSourceField),
                enable_codegen=True,
            ),
        ),
    )
    # Re-root the test files relative to the chart so they sit next to the
    # chart sources inside the sandbox.
    stripped_test_files = await Get(
        Digest, RemovePrefix(test_files.snapshot.digest, chart_root.path)
    )
    merged_digests = await Get(
        Digest,
        MergeDigests(
            [
                chart.snapshot.digest,
                stripped_test_files,
                extra_files.snapshot.digest,
            ]
        ),
    )
    # `helm unittest` is invoked with the chart directory name as argument,
    # so everything is placed under a directory named after the chart.
    input_digest = await Get(Digest, AddPrefix(merged_digests, chart.name))
    reports_dir = "__reports_dir"
    reports_file = os.path.join(reports_dir, f"{field_set.address.path_safe_spec}.xml")
    # One `__snapshot__` dir per directory containing test files.
    snapshot_dirs = {
        os.path.join(
            chart.name, os.path.relpath(os.path.dirname(file), chart_root.path), "__snapshot__"
        )
        for file in test_files.snapshot.files
    }
    # Cache test runs only if they are successful, or not at all if `--test-force`.
    cache_scope = ProcessCacheScope.PER_SESSION if request.force else ProcessCacheScope.SUCCESSFUL
    uses_legacy = unittest_subsystem._is_legacy
    if uses_legacy:
        warn_or_error(
            "2.19.0.dev0",
            f"[{unittest_subsystem.options_scope}].version < {unittest_subsystem.default_version}",
            "You should upgrade your test suites to work with latest version.",
            start_version="2.18.0.dev1",
        )
    process = HelmProcess(
        argv=[
            unittest_subsystem.plugin_name,
            # TODO remove this flag once support for legacy unittest tool is dropped.
            *(("--helm3",) if uses_legacy else ()),
            *(("--color",) if unittest_subsystem.color else ()),
            *(("--strict",) if field_set.strict.value else ()),
            *(("--update-snapshot",) if request.update_snapshots else ()),
            "--output-type",
            unittest_subsystem.output_type.value,
            "--output-file",
            reports_file,
            chart.name,
        ],
        description=request.description,
        input_digest=input_digest,
        cache_scope=cache_scope,
        timeout_seconds=request.timeout_seconds if request.timeout_seconds else None,
        # Snapshot dirs are only captured when we intend to write them back.
        output_directories=(reports_dir, *((snapshot_dirs) if request.update_snapshots else ())),
    )
    return HelmUnitTestSetup(
        chart,
        chart_root,
        process,
        reports_output_directory=reports_dir,
        snapshot_output_directories=tuple(snapshot_dirs),
    )
@rule(desc="Run Helm Unittest", level=LogLevel.DEBUG)
async def run_helm_unittest(
    batch: HelmUnitTestRequest.Batch[HelmUnitTestFieldSet, Any],
    test_subsystem: TestSubsystem,
) -> TestResult:
    """Run one Helm unittest suite and convert its output to a `TestResult`.

    Uses a fallible process so test failures are reported rather than
    raised; the XML report is extracted and attached as `xml_results`.
    """
    field_set = batch.single_element
    setup = await Get(
        HelmUnitTestSetup,
        HelmUnitTestSetupRequest(
            field_set,
            description=f"Running Helm unittest suite {field_set.address}",
            force=test_subsystem.force,
            # Snapshot generation happens in a dedicated rule, never here.
            update_snapshots=False,
            timeout_seconds=field_set.timeout.calculate_from_global_options(test_subsystem),
        ),
    )
    process_result = await Get(FallibleProcessResult, HelmProcess, setup.process)
    # Keep only the report files from the process output.
    reports_digest = await Get(
        Digest,
        DigestSubset(
            process_result.output_digest,
            PathGlobs([os.path.join(setup.reports_output_directory, "**")]),
        ),
    )
    reports = await Get(Snapshot, RemovePrefix(reports_digest, setup.reports_output_directory))
    return TestResult.from_fallible_process_result(
        process_result,
        address=field_set.address,
        output_setting=test_subsystem.output,
        xml_results=reports,
    )
@rule
async def generate_helm_unittest_snapshots(
    field_set: HelmUnitTestFieldSet,
) -> GenerateSnapshotsResult:
    """Run the suite with --update-snapshot and return the refreshed `.snap` files.

    Uses a non-fallible process (a failing suite aborts snapshot
    generation) and re-roots the captured snapshots back under the chart
    sources so they can be written into the workspace.
    """
    setup = await Get(
        HelmUnitTestSetup,
        HelmUnitTestSetupRequest(
            field_set,
            description=f"Generating Helm unittest snapshots for suite {field_set.address}",
            force=False,
            update_snapshots=True,
            timeout_seconds=None,
        ),
    )
    process_result = await Get(ProcessResult, HelmProcess, setup.process)
    # Keep only the generated snapshot files from the sandbox output.
    snapshot_output_digest = await Get(
        Digest,
        DigestSubset(
            process_result.output_digest,
            PathGlobs(
                [
                    os.path.join(snapshot_path, "*.snap")
                    for snapshot_path in setup.snapshot_output_directories
                ]
            ),
        ),
    )
    # Undo the chart-name prefix, then re-anchor under the chart root path.
    stripped_test_snapshot_output = await Get(
        Digest, RemovePrefix(snapshot_output_digest, setup.chart.name)
    )
    normalised_test_snapshots = await Get(
        Snapshot, AddPrefix(stripped_test_snapshot_output, setup.chart_root.path)
    )
    return GenerateSnapshotsResult(normalised_test_snapshots)
def rules():
    """Collect every rule needed for Helm unittest support."""
    collected = list(collect_rules())
    collected.extend(subsystem_rules())
    collected.extend(dependency_rules())
    collected.extend(tool.rules())
    collected.extend(HelmUnitTestRequest.rules())
    collected.append(UnionRule(GenerateSnapshotsFieldSet, HelmUnitTestFieldSet))
    return collected
|
# -*- coding: utf-8 -*-
"""`anarchytools` lives on `Github`_.
.. _github: https://github.com/AnarchyTools/anarchy_sphinx
"""
from setuptools import setup
from anarchy_theme import __version__
# Package metadata for the anarchy_sphinx distribution: a Sphinx theme
# (anarchy_theme) plus a Swift autodoc domain (swift_domain).
setup(
    name='anarchy_sphinx',
    version=__version__,
    url='https://github.com/AnarchyTools/anarchy_sphinx',
    license='BSD',
    author='Johannes Schriewer',
    author_email='hallo@dunkelstern.de',
    description='AnarchyTools Theme and Swift support for Sphinx.',
    # PyPI long description is taken verbatim from the README.
    long_description=open('README.rst').read(),
    zip_safe=False,
    packages=['anarchy_theme', 'swift_domain'],
    # Ship the theme's non-Python assets (config + templates + CSS).
    package_data={
        'anarchy_theme': [
            'theme.conf',
            '*.html',
            'static/css/*.css'
        ]
    },
    # Installs the `anarchysphinx` CLI bootstrap helper.
    entry_points={
        'console_scripts': [
            'anarchysphinx=swift_domain.bootstrap:main',
        ],
    },
    include_package_data=True,
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: BSD License',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 3',
        'Operating System :: OS Independent',
        'Topic :: Documentation',
        'Topic :: Software Development :: Documentation',
    ],
    install_requires=[
        'fuzzywuzzy',
        'sphinx'
    ]
)
|
import bpy
import math

# Blender script: builds a 3D "supershape" (superformula surface of
# revolution) mesh and smooths/subdivides it.
# NOTE(review): uses `scene.cursor_location` / `scene.objects.link` /
# `scene.objects.active`, which look like the Blender 2.7x API — confirm
# the target Blender version before reuse.

# mesh arrays
verts = []
faces = []
edges = []

# 3D supershape parameters (superformula: m, a, b, n1..n3)
m = 14.23
a = -0.06
b = 2.78
n1 = 0.5
n2 = -.48
n3 = 1.5
scale = 3
# Parametric resolution: Unum steps over theta, Vnum steps over phi.
Unum = 50
Vnum = 50
Uinc = math.pi / (Unum/2)
Vinc = (math.pi/2)/(Vnum/2)

# fill verts array: theta sweeps [-pi, pi], phi sweeps [-pi/2, pi/2];
# r1/r2 are the superformula radii for the two angles.
theta = -math.pi
for i in range(0, Unum + 1):
    phi = -math.pi/2
    r1 = 1/(((abs(math.cos(m*theta/4)/a))**n2+(abs(math.sin(m*theta/4)/b))**n3)**n1)
    for j in range(0, Vnum + 1):
        r2 = 1/(((abs(math.cos(m*phi/4)/a))**n2+(abs(math.sin(m*phi/4)/b))**n3)**n1)
        # Spherical-product mapping of the two superformula radii.
        x = scale * (r1 * math.cos(theta) * r2 * math.cos(phi))
        y = scale * (r1 * math.sin(theta) * r2 * math.cos(phi))
        z = scale * (r2 * math.sin(phi))
        vert = (x, y, z)
        verts.append(vert)
        # increment phi
        phi = phi + Vinc
    # increment theta
    theta = theta + Uinc

# fill faces array: quads between adjacent theta rows; the else branch
# skips the seam vertex at the end of each row without emitting a face.
count = 0
for i in range(0, (Vnum + 1) * (Unum)):
    if count < Vnum:
        A = i
        B = i+1
        C = (i+(Vnum+1))+1
        D = (i+(Vnum+1))
        face = (A, B, C, D)
        faces.append(face)
        count = count + 1
    else:
        count = 0

# create mesh and object
mymesh = bpy.data.meshes.new("supershape")
myobject = bpy.data.objects.new("supershape", mymesh)

# set mesh location at the 3D cursor and link it into the scene
myobject.location = bpy.context.scene.cursor_location
bpy.context.scene.objects.link(myobject)

# create mesh from python data
mymesh.from_pydata(verts, edges, faces)
mymesh.update(calc_edges=True)

# set the object to edit mode
bpy.context.scene.objects.active = myobject
bpy.ops.object.mode_set(mode='EDIT')
# remove duplicate vertices (welds the seam left by the face loop)
bpy.ops.mesh.remove_doubles()
# recalculate normals
bpy.ops.mesh.normals_make_consistent(inside=False)
bpy.ops.object.mode_set(mode='OBJECT')

# subdivide modifier
myobject.modifiers.new("subd", type='SUBSURF')
myobject.modifiers['subd'].levels = 3

# show mesh as smooth
mypolys = mymesh.polygons
for p in mypolys:
    p.use_smooth = True
|
import json
from dotmap import DotMap
from util import fake_value, load_catalogs
from pymongo import MongoClient
class DataFaker:
    """Generate fake documents from a JSON-schema-like description.

    Reads a schema file, resolves `$ref` pointers, and produces fake values
    via the project's `fake_value` helper; results can be saved to MongoDB
    or to a JSON file.
    """

    # Schema `type` values this faker understands.
    STRING = 'string'
    OBJECT = 'object'
    ARRAY = 'array'

    def __init__(self, path_to_file, path_to_schema, path_to_catalogs):
        """Load the schema document and catalogs.

        Args:
            path_to_file: path to the JSON document holding all schemas.
            path_to_schema: '#/'-style pointer to the schema to fake.
            path_to_catalogs: directory of value catalogs for fake_value.
        """
        self.path_to_file = path_to_file
        self.path_to_schema = path_to_schema
        self.path_to_catalogs = path_to_catalogs
        self.root = None
        self.schema = None
        self.read()
        self.seeds = {}
        self.client = MongoClient(host='localhost', port=27017)
        load_catalogs(self.path_to_catalogs)

    @staticmethod
    def __get_paths(path_str):
        # '#/a/b' -> ['a', 'b']
        return path_str.replace('#/', '').split('/')

    def read(self):
        """Load the root document and walk down to the requested schema."""
        with open(self.path_to_file) as json_file:
            self.root = json.load(json_file)
        if self.root is not None:
            self.schema = self.root
            paths = self.__get_paths(self.path_to_schema)
            for path in paths:
                self.schema = DotMap(self.schema[path])

    def __get_node(self, path_to_node):
        # Resolve a '#/'-style pointer against the root document.
        node = self.root
        paths = self.__get_paths(path_to_node)
        for path in paths:
            node = DotMap(node[path])
        return DotMap(node.copy())

    def __init_property(self, p):
        # Resolve `$ref`, preserving any local `faker` override, then
        # recursively expand objects and array item schemas.
        if '$ref' in p:
            faker = p.faker if 'faker' in p else None
            p = self.__get_node(p['$ref'])
            if faker is not None:
                p['faker'] = faker
        if p.type == self.OBJECT:
            return self.__init_properties(p)
        elif p.type == self.ARRAY and 'items' in p:
            r = DotMap({'type': self.ARRAY, 'items': self.__init_property(p['items'])})
            if 'faker' in p:
                r.faker = p.faker
            return r
        else:
            return p

    def __init_properties(self, node):
        # Expand every property of an object schema in place.
        result = DotMap()
        for k, v in node.properties.items():
            result[k] = self.__init_property(v)
        node.properties = result
        return node

    def init(self):
        """Return the schema with all `$ref` pointers fully resolved."""
        return self.__init_property(self.schema)

    def __fake_property(self, p):
        # Leaves use their `faker` spec (or None); objects recurse over
        # properties; arrays repeat their item schema `faker` times (or once).
        if p.type != self.OBJECT and p.type != self.ARRAY:
            return fake_value(p, self) if 'faker' in p else None
        elif p.type == self.OBJECT:
            return fake_value(p, self) if 'faker' in p else self.__fake_properties(p.properties)
        elif p.type == self.ARRAY and 'items' in p:
            n = fake_value(p, self) if 'faker' in p else 1
            return [self.__fake_property(p['items']) for _ in range(n)]

    def __fake_properties(self, properties):
        result = {}
        for k, v in properties.items():
            result[k] = self.__fake_property(v)
        return result

    def fake(self):
        """Resolve the schema and return one fake document for it."""
        self.schema = self.init()
        return self.__fake_property(self.schema)

    def fake_node(self, node):
        """Fake a single (already-resolved) schema node."""
        return self.__fake_properties(node.properties) if 'properties' in node else self.__fake_property(node)

    def add_seed(self, seed):
        """Register a named seed and hand it a back-reference to this faker."""
        self.seeds[seed.name] = seed
        seed.set_df(self)

    def get_seed(self, name):
        return self.seeds[name]

    @staticmethod
    def save(data, name, dst='db', options=None):
        """Persist *data* to MongoDB (dst='db') or a JSON file (dst='file').

        Returns the insert result for 'db', True for 'file', None otherwise.
        """
        if dst == 'db':
            options = DotMap(options)
            # BUG FIX: the original compared `options.host is not 'localhost'`,
            # an identity test against a string literal whose result is
            # implementation-defined; use value equality instead.
            if options.host != 'localhost':
                client = MongoClient(host=options.host, port=options.port, username=options.user, password=options.password)
            else:
                client = MongoClient(host=options.host, port=options.port)
            db = client[options.db]
            collection = db[name]
            res = collection.insert_many(data)
            return res
        elif dst == 'file':
            with open(name, 'w') as f:
                f.write(json.dumps(data, indent=2))
            return True
        else:
            return None
|
# Exhaustive-enumeration square root: march `ans` upward in tiny steps
# until ans**2 is within `epsilon` of x (or ans overshoots x entirely).
x = 25
epsilon = 0.01
step = epsilon ** 2
numGuess = 0
ans = 0.0
while ans < x and abs(ans ** 2 - x) >= epsilon:
    ans += step
    numGuess += 1
print('numGuess= ' + str(numGuess))
# Loop may also stop because ans >= x without ever getting close enough.
if abs(ans ** 2 - x) < epsilon:
    print(str(ans) + ' is close to the square root of ' + str(x))
else:
    print('Failed on square root of ' + str(x))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-01-30 13:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: switches `cumulativesum.created_at`
    # to a plain DateField (date only, no time component).

    dependencies = [
        ('progress_analyzer', '0003_exercisestatscumulative_cum_overall_workout_gpa'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cumulativesum',
            name='created_at',
            field=models.DateField(),
        ),
    ]
|
import datetime

# Current local time. (Original had a typo: `datatime` -> NameError.)
datetime.datetime.now()
# Current UTC time. (Original called the non-existent `utcnov()`.)
datetime.datetime.utcnow()

from datetime import date

now = date.today()
# Format demo: the same date rendered several ways in one string.
now.strftime("%m-%d-%y, %d %b %Y is a %A on the %d day of %B.")
birthday = date(1964, 7, 31)
# Subtracting dates yields a timedelta; .days is its whole-day count.
age = now - birthday
age.days
|
from evaluate import pivotal, pivotality, criticality, prob_pivotal, unpacked_pivotality
from itertools import product
from simulate import *
from draw import draw, highlight_cause_effect, draw_outcomes, show_predictions
from names import *
import numpy as np
import networkx as nx
import json
'''
open input file and save to a dict
'''
# NOTE: Python 2 script (print statements, xrange); run under python2.
with open('../json/experiment1.json', 'r') as data:
    file = json.load(data)
    data.close()

# list of cases to run
cases = len(file['experiments'])

# Assign each node in every case a short, unique human name.
names = []
index = [0]
for i in xrange(cases):
    nodes = [u for u, v in file['experiments'][i]['situation']['values']]
    # Running offset of where each case's names start.
    index.append(index[i] + len(nodes))
    count = 0
    while count < len(nodes):
        name = get_first_name()
        # Keep only short names and avoid duplicates across cases.
        if len(name) < 7 and name not in names:
            names.append(name)
            count += 1

for case in xrange(cases):
    cause = file['experiments'][case]['situation']['cause']
    # Default effect node is 'o' when the case does not specify one.
    if 'effect' in file['experiments'][case]['situation']:
        effect = file['experiments'][case]['situation']['effect']
    else:
        effect = 'o'
    hierarchy = simulate(file['experiments'][case], cause=cause, effect=effect)
    # fig = draw(hierarchy, ID=case)
    # fig = highlight_cause_effect(hierarchy, fig, cause, effect, ID=case)
    # situation = draw_outcomes(hierarchy, fig, ID=case)
    pivr = pivotality(hierarchy, cause, effect, root=True)
    piv = pivotality(hierarchy, cause, effect)
    print case
    print
    crit = criticality(hierarchy, cause, effect, e_value=True)
    print 'prob', crit
    # NOTE(review): the "+ 100" offset on the criticality ratio looks like a
    # display/scaling hack — confirm against the analysis code.
    crit = (1 - (criticality(hierarchy, cause, effect, e_value=False)/criticality(hierarchy, cause, effect, e_value=True))) + 100
    print 'crit', crit
    # print 'piv', pivr
    # print 'piv*', piv
    print
    print
    # predictions = show_predictions(hierarchy, fig, cause, effect, ID=case, pivotalityr=pivr, pivotality=piv, criticality=crit)

plt.close()
#!/usr/local/bin/python3
import os
def main():
    """Create all database tables declared on the ctc_server SQLAlchemy db."""
    # Imported lazily so this module can be imported without a configured app.
    from ctc_server import db
    db.create_all()

if __name__ == "__main__":
    main()
|
#!/usr/bin/env python3
""" A python version of Vectorize2.R, uses different ways
to run the stochastic Ricker model"""
__appname__ = 'Vectorize2.py'
__author__ = 'Hanyun Zhang (hanyun.zhang18@imperial.ac.uk)'
__version__ = '0.0.1'
# Imports
import numpy as np
# Function
def matrix(numyears, p0):
    """Return a (numyears x npop) zero matrix seeded with p0[0] as row 0."""
    npop = len(p0[0])
    grid = np.zeros(shape=(numyears, npop))
    grid[0] = p0[0]
    return grid


def stochrick(numyears=100, p0=np.random.random_sample((1, 1000)), r=1.2, K=1, sigma=0.2):
    """Stochastic Ricker model, iterated population-by-population (slow loop)."""
    N = matrix(numyears, p0)
    for pop in range(len(p0[0])):
        for yr in range(1, numyears):
            previous = N[yr - 1][pop]
            # Ricker growth plus Gaussian noise of scale sigma.
            N[yr][pop] = previous * np.exp(r * (1 - previous / K)) + np.random.normal(0, sigma, 1)
    return N


def stochrickvect(numyears=100, p0=np.random.random_sample((1, 1000)), r=1.2, K=1, sigma=0.2):
    """Stochastic Ricker model with all populations updated per year at once."""
    N = matrix(numyears, p0)
    for yr in range(1, numyears):
        previous = N[yr - 1]
        N[yr] = previous * np.exp(r * (1 - previous / K)) + np.random.normal(0, sigma, 1)
    return N
np.seterr(all='ignore')  # The np.exp function rises a runtime warning because some outputs are
# too large for the dtype of the numpy array to store. The warning is ignored by numpy setting.

## Calculate the Ricker model with loop
# Record the time taken
from timeit import default_timer as timer

start = timer()
data = stochrick()
end = timer()
timetaken = end - start
print("Looping Stochastic Ricker takes:", timetaken)

# Calculate the vectorized Ricker iteration
# Record the time taken (same workload, vectorized across populations)
start = timer()
data = stochrickvect()
end = timer()
timetaken = end - start
print("Vectorized Stochastic Ricker takes:", timetaken)
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# File: osc4py3/tests/udpbc.py
# <pep8 compliant>
"""This file can be used as a start point for broadcast usage.
For quick test, it doesn't use osc4py3 monitors (nonblocking options set to false),
and directly target low level communication functions.
This should be modified for a realworld use case.
"""
# https://pymotw.com/3/socket/multicast.html
import sys
from os.path import abspath, dirname
# Make osc4py3 available.
PACKAGE_PATH = dirname(dirname(dirname(abspath(__file__))))
if PACKAGE_PATH not in sys.path:
sys.path.insert(0, PACKAGE_PATH)
import socket
import time
import pprint
import logging
from osc4py3 import oscchannel
from osc4py3.oscudpmc import UdpMcChannel
from osc4py3.oscpacketoptions import PacketOptions
# A logger to monitor activity... and debug.
logging.basicConfig(format='%(asctime)s - %(threadName)s ø %(name)s - '
'%(levelname)s - %(message)s')
logger = logging.getLogger("osc")
logger.setLevel(logging.DEBUG)
print("=" * 80)
print("\nTRANSMITING LOCAL UDP MULTICAST PACKETS\n")

# Payload: LENGTH consecutive bytes starting at 'A'.
LENGTH = 18
DURATION = 20
data = bytes(range(ord('A'), ord('A') + LENGTH))
IP = "127.0.0.1"
PORT = 50000  # Hope it is not used.

# Note: we rely on network buffers (send small amount of data). Else we may need
# nonblocking write with parallel read.
writer = UdpMcChannel("testwriter", "w",
    {
        'udpwrite_host': IP,
        'udpwrite_port': PORT,
        'auto_start': True,
        'bcast_enabled': True,
        'udpwrite_nonblocking': False,  # Else we need a Monitor
        'udpwrite_forceipv4': True,     # localhost may resolve in ::1
        'logger': logger,
    })
writer.activate()

# Reader bound to all interfaces on the same port, blocking mode.
reader = UdpMcChannel("testreader", "r",
    {
        'udpread_host': "0.0.0.0",
        'udpread_port': PORT,
        'auto_start': True,
        'bcast_enabled': True,
        'udpread_nonblocking': False,
        'udpread_identusedns': False,   # Dont care about reader hostname
        'logger': logger,
    })
reader.activate()

print("Sending in the received_rawpackets queue:")
# Send ten packets; manually pump writer then reader since no monitor
# thread is running (blocking/non-monitored mode).
for i in range(10):
    writer.transmit_data(data, PacketOptions())
    writer.process_monevents(0, 'w')
    time.sleep(0.1)
    reader.process_monevents(0, 'r')

print("Received in the received_rawpackets queue:")
# Drain whatever made it into the shared raw-packet queue.
received = []
while oscchannel.received_rawpackets.qsize():
    received.append(oscchannel.received_rawpackets.get_nowait())
pprint.pprint(received)

reader.terminate()
writer.terminate()

#TODO: Write high level test code with as_xxx functions for broadcast client/server.
|
# For each test case, print the last digit of num1 ** num2.
# Works by finding the repeating cycle of last digits of powers of num1.
t = int(input())
for i in range(t):
    num1, num2 = map(int, input().split())
    # Only the last digit of the base matters.
    num1 = num1 % 10
    lastdig = num1
    # Anything to the power 0 is 1.
    if num2 == 0:
        print("1")
        continue
    # ld collects the cycle of last digits; lookup marks digits seen.
    ld = []
    ld.append(lastdig)
    lookup = []
    for i in range(10):
        lookup.append(0)
    lookup[lastdig] = 1
    while True:
        lastdig = lastdig * num1
        lastdig = lastdig % 10
        # Cycle closes as soon as a digit repeats.
        if lookup[lastdig] == 1:
            break
        ld.append(lastdig)
        lookup[lastdig] = 1
    # Exponent modulo the cycle length picks the answer; index-1 with
    # wrap-around because ld[0] corresponds to exponent 1.
    id_len = int(len(ld))
    index = int(num2 % id_len)
    index = index - 1
    if index < 0:
        index = id_len - 1
    print(ld[index])
|
from __future__ import division
from collections import defaultdict
from math import *
from random import sample
import csv
from operator import itemgetter
import matplotlib as mpl
import matplotlib.pyplot as plt
import random
class BaseAlgorithm():
    # Python 2 code (print statements). Builds a synthetic set of points:
    # a square perimeter of "depot" nodes followed by an inner grid of
    # "city" nodes, then precomputes all pairwise distances.

    #def __init__(self):
    #    self.update_data()

    def update_data(self, a, b, c):
        # a: square side length; b: inner grid spacing; c: perimeter spacing.
        filename = "data3.csv"
        """
        self.cities = []
        #self.size = len(self.cities)
        self.coords = []
        with open(filename, 'r') as csvfile:
            csvreader = csv.reader(csvfile)
            for row in csvreader:
                self.coords.append([float(row[0]),float(row[1])])
        self.cities = range(0,len(self.coords))
        """
        self.cities = []
        # Four corners of the square, then evenly spaced perimeter points.
        self.coords = [[-a/2, -a/2], [-a/2, a/2], [a/2, a/2], [a/2, -a/2]]
        for i in range(1, int(ceil(a/c))):
            self.coords.append([0-a/2, c*i-a/2])
            self.coords.append([c*i-a/2, 0-a/2])
            self.coords.append([c*i-a/2, a/2])
            self.coords.append([a/2, c*i-a/2])
        # Number of perimeter ("depot") nodes; grid nodes come after.
        self.denum = len(self.coords)
        print self.coords, self.denum
        #random.shuffle(self.coords)
        # Inner grid of cell centres, row by row from the top-left.
        for i in range(0, int((a/b))):
            for j in range(0, int((a/b))):
                self.coords.append([-a/2+b/2+(b*j), a/2-b/2-(b*i)])
        #print self.coords,len(self.coords)
        self.cities = range(0, len(self.coords))
        #random.shuffle(self.coords[7:])
        print self.coords, len(self.coords)
        self.size = len(self.cities)
        self.distances = self.compute_distances()

    def haversine_distance(self, cityA, cityB):
        # NOTE(review): despite the name this is plain Euclidean distance
        # on the plane, not the haversine great-circle formula.
        coord1 = self.coords[cityA]
        coord2 = self.coords[cityB]
        a = (coord1[0]-coord2[0])**2+(coord1[1]-coord2[1])**2
        c = sqrt(a)
        return c

    def compute_distances(self):
        # Symmetric all-pairs distance table stored both ways round.
        self.distances = defaultdict(dict)
        for cityA in self.cities:
            for cityB in self.cities:
                if cityB not in self.distances[cityA]:
                    distance = self.haversine_distance(cityA, cityB)
                    self.distances[cityA][cityB] = distance
                    self.distances[cityB][cityA] = distance
        return self.distances

    # add node k between node i and node j
    def add(self, i, j, k):
        # Insertion cost of k on edge (i, j): cik + ckj - cij.
        return self.distances[i][k] + self.distances[k][j] - self.distances[i][j]
class TourConstructionHeuristics(BaseAlgorithm):
# find the neighbor k closest to the tour, i.e such that
# cik + ckj - cij is minimized with (i, j) an edge of the tour
# add k between the edge (i, j), resulting in a tour with subtour (i, k, j)
# used for the cheapest insertion algorithm
    def __init__(self, dist, grid, comm, Q):
        """Build the point set and distance table for the heuristic.

        dist: square side; grid: inner grid spacing; comm: perimeter
        spacing; Q: capacity limit for a single tour.
        """
        self.update_data(dist, grid, comm)
        self.Q = Q
        self.comm = comm
        self.sqr = dist
        self.grid = grid
def closest_neighbor(self, tour, node, in_tour=False, farthest=False):
neighbors = self.distances[node]
##print node
##print neighbors.items()
##print tour
current_dist = [(c, d) for c, d in neighbors.items()
if (c in tour)]
return sorted(current_dist, key=itemgetter(1))[-farthest]
    def add_closest_to_tour(self, tours,tourslength,unass,veh,vehlengths):
        """Find the cheapest (city, tour, edge) insertion over all
        unassigned cities, weighted 0.3 on detour cost and 0.7 on the
        resulting vehicle load, subject to the capacity bound self.Q.

        Returns (best_dist, new_tour, tour_index, city1, vehi, vehindex)
        where vehi/vehindex locate the chosen tour inside `veh`
        (both None when the tour is not yet assigned to a vehicle).
        """
        best_ratio,best_dist, new_tour = float('inf'),float('inf'), None
        ##print vehlengths
        ##print veh
        ##print tourslength
        ##print tours
        # NOTE(review): t is hard-coded to 1 vehicle here, mirroring the
        # insertion methods below.
        t=1
        tour_index = 0
        city1 = 0
        c=0.3
        d=0.7
        vehi=vehindex=None
        for city in unass:
            ##print city
            for tour in tours:
                ##print tour
                for index in range(len(tour) - 1):
                    dist = self.add(tour[index], tour[index + 1], city)
                    #print unass
                    ##print dist
                    ##print vehlengths[tours.index(tour)]
                    # a/b (Manhattan offsets from the tour start) are
                    # computed but unused -- left for review.
                    a=abs(self.coords[tour[0]][0]-self.coords[city][0])
                    b=abs(self.coords[tour[0]][1]-self.coords[city][1])
                    # Two-node tours additionally pay their own current length.
                    if len(tour)!=2:
                        ratio = c*dist+d*(vehlengths[tours.index(tour)]+dist)
                    else:
                        ratio = c*dist+d*(vehlengths[tours.index(tour)]+dist+tourslength[tours.index(tour)])
                    if ratio < best_ratio and (tourslength[tours.index(tour)]+dist)<self.Q:
                        best_dist = dist
                        best_ratio = ratio
                        new_tour = tour[:index + 1] + [city] + tour[index + 1:]
                        tour_index = tours.index(tour)
                        city1 = city
        # Locate the winning tour inside the vehicle assignment, if any.
        for p in range(0,t):
            if tours[tour_index] in veh[p]:
                vehi=p
                vehindex = veh[p].index(tours[tour_index])
        ##print best_dist
        ##print city1
        return best_dist, new_tour, tour_index,city1,vehi,vehindex
    def nearest_insertion(self,farthest=False):
        """Nearest-insertion construction over all depot-pair seed tours.

        Seeds one 2-node tour per unordered depot pair, then repeatedly
        (selection) picks the unassigned city with the best weighted
        nearest-member distance and (insertion) places it on the
        cheapest edge of the chosen tour, respecting capacity self.Q.
        Returns (tours, tourslength, veh, vehlength).

        NOTE(review): heavy shared-state mutation; several comparisons
        use `is` with int literals (`len(...) is 3`) and `!= None` --
        they happen to work under CPython but should be `==`/`is not None`.
        """
        denum=8
        t=1
        v=0
        a= len(self.coords)
        #tour = [0,a]
        tours = []
        tourslength = []
        vehlengths=[]
        cantass=[]
        # Seed tours: one per unordered depot pair (depots are 0..denum-1).
        for i in range(0,denum):
            for j in range(0,denum):
                if ([i,j]in tours or [j,i]in tours):
                    continue
                else:
                    tours.append([i,j])
                    cantass.append([])
                    tourslength.append(self.distances[i][j])
                    vehlengths.append(0)
        # we find the closest node R to the first node
        unass = self.cities[denum:a]
        veh = [[],[],[],[],[],[]]
        ##print unass
        while len(unass) != 0:
            best, best_len,best_ratio = None, 0 if farthest else float('inf'),float('inf')
            t=1
            tour_index = 0
            city1 = 0
            c=0.8
            d=0.2
            #print len(unass)
            vehi=vehindex=None
            # (selection step) given a sub-tour,we find node r not in the
            # sub-tour closest to any node j in the sub-tour,
            # i.e. with minimal c_rj
            for tour in tours:
                for city in unass:
                    #print cantass
                    #print tour
                    # Skip cities already proven un-insertable for this tour.
                    if city in cantass[tours.index(tour)]:
                        continue
                    # we consider only the distances to nodes already in the tour
                    #print tour,city
                    _, length = self.closest_neighbor(tour, city, True,farthest)
                    if len(tour)!=2:
                        ratio = c*length+d*(vehlengths[tours.index(tour)])
                    else:
                        ratio = c*length+d*(vehlengths[tours.index(tour)]+tourslength[tours.index(tour)]+self.add(tour[0],tour[1],city))
                    if (length > best_len if farthest else ratio < best_ratio):
                        city1, best_len,tour1,best_ratio = city, length,tour,ratio
            #if len(unass) is 25 or len(unass)is 24:
                #print best_ratio, tour1,city1
            ##print city1
            # (insertion step) we find the arc (i, j) in the sub-tour which
            # minimizes cir + crj - cij, and we insert r between i and j
            best_dist, new_tour = float('inf'), None
            for index in range(len(tour1) - 1):
                dist = self.add(tour1[index], tour1[index + 1], city1)
                ##print dist
                if dist < best_dist and (tourslength[tours.index(tour1)]+dist)<self.Q:
                    best_dist = dist
                    new_tour = tour1[:index + 1] + [city1] + tour1[index + 1:]
                    tour_index = tours.index(tour1)
            # No feasible edge under capacity: remember and retry later.
            if best_dist == float('inf'):
                cantass[tours.index(tour1)].append(city1)
                continue
            for p in range(0,t):
                if tours[tour_index] in veh[p]:
                    vehi=p
                    vehindex = veh[p].index(tours[tour_index])
            length, tour, index, city1,vehi,vehindex=best_dist, new_tour, tour_index,city1,vehi,vehindex
            #print tour,index
            # Commit the insertion and propagate length to all tours that
            # currently share this vehicle load value.
            if city1 != 0 and tour != 0:
                unass.remove(city1)
                tourslength[index] += length
                tours[index]=tour
                if vehi!=None:
                    veh[vehi][vehindex]=tour
                    x=vehlengths[tours.index(tour)]
                    for j in range(0,len(tours)):
                        if vehlengths[j]==x:
                            #print"s"
                            vehlengths[j]+=length
            # A tour just grew past its 2-node seed: manage vehicle startup.
            if len(tours[index]) is 3:
                v+=1
                if v<t:
                    tour1=[tour[0],tour[2]]
                    tours.append(tour1)
                    cantass.append([])
                    tourslength.append(self.distances[tour[0]][tour[2]])
                    veh[v-1].append(tour)
                    vehlengths.append(0)
                    vehlengths[index]=tourslength[index]
                    #print veh
                #print veh
                if v==t:
                    # Last vehicle started: drop all remaining 2-node seeds
                    # and re-seed continuation tours from each vehicle's end.
                    veh[v-1].append(tour)
                    vehlengths[index]=tourslength[index]
                    #print veh
                    #print veh[0]
                    #print veh[0][0]
                    a=tourslength
                    d=vehlengths
                    #print tours
                    i=0
                    while i<len(tours):
                        tour2=tours[i]
                        #print tour2
                        i+=1
                        if (len(tour2)==2):
                            #print "S"
                            tourslength.remove(a[tours.index(tour2)])
                            vehlengths.remove(d[tours.index(tour2)])
                            tours.remove(tour2)
                            i-=1
                    #print "Y"
                    e=tours
                    #print e
                    for l in range(0,t):
                        tour3=tours[l]
                        b=tour3[-1]
                        #print "w"
                        for i in range(0,denum):
                            tours.append([b,i])
                            cantass.append([])
                            tourslength.append(self.distances[b][i])
                            vehlengths.append(vehlengths[l])
                    #print"x"
                if v>t:
                    # A continuation tour matured: re-seed from its tail depot
                    # and retire the sibling seeds that started at its head.
                    c=tour[0]
                    d=tour[-1]
                    for k in range(0,denum):
                        tours.append([d,k])
                        cantass.append([])
                        tourslength.append(self.distances[d][k])
                        #print tour
                        vehlengths.append(vehlengths[tours.index(tour)]+tourslength[tours.index(tour)])
                        #print "d",vehlengths[tours.index(tour)],tourslength[tours.index(tour)]
                    y=vehlengths[tours.index(tour)]
                    for j in range(0,len(tours)):
                        if vehlengths[j]==y:
                            vehlengths[j]+=tourslength[tours.index(tour)]
                            #print "dd"
                    for i in range(0,denum):
                        #print "d"
                        if [c,i] in tours:
                            #print "S"
                            del tourslength[tours.index([c,i])]
                            del vehlengths[tours.index([c,i])]
                            #print vehlengths
                            tours.remove([c,i])
                    #print tours
                    #print tourslength
                    for i in range(0,t):
                        #print veh
                        #print veh[i][-1][-1]
                        if veh[i][-1][-1]==tour[0]:
                            veh[i].append(tour)
                            break
        #print vehlengths
        print tours
        # Total per-vehicle route lengths.
        vehlength=[0,0,0,0,0,0]
        for i in range(0,t):
            for p in range(0,len(veh[i])):
                #print veh[i],veh[i][p],tours.index(veh[i][p])
                #print tourslength[tours.index(veh[i][p])]
                #print veh
                vehlength[i]+=tourslength[tours.index(veh[i][p])]
        # Drop any leftover 2-node seed tours.
        j=0
        while(j<len(tours)):
            if(len(tours[j]))==2:
                tours.remove(tours[j])
                tourslength.remove(tourslength[j])
            else:
                j+=1
        # Append return-to-depot candidate legs from each vehicle's last stop.
        for l in range(0,t):
            d= veh[l][-1][-1]
            for k in range(0,denum):
                tours.append([d,k])
                veh[l].append([d,k])
                tourslength.append(self.distances[d][k])
        return tours, tourslength,veh,vehlength
        ##print unass
        ##print self.cities
        #return tours,tourslength
    def cheapest_insertion(self):
        """Cheapest-insertion construction.

        Seeds one 2-node tour per ordered depot pair, then repeatedly
        inserts the globally cheapest feasible (city, edge) found by
        add_closest_to_tour. The post-insertion vehicle bookkeeping
        mirrors nearest_insertion. Returns (tours, tourslength, veh,
        vehlength).

        NOTE(review): `len(...) is 3` and `!= None` rely on CPython
        quirks; should be `== 3` / `is not None`.
        """
        denum=8
        t=1
        v=0
        a= len(self.coords)
        ##print a
        #tour = [0,a]
        tours = []
        tourslength = []
        vehlengths=[]
        # Unlike nearest_insertion, ordered pairs are kept (both [i,j]
        # and [j,i] are seeded).
        for i in range(0,denum):
            for j in range(0,denum):
                tours.append([i,j])
                tourslength.append(self.distances[i][j])
                vehlengths.append(0)
        # we find the closest node R to the first node
        unass = self.cities[denum:a]
        veh = [[],[],[],[],[],[]]
        while len(unass) != 0:
            length, tour, index, city1,vehi,vehindex = self.add_closest_to_tour(tours,tourslength,unass,veh,vehlengths)
            ##print unass
            print tours
            #print tourslength
            ##print tour
            ##print veh
            ##print vehi
            ##print vehindex
            # Commit the insertion and propagate the added length to all
            # tours sharing this vehicle load value.
            if city1 != 0 and tour != 0:
                unass.remove(city1)
                tourslength[index] += length
                tours[index]=tour
                if vehi!=None:
                    veh[vehi][vehindex]=tour
                    x=vehlengths[tours.index(tour)]
                    for j in range(0,len(tours)):
                        if vehlengths[j]==x:
                            ##print"s"
                            vehlengths[j]+=length
            # Tour grew past its 2-node seed: vehicle startup bookkeeping.
            if len(tours[index]) is 3:
                v+=1
                if v<t:
                    tour1=[tour[0],tour[2]]
                    tours.append(tour1)
                    tourslength.append(self.distances[tour[0]][tour[2]])
                    veh[v-1].append(tour)
                    vehlengths.append(0)
                    vehlengths[index]=tourslength[index]
                    ##print veh
                ##print veh
                if v==t:
                    # Last vehicle started: drop remaining seeds and
                    # re-seed continuation tours from each vehicle's end.
                    veh[v-1].append(tour)
                    vehlengths[index]=tourslength[index]
                    ##print veh
                    ##print veh[0]
                    ##print veh[0][0]
                    a=tourslength
                    d=vehlengths
                    ##print tours
                    i=0
                    while i<len(tours):
                        tour2=tours[i]
                        ##print tour2
                        i+=1
                        if (len(tour2)==2):
                            ##print "S"
                            tourslength.remove(a[tours.index(tour2)])
                            vehlengths.remove(d[tours.index(tour2)])
                            tours.remove(tour2)
                            i-=1
                    ##print "Y"
                    e=tours
                    ##print e
                    for l in range(0,t):
                        tour3=tours[l]
                        b=tour3[-1]
                        ##print "w"
                        for i in range(0,denum):
                            tours.append([b,i])
                            tourslength.append(self.distances[b][i])
                            vehlengths.append(vehlengths[l])
                    ##print"x"
                if v>t:
                    # Continuation tour matured: re-seed from its tail and
                    # retire the sibling seeds starting at its head.
                    c=tour[0]
                    d=tour[-1]
                    for k in range(0,denum):
                        tours.append([d,k])
                        tourslength.append(self.distances[d][k])
                        ##print tour
                        vehlengths.append(vehlengths[tours.index(tour)]+tourslength[tours.index(tour)])
                        ##print "d",vehlengths[tours.index(tour)],tourslength[tours.index(tour)]
                    y=vehlengths[tours.index(tour)]
                    for j in range(0,len(tours)):
                        if vehlengths[j]==y:
                            vehlengths[j]+=tourslength[tours.index(tour)]
                            ##print "dd"
                    for i in range(0,denum):
                        #print "d"
                        if [c,i] in tours:
                            #print "S"
                            del tourslength[tours.index([c,i])]
                            del vehlengths[tours.index([c,i])]
                            ##print vehlengths
                            tours.remove([c,i])
                    #print tours
                    #print tourslength
                    for i in range(0,t):
                        ##print veh
                        ##print veh[i][-1][-1]
                        if veh[i][-1][-1]==tour[0]:
                            veh[i].append(tour)
                            break
        ##print tours
        ##print tours
        # Total per-vehicle route lengths.
        vehlength=[0,0,0,0,0,0]
        for i in range(0,t):
            for p in range(0,len(veh[i])):
                ##print veh[i],veh[i][p],tours.index(veh[i][p])
                ##print tourslength[tours.index(veh[i][p])]
                #print veh
                vehlength[i]+=tourslength[tours.index(veh[i][p])]
        # Drop leftover 2-node seed tours.
        j=0
        while(j<len(tours)):
            if(len(tours[j]))==2:
                tours.remove(tours[j])
                tourslength.remove(tourslength[j])
            else:
                j+=1
        return tours, tourslength,veh,vehlength
"""def samedis(self,tours,tourslength):
c=0.5
d=0.5
for tour1 in tours:
i=1
while (i<len(tour1)-1):
##print(len(tour1))
##print("!@#!")
best_dist = self.add(tour1[i-1], tour1[i+1], tour1[i])
##print("!!!!")
best_ratio = c*best_dist + d*(tourslength[tours.index(tour1)])
for tour in tours:
##print("******")
if tour != tour1 and len(tour)!=2 :
for index in range(len(tour) - 1):
dist = self.add(tour[index], tour[index + 1], tour1[i])
##print dist
ratio = c*dist + d*(tourslength[tours.index(tour)]+tour1[i])
if ratio < best_ratio and (tourslength[tours.index(tour)]+dist)<self.Q:
best_dist = dist
new_tour = tour[:index + 1] + [tour1[i]] + tour[index + 1:]
tour_index = tours.index(tour)
best_ratio = c*best_dist + d*(tourslength[tours.index(tour)])
if best_ratio != c*best_dist + d*(tourslength[tours.index(tour1)]):
tours[tour_index]=new_tour
tourslength[tour_index]+= best_dist
tourslength[tours.index(tour1)]-=self.add(tour1[i-1], tour1[i + 1], tour1[i])
tour1.remove(tour1[i])
else:
i+=1
##print self.distances ##print(i)
return tours, tourslength
"""
    def farthest_insertion(self,farthest=True):
        """Farthest-insertion construction.

        Same skeleton as nearest_insertion, but the selection step
        MAXIMISES the weighted ratio (note the `-d*...` terms and the
        -inf initialisation), so the city farthest from its best tour is
        placed first. Returns (tours, tourslength, veh, vehlength).

        NOTE(review): shares the `is 3` / `!= None` CPython-quirk
        comparisons with the sibling methods.
        """
        denum=8
        t=1
        v=0
        a= len(self.coords)
        #tour = [0,a]
        tours = []
        tourslength = []
        vehlengths=[]
        cantass=[]
        # Seed tours: one per unordered depot pair.
        for i in range(0,denum):
            for j in range(0,denum):
                if ([i,j]in tours or [j,i]in tours):
                    continue
                else:
                    tours.append([i,j])
                    cantass.append([])
                    tourslength.append(self.distances[i][j])
                    vehlengths.append(0)
        # we find the closest node R to the first node
        unass = self.cities[denum:a]
        veh = [[],[],[],[],[],[]]
        ##print unass
        while len(unass) != 0:
            best, best_len,best_ratio = None, 0 if farthest else float('inf'),-float('inf')
            t=1
            tour_index = 0
            city1 = 0
            c=0.8
            d=0.2
            #print len(unass)
            vehi=vehindex=None
            # (selection step) given a sub-tour,we find node r not in the
            # sub-tour closest to any node j in the sub-tour,
            # i.e. with minimal c_rj
            for tour in tours:
                for city in unass:
                    #print cantass
                    #print tour
                    # Skip cities already proven un-insertable for this tour.
                    if city in cantass[tours.index(tour)]:
                        continue
                    # we consider only the distances to nodes already in the tour
                    #print tour,city
                    _, length = self.closest_neighbor(tour, city, True,farthest)
                    if len(tour)!=2:
                        ratio = c*length-d*(vehlengths[tours.index(tour)])
                    else:
                        ratio = c*length-d*(vehlengths[tours.index(tour)]+tourslength[tours.index(tour)]+self.add(tour[0],tour[1],city))
                    if (ratio > best_ratio if farthest else ratio < best_ratio):
                        city1, best_len,tour1,best_ratio = city, length,tour,ratio
            #if len(unass) is 25 or len(unass)is 24:
                #print best_ratio, tour1,city1
            ##print city1
            # (insertion step) we find the arc (i, j) in the sub-tour which
            # minimizes cir + crj - cij, and we insert r between i and j
            best_dist, new_tour = float('inf'), None
            for index in range(len(tour1) - 1):
                dist = self.add(tour1[index], tour1[index + 1], city1)
                ##print dist
                if dist < best_dist and (tourslength[tours.index(tour1)]+dist)<self.Q:
                    best_dist = dist
                    new_tour = tour1[:index + 1] + [city1] + tour1[index + 1:]
                    tour_index = tours.index(tour1)
            # No feasible edge under capacity: remember and retry later.
            if best_dist == float('inf'):
                cantass[tours.index(tour1)].append(city1)
                continue
            for p in range(0,t):
                if tours[tour_index] in veh[p]:
                    vehi=p
                    vehindex = veh[p].index(tours[tour_index])
            length, tour, index, city1,vehi,vehindex=best_dist, new_tour, tour_index,city1,vehi,vehindex
            #print tour,index
            # Commit the insertion and propagate length to tours sharing
            # this vehicle load value.
            if city1 != 0 and tour != 0:
                unass.remove(city1)
                tourslength[index] += length
                tours[index]=tour
                if vehi!=None:
                    veh[vehi][vehindex]=tour
                    x=vehlengths[tours.index(tour)]
                    for j in range(0,len(tours)):
                        if vehlengths[j]==x:
                            #print"s"
                            vehlengths[j]+=length
            # Tour grew past its 2-node seed: vehicle startup bookkeeping.
            if len(tours[index]) is 3:
                v+=1
                if v<t:
                    tour1=[tour[0],tour[2]]
                    tours.append(tour1)
                    cantass.append([])
                    tourslength.append(self.distances[tour[0]][tour[2]])
                    veh[v-1].append(tour)
                    vehlengths.append(0)
                    vehlengths[index]=tourslength[index]
                    #print veh
                #print veh
                if v==t:
                    # Last vehicle started: drop remaining seeds and
                    # re-seed continuation tours from each vehicle's end.
                    veh[v-1].append(tour)
                    vehlengths[index]=tourslength[index]
                    #print veh
                    #print veh[0]
                    #print veh[0][0]
                    a=tourslength
                    d=vehlengths
                    #print tours
                    i=0
                    while i<len(tours):
                        tour2=tours[i]
                        #print tour2
                        i+=1
                        if (len(tour2)==2):
                            #print "S"
                            tourslength.remove(a[tours.index(tour2)])
                            vehlengths.remove(d[tours.index(tour2)])
                            tours.remove(tour2)
                            i-=1
                    #print "Y"
                    e=tours
                    #print e
                    for l in range(0,t):
                        tour3=tours[l]
                        b=tour3[-1]
                        #print "w"
                        for i in range(0,denum):
                            tours.append([b,i])
                            cantass.append([])
                            tourslength.append(self.distances[b][i])
                            vehlengths.append(vehlengths[l])
                    #print"x"
                if v>t:
                    # Continuation tour matured: re-seed from its tail and
                    # retire the sibling seeds starting at its head.
                    c=tour[0]
                    d=tour[-1]
                    for k in range(0,denum):
                        tours.append([d,k])
                        cantass.append([])
                        tourslength.append(self.distances[d][k])
                        #print tour
                        vehlengths.append(vehlengths[tours.index(tour)]+tourslength[tours.index(tour)])
                        #print "d",vehlengths[tours.index(tour)],tourslength[tours.index(tour)]
                    y=vehlengths[tours.index(tour)]
                    for j in range(0,len(tours)):
                        if vehlengths[j]==y:
                            vehlengths[j]+=tourslength[tours.index(tour)]
                            #print "dd"
                    for i in range(0,denum):
                        #print "d"
                        if [c,i] in tours:
                            #print "S"
                            del tourslength[tours.index([c,i])]
                            del vehlengths[tours.index([c,i])]
                            #print vehlengths
                            tours.remove([c,i])
                    #print tours
                    #print tourslength
                    for i in range(0,t):
                        #print veh
                        #print veh[i][-1][-1]
                        if veh[i][-1][-1]==tour[0]:
                            veh[i].append(tour)
                            break
        # Total per-vehicle route lengths.
        vehlength=[0,0,0,0,0,0]
        for i in range(0,t):
            for p in range(0,len(veh[i])):
                #print veh[i],veh[i][p],tours.index(veh[i][p])
                #print tourslength[tours.index(veh[i][p])]
                #print veh
                vehlength[i]+=tourslength[tours.index(veh[i][p])]
        # Drop leftover 2-node seed tours.
        j=0
        while(j<len(tours)):
            if(len(tours[j]))==2:
                tours.remove(tours[j])
                tourslength.remove(tourslength[j])
            else:
                j+=1
        # Append return-to-depot candidate legs from each vehicle's last stop.
        for l in range(0,t):
            d= veh[l][-1][-1]
            for k in range(0,denum):
                tours.append([d,k])
                veh[l].append([d,k])
                tourslength.append(self.distances[d][k])
        return tours, tourslength,veh,vehlength
        ##print unass
    def samedis(self,tours,tourslength,veh,vehlength):
        """Post-optimization relocate move: try to move each interior
        city of each multi-stop tour into a cheaper position on another
        tour, weighting 0.5 on detour cost and 0.5 on the receiving
        vehicle's load. Mutates and returns (tours, tourslength, veh,
        vehlength).

        NOTE(review): iterates over `tours` while mutating the tours it
        contains -- intentionally left untouched; verify termination on
        real instances.
        """
        c=0.5
        d=0.5
        t=1
        for tour1 in tours:
            if len(tour1)!=2:
                i=1
                while (i<len(tour1)-1):
                    ##print(len(tour1))
                    ##print("!@#!")
                    # Locate tour1's vehicle (o), its slot (p), and load (b).
                    for j in range(0,t):
                        if tour1 in veh[j]:
                            o=j
                            p=veh[j].index(tour1)
                            b=vehlength[j]
                    ##print veh    ##print b,"s"
                    # Cost of keeping city tour1[i] where it is.
                    best_dist = self.add(tour1[i-1], tour1[i+1], tour1[i])
                    h=best_dist
                    best_ratio = c*best_dist + d*(b)
                    #print best_dist,best_ratio,"sss"
                    #print "ddd"
                    ##print("!!!!")
                    for tour in tours:
                        # Locate the candidate tour's vehicle load (a) and slot.
                        for j in range(0,t):
                            if tour in veh[j]:
                                a=vehlength[j]
                                w=j
                                s=veh[j].index(tour)
                        ##print("******")
                        if tour != tour1 and len(tour)!=2 :
                            ##print a
                            for index in range(len(tour) - 1):
                                ##print tour
                                ##print index
                                dist = self.add(tour[index], tour[index + 1], tour1[i])
                                #print dist
                                ##print dist
                                ratio = c*dist + d*(a+dist)
                                if ratio < best_ratio and (tourslength[tours.index(tour)]+dist)<self.Q:
                                    best_dist = dist
                                    w1=w
                                    s1=s
                                    new_tour = tour[:index + 1] + [tour1[i]] + tour[index + 1:]
                                    tour_index = tours.index(tour)
                                    best_ratio = c*best_dist + d*(a+dist)
                                    #print best_dist,"fff"
                    #print new_tour,best_ratio
                    #print c*best_dist + d*(b)
                    # Apply the relocate only if it strictly improves on
                    # the stay-put cost.
                    if best_ratio < c*h + d*(b):
                        ##print veh
                        tours[tour_index]=new_tour
                        #print tours[tour_index]
                        tourslength[tour_index]+= best_dist
                        veh[w1][s1]=new_tour
                        vehlength[w1]+=best_dist
                        ##print veh
                        tourslength[tours.index(tour1)]-=self.add(tour1[i-1], tour1[i + 1], tour1[i])
                        ##print o,i
                        ##print vehlength[o]
                        vehlength[o]-=self.add(tour1[i-1], tour1[i + 1], tour1[i])
                        #print veh
                        #print tour1
                        veh[o][p].remove(tour1[i])
                        #print tour1
                        #print veh
                        # Tour shrank to a bare 2-node leg: remove its cost too.
                        if (len(tour1)==2):
                            vehlength[o]-=self.distances[tour1[0]][tour1[1]]
                        #tour1.remove(tour1[i])
                        #print veh
                    else:
                        i+=1
            ##print self.distances    ##print(i)
        return tours, tourslength,veh,vehlength
def closest_city(self,unass,city):
best_dist=float('inf')
closest=None
for x in unass:
dist=self.distances[city][x]
#print dist,x
if dist<best_dist:
best_dist=dist
#print "s"
closest=x
return closest,best_dist
def greedy(self):
denum=8
c=1
d=0
gr=self.grid
total_len=0
prevcdep=None
same_y=[]
a=len(self.coords)
tourslength=[]
tours=[]
tour=[0]
tours.append(tour)
tourslength.append(0)
unass = self.cities[denum:a]
f=None
while len(unass)!=0:
#print tours
#print tourslength
if len(tours[-1])==1:
x,best_dist=self.closest_city(unass,tours[-1][0])
#print x
tours[-1].append(x)
unass.remove(x)
tourslength[-1]+=best_dist
else:
a=self.coords[tours[-1][-1]][0]
bb=self.coords[tours[-1][-1]][1]
best_dist=float('inf')
city1=None
for city in unass:
if self.coords[city][1]==bb:
same_y.append(city)
for i in range(0,len(same_y)):
for j in range(i+1,len(same_y)):
if self.distances[same_y[j]][tours[-1][-1]]<self.distances[same_y[i]][tours[-1][-1]]:
a=same_y[i]
b=same_y[j]
same_y[i]=b
same_y[j]=a
#print same_y
#print same_y,unass
while len(same_y)!=0:
#print same_y
"""
best_dist=float('inf')
city1=None
for city in same_y:
dist=self.distances[city][tours[-1][-1]]
#print "s",tours[-1],city,dist
if dist<best_dist:
city1=city
best_dist=dist
"""
best_dist=self.distances[same_y[0]][tours[-1][-1]]
city1=same_y[0]
best_dep=float('inf')
best_ratio=float('inf')
cdep=None
for i in range(0,denum):
mindep=abs(self.coords[tours[-1][0]][0]-self.coords[i][0])+abs(self.coords[tours[-1][0]][1]-self.coords[i][1])
dep=self.distances[city1][i]
ratio = c*dep+d*mindep
if ratio<best_ratio:
best_dep=dep
cdep=i
best_ratio=ratio
if self.coords[city1][0]==bb and tourslength[-1]>self.Q-self.sqr:
f=True
else:
f= False
print f
if best_dist+tourslength[-1]+best_dep<self.Q:
#print city1
tours[-1].append(city1)
unass.remove(city1)
same_y.remove(city1)
tourslength[-1]+=best_dist
else:
tours[-1].append(prevcdep)
tourslength[-1]+=prevbest_dep
tours.append([prevcdep])
tourslength.append(0)
if f==True:
tours[-1].append(cdep)
tourslength[-1]+=best_dep
tours.append([cdep])
tourslength.append(0)
prevcdep=cdep
prev_bestdep=best_dep
if city1==None:
for city in unass:
if self.coords[city][0]==a:
dist=self.distances[city][tours[-1][-1]]
if dist<best_dist:
city1=city
best_dist=dist
best_dep=float('inf')
best_ratio=float('inf')
cdep=None
for i in range(0,denum):
mindep=abs(self.coords[tours[-1][0]][0]-self.coords[i][0])+abs(self.coords[tours[-1][0]][1]-self.coords[i][1])
dep=self.distances[city1][i]
ratio = c*dep+d*mindep
if ratio<best_ratio:
best_dep=dep
cdep=i
best_ratio=ratio
if self.coords[city1][0]==bb and tourslength[-1]>self.Q-self.sqr:
f=True
else:
f= False
print f,self.coords[city1][0]
if best_dist+tourslength[-1]+best_dep<self.Q:
tours[-1].append(city1)
unass.remove(city1)
tourslength[-1]+=best_dist
else:
tours[-1].append(prevcdep)
tourslength[-1]+=prevbest_dep
tours.append([prevcdep])
tourslength.append(0)
if f==True:
tours[-1].append(cdep)
tourslength[-1]+=best_dep
tours.append([cdep])
tourslength.append(0)
prevcdep=cdep
prevbest_dep=best_dep
if len(unass)==0:
tours[-1].append(prevcdep)
tourslength[-1]+=prevbest_dep
tours.append([prevcdep])
tourslength.append(0)
xx=[]
yy=[]
for i in self.cities:
xx.append(self.coords[i][0]+dist/2)
yy.append(self.coords[i][1]+dist/2)
for i in range(0,len(tourslength)):
total_len+=tourslength[i]
return tours,tourslength,total_len,xx,yy
    def plot (self,tours):
        """Draw every multi-stop tour as coloured segments (cycling
        through 4 matplotlib colours) and mark every node with a red
        square. Does not call plt.show(); the caller controls display.
        """
        b = ['r','b','g','c']
        j=0
        for tour in tours:
            if len(tour)!=2:
                for i in range (0,len(tour)-1):
                    if i != len(self.coords)-1:
                        plt.plot([self.coords[tour[i]][0], self.coords[tour[i+1]][0]],[self.coords[tour[i]][1],self.coords[tour[i+1]][1]], b[j])
                        #plt.show(block=False)
                # Cycle to the next colour per tour.
                if j<3:
                    j+=1
                else:
                    j=0
        x=[]
        y=[]
        # NOTE(review): `c` marker list is built but unused; markers are
        # hard-coded to 'rs' below.
        c=['bs','rs','gs','cs','ms','rs','gs','cs','ms']
        for i in range(0,len(self.coords)):
            x.append(self.coords[i][0])
            y.append(self.coords[i][1])
            plt.plot(self.coords[i][0],self.coords[i][1],'rs')
        #plt.show()
# --- Mission import: parse an existing QGC WPL 110 mission file to get
# the survey origin and heading. Columns in the tab-separated file:
# the 9th and 10th tab-delimited fields are latitude and longitude.
xxx= 'QGC WPL 110\r\n'
import utm
#from math import *
import numpy as np
file = open("mission.txt","r")
a=file.readlines()
file.close()
lat=[]
lon=[]
#xx+=a[2]
if a[1][1]=='\t':
    print "s"
j=0
print a
index=None
for k in a:
    # Skip the "QGC WPL 110" header line.
    if a.index(k)!=0:
        j=0
        # 's' sentinel prefix is stripped with [1:] before float().
        lat1='s'
        lon1='s'
        # Advance to the 8th tab (start of the latitude field).
        for i in range (0,len(k)):
            if k[i]=='\t':
                j+=1
                print j
            if j==8:
                index=i
                break
        # Collect latitude chars up to the next tab.
        for i in range(index+1,len(k)):
            if k[i]=='\t':
                index=i
                break
            lat1+=k[i]
        # Collect longitude chars up to the following tab.
        for i in range(index+1,len(k)):
            if k[i]=='\t':
                #index=i
                break
            lon1+=k[i]
        print k
        print index
        lat.append(float(lat1[1:]))
        lon.append(float(lon1[1:]))
print lat
print lon
# First two waypoints define the survey origin (e1, n1), UTM zone
# (aa, bb), grid heading `angle` and waypoint spacing `dist`.
e2,n2,aa,bb = utm.from_latlon(lat[1],lon[1])
e1,n1,_,_ = utm.from_latlon(lat[0],lon[0])
angle= atan2(n2-n1,e2-e1)
dist=np.hypot(e2-e1,n2-n1)
def takeoff(lat,lon):
    """QGC WPL 110 line for a TAKEOFF item (MAV_CMD 22) at (lat, lon), alt 20."""
    fields = ['', '0', '3', '22', '0', '5', '0', '0', str(lat), str(lon), '20', '1']
    return '\t'.join(fields) + '\r\n'
def waypoint(lat,lon):
    """QGC WPL 110 line for a NAV_WAYPOINT item (MAV_CMD 16) at (lat, lon), alt 20."""
    fields = ['', '0', '3', '16', '0', '5', '0', '0', str(lat), str(lon), '20', '1']
    return '\t'.join(fields) + '\r\n'
def land(lat,lon):
    """QGC WPL 110 line for a NAV_LAND item (MAV_CMD 21) at (lat, lon)."""
    fields = ['', '0', '3', '21', '0', '5', '0', '0', str(lat), str(lon), '20', '1']
    return '\t'.join(fields) + '\r\n'
def utm1(x,y,e1,n1,angle):
    """Convert local survey coordinates (x, y) to (lat, lon).

    Rotates by `angle` (the survey grid heading), translates by the UTM
    origin (e1, n1), then converts back to geographic coordinates.
    NOTE(review): relies on module-level globals `aa`/`bb` (UTM zone
    number/letter from the mission-file parse above) -- confirm they
    are set before any call.
    """
    x1=x*cos(angle)-y*sin(angle)
    y1=x*sin(angle)+y*cos(angle)
    x1+=e1
    y1+=n1
    #print x1,y1
    lat,lon= utm.to_latlon(x1,y1,aa,bb)
    return lat,lon
# --- Driver: build routes with the greedy heuristic, plot them, then
# emit a QGC WPL 110 mission file (takeoff / waypoints / land per tour).
print dist
#r= BaseAlgorithm()
x= TourConstructionHeuristics(dist=dist,grid=20,comm=dist/2,Q=300)
#tours, lengths,veh,vehlength = x.cheapest_insertion()
tours, lengths,total_len,xx,yy = x.greedy()
#tours, lengths,veh,vehlength = x.nearest_insertion()
#print veh
print tours
print lengths
#print total_len
#print vehlength
t=1
b = ['r','b','g','c','m','y']
j=0
#for i in range(0,t):
x.plot(tours)
#j+=1
plt.show()
# Serialize: first node of each tour -> takeoff, last -> land,
# everything in between -> plain waypoint. `k` is the running item id.
k=0
for i in tours:
    for j in range(0,len(i)):
        if j==0:
            lat,lon=utm1(xx[i[j]],yy[i[j]],e1,n1,angle)
            xxx+=str(k)+takeoff(lat,lon)
            k+=1
        elif j== len(i)-1:
            lat,lon=utm1(xx[i[j]],yy[i[j]],e1,n1,angle)
            xxx+=str(k)+land(lat,lon)
            k+=1
        else:
            lat,lon=utm1(xx[i[j]],yy[i[j]],e1,n1,angle)
            xxx+=str(k)+waypoint(lat,lon)
            k+=1
file=open("mission1.txt","w")
file.write(xxx)
file.close()
# NOTE(review): disabled experiments with samedis() post-optimization,
# kept as bare string literals below.
"""
tours1, lengths1,veh1,vehlength1 = x.samedis(tours,lengths,veh,vehlength)
print tours1
print lengths1
print veh1
print vehlength1
b = ['r','b','g','c','m','y']
for i in range(0,t):
    x.plot(veh1[i])
plt.show()
tours,lengths,total=x.greedy()
print tours
print lengths
print total
total=[total]
veh=[tours]
x.plot(tours)
plt.show()
tours1, lengths1,veh1,vehlength1 = x.samedis(tours,lengths,veh,total)
print tours1
print lengths1
print veh1
print vehlength1
b = ['r','b','g','c','m','y']
for i in range(0,1):
    x.plot(veh1[i])
plt.show()
"""
"""
mission planer
qground control
dronecode"""
# -*- coding:utf8 -*-
import gspread
import httplib2
import numpy as np
from collections import defaultdict
from flask import jsonify
from . import education_bp as education
from .drive import get_file_list, get_credentials_from_file
from apiclient import discovery
from oauth2client.service_account import ServiceAccountCredentials
from flask_cors import cross_origin
from main import db
from models import (SurveyCategory, SurveyWRSSummary,
SurveyWRSTeachingSummary, FollowUpSummary,
AcademicProgram, EvaluationSummary,
WRSEdpexScore, WRSEdpexTopic,
SatisfactionScore, MTLicenseExam, RTLicenseExam,
MTJobEmployed, RTJobEmployed)
@education.route('/gdrive/files/')
@cross_origin()
def get_gdrive_file_list():
    """List up to 10 Google Drive files (id, name, parents) as JSON."""
    cred = get_credentials_from_file() # get_credentials func cannot run inside flask this way
    http = cred.authorize(httplib2.Http())
    service = discovery.build('drive', 'v3', http=http)
    results = service.files().list(
        pageSize=10,
        fields="nextPageToken, files(id, name, parents)").execute()
    items = results.get('files', [])
    if not items:
        return jsonify({'files': [], 'message': 'No files found.'})
    files = [
        {'id': item['id'], 'name': item['name'], 'parents': item.get('parents', '')}
        for item in items
    ]
    return jsonify({'files': files})
@education.route('/gdrive/wrs/update/')
def udpate_wrs():
    '''Load data from Wellrounded scholar spreadsheet to DB.

    One spreadsheet per academic year (filename "<year>.*"). For each new
    year: read prior/post self-assessment columns 13-24 and the
    13 teaching-method blocks (cols 35-112, 6 questions each), average
    them and persist SurveyWRSSummary / SurveyWRSTeachingSummary rows.

    FIX(review): the Drive permission fallback sent role 'onwer', which
    is not a valid Drive API role ('owner' / 'writer' / 'reader' ...),
    so the batch grant always failed -- corrected to 'owner'.
    '''
    cred = get_credentials_from_file() # get_credentials func cannot run
    # inside flask this way
    http = cred.authorize(httplib2.Http())
    service = discovery.build('drive', 'v3', http=http)
    folder_id = '0B45WRw4HPnk_T3ctb3Q1eHRhczA'
    files = get_file_list(folder_id, cred)
    service_key_file = 'api/AcademicAffairs-420cd46d6400.json'
    scope = ['https://spreadsheets.google.com/feeds']
    gc_credentials = \
        ServiceAccountCredentials.from_json_keyfile_name(service_key_file, scope)
    gc = gspread.authorize(gc_credentials)
    records = {}
    for f in files:
        file_name, file_id = f['name'], f['id']
        year = file_name.split('.')[0]
        # Idempotency: skip years already imported.
        if SurveyWRSSummary.query.filter_by(year=year).first():
            print('Data of year {} already exists'.format(year))
            continue
        print('Loading data from file: {}'.format(file_name))
        try:
            wks = gc.open_by_key(file_id).sheet1
        except:
            # Access denied: grant the service account permission on the
            # file, then retry once.
            batch = service.new_batch_http_request()
            user_permission = {
                'type': 'user',
                'role': 'owner',  # FIX: was 'onwer' (invalid Drive role)
                'emailAddress': 'academic-affairs-mumt@academic-affairs.iam.gserviceaccount.com'
            }
            batch.add(service.permissions().create(
                fileId=file_id,
                body=user_permission,
                fields='id',
            ))
            batch.execute()
            wks = gc.open_by_key(file_id).sheet1
        else:
            wrs_category = \
                SurveyCategory.query.filter_by(name="wellrounded scholar").first()
            wks = gc.open_by_key(file_id).sheet1
            # Columns 13-18: self-assessment BEFORE the program;
            # columns 19-24: AFTER. Row 1 is the header.
            prior_knowledge = wks.col_values(13)[1:]
            prior_prof_skill = wks.col_values(14)[1:]
            prior_creativity = wks.col_values(15)[1:]
            prior_analysis = wks.col_values(16)[1:]
            prior_leadership = wks.col_values(17)[1:]
            prior_social_resp = wks.col_values(18)[1:]
            post_knowledge = wks.col_values(19)[1:]
            post_prof_skill = wks.col_values(20)[1:]
            post_creativity = wks.col_values(21)[1:]
            post_analysis = wks.col_values(22)[1:]
            post_leadership = wks.col_values(23)[1:]
            post_social_resp = wks.col_values(24)[1:]
            learning_methods = ['lecture', 'lab', 'buzzgroup', 'casestudy',
                                'discussion', 'roleplay', 'workgroup',
                                'fieldtrip', 'community', 'pbl',
                                'transformative', 'project', 'intern'
                                ]
            # Teaching-method blocks: 6 consecutive question columns per
            # method, starting at column 35.
            j = 0
            teaching_wrs_results = {}
            for i in range(35,113,6):
                lm = learning_methods[j]
                teaching_wrs_results[lm] = defaultdict(dict)
                teaching_wrs_results[lm]['knowledge'] = wks.col_values(i)[1:]
                teaching_wrs_results[lm]['prof_skill'] = wks.col_values(i+1)[1:]
                teaching_wrs_results[lm]['creativity'] = wks.col_values(i+2)[1:]
                teaching_wrs_results[lm]['analysis'] = wks.col_values(i+3)[1:]
                teaching_wrs_results[lm]['leadership'] = wks.col_values(i+4)[1:]
                teaching_wrs_results[lm]['socialresp'] = wks.col_values(i+5)[1:]
                j += 1
            # Find the first fully-blank row: everything below is padding.
            i = 0
            while True:
                if(prior_knowledge[i] == '' and prior_prof_skill[i] == '' and
                        prior_creativity[i] == '' and prior_analysis[i] == '' and
                        prior_leadership[i] == '' and prior_social_resp[i] == ''):
                    break
                i += 1
            prior_knowledge = [int(d) for d in prior_knowledge[:i]]
            prior_prof_skill = [int(d) for d in prior_prof_skill[:i]]
            prior_creativity = [int(d) for d in prior_creativity[:i]]
            prior_analysis = [int(d) for d in prior_analysis[:i]]
            prior_leadership = [int(d) for d in prior_leadership[:i]]
            prior_social_resp = [int(d) for d in prior_social_resp[:i]]
            post_knowledge = [int(d) for d in post_knowledge[:i]]
            post_prof_skill = [int(d) for d in post_prof_skill[:i]]
            post_creativity = [int(d) for d in post_creativity[:i]]
            post_analysis = [int(d) for d in post_analysis[:i]]
            post_leadership = [int(d) for d in post_leadership[:i]]
            post_social_resp = [int(d) for d in post_social_resp[:i]]
            # Same truncation for teaching-method answers (blanks dropped).
            for lm,res in teaching_wrs_results.iteritems():
                for k,v in res.iteritems():
                    #print('before ', len(teaching_wrs_results[lm][k]))
                    teaching_wrs_results[lm][k] = [int(d) for d in v[:i] if d != '']
                    #print('after ', len(teaching_wrs_results[lm][k]))
            prior = {
                'knowledge': np.mean(prior_knowledge),
                'prof_skill': np.mean(prior_prof_skill),
                'creativity': np.mean(prior_creativity),
                'analysis': np.mean(prior_analysis),
                'leadership': np.mean(prior_leadership),
                'socialresp': np.mean(prior_social_resp)
            }
            post = {
                'knowledge': np.mean(post_knowledge),
                'prof_skill': np.mean(post_prof_skill),
                'creativity': np.mean(post_creativity),
                'analysis': np.mean(post_analysis),
                'leadership': np.mean(post_leadership),
                'socialresp': np.mean(post_social_resp)
            }
            for k,v in prior.iteritems():
                a = SurveyWRSSummary(category_id=wrs_category.id,
                        question=k, value=str(v), year=year, post=False)
                db.session.add(a)
            for k,v in post.iteritems():
                a = SurveyWRSSummary(category_id=wrs_category.id,
                        question=k, value=str(v), year=year, post=True)
                db.session.add(a)
            for lm,res in teaching_wrs_results.iteritems():
                for k,v in res.iteritems():
                    a = SurveyWRSTeachingSummary(category_id=wrs_category.id,
                            question=k, method=lm, year=year,
                            value=str(np.mean(teaching_wrs_results[lm][k])))
                    db.session.add(a)
        db.session.commit()
    return jsonify({'status': 'success'}), 200
@education.route('/wrs/results/development/')
@cross_origin()
def get_wrs_results():
    """Return prior/post WRS survey summaries grouped by year as JSON."""
    category = \
        SurveyCategory.query.filter_by(name="wellrounded scholar").first()
    data = []
    year_rows = (SurveyWRSSummary.query
                 .with_entities(SurveyWRSSummary.year)
                 .filter_by(category_id=category.id)
                 .order_by(SurveyWRSSummary.year)
                 .distinct())
    for (year,) in year_rows:
        records = SurveyWRSSummary.query.filter_by(category_id=category.id,
                                                   year=year)
        results = [{'question': r.question, 'value': r.value, 'post': r.post}
                   for r in records]
        data.append({'year': year, 'results': results})
    return jsonify({'data': data})
@education.route('/wrs/results/teaching/')
@cross_origin()
def get_wrs_teaching_results():
    """Return WRS teaching-method survey summaries per year/method as JSON."""
    category = \
        SurveyCategory.query.filter_by(name="wellrounded scholar").first()
    learning_methods = ['lecture', 'lab', 'buzzgroup', 'casestudy',
                        'discussion', 'roleplay', 'workgroup',
                        'fieldtrip', 'community', 'pbl',
                        'transformative', 'project', 'intern'
                        ]
    data = []
    year_rows = (SurveyWRSTeachingSummary.query
                 .with_entities(SurveyWRSTeachingSummary.year)
                 .filter_by(category_id=category.id)
                 .order_by(SurveyWRSTeachingSummary.year)
                 .distinct())
    for (year,) in year_rows:
        year_entry = {'year': year, 'results': []}
        for method in learning_methods:
            records = SurveyWRSTeachingSummary.query.filter_by(
                category_id=category.id, year=year, method=method)
            answers = [{'question': r.question, 'value': r.value}
                       for r in records]
            year_entry['results'].append({
                'method': method,
                'results': sorted(answers, key=lambda x: x['question']),
            })
        data.append(year_entry)
    return jsonify({'data': data})
@education.route('/gdrive/followup/update/')
def udpate_followup():
    '''Load data from follow up spreadsheet to DB'''
    # One spreadsheet per program/year, named "<program>_<year>.*".
    # Computes the post-graduation employment rate and stores one
    # FollowUpSummary row per file; already-imported years are skipped.
    cred = get_credentials_from_file() # get_credentials func cannot run
    # inside flask this way
    http = cred.authorize(httplib2.Http())
    service = discovery.build('drive', 'v3', http=http)
    folder_id = '0BxLCeg0VgIlYcEhodkxpTEpwTVE'
    files = get_file_list(folder_id, cred)
    service_key_file = 'api/AcademicAffairs-420cd46d6400.json'
    scope = ['https://spreadsheets.google.com/feeds']
    gc_credentials = \
        ServiceAccountCredentials.from_json_keyfile_name(service_key_file, scope)
    gc = gspread.authorize(gc_credentials)
    records = {}
    for f in files:
        file_name, file_id = f['name'], f['id']
        program_abbr, year = file_name.split('.')[0].split('_')
        program = AcademicProgram.query.filter_by(program_title_abbr=program_abbr.lower()).first()
        print(program.id, program_abbr)
        if FollowUpSummary.query.filter_by(survey_year=year).filter_by(program_id=program.id).first():
            print('Data of year {} already exists'.format(year))
            continue
        print('Loading data from file: {} {}'.format(file_name, file_id))
        try:
            wks = gc.open_by_key(file_id).sheet1
        except:
            # NOTE(review): bare except silently skips unreadable sheets.
            print('Error!')
            continue
        else:
            col_no = 10 # employment status for MT from 2558 onwards
            if program.id == 2 and year == '2557':
                col_no = 6 # employment status for RT
            empl_data = wks.col_values(col_no)[1:]
            # Count respondents whose (Thai) status text means employed,
            # working, or continuing study; free-text variants abound.
            employed = [e for e in empl_data if e.startswith(u'ได้งานทำ')
                            or e.startswith(u'ทำงาน')
                            or e.startswith(u'ศึกษาต่อ')
                            or e.startswith(u'ทำงานแล้ว')
                            or e.startswith(u'กำลังศึกษา')]
            # Rate over non-blank answers only.
            empl_rate = len(employed) / float(len([d for d in empl_data if d != '']))
            print(program_abbr, year, empl_rate, len(employed))
            a = FollowUpSummary(program_id=program.id,
                    post_grad_employment_rate=empl_rate,survey_year=year)
            db.session.add(a)
    db.session.commit()
    return jsonify({'status': 'success'})
@education.route('/followup/results/')
@cross_origin()
def get_followup_result():
    """Return every follow-up summary as JSON: year, employment rate, program."""
    rows = (db.session.query(FollowUpSummary.survey_year,
                             FollowUpSummary.post_grad_employment_rate,
                             AcademicProgram.program_title_abbr)
            .join(AcademicProgram)
            .all())
    payload = [{'year': year, 'rate': rate, 'program': abbr}
               for year, rate, abbr in rows]
    return jsonify(payload)
def _avg_section(wks, start, stop):
    """Return the unweighted mean of per-column rating means for columns [start, stop).

    Each worksheet column holds integer ratings; the header row is skipped
    and blank cells are ignored.  Column means are averaged without weighting
    by response count, matching the original summary calculation.
    """
    col_means = []
    for col in range(start, stop):
        ratings = [int(d) for d in wks.col_values(col)[1:] if d != '']
        col_means.append(np.mean(ratings))
    return np.mean(col_means)


@education.route('/gdrive/evaluation/update/')
def udpate_evaluation():
    '''Load graduate-evaluation survey data from Google Drive into the DB.

    For every spreadsheet named "<program_abbr>_<year>" in the evaluation
    folder, averages each question group (columns listed in `sections`) and
    stores one EvaluationSummary row.  Existing program/year rows are skipped.

    Returns:
        JSON {'status': 'success'} when the import loop completes.
    '''
    # NOTE: get_credentials() cannot run inside Flask this way, so the
    # credentials are loaded from a file instead.
    cred = get_credentials_from_file()
    http = cred.authorize(httplib2.Http())
    service = discovery.build('drive', 'v3', http=http)
    folder_id = '0B45WRw4HPnk_bWhadnNxcDdQWm8'  # evaluation folder
    files = get_file_list(folder_id, cred)
    service_key_file = 'api/AcademicAffairs-420cd46d6400.json'
    scope = ['https://spreadsheets.google.com/feeds']
    gc_credentials = \
        ServiceAccountCredentials.from_json_keyfile_name(service_key_file, scope)
    gc = gspread.authorize(gc_credentials)
    # Column ranges [start, stop) of each question group on the sheet;
    # keys match the EvaluationSummary constructor keyword arguments.
    sections = {
        'avg_morals': (22, 29),
        'avg_knowledge': (29, 36),
        'avg_thinking': (36, 40),
        'avg_relation': (40, 47),
        'avg_analytics': (47, 52),
        'avg_professional': (52, 55),
        'avg_identity': (55, 58),
    }
    for f in files:
        print(f['name'])
        file_name, file_id = f['name'], f['id']
        try:
            program_abbr, year = file_name.split('.')[0].split('_')
        except ValueError:  # filename does not match "<abbr>_<year>"
            print('Invalid filename. Skipped.')
            continue
        print('Loading data from file: {} {}'.format(file_name, file_id))
        program = AcademicProgram.query.filter_by(program_title_abbr=program_abbr.lower())\
            .filter_by(level='undergraduate').first()
        e = EvaluationSummary.query.filter_by(survey_year=year)\
            .filter_by(program_id=program.id).first()
        if e:
            print('Data of the year {} already exists.'.format(year))
            continue
        try:
            wks = gc.open_by_key(file_id).sheet1
        except Exception:  # narrowed from a bare except; keep best-effort skip
            print('Error!')
            continue
        averages = {name: _avg_section(wks, start, stop)
                    for name, (start, stop) in sections.items()}
        # Overall satisfaction is a single column (58).
        overall = [int(d) for d in wks.col_values(58)[1:] if d != '']
        a = EvaluationSummary(survey_year=year,
                              avg_overall=np.mean(overall),
                              program_id=program.id,
                              **averages)
        db.session.add(a)
        db.session.commit()
    return jsonify({'status': 'success'})
@education.route('/evaluation/results/')
@cross_origin()
def get_evaluation_result():
    """Return every evaluation summary joined with its program level/abbr."""
    columns = (EvaluationSummary.survey_year,
               EvaluationSummary.avg_analytics,
               EvaluationSummary.avg_identity,
               EvaluationSummary.avg_knowledge,
               EvaluationSummary.avg_morals,
               EvaluationSummary.avg_professional,
               EvaluationSummary.avg_relation,
               EvaluationSummary.avg_thinking,
               EvaluationSummary.avg_overall,
               AcademicProgram.level,
               AcademicProgram.program_title_abbr)
    rows = db.session.query(*columns).join(AcademicProgram).all()
    payload = []
    for (year, analytics, identity, knowledge, morals, professional,
         relation, thinking, overall, level, abbr) in rows:
        payload.append({
            'year': year,
            'program': abbr.upper(),
            'level': level,
            'avg_analytics': analytics,
            'avg_identity': identity,
            'avg_knowledge': knowledge,
            'avg_morals': morals,
            'avg_professional': professional,
            'avg_relation': relation,
            'avg_thinking': thinking,
            'avg_overall': overall,
        })
    return jsonify(payload)
@education.route('/evaluation/edpex/wrs/load/')
@cross_origin()
def load_edpex_wrs_results():
    '''Load WRS EdPEx scores from Google Drive spreadsheets into MongoDB.

    Reads worksheet 5 of each file in the EdPEx folder and appends one
    yearly WRSEdpexScore to each of the five WRSEdpexTopic documents.

    Returns:
        An empty JSON list.
    '''
    # NOTE: get_credentials() cannot run inside Flask this way, so the
    # credentials are loaded from a file instead.
    cred = get_credentials_from_file()
    http = cred.authorize(httplib2.Http())
    service = discovery.build('drive', 'v3', http=http)
    folder_id = '0B45WRw4HPnk_YlhxbjFJeDVoWk0'
    files = get_file_list(folder_id, cred)
    service_key_file = 'api/AcademicAffairs-420cd46d6400.json'
    scope = ['https://spreadsheets.google.com/feeds']
    gc_credentials = \
        ServiceAccountCredentials.from_json_keyfile_name(service_key_file, scope)
    gc = gspread.authorize(gc_credentials)
    # Topic order matches the score rows (4-8) on the worksheet.
    topic_slugs = ["professional", "creativity", "analytical",
                   "leadership", "social_resp"]
    topics = {slug: WRSEdpexTopic.objects(slug=slug).first()
              for slug in topic_slugs}
    for f in files:
        file_name, file_id = f['name'], f['id']
        print('Loading data from file: {} {}'.format(file_name, file_id))
        try:
            wks = gc.open_by_key(file_id).get_worksheet(4)
        except Exception:  # narrowed from a bare except; keep best-effort skip
            print('Error!')
            continue
        # Columns 2-5 each hold one survey year: the year label is in row 3
        # and the five topic scores in rows 4-8.
        for idx in range(2, 6):
            year = int(wks.col_values(idx)[2].split()[-1])
            scores = wks.col_values(idx)[3:8]
            # BUG FIX: originally only the first score was converted to
            # float; the other four were stored as raw strings.
            for slug, raw in zip(topic_slugs, scores):
                topics[slug].scores.append(
                    WRSEdpexScore(score=float(raw), year=year))
        for topic in topics.values():
            topic.save()
    return jsonify([])
@education.route('/evaluation/edpex/wrs/')
@cross_origin()
def get_edpex_wrs_results():
    """Return every WRS EdPEx topic with its yearly scores and description."""
    slugs = ("professional", "creativity", "analytical",
             "leadership", "social_resp")
    payload = []
    for slug in slugs:
        topic = WRSEdpexTopic.objects(slug=slug).first()
        payload.append({
            'slug': slug,
            'scores': [{'year': s.year, 'score': s.score}
                       for s in topic.scores],
            'desc': topic.desc,
        })
    return jsonify(payload)
@education.route('/evaluation/edpex/satisfaction/load/')
@cross_origin()
def load_edpex_satisfaction_results():
    '''Load yearly satisfaction goals/scores from Google Drive into MongoDB.

    Reads worksheet 11 (rows 4-11: year, goal, score columns) of each file
    in the EdPEx folder and saves one SatisfactionScore document per year.

    Returns:
        JSON {'response': 'success'}.
    '''
    # NOTE: get_credentials() cannot run inside Flask this way.
    cred = get_credentials_from_file()
    http = cred.authorize(httplib2.Http())
    service = discovery.build('drive', 'v3', http=http)
    folder_id = '0B45WRw4HPnk_YlhxbjFJeDVoWk0'
    files = get_file_list(folder_id, cred)
    service_key_file = 'api/AcademicAffairs-420cd46d6400.json'
    scope = ['https://spreadsheets.google.com/feeds']
    gc_credentials = \
        ServiceAccountCredentials.from_json_keyfile_name(service_key_file, scope)
    gc = gspread.authorize(gc_credentials)
    for f in files:
        # TODO: specify the exact file ID instead of scanning the folder
        file_name, file_id = f['name'], f['id']
        print('Loading data from file: {} {}'.format(file_name, file_id))
        try:
            wks = gc.open_by_key(file_id).get_worksheet(10)
        except Exception:  # narrowed from a bare except; keep best-effort skip
            print('Error!')
            continue
        years = wks.col_values(1)[3:11]
        goals = wks.col_values(2)[3:11]
        scores = wks.col_values(3)[3:11]
        # zip guards against ragged columns (the original indexed by position
        # and would raise IndexError on a short goal/score column).
        for year, goal, score in zip(years, goals, scores):
            SatisfactionScore(year=int(year), goal=float(goal),
                              score=float(score)).save()
    return jsonify(response="success")
@education.route('/evaluation/edpex/satisfaction/')
@cross_origin()
def get_edpex_satisfaction_results():
    """Return all satisfaction records as a JSON list of year/goal/score."""
    return jsonify([
        {'year': rec.year, 'goal': rec.goal, 'score': rec.score}
        for rec in SatisfactionScore.objects
    ])
def _save_license(model, year, institute, program, values, i):
    """Persist one license-exam record; a missing or non-numeric pass rate
    is stored as percent=None (mirroring the original fallback branches)."""
    try:
        percent = float(values[i])
    except (IndexError, TypeError, ValueError):
        percent = None
    model(year=int(year), institute=institute, percent=percent,
          program=program).save()


@education.route('/evaluation/edpex/license/load/')
@cross_origin()
def load_edpex_license_results():
    '''Load MT/RT license-exam pass rates from Google Drive into MongoDB.

    Reads worksheet 15 of each file in the EdPEx folder: MT institutes in
    rows 3-10 and RT institutes in rows 16-22.

    Returns:
        JSON {'response': 'success'}.
    '''
    # NOTE: get_credentials() cannot run inside Flask this way.
    cred = get_credentials_from_file()
    http = cred.authorize(httplib2.Http())
    service = discovery.build('drive', 'v3', http=http)
    folder_id = '0B45WRw4HPnk_YlhxbjFJeDVoWk0'
    files = get_file_list(folder_id, cred)
    service_key_file = 'api/AcademicAffairs-420cd46d6400.json'
    scope = ['https://spreadsheets.google.com/feeds']
    gc_credentials = \
        ServiceAccountCredentials.from_json_keyfile_name(service_key_file, scope)
    gc = gspread.authorize(gc_credentials)
    for f in files:
        # TODO: specify the exact file ID instead of scanning the folder
        file_name, file_id = f['name'], f['id']
        print('Loading data from file: {} {}'.format(file_name, file_id))
        try:
            wks = gc.open_by_key(file_id).get_worksheet(14)
        except Exception:  # narrowed from a bare except; keep best-effort skip
            print('Error!')
            continue
        years = wks.col_values(1)[2:10]
        mumt = wks.col_values(2)[2:10]
        mtkku = wks.col_values(3)[2:10]
        mtcmu = wks.col_values(4)[2:10]
        rtyears = wks.col_values(1)[15:22]
        murt = wks.col_values(2)[15:22]
        rtcmu = wks.col_values(3)[15:22]
        for i in range(len(years)):
            _save_license(MTLicenseExam, years[i], "MUMT", "MT", mumt, i)
            _save_license(MTLicenseExam, years[i], "MT-KKU", "MT", mtkku, i)
            _save_license(MTLicenseExam, years[i], "MT-CMU", "MT", mtcmu, i)
        # BUG FIX: the original RT loop indexed `years[i]` instead of
        # `rtyears[i]`, misspelled the keyword as `instistute` in two
        # fallback branches (a TypeError if ever reached), and labelled the
        # MURT fallback record "MUMT".
        for i in range(len(rtyears)):
            _save_license(RTLicenseExam, rtyears[i], "MURT", "RT", murt, i)
            _save_license(RTLicenseExam, rtyears[i], "RT-CMU", "RT", rtcmu, i)
    return jsonify(response="success")
@education.route('/evaluation/edpex/license/')
@cross_origin()
def get_edpex_license_results():
    """Return MT and RT license-exam records grouped by program."""
    def serialize(records):
        # Each record exposes institute, year and pass percentage.
        return [{'institute': rec.institute,
                 'year': rec.year,
                 'percent': rec.percent} for rec in records]
    return jsonify(data={'mt': serialize(MTLicenseExam.objects),
                         'rt': serialize(RTLicenseExam.objects)})
def _save_employment(model, year, institute, program, values, i):
    """Persist one employment-rate record; a missing or non-numeric rate is
    stored as percent=None (mirroring the original fallback branches)."""
    try:
        percent = float(values[i])
    except (IndexError, TypeError, ValueError):
        percent = None
    model(year=int(year), institute=institute, percent=percent,
          program=program).save()


@education.route('/evaluation/edpex/employment/load/')
@cross_origin()
def load_edpex_employment_results():
    '''Load MT/RT graduate employment rates from Google Drive into MongoDB.

    Reads worksheet 16 (rows 3-9) of each file in the EdPEx folder and saves
    one record per year per institute.

    Returns:
        JSON {'status': 'success'}.
    '''
    # NOTE: get_credentials() cannot run inside Flask this way.
    cred = get_credentials_from_file()
    http = cred.authorize(httplib2.Http())
    service = discovery.build('drive', 'v3', http=http)
    folder_id = '0B45WRw4HPnk_YlhxbjFJeDVoWk0'
    files = get_file_list(folder_id, cred)
    service_key_file = 'api/AcademicAffairs-420cd46d6400.json'
    scope = ['https://spreadsheets.google.com/feeds']
    gc_credentials = \
        ServiceAccountCredentials.from_json_keyfile_name(service_key_file, scope)
    gc = gspread.authorize(gc_credentials)
    for f in files:
        # TODO: specify the exact file ID instead of scanning the folder
        file_name, file_id = f['name'], f['id']
        try:
            wks = gc.open_by_key(file_id).get_worksheet(15)
        except Exception:  # narrowed from a bare except; keep best-effort skip
            print('Error!')
            continue
        years = wks.col_values(1)[2:9]
        mtmu_data = wks.col_values(2)[2:9]
        rtmu_data = wks.col_values(3)[2:9]
        mtkku_data = wks.col_values(4)[2:9]
        mtcmu_data = wks.col_values(5)[2:9]
        rtcmu_data = wks.col_values(6)[2:9]
        # Same save order as the original copy-pasted blocks.
        for i in range(len(years)):
            _save_employment(MTJobEmployed, years[i], "MUMT", "MT", mtmu_data, i)
            _save_employment(MTJobEmployed, years[i], "MT-CMU", "MT", mtcmu_data, i)
            _save_employment(MTJobEmployed, years[i], "MT-KKU", "MT", mtkku_data, i)
            _save_employment(RTJobEmployed, years[i], "MURT", "RT", rtmu_data, i)
            _save_employment(RTJobEmployed, years[i], "RT-CMU", "RT", rtcmu_data, i)
    return jsonify(status="success")
@education.route('/evaluation/edpex/employment/')
@cross_origin()
def get_edpex_employment_results():
    """Return MT and RT employment records grouped by program."""
    def serialize(records):
        # Each record exposes institute, year and employment percentage.
        return [{'institute': rec.institute,
                 'year': rec.year,
                 'percent': rec.percent} for rec in records]
    return jsonify(data={'mt': serialize(MTJobEmployed.objects),
                         'rt': serialize(RTJobEmployed.objects)})
|
# Copyright (C) 2020 THL A29 Limited, a Tencent company.
# All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
# https://opensource.org/licenses/BSD-3-Clause
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# See the AUTHORS file for names of contributors.
import turbo_transformers
import unittest
import sys
import torch
import os
from onmt.modules.position_ffn import PositionwiseFeedForward
sys.path.append(os.path.dirname(__file__))
import test_helper
fname = "ffn.txt"
def create_test(batch_size, input_len):
    """Create and register a TestCase benchmarking ONMT's
    PositionwiseFeedForward against the turbo_transformers port.

    The generated class is injected into this module's globals so that
    unittest discovery picks it up, once per (batch_size, input_len) pair.

    Args:
        batch_size: batch dimension of the random input tensor.
        input_len: sequence length of the random input tensor.
    """
    class TestPositionwiseFeedForward(unittest.TestCase):
        def init_data(self, use_cuda):
            # Builds the reference ONMT FFN plus two turbo variants (with
            # and without transposed weights) and a shared random input.
            self.test_device = torch.device('cuda:0') if use_cuda else \
                torch.device('cpu:0')
            if not use_cuda:
                torch.set_num_threads(4)
                turbo_transformers.set_num_threads(4)
            self.model_dim = 1024
            self.d_ff = 4096
            # Inference-only benchmark: gradients are never needed.
            torch.set_grad_enabled(False)
            onmt_ffn = PositionwiseFeedForward(self.model_dim, self.d_ff)
            onmt_ffn.eval()
            if use_cuda:
                onmt_ffn.to(self.test_device)
            turbo_ffn_trans = turbo_transformers.PositionwiseFeedForward.from_onmt(
                onmt_ffn, is_trans_weight=True)
            turbo_ffn_notrans = turbo_transformers.PositionwiseFeedForward.from_onmt(
                onmt_ffn, is_trans_weight=False)
            # (batch_size, input_len, model_dim)
            inputs = torch.rand(size=(batch_size, input_len, self.model_dim),
                                dtype=torch.float32,
                                device=self.test_device)
            return onmt_ffn, turbo_ffn_trans, turbo_ffn_notrans, inputs
        def check_torch_and_turbo(self, use_cuda, num_iter=1):
            # Runs the three implementations, prints QPS/latency, asserts the
            # turbo output matches ONMT, and appends a CSV line to `fname`.
            onmt_ffn, turbo_ffn_trans, turbo_ffn_notrans, inputs = self.init_data(
                use_cuda)
            device = "GPU" if use_cuda else "CPU"
            onmt_model = lambda: onmt_ffn(inputs)
            onmt_model_result, torch_qps, torch_time_consume = \
                test_helper.run_model(onmt_model, use_cuda, num_iter)
            print(
                f"PositionwiseFeedForward \"({batch_size}, {input_len:03})\" ",
                f"{device} ONMT QPS, {torch_qps}, time, {torch_time_consume}")
            turbo_model_trans = lambda: turbo_ffn_trans(inputs,
                                                        is_trans_weight=True)
            with turbo_transformers.pref_guard("gpref_test") as perf:
                turbo_model_result, turbo_qps_trans, turbo_time_consume_trans = \
                    test_helper.run_model(turbo_model_trans, use_cuda, num_iter)
            print(
                f"PositionwiseFeedForward \"({batch_size}, {input_len:03})\" ",
                f"{device} Turbo Trans QPS, {turbo_qps_trans}, time, {turbo_time_consume_trans}"
            )
            turbo_model_notrans = lambda: turbo_ffn_notrans(
                inputs, is_trans_weight=False)
            with turbo_transformers.pref_guard("gpref_test") as perf:
                turbo_model_result, turbo_qps_notrans, turbo_time_consume_notrans = \
                    test_helper.run_model(turbo_model_notrans, use_cuda, num_iter)
            print(
                f"PositionwiseFeedForward Notrans \"({batch_size}, {input_len:03})\" ",
                f"{device} Turbo NoTrans QPS, {turbo_qps_notrans}, time, {turbo_time_consume_notrans}"
            )
            # Looser tolerance on GPU to allow for fp32 reduction-order drift.
            self.assertTrue(
                torch.max(torch.abs(turbo_model_result - onmt_model_result)) <
                (1e-3 if use_cuda else 1e-4))
            with open(fname, "a") as fh:
                fh.write(
                    f"\"({batch_size},{input_len:03})\", {torch_qps}, {turbo_qps_trans}, {turbo_qps_notrans}\n"
                )
        def test_positionwise_feed_forward(self):
            self.check_torch_and_turbo(use_cuda=False)
            if torch.cuda.is_available() and \
                    turbo_transformers.config.is_compiled_with_cuda():
                self.check_torch_and_turbo(use_cuda=True)
    # NOTE(review): `{input_len:3}` is space-padded (unlike the `:03` used in
    # the prints), so the injected global name contains spaces — confirm
    # whether zero-padding was intended.
    globals(
    )[f"TestPositionwiseFeedForward{batch_size}_{input_len:3}"] = TestPositionwiseFeedForward
# Write the CSV header once, then generate one TestCase per
# (batch_size, input_len) combination before unittest discovers them.
with open(fname, "w") as fh:
    fh.write(", torch, turbo_trans, turbo_notrans\n")
for batch_size in [4]:
    for input_len in [10, 20, 30, 40, 50]:
        create_test(batch_size, input_len)
if __name__ == '__main__':
    unittest.main()
|
"""Scratch script exploring Python's datetime module."""
import datetime

# Kept from earlier experiments that introspected loader internals.
from importlib._bootstrap import ModuleSpec
from importlib._bootstrap_external import SourceFileLoader

# Current timestamp (used in the introspection experiments above).
a = datetime.datetime.now()

# A fixed, explicit timestamp: 10 Feb 2020, 12:34:56.
b = datetime.datetime(2020, 2, 10, 12, 34, 56)
print(b)
|
from validate_command import Validate
class CommandMode:
    """Interactive REPL that validates commands and forwards them to a Tello drone."""

    def __init__(self, tello):
        # tello: controller object exposing send_command_await().
        self.tello = tello

    def command_mode(self):
        """Read commands from stdin until 'exit'; send validated ones to the drone.

        A command may carry one integer argument separated by a space
        (e.g. "up 50").  Invalid input is reported and ignored.
        """
        validate = Validate()
        while True:
            print('Please enter a command...')
            valid = False
            ext = None
            command = None
            try:
                # NOTE(review): raw_input is Python 2 only — this module
                # appears to target Python 2.
                command = raw_input().rstrip().lower()
                if ' ' in command:
                    # Split off the optional numeric argument.
                    command, ext = command.split(' ')
                    ext = int(ext)
                if command == 'exit':
                    break
                else:
                    valid, valid_command = validate.command(command, ext)
            except:
                # Any parse/validation failure marks the input invalid.
                valid = False
            if valid:
                print('Valid ' + valid_command)
                self.tello.send_command_await(valid_command)
            else:
                print('Invalid Command...')
|
import functools
import os
import sys

sys.path.insert(0, "./ADNET")  # make the ADNET package importable

import numpy as np
from PIL import Image
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import init
from torch.optim import lr_scheduler

from utils import convert  # local helper; requires the path insert above
class UnetBlock(nn.Module):
    """One level of a U-Net: a down-sampling half, an optional nested
    submodule, and the matching up-sampling half.

    Unless this is the outermost level, the input is concatenated to the
    output along the channel axis (the U-Net skip connection).
    """

    def __init__(self, outer_nc, inner_nc, input_nc=None,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UnetBlock, self).__init__()
        self.outermost = outermost
        # InstanceNorm carries no affine bias by default, so the convolutions
        # need their own bias term in that case.
        if type(norm_layer) == functools.partial:
            needs_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            needs_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        down_conv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                              stride=2, padding=1, bias=needs_bias)
        down_act = nn.LeakyReLU(0.2, True)
        up_act = nn.ReLU(True)
        if outermost:
            up_conv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                         kernel_size=4, stride=2,
                                         padding=1)
            layers = [down_conv, submodule, up_act, up_conv, nn.Tanh()]
        elif innermost:
            up_conv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                         kernel_size=4, stride=2,
                                         padding=1, bias=needs_bias)
            layers = [down_act, down_conv, up_act, up_conv,
                      norm_layer(outer_nc)]
        else:
            up_conv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                         kernel_size=4, stride=2,
                                         padding=1, bias=needs_bias)
            layers = [down_act, down_conv, norm_layer(inner_nc),
                      submodule,
                      up_act, up_conv, norm_layer(outer_nc)]
            if use_dropout:
                layers.append(nn.Dropout(0.5))
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        # The outermost level returns the decoder output directly;
        # every other level appends the skip connection.
        if self.outermost:
            return self.model(x)
        return torch.cat([x, self.model(x)], 1)
class Generator(nn.Module):
    """U-Net generator assembled by nesting UnetBlocks from the inside out.

    Args:
        input_nc: channels of the input image.
        output_nc: channels of the generated image.
        num_downs: number of 2x down-samplings; the input spatial size must
            be divisible by 2**num_downs.
        ngf: base filter count.
    """

    def __init__(self, input_nc, output_nc, num_downs, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]):
        super(Generator, self).__init__()
        self.gpu_ids = gpu_ids
        # Innermost bottleneck, then (num_downs - 5) constant-width levels...
        block = UnetBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
        for _ in range(num_downs - 5):
            block = UnetBlock(ngf * 8, ngf * 8, input_nc=None, submodule=block, norm_layer=norm_layer, use_dropout=use_dropout)
        # ...then widen back out toward the image resolution.
        for mult in (4, 2, 1):
            block = UnetBlock(ngf * mult, ngf * mult * 2, input_nc=None, submodule=block, norm_layer=norm_layer)
        self.model = UnetBlock(output_nc, ngf, input_nc=input_nc, submodule=block, outermost=True, norm_layer=norm_layer)

    def forward(self, input):
        # Optional multi-GPU split; otherwise a plain forward pass.
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
class ADModel:
    """Wrapper around the shadow-detection U-Net Generator.

    Attributes:
        net: the underlying Generator network.
        gpu: list of GPU ids used for data-parallel execution.
    """
    net = None
    gpu = []

    def __init__(self, gpu_ids=[0], load_model=None, isCPU=False):
        """Build the 3-in/1-out, 8-level Generator and optionally load weights.

        Args:
            gpu_ids: GPUs for data_parallel execution.
            load_model: path to a saved state_dict, or None.
            isCPU: when False, the network is moved to GPU 0.
        """
        self.isCPU = isCPU
        self.gpu = gpu_ids
        norm_layer = self.get_norm_layer('instance')
        # remove gpu_ids if converting to android
        self.net = Generator(3, 1, 8, 64, use_dropout=False,
                             norm_layer=norm_layer, gpu_ids=gpu_ids)
        if load_model is not None:
            self.net.load_state_dict(torch.load(load_model))
        if not isCPU:
            self.net.cuda(0)

    def get_norm_layer(self, norm_type='instance'):
        """Return a normalization-layer factory for the given type.

        Args:
            norm_type: 'batch', 'instance' or 'none'.

        Returns:
            A functools.partial producing the norm layer, or None for 'none'.

        Raises:
            NotImplementedError: for an unknown norm_type.
        """
        if norm_type == 'batch':
            norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
        elif norm_type == 'instance':
            norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=True)
        elif norm_type == 'none':
            # BUG FIX: the original compared the undefined name `layer_type`,
            # so 'none' (and any unknown type) raised NameError instead of
            # reaching this branch / the NotImplementedError below.
            norm_layer = None
        else:
            raise NotImplementedError('layer [%s] is not found' % norm_type)
        return norm_layer

    def initialize(self, opt):
        """Store training options; expects opt.gpu_ids, opt.isTrain,
        opt.checkpoints_dir and opt.name."""
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.isTrain = opt.isTrain
        self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)

    def save_network(self, network, network_label, epoch_label, gpu_ids):
        """Save a network's CPU state_dict under save_dir, then move it back to GPU."""
        save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
        save_path = os.path.join(self.save_dir, save_filename)
        torch.save(network.cpu().state_dict(), save_path)
        if len(gpu_ids) and torch.cuda.is_available():
            network.cuda(gpu_ids[0])

    def load_network(self, network, network_label, epoch_label):
        """Load a state_dict previously written by save_network."""
        save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
        save_path = os.path.join(self.save_dir, save_filename)
        network.load_state_dict(torch.load(save_path))

    def update_learning_rate(self):
        """Step every scheduler and print the first optimizer's new LR.

        NOTE(review): self.schedulers / self.optimizers are never assigned in
        this class — presumably set externally; verify before calling.
        """
        for scheduler in self.schedulers:
            scheduler.step()
        lr = self.optimizers[0].param_groups[0]['lr']
        print('LR = %.7f' % lr)

    def print_net(self):
        # NOTE(review): `networks` is not imported in this module; this
        # method would raise NameError if called.
        networks.print_network(self.net)

    def test(self, data):
        """Run inference on data['A'] and return the raw output tensor."""
        if not self.isCPU:
            return self.net.forward(Variable(data['A'].cuda(0), requires_grad=0)).data
        return self.net.forward(Variable(data['A'], requires_grad=0)).data
class ADNET:
    """High-level shadow detector backed by a pretrained ADModel."""

    def __init__(self):
        # Loads pretrained weights; isCPU=False moves the network to GPU 0.
        self.model = ADModel(load_model='adnet/135_net_D.pth', isCPU=False)

    def getShadow(self, frame, width=256, height=256):
        """Return a (height, width) uint8 shadow mask for an input frame.

        Args:
            frame: input image; converted for the network by utils.convert.
            width: output mask width in pixels.
            height: output mask height in pixels.
        """
        outim = self.model.test(convert(frame))
        im_out = outim[0].cpu().float().numpy()
        # CHW -> HWC for PIL.
        im_out = np.transpose(im_out, (1,2,0))
        # Map network output from [-1, 1] to [0, 255].
        im_out = (im_out+1)/2*255
        im_out = im_out.astype('uint8')
        gray = Image.fromarray(np.squeeze(im_out, axis =2)).resize((int(width), int(height)))
        shadowFrame = np.array(gray)
        return shadowFrame
|
# Public API of this package: the mesh generator submodules.
__all__ = ["mesh", "hexmesh"]
|
# Copyright 2020 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the configuration of an array of neutral atoms in 2D."""
from __future__ import annotations
from collections.abc import Mapping
from typing import Any, Optional, Union
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.axes import Axes
from numpy.typing import ArrayLike
import pulser
import pulser.register._patterns as patterns
from pulser.json.utils import stringify_qubit_ids
from pulser.register._reg_drawer import RegDrawer
from pulser.register.base_register import BaseRegister, QubitId
class Register(BaseRegister, RegDrawer):
"""A 2D quantum register containing a set of qubits.
Args:
qubits: Dictionary with the qubit names as keys and their
position coordinates (in μm) as values
(e.g. {'q0':(2, -1, 0), 'q1':(-5, 10, 0), ...}).
"""
def __init__(self, qubits: Mapping[Any, ArrayLike], **kwargs: Any):
"""Initializes a custom Register."""
super().__init__(qubits, **kwargs)
if any(c.shape != (self._dim,) for c in self._coords) or (
self._dim != 2
):
raise ValueError(
"All coordinates must be specified as vectors of size 2."
)
@classmethod
def square(
cls, side: int, spacing: float = 4.0, prefix: Optional[str] = None
) -> Register:
"""Initializes the register with the qubits in a square array.
Args:
side: Side of the square in number of qubits.
spacing: The distance between neighbouring qubits in μm.
prefix: The prefix for the qubit ids. If defined, each qubit
id starts with the prefix, followed by an int from 0 to N-1
(e.g. prefix='q' -> IDs: 'q0', 'q1', 'q2', ...).
Returns:
A register with qubits placed in a square array.
"""
# Check side
if side < 1:
raise ValueError(
f"The number of atoms per side (`side` = {side})"
" must be greater than or equal to 1."
)
return cls.rectangle(side, side, spacing=spacing, prefix=prefix)
@classmethod
def rectangle(
cls,
rows: int,
columns: int,
spacing: float = 4.0,
prefix: Optional[str] = None,
) -> Register:
"""Initializes the register with the qubits in a rectangular array.
Args:
rows: Number of rows.
columns: Number of columns.
spacing: The distance between neighbouring qubits in μm.
prefix: The prefix for the qubit ids. If defined, each qubit
id starts with the prefix, followed by an int from 0 to N-1
(e.g. prefix='q' -> IDs: 'q0', 'q1', 'q2', ...)
Returns:
A register with qubits placed in a rectangular array.
"""
# Check rows
if rows < 1:
raise ValueError(
f"The number of rows (`rows` = {rows})"
" must be greater than or equal to 1."
)
# Check columns
if columns < 1:
raise ValueError(
f"The number of columns (`columns` = {columns})"
" must be greater than or equal to 1."
)
# Check spacing
if spacing <= 0.0:
raise ValueError(
f"Spacing between atoms (`spacing` = {spacing})"
" must be greater than 0."
)
coords = patterns.square_rect(rows, columns) * spacing
return cls.from_coordinates(coords, center=True, prefix=prefix)
@classmethod
def triangular_lattice(
cls,
rows: int,
atoms_per_row: int,
spacing: float = 4.0,
prefix: Optional[str] = None,
) -> Register:
"""Initializes the register with the qubits in a triangular lattice.
Initializes the qubits in a triangular lattice pattern, more
specifically a triangular lattice with horizontal rows, meaning the
triangles are pointing up and down.
Args:
rows: Number of rows.
atoms_per_row: Number of atoms per row.
spacing: The distance between neighbouring qubits in μm.
prefix: The prefix for the qubit ids. If defined, each qubit
id starts with the prefix, followed by an int from 0 to N-1
(e.g. prefix='q' -> IDs: 'q0', 'q1', 'q2', ...).
Returns:
A register with qubits placed in a triangular lattice.
"""
# Check rows
if rows < 1:
raise ValueError(
f"The number of rows (`rows` = {rows})"
" must be greater than or equal to 1."
)
# Check atoms per row
if atoms_per_row < 1:
raise ValueError(
"The number of atoms per row"
f" (`atoms_per_row` = {atoms_per_row})"
" must be greater than or equal to 1."
)
# Check spacing
if spacing <= 0.0:
raise ValueError(
f"Spacing between atoms (`spacing` = {spacing})"
" must be greater than 0."
)
coords = patterns.triangular_rect(rows, atoms_per_row) * spacing
return cls.from_coordinates(coords, center=True, prefix=prefix)
@classmethod
def hexagon(
cls, layers: int, spacing: float = 4.0, prefix: Optional[str] = None
) -> Register:
"""Initializes the register with the qubits in a hexagonal layout.
Args:
layers: Number of layers around a central atom.
spacing: The distance between neighbouring qubits in μm.
prefix: The prefix for the qubit ids. If defined, each qubit
id starts with the prefix, followed by an int from 0 to N-1
(e.g. prefix='q' -> IDs: 'q0', 'q1', 'q2', ...).
Returns:
A register with qubits placed in a hexagonal layout.
"""
# Check layers
if layers < 1:
raise ValueError(
f"The number of layers (`layers` = {layers})"
" must be greater than or equal to 1."
)
# Check spacing
if spacing <= 0.0:
raise ValueError(
f"Spacing between atoms (`spacing` = {spacing})"
" must be greater than 0."
)
n_atoms = 1 + 3 * (layers**2 + layers)
coords = patterns.triangular_hex(n_atoms) * spacing
return cls.from_coordinates(coords, center=False, prefix=prefix)
@classmethod
def max_connectivity(
cls,
n_qubits: int,
device: pulser.devices._device_datacls.BaseDevice,
spacing: float | None = None,
prefix: str | None = None,
) -> Register:
"""Initializes the register with maximum connectivity for a device.
In order to maximize connectivity, the basic pattern is the triangle.
Atoms are first arranged as layers of hexagons around a central atom.
Extra atoms are placed in such a manner that C3 and C6 rotational
symmetries are enforced as often as possible.
Args:
n_qubits: Number of qubits.
device: The device whose constraints must be obeyed.
spacing: The distance between neighbouring qubits in μm.
If omitted, the minimal distance for the device is used.
prefix: The prefix for the qubit ids. If defined, each qubit
id starts with the prefix, followed by an int from 0 to N-1
(e.g. prefix='q' -> IDs: 'q0', 'q1', 'q2', ...).
Returns:
A register with qubits placed for maximum connectivity.
"""
# Check device
if not isinstance(device, pulser.devices._device_datacls.BaseDevice):
raise TypeError("'device' must be of type 'BaseDevice'.")
# Check number of qubits (1 or above)
if n_qubits < 1:
raise ValueError(
f"The number of qubits (`n_qubits` = {n_qubits})"
" must be greater than or equal to 1."
)
# Check number of qubits (less than the max number of atoms)
if device.max_atom_num is not None and n_qubits > device.max_atom_num:
raise ValueError(
f"The number of qubits (`n_qubits` = {n_qubits})"
" must be less than or equal to the maximum"
" number of atoms supported by this device"
f" ({device.max_atom_num})."
)
if not device.min_atom_distance > 0.0:
raise NotImplementedError(
"Maximum connectivity layouts are not well defined for a "
f"device with 'min_atom_distance={device.min_atom_distance}'."
)
# Default spacing or check minimal distance
if spacing is None:
spacing = device.min_atom_distance
elif spacing < device.min_atom_distance:
raise ValueError(
f"Spacing between atoms (`spacing = `{spacing})"
" must be greater than or equal to the minimal"
" distance supported by this device"
f" ({device.min_atom_distance})."
)
coords = patterns.triangular_hex(n_qubits) * spacing
return cls.from_coordinates(coords, center=False, prefix=prefix)
def rotate(self, degrees: float) -> None:
    """Rotates the array around the origin by the given angle.

    Args:
        degrees: The angle of rotation in degrees.
    """
    # Registers backed by a RegisterLayout have fixed trap positions.
    if self.layout is not None:
        raise TypeError(
            "A register defined from a RegisterLayout cannot be rotated."
        )
    angle = np.deg2rad(degrees)
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    rotation = np.array([[cos_a, -sin_a], [sin_a, cos_a]])
    self._coords = [rotation @ coord for coord in self._coords]
def draw(
    self,
    with_labels: bool = True,
    blockade_radius: Optional[float] = None,
    draw_graph: bool = True,
    draw_half_radius: bool = False,
    qubit_colors: Optional[Mapping[QubitId, str]] = None,
    fig_name: str | None = None,
    kwargs_savefig: Optional[dict] = None,
    custom_ax: Optional[Axes] = None,
    show: bool = True,
) -> None:
    """Draws the entire register.

    Args:
        with_labels: If True, writes the qubit ID's
            next to each qubit.
        blockade_radius: The distance (in μm) between
            atoms below the Rydberg blockade effect occurs.
        draw_half_radius: Whether or not to draw the
            half the blockade radius surrounding each atoms. If `True`,
            requires `blockade_radius` to be defined.
        draw_graph: Whether or not to draw the
            interaction between atoms as edges in a graph. Will only draw
            if the `blockade_radius` is defined.
        qubit_colors: By default, atoms are drawn with a common default
            color. If this parameter is present, it replaces the colors
            for the specified atoms. Non-specified ones are stilled colored
            with the default value.
        fig_name: The name on which to save the figure.
            If None the figure will not be saved.
        kwargs_savefig: Keywords arguments for
            ``matplotlib.pyplot.savefig``. Not applicable if `fig_name`
            is ``None``.
        custom_ax: If present, instead of creating its own Axes object,
            the function will use the provided one. Warning: if fig_name
            is set, it may save content beyond what is drawn in this
            function.
        show: Whether or not to call `plt.show()` before returning. When
            combining this plot with other ones in a single figure, one may
            need to set this flag to False.

    Note:
        When drawing half the blockade radius, we say there is a blockade
        effect between atoms whenever their respective circles overlap.
        This representation is preferred over drawing the full Rydberg
        radius because it helps in seeing the interactions between atoms.
    """
    # Fix: the previous signature used mutable default arguments
    # (``dict()`` and ``{}``); use None sentinels and normalize here.
    if qubit_colors is None:
        qubit_colors = {}
    if kwargs_savefig is None:
        kwargs_savefig = {}
    super()._draw_checks(
        len(self._ids),
        blockade_radius=blockade_radius,
        draw_graph=draw_graph,
        draw_half_radius=draw_half_radius,
    )
    pos = np.array(self._coords)
    if custom_ax is None:
        _, custom_ax = self._initialize_fig_axes(
            pos,
            blockade_radius=blockade_radius,
            draw_half_radius=draw_half_radius,
        )
    super()._draw_2D(
        custom_ax,
        pos,
        self._ids,
        with_labels=with_labels,
        blockade_radius=blockade_radius,
        draw_graph=draw_graph,
        draw_half_radius=draw_half_radius,
        qubit_colors=qubit_colors,
    )
    if fig_name is not None:
        plt.savefig(fig_name, **kwargs_savefig)
    if show:
        plt.show()
def _to_dict(self) -> dict[str, Any]:
    # Serialization is fully delegated to the parent class implementation.
    return super()._to_dict()
def _to_abstract_repr(self) -> list[dict[str, Union[QubitId, float]]]:
    """Returns the abstract representation of the qubits: one dict per
    qubit with its stringified name and x/y coordinates."""
    qubit_names = stringify_qubit_ids(self._ids)
    abstract_qubits = []
    for qubit_name, coord in zip(qubit_names, self._coords):
        x_coord, y_coord = coord
        abstract_qubits.append({"name": qubit_name, "x": x_coord, "y": y_coord})
    return abstract_qubits
|
import libtcodpy as libtcod
class Swatch:
    """Named color palette for libtcod rendering.

    Two groups of entries:
      * Primary/Secondary/Alternate/Compliment families in five lightness
        steps each, generated with paletton (link below).
      * 'Db*' colors taken from the DawnBringer pixel-art palette (link below).
    """
    # colors: name -> libtcod.Color lookup table.
    colors = {
        # http://paletton.com/#uid=73d0u0k5qgb2NnT41jT74c8bJ8X
        'PrimaryLightest': libtcod.Color(110, 121, 119),
        'PrimaryLighter': libtcod.Color(88, 100, 98),
        'Primary': libtcod.Color(68, 82, 79),
        'PrimaryDarker': libtcod.Color(48, 61, 59),
        'PrimaryDarkest': libtcod.Color(29, 45, 42),
        'SecondaryLightest': libtcod.Color(116, 120, 126),
        'SecondaryLighter': libtcod.Color(93, 97, 105),
        'Secondary': libtcod.Color(72, 77, 85),
        'SecondaryDarker': libtcod.Color(51, 56, 64),
        'SecondaryDarkest': libtcod.Color(31, 38, 47),
        'AlternateLightest': libtcod.Color(190, 184, 174),
        'AlternateLighter': libtcod.Color(158, 151, 138),
        'Alternate': libtcod.Color(129, 121, 107),
        'AlternateDarker': libtcod.Color(97, 89, 75),
        'AlternateDarkest': libtcod.Color(71, 62, 45),
        'ComplimentLightest': libtcod.Color(190, 180, 174),
        'ComplimentLighter': libtcod.Color(158, 147, 138),
        'Compliment': libtcod.Color(129, 116, 107),
        'ComplimentDarker': libtcod.Color(97, 84, 75),
        'ComplimentDarkest': libtcod.Color(71, 56, 45),
        # http://pixeljoint.com/forum/forum_posts.asp?TID=12795
        'DbDark': libtcod.Color(20, 12, 28),
        'DbOldBlood': libtcod.Color(68, 36, 52),
        'DbDeepWater': libtcod.Color(48, 52, 109),
        'DbOldStone': libtcod.Color(78, 74, 78),
        'DbWood': libtcod.Color(133, 76, 48),
        'DbVegetation': libtcod.Color(52, 101, 36),
        'DbBlood': libtcod.Color(208, 70, 72),
        'DbStone': libtcod.Color(117, 113, 97),
        'DbWater': libtcod.Color(89, 125, 206),
        'DbBrightWood': libtcod.Color(210, 125, 44),
        'DbMetal': libtcod.Color(133, 149, 161),
        'DbGrass': libtcod.Color(109, 170, 44),
        'DbSkin': libtcod.Color(210, 170, 153),
        'DbSky': libtcod.Color(109, 194, 202),
        'DbSun': libtcod.Color(218, 212, 94),
        'DbLight': libtcod.Color(222, 238, 214)
    }
|
from cv2 import *
def incCount(count):
    """Persist the capture counter to count.txt in the working directory.

    Fix: the original left the file handle open with no explicit close,
    so the write was not guaranteed to be flushed; a context manager
    closes (and flushes) the file deterministically.

    Args:
        count: integer counter to store.
    """
    with open('count.txt', 'w') as f:
        f.write(str(count))
def getCount():
    """Return the next capture counter: stored value + 1.

    If count.txt is missing or does not contain an integer, reset the
    file to '1' and return 1.

    Fixes over the original: file handles are closed via ``with``, and
    the bare ``except:`` is narrowed to the failures this code actually
    recovers from (missing/unreadable file, non-integer content).
    """
    try:
        with open('count.txt', 'r') as f:
            return int(f.read()) + 1
    except (OSError, ValueError):
        with open('count.txt', 'w') as f:
            f.write('1')
        return 1
def imageCaputre():
    # NOTE(review): name looks like a typo for "imageCapture"; kept as-is
    # because external callers may reference it.
    # Captures one frame from the default camera, previews it in a window
    # (blocks until a key is pressed), and saves it with an incrementing
    # filename counter (see getCount/incCount).
    cam = VideoCapture(0)  # 0 -> index of camera
    s, img = cam.read()
    if s:  # frame captured without any errors
        namedWindow("cam-test")
        imshow("cam-test", img)
        waitKey(0)  # blocks until a key press
        destroyWindow("cam-test")
        count = getCount()
        imwrite("filename{}.jpg".format(count), img)
        incCount(count)
    # Release the camera whether or not a frame was captured.
    cam.release()
|
from modules.machine import machine
class processing_cell:
    # A group of machines of a given type within a facility.
    def __init__(self, type, **kwargs):
        # type: the cell's processing type; 'packaging' cells hold a mix of
        # boxing and bagging machines, all other types hold `num_machines`
        # identical machines.
        self.type = type
        self.facility = kwargs.get('facility')
        if type == 'packaging':
            self.num_machines = {
                'boxing_machine': kwargs.get('boxing_machines'),
                'bagging_machine': kwargs.get('bagging_machines')
            }
            # NOTE(review): **dict(key=value) expands to the literal keyword
            # argument key=value (the name 'key' itself, not the machine
            # type). If a keyword named after the machine type was intended,
            # this should be **{key: value} — confirm against machine().
            self.machines = [
                machine(type=key, i=1, **dict(key=value), **kwargs)
                for key, value in self.num_machines.items()
            ]
        else:
            self.num_machines = kwargs.get('num_machines')
            self.machines = [
                machine(type, i, **kwargs) for i in range(self.num_machines)
            ]
    def __repr__(self):
        return("{0} Processing Cell".format(self.type))
    @property
    def avail_mach(self):
        # Machines currently available for loading.
        return [m for m in self.machines if m.available == True]
    @property
    def unavail_mach(self):
        # Machines currently busy/unavailable (candidates for unloading).
        return [m for m in self.machines if m.available == False]
    def load_machines(self, **kwargs):
        # NOTE(review): the return references the loop variable after the
        # loop, so all available machines are loaded but only the last one
        # is processed; raises NameError when no machine is available.
        # Confirm whether processing every loaded machine was intended.
        for m in self.avail_mach:
            m.load(**kwargs)
        return(m.process(**kwargs))
    def unload_machines(self, amount):
        # Generator: yields the unloaded amount from each busy machine.
        for m in self.unavail_mach:
            yield m.unload(amount)
|
# Data cleaning: delete certain wifi records.
# Original note (translated): delete wifis with fewer than 9 connections and
# all signal strengths below -30.
# NOTE(review): that note disagrees with the thresholds configured below
# (strength=35, connects=6) — the code, not the comment, is authoritative.
# Uses multiple threads to speed up processing.
import src.utils as u
import threading

strength = 35  # signal-strength threshold: rows deleted when MAX(wifi_db) < -strength
connects = 6   # connection-count threshold: rows deleted when COUNT(*) < connects
def run(mall_ids, i):
    """Delete weak, rarely-connected wifi rows from each mall table.

    For every table in *mall_ids*, finds ssids whose strongest signal is
    below -`strength` and which appear fewer than `connects` times, then
    deletes those rows.

    Fixes over the original:
      * ssid values are passed as query parameters instead of being
        hand-quoted into the SQL string (hand quoting breaks on ssids
        containing quotes and is an injection vector).
      * cursor/connection are closed in a ``finally`` block.

    Args:
        mall_ids: iterable of mall table names to process.
        i: unused thread index (kept for interface compatibility).
    """
    print(mall_ids)
    for mall_id in mall_ids:
        print(mall_id, ' starts')
        conn = u.get_db_conn()
        cur = conn.cursor()
        try:
            # Table names cannot be bound as parameters; mall_id comes from
            # our own database (u.get_malls), not from user input.
            sql = ('SELECT wifi_ssid FROM {m} GROUP BY wifi_ssid '
                   'HAVING MAX(wifi_db)<-{s} AND COUNT(*)<{c}').format(
                       m=mall_id, s=strength, c=connects)
            cur.execute(sql)
            if cur.rowcount > 0:
                wifis = [r[0] for r in cur.fetchall()]
                placeholders = ','.join(['%s'] * len(wifis))
                sql = 'DELETE FROM {m} WHERE wifi_ssid in ({p})'.format(
                    m=mall_id, p=placeholders)
                cur.execute(sql, wifis)
                conn.commit()
            print(mall_id, ' done')
        finally:
            cur.close()
            conn.close()
if __name__ == '__main__':
    # Split the mall list into chunks of `thread_count` tables and hand
    # each chunk to a worker thread, then wait for all of them.
    malls = u.get_malls()
    thread_count = 15
    workers = []
    for start in range(0, len(malls), thread_count):
        chunk = malls[start:start + thread_count]
        worker = threading.Thread(target=run, args=(chunk, 1))
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
    print("all done")
import jwt
from app import app
def generate_token(payload):
    """Create a signed JWT for *payload* using the app's SECRET_KEY (HS256).

    Fix: removed the debug ``print`` calls — one of them wrote the signed
    token to stdout, leaking a credential into logs/console output.

    Args:
        payload: dict of claims to encode.

    Returns:
        The encoded JWT.
    """
    token = jwt.encode(
        payload,
        app.config.get('SECRET_KEY'),
        algorithm='HS256'
    )
    return token
import numpy as np
def interpolate(points):
    """Resample (time, value) pairs onto integer time steps by linear interpolation.

    Args:
        points: sequence of (time, value) pairs with non-decreasing times.

    Returns:
        List of (time, value) pairs for every integer time from int(t_first)
        to int(t_last) inclusive, with values from ``np.interp``.
        An empty input returns an empty list (the original raised on it).
    """
    if not points:
        return []
    time, values = zip(*points)
    new_time = np.arange(int(time[0]), int(time[-1]) + 1)
    return list(zip(new_time, np.interp(new_time, time, values)))
|
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ ASR_THCHS30.py ]
# Synopsis [ automatic speech recognition on the THCHS30 dataset - tensorflow ]
# Author [ Ting-Wei Liu (Andi611) ]
# Copyright [ Copyleft(c), NTUEE, NTU, Taiwan ]
# Reference [ http://blog.topspeedsnail.com/archives/10696 ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import os
import csv
import pickle
import random
import argparse
import numpy as np
import tensorflow as tf
from collections import Counter
import librosa # >> https://github.com/librosa/librosa
##################
# CONFIGURATIONS #
##################
def get_config():
    """Parse and return the command-line configuration for ASR_THCHS30."""
    parser = argparse.ArgumentParser(description='ASR_THCHS30 configuration')
    # -- mode flags --
    parser.add_argument('--train', action='store_true', help='run training process')
    parser.add_argument('--reprocess', action='store_true', help='process and read all wav files to obtain max len, this may take a while')
    # -- training hyper-parameters --
    parser.add_argument('--batch_size', type=int, default=64, help='batch size for training')
    parser.add_argument('--n_epoch', type=int, default=100, help='number of training epoch')
    parser.add_argument('--lr', type=float, default=0.001, help='learning rate for optimizer')
    # -- paths --
    parser.add_argument('--data_path', type=str, default='./data_thchs30/data/', help='path to the THCHS30 file')
    parser.add_argument('--model_dir', type=str, default='./model', help='model storage directory')
    return parser.parse_args()
#################
# GET WAV FILES #
#################
def get_wav_files(path):
    """Collect paths of all .wav files under *path* that are at least
    240000 bytes; smaller files are skipped."""
    wav_files = []
    for dirpath, _dirnames, filenames in os.walk(path):
        for fname in filenames:
            if not fname.endswith('.wav'):
                continue
            full_path = os.sep.join([dirpath, fname])
            # Keep only files large enough to hold a usable recording.
            if os.stat(full_path).st_size >= 240000:
                wav_files.append(full_path)
    return wav_files
#################
# GET WAV LABEL #
#################
def get_wav_lable(path, wav_files):
    """Return transcripts (.trn file contents) whose ids match *wav_files*.

    The id of a file is its basename without extension; only .trn files
    whose id matches some wav file's id are read.
    """
    wanted_ids = {os.path.basename(w).split('.')[0] for w in wav_files}
    labels = []
    for dirpath, _dirnames, filenames in os.walk(path):
        for fname in filenames:
            if not fname.endswith('.trn'):
                continue
            trn_path = os.sep.join([dirpath, fname])
            if os.path.basename(trn_path).split('.')[0] not in wanted_ids:
                continue
            with open(trn_path, 'r') as f:
                line = f.readline()
            if line[-1] == '\n':
                line = line[:-1]
            labels.append(line)
    assert len(wav_files) == len(labels)
    print('Number of training samples: ', len(wav_files))  # >> 11841
    return labels
#################
# BUILD MAPPING #
#################
def build_mapping(labels):
    """Build word<->index mappings over all characters in *labels*.

    Indices are assigned by descending frequency (ties keep first-seen
    order); '<unk>' gets the last index.
    """
    counter = Counter()
    for label in labels:
        counter.update(label)
    # Stable sort: equal counts keep insertion (first occurrence) order.
    ordered = sorted(counter.items(), key=lambda kv: -kv[1])
    words = [w for w, _ in ordered]
    print('>> building vocabulary, vocab size:', len(words))  # >> 2882
    word2idx = {w: i for i, w in enumerate(words)}
    word2idx['<unk>'] = len(words)
    idx2word = {i: w for w, i in word2idx.items()}
    return word2idx, idx2word
####################
# MAP LABEL TO IDX #
####################
def label2idx(word2idx, labels):
    """Map each label string to a list of indices via *word2idx*.

    Unknown words map to len(word2idx). Also returns the longest label
    length (used for padding).
    """
    unk_idx = len(word2idx)
    labels_in_idx = [
        [word2idx.get(word, unk_idx) for word in label] for label in labels
    ]
    label_max_len = np.max([len(label) for label in labels_in_idx])
    print('Max length of label: ', label_max_len)  # >> 75
    return labels_in_idx, label_max_len
#####################
# GET MAX AUDIO LEN #
#####################
def get_max_audio_len(wav_files):
    """Return the maximum MFCC frame count over *wav_files*.

    Slow: every file is decoded and featurized, which is why the caller
    caches/overrides this value unless --reprocess is given.
    """
    wav_max_len = 0
    for path in wav_files:
        samples, sample_rate = librosa.load(path, mono=True)
        mfcc_frames = np.transpose(librosa.feature.mfcc(samples, sample_rate), [1, 0])
        wav_max_len = max(wav_max_len, len(mfcc_frames))
    print('Max audio length', wav_max_len)  # >> 673
    return wav_max_len
#################
# CLASS ASR NET #
#################
class ASR_NET(object):
    # WaveNet-style dilated-convolution network for speech recognition,
    # trained with CTC loss (TensorFlow 1.x graph-mode code).
    ##################
    # INITIALIZATION #
    ##################
    def __init__(self, config, vocab_size, label_max_len, wav_max_len, wav_files, labels_in_idx):
        """Store hyper-parameters and data, then build the graph and saver.

        Args:
            config: parsed argparse namespace (batch_size, n_epoch, lr, model_dir).
            vocab_size: number of output symbols (including '<unk>').
            label_max_len: longest label sequence, in symbols.
            wav_max_len: longest audio sequence, in MFCC frames.
            wav_files: list of wav file paths.
            labels_in_idx: index-mapped labels, parallel to wav_files.
        """
        #--parameters--#
        self.batch_size = config.batch_size
        self.n_epoch = config.n_epoch
        self.lr = config.lr
        self.model_dir = config.model_dir
        #--len--#
        self.vocab_size = vocab_size
        self.label_max_len = label_max_len
        self.wav_max_len = wav_max_len
        #--data--#
        self.wav_files = wav_files
        self.labels = labels_in_idx
        #--placeholders--#
        # X: MFCC features (batch, time, 20 coefficients); Y: label indices.
        self.X = tf.placeholder(dtype=tf.float32, shape=[self.batch_size, None, 20])
        self.Y = tf.placeholder(dtype=tf.int32, shape=[self.batch_size, None])
        #--model variables--#
        # Per-example sequence length = count of frames that are not all-zero
        # (zero frames are the padding added in get_batch).
        self.sequence_len = tf.reduce_sum(tf.cast(tf.not_equal(tf.reduce_sum(self.X, axis=2), 0.), tf.int32), axis=1)
        self.n_batch = len(self.wav_files) // self.batch_size
        # Sentinel "very large" starting loss for best-checkpoint tracking.
        self.best_loss = 777.777
        self.batch_pointer = 0
        #--build model--#
        self.logit = self.build_network()
        #--model saver--#
        self.variables = [var for var in tf.global_variables() if 'ASR_NET' in var.name]
        self.saver = tf.train.Saver(var_list=self.variables, max_to_keep=1)
    #################
    # BUILD NETWORK #
    #################
    def build_network(self, n_dim=128, n_blocks=3):
        """Build the dilated-conv network; returns per-frame logits.

        Args:
            n_dim: channel width of the residual blocks.
            n_blocks: number of stacks of dilation rates [1, 2, 4, 8, 16].
        """
        with tf.variable_scope('ASR_NET'):
            out = self.conv1d_layer(layer_id=0, input_tensor=self.X, size=1, dim=n_dim, activation='tanh', scale=0.14, bias=False)
            # skip connections
            def residual_block(input_sensor, size, rate, id_increment):
                # Gated activation unit: tanh "filter" * sigmoid "gate",
                # followed by a 1x1 conv; returns (residual out, skip out).
                conv_filter = self.aconv1d_layer(layer_id=1+id_increment, input_tensor=input_sensor, size=size, rate=rate, activation='tanh', scale=0.03, bias=False)
                conv_gate = self.aconv1d_layer(layer_id=2+id_increment, input_tensor=input_sensor, size=size, rate=rate, activation='sigmoid', scale=0.03, bias=False)
                out = conv_filter * conv_gate
                out = self.conv1d_layer(layer_id=3+id_increment, input_tensor=out, size=1, dim=n_dim, activation='tanh', scale=0.08, bias=False)
                return out + input_sensor, out
            skip = 0
            id_increment = 0
            for _ in range(n_blocks):
                for r in [1, 2, 4, 8, 16]:
                    out, s = residual_block(out, size=7, rate=r, id_increment=id_increment)
                    id_increment += 3
                    skip += s
            # Post-process the summed skip connections into vocab-sized logits.
            logit = self.conv1d_layer(layer_id=id_increment+1, input_tensor=skip, size=1, dim=skip.get_shape().as_list()[-1], activation='tanh', scale=0.08, bias=False)
            logit = self.conv1d_layer(layer_id=id_increment+2, input_tensor=logit, size=1, dim=self.vocab_size, activation=None, scale=0.04, bias=True)
            return logit
    #################
    # TRAIN NETWORK #
    #################
    def train(self):
        """Run training; save a checkpoint whenever mean epoch loss improves."""
        #--CTC loss--#
        # Build a SparseTensor from non-zero label entries; values are
        # shifted down by 1 so index 0 can serve as padding.
        indices = tf.where(tf.not_equal(tf.cast(self.Y, tf.float32), 0.))
        target = tf.SparseTensor(indices=indices, values=tf.gather_nd(self.Y, indices) - 1, dense_shape=tf.cast(tf.shape(self.Y), tf.int64))
        loss = tf.nn.ctc_loss(labels=target, inputs=self.logit, sequence_length=self.sequence_len, time_major=False)
        #--optimizer--#
        optimizer = tf.train.RMSPropOptimizer(learning_rate=self.lr, decay=0.99)
        var_list = [t for t in tf.trainable_variables()]
        gradient = optimizer.compute_gradients(loss, var_list=var_list)
        optimizer_op = optimizer.apply_gradients(gradient)
        #--training session--#
        with tf.Session() as sess:
            #--initialize training--#
            sess.run(tf.global_variables_initializer())
            # NOTE(review): this local is never read — checkpointing uses
            # self.best_loss (see model_checkpoint_save_best).
            best_loss = 777.777
            history_loss = []
            #--run epoch--#
            for epoch in range(self.n_epoch):
                #--initialize epoch--#
                self.batch_pointer = 0
                epoch_loss = []
                self.shuffle_data()
                #--run batch--#
                for batch in range(self.n_batch):
                    batches_wavs, batches_labels = self.get_batch()
                    train_loss, _ = sess.run([loss, optimizer_op], feed_dict={self.X: batches_wavs, self.Y: batches_labels})
                    epoch_loss.append(np.mean(train_loss))
                    print('Epoch: %i/%i, Batch: %i/%i, Loss: %.5f' % (epoch, self.n_epoch, batch, self.n_batch, epoch_loss[-1]), end='\r')
                #--epoch checkpoint--#
                history_loss.append(np.mean(epoch_loss))
                to_save = self.model_checkpoint_save_best(cur_val=history_loss[-1], mode='min')
                print('Epoch: %i/%i, Batch: %i/%i, Loss: %.5f, Saved: %s' % (epoch, self.n_epoch, batch, self.n_batch, history_loss[-1], 'True' if to_save else 'False'))
                if to_save == True:
                    self.saver.save(sess, os.path.join(self.model_dir, 'ASR_THCHS30'), global_step=(epoch+1))
                    # NOTE(review): the file handle from open() is never
                    # explicitly closed; third positional arg True selects
                    # pickle protocol 1.
                    pickle.dump(history_loss, open(os.path.join(self.model_dir, 'history_loss.pkl'), 'wb'), True)
    ################
    # SHUFFLE DATA #
    ################
    def shuffle_data(self):
        """Shuffle wav files and labels together so they stay aligned."""
        to_shuffle = list(zip(self.wav_files, self.labels))
        random.shuffle(to_shuffle)
        self.wav_files, self.labels = zip(*to_shuffle)
    #############
    # GET BATCH #
    #############
    def get_batch(self):
        """Load the next batch of wavs as MFCC frame lists; pad wavs and labels."""
        batches_wavs = []
        batches_labels = []
        for i in range(self.batch_size):
            wav, sr = librosa.load(self.wav_files[self.batch_pointer], mono=True)
            mfcc = np.transpose(librosa.feature.mfcc(wav, sr), [1,0])
            batches_wavs.append(mfcc.tolist())
            batches_labels.append(self.labels[self.batch_pointer])
            self.batch_pointer += 1
        # Zero-pad audio to wav_max_len frames (20 coeffs per frame) and
        # labels to label_max_len.
        for mfcc in batches_wavs:
            while len(mfcc) < self.wav_max_len:
                mfcc.append([0]*20)
        # NOTE(review): batches_labels holds references to the lists stored
        # in self.labels, so this padding mutates them in place; after the
        # first pass every stored label is already padded to label_max_len.
        for label in batches_labels:
            while len(label) < self.label_max_len:
                label.append(0)
        return batches_wavs, batches_labels
    ##############
    # _SAVE BEST #
    ##############
    """
    Called by pre_train(), this function checks and saves the best model.
    """
    def model_checkpoint_save_best(self, cur_val, mode):
        """Return True and update self.best_loss when cur_val beats the best.

        mode 'min': smaller is better; mode 'max': larger is better;
        anything else raises ValueError.
        """
        if mode == 'min':
            if cur_val < self.best_loss:
                self.best_loss = cur_val
                return True
            else: return False
        elif mode == 'max':
            if cur_val > self.best_loss:
                self.best_loss = cur_val
                return True
            else: return False
        else: raise ValueError('Invalid Mode!')
    ################
    # CONV1D LAYER #
    ################
    def conv1d_layer(self, layer_id, input_tensor, size, dim, activation, scale, bias):
        """conv1d + (manual batch norm when bias=False) + optional activation.

        Args:
            layer_id: unique id used to scope the layer's variables.
            size: convolution kernel width.
            dim: number of output channels.
            activation: 'tanh', 'sigmoid', or None.
            scale: half-width of the uniform weight initializer.
            bias: if True add a bias and skip batch norm; if False, batch-norm.
        """
        # NOTE(review): leftover — conv1d_index is never defined or used.
        global conv1d_index
        with tf.variable_scope('conv1d_' + str(layer_id)):
            W = tf.get_variable('W', (size, input_tensor.get_shape().as_list()[-1], dim), dtype=tf.float32, initializer=tf.random_uniform_initializer(minval=-scale, maxval=scale))
            if bias:
                b = tf.get_variable('b', [dim], dtype=tf.float32, initializer=tf.constant_initializer(0))
            out = tf.nn.conv1d(input_tensor, W, stride=1, padding='SAME') + (b if bias else 0)
            if not bias:
                # Hand-rolled batch normalization with running statistics.
                beta = tf.get_variable('beta', dim, dtype=tf.float32, initializer=tf.constant_initializer(0))
                gamma = tf.get_variable('gamma', dim, dtype=tf.float32, initializer=tf.constant_initializer(1))
                mean_running = tf.get_variable('mean', dim, dtype=tf.float32, initializer=tf.constant_initializer(0))
                variance_running = tf.get_variable('variance', dim, dtype=tf.float32, initializer=tf.constant_initializer(1))
                mean, variance = tf.nn.moments(out, axes=list(np.arange(len(out.get_shape()) - 1)))
                def update_running_stat():
                    decay = 0.99
                    update_op = [mean_running.assign(mean_running * decay + mean * (1 - decay)), variance_running.assign(variance_running * decay + variance * (1 - decay))]
                    with tf.control_dependencies(update_op):
                        return tf.identity(mean), tf.identity(variance)
                # NOTE(review): the predicate is an untrainable Variable
                # initialized to False and never flipped, so the second
                # branch (frozen running statistics) is what runs — confirm
                # whether a train/eval switch was intended.
                m, v = tf.cond(tf.Variable(False, trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES]), update_running_stat, lambda: (mean_running, variance_running))
                out = tf.nn.batch_normalization(out, m, v, beta, gamma, 1e-8)
            if activation == 'tanh':
                out = tf.nn.tanh(out)
            if activation == 'sigmoid':
                out = tf.nn.sigmoid(out)
            return out
    #################
    # ACONV1D LAYER #
    #################
    def aconv1d_layer(self, layer_id, input_tensor, size, rate, activation, scale, bias):
        """Dilated (atrous) 1-D conv implemented via atrous_conv2d.

        Same normalization/activation scheme as conv1d_layer; `rate` is the
        dilation rate. Channel count is preserved.
        """
        with tf.variable_scope('aconv1d_' + str(layer_id)):
            shape = input_tensor.get_shape().as_list()
            W = tf.get_variable('W', (1, size, shape[-1], shape[-1]), dtype=tf.float32, initializer=tf.random_uniform_initializer(minval=-scale, maxval=scale))
            if bias:
                b = tf.get_variable('b', [shape[-1]], dtype=tf.float32, initializer=tf.constant_initializer(0))
            # Expand to (batch, 1, time, chan) for the 2-D atrous op, then
            # squeeze the dummy height dimension back out.
            out = tf.nn.atrous_conv2d(tf.expand_dims(input_tensor, dim=1), W, rate=rate, padding='SAME')
            out = tf.squeeze(out, [1])
            if not bias:
                # Same hand-rolled batch norm as conv1d_layer.
                beta = tf.get_variable('beta', shape[-1], dtype=tf.float32, initializer=tf.constant_initializer(0))
                gamma = tf.get_variable('gamma', shape[-1], dtype=tf.float32, initializer=tf.constant_initializer(1))
                mean_running = tf.get_variable('mean', shape[-1], dtype=tf.float32, initializer=tf.constant_initializer(0))
                variance_running = tf.get_variable('variance', shape[-1], dtype=tf.float32, initializer=tf.constant_initializer(1))
                mean, variance = tf.nn.moments(out, axes=list(np.arange(len(out.get_shape()) - 1)))
                def update_running_stat():
                    decay = 0.99
                    update_op = [mean_running.assign(mean_running * decay + mean * (1 - decay)), variance_running.assign(variance_running * decay + variance * (1 - decay))]
                    with tf.control_dependencies(update_op):
                        return tf.identity(mean), tf.identity(variance)
                m, v = tf.cond(tf.Variable(False, trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES]), update_running_stat, lambda: (mean_running, variance_running))
                out = tf.nn.batch_normalization(out, m, v, beta, gamma, 1e-8)
            if activation == 'tanh':
                out = tf.nn.tanh(out)
            if activation == 'sigmoid':
                out = tf.nn.sigmoid(out)
            return out
"""
def speech_to_text(wav_file):
wav, sr = librosa.load(wav_file, mono=True)
mfcc = np.transpose(np.expand_dims(librosa.feature.mfcc(wav, sr), axis=0), [0,2,1])
logit = speech_to_text_network()
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('.'))
decoded = tf.transpose(logit, perm=[1, 0, 2])
decoded, _ = tf.nn.ctc_beam_search_decoder(decoded, sequence_len, merge_repeated=False)
predict = tf.sparse_to_dense(decoded[0].indices, decoded[0].shape, decoded[0].values) + 1
output = sess.run(decoded, feed_dict={X: mfcc})
#print(output)
"""
########
# MAIN #
########
"""
main function
"""
def main():
    """Entry point: parse the configuration and launch training if requested."""
    config = get_config()
    if not config.train:
        return
    # Prepare data: wav paths, transcripts, vocabulary, index-mapped labels.
    wav_files = get_wav_files(path=config.data_path)
    labels = get_wav_lable(path=config.data_path, wav_files=wav_files)
    word2idx, idx2word = build_mapping(labels)
    labels_in_idx, label_max_len = label2idx(word2idx, labels)
    vocab_size = len(word2idx)
    # Recomputing the max audio length decodes every wav file, so fall back
    # to the cached value (703) unless --reprocess is given.
    wav_max_len = get_max_audio_len(wav_files) if config.reprocess else 703
    net = ASR_NET(config, vocab_size, label_max_len, wav_max_len, wav_files, labels_in_idx)
    net.train()
# Script entry point.
if __name__ == '__main__':
    main()
|
'''
Created on 20. mar. 2017
@author: tsy
'''
import os
import xml.etree.ElementTree as ET
def charsToDict(filename, name):
    """Read characteristics for a selection entry from a BattleScribe catalogue.

    Parses ``<filename>.cat`` inside the 'The-9th-Age' data directory and
    returns a dict of stats (M, WS, BS, ...) for the entry called *name*
    (entries of type 'model' or 'unit'), or None when no such entry exists
    (the original raised NameError in that case).

    Fix: the working-directory switch is now wrapped in try/finally so the
    caller's cwd is restored even when parsing fails.

    Args:
        filename: catalogue filename without extension.
        name: selection entry name to look up.
    """
    cwd = os.getcwd()
    os.chdir(os.path.join(os.path.dirname(__file__), 'The-9th-Age'))
    try:
        tree = ET.parse(filename + '.cat')
        root = tree.getroot()

        def _childCharsToDict_(child=None):
            # Extract the characteristic values of a single entry.
            assert child is not None
            returnDict = {'M': None, 'WS': None, 'BS': None, 'S': None,
                          'T': None, 'W': None, 'I': None, 'A': None,
                          'LD': None, 'ArmourSave': 7, 'WardSave': 7}
            for child2 in child.iter('{http://www.battlescribe.net/schema/catalogueSchema}characteristic'):
                for char in returnDict:
                    if child2.attrib['name'] == char:
                        try:
                            returnDict[char] = int(child2.attrib['value'])
                        except KeyError:
                            print('%s can not be found for %s' % (char, child.attrib['name']))
                        except ValueError:
                            # value is either '-' or something like '5+'
                            try:
                                returnDict[char] = int(child2.attrib['value'][0])
                            except ValueError:
                                returnDict[char] = None
                                print('%s will be left as None for %s' % (char, child.attrib['name']))
                            except IndexError:
                                pass
            print('\n')
            return returnDict

        returnDict = None
        for child in root.iter('{http://www.battlescribe.net/schema/catalogueSchema}selectionEntry'):
            if child.attrib['name'] == name and child.attrib['type'] in ('model', 'unit'):
                returnDict = _childCharsToDict_(child)
    finally:
        # Always restore the caller's working directory.
        os.chdir(cwd)
    return returnDict
def rulesToList(filename, name):
    """Return the rule targetIds linked from a selection entry.

    Parses ``<filename>.cat`` inside the 'The-9th-Age' data directory and
    collects the 'targetId' of every infoLink under the entry called *name*
    (types 'model' or 'unit'). Returns None when no such entry exists (the
    original raised NameError in that case).

    Fix: try/finally guarantees the caller's cwd is restored even when
    parsing fails.

    Args:
        filename: catalogue filename without extension.
        name: selection entry name to look up.
    """
    cwd = os.getcwd()
    os.chdir(os.path.join(os.path.dirname(__file__), 'The-9th-Age'))
    try:
        tree = ET.parse(filename + '.cat')
        root = tree.getroot()

        def _childRulesToList_(child=None):
            # Collect targetIds of all infoLink elements under `child`.
            assert child is not None
            rules = []
            for child2 in child.iter('{http://www.battlescribe.net/schema/catalogueSchema}infoLink'):
                if 'targetId' in child2.attrib:
                    rules.append(child2.attrib['targetId'])
            return rules

        ruleList = None
        for child in root.iter('{http://www.battlescribe.net/schema/catalogueSchema}selectionEntry'):
            if child.attrib['name'] == name and child.attrib['type'] in ('model', 'unit'):
                ruleList = _childRulesToList_(child)
    finally:
        os.chdir(cwd)
    return ruleList
def rulesInterpreter(filename=None, ruleList=None):
    """Resolve rule/profile IDs to their text.

    Given a list of rule IDs, returns ``{name: [text, id, modifier]}`` from
    the catalogue's ``rule`` and ``profile`` elements.

    Fixes over the original:
      * try/finally restores the caller's cwd even when parsing fails;
      * ``text``/``modifier`` are reset per element — before, a match with
        no description/modifiers reused values from the previous match (or
        raised NameError on the first one);
      * the duplicated rule/profile search loops are merged.

    Args:
        filename: catalogue filename without extension.
        ruleList: iterable of rule/profile IDs to resolve.
    """
    assert ruleList is not None
    assert filename is not None
    cwd = os.getcwd()
    os.chdir(os.path.join(os.path.dirname(__file__), 'The-9th-Age'))
    try:
        tree = ET.parse(filename + '.cat')
        root = tree.getroot()
        ruleDict = {}
        ns = '{http://www.battlescribe.net/schema/catalogueSchema}'
        # 'rule' and 'profile' elements share the same extraction logic.
        for tag in ('rule', 'profile'):
            for child in root.iter(ns + tag):
                if child.attrib['id'] in ruleList:
                    rule_id = child.attrib['id']
                    name = child.attrib['name']
                    text = None
                    modifier = None
                    for child2 in child.iter(ns + 'description'):
                        text = child2.text
                    for child2 in child.iter(ns + 'modifiers'):
                        modifier = child2.text
                    ruleDict[name] = [text, rule_id, modifier]
    finally:
        os.chdir(cwd)
    return ruleDict
|
#coding:utf-8
import os, os.path
from flask import flash, url_for, redirect, render_template, abort,\
request, current_app
from flask.ext.login import login_required, current_user
from . import home
from .home_form import FileUploadForm, AboutMeForm
from ..models import User, Role, Permission, db, Article, Follow
from ..decorators import permission_required
from ..functions import random_str
@home.route('/<id>')
def homepage(id):
    """Render user `id`'s homepage with their articles, newest first, paginated."""
    user = User.query.filter_by(id=id).first()
    if user is None:
        abort(404)
    page = request.args.get('page', 1, type=int)
    pagination = user.article.order_by(Article.publish_time.desc()).paginate(
        page, per_page=current_app.config['BLOG_ISLAND_ARTICLES_PER_PAGE'],
        error_out=False)
    articles = pagination.items
    return render_template('home/homepage.html', user=user, articles=articles,
                           pagination=pagination)
@home.route('/upload_picture', methods=['GET', 'POST'])
@login_required
@permission_required(Role.User)
def upload_picture():
    """Handle avatar upload.

    Validates the extension, stores the file under static/picture/ with a
    random collision-free name, removes the previous avatar file, and
    updates the user's picture_url.

    Fixes over the original:
      * old-avatar path was built as ``static_path + picture_url`` with no
        path separator, so the existence check could never match and old
        files were never deleted — now uses os.path.join;
      * the local variable ``format`` shadowed the builtin — renamed.
    """
    form = FileUploadForm()
    if form.validate_on_submit():
        picture_path = os.path.join(current_app.root_path, 'static/picture/')
        static_path = current_app.static_folder
        allowed_formats = ['png', 'jpg', 'jpeg', 'gif']
        file = request.files['file']  # key 'file' is defined in FileUploadForm
        if file and '.' in file.filename and file.filename.rsplit('.', 1)[1] in allowed_formats:
            ext = file.filename.rsplit('.', 1)[1]
            # Draw random names until one is free.
            newfilename = random_str(32) + '.' + ext
            while os.path.exists(os.path.join(picture_path, newfilename)):
                newfilename = random_str(32) + '.' + ext
            file.save(os.path.join(picture_path, newfilename))
            # Delete the previous avatar file, if any.
            if current_user.picture_url:
                old_path = os.path.join(static_path, current_user.picture_url)
                if os.path.exists(old_path):
                    os.remove(old_path)
            current_user.picture_url = 'picture/' + newfilename
            db.session.add(current_user)
            flash(u'上传照片成功')
            return redirect(url_for('home.homepage', id=current_user.id))
        else:
            flash(u'上传照片失败,请检查图片路径是否正确或图片格式是否是png,jpg,jpeg,gif其中之一')
            return redirect(url_for('home.upload_picture'))
    return render_template('home/upload_picture.html', user=current_user, form=form)
@home.route('/disable_picture/<id>', methods=['GET'])
@login_required
@permission_required(Role.Manager)
def disable_picture(id):
    """Manager action: disable another (non-manager) user's avatar."""
    user = User.query.get(int(id))
    if user is None:
        flash(u'不存在的用户')
        return redirect(url_for('home.homepage', id=current_user.id))
    if current_user.id == user.id:
        flash(u'只能禁用其他用户的头像')
        return redirect(url_for('home.homepage', id=current_user.id))
    if user.verify_permission():
        flash(u'不能禁用管理员的头像')
        return redirect(url_for('home.homepage', id=user.id))
    if not user.picture_disabled:
        user.picture_disabled = True
        # NOTE(review): no explicit db.session.commit() here (pattern repeats
        # across this module) — presumably committed by an app-level hook;
        # verify.
        db.session.add(user)
    return redirect(url_for('home.homepage', id=user.id))
@home.route('/able_picture/<id>', methods=['GET'])
@login_required
@permission_required(Role.Manager)
def able_picture(id):
    """Manager action: re-enable another user's avatar."""
    user = User.query.get(int(id))
    if user is None:
        flash(u'不存在的用户')
        return redirect(url_for('home.homepage', id=current_user.id))
    if current_user.id == user.id:
        flash(u'只能启用其他用户的头像')
        return redirect(url_for('home.homepage', id=current_user.id))
    if user.picture_disabled:
        user.picture_disabled = False
        db.session.add(user)
    return redirect(url_for('home.homepage', id=user.id))
@home.route('/edit_about_me', methods=['GET', 'POST'])
@login_required
@permission_required(Role.User)
def edit_about_me():
    """Let the current user edit their 'about me' text."""
    form = AboutMeForm()
    if form.validate_on_submit():
        current_user.about_me = form.about_me.data
        return redirect(url_for('home.homepage', id=current_user.id))
    # Pre-fill the form with the existing text for GET requests.
    form.about_me.data = current_user.about_me
    return render_template('home/edit_about_me.html', user=current_user, form=form)
@home.route('/disable_about_me/<id>', methods=['GET'])
@login_required
@permission_required(Role.Manager)
def disable_about_me(id):
    """Manager action: disable another (non-manager) user's 'about me' text."""
    user = User.query.get(int(id))
    if user is None:
        flash(u'不存在的用户')
        return redirect(url_for('home.homepage', id=current_user.id))
    if current_user.id == user.id:
        flash(u'只能禁用其他用户的个人简介')
        return redirect(url_for('home.homepage', id=current_user.id))
    if user.verify_permission():
        flash(u'不能禁用管理员的个人简介')
        return redirect(url_for('home.homepage', id=user.id))
    if not user.about_me_disabled:
        user.about_me_disabled = True
        db.session.add(user)
    return redirect(url_for('home.homepage', id=user.id))
@home.route('/able_about_me/<id>', methods=['GET'])
@login_required
@permission_required(Role.Manager)
def able_about_me(id):
    """Manager action: re-enable another user's 'about me' text."""
    user = User.query.get(int(id))
    if user is None:
        flash(u'不存在的用户')
        return redirect(url_for('home.homepage', id=current_user.id))
    if current_user.id == user.id:
        flash(u'只能启用其他用户的个人简介')
        return redirect(url_for('home.homepage', id=current_user.id))
    if user.about_me_disabled:
        user.about_me_disabled = False
        db.session.add(user)
    return redirect(url_for('home.homepage', id=user.id))
@home.route('/follow/<int:id>', methods=['GET'])
@login_required
@permission_required(Permission.FOLLOW)
def follow(id):
    """Make the current user follow user `id`; duplicate follows are rejected."""
    user = User.query.get_or_404(int(id))
    # Existing follow relationship, if any.
    fans = Follow.query.filter_by(star_id=user.id).\
        filter_by(fans_id=current_user.id).first()
    if fans is None:
        fans = Follow(star_id=user.id, fans_id=current_user.id)
        db.session.add(fans)
        flash(u'关注成功')
    else:
        flash(u'不能重复关注')
    return redirect(url_for('home.homepage', id=user.id))
@home.route('/unfollow/<int:id>', methods=['GET'])
@login_required
@permission_required(Permission.FOLLOW)
def unfollow(id):
    """Make the current user unfollow user `id`; rejects if not following."""
    user = User.query.get_or_404(int(id))
    fans = Follow.query.filter_by(star_id=user.id).\
        filter_by(fans_id=current_user.id).first()
    if fans is not None:
        db.session.delete(fans)
        flash(u'取消关注成功')
    else:
        flash(u'不能对未关注的用户取消关注')
    return redirect(url_for('home.homepage', id=user.id))
@home.route('/<int:id>/stars', methods=['GET'])
def show_stars(id):
    """List the users that user `id` follows (self-follow rows excluded)."""
    user = User.query.get_or_404(int(id))
    stars = []
    for star_relationship in user.star_relation.all():
        star = star_relationship.star
        if user.id != star.id:
            stars.append(star)
    if not stars:
        flash(u'该用户未关注其他用户')
        return redirect(url_for('home.homepage', id=user.id))
    return render_template('home/follow.html', head=user.username + u'关注的人', follows=stars)
@home.route('/<int:id>/fans', methods=['GET'])
def show_fans(id):
    """List the followers of user `id` (self-follow rows excluded)."""
    user = User.query.get_or_404(int(id))
    fans = []
    for fan_relationship in user.fans_relation.all():
        fan = fan_relationship.fans
        if user.id != fan.id:
            fans.append(fan)
    if fans == []:
        flash(u'该用户还没有被关注')
        return redirect(url_for('home.homepage', id=user.id))
    return render_template('home/follow.html', head=user.username + u'的粉丝', follows=fans)
|
"""
author songjie
"""
from app.spider.get_book_data import GetBookData
from sqlalchemy import Column, String, Integer, ForeignKey, Boolean, desc
from sqlalchemy.orm import relationship
from app.models.base import Base
class Wish(Base):
    """A user's wish for a book, identified by ISBN."""
    __tablename__ = 'wish'
    id = Column(Integer, primary_key=True)
    uid = Column(Integer, ForeignKey('user.id'), nullable=False)
    user = relationship('User')
    isbn = Column(String(13))
    # launched: flag for wishes that are no longer pending; default False.
    launched = Column(Boolean, default=False)

    @property
    def book(self):
        """Look up the book's data by ISBN via the spider.

        NOTE(review): each access performs a fresh lookup (likely a network
        call through GetBookData) — consider caching if used repeatedly.
        """
        get_book_data = GetBookData()
        get_book_data.search_by_isbn(self.isbn)
        return get_book_data.first

    @classmethod
    def get_user_wishes(cls, uid):
        """Return the user's pending (launched=False) wishes, newest first."""
        wishes = Wish.query.filter_by(
            uid=uid, launched=False).order_by(
            desc(Wish.create_time)).all()
        return wishes
|
# Copyright (C) 2021 FireEye, Inc. All Rights Reserved.
import speakeasy.winenv.defs.windows.netapi32 as netapi32defs
from .. import api
class NetUtils(api.ApiHandler):
    """Emulated netapi32 API handlers for the speakeasy emulator."""
    name = 'netutils'
    apihook = api.ApiHandler.apihook
    impdata = api.ApiHandler.impdata
    def __init__(self, emu):
        super(NetUtils, self).__init__(emu)
        super(NetUtils, self).__get_hook_attrs__(self)
    @apihook('NetApiBufferFree', argc=1)
    def NetApiBufferFree(self, emu, argv, ctx={}):
        """
        NET_API_STATUS NET_API_FUNCTION NetApiBufferFree(
          _Frees_ptr_opt_ LPVOID Buffer
        );
        """
        # Emulation stub: nothing to actually free, just report success.
        # (ctx={} mutable default is harmless here -- it is never mutated.)
        return netapi32defs.NERR_Success
|
import tweepy
import praw
from flask import Flask, request, redirect
from flask import render_template
from flask_pymongo import PyMongo
import apikeys
app = Flask(__name__)
# Connect to MongoDB using a connection string.
# NOTE(review): credentials are hard-coded in the URI below -- move them into
# environment variables/config (like the Twitter/Reddit keys in `apikeys`).
app.config['MONGO_DBNAME'] = 'dbname'
app.config['MONGO_URI'] = "mongodb+srv://username:password@cluster0.rj36d.mongodb.net/dbname?retryWrites=true&w=majority"
mongo = PyMongo(app)
# OAuth authentication for Twitter
auth = tweepy.OAuthHandler(apikeys.twitter.API_KEY, apikeys.twitter.API_secret_key)
auth.set_access_token(apikeys.twitter.Access_Token, apikeys.twitter.Access_TokenSecret)
api = tweepy.API(auth)
# OAuth authentication for Reddit
reddit = praw.Reddit(client_id = apikeys.reddit.client_id,
                     client_secret = apikeys.reddit.client_secret,
                     user_agent = apikeys.reddit.user_agent,
                     redirect_uri = apikeys.reddit.redirect_uri,
                     refresh_token = apikeys.reddit.refresh_token)
#app_route to index.html
@app.route('/', methods=['GET'])
def index():
    """Render the landing page."""
    return render_template('index.html')
#app_route to twitter.html
@app.route('/postTweet', methods=['GET','POST'])
def postTweet():
    """Publish a tweet from the submitted form and archive it in MongoDB."""
    if request.method != "POST":
        # Plain GET: just show the compose form.
        return render_template('twitter.html')
    tweet_text = request.form["tweet"]
    status = api.update_status(tweet_text)
    record = {
        'user': status.user.screen_name,
        'id': status.id,
        'text': status.text,
        'retweet_count': status.retweet_count,
        'favorite_count': status.favorite_count,
        'lang': status.lang,
        'source': status.source,
    }
    mongo.db.twitter.insert_one(record)
    return redirect('/')
#app_route to reddit.html
@app.route('/postReddit', methods=['GET','POST'])
def postReddit():
    """Submit a post to Reddit from the form and archive it in MongoDB."""
    if request.method != "POST":
        # Plain GET: just show the compose form.
        return render_template('reddit.html')
    submission = reddit.subreddit("subreddit").submit(
        request.form['title'], request.form['message'])
    record = {
        "id": submission.id,
        "author_name": submission.author.name,
        "title": submission.title,
        "text": submission.selftext,
    }
    mongo.db.reddit.insert_one(record)
    return redirect('/')
# Run the Flask development server when this module is executed directly.
if __name__ == '__main__':
    app.run()
# Key name under which visited links are stored (presumably in Redis) --
# confirm against the consumer of this constant.
REDIS_KEY = 'visited_links'
|
# -*- python -*-
from flask import Flask, render_template, redirect, request, session
import random
from datetime import datetime
app = Flask( __name__ )
# NOTE(review): hard-coded secret key -- fine for a demo, but load it from
# config/environment before any real deployment.
app.secret_key = "NinjaGoldSecretKey"
@app.route( "/" )
def index():
if "your_gold" not in session:
session["your_gold"] = 0
session["activity"] = ""
return( render_template( "index.html" ) )
@app.route( "/process_money", methods=["POST"] )
def process_money():
loc = request.form["loc"]
c_date = datetime.now()
if loc == "farm":
new_gold = random.randint( 10, 20 )
session["activity"] += "Earned {} golds from the farm! ({})\n".format( new_gold, c_date )
session["your_gold"] += new_gold
elif loc == "cave":
new_gold = random.randint( 5, 10 )
session["activity"] += "Earned {} golds from the cave! ({})\n".format( new_gold, c_date )
session["your_gold"] += new_gold
elif loc == "house":
new_gold = random.randint( 2, 5 )
session["activity"] += "Earned {} golds from the house! ({})\n".format( new_gold, c_date )
session["your_gold"] += new_gold
elif loc == "casino":
new_gold = random.randint( -50, 50 )
if new_gold >= 0:
session["activity"] += "Entered a casino and won {} golds... Cool.. ({})\n".format( new_gold, c_date )
else:
session["activity"] += "Entered a casino and lost {} golds... Ouch.. ({})\n".format( abs( new_gold ), c_date )
session["your_gold"] += new_gold
return( redirect( "/" ) )
@app.route( "/reset", methods=["POST"] )
def reset():
session.pop( "your_gold" )
return( redirect( "/" ) )
# Guard so importing this module (e.g. from a WSGI server or tests) does not
# start the development server; behavior when run as a script is unchanged.
if __name__ == "__main__":
    app.run( debug=True )
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
# from __future__ import annotations
from textwrap import dedent
from typing import Any, Mapping
import pytest
from pants.backend.docker.target_types import DockerImageSourceField, DockerImageTarget
from pants.backend.docker.util_rules.dockerfile import rules as dockerfile_rules
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.core.util_rules.source_files import rules as source_files_rules
from pants.engine.addresses import Address
from pants.engine.fs import DigestContents, FileContent
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.rules import rule
from pants.engine.target import (
GeneratedTargets,
GenerateTargetsRequest,
SourcesField,
TargetGenerator,
)
from pants.engine.unions import UnionMembership, UnionRule
from pants.testutil.rule_runner import QueryRule, RuleRunner
@pytest.fixture
def rule_runner(request) -> RuleRunner:
    """Build a RuleRunner wired with the dockerfile and source-files rules.

    Tests parametrized with ``indirect=True`` may pass a callable that mutates
    the constructor kwargs before the runner is created.
    """
    rule_runner_args: Mapping[str, Any] = dict(
        rules=[
            *dockerfile_rules(),
            *source_files_rules(),
            QueryRule(SourceFiles, [SourceFilesRequest]),
        ],
        target_types=[DockerImageTarget],
    )
    # `request.param` only exists for indirectly-parametrized tests.
    if hasattr(request, "param") and callable(request.param):
        request.param(rule_runner_args)
    return RuleRunner(**rule_runner_args)
DOCKERFILE = dedent(
"""\
FROM python:3.9
ENTRYPOINT python3
"""
)
def assert_dockerfile(
    rule_runner: RuleRunner,
    addr: Address = Address("test"),
    *,
    filename: str = "test/Dockerfile",
    content: str = DOCKERFILE,
) -> None:
    """Hydrate the target's sources and check the resulting file name/content.

    Pass an empty ``filename``/``content`` to skip the respective assertion
    (used by the error-path tests, which only care that the request raises).
    """
    tgt = rule_runner.get_target(addr)
    result = rule_runner.request(
        SourceFiles,
        [
            SourceFilesRequest(
                sources_fields=[tgt.get(SourcesField)],
                for_sources_types=(DockerImageSourceField,),
                enable_codegen=True,
            )
        ],
    )
    if filename:
        assert result.snapshot.files == (filename,)
    if content:
        digest_contents = rule_runner.request(DigestContents, [result.snapshot.digest])
        assert len(digest_contents) == 1
        assert isinstance(digest_contents[0], FileContent)
        assert digest_contents[0].content.decode() == content
def test_hydrate_dockerfile(rule_runner: RuleRunner) -> None:
    """An on-disk Dockerfile is picked up as the docker_image's sources."""
    project_files = {
        "test/BUILD": "docker_image()",
        "test/Dockerfile": DOCKERFILE,
    }
    rule_runner.write_files(project_files)
    assert_dockerfile(rule_runner)
def test_generate_dockerfile(rule_runner: RuleRunner) -> None:
    """`instructions=` with no on-disk file generates `Dockerfile.<target name>`."""
    instructions = DOCKERFILE.strip().split("\n")
    rule_runner.write_files(
        {
            "test/BUILD": dedent(
                f"""\
                docker_image(
                  instructions={instructions!r},
                )
                """
            ),
        }
    )
    # The generated file is named after the target, not plain "Dockerfile".
    assert_dockerfile(rule_runner, filename="test/Dockerfile.test")
def setup_target_generator(rule_runner_args: dict) -> None:
    """Mutate the fixture kwargs to add a target generator producing a docker_image.

    Passed to the ``rule_runner`` fixture via indirect parametrization; defines a
    `docker_image_generator` target type plus the rule that generates a
    `docker_image` (with inline instructions) from it.
    """
    class GenerateOriginTarget(TargetGenerator):
        # Minimal generator target: no fields of its own.
        alias = "docker_image_generator"
        generated_target_cls = DockerImageTarget
        core_fields = ()
        copied_fields = ()
        moved_fields = ()
    class GenerateDockerImageTargetFromOrigin(GenerateTargetsRequest):
        generate_from = GenerateOriginTarget
    @rule
    async def generate_docker_image_rule(
        request: GenerateDockerImageTargetFromOrigin, union_membership: UnionMembership
    ) -> GeneratedTargets:
        return GeneratedTargets(
            request.generator,
            [
                DockerImageTarget(
                    {
                        "instructions": DOCKERFILE.strip().split("\n"),
                    },
                    request.template_address.create_generated("generated-image"),
                    union_membership,
                )
            ],
        )
    rule_runner_args["rules"].extend(
        [
            generate_docker_image_rule,
            UnionRule(GenerateTargetsRequest, GenerateDockerImageTargetFromOrigin),
        ]
    )
    rule_runner_args["target_types"].append(GenerateOriginTarget)
@pytest.mark.parametrize("rule_runner", [setup_target_generator], indirect=True)
def test_generate_dockerfile_for_generated_target(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"test/BUILD": "docker_image_generator()",
}
)
assert_dockerfile(
rule_runner,
Address("test", generated_name="generated-image"),
filename="test/Dockerfile.test.generated-image",
)
def test_missing_dockerfile_is_error(rule_runner: RuleRunner) -> None:
    """A docker_image with neither a Dockerfile nor `instructions=` fails hydration."""
    rule_runner.write_files({"test/BUILD": "docker_image()"})
    with pytest.raises(ExecutionError, match=r"The `docker_image` test:test does not specify any"):
        assert_dockerfile(rule_runner, filename="", content="")
def test_multiple_dockerfiles_is_error(rule_runner: RuleRunner) -> None:
    """Providing both an on-disk Dockerfile and `instructions=` is ambiguous and fails."""
    rule_runner.write_files(
        {
            "test/BUILD": "docker_image(instructions=['FROM base'])",
            "test/Dockerfile": "FROM base",
        }
    )
    with pytest.raises(ExecutionError, match=r"The `docker_image` test:test provides both"):
        assert_dockerfile(rule_runner, filename="", content="")
|
# Generated by Django 2.1.7 on 2019-02-20 07:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make profile.company/designation optional and store
    profile.photo as a plain FileField under 'profiles'."""
    dependencies = [
        ('core', '0002_auto_20190219_1320'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='company',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='profile',
            name='designation',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='profile',
            name='photo',
            field=models.FileField(upload_to='profiles'),
        ),
    ]
|
#!/usr/bin/env python
"""
title: Export Downloader -python
description: Downloads exort data from Bazaarvoice export APIs in bulk
"""
import argparse
import os
import time
import hmac
import hashlib
import requests
import json
# Create hmac signature
def createSignature(passkey, secretKey, timestamp, path):
    """Return the hex HMAC-SHA256 signature for a Bazaarvoice export request.

    Bug fix: the query-string templates contained a mangled HTML entity
    ("...&times;tamp=") instead of the literal "&timestamp=" separator, so the
    signed message never matched what the server expects.  Also encodes str
    inputs so hmac.new works on Python 3 as well.
    """
    # If 'path' will be in the GET request it must be part of the signed message.
    if path:
        message = "path={0}&passkey={1}&timestamp={2}".format(path, passkey, timestamp)
    else:
        message = "passkey={0}&timestamp={1}".format(passkey, timestamp)
    key = secretKey.encode("utf-8") if isinstance(secretKey, str) else secretKey
    return hmac.new(key, message.encode("utf-8"), hashlib.sha256).hexdigest()
def buildBVHeaders(passkey, signature, timestamp):
    """Assemble the Bazaarvoice authentication headers for one request."""
    return {
        'X-Bazaarvoice-Passkey': passkey,
        'X-Bazaarvoice-Signature': signature,
        'X-Bazaarvoice-Timestamp': timestamp,
    }
def doHttpGet(url, passkey, secretKey, path):
    """Perform a signed GET against the export API and return the Response.

    A fresh millisecond timestamp is generated per request and signed together
    with the passkey (and the optional path) via HMAC-SHA256.
    """
    timestamp = str(round(time.time() * 1000))
    # Sign exactly the parameters that will be sent.
    signature = createSignature(passkey=passkey, secretKey=secretKey, timestamp=timestamp, path=path)
    headers = buildBVHeaders(passkey, signature, timestamp)
    params = { 'path' : path } if path else {}
    # stream=True so large export files are not buffered whole by requests.
    resp = requests.get(url, params=params, headers=headers, timeout=60, allow_redirects=True, stream=True)
    return resp
def getManifestForDate(manifests, version, date, dataType):
    """Return the manifest path matching version/date/dataType, or None."""
    candidates = (rec for rec in manifests
                  if rec['version'] == version and dataType in rec)
    for record in candidates:
        for entry in record[dataType]:
            if entry['date'] == date:
                return entry['path']
    return None
def saveFile(dest, path, content):
    """Write `content` (bytes) to dest+path, creating parent dirs as needed."""
    filename_ = dest + path
    print "Saving as " + filename_
    dirname_ = os.path.dirname(filename_)
    if not os.path.exists(dirname_):
        os.makedirs(dirname_)
    with open(filename_, "wb") as file_:
        file_.write(content)
def getFiles(manifests, version, date, category, destination, dataType):
    """Download all export files for the given date/version/category.

    Returns False when no manifest matches; exits the process on HTTP errors.

    NOTE(review): this relies on the module-level globals `url`, `passkey`
    and `secretKey` set in the __main__ block below -- consider passing them
    as parameters.
    """
    manifest_path = getManifestForDate(manifests, version, date, dataType)
    if not manifest_path:
        print "Warning: Did not find \"" + date + "\" for version=\"" + version + "\" type=\"" + dataType + "\" in downloaded manifests"
        return False
    # We have the manifest file path for the requested date and version. Download it and process.
    print "Fetching " + manifest_path + "..."
    resp = doHttpGet(url, passkey, secretKey, manifest_path)
    if resp.status_code != requests.codes.ok:
        print "Error: could not download " + manifest_path + " (" + str(resp.status_code) + ")"
        exit(resp.content)
    file_type_map = json.loads(resp.content)
    if category == "all":
        print "Downloading all categories..."
    else:
        print "Downloading category \"" + category + "\"..."
    # Iterate through all category types, processing the category we want
    for file_type in file_type_map.keys():
        if file_type == category or category == "all":
            for file_object in file_type_map[file_type]:
                path = file_object['path']
                print "Fetching " + path + " ..."
                resp = doHttpGet(url, passkey, secretKey, path)
                if resp.status_code != requests.codes.ok:
                    print "Error: could not download " + path + " (" + str(resp.status_code) + ")"
                    exit(resp.content)
                saveFile(destination, path, resp.content)
    return True
# Main part: parse CLI options, load config, fetch manifests and download files.
if __name__ == '__main__':
    # Setting script parameters and variables
    p = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    p.add_argument('--config', dest='configFile', help='path to configuration (default is ../config.json)')
    p.add_argument('--env', dest='environment', required=True, help='environment of export service (must be present in config file)')
    p.add_argument('--date', dest='date', required=True, help='date of files to download, in YYYY-mm-dd format')
    p.add_argument('--dest', dest='destination', required=True, help='destination folder to store downloaded data')
    p.add_argument('--type', dest='category', help='type of files, like reviews, questions... (defaults to all types)')
    p.add_argument('--v', dest='version', help='version of data to retrieve (defaults to v2)')
    p.add_argument('--fulls', dest='fulls', action='store_true', help='Retrieve fulls')
    p.add_argument('--incrementals', dest='incrementals', action='store_true', help='Retrieve incrementals')
    opts = p.parse_args()
    # Determine operation mode or print help
    if not opts.environment or not opts.date:
        p.print_help()
        exit(1)
    # Apply defaults for the optional arguments.
    configFile = opts.configFile if opts.configFile else "../config.json"
    version = opts.version if opts.version else "v2"
    date = opts.date
    category = opts.category if opts.category else "all"
    destination = opts.destination.rstrip('\\') if opts.destination else "./output"
    fulls = opts.fulls
    incrementals = opts.incrementals
    if not fulls and not incrementals:
        exit("Must specify one or both of [--fulls, --incrementals]")
    if not os.path.isfile(configFile):
        exit("Config file \"" + configFile + "\" does not exist")
    else:
        with open(configFile) as key_file:
            config = json.load(key_file)
        environment = config[opts.environment] if opts.environment in config else None
        if not environment:
            print "Error: environment " + opts.environment + " not present in " + configFile
            exit(1)
        # Credentials for the selected environment (getFiles/doHttpGet read
        # these as module globals).
        passkey = str(environment['passkey']).strip('"')
        secretKey = str(environment['secret']).strip('"')
        url = str(environment['url']).strip('"')
        print "Fetching manifests..."
        resp = doHttpGet(url, passkey, secretKey, None)
        if resp.status_code != requests.codes.ok:
            print "Error: could not download manifests (" + str(resp.status_code) + ")"
            exit(resp.content)
        manifest_json = json.loads(resp.content)
        manifests = manifest_json['manifests']
        if fulls:
            getFiles(manifests, version, date, category, destination, 'fulls')
        if incrementals:
            getFiles(manifests, version, date, category, destination, 'incrementals')
        exit(0)
|
# Fibonacci series: the sum of two consecutive elements determines the next.
# First pass: print each term below 10 on its own line.
a, b = 0, 1
while b < 10:
    print(b)
    a, b = b, a+b
print("*****************")
# Second pass: same series, printed on one line separated by commas.
a, b = 0, 1
while b < 10:
    print(b, end=',')
    a, b = b, a+b
# Write a Python program to print each character of a string on single line.
my_string = input("Type a string: ")
# Every character followed by a single space, no trailing newline
# (identical output to printing each char with end=" ").
print(" ".join(my_string), end=" ")
|
from PIL import Image
import pytesseract
import cv2
import os
import numpy as np
import sys
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import webbrowser
from googleapiclient.discovery import build
from slackclient import SlackClient
# Google Custom Search credentials (raises KeyError if the env vars are unset).
google_api_key = os.environ["HQ_GOOGLE_API_KEY"]
google_cse_id = os.environ["HQ_GOOGLE_CSE_ID"]
# Slack API key
slack_token = os.environ["HQ_SLACK_TOKEN"]
sc = SlackClient(slack_token)
#performs the google search
def google_search(search_term, api_key, cse_id, **kwargs):
service = build("customsearch", "v1", developerKey=api_key)
res = service.cse().list(q=search_term, cx=cse_id, **kwargs).execute()
return res['items']
#posts text to hqtrivia channel in slack
def slack_message(text):
sc.api_call(
"chat.postMessage",
channel="#hqtrivia",
text=text
)
class ImageParser(PatternMatchingEventHandler):
    """Watchdog handler: OCRs new screenshot files and scores the answers."""
    def process(self, event):
        # Only analyze files whose name contains "screenshot".
        if (event.src_path.find("screenshot") != -1):
            # Crop away the top 15% and bottom 20% of the screen so only the
            # question and answers remain (iPhone X users may need to tweak).
            image = cv2.imread(event.src_path)
            height = np.size(image, 0)
            width = np.size(image, 1)
            crop = image[int(height*0.15):int(height - height*0.2), 0:width]
            # Binary threshold helps when the answers are grayed out
            # (e.g. when you're out of the game).
            gray = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)
            gray = cv2.threshold(gray, 200, 255,
                cv2.THRESH_BINARY)[1]
            # Keep the original screenshot only when "-save" was passed.
            if("-save" not in sys.argv[1:]):
                os.remove(event.src_path)
            # Write the processed image to a temp file named after our PID.
            filename = "{}.png".format(os.getpid())
            cv2.imwrite(filename, gray)
            # This is where the OCR happens.
            text = pytesseract.image_to_string(Image.open(filename))
            os.remove(filename)
            # Simple split into question/answers; assumes the last 3
            # paragraph blocks are the answers.
            split = text.split("\n\n")
            if len(split) >= 4:
                # Question = all blocks except the last 3, joined on one line.
                question = " ".join(split[:len(split) - 3]).replace("\n", " ")
                # Answers = the last 3 blocks.
                answers = split[len(split) - 3:]
                self.score_answers(question, answers)
    def score_answers(self, question, answers):
        """Google the question and rank each answer by its hit count."""
        print(question)
        #UNCOMMENT TO OPEN UP TABS
        # for answer in answers:
        #     a_url = "https://en.wikipedia.org/wiki/{}".format(answer.replace("&", ""))
        #     webbrowser.open_new(a_url)
        # url = "https://www.google.com.tr/search?q={}".format(question.replace("&", ""))
        # webbrowser.open_new(url)
        results = google_search(question, google_api_key, google_cse_id, num=9)
        # Count how many times each answer string appears in the raw results.
        answer_results = [{'count': 0, 'alpha_key': 'A'}, {'count': 0, 'alpha_key': 'B'}, {'count': 0, 'alpha_key': 'C'}]
        for index, val in enumerate(answers):
            answer_results[index]['count'] = str(results).count(answers[index])
        result_sum = sum(answer_result['count'] for answer_result in answer_results)
        for index, answer_result in enumerate(answer_results):
            # Avoid division by zero when no answer appeared in the results.
            if result_sum == 0:
                result_sum = 1
            percentage = answer_result['count']/result_sum * 100
            text = answer_result['alpha_key'] + ":'" + answers[index].lstrip() + "'(" + str(int(percentage)) + "%)"
            answer_result['text'] = text
            answer_result['percentage'] = percentage
        for ar in answer_results:
            print(ar['text'])
            # Optionally mirror the scores to Slack when "-slack" was passed.
            if("-slack" in sys.argv[1:]):
                slack_message(ar['text'])
    def on_created(self, event):
        # Watchdog callback for new files.
        self.process(event)
if __name__ == '__main__':
    observer = Observer()
    observer.schedule(ImageParser(), path='.')
    observer.start()
    # Bug fix: the original `while 1:` loop never exited, so the
    # time.sleep()/KeyboardInterrupt block after it was unreachable dead code
    # (and `time` was never imported, which would have raised NameError).
    # Run the screenshot loop until Ctrl-C, then shut the observer down cleanly.
    try:
        # Take a device screenshot on each keypress; the watchdog observer
        # picks the new file up and OCR-processes it.
        while True:
            input("\nPress anything to continue\n")
            os.system("idevicescreenshot")
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
|
# -*-coding:Utf-8 -*
import re
chaine = ""
exp = r"^0[0-9]([ .-]?[0-9]{2}){4}$"
while re.search(exp, chaine) is None:
raw_input("Numero") |
from abc import ABC, abstractmethod
from typing import List, Tuple
import torch
from torch import nn
from torch.distributions import MultivariateNormal, Normal
from torchdiffeq import odeint
class MLP(nn.Module):
    """Multi-layer perceptron: Linear(+activation) hidden blocks followed by a
    final Linear, optionally ReLU-activated via ``last_activation='relu'``."""
    def __init__(self, dim_in: int, hidden_sizes: List[int], dim_out: int, activation: str, last_activation=None):
        super().__init__()
        widths = [dim_in] + hidden_sizes
        modules = []
        # One Linear + activation per hidden layer.
        for prev, cur in zip(widths[:-1], widths[1:]):
            modules.append(nn.Linear(prev, cur))
            if activation == 'relu':
                modules.append(nn.ReLU())
            elif activation == 'tanh':
                modules.append(nn.Tanh())
            else:
                raise ValueError(f'activation {activation} not valid')
        # Output projection (optionally ReLU-activated).
        modules.append(nn.Linear(widths[-1], dim_out))
        if last_activation == 'relu':
            modules.append(nn.ReLU())
        self.model = nn.Sequential(*modules)
    def forward(self, x):
        return self.model(x)
class Model(nn.Module, ABC):
    """Neural-process-style latent ODE model (abstract base).

    An encoder aggregates a context/target set (t, y) into a representation r,
    from which the parameters of q(z) are produced; z = [l(0), z'] is the
    initial latent state concatenated with a constant code z'.  l(t) is
    integrated forward with a learned ODE (``ode_mlp``) and a decoder maps
    (t, l(t), z') to the parameters of p(y_t).  Subclasses supply the
    encoder/decoder (MLP- or ConvNet-based).
    """
    def __init__(self, dim_r: int, dim_z_prime: int, dim_l: int, hidden_sizes_ode_net: List[int], t0: float,
                 device: torch.device):
        super().__init__()
        self.linear_r_to_h = nn.Linear(dim_r, dim_r) # context representation r to h
        self.linear_mu_z = nn.Linear(dim_r, dim_l + dim_z_prime)
        self.linear_sigma_z = nn.Linear(dim_r, dim_l + dim_z_prime)
        self.ode_mlp = MLP(dim_l + dim_z_prime + 1, hidden_sizes_ode_net, dim_l, activation='tanh')
        self.dim_l = dim_l
        self.dim_z_prime = dim_z_prime
        # t0 is minimum possible value of t (and depends on the dataset)
        self.t0 = t0
        self.device = device
    @abstractmethod
    def encoder(self, t, y) -> torch.Tensor:
        """
        Parameters
        ----------
        t: (N, T, 1)
        y: (N, T, dim_y)

        Returns
        -------
        Tensor r of shape (N, T, dim_r), the encoded representation of the context/target set (t, y)
        """
        pass
    @abstractmethod
    def decoder(self, t, latent_states, z_prime) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Parameters
        ----------
        t: (N, T, 1)
        latent_states: (N, T, dim_l)
        z_prime: (N, T, dim_z_prime)

        Returns
        -------
        Tensors mu and sigma of shape (N, T, dim_y), such that p(y_t|g(l_t,t)) = N(mu, diag(sigma))
        """
        pass
    def q(self, t: torch.Tensor, y: torch.Tensor):
        """Return the variational posterior q(z|set) as a Normal distribution."""
        # t (N, T, 1) and y (N, T, dim_y), can be context set C or target set T
        # encode and aggregate
        r = self.encoder(t, y)  # (N, T, dim_r)
        r = r.mean(dim=1)  # (N, dim_r)
        # context representation r to parameters of q(z|C) : mu_z(r), sigma_z(r)
        h = self.linear_r_to_h(r)  # (N, dim_r)
        mu_z = self.linear_mu_z(h)  # (N, dim_z)
        assert not mu_z.isnan().any()
        sigma_z = self.linear_sigma_z(h)  # (N, dim_z)
        # squash the std into (0.1, 1.0) for numerical stability
        sigma_z = 0.1 + 0.9 * torch.sigmoid(sigma_z)
        return Normal(mu_z, sigma_z)
    def p(self, t: torch.Tensor, latent_states: torch.Tensor, z_prime: torch.Tensor):
        """Return the predictive distribution p(y|t, l(t), z') as a Normal."""
        # t (N, T, 1), latent_states (N, T, dim_l)
        mu_y, sigma_y = self.decoder(t, latent_states, z_prime)  # (N, T, dim_y)
        # softplus keeps the std strictly positive (> 0.1 after the shift)
        sigma_y = 0.1 + 0.9 * torch.nn.functional.softplus(sigma_y)
        return Normal(mu_y, sigma_y)
    def ode_func(self, t, v):
        """Time derivative of v = [l(t), z']; z' has zero derivative."""
        N, _ = v.shape  # (N, dim_l + dim_z_prime)
        t = t.view(1, 1).repeat(N, 1).to(self.device)  # t is already a tensor, make it of shape (N, 1)
        dl = self.ode_mlp(torch.cat([v, t], dim=1))  # (N, dim_l)
        dz_prime = torch.zeros(N, self.dim_z_prime,
                               device=self.device)  # no variations in z_prime, our constant encoded context
        assert not dl.isnan().any()
        return torch.cat([dl, dz_prime], dim=1)  # return dv
    def forward(self, t_context: torch.Tensor, y_context: torch.Tensor, t_target: torch.Tensor,
                y_target: torch.Tensor = None, z: torch.Tensor = None):
        """
        Parameters
        ----------
        t_context: (batch_size, num_context_points, 1)
        y_context: (batch_size, num_context_points, dim_y)
        t_target: (batch_size, num_target_points, 1)
        y_target: (batch_size, num_target_points, dim_y)
            Optional and only specified during training, in this case z is sampled from q(z|T)
        z: (batch_size, dim_z)
            Optional, if specified do not sample from q(z|C) or q(z|T) but use this z

        Returns
        -------
        p_y, q_z_T, q_z_C
            with q_z_T=None in the case y_target=None, and p_y the predictions at points t_target
        """
        batch_size = t_context.shape[0]
        t0 = torch.tensor(self.t0, device=self.device).view(1, 1, 1).repeat(batch_size, 1, 1)
        # encode target/context sets and sample context
        q_z_C = self.q(t_context, y_context)
        q_z_T = None
        if z is None:
            if y_target is None:
                # during testing, we don't have access to the target set, and we sample from the context set
                z = q_z_C.rsample()
            else:
                # during training, we need q_z_T to compute the loss and we sample from the whole target set
                q_z_T = self.q(t_target, y_target)
                z = q_z_T.rsample()  # z = [l(0), z_prime], of shape (N, dim_l + dim_z_prime)
        # integrate to get latent states at prediction times
        # shapes :
        # t_target_sorted (num_unique_points,)
        # t_target_indices (N, num_target + 1, 1) same shape as input
        t_target_sorted, t_target_indices = torch.unique(torch.cat([t0, t_target], dim=1), sorted=True,
                                                         return_inverse=True)
        # v is of shape (N, T', dim_l + dim_z_prime) with :
        # v_t = [l(t), z_prime]
        # T' = num_unique_points < T = num_target if duplicates
        v = odeint(self.ode_func, z, t_target_sorted)  # (T', N, dim_v)
        # todo: use odeint_adjoint? (more stable numerically)
        v = v.permute(1, 0, 2)  # (N, T', dim_v)
        # gather maps the de-duplicated, sorted integration results back to
        # the original (possibly duplicated) target time ordering
        t_target_indices = t_target_indices.repeat(1, 1, self.dim_l)  # (N, T+1, dim_l)
        latent = v.gather(dim=1, index=t_target_indices)  # (N, T+1, dim_l), get the initial order
        latent = latent[:, 1:, :]  # we don't care about l_0
        z_prime = z[:, self.dim_l:]
        p_y = self.p(t_target, latent, z_prime)  # distrib of shape (N, num_target, dim_y)
        return p_y, q_z_T, q_z_C
class MLPModel(Model):
    """Latent ODE model with MLP encoder/decoder (for low-dimensional y)."""
    def __init__(self, dim_y: int, dim_r: int, dim_z_prime: int, dim_l: int,
                 hidden_sizes_encoder: List[int], hidden_sizes_ode_net: List[int],
                 hidden_sizes_decoder: List[int], t0: float, device: torch.device):
        super(MLPModel, self).__init__(dim_z_prime=dim_z_prime, hidden_sizes_ode_net=hidden_sizes_ode_net, t0=t0,
                                       dim_l=dim_l, dim_r=dim_r, device=device)
        # Encoder consumes (t, y) concatenated along the feature dimension.
        self.encoder_mlp = MLP(dim_y + 1, hidden_sizes_encoder, dim_r, activation='relu')
        dim_h = hidden_sizes_decoder[-1]  # size of the hidden layer coming from xlz_to_hidden, before mu_y/sigma_y
        self.decoder_mlp = MLP(dim_l + 1, hidden_sizes_decoder, dim_h, activation='relu', last_activation='relu')
        # mu/sigma heads see the hidden features plus a skip of the latent state.
        self.decoder_mu = nn.Linear(dim_h + dim_l, dim_y)
        self.decoder_sigma = nn.Linear(dim_h + dim_l, dim_y)
    def encoder(self, t, y) -> torch.Tensor:
        """Encode (t, y) -> r; see Model.encoder for shapes."""
        return self.encoder_mlp(torch.cat([t, y], dim=2))
    def decoder(self, t, latent_states, z_prime) -> Tuple[torch.Tensor, torch.Tensor]:
        """Decode (t, l(t)) -> (mu, pre_sigma); z_prime is unused here."""
        h = self.decoder_mlp(torch.cat([t, latent_states], dim=2))
        mu = self.decoder_mu(torch.cat([h, latent_states], dim=2))
        pre_sigma = self.decoder_sigma(torch.cat([h, latent_states], dim=2))
        return mu, pre_sigma
class ConvNetModel(Model):
    """Latent ODE model with a CNN encoder/decoder for 28x28 images (dim_y=784)."""
    def __init__(self, dim_r: int, dim_z_prime: int, dim_l: int, hidden_sizes_ode_net: List[int], t0: float,
                 device: torch.device):
        super().__init__(dim_z_prime=dim_z_prime, hidden_sizes_ode_net=hidden_sizes_ode_net, t0=t0, dim_l=dim_l,
                         dim_r=dim_r, device=device)
        # Strided conv stack: 28x28x1 -> 2x2x128 feature maps.
        self.encoder_cnn = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=2, padding=(2, 2)),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=2, padding=(2, 2)),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=2, padding=(2, 2)),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=2, padding=(2, 2)),
            nn.ReLU(),
        )
        self.encoder_mlp = MLP(dim_in=128 * 2 * 2 + 128, hidden_sizes=[500], dim_out=dim_r, activation='relu')
        self.decoder_linear = nn.Linear(dim_l + dim_z_prime + 1, 4 * 4 * 8)  # (l, z', t) -> cnn_input_t (c=8, w=4, h=4)
        self.decoder_cnn = nn.Sequential(
            # output shape (C=128, W=7, H=7)
            nn.ConvTranspose2d(in_channels=8, out_channels=128, kernel_size=5, stride=2, padding=(2, 2)),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            # (64, 14, 14)
            nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=5, stride=2, padding=(2, 2),
                               output_padding=(1, 1)),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            # (32, 28, 28)
            nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=5, stride=2, padding=(2, 2),
                               output_padding=(1, 1)),
            nn.BatchNorm2d(32),
            nn.ReLU()
        )
        # Separate 1-channel heads for the mu and pre-sigma images.
        self.decoder_to_sigma = nn.ConvTranspose2d(32, 1, kernel_size=5, stride=1, padding=(2, 2))
        self.decoder_to_mu = nn.ConvTranspose2d(32, 1, kernel_size=5, stride=1, padding=(2, 2))
    def encoder(self, t, y) -> torch.Tensor:
        """Encode flattened 28x28 images plus times into r; see Model.encoder."""
        N, num_points, dim_y = y.shape
        assert dim_y == 784
        y = y.view(N * num_points, 1, 28, 28)  # conv2D requires (N, C, H, W) inputs
        y = self.encoder_cnn(y)  # (N * num_points, 128, 2, 2)
        y = y.view(N, num_points, -1)
        # t is repeated 128x before concatenation -- presumably to give the time
        # component more weight relative to the 512 image features; confirm.
        t = t.repeat(1, 1, 128)
        r = self.encoder_mlp(torch.cat([y, t], dim=2))  # (N, num_points, dim_r)
        return r
    def decoder(self, t, latent_states, z_prime) -> Tuple[torch.Tensor, torch.Tensor]:
        """Decode (t, l(t), z') into per-pixel (mu, pre_sigma) images."""
        N, num_points, _ = t.shape
        z_prime = z_prime.view(N, 1, -1).repeat(1, num_points, 1)  # (N, num_points, dim_z_prime)
        x = torch.cat([t, latent_states, z_prime], dim=2)
        x = self.decoder_linear(x)
        x = x.view(N * num_points, 8, 4, 4)  # convTranspose2D requires (N, C, H, W) inputs
        x = self.decoder_cnn(x)
        pre_sigma = self.decoder_to_sigma(x)
        mu = self.decoder_to_mu(x)
        pre_sigma = pre_sigma.view(N, num_points, 784)
        mu = mu.view(N, num_points, 784)
        return mu, pre_sigma
|
#!/usr/bin/env python3
from sys import argv
"""
Version 2: faster than version 1 (it now uses a list to
store all palindromes and count occurrences).
Finds palindromes greater than X characters.
It also prints:
- the size of the longest palindrome
- the size of the shortest palindrome
PEP8 compliant
“Readability counts."
“Beautiful is better than ugly.”
— The Zen of Python
"""
if len(argv) > 2:
    all_palindromes = []  # stores all palindromes found in the file
    str_size = argv[1]    # number of required characters (string form)
    filename = argv[2]    # file to parse
    counter_plus = 0      # counts palindromes with str_size or more chars
    counter_exact = 0     # counts palindromes with exactly str_size chars
    with open(filename, 'r') as file:
        for string in file.read().split():
            if string == string[::-1]:
                all_palindromes.append(string)
    if not all_palindromes:
        # Bug fix: max()/min() below raise ValueError on an empty list --
        # report the situation cleanly instead of crashing.
        print(f'No palindromes found in file {filename}')
    else:
        required = int(str_size)  # convert once, not on every comparison
        # extracts the longest and the shortest palindromes (string)
        longest_str = max(all_palindromes, key=len)
        shortest_str = min(all_palindromes, key=len)
        # Sizes outside [len(shortest), len(longest)] cannot match anything,
        # so force both counters to zero without scanning.
        if required < len(shortest_str) or required > len(longest_str):
            counter_plus = counter_exact = 0
        else:
            for palindrome in all_palindromes:
                if len(palindrome) >= required:
                    counter_plus += 1
                    if len(palindrome) == required:
                        counter_exact += 1
        print(f'There are:\n'
              f'{counter_plus} palindromes with {str_size} or more characters\n'
              f'{counter_exact} palindromes with exact {str_size} characters\n'
              f'{len(all_palindromes)} palindromes total in file {filename}\n')
        print(f'---> Longest palindrome: "{longest_str}" > '
              f'{len(longest_str)} characters.\n')
        print(f'---> Shortest palindrome: "{shortest_str}" > '
              f'{len(shortest_str)} characters.\n')
else:
    print('Usage: palindrome.py numberofchars filename\n'
          'Example: ./palindrome.py 15 filewithstrings.txt')
|
# Generated by Django 2.2.4 on 2019-08-06 02:21
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
class Migration(migrations.Migration):
    """Auto-generated initial migration: Post, PostTag and the TaggedPost
    through-model wiring taggit tags onto Post."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(blank=True, max_length=100, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='PostTag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, unique=True, verbose_name='Name')),
                ('slug', models.SlugField(allow_unicode=True, max_length=100, unique=True, verbose_name='slug')),
            ],
            options={
                'verbose_name': 'tag',
                'verbose_name_plural': 'tags',
            },
        ),
        migrations.CreateModel(
            name='TaggedPost',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content_object', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tagapp.Post')),
                ('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagapp_taggedpost_items', to='tagapp.PostTag')),
            ],
            options={
                'verbose_name': 'tagged post',
                'verbose_name_plural': 'tagged posts',
            },
        ),
        migrations.AddField(
            model_name='post',
            name='tags',
            field=taggit.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', through='tagapp.TaggedPost', to='tagapp.PostTag', verbose_name='tags'),
        ),
    ]
|
import sys
try:
    import psycopg2
    HAS_PSYCOPG2 = True
except ImportError:
    # psycopg2 is optional; callers must check HAS_PSYCOPG2 before use.
    psycopg2 = None
    HAS_PSYCOPG2 = False

# Python 2/3 string-type aliases.  Compare sys.version_info rather than the
# sys.version string: lexicographic string comparison is fragile (e.g. a
# hypothetical "10.0" compares less than "3" as a string).
if sys.version_info[0] < 3:
    text_type = unicode          # noqa: F821 -- only defined on Python 2
    binary_type = str
    string_types = basestring    # noqa: F821 -- only defined on Python 2
else:
    text_type = str
    binary_type = bytes
    string_types = str
|
# Loading the libraries we need
import numpy as np
import cv2
import time
# Create a VideoCapture object to read video from the primary camera
cap = cv2.VideoCapture(0)
# Create a VideoWriter object to save the output video
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('wizard_smaran.avi', fourcc, 20.0, (640, 480))
# Allow the camera to warm up before capturing starts
time.sleep(2)
# Capture the static background (stay out of the frame!). Reading ~30
# frames lets exposure/white balance settle; the last frame is kept.
background = 0
for i in range(30):
    ret, background = cap.read()  # capturing image
# Now capture the subject in real time
while cap.isOpened():
    ret, img = cap.read()
    if not ret:
        break
    # Convert BGR -> HSV: hue-based thresholding is less light-sensitive
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # Mask for the low red hue range (0-10)
    lower_red = np.array([0, 120, 70])
    upper_red = np.array([10, 255, 255])
    mask1 = cv2.inRange(hsv, lower_red, upper_red)
    # Mask for the high red hue range (170-180)
    lower_red = np.array([170, 120, 70])
    upper_red = np.array([180, 255, 255])
    mask2 = cv2.inRange(hsv, lower_red, upper_red)
    # Combine the masks obtained for both ranges
    mask1 = mask1 + mask2
    # Remove speckle noise, then dilate to close small holes in the cloak.
    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8), iterations=2)
    # BUG FIX: the dilated mask used to be stored in mask2 and then
    # immediately overwritten below, so the dilation had no effect.
    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8), iterations=1)
    # Everything that is NOT the cloth color
    mask2 = cv2.bitwise_not(mask1)
    # Static background pixels where the cloak is...
    layer1 = cv2.bitwise_and(background, background, mask=mask1)
    # ...and live frame pixels everywhere else
    layer2 = cv2.bitwise_and(img, img, mask=mask2)
    final_output = cv2.addWeighted(layer1, 1, layer2, 1, 0)
    # BUG FIX: the VideoWriter was created but never written to — save the frame.
    out.write(final_output)
    cv2.imshow('Invisible Smaran', final_output)
    k = cv2.waitKey(10)
    # Keyboard interrupt: ESC quits
    if k == 27:
        break
cap.release()
out.release()  # finalize the output video file
cv2.destroyAllWindows()
from default_roi_input import DefaultROIInputPopup
from own_roi_input import OwnROIInputPopup
from fs_roi_input import FSROIInputPopup |
"""
# 基于数组实现训练队列
"""
import os
import logging
from itertools import chain
logger = logging.getLogger(__name__)
class CircularQueueByArray(object):
    """Circular (ring-buffer) queue backed by a fixed-size array.

    One slot is always kept empty so the pointers disambiguate states:
    ``head == tail`` means empty, ``(tail + 1) % capacity == head`` means full.
    """

    def __init__(self, capacity=5):
        # Reserve one extra slot so the tail pointer always has a free
        # position to point at (full/empty disambiguation).
        self._capacity = capacity + 1
        # BUG FIX: the buffer must be preallocated at a fixed size. The
        # original appended to a growing list, so after the tail index
        # wrapped around, writes landed past the logical buffer and the
        # queue state became corrupt (and dequeued items were never freed).
        self._items = [None] * self._capacity
        self._head = 0  # index of the next item to dequeue
        self._tail = 0  # index of the next free slot

    def enqueue(self, item: str):
        """Add *item* at the tail. Returns False if the queue is full."""
        if (self._tail + 1) % self._capacity == self._head:
            # Full: the tail pointer has no free position left to move to.
            return False
        self._items[self._tail] = item
        self._tail = (self._tail + 1) % self._capacity
        return True

    def dequeue(self):
        """Remove and return the head item; returns None when empty."""
        if self._head != self._tail:
            # head == tail would mean the queue is empty.
            item = self._items[self._head]
            self._items[self._head] = None  # drop the reference
            self._head = (self._head + 1) % self._capacity
            return item

    def __repr__(self) -> str:
        # Walk from head to tail, wrapping around the end of the buffer.
        if self._tail >= self._head:
            return "->".join(item for item in self._items[self._head: self._tail])
        else:
            return "->".join(item for item in chain(self._items[self._head:], self._items[:self._tail]))
if __name__ == '__main__':
    logging.basicConfig(
        format="[%(asctime)s %(filename)s:%(lineno)s] %(message)s",
        level=logging.INFO,
        filename=None,
        filemode="a",
    )
    # Smoke test: fill the queue, pop twice, push one more, then log it.
    queue = CircularQueueByArray(5)
    for value in range(5):
        queue.enqueue(str(value))
    queue.dequeue()
    queue.dequeue()
    queue.enqueue(str(5))
    logger.info(queue)
|
from django.shortcuts import render, render_to_response, RequestContext
from django.core.urlresolvers import reverse
import forms
from django.http import HttpResponseRedirect, HttpResponse
# Create your views here.
from django.views.generic import ListView, CreateView, UpdateView, DeleteView
from django.contrib import auth
from django.core.context_processors import csrf
from goals.models import Goals, Goals_log, Goalsco
"""class SiteViewer(CreateView):
model = Goals
template_name = 'base.html'
"""
class ListGoalsView(ListView):
    # List all Goals records using the 'goals.html' template.
    model = Goals
    template_name = 'goals.html'
    # NOTE(review): ListView does not consume form_class itself — presumably
    # the template renders this form; confirm it is actually used.
    form_class = forms.GoalsForm
class CreateGoalsView(CreateView):
    # Create a new Goals record; the same page also shows existing goals.
    model = Goals
    template_name = 'edit_goals.html'
    form_class = forms.GoalsForm
    #form_class = forms.GoalsForm

    def get_success_url(self):
        # After a successful save, redirect back to this create page.
        return reverse('goals-create')

    def get_context_data(self, **kwargs):
        # Inject the full, id-ordered goals list so the template can render
        # it alongside the creation form.
        kwargs['object_list'] = Goals.objects.order_by('id')
        return super(CreateGoalsView, self).get_context_data(**kwargs)

    def UserDetails():
        # NOTE(review): this method appears dead/broken — it lacks `self`
        # and references undefined names `meta` and `request`, so calling
        # it would raise NameError. Confirm whether it can be removed.
        model = Goals_log
        template_name = 'goals.html'
        meta['users'] = request.POST.get('users','')
"""
class UpdateGoalsView(UpdateView):
model = Goals
template_name = 'goals.html'
def get_success_url(self):
return reverse('goals-list')
def get_context_data(self, **kwargs):
context = super(UpdateGoalsView, self).get_context_data(**kwargs)
context['action'] = reverse('goals-new',
kwargs={'pk': self.get_object().id})
return context
"""
"""
class DeleteGoalsView(DeleteView):
model = Goals
template_name = 'goals.html'
def get_success_url(self):
return reverse('goals-list')
#class ListUsersView(ListView):
# model = Goals_log
# template_name = 'goals.html'
"""
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 4 08:24:09 2019
@author: Reuben
A module of handy utility functions used elsewhere within resultbox.
"""
import numpy as np
from scipy.interpolate import interp1d
def listify(obj):
    """Wrap *obj* in a single-element list unless it already is a list."""
    return obj if isinstance(obj, list) else [obj]
def cosort(xs, ys, min_diff=0):
    """Sort x and y vectors into monotonically increasing order of x.

    Args:
        xs (vector-like): The x values
        ys (vector-like): The y values
        min_diff (float, int): The minimum step by which x must always
            increase. Pass None to skip de-duplication entirely.

    Note:
        Some points may be dropped where x does not increase by at least
        ``min_diff``, so the returned vectors can safely be used for
        interpolation.

    See: https://stackoverflow.com/questions/11851770/spline-interpolation-with-python
    """
    # Pair up the vectors and order the pairs by their x component.
    pairs = sorted(zip(xs, ys), key=lambda pair: pair[0])
    if min_diff is not None:
        pairs = deduplicate_xs(pairs, min_diff=min_diff)
    # Split the pairs back into separate x and y sequences.
    sorted_xs, sorted_ys = zip(*pairs)
    # Preserve the container type of the x input.
    if isinstance(xs, np.ndarray):
        return np.array(sorted_xs), np.array(sorted_ys)
    if isinstance(xs, list):
        return list(sorted_xs), list(sorted_ys)
    return sorted_xs, sorted_ys
def deduplicate_xs(tups, min_diff=0):
    """Drop pairs whose x does not exceed the last kept x by min_diff.

    Args:
        tups (list[tuple]): A sorted list of (x, y) pairs from
            :func:`utils.cosort`
        min_diff (float, int): The minimum step by which x must always
            increase. Defaults to 0.

    See https://www.geeksforgeeks.org/python-remove-tuples-having-duplicate-first-value-from-given-list-of-tuples/
    """
    # Sentinel strictly below the first x so the first pair is always kept.
    last_kept = tups[0][0] - min_diff - 1
    kept = []
    for pair in tups:
        if pair[0] > last_kept + min_diff:
            kept.append(pair)
            last_kept = pair[0]
    return kept
def orient(arr, n, axis="rows"):
    """Orient a 2D array so that it has n rows or columns.

    Args:
        arr (array-like): The array
        n (int, list): The number of rows for the desired output. Must
            equal the length of one array axis. Or a list of elements.

    Returns:
        ndarray: The oriented array. It behaves as a list.
    """
    a = np.atleast_2d(arr)
    target = len(n) if isinstance(n, list) else n
    # Find which axis has the requested length (the last match wins,
    # matching the original behavior for square arrays).
    matched_axis = None
    for dim in (0, 1):
        if a.shape[dim] == target:
            matched_axis = dim
    if matched_axis is None:
        raise ValueError("Neither dimension in arr has " + str(target) + " elements.")
    # Transpose only when the matched axis disagrees with the requested one.
    want_rows = axis == "rows"
    oriented = a if want_rows == (matched_axis == 0) else a.T
    # Preserve the container type of the input.
    return oriented.tolist() if isinstance(arr, list) else oriented
def unpack(arr, labels):
    """Unpack a 2D array correctly regardless of its orientation.

    Args:
        arr (array-like): The array
        labels (list, int): The labels for each vector, or an integer for
            the number of labels.

    Returns:
        list: A list of vectors.
    """
    # Put one labelled vector on each row, then split the rows out.
    return list(orient(arr, labels, axis="rows"))
def _interp_1D(
    xs, ys, new_xs, min_diff=1e-4, bounds_error=False, fill_value=np.nan, **kwargs
):
    """Return values interpolated at new_xs for a single 1D vector."""
    # Interpolation requires strictly increasing x values.
    sorted_xs, sorted_ys = cosort(xs, ys, min_diff=min_diff)
    if fill_value == "bounds":
        # Clamp to the first/last y value outside the data range.
        fill_value = (sorted_ys[0], sorted_ys[-1])
    interpolator = interp1d(
        sorted_xs, sorted_ys, bounds_error=bounds_error, fill_value=fill_value, **kwargs
    )
    return interpolator(new_xs)
def interp(
    xs, ys, new_xs, min_diff=None, bounds_error=False, fill_value=np.nan, **kwargs
):
    """Interpolate an array based on a matching vector and target vector.

    Args:
        xs (vector-like): A 1D array or list of x values
        ys (array-like): A 1D or 2D array of y values, which matches xs in
            one dimension.
        new_xs (vector_like): The desired output x values.
        min_diff (float): The minimum positive difference between adjacent
            x values to use during interpolation.
        bounds_error: As per scipy interp1d.
        fill_value: As per scipy interp1d.
        kwargs: Other keyword arguments to pass to scipy interp1d.

    Note:
        This function uses scipy's interp1d to perform the interpolation.
    """
    y_ndim = np.ndim(ys)
    if isinstance(xs, np.ndarray):
        xs = xs.flatten()
    if min_diff is None:
        # Default to a tiny fraction of the x range.
        min_diff = 1e-12 * (np.max(xs) - np.min(xs))
    if y_ndim == 1:
        return _interp_1D(xs, ys, new_xs, min_diff, bounds_error, fill_value, **kwargs)
    if y_ndim == 2:
        # Orient ys so each row matches xs, then interpolate row by row.
        rows = orient(ys, len(xs), "cols")
        interpolated = [
            _interp_1D(xs, row, new_xs, min_diff, bounds_error, fill_value, **kwargs)
            for row in rows
        ]
        if isinstance(ys, np.ndarray):
            return np.array(interpolated)
        return interpolated
    raise ValueError("ys must have 1 or 2 dimensions")
def list_to_str(lst, length=18, sep=" ", brackets=True):
    """Convert a list of values to a nice-to-look-at string.

    Args:
        lst (list): A list of values
        length (int): The maximum length of the output string. The string
            is truncated at this length if the list length is above 3.
        sep (str): The separator to use between values
        brackets (bool): True to add square brackets around the list.

    Returns:
        str: The string.
    """
    joined = sep.join(val_to_str(item) for item in lst)
    # Only truncate genuinely long lists; short lists are shown in full.
    if len(joined) > length and len(lst) > 3:
        joined = joined[:length] + "..."
    return "[" + joined + "]" if brackets else joined
def vec_to_str(num, precision=3, list_sep=" "):
    """Format a vector (list or numpy array) as a short string.

    Args:
        num: A list, numpy array, or scalar value.
        precision (int): Significant digits for scalar formatting.
        list_sep (str): Separator between list elements.

    Returns:
        str: The formatted value.
    """
    format_str = "{:0." + str(precision) + "g}"
    if isinstance(num, list):
        return list_to_str(num, sep=list_sep)
    elif isinstance(num, np.ndarray):
        if num.size == 1:
            # A single-element array is shown as a plain scalar.
            return format_str.format(num.item())
        else:
            return list_to_str(num.flatten().tolist())
    # BUG FIX: previously fell off the end and returned None for scalar
    # inputs; format any other value as a scalar instead.
    return format_str.format(num)
def val_to_str(num, precision=3, list_sep=" ", length=18):
    """Format a single number as a nice-to-look-at string.

    Args:
        num: The number
        precision (int): The precision of the output string

    Returns:
        str: The formatted number
    """
    if num is None:
        return "None"
    if isinstance(num, str):
        return num
    if isinstance(num, int):
        return str(num)
    if isinstance(num, float):
        # General format with the requested number of significant digits.
        return ("{:0." + str(precision) + "g}").format(num)
    if isinstance(num, list):
        return list_to_str(num, sep=list_sep, length=length)
    if isinstance(num, dict):
        return dict_to_str(num, list_sep=list_sep, length=length)
    if isinstance(num, np.ndarray):
        return vec_to_str(num, precision=precision, list_sep=list_sep)
    # Anything else falls back to its default string form.
    return str(num)
def dict_to_str(dct, val_sep=" ", key_sep=" ", list_sep=",", length=18):
    """Convert a dict to a nice-to-look-at string.

    Args:
        dct (dict): The dictionary
        val_sep (str): The separator between a key and its value.
        key_sep (str): The separator between different key-value pairs.
        list_sep (str): The separator between list entries

    Returns:
        str: The formatted string
    """
    parts = [
        str(key) + val_sep + val_to_str(value, list_sep=list_sep, length=length)
        for key, value in dct.items()
    ]
    return key_sep.join(parts)
def str_to_dict(string, val_sep="=", key_sep=";", list_sep=","):
    """Convert a string of key value pairs to a dictionary.

    Args:
        string (str): The string
        val_sep (str): The separator between a key and its value
        key_sep (str): The separator between key-value pairs.
        list_sep (str): The separator between list entries

    Returns:
        dict: A new dictionary
    """

    def make_list(s):
        # Strip the surrounding brackets, then interpret each element.
        s = s.strip("[]")
        lst = s.split(list_sep)
        return [interpret(item) for item in lst]

    def interpret(val):
        if val == "None":
            return None
        elif val.startswith("["):
            return make_list(val)
        elif val.isdigit():
            return int(val)
        # BUG FIX: the original used `val.isnumeric()` to detect floats,
        # but isnumeric() is False for values such as "1.5" (the decimal
        # point is not a numeric character) and True for characters that
        # float() cannot parse — so floats were never converted. Attempt
        # a float conversion directly instead.
        try:
            return float(val)
        except ValueError:
            return val

    pairs = [s.split(val_sep) for s in string.split(key_sep)]
    return {p[0]: interpret(p[1]) for p in pairs}
def strip_unit(s):
    """Remove units enclosed in square brackets from a string.

    Args:
        s (str): A string

    Returns:
        str: The string without anything enclosed in square brackets.

    Note:
        Useful for removing units in a key. It recurses over pairs of
        brackets. Trailing spaces will be stripped.
    """
    start = s.find("[")
    end = s.find("]")
    # BUG FIX: the original tested `start > 0`, so a bracket at index 0 was
    # never removed — and an unconditional `if "[" in s: strip_unit(s)`
    # afterwards then recursed forever on the unchanged string. Strip when
    # a well-formed "[...]" exists anywhere, and only recurse after a
    # successful removal so the recursion always terminates.
    if start >= 0 and end > start:
        s = s[:start].rstrip() + s[end + 1 :].rstrip()
        if "[" in s:
            s = strip_unit(s)
    return s
def safe_fname(fname):
    """Change a string if needed to make it a valid file name.

    Args:
        fname (str): The candidate file name

    Returns:
        str: The safe file name.

    TODO:
        This needs work — currently only bracketed units are removed.
    """
    return strip_unit(fname)
def ensure_ext(fname, ext):
    """Edit a string if needed to ensure it has a particular extension.

    Args:
        fname (str): The file name
        ext (str): The extension

    Returns:
        str: The file name with the desired extension (without duplication
        of the extension).
    """
    # Normalize the extension to always carry a leading dot.
    dotted = ext if ext.startswith(".") else "." + ext
    return fname if fname.endswith(dotted) else fname + dotted
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from pants.backend.python.goals import lockfile
from pants.backend.python.lint.bandit.skip_field import SkipBanditField
from pants.backend.python.subsystems.python_tool_base import PythonToolBase
from pants.backend.python.target_types import (
ConsoleScript,
InterpreterConstraintsField,
PythonSourceField,
)
from pants.core.util_rules.config_files import ConfigFilesRequest
from pants.engine.rules import collect_rules
from pants.engine.target import FieldSet, Target
from pants.option.option_types import ArgsListOption, FileOption, SkipOption
@dataclass(frozen=True)
class BanditFieldSet(FieldSet):
    """Selects the fields Bandit needs from targets with Python sources."""

    required_fields = (PythonSourceField,)

    # The source file to lint.
    source: PythonSourceField
    # Constraints used to select an interpreter compatible with the target.
    interpreter_constraints: InterpreterConstraintsField

    @classmethod
    def opt_out(cls, tgt: Target) -> bool:
        # Targets that set the skip-Bandit field are excluded from linting.
        return tgt.get(SkipBanditField).value
class Bandit(PythonToolBase):
    """Subsystem configuring the Bandit security linter as a Pants tool."""

    options_scope = "bandit"
    name = "Bandit"
    help = "A tool for finding security issues in Python code (https://bandit.readthedocs.io)."

    # How the tool is invoked from its distribution.
    default_main = ConsoleScript("bandit")
    default_requirements = [
        "bandit>=1.7.0,<1.8",
        # When upgrading, check if Bandit has started using PEP 517 (a `pyproject.toml` file).
        # If so, remove `setuptools` here.
        "setuptools",
        # GitPython 3.1.20 was yanked because it breaks Python 3.8+, but Poetry's lockfile
        # generation still tries to use it.
        "GitPython>=3.1.24",
    ]

    # Built-in lockfile shipped with the plugin.
    default_lockfile_resource = ("pants.backend.python.lint.bandit", "bandit.lock")

    # Standard tool options: skip the linter, pass extra args, point at a config file.
    skip = SkipOption("lint")
    args = ArgsListOption(example="--skip B101,B308 --confidence")
    config = FileOption(
        default=None,
        advanced=True,
        help="Path to a Bandit YAML config file (https://bandit.readthedocs.io/en/latest/config.html).",
    )

    @property
    def config_request(self) -> ConfigFilesRequest:
        # Refer to https://bandit.readthedocs.io/en/latest/config.html. Note that there are no
        # default locations for Bandit config files.
        return ConfigFilesRequest(
            specified=self.config, specified_option_name=f"{self.options_scope}.config"
        )
def rules():
    """Return this module's rules combined with lockfile-generation rules."""
    return tuple(collect_rules()) + tuple(lockfile.rules())
|
from operator import add as addition_calc
|
# Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict, List, Union
from .abi.decoder import ABIDecoder
from .semantic.decoder import SemanticDecoder
from ..models.decoded_model import DecodedTransaction, Proxy
from ..models.objects_model import Block, Call
from ..providers.web3_provider import NodeDataProvider
from ..semantics.standards.eip1969 import is_eip1969_proxy, is_eip1969_beacon_proxy
log = logging.getLogger(__name__)
class DecoderService:
    """Decodes a raw blockchain transaction into a semantically described one.

    Pipeline: fetch raw transaction + block from a node, collect
    delegate-call relationships to build proxy descriptions, run the ABI
    decoder, then enrich the result with the semantic decoder.
    """

    def __init__(
        self,
        abi_decoder: ABIDecoder,
        semantic_decoder: SemanticDecoder,
        web3provider: NodeDataProvider,
        default_chain: str,
    ):
        # Collaborators are injected; this class only orchestrates them.
        self.abi_decoder: ABIDecoder = abi_decoder
        self.semantic_decoder: SemanticDecoder = semantic_decoder
        self.web3provider: NodeDataProvider = web3provider
        self.default_chain: str = default_chain

    def decode_transaction(self, chain_id: str, tx_hash: str) -> DecodedTransaction:
        """Fetch and fully decode the transaction *tx_hash* on *chain_id*.

        Falls back to ``self.default_chain`` when *chain_id* is falsy.
        """
        # verify the transaction hash
        tx_hash = tx_hash if tx_hash.startswith("0x") else "0x" + tx_hash
        chain_id = chain_id or self.default_chain
        # Start recording which semantics get used (reported at the end).
        self.semantic_decoder.repository.record()
        # read a raw transaction from a node
        transaction = self.web3provider.get_full_transaction(
            tx_hash=tx_hash, chain_id=chain_id
        )
        # read a raw block from a node
        block = Block.from_raw(
            w3block=self.web3provider.get_block(
                transaction.metadata.block_number, chain_id
            ),
            chain_id=chain_id,
        )
        # prepare lists of delegations to properly decode delegate-calling contracts
        delegations = self.get_delegations(transaction.root_call)
        proxies = self.get_proxies(delegations, chain_id)
        # decode transaction using ABI
        abi_decoded_tx = self.abi_decoder.decode_transaction(
            block=block, transaction=transaction, proxies=proxies, chain_id=chain_id
        )
        # decode transaction using additional semantics
        semantically_decoded_tx = self.semantic_decoder.decode_transaction(
            block=block.metadata,
            transaction=abi_decoded_tx,
            proxies=proxies,
            chain_id=chain_id,
        )
        used_semantics = self.semantic_decoder.repository.end_record()
        log.info(
            "Semantics used in decoding %s: %s",
            tx_hash,
            ", ".join(used_semantics) if used_semantics else "",
        )
        return semantically_decoded_tx

    def get_proxies(
        self, delegations: Dict[str, List[str]], chain_id: str
    ) -> Dict[str, Proxy]:
        """Build a Proxy description for every delegating contract.

        *delegations* maps each delegator address to the addresses it
        delegate-calls (see :meth:`get_delegations`).
        """
        proxies = {}
        chain = self.web3provider._get_node_connection(chain_id)
        for delegator in delegations:
            delegator_semantics = self.semantic_decoder.repository.get_semantics(
                chain_id, delegator
            )
            # Classify the proxy pattern from its first delegate.
            # NOTE(review): the helpers are named "eip1969" — presumably the
            # EIP-1967 proxy-storage-slot standard; confirm in
            # ..semantics.standards.eip1969.
            if is_eip1969_proxy(chain, delegator, delegations[delegator][0]):
                proxy_type = "EIP1969Proxy"
                fallback_name = "EIP1969_Proxy"
            elif is_eip1969_beacon_proxy(chain, delegator, delegations[delegator][0]):
                proxy_type = "EIP1969Beacon"
                fallback_name = "EIP1969_BeaconProxy"
            else:
                proxy_type = "GenericProxy"
                fallback_name = "Proxy"
            delegates_semantics = [
                self.semantic_decoder.repository.get_semantics(chain_id, delegate)
                for delegate in delegations[delegator]
            ]
            # Prefer the delegator's ERC20 semantics; otherwise take the
            # first delegate that has any.
            token_semantics = delegator_semantics.erc20
            if not token_semantics:
                for delegate_semantics in delegates_semantics:
                    if delegate_semantics.erc20:
                        token_semantics = delegate_semantics.erc20
                        break
            proxies[delegator] = Proxy(
                address=delegator,
                # Use the delegator's own name unless it is missing or just
                # echoes the address; then fall back to the pattern name.
                name=delegator_semantics.name
                if delegator_semantics and delegator_semantics.name != delegator
                else fallback_name,
                type=proxy_type,
                semantics=[semantics for semantics in delegates_semantics if semantics],
                token=token_semantics,
            )
        return proxies

    @staticmethod
    def get_delegations(calls: Union[Call, List[Call]]) -> Dict[str, List[str]]:
        """Collect delegate-call edges from a call list or a call tree.

        Returns a mapping of from_address -> unique to_addresses for every
        call with call_type == "delegatecall".
        """
        delegations = {}
        if not calls:
            return delegations
        if isinstance(calls, list):
            # Flat list of calls: scan directly.
            for call in calls:
                if call.call_type == "delegatecall":
                    if call.from_address not in delegations:
                        delegations[call.from_address] = []
                    if call.to_address not in delegations[call.from_address]:
                        delegations[call.from_address].append(call.to_address)
        else:
            # Single root call: traverse the subcall tree via a queue.
            calls_queue = [calls]
            while calls_queue:
                call = calls_queue.pop()
                for _, sub_call in enumerate(call.subcalls):
                    calls_queue.insert(0, sub_call)
                if call.call_type == "delegatecall":
                    if call.from_address not in delegations:
                        delegations[call.from_address] = []
                    if call.to_address not in delegations[call.from_address]:
                        delegations[call.from_address].append(call.to_address)
        return delegations
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
class Lunch(object):
    """Mediator wiring a Customer to an Employee for placing orders."""

    def __init__(self):
        # NOTE(review): 'cuntomer' looks like a typo for 'customer', but the
        # attribute name is part of the public surface, so it is kept.
        self.cuntomer = Customer()
        self.employee = Employee()

    def order(self, foodName):
        """Have the customer place an order with the employee."""
        self.cuntomer.placeOrder(foodName, self.employee)

    def result(self):
        """Print the name of the ordered food."""
        self.cuntomer.printFood()
class Customer(object):
    """Orders food through an employee and remembers what arrived."""

    def __init__(self):
        self.food = None  # populated by placeOrder()

    def placeOrder(self, foodName, employee):
        """Ask *employee* for *foodName* and keep the returned food."""
        self.food = employee.takeOrder(foodName)

    def printFood(self):
        """Print the name of the stored food."""
        print(self.food.name)
class Employee(object):
    """Fulfils an order by producing the matching Food."""

    def takeOrder(self, foodName):
        """Return a new Food named *foodName*."""
        return Food(foodName)
class Food(object):
    """A food item identified by its name."""

    def __init__(self, name):
        self.name = name
if __name__ == '__main__':
    # Demo: order two dishes and print each result.
    lunch = Lunch()
    for dish in ('pork', 'pizza'):
        lunch.order(dish)
        lunch.result()
|
# -*- coding: utf-8 -*-
"""
@author: Kamila Kitowska, Katarzyna Pękala
"""
#%%
# Working directory for input/output files — must be set before running.
my_path = ""
#%%
#libraries
import os
# NOTE(review): os.chdir("") raises FileNotFoundError — set my_path above first.
os.chdir(my_path)
import importlib
import datetime
import numpy as np
import scenarios as scn
import matplotlib.pyplot as plt
from textwrap import wrap
#importlib.reload(scn)
#simulate spot prices:
#import price_sim
#it generates ceny_spot_sim.txt file
#get spot prices:
#import prices_get
#it generates ceny_spot.txt file
#%%
# initial values
# Simulated day: 2017-11-27, from 00:30 to 23:30.
start = datetime.datetime.strptime("2017-11-27 00:30:00","%Y-%m-%d %H:%M:%S")
end = datetime.datetime.strptime("2017-11-27 23:30:00","%Y-%m-%d %H:%M:%S")
ec2_od = 300 # amount of on-demand servers, already bought
ec2_price_od_old = 0.42 # price of bought on-demand servers, per server per hour
users_per_server = 100 # number of concurrent users per server
revenue = 0.00021 #per user per minute
demand_avg = 40000 #predicted avg. users per minute
demand_std_dev = 5000 #predicted std. dev. of users per minute
ec2_price_od = 0.84 # price of new on-demand servers, per server per hour
ec2_od_new = 0 # variable for new on-demand servers
ec2_spot = 0 # variable for new spot server
# project parameters
n_of_sim = 500 # number of simulations
availability_level = 0.99 # how many users/min must have access
availability_no_sim = 0.9 # how many simulations must meet availability level
bid = 0.84 # bid
spot_prices_s = 0 #spot prices source: 1 - simulation, 0 - historical
if spot_prices_s == 1:
    spot_file = "ceny_spot_sim.txt"
else:
    spot_file = "ceny_spot.txt"
#%%
#plots definitions
def draw_plot_1():
    """Plot scenario 1: profit vs. server count (left) and denials (right).

    Reads the module-level globals avg_profit, avg_denials, avail,
    servers_no_range, availability_no_sim and n_of_sim set by the
    scenario-1 cell.
    """
    fig, ax1 = plt.subplots()
    plt.suptitle('First scenario: only on-demand servers')
    max_profit = max(avg_profit)
    # Index of the smallest availability counter still satisfying the
    # required share of simulations.
    avail_index = avail.index(min([i for i in avail if i > availability_no_sim * n_of_sim]))
    ax2 = ax1.twinx()
    ax1.plot(avg_profit, servers_no_range, 'g-')
    # Mark and annotate the maximum-profit point.
    ax1.plot(max_profit, servers_no_range[avg_profit.index(max_profit)], 'ro')
    ax1.annotate(
        (int(max_profit), servers_no_range[avg_profit.index(max_profit)]),
        xy=(max_profit, servers_no_range[avg_profit.index(max_profit)]),
        xytext=(max_profit - 800, servers_no_range[avg_profit.index(max_profit)]),
        arrowprops=dict(facecolor='black', shrink=0.05))
    # Mark and annotate the first point meeting the availability condition.
    ax1.plot(avg_profit[avail_index], servers_no_range[avail_index], 'go')
    ax1.annotate(
        (int(avg_profit[avail_index]), servers_no_range[avail_index]),
        xy=(avg_profit[avail_index], servers_no_range[avail_index]),
        xytext=(avg_profit[avail_index] - 600, servers_no_range[avail_index]),
        arrowprops=dict(facecolor='black', shrink=0.05))
    ax2.plot(avg_profit, avg_denials, 'b-')
    ax2.plot(max_profit, avg_denials[avg_profit.index(max_profit)], 'ro')
    ax2.annotate(
        (int(max_profit), int(avg_denials[avg_profit.index(max_profit)])),
        xy=(max_profit, avg_denials[avg_profit.index(max_profit)]),
        xytext=(max_profit - 800, avg_denials[avg_profit.index(max_profit)]),
        arrowprops=dict(facecolor='black', shrink=0.05))
    ax1.set_xlabel('Avg total profit')
    # BUG FIX: label previously read 'oo-demand'.
    ax1.set_ylabel('Number of on-demand servers', color='g')
    # NOTE(review): avg_denials is divided by 1000 in the scenario cells —
    # 'MM' presumably means thousands; confirm intended unit.
    ax2.set_ylabel('MM denials', color='b')
    #ax1.set_ylim([140,200])
    plt.show()
    return
def draw_plot_2():
    """Plot scenario 2: profit vs. spot-server count and denials.

    Unlike draw_plot_1, this reads the module-level ``max_profit`` and
    ``avail_index`` computed in the scenario-2 cell — call it only after
    those globals are set.
    """
    fig, ax1 = plt.subplots()
    plt.suptitle('Second scenario: 300 reserved on-demand servers and spot instances only')
    ax2 = ax1.twinx()
    ax1.plot(avg_profit, servers_no_range, 'g-')
    # Maximum-profit point.
    ax1.plot(max_profit, servers_no_range[avg_profit.index(max_profit)], 'ro')
    ax1.annotate(
        (int(max_profit), servers_no_range[avg_profit.index(max_profit)]),
        xy=(max_profit, servers_no_range[avg_profit.index(max_profit)]),
        xytext=(max_profit - 300, servers_no_range[avg_profit.index(max_profit)]),
        arrowprops=dict(facecolor='black', shrink=0.05))
    # First point meeting the availability condition.
    ax1.plot(avg_profit[avail_index], servers_no_range[avail_index], 'go')
    ax1.annotate(
        (int(avg_profit[avail_index]), servers_no_range[avail_index]),
        xy=(avg_profit[avail_index], servers_no_range[avail_index]),
        xytext=(avg_profit[avail_index] - 600, servers_no_range[avail_index]),
        arrowprops=dict(facecolor='black', shrink=0.05))
    ax2.plot(avg_profit, avg_denials, 'b-')
    ax2.plot(max_profit, avg_denials[avg_profit.index(max_profit)], 'ro')
    ax2.annotate(
        (int(max_profit), int(avg_denials[avg_profit.index(max_profit)])),
        xy=(max_profit, avg_denials[avg_profit.index(max_profit)]),
        xytext=(max_profit - 300, avg_denials[avg_profit.index(max_profit)]),
        arrowprops=dict(facecolor='black', shrink=0.05))
    ax1.set_xlabel('Avg total profit')
    # BUG FIX: label previously read 'oo-demand'.
    ax1.set_ylabel('Number of on-demand servers', color='g')
    ax2.set_ylabel('MM denials', color='b')
    plt.show()
    return
def draw_plot_3():
    """Plot scenario 3: profit vs. spot/on-demand server count and denials.

    Computes max_profit locally but reads the module-level ``avail_index``
    set by the scenario-3 cell.
    """
    fig, ax1 = plt.subplots()
    plt.suptitle("\n".join(wrap('Third scenario: 300 reserved on-demand servers, spot instances only and on-demand servers when spot unavaible', 60)))
    max_profit = max(avg_profit)
    ax2 = ax1.twinx()
    ax1.plot(avg_profit, servers_no_range, 'g-')
    # Maximum-profit point.
    ax1.plot(max_profit, servers_no_range[avg_profit.index(max_profit)], 'ro')
    ax1.annotate(
        (int(max_profit), servers_no_range[avg_profit.index(max_profit)]),
        xy=(max_profit, servers_no_range[avg_profit.index(max_profit)]),
        xytext=(max_profit - 700, servers_no_range[avg_profit.index(max_profit)]),
        arrowprops=dict(facecolor='black', shrink=0.05))
    # First point meeting the availability condition.
    ax1.plot(avg_profit[avail_index], servers_no_range[avail_index], 'go')
    ax1.annotate(
        (int(avg_profit[avail_index]), servers_no_range[avail_index]),
        xy=(avg_profit[avail_index], servers_no_range[avail_index]),
        xytext=(avg_profit[avail_index] - 650, servers_no_range[avail_index]),
        arrowprops=dict(facecolor='black', shrink=0.05))
    ax2.plot(avg_profit, avg_denials, 'b-')
    ax2.plot(max_profit, avg_denials[avg_profit.index(max_profit)], 'ro')
    ax2.annotate(
        (int(max_profit), int(avg_denials[avg_profit.index(max_profit)])),
        xy=(max_profit, avg_denials[avg_profit.index(max_profit)]),
        xytext=(max_profit - 700, avg_denials[avg_profit.index(max_profit)]),
        arrowprops=dict(facecolor='black', shrink=0.05))
    ax1.set_xlabel('Avg total profit')
    # BUG FIX: label previously read 'oo-demand'.
    ax1.set_ylabel('Number of on-demand servers', color='g')
    ax2.set_ylabel('MM denials', color='b')
    plt.show()
    return
#%%
# scenario "only on demand servers"
avg_profit = []
avg_denials = []
avail = []
final_result = ()
servers_lower_range = 50
servers_higher_range = 200
servers_no_interval = 5
servers_no_range = range(servers_lower_range, servers_higher_range, servers_no_interval)
# simulations for j additional on-demand servers
for j in servers_no_range:
    # res[0]: per-simulation [profit, denials, availability] rows;
    # res[1]: count of simulations meeting the availability level
    # (per the print below — confirm against scenarios.first_scenario).
    res = scn.first_scenario(start,end,demand_avg,demand_std_dev,n_of_sim,j,
                             ec2_od, users_per_server, ec2_price_od_old,
                             ec2_price_od, revenue, availability_level)
    avg_res = np.array(res[0]).mean(axis=0)
    avail.append(res[1])
    avg_profit.append(avg_res[0])
    avg_denials.append(avg_res[1]/1000)  # stored in thousands for plotting
    print("additional on-demand servers =",j," | avg total profit =",
          avg_res[0],"| avg amount of denials-of-access", avg_res[1],
          "| availability ", avg_res[2]*100,"% | availability cond. counter",res[1])
    # Keep the first server count that meets the availability condition.
    if res[1]/n_of_sim>availability_no_sim and scn.is_empty(final_result):
        final_result = (avg_res,res[1],j)
# final result of simulations
if scn.is_empty(final_result):
    print("\n\nAvailability condition of",availability_level*100,"% in",
          availability_no_sim*n_of_sim,"out of",n_of_sim,
          "simulations wasn't satisfied.")
else:
    print("\nFINAL RESULT: \nAdditional on-demand servers =",final_result[2],
          " | avg total profit =", final_result[0][0],
          "| avg amount of denials-of-access", final_result[0][1],
          "| availability ", final_result[0][2]*100,"% \nIn ",final_result[1],
          "simulations out of",n_of_sim,"availability condition of",
          availability_level,"was met.")
draw_plot_1()
#plot
#%%
# simulation of second scenario
avg_profit = []
avg_denials = []
avail = []
final_result = ()
servers_lower_range = 50
servers_higher_range = 200
servers_no_interval = 5
servers_no_range = range(servers_lower_range, servers_higher_range, servers_no_interval)
# simulations for j additional spot servers
for j in servers_no_range:
    # res[0]: per-simulation [profit, denials, availability] rows;
    # res[3]: availability condition counter (per the print below);
    # res[1]/res[2]: spot-minutes and total simulated minutes (used after
    # the loop) — confirm against scenarios.second_scenario.
    res = scn.second_scenario(start,end,demand_avg,demand_std_dev,n_of_sim,j,
                              spot_file, bid, users_per_server, ec2_od, ec2_od_new,
                              ec2_price_od,revenue, availability_level,
                              ec2_price_od_old,my_path)
    avg_res = np.array(res[0]).mean(axis=0)
    avail.append(res[3])
    avg_profit.append(avg_res[0])
    avg_denials.append(avg_res[1]/1000)  # stored in thousands for plotting
    print("additional spot servers =",j," | avg tot. profit =",
          avg_res[0],"| avg amount of denials", avg_res[1],"| availability ",
          avg_res[2]*100,"% | availability cond. counter",res[3])
    # Keep the first server count that meets the availability condition.
    if res[3]/n_of_sim>availability_no_sim and scn.is_empty(final_result):
        final_result = (avg_res,res[3],j)
if scn.is_empty(final_result):
    print("\n\nAvailability condition of",availability_level*100,"% in",availability_no_sim*n_of_sim,"out of",n_of_sim,"simulations wasn't satisfied.")
else:
    print("\nFINAL RESULTS: \nadditional spot servers =",final_result[2]," | avg total profit =",
          final_result[0][0],"| avg amount of denials-of-access", final_result[0][1],
          "| availability ", final_result[0][2]*100,"% \nIn ",final_result[1],
          "simulations out of",n_of_sim,"availability condition of",availability_level,"was met.")
# These two globals are read by draw_plot_2().
max_profit = max(avg_profit)
avail_index = avail.index(min([i for i in avail if i>availability_no_sim*n_of_sim]))
draw_plot_2()
# Share of the simulated time that spot servers were actually running
# (data from the last loop iteration only).
spot_min = np.sum(res[1])
sim_min = res[2]
print("---")
print("Spot servers were working for", spot_min, "minutes (",float(spot_min)/sim_min*100,"% of simulation time)")
print("For", sim_min-spot_min, "minutes only 300 on-demand servers were working (",(sim_min-spot_min)/sim_min*100,"% of simulation time)")
#%%
#third scenario
# "old" on demand servers + spot instances + on-demand servers when spot unavaible
# on-demand needs 2 min for startup
avg_profit = []
avg_denials = []
avail = []
final_result = ()
servers_lower_range = 50
servers_higher_range = 200
servers_no_interval = 5
servers_no_range = range(servers_lower_range, servers_higher_range, servers_no_interval)
# simulations for j additional spot servers
for j in servers_no_range:
    # res[0]: per-simulation [profit, denials, availability] rows;
    # res[4]: availability condition counter (per the print below);
    # res[1]/res[2]/res[3]: spot minutes, extra on-demand minutes, total
    # simulated minutes — confirm against scenarios.third_scenario.
    res = scn.third_scenario(start,end,demand_avg,demand_std_dev,n_of_sim,j,
                             spot_file, bid, users_per_server, ec2_od,
                             ec2_price_od, ec2_price_od_old, revenue,
                             availability_level, my_path)
    avail.append(res[4])
    avg_res = np.array(res[0]).mean(axis=0)
    avg_profit.append(avg_res[0])
    avg_denials.append(avg_res[1]/1000)  # stored in thousands for plotting
    print("additional spot/on-demand servers =",j," | avg tot. profit =",
          avg_res[0],"| avg amount of denials", avg_res[1],"| availability ",
          avg_res[2]*100,"% | availability cond. counter",res[4])
    # Keep the first server count that meets the availability condition.
    if res[4]/n_of_sim>availability_no_sim and scn.is_empty(final_result):
        final_result = (avg_res,res[4],j)
if scn.is_empty(final_result):
    print("\n\nAvailability condition of",availability_level*100,"% in",availability_no_sim*n_of_sim,"out of",n_of_sim,"simulations wasn't satisfied.")
else:
    print("\nFINAL RESULTS: \nAdditional spot servers =",final_result[2]," | avg total profit =",
          final_result[0][0],"| avg amount of denials-of-access", final_result[0][1],
          "| availability ", final_result[0][2]*100,"% \nIn ",final_result[1],
          "simulations out of",n_of_sim,"availability condition of",availability_level,"was met.")
# These two globals are read by draw_plot_3() (avail_index) below.
max_profit = max(avg_profit)
avail_index = avail.index(min([i for i in avail if i>availability_no_sim*n_of_sim]))
draw_plot_3()
# Runtime breakdown from the last loop iteration only.
spot_min = np.sum(res[1])
nod_min = np.sum(res[2])
sim_min = res[3]
print("---")
print("Spot servers were working for", spot_min, "minutes (",float(spot_min)/sim_min*100,"% of simulation time)")
print("Additional on demand servers were working for", nod_min, "minutes (",nod_min/sim_min*100,"% of simulation time)")
print("For", sim_min-nod_min-spot_min, "minutes only 300 on-demand servers were working (",(sim_min-nod_min-spot_min)/sim_min*100,"% of simulation time)")
|
# Chris Pool
# S2816539
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn.metrics import classification_report, confusion_matrix
import sys
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
# read_corpus reads a text file and splits each line on whitespace. The first
# three tokens of a line are metadata (category, sentiment label, document id);
# the remaining tokens form the document. The boolean parameter use_sentiment
# selects which metadata column is used as the label. It returns two parallel
# lists: the tokenized documents and their labels.
def read_corpus(corpus_file, use_sentiment):
    """Read the corpus file and return ``(documents, labels)``.

    Each line is expected to look like::

        <category> <sentiment> <doc-id> <token> <token> ...

    Tokens are stemmed with the Snowball stemmer; when ``bigrams`` is
    enabled each document is represented as the list of adjacent token
    pairs instead of single tokens.

    :param corpus_file: path to the whitespace-tokenized corpus file
    :param use_sentiment: True  -> use the sentiment column (tokens[1])
                          False -> use the category column (tokens[0])
    :return: (list of tokenized documents, list of labels)
    """
    documents = []
    labels = []
    bigrams = True
    stemmer = SnowballStemmer("english", ignore_stopwords=True)
    with open(corpus_file, encoding='utf-8') as f:
        for line in f:
            tokens = line.strip().split()
            # tokens[3:] are the document words; tokens[0:3] are metadata.
            t = [stemmer.stem(token) for token in tokens[3:]]
            if bigrams:
                documents.append(list(zip(t, t[1:])))
            else:
                documents.append(t)
            # BUG FIX: `use_sentiment` was previously ignored — tokens[1]
            # (sentiment) was always used, contradicting the documented
            # contract. Existing callers pass use_sentiment=True, so their
            # behavior is unchanged.
            labels.append(tokens[1] if use_sentiment else tokens[0])
    return documents, labels
# a dummy function that just returns its input
def identity(x):
    """Return *x* unchanged.

    Used as a no-op tokenizer/preprocessor for the vectorizers, since the
    documents are already tokenized.
    """
    return x
def main(argv):
    """Train and evaluate an SVM sentiment classifier.

    argv: [prog, trainfile]           -> 75/25 split of trainfile
          [prog, trainfile, testfile] -> train on trainfile, test on testfile
    """
    testmode = False  # separate testfile, or split the training file
    if len(argv) == 2:
        trainfile = argv[1]
    elif len(argv) == 3:
        testmode = True
        trainfile = argv[1]
        testfile = argv[2]
    else:
        exit("Use assignment.py trainfile <testfile> <Optional>")
    # X: tokenized documents, Y: labels. use_sentiment=True selects the
    # polarity labels; False would select the category labels.
    X, Y = read_corpus(trainfile, use_sentiment=True)
    if testmode:
        print("Use test file")
        Xtrain = X
        Ytrain = Y
        Xtest, Ytest = read_corpus(testfile, use_sentiment=True)
    else:
        # Split the data into a training and a test set (75% / 25%).
        # BUG FIX: "75\%" is an invalid escape sequence that printed a
        # literal backslash (and is a SyntaxWarning on modern Pythons).
        print("Use 75% of train file")
        split_point = int(0.75 * len(X))
        Xtrain = X[:split_point]
        Ytrain = Y[:split_point]
        Xtest = X[split_point:]
        Ytest = Y[split_point:]
    tfidf = True
    # We use a dummy function as tokenizer and preprocessor,
    # since the texts are already preprocessed and tokenized.
    if tfidf:
        vec = TfidfVectorizer(preprocessor=identity,
                              tokenizer=identity, sublinear_tf=True, max_df=0.05)
    else:
        vec = CountVectorizer(preprocessor=identity,
                              tokenizer=identity)
    classifier = Pipeline([('vec', vec),
                           ('cls', svm.SVC(kernel='rbf', C=1.0, gamma=0.9))])
    # Train on the training portion, then predict the held-out portion.
    classifier.fit(Xtrain, Ytrain)
    Yguess = classifier.predict(Xtest)
    print(classification_report(Ytest, Yguess))
    for y in Yguess:
        print(y)
main(sys.argv)
|
import matplotlib.pyplot as plt
import numpy as np
import scipy.interpolate as sc_ip
# constants
# NOTE(review): Python 2 script (print-statement syntax below).
offset = [70]   # depth profiles below are stored as (offset - measured depth)
n_x = 100       # number of grid nodes along x
n_y = 100       # number of grid nodes along y
xnew = np.linspace(0, 150, n_x)
# layer_1: digitized top (t) and bottom (b) boundary samples
x_1_t = np.array([0, 8.1, 14.6, 20, 30, 40.9, 51.1, 60, 66.1, 73.3, 79.5, 86.4, 92.5, 100.9, 107.4, 114.3, 123.1, 130, 135.7, 140.2, 145.5, 150])
y_1_t = np.array([0, 0, 1.1, 2.1, 4.2, 6.1, 8.2, 10, 11.8, 13.7, 15.6, 18, 19.9, 23.1, 25.8, 29.1, 32.9, 35.9, 38.4, 40.1, 41.1, 41.7])
y_1_t = np.array(y_1_t.size * offset) - y_1_t
x_1_b = np.array([0, 7.9, 12.7, 20.1, 29.1, 37.1, 42, 46.4, 50.1, 54.4, 59.7, 64.8, 69.8, 75.5, 79, 80.8, 82.8, 85.2, 87.9, 90.3, 94.2, 98.9, 104.6, 108.5, 115.8, 122.3, 129, 135.2, 138.8, 145.1, 150])
y_1_b = np.array([4.1, 6.1, 7.6, 9.3, 9.5, 8.6, 8.7, 10.1, 12.9, 15.2, 17.3, 18.4, 19.3, 19.1, 18.6, 18.6, 19.5, 20.8, 21.9, 22.3, 23.4, 25.1, 27.1, 28.5, 31.3, 34, 36.8, 39.3, 40.7, 41.9, 42.6])
y_1_b = np.array(y_1_b.size * offset) - y_1_b
# Cubic interpolants over the digitized boundary samples.
f_1_t = sc_ip.interp1d(x_1_t, y_1_t, kind = 'cubic')
f_1_b = sc_ip.interp1d(x_1_b, y_1_b, kind = 'cubic')
# info for rect SeismogramSaver
print xnew.size
for i in range(0, xnew.size, 1):
    x = xnew[i]
    # second y-sample from the top boundary downward at this x
    y = np.linspace(f_1_t(x), f_1_b(x), n_y)[1]
    print str(x) + ", " + str(y) + ",",
#### save
# NOTE(review): the ".red" format appears to be: header (n_x n_y, a filename
# line), then the four boundary edges in order, then the interior nodes —
# TODO confirm against the consumer of these files.
# NOTE(review): the three per-layer save sections below are near-identical;
# a save_layer(filename, f_t, f_b) helper would remove the duplication.
f = open("layer_1.red", "w")
# header
f.write(str(n_x) + " " + str(n_y) + "\n")
f.write("filename.red\n")
# 1 edge
for x in xnew:
    f.write(str(x) + " " + str(f_1_b(x)) + " ")
f.write("\n")
# 2 edge
x = xnew[xnew.size - 1]
ynew = np.linspace(f_1_b(x), f_1_t(x), n_y)
for y in ynew[1:]:
    f.write(str(x) + " " + str(y) + " ")
f.write("\n")
# 3 edge
for x in xnew[::-1][1:]:
    f.write(str(x) + " " + str(f_1_t(x)) + " ")
f.write("\n")
# 4 edge
x = xnew[0]
ynew = np.linspace(f_1_t(x), f_1_b(x), n_y)
for y in ynew[1:-1]:
    f.write(str(x) + " " + str(y) + " ")
f.write("\n")
# inner nodes
for x in xnew[1:-1]:
    ynew = np.linspace(f_1_b(x), f_1_t(x), n_y)
    for y in ynew[1:-1]:
        f.write(str(x) + " " + str(y) + " ")
f.close()
# layer_2: its top is layer_1's bottom boundary
x_2_t = x_1_b
y_2_t = y_1_b
x_2_b = np.array([0, 10.3, 20.7, 31.1, 41.7, 54.9, 68.5, 78.6, 89.2, 97.9, 105.3, 112.7, 120.1, 127.4, 134.9, 140.8, 145.3, 150])
y_2_b = np.array([34.5, 35.4, 36.5, 36.4, 35.7, 35.8, 37.6, 37.8, 37.8, 38, 38.9, 39.1, 39.9, 40.3, 41.7, 42.8, 43.4, 43.9])
y_2_b = np.array(y_2_b.size * offset) - y_2_b
f_2_t = sc_ip.interp1d(x_2_t, y_2_t, kind = 'cubic')
f_2_b = sc_ip.interp1d(x_2_b, y_2_b, kind = 'cubic')
#### save (same layout as layer_1.red)
f = open("layer_2.red", "w")
# header
f.write(str(n_x) + " " + str(n_y) + "\n")
f.write("filename.red\n")
# 1 edge
for x in xnew:
    f.write(str(x) + " " + str(f_2_b(x)) + " ")
f.write("\n")
# 2 edge
x = xnew[xnew.size - 1]
ynew = np.linspace(f_2_b(x), f_2_t(x), n_y)
for y in ynew[1:]:
    f.write(str(x) + " " + str(y) + " ")
f.write("\n")
# 3 edge
for x in xnew[::-1][1:]:
    f.write(str(x) + " " + str(f_2_t(x)) + " ")
f.write("\n")
# 4 edge
x = xnew[0]
ynew = np.linspace(f_2_t(x), f_2_b(x), n_y)
for y in ynew[1:-1]:
    f.write(str(x) + " " + str(y) + " ")
f.write("\n")
# inner nodes
for x in xnew[1:-1]:
    ynew = np.linspace(f_2_b(x), f_2_t(x), n_y)
    for y in ynew[1:-1]:
        f.write(str(x) + " " + str(y) + " ")
f.close()
# layer_3: its top is layer_2's bottom boundary; its bottom is flat at depth 50
x_3_t = x_2_b
y_3_t = y_2_b
x_3_b = np.array([0, 100, 125, 150])
y_3_b = np.array([50, 50, 50, 50])
y_3_b = np.array(y_3_b.size * offset) - y_3_b
f_3_t = sc_ip.interp1d(x_3_t, y_3_t, kind = 'cubic')
f_3_b = sc_ip.interp1d(x_3_b, y_3_b, kind = 'cubic')
#### save (same layout as layer_1.red)
f = open("layer_3.red", "w")
# header
f.write(str(n_x) + " " + str(n_y) + "\n")
f.write("filename.red\n")
# 1 edge
for x in xnew:
    f.write(str(x) + " " + str(f_3_b(x)) + " ")
f.write("\n")
# 2 edge
x = xnew[xnew.size - 1]
ynew = np.linspace(f_3_b(x), f_3_t(x), n_y)
for y in ynew[1:]:
    f.write(str(x) + " " + str(y) + " ")
f.write("\n")
# 3 edge
for x in xnew[::-1][1:]:
    f.write(str(x) + " " + str(f_3_t(x)) + " ")
f.write("\n")
# 4 edge
x = xnew[0]
ynew = np.linspace(f_3_t(x), f_3_b(x), n_y)
for y in ynew[1:-1]:
    f.write(str(x) + " " + str(y) + " ")
f.write("\n")
# inner nodes
for x in xnew[1:-1]:
    ynew = np.linspace(f_3_b(x), f_3_t(x), n_y)
    for y in ynew[1:-1]:
        f.write(str(x) + " " + str(y) + " ")
f.close()
# visualize
#plt.plot(xnew, f_1_t(xnew), '-', xnew, f_1_b(xnew), '-', xnew, f_2_t(xnew), '-', xnew, f_2_b(xnew), '-', xnew, f_3_t(xnew), '-', xnew, f_3_b(xnew), '-')
#plt.show()
|
import sqlite3
def createTable():
    """Create the USERS table in login.db.

    Fails with sqlite3.OperationalError if the table already exists.
    """
    db = sqlite3.connect("login.db")
    db.execute("CREATE TABLE USERS(USERNAME TEXT NOT NULL, PASSWORD TEXT)")
    db.commit()
    db.close()
def friendsList(username):
    """Ensure that <username>.db exists and contains a `friends` table.

    Connecting creates the file if missing; CREATE TABLE IF NOT EXISTS
    makes repeated calls harmless.
    """
    db = sqlite3.connect(username + ".db")
    db.execute("CREATE TABLE IF NOT EXISTS friends(friend TEXT NOT NULL PRIMARY KEY, online INTEGER)")
    db.commit()
    db.close()
def updateFriends(username, newFriend, action):
    """Add or remove `newFriend` in `username`'s friends list.

    If `action` contains "DELETE" and the friend exists, the row is removed;
    otherwise, if the friend does not exist yet, it is inserted (offline).

    BUG FIXES vs. the previous version:
    - deleting a non-existent friend used to INSERT them instead;
    - the connection leaked when the friend existed and the action was
      not a delete (no close() on that path).
    """
    connection = sqlite3.connect(username + ".db")
    try:
        exists = connection.execute(
            "SELECT friend FROM friends WHERE friend = ?", (newFriend,)).fetchone()
        if exists:
            if "DELETE" in action:
                connection.execute("DELETE FROM friends WHERE friend = ?", (newFriend,))
                connection.commit()
        elif "DELETE" not in action:
            # New friends start offline (online = 0).
            connection.execute("INSERT INTO friends VALUES(?,?)", (newFriend, 0))
            connection.commit()
    finally:
        connection.close()
def checkOnlineStatus(username, client, statusUpdate):
    """Set `client`'s online flag in `username`'s friends DB.

    `client` is another logged-in user; the update only happens if the two
    users are already friends, otherwise this is a no-op.
    """
    db = sqlite3.connect(username + ".db")
    is_friend = db.execute(
        "SELECT friend FROM friends WHERE friend = ?", (client,)).fetchone()
    if is_friend:
        db.execute("UPDATE friends SET online = ? WHERE friend = ?", (statusUpdate, client))
        db.commit()
    db.close()
def checkAllOnlineStatus(username, clients=None):
    """Mark every stored friend online (1) or offline (0).

    A friend is online iff their name appears in `clients`, the list of
    currently connected users.

    BUG FIXES vs. the previous version:
    - cursor rows are 1-tuples, so `row in clients` never matched a list of
      name strings and everyone was always marked offline;
    - mutable default argument `clients=[]` replaced with None;
    - rows are fetched before updating, instead of UPDATE-ing while still
      iterating the SELECT cursor on the same connection.
    """
    connection = sqlite3.connect(username + ".db")
    try:
        online = set(clients or ())
        friends = [row[0] for row in connection.execute("SELECT friend FROM friends")]
        for friend in friends:
            connection.execute("UPDATE friends SET online = ? WHERE friend = ?",
                               (1 if friend in online else 0, friend))
        connection.commit()
    finally:
        connection.close()
def checkFriends(username, newFriend):
    """Return True iff `newFriend` is in `username`'s friends table."""
    userInfo = sqlite3.connect(username + ".db")
    try:
        # fetchone() avoids materializing every matching row just to count.
        row = userInfo.execute(
            "SELECT * FROM friends WHERE friend = ?", (newFriend,)).fetchone()
        return row is not None
    finally:
        # BUG FIX: the connection was previously never closed (handle leak).
        userInfo.close()
def updateTable(username, password):
    """Insert a (username, password) row into the server's login.db.

    NOTE(security): passwords are stored in plain text here — hashing them
    before storage should be considered.
    """
    db = sqlite3.connect("login.db")
    db.execute("INSERT INTO USERS VALUES(?,?)", (username, password))
    db.commit()
    db.close()
def checkTable(username, password):
    """Return True iff the (username, password) pair exists in login.db."""
    userInfo = sqlite3.connect("login.db")
    try:
        row = userInfo.execute(
            "SELECT * FROM USERS WHERE USERNAME = ? AND PASSWORD = ?",
            (username, password)).fetchone()
        return row is not None
    finally:
        # BUG FIX: the connection was previously never closed (handle leak).
        userInfo.close()
def checkUserName(username):
    """Return True iff `username` already exists in login.db."""
    userInfo = sqlite3.connect("login.db")
    try:
        row = userInfo.execute(
            "SELECT * FROM USERS WHERE USERNAME = ?", (username,)).fetchone()
        return row is not None
    finally:
        # BUG FIX: the connection was previously never closed (handle leak).
        userInfo.close()
|
from rest_framework import serializers
from . import models
class UserSerializer(serializers.ModelSerializer):
    """DRF serializer for models.User exposing id, name and status."""
    # Server-assigned primary key; never writable by clients.
    id = serializers.IntegerField(read_only=True)
    name = serializers.CharField(max_length=15, default="DefaultUserName")
    status = serializers.IntegerField(default=0)

    class Meta:
        fields = ('id', 'name', 'status')
        model = models.User

    def create(self, validated_data):
        """Create and persist a new User from the validated payload."""
        return models.User.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """Update an existing User.

        Only `status` is mutable through this serializer; `name` in the
        payload is deliberately ignored on update.
        """
        instance.status = validated_data.get('status', instance.status)
        instance.save()
        return instance
|
import os
import sys

# WSGI bootstrap for the ace_webserver Django project (mod_wsgi).
# NOTE(review): Python 2 only — `execfile` does not exist on Python 3.
# Make the project directory importable.
path = '/var/www/aceweb'
if path not in sys.path:
    sys.path.append(path)
os.environ['DJANGO_SETTINGS_MODULE'] = 'ace_webserver.settings'
# Activate the project's virtualenv inside this interpreter.
activate_this = path + '/env/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
#import django.core.handlers.wsgi
#application = django.core.handlers.wsgi.WSGIHandler()
from django.core.wsgi import get_wsgi_application
from django.contrib.staticfiles.handlers import StaticFilesHandler
# Serve static files through the WSGI app as well (not recommended for
# production-grade static serving, but convenient here).
application = StaticFilesHandler(get_wsgi_application())
|
###########################
# project_euler number 1
# by 김승현
###########################
# What is the sum of all multiples of 3 or 5 among the natural numbers below 1000?
# Add up every i in [1, 1000) divisible by 3 or by 5.
total = sum(i for i in range(1, 1000) if i % 3 == 0 or i % 5 == 0)
print(total)
"""Identify words by a number of criteria. Colour them accordingly for plotting."""
import xmlWordOperators as xmlWO
class xmlLineOperator(object):
    """Classify the words of a single OCR'd line on a page.

    Depending on the page layout (centered single column, two indented
    columns, or thirds) and on whether this line continues a previous
    capture, one of the `line_test_*` strategies is run. The result is a
    pair (captured_words, company_name_found) exposed as attributes.
    """
    def __init__(self, index, line, page_data, search_key_data, continuation):
        self.page = page_data.page
        self.index = index
        # line is (line-element, list of word records); each word record is a
        # tuple whose slot 0 is the text and slot 4 the left coordinate —
        # TODO confirm against xmlWordOperators' word-record layout.
        self.line = line[0]
        self.word_list = line[1]
        self.page_left = page_data.page_left
        self.page_right = page_data.page_right
        self.center = page_data.center
        self.left_column_start = page_data.left_column_start
        self.third_first_start = page_data.third_first_start
        self.third_second_start = page_data.third_second_start
        # search_key_data: (center-layout flag, thirds-layout flag)
        self.search_key_center = search_key_data[0]
        self.search_key_thirds = search_key_data[1]
        # Dispatch on layout and continuation status.
        if continuation is False and self.search_key_center is True:
            self.line_data = self.line_test_indented_columns('center')
        elif continuation is False and self.search_key_thirds is True:
            self.line_data = self.line_test_indented_columns('thirds')
        elif continuation is True and (self.search_key_center is True or self.search_key_thirds is True):
            self.line_data = self.line_test_continued()
        elif continuation is False and self.search_key_center is False and self.search_key_thirds is False:
            self.line_data = self.line_test_center()
        # NOTE(review): continuation=True with both search keys False leaves
        # self.line_data unset and the next line raises AttributeError —
        # presumably that combination never occurs; verify at the call site.
        self.captured_words = self.line_data[0]
        self.company_name_found = self.line_data[1]

    def clean_captured_words(self, captured_words):
        """Filter a candidate capture, returning [] if it fails any heuristic.

        Heuristics (delegated to xmlWO): capitalised-title check, minimum
        length, company-extension lookup, 'as of' phrasing, capitals ratio,
        line-boundary filter and management/bond wording.
        """
        string_continuous = ''.join([xmlWO.strip_punctuation(word[0]) for word in captured_words])
        if not xmlWO.check_capital_title(captured_words) or len(captured_words) < 2 or len(string_continuous) < 5:
            captured_words = []
        elif not xmlWO.identify_company_extensions(captured_words):
            # No company suffix found: apply the weaker textual filters in turn.
            if len(captured_words) > 0:
                captured_words = xmlWO.as_of_search(captured_words, string_continuous)
            if len(captured_words) > 0:
                captured_words = xmlWO.capitals_ratio(captured_words, string_continuous)
            if len(captured_words) > 0:
                captured_words = xmlWO.beginning_end_line_filter(captured_words, string_continuous)
            if len(captured_words) > 0:
                captured_words = xmlWO.is_management_bonded(captured_words, string_continuous)
        return captured_words

    def line_test_operator(self, captured_words, company_name_found):
        """Scan word pairs for an end-of-name marker and collect words up to it."""
        company_name_found = 'Undefined'
        end = False
        # NOTE(review): iterating over word_list[:-1] means the i == len-1
        # branch below can never fire (enumerate stops at len-2) — the last
        # word of the line is never visited. Looks like an off-by-one; kept
        # as-is because downstream tuning may depend on it. TODO confirm.
        for i, word in enumerate(self.word_list[:-1]):
            if i == (len(self.word_list) - 1):
                present_word = self.word_list[i][0]
                next_word = ''
            else:
                present_word = self.word_list[i][0]
                next_word = self.word_list[i+1][0]
            # end_position returns (is_end, ?, include_current) — TODO confirm.
            end_position_data = xmlWO.end_position(present_word, next_word)
            if end_position_data[0]:
                end = True
                company_name_found = True
            if end is False or end_position_data[2]:
                captured_words.append(word)
            if end_position_data[2]:
                break
        captured_words = self.clean_captured_words(captured_words)
        if len(captured_words) == 0:
            company_name_found = False
        return (captured_words, company_name_found)

    def line_test_indented_columns(self, key):
        """Categorise words in line for two column sheets."""
        def line_test_operator_trigger():
            """Compartmentalised function to avoid repetition."""
            captured_words = []
            company_name_found = False
            line_test_data = self.line_test_operator(captured_words, company_name_found)
            return line_test_data
        captured_words = []
        company_name_found = False
        # Accept first words starting between column_start+1% and +60% of width.
        offset_list = [.01, .6]
        word_zero_left = self.word_list[0][4]
        word_zero = self.word_list[0][0]
        if key == 'center':
            word_limit_line = 5
            if self.index == 0:
                column_start = self.page_left
            elif self.index == 1:
                column_start = self.left_column_start
        elif key == 'thirds':
            word_limit_line = 2
            if self.index == 0:
                column_start = self.page_left
            elif self.index == 1:
                column_start = self.third_first_start
            elif self.index == 2:
                column_start = self.third_second_start
        if (column_start + offset_list[0] < word_zero_left < column_start + offset_list[1] and
                len(self.word_list) > word_limit_line and xmlWO.capital_search(word_zero)):
            # Prefer lines that carry an explicit company suffix; otherwise
            # fall back to a popularity check on the first word.
            if xmlWO.identify_company_extensions(self.line_test_operator(captured_words, company_name_found)[0]) is True:
                line_test_data = line_test_operator_trigger()
                captured_words = line_test_data[0]
                company_name_found = line_test_data[1]
            else:
                captured_words = []
                company_name_found = False
                if xmlWO.check_against_popular(xmlWO.strip_punctuation(word_zero), self.word_list, '') is False:
                    line_test_data = line_test_operator_trigger()
                    captured_words = line_test_data[0]
                    company_name_found = line_test_data[1]
        return (captured_words, company_name_found)

    def line_test_continued(self):
        """Search lines that follow an indented line of interest for string endpoints."""
        captured_words = []
        company_name_found = 'Undefined'
        end = False
        # NOTE(review): same unreachable i == len-1 branch as in
        # line_test_operator; and the break below stops the scan after the
        # first appended word — verify this early exit is intentional.
        for i, word in enumerate(self.word_list[:-1]):
            if i == (len(self.word_list) - 1):
                present_word = self.word_list[i+1][0]
                next_word = ''
            else:
                present_word = self.word_list[i][0]
                next_word = self.word_list[i+1][0]
            end_position_data = xmlWO.end_position(present_word, next_word)
            if end_position_data[0]:
                end = True
                company_name_found = True
            if end is False or end_position_data[2] is True:
                captured_words.append(word)
                break
        captured_words = self.clean_captured_words(captured_words)
        return (captured_words, company_name_found)

    def line_test_center(self):
        """Categorise words in single-column sheets. Look for centered company names."""
        captured_words = []
        company_name_found = False
        line_left = self.word_list[0][4]
        line_right = self.word_list[-1][2]
        distance_left = line_left - self.page_left
        distance_right = self.page_right - line_right
        distance_distance = abs(distance_right - distance_left)
        # A roughly symmetric margin (within 5% of page width) with a real
        # indent on both sides is treated as a centered heading.
        if distance_distance < .05 and distance_left > .035:
            for word in self.word_list:
                captured_words.append(word)
        captured_words = self.clean_captured_words(captured_words)
        return (captured_words, company_name_found)
|
"""This defines the available applications."""
from django.apps import AppConfig
class ShepherdConfig(AppConfig):
    """AppConfig for the ghostwriter.shepherd Django application."""
    name = 'ghostwriter.shepherd'

    def ready(self):
        """Register the app's signal handlers once the app registry is ready."""
        try:
            # Imported purely for its side effects (signal registration).
            import ghostwriter.shepherd.signals  # noqa F401
        except ImportError:
            pass
"""Code to fetch the repository and commits"""
import requests
import json
def get_repository_details(user_name):
    """Fetch a GitHub user's public repos and per-repo commit counts.

    Returns a list of display strings, or a single error string when the
    API cannot be reached.

    NOTE: the commits endpoint is paginated, so the reported count is the
    size of the first page (at most 30) for larger repositories.
    """
    result = []
    user_url = 'https://api.github.com/users/{0}/repos'.format(user_name)
    result.append('User: {0}'.format(user_name))
    try:
        res = requests.get(user_url)
        # Treat HTTP errors (404 unknown user, 403 rate limit, ...) as failures.
        res.raise_for_status()
        repos = res.json()
    # BUG FIX: network/HTTP failures raise requests.RequestException, never
    # TypeError/KeyError/IndexError, so errors were previously uncaught.
    # ValueError covers a non-JSON response body.
    except (requests.RequestException, ValueError):
        return " Not able to fetch user's repos"
    try:
        for repo in repos:
            repo_name = repo['name']
            repo_url = 'https://api.github.com/repos/{}/{}/commits'.format(user_name, repo_name)
            repo_detail = requests.get(repo_url)
            repo_detail_json = repo_detail.json()
            result.append('Repository Name: {0} Number of commits in Repository: {1}'.format(repo_name, len(repo_detail_json)))
    except (TypeError, KeyError, IndexError, requests.RequestException, ValueError):
        # BUG FIX: the message previously read "Not able to to fetch".
        return "Not able to fetch user's commits"
    return result
def main():
    """Prompt for a GitHub username and print that user's repo details."""
    usr_name = input("Enter name of the user = ")
    for detail in get_repository_details(usr_name):
        print(detail)
"""excution starts from here"""
if __name__ == '__main__':
main() |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Load the mall-customers data; columns 3 and 4 are annual income and
# spending score.
dataset = pd.read_csv('Mall_Customers.csv')
X = dataset.iloc[:,[3,4]].values
'''
import scipy.cluster.hierarchy as sch
dendrogram = sch.dendrogram(sch.linkage(X,method = 'ward'))
plt.title('Dendrogram')
plt.show()
'''
# Number of clusters (chosen from the dendrogram experiment above).
GreaterClusters = 5
from sklearn.cluster import AgglomerativeClustering
# CONSISTENCY FIX: use GreaterClusters instead of a second hard-coded 5,
# so changing the cluster count in one place updates both the model and
# the plotting loop below.
# NOTE(review): `affinity` is deprecated in newer scikit-learn (renamed
# `metric`) — confirm the installed version before upgrading.
hc = AgglomerativeClustering(n_clusters = GreaterClusters, affinity = 'euclidean', linkage = 'ward')
y_hc = hc.fit_predict(X)
colors = ['g','b','r','c','m','y','k','w']
for i in range(0,GreaterClusters):
    plt.scatter(X[y_hc==i,0],X[y_hc==i,1],s = 15,c = colors[i],label = 'Cluster '+str(i))
plt.title('Clusters of clients')
plt.xlabel('Annual Income (k$)')
plt.ylabel('Spending Score (1-100)')
plt.legend()
plt.show()
|
from django.db import models
from django.db.models import Q
from django.contrib.auth.models import User
from django.utils import timezone
from profanity.validators import validate_is_profane
from django.shortcuts import redirect, reverse
# Create your models here.
class JobManager(models.Manager):
    """Manager adding free-text search over job postings."""
    def search(self, query=None):
        """Return postings whose title, descriptions or company match `query`.

        With no query, the unfiltered queryset is returned.
        """
        qs = self.get_queryset()
        if query is not None:
            matches = (
                Q(job_title__icontains=query)
                | Q(job_description__icontains=query)
                | Q(company_description__icontains=query)
                | Q(company_name__icontains=query)
            )
            # distinct() is often necessary with Q lookups
            qs = qs.filter(matches).distinct()
        return qs
class JobOpening(models.Model):
    """A job posting created by a user.

    NOTE(review): the *_CHOICES lists below are duplicated verbatim on
    RequestJob; consider hoisting them to module-level constants.
    NOTE(review): unlike RequestJob.state, the `state` field here does not
    use STATE_CHOICES (the list is defined but unused) — changing it would
    require a migration, so it is flagged rather than fixed.
    """
    FIELD_CHOICES = [
        ('', 'Select Field'),
        ('Administration/Secretarial', 'Administration/Secretarial'),
        ('Agriculture/Agro-Allied', 'Agriculture/Agro-Allied'),
        ('Arts/Crafts/Languages', 'Arts/Crafts/Languages'),
        ('Aviation/Airline', 'Aviation/Airline'),
        ('Banking', 'Banking'),
        ('Building and Construction', 'Building and Construction'),
        ('Catering / Confectionery', 'Catering / Confectionery'),
        ('Consultancy', 'Consultancy'),
        ('Customer Care', 'Customer Care'),
        ('Education / Teaching', 'Education / Teaching'),
        ('Engineering / Technical', 'Engineering / Technical'),
        ('Finance/Accouting/Audit','Finance/Accouting/Audit'),
        ('General', 'General'),
        ('Graduate Jobs', 'Graduate Jobs'),
        ('Hospitality/Hotel/Restaurant', 'Hospitality/Hotel/Restaurant'),
        ('Human Resources', 'Human Resources'),
        ('Insurance', 'Insurance'),
        ('ICT/Computer', 'ICT/Computer'),
        ('Internships / Volunteering', 'Internships / Volunteering'),
        ('Janitorial Services', 'Janitorial Services'),
        ('Law / Legal', 'Law/Legal'),
        ('Logistics','Logistics'),
        # NOTE(review): stored value misspelled ('Manutacturing'); fixing it
        # would orphan existing rows, so left as-is.
        ('Manutacturing', 'Manufacturing'),
        ('Media/ Advertising/ Branding', 'Media/ Advertising/ Branding'),
        ('Medical/Healthcare', 'Medical/Healthcare'),
        ('NGO/Non-Profit','NGO/Non-Profit'),
        ('Oil and Gas / Energy','Oil and Gas / Energy'),
        ('Pharmaceutical', 'Pharmaceutical'),
        ('Procurement / Store-keeping / Supply Chain', 'Procurement / Store-keeping / Supply Chain'),
        ('Project Management','Project Management'),
        ('Real Estate', 'Real Estate'),
        ('Research / Data Analysis', 'Research / Data Analysis'),
        ('Safety and Environment / HSE', 'Safety and Environment / HSE'),
        ('Sales / Marketing / Retail / Business Development', 'Sales / Marketing / Retail / Business Development'),
        ('Security Intelligence', 'Security Intelligence'),
        ('Transportation and Driving', 'Transportation and Driving'),
        ('Travel and Tours', 'Travel and Tours'),
    ]
    INDUSTRY_CHOICES = [
        ('', 'Select Industry'),
        ('Advertising / Branding / PR', 'Advertising / Branding / PR'),
        ('Agriculture / Agro-Allied', 'Agriculture / Agro-Allied'),
        ('Any','Any'),
        ('Aviation / Airline', 'Aviation / Airline'),
        ('Banking / Financial Services', 'Banking / Financial Services'),
        ('Building / Construction', 'Building / Construction'),
        ('Consulting', 'Consulting'),
        ('Creative / Arts','Creative / Arts'),
        ('Education / Teaching','Education / Teaching'),
        ('Engineering / Technical', 'Engineering / Technical'),
        ('Food Services', 'Food Services'),
        ('General', 'General'),
        ('Government', 'Government'),
        ('Healthcare / Medical', 'Healthcare / Medical'),
        ('Hospitality', 'Hospitality'),
        ('ICT / Telecommunication', 'ICT / Telecommunication'),
        ('Insurance', 'Insurance'),
        ('Janitorial Services / Environment', 'Janitorial Services / Environment'),
        ('Law / Legal', 'Law / Legal'),
        ('Logistics and Transportation', 'Logistics and Transportation'),
        ('Manufacturing / Production / FMCG', 'Manufacturing / Production / FMCG'),
        ('Media / Radio / TV','Media / Radio / TV'),
        ('NGO / Non-Profit Associations', 'NGO / Non-Profit Associations'),
        ('Oil and Gas / Marine', 'Oil and Gas / Marine'),
        ('Online Sales / Marketing', 'Online Sales / Marketing'),
        ('Pharmaceuticals', 'Pharmaceuticals'),
        ('Power / Energy', 'Power / Energy'),
        ('Professional / Social Associations', 'Professional / Social Associations'),
        ('Real Estate', 'Real Estate'),
        ('Religious', 'Religious'),
        ('Sales / Retail', 'Sales / Retail'),
        ('Security', 'Security'),
        ('Travel and Tours', 'Travel and Tours'),
    ]
    EDUCATION_CHOICES = [
        ('First School Leaving Certificate (FSLC)', 'First School Leaving Certificate (FSLC)'),
        ('Secondary School (SSCE)', 'Secondary School (SSCE)'),
        ('NCE', 'NCE'),
        ('OND', 'OND'),
        ('BA/BSc/HND', 'BA/BSc/HND'),
        ('MBA/MSc/MA', 'MBA/MSc/MA'),
        ('PhD/Fellowship', 'PhD/Fellowship'),
        ('Vocational', 'Vocational'),
        ('Others', 'Others'),
    ]
    EXPERIENCE_CHOICES = [
        ('', 'Select Experience Required'),
        ('1-4 years', '1-4 years'),
        ('5-10 years', '5-10 years'),
        ('11-35 years', '11-35 years')
    ]
    STATE_CHOICES = [
        ('Abia', 'Abia'),
        ('Abuja','Abuja'),
        ('Adamawa', 'Adamawa'),
        ('Akwa Ibom', 'Akwa Ibom'),
        ('Anambra', 'Anambra'),
        ('Bauchi', 'Bauchi'),
        ('Bayelsa', 'Bayelsa'),
        ('Benue', 'Benue',),
        ('Borno', 'Borno'),
        ('Cross River', 'Cross River'),
        ('Delta', ' Delta'),
        ('Ebonyi', 'Ebonyi'),
        ('Edo', 'Edo'),
        ('Ekiti', 'Ekiti'),
        ('Enugu', 'Enugu'),
        ('Gombe', 'Gombe'),
        ('Imo', 'Imo'),
        ('Jigawa', 'Jigawa'),
        ('Kaduna', 'Kaduna'),
        ('Kano', 'Kano'),
        ('Katsina', 'Katsina'),
        ('Kebbi', 'Kebbi'),
        ('Kogi', 'Kogi'),
        ('Kwara', 'Kwara'),
        ('Lagos', 'Lagos'),
        # NOTE(review): display value misspelled ('Nasawara' for Nasarawa).
        ('Nasarawa', 'Nasawara'),
        ('Niger', 'Niger'),
        ('Ogun', 'Ogun'),
        ('Ondo', 'Ondo'),
        ('Osun', 'Osun'),
        ('Other', 'Other'),
        ('Oyo', 'Oyo'),
        ('Plateau', 'Plateau'),
        ('Rivers', 'Rivers'),
        ('Sokoto', 'Sokoto'),
        ('Taraba', 'Taraba'),
        ('Yobe', 'Yobe'),
        ('Zamfara', 'Zamfara'),
    ]
    JOB_TYPE_CHOICES = [
        ('Full Time', 'Full Time'),
        ('Contract', 'Contract')
    ]
    # Posting author.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    job_title = models.CharField(max_length=140)
    company_description = models.TextField()
    job_description = models.TextField()
    company_name = models.CharField(max_length=140)
    # Users who bookmarked this posting.
    saved = models.ManyToManyField(User, related_name='saved_job', blank=True)
    job_type = models.CharField(choices=JOB_TYPE_CHOICES, max_length=30)
    company_email = models.EmailField(max_length=254, blank=True, null=True)
    method_of_application = models.TextField(blank=True, null=True)
    date_posted = models.DateTimeField(default=timezone.now)
    experience = models.CharField(choices=EXPERIENCE_CHOICES, max_length=20)
    field = models.CharField(choices=FIELD_CHOICES, max_length=140)
    state = models.CharField(max_length=140)
    education = models.CharField(choices=EDUCATION_CHOICES, max_length=140)
    industry = models.CharField(choices=INDUSTRY_CHOICES, max_length=140)
    # Whether applicants may send a CV straight to the company.
    send_cv_directly = models.BooleanField(default=False)
    objects = JobManager()

    def __str__(self):
        return '{} - {}'.format(self.user, self.job_title)

    def get_absolute_url(self):
        """Canonical detail page for this posting."""
        return reverse('job-detail', kwargs={'pk': self.pk})

    def get_api_save_job_url(self):
        """API endpoint toggling the saved/bookmarked state."""
        return reverse("save-job-api-toggle", kwargs={'pk': self.pk})

    def get_save_job_url(self):
        """Non-API endpoint toggling the saved/bookmarked state."""
        return reverse("save-job-toggle", kwargs={'pk': self.pk})
class ShareJob(models.Model):
    """A user's share of a JobOpening, optionally quoting another share."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    job = models.ForeignKey(JobOpening, on_delete=models.CASCADE)
    # Optional commentary added by the sharer.
    content = models.TextField(null=True, blank=True)
    likes = models.ManyToManyField(User, related_name='share_job_likes', blank=True)
    image = models.ImageField(upload_to='shared_pic/', blank=True, null=True)
    date_posted = models.DateTimeField(default=timezone.now)
    # True when this share quotes another share (see share_post).
    is_quote = models.BooleanField(default=False)
    share_post = models.ForeignKey('self', on_delete=models.CASCADE, blank=True, null=True)

    def get_absolute_url(self):
        """Thread page of the shared job (keyed by the job's pk, not this share's)."""
        return reverse('job-thread', kwargs={'pk': self.job.pk})

    def __str__(self):
        return '{}- {}'.format(self.job.job_title, str(self.user.username))

    def get_like_url(self):
        """Non-API endpoint toggling a like on this share."""
        return reverse("share-job-like-toggle", kwargs={'pk': self.pk})

    def get_api_like_url(self):
        """API endpoint toggling a like on this share."""
        return reverse("share-job-like-api-toggle", kwargs={'pk': self.pk})
class RequestJob(models.Model):
FIELD_CHOICES = [
('', 'Select Field'),
('Administration/Secretarial', 'Administration/Secretarial'),
('Agriculture/Agro-Allied', 'Agriculture/Agro-Allied'),
('Arts/Crafts/Languages', 'Arts/Crafts/Languages'),
('Aviation/Airline', 'Aviation/Airline'),
('Banking', 'Banking'),
('Building and Construction', 'Building and Construction'),
('Catering / Confectionery', 'Catering / Confectionery'),
('Consultancy', 'Consultancy'),
('Customer Care', 'Customer Care'),
('Education / Teaching', 'Education / Teaching'),
('Engineering / Technical', 'Engineering / Technical'),
('Finance/Accouting/Audit','Finance/Accouting/Audit'),
('General', 'General'),
('Graduate Jobs', 'Graduate Jobs'),
('Hospitality/Hotel/Restaurant', 'Hospitality/Hotel/Restaurant'),
('Human Resources', 'Human Resources'),
('Insurance', 'Insurance'),
('ICT/Computer', 'ICT/Computer'),
('Internships / Volunteering', 'Internships / Volunteering'),
('Janitorial Services', 'Janitorial Services'),
('Law / Legal', 'Law/Legal'),
('Logistics','Logistics'),
('Manutacturing', 'Manufacturing'),
('Media/ Advertising/ Branding', 'Media/ Advertising/ Branding'),
('Medical/Healthcare', 'Medical/Healthcare'),
('NGO/Non-Profit','NGO/Non-Profit'),
('Oil and Gas / Energy','Oil and Gas / Energy'),
('Pharmaceutical', 'Pharmaceutical'),
('Procurement / Store-keeping / Supply Chain', 'Procurement / Store-keeping / Supply Chain'),
('Project Management','Project Management'),
('Real Estate', 'Real Estate'),
('Research / Data Analysis', 'Research / Data Analysis'),
('Safety and Environment / HSE', 'Safety and Environment / HSE'),
('Sales / Marketing / Retail / Business Development', 'Sales / Marketing / Retail / Business Development'),
('Security Intelligence', 'Security Intelligence'),
('Transportation and Driving', 'Transportation and Driving'),
('Travel and Tours', 'Travel and Tours'),
]
INDUSTRY_CHOICES = [
('', 'Select Industry'),
('Advertising / Branding / PR', 'Advertising / Branding / PR'),
('Agriculture / Agro-Allied', 'Agriculture / Agro-Allied'),
('Any','Any'),
('Aviation / Airline', 'Aviation / Airline'),
('Banking / Financial Services', 'Banking / Financial Services'),
('Building / Construction', 'Building / Construction'),
('Consulting', 'Consulting'),
('Creative / Arts','Creative / Arts'),
('Education / Teaching','Education / Teaching'),
('Engineering / Technical', 'Engineering / Technical'),
('Food Services', 'Food Services'),
('General', 'General'),
('Government', 'Government'),
('Healthcare / Medical', 'Healthcare / Medical'),
('Hospitality', 'Hospitality'),
('ICT / Telecommunication', 'ICT / Telecommunication'),
('Insurance', 'Insurance'),
('Janitorial Services / Environment', 'Janitorial Services / Environment'),
('Law / Legal', 'Law / Legal'),
('Logistics and Transportation', 'Logistics and Transportation'),
('Manufacturing / Production / FMCG', 'Manufacturing / Production / FMCG'),
('Media / Radio / TV','Media / Radio / TV'),
('NGO / Non-Profit Associations', 'NGO / Non-Profit Associations'),
('Oil and Gas / Marine', 'Oil and Gas / Marine'),
('Online Sales / Marketing', 'Online Sales / Marketing'),
('Pharmaceuticals', 'Pharmaceuticals'),
('Power / Energy', 'Power / Energy'),
('Professional / Social Associations', 'Professional / Social Associations'),
('Real Estate', 'Real Estate'),
('Religious', 'Religious'),
('Sales / Retail', 'Sales / Retail'),
('Security', 'Security'),
('Travel and Tours', 'Travel and Tours'),
]
EDUCATION_CHOICES = [
('First School Leaving Certificate (FSLC)', 'First School Leaving Certificate (FSLC)'),
('Secondary School (SSCE)', 'Secondary School (SSCE)'),
('NCE', 'NCE'),
('OND', 'OND'),
('BA/BSc/HND', 'BA/BSc/HND'),
('MBA/MSc/MA', 'MBA/MSc/MA'),
('PhD/Fellowship', 'PhD/Fellowship'),
('Vocational', 'Vocational'),
('Others', 'Others'),
]
EXPERIENCE_CHOICES = [
('', 'Select Experience Required'),
('1-4 years', '1-4 years'),
('5-10 years', '5-10 years'),
('11-35 years', '11-35 years')
]
STATE_CHOICES = [
('Abia', 'Abia'),
('Abuja','Abuja'),
('Adamawa', 'Adamawa'),
('Akwa Ibom', 'Akwa Ibom'),
('Anambra', 'Anambra'),
('Bauchi', 'Bauchi'),
('Bayelsa', 'Bayelsa'),
('Benue', 'Benue',),
('Borno', 'Borno'),
('Cross River', 'Cross River'),
('Delta', ' Delta'),
('Ebonyi', 'Ebonyi'),
('Edo', 'Edo'),
('Ekiti', 'Ekiti'),
('Enugu', 'Enugu'),
('Gombe', 'Gombe'),
('Imo', 'Imo'),
('Jigawa', 'Jigawa'),
('Kaduna', 'Kaduna'),
('Kano', 'Kano'),
('Katsina', 'Katsina'),
('Kebbi', 'Kebbi'),
('Kogi', 'Kogi'),
('Kwara', 'Kwara'),
('Lagos', 'Lagos'),
('Nasarawa', 'Nasawara'),
('Niger', 'Niger'),
('Ogun', 'Ogun'),
('Ondo', 'Ondo'),
('Osun', 'Osun'),
('Other', 'Other'),
('Oyo', 'Oyo'),
('Plateau', 'Plateau'),
('Rivers', 'Rivers'),
('Sokoto', 'Sokoto'),
('Taraba', 'Taraba'),
('Yobe', 'Yobe'),
('Zamfara', 'Zamfara'),
]
JOB_TYPE_CHOICES = [
('Full Time', 'Full Time'),
('Contract', 'Contract')
]
user = models.ForeignKey(User, on_delete=models.CASCADE)
job_type = models.CharField(choices=JOB_TYPE_CHOICES, max_length=45)
field = models.CharField(choices=FIELD_CHOICES, max_length=100)
industry = models.CharField(choices=INDUSTRY_CHOICES, max_length=140)
state = models.CharField(choices=STATE_CHOICES, max_length=20)
education = models.CharField(choices=EDUCATION_CHOICES, max_length=45)
experience = models.CharField(choices=EXPERIENCE_CHOICES, max_length=20)
date_posted = models.DateTimeField(default=timezone.now)
def __str__(self):
    """Human-readable label: the posting user and the job field."""
    return f'{self.user} - {self.field}'
def get_absolute_url(self):
# Redirect target after create/update: the full job-list view
# (a per-object detail URL is kept below as a disabled alternative).
return reverse('all_jobs')
# return reverse('job-detail', kwargs={'pk': self.pk})
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# File Name : 'dump_patches.py'
# Author : Steve NGUYEN
# Contact : steve.nguyen.000@gmail.com
# Created : DDDD
# Revised :
# Version :
# Target MCU :
#
# This code is distributed under the GNU Public License
# which can be found at http://www.gnu.org/licenses/gpl.txt
#
#
# Notes: notes
#
import os
import sys
from subprocess import call
import fnmatch
# Model architecture / weights passed to every invocation of the test binary.
arch = "../bin/models/test_exp.json"
weights = "../bin/models/test_exp_weights.bin"

if __name__ == '__main__':
    # Usage: dump_patches.py <test-binary> <image-dir> <output-dir>
    basebin = sys.argv[1]
    basetest = sys.argv[2]
    out_dir = sys.argv[3]

    # Recursively collect every PNG under the test directory.
    matches = []
    for root, dirnames, filenames in os.walk(basetest):
        for filename in fnmatch.filter(filenames, '*.png'):
            matches.append(os.path.join(root, filename))

    # print() calls (was Python-2 print statements, which fail on Python 3).
    print("running: {}".format(basebin))
    for img in matches:
        print(img)
        # Return code is deliberately ignored: a failure on one image
        # should not stop the batch.
        res = call([basebin, arch, weights, img, out_dir])
|
__author__ = 'sjaku'
import os
import urllib |
from __future__ import print_function
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def add_layer(inputs, in_size, out_size, activation_function=None):
    """Append one fully connected layer to the graph.

    Args:
        inputs: 2-D tensor of shape (batch, in_size).
        in_size: number of input features.
        out_size: number of output units.
        activation_function: optional activation applied to W.x + b;
            None means a linear output.

    Returns:
        Output tensor of shape (batch, out_size).
    """
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    # Small positive bias init avoids dead units with ReLU-style activations.
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    # Compute the matmul once (the original built it twice and printed shapes).
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        return Wx_plus_b
    return activation_function(Wx_plus_b)
# Toy regression: fit y = x^2 - 0.5 (plus Gaussian noise) with a 1-10-1 MLP.
x_data = np.linspace(-1, 2, 400)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise
# define the placeholders fed with the full dataset every step
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
# hidden layer: 10 ReLU units
l1=add_layer(xs,1,10,activation_function=tf.nn.relu)
# output layer: 1 linear unit
prediction=add_layer(l1,10,1,activation_function=None)
# mean squared error over the batch
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))
# plain SGD, learning rate 0.1
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# print("noise:\n", noise.shape)
# print("x_data:\n", x_data.shape)
# important step: init. TF renamed the global-init op around 0.12/1.0,
# so pick whichever this installation provides.
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
init = tf.initialize_all_variables()
else:
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# interactive plot: scatter the data once, then redraw the fit line.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x_data, y_data)
plt.ion()
plt.show()
plt.pause(1)
for i in range(1000):
# one full-batch gradient step
sess.run(train_step,feed_dict={xs:x_data,ys:y_data})
# sess.run(prediction)
if i%10==0:
# remove the previous fit line before redrawing; `lines` is undefined
# on the first pass, which the try/except deliberately swallows
try:
ax.lines.remove(lines[0])
except Exception:
pass
prediction_value=sess.run(prediction,feed_dict={xs:x_data})
print(prediction_value.shape)
lines=ax.plot(x_data,prediction_value,'r-',lw=5)
plt.pause(0.1)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 18 16:13:02 2020
@author: shaun
"""
import numpy as np
import matplotlib.pyplot as plt
from integration import *
# Number of Simpson's-rule bins used by f() below.
N=1000
#creates function x
def function(x):
    """Gaussian integrand: e raised to the power -(x squared)."""
    exponent = -(x ** 2)
    return np.e ** exponent
#calculates the integral using n bins and simpsons rule
def f(x):
    """Integral of `function` from 0 to x via Simpson's rule with N bins."""
    # Module-level N is only read, so no `global` declaration is needed.
    result = swhole(N, 0, x, function)
    return result[2]
# Evaluate the cumulative integral on [0, 3] and plot the curve.
X = np.linspace(0, 3, 30)
Y = [f(point) for point in X]

figure = plt.figure()
axes = figure.add_subplot()
axes.plot(X, Y)
axes.set_xlabel("X")
axes.set_ylabel(r"E(x)")
figure.suptitle("$E(x)$ evaluated numerically")
|
#! /usr/bin/env python3.3
####################################### Plot 1
from decimal import *
import matplotlib.pyplot as plt
import numpy as np
import os
from pylab import *
from matplotlib.font_manager import FontProperties
# Small legend font shared by all plots below.
fontP = FontProperties()
fontP.set_size('small')
if not os.path.exists('../plots'):
os.makedirs('../plots')
# CSV layout (presumably): NoOfIter blocks of NoOfReruns rows; columns
# 2..6 are step/collision/velocity/position/loop times — TODO confirm.
f=open('../data/g06_lab05data_02.csv','r') #CSV1
error_bars=[]
NoOfIter = 40
NoOfReruns = 20
iteration_number=[(x+1) for x in range(NoOfIter)]
# Per-iteration averages over the NoOfReruns reruns.
step_time_avg=[0 for x in range(NoOfIter)]
collision_time_avg=[0 for x in range(NoOfIter)]
velocity_time_avg=[0 for x in range(NoOfIter)]
position_time_avg=[0 for x in range(NoOfIter)]
loop_time_avg=[0 for x in range(NoOfIter)]
sum_of_avg=[0 for x in range(NoOfIter)]
# Raw step-time samples of one chosen iteration, for the histogram (Plot 4).
step_time_rollno=[]
# Track which iteration has the largest / smallest loop-time average.
maxIndLT=-1
minIndLT=-1
maxLT=0
minLT=10000
for i in range(NoOfIter): #No of iterations
# min/max within this iteration give the error-bar height.
maxi=0
mini=10000
step_time_sum=0
collision_time_sum=0
velocity_time_sum=0
position_time_sum=0
loop_time_sum=0
for j in range(NoOfReruns): #No of reruns
line=f.readline()
words=line.split(',')
step_value=float(words[2])
# Should take 49 per the original author's note; iteration 20 is used here.
if i==19:
step_time_rollno.append(step_value)
if step_value>maxi:
maxi=step_value
if step_value<mini:
mini=step_value
step_time_sum+=float(words[2])
collision_time_sum+=float(words[3])
velocity_time_sum+=float(words[4])
position_time_sum+=float(words[5])
loop_time_sum+=float(words[6])
error_bars.append(maxi-mini)
step_time_avg[i]=step_time_sum/NoOfReruns
collision_time_avg[i]=collision_time_sum/NoOfReruns
velocity_time_avg[i]=velocity_time_sum/NoOfReruns
position_time_avg[i]=position_time_sum/NoOfReruns
loop_time_avg[i]=loop_time_sum/NoOfReruns
if loop_time_avg[i]>maxLT:
maxLT=loop_time_avg[i]
maxIndLT=i
if loop_time_avg[i]<minLT:
minLT=loop_time_avg[i]
minIndLT=i
# Plot 1: step-time averages as bars with the loop-time average overlaid,
# annotating its extreme iterations.
plt.bar(iteration_number,step_time_avg,0.7,align='center',label='Step time avg')
plt.plot(iteration_number,loop_time_avg,'r',label='Loop time avg')
plt.xlabel('Iteration Number')
plt.ylabel('Step time and loop time averages')
plt.title('Plot 1')
#plt.legend()
plt.annotate('{}'.format("max"), xy=(maxIndLT,maxLT), xytext=(-10, 10), ha='right', textcoords='offset points',arrowprops=dict(arrowstyle='->', shrinkA=0))
plt.annotate('{}'.format("min"), xy=(minIndLT,minLT), xytext=(-10, 10), ha='right', textcoords='offset points',arrowprops=dict(arrowstyle='->', shrinkA=0))
plt.legend(prop = fontP,loc='upper center')
plt.savefig('../plots/g06_lab09_plot01.png')
plt.clf() #Clearing to use it later
############################################### Plot 2
# All component averages plus their sum on one set of axes.
for i in range(NoOfIter):
sum_of_avg[i]=collision_time_avg[i]+velocity_time_avg[i]+position_time_avg[i]
plt.plot(iteration_number,step_time_avg,'r',label='Step time avg')
plt.plot(iteration_number,collision_time_avg,'g',label='Collision time avg')
plt.plot(iteration_number,velocity_time_avg,'b',label='Velocity time avg')
plt.plot(iteration_number,position_time_avg,'k',label='Position time avg')
plt.plot(iteration_number,sum_of_avg,'m',label='Sum of averages')
plt.legend(prop = fontP)
plt.title('Plot 2')
plt.xlabel('Iteration Number')
plt.ylabel('Various averages')
plt.savefig('../plots/g06_lab09_plot02.png')
plt.clf() #Clearing to use it later
############################################## Plot 3
# Step-time averages with (max - min) rerun spread as error bars.
plt.errorbar(iteration_number,step_time_avg,yerr=error_bars,fmt='o',label='errorbars')
plt.plot(iteration_number,step_time_avg,'r',label='Step time avg')
plt.xlabel('Iteration number')
plt.ylabel('Step time with error bars')
plt.title('Plot 3')
plt.legend(prop = fontP)
plt.savefig('../plots/g06_lab09_plot03.png')
plt.clf() #Clearing to use it later
############################################# Plot 5
# Compare the random-input CSV against the full run, with linear fits.
f=open('../data/g06_lab05data_random.csv','r') #RandomCSV
random_step_average=[0 for x in range(NoOfIter)] #No of iterations in random csv
NoOfRerunsR = 10
for i in range(NoOfIter):
random_step_sum=0
for j in range(NoOfRerunsR): #No. of values in each iteration
line=f.readline()
words=line.split(',')
random_step_sum+=float(words[2])
random_step_average[i]=random_step_sum/NoOfRerunsR
fit1 = polyfit(iteration_number,random_step_average,1)
fit_fn1 = poly1d(fit1) # fit_fn is now a function which takes in x and returns an estimate for y
plot(iteration_number,fit_fn1(iteration_number),'b',linestyle = "-.",label='Best Fit Random')
fit2 = polyfit(iteration_number,step_time_avg,1)
fit_fn2 = poly1d(fit2) # fit_fn is now a function which takes in x and returns an estimate for y
plot(iteration_number,fit_fn2(iteration_number),'g',linestyle = "--",label='Best Fit Full')
plt.plot(iteration_number,random_step_average,color = 'r',linestyle = "None",label='Step time avg in random file', marker='o')
plt.plot(iteration_number,step_time_avg,color = 'b',linestyle = "None",label='Step time avg', marker='o')
plt.title('Plot 5')
plt.legend(prop = fontP)
plt.xlabel('Iteration number')
plt.ylabel('Step time averages of both csv files')
plt.savefig('../plots/g06_lab09_plot05.png')
plt.clf() #Clearing to use it later
############################################# Plot 4
plt.hist(step_time_rollno,bins=NoOfReruns/5,cumulative='True',histtype='step',label="Cumulative graph")
plt.hist(step_time_rollno,bins=NoOfReruns/5,label="Frequency plot",align='mid')
plt.legend(prop = fontP,loc='upper center')
plt.xlabel('Bins')
plt.ylabel('Step time average frequency')
plt.title('Plot 4')
plt.savefig('../plots/g06_lab09_plot04.png')
plt.clf()
|
from django.contrib.auth.models import User
from django.db import models
class PlayerStat(models.Model):
# One-to-one extension of the auth User holding gameplay statistics;
# reachable from a user instance as `user.stats`. Deleting the user
# deletes the stats row.
user = models.OneToOneField(User, related_name="stats", on_delete=models.CASCADE)
# Accumulated score; new players start at zero.
points = models.IntegerField(default=0)
|
from functools import reduce
def ff_add(*a):
    """
    XOR-sum of any number of GF(2^8) elements (addition in the field).
    With no arguments the result is the additive identity, 0.

    >>> hex(ff_add(0x57,0x83))
    '0xd4'
    """
    total = 0
    for term in a:
        total ^= term
    return total
def xtime(a):
    """
    Multiply a GF(2^8) element by x (i.e. by 0x02), reducing with the AES
    polynomial (xor 0x1b) whenever the high bit overflows the byte.

    >>> hex(xtime(0x57))
    '0xae'
    >>> hex(xtime(0xae))
    '0x47'
    >>> hex(xtime(0x47))
    '0x8e'
    >>> hex(xtime(0x8e))
    '0x7'
    """
    doubled = (a << 1) & 0xff
    if a & 0x80:
        doubled ^= 0x1b
    return doubled
def ff_multiply(a, b):
    """
    GF(2^8) product via Russian-peasant multiplication: for every set bit
    of b, xor in the correspondingly doubled copy of a (at most 8 rounds).

    >>> hex(ff_multiply(0x57,0x13))
    '0xfe'
    """
    product = 0
    for _ in range(8):
        if a == 0 or b == 0:
            break
        if b & 0x01:
            product ^= a
        b >>= 1
        a = xtime(a)
    return product
# This file is a "Hello, world!" in Python language for wandbox-vscode.
greeting = "Hello, world!"
print(greeting)
# Python language references:
# https://www.python.org
|
def solution(A, count=0):
    """Count the unit moves needed to make every element of A equal.

    Repeatedly sorts A in place; while values differ, either shifts one
    unit from the current maximum to the current minimum (when they are
    at least 2 apart) or bumps the minimum by one. Mutates A.
    """
    while True:
        A.sort()
        if len(set(A)) == 1:
            return count
        if A[-1] - 2 >= A[0]:
            A[-1] -= 1
            A[0] += 1
        else:
            A[0] += 1
        count += 1
# Exercise the solver on three sample inputs.
for sample in ([1, 2, 2, 4], [4, 2, 4, 6], [1, 1, 2, 1]):
    print(solution(sample))
|
# Log-analysis report over the "news" database: top articles, author
# popularity, and high-error days.
import psycopg2
DB_NAME = "news"
connection = psycopg2.connect(database="news", user="postgres", password="password", host="localhost")
cursor = connection.cursor()
# Query 1: three most-viewed articles, matching log paths to article slugs.
# NOTE(review): grouping by log.path as well as articles.title can split an
# article's count across distinct paths — confirm against the log schema.
cursor.execute(
"select articles.title, count(*) as views "
"from articles inner join log on log.path "
"like concat('%', articles.slug, '%') "
"where log.status like '%200%' group by "
"articles.title, log.path order by views desc limit 3")
for row in cursor:
print("%s - %i views" % (row[0], row[1]))
print("------------------------------------------------------------")
# Query 2: authors ranked by total views of their articles.
cursor.execute(
"select authors.name, count(*) as views from articles inner "
"join authors on articles.author = authors.id inner join log "
"on log.path like concat('%', articles.slug, '%') where "
"log.status like '%200%' group "
"by authors.name order by views desc"
)
for row in cursor:
print("%s - %i views" % (row[0], row[1]))
print("------------------------------------------------------------")
# Query 3: days on which 404s were at least 1% of all requests.
cursor.execute(
"select day, perc from ("
"select day, round((sum(requests)/(select count(*) from log where "
"substring(cast(log.time as text), 0, 11) = day) * 100), 2) as "
"perc from (select substring(cast(log.time as text), 0, 11) as day, "
"count(*) as requests from log where status like '%404%' group by day)"
"as log_percentage group by day order by perc desc) as final_query "
"where perc >= 1"
)
for row in cursor:
print("%s - %i%s errors" % (row[0], row[1], "%"))
print("------------------------------------------------------------")
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import date
from django.db import migrations, models
class Migration(migrations.Migration):
# Drops the date_lower/date_upper columns from the query model.
# NOTE(review): each AlterField immediately precedes a RemoveField for the
# same column — the alter is redundant but harmless; this migration is
# historical (likely auto-generated) and must not be edited once applied.
dependencies = [
('query', '0009_auto_20150730_1646'),
]
operations = [
migrations.AlterField(
model_name='query',
name='date_lower',
field=models.DateField(default=date.today),
),
migrations.RemoveField(
model_name='query',
name='date_lower',
),
migrations.AlterField(
model_name='query',
name='date_upper',
field=models.DateField(default=date.today),
),
migrations.RemoveField(
model_name='query',
name='date_upper',
),
]
|
""" Serializer"""
from rest_framework import serializers
from scrapingApp.models import Parliament1
class ParliamentSerializer(serializers.ModelSerializer):
""" Full serialization of a Parliament1 member record. """
class Meta:
""" Exposes the member's biographical and contact columns. """
model = Parliament1
fields = [
"id",
"date_born",
"name",
"place_born",
"profession",
"lang",
"party",
"email",
"fb",
]
# Expand related objects one level deep instead of emitting raw ids.
depth = 1
class UserSerializer(serializers.ModelSerializer):
""" Minimal serialization of a Parliament1 record: only the name. """
class Meta:
model = Parliament1
fields = ["name"]
depth = 1
|
# For each test case read |A|, the elements of A, |B|, and the elements
# of B, then report whether A is a subset of B (prints True/False).
test_cases = int(input())
results = []
for _ in range(test_cases):
    size_a = int(input())  # set size given by the input format; not needed
    set_a = set(map(int, input().split()))
    size_b = int(input())  # set size given by the input format; not needed
    set_b = set(map(int, input().split()))
    results.append(set_a.issubset(set_b))
for outcome in results:
    print(outcome)
#!/usr/bin/env python3
import os, fnmatch
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pandas as pd
from ImageMetrics import getNCC, getSSIM
def indexOfMedian(aColumn):
    """Indices of rows in the global `df` whose value equals the column median."""
    column = df[aColumn]
    return column[column == column.median()].index.tolist()
# Get all the files
#files = fnmatch.filter(os.listdir('.'), '*.csv')
# One CSV per evolutionary configuration:
# {generational, steady_state} x {tournament, threshold} x {mitosis, no_mitosis}.
files = [
"reconstruction-RMSE-generational-tournament-mitosis.csv",
"reconstruction-RMSE-generational-tournament-no_mitosis.csv",
"reconstruction-RMSE-generational-threshold-mitosis.csv",
"reconstruction-RMSE-generational-threshold-no_mitosis.csv",
"reconstruction-RMSE-steady_state-tournament-mitosis.csv",
"reconstruction-RMSE-steady_state-tournament-no_mitosis.csv",
"reconstruction-RMSE-steady_state-threshold-mitosis.csv",
"reconstruction-RMSE-steady_state-threshold-no_mitosis.csv"
]
# Per-configuration metric columns, collected for the box plots below.
data_RMSE_sinogram = [];
data_ZNCC_sinogram = [];
data_SSIM_sinogram = [];
data_RMSE_reconstruction = [];
data_ZNCC_reconstruction = [];
data_SSIM_reconstruction = [];
data_new_individual_counter = [];
# Box-plot x-axis: one tick per configuration.
xticks_1 = [1, 2, 3, 4, 5, 6, 7 , 8];
xticks_2 = [
"generational\ntournament\nmitosis",
"generational\ntournament\nno mitosis",
"generational\nthreshold\nmitosis",
"generational\nthreshold\nno mitosis",
"steady state\ntournament\nmitosis",
"steady state\ntournament\nno mitosis",
"steady state\nthreshold\nmitosis",
"steady state\nthreshold\nno mitosis"
];
# 2 x 8 mosaic of median-run reconstructions (filled in the loop below).
fig = plt.figure(figsize=(12, 5));
plt.axis('off')
# Ground truth used for the SSIM scores shown in the subplot titles.
reference = np.loadtxt("RUN-1/RMSE-steady_state-threshold-mitosis-1-groundtruth.txt");
i = 0
for file in files:
    print(file)
    df = pd.read_csv(file)

    # Collect the sinogram metrics for the box plots.
    data_RMSE_sinogram.append(df['RMSE_sinogram'])
    data_ZNCC_sinogram.append(df['ZNCC_sinogram'])
    data_SSIM_sinogram.append(df['SSIM_sinogram'])

    # Run whose RMSE_sinogram equals the median (last such index).
    median_index = indexOfMedian("RMSE_sinogram")[-1]
    print(median_index, len(df['RMSE_sinogram']))

    # Derive the reconstruction file names from the CSV name instead of the
    # original 8-way if/elif: the configuration string is the CSV name minus
    # its "reconstruction-RMSE-" prefix and ".csv" suffix, and no-mitosis
    # runs live in "RUN-no_mitosis-<n>" directories rather than "RUN-<n>".
    config = file[len("reconstruction-RMSE-"):-len(".csv")]
    run = str(median_index + 1)
    run_dir = ("RUN-no_mitosis-" if config.endswith("no_mitosis") else "RUN-") + run
    suffix = "-RMSE-" + config + "-" + run + "-reconstruction."
    file_name1 = run_dir + "/with_bad_flies" + suffix
    file_name2 = run_dir + "/without_bad_flies" + suffix

    # Top row: reconstruction with bad flies; bottom row: without.
    ax = plt.subplot(2, 8, i + 1)
    img = np.loadtxt(file_name1 + "txt")
    ax.imshow(img, cmap='gray')
    ax.set_title(xticks_2[i] + "\nSSIM: " + str(round(100 * getSSIM(reference, img))) + "%")

    ax = plt.subplot(2, 8, i + 1 + 8)
    img = np.loadtxt(file_name2 + "txt")
    ax.imshow(img, cmap='gray')
    ax.set_title("\nSSIM: " + str(round(100 * getSSIM(reference, img))) + "%")

    # Reconstruction metrics and individual counts for the later box plots.
    data_RMSE_reconstruction.append(df['RMSE_reconstruction'])
    data_ZNCC_reconstruction.append(df['ZNCC_reconstruction'])
    data_SSIM_reconstruction.append(df['SSIM_reconstruction'])
    data_new_individual_counter.append(df['new_individual_counter'])

    i += 1
fig.savefig("reconstructions.pdf", bbox_inches='tight')

# One box plot per collected metric, saved in the same order the original
# produced them: (title, data, output file).
box_plots = [
    ('RMSE sinogram (global fitness)', data_RMSE_sinogram, "RMSE_sinogram.pdf"),
    ('ZNCC sinogram', data_ZNCC_sinogram, "ZNCC_sinogram.pdf"),
    ('SSIM sinogram', data_SSIM_sinogram, "SSIM_sinogram.pdf"),
    ('RMSE reconstruction', data_RMSE_reconstruction, "RMSE_reconstruction.pdf"),
    ('ZNCC reconstruction', data_ZNCC_reconstruction, "ZNCC_reconstruction.pdf"),
    ('SSIM reconstruction', data_SSIM_reconstruction, "SSIM_reconstruction.pdf"),
    ('Number of individuals created', data_new_individual_counter, "new_individual_counter.pdf"),
]
for plot_title, plot_data, file_name in box_plots:
    fig = plt.figure(figsize=(10, 5))
    plt.title(plot_title)
    plt.boxplot(plot_data)
    plt.xticks(xticks_1, xticks_2)
    fig.savefig(file_name, bbox_inches='tight')

plt.show()
|
import sys
import os
# Redirect stdin to a local test-input file.
# NOTE(review): hard-coded Windows path — works only on the author's machine.
f = open("C:/Users/user/Documents/python/ant_re/import.txt","r")
sys.stdin = f
# -*- coding: utf-8 -*-
# Input: N jobs with start times S and end times T.
N = int(input())
S = list(map(int,input().split()))
T = list(map(int,input().split()))
# Array of pairs used to sort the jobs.
itv = [[0,0] for _ in range(N)]
# Pairs compare lexicographically; we want earliest finish time first,
# so T goes in the first slot and S in the second.
for i in range(N):
itv[i][0] = T[i]
itv[i][1] = S[i]
# Greedy interval scheduling: t is the end time of the last job selected;
# take each job whose start is after t.
ans,t = 0,0
for i in range(N):
if t < itv[i][1]:
ans += 1
t = itv[i][0]
print(ans)
|
from rest_framework import serializers
from .models import Category, Case, CasePicture
from ..common.serializers import ArticleSerializer
from ..utils import build_absolute_uri
class CasePictureSerializer(serializers.ModelSerializer):
# Exposes a case picture as its uuid (named `id`) plus an absolute image URL.
id = serializers.CharField(source='uuid')
image_url = serializers.SerializerMethodField()
class Meta:
model = CasePicture
fields = ('id', 'image_url',)
def get_image_url(self, obj):
# Build an absolute URL from the current request so clients outside
# the site origin can load the image.
request = self.context.get('request')
return build_absolute_uri(request, obj.image.url)
class FeaturedCategorySerializer(serializers.ModelSerializer):
# Category with showcase images: the featured pictures of its cases, or
# (when none are flagged featured) the first three pictures as a fallback.
title = serializers.CharField(source='name')
images = serializers.SerializerMethodField()
class Meta:
model = Category
fields = ('id', 'title', 'images')
def get_images(self, obj):
featured = [
CasePictureSerializer(img, context=self.context).data
for img in CasePicture.objects.filter(case__category=obj, featured=True)]
if not featured:
# Fallback: no featured pictures — take up to three of any pictures.
featured = [
CasePictureSerializer(img, context=self.context).data
for img in CasePicture.objects.filter(case__category=obj)[:3]]
return featured
class CategorySerializer(serializers.ModelSerializer):
# Plain category: id plus its name exposed under the key `title`.
title = serializers.CharField(source='name')
class Meta:
model = Category
fields = ('id', 'title', )
class CaseSerializer(serializers.ModelSerializer):
# Case summary: id, name, and one cover picture.
cover = serializers.SerializerMethodField()
class Meta:
model = Case
fields = ('id', 'cover', 'name')
def get_cover(self, obj):
# Prefer a cover-flagged image, newest first (order_by '-cover', '-update_at').
return CasePictureSerializer(
obj.images.order_by('-cover', '-update_at').first(), context=self.context).data
class CaseDetailedSerializer(CaseSerializer):
# Full case detail: every picture plus an optional long-form article.
images = CasePictureSerializer(many=True, read_only=True)
detailed_description = serializers.SerializerMethodField()
class Meta:
model = Case
fields = ('id', 'images', 'name', 'short_description', 'detailed_description')
def get_detailed_description(self, obj):
# The detailed description is an optional related object; return None
# when the case has no casedetaileddescription attached.
if hasattr(obj, 'casedetaileddescription'):
return ArticleSerializer(obj.casedetaileddescription.article, context=self.context).data
else:
return None
|
import ucam_webauth
import ucam_webauth.rsa
import ucam_webauth.flask_glue
from werkzeug.middleware.proxy_fix import ProxyFix
import os
class WLSRequest(ucam_webauth.Request):
# Request pointed at the SRCF web login service endpoint.
def __str__(self):
query_string = ucam_webauth.Request.__str__(self)
return "https://auth.srcf.net/wls/authenticate?" + query_string
class WLSResponse(ucam_webauth.Response):
# Response-signature verification keys, loaded from disk once at class
# definition (import) time, keyed by key id as a string.
keys = dict()
for kid in (2, 500):
with open('/etc/ucam_webauth_keys/pubkey{}'.format(kid), 'rb') as f:
keys[str(kid)] = ucam_webauth.rsa.load_key(f.read())
class WLSAuthDecorator(ucam_webauth.flask_glue.AuthDecorator):
# Flask glue wired to the SRCF request/response classes and logout URL.
request_class = WLSRequest
response_class = WLSResponse
logout_url = "https://auth.srcf.net/logout"
def upstream_wls(display_name: str):
# Build an auth decorator for one upstream service; iact=True forces an
# interactive (re-)authentication at the WLS, require_ptags=None accepts
# any principal tags.
return WLSAuthDecorator(desc=display_name, require_ptags=None, iact=True)
def setup_app(app):
# Trust one layer of reverse proxy for client address and host headers.
app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1, x_host=1)
# Session signing key and allowed Host header come from the environment;
# missing FLASK_SECRET_KEY raises KeyError deliberately.
app.secret_key = os.environ['FLASK_SECRET_KEY']
app.request_class.trusted_hosts = [os.environ["FLASK_HOSTNAME"]]
|
# Net-salary calculator: reads the hourly wage and monthly hours, then
# prints the paycheck breakdown (11% income tax, 8% INSS, 5% union fee).
wage_per_hour = float(input("Quanto você ganha por hora?"))
hours_worked = int(input("Quantas horas você trabalhou no mês?"))

gross = wage_per_hour * hours_worked
income_tax = gross * 0.11
inss = gross * 0.08
union_fee = gross * 0.05
net = gross - income_tax - inss - union_fee

print(f"Salário Bruto : R${gross:.2f}")
print(f"IR (11%) : R${income_tax:.2f}")
print(f"INSS : R${inss:.2f}")
print(f"Sindicato : R${union_fee:.2f}")
print(f"Salário Líquido : R${net:.2f}")
|
#!/usr/bin/env python
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
#
# # Author information
#
# @author: HUC Stéphane
# @email: <devs@stephane-huc.net>
# @url: http://stephane-huc.net
#
# @license : BSD "Simplified" 2 clauses
#
''' Manage notifications systems '''
#import dbus
import pynotify
class Info(object):
'''Desktop notification helper backed by pynotify.'''
def __init__(self, init):
# Copy every key of the `init` mapping onto the instance; notify()
# reads self.NAME, self.title and self.img['icone'] from these.
for i in init:
setattr(self, i, init[i])
def notify(self, text):
'''Show a normal-urgency desktop notification with the given body text.'''
pynotify.init(self.NAME)
notification = pynotify.Notification(self.title, text,
self.img['icone'])
notification.set_urgency(pynotify.URGENCY_NORMAL)
notification.show()
# Alternative dbus-based implementation, kept disabled:
# import dbus
#item = "org.freedesktop.Notifications"
#path = "/org/freedesktop/Notifications"
#interface = "org.freedesktop.Notifications"
#app_name = self.NAME
#id_num_to_replace = 0
#icon = "/usr/share/icons/Tango/32x32/status/sunny.png"
##title = "Notification Title"
##text = "This is the body"
#actions_list = ''
#hint = ''
#time = 3000 # Use seconds x 1000
#bus = dbus.SessionBus()
#notif = bus.get_object(item, path)
#notify = dbus.Interface(notif, interface)
#notify.Notify(app_name, id_num_to_replace, icon, title, text,
#actions_list, hint, time)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
class Flatten(nn.Module):
    """Collapse a (N, C, H, W) batch into (N, C*H*W) for a linear layer."""

    def forward(self, x):
        batch, channels, height, width = x.size()
        return x.view(batch, channels * height * width)
class SimpleSliceNet(nn.Module):
    """Small 3-stage CNN scoring a single-channel slice with one logit."""

    def __init__(self):
        super(SimpleSliceNet, self).__init__()
        # Three conv/BN/LeakyReLU/pool stages, then flatten into one linear unit.
        self.cnn = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=3),
            nn.BatchNorm2d(num_features=16),
            nn.LeakyReLU(),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(16, 32, kernel_size=3),
            nn.BatchNorm2d(num_features=32),
            nn.LeakyReLU(),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(32, 16, kernel_size=3),
            nn.BatchNorm2d(num_features=16),
            nn.LeakyReLU(),
            nn.MaxPool2d(kernel_size=2),
            Flatten(),
            nn.Linear(8464, 1)  # 8464 = 16 * 23 * 23 flattened features
        )
        self.optimizer = torch.optim.Adam(self.parameters())
        self.loss_fn = nn.BCEWithLogitsLoss()

    def forward(self, slice_):
        logits = self.cnn(slice_)
        return logits.squeeze(dim=1)

    def train_step(self, slice_, targets):
        """Run one optimizer step on a batch and return the batch loss."""
        loss = self.loss_fn(self(slice_), targets)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss
class SliceResnet18(nn.Module):
    """ImageNet-pretrained ResNet-18 with its head replaced by a single logit.

    With finetune=False the pretrained backbone is frozen and only the new
    head trains.
    """

    def __init__(self, finetune=True):
        super(SliceResnet18, self).__init__()
        self.net = torchvision.models.resnet18(pretrained=True)
        if not finetune:
            # Freeze the pretrained weights; the fc layer replaced below
            # keeps requires_grad=True by default.
            for param in self.net.parameters():
                param.requires_grad = False
        in_features = self.net.fc.in_features
        self.net.fc = nn.Linear(in_features, 1)
        trainable_params = filter(lambda p: p.requires_grad, self.net.parameters())
        self.optimizer = torch.optim.Adam(trainable_params)
        self.loss_fn = nn.BCEWithLogitsLoss()

    def forward(self, slice_):
        return self.net(slice_).squeeze(dim=1)

    def train_step(self, slice_, targets):
        """Run one optimizer step on a batch and return the batch loss."""
        logits = self(slice_)
        loss = self.loss_fn(logits, targets)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss
class SliceDensenet201(nn.Module):
    """ImageNet-pretrained DenseNet-201 with its classifier replaced by one logit.

    With finetune=False the pretrained backbone is frozen and only the new
    classifier trains.
    """

    def __init__(self, finetune=True):
        super(SliceDensenet201, self).__init__()
        self.net = torchvision.models.densenet201(pretrained=True)
        if not finetune:
            # Freeze the pretrained weights; the classifier replaced below
            # keeps requires_grad=True by default.
            for param in self.net.parameters():
                param.requires_grad = False
        in_features = self.net.classifier.in_features
        self.net.classifier = nn.Linear(in_features, 1)
        trainable_params = filter(lambda p: p.requires_grad, self.net.parameters())
        self.optimizer = torch.optim.Adam(trainable_params)
        self.loss_fn = nn.BCEWithLogitsLoss()

    def forward(self, slice_):
        return self.net(slice_).squeeze(dim=1)

    def train_step(self, slice_, targets):
        """Run one optimizer step on a batch and return the batch loss."""
        logits = self(slice_)
        loss = self.loss_fn(logits, targets)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss
|
#!/usr/bin/env python
from flask import Flask, Response, request
import requests
EVENTS_URI = 'https://www.carnegielibrary.org/events/'

app = Flask(__name__)


@app.route("/events", methods=['GET'])
def events():
    """Proxy the library's events feed as iCal.

    Every incoming query parameter is forwarded upstream with a `tribe_`
    prefix (the WordPress events-calendar convention), plus ical=1 to
    request calendar output.
    """
    params = {'ical': '1'}
    params.update(('tribe_' + name, value) for name, value in request.args.items())
    upstream = requests.get(url=EVENTS_URI, params=params)
    upstream.raise_for_status()
    return Response(upstream.content.strip(), mimetype="text/calendar")


if __name__ == "__main__":
    app.run()
|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import *
# Seed the shoeland catalog database with one owner, three brand
# categories, and one product per category.
engine = create_engine("sqlite:///shoeland.db")
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Optional reset of existing rows, kept disabled:
# session.query(Owner).delete()
# session.query(Category).delete()
# session.query(Item_Details).delete()
# Create Owner
owner1 = Owner(owner_name="saiteja",
owner_email="saitej9705@gmail.com",
owner_picture="https://lh3.googleusercontent.com/-HBG4P1kjzDY/"
"XFqmGLcwyyI/AAAAAAAAAjo/23mx7zNnop8fb8ZYPEhESncXe3N2Nwl_wCEwY"
"BhgL/w139-h140-p/PROFILE%2BPIC.jpg")
session.add(owner1)
session.commit()
print("Done..!")
# Create Categories (owner_id=1 refers to the owner committed above)
category1 = Category(name="NIKE", owner_id=1)
session.add(category1)
session.commit()
category2 = Category(name="PUMA", owner_id=1)
session.add(category2)
session.commit()
category3 = Category(name="REEBOK", owner_id=1)
session.add(category3)
session.commit()
# Item_Details: one sample product per category
product1 = Item_Details(brandname="NIKE",
model="1 pegasus 33",
image="https://n3.sdlcdn.com/imgs/h/s/4/Nike-1-"
"pegasus-33-Blue-SDL676548167-1-98ebf.jpeg",
color="blue",
price="3000",
description="NIKE sports shoe"
"best shoe for your sports",
category_id="1",
ownerid=1)
session.add(product1)
session.commit()
product2 = Item_Details(brandname="PUMA",
model="RANGER",
image="https://encrypted-tbn0.gstatic.com/images?"
"q=tbn:ANd9GcQgYvw7sNPcJq-cSgGBX9f3WP6opMV35"
"-3_uFFmdcepuLfYePEJ",
color="green",
price="3000",
description="PUMA sports shoe"
"best shoe for your sports",
category_id="2",
ownerid=1)
session.add(product2)
session.commit()
product3 = Item_Details(brandname="REEBOK",
model="CLOUD MODA",
image="https://shop.r10s.jp/cloudmoda/cabinet/reebok/"
"v67424_01.jpg",
color="orange",
price="3000",
description="REEBOK sports shoe"
"best shoe for your sports",
category_id="3",
ownerid=1)
session.add(product3)
session.commit()
print("Brands are Added..!")
|
import os
import sys
import json
import re
import _pickle as pickle
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from nltk.corpus import stopwords
from nltk import word_tokenize
from nltk.stem import PorterStemmer
from nltk.stem import SnowballStemmer
from module import Stemmer
# Train an LSA intent model (TF-IDF + truncated SVD) for one domain and
# pickle every artefact needed at inference time.
vectorDimension = 200

# Usage: train.py <domain>; training data lives in data/<domain>.json.
domain = sys.argv[1]
scriptDir = os.path.dirname(__file__)
fileData = os.path.join(scriptDir, 'data', domain + '.json')

# Collect the training utterances and their intent labels.
utterance = []
intent = []
with open(fileData, 'r') as dataFile:
    data = json.load(dataFile)
    for nameUtterances in data['tasks']:
        for utt in nameUtterances['utterances']:
            utterance.append(utt)
            intent.append(nameUtterances['name'])

myIntent = set(intent)
print('Identified domain:', domain)
print('Number of utterances for training:', len(intent))
print('Number of intents for training:', len(myIntent))

# Extra stop words come from a "key=word1,word2,..." formatted dictionary
# file; blank entries are skipped.
stopListFile = os.path.join(scriptDir, '..', 'dictionary', 'stopwords.txt')
stopWords = []
# `with` guarantees the handle is closed (the original leaked it).
with open(stopListFile, "r") as stopFile:
    for line in stopFile.read().split("\n"):
        if line != "":
            for s_word in line.split("=")[1].split(','):
                if re.sub(' ', '', s_word) != "":
                    stopWords.append(s_word)
stops = set(stopwords.words('english')) | set(stopWords)

# TF-IDF over 1..5-grams with the combined stop list and custom stemmer.
tfidfvec = TfidfVectorizer(utterance, decode_error='ignore', stop_words=stops, ngram_range=(1, 5), tokenizer=Stemmer.stemTokenize)
trainset_idf_vectorizer = tfidfvec.fit_transform(utterance).toarray()

# SVD requires n_components strictly below the feature count.
vLength = len(trainset_idf_vectorizer[1])
nDimension = vectorDimension
if vLength <= vectorDimension:
    nDimension = vLength - 1
svd = TruncatedSVD(n_components=nDimension, algorithm='randomized', n_iter=15, random_state=42)
trainLSA = svd.fit_transform(trainset_idf_vectorizer)


def _dump(obj, file_name):
    # Persist one model artefact, always closing the file handle.
    with open(file_name, 'wb') as fileObject:
        pickle.dump(obj, fileObject)


# Persist every stage under model/<domain>_*.m (same names as before).
pickle_path = os.path.join(scriptDir, 'model', domain + '_')
_dump(utterance, pickle_path + 'utterance.m')
_dump(intent, pickle_path + 'intent.m')
_dump(tfidfvec, pickle_path + 'tfidfvec.m')
_dump(svd, pickle_path + 'svd.m')
_dump(trainLSA, pickle_path + 'trainLSA.m')
import cx_Oracle # driver that makes Oracle DB easy to work with from Python
# NOTE(review): hard-coded demo credentials (SCOTT/TIGER, the classic Oracle
# sample schema) -- fine for a local tutorial, never for production code.
connection = cx_Oracle.connect(user="SCOTT", password="TIGER", dsn="xe")
print('1---',connection)
print("Database version:", connection.version) # Database version: 11.2.0.2.0
cur = connection.cursor()
print('2---',cur)
# Alternative 1: iterate the cursor returned by execute() directly.
# for row in cur.execute("""select * from dept"""):
# print(row, type(row)) # rows come back as tuples!
# Alternative 2: fetch one row at a time with fetchone().
# cur = connection.cursor()
# cur.execute("select * from dept")
# while True:
# row = cur.fetchone() # print rows one at a time
# print(1)
# if row is None:
# break
# print(row)
# Alternative 3: fetch in batches with fetchmany().
# cur = connection.cursor()
# cur.execute("select * from dept")
# num_rows = 3
# while True:
# rows = cur.fetchmany(num_rows) # fetch num_rows rows per batch
# print(1)
# if not rows:
# break
# for row in rows:
# print(row)
# Approach used here: fetch the whole result set at once with fetchall().
cur = connection.cursor()
cur.execute("select * from dept")
rows = cur.fetchall()
for row in rows:
    print(row)
# Release the cursor and connection when done.
cur.close()
connection.close()
|
# -*- coding: utf-8 -*-
import csv

## Clean pm2.5Taiwan.csv: each source row holds one (date, station,
## measurement type) with 24 hourly readings in columns 3..26.  Average the
## 24 readings of each row, and after every 14 rows (one full set of
## measurement types for a date/station) emit a single aggregated row.
## The output is opened with newline='' as required by the csv module to
## avoid doubled line endings on Windows.
with open('pm2.5Taiwan.csv', encoding="utf-8") as input_csv \
    ,open('cleaned_pm2.5.csv','w', encoding="utf-8", newline='') as output_csv:
    reader = csv.reader(input_csv)   # read the raw CSV
    writer = csv.writer(output_csv)  # write the cleaned CSV
    next(reader)                     # skip the source header row
    # Output header: date, station, then the 14 daily-averaged measurements.
    writer.writerow(['日期','測站','AMB_TEMP', 'CO', 'NO', 'NO2', 'NOx', 'O3', 'PM10', 'PM2.5', 'RH', 'SO2', 'WD_HR', 'WIND_DIREC', 'WIND_SPEED','WS_HR'])
    x = 1             # progress counter: number of output rows written
    averages = []     # daily means for the current 14-row group
    row_iter = 1      # 1-based index of the row within the current group
    for row in reader:
        sum_temp = 0
        amount = 0
        for i in range(3, 27):   # the 24 hourly readings
            try:
                sum_temp += float(row[i])
                amount += 1
            # Narrowed from a bare except: non-numeric flag values (e.g.
            # 'NR') raise ValueError, short rows raise IndexError.
            except (ValueError, IndexError):
                continue
        if amount == 0:
            # No valid readings at all: record 0, as the original did.
            averages.append(0)
        else:
            averages.append(round(sum_temp / amount, 5))
        row_iter += 1
        if row_iter == 15:   # 14 measurement rows collected for this group
            writer.writerow([row[0], row[1]] + averages)
            averages = []
            row_iter = 1
            print(x)         # progress output
            x += 1
|
"""
Django views for the CardControl application. Since our application has a
frontend of static content built in Angular, we do not have any significant
views here. Note that there are some views in the backend for infrastrucuture
related tasks.
"""
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
__author__ = 'James T. Dietrich'
__contact__ = 'james.dietrich@uni.edu'
__copyright__ = '(c) James Dietrich 2019'
__license__ = 'MIT'
__date__ = '26 JUNE 2019'
__version__ = '4.0'
__status__ = "initial release"
__url__ = "https://github.com/geojames/pyBathySfM"
"""
Name: py_BathySfM.py
Compatibility: Python 3.7
Description: This program performs a per-camera refration correction on a
Structure-from-Motion point cloud. Additional documnetation,
sample data, and a tutorial are availible from the GitHub
address below.
URL: https://github.com/geojames/pyBathySfM
Requires: PyQT5, numpy, pandas, sympy, matplotlib
Dev ToDo: 1) speed up camera geometry calculations
AUTHOR: James T. Dietrich
ORGANIZATION: University of Northern Iowa
Contact: james.dietrich@uni.edu
Copyright: (c) James Dietrich 2019
Licence: MIT
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# -- TO RUN --
# execute this code form the command line: python py_BathySfM.py
# or from an editor
#------------------------------------------------------------------------------
# Imports
import sys
from PyQt5.QtWidgets import QDialog, QApplication
from py_BathySfM_gui import Ui_bathySfM_gui
class AppWindow(QDialog):
    """Top-level dialog window hosting the bathySfM GUI.

    The layout comes from ``Ui_bathySfM_gui`` (presumably generated from a
    Qt Designer .ui file via pyuic5 -- confirm) and is attached to this
    dialog on construction.
    """
    def __init__(self):
        super().__init__()
        self.ui = Ui_bathySfM_gui()
        self.ui.setupUi(self)
        # Shown immediately on construction; the module-level w.show()
        # call afterwards is then a no-op.
        self.show()
if __name__ == '__main__':
    # Guarded so importing this module (e.g. for tests) no longer launches
    # the GUI; running the file as a script behaves exactly as before.
    app = QApplication(sys.argv)
    w = AppWindow()
    w.show()  # redundant with self.show() in __init__, kept for clarity
    sys.exit(app.exec_())  # block until the Qt event loop exits
|
import os
import sys


def bin_edges(start, end, num_bins):
    """Return the left edges of ``num_bins`` equal-width bins over [start, end).

    Args:
        start: lower bound of the binned range.
        end: upper bound of the binned range.
        num_bins: number of bins (must be > 0).

    Returns:
        List of ``num_bins`` floats: start + i * (end - start) / num_bins.
    """
    width = (end - start) / num_bins
    return [start + width * i for i in range(num_bins)]


if __name__ == '__main__':
    # Usage: python <script> <binstart> <binend> <numbins>
    # The original used the Python 2 print statement (`print edge,", ",`),
    # a syntax error under Python 3; the comma-separated single-line output
    # is preserved here.
    edges = bin_edges(float(sys.argv[1]), float(sys.argv[2]), int(sys.argv[3]))
    for edge in edges:
        print(edge, ", ", end=" ")
|
from django import template
from django.template.defaulttags import register

# NOTE(review): this rebinding shadows the `register` imported above, so
# that import is effectively unused -- kept so the module surface stays
# unchanged for any code importing it from here.
register = template.Library()


@register.filter
def index(indexable, i):
    """Template filter returning ``indexable[i]``.

    Usage in a template: ``{{ mylist|index:3 }}``.  Raises IndexError /
    KeyError just like plain subscription would.
    """
    return indexable[i]
from spack import *
from glob import glob
from string import Template
import re
import os
import fnmatch
import sys
import shutil
class Cmssw(Package):
    """CMSSW built with Cmakefile generated by scram2cmake.

    Converts the SCRAM build configuration to CMake with scram2cmake,
    then configures and builds with Ninja.
    """
    homepage = "http://cms-sw.github.io"
    url = "http://cmsrep.cern.ch/cmssw/repos/cms/SOURCES/slc_amd64_gcc630/cms/cmssw/CMSSW_9_2_12/src.tar.gz"
    version('10.2.0.pre1', git='https://github.com/cms-sw/cmssw.git',
            tag='CMSSW_10_2_0_pre1')
    # Helper repos: CMake support modules and the SCRAM->CMake converter.
    resource(name='cmaketools', git='https://github.com/gartung/cmaketools.git',
             commit='df41c9ce7c950397ed52b289887144749b866b24',
             placement='cmaketools'
             )
    resource(name='scram2cmake', git='https://github.com/gartung/scram2cmake.git',
             commit='034c581',
             placement='scram2cmake'
             )
    # Build tools.
    depends_on('builtin.ninja')
    depends_on('builtin.cmake')
    # External dependencies.  The duplicate declarations of 'expat',
    # 'sqlite' and 'bzip2' present in the original list were removed;
    # each package is declared exactly once.
    depends_on('root')
    depends_on('intel-tbb')
    depends_on('tinyxml')
    depends_on('tinyxml2')
    depends_on('clhep~cxx11+cxx14')
    depends_on('md5')
    depends_on('python+shared')
    depends_on('vdt')
    depends_on('boost@1.63.0')
    depends_on('libsigcpp')
    depends_on('xrootd')
    depends_on('cppunit')
    depends_on('xerces-c')
    depends_on('expat')
    depends_on('sqlite')
    depends_on('bzip2')
    depends_on('gsl')
    depends_on('hepmc')
    depends_on('heppdt')
    depends_on('libpng')
    depends_on('giflib')
    depends_on('openssl')
    depends_on('pcre')
    depends_on('zlib')
    depends_on('xz')
    depends_on('libtiff')
    depends_on('libjpeg-turbo')
    depends_on('libxml2')
    depends_on('fireworks-geometry')
    depends_on('llvm')
    depends_on('libuuid')
    depends_on('valgrind')
    depends_on('geant4')
    depends_on('protobuf')
    depends_on('eigen')
    depends_on('curl')
    depends_on('classlib')
    depends_on('davix')
    depends_on('meschach')
    depends_on('fastjet')
    depends_on('fastjet-contrib')
    depends_on('fftjet')
    depends_on('pythia6')
    depends_on('pythia8')
    depends_on('occi')
    depends_on('oracle')
    depends_on('coral')
    depends_on('hector')
    depends_on('geant4-g4emlow')
    depends_on('geant4-g4ndl')
    depends_on('geant4-g4photonevaporation')
    depends_on('geant4-g4saiddata')
    depends_on('geant4-g4abla')
    depends_on('geant4-g4ensdfstate')
    depends_on('geant4-g4neutronsxs')
    depends_on('geant4-g4radioactivedecay')
    depends_on('libhepml')
    depends_on('lhapdf')
    depends_on('utm')
    depends_on('photospp')
    depends_on('rivet')
    depends_on('evtgen')
    depends_on('dcap')
    depends_on('tauolapp')
    depends_on('sherpa')
    depends_on('lwtnn')
    depends_on('yoda')
    depends_on('openloops')
    depends_on('qd')
    depends_on('blackhat')
    depends_on('yaml-cpp')
    depends_on('jemalloc')
    depends_on('ktjet')
    depends_on('herwig')
    depends_on('photos')
    depends_on('tauola')
    depends_on('jimmy')
    depends_on('cascade')
    depends_on('csctrackfinderemulation')
    depends_on('mcdb')
    depends_on('fftw')
    depends_on('netlib-lapack')
    depends_on('tensorflow')
    depends_on('dd4hep')

    def install(self, spec, prefix):
        """Generate CMake files from SCRAM, then configure and build with Ninja."""
        # Convert the SCRAM configuration into CMakeLists.
        s2c = Executable('scram2cmake/scram2cmake.py')
        s2c()
        with working_dir('spack-build', create=True):
            options = ['../']
            options.extend(std_cmake_args)
            # Point CMake at the include directory of every link dependency.
            for d in self.spec.traverse(root=False, deptype=('link')):
                var = '%s_INCLUDE_DIR' % d.name.upper()
                opt = '-D%s=%s' % (var, str(self.spec[d.name].prefix.include))
                options.append(opt)
            if sys.platform == 'darwin':
                # libuuid is not found automatically on macOS.
                options.append('-DUUID_INCLUDE_DIR=%s/include' %
                               self.spec['libuuid'].prefix)
                options.append('-DUUID_ROOT_DIR=%s' %
                               self.spec['libuuid'].prefix)
            # Explicit hints for packages whose layout CMake cannot guess.
            args = ['-DCMakeTools_DIR=%s/cmaketools' % self.stage.source_path,
                    '-DCLHEP_ROOT_DIR=%s' % self.spec['clhep'].prefix,
                    '-DBOOST_ROOT=%s' % self.spec['boost'].prefix,
                    '-DTBB_ROOT_DIR=%s' % self.spec['intel-tbb'].prefix,
                    '-DMD5ROOT=%s' % self.spec['md5'].prefix,
                    '-DDAVIXROOT=%s' % self.spec['davix'].prefix,
                    '-DSIGCPPROOT=%s' % self.spec['libsigcpp'].prefix,
                    '-DSIGCPP_INCLUDE_DIR=%s/sigc++-2.0' % self.spec['libsigcpp'].prefix.include,
                    '-DSHERPA_INCLUDE_DIR=%s/SHERPA-MC' % self.spec['sherpa'].prefix.include,
                    '-DDAVIX_INCLUDE_DIR=%s/davix' % self.spec['davix'].prefix.include,
                    '-DXROOTD_INCLUDE_DIR=%s/xrootd' % self.spec['xrootd'].prefix.include,
                    '-DTINYXMLROOT=%s' % self.spec['tinyxml'].prefix,
                    '-DCPPUNITROOT=%s' % self.spec['cppunit'].prefix,
                    '-DXERCESC_ROOT_DIR=%s' % self.spec['xerces-c'].prefix,
                    '-DGEANT4_INCLUDE_DIRS=%s/Geant4' % self.spec['geant4'].prefix.include,
                    '-DGEANT4_DIR=%s' % self.spec['geant4'].prefix,
                    '-DPYTHON_INCLUDE_DIR=%s/python%s' % (self.spec['python'].prefix.include, self.spec['python'].version.up_to(2)),
                    '-DCMAKE_CXX_FLAGS=-O2 -pthread -pipe -Werror=main -Werror=pointer-arith -Werror=overlength-strings -Wno-vla -Werror=overflow -std=c++1z -ftree-vectorize -Wstrict-overflow -Werror=array-bounds -Werror=format-contains-nul -Werror=type-limits -fvisibility-inlines-hidden -fno-math-errno --param vect-max-version-for-alias-checks=50 -Xassembler --compress-debug-sections -msse3 -felide-constructors -fmessage-length=0 -Wall -Wno-non-template-friend -Wno-long-long -Wreturn-type -Wunused -Wparentheses -Wno-deprecated -Wnon-virtual-dtor -fdiagnostics-show-option -Wno-unused-local-typedefs -Wno-attributes',
                    '-GNinja']
            options.extend(args)
            cmake(*options)
            # -k -1: keep building past failures; -v: echo each command.
            ninja('-k', '-1', '-v')
            ninja('install')
|
import numpy as np
class Plate:
    """Quadrilateral plate defined by four corner points in space.

    Corners are stored as numpy arrays in perimeter order
    p1 -> p2 -> p3 -> p4.  The ``dXY`` properties give the edge vector
    from corner X to corner Y, and ``lXY`` the corresponding edge length.
    """

    def __init__(self, p1, p2, p3, p4) -> None:
        self.p1 = np.array(p1)
        self.p2 = np.array(p2)
        self.p3 = np.array(p3)
        self.p4 = np.array(p4)

    @property
    def d12(self):
        # Edge vector p1 -> p2.
        return self.p2 - self.p1

    @property
    def l12(self):
        # Length of edge p1-p2.
        return np.linalg.norm(self.d12)

    @property
    def d14(self):
        # Edge vector p1 -> p4.
        return self.p4 - self.p1

    @property
    def l14(self):
        # Length of edge p1-p4.
        return np.linalg.norm(self.d14)

    @property
    def d43(self):
        # Edge vector p4 -> p3.
        return self.p3 - self.p4

    @property
    def l43(self):
        # Length of edge p4-p3.
        return np.linalg.norm(self.d43)

    @property
    def d23(self):
        # Edge vector p2 -> p3.
        return self.p3 - self.p2

    @property
    def l23(self):
        # Length of edge p2-p3.
        return np.linalg.norm(self.d23)

    @property
    def limit_points(self):
        """Return the four corner points as a tuple (p1, p2, p3, p4)."""
        return self.p1, self.p2, self.p3, self.p4

    def set_plate_limits(self, p1, p2, p3, p4):
        """Replace the four corner points.

        The points are converted to numpy arrays, matching ``__init__``.
        (The original stored the arguments as-is, so passing tuples or
        lists here broke the edge-vector properties afterwards.)
        """
        self.p1 = np.array(p1)
        self.p2 = np.array(p2)
        self.p3 = np.array(p3)
        self.p4 = np.array(p4)
class RectangularPlate(Plate):
    """
    Generic rectangular plate defined by 4 points in the space.

    Edge p1->p2 is the chord direction and edge p1->p4 the span
    direction; ``a`` and ``b`` are conventional aliases for them.
    """

    def __init__(self, p1, p2, p3, p4):
        super().__init__(p1, p2, p3, p4)

    @property
    def span(self):
        # Length of the span edge (p1 -> p4).
        return np.linalg.norm(self.d14)

    @property
    def chord(self):
        # Length of the chord edge (p1 -> p2).
        return np.linalg.norm(self.d12)

    @property
    def b(self):
        # Alias for the span length.
        return self.span

    @property
    def a(self):
        # Alias for the chord length.
        return self.chord

    @property
    def n12(self):
        # Unit vector along the chord edge.
        return self.d12 / self.chord

    @property
    def n14(self):
        # Unit vector along the span edge.
        return self.d14 / self.span

    @property
    def normal(self):
        # Unit normal via the right-hand rule on (chord x span).
        n = np.cross(self.d12, self.d14)
        return n / np.linalg.norm(n)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.