from collections import defaultdict
import random
with open('YOUR_TEXT_FILE.txt', encoding='utf8') as txt:
script = txt.read()
def create_tf_matrix(text):
words = text.split(" ")
tf_matrix = defaultdict(list)
succeeding_words = zip(words[0:-1], words[1:])
for w1, w2 in succeeding_words:
tf_matrix[w1].append(w2)
tf_matrix = dict(tf_matrix)
return tf_matrix
def generate_sentence(matrix, word_limit=10):
current_word = random.choice(list(matrix.keys()))
sentence = current_word.capitalize()
for i in range(word_limit-1):
next_word = random.choice(matrix[current_word])
sentence += ' ' + next_word
if next_word.endswith('.'):
return sentence
current_word = next_word
sentence += '.'
return sentence
matrix = create_tf_matrix(script)
print(generate_sentence(matrix, 20))
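# A minimal sketch (not part of the original script) of what the transition
# matrix looks like on a tiny corpus: each word maps to the list of words that
# follow it, and repeated successors appear repeatedly, which is what makes
# random.choice respect their frequency.
#
#   create_tf_matrix("the cat sat on the mat")
#   # -> {'the': ['cat', 'mat'], 'cat': ['sat'], 'sat': ['on'], 'on': ['the']}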
|
#!/usr/bin/env python3
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
# This script builds and then uploads the Dart client sample app to AppEngine,
# where it is accessible by visiting http://dart.googleplex.com.
import optparse
import os
import sys
from os.path import abspath, dirname, join, relpath
APP_PATH = os.getcwd()
CLIENT_TOOLS_PATH = dirname(abspath(__file__))
CLIENT_PATH = dirname(CLIENT_TOOLS_PATH)
# Add the client tools directory so we can find htmlconverter.py.
sys.path.append(CLIENT_TOOLS_PATH)
import htmlconverter
def convertOne(infile, options):
outDirBase = 'outcode'
outfile = join(outDirBase, infile)
print('converting %s to %s' % (infile, outfile))
if 'dart' in options.target:
htmlconverter.convertForDartium(infile, outDirBase,
outfile.replace('.html', '-dart.html'),
options.verbose)
if 'js' in options.target:
htmlconverter.convertForChromium(infile, options.dartc_extra_flags,
outfile.replace('.html', '-js.html'),
options.verbose)
def Flags():
""" Constructs a parser for extracting flags from the command line. """
result = optparse.OptionParser()
result.add_option("-t",
"--target",
help="The target html to generate",
metavar="[js,dart]",
default='js,dart')
result.add_option("--verbose",
help="Print verbose output",
default=False,
action="store_true")
result.add_option("--dartc_extra_flags",
help="Additional flag text to pass to dartc",
default="",
action="store")
#result.set_usage("update.py input.html -o OUTDIR -t chromium,dartium")
return result
def getAllHtmlFiles():
htmlFiles = []
for filename in os.listdir(APP_PATH):
fName, fExt = os.path.splitext(filename)
if fExt.lower() == '.html':
htmlFiles.append(filename)
return htmlFiles
def main():
os.chdir(CLIENT_PATH) # TODO(jimhug): I don't like chdir's in scripts...
parser = Flags()
options, args = parser.parse_args()
#if len(args) < 1 or not options.out or not options.target:
# parser.print_help()
# return 1
REL_APP_PATH = relpath(APP_PATH)
for file in getAllHtmlFiles():
infile = join(REL_APP_PATH, file)
convertOne(infile, options)
if __name__ == '__main__':
main()
|
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
import random
import unittest
import numpy as np
import six
from six.moves import range, zip
from smqtk.algorithms import get_nn_index_impls
from smqtk.algorithms.nn_index.faiss import FaissNearestNeighborsIndex
from smqtk.exceptions import ReadOnlyError
from smqtk.representation.data_element.memory_element import (
DataMemoryElement,
)
from smqtk.representation.descriptor_element.local_elements import (
DescriptorMemoryElement,
)
from smqtk.representation.descriptor_index.memory import MemoryDescriptorIndex
from smqtk.representation.key_value.memory import MemoryKeyValueStore
if FaissNearestNeighborsIndex.is_usable():
class TestFAISSIndex (unittest.TestCase):
RAND_SEED = 42
def _make_inst(self, descriptor_set=None, idx2uid_kvs=None,
uid2idx_kvs=None, **kwargs):
"""
Make an instance of FaissNearestNeighborsIndex
"""
if 'random_seed' not in kwargs:
kwargs.update(random_seed=self.RAND_SEED)
if descriptor_set is None:
descriptor_set = MemoryDescriptorIndex()
if idx2uid_kvs is None:
idx2uid_kvs = MemoryKeyValueStore()
if uid2idx_kvs is None:
uid2idx_kvs = MemoryKeyValueStore()
return FaissNearestNeighborsIndex(descriptor_set, idx2uid_kvs,
uid2idx_kvs, **kwargs)
def test_impl_findable(self):
self.assertIn(FaissNearestNeighborsIndex.__name__,
get_nn_index_impls())
def test_configuration(self):
# Make configuration based on default
c = FaissNearestNeighborsIndex.get_default_config()
self.assertIn('MemoryDescriptorIndex', c['descriptor_set'])
c['descriptor_set']['type'] = 'MemoryDescriptorIndex'
self.assertIn('MemoryKeyValueStore', c['idx2uid_kvs'])
c['idx2uid_kvs']['type'] = 'MemoryKeyValueStore'
self.assertIn('MemoryKeyValueStore', c['uid2idx_kvs'])
c['uid2idx_kvs']['type'] = 'MemoryKeyValueStore'
self.assertIn('DataMemoryElement', c['index_element'])
c['index_element']['type'] = 'DataMemoryElement'
self.assertIn('DataMemoryElement', c['index_param_element'])
c['index_param_element']['type'] = 'DataMemoryElement'
# # Build based on configuration
index = FaissNearestNeighborsIndex.from_config(c)
self.assertEqual(index.factory_string, 'IVF1,Flat')
self.assertIsInstance(index.factory_string, six.string_types)
# Test that constructing a new instance from ``index``'s config
# yields an index with the same configuration (idempotent).
index2 = FaissNearestNeighborsIndex.from_config(
index.get_config())
self.assertEqual(index.get_config(), index2.get_config())
def test_configuration_null_persistence(self):
# Make configuration based on default
c = FaissNearestNeighborsIndex.get_default_config()
c['descriptor_set']['type'] = 'MemoryDescriptorIndex'
c['idx2uid_kvs']['type'] = 'MemoryKeyValueStore'
c['uid2idx_kvs']['type'] = 'MemoryKeyValueStore'
# # Build based on configuration
index = FaissNearestNeighborsIndex.from_config(c)
self.assertEqual(index.factory_string, 'IVF1,Flat')
self.assertIsInstance(index.factory_string, six.string_types)
# Test that constructing a new instance from ``index``'s config
# yields an index with the same configuration (idempotent).
index2 = FaissNearestNeighborsIndex.from_config(
index.get_config())
self.assertEqual(index.get_config(), index2.get_config())
def test_build_index_read_only(self):
v = np.zeros(5, float)
v[0] = 1.
d = DescriptorMemoryElement('unit', 0)
d.set_vector(v)
test_descriptors = [d]
index = self._make_inst(read_only=True)
self.assertRaises(
ReadOnlyError,
index.build_index, test_descriptors
)
def test_update_index_no_input(self):
index = self._make_inst()
self.assertRaises(
ValueError,
index.update_index, []
)
def test_update_index_new_index(self):
n = 100
dim = 8
d_index = [DescriptorMemoryElement('test', i) for i in range(n)]
[d.set_vector(np.random.rand(dim)) for d in d_index]
index = self._make_inst()
index.update_index(d_index)
self.assertEqual(index.count(), 100)
for d in d_index:
self.assertIn(d, index._descriptor_set)
# Check that NN can return stuff from the set used.
# - nearest element to the query element when the query is in the
# index should be the query element.
random.seed(self.RAND_SEED)
for _ in range(10):
i = random.randint(0, n-1)
q = d_index[i]
n_elems, n_dists = index.nn(q)
self.assertEqual(n_elems[0], q)
def test_update_index_additive(self):
n1 = 100
n2 = 10
dim = 8
set1 = {DescriptorMemoryElement('test', i) for i in range(n1)}
set2 = {DescriptorMemoryElement('test', i)
for i in range(n1, n1+n2)}
[d.set_vector(np.random.rand(dim)) for d in (set1 | set2)]
# Create and build initial index.
index = self._make_inst()
index.build_index(set1)
self.assertEqual(index.count(), len(set1))
for d in set1:
self.assertIn(d, index._descriptor_set)
# Update and check that all intended descriptors are present in
# index.
index.update_index(set2)
set_all = set1 | set2
self.assertEqual(index.count(), len(set_all))
for d in set_all:
self.assertIn(d, index._descriptor_set)
# Check that NN can return something from the updated set.
# - nearest element to the query element when the query is in the
# index should be the query element.
for q in set_all:
n_elems, n_dists = index.nn(q)
self.assertEqual(n_elems[0], q)
def test_persistence_with_update_index(self):
n1 = 100
n2 = 10
dim = 8
set1 = {DescriptorMemoryElement('test', i) for i in range(n1)}
set2 = {DescriptorMemoryElement('test', i)
for i in range(n1, n1+n2)}
[d.set_vector(np.random.rand(dim)) for d in (set1 | set2)]
# Create index with persistent entities
index_element = DataMemoryElement(
content_type='application/octet-stream')
index_param_element = DataMemoryElement(
content_type='text/plain')
index = self._make_inst(
index_element=index_element,
index_param_element=index_param_element)
descriptor_set = index._descriptor_set
idx2uid_kvs = index._idx2uid_kvs
uid2idx_kvs = index._uid2idx_kvs
# Build initial index.
index.build_index(set1)
self.assertEqual(index.count(), len(set1))
for d in set1:
self.assertIn(d, index._descriptor_set)
# Update and check that all intended descriptors are present in
# index.
index.update_index(set2)
set_all = set1 | set2
self.assertEqual(index.count(), len(set_all))
for d in set_all:
self.assertIn(d, index._descriptor_set)
del index
index = self._make_inst(
descriptor_set=descriptor_set,
idx2uid_kvs=idx2uid_kvs,
uid2idx_kvs=uid2idx_kvs,
index_element=index_element,
index_param_element=index_param_element)
# Check that NN can return something from the updated set.
# - nearest element to the query element when the query is in the
# index should be the query element.
for q in set_all:
n_elems, n_dists = index.nn(q)
self.assertEqual(n_elems[0], q)
def test_remove_from_index_readonly(self):
"""
Test that we cannot call remove when the instance is read-only.
"""
index = self._make_inst(read_only=True)
self.assertRaises(
ReadOnlyError,
index.remove_from_index, [0]
)
def test_remove_from_index_keyerror_empty_index(self):
"""
Test that any key should cause a key error on an empty index.
"""
index = self._make_inst()
self.assertRaisesRegexp(
KeyError, '0',
index.remove_from_index, [0]
)
self.assertRaisesRegexp(
KeyError, '0',
index.remove_from_index, ['0']
)
# Only includes the first key that's erroneous in the KeyError inst
self.assertRaisesRegexp(
KeyError, '0',
index.remove_from_index, [0, 'other']
)
def test_remove_from_index_keyerror(self):
"""
Test that we do not impact the index by trying to remove an invalid
key.
"""
n = 100
dim = 8
dset = {DescriptorMemoryElement('test', i) for i in range(n)}
[d.set_vector(np.random.rand(dim)) for d in dset]
index = self._make_inst()
index.build_index(dset)
# Try removing 2 invalid entries
self.assertRaises(
KeyError,
index.remove_from_index, [100, 'something']
)
# Make sure that all indexed descriptors correctly return
# themselves from an NN call.
for d in dset:
self.assertEqual(index.nn(d, 1)[0][0], d)
def test_remove_from_index(self):
"""
Test that we can actually remove from the index.
"""
n = 100
dim = 8
dset = {DescriptorMemoryElement('test', i) for i in range(n)}
[d.set_vector(np.random.rand(dim)) for d in dset]
index = self._make_inst()
index.build_index(dset)
# Try removing two valid descriptors
uids_to_remove = [10, 98]
index.remove_from_index(uids_to_remove)
# Check that every other element is still in the index.
self.assertEqual(len(index), 98)
for d in dset:
if d.uuid() not in uids_to_remove:
self.assertEqual(index.nn(d, 1)[0][0], d)
# Check that descriptors matching removed uids cannot be queried
# out of the index.
for d in dset:
if d.uuid() in uids_to_remove:
self.assertNotEqual(index.nn(d, 1)[0][0], d)
def test_remove_then_add(self):
"""
Test that we can remove from the index and then add to it again.
"""
n1 = 100
n2 = 10
dim = 8
set1 = [DescriptorMemoryElement('test', i) for i in range(n1)]
set2 = [DescriptorMemoryElement('test', i)
for i in range(n1, n1 + n2)]
[d.set_vector(np.random.rand(dim)) for d in (set1 + set2)]
uids_to_remove = [10, 98]
index = self._make_inst()
index.build_index(set1)
index.remove_from_index(uids_to_remove)
index.update_index(set2)
self.assertEqual(len(index), 108)
# Removed descriptors should not be in return queries.
self.assertNotEqual(index.nn(set1[10], 1)[0][0], set1[10])
self.assertNotEqual(index.nn(set1[98], 1)[0][0], set1[98])
# Every other descriptor should be queryable
for d in set1 + set2:
if d.uuid() not in uids_to_remove:
self.assertEqual(index.nn(d, 1)[0][0], d)
self.assertEqual(index._next_index, 110)
def test_nn_many_descriptors(self):
np.random.seed(0)
n = 10 ** 4
dim = 256
d_index = [DescriptorMemoryElement('test', i) for i in range(n)]
[d.set_vector(np.random.rand(dim)) for d in d_index]
q = DescriptorMemoryElement('q', -1)
q.set_vector(np.zeros((dim,)))
faiss_index = self._make_inst()
faiss_index.build_index(d_index)
nbrs, dists = faiss_index.nn(q, 10)
self.assertEqual(len(nbrs), len(dists))
self.assertEqual(len(nbrs), 10)
def test_nn_non_flat_index(self):
faiss_index = self._make_inst(factory_string='IVF256,Flat')
self.assertEqual(faiss_index.factory_string, 'IVF256,Flat')
np.random.seed(self.RAND_SEED)
n = 10 ** 4
dim = 256
d_index = [DescriptorMemoryElement('test', i) for i in range(n)]
[d.set_vector(np.random.rand(dim)) for d in d_index]
q = DescriptorMemoryElement('q', -1)
q.set_vector(np.zeros((dim,)))
faiss_index.build_index(d_index)
nbrs, dists = faiss_index.nn(q, 10)
self.assertEqual(len(nbrs), len(dists))
self.assertEqual(len(nbrs), 10)
def test_nn_preprocess_index(self):
faiss_index = self._make_inst(factory_string='PCAR64,IVF1,Flat')
self.assertEqual(faiss_index.factory_string, 'PCAR64,IVF1,Flat')
np.random.seed(self.RAND_SEED)
n = 10 ** 4
dim = 256
d_index = [DescriptorMemoryElement('test', i) for i in range(n)]
[d.set_vector(np.random.rand(dim)) for d in d_index]
q = DescriptorMemoryElement('q', -1)
q.set_vector(np.zeros((dim,)))
faiss_index.build_index(d_index)
nbrs, dists = faiss_index.nn(q, 10)
self.assertEqual(len(nbrs), len(dists))
self.assertEqual(len(nbrs), 10)
def test_nn_known_descriptors_euclidean_unit(self):
dim = 5
###
# Unit vectors -- Equal distance
#
index = self._make_inst()
test_descriptors = []
for i in range(dim):
v = np.zeros(dim, float)
v[i] = 1.
d = DescriptorMemoryElement('unit', i)
d.set_vector(v)
test_descriptors.append(d)
index.build_index(test_descriptors)
# query descriptor -- zero vector
# -> all modeled descriptors should be equally distant (unit
# corners)
q = DescriptorMemoryElement('query', 0)
q.set_vector(np.zeros(dim, float))
r, dists = index.nn(q, n=dim)
self.assertEqual(len(dists), dim)
# All dists should be 1.0, r order doesn't matter
for d in dists:
self.assertEqual(d, 1.)
def test_nn_known_descriptors_nearest(self):
dim = 5
###
# Unit vectors -- Equal distance
#
index = self._make_inst()
test_descriptors = []
vectors = np.eye(dim, dtype=np.float32)
for i in range(dim):
d = DescriptorMemoryElement('unit', i)
d.set_vector(vectors[i])
test_descriptors.append(d)
index.build_index(test_descriptors)
# query descriptor -- first point
q = DescriptorMemoryElement('query', 0)
q.set_vector(vectors[0])
r, dists = index.nn(q)
self.assertEqual(len(dists), 1)
# Distance should be zero
self.assertEqual(dists[0], 0.)
            np.testing.assert_array_equal(r[0].vector(), vectors[0])
def test_nn_known_descriptors_euclidean_ordered(self):
index = self._make_inst()
# make vectors to return in a known euclidean distance order
i = 100
test_descriptors = []
for j in range(i):
d = DescriptorMemoryElement('ordered', j)
d.set_vector(np.array([j, j*2], float))
test_descriptors.append(d)
random.shuffle(test_descriptors)
index.build_index(test_descriptors)
# Since descriptors were built in increasing distance from (0,0),
# returned descriptors for a query of [0,0] should be in index
# order.
q = DescriptorMemoryElement('query', 99)
q.set_vector(np.array([0, 0], float))
r, dists = index.nn(q, n=i)
self.assertEqual(len(dists), i)
for j, d, dist in zip(range(i), r, dists):
self.assertEqual(d.uuid(), j)
np.testing.assert_equal(d.vector(), [j, j*2])
|
# encoding=utf8
"""
Author: 'jdwang'
Date: 'create date: 2017-01-13'; 'last updated date: 2017-01-13'
Email: '383287471@qq.com'
Describe: Parent class of all attribute rules: e.g. Price and similar classes subclass this one
"""
from __future__ import print_function
__version__ = '1.3'
import re
from info_meta_data import InfoMetadata
class RegexBase(object):
"""
    Parent class of all attribute rules: e.g. Price and similar classes subclass this one
"""
    # Attribute name
    name = ''
    # The sentence to process
    sentence = ''
    # Statement-style rules --- usually attribute statements (e.g. price, price range)
    statement_regexs = None
value_regexs = None
    def __init__(self):
        # List of extracted valid semantic matches
        # Each element is an InfoMetadata object
        self.info_meta_data_list = []
def __str__(self):
return u'\n------------------\n'.join([unicode(item) for item in self.info_meta_data_list])
# return ''
# return u'\n------------------\n'.join([unicode(item) for item in self.info_meta_data_list])
def to_dict(self):
"""
        Convert to a dictionary
:return:
"""
return {idx: item.to_dict() for idx, item in enumerate(self.info_meta_data_list)}
def regex_process(self):
        # Match values seen so far
        present_regex_values = set()
        # region Process the statement-style regex list
pattern = '|'.join(['(%s)' % regex for regex in self.statement_regexs])
pattern = pattern.decode('utf8')
# print(pattern)
match_result = re.search(pattern, self.sentence)
if match_result:
info_meta_data = InfoMetadata()
info_meta_data.raw_data = self.sentence
info_meta_data.regex_name = self.name
info_meta_data.regex_value = match_result.group(0)
info_meta_data.is_statement = True
info_meta_data.left_index = match_result.start(0)
info_meta_data.right_index = match_result.end(0)
            # Append
self.info_meta_data_list.append(info_meta_data)
present_regex_values.add(info_meta_data.regex_value)
# endregion
        # region Process the value-style regex list
pattern = '|'.join(['(%s)' % regex for regex in self.value_regexs])
pattern = pattern.decode('utf8')
# print(pattern)
# match_result = re.findall(pattern, self.sentence)
for item in re.finditer(pattern, self.sentence):
            # Iterate over every matched semantic chunk
            if item.group(0) not in present_regex_values:
                # Only append if it has not been seen before
info_meta_data = InfoMetadata()
info_meta_data.raw_data = self.sentence
info_meta_data.regex_name = self.name
info_meta_data.regex_value = item.group(0)
info_meta_data.is_statement = False
info_meta_data.left_index = item.start(0)
info_meta_data.right_index = item.end(0)
                # Append
self.info_meta_data_list.append(info_meta_data)
# endregion
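# Illustrative sketch (an assumption, not from the original source): a minimal
# subclass in the style the docstring describes. The attribute values below are
# placeholders; a real Price rule would define its own patterns and set
# `sentence` before calling regex_process().
#
#   class Price(RegexBase):
#       name = u'Price'
#       statement_regexs = [u'...']   # statement-style patterns
#       value_regexs = [u'...']       # value-style patterns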
|
from django.contrib import admin
from .models import *
@admin.register(Category,Categorylevel,Qualification,Maindocument,Staffdocument)
class ViewAdmin(admin.ModelAdmin):
pass
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from internal_plugins.test_lockfile_fixtures.lockfile_fixture import (
JVMLockfileFixture,
JVMLockfileFixtureDefinition,
)
from pants.backend.codegen.protobuf.java.rules import GenerateJavaFromProtobufRequest
from pants.backend.codegen.protobuf.java.rules import rules as java_protobuf_rules
from pants.backend.codegen.protobuf.target_types import (
ProtobufSourceField,
ProtobufSourcesGeneratorTarget,
)
from pants.backend.codegen.protobuf.target_types import rules as target_types_rules
from pants.backend.experimental.java.register import rules as java_backend_rules
from pants.backend.java.compile.javac import CompileJavaSourceRequest
from pants.backend.java.target_types import JavaSourcesGeneratorTarget, JavaSourceTarget
from pants.engine.addresses import Address
from pants.engine.target import GeneratedSources, HydratedSources, HydrateSourcesRequest
from pants.jvm import testutil
from pants.jvm.target_types import JvmArtifactTarget
from pants.jvm.testutil import (
RenderedClasspath,
expect_single_expanded_coarsened_target,
make_resolve,
)
from pants.testutil.rule_runner import QueryRule, RuleRunner
GRPC_PROTO_STANZA = """
syntax = "proto3";
package dir1;
// The greeter service definition.
service Greeter {
// Sends a greeting
rpc SayHello (HelloRequest) returns (HelloReply) {}
}
// The request message containing the user's name.
message HelloRequest {
string name = 1;
}
// The response message containing the greetings
message HelloReply {
string message = 1;
}
"""
@pytest.fixture
def protobuf_java_lockfile_def() -> JVMLockfileFixtureDefinition:
return JVMLockfileFixtureDefinition(
"protobuf-java.test.lock",
["com.google.protobuf:protobuf-java:3.19.4"],
)
@pytest.fixture
def protobuf_java_lockfile(
protobuf_java_lockfile_def: JVMLockfileFixtureDefinition, request
) -> JVMLockfileFixture:
return protobuf_java_lockfile_def.load(request)
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*java_backend_rules(),
*java_protobuf_rules(),
*target_types_rules(),
*testutil.rules(),
QueryRule(HydratedSources, [HydrateSourcesRequest]),
QueryRule(GeneratedSources, [GenerateJavaFromProtobufRequest]),
QueryRule(RenderedClasspath, (CompileJavaSourceRequest,)),
],
target_types=[
ProtobufSourcesGeneratorTarget,
JavaSourceTarget,
JavaSourcesGeneratorTarget,
JvmArtifactTarget,
],
)
def assert_files_generated(
rule_runner: RuleRunner,
address: Address,
*,
expected_files: list[str],
source_roots: list[str],
extra_args: list[str] | None = None,
) -> None:
args = [f"--source-root-patterns={repr(source_roots)}", *(extra_args or ())]
rule_runner.set_options(args, env_inherit={"PATH", "PYENV_ROOT", "HOME"})
tgt = rule_runner.get_target(address)
protocol_sources = rule_runner.request(
HydratedSources, [HydrateSourcesRequest(tgt[ProtobufSourceField])]
)
generated_sources = rule_runner.request(
GeneratedSources,
[GenerateJavaFromProtobufRequest(protocol_sources.snapshot, tgt)],
)
assert set(generated_sources.snapshot.files) == set(expected_files)
def test_generates_java(
rule_runner: RuleRunner, protobuf_java_lockfile: JVMLockfileFixture
) -> None:
# This tests a few things:
# * We generate the correct file names.
# * Protobuf files can import other protobuf files, and those can import others
# (transitive dependencies). We'll only generate the requested target, though.
# * We can handle multiple source roots, which need to be preserved in the final output.
rule_runner.write_files(
{
"src/protobuf/dir1/f.proto": dedent(
"""\
syntax = "proto3";
option java_package = "org.pantsbuild.java.proto";
package dir1;
message Person {
string name = 1;
int32 id = 2;
string email = 3;
}
"""
),
"src/protobuf/dir1/f2.proto": dedent(
"""\
syntax = "proto3";
package dir1;
"""
),
"src/protobuf/dir1/BUILD": "protobuf_sources()",
"src/protobuf/dir2/f.proto": dedent(
"""\
syntax = "proto3";
package dir2;
import "dir1/f.proto";
"""
),
"src/protobuf/dir2/BUILD": "protobuf_sources(dependencies=['src/protobuf/dir1'])",
# Test another source root.
"tests/protobuf/test_protos/f.proto": dedent(
"""\
syntax = "proto3";
package test_protos;
import "dir2/f.proto";
"""
),
"tests/protobuf/test_protos/BUILD": (
"protobuf_sources(dependencies=['src/protobuf/dir2'])"
),
"3rdparty/jvm/default.lock": protobuf_java_lockfile.serialized_lockfile,
"3rdparty/jvm/BUILD": protobuf_java_lockfile.requirements_as_jvm_artifact_targets(),
"src/jvm/BUILD": "java_sources(dependencies=['src/protobuf/dir1'])",
"src/jvm/TestJavaProtobuf.java": dedent(
"""\
package org.pantsbuild.java.example;
import org.pantsbuild.java.proto.F.Person;
public class TestJavaProtobuf {
Person person;
}
"""
),
}
)
def assert_gen(addr: Address, expected: str) -> None:
assert_files_generated(
rule_runner,
addr,
source_roots=["src/python", "/src/protobuf", "/tests/protobuf"],
expected_files=[expected],
)
assert_gen(
Address("src/protobuf/dir1", relative_file_path="f.proto"),
"src/protobuf/org/pantsbuild/java/proto/F.java",
)
assert_gen(
Address("src/protobuf/dir1", relative_file_path="f2.proto"), "src/protobuf/dir1/F2.java"
)
assert_gen(
Address("src/protobuf/dir2", relative_file_path="f.proto"), "src/protobuf/dir2/F.java"
)
assert_gen(
Address("tests/protobuf/test_protos", relative_file_path="f.proto"),
"tests/protobuf/test_protos/F.java",
)
request = CompileJavaSourceRequest(
component=expect_single_expanded_coarsened_target(
rule_runner, Address(spec_path="src/jvm")
),
resolve=make_resolve(rule_runner),
)
_ = rule_runner.request(RenderedClasspath, [request])
@pytest.fixture
def protobuf_java_grpc_lockfile_def() -> JVMLockfileFixtureDefinition:
return JVMLockfileFixtureDefinition(
"protobuf-grpc-java.test.lock",
[
"com.google.protobuf:protobuf-java:3.19.4",
"io.grpc:grpc-netty-shaded:1.48.0",
"io.grpc:grpc-protobuf:1.48.0",
"io.grpc:grpc-stub:1.48.0",
"org.apache.tomcat:annotations-api:6.0.53",
],
)
@pytest.fixture
def protobuf_java_grpc_lockfile(
protobuf_java_grpc_lockfile_def: JVMLockfileFixtureDefinition, request
) -> JVMLockfileFixture:
return protobuf_java_grpc_lockfile_def.load(request)
def test_generates_grpc_java(
rule_runner: RuleRunner, protobuf_java_grpc_lockfile: JVMLockfileFixture
) -> None:
rule_runner.write_files(
{
"protos/BUILD": "protobuf_sources(grpc=True)",
"protos/service.proto": dedent(
"""\
syntax = "proto3";
option java_package = "org.pantsbuild.java.proto";
package service;
message TestMessage {
string foo = 1;
}
service TestService {
rpc noStreaming (TestMessage) returns (TestMessage);
rpc clientStreaming (stream TestMessage) returns (TestMessage);
rpc serverStreaming (TestMessage) returns (stream TestMessage);
rpc bothStreaming (stream TestMessage) returns (stream TestMessage);
}
"""
),
"3rdparty/jvm/default.lock": protobuf_java_grpc_lockfile.serialized_lockfile,
"3rdparty/jvm/BUILD": protobuf_java_grpc_lockfile.requirements_as_jvm_artifact_targets(),
"src/jvm/BUILD": "java_sources(dependencies=['protos'])",
"src/jvm/TestJavaProtobufGrpc.java": dedent(
"""\
package org.pantsbuild.java.example;
import org.pantsbuild.java.proto.TestServiceGrpc;
public class TestJavaProtobufGrpc {
TestServiceGrpc service;
}
"""
),
}
)
assert_files_generated(
rule_runner,
Address("protos", relative_file_path="service.proto"),
source_roots=["/"],
expected_files=[
"org/pantsbuild/java/proto/Service.java",
"org/pantsbuild/java/proto/TestServiceGrpc.java",
],
)
request = CompileJavaSourceRequest(
component=expect_single_expanded_coarsened_target(
rule_runner, Address(spec_path="src/jvm")
),
resolve=make_resolve(rule_runner),
)
_ = rule_runner.request(RenderedClasspath, [request])
|
import cv2
img = cv2.imread('C:/Users/eugur/Downloads/Jupyter_lab/Computer-Vision-with-Python/DATA/00-puppy.jpg')
while True:
cv2.imshow('Puppy',img)
    # break once we have waited at least 1 ms AND the Escape key (27) was pressed
    if cv2.waitKey(1) & 0xff == 27:
break
cv2.destroyAllWindows()
|
sexo = str(input('Informe seu sexo: [M/F] → ')).upper()
while sexo != 'M' and sexo != 'F':
sexo = str(input('Dados inválidos! Por favor, informe seu sexo: ')).upper()
print(f'Sexo {sexo} registrado com sucesso!')
|
import requests
from bs4 import BeautifulSoup
url = 'https://en.wikipedia.org/wiki/Pawan_Kalyan'
resp1 = requests.get(url)
print(resp1)
resp = resp1.content  # reuse the earlier response instead of fetching the page twice
# print(resp)
soup = BeautifulSoup(resp, 'html.parser')
headlines = soup.find('h1', id='firstHeading')
print(headlines.text)
info = soup.find_all('p')
for infos in info:
title = infos.find('a')
print(title)
|
from train_config import *
import torch.nn.functional as F
def main(args):
print("Arguments: ", args)
print("cpus:", os.sched_getaffinity(0))
_, testset, _, index = u.load_data(
data_dir=args.data_dir,
data_files=args.data_files,
test_size=1.,
classify=args.classify,
rlim=.9 if args.classify else None,
llim=.4 if args.classify else None,
topology=args.topology,
return_index=True,
from_scratch=True,
dropna=True,
preprocessor=args.preprocessor,
train_test_file=args.outdir/'test_data.npz')
if args.config is None:
args.config = args.model_dir/'model.config'
config = load(args.config)
best_trained_model = models.load_model(
config,
testset,
checkpoint_dir=args.model_dir)
best_trained_model.eval()
test_acc, preds = models.test_error(
best_trained_model,
testset,
loss=F.nll_loss if args.topology or args.classify else F.l1_loss,
predict=True)
# y = testset
print("Best trial test error: {}".format(test_acc))
preds = preds.cpu()
if len(preds.shape) > 1 and preds.shape[1] > 1:
res = pd.DataFrame(dict(enumerate(preds.T)), index=index)
res['y_true'] = testset.tensors[1]
else:
res = pd.DataFrame(
{'preds': preds.squeeze().cpu(),
'y_true': testset.tensors[1]},
index=index)
if args.classify:
res['pred_class'] = res.preds > args.threshold
res.to_csv(
args.outdir/'preds.csv.gz',
compression='gzip')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Test.")
parser.add_argument(
"--config",
type=Path,
help="path to config dict (joblib pickle)",
)
parser.add_argument(
"--threshold",
type=float,
default=.9511,
help="threshold for DNN-Class. Ignored if args.classify is False.",
)
parser.add_argument(
"--topology",
action="store_true",
help="predict topology",
)
parser.add_argument(
"--classify",
action="store_true",
help="predict binary target.",
)
parser.add_argument(
"--model_dir",
help="directory of trained model",
type=Path
)
parser.add_argument(
"--outdir",
help="dir to store data matrix and prediction file",
type=Path,
)
parser.add_argument(
"--data_files",
"-i",
type=Path,
nargs="+",
default=None,
help="input hdf5 file(s) "
)
parser.add_argument(
"--data_dir",
type=Path,
default=None,
help="dir containing input files"
)
parser.add_argument(
"--preprocessor",
type=Path,
default=None,
help="path to sklearn preprocessor"
)
args = parser.parse_args()
args.outdir.mkdir(parents=True, exist_ok=True)
main(args)
|
import pde
from matplotlib import pyplot as plt
import sys
import logging
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
f,a = plt.subplots(3)
prob = pde.problems.instpoisson1d.unforced
prob.plot(a)
solver = pde.solvers.linearsolver(prob)
solver.solve()
solver.plot(a)
plt.show()
|
from abc import ABCMeta, abstractmethod
import os
import os.path as osp
class BaseManager(metaclass=ABCMeta):
def __init__(self, params):
self.params = params
self.ROOT = params["ROOT"]
self.WORK_DIR = params["WORK_DIR"]
self.raw_dirname = params["raw_dirname"]
self.data_path = osp.join(self.ROOT, "input", self.raw_dirname)
self.val_preds_path = osp.join(self.WORK_DIR, "val_preds")
self.preds_path = osp.join(self.WORK_DIR, "preds")
self.weight_path = osp.join(self.WORK_DIR, "weight")
self.seeds = params["seeds"]
self.debug = params["debug"]
self.voc_classes = ['aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor']
self.image_size = params["image_size"]
if not osp.exists(self.val_preds_path): os.mkdir(self.val_preds_path)
if not osp.exists(self.weight_path): os.mkdir(self.weight_path)
if not osp.exists(self.preds_path): os.mkdir(self.preds_path)
def get(self, key):
try:
return self.params[key]
        except KeyError:
raise ValueError(f"No such value in params, {key}")
@abstractmethod
def __call__(self):
raise NotImplementedError
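# Minimal sketch (an assumption, not from the original repo): BaseManager is
# abstract, so a concrete manager only needs to implement __call__, e.g.
#
#   class TrainManager(BaseManager):
#       def __call__(self):
#           print("training with seeds:", self.seeds)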
|
import numpy as np
import math
from oval import Oval
from roots_finder import RootsFinder
class WignerCaustic:
"""
Class generating wigner caustic for the given oval
"""
def __init__(self, oval: Oval):
self.oval = oval
def wigner_caustic(self):
parameterization_t = self.oval.parameterization()
parameterization_t_pi = self.oval.parameterization(math.pi)
wigner_caustic_0 = self._wigner_caustic_i(parameterization_t, parameterization_t_pi, 0)
wigner_caustic_1 = self._wigner_caustic_i(parameterization_t, parameterization_t_pi, 1)
return (wigner_caustic_0, wigner_caustic_1)
def get_number_of_cusps(self):
roots_finder = RootsFinder(self._cusps_condition_function)
cusps = roots_finder.naive_global_newton(0, math.pi, 100)
return len(cusps)
def _wigner_caustic_i(self, parameterization_t, parameterization_t_pi, idx):
return (parameterization_t[idx] + parameterization_t_pi[idx]) / 2
def _cusps_condition_function(self, t):
params_len = len(self.oval.sin_params)
equation = 0
for i in range(params_len):
if i % 2 == 0:
arg = (i + 1) * t
sin_with_param = self.oval.sin_params[i] * np.sin(arg)
cos_with_param = self.oval.cos_params[i] * np.cos(arg)
equation += (1 - (i + 1) ** 2) * (sin_with_param + cos_with_param)
return 2 * equation
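# Illustrative usage sketch (the Oval constructor arguments below are an
# assumption; only the sin_params/cos_params attributes are implied by the code
# above):
#
#   oval = Oval(sin_params=[...], cos_params=[...])
#   wc = WignerCaustic(oval)
#   x_mid, y_mid = wc.wigner_caustic()   # midpoints of gamma(t) and gamma(t + pi)
#   n_cusps = wc.get_number_of_cusps()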
|
from sklearn.datasets import make_moons
from fuzzy_c.my_lib import main_execution_gmm
if __name__ == '__main__':
N = 300
data = make_moons(n_samples=N, noise=0.05, random_state=1)
X = data[0]
Y = data[1]
main_execution_gmm(X)
|
"""# 0. SETUP """
# --------------------------------------------------------------------
# --------------------------------------------------------------------
# --------------------------------------------------------------------
import sys
import pandas as pd
import math
import datetime as dt
from scipy.spatial.distance import pdist, squareform
import numpy as np
from itertools import product
from pyscipopt import Model, quicksum
"""# 1. READ IN DATA, create dataframe and adjancency matrix between locations"""
# --------------------------------------------------------------------
# --------------------------------------------------------------------
# --------------------------------------------------------------------
"""# 1.1 Make dataframe """
# --------------------------------------------------------------------
def wrangle(filename):
"""Function reads in file 'filename' of trip requests, and creates a dataframe of trip vertices, where
each pickup and dropoff is considered as a separate node.
The output dataframe has a first row (sink) and last row (source) of made-up nodes.
Output dataframe has columns:
x : x coordinate
y : y coordinate
E : earliest car can leave node
L : latest a car can leave node
Requester : name of passenger
trip_n : trip id (trips are identified by the row number in original input)
d : +1 if node is a pickup, -1 if node is a dropoff
"""
df = pd.read_csv(filename, sep = "\t", skiprows = 1, names = ('Requester', 'Trip','Depart After', 'Arrive Before', 'X1', 'Y1', 'X2', 'Y2'))
# Separate pickups and dropoffs:
pickups = df[['Requester','Depart After','Arrive Before', 'X1', 'Y1']].copy()
pickups = pickups.rename(columns={"X1": "x", "Y1": "y"})
pickups['d'] = 1
dropoffs = df[['Requester','Depart After','Arrive Before', 'X2', 'Y2']].copy()
dropoffs = dropoffs.rename(columns={"X2": "x", "Y2": "y"})
dropoffs['d'] = -1
# Concat pickup and dropoff dataframes:
df2 = pickups.append(dropoffs)
# Convert all times into datetime objects:
df2['Depart After'] = pd.to_datetime(df2['Depart After'],format='%H:%M')
df2['Arrive Before'] = pd.to_datetime(df2['Arrive Before'],format='%H:%M')
# M_E/M_L = global min/max times, respectively.
M_E = pd.to_datetime(df2['Depart After']).min()
M_L = pd.to_datetime(df2['Arrive Before']).max()
# Cleaning up the dataframe
df2 = df2.reset_index()
df2 = df2.rename(columns={"Depart After": "E", "Arrive Before": "L", 'index':'trip_n'})
# Adding artificial source/sink nodes (for the IP formulation)
depot_s = [[0, 0, M_E, M_L,'source', -1, 0]]
depot_t = [[0, 0, M_E, M_L,'sink', -1, 0]]
dfs = pd.DataFrame(depot_s, columns=['x', 'y', 'E', 'L', 'Requester', 'trip_n', 'd'])
dft = pd.DataFrame(depot_t, columns=['x', 'y', 'E', 'L', 'Requester', 'trip_n', 'd'])
df_output = dfs.append(df2)
df_ = df_output.append(dft)
# Cleaning up after dataframe is assembled
df_ = df_[['x', 'y', 'E','L','Requester','trip_n', 'd']]
# Convert all times into minutes using helper function (for the IP formulation)
df_['E'] = df_['E'].map(mins_since_midnight)
df_['L'] = df_['L'].map(mins_since_midnight)
df = df_.reset_index(drop=True)
return df
"""# 1.2 Helper functions """
# --------------------------------------------------------------------
def distance(a, b):
"""returns the travel time (scaled euclidean distance)
between points a = [Xa, Ya], and b = [Xb, Yb]"""
return (0.2)*math.sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2)
def mins_since_midnight(time):
"""returns minutes since midnight"""
zero_time = dt.datetime(1900, 1, 1)
timesince = time - zero_time
return timesince.seconds//60
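# Example: mins_since_midnight(dt.datetime(1900, 1, 1, 10, 5)) -> 605
# (10 hours and 5 minutes after the 1900-01-01 "midnight" reference).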
def distance_matrix(df2):
"""returns the time/distance between any two nodes
- creates a time/distance matrix between x/y coordinates
- if both nodes are "real" pu/do nodes, read from the distance matrix
- deals with special cases:
- "infinite" time (very big time) to travel to source
- zero time to travel from a source to any other node
- zero time to travel from any node to a sink
- "infinite" time big_num (very big time) to travel from a sink to any other node """
# pixels per minute
scaling_factor = 0.2
dist = squareform(pdist(df2[['x','y']],metric = 'euclidean'))*scaling_factor
# We return round(dist) due to problem instructions. Else, just return dist
dist = np.round(dist)
# We need to make sink/source (first and last indices) distance zero from every
# other node
# dist[i][j] is the distance from i to j
# "infinitely big distance"
big_dist = 2000
for i in range(len(dist[0])):
dist[i][0] = big_dist
dist[0][i] = 0
dist[-1][i] = big_dist
dist[i][-1] = 0
return np.round(dist)
def mins_to_time(mins):
"""turns mins back into HH:MM formated stringed"""
h = int(mins//60)
m = int(mins - h*60)
    if m >= 10:
        return str(h) + ":" + str(m)
    else:
        return str(h) + ":0" + str(m)
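# Example: mins_to_time(605) -> "10:05"; mins_to_time(75) -> "1:15";
# single-digit minutes are zero-padded, e.g. mins_to_time(61) -> "1:01".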
"""# 3. Optimization problem setup and solving"""
# --------------------------------------------------------------------
# --------------------------------------------------------------------
# --------------------------------------------------------------------
# Overview of model variables:
# ----------------------------------
# The model will fit variables:
# x_ij = 1 iff a car uses edge (i,j) in a tour (zero otherwise)
# w_i = the time at when a car LEAVES node v_i (zero otherwise)
# y_ik = 1 iff car k visits node i (zero otherwise)
# It will have the following dummy variable:
# c_i = the number of people that are in the car that leaves node i
# and the problem is described using variables:
# d_i = 1 iff i is a pickup node / = -1 if i is a dropoff node / = 0 otherwise
# [E_i, L_i] - each node has an associated time window [E,L]
# where E_i/L_i is the earliest/latest a car can leave node i
# t_ij is the time it takes to travel on edge (i,j)
def solve_ip(df2, K, silence_optimizer=True):
"""Defines and solves VRTW for K cars on input data df2"""
m = Model()
# Build t, distance matrix for df2 nodes
t = distance_matrix(df2)
# Helper function for indices
# ----------------------------------
def p_d(i):
"""returns the index for the dropoff of pickup node i"""
# REQUIRES df2 - is called inside solver where df2 is defined
return i + (len(df2)-2)//2
# Iterators for loops
# ----------------------------------
# range for all nodes (df2 nodes + K artificial sinks)
# (Note: we add K-1 because df2 already contains one copy of a sink node)
N = range(len(df2)+K-1)
# range of pickups
N_p = range(1,len(df2)//2)
    # range of dropoffs
N_d = range(len(df2)//2,len(df2)-1)
# range of sinks
N_sinks = range(len(df2)-1,len(df2)+K-1)
# K becomes range of cars
# We store num K as k_num
k_num = K
K = range(K)
# VARIABLES to fit
# ----------------------------------
# binary variable indicating if edge (i,j) is used in car k's tour
x = [[m.addVar(name = f"x{i},{j}", vtype="B") for j in N] for i in N]
# binary variable indicating if car k leaves node i
y = [[m.addVar(name = f"y{i},{k}", vtype="B") for k in K] for i in N]
# variable indicating the time at which the vehicle leaves node i
w = [m.addVar(name = f"w{i}", vtype="C") for i in N]
# variable indicating the number of people when the vehicle leaves node i
c = [m.addVar(name = f"c{i}", vtype="C") for i in N]
    # NO OBJECTIVE - (since we just want existence of a feasible solution)
# ----------------------------------
# CONSTRAINTS
# ----------------------------------
# Tour constraints
# ----------------------------------
# This ensures that each node is visited (in particular, a car leaves each node i) at least once:
# For each node i, we need x[i][j] to be 1 for at least one j
for i in N:
m.addCons(quicksum(x[i][j] for j in N if j != i) >= 1)
# This ensures that a subtour that enters i also leaves i
for i in N:
m.addCons(quicksum(x[a][i] for a in N if a !=i) - quicksum(x[i][b] for b in N if b !=i) == 0)
    # This ensures that, for a given trip request, the same car visits pickup and dropoff:
# the dropoff of node i is j, where j = p_d(i)
# and y_ik/y_jk = 1 iff car k visits node i/j respectively
for i,k in product(N_p,K):
m.addCons(y[i][k] - y[p_d(i)][k] == 0)
big_num = 5000
# This ensures that the car that enters i also leaves i
# This constraint is a linearized version of the constraint:
# (y_ik - y_jk)*x_ij = 0 (if edge (ij) is in a tour, then y_ik = y_jk)
for i,j,k in product(N,N,K):
# not for source nodes because they are special (all cars leave the source, node 0):
if i != 0 and j != 0:
m.addCons((y[i][k] - y[j][k]) - (1 - x[i][j])*big_num <= 0)
m.addCons((y[j][k] - y[i][k]) - (1 - x[i][j])*big_num <= 0)
# This ensures that each node (except for the sink, node 0) is served by one car:
# i.e. that there is one k for which y_ik = 1
for i in N:
if i != 0:
m.addCons(quicksum(y[i][k] for k in K) == 1)
# This specifies that each car k leaves the source, node 0:
for k in K:
m.addCons(y[0][k] == 1)
# This ensures that "sink node k" gets served by car k (one car exits "problem" at each sink)
# (Note that N_sinks iterates over sink nodes)
for k in K:
for i in N_sinks:
if i != len(df2)-1+k:
m.addCons(y[i][k] == 0)
else:
m.addCons(y[i][k] == 1)
# Time feasibility constraints
# ----------------------------------
# This ensures that a car never leaves a dropoff before its designated pickup
# w_i is the time that car leaves node i, and node i's dropoff is p_d(i)
for i in N_p:
m.addCons(w[p_d(i)] >= w[i])
    # This ensures that all pickup and dropoff times fall within the appropriate time windows
for i in N:
# pickup/dropoff times for "real" (x,y) locations:
if i < len(df2)-1:
m.addCons(w[i] >= df2['E'][i])
m.addCons(w[i] <= df2['L'][i])
# pickup/dropoffs for sink nodes:
else:
m.addCons(w[i] >= df2['E'][len(df2)-1])
m.addCons(w[i] <= df2['L'][len(df2)-1])
# This is the "time limit" constraint:
    # This ensures that a car can't leave node j before getting to node j (when (i,j) is in a tour)
# This constraint is a linearized version of the constraint:
# x_ij*(w_i + t_ij - w_j) <= 0
for i,j in product(N,N):
# if i is a sink (i.e. i >= len(df2)-1):
if i >= len(df2)-1:
# sinks can only go back to source node 0
if j != 0:
m.addCons(x[i][j] == 0)
# sinks can only go back to source node 0
else:
m.addCons(x[i][j] == 1)
# if j is a sink (i.e. j >= len(df2)-1): t_ij = 0 (going to a sink is "free")
elif j >= len(df2)-1:
t_temp = 0 # can't travel back in time (t_temp here for human comprehension)
m.addCons((w[i] + t_temp -w[j]) - (1-x[i][j])*big_num <= 0)
else:
m.addCons((w[i] + t[i][j] -w[j]) - (1-x[i][j])*big_num <= 0)
# Car capacity constraints
# ----------------------------------
# c_i, the number of people in a car when it leaves node i, must be between 0 and 3:
for i in N:
m.addCons(c[i] <= 3)
# no "negative" passengers, haha - no cheating IPs here!
m.addCons(c[i] >= 0)
# We know that there are 0 passengers in the car when it leaves the source
m.addCons(c[0] == 0)
# No car should arrive at a sink (i >= len(df2)-1) with people in it
for i in N_sinks:
m.addCons(c[i] == 0)
# This ensures that c updates correctly when we pickup/dropoff people.
# This constraint is a linearized version of the constraint:
# x_ij*(c_i-c_j+d_j) <= 0
# d_i = 1 iff i is a pickup node / = -1 if i is a dropoff node / = 0 otherwise
# It ensures that when edge (i,j) is part of a tour, the difference
    # between c_i and c_j is +/- 1 (if j is a pickup/dropoff respectively)
for i,j in product(N,N):
d = 0
# if j is a real node:
if j > 0 and j < len(df2)-1:
d = df2['d'][j]
m.addCons((c[i] + d -c[j]) - (1-x[i][j])*4 <= 0)
# EXTRA: We help the solver go faster by adding some values we know
# ----------------------------------
# All "back-edges" are zero.
# i.e. no edges can go from a dropoff to its pickup
for i in N_p:
m.addCons(x[p_d(i)][i] == 0)
    # No edges can go between nodes whose time windows are completely disjoint:
    # if node j's window closes before node i's opens, no edge can run from i
    # (or its dropoff) to j (or its dropoff).
    # (These nodes might be in the tour of the same car, but the edges won't)
    for i, j in product(N_p, N_p):
        if df2['L'][j] < df2['E'][i]:
            m.addCons(x[i][j] == 0)
            m.addCons(x[i][p_d(j)] == 0)
            m.addCons(x[p_d(i)][j] == 0)
            m.addCons(x[p_d(i)][p_d(j)] == 0)
# OPTIMIZE!!!
# ----------------------------------
# if silence_optimizer = true (in main) we suppress all output from solver
# (There's a lot of it, so I suppress it here by default.)
m.hideOutput(silence_optimizer)
m.optimize()
return m
"""# 4. Make the deliverable (itinerary for humans to read) """
# --------------------------------------------------------------------
# --------------------------------------------------------------------
# --------------------------------------------------------------------
# Make itinerary from solution:
# --------------------------------------------------------------------
def make_itinerary(m, df2, cars, path):
"""Function that produces an itinerary from a solved model object m
ASSUMES that m.getStatus() == "optimal"
Takes in input SOLVED model m, df2, num of cars needed (cars),
and path, the path to output file. Returns itinerary (as dataframe),
prints itinerary, AND saves itinerary to path."""
# 0. Extract values from m:
d_sol = {}
sol = m.getBestSol()
status = m.getStatus()
print("status:", status)
# Build d_sol:
for v in m.getVars():
if abs(sol[v]) > 0.5 :
d_sol[v.name] = sol[v]
# 1. Make a list of vertices visited per car:
# vertices is a list of trips (one per car), where
# each list in vertices is a list of stops (given as vertex ids), in the order they're visited
vertices = []
N = range(len(df2)+cars-1)
N_p = range(1,len(df2)//2)
for i in N_p:
destinations = []
# find locations j (nodes) visited by car i
if f"x0,{i}" in d_sol:
while i < len(df2)-1:
for j in N:
if f"x{i},{j}" in d_sol:
destinations.append(i)
i = j
vertices.append(destinations)
# 3. Make a nice output
# for each list in our list of vertices per car, make a nice output that includes
# Dep time : w_i (the time at which car leaves this location to get to the next one)
# X : x coordinate
# Y : y coordinate
# Description : pickup or dropoff
# Trip: the trip number
# Passenger: name of passenger
    # Helper structure to get the right text (indexed by d: 1 -> Pickup, -1 -> Dropoff):
    stop_type = ["!", "Pickup", "Dropoff"]
# itinerary is a list of dataframes - each dataframe is the schedule for a car
car_stops = []
for car_id, car in enumerate(vertices):
for stop in car:
# Build stop information:
stop_details = []
# add car number
stop_details.append(car_id)
# add dep time
stop_details.append(mins_to_time(d_sol[f'w{stop}']))
# add coordinates
stop_details.append(df2.iloc[stop]['x'])
stop_details.append(df2.iloc[stop]['y'])
# add description
            stop_details.append(stop_type[df2.iloc[stop]['d']])
stop_details.append(df2.iloc[stop]['trip_n'])
# add passenger name
stop_details.append(df2.iloc[stop]['Requester'])
# Add stop information to car's stop list
car_stops.append(stop_details)
itinerary = pd.DataFrame(car_stops, columns = ['Vehicle','Dep time', "X", "Y", "Description", "Trip", "Passenger"])
# 4. Produce output
print(itinerary)
itinerary.to_csv(path, index = False)
return itinerary
"""# 5. MAIN """
# --------------------------------------------------------------------
# --------------------------------------------------------------------
# --------------------------------------------------------------------
def main():
# read in filename
if len(sys.argv) < 2:
# remind human to include file
print(f"USAGE: python {sys.argv[0]} <input file>")
sys.exit(-1)
filename = sys.argv[1]
# process data
df = wrangle(filename)
# Upper bound (assuming requester's rides are distinct) is the num of people
k_upper = len(df['Requester'].unique())-2
# Lower bound (before we begin searching)
k_lower = 0
m_best = None
# Suppresses solver output if True
silence_optimizer=True
# binary search over possible values for solution (k = number of cars needed)
while k_lower < k_upper-1:
mid = (k_lower + k_upper)//2
print(f"Initialized solving for {mid} cars...")
m = solve_ip(df, mid, silence_optimizer)
print(f"Finalized solving for {mid}. Solver status: {m.getStatus()}")
# IP is feasible for k_upper:
# IP not feasible for k_lower
if m.getStatus() != "optimal":
k_lower = mid
else:
k_upper = mid
# we found a feasible sol:
m_best = m
schedule_path = f"schedule_for_{k_upper}_cars.csv"
make_itinerary(m_best, df, k_upper, schedule_path)
print(f"Feasible itinerary achieved with {k_upper} vehicles. No feasible solutions with fewer vehicles. Saving solution into {schedule_path}.")
if __name__ == "__main__":
main()
|
n = int(input("Enter a year: "))
if n%4==0:
if n%100 == 0:
if n%400 ==0:
print(f"{n} is a leap year")
else:
print(f"{n} is not a leap year")
else:
print(f"{n} is a leap year")
else:
print(f"{n} is not a leap year")
|
import tensorflow as tf
from window import WindowGenerator
from baseline import MultiStepBaseline
import numpy as np
import matplotlib.pyplot as plt
from data import Data, get_data_df
"""
Train model to predict next 3 days of stock prices, given 7 days of the past
"""
class FeedBack(tf.keras.Model):
def __init__(self, units, out_steps, num_features):
super().__init__()
self.out_steps = out_steps
self.units = units
self.lstm_cell = tf.keras.layers.LSTMCell(units)
# Wrap the LSTMCell in an RNN to simplify the warmup method
self.lstm_rnn = tf.keras.layers.RNN(self.lstm_cell, return_state=True)
self.dense = tf.keras.layers.Dense(num_features)
def warmup(self, inputs):
# inputs.shape => (batch, time, features)
# x.shape => (batch, lstm_units)
x, *state = self.lstm_rnn(inputs)
# predictions.shape => (batch, features)
prediction = self.dense(x)
return prediction, state
def call(self, inputs, training=None):
        # Collect the dynamically unrolled outputs in a plain list and stack them later.
        predictions = []
# Initialize the LSTM state.
prediction, state = self.warmup(inputs)
# Insert the first prediction.
predictions.append(prediction)
# Run the rest of the prediction steps.
for n in range(1, self.out_steps):
# Use the last prediction as input.
x = prediction
# Execute one lstm step.
x, state = self.lstm_cell(x, states=state,
training=training)
# Convert the lstm output to a prediction.
prediction = self.dense(x)
# Add the prediction to the output.
predictions.append(prediction)
# predictions.shape => (time, batch, features)
predictions = tf.stack(predictions)
# predictions.shape => (batch, time, features)
predictions = tf.transpose(predictions, [1, 0, 2])
return predictions
def compile_and_fit(model, window, patience=8):
early_stopping = tf.keras.callbacks.EarlyStopping(monitor="val_loss",
patience=patience,
mode="min",
restore_best_weights=True)
model.compile(loss=tf.losses.MeanSquaredError(),
optimizer=tf.optimizers.Adam(),
metrics=[tf.metrics.MeanAbsoluteError()])
history = model.fit(window.train, epochs=MAX_EPOCHS,
validation_data=window.val,
callbacks=[early_stopping])
return history
if __name__ == "__main__":
OUT_STEPS = 3
MAX_EPOCHS = 50
data = Data(get_data_df())
train_df, val_df, test_df, num_features = data.get_data()
multi_window = WindowGenerator(input_width=7,
label_width=OUT_STEPS,
shift=OUT_STEPS,
train_df=train_df,
val_df=val_df,
test_df=test_df)
feedback_model = FeedBack(32, out_steps=OUT_STEPS, num_features=num_features)
prediction, state = feedback_model.warmup(multi_window.example[0])
print(prediction.shape)
print('Output shape (batch, time, features): ', feedback_model(multi_window.example[0]).shape)
history = compile_and_fit(feedback_model, multi_window)
multi_window.plot(feedback_model)
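# Shape sketch (derived from the code above): with input_width=7 and
# OUT_STEPS=3, an input batch of shape (batch, 7, num_features) yields
# predictions of shape (batch, 3, num_features); after the warmup step, each
# prediction is fed back in as the next input.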
|
from django.test import TestCase
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from mealplanner.models import Week, Day
class WeekTest(TestCase):
def test_days_of_a_week_are_unique_to_a_week(self):
week = Week.objects.create(week_number=1)
Day.objects.create(week=week, name='Mon')
another_monday = Day(week=week, name='Mon')
with self.assertRaises(IntegrityError): #Should be updated to ValidationError
another_monday.save()
another_monday.full_clean()
def test_days_of_a_week_are_ordered_properly(self):
week = Week.objects.create(week_number=1)
day_objects = [Day.objects.create(week=week, name=day[0]) for day in Day.DAY_NAME_CHOICES]
days = week.day_set.all()
self.assertEqual(list(days), day_objects)
class DayTest(TestCase):
def test_day_must_belong_to_a_week(self):
week = Week.objects.create(week_number=1)
with self.assertRaises(IntegrityError):
no_day = Day.objects.create()
def test_day_is_related_to_week(self):
week = Week.objects.create(week_number=1)
monday = Day.objects.create(week=week, name='Mon')
self.assertEqual(monday.week, week)
def test_days_must_be_named_properly(self):
week = Week.objects.create(week_number=1)
blarg_day = Day.objects.create(week=week, name='blarg')
with self.assertRaises(ValidationError):
blarg_day.save()
blarg_day.full_clean()
def test_day_returns_a_readable_string_representation(self):
week = Week.objects.create(week_number=1)
        sunday = Day.objects.create(week=week, name='Sun')
        self.assertEqual(str(sunday), 'Søndag')
|
import re
import os
# import resource
import string
import pickle
import nltk
from bs4 import BeautifulSoup
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from pprint import pprint
import numpy as np
import mord
def savePkl(tagger, filename="name.pkl"):
with open(filename, "wb") as f:
pickle.dump(tagger, f, -1)
def loadPkl(filename="name.pkl"):
with open(filename, "rb") as f:
return pickle.load(f)
def lemmanize(tokens, lemmas='../generate.txt', polarity=True):
lemma = {}
if len(lemma) == 0:
with open(lemmas, encoding='latin-1') as f:
for l in f:
words = l.strip().split()
if len(words) > 2:
if words[-1] == '1':
if polarity:
if words[-1] == '-':
pol = int(words[-2])
else:
pol = int(words[-1])
lemma[words[0]] = (str(words[1]).lower(), pol)
else:
lemma[words[0]] = words[0]
lemmanized = []
unknown = []
pol = 0
for w in tokens:
try:
lemmanized.append(lemma[w][0])
if polarity:
pol += lemma[w][1]
except KeyError:
lemmanized.append(w)
unknown.append(w)
return lemmanized, unknown, pol
polarities = {}
def polarize(tokens):
global polarities
if len(polarities) == 0:
# Check if the file tagger.pkl exists
# if so load tagger, if not create a tagger
if os.path.isfile('polarities.pkl'):
polarities = loadPkl('polarities.pkl')
print("polarities loaded")
else:
print("polarities init")
dicts = ['../fullStrengthLexicon.txt',
'../mediumStrengthLexicon.txt']
for d in dicts:
with open(d, encoding='latin-1') as f:
for line in f:
words = line.strip().split()
if len(words) > 2:
if words[-1] == 'pos':
polarities[words[0]] = 1
elif words[-1] == 'neg':
polarities[words[0]] = -1
savePkl(polarities, 'polarities.pkl')
polarized = []
for w in tokens:
try:
polarized.append(polarities[w])
except KeyError:
# If the word is unknown the polarity is neutral
polarized.append(0)
return polarized
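# Illustrative sketch (the lexicon entries here are hypothetical): if the
# lexicon files list 'bueno' as pos and 'malo' as neg, then
#   polarize(['bueno', 'malo', 'mesa']) -> [1, -1, 0]
# since unknown words default to neutral polarity.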
def removeSpecialCharacters(tokens):
    '''Remove special characters; receives a list of tokens'''
pattern = re.compile('[{}]'.format(re.escape(string.punctuation)))
filtered_tokens = filter(None, [pattern.sub(' ', token)
for token in tokens])
return list(filtered_tokens)
def cleanText(text, lemmanized, cleaningLevel=1, lemmas='../generate.txt'):
    ''' A string is cleaned, depending on the cleaning level:
    0 -> raw tokens
    1 -> lower case, special characters and stopwords removed
    2 -> lower case, special characters removed (stopwords kept)
    The cleaned string is returned.
    '''
    tokens = nltk.word_tokenize(text)
    stopwords = nltk.corpus.stopwords.words('spanish')
    # Default polarity: lemmanize() only computes a polarity when lemmanized is
    # True, so initialize it here to avoid an unbound name on the return below.
    polarity = 0
if cleaningLevel == 0:
cleanedTokens = tokens
elif cleaningLevel == 1:
cleanedTokens = removeSpecialCharacters([
t.lower() for t in tokens
if t.isalpha() and t.lower() not in stopwords
])
    elif cleaningLevel == 2:  # stopwords kept
cleanedTokens = removeSpecialCharacters([
t.lower() for t in tokens if t.isalpha()
])
if lemmanized:
        lemmanizedTokens, unknown, polarity = lemmanize(cleanedTokens, lemmas)
cleanedTokens = lemmanizedTokens
cleanedText = ' '.join(cleanedTokens)
return cleanedText, polarity
def readMessages(cleaningLevel=1, lemmanized=False,
encoding='latin-1'):
opinions = []
tags = []
path = "../corpusCriticasCine/"
stats = [[0, 0, 0] for i in range(5)]
if os.path.isfile('opinions.pkl') and os.path.isfile('tags.pkl'):
opinions = loadPkl('opinions.pkl')
print("opinions loaded")
tags = loadPkl('tags.pkl')
print("tags loaded")
stats = loadPkl('stats.pkl')
print("stats loaded")
else:
print("messages init")
for filename in sorted(os.listdir(path)):
if filename.endswith('xml'):
lemmas = path+filename.split('.')[0]+'.review.pos'
# print(filename, lemmas)
with open(path+filename, encoding=encoding) as f:
soup = BeautifulSoup(f.read(), 'xml')
tag = int(soup.find('review').attrs['rank'])
opinion = soup.find('body').getText()
opinion, polarity = cleanText(opinion, lemmanized,
cleaningLevel, lemmas)
opinions.append(opinion)
tags.append(tag)
polarized = polarize(opinion.split())
# print("P: ", polarized.count(1), "N: ", polarized.count(-1))
stats[tag-1][0] += polarity
stats[tag-1][1] += polarized.count(1)
stats[tag-1][2] += polarized.count(-1)
savePkl(opinions, 'opinions.pkl')
savePkl(tags, 'tags.pkl')
savePkl(stats, 'stats.pkl')
return opinions, tags, stats
def testModel(X, y, model, size=0.2):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=size)
clf = model
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print('Accuracy of prediction is', clf.score(X_test, y_test))
print('Confusion matrix:\n', confusion_matrix(y_test, y_pred))
targetNames = ['1', '2', '3', '4', '5']
report = metrics.classification_report(
y_test, y_pred,
target_names=targetNames,
output_dict=True
)
    result = {name: report[name] for name in targetNames}
return result
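# Shape note: with output_dict=True, sklearn's classification_report maps each
# label to a dict with keys 'precision', 'recall', 'f1-score' and 'support',
# e.g. result['1'] == {'precision': 0.62, 'recall': 0.55, 'f1-score': 0.58,
# 'support': 40} (values illustrative, not from a real run).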
if __name__ == '__main__':
model = mord.LogisticIT(alpha=1.0)
cl = 0
lemmanized = True
cleaningDescription = [
"Tokens originales",
"Tokens con letras",
"Tokens con letras sin stopwords"
]
    # list of texts, each text is a string (a movie review)
sampleTexts, y, stats = readMessages(cl, lemmanized)
print(len(sampleTexts), "messages in corpus")
print(y.count(1), " 1 messages in corpus")
print(y.count(2), " 2 messages in corpus")
print(y.count(3), " 3 messages in corpus")
print(y.count(4), " 4 messages in corpus")
print(y.count(5), " 5 messages in corpus")
    for i in range(len(stats)):
        size = y.count(i+1)
        # print(size, stats[i][1], stats[i][2])
        print('-'*20)
        print("Category ", i+1)
        print('Has ', size, ' reviews')
        if size > 0:
            print('Pos:', stats[i][1]/size)
            print('Neg:', stats[i][2]/size)
'''
# Build vector of token counts
count_vect = CountVectorizer()
X = count_vect.fit_transform(sampleTexts)
y = np.asarray(y)
print("Testing model ", type(model))
print(cleaningDescription[cl])
print("Lemmatized ", lemmanized)
result = testModel(X, y, model)
print('class f1-score precision recall support')
print("1 ", result['1']['f1-score'],
result['1']['precision'], result['1']['recall'],
result['1']['support'])
print("2 ", result['2']['f1-score'],
result['2']['precision'], result['2']['recall'],
result['2']['support'])
print("3 ", result['3']['f1-score'],
result['3']['precision'], result['3']['recall'],
result['3']['support'])
print("4 ", result['4']['f1-score'],
result['4']['precision'], result['4']['recall'],
result['4']['support'])
print("5 ", result['5']['f1-score'],
result['5']['precision'], result['5']['recall'],
result['5']['support'])
pprint(result)
print("--"*30)
print()
print()
'''
|
#!/usr/bin/env python
import sys
import json
for line in sys.stdin:
    try:
        data = json.loads(line)
        if 'status' in data:
            sys.stdout.write(data['status'] + "\n")
    except ValueError:
        # Skip lines that are not valid JSON.
        continue
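# Usage sketch (file name hypothetical): stream one JSON object per line, e.g.
#   cat tweets.json | python extract_status.py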
|
from selenium import webdriver
import json
import time
import pandas as pd
# geckodriver path is machine-specific; adjust as needed
driver = webdriver.Firefox(executable_path='C:\\Program Files\\Mozilla Firefox\\geckodriver-v0.29.0-win64\\geckodriver.exe')
# https://www.sportskeeda.com/cricket/ipl-teams-and-squads
season_obj = {
'2019': '1165643',
'2018': '1131611',
'2020': '1210595',
'2017': '1078425',
'2016': '968923',
'2015': '791129',
'2014': '695871',
'2013': '586733',
'2012': '520932',
'2011': '466304',
'2010': '418064',
'2009': '374163',
'2008': '313494',
}
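# Each value above is an ESPN series object id; getSeasonSquadLinks plugs it
# into the squad index URL, e.g. for 2019:
# https://www.espncricinfo.com/ci/content/squad/index.html?object=1165643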
def getSeasonSquadLinks(year,s_link):
driver.get(f'https://www.espncricinfo.com/ci/content/squad/index.html?object={s_link}')
time.sleep(2)
try:
squads_link = driver.find_element_by_class_name('squads_list').find_elements_by_tag_name('li')
season_squad_links = [link.find_element_by_tag_name('a').get_attribute("href") for link in squads_link]
print(f'{year} completed')
output = {'year': year, 'links':season_squad_links}
return output
except Exception as e:
print("BT",year, str(e))
return None
def scrapESPN(sq_links, season):
try:
driver.get(sq_links)
time.sleep(2)
try:
squad_name = (' ').join(driver.find_element_by_tag_name('h1').text.split(' ')[:-1])
except Exception as e:
print("Team name not obtained", str(e), sq_links)
squad_name = ''
squad_details = driver.find_elements_by_class_name('squad-player-content')
players = []
for squad in squad_details:
player = {}
player['name'] = squad.find_element_by_class_name('player-page-name').text
player['team'] = squad_name
player['season'] = season
try:
player['position'] = squad.find_element_by_class_name('playing-role').text
player['age'] = squad.find_element_by_class_name('meta-info').text
if len(squad.find_elements_by_class_name('meta-info')) == 2:
player['batting_hand'] = squad.find_elements_by_class_name('meta-info')[1].find_element_by_tag_name('div').text
if len(squad.find_elements_by_class_name('meta-info')[1].find_elements_by_tag_name('div')) ==2 :
player['bowling_hand'] = squad.find_elements_by_class_name('meta-info')[1].find_elements_by_tag_name('div')[1].text
players.append(player)
except Exception as e:
print("Error in finding player-", str(e),player['name'], sq_links)
return players
except Exception as e:
print("Error in finding squad players-", str(e), sq_links)
return None
main_links = list()
for year, obj in season_obj.items():
res = getSeasonSquadLinks(year, obj)
if res:
main_links.append(res)
output = []
for year in main_links[2:]:  # skips the first two seasons, presumably already scraped
for link in year['links']:
res = scrapESPN(link, year['year'])
if res:
output.extend(res)
print(year['year'], link, 'complete')
else:
print(year['year'], link, 'failed')
cols = ['name','team','season','position','age','batting_hand', 'bowling_hand']
df = pd.DataFrame(data=output, columns= cols)
df.to_csv('squad_list_espn.csv', index= False)
|
from __future__ import division, print_function
import numpy as np
from munkres import Munkres, print_matrix
import sys
import itertools
import math
from operator import itemgetter
from permanent import permanent as rysers_permanent
from scipy.optimize import linear_sum_assignment, minimize, LinearConstraint
from pymatgen.optimization import linear_assignment
import matplotlib
matplotlib.use('Agg') #prevent error running remotely
import matplotlib.pyplot as plt
from collections import defaultdict
import heapq
import time
from profilehooks import profile
import pickle
import numba as nb
import scipy
import scipy.stats
import copy
import os
from itertools import combinations
import operator as op
from functools import reduce
from gumbel_sample_permanent import optimized_minc_extened_UB2
#download rbpf_fireworks from here: https://github.com/ermongroup/rbpf_fireworks
# sys.path.insert(0, '/Users/jkuck/tracking_research/rbpf_fireworks/mht_helpers')
sys.path.insert(0, '../rbpf_fireworks/mht_helpers/')
from constant_num_targets_sample_permenant import conjectured_optimal_bound, sink_horn_scale_then_soules
#download "Approximating the Permanent with Belief Propagation" code
#at http://people.cs.vt.edu/~bhuang/
sys.path.insert(0, '/atlas/u/jkuck/bp_permanent')
import matlab.engine
eng = matlab.engine.start_matlab()
def calc_permanent_rysers(matrix):
    '''
    Exactly calculate the permanent of the given matrix using Ryser's method (faster than calc_permanent)
    '''
N = matrix.shape[0]
assert(N == matrix.shape[1])
#this looks complicated because the method takes and returns a complex matrix,
#we are only dealing with real matrices so set complex component to 0
return np.real(rysers_permanent(1j*np.zeros((N,N)) + matrix))
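# Tiny sanity check: the permanent of [[1, 2], [3, 4]] is 1*4 + 2*3 = 10, so
# calc_permanent_rysers(np.array([[1., 2.], [3., 4.]])) should return 10
# (up to floating point).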
gamma_cache = {}
def gamma(k):
if k == 0:
return 0
else:
assert(k >= 1)
if k in gamma_cache:
return gamma_cache[k]
else:
return_val = (math.factorial(k))**(1/k)
# return_val = (fast_factorial(k))**(1/k)
gamma_cache[k] = return_val
return return_val
delta_cache = {}
def delta(k):
if k in delta_cache:
return delta_cache[k]
else:
return_val = gamma(k) - gamma(k-1)
delta_cache[k] = return_val
return return_val
def minc_extended_UB2(matrix):
#another bound
#https://ac-els-cdn-com.stanford.idm.oclc.org/S002437950400299X/1-s2.0-S002437950400299X-main.pdf?_tid=fa4d00ee-39a5-4030-b7c1-28bb5fbc76c0&acdnat=1534454814_a7411b3006e0e092622de35cbf015275
# equation (6), U^M(A)
# return immediate_nesting_extended_bregman(matrix)
assert(matrix.shape[0] == matrix.shape[1])
N = matrix.shape[0]
minc_extended_upper_bound2 = 1.0
for row in range(N):
sorted_row = sorted(matrix[row], reverse=True)
row_sum = 0
for col in range(N):
row_sum += sorted_row[col] * delta(col+1)
# row_sum += sorted_row[col] * numba_delta(col+1)
minc_extended_upper_bound2 *= row_sum
return minc_extended_upper_bound2
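# The bound above is the Soules extension of the Minc bound (labeled 'Soules'
# in the plots below): with gamma(k) = (k!)**(1/k) and
# delta(k) = gamma(k) - gamma(k-1), it computes
#   per(A) <= prod_i sum_{j=1..N} a_{i,(j)} * delta(j),
# where a_{i,(1)} >= a_{i,(2)} >= ... is row i sorted in decreasing order.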
def nCr(n, r):
#https://stackoverflow.com/questions/4941753/is-there-a-math-ncr-function-in-python
r = min(r, n-r)
numer = reduce(op.mul, range(n, n-r, -1), 1)
denom = reduce(op.mul, range(1, r+1), 1)
return numer / denom
def test_decompose_minc_extended_UB2(matrix):
#another bound
#https://ac-els-cdn-com.stanford.idm.oclc.org/S002437950400299X/1-s2.0-S002437950400299X-main.pdf?_tid=fa4d00ee-39a5-4030-b7c1-28bb5fbc76c0&acdnat=1534454814_a7411b3006e0e092622de35cbf015275
# equation (6), U^M(A)
# return immediate_nesting_extended_bregman(matrix)
assert(matrix.shape[0] == matrix.shape[1])
N = matrix.shape[0]
assert(N%2 == 0)
half_N = int(N/2)
UB_upper_matrix = 1.0
for row in range(half_N):
sorted_row = sorted(matrix[row], reverse=True)
row_sum = 0
for col in range(half_N):
row_sum += sorted_row[col] * delta(col+1)
# row_sum += sorted_row[col] * numba_delta(col+1)
UB_upper_matrix *= row_sum
UB_lower_matrix = 1.0
for row in range(half_N, N):
sorted_row = sorted(matrix[row], reverse=True)
row_sum = 0
for col in range(half_N):
row_sum += sorted_row[col] * delta(col+1)
# row_sum += sorted_row[col] * numba_delta(col+1)
UB_lower_matrix *= row_sum
total_UB = UB_upper_matrix * UB_lower_matrix * nCr(N, half_N)
return total_UB
def h_func(r):
if r >= 1:
return r + .5*math.log(r) + np.e - 1
else:
return 1 + (np.e - 1)*r
def immediate_nesting_extended_bregman(matrix):
#https://dukespace.lib.duke.edu/dspace/bitstream/handle/10161/1054/D_Law_Wai_a_200904.pdf?sequence=1&isAllowed=y
#bound the transpose due to our partitioning
assert((matrix <= 1).all())
assert((matrix >= 0).all())
N = matrix.shape[0]
assert(N == matrix.shape[1])
bregman_extended_upper_bound = 1
for col in range(N):
col_sum = 0
for row in range(N):
col_sum += matrix[row][col]
bregman_extended_upper_bound *= h_func(col_sum)/np.e
return bregman_extended_upper_bound
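# This is the h-function extension of the Bregman-Minc bound from the thesis
# linked above: per(A) <= prod_j h(c_j) / e, where c_j is the sum of column j
# and h is h_func defined above; the asserts enforce entries in [0, 1], where
# the bound applies.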
def create_diagonal2(N, k, zero_one=False):
'''
create NxN matrix with blocks on the diagonal of size at most kxk
'''
diag_matrix = np.zeros((N, N))
# diag_matrix = np.random.rand(N, N)/100
diag_matrix_permanent = 1.0
    print(diag_matrix.shape)
for i in range(N): #only has to go up to the number of blocks
if N > k:
cur_block = np.random.rand(k,k)
if zero_one:
for row in range(k):
for col in range(k):
if cur_block[row][col] < .1:
cur_block[row][col] = 0
else:
cur_block[row][col] = 1
cur_block_exact_permanent = calc_permanent_rysers(cur_block)
diag_matrix_permanent *= cur_block_exact_permanent
diag_matrix[i*k:(i+1)*k, \
i*k:(i+1)*k] = cur_block
N -= k
        else:
            cur_block = np.random.rand(N,N)
            if zero_one:
                for row in range(N):
                    for col in range(N):
                        if cur_block[row][col] < .1:
                            cur_block[row][col] = 0
                        else:
                            cur_block[row][col] = 1
            cur_block_exact_permanent = calc_permanent_rysers(cur_block)
            diag_matrix_permanent *= cur_block_exact_permanent
            diag_matrix[i*k:, \
                        i*k:] = cur_block
            break  # the final (smaller) block fills the rest of the matrix
return diag_matrix, diag_matrix_permanent
def singular_value_bound(matrix):
#https://arxiv.org/abs/1212.0025
u, s, vh = np.linalg.svd(matrix, full_matrices=True)
assert(max(s) == s[0])
largest_singular_value = s[0]
n = matrix.shape[0]
assert(n == matrix.shape[1])
permanent_upper_bound = largest_singular_value ** n
return permanent_upper_bound
def test_permanent_bound_tightness(N, k):
use_diag_matrix = False
if use_diag_matrix:
# matrix, exact_permanent = create_diagonal2(N, k=10, zero_one=False)
matrix, exact_permanent = create_diagonal2(N, k=k, zero_one=False)
else:
matrix = np.random.rand(N,N)
for row in range(N):
for col in range(N):
if matrix[row][col] < .5:
matrix[row][col] = matrix[row][col] ** 1
# matrix[row][col] = 0
else:
matrix[row][col] = 1 - (1 - matrix[row][col])**1
# matrix[row][col] = 1
exact_permanent = calc_permanent_rysers(matrix)
minc_UB2 = minc_extended_UB2(matrix)
bregman_extended_upper_bound = immediate_nesting_extended_bregman(matrix)
# optimized_soules = 0
optimized_soules = optimized_minc_extened_UB2(matrix)
# print np.sum(matrix, axis=0)
# print np.reciprocal(np.sum(matrix, axis=0))
guess_at_hurt_col_scalings = np.sum(matrix, axis=0)
guess_at_col_scalings = np.reciprocal(np.sum(matrix, axis=0))
# print matrix
guess_optimize_soules = minc_extended_UB2(matrix * guess_at_col_scalings)/np.prod(guess_at_col_scalings)
guess_hurt_soules = minc_extended_UB2(matrix * guess_at_hurt_col_scalings)/np.prod(guess_at_hurt_col_scalings)
#gurvits_lower_bound (https://arxiv.org/pdf/1408.0976.pdf)
#is 'for a slightly modified variant of the Bethe permanent' (https://nimaanari.com/AR18.pdf, p. 4)
gurvits_lower_bound, gurvits_conjectured_optimal_UB = conjectured_optimal_bound(matrix, return_lower_bound=True)
sinkhorn_soules_bound = sink_horn_scale_then_soules(matrix)
get_BP_lower_bound = True
if get_BP_lower_bound:
matlab_matrix = eng.magic(N)
for row in range(N):
for col in range(N):
matlab_matrix[row][col] = matrix[row][col]
bp_lower_bound = eng.estperslow(matlab_matrix)
nima_upper_bound = np.sqrt(2)**N * bp_lower_bound
else:
bp_lower_bound = 1
nima_upper_bound = 1
    print('log(exact_permanent) =', np.log(exact_permanent))
    print('log(bregman_extended_upper_bound) =', np.log(bregman_extended_upper_bound))
    print('log extended minc2 UB =', np.log(minc_UB2))
    print('log optimized_soules =', np.log(optimized_soules))
    print('log guess_optimize_soules =', np.log(guess_optimize_soules))
    print('log guess_hurt_soules =', np.log(guess_hurt_soules))
    print('log bp_lower_bound =', np.log(bp_lower_bound))
    print('log sinkhorn_soules_bound =', np.log(sinkhorn_soules_bound))
return bp_lower_bound, nima_upper_bound, gurvits_lower_bound, gurvits_conjectured_optimal_UB,\
guess_optimize_soules, optimized_soules, minc_UB2, bregman_extended_upper_bound,\
exact_permanent, sinkhorn_soules_bound
def get_bp_lower_bound(matrix):
assert(matrix.shape[0] == matrix.shape[1])
N = matrix.shape[0]
matlab_matrix = eng.magic(N)
for row in range(N):
for col in range(N):
matlab_matrix[row][col] = matrix[row][col]
bp_lower_bound = eng.estperslow(matlab_matrix)
return bp_lower_bound
def plot_permanent_bound_tightness_VS_n(max_n, k=10):
law_ratios = []
soules_ratios = []
optimized_soules_ratios = []
guess_optimized_soules_ratios = []
gurvits_conjectured_optimal_UB_bound_ratios = []
gurvits_lower_bound_ratios = []
lower_bound_ratios = []
bp_lower_bound_ratios = []
sinkhorn_soules_ratios = []
nima_upper_bound_ratios = []
    n_vals = range(3, max_n)
    n_vals.extend(n_vals)  # duplicate the list so each n is sampled more than once
    n_vals.extend(n_vals)  # four samples per n in total
    print(n_vals)
law_over_soules = []
for n in n_vals:
print "n=", n
bp_lower_bound, nima_upper_bound, gurvits_lower_bound, gurvits_conjectured_optimal_UB, guess_optimize_soules, optimized_soules, soules_UB, law_UB, exact_permanent, sinkhorn_soules = test_permanent_bound_tightness(n, k)
cur_law_ratio = law_UB/exact_permanent
law_ratios.append(cur_law_ratio)
cur_soules_ratio = soules_UB/exact_permanent
soules_ratios.append(cur_soules_ratio)
law_over_soules.append(law_UB/soules_UB)
optimized_soules_ratios.append(optimized_soules/exact_permanent)
guess_optimized_soules_ratios.append(guess_optimize_soules/exact_permanent)
gurvits_conjectured_optimal_UB_bound_ratios.append(gurvits_conjectured_optimal_UB/exact_permanent)
gurvits_lower_bound_ratios.append(gurvits_lower_bound/exact_permanent)
bp_lower_bound_ratios.append(bp_lower_bound/exact_permanent)
nima_upper_bound_ratios.append(nima_upper_bound/exact_permanent)
sinkhorn_soules_ratios.append(sinkhorn_soules/exact_permanent)
fig = plt.figure()
ax = plt.subplot(111)
matplotlib.rcParams.update({'font.size': 15})
# ax.semilogx(n_vals[:len(law_over_soules)], law_over_soules, 'x', label='law over soules')
# ax.loglog(n_vals[:len(law_over_soules)], law_ratios, 'x', label='Law ratios')
ax.semilogy(n_vals[:len(law_over_soules)], soules_ratios, 'x', label='Soules ratios')
# ax.semilogy(n_vals[:len(law_over_soules)], guess_optimized_soules_ratios, 'x', label='guess optimize Soules ratios')
# ax.semilogy(n_vals[:len(law_over_soules)], optimized_soules_ratios, 'x', label='optimized soules ratios')
ax.semilogy(n_vals[:len(law_over_soules)], nima_upper_bound_ratios, 'x', label='Nima UB')
ax.semilogy(n_vals[:len(law_over_soules)], gurvits_conjectured_optimal_UB_bound_ratios, 'x', label='Gurvits Conjectured UB')
# ax.semilogy(n_vals[:len(law_over_soules)], sinkhorn_soules_ratios, 'x', label='sinkhorn soules ratios')
ax.semilogy(n_vals[:len(law_over_soules)], bp_lower_bound_ratios, 'x', label='Bethe Permanent')
ax.semilogy(n_vals[:len(law_over_soules)], gurvits_lower_bound_ratios, 'x', label='Gurvits Modified Bethe Permanent')
# ax.plot(xp, np.log(p6), '-', label=r'$e^{-9.5} n^2 + e^{-20} n^5$')
plt.title('Bound tightness comparison')
plt.xlabel('n (matrix dimension)')
plt.ylabel('upper_bound/permanent')
lgd = ax.legend(loc='upper left', #prop={'size': 9},# bbox_to_anchor=(0.5, -.11),
fancybox=False, shadow=False, ncol=1, numpoints = 1)
plt.setp(lgd.get_title(),fontsize='xx-small')
# plt.show()
if not os.path.exists('./scaling_plots'):
os.makedirs('./scaling_plots')
# fig.savefig('loglog_bound_tightness_comparison_sinkhornSoules_uniformMatrix', bbox_extra_artists=(lgd,), bbox_inches='tight')
# fig.savefig('./scaling_plots/loglog_bound_tightness_comparison_sinkhornSoules_blockDiagk=10', bbox_extra_artists=(lgd,), bbox_inches='tight')
# experiment_name = './plots/loglog_bound_tightness_comparison_sinkhornSoules_blockDiagk=%d'%k
experiment_name = './plots/loglog_bound_tightness_comparison_sinkhornSoules_uniformMatrix'
# fig.savefig('loglog_bound_tightness_comparison_sinkhornSoules_uniformMatrix', bbox_extra_artists=(lgd,), bbox_inches='tight')
fig.savefig(experiment_name, bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close()
data_dictionary = {"law_over_soules":law_over_soules,
"law_ratios":law_ratios,
"soules_ratios":soules_ratios,
"guess_optimized_soules_ratios":guess_optimized_soules_ratios,
"optimized_soules_ratios":optimized_soules_ratios,
"nima_upper_bound_ratios":nima_upper_bound_ratios,
"gurvits_conjectured_optimal_UB_bound_ratios":gurvits_conjectured_optimal_UB_bound_ratios,
"sinkhorn_soules_ratios":sinkhorn_soules_ratios,
"bp_lower_bound_ratios":bp_lower_bound_ratios,
"gurvits_lower_bound_ratios":gurvits_lower_bound_ratios,}
f = open(experiment_name + '.pickle', 'wb')
pickle.dump(data_dictionary, f)
f.close()
def replot_fromPickle_permanent_bound_tightness_VS_n(file_name):
    f = open(file_name + '.pickle', 'rb')
    data_dictionary = pickle.load(f)
    f.close()
    n_vals = []
    gurvits_upper_bound_ratios = [] #gurvits_LB * 2^n
    for idx in range(len(data_dictionary['bp_lower_bound_ratios'])):
        #reverse calculate the value of n from Bethe_UB/Bethe_LB = sqrt(2)^n,
        #should just save n if used in the future...
        n_float = np.log(data_dictionary['nima_upper_bound_ratios'][idx]/data_dictionary['bp_lower_bound_ratios'][idx])/\
                  np.log(np.sqrt(2))
        print(n_float)
        n = int(round(n_float))
        assert(abs(n_float - n) < 1e-6) #the recovered n should be integral up to float error
        n_vals.append(n)
        gurvits_upper_bound_ratios.append(data_dictionary['gurvits_lower_bound_ratios'][idx] * (2**n))
print("nvals", n_vals)
fig = plt.figure()
ax = plt.subplot(111)
for item in ([ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(15)
# matplotlib.rcParams.update({'font.size': 30})
# ax.semilogx(n_vals, law_over_soules, 'x', label='law over soules')
# ax.loglog(n_vals, law_ratios, 'x', label='Law ratios')
# ax.semilogy(n_vals, guess_optimized_soules_ratios, 'x', label='guess optimize Soules ratios')
# ax.semilogy(n_vals, optimized_soules_ratios, 'x', label='optimized soules ratios')
# ax.semilogy(n_vals, gurvits_conjectured_optimal_UB_bound_ratios, 'x', label='Gurvits Conjectured UB')
ax.semilogy(n_vals, gurvits_upper_bound_ratios, 'x', label='Sinkhorn UB', markersize=10)
ax.semilogy(n_vals, data_dictionary['nima_upper_bound_ratios'], '+', label='Bethe UB', markersize=10)
ax.semilogy(n_vals, data_dictionary['soules_ratios'], 'x', label='Soules UB', markersize=10)
# ax.semilogy(n_vals, sinkhorn_soules_ratios, 'x', label='sinkhorn soules ratios')
ax.semilogy(n_vals, data_dictionary['bp_lower_bound_ratios'], '+', label='Bethe LB', markersize=10)
ax.semilogy(n_vals, data_dictionary['gurvits_lower_bound_ratios'], 'x', label='Sinkhorn LB', markersize=10)
# ax.plot(xp, np.log(p6), '-', label=r'$e^{-9.5} n^2 + e^{-20} n^5$')
# plt.title('Uniform Matrices', fontsize=30)
plt.title('Block Diagonal, K=3', fontsize=30)
# plt.title('Permanent Bound Comparison', fontsize=20)
plt.xlabel('n (matrix dimension)', fontsize=25)
plt.ylabel('Bound/Permanent', fontsize=25)
# lgd = ax.legend(loc='upper left', prop={'size': 16},# bbox_to_anchor=(0.5, -.11),
# fancybox=False, shadow=False, ncol=1, numpoints = 1)
# plt.setp(lgd.get_title(),fontsize='xx-small')
# plt.show()
# fig.savefig(file_name, bbox_extra_artists=(lgd,), bbox_inches='tight')
fig.savefig(file_name, bbox_inches='tight')
plt.close()
def test_permanent_bound_tightness1(N):
use_diag_matrix = True
if use_diag_matrix:
matrix, exact_permanent = create_diagonal2(N, k=10, zero_one=False)
else:
matrix = np.random.rand(N,N)
for row in range(N):
for col in range(N):
if matrix[row][col] < .5:
matrix[row][col] = matrix[row][col] ** 1
# matrix[row][col] = 0
else:
matrix[row][col] = 1 - (1 - matrix[row][col])**1
# matrix[row][col] = 1
# exact_permanent = calc_permanent_rysers(matrix)
minc_UB2 = minc_extended_UB2(matrix)
bregman_extended_upper_bound = immediate_nesting_extended_bregman(matrix)
singular_value_upper_bound = singular_value_bound(matrix)
decomposed_minc_UB2 = test_decompose_minc_extended_UB2(matrix)
    print('log(exact_permanent) =', np.log(exact_permanent))
    print('log(bregman_extended_upper_bound) =', np.log(bregman_extended_upper_bound))
    print('log extended minc2 UB =', np.log(minc_UB2))
    print('log(singular_value_upper_bound)', np.log(singular_value_upper_bound))
    print('log decomposed_minc_UB2 =', np.log(decomposed_minc_UB2))
levels = []
upper_bounds = []
for level in range(2, N-1):
        print('level:', level)
levels.append(level)
if level < N-3:
upper_bounds.append(minc_UB2)
continue
cur_UB = 0
        for columns in combinations(range(N), level): # 2 for pairs, 3 for triplets, etc
            upper_matrix = np.delete(matrix, columns, 1) #delete columns
            upper_matrix = np.delete(upper_matrix, range(N-level,N), 0) #delete rows
            cols_to_delete = [i for i in range(N) if i not in columns]
            lower_matrix = np.delete(matrix, cols_to_delete, 1) #delete columns
            lower_matrix = np.delete(lower_matrix, range(0,N-level), 0) #delete rows
cur_UB += calc_permanent_rysers(upper_matrix) * minc_extended_UB2(lower_matrix)
# cur_UB += calc_permanent_rysers(upper_matrix) * immediate_nesting_extended_bregman(lower_matrix)
upper_bounds.append(cur_UB)
fig = plt.figure()
ax = plt.subplot(111)
matplotlib.rcParams.update({'font.size': 15})
# ax.semilogx(n_vals[:len(law_over_soules)], law_over_soules, 'x', label='law over soules')
ax.plot(levels, upper_bounds, 'x', label='Law ratios')
# ax.semilogy(n_vals[:len(law_over_soules)], soules_ratios, 'x', label='Soules ratios')
# ax.plot(xp, np.log(p6), '-', label=r'$e^{-9.5} n^2 + e^{-20} n^5$')
# ax.axhline(y=bregman_extended_upper_bound, label="law", c='r')
ax.axhline(y=minc_UB2, label="soules", c='g')
# ax.axhline(y=decomposed_minc_UB2, label="soules decomposed", c='g')
ax.axhline(y=exact_permanent, label="exact_permanent", c='b')
plt.title('Bound tightness comparison')
plt.xlabel('level')
    plt.ylabel('permanent/UB')
lgd = ax.legend(loc='upper left', #prop={'size': 9},# bbox_to_anchor=(0.5, -.11),
fancybox=False, shadow=False, ncol=1, numpoints = 1)
plt.setp(lgd.get_title(),fontsize='xx-small')
# plt.show()
fig.savefig('with_possible_lower_bound_UB_tightness_comparison', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close()
return minc_UB2, bregman_extended_upper_bound, exact_permanent
if __name__ == "__main__":
# replot_fromPickle_permanent_bound_tightness_VS_n("./plots/loglog_bound_tightness_comparison_sinkhornSoules_uniformMatrix")
replot_fromPickle_permanent_bound_tightness_VS_n("./plots/loglog_bound_tightness_comparison_sinkhornSoules_blockDiagk=%d"%3)
# replot_fromPickle_permanent_bound_tightness_VS_n("./plots/loglog_bound_tightness_comparison_sinkhornSoules_blockDiagk=%d"%10)
    sys.exit()  # halt here; the exploratory code below is kept for reference
# N = 17
# matrix = np.random.rand(N,N)
# m = eng.magic(N)
# for row in range(N):
# for col in range(N):
# m[row][col] = matrix[row][col]
# t1 = time.time()
# lb = eng.estperslow(m)
# t2 = time.time()
# print "bethe permanent (lb) = ", lb, "runtime =", t2-t1
# t3 = time.time()
# lower_bound, conjectured_optimal_UB = conjectured_optimal_bound(matrix, return_lower_bound=True)
# t4 = time.time()
# print "sinkhorn scaling, possibly bethe permanent (lb) = ", lower_bound, "runtime =", t4-t3
# sleep(2341)
# test_permanent_bound_tightness1(30)
plot_permanent_bound_tightness_VS_n(max_n = 60)
# test_permanent_bound_tightness(N=50)
|
"""
Created on 5 fevr. 2013
@author: davidfourquet
inspired by Telmo Menezes's work : telmomenezes.com
"""
"""
this class inherits from networkx.Diself. It stores a distance matrix and some global variables about the network.
It allows us to update them easily instead of computing them many times.
"""
import collections
import igraph as ig
import networkx as nx
import numpy as np
import GraphWithUpdate as gwu
import community as com
class Directed_UnweightedGWU(gwu.GraphWithUpdate, nx.DiGraph):
# avoid decorators syntax problems for line_profiling
import __builtin__
try:
__builtin__.profile
except AttributeError:
# No line profiler, provide a pass-through version
def profile(func):
return func
__builtin__.profile = profile
def __init__(self, graph=None):
nx.DiGraph.__init__(self, graph)
self.i_graphe = ig.Graph(directed=True)
self.shortest_path_matrix = None
self.max_distance = None
self.max_in_degree = None
self.max_out_degree = None
def add_node(self, n):
nx.DiGraph.add_node(self, n)
self.i_graphe.add_vertices(1)
    def add_edge(self, u, v, **args):
        nx.DiGraph.add_edge(self, u, v, **args)
        self.i_graphe.add_edges([(u, v)])
        # update info about the network : not really an update but a computation
        if self.shortest_path_matrix is not None:
            self.shortest_path_matrix = np.array(self.i_graphe.shortest_paths_dijkstra())
        if self.max_distance is not None:
            self.max_distance = np.max(self.get_shortest_path_matrix())
        if self.max_in_degree is not None:
            self.max_in_degree = float(max(self.in_degree().values()))
        if self.max_out_degree is not None:
            self.max_out_degree = float(max(self.out_degree().values()))
@profile
def add_edges_from(self, ebunch):
nx.DiGraph.add_edges_from(self, ebunch)
self.i_graphe.add_edges([(u,v) for u,v,w in ebunch])
# update info about the network : not really an update but a computation
if self.shortest_path_matrix is not None:
self.shortest_path_matrix = np.array(self.i_graphe.shortest_paths_dijkstra())
if self.max_distance is not None:
self.max_distance = np.max(self.get_shortest_path_matrix())
if self.max_in_degree is not None:
self.max_in_degree = float(max(self.in_degree().values()))
if self.max_out_degree is not None:
self.max_out_degree = float(max(self.out_degree().values()))
def isWeighted(self):
return False
def isDirected(self):
return True
def Targ(self, dictionnaire):
result = np.outer(np.ones(self.number_of_nodes(), dtype=float), [dictionnaire[node] for node in self])
return result
def Orig(self, dictionnaire):
result = np.outer([dictionnaire[node] for node in self], np.ones(self.number_of_nodes(), dtype=float))
return result
def OrigId(self):
""" returns a 2d array containing the identity number (0 to n=number of nodes) of the origin node for all edges
"""
return self.Orig(range(self.number_of_nodes()))
def NormalizedOrigId(self):
""" returns a 2d array containing the identity number (0 to n=number of nodes) of the origin node for all edges divide by the total number of nodes
"""
return self.OrigId() / self.number_of_nodes()
def TargId(self):
""" returns a 2d array containing the identity number of the target node for all edges
"""
return self.Targ(range(self.number_of_nodes()))
def NormalizedTargId(self):
""" returns a 2d array containing the identity number of the target node for all edges divided by the number of nodes
"""
return self.TargId() / self.number_of_nodes()
def OrigInDegree(self):
""" returns a 2d array containing the in degree of the origin node for all edges
"""
return self.Orig(self.in_degree())
def NormalizedOrigInDegree(self):
""" returns a 2d array containing in degree of origin divided by max of in_degrees
"""
return self.OrigInDegree() / self.get_max_in_degree()
def OrigOutDegree(self):
""" returns a 2d array containing the out degree of the origin node for all edges
"""
return self.Orig(self.out_degree())
def NormalizedOrigOutDegree(self):
""" returns a 2d array containing the out degree of the origin node for all edges divide by max of out_degrees
"""
return self.OrigOutDegree() / self.get_max_out_degree()
def TargInDegree(self):
""" returns a 2d array containing the in degree of the target node for all edges
"""
return self.Targ(self.in_degree())
def NormalizedTargInDegree(self):
""" returns a 2d array containing the in degree of the target node for all edges divided by max of in_degrees
"""
return self.TargInDegree() / self.get_max_in_degree()
def TargOutDegree(self):
""" returns a 2d array containing the out degree of the target node for all edges
"""
return self.Targ(self.out_degree())
def NormalizedTargOutDegree(self):
""" returns a 2d array containing the out degree of the target node for all edges
"""
return self.TargOutDegree() / self.get_max_out_degree()
@profile
def OrigPagerank(self):
""" returns a 2d array containing the pagerank of the origin node for all edges
probas = np.dot(
np.array(nx.pagerank_scipy(self).values(), dtype=float).reshape(-1, 1),
np.ones((1, self.number_of_nodes())))
"""
try:
return self.Orig(self.i_graphe.pagerank())
        except Exception:
            return self.Orig(np.ones(self.number_of_nodes(), dtype=float) / self.number_of_nodes())
@profile
def TargPagerank(self):
""" returns a 2d array containing the pagerank of the target node for all edges
probas = np.dot(
np.ones((self.number_of_nodes(), 1)),
np.array(nx.pagerank_scipy(self).values(), dtype=float).reshape(1, -1)
)
"""
try:
return self.Targ(self.i_graphe.pagerank())
        except Exception:
            return self.Targ(np.ones(self.number_of_nodes(), dtype=float) / self.number_of_nodes())
@profile
def OrigCoreN(self):
""" returns a 2d array containing the pagerank of the origin node for all edges
probas = np.dot(
np.array(nx.core_number(self).values(), dtype=float).reshape(-1, 1),
np.ones((1, self.number_of_nodes())))
"""
return self.Orig(nx.core_number(self))
@profile
def TargCoreN(self):
""" returns a 2d array containing the pagerank of the target node for all edges
probas = np.dot(
np.ones((self.number_of_nodes(), 1)),
np.array(nx.core_number(self).values(), dtype=float).reshape(1, -1)
)
"""
return self.Targ(nx.core_number(self))
@profile
def OrigCloseness(self):
""" returns a 2d array containing the closeness of the origin node for all edges
probas = np.dot(
np.array(nx.closeness_centrality(self).values(), dtype=float).reshape(-1, 1),
np.ones((1, self.number_of_nodes())))
"""
return self.Orig(nx.closeness_centrality(self))
@profile
def TargCloseness(self):
""" returns a 2d array containing the closeness of the target node for all edges
probas = np.dot(
np.ones((self.number_of_nodes(), 1)),
np.array(nx.closeness_centrality(self).values(), dtype=float).reshape(1, -1)
)
"""
return self.Targ(nx.closeness_centrality(self))
@profile
def OrigBetweenness(self):
""" returns a 2d array containing the betweenness of the origin node for all edges
probas = np.dot(
np.array(nx.betweenness_centrality(self).values(), dtype=float).reshape(-1, 1),
np.ones((1, self.number_of_nodes())))
"""
return np.outer(self.i_graphe.betweenness(), np.ones(self.number_of_nodes(), dtype=float))
"""
return self.Orig(nx.betweenness_centrality(self))
"""
@profile
def TargBetweenness(self):
""" returns a 2d array containing the betweenness of the target node for all edges
probas = np.dot(
np.ones((self.number_of_nodes(), 1)),
np.array(nx.betweenness_centrality(self).values(), dtype=float).reshape(1, -1)
)
"""
return np.outer(np.ones(self.number_of_nodes(), dtype=float), self.i_graphe.betweenness())
"""
return self.Targ(nx.betweenness_centrality(self))
"""
@profile
def OrigClustering(self):
""" returns a 2d array containing the clustering of the origin node for all edges
probas = np.dot(
np.array(nx.clustering(self).values(), dtype=float).reshape(-1, 1),
np.ones((1, self.number_of_nodes())))
"""
return self.Orig(nx.clustering(self.to_undirected()))
@profile
def TargClustering(self):
""" returns a 2d array containing the clustering of the target node for all edges
probas = np.dot(
np.ones((self.number_of_nodes(), 1)),
np.array(nx.clustering(self).values(), dtype=float).reshape(1, -1)
)
"""
return self.Targ(nx.clustering(self.to_undirected()))
@profile
def OrigEccentricity(self):
""" returns a 2d array containing the eccentricity of the origin node for all edges
"""
sp = self.get_shortest_path_matrix()
eccentricity = collections.defaultdict(lambda: float("inf"))
for node in max(nx.strongly_connected_components(self), key=len):
eccentricity[node] = max(sp[node])
"""
probas = np.dot(
np.array(nx.eccentricity(self, sp=sp).values(), dtype=float).reshape(-1, 1),
np.ones((1, self.number_of_nodes())))
"""
return self.Orig(eccentricity)
@profile
def TargEccentricity(self):
""" returns a 2d array containing the eccentricity of the target node for all edges
"""
sp = self.get_shortest_path_matrix()
eccentricity = collections.defaultdict(lambda: float("inf"))
for node in max(nx.strongly_connected_components(self), key=len):
eccentricity[node] = max(sp[node])
"""
probas = np.dot(
np.ones((self.number_of_nodes(), 1)),
np.array(nx.eccentricity(self, sp=sp).values(), dtype=float).reshape(1, -1)
)
"""
return self.Targ(eccentricity)
@profile
    def SameCommunity(self):
        """ returns a 2d array containing 1 when both nodes are in the same community"""
        if self.number_of_edges() > 3:
            try:
                # keep the node -> community mapping so nodes index the partition
                partition = com.best_partition(nx.Graph(self))
            except ZeroDivisionError:
                partition = {node: node for node in self}
                print(self.size(weight='weight'))
                print(self.number_of_nodes(), self.number_of_edges())
        else:
            partition = {node: node for node in self}
        probas = np.zeros((self.number_of_nodes(), self.number_of_nodes()))
        for node1 in partition:
            for node2 in partition:
                if partition[node1] == partition[node2]:
                    probas[node1, node2] = 1.
        return probas
    def Distance(self):
        """ returns a 2d array containing the distance = shortest path length, takes weights into account;
        gives +infinity if no path"""
"""
probas = np.empty((self.number_of_nodes(), self.number_of_nodes()))
# every path that does not exist has distance +infinity
probas.fill(float('+inf'))
for node1, row in self.get_shortest_path_dict().iteritems():
for node2, length in row.iteritems():
probas[node1, node2] = length
return probas
"""
return self.get_shortest_path_matrix()
    def RevDistance(self):
        """ returns a 2d array containing the reversed distance = shortest path length, takes weights into account;
        gives +infinity if no path"""
"""
probas = np.empty((self.number_of_nodes(), self.number_of_nodes()))
# every path that does not exist has distance +infinity
probas.fill(float('+inf'))
for node1, row in self.get_shortest_path_dict().iteritems():
for node2, length in row.iteritems():
probas[node2, node1] = length
"""
return np.transpose(self.get_shortest_path_matrix())
def FeedForwardLoop(self):
"""returns a 2d array where a_i_j = 1 there is a path i->x->j
"""
return np.array(np.sign(np.linalg.matrix_power(nx.to_numpy_matrix(self, dtype=float), 2)))
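    # Why sign(A^2) works here: (A^2)_{ij} = sum_x A_{ix}*A_{xj} counts directed
    # 2-step paths i->x->j, so the sign turns the count into a 0/1 indicator;
    # the motif methods below reuse the same trick with transposed adjacency.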
def FeedBackLoop(self):
"""returns a 2d array where a_i_j = 1 there is a path j->x->i
"""
adj = np.array(nx.to_numpy_matrix(self, dtype=float))
return np.sign(np.transpose(np.linalg.matrix_power(adj, 2)))
def SharedConsequence(self):
"""returns a 2d array where a_i_j = 1 there is a path i->x<-j
"""
adj = np.array(nx.to_numpy_matrix(self, dtype=float))
tr = np.transpose(adj)
return np.sign(np.dot(adj, tr))
def SharedCause(self):
"""returns a 2d array where a_i_j = 1 there is a path i<-x->j
"""
adj = np.array(nx.to_numpy_matrix(self, dtype=float))
tr = np.transpose(adj)
return np.sign(np.dot(tr, adj))
def Reciprocity(self):
"""returns a 2d array where a_i_j =1 if there is an edge from j to i"""
return np.array(np.transpose(nx.to_numpy_matrix(self, dtype=float)))
    def NormalizedDistance(self):
        """ returns a 2d array containing the distance = shortest path length, takes weights into account;
        gives +infinity if no path; divided by the maximal distance, which is always real but can be 0 """
return self.Distance() / self.get_max_distance()
    def NormalizedRevDistance(self):
        """ returns a 2d array containing the reversed distance = shortest path length, takes weights into account;
        gives +infinity if no path; divided by the maximal distance, which is always real but can be 0 """
return self.RevDistance() / self.get_max_distance()
def NumberOfEdges(self):
""" returns a 2d array filled with only one value : the number of edges of the network"""
probas = np.empty((self.number_of_nodes(), self.number_of_nodes()))
value = self.number_of_edges()
probas.fill(value)
return probas
def Constant(self):
""" returns a 2d array filled with only one value : 1"""
probas = np.ones((self.number_of_nodes(), self.number_of_nodes()))
return probas
def Random(self):
""" returns a 2d array filled with only random value between 0 and 1"""
probas = np.random.rand(self.number_of_nodes(), self.number_of_nodes())
return probas
"""
def get_shortest_path_dict(self):
''' returns the dict od dict of shortest path lengths, if it does not exist, it creates it'''
if self.shortest_path_dict is None:
self.shortest_path_dict = nx.shortest_path_length(self)
return self.shortest_path_dict
"""
def get_shortest_path_matrix(self):
""" returns the dict od dict of shortest path lengths, if it does not exist, it creates it"""
if self.shortest_path_matrix is None:
self.shortest_path_matrix = np.array(self.i_graphe.shortest_paths_dijkstra())
return self.shortest_path_matrix
def get_max_in_degree(self):
""" returns the maximum of in_degrees, if it does not exist, it computes it"""
if self.max_in_degree is None:
self.max_in_degree = max(self.in_degree().values())
return self.max_in_degree
def get_max_out_degree(self):
""" returns the maximum of out_degrees, if it does not exist, it computes it"""
if self.max_out_degree is None:
self.max_out_degree = max(self.out_degree().values())
return self.max_out_degree
def get_max_distance(self):
if self.max_distance is None:
self.max_distance = np.max(self.get_shortest_path_matrix())
return self.max_distance
|
import os
from typing import Tuple
import pygame as pg
from game_screen import map
from asset import get_sprite, BURGER, BUBBLE_FF, BUBBLE_OO, BUBBLE_OF, BUBBLE_FO
from config import SCREEN_WIDTH, SCREEN_HEIGHT
class ThreatBubble(pg.sprite.Sprite):
def __init__(self, monster_position: Tuple[int, int]):
pg.sprite.Sprite.__init__(self)
self.image_orig: pg.Surface = pg.image.load(os.path.join('assets', 'speech_bubble.png')).convert_alpha()
self.images = [
[
get_sprite(BUBBLE_OO),
get_sprite(BUBBLE_OF)
],
[
get_sprite(BUBBLE_FO),
get_sprite(BUBBLE_FF)
]
]
self.monster_position = monster_position
self.set_image(monster_position)
self.rect = self.image.get_rect()
self.burger: pg.Surface = get_sprite(BURGER)
def set_image(self, monster_position: Tuple[int, int]):
self.offset = (37, 15)
self.x_burger_offset = 5
self.image = self.image_orig
qsw, qsh = SCREEN_WIDTH // 4, SCREEN_HEIGHT // 4
mpx, mpy = map.to_screen_coords(*monster_position)
if qsw <= mpx < 2*qsw or 3*qsw <= mpx:
reverse_x = 1
self.x_burger_offset *= -1
self.offset = (-self.offset[0], self.offset[1])
else:
reverse_x = 0
if mpy < qsh or 2*qsh <= mpy < 3*qsh:
reverse_y = 1
self.offset = (self.offset[0], -self.offset[1])
else:
reverse_y = 0
self.image = self.images[reverse_x][reverse_y]
def draw(self, screen: pg.Surface):
from game_screen import map
x, y = map.to_screen_coords(self.monster_position[0], self.monster_position[1])
cx, cy = SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2
minx, maxx = self.rect.w, SCREEN_WIDTH - self.rect.w
miny, maxy = self.rect.h, SCREEN_HEIGHT - self.rect.h
if x < minx:
k = (minx - cx) / (x - cx)
x = minx
y = int(k * (y - cy) + cy)
if y < miny:
k = (miny - cy) / (y - cy)
y = miny
x = int(k * (x - cx) + cx)
if x > maxx:
k = (maxx - cx) / (x - cx)
x = maxx
y = int(k * (y - cy) + cy)
if y > maxy:
k = (maxy - cy) / (y - cy)
y = maxy
x = int(k * (x - cx) + cx)
screen.blit(self.image, (x + self.offset[0] - self.rect.w // 2, y + self.offset[1] - self.rect.h // 2))
screen.blit(self.burger, (x + self.offset[0] + self.x_burger_offset - (self.burger.get_size()[0] // 2),
y + self.offset[1] - 2 - (self.burger.get_size()[1] // 2)))
|
import json
import cPickle as pickle
def loadTweets(filepath='resultsFirst100K.json'):
tweets = []
with open(filepath, 'rb') as f:
for line in f:
tweet = json.loads(line)
tweets.append(tweet)
return tweets
def addToDictionary(d, key, item):
if key not in d:
d[key] = []
d[key].append(item)
def extendToDictionary(d, key, items):
if key not in d:
d[key] = []
d[key].extend(items)
def extractDataFromTweets(tweets):
user_id_to_hashtags = {}
user_id_to_retweet_user_ids = {}
user_id_to_reply_user_ids = {}
user_id_to_mention_user_ids = {}
for tweet in tweets:
user_id = tweet['user']['id']
if 'retweeted_status' in tweet:
addToDictionary(user_id_to_retweet_user_ids, user_id, tweet['retweeted_status']['user']['id'])
if tweet['in_reply_to_user_id'] is not None:
addToDictionary(user_id_to_reply_user_ids, user_id, tweet['in_reply_to_user_id'])
entities = tweet['entities']
if len(entities['user_mentions']) > 0:
extendToDictionary(user_id_to_mention_user_ids, user_id, [user_mention['id'] for user_mention in entities['user_mentions']])
if len(entities['hashtags']) > 0:
extendToDictionary(user_id_to_hashtags, user_id, [hashtag['text'] for hashtag in entities['hashtags']])
return user_id_to_hashtags, user_id_to_retweet_user_ids, user_id_to_reply_user_ids, user_id_to_mention_user_ids
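# Shape of the returned dicts (values illustrative): each maps a tweeting
# user id to a list with one entry per occurrence, duplicates kept, e.g.
#   user_id_to_hashtags[12345] == ['news', 'election', 'news']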
tweets = loadTweets() #pass in filepath to tweets as argument...
user_id_to_hashtags, user_id_to_retweet_user_ids, user_id_to_reply_user_ids, user_id_to_mention_user_ids = extractDataFromTweets(tweets)
with open('user_id_to_hashtags.pickle', 'wb') as f:
pickle.dump(user_id_to_hashtags, f)
with open('user_id_to_retweet_user_ids.pickle', 'wb') as f:
pickle.dump(user_id_to_retweet_user_ids, f)
with open('user_id_to_reply_user_ids.pickle', 'wb') as f:
pickle.dump(user_id_to_reply_user_ids, f)
with open('user_id_to_mention_user_ids.pickle', 'wb') as f:
pickle.dump(user_id_to_mention_user_ids, f)
|
print('Hello, World')
print('Go away')
print(3607 * 34227)
# ----------------------------------------------------------------------
# This line is a COMMENT -- a note to human readers of this file.
# When a program runs, it ignores everything from a # (hash) mark
# to the end of the line with the # mark.
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# We call files that have Python code in them MODULES. Line 1 of this
# module (look at it now) prints onto the Console the STRING
# Hello, World
# Anything surrounded by quote marks (single or double) is a string.
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# DONE: (yes, that means for you to do things per instructions below)
#
# 1. Run this module by using the green arrow on the toolbar up top.
# Find the Console tab (to the right or below, depends on your setup)
# and confirm that Hello, World did indeed get printed.
#
# 2. Add another print statement below the current Line 1.
# It should print any string that you want (but keep it G-rated!)
# Test your code by re-running this module and looking at
# the Console to be sure that your string printed as expected.
#
# 3. Add one more print statement, just below the other one.
# This one should print the product of 3607 and 34227.
# Let the computer do the arithmetic for you (no calculators!).
# You do NOT have to use strings for this, so no quote marks!
# Test your code by re-running this module, then asking someone
# whom you trust: What number did your print display?
#
# 4. After you have completed the above (including testing),
# COMMIT your work (which turns it in) by selecting this file
# and doing SVN ~ Commit.
# ----------------------------------------------------------------------
|
#!/usr/bin/env python3
"""
gpio-monitor is a program that will wait and display GPIO status when it change. The goal is
learning how to manipulate gpio through python3.
We will use some basic python module to have more versatility (as argparse) but we will embeded
it on sub-module so it will not interfer with basic GPIO command.
"""
from gpio_monitor.cli import config as cfg, display as dsp
from gpio_monitor.gpio import GPIOSMonitoring
def run():
"""
Define run function that will be started
"""
print('run gpio-monitor')
config = cfg.Config()
print('Configuration {}'.format(config.display()))
channels = GPIOSMonitoring(config.config['gpio'])
display = dsp.Display()
display.draw()
if __name__ == '__main__':
run()
|
#Leo Li
#11/12/18
#the periodic table of elements
#Description: In this text-based periodic table of elements, users can view the whole table with properties, specifically the name of the element, the chemical symbol of the element, the molar mass of the element, and the atomic number of the element. It can also print out a specific element in the table if the user types either the name or the symbol of that element. Furthermore, the program can also print out the molar mass of a specific compound. However, the program cannot deal with compounds such as C12H22O11 or CO; in order to check the molar mass of CO, the user needs to type in C1O or C1O1.
#Honor Code: On my honor, I have neither given nor received any unauthorized aid. Leo Li
#Sources: https://www.dataquest.io/blog/pandas-python-tutorial/
#https://pandas.pydata.org/pandas-docs/stable/indexing.html
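#Worked example of what display_atomic_weight computes: for the input C1O2
#(i.e. CO2), weight = 12.011 + 2*15.999 = 44.009 g/mol, using approximate
#molar masses; the actual output depends on the values in elements.csv.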
from elements import Element#import the Element class from the other file to store information of elements
import pandas as pd#import pandas, which allows me to import data in the csv file
ele = pd.read_csv("elements.csv")#reads the file using pandas
#Create the PeriodicTable class
class PeriodicTable:
def __init__(self):
self.all_elements = []#create a list that includes all the information of properties of all the elements in it
for i in range(1, 104):#add values of properties from the file into the lists
self.all_elements.append(Element(i,ele.index[i-1],(ele['Number'])[i-1],(ele['Symbol'])[i-1]))
#This method allows me to print the whole table in the form of a list. With the redefined str() function in the Element class, the properties are more readable for users.
def display_all_elements(self):
return self.all_elements
#This method prints out the properties of a single element
    def display_single_element(self, single):
        if isinstance(single, int):#if the user input is an integer
            return self.all_elements[single-1]#return the element in the list that corresponds to that integer
        elif isinstance(single, str):#if the user input is a string
            for i in range(1,104):#find an element whose name or symbol matches the user input and return it
                if single == ele.index[i-1] or single == (ele['Number'])[i-1]:
                    return self.all_elements[i-1]
#This method prints out the molar mass of a compound
def display_atomic_weight(self, compound):
char = []#create a 2d list that stores the location and the character of a specific character. For example, for H2O, the first element in the list would be [0, H]
num = []#a 2d list that stores the location and the integer of a specific number in the input
weight = 0#declare weight
word = ''#declare word
for i in range(len(compound)):#for every single element in the string
try:#try to convert the element at the specific location into a integer
compound[i] = int(compound[i])
num.append([i,compound[i]])#if it works, the store this integer to the num list
except ValueError:#if it doesn't, store the information into the char list
char.append([i,compound[i]])
char.append([len(compound)+2, 0])#take another extra variable in the char list in order to check the last character digit in the user input
num.append([100, 0])#take an extra element into the num list to avoid index error
        for i in range(len(char)-1):#check every single element in the char list
            if char[i][0]-char[i+1][0] == -1:#if the location of the first element minus the location of the second element is -1, the two letters are adjacent, so the program considers them a single two-letter element symbol
                word = char[i][1] + char[i+1][1]#store the name of the element into the word variable
                for j in range(len(num)-1):#check if there is a number following the two-letter element
                    if num[j][0] == char[i][0]+2:#if there is a number following
                        for k in range(1,104):#check if there is an element's chemical symbol that matches the word variable here
                            if word == (ele['Number'])[k-1]:#If there is a match
                                weight+=(ele['Symbol'])[k-1]*num[j][1]#bump the value of weight up by the molar mass of that element multiplied by the coefficient following that element
break#break out of the loop
else:
if k == 103:#if the whole thing looped and there isn't a match, the user has typed wrongly
return("\nOops...Seems you didn't enter the correct symbol of the element(notice for compounds like CO you are supposed to enter C1O1 or C1O instead of CO")
elif char[i][0]-char[i+1][0] == -3:#If the character is at the end of the word
word = char[i][1]#Take that single letter as an element
for m in range(len(num)-1):#check if the character is the last thing in the list
if len(char)-char[i][0]==1:
for q in range (1,104):#Look for the element that matches the word variable, and bump the value of weight up by the molar mass of that element
if word == (ele['Number'])[q-1]:
weight+=(ele['Symbol'])[q-1]
elif char[i][0]-char[i+1][0] == -2: #If two characters in the list has a distance of two, that means there is a number following the first element
word = char[i][1]#set the word equal to the first letter
for r in range(len(num)-1):
if num[r][0] == char[i][0]+1:#If there is a number following that letter
for l in range(1,104):#look for the element that matches the word variable, and bump the value of weight up by the molar mass of that element multiplied by the constant following it
if word == (ele['Number'])[l-1]:
weight+=(ele['Symbol'])[l-1]*num[r][1]
elif char[i][0]-char[i+1][0] == -4:#if two characters in the input has a distance of 4, that means there is a number following the last string in the list
word = char[i][1]#repeat the same thing as the previous one
for s in range(len(num)-1):
if len(char)-char[i][0]==1 and char[i][0]+1 == num[s][0]:
for t in range (1,104):
if word == (ele['Number'])[t-1]:
weight+=(ele['Symbol'])[t-1]*(num[s][1])
return weight #return the value of weight for printing
#the main function
def user_input():
while True:
x = input("\nThank you for using the text version of table of elements! To view the whole table of elements, please enter 'a'; to check specific information of an element, please enter either the name, symbol, or atomic number of the element; to check the atomic weight of a compound, please enter the formula of the chemical(However, if you want to check the atomic weight of a compound such as CO2, please type C1O2); to quit the program, please enter 'q'\n\n>>")#print out the instruction for the program
try:#if the userinput can be converted to an integer, that means the user has entered the atomic number of an element, run the display_single_element method with input x
x = int(x)
print(pt.display_single_element(x))
except ValueError:#if the user entered a string
if x == 'a':#if it is a, then print the whole table
print(pt.display_all_elements())
elif x == 'q':#if it is q, then quit the program
quit()
            elif any(c.isdigit() for c in x):#if there is a number in the string, run the atomic weight program
                x = list(x)
                print(pt.display_atomic_weight(x))
            else:#else the user has entered the name or the symbol of an element, check for a match
                found = False
                for i in range(1,104):
                    if x == ele.index[i-1] or x == (ele['Number'])[i-1]:
                        print(pt.display_single_element(x))
                        found = True
                        break
                if not found:#no element matched, so the input was wrong; show the error message
                    print("\nOops... Seems like you didn't enter the correct symbol or name... Please try again(notice for compounds like CO you are supposed to enter C1O1 or C1O instead of CO)")
pt = PeriodicTable()#set pt equal to the class
user_input()#run the main function
|
# Generated by Django 2.2.1 on 2019-06-12 08:38
import bbs.save
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True, verbose_name='昵称')),
('head_img', models.ImageField(default='head/head_img.jfif', storage=bbs.save.newStorage(), unique=True, upload_to='head/%Y%m/', verbose_name='头像')),
('phone', models.CharField(max_length=11, verbose_name='手机')),
('sex', models.CharField(choices=[('1', '男'), ('2', '女'), ('3', '保密')], default='3', max_length=2, verbose_name='性别')),
('intro', models.CharField(blank=True, max_length=200, null=True, verbose_name='个人介绍')),
('wechatimg', models.ImageField(storage=bbs.save.newStorage(), unique=True, upload_to='wechat/%Y%m/', verbose_name='微信二维码')),
('website', models.URLField(blank=True, null=True, verbose_name='个人网站')),
('like', models.PositiveIntegerField(default=0, verbose_name='关注')),
('fans', models.PositiveIntegerField(default=0, verbose_name='粉丝')),
('article', models.PositiveIntegerField(default=0, verbose_name='文章')),
('words', models.PositiveIntegerField(default=0, verbose_name='字数')),
('loved', models.PositiveIntegerField(default=0, verbose_name='收获喜欢')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
options={
'verbose_name': '用户信息',
'verbose_name_plural': '用户信息管理',
},
),
]
|
'''Module responsible for importing texts in .txt format and exporting them in other formats'''
import os.path
import re
import pandas as pd
from pandas import DataFrame
DIRETORIO_DOS_ARQUIVOS_PADRÃO = 'base_dados/'
NOME_ARQUIVO_PLANILHA_PADRÃO = 'referencia_russo.xlsx'
def abre_documento(nome_documento, nome_pasta = DIRETORIO_DOS_ARQUIVOS_PADRÃO):
    ''' Opens txt documents in a specific folder and returns a single string
    with the whole body of the document'''
    # prevents the method from interpreting a folder as a valid document
if not os.path.isfile(nome_pasta + nome_documento):
return []
with open(nome_pasta + nome_documento, mode="r", encoding="utf8") as text_file:
conteudo_completo_texto = text_file.read()
return conteudo_completo_texto
def imprime_planilha(texto, colunas, nome_documento = NOME_ARQUIVO_PLANILHA_PADRÃO):
    ''' Receives a list of rows and a list of column names
    and writes a spreadsheet in .xlsx format'''
df_documentos = pd.DataFrame(texto, columns=colunas)
df_documentos.to_excel(nome_documento)
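# Minimal usage sketch (added for illustration; 'exemplo.txt' is a hypothetical
# file inside base_dados/): read one document and write a one-column spreadsheet.
if __name__ == '__main__':
    texto = abre_documento('exemplo.txt')
    if texto:
        linhas = texto.splitlines()
        imprime_planilha(linhas, ['linha'])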
|
import pickle
import numpy as np
from wiki_dataloader.wiki_dataloader import WikiDataLoader
from word_embedding.embedding import Embedding
from classification.classify import Classify
from classification.visualize import Visualize
# Load 1k articles for 5 wikipedia categories.
wiki_data_loader = WikiDataLoader(5)
# Tokenize, compute co-occurrence, compact, and convert these articles into matrices.
embedding = Embedding(wiki_data_loader.getFullCorpus)
embedding.create_prob_array()
# Generate the autoencoded version of the articles.
# embedding.init_auto_encoder(skip_training=True)
# articles_encoded = embedding.create_encoded_article_array()
articles_encoded = pickle.load(
open('./saved_states/pickles/articles_encoded.pickle', 'rb'))
# Generate a Dataset for classification.
X = np.array([article_encoded['data']
for article_encoded in articles_encoded])
y = np.array([article_encoded['category']
for article_encoded in articles_encoded])
# Initialize classifier.
classifier = Classify(X, y)
# Perform Logistic Regression.
y_pred_lr, results_lr = classifier.logistic_regression()
print("=== Logistic Regression ===")
print("Best Parameters from gridSearch:", results_lr['best_params_'])
print("Accuracy obtained on test set: {0:.2f}%".format(results_lr['accuracy'] * 100))
# Perform Random Forest.
y_pred_rf, results_rf = classifier.random_forest()
print("=== Random Forest ===")
print("Best Parameters from gridSearch:", results_rf['best_params_'])
print("Accuracy obtained on test set: {0:.2f}%".format(results_rf['accuracy'] * 100))
# Visualize the results on a confusion Matrix.
viz = Visualize()
viz.plot_confusion_matrix(classifier.y_test, y_pred_lr,
wiki_data_loader.categories, title="Logistic Regression RAW")
viz.plot_confusion_matrix(classifier.y_test, y_pred_rf,
wiki_data_loader.categories, title="Random Forest RAW")
|
from flask import Flask, request, render_template, session, redirect, url_for, flash, g
import db
import sys
import hashlib
import functools
import bcrypt
from models.UserModel import User
app = Flask(__name__)
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
def login_required(func):
@functools.wraps(func)
def wrapper_login_required(*args, **kwargs):
if 'user_id' not in session:
flash("Log in, please, to access your tasks", "is-danger")
return redirect(url_for("login"))
return func(*args, **kwargs)
return wrapper_login_required
@app.before_request
def before_request():
g.db = db.get_db()
if 'user_id' in session:
g.user = User.getUserById(session['user_id'])
@app.route("/")
def home():
if 'user_id' in session:
return render_template("home.html",name=g.user.name)
return render_template("home.html", name=None)
@app.route("/signup", methods=['GET', 'POST'])
def signup():
if request.method == 'POST':
username = request.form['username'].strip()
password = request.form['password']
if not db.userExists(username):
db.addUser(username, password)
session.pop('user_id', None)
user = User.getUserByName(username)
session['user_id'] = user.id
g.user = user
return redirect(url_for("tasks"))
flash("Username not avaliable", "is-warning")
return redirect(url_for("signup"))
return render_template("signup.html")
@app.route("/login", methods=['GET', 'POST'])
def login():
if request.method == 'POST':
session.pop('user_id', None)
username = request.form['username'].strip()
password = request.form['password']
loginUser = User.getUserByName(username)
        if loginUser is not None and loginUser.validate(username, password):
session['user_id'] = loginUser.id
g.user = loginUser
return redirect(url_for("tasks"))
flash("Incorrect credentials!", "is-warning")
return redirect(url_for("login"))
return render_template("login.html")
@app.route("/tasks", methods=['GET', 'POST'])
@login_required
def tasks():
if request.method == 'POST':
task = request.form['content']
g.user.addTask(task)
tasks = g.user.getTasks()
return render_template("tasks.html", name=g.user.name, tasks=tasks)
tasks = g.user.getTasks()
return render_template("tasks.html", name=g.user.name, tasks=tasks)
@app.route("/update/<int:id>", methods=['GET', 'POST'])
@login_required
def update(id):
task = g.user.getTask(id)
if request.method == 'POST':
updated_task = request.form['content']
g.user.updateTask(id, updated_task)
return redirect(url_for("tasks"))
return render_template('update.html',task=task[0])
@app.route("/delete/<int:id>")
@login_required
def delete(id):
g.user.deleteTask(id)
return redirect(url_for("tasks"))
@app.route("/logout")
def logout():
if 'user_id' in session:
flash("You've successfully logged out", "is-success")
session.pop('user_id', None)
g.user = None
return redirect(url_for("login"))
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0')
|
import torch
from torch import nn
from torch.distributions.multivariate_normal import MultivariateNormal
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import RobustScaler
from model import *
def main():
batch_size = 256
y_train = np.genfromtxt('../labels.csv',delimiter=',', dtype=float)
x_train = np.genfromtxt('../free.csv', delimiter=',', dtype=float)
print(x_train.shape)
x_train = x_train.astype(float)
scaler = MinMaxScaler()
x_train = scaler.fit_transform(x_train)
# Parameters of the VAE
d = 4 # latent space
D = input_dim = x_train.shape[1]
activFunName = 'relu'
activations_list = {
'softplus': nn.Softplus(),
'tanh': nn.Tanh(),
'relu': nn.ReLU()
}
activFun = activations_list[activFunName]
H1 = 64
H2 = 128
lambda_reg = 1e-3 # For the weights of the networks
epoch = 100
initial = int(0.33 * epoch)
learning_rate = 1e-3
clipping_value = 1
train_loader = torch.utils.data.DataLoader(list(zip(x_train, y_train)), shuffle=True, batch_size=batch_size)
model = VAE_model(d, D, H1, H2, activFun)
optimizer_model = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=lambda_reg)
ELBO = np.zeros((epoch, 1))
for i in range(epoch):
# Initialize the losses
train_loss = 0
train_loss_num = 0
for batch_idx, (x, y) in enumerate(train_loader):
x,y=x.type(torch.FloatTensor),y.type(torch.FloatTensor)
# MU_X_eval, LOG_X_eval, Z_ENC_eval, MU_Z_eval, LOG_VAR_Z_eval = model(x)
# MU_X_eval, Z_ENC_eval, MU_Z_eval, LOG_VAR_Z_eval = model(x)
MU_X_eval, Z_ENC_eval, MU_Z_eval = model(x)
# Compute the regularization parameter
# if initial == 0:
# r = 0
# else:
# r = 1. * i / initial
# if r > 1.:
# r = 1.
# The VAE loss
# loss = model.VAE_loss(x=x, mu_x=MU_X_eval, log_var_x= LOG_X_eval, mu_z=MU_Z_eval, log_var_z=LOG_VAR_Z_eval, r=r)
# loss = model.VAE_loss(x=x, mu_x=MU_X_eval, mu_z=MU_Z_eval, log_var_z=LOG_VAR_Z_eval, r=r)
loss = model.VAE_loss(x=x, mu_x=MU_X_eval, r=1, scaler = scaler)
# Update the parameters
optimizer_model.zero_grad()
# Compute the loss
loss.backward()
# Update the parameters
optimizer_model.step()
            # Accumulate the losses
train_loss += loss.item()
train_loss_num += 1
ELBO[i] = train_loss / train_loss_num
if i % 10 == 0:
print("[Epoch: {}/{}] [objective: {:.3f}]".format(i, epoch, ELBO[i, 0]))
ELBO_train = ELBO[epoch-1, 0].round(2)
print('[ELBO train: ' + str(ELBO_train) + ']')
del MU_X_eval, MU_Z_eval, Z_ENC_eval
# del LOG_VAR_X_eval, LOG_VAR_Z_eval
print("Training finished")
plt.figure()
plt.plot(ELBO)
    plt.savefig('./results/train')
plt.show()
torch.save(model.state_dict(), './results/model_VE_minmax_1.pt')
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
"""
Scoring Functions
Here, several scoring functions are collected.
@author: Markus Meister
"""
#%% -- imports --
import numpy as np
import pandas as pd
#%% -- Scoring Functions --
# RMSE
def rmse(p, y,convertExpM1=False):
def f(x,y):
return np.sqrt(np.mean((x - y)**2))
return err_frame(y, p, f,convertExpM1)
# RMSLE
def rmsle(p, y,convertExpM1=False):
def f(p,y):
return np.sqrt(np.mean( ( np.log1p(p) - np.log1p(y) ) ** 2 ))
return err_frame(y, p, f,convertExpM1)
# MAD
def mad(y, p,convertExpM1=False):
def f(y,p):
return np.mean(np.abs(y - p))
return err_frame(y, p, f,convertExpM1)
def err_frame(y, p, f,convertExpM1=False):
y = arr_form(y)
p = arr_form(p)
if convertExpM1:
        p = np.expm1(p)
        y = np.expm1(y)
return f(y,p)
def arr_form(y):
if type(y).__name__ == 'Series':
y = y.values
if len(y.shape) < 2:
y = y[:,np.newaxis]
# fixing transposed data
yN,yD = y.shape
if yN < yD:
y = y.T
return y.squeeze()
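#%% -- usage sketch --
# Small self-check (added as an illustration; not in the original module):
# all three metrics accept array-likes or pandas Series via arr_form/err_frame.
if __name__ == '__main__':
    y_true = np.array([1.0, 2.0, 3.0])
    y_pred = np.array([1.5, 2.0, 2.0])
    print('RMSE :', rmse(y_pred, y_true))   # sqrt(mean([0.25, 0, 1])) ~ 0.6455
    print('RMSLE:', rmsle(y_pred, y_true))
    print('MAD  :', mad(y_true, y_pred))    # mean([0.5, 0, 1]) = 0.5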
|
import urllib3
def getNameNodes():
i = 0
res = {}
archivo = open('links.csv', 'rt')
for linea in archivo:
k = linea.replace(' ', '')
k = k.replace('\n', '')
if i > 0:
j = k.split('.')
if j[0] in res:
res[j[0]].append(k)
else:
res[j[0]] = [k]
i+=1
archivo.close()
return res
def getDataWeb(url):
http = urllib3.PoolManager()
r = http.request('GET', url)
return r.data
def makeArchivos(archivos):
base = 'elib.zib.de/pub/mp-testdata/tsp/tsplib/tsp/'
for k,v in archivos.items():
for e in v:
            data = str(getDataWeb(base + e))
            a = data.replace('\\n', ',')
            #b = a.replace('\\', '')
            j = a.split(',')
            if len(e.split('.')) > 2:
                # capture the optimal tour file
                f = open('archivos/' + k + '.opt' + '.txt', 'w')
                for elem in j:
                    f.write(elem + '\n')
                f.close()
            else:
                f = open('archivos/' + k + '.txt', 'w')
                for elem in j:
                    f.write(elem + '\n')
                f.close()
if __name__ == "__main__":
archivos = getNameNodes()
#print(archivos)
makeArchivos(archivos)
|
from io import StringIO
import matplotlib.pyplot as plt
import numpy as np
import requests
import pandas as pd
url = "http://news.northeastern.edu/interactive/2021/08/updated-covid-dashboard/datasets/covidupdate_testData.csv"
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}
r = requests.get(url, allow_redirects=True, headers=headers)
# print(r.content)
# print(r.text)
csv = pd.read_csv(StringIO(r.text), sep=",")
print(csv)
date = csv['Date']
positive = csv['Positive Tests']
date = np.array(date)
positive = np.array(positive)
def moving_average(x, w):
return np.convolve(x, np.ones(w), 'valid') / w
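# Note (comment added for illustration): np.convolve with mode='valid' keeps
# only the positions where the window fully overlaps x, so the result has
# len(x) - w + 1 points; that is why the plt.plot calls below slice the dates
# with date[windowSize - 1:].
# Example: moving_average(np.array([1, 2, 3, 4]), 3) -> array([2., 3.])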
windowSize = 3
plt.figure()
plt.subplot(221)
plt.scatter(date, positive, label="Positive tests per-day")
plt.ylim(0, max(positive))
plt.plot(date[windowSize - 1:], moving_average(positive, windowSize), 'y', label=f"{windowSize}-day average")
plt.legend()
plt.title("Cases over time")
plt.subplot(222)
per100k = positive / np.array(csv['Tests Completed']) * 100000
plt.scatter(date, per100k, label='Positive per 100k')
plt.ylim(0, max(per100k))
plt.plot(date[windowSize - 1:], moving_average(per100k, windowSize), 'y', label=f"{windowSize}-day average")
plt.legend()
plt.title("Cases per 100k")
plt.subplot(224)
plt.plot(date[1:], csv['Beds In Use'][1:], label='Beds In Use')
plt.plot(date[1:], csv['Beds Not In Use'][1:], label='Beds Not In Use')
plt.legend()
plt.title("Bed Usage")
plt.show()
|
### This script is passed a smiles string ###
### searches ZINC using the url based ###
### features, downloads the resulting html ###
### and searches it for molecules. ###
import sys
import urllib.request
import urllib.error
import urllib.parse
### input
### command: python <this_script> <filename> <tan_cutoff> <zinc_return_count> <output filename>
filename = sys.argv[1]
tan_cutoff = float(sys.argv[2]) #similarity threshold
zinc_return_count = int(sys.argv[3])
outputfilename = sys.argv[4]
print(filename,tan_cutoff,zinc_return_count)
file=open(filename, "r")
lines=file.readlines()
file.close()
SMILES = []
### performs a logic check for spaces vs. commas as delimiters, based on the first line
### having 2 separate columns
if len(lines[0].split(' ')) == 2:
for line in lines:
linesplit=line.split(' ')
SMILES.append(linesplit[0].strip())
elif len(lines[0].split(',')) == 2:
for line in lines:
linesplit=line.split(',')
SMILES.append(linesplit[0].strip())
else:
print("Delimiters must be spaces or commas.")
file=open(outputfilename + ".dat" , "w")
for i in range(1,len(SMILES)):
print (i)
### collected info
matches = 0
best_tan = 0
### http://zinc15.docking.org/substances/?highlight=CC%28C%29Cc1ccccc1&ecfp4_fp-tanimoto=CC%28C%29Cc1ccccc1
pagename = "http://zinc15.docking.org/substances/?count=%d&ecfp4_fp-tanimoto-%f=%s" % (zinc_return_count,tan_cutoff,urllib.quote_plus(SMILES[i]))
print(pagename)
    try:
        page = urllib.request.urlopen(pagename)
    except urllib.error.HTTPError:
        print("empty", "empty", pagename)
        continue
    for raw_line in page:
        line = raw_line.decode("utf-8", errors="replace")
        if '<div class="zinc-tile annotation small">' in line:
            matches += 1
        ### if (best_tan == 0): #zinc rank orders starting with best tan, only need to save best one
        if "/substances/ZINC" in line:
            line3 = line.replace("/", ".")
            spl3 = line3.split(".")
            if spl3[2] == "\n":
                continue
            zinc_id = spl3[2]
            print(zinc_id)
        if "<nobr>" in line:
            ### replace characters so we only have one character to split on
            line2 = line.replace("<", ">")
            spl = line2.split(">")
            if spl[4] == "\n":
                continue
            best_tan = spl[4]
            print(best_tan)
            if float(best_tan) >= tan_cutoff:
                print(matches, best_tan, zinc_id, i, pagename)
                file.write('%s,%s,%s \n' % (SMILES[i], zinc_id, best_tan))
            if float(best_tan) < tan_cutoff:
                ### ZINC returns hits in decreasing similarity order, so stop at the first one below the cutoff
                break
file.close()
|
import math
# Qt
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QPoint
from PyQt5.QtCore import QLine
from PyQt5.QtGui import QBrush
# API
from Arrow import *
from PipelineObjects import PipelineObject
class Pipeline:
    def __init__(self, objects=None, dtype=float):
        # use None as the default: a mutable default list would be shared across instances
        self.objects = objects if objects is not None else []
def __len__(self):
"""
Returns pipeline length
"""
return len(self.objects)
def __setitem__(self, index, item):
"""
Sets pipeline object
at given position
"""
self.objects[index] = item
def __getitem__(self, index):
"""
Returns pipeline object
at given position
"""
return self.objects[index]
def __delitem__(self, index):
"""
Removes/deletes desired item
and possible associated connection with child
"""
target = self.objects[index]
parent = target.getParent()
target.unconnect(target.getChildren())
if (target.getParent()):
target.getParent().unconnect([target])
del self.objects[index]
def __str__(self):
"""
Converts pipeline to readable string
"""
string = ""
for obj in self.objects[:-1]:
string += str(obj)
string += " | \n"
string += " | \n"
string += " v \n"
string += str(self.objects[-1])
return string
def append(self, obj):
"""
Appends object in pipeline
Adds connection with parent/new child
"""
self.objects.append(obj)
self.objects[-1].setId(len(self.objects)) # ID/counter
def connect(self, parent, child):
"""
Connects two objects in pipeline
"""
parent.connect(child)
def clear(self):
"""
Clears pipeline
destroys all pipeline objects
"""
self.objects = []
def prepend(self, obj):
"""
Inserts object in pipeline
in first position
"""
self.objects.insert(0, obj)
def insert(self, pos, obj):
"""
Inserts object in pipeline
at given position
"""
self.objects.insert(pos, obj)
def push(self, symbols):
"""
Pushes n symbols through
the pipeline
"""
for obj in self.objects:
obj.push(symbols)
def print(self):
for obj in self.objects:
print(obj)
def getCollapsedItems(self):
"""
Returns pipeline items
that are currently collapsed
"""
results = []
for obj in self.objects:
if obj.isCollapsed:
results.append(obj)
return results
def getLastObject(self):
"""
Returns pipeline exit point
"""
return self.objects[-1]
def getFirstObject(self):
"""
Returns pipeline entry point
"""
return self.objects[0]
def getEndObjects(self):
"""
Returns all devices with no children
"""
l = []
for obj in self.objects:
if not(obj.hasChildren()):
l.append(obj)
return l
def longestPath(self):
"""
Returns number of nodes
in longest path
"""
n = []
endNodes = self.getEndObjects()
for node in endNodes:
n.append(node.nObjectsAbove()+1)
return max(n)
def getHighlightedObjects(self, highlight):
"""
Returns currently highlighted
objects in pipeline
"""
objects = []
for obj in self.objects:
if obj.isHighlighted(highlight):
objects.append(obj)
return objects
def highlightAllObjects(self, highlight, exobj=None):
"""
Set highlight state for all objects in self
        except an optional excluded object
"""
if exobj is None:
for obj in self.objects:
obj.setAsHighlighted(highlight)
else:
for obj in self.objects:
if obj != exobj:
obj.setAsHighlighted(highlight)
def latency(self):
"""
Returns total latency
in pipeline
"""
lat = 0
for obj in self.objects:
lat += obj.getLatency()
return lat
def canMerge(self, objects):
"""
Returns true if all given objects
can be merged:
+ item types must be compatible
        + items must be linked together
"""
for i in range(1, len(objects)):
if not(objects[i-1].canBeMerged(objects[i])):
return False
return self.itemsAreLinked(objects)
def itemsAreLinked(self, objects):
"""
Returns true if all given objects
        are connected together
"""
revsd = objects[::-1]
for i in range(1, len(revsd)):
child = revsd[i].getChildren()
if not(revsd[i-1] in child):
return False
return True
def remove(self, objects):
"""
Removes given objects from pipeline
"""
# identify within pipeline
for i in range(0, len(objects)):
for obj in self.objects:
if (obj == objects[i]): # identified
index = self.objects.index(obj)
target = self.objects[index]
# unconnect from its parent
if target.hasParent():
target.getParent().unconnect([target])
# remove from pipeline
del self.objects[self.objects.index(obj)]
def merge(self, objects):
"""
        Merges given objects together
"""
rootIndex = self.objects.index(objects[0])
root = self.objects[rootIndex]
root.merge(objects[-1])
# disconnect target from root
targetIndex = self.objects.index(objects[-1])
target = self.objects[targetIndex]
if target.hasParent():
target.getParent().unconnect([target])
# remove target from pipeline
del self.objects[targetIndex]
def adjustObjectsPosition(self):
"""
Adjusts Pipe Objects (x,y) positions
within pipeline for optimum look&feel
"""
for obj in self.objects:
x = 20 # (x,y) base
y = 20
if obj.hasParent():
# x position is based on number of parents
parent = obj.getParent()
while (parent):
x += parent.width()+30 # x margin
if (parent.hasParent()):
parent = parent.getParent()
else:
parent = None
# y position is based on parent number of child
parent = obj.getParent()
pchild = parent.getChildren()
for child in pchild:
if (child != obj): # not self
if (child < obj):
y += child.height()+20 # y margin
# textbox currently visible
# parent.textBoxDisplayed()
# parent.getInfoTextBox().width()
obj.setTextBoxDisplay(False) # will change in near future
obj.setX(x)
obj.setY(y)
obj.setWidth(50)
obj.setHeight(50)
def draw(self, painter):
"""
Draws pipeline object
ie., draws all pipeline objects
contained in self
adds an arrow between connected objects
"""
for obj in self.objects:
obj.draw(painter)
# draw connections between objects
painter.setBrush(QBrush(Qt.black))
for child in obj.getChildren():
# create arrow object
# between obj & its children
points = []
points.append(
QPoint(
obj.center().x() + obj.width()/2,
obj.center().y()
)
)
                if (child.y() != obj.y()): # indentation level
# use intermediate point
points.append(
QPoint(
child.x()-(child.x()-obj.x())/2, # half way
child.center().y()
)
)
points.append(
QPoint(
child.center().x() - child.width()/2,
child.center().y()
)
)
cnt = Arrow(points, fill=True, color=Qt.black)
cnt.draw(painter)
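# Minimal usage sketch (added for illustration; `StubObject` is a hypothetical
# stand-in for PipelineObject, implementing only what append() and __str__ need).
if __name__ == '__main__':
    class StubObject:
        def __init__(self, name):
            self.name = name
        def setId(self, i):
            self.id = i
        def __str__(self):
            return '[{} #{}]'.format(self.name, getattr(self, 'id', '?'))
    p = Pipeline(objects=[])
    p.append(StubObject('source'))
    p.append(StubObject('sink'))
    print(len(p))  # 2
    print(p)       # source, an arrow, then sink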
|
apikey="<--put your NCBI key here-->"
ncbo_key="<--put your NCBO key here-->"
|
from __future__ import print_function
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import os
import sys
here = os.path.abspath(os.path.dirname(__file__))
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setup(
    name='EcxAnx',
    version='1.0',
    description='Econometrics and analytics for time series.',
    license='MIT',
    author='Luis Huesca Molina',
    author_email='katakrask@gmail.com',
    packages=['EcxAnx'],
    install_requires=['numpy', 'pandas'],
    tests_require=['pytest'],
    cmdclass={'test': PyTest},
)
|
from ast import literal_eval
import numpy as np
import matplotlib.pyplot as plt
import itertools, copy
def sizeof(z): return np.sqrt(z.real**2 + z.imag**2)
def map_(z, c=0): return (z)**2 + c
def to_complex(u): return complex(u[0], u[1])
def in_M(z):
c = copy.copy(z)
count = 0
while sizeof(z) <=2:
if count > 200: return True
z = map_(z, c)
count+=1
return False
def main():
grid_size = 100
    # make a grid_size x grid_size grid that sits
    # within the window [-2-2i, 2+2i]
    axis = [4*float(i)/grid_size - 2 for i in range(grid_size)]
grid = [e for e in itertools.product(axis, axis)]
# we start with z_0 = 0 and C to be a point on the grid
# then z_1 = z_0^2 + C = C
# convert each point in the grid into a complex number
count, total = 0, len(grid)
for u in grid:
c = to_complex(u)
        if count % 500 == 0: print(100*float(count)/total)
count+=1
if in_M(c):
plt.plot(u[0], u[1],'*k')
plt.axis([-3,2,-2,2])
plt.show()
def plot_data():
try:
f = open('data')
    except Exception as e:
raise e
data = f.read()
if not f.closed:
f.close()
lines = data.split()
l = [literal_eval(e) for e in lines]
for u in l:
plt.plot(u[0],u[1],'*k')
plt.axis([-3,2,-2,2])
plt.show()
if __name__ == '__main__':
# main()
plot_data()
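# Quick sanity checks for in_M (comments added as an illustration):
# z = 0 is a fixed point of z^2 + 0, so it never escapes -> in_M(complex(0, 0)) is True;
# |2+2i| = sqrt(8) > 2 escapes immediately -> in_M(complex(2, 2)) is False.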
|
# encoding:utf-8
from __future__ import unicode_literals
from helpers.director.shortcut import page_dc
from helpers.director.engine import BaseEngine,page,fa,can_list,can_touch
from django.contrib.auth.models import User,Group
from helpers.func.collection.container import evalue_container
from helpers.maintenance.update_static_timestamp import js_stamp
from django.utils.translation import ugettext as _
from django.conf import settings
class CMSMenu(BaseEngine):
url_name='cms'
title='展链'
brand = '管理后台'
mini_brand='管理'
need_login = False
@property
def menu(self):
crt_user = self.request.user
menu=[
{'label':_('DashBoard'),'url':page('home'),'icon':fa('fa-home'), 'visible':True},
#{'label':_('Marketing'),'icon':fa('fa-image'), 'visible': True,
#'submenu':[
#{'label':_('Banner'),'url':page('TbBanner'), 'visible': can_touch(TbBanner, crt_user) },
#{'label':_('App Package'),'url':page('maindb.TbAppversion'), 'visible': can_touch(TbAppversion, crt_user),},
#{'label':_('Notice'),'url':page('maindb.TbNotice'), 'visible': can_touch(TbNotice, crt_user),},
#{'label':_('Currency'),'url':page('maindb.TbCurrency'), 'visible': can_touch(TbCurrency, crt_user)},
#{'label':_('Help'),'url':page('maindb.TbQa'), 'visible': can_touch(TbQa, crt_user),},
#{'label':_('Activity'),'url':page('maindb.TBActive'), 'visible': can_touch(TbActivity, crt_user),},
#{'label':_('AppResource'),'url':page('AppResource'), 'visible': can_touch(TbAppresource, crt_user),},
#]},
#{'label':_('User'),'icon':fa('fa-user'),'visible':True,
#'submenu':[
#{'label':_('User'),'url':page('jb_user'),'visible':can_touch(User, crt_user)},
#{'label':_('权限组'),'url':page('jb_group'),'visible':can_touch(Group, crt_user)},
##{'label':'权限分组','url':page('group_human'),'visible':can_touch(Group)},
#]},
]
return menu
CMSMenu.add_pages(page_dc)
|
import pygame
import global_variables as gv
btn_x_left_ratio = 1 / 50
btn_x_right_ratio = 52 / 50
btn_y_top_ratio = 4 / 50
btn_y_bottom_ratio = 51 / 50
class TitleScreenView:
def __init__(self, window):
pygame.font.init()
self.__display = window.display
self.__surface = window.surface
self.__title_font = pygame.font.Font(gv.TITLE_FONT, gv.TITLE_TEXT_SIZE)
self.__title_size = self.title_font.size(gv.TITLE_TEXT)
title_x_placement = round(gv.WINDOW_W / 2 - self.__title_size[0] / 2)
title_y_placement = round(gv.WINDOW_L * 6 / 30)
self.__author_font = pygame.font.Font(gv.AUTHOR_FONT, gv.AUTHOR_TEXT_SIZE)
self.__author_size = self.author_font.size(gv.AUTHOR_TEXT)
author_x_placement = round(gv.WINDOW_W / 2 - self.__author_size[0] / 2)
author_y_placement = round(gv.WINDOW_L * 10 / 30)
# Button text font and placement positions
self.__btn_font = pygame.font.Font(gv.BUTTON_FONT, gv.BUTTON_TEXT_SIZE)
self.__p1_start_btn_size = self.__btn_font.size(gv.BUTTON_TEXTS[0])
p1_btn_x_placement = round(gv.WINDOW_W / 2 - self.__p1_start_btn_size[0] / 2)
p1_btn_y_placement = round(gv.WINDOW_L * 15 / 30)
self.__p2_start_btn_size = self.__btn_font.size(gv.BUTTON_TEXTS[1])
p2_btn_x_placement = round(gv.WINDOW_W / 2 - self.__p2_start_btn_size[0] / 2)
p2_btn_y_placement = round(gv.WINDOW_L * 18 / 30)
self.__options_btn_size = self.__btn_font.size(gv.BUTTON_TEXTS[2])
options_btn_x_placement = round(gv.WINDOW_W / 2 - self.__options_btn_size[0] / 2)
options_btn_y_placement = round(gv.WINDOW_L * 21 / 30)
self.__exit_btn_size = self.__btn_font.size(gv.BUTTON_TEXTS[3])
exit_btn_x_placement = round(gv.WINDOW_W / 2 - self.__exit_btn_size[0] / 2)
exit_btn_y_placement = round(gv.WINDOW_L * 24 / 30)
# holds x and y positions of the named variables
self.__title_pos = (title_x_placement, title_y_placement)
self.__author_pos = (author_x_placement, author_y_placement)
self.__p1_start_pos = (p1_btn_x_placement, p1_btn_y_placement)
self.__p2_start_pos = (p2_btn_x_placement, p2_btn_y_placement)
self.__options_pos = (options_btn_x_placement, options_btn_y_placement)
self.__exit_pos = (exit_btn_x_placement, exit_btn_y_placement)
# button position and sizes
self.__p1_btn_pos_size = (self.p1_start_pos[0] - round(self.p1_start_btn_size[0] * btn_x_left_ratio),
self.p1_start_pos[1] - round(self.p1_start_btn_size[1] * btn_y_top_ratio),
round(self.p1_start_btn_size[0] * btn_x_right_ratio),
round(self.p1_start_btn_size[1] * btn_y_bottom_ratio))
self.__p2_btn_pos_size = (self.p2_start_pos[0] - round(self.p2_start_btn_size[0] * btn_x_left_ratio),
self.p2_start_pos[1] - round(self.p2_start_btn_size[1] * btn_y_top_ratio),
round(self.p2_start_btn_size[0] * btn_x_right_ratio),
round(self.p2_start_btn_size[1] * btn_y_bottom_ratio))
self.__options_btn_pos_size = (self.options_pos[0] - round(self.options_btn_size[0] * btn_x_left_ratio),
self.options_pos[1] - round(self.options_btn_size[1] * btn_y_top_ratio),
round(self.options_btn_size[0] * btn_x_right_ratio),
round(self.options_btn_size[1] * btn_y_bottom_ratio))
self.__exit_btn_pos_size = (self.exit_pos[0] - round(self.exit_btn_size[0] * btn_x_left_ratio),
self.exit_pos[1] - round(self.exit_btn_size[1] * btn_y_top_ratio),
round(self.exit_btn_size[0] * btn_x_right_ratio),
round(self.exit_btn_size[1] * btn_y_bottom_ratio))
self.__btn_colors = [gv.ORANGE, gv.ORANGE, gv.ORANGE, gv.ORANGE]
""" METHODS """
def show_screen(self, btn_hover, btn_press):
self.surface.fill(gv.TANISH_YELLOW)
for i, item in enumerate(self.btn_colors):
self.btn_colors[i] = gv.ORANGE
if btn_hover != -1:
self.btn_colors[btn_hover] = gv.MID_DARK_PEACH
if btn_press:
self.btn_colors[btn_hover] = gv.YELLOW
text = self.title_font.render(gv.TITLE_TEXT, True, gv.RED)
self.surface.blit(text, self.title_pos)
text = self.author_font.render(gv.AUTHOR_TEXT, True, gv.DARK_RED)
self.surface.blit(text, self.author_pos)
pygame.draw.rect(self.surface, self.btn_colors[0], self.p1_btn_pos_size)
text = self.btn_font.render(gv.BUTTON_TEXTS[0], True, gv.BLACK)
self.surface.blit(text, self.p1_start_pos)
pygame.draw.rect(self.surface, self.btn_colors[1], self.p2_btn_pos_size)
text = self.btn_font.render(gv.BUTTON_TEXTS[1], True, gv.BLACK)
self.surface.blit(text, self.p2_start_pos)
pygame.draw.rect(self.surface, self.btn_colors[2], self.options_btn_pos_size)
text = self.btn_font.render(gv.BUTTON_TEXTS[2], True, gv.BLACK)
self.surface.blit(text, self.options_pos)
pygame.draw.rect(self.surface, self.btn_colors[3], self.exit_btn_pos_size)
text = self.btn_font.render(gv.BUTTON_TEXTS[3], True, gv.BLACK)
self.surface.blit(text, self.exit_pos)
self.display.update()
""" GETTERS """
@property
def display(self):
return self.__display
@property
def surface(self):
return self.__surface
@property
def title_font(self):
return self.__title_font
@property
def title_pos(self):
return self.__title_pos
@property
def author_font(self):
return self.__author_font
@property
def author_pos(self):
return self.__author_pos
@property
def btn_font(self):
return self.__btn_font
@property
def p1_start_pos(self):
return self.__p1_start_pos
@property
def p2_start_pos(self):
return self.__p2_start_pos
@property
def options_pos(self):
return self.__options_pos
@property
def exit_pos(self):
return self.__exit_pos
@property
def p1_start_btn_size(self):
return self.__p1_start_btn_size
@property
def p2_start_btn_size(self):
return self.__p2_start_btn_size
@property
def options_btn_size(self):
return self.__options_btn_size
@property
def exit_btn_size(self):
return self.__exit_btn_size
@property
def p1_btn_pos_size(self):
return self.__p1_btn_pos_size
@property
def p2_btn_pos_size(self):
return self.__p2_btn_pos_size
@property
def options_btn_pos_size(self):
return self.__options_btn_pos_size
@property
def exit_btn_pos_size(self):
return self.__exit_btn_pos_size
@property
def btn_colors(self):
return self.__btn_colors
""" SETTERS """
@btn_colors.setter
def btn_colors(self, btn_colors):
self.__btn_colors = btn_colors
|
import zope.interface
from buddydemo.interfaces import IPostalInfo, IPostalLookup
@zope.interface.implementer(IPostalInfo)
class Info:
def __init__(self,city,state):
self.city, self.state = city, state
@zope.interface.implementer(IPostalLookup)
class Lookup:
_data = {
'12345': ('Piliyandala', 'Western'),
'12356': ('Galle', 'Southern'),
        '12367': ('Baticalo', 'Eastern'),
}
    def lookup(self, postal_code):
        data = self._data.get(postal_code)
        if data:
            return Info(*data)
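# Minimal usage sketch (added for illustration; not part of the original module):
if __name__ == '__main__':
    info = Lookup().lookup('12356')
    print(info.city, info.state)      # Galle Southern
    print(Lookup().lookup('99999'))   # None: unknown postal code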
|
def solution(n, k):
    # base 10: proceed directly on the decimal string
if k == 10:
# count('')
Pn = str(n).split('0')
for _ in range(Pn.count('')):
Pn.remove('')
else:
        # find the number of digits of n in base k
length = 0
k_str = []
while k ** length <= n:
length += 1
k_str.append('0')
        # express n in base k
for i in reversed(range(length)):
if k ** i <= n and k ** (i + 1) > n:
k_str[i] = str(n // k ** i)
n = n % k ** i
k_str.reverse()
        # split('0') to separate the digit runs
Pn = ''.join(k_str).split('0')
for _ in range(Pn.count('')):
Pn.remove('')
    # check whether each stored number is prime (its only divisors are 1 and itself)
answer = 0
for i in Pn:
count = 0
if i == '1':
count += 1
for j in range(2, int((int(i) ** 0.5)) + 1):
if int(i) % j == 0:
count += 1
if count == 0:
answer += 1
return answer
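# Usage sketch (comment added for illustration): 437674 in base 3 is
# "211020101011"; splitting on '0' yields ['211', '2', '1', '1', '11'],
# of which 211, 2 and 11 are prime, so solution(437674, 3) -> 3.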
|
# -*- coding: utf-8 -*-"""
"""
This module registers images to the database, The CLI to this operation has
been depricated.
"""
import argparse, argcomplete
import os
import os.path as osp
import sys
from .lamplight import image_info, save_images
from . import model as mod
from .utility import commit_resource, sign_path, TemporaryDirectory, get_resource
########################################
# IMAGES
########################################
def commit_register_image_data(src_image):
with TemporaryDirectory() as tmpdir:
[filename] = save_images(tmpdir, 'tmp', tmp_=src_image)
label = commit_resource(filename)
return _register_image_file(label)
def commit_register_image_file(filename):
label = commit_resource(filename)
return _register_image_file(label)
@mod.check_tables(mod.Image)
def _register_image_file(label):
image_type, name, dst_data = image_info(get_resource(label))
(h, w, _d) = dst_data.shape
return {'label':label, 'height':h, 'width':w, 'img_type':image_type}
def cli_interface(arguments):
filename = arguments.image_filename
_ = commit_register_image_file(filename)
def generate_parser(parser):
parser.add_argument('image_filename', type=str,
help="Image filename to register")
parser.set_defaults(func=cli_interface)
return parser
|
from OpenGL.GL import *
from glew_wish import *
import glfw
from math import *
import random
class Piso:
posicionX=0
posicionY=0
def dibujar(self):
glPushMatrix()
glTranslate(self.posicionX, self.posicionY, 0.0)
glBegin(GL_POLYGON)
glColor3f(0.0941, 0, 0.27058)
glVertex(-1,-1,0.0)
glVertex(1,-1,0.0)
glVertex(1,-.7,0.0)
glVertex(-1,-.7,0.0)
glEnd()
glPopMatrix()
|
###
### Copyright (C) 2018-2019 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from ....lib import *
from ..util import *
spec = load_test_spec("vpp", "scale")
@slash.requires(have_gst)
@slash.requires(*have_gst_element("msdk"))
@slash.requires(*have_gst_element("msdkvpp"))
@slash.requires(*have_gst_element("checksumsink2"))
@slash.requires(using_compatible_driver)
@slash.parametrize(*gen_vpp_scale_parameters(spec))
@platform_tags(VPP_PLATFORMS)
def test_default(case, scale_width, scale_height):
params = spec[case].copy()
params.update(
mformat = mapformat(params["format"]),
scale_width = scale_width, scale_height = scale_height)
params["scaled"] = get_media()._test_artifact(
"{}_scaled_{scale_width}x{scale_height}_{format}"
".yuv".format(case, **params))
call(
"gst-launch-1.0 -vf filesrc location={source} num-buffers={frames}"
" ! rawvideoparse format={mformat} width={width} height={height}"
" ! msdkvpp hardware=true scaling-mode=1 ! video/x-raw,format=NV12"
" ! videoconvert"
" ! video/x-raw,width={scale_width},height={scale_height},format={format}"
" ! checksumsink2 file-checksum=false frame-checksum=false"
" plane-checksum=false dump-output=true dump-location={scaled}"
"".format(**params))
check_filesize(
params["scaled"], params["scale_width"], params["scale_height"],
params["frames"], params["format"])
fmtref = format_value(params["reference"], case = case, **params)
ssim = calculate_ssim(
fmtref, params["scaled"],
params["scale_width"], params["scale_height"],
params["frames"], params["format"])
get_media()._set_test_details(ssim = ssim)
assert 1.0 >= ssim[0] >= 0.97
assert 1.0 >= ssim[1] >= 0.97
assert 1.0 >= ssim[2] >= 0.97
|
# $ fab git_pull -f env.py
# $ fab git_pull:repo=dotfiles -f env.py
from fabric.api import *
from fabric.contrib.console import confirm
# It is important to know that these command-line switches are interpreted before
# your fabfile is loaded: any reassignment to env.hosts or env.roles in your
# fabfile will overwrite them.
# If you wish to nondestructively merge the command-line hosts with your fabfile-defined ones
# env.hosts.extend(['host1', host2])
env.hosts = ['10.16.66.180', 'xikangjie@10.16.66.181']
env.user = 'xikangjie'
def git_pull(repo='demo'):
repo_dir = '/home/%s/github/%s' % (env.user, repo)
    print(repo_dir)
with cd(repo_dir):
# warn_only specifies to warn, instead of abort.
with settings(warn_only=True):
result = run('git pull')
if result.failed and not confirm('git pull failed, continue anyway?'):
abort('Aborting at user request.')
"""
$ fab git_pull:repo=dotfiles -f env.py -u xikangjie
[10.16.66.180] Executing task 'git_pull'
/home/xikangjie/github/dotfiles
[10.16.66.180] run: git pull
[10.16.66.180] Login password for 'xikangjie':
[10.16.66.180] out: Updating c8d51c4..1e21725
[10.16.66.180] out: error: Your local changes to 'tmux/tmux.conf' would be overwritten by merge. Aborting.
[10.16.66.180] out: Please, commit your changes or stash them before you can merge.
[10.16.66.180] out:
Warning: run() received nonzero return code 1 while executing 'git pull'!
git pull failed, continue anyway? [Y/n] y
[10.16.66.181] Executing task 'git_pull'
/home/xikangjie/github/dotfiles
[10.16.66.181] run: git pull
[10.16.66.181] out: Already up-to-date.
[10.16.66.181] out:
Done.
Disconnecting from 10.16.66.181... done.
Disconnecting from 10.16.66.180... done.
"""
|
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from italy.models import Regioni, Provincie, Comuni
def index(request):
regioni_list = Regioni.objects.all().order_by('name')
return render_to_response('italy/index.html', {'regioni_list': regioni_list})
def detail_regione(request,regione_name):
    regione = Regioni.objects.all().filter(slug=regione_name).values('codice_regione_istat')
    regione_istat = regione[0]['codice_regione_istat']
    prov_list = Provincie.objects.all().order_by('name').filter(codice_regione_istat=regione_istat)
return render_to_response('italy/detail_regione.html', {'prov_list': prov_list})
|
"""
This type stub file was generated by pyright.
"""
from __future__ import absolute_import
import os
from datetime import datetime, tzinfo
from contextlib import contextmanager
from typing import Callable, Dict, Generator, Iterator, List, Optional, TypeVar, Union
from babel.core import Locale
from babel.support import NullTranslations, Translations
from flask import current_app, request
from flask.app import Flask
from flask.ctx import has_request_context
from flask.helpers import locked_cached_property
from babel import Locale, dates, numbers, support
from pytz import UTC, timezone
from werkzeug.datastructures import ImmutableDict
from flask_babel.speaklater import LazyString
"""
flaskext.babel
~~~~~~~~~~~~~~
Implements i18n/l10n support for Flask applications based on Babel.
:copyright: (c) 2013 by Armin Ronacher, Daniel Neuhäuser.
:license: BSD, see LICENSE for more details.
"""
RT = TypeVar("RT")
class Babel(object):
"""Central controller class that can be used to configure how
Flask-Babel behaves. Each application that wants to use Flask-Babel
has to create, or run :meth:`init_app` on, an instance of this class
after the configuration was initialized.
"""
default_date_formats: Dict[str, Union[str, None]] = ...
date_formats: Dict[str, Union[str, None]]
def __init__(
self,
app: Flask = ...,
default_locale: str = ...,
default_timezone: str = ...,
default_domain: str = ...,
date_formats: Dict[str, Union[str, None]] = ...,
configure_jinja: bool = ...,
) -> None:
...
def init_app(self, app: Flask) -> None:
"""Set up this instance for use with *app*, if no app was passed to
the constructor.
"""
...
def localeselector(self, f: Callable[..., RT]) -> Callable[..., RT]:
"""Registers a callback function for locale selection. The default
behaves as if a function was registered that returns `None` all the
time. If `None` is returned, the locale falls back to the one from
the configuration.
This has to return the locale as string (eg: ``'de_AT'``, ``'en_US'``)
"""
...
def timezoneselector(self, f: Callable[..., RT]) -> Callable[..., RT]:
"""Registers a callback function for timezone selection. The default
behaves as if a function was registered that returns `None` all the
time. If `None` is returned, the timezone falls back to the one from
the configuration.
This has to return the timezone as string (eg: ``'Europe/Vienna'``)
"""
...
def list_translations(self) -> List[Locale]:
"""Returns a list of all the locales translations exist for. The
list returned will be filled with actual locale objects and not just
strings.
.. versionadded:: 0.6
"""
...
@property
def default_locale(self) -> Locale:
"""The default locale from the configuration as instance of a
`babel.Locale` object.
"""
...
@property
def default_timezone(self) -> tzinfo:
"""The default timezone from the configuration as instance of a
`pytz.timezone` object.
"""
...
@property
def domain(self) -> str:
"""The message domain for the translations as a string."""
...
@locked_cached_property
def domain_instance(self) -> Domain:
"""The message domain for the translations."""
...
@property
def translation_directories(self) -> Iterator[str]:
...
def get_translations() -> Union[Translations, NullTranslations]:
"""Returns the correct gettext translations that should be used for
this request. This will never fail and return a dummy translation
object if used outside of the request or if a translation cannot be
found.
"""
...
def get_locale() -> Optional[Locale]:
"""Returns the locale that should be used for this request as
`babel.Locale` object. This returns `None` if used outside of
a request.
"""
...
def get_timezone() -> Optional[tzinfo]:
"""Returns the timezone that should be used for this request as
`pytz.timezone` object. This returns `None` if used outside of
a request.
"""
...
def refresh() -> None:
"""Refreshes the cached timezones and locale information. This can
be used to switch a translation between a request and if you want
the changes to take place immediately, not just with the next request::
user.timezone = request.form['timezone']
user.locale = request.form['locale']
refresh()
flash(gettext('Language was changed'))
Without that refresh, the :func:`~flask.flash` function would probably
    return English text on a now-German page.
"""
...
@contextmanager
def force_locale(locale: Union[str, Locale]) -> Generator[None, None, None]:
"""Temporarily overrides the currently selected locale.
    Sometimes it is useful to switch the current locale to a different one, do
some tasks and then revert back to the original one. For example, if the
user uses German on the web site, but you want to send them an email in
English, you can use this function as a context manager::
with force_locale('en_US'):
send_email(gettext('Hello!'), ...)
:param locale: The locale to temporary switch to (ex: 'en_US').
"""
...
def to_user_timezone(datetime):
"""Convert a datetime object to the user's timezone. This automatically
happens on all date formatting unless rebasing is disabled. If you need
to convert a :class:`datetime.datetime` object at any time to the user's
timezone (as returned by :func:`get_timezone` this function can be used).
"""
...
def to_utc(datetime):
"""Convert a datetime object to UTC and drop tzinfo. This is the
opposite operation to :func:`to_user_timezone`.
"""
...
def format_datetime(datetime=..., format=..., rebase=...):
"""Return a date formatted according to the given pattern. If no
:class:`~datetime.datetime` object is passed, the current time is
assumed. By default rebasing happens which causes the object to
    be converted to the user's timezone (as returned by
:func:`to_user_timezone`). This function formats both date and
time.
The format parameter can either be ``'short'``, ``'medium'``,
    ``'long'`` or ``'full'`` (in which case the language's default for
that setting is used, or the default from the :attr:`Babel.date_formats`
mapping is used) or a format string as documented by Babel.
This function is also available in the template context as filter
named `datetimeformat`.
"""
...
def format_date(date=..., format=..., rebase=...):
"""Return a date formatted according to the given pattern. If no
:class:`~datetime.datetime` or :class:`~datetime.date` object is passed,
the current time is assumed. By default rebasing happens which causes
    the object to be converted to the user's timezone (as returned by
:func:`to_user_timezone`). This function only formats the date part
of a :class:`~datetime.datetime` object.
The format parameter can either be ``'short'``, ``'medium'``,
    ``'long'`` or ``'full'`` (in which case the language's default for
that setting is used, or the default from the :attr:`Babel.date_formats`
mapping is used) or a format string as documented by Babel.
This function is also available in the template context as filter
named `dateformat`.
"""
...
def format_time(time=..., format=..., rebase=...):
"""Return a time formatted according to the given pattern. If no
:class:`~datetime.datetime` object is passed, the current time is
assumed. By default rebasing happens which causes the object to
    be converted to the user's timezone (as returned by
:func:`to_user_timezone`). This function formats both date and
time.
The format parameter can either be ``'short'``, ``'medium'``,
    ``'long'`` or ``'full'`` (in which case the language's default for
that setting is used, or the default from the :attr:`Babel.date_formats`
mapping is used) or a format string as documented by Babel.
This function is also available in the template context as filter
named `timeformat`.
"""
...
def format_timedelta(
datetime_or_timedelta, granularity=..., add_direction=..., threshold=...
):
"""Format the elapsed time from the given date to now or the given
timedelta.
This function is also available in the template context as filter
named `timedeltaformat`.
"""
...
def format_number(number):
"""Return the given number formatted for the locale in request
:param number: the number to format
:return: the formatted number
:rtype: unicode
"""
...
def format_decimal(number, format=...):
"""Return the given decimal number formatted for the locale in request
:param number: the number to format
:param format: the format to use
:return: the formatted number
:rtype: unicode
"""
...
def format_currency(number, currency, format=..., currency_digits=..., format_type=...):
"""Return the given number formatted for the locale in request
:param number: the number to format
:param currency: the currency code
:param format: the format to use
:param currency_digits: use the currency’s number of decimal digits
[default: True]
:param format_type: the currency format type to use
[default: standard]
:return: the formatted number
:rtype: unicode
"""
...
def format_percent(number, format=...):
"""Return formatted percent value for the locale in request
:param number: the number to format
:param format: the format to use
:return: the formatted percent number
:rtype: unicode
"""
...
def format_scientific(number, format=...):
"""Return value formatted in scientific notation for the locale in request
:param number: the number to format
:param format: the format to use
:return: the formatted percent number
:rtype: unicode
"""
...
class Domain(object):
"""Localization domain. By default will use look for tranlations in Flask
application directory and "messages" domain - all message catalogs should
be called ``messages.mo``.
"""
def __init__(self, translation_directories=..., domain=...) -> None:
...
def __repr__(self):
...
@property
def translation_directories(self):
...
def as_default(self):
"""Set this domain as default for the current request"""
...
def get_translations_cache(self, ctx):
"""Returns dictionary-like object for translation caching"""
...
def get_translations(self):
...
def gettext(self, string, **variables):
"""Translates a string with the current locale and passes in the
given keyword arguments as mapping to a string formatting string.
::
gettext(u'Hello World!')
gettext(u'Hello %(name)s!', name='World')
"""
...
def ngettext(self, singular, plural, num, **variables):
"""Translates a string with the current locale and passes in the
given keyword arguments as mapping to a string formatting string.
The `num` parameter is used to dispatch between singular and various
plural forms of the message. It is available in the format string
as ``%(num)d`` or ``%(num)s``. The source language should be
English or a similar language which only has one plural form.
::
ngettext(u'%(num)d Apple', u'%(num)d Apples', num=len(apples))
"""
...
def pgettext(self, context, string, **variables):
"""Like :func:`gettext` but with a context.
.. versionadded:: 0.7
"""
...
def npgettext(self, context, singular, plural, num, **variables):
"""Like :func:`ngettext` but with a context.
.. versionadded:: 0.7
"""
...
def lazy_gettext(self, string, **variables):
"""Like :func:`gettext` but the string returned is lazy which means
it will be translated when it is used as an actual string.
Example::
hello = lazy_gettext(u'Hello World')
@app.route('/')
def index():
return unicode(hello)
"""
...
def lazy_ngettext(self, singular, plural, num, **variables):
"""Like :func:`ngettext` but the string returned is lazy which means
it will be translated when it is used as an actual string.
Example::
apples = lazy_ngettext(u'%(num)d Apple', u'%(num)d Apples', num=len(apples))
@app.route('/')
def index():
return unicode(apples)
"""
...
def lazy_pgettext(self, context, string, **variables):
"""Like :func:`pgettext` but the string returned is lazy which means
it will be translated when it is used as an actual string.
.. versionadded:: 0.7
"""
...
def get_domain():
...
def gettext(*args, **kwargs) -> str:
...
_ = gettext
def ngettext(*args, **kwargs):
...
def pgettext(*args, **kwargs):
...
def npgettext(*args, **kwargs):
...
def lazy_gettext(*args, **kwargs):
...
def lazy_pgettext(*args, **kwargs):
...
def lazy_ngettext(*args, **kwargs):
...
|
'''
Created on Jan 9, 2017
@author: Steinert Robotics
'''
import argparse
import cv2
import imutils
from collections import deque
import numpy as np
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
help="max buffer size")
args = vars(ap.parse_args())
cap = cv2.VideoCapture(0)
# cap.open("http://10.21.80.11/")
lower_green = (29, 86, 6)
upper_green = (64, 255, 255)
pts = deque(maxlen=args["buffer"])
count = 1
while True:
_, frame = cap.read()
frame = imutils.resize(frame, width=600)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, lower_green, upper_green)
mask = cv2.erode(mask, None, iterations = 2)
mask = cv2.dilate(mask, None, iterations = 2)
contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
center = None
if len(contours) > 0:
c = max(contours, key = cv2.contourArea)
x,y,w,h = cv2.boundingRect(c)
M = cv2.moments(c)
if (M["m00"] == 0):
M["m00"] = 1
center = int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])
if w>20:
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,255),2)
cv2.circle(frame, center, 5, (255,0,0), -1)
pts.appendleft(center)
    for i in range(1, len(pts)):
if pts[i-1] is None or pts[i] is None:
continue
thickness = int(np.sqrt(args["buffer"] / float(i+1)) * 0.25)
cv2.line(frame, pts[i-1], pts[i], (255,0,255), thickness)
cv2.putText(frame, str(center) + ": ", (30, 400), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 1)
print(str(center) + ": " + str(count))
cv2.namedWindow("Frame", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("Frame", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
cv2.imshow("Frame", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
count += 1
cap.release()
cv2.destroyAllWindows()
|
import json
from datetime import date, timedelta
from decimal import Decimal
from math import ceil
from typing import List
import isodate
import pdfkit
import requests
from jinja2 import Environment, FileSystemLoader, select_autoescape
from slugify import slugify
templates_env = Environment(
loader=FileSystemLoader('templates'),
autoescape=select_autoescape(['html']),
)
def format_invoice_num(n: int) -> str:
s = '{:0>6}'.format(n)
return f'{s[0:3]} {s[3:6]}'
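# e.g. format_invoice_num(1234) -> '001 234': zero-padded to six digits and
# split into two groups of three (comment added for illustration).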
def increment_invoice_num() -> str:
with open('next_invoice_num.json') as f:
current = json.load(f)
with open('next_invoice_num.json', 'w') as f:
json.dump(current + 1, f)
return format_invoice_num(current)
def get_currency_symbol(currency_code: str) -> str:
if currency_code == 'gbp':
return '£'
elif currency_code == 'usd':
return '$'
    else:
        raise ValueError(f'unsupported currency code: {currency_code}')
def format_date(d: date) -> str:
return d.strftime('%b %-d, %Y')
def md_to_html(line: str) -> str:
if line.startswith('**') and line.endswith('**'):
return f'<strong>{line[2:-2]}</strong>'
elif line.startswith('_') and line.endswith('_'):
return f'<em>{line[1:-1]}</em>'
else:
return line
def format_money(d: Decimal, currency_code: str = 'usd') -> str:
return get_currency_symbol(currency_code) + '{:,.2f}'.format(d)
templates_env.filters['cursym'] = get_currency_symbol
templates_env.filters['date'] = format_date
templates_env.filters['md'] = md_to_html
templates_env.filters['money'] = format_money
with open('profile.json') as profile_file:
profile = json.load(profile_file)
def clockify_get(url: str, params=None, **kwargs):
workspace_id = profile['clockify']['workspace_id']
url = f'https://api.clockify.me/api/v1/workspaces/{workspace_id}/{url}'
kwargs.update({'headers': {'x-api-key': profile['clockify']['api_key']}})
return requests.get(url, params=params, **kwargs).json()
def clockify_user_get(url: str, params=None, **kwargs):
user_id = profile['clockify']['user_id']
return clockify_get(f'user/{user_id}/{url}', params=params, **kwargs)
def wise_get(url: str, params=None, **kwargs):
kwargs.update({'headers': {'authorization': 'bearer ' + profile['wise']['api_key']}})
return requests.get('https://api.transferwise.com/v1/' + url, params=params, **kwargs).json()
def get_exchange_rate_from_usd(to: str) -> Decimal:
r = wise_get('rates', {'source': 'usd', 'target': to})
return Decimal(str(r[0]['rate']))
class WorkItem:
rounded_hours = None
total = None
def __init__(self, project: str, description: str, rate: Decimal, hours: float):
self.project = project
self.description = description
self.rate = rate
self.hours = hours
def get_rounded_hours(self, time_step: int) -> Decimal:
"""
Gets this work item's hours rounded up to the nearest given time step.
:param time_step: The increment in minutes
:return: The number of hours
"""
minutes = self.hours * 60
return Decimal(ceil(minutes / time_step) * time_step) / 60
def get_total(self, time_step: int) -> Decimal:
"""
Gets the amount of money to bill given this work item's hours,
rounded up to the nearest given time step, and the rate.
:param time_step: The increment in minutes
:return: The amount of money to bill in the same currency as the rate
"""
return self.rate * self.get_rounded_hours(time_step)
def set_rounded_hours(self, time_step: int):
self.rounded_hours = self.get_rounded_hours(time_step)
def set_total(self, time_step: int):
self.total = self.get_total(time_step)
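# Rounding illustration (comment added; the Decimal('50') rate is a made-up
# example): 1.1 h = 66 min; with a 15-minute step, ceil(66 / 15) * 15 = 75 min
# = 1.25 h, so WorkItem('p', 'd', Decimal('50'), 1.1).get_total(15) == Decimal('62.5').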
class Client:
def __init__(self, name: str, work_items: List[WorkItem]):
self.name = name
self.work_items = work_items
self.profile = profile['clients'][self.name]
self.invoice_num = increment_invoice_num()
self.address = self.profile['address']
self.bill_time_step = int(self.profile['bill_time_step'])
self.currency_code = self.profile['currency_code']
self.days_until_due = int(self.profile['days_until_due'])
for item in work_items:
item.set_rounded_hours(self.bill_time_step)
item.set_total(self.bill_time_step)
def get_time_until_due(self) -> timedelta:
return timedelta(days=self.days_until_due)
def get_total_due(self) -> Decimal:
return sum(i.get_total(self.bill_time_step) for i in self.work_items)
def generate_invoice(self):
template = templates_env.get_template('invoice.html')
total_due = self.get_total_due()
exchange_rate = 1
if self.currency_code != 'usd':
exchange_rate = get_exchange_rate_from_usd(self.currency_code)
invoice_date = date.today()
invoice_due = invoice_date + self.get_time_until_due()
invoice_data = {
'invoice_date': invoice_date,
'invoice_due': invoice_due,
'client': self,
'work_items': self.work_items,
'work_total': total_due,
'work_total_converted': total_due * exchange_rate,
'bank_account': profile['bank_accounts'][self.currency_code],
'exchange_rate': exchange_rate,
}
invoice_data.update(profile)
return template.render(invoice_data)
def merge_time_entries(project, time_entries) -> WorkItem:
project_name = project['name']
description = time_entries[0]['description']
rate = Decimal(project['hourlyRate']['amount']) / 100
delta = timedelta()
for entry in time_entries:
delta += isodate.parse_duration(entry['timeInterval']['duration'])
hours = delta.total_seconds() / 60 / 60
return WorkItem(project_name, description, rate, hours)
def get_work_items(projects, time_entries, client_name: str):
project_ids = set(p['id'] for p in projects if p['clientName'] == client_name)
time_entries = [e for e in time_entries if e['projectId'] in project_ids]
for description in sorted(set(e['description'] for e in time_entries)):
group = [e for e in time_entries if e['description'] == description]
project = next(p for p in projects if p['id'] == group[0]['projectId'])
yield merge_time_entries(project, group)
def get_clients():
uninvoiced_tag_id = profile['clockify']['uninvoiced_tag_id']
time_entries = clockify_user_get('time-entries', {'tags': uninvoiced_tag_id})
project_ids = set(t['projectId'] for t in time_entries)
projects = [p for p in clockify_get('projects') if p['id'] in project_ids]
for client_name in set(p['clientName'] for p in projects):
yield Client(client_name, list(get_work_items(projects, time_entries, client_name)))
def main():
for client in get_clients():
filename = slugify(f'{client.name}_{client.invoice_num}', separator='_')
pdfkit.from_string(client.generate_invoice(), f'out/{filename}.pdf',
css='assets/styles.css')
if __name__ == '__main__':
main()
|
import os
import os.path
import pathlib
from typing import Any, Callable, Optional, Sequence, Tuple, Union
from PIL import Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class OxfordIIITPet(VisionDataset):
"""`Oxford-IIIT Pet Dataset <https://www.robots.ox.ac.uk/~vgg/data/pets/>`_.
Args:
root (string): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"trainval"`` (default) or ``"test"``.
target_types (string, sequence of strings, optional): Types of target to use. Can be ``category`` (default) or
``segmentation``. Can also be a list to output a tuple with all specified target types. The types represent:
- ``category`` (int): Label for one of the 37 pet categories.
- ``segmentation`` (PIL image): Segmentation trimap of the image.
If empty, ``None`` will be returned as target.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g., ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and puts it into
``root/oxford-iiit-pet``. If dataset is already downloaded, it is not downloaded again.
"""
_RESOURCES = (
("https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz", "5c4f3ee8e5d25df40f4fd59a7f44e54c"),
("https://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz", "95a8c909bbe2e81eed6a22bccdf3f68f"),
)
_VALID_TARGET_TYPES = ("category", "segmentation")
def __init__(
self,
root: str,
split: str = "trainval",
target_types: Union[Sequence[str], str] = "category",
transforms: Optional[Callable] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
):
self._split = verify_str_arg(split, "split", ("trainval", "test"))
if isinstance(target_types, str):
target_types = [target_types]
self._target_types = [
verify_str_arg(target_type, "target_types", self._VALID_TARGET_TYPES) for target_type in target_types
]
super().__init__(root, transforms=transforms, transform=transform, target_transform=target_transform)
self._base_folder = pathlib.Path(self.root) / "oxford-iiit-pet"
self._images_folder = self._base_folder / "images"
self._anns_folder = self._base_folder / "annotations"
self._segs_folder = self._anns_folder / "trimaps"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
image_ids = []
self._labels = []
with open(self._anns_folder / f"{self._split}.txt") as file:
for line in file:
image_id, label, *_ = line.strip().split()
image_ids.append(image_id)
self._labels.append(int(label) - 1)
self.classes = [
" ".join(part.title() for part in raw_cls.split("_"))
for raw_cls, _ in sorted(
{(image_id.rsplit("_", 1)[0], label) for image_id, label in zip(image_ids, self._labels)},
key=lambda image_id_and_label: image_id_and_label[1],
)
]
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
self._images = [self._images_folder / f"{image_id}.jpg" for image_id in image_ids]
self._segs = [self._segs_folder / f"{image_id}.png" for image_id in image_ids]
def __len__(self) -> int:
return len(self._images)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image = Image.open(self._images[idx]).convert("RGB")
target: Any = []
for target_type in self._target_types:
if target_type == "category":
target.append(self._labels[idx])
else: # target_type == "segmentation"
target.append(Image.open(self._segs[idx]))
if not target:
target = None
elif len(target) == 1:
target = target[0]
else:
target = tuple(target)
if self.transforms:
image, target = self.transforms(image, target)
return image, target
    def _check_exists(self) -> bool:
        for folder in (self._images_folder, self._anns_folder):
            if not (os.path.exists(folder) and os.path.isdir(folder)):
                return False
        return True
def _download(self) -> None:
if self._check_exists():
return
for url, md5 in self._RESOURCES:
download_and_extract_archive(url, download_root=str(self._base_folder), md5=md5)
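# Minimal usage sketch (hypothetical root path; download=True needs network
# access on the first run and fetches ~800 MB of images and annotations):
#
#   dataset = OxfordIIITPet(root="data", split="trainval",
#                           target_types=("category", "segmentation"),
#                           download=True)
#   image, (label, trimap) = dataset[0]  # PIL image, int label, PIL trimap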
|
import math
def length(x, y):
return math.sqrt(x * x + y * y)
def rotate_vec(x, y, a):
return x * math.cos(a) - y * math.sin(a), x * math.sin(a) + y * math.cos(a)
def fix(x, y, origin_width, origin_height):
flag = False
if x < 0:
x = 0
flag = True
if x > origin_width - 1:
x = origin_width - 1
flag = True
if y < 0:
y = 0
flag = True
if y > origin_height - 1:
y = origin_height - 1
flag = True
return x, y, flag
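# Worked example: rotating the unit vector (1, 0) by 90 degrees gives (0, 1)
# up to floating-point error, and fix() clamps out-of-range points:
if __name__ == '__main__':
    x, y = rotate_vec(1.0, 0.0, math.pi / 2)
    print(round(x, 6), round(y, 6))  # -> 0.0 1.0
    print(fix(-5, 10, 100, 100))     # -> (0, 10, True): x clamped, flag set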
|
#!/usr/bin/python
# Filename: CaesarCipher.py
# Author: Hercules Lemke Merscher
class CaesarCipher(object):
    '''Caesar cipher.'''
def encrypt(self, msg, key):
        if msg is None:
return None
size = len(msg)
encrypted_msg = []
for i in range(size):
cha_idx = ord(msg[i])
# when msg[i] = [A~Z]
if 65 <= cha_idx <= 90:
char_e_idx = cha_idx + key
if char_e_idx >= 91:
char_e_idx -= 26
elif char_e_idx <= 64:
char_e_idx += 26
# when msg[i] = [a~z]
elif 97 <= cha_idx <= 122:
char_e_idx = cha_idx + key
if char_e_idx >= 123:
char_e_idx -= 26
elif char_e_idx <= 96:
char_e_idx += 26
            # when msg[i] is not a letter, leave it unchanged
else:
char_e_idx = cha_idx
encrypted_msg.append(chr(char_e_idx))
encrypted_msg = ''.join(encrypted_msg)
return encrypted_msg
def decrypt(self, encrypted_msg, key):
return self.encrypt(encrypted_msg, -key)
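if __name__ == '__main__':
    # Round-trip sanity check: decrypt(encrypt(m, k), k) == m for letters,
    # while non-letters pass through unchanged.
    cipher = CaesarCipher()
    secret = cipher.encrypt('Attack at dawn!', 3)
    print(secret)                      # -> 'Dwwdfn dw gdzq!'
    print(cipher.decrypt(secret, 3))   # -> 'Attack at dawn!'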
|
import unittest
from katas.kyu_6.lotto_6_of_49 import \
check_for_winning_category, number_generator
class Lotto6of49TestCase(unittest.TestCase):
def setUp(self):
self.winning_numbers = number_generator()
def test_equal_1(self):
self.assertEqual(len(self.winning_numbers), 7)
def test_equal_2(self):
self.assertEqual(len(set(self.winning_numbers[:6])), 6)
def test_equal_3(self):
self.assertEqual(self.winning_numbers[:6],
sorted(self.winning_numbers[:6]))
def test_equal_4(self):
self.assertEqual(check_for_winning_category(
[1, 2, 3, 4, 5, 6, 7], [1, 2, 3, 4, 5, 6, 7]
), 1)
def test_equal_5(self):
self.assertEqual(check_for_winning_category(
[1, 2, 3, 4, 5, 6, 0], [1, 2, 3, 4, 5, 6, 7]
), 2)
def test_equal_6(self):
self.assertEqual(check_for_winning_category(
[1, 2, 3, 34, 35, 39, 1], [1, 2, 3, 4, 5, 6, 7]
), 8)
def test_equal_7(self):
self.assertEqual(check_for_winning_category(
[11, 12, 13, 34, 35, 39, 1], [1, 2, 3, 4, 5, 6, 7]
), -1)
def test_equal_8(self):
self.assertEqual(check_for_winning_category(
[1, 12, 13, 34, 35, 39, 1], [1, 2, 3, 4, 5, 6, 1]
), -1)
def test_not_equal_1(self):
self.assertNotEqual(number_generator(), self.winning_numbers)
def test_true_1(self):
self.assertTrue(0 <= self.winning_numbers[6] <= 9)
def test_true_2(self):
self.assertTrue(all(1 <= a <= 49 for a in self.winning_numbers[:6]))
def test_true_3(self):
self.assertTrue(0 <= self.winning_numbers[6] <= 9)
|
import speech_recognition as sr
from datetime import *
import pyttsx3
import wikipedia
import webbrowser
import os
import subprocess
import ctypes
import time
import pyjokes
import random
from requests import get
from googlesearch import search
engine = pyttsx3.init("sapi5")
voices = engine.getProperty("voices")
rate = engine.getProperty("rate")
engine.setProperty("rate", rate - 25)
engine.setProperty("voice", voices[1].id)
chrome_path = "C:/Program Files/Google/Chrome/Application/chrome.exe %s"
name = "jarvis"
greetings = [
"hi",
"hai",
"hello",
"hey",
"hay",
"haay",
"hi " + name,
"hai " + name,
"hello " + name,
"hey " + name,
"hay " + name,
"haay " + name,
]
positive_responses = ["s", "yes", "yeah", "sure", "off course"]
negative_responses = ["n", "no", "nah", "not really", "not interested"]
def speak(audio):
engine.say(audio)
engine.runAndWait()
def takeCommand():
r = sr.Recognizer()
with sr.Microphone(device_index=1) as source:
print("Listening...")
r.pause_threshold = 1
audio = r.listen(source)
try:
print("Recognizing...")
query = r.recognize_google(audio, language="en-in")
print(f"User said: {query}\n")
except Exception as e:
print(e)
print("Unable to Recognize your voice.")
return "None"
return query
def record():
r = sr.Recognizer()
with sr.Microphone(device_index=1) as source:
print("Listening...")
r.pause_threshold = 1
audio = r.listen(source)
try:
with open("record.wav", "wb") as f:
f.write(audio.get_wav_data())
print("Transcript is being created")
query = r.recognize_google(audio, language="en-in")
print(f"transcript: {query}\n")
# speak('Do you want to save the transcript sir')
except Exception as e:
print(e)
print("Unable to Recognize your voice.")
return "None"
websites = {
"youtube": "https://youtube.com",
"wikipedia": "https://wikipedia.org",
"google": "https://google.com",
"whatsapp": "https://web.whatsapp.com",
"facebook": "https://www.facebook.com/",
"instagram": "https://www.instagram.com/",
}
subprocesses = ["lock window", "shutdown", "restart", "hibernate", "log off"]
if __name__ == "__main__":
def clear():
return os.system("cls")
    # Clear the terminal before the assistant starts handling commands.
clear()
close = False
speak(name + " at your service sir")
while True:
query = takeCommand().lower()
try:
if "wikipedia" in query and "open wikipedia" not in query:
speak("Searching Wikipedia...")
query = query.replace("wikipedia", "")
results = wikipedia.summary(query, sentences=3)
speak("According to Wikipedia")
print(results)
speak(results)
elif "what is the time" in query:
print(datetime.now())
elif "play music" in query or "play song" in query:
speak("Here you go with music")
music_dir = "C:/Users/nerus/Music"
songs = [song for song in list(os.listdir(music_dir)) if ".mp3" in song]
print(songs)
                os.startfile(os.path.join(music_dir, songs[0]))  # no assignment: naming the result `random` would shadow the random module used below
elif "search" in query or "play" in query:
query = query.replace("search", "")
query = query.replace("play", "")
webbrowser.open(query)
elif "record" in query:
record()
# websites
elif "open" in query:
for key in websites.keys():
if key in query:
webbrowser.open(url=websites[key])
speak(key + " opened successfully")
# applications
elif "open sublime" in query:
application = "C:/Program Files/Sublime Text 3/sublime_text.exe"
os.startfile(application)
speak("sublime text editor is opened")
elif "open chrome" in query:
application = chrome_path
os.startfile(application)
speak("google chrome opened successfully")
elif "open edge" in query:
application = (
"C:/Program Files (x86)/Microsoft/Edge/Application/msedge.exe"
)
os.startfile(application)
speak("Microsoft edge opened successfully")
elif "open android studio" in query:
application = "C:/Program Files/Android/Android Studio/bin/studio64.exe"
os.startfile(application)
speak("sublime text editor opened successfully")
elif "open vs code" in query:
application = (
"C:/Users/nerus/AppData/Local/Programs/Microsoft VS Code/Code.exe"
)
os.startfile(application)
speak("VS Code editor opened successfully")
# subprocesses
elif "lock window" in query:
speak("locking the device")
ctypes.windll.user32.LockWorkStation()
elif "shutdown" in query:
speak("Hold On a Sec ! Your system is on its way to shut down")
subprocess.call(["shutdown", "/s", "/f"])
elif "restart" in query:
subprocess.call(["shutdown", "/r"])
elif "hibernate" in query or "sleep" in query:
speak("Hibernating")
subprocess.call(["shutdown", "/h"])
elif "log off" in query or "sign out" in query:
speak("Make sure all the application are closed before sign-out")
time.sleep(5)
subprocess.call(["shutdown", "/l"])
elif "don't listen" in query or "stop listening" in query:
speak(
"for how much time you want to stop jarvis from listening commands (Specify time in minutes)"
)
a = int(takeCommand())
time.sleep(a * 60)
print(a)
# conversation
elif query in greetings:
speak(random.choice(greetings[:3]) + " sir")
elif "how are you" in query:
speak("I am fine, Thank you")
speak("How are you, Sir")
elif "fine" in query or "good" in query:
speak("It's good to know that your fine")
elif "change my name to" in query:
query = query.replace("change my name to", "")
name = query
elif "change name" in query:
speak("What would you like to call me, Sir ")
name = takeCommand()
speak("Thanks for naming me")
elif "what's your name" in query or "What is your name" in query:
speak("My friends call me")
speak(name)
print("My friends call me", name)
elif "who made you" in query or "who created you" in query:
speak("I have been created by Catherine.")
elif "joke" in query:
speak(pyjokes.get_joke())
elif "exit" in query or "break" in query:
speak("Thanks for giving me your time")
close = True
exit()
else:
speak("Searching Wikipedia...")
query = query.replace("wikipedia", "")
results = wikipedia.summary(query, sentences=3)
speak("According to Wikipedia")
print(results)
speak(results)
except:
if close:
exit()
print("Unable to Recognize your Command.")
speak("Unable to Recognize your Command.")
"""
pyttsx3.drivers
pyttsx3.drivers.sapi5
"""
|
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import os
from google.oauth2 import service_account
from google.cloud import firestore
import RPi.GPIO as GPIO
import time, sys
from gpiozero import Servo
from time import sleep
IN_FLOW_SENSOR = 23
OUT_FLOW_SENSOR = 24
#TRIG1 = 17 #level in storage
#ECHO1 = 27 #level in storage
TRIG2 = 22 #level in canal
ECHO2 = 25 #level in canal
TRIG3 = 0 #level in dam2
ECHO3 = 1 #level in dam2
servo1 = Servo(6) #rainfall for dam1
#servo2 = Servo(5) #rainfall for dam2
in1 = 10
in2 = 9
GPIO.setmode(GPIO.BCM)
GPIO.setup(in1,GPIO.OUT)
GPIO.setup(in2,GPIO.OUT)
GPIO.setup(TRIG2,GPIO.OUT)
GPIO.setup(ECHO2,GPIO.IN)
GPIO.setup(TRIG3,GPIO.OUT)
GPIO.setup(ECHO3,GPIO.IN)
GPIO.setup(IN_FLOW_SENSOR, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
scope = ["https://spreadsheets.google.com/feeds",'https://www.googleapis.com/auth/spreadsheets',"https://www.googleapis.com/auth/drive.file","https://www.googleapis.com/auth/drive"]
creds = ServiceAccountCredentials.from_json_keyfile_name(r"/home/pi/Downloads/credential.json", scope)
client = gspread.authorize(creds)
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = r"/home/pi/Downloads/MECCLOUDTEST-468f2ea6f2c4.json"
sheet = client.open("MEC ").sheet1
# Module-level assignments are already global, so no `global` statement is needed here.
distance_11 = 0
distance_1 = 0
distance_2 = 0
distance_22 = 0
distance_33 = 0
distance_3 = 0
def upload_to_firestore_level(level):
db = firestore.Client()
doc_ref = db.collection(u'users').document(u'B2hzw68AZfJ0Hv0h4ORa')
doc_ref.update({
u'level':level,
})
def upload_to_firestore_level_in_canal(level_in_canal):
db = firestore.Client()
doc_ref = db.collection(u'users').document(u'B2hzw68AZfJ0Hv0h4ORa')
doc_ref.update({
u'level_in_canal':level_in_canal,
})
def upload_to_firestore_trigger(trigger):
db = firestore.Client()
doc_ref = db.collection(u'users').document(u'B2hzw68AZfJ0Hv0h4ORa')
doc_ref.update({
u'trigger':trigger,
})
def level3(): #level in dam2
global distance_33,distance_3
distance_33 = distance_3
GPIO.output(TRIG3, False)
time.sleep(2)
GPIO.output(TRIG3, True)
time.sleep(0.00001)
GPIO.output(TRIG3, False)
    pulse_start = pulse_end = time.time()  # initialize so a missed echo cannot leave these unbound
    while GPIO.input(ECHO3)==0:
        pulse_start = time.time()
    while GPIO.input(ECHO3)==1:
        pulse_end = time.time()
    pulse_duration = pulse_end - pulse_start
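    # distance = pulse_duration * 17150: speed of sound ~= 34300 cm/s, halved for the round trip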
distance_3 = pulse_duration*17150
distance_3 = round(distance_3, 2)
print("Distance:",distance_3,"cm")
upload_to_firestore_level(distance_3)
if distance_33 >= distance_3+1 or distance_33 <=distance_3-1: #check if 1 precision is enough
print("upload to firestore",distance_3) #upload data to firestore
def level2(): #level in canal
global distance_22,distance_2
distance_22 = distance_2
GPIO.output(TRIG2, False)
time.sleep(2)
GPIO.output(TRIG2, True)
time.sleep(0.00001)
GPIO.output(TRIG2, False)
    pulse_start = pulse_end = time.time()  # initialize so a missed echo cannot leave these unbound
    while GPIO.input(ECHO2)==0:
        pulse_start = time.time()
    while GPIO.input(ECHO2)==1:
        pulse_end = time.time()
    pulse_duration = pulse_end - pulse_start
distance_2 = pulse_duration*17150
distance_2 = round(distance_2, 2)
print("Distance:",distance_2,"cm")
upload_to_firestore_level_in_canal(distance_2)
if distance_22 >= distance_2+1 or distance_22 <= distance_2-1: #check if 1 precision is enough
print("upload to firestore",distance_2) #upload data to firestore
def motor_open():
# servo1 = Servo(6)
GPIO.output(in1,False)
GPIO.output(in2,True)
sleep(5) #time to open #give x according to the release value, i.e 1lt or 2lts
GPIO.output(in1,True)
GPIO.output(in2,False)
sleep(5)
print("in in2")#time to close #we do not keep any holding time
GPIO.output(in1,True)
GPIO.output(in2,True)
sleep(5)
GPIO.output(in1,True)
GPIO.output(in2,True)
def rainfall():
servo1.min()
sleep(5) #change the value according to the experiment
servo1.max()#change the value according to the experiment
sleep(5)
count1 = 0
count2 = 0
count_a = count1
def countPulse1(channel):
global count1,count_a
count1 = count1+1
if count_a==count1-5:
print("greater than 5")
count_a=count1
print("count1 = ",count1,"count_a = ",count_a)
upload_to_firestore_trigger(1)
sleep(3)
motor_open()
sleep(1)
rainfall()
print(GPIO.input(23))
level3()
level2()
|
import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import os
from torch.utils.data import Dataset, DataLoader
from data_set import MyData, logger, collate_fn
import numpy as np
class Network(nn.Module):
def __init__(self, in_dim, hidden_dim, n_layer, n_classes, bias=True, batch_first=True, dropout=0.,
bidirectional=False):
"""
:param in_dim: 输入单个数据维数
:param hidden_dim: 隐藏层维数
:param n_layer: LSTM叠加层数
:param n_classes: 分类器
"""
super(Network, self).__init__()
self.n_layer = n_layer
self.hidden_dim = hidden_dim
self.batch_first = batch_first
self.lstm = nn.LSTM(in_dim, hidden_dim, n_layer, batch_first=batch_first, bias=bias,
dropout=dropout, bidirectional=bidirectional)
self.classifier = nn.Linear(hidden_dim, n_classes)
def forward(self, inputs):
y_lstm, (h_n, c_n) = self.lstm(inputs)
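        # h_n has shape (n_layer * num_directions, batch, hidden_dim); use the last layer's final hidden state as the sequence summary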
x = h_n[-1, :, :]
x = self.classifier(x)
return x
root_dir = './data'
train_dir = 'train'
valid_dir = 'valid'
test_dir = 'test'
Batch_size = 16
INPUT_DIM = 2
HIDDEN_DIM = 32
LSTM_LAYERS = 2
OUT_DIM = 1
LR = 0.02
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
torch.manual_seed(1)
trainloader = DataLoader(MyData(root_dir, train_dir), batch_size=Batch_size, shuffle=True, collate_fn=collate_fn)
validloader = DataLoader(MyData(root_dir, valid_dir), batch_size=Batch_size, shuffle=True, collate_fn=collate_fn)
testloader = DataLoader(MyData(root_dir, test_dir), batch_size=1, shuffle=True, collate_fn=collate_fn)
# TODO: cross-validation
net = Network(in_dim=INPUT_DIM, hidden_dim=HIDDEN_DIM, n_layer=LSTM_LAYERS, n_classes=OUT_DIM).to(device)
criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=LR)
def train(epoch):
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
print("Train: ")
batch_idx = 0
for inputs, datas_length, targets in trainloader:
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
inputs_pack = pack_padded_sequence(inputs, datas_length, batch_first=True)
outputs = net(inputs_pack)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
predicted = outputs
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
batch_idx += 1
print(batch_idx, len(trainloader), 'Loss: %.3f ' %
(train_loss / (batch_idx + 1)))
def valid(epoch):
global best_acc
net.eval()
test_loss = 0
correct = 0
total = 0
batch_idx = 0
print("Vaild: ")
with torch.no_grad():
for inputs, datas_length, targets in validloader:
inputs, targets = inputs.to(device), targets.to(device)
inputs_pack = pack_padded_sequence(inputs, datas_length, batch_first=True)
outputs = net(inputs_pack)
loss = criterion(outputs, targets)
test_loss += loss.item()
predicted = outputs
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
batch_idx += 1
print(batch_idx, len(validloader), 'Loss: %.3f '
% (test_loss / (batch_idx + 1)))
def Test():
net.eval()
print("Test: ")
    for inputs, datas_length, targets in testloader:  # use the test split (was validloader, presumably a slip)
inputs, targets = inputs.to(device), targets.to(device)
inputs_pack = pack_padded_sequence(inputs, datas_length, batch_first=True)
outputs = net(inputs_pack)
        predicted = max(outputs.detach().cpu().numpy().reshape(1, -1)[0])
print(predicted)
# predicted = outputs.numpy()
#
# print(" Prediction : {0}".format(predicted))
if __name__ == '__main__':
for epoch in range(20):
train(epoch)
valid(epoch)
Test()
|
def new(X, Y, room, team, num):
if room.find("1") == -1: # 방이 다 비었을 때
room = "1"*Y + room[Y:]
team[num] = "%d:%d:%d" % (X, 0, Y)
print("%d %d" % (1, Y))
return room, team, 0
elif room.find("0") == -1 or room.find("%s" % "0"*Y) == -1: # 모든 방이 사용중일 때, 원하는 평수의 방이 없을 때
print("REJECTED"); return room, team, -1
else:
start = room.find("%s" % "0"*Y)
if start != 0:
room = room[0:start] + "1"*Y + room[start+Y:]
else:
room = "1"*Y + room[Y:]
team[num] = "%d:%d:%d" % (X, start, start+Y)
print("%d %d" % (start+1, start+Y))
return room, team, 0
def out(A, B, room, team):
n, L, R = int(team[A].split(":")[0]), int(team[A].split(":")[1]), int(team[A].split(":")[2])
n -= B
if n == 0:
print("CLEAN %d %d" % (L+1, R))
if L == 0:
room = "0"*(R) + room[R:]
else:
room = room[0:L] + "0"*(R-L) + room[R:]
return room, team
else:
team[A] = "%d:%d:%d" % (n, L, R)
return room, team
NQ = input()
N, Q = int(NQ.split()[0]), int(NQ.split()[1])
# Room number : 1 ~ N -> 0 ~ N-1
roomStat = "0" * N
teamMem = {}
teamNum = 1
for i in range(Q):
line = input().split()
if line[0] == "new":
roomStat, teamMem, status = new(int(line[1]), int(line[2]), roomStat, teamMem, teamNum)
if status == 0:
teamNum += 1
elif line[0] == "in":
A, B = int(line[1]), int(line[2])
n, L, R = int(teamMem[A].split(":")[0]), int(teamMem[A].split(":")[1]), int(teamMem[A].split(":")[2])
n += B
teamMem[A] = "%d:%d:%d" % (n, L, R)
elif line[0] == "out":
A, B = int(line[1]), int(line[2])
roomStat, teamMem = out(A, B, roomStat, teamMem)
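# Illustrative session (hypothetical input) for the new/in/out protocol:
#   input : 10 2          -> N=10 rooms, Q=2 commands
#           new 5 3       -> prints "1 3"       (team 1: 5 people get rooms 1..3)
#           out 1 5       -> prints "CLEAN 1 3" (team 1 empties; its rooms are freed)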
|
import sys
import boto3
import os
def list_objects(start_after="tt0000000/"):
bucket = os.environ["MOVIES_BUCKET"]
client = boto3.client('s3')
objects = client.list_objects_v2(
Bucket=bucket,
Prefix='tt',
StartAfter=start_after,
Delimiter="/",
)
return objects
def lambda_handler(event,context):
imdb_ids = set()
objects = list_objects("tt0000000/")
    while "CommonPrefixes" in objects:  # dict.has_key() no longer exists in Python 3
for data in objects["CommonPrefixes"]:
imdb_ids.add(data["Prefix"].split("/")[0])
start_after=data["Prefix"]
objects = list_objects(start_after)
return sorted(imdb_ids)
if __name__ == "__main__":
print(lambda_handler(None, None))
|
from prodict import Prodict
from aioredis.pool import ConnectionsPool
class Partner:
init: str
sync: str
class State(Prodict):
redis_pool: ConnectionsPool
partners: Prodict
def get_partner(self, partner) -> Partner:
if partner:
return self.partners.get(partner, None)
|
"""
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import keras
__author__ = 'roeiherz'
class RedirectModel(keras.callbacks.Callback):
"""Callback which wraps another callback, but executed on a different model.
```python
model = keras.models.load_model('model.h5')
model_checkpoint = ModelCheckpoint(filepath='snapshot.h5')
parallel_model = multi_gpu_model(model, gpus=2)
parallel_model.fit(X_train, Y_train, callbacks=[RedirectModel(model_checkpoint, model)])
```
Args
callback : callback to wrap.
model : model to use when executing callbacks.
"""
def __init__(self,
callback,
model):
super(RedirectModel, self).__init__()
self.callback = callback
self.redirect_model = model
def on_epoch_begin(self, epoch, logs=None):
self.callback.on_epoch_begin(epoch, logs=logs)
def on_epoch_end(self, epoch, logs=None):
self.callback.on_epoch_end(epoch, logs=logs)
def on_batch_begin(self, batch, logs=None):
self.callback.on_batch_begin(batch, logs=logs)
def on_batch_end(self, batch, logs=None):
self.callback.on_batch_end(batch, logs=logs)
def on_train_begin(self, logs=None):
# overwrite the model with our custom model
self.callback.set_model(self.redirect_model)
self.callback.on_train_begin(logs=logs)
def on_train_end(self, logs=None):
self.callback.on_train_end(logs=logs)
class Evaluate(keras.callbacks.Callback):
"""
Evaluation callback for arbitrary datasets.
"""
def __init__(self, dataset, iou_threshold=0.5, save_path=None, tensorboard=None, verbose=1, config=None):
"""
Evaluate a given dataset using a given model at the end of every epoch during training.
# Arguments
dataset : The generator that represents the dataset to evaluate.
iou_threshold : The threshold used to consider when a detection is positive or negative.
save_path : The path to save images with visualized detections to.
tensorboard : Instance of keras.callbacks.TensorBoard used to log the mAP value.
verbose : Set the verbosity level, by default this is set to 1.
"""
self.dataset = dataset
self.iou_threshold = iou_threshold
self.save_path = save_path
self.tensorboard = tensorboard
self.verbose = verbose
self.config = config
super(Evaluate, self).__init__()
def on_epoch_end(self, epoch, logs=None):
from samples.bdd100k.BDD100K import evaluate
logs = logs or {}
# run evaluation
average_precisions = evaluate(
self.dataset,
self.model,
iou_threshold=self.iou_threshold,
save_path=self.save_path,
config=self.config
)
self.mean_ap = sum(average_precisions.values()) / len(average_precisions)
if self.tensorboard is not None and self.tensorboard.writer is not None:
import tensorflow as tf
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = self.mean_ap
summary_value.tag = "mAP"
self.tensorboard.writer.add_summary(summary, epoch)
logs['mAP'] = self.mean_ap
if self.verbose == 1:
for label, average_precision in average_precisions.items():
print(self.dataset.class_names[label], '{:.4f}'.format(average_precision))
print('mAP: {:.4f}'.format(self.mean_ap))
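# Hypothetical wiring sketch (placeholder names such as `val_dataset` and
# `train_generator` are not part of this module):
#
#   tb = keras.callbacks.TensorBoard(log_dir='./logs')
#   evaluate_cb = Evaluate(val_dataset, tensorboard=tb, verbose=1)
#   model.fit_generator(train_generator, epochs=10, callbacks=[tb, evaluate_cb])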
|
import pygame
import time
import random
#import subprocess
#import os
pygame.init()
gameDisplay = pygame.display.set_mode()
pygame.display.set_caption('Get Over Here Mr.Mike!!!')
clock = pygame.time.Clock()
def blitImg(img, x, y):
gameDisplay.blit(pygame.image.load(str(img)),(x,y))
def read_file(filename):
f = open(filename,'r')
line = f.readline()
f.close()
return line
troublemaker = read_file('Player1.txt')
print(troublemaker)
troublemaker2 = read_file('Player2.txt')
player = pygame.image.load(str(troublemaker))
#print(player,"Player")
gameIcon = pygame.image.load('icon1.png')
pygame.display.set_icon(gameIcon)
black = (0,0,0)
white = (255,255,255)
brown = (160,113,69)
red = (200,0,0)
green = (0,200,0)
yellow = (255,255,0)
gold = (255,215,0)
bright_red = (255,0,0)
bright_green = (0,255,0)
dark_grey = (200,200,200)
grey = (215,215,215)
bright_grey = (225,225,225)
blue = (25,50,255)
bright_blue = (100,150,255)
first = True
class Mr:
Mike = "MR.MIKE GET OVER HERE"
cryForHelp = Mr.Mike
def Credits():
Credit = True
while Credit:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
gameDisplay.fill(white)
largeText = pygame.font.Font("Raleway-Medium.ttf",90)
TextSurf, TextRect = text_objects(('Made By: Keith Farr and'),largeText)
TextRect.center = ((640),(200))
gameDisplay.blit(TextSurf, TextRect)
largeText = pygame.font.Font("Raleway-Medium.ttf",90)
TextSurf, TextRect = text_objects(('Ibrahim Dabarani'),largeText)
TextRect.center = ((640),(280))
gameDisplay.blit(TextSurf, TextRect)
button("Back",490,500,300,100,red,bright_red,main)
pygame.display.update()
clock.tick(15)
def quitgame():
pygame.quit()
quit()
def imgButton(msg,x,y,w,h,ic,ac,func,params,action=None,paramsSecond=None):
(params1,params2,params3) = params
(params4,params5) = paramsSecond
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
if x+w > mouse[0] > x and y+h >mouse[1] > y:
pygame.draw.rect(gameDisplay, ac,(x,y,w,h))
if click[0] == 1 and action != None:
# pygame.draw.rect(gameDisplay, bright_green,(x,y,w,h))
# time.sleep(.5)
if params2 != None:
action(params4,params5)
else:
action()
pygame.draw.rect(gameDisplay, green,(x,y,w,h))
# main()
else:
pygame.draw.rect(gameDisplay, ic,(x,y,w,h))
smallText = pygame.font.Font("Raleway-Medium.ttf",45)
textSurf, textRect = text_objects(msg, smallText)
textRect.center = ((x+(w/2)), (y+(h/2)))
gameDisplay.blit(textSurf, textRect)
func(params1,params2,params3)
def button(msg,x,y,w,h,ic,ac,action=None,params=None):
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
if x+w > mouse[0] > x and y+h >mouse[1] > y:
pygame.draw.rect(gameDisplay, ac,(x,y,w,h))
if click[0] == 1 and action != None:
if params != None:
(params1, params2) = params
action(params1,params2)
else:
action()
else:
pygame.draw.rect(gameDisplay, ic,(x,y,w,h))
smallText = pygame.font.Font("Raleway-Medium.ttf",45)
textSurf, textRect = text_objects(msg, smallText)
textRect.center = ((x+(w/2)), (y+(h/2)))
gameDisplay.blit(textSurf, textRect)
def quad(color,x,y,w,h,t):
pygame.draw.rect(gameDisplay,color,(x,y,w,h),t)
def line(color,closed,points,t):
pygame.draw.lines(gameDisplay,color,closed,points,t)
def write_file(filename,text):
f = open(filename,'w')
f.write(text)
f.close()
def text_objects(text, font):
textSurface = font.render(text, True, black)
return textSurface, textSurface.get_rect()
def text(msg, x, y, size=100):
largeText = pygame.font.Font("Raleway-Medium.ttf", size)
TextSurf, TextRect = text_objects((msg), largeText)
TextRect.center = ((x),(y))
gameDisplay.blit(TextSurf, TextRect)
def mobBoss():
while True:
gameDisplay.fill(white)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
blitImg("MobBoss.png",640,0)
text("Complete the Campaign to Unlock", 640, 300, 70)
button("Back",490,500,300,100,red,bright_red,playerSelect)
pygame.display.update()
clock.tick(120)
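# NOTE: this no-argument playerSelect is shadowed by the parameterized
# playerSelect defined later in this file; only the later definition is ever called.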
def playerSelect():
global first
first = False
nameSize = 25
quoteSize = 18
while True:
gameDisplay.fill(white)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
# Title
text('Select Your Trouble Maker!',650, 60, 90)
# Player1
imgButton('', 95, 115, 260,310, white, grey, blitImg,
('player1.png', 100, 120), write_file,
('Player1.txt','player1.png'))
text('Ibrahim Dabarani:', 220, 460, nameSize)
text('"Gimme your lunch money!"', 220, 490, quoteSize)
# Player2
imgButton('', 375, 115, 260,310, white, grey, blitImg,
('player2.png', 380, 120), write_file,
('Player1.txt','player2.png'))
text('Keith Farr:', 500, 460, nameSize)
text('"Keith was here"', 500, 490, quoteSize)
# Player3
imgButton('', 655, 115, 260,310, white, grey, blitImg,
('player3.png', 660, 120), write_file,
('Player1.txt','player3.png'))
text('Kaden Chin-Massey:', 780, 460, nameSize)
text('"I will take your juice box!"', 780, 490, quoteSize)
# Player4
imgButton('', 935, 115, 260,310, white, grey, blitImg,
('player4.png', 940, 120), write_file,
('Player1.txt','player4.png'))
text('Mohamud Hassan:', 1060, 460, nameSize)
text('"Can you gimme my bak-pak."', 1060, 490, quoteSize)
# Mob Boss
button('Mob Boss',275,580,300,100,dark_grey,grey,mobBoss)
button("Back",725,580,300,100,red,bright_red,main)
troublemaker = read_file('Player1.txt')
troublemaker2 = read_file('Player2.txt')
pygame.display.update()
clock.tick(30)
def main():
time.sleep(.2)
while True:
gameDisplay.fill(white)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
## button("Start!",275,450,300,100,green,bright_green,readyPlayerOne)
## button("Quit",725,450,300,100,red,bright_red,quitgame)
## button("Select Player",275,575,300,100,gold,yellow,playerSelect)
## button("Credits",725,575,300,100,dark_grey,grey)
button("1 Player",275,450,300,100,gold,yellow,singlePlayer)
button("2 Players",725,450,300,100,blue,bright_blue,dualPlayer)
button("Credits",275,575,300,100,dark_grey,grey,Credits)
button("Quit",725,575,300,100,red,bright_red,quitgame)
text(cryForHelp, 640, 200)
pygame.display.update()
clock.tick(120)
def win():
gameDisplay.fill(white)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
for i in range(27):
# time.sleep(.1)
blitImg("dance"+str(i+1)+".png",-500,-100)
pygame.display.update()
clock.tick(120)
        for i in range(26, -1, -1):  # reverse playback; range(0, 27, -1) is empty
# time.sleep(.1)
blitImg("dance"+str(i+1)+".png",-500,-100)
pygame.display.update()
clock.tick(120)
def playerSelect(b1Message='Start',dual=False,sub=''):
global first
first = False
nameSize = 25
quoteSize = 18
if dual:
action = player2select
else:
action = loop
while True:
gameDisplay.fill(white)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
# Title
text('Select Your Trouble Maker!',650, 60, 90)
# SubTitle
text(sub,650, 70, 30)
# Player1
imgButton('', 95, 115, 260,310, white, grey, blitImg,
('player1.png', 100, 120), write_file,
('Player1.txt','player1.png'))
text('Ibrahim Dabarani:', 220, 460, nameSize)
text('"Gimme your lunch money!"', 220, 490, quoteSize)
# Player2
imgButton('', 375, 115, 260,310, white, grey, blitImg,
('player2.png', 380, 120), write_file,
('Player1.txt','player2.png'))
text('Keith Farr:', 500, 460, nameSize)
text('"Keith was here"', 500, 490, quoteSize)
# Player3
imgButton('', 655, 115, 260,310, white, grey, blitImg,
('player3.png', 660, 120), write_file,
('Player1.txt','player3.png'))
text('Kaden Chin-Massey:', 780, 460, nameSize)
text('"I will take your juice box!"', 780, 490, quoteSize)
# Player4
imgButton('', 935, 115, 260,310, white, grey, blitImg,
('player4.png', 940, 120), write_file,
('Player1.txt','player4.png'))
text('Mohamud Hassan:', 1060, 460, nameSize)
text('"Can you gimme my bak-pak."', 1060, 490, quoteSize)
## # Mob Boss
## button('Mob Boss',275,580,300,100,dark_grey,grey,mobBoss)
# Buttons
button(b1Message,275,580,300,100,green,bright_green,action) #b1
button("Back",725,580,300,100,red,bright_red,main)
troublemaker = read_file('Player1.txt')
pygame.display.update()
clock.tick(30)
def player2select(b1Message='Start'):
global first
first = False
nameSize = 25
quoteSize = 18
while True:
gameDisplay.fill(white)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
#This is hidden
# Title
text('Select Your Trouble Maker!',650, 60, 90)
# Player1
imgButton('', 95, 115, 260,310, white, grey, blitImg,
('player1.png', 100, 120), write_file,
('Player2.txt','player1.png'))
text('Ibrahim Dabarani:', 220, 460, nameSize)
text('"Gimme your lunch money!"', 220, 490, quoteSize)
# Player2
imgButton('', 375, 115, 260,310, white, grey, blitImg,
('player2.png', 380, 120), write_file,
('Player2.txt','player2.png'))
text('Keith Farr:', 500, 460, nameSize)
text('"Keith was here"', 500, 490, quoteSize)
# Player3
imgButton('', 655, 115, 260,310, white, grey, blitImg,
('player3.png', 660, 120), write_file,
('Player2.txt','player3.png'))
text('Kaden Chin-Massey:', 780, 460, nameSize)
text('"I will take your juice box!"', 780, 490, quoteSize)
# Player4
imgButton('', 935, 115, 260,310, white, grey, blitImg,
('player4.png', 940, 120), write_file,
('Player2.txt','player4.png'))
text('Mohamud Hassan:', 1060, 460, nameSize)
text('"Can you gimme my bak-pak."', 1060, 490, quoteSize)
## # Mob Boss
## button('Mob Boss',275,580,300,100,dark_grey,grey,mobBoss)
# Buttons
button(b1Message,275,580,300,100,green,bright_green,loop) #b1
button("Back",725,580,300,100,red,bright_red,main)
troublemaker2 = read_file('Player2.txt')
pygame.display.update()
clock.tick(30)
def singlePlayer():
playerSelect()
def dualPlayer():
playerSelect("Next",True,"Troublemaker No.1")
def drawPlayer(dplayer):
mikeX,mikeY = 100,200
car1X,car1Y = 100,200
car2X,car2Y = 100,200
car3X,car3Y = 100,200
if troublemaker == "player1.png":
player = "face1.png"
x,y = car1X,car1Y
elif troublemaker == "player2.png":
player = "face2.png"
x,y = car2X,car2Y
elif troublemaker == "player3.png":
player = "face3.png"
x,y = car3X,car3Y
elif troublemaker == "player4.png":
player = "face4.png"
x,y = mikeX,mikeY
else:
player = "icon1.png"
if dplayer == "player1.png":
# Mike car
mike = blitImg("lamboveiw1.png",mikeX,mikeY)
blitImg(player,mikeX+75,mikeY+40)
if dplayer == "player2.png":
# Ibrahim car
blitImg("car1.png",x,y)
blitImg(player,x+70,y+75)
if dplayer == "player3.png":
# Keith car
blitImg("car2.png",car2X,car2Y)
blitImg(player,x+75,y+48)
if dplayer == "player4.png":
        # Kaden car
blitImg("Car3.png",car3X,car3Y)
blitImg(player,x+72,y+55)
if dplayer == "player5.png":
# Mohamud
pass
def game_over():
while True:
gameDisplay.fill(white)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
text("Game Over!",640,100)
# Back
button("Back",725,580,300,100,red,bright_red,main)
pygame.display.update()
clock.tick(120)
#***********************#
# Main Game: #
# Function/Loop #
#***********************#
def loop():
global first
first = True
crashed = False
troublemaker = read_file('Player1.txt')
troublemaker2 = read_file('Player2.txt')
up,down,left,right,b,a = 0,0,0,0,0,0
deltaX = 0
deltaY = 0
    ## Start coordinates
mikeX,mikeY = 100,100
car1X,car1Y = 100,10
car2X,car2Y = 300,200
car3X,car3Y = 100,200
if troublemaker == "player1.png":
player = "face1.png"
x,y = car1X,car1Y
elif troublemaker == "player2.png":
player = "face2.png"
x,y = car2X,car2Y
elif troublemaker == "player3.png":
player = "face3.png"
x,y = car3X,car3Y
elif troublemaker == "player4.png":
player = "face4.png"
x,y = mikeX,mikeY
else:
player = "icon1.png"
def drawPlayer(dplayer):
if dplayer == "player1.png":
# Mike car
mike = blitImg("Lamboveiw1.png",x,y)
blitImg(player,x+75,y+40)
print("Mike")
if dplayer == "player2.png":
# Ibrahim car
blitImg("Car1.png",x,y)
blitImg(player,x+70,y+75)
print("Brahim")
if dplayer == "player3.png":
# Keith car
blitImg("Car2.png",x,y)
blitImg(player,x+75,y+48)
print("keith")
if dplayer == "player4.png":
            # Kaden car
blitImg("Car3.png",x,y)
blitImg(player,x+72,y+55)
print("kaden")
if dplayer == "player5.png":
# Mohamud
print("mohom")
pass
time.sleep(.2)
while True:
gameDisplay.fill(white)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
## Controls
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP:
deltaY -= 10
up += 1
if event.key == pygame.K_DOWN:
deltaY += 10
down += 1
if event.key == pygame.K_LEFT:
deltaX -= 10
left += 1
if event.key == pygame.K_RIGHT:
deltaX += 10
right += 1
if event.key == pygame.K_b:
b += 1
if event.key == pygame.K_a:
a += 1
if event.type == pygame.KEYUP:
if event.key == pygame.K_UP:
deltaY = 0
if event.key == pygame.K_DOWN:
deltaY = 0
if event.key == pygame.K_LEFT:
deltaX = 0
if event.key == pygame.K_RIGHT:
deltaX = 0
if x > 495:
crashed = True
if up == 2 and down == 2 and left == 2 and right == 2 and b == 1 and a == 1:
x,y = random.randint(0,1280),random.randint(0,720)
win()
        ##Collision
if crashed == True:
print ("crashed")
game_over()
drawPlayer(troublemaker)
y += deltaY
x += deltaX
##Back
button("Back",725,580,300,100,red,bright_red,main)
line(black,False,[(640,0),(640,1.1234e9)],10)
pygame.display.update()
clock.tick(30)
main()
pygame.quit()
quit()
|
def tidyNumber(n):
l_n = list(str(n))
return sorted(l_n) == l_n
'''
Definition
A Tidy number is a number whose digits are in non-decreasing order.
Task
Given a number, determine whether it is a Tidy number.
Notes
Number passed is always Positive .
Return the result as a Boolean
Input >> Output Examples
tidyNumber (12) ==> return (true)
Explanation:
The number's digits { 1 , 2 } are in non-Decreasing Order (i.e) 1 <= 2 .
tidyNumber (32) ==> return (false)
Explanation:
The Number's Digits { 3, 2} are not in non-Decreasing Order (i.e) 3 > 2 .
tidyNumber (1024) ==> return (false)
Explanation:
The number's digits {1, 0, 2, 4} are not in non-decreasing order, since 1 > 0.
tidyNumber (13579) ==> return (true)
Explanation:
The number's digits {1 , 3, 5, 7, 9} are in non-Decreasing Order .
tidyNumber (2335) ==> return (true)
Explanation:
The number's digits {2 , 3, 3, 5} are in non-Decreasing Order , Note 3 <= 3
'''
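if __name__ == '__main__':
    # Spot-checks taken from the examples above.
    assert tidyNumber(12) is True
    assert tidyNumber(32) is False
    assert tidyNumber(1024) is False
    assert tidyNumber(13579) is True
    assert tidyNumber(2335) is True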
|
# -*- coding: utf-8 -*-
from collections import Counter
class Solution:
def domainToLevels(self, domain):
parts = domain.split(".")
if len(parts) == 3:
return [".".join(parts[-3:]), ".".join(parts[-2:]), parts[-1]]
elif len(parts) == 2:
return [".".join(parts[-2:]), parts[-1]]
def subdomainVisits(self, cpdomains):
result = Counter()
for cpdomain in cpdomains:
count, domain = cpdomain.split()
for level in self.domainToLevels(domain):
result[level] += int(count)
return [
"%d %s" % (count, level) for level, count in result.items()
] # noqa: F812
if __name__ == "__main__":
solution = Solution()
assert sorted(
["9001 discuss.leetcode.com", "9001 leetcode.com", "9001 com"]
) == sorted(solution.subdomainVisits(["9001 discuss.leetcode.com"]))
|
#! /usr/bin/env python
from djl_ui import *
from djl_input import *
from djl_post_responder import *
from djl_post_processor import *
djl_seperator()
print_exit_tip()
fb = FacebookAccessInput()
fb.show()
year = YearInput(fb.me["birthday"])
year.show()
processor = PostProcessor(year.birthdate, fb)
processor.get_posts()
prompt_for_continue()
reply_template = ReplyTemplateInput()
reply_template.show()
print_begin_responding()
print_confirm_continue()
GenericResponder(processor.generic_posts, reply_template.val, fb.graph).respond()
SpecialResponder(processor.special_posts, reply_template.val, fb.graph).respond()
|
from sys import stdin, stdout
n = int(input())
negatives = []
positives = []
for i in range(2):
    negatives.append([])
    positives.append([])
sums = 0
answer = [0]*n
for i in range(n):
    x = float(input().split()[0])
# print(x)
    if x < 0:
        if abs(int(x) - x) >= 10**-6:
            negatives[1].append((i, x))
            sums += int(x)
            answer[i] = int(x)
        else:
            negatives[0].append((i, x))
            sums += int(x)
            answer[i] = int(x)
if x > 0:
if abs(int(x) - x) >= 10**-6:
positives[1].append((i, x))
sums += int(x)
answer[i] = int(x)
else:
positives[0].append((i, x))
sums += int(x)
answer[i] = int(x)
if sums > 0:
    for i in range(sums):
        answer[negatives[1][i][0]] -= 1
elif sums < 0:
    for i in range(abs(sums)):
        answer[positives[1][i][0]] += 1
for i in range(n):
    print(answer[i])
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a project hierarchy created with the --generator-output=
option can be built even when it's relocated to a different path.
"""
import TestGyp
import os
test = TestGyp.TestGyp()
test.run_gyp('standalone.gyp', '-Gstandalone')
# Look at all the files in the tree to make sure none
# of them reference the gyp file.
for root, dirs, files in os.walk("."):
for file in files:
# ignore ourself
if os.path.splitext(__file__)[0] in file:
continue
file = os.path.join(root, file)
    with open(file) as f:
      contents = f.read()
    if 'standalone.gyp' in contents:
      print('gyp file referenced in generated output: %s' % file)
test.fail_test()
test.pass_test()
|
n = int(input())  # raw_input is Python 2 only
arr = list()
for i in range(1,n+1):
if i%3 == 0 and i%5 ==0:
arr.append("FizzBuzz")
elif i%3 == 0:
arr.append("Fizz")
elif i%5 == 0:
arr.append("Buzz")
else:
arr.append(str(i))
print(arr)
|
# Let us denote A -> B as the process where B is obtained by summing all
# the factorial of the digits of A.
# We have:
# 145 -> 145
# 169 -> ... -> 169
# 871 -> ... -> 871
# 872 -> ... -> 872
#
# For a natural number A, we denote NonRepeatTerm(A) to be the number of
# non-repeating terms in the chain A -> ... -> ...
# We have:
# NonRepeatTerm(145)= 1
# NonRepeatTerm(169) = NonRepeatTerm(36301) = NonRepeatTerm(1454) = 3
# NonRepeatTerm(871) = NonRepeatTerm(45361) = 2
# NonRepeatTerm(872) = NonRepeatTerm(45362) = 2
# NonRepeatTerm(1) = NonRepeatTerm(0) = NonRepeatTerm(2) = 1 trivially
#
# TASK: Count the number of chains, with a starting number below 10^6, that
# contain exactly 60 non-repeating terms.
#
# Note that only 0, 1, 2, 145, 169, 871, 872, 36301, 1454, 45361, 45362 have
# the loop properties.
FACTORIAL_DIGIT = []
LIMIT = 1000000
RECURSE_LIMIT = 80
# Global sieve for memoization purposes.
NonRepeatTerm = []
# Count numbers X such that NonRepeatTerm(X) = L where L is the specified input.
# Assumption: L < RECURSE_LIMIT
def countChain(L):
# Pre-processing
preComputeFactorialDigit()
computeNonRepeatTerm()
# Counting!
count = 0
for n in range(0, LIMIT):
if NonRepeatTerm[n] == L:
count += 1
return count
# Compute min(NonRepeatTerm(N), RECURSE_LIMIT) for all N < LIMIT
def computeNonRepeatTerm():
global NonRepeatTerm
NonRepeatTerm = [0] * LIMIT
# Initialize the array with some special numbers N (that result in a loop
# in the chain N -> ... -> ...)
NonRepeatTerm[0] = NonRepeatTerm[1] = NonRepeatTerm[2] = 1
NonRepeatTerm[145] = 1
NonRepeatTerm[169] = NonRepeatTerm[36301] = NonRepeatTerm[1454] = 3
NonRepeatTerm[871] = NonRepeatTerm[45361] = 2
NonRepeatTerm[872] = NonRepeatTerm[45362] = 2
for n in range(0, LIMIT):
if NonRepeatTerm[n] == 0:
countNumNonRepeatTerm(n, 1)
# Compute min(NonRepeatTerm(N), RECURSE_LIMIT) for a specific N
# When the number of recursion levels exceeds RECURSE_LIMIT, we return
# RECURSE_LIMIT
def countNumNonRepeatTerm(N, recurseLevel):
global NonRepeatTerm
if recurseLevel > RECURSE_LIMIT:
return RECURSE_LIMIT
if N < LIMIT and NonRepeatTerm[N] != 0:
return NonRepeatTerm[N]
# When reaching here, N is not special (i.e. it does not generate a loop
# to itself in the chain N -> ... -> ...). Therefore, we have:
# NonRepeatTerm(N) = 1 + NonRepeatTerm(X) where X is obtained by
# summing factorials of all the digits of N.
    result = 0
nextResult = countNumNonRepeatTerm(sumFactorialDigit(N), recurseLevel + 1)
if nextResult == RECURSE_LIMIT:
result = RECURSE_LIMIT
else:
result = 1 + nextResult
if N < LIMIT:
NonRepeatTerm[N] = result
return result
# Pre-compute factorial of digits 0, 1, ..., 9
def preComputeFactorialDigit():
global FACTORIAL_DIGIT
FACTORIAL_DIGIT = [1]*10
for n in range(2, 10):
FACTORIAL_DIGIT[n] = n * FACTORIAL_DIGIT[n - 1]
# Compute the sum
def sumFactorialDigit(N):
mySum = 0
while N != 0:
mySum += FACTORIAL_DIGIT[N % 10]
N //= 10
return mySum
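# Usage sketch: Project Euler 74 asks how many chains starting below one
# million contain exactly sixty non-repeating terms.
if __name__ == '__main__':
    print(countChain(60))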
|
# pihsm: Turn your Raspberry Pi into a Hardware Security Module
# Copyright (C) 2017 System76, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from unittest import TestCase
import os
import nacl.signing
from .helpers import random_u64
from .. import sign
class TestFunctions(TestCase):
    def test_build_signing_form(self):
previous = os.urandom(64)
public = os.urandom(32)
msg = os.urandom(48)
self.assertEqual(
sign.build_signing_form(public, previous, 0, 0, msg),
public + previous + (b'\x00' * 16) + msg
)
self.assertEqual(
sign.build_signing_form(public, previous, 0, 0, b''),
public + previous + (b'\x00' * 16)
)
cnt = os.urandom(8)
ts = os.urandom(8)
counter = int.from_bytes(cnt, 'little')
timestamp = int.from_bytes(ts, 'little')
self.assertEqual(
sign.build_signing_form(public, previous, counter, timestamp, msg),
public + previous + cnt + ts + msg
)
self.assertEqual(
sign.build_signing_form(public, previous, counter, timestamp, b''),
public + previous + cnt + ts
)
class TestSigner(TestCase):
def test_init(self):
s = sign.Signer()
self.assertIsInstance(s.key, nacl.signing.SigningKey)
self.assertEqual(s.public, bytes(s.key.verify_key))
self.assertEqual(s.genesis, bytes(s.key.sign(s.public)))
self.assertIs(s.tail, s.genesis)
self.assertEqual(s.previous, s.key.sign(s.public).signature)
self.assertEqual(s.message, b'')
self.assertEqual(s.counter, 0)
self.assertIs(type(s.store), sign.DummyStore)
def test_build_signing_form(self):
s = sign.Signer()
ts = random_u64()
msg = os.urandom(48)
self.assertEqual(
s.build_signing_form(ts, msg),
sign.build_signing_form(s.public, s.previous, 0, ts, msg)
)
def test_sign(self):
s = sign.Signer()
pub = s.public
prev = s.previous
ts = random_u64()
msg = os.urandom(48)
sf = sign.build_signing_form(pub, prev, 1, ts, msg)
expected = bytes(s.key.sign(sf))
self.assertEqual(s.sign(msg, timestamp=ts), expected)
self.assertNotEqual(s.previous, prev)
self.assertEqual(s.previous, expected[:64])
self.assertEqual(s.message, msg)
self.assertEqual(s.tail, expected)
self.assertEqual(s.counter, 1)
self.assertEqual(s.public, pub)
prev = s.previous
ts = random_u64()
msg = os.urandom(48)
sf = sign.build_signing_form(pub, prev, 2, ts, msg)
expected = bytes(s.key.sign(sf))
self.assertEqual(s.sign(msg, timestamp=ts), expected)
self.assertNotEqual(s.previous, prev)
self.assertEqual(s.previous, expected[:64])
self.assertEqual(s.message, msg)
self.assertEqual(s.tail, expected)
self.assertEqual(s.counter, 2)
self.assertEqual(s.public, pub)
|
from food_planner.settings import ITEM_LOC, PRICE_LOC, MEAL_LOC
PRICES = """chicken2breast,300.0,0.006
eggs,12.0,0.170833333
ham,10.0,0.1
tortilla_wraps,8.0,0.11875
edam,10.0,0.175
2avocado,2.0,1.25
4_pork_loins,4.0,0.75
petit_pois,1000.0,0.0013
baby_potatoes,1000.0,0.001
asparagus_tips,125.0,0.0144
2_salmon,2.0,1.625
spagetti,1000.0,0.001
bolognese,1000.0,0.00064
smoked_bacon_lardons,2.0,1.0
sausages,12.0,0.20833333333333334
carrots,1000.0,0.0004
couscous,500.0,0.0014
red_onions,3.0,0.2833333333333333
peppers,3.0,0.45
minced_beef,500.0,0.005
corn_can,200.0,0.0034999999999999996
mixed_frozen_veggies,1000.0,0.00079
sliced_bread,20.0,0.05500000000000001
hash_browns,15.0,0.08
gnocci,500.0,0.0015
pepperoni_pizza,1.0,3.5
meal_deal,1.0,3.0
"""
MEALS = """tortillas,tortilla_wraps:2.0:0.11875,ham:5.0:0.1,edam:2.0:0.175,2avocado:0.5:1.25
pork_hash_petitpois,4_pork_loins:2.0:0.75,petit_pois:200.0:0.0013,hash_browns:5.0:0.08
spaghetti bolognese,spagetti:200.0:0.001,bolognese:250.0:0.00064,minced_beef:500.0:0.005
beef and corn,corn_can:200.0:0.0034999999999999996,minced_beef:250.0:0.005,couscous:150.0:0.0014
eggs and potatoes,eggs:3.0:0.170833333,baby_potatoes:330.0:0.001
eggs_poatoes_carrots,baby_potatoes:330.0:0.001,eggs:3.0:0.170833333,carrots:200.0:0.0004
carbonara,spagetti:200.0:0.001,smoked_bacon_lardons:1.0:1.0,red_onions:0.5:0.2833333333333333,eggs:1.0:0.170833333
sasage_veggies_potatoes,sausages:6.0:0.20833333333333334,baby_potatoes:330.0:0.001,mixed_frozen_veggies:250.0:0.00079
"""
CUPBOARD = """"""
def main():
with open(ITEM_LOC, "w") as f:
f.write(PRICES)
with open(PRICE_LOC, "w") as f:
f.write(PRICES)
with open(MEAL_LOC, "w") as f:
f.write(MEALS)
if __name__ == "__main__":
main()
|
import factory
from users.models import User
class UserFactory(factory.django.DjangoModelFactory):
class Meta:
model = User
|
# coding=utf-8
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField
from wtforms.validators import Required, Length
from flask_pagedown.fields import PageDownField
class NameForm(FlaskForm):
    name = StringField(u'What is your name?', validators=[Required()])
    submit = SubmitField(u'Submit')
class EditProfileForm(FlaskForm):
    username = StringField(u'Nickname', validators=[Length(0, 64)])
    location = StringField(u'Location', validators=[Length(0, 64)])
    about_me = PageDownField(u'About me')
    submit = SubmitField(u'Submit')
class PostForm(FlaskForm):
    title = StringField(u'Title', validators=[Required(message=u"Please enter a title"), Length(0, 64)])
    text = TextAreaField(u'Body', validators=[Required(message=u"Please enter the post content")])
    submit = SubmitField(u'Submit')
class CommentForm(FlaskForm):
    body = StringField('', validators=[Required()])
    submit = SubmitField(u'Submit')
|
from django.contrib import admin
from .models import Article, Video
class ArticleAdmin(admin.ModelAdmin):
fieldsets = [
('Critical Information', {'fields': ['title', 'publisher', 'time']}),
('Others', {'fields': ['description', 'part_1', 'part_2', 'part_3']}),
]
list_display = ('title', 'publisher', 'time')
search_fields = ['title']
list_filter = ['time', 'publisher']
class VideoAdmin(admin.ModelAdmin):
fieldsets = [
('Critical Information', {'fields': ['title', 'publisher', 'url', 'time']}),
('Others', {'fields': ['description']}),
]
list_display = ('title', 'publisher', 'time')
search_fields = ['title']
list_filter = ['time', 'publisher']
admin.site.register(Article, ArticleAdmin)
admin.site.register(Video, VideoAdmin)
|
print("Day 1 - Python Print Function")
print("The function is declared like this:")
print("print('what to print')")
# can use \n between what we want to print to put it on different lines too; see below
# print("Day 1 - Python Print Function\nThe Function is declared like this:\nprint('what to print;)")
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 16:25:56 2019
@author: Administrator
"""
for i in range(1,10):
s=" "
for j in range(1,10):
s+=str.format("{0:1}*{1:1}={2:<2} ",i,j,i*j)
print(s)
print('Upper-triangle output')
a = 1
while a <= 9:  # outer loop controls the rows
    b = 1
    while b <= a:  # inner loop controls the columns
        print("%d*%d=%d\t" % (a, b, a*b), end="")
        b = b + 1
    print()  # newline
    a = a + 1
print('Lower-triangle output')
for i in range(1,10):
for k in range(1,i):
print(end=" ")
for j in range(i,10):
print("%d*%d=%2d" % (i,j,i*j),end=" ")
print("")
|
import xlrd
import xlwt
from collections import Counter
def filterBookListMaker(book,bookRule):
    sheetrule = bookRule.sheet_by_name('字段对照表')  # '字段对照表' = the field mapping sheet
sheetRuleCol = sheetrule.col_values(0)
    taDef = sheetrule.row_values(sheetRuleCol.index('天安'))  # the Tian'an ('天安') field row
    taofDef = sheetrule.row_values(sheetRuleCol.index('天安')+2)  # row naming the sheet each Tian'an field belongs to
filterBookList={}
for index in range(len(taofDef)):
if taofDef[index] in book.sheet_names():
taSheetRow = book.sheet_by_name(taofDef[index]).row_values(0)
if taofDef[index] in filterBookList:
filterBookList[taofDef[index]].append(taSheetRow.index(taDef[index]))
else:
filterBookList[taofDef[index]] = [taSheetRow.index(taDef[index])]
return filterBookList
def sheetToList(sheet,sheetName, filterBookList):
    # Convert an Excel sheet to a list of rows.
    # All sheet-to-list conversion happens here; using the rule book's mapping, only fields named in the rules are kept, dropping unused columns.
myList = [[] for i in range(sheet.nrows)]
for i in range(sheet.nrows):
for j in range(sheet.ncols):
            if j in filterBookList[sheetName] or sheet.cell_value(0,j) in ['险种代码','被保人编码','保单号码']:
                # '险种代码' (risk code), '被保人编码' (insured ID) and '保单号码' (policy number) are the merge keys, so always keep them
myList[i].append(sheet.cell_value(i,j))
return myList
def listMerge(origList,list):
    # Naive sheet merge: no comparison; rows are assumed to line up one-to-one
myList = []
print(len(origList), len(list))
for i in range(len(origList)):
myList.append(origList[i]+list[i])
return myList
def lackListMerge(origList,lackList,arr):
    # Merge a sheet that may be missing rows: no comparison; unmatched rows get padded
myList = []
print(len(origList), len(lackList))
for i in range(len(origList)):
if i==0:
            # the header row merges directly
myList.append(origList[i]+lackList[i])
elif i>0 and arr[i] != None:
myList.append(origList[i]+lackList[arr[i]])
else:
            # missing rows are padded with empty strings
myList.append(origList[i]+['' for index in range(len(lackList[0]))])
return myList
def resultsToWorksheet(savesheet,results):
    # Write the results into the Excel worksheet
for i in range(len(results)):
for j in range(len(results[0])):
try:
res = results[i][j]
except BaseException:
res = ''
savesheet.write(i,j,res)
    workbook.save('天安导出数据.xlsx')  # NOTE: relies on a `workbook` defined outside this function
def mergeElementForList(origList,mergeWhich):
listArr = []
    indexList = []  # keys: risk code ('险种代码') + insured ID ('被保人编码')
listX = origList[0].index(mergeWhich[0])
listY = origList[0].index(mergeWhich[1])
for item in origList:
listArr.append([item[listX]+item[listY]]+item)
indexList.append(item[listX]+item[listY])
return {'list':listArr,'indexList':indexList}
def beneficiaryPushInsured(diffDict):
    # Merge the beneficiary sheet ('受益人表') into the insured sheet ('被保人表')
    # beneficiary sheet
print(diffDict)
diffDictSheet = mergeElementForList(diffDict['受益人表']['sheetList'],['险种代码', '被保人编码'])
    # insured sheet
insuredSheet = mergeElementForList(diffDict['被保人表']['sheetList'],['险种代码', '被保人编码'])
isi = insuredSheet['list'][0].index('险种代码')
for index in range(len(insuredSheet['indexList'])):
if insuredSheet['indexList'][index] in diffDictSheet['indexList']:
            # a beneficiary exists for this insured
dsi = diffDictSheet['indexList'].index(insuredSheet['indexList'][index])
if dsi==0:
pushKey = '受益类型'
else:
pushKey = '指定'
insuredSheet['list'][index] += [pushKey] + diffDictSheet['list'][dsi]
else:
            # no beneficiary: default to '法定' (statutory) and pad with blanks
insuredSheet['list'][index] += ['法定'] + ['' for i in range(len(diffDictSheet['list'][0]))]
return insuredSheet['list']
def balaPushRisk(diffDict,beneficiaryAndInsured):
    # Merge the insured sheet (+ beneficiary sheet) into the risk sheet, expanding the risk sheet along the way
    # The risk sheet's compareList is invalid after this merge
    baiList = beneficiaryAndInsured  # insured sheet (+ beneficiary sheet)
baiIndexList = [item[beneficiaryAndInsured[0].index('险种代码')]+item[beneficiaryAndInsured[0].index('保单号码')] for item in beneficiaryAndInsured]
riskSheetList = diffDict['险种表']['sheetList']
riskSheetListForCheck = [item[riskSheetList[0].index('险种代码')]+item[riskSheetList[0].index('保单号码')] for item in riskSheetList]
rsiOri = riskSheetList[0].index('险种代码')
policyOri = riskSheetList[0].index('保单号码')
riskList = []
for index in range(len(riskSheetListForCheck)):
        # how many times this risk sheet key appears in the insured (+ beneficiary) sheet
        # policy number and risk code are concatenated to decide whether two rows should merge
        riskCode = riskSheetListForCheck[index]  # current risk code + policy number from the risk sheet
repeatNum = dict( Counter(baiIndexList) )[riskCode]
startIndex = 0
if riskCode in baiIndexList:
            # the risk sheet's key exists in the insured sheet
            if repeatNum == 1:
                # riskCode matches exactly once: merge into a single row
riskList.append(riskSheetList[index] + baiList[baiIndexList.index(riskCode)])
else:
#多条数据,根据被保人数据分成多条数据
while repeatNum>0:
thisIndex = baiIndexList.index(riskCode,startIndex)
if riskSheetListForCheck[index] == baiIndexList[thisIndex]:
# riskCode相同才可以合并为一条数据
riskList.append(riskSheetList[index] + baiList[thisIndex])
else:
print('118=====>',index,thisIndex,riskCode,startIndex,repeatNum,riskSheetList[index][riskSheetList[0].index('保单号码')], baiList[thisIndex][baiList[0].index('保单号码')],baiList[thisIndex])
pass
startIndex = thisIndex+1
repeatNum -= 1
else:
            #this case has not occurred yet
            print('no match found---balaPushRisk')
return riskList
def riskMaker(riskList,sheetOrigList):
    #Risk sheet: expand multi-risk policies into multiple rows
arr = []
origPolicyList = [item[sheetOrigList[0].index('保单号码')] for item in sheetOrigList]
riskPolicyList = [item[riskList[0].index('保单号码')] for item in riskList]
riskCompareList = compareListMaker(arr,origPolicyList,riskPolicyList)
print(riskCompareList,len(origPolicyList),len(riskPolicyList))
sheetOrigListAddRisk = []
for index in range(len(riskCompareList)):
        if isinstance(riskCompareList[index],list):
            #multiple risks for this policy
            for i in riskCompareList[index]:
                sheetOrigListAddRisk.append(sheetOrigList[index]+riskList[i])
        else:
            #single risk
            if riskCompareList[index] is not None:
sheetOrigListAddRisk.append(sheetOrigList[index]+riskList[riskCompareList[index]])
return sheetOrigListAddRisk
def compareListMaker(arr,origPolicyList,itemPolicyList):
    ##Compare each sheet's policy numbers against the base sheet to build an index map per sheet; factored out as a standalone function
for policyValue in origPolicyList:
findStart = 0
flag = True
indexArr = []
while flag:
try:
                #keep finding every row that matches
index = itemPolicyList.index(policyValue,findStart)
indexArr.append(index)
findStart = index+1
except BaseException:
flag = False
if len(indexArr)==1:
arr.append(indexArr[0])
elif len(indexArr)>1:
arr.append(indexArr)
else:
            #no matching row found
arr.append(None)
return arr
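# Illustrative behaviour with hypothetical data: each base policy number maps to the
# matching row index in the other sheet, a list of indexes when it matches several
# rows, or None when it matches nothing:
#   compareListMaker([], ['A', 'B', 'C'], ['B', 'A', 'B'])  ->  [1, [0, 2], None]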
def isComplex(arr):
    #Check whether the list contains nested lists
    for i in arr:
        if isinstance(i, list):
            #contains a nested list
            return 1
    #no nested lists
    return 0
def duplicateRemoval(comList):
    # Remove duplicated header columns
    dupDict = {key: value for key, value in Counter(comList[0]).items() if value > 1}
arr = []
for i in dupDict:
count = dupDict[i]
indexLast = 0
while count>0:
currentIndex = comList[0].index(i,indexLast)
            # these duplicated 天安 fields must not be deleted
if indexLast != 0 and i not in ['受益人姓名','受益人关系','受益人证件号码','受益比例','受益人顺序','邮编','投保人邮箱','投保人国籍','投保人联系电话']:
arr.append(currentIndex)
indexLast = currentIndex + 1
count -= 1
arr.sort()
arr.reverse()
    #At this point arr holds every header index to delete, ordered from largest to smallest
for i in arr:
for j in range(len(comList)):
del comList[j][i]
return comList
def tableMakerMain(book, bookRule):
    #data source workbook: book
    # book = xlrd.open_workbook('./副本保险公司提供数据格式-天安20201012.xlsx')
    #rule workbook: bookRule
    # bookRule = xlrd.open_workbook('./fileRule20200925.xlsx')
    filterBookList = filterBookListMaker(book,bookRule)
    #workbook used for saving the result
workbook = xlwt.Workbook()
sheetOrigList = []
diffDict = {}
for sheetIndex in range(len(book.sheet_names())):
arr = []
sheetName = book.sheet_names()[sheetIndex]
if sheetName not in filterBookList:
continue
if sheetIndex==0:
            sheetOrig = book.sheet_by_name(sheetName)#base sheet
sheetOrigList = sheetToList(sheetOrig,sheetName,filterBookList)
continue
else:
            sheetItem = book.sheet_by_name(sheetName)#varying sheet
sheetItemList = sheetToList(sheetItem,sheetName,filterBookList)
arr = compareListMaker(arr,sheetOrig.col_values(1),sheetItem.col_values(1))
        whichFunc = isComplex(arr) # 1 means complex
        #受益人表 must always be treated as a complex sheet
if whichFunc == 0 and sheetName != '受益人表' and sheetName != '被保人表' and sheetName != '险种表':
            # simple sheets are merged directly
            # print(arr,len(arr),sheetName,'simple',len(sheetItemList))
            if len(sheetOrigList) == len(sheetItemList):
                # the base sheet and the simple sheet have the same number of rows
                sheetOrigList = listMerge(sheetOrigList, sheetItemList)
            elif len(sheetOrigList) > len(sheetItemList):
                # the base sheet has more rows than the simple sheet
                sheetOrigList = lackListMerge(sheetOrigList, sheetItemList, arr)
            elif len(sheetOrigList) < len(sheetItemList):
                print('this case has not occurred yet')
        elif whichFunc==1 or sheetName == '受益人表' or sheetName == '被保人表' or sheetName == '险种表':
            # complex sheets are recorded here and handled separately later in the flow
            print(arr,len(arr),sheetName,'complex')
diffDict[sheetName]={
'compareList': arr,
'sheetList': sheetItemList.copy()
}
print(sheetName,whichFunc)
print(diffDict)
if len(diffDict)>0:
        #there is complex-sheet data
        #merge the beneficiary sheet into the insured sheet
        beneficiaryAndInsured = beneficiaryPushInsured(diffDict)
        #merge insured (+beneficiary) rows into the risk sheet, expanding it in the process
        riskList = balaPushRisk(diffDict,beneficiaryAndInsured)
        #merge the risk sheet into the base sheet; multi-risk policies become multiple rows
completeList = riskMaker(riskList,sheetOrigList)
completeList = duplicateRemoval(completeList)
# resultsToWorksheet(workbook.add_sheet('天安',cell_overwrite_ok=True),completeList)
else:
        #no complex-sheet data
completeList = sheetOrigList
    resultDict = {'天安':completeList} #excel result dict
return resultDict
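# A minimal driver sketch, assuming the workbook paths from the commented-out lines
# in tableMakerMain (hypothetical; point these at the real rule/data files):
if __name__ == '__main__':
    import xlrd
    book = xlrd.open_workbook('./副本保险公司提供数据格式-天安20201012.xlsx')
    bookRule = xlrd.open_workbook('./fileRule20200925.xlsx')
    resultDict = tableMakerMain(book, bookRule)
    print(len(resultDict['天安']), 'merged rows for 天安')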
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" Interfaces to LUNA's PSRAM chips."""
import unittest
from amaranth import Signal, Module, Cat, Elaboratable, Record, ClockDomain, ClockSignal
from amaranth.hdl.rec import DIR_FANIN, DIR_FANOUT
from ..utils.io import delay
from ..test.utils import LunaGatewareTestCase, sync_test_case
class HyperBus(Record):
""" Record representing an HyperBus (DDR-ish connection for HyperRAM). """
def __init__(self):
super().__init__([
('clk', 1, DIR_FANOUT),
            ('dq', [
                ('i',  8, DIR_FANIN),
                ('o',  8, DIR_FANOUT),
                ('oe', 1, DIR_FANOUT),
            ]),
            ('rwds', [
                ('i',  1, DIR_FANIN),
                ('o',  1, DIR_FANOUT),
                ('oe', 1, DIR_FANOUT),
            ]),
('cs', 1, DIR_FANOUT),
('reset', 1, DIR_FANOUT)
])
class HyperRAMInterface(Elaboratable):
""" Gateware interface to HyperRAM series self-refreshing DRAM chips.
I/O port:
B: bus -- The primary physical connection to the DRAM chip.
I: reset -- An active-high signal used to provide a prolonged reset upon configuration.
I: address[32] -- The address to be targeted by the given operation.
I: register_space -- When set to 1, read and write requests target registers instead of normal RAM.
I: perform_write -- When set to 1, a transfer request is viewed as a write, rather than a read.
I: single_page -- If set, data accesses will wrap around to the start of the current page when done.
I: start_transfer -- Strobe that goes high for 1-8 cycles to request a read operation.
[This added duration allows other clock domains to easily perform requests.]
I: final_word -- Flag that indicates the current word is the last word of the transaction.
O: read_data[16] -- word that holds the 16 bits most recently read from the PSRAM
I: write_data[16] -- word that accepts the data to output during this transaction
O: idle -- High whenever the transmitter is idle (and thus we can start a new piece of data.)
O: new_data_ready -- Strobe that indicates when new data is ready for reading
"""
LOW_LATENCY_EDGES = 6
HIGH_LATENCY_EDGES = 14
def __init__(self, *, bus, in_skew=None, out_skew=None, clock_skew=None):
"""
Parmeters:
bus -- The RAM record that should be connected to this RAM chip.
data_skews -- If provided, adds an input delay to each line of the data input.
Can be provided as a single delay number, or an interable of eight
delays to separately delay each of the input lines.
"""
self.in_skew = in_skew
self.out_skew = out_skew
self.clock_skew = clock_skew
#
# I/O port.
#
self.bus = bus
self.reset = Signal()
# Control signals.
self.address = Signal(32)
self.register_space = Signal()
self.perform_write = Signal()
self.single_page = Signal()
self.start_transfer = Signal()
self.final_word = Signal()
# Status signals.
self.idle = Signal()
self.new_data_ready = Signal()
# Data signals.
self.read_data = Signal(16)
self.write_data = Signal(16)
def elaborate(self, platform):
m = Module()
#
# Delayed input and output.
#
if self.in_skew is not None:
data_in = delay(m, self.bus.dq.i, self.in_skew)
else:
data_in = self.bus.dq.i
data_oe = self.bus.dq.oe
if self.out_skew is not None:
data_out = Signal.like(self.bus.dq.o)
delay(m, data_out, self.out_skew, out=self.bus.dq.o)
else:
data_out = self.bus.dq.o
#
# Transaction clock generator.
#
advance_clock = Signal()
reset_clock = Signal()
if self.clock_skew is not None:
out_clock = Signal()
delay(m, out_clock, self.clock_skew, out=self.bus.clk)
else:
out_clock = self.bus.clk
with m.If(reset_clock):
m.d.sync += out_clock.eq(0)
with m.Elif(advance_clock):
m.d.sync += out_clock.eq(~out_clock)
#
# Latched control/addressing signals.
#
is_read = Signal()
is_register = Signal()
current_address = Signal(32)
is_multipage = Signal()
#
# FSM datapath signals.
#
# Tracks whether we need to add an extra latency period between our
# command and the data body.
extra_latency = Signal()
# Tracks how many cycles of latency we have remaining between a command
# and the relevant data stages.
latency_edges_remaining = Signal(range(0, self.HIGH_LATENCY_EDGES + 1))
# One cycle delayed version of RWDS.
# This is used to detect edges in RWDS during reads, which semantically mean
# we should accept new data.
last_rwds = Signal.like(self.bus.rwds.i)
m.d.sync += last_rwds.eq(self.bus.rwds.i)
# Create a sync-domain version of our 'new data ready' signal.
new_data_ready = self.new_data_ready
#
# Core operation FSM.
#
# Provide defaults for our control/status signals.
m.d.sync += [
advance_clock .eq(1),
reset_clock .eq(0),
new_data_ready .eq(0),
self.bus.cs .eq(1),
self.bus.rwds.oe .eq(0),
self.bus.dq.oe .eq(0),
]
with m.FSM() as fsm:
# IDLE state: waits for a transaction request
with m.State('IDLE'):
m.d.sync += reset_clock .eq(1)
m.d.comb += self.idle .eq(1)
# Once we have a transaction request, latch in our control
# signals, and assert our chip-select.
with m.If(self.start_transfer):
m.next = 'LATCH_RWDS'
m.d.sync += [
is_read .eq(~self.perform_write),
is_register .eq(self.register_space),
is_multipage .eq(~self.single_page),
current_address .eq(self.address),
]
with m.Else():
m.d.sync += self.bus.cs.eq(0)
# LATCH_RWDS -- latch in the value of the RWDS signal, which determines
# our read/write latency. Note that we advance the clock in this state,
# as our out-of-phase clock signal will output the relevant data before
# the next edge can occur.
with m.State("LATCH_RWDS"):
                m.d.sync += extra_latency.eq(self.bus.rwds.i)
                m.next = "SHIFT_COMMAND0"
# Commands, in order of bytes sent:
# - WRBAAAAA
# W => selects read or write; 1 = read, 0 = write
# R => selects register or memory; 1 = register, 0 = memory
# B => selects burst behavior; 0 = wrapped, 1 = linear
# AAAAA => address bits [27:32]
#
# - AAAAAAAA => address bits [19:27]
# - AAAAAAAA => address bits [11:19]
            # - AAAAAAAA => address bits [ 3:11]
# - 00000000 => [reserved]
# - 00000AAA => address bits [ 0: 3]
# SHIFT_COMMANDx -- shift each of our command bytes out
with m.State('SHIFT_COMMAND0'):
m.next = 'SHIFT_COMMAND1'
# Build our composite command byte.
command_byte = Cat(
current_address[27:32],
is_multipage,
is_register,
is_read
)
# Output our first byte of our command.
m.d.sync += [
data_out .eq(command_byte),
data_oe .eq(1)
]
# Note: it's felt that this is more readable with each of these
# states defined explicitly. If you strongly disagree, feel free
# to PR a for-loop, here.~
with m.State('SHIFT_COMMAND1'):
m.d.sync += [
data_out .eq(current_address[19:27]),
data_oe .eq(1)
]
m.next = 'SHIFT_COMMAND2'
with m.State('SHIFT_COMMAND2'):
m.d.sync += [
data_out .eq(current_address[11:19]),
data_oe .eq(1)
]
m.next = 'SHIFT_COMMAND3'
with m.State('SHIFT_COMMAND3'):
m.d.sync += [
                    data_out  .eq(current_address[ 3:11]),
data_oe .eq(1)
]
m.next = 'SHIFT_COMMAND4'
with m.State('SHIFT_COMMAND4'):
m.d.sync += [
data_out .eq(0),
data_oe .eq(1)
]
m.next = 'SHIFT_COMMAND5'
with m.State('SHIFT_COMMAND5'):
m.d.sync += [
data_out .eq(current_address[0:3]),
data_oe .eq(1)
]
# If we have a register write, we don't need to handle
# any latency. Move directly to our SHIFT_DATA state.
with m.If(is_register & ~is_read):
m.next = 'WRITE_DATA_MSB'
# Otherwise, react with either a short period of latency
# or a longer one, depending on what the RAM requested via
# RWDS.
with m.Else():
m.next = "HANDLE_LATENCY"
with m.If(extra_latency):
m.d.sync += latency_edges_remaining.eq(self.HIGH_LATENCY_EDGES)
with m.Else():
m.d.sync += latency_edges_remaining.eq(self.LOW_LATENCY_EDGES)
# HANDLE_LATENCY -- applies clock edges until our latency period is over.
with m.State('HANDLE_LATENCY'):
m.d.sync += latency_edges_remaining.eq(latency_edges_remaining - 1)
with m.If(latency_edges_remaining == 0):
with m.If(is_read):
m.next = 'READ_DATA_MSB'
with m.Else():
m.next = 'WRITE_DATA_MSB'
            # READ_DATA_MSB -- scans in the first byte of data
with m.State('READ_DATA_MSB'):
# If RWDS has changed, the host has just sent us new data.
with m.If(self.bus.rwds.i != last_rwds):
m.d.sync += self.read_data[8:16].eq(data_in)
m.next = 'READ_DATA_LSB'
            # READ_DATA_LSB -- scans in the second byte of data
with m.State('READ_DATA_LSB'):
# If RWDS has changed, the host has just sent us new data.
# Sample it, and indicate that we now have a valid piece of new data.
with m.If(self.bus.rwds.i != last_rwds):
m.d.sync += [
self.read_data[0:8] .eq(data_in),
new_data_ready .eq(1)
]
                # If our controller is done with the transaction, end it.
with m.If(self.final_word):
m.next = 'RECOVERY'
m.d.sync += advance_clock.eq(0)
with m.Else():
#m.next = 'READ_DATA_MSB'
m.next = 'RECOVERY'
            # WRITE_DATA_MSB -- write the first of our two bytes of data to the PSRAM
with m.State("WRITE_DATA_MSB"):
m.d.sync += [
data_out .eq(self.write_data[8:16]),
data_oe .eq(1),
]
m.next = "WRITE_DATA_LSB"
            # WRITE_DATA_LSB -- write the second of our two bytes of data to the PSRAM
with m.State("WRITE_DATA_LSB"):
m.d.sync += [
data_out .eq(self.write_data[0:8]),
data_oe .eq(1),
]
m.next = "WRITE_DATA_LSB"
# If we just finished a register write, we're done -- there's no need for recovery.
with m.If(is_register):
m.next = 'IDLE'
m.d.sync += advance_clock.eq(0)
with m.Elif(self.final_word):
m.next = 'RECOVERY'
m.d.sync += advance_clock.eq(0)
with m.Else():
#m.next = 'READ_DATA_MSB'
m.next = 'RECOVERY'
# RECOVERY state: wait for the required period of time before a new transaction
with m.State('RECOVERY'):
m.d.sync += [
self.bus.cs .eq(0),
advance_clock .eq(0)
]
# TODO: implement recovery
m.next = 'IDLE'
return m
class TestHyperRAMInterface(LunaGatewareTestCase):
def instantiate_dut(self):
# Create a record that recreates the layout of our RAM signals.
self.ram_signals = Record([
("clk", 1),
("clkN", 1),
("dq", [("i", 8), ("o", 8), ("oe", 1)]),
("rwds", [("i", 1), ("o", 1), ("oe", 1)]),
("cs", 1),
("reset", 1)
])
# Create our HyperRAM interface...
return HyperRAMInterface(bus=self.ram_signals)
def assert_clock_pulses(self, times=1):
""" Function that asserts we get a specified number of clock pulses. """
for _ in range(times):
yield
self.assertEqual((yield self.ram_signals.clk), 1)
yield
self.assertEqual((yield self.ram_signals.clk), 0)
@sync_test_case
def test_register_write(self):
# Before we transact, CS should be de-asserted, and RWDS and DQ should be undriven.
yield
self.assertEqual((yield self.ram_signals.cs), 0)
self.assertEqual((yield self.ram_signals.dq.oe), 0)
self.assertEqual((yield self.ram_signals.rwds.oe), 0)
yield from self.advance_cycles(10)
self.assertEqual((yield self.ram_signals.cs), 0)
# Request a register write to ID register 0.
yield self.dut.perform_write .eq(1)
yield self.dut.register_space .eq(1)
yield self.dut.address .eq(0x00BBCCDD)
yield self.dut.start_transfer .eq(1)
yield self.dut.final_word .eq(1)
yield self.dut.write_data .eq(0xBEEF)
        # Simulate the RAM requesting an extended latency.
yield self.ram_signals.rwds.i .eq(1)
yield
# Ensure that upon requesting, CS goes high, and our clock starts low.
yield
self.assertEqual((yield self.ram_signals.cs), 1)
self.assertEqual((yield self.ram_signals.clk), 0)
# Drop our "start request" line somewhere during the transaction;
# so we don't immediately go into the next transfer.
yield self.dut.start_transfer.eq(0)
# We should then move to shifting out our first command word,
# which means we're driving DQ with the first word of our command.
yield
yield
self.assertEqual((yield self.ram_signals.cs), 1)
self.assertEqual((yield self.ram_signals.clk), 1)
self.assertEqual((yield self.ram_signals.dq.oe), 1)
self.assertEqual((yield self.ram_signals.dq.o), 0x60)
# Next, on the falling edge of our clock, the next byte should be presented.
yield
self.assertEqual((yield self.ram_signals.clk), 0)
self.assertEqual((yield self.ram_signals.dq.o), 0x17)
# This should continue until we've shifted out a full command.
yield
self.assertEqual((yield self.ram_signals.clk), 1)
self.assertEqual((yield self.ram_signals.dq.o), 0x79)
yield
self.assertEqual((yield self.ram_signals.clk), 0)
self.assertEqual((yield self.ram_signals.dq.o), 0x9B)
yield
self.assertEqual((yield self.ram_signals.clk), 1)
self.assertEqual((yield self.ram_signals.dq.o), 0x00)
yield
self.assertEqual((yield self.ram_signals.clk), 0)
self.assertEqual((yield self.ram_signals.dq.o), 0x05)
# Check that we've been driving our output this whole time,
# and haven't been driving RWDS.
self.assertEqual((yield self.ram_signals.dq.oe), 1)
self.assertEqual((yield self.ram_signals.rwds.oe), 0)
yield
        # For a _register_ write, there shouldn't be a latency period.
# This means we should continue driving DQ...
self.assertEqual((yield self.ram_signals.dq.oe), 1)
self.assertEqual((yield self.ram_signals.rwds.oe), 0)
self.assertEqual((yield self.ram_signals.clk), 1)
self.assertEqual((yield self.ram_signals.dq.o), 0xBE)
yield
self.assertEqual((yield self.ram_signals.clk), 0)
self.assertEqual((yield self.ram_signals.dq.o), 0xEF)
@sync_test_case
def test_register_read(self):
# Before we transact, CS should be de-asserted, and RWDS and DQ should be undriven.
yield
self.assertEqual((yield self.ram_signals.cs), 0)
self.assertEqual((yield self.ram_signals.dq.oe), 0)
self.assertEqual((yield self.ram_signals.rwds.oe), 0)
yield from self.advance_cycles(10)
self.assertEqual((yield self.ram_signals.cs), 0)
# Request a register read of ID register 0.
yield self.dut.perform_write .eq(0)
yield self.dut.register_space .eq(1)
yield self.dut.address .eq(0x00BBCCDD)
yield self.dut.start_transfer .eq(1)
yield self.dut.final_word .eq(1)
        # Simulate the RAM requesting an extended latency.
yield self.ram_signals.rwds.i .eq(1)
yield
# Ensure that upon requesting, CS goes high, and our clock starts low.
yield
self.assertEqual((yield self.ram_signals.cs), 1)
self.assertEqual((yield self.ram_signals.clk), 0)
# Drop our "start request" line somewhere during the transaction;
# so we don't immediately go into the next transfer.
yield self.dut.start_transfer.eq(0)
# We should then move to shifting out our first command word,
# which means we're driving DQ with the first word of our command.
yield
yield
self.assertEqual((yield self.ram_signals.cs), 1)
self.assertEqual((yield self.ram_signals.clk), 1)
self.assertEqual((yield self.ram_signals.dq.oe), 1)
self.assertEqual((yield self.ram_signals.dq.o), 0xe0)
# Next, on the falling edge of our clock, the next byte should be presented.
yield
self.assertEqual((yield self.ram_signals.clk), 0)
self.assertEqual((yield self.ram_signals.dq.o), 0x17)
# This should continue until we've shifted out a full command.
yield
self.assertEqual((yield self.ram_signals.clk), 1)
self.assertEqual((yield self.ram_signals.dq.o), 0x79)
yield
self.assertEqual((yield self.ram_signals.clk), 0)
self.assertEqual((yield self.ram_signals.dq.o), 0x9B)
yield
self.assertEqual((yield self.ram_signals.clk), 1)
self.assertEqual((yield self.ram_signals.dq.o), 0x00)
yield
self.assertEqual((yield self.ram_signals.clk), 0)
self.assertEqual((yield self.ram_signals.dq.o), 0x05)
# Check that we've been driving our output this whole time,
# and haven't been driving RWDS.
self.assertEqual((yield self.ram_signals.dq.oe), 1)
self.assertEqual((yield self.ram_signals.rwds.oe), 0)
# Once we finish scanning out the word, we should stop driving
# the data lines, and should finish two latency periods before
# sending any more data.
yield
self.assertEqual((yield self.ram_signals.dq.oe), 0)
self.assertEqual((yield self.ram_signals.rwds.oe), 0)
self.assertEqual((yield self.ram_signals.clk), 1)
# By this point, the RAM will drive RWDS low.
yield self.ram_signals.rwds.i.eq(0)
        # Ensure the clock is still ticking...
yield
self.assertEqual((yield self.ram_signals.clk), 0)
# ... and remains so for the remainder of the latency period.
yield from self.assert_clock_pulses(6)
# Now, shift in a pair of data words.
yield self.ram_signals.dq.i.eq(0xCA)
yield self.ram_signals.rwds.i.eq(1)
yield
yield self.ram_signals.dq.i.eq(0xFE)
yield self.ram_signals.rwds.i.eq(0)
yield
yield
# Once this finished, we should have a result on our data out.
self.assertEqual((yield self.dut.read_data), 0xCAFE)
self.assertEqual((yield self.dut.new_data_ready), 1)
yield
self.assertEqual((yield self.ram_signals.cs), 0)
self.assertEqual((yield self.ram_signals.dq.oe), 0)
self.assertEqual((yield self.ram_signals.rwds.oe), 0)
# Ensure that our clock drops back to '0' during idle cycles.
yield from self.advance_cycles(2)
self.assertEqual((yield self.ram_signals.clk), 0)
# TODO: test recovery time
if __name__ == "__main__":
unittest.main()
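# A plain-Python cross-check of the command/address packing implemented by the
# SHIFT_COMMAND0..5 states above (illustrative only, not part of the gateware;
# it mirrors the Cat() bit ordering, LSB first):
#
#   def ca_bytes(address, read, register=True, multipage=True):
#       b0 = ((address >> 27) & 0x1F) | (int(multipage) << 5) | (int(register) << 6) | (int(read) << 7)
#       return [b0, (address >> 19) & 0xFF, (address >> 11) & 0xFF,
#               (address >> 3) & 0xFF, 0x00, address & 0x07]
#
#   ca_bytes(0x00BBCCDD, read=False)  ->  [0x60, 0x17, 0x79, 0x9B, 0x00, 0x05]
#   ca_bytes(0x00BBCCDD, read=True)   ->  [0xE0, 0x17, 0x79, 0x9B, 0x00, 0x05]
# which matches the byte sequences asserted in test_register_write/test_register_read.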
|
import os, sys, requests, tweepy
from bs4 import BeautifulSoup
from datetime import datetime
import logging
logger = logging.getLogger()
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
frmt = logging.Formatter('%(asctime)s - %(name)s:%(levelname)s - %(message)s')
ch.setFormatter(frmt)
logger.addHandler(ch)
logger.setLevel(logging.INFO)
def get_forecast(city_url="https://www.yr.no/place/United_Kingdom/Scotland/Edinburgh/forecast.xml"):
"""Gets forecast data dict for city_url (from https://www.yr.no/.*/forecast.xml)"""
logger.info("Getting yr.no data")
r = requests.get(city_url)
r.raise_for_status()
soup = BeautifulSoup(r.text, "lxml")
forecast = soup.findAll('time')
data = {}
for i in range(4): ## Change depending on time of pull (range(1,5) for 6AM today till 6AM tomorrow?)
x = forecast[i]
date_from = datetime.strptime(x['from'], "%Y-%m-%dT%H:%M:%S")
date_to = datetime.strptime(x['to'], "%Y-%m-%dT%H:%M:%S")
date = "{}".format(date_from.strftime("%H:%M"))
data[date] = {
'symbol': x.find('symbol')['name'],
'precip': x.find('precipitation')['value'],
'temp': x.find('temperature')['value'],
'wind': x.find('windspeed')['name'],
'pressure': x.find('pressure')['value']
}
logger.info("Got data")
return data
def get_tweet(forecast):
logger.info("Formatting tweet")
results = []
for date in forecast:
tweet = ""
data = forecast[date]
tweet += "{} > {}°C, {}".format(date, data['temp'], data['symbol'])
if float(data['precip']) > 0: tweet += " ({}mm)".format(data['precip'])
extra_info = ", {} hPa".format(data['pressure'])
if len(tweet) + len(extra_info) < 70:
tweet += extra_info
extra_info = ", {}".format(data['wind'])
if len(tweet) + len(extra_info) < 70:
tweet += extra_info
results.append(tweet)
return "\n".join(results)
def tweet_weather():
try:
tweet = get_tweet(get_forecast())
logger.info("Connecting to twitter api")
auth = tweepy.OAuthHandler(os.environ['API_KEY'], os.environ['API_SECRET'])
auth.set_access_token(os.environ['TOKEN'], os.environ['TOKEN_SECRET'])
api = tweepy.API(auth)
logger.info("Posting tweet")
api.update_status(tweet)
logger.info("Tweet successfully posted")
except Exception as e:
logger.exception(e)
if __name__ == '__main__':
tweet_weather()
|
#!/usr/bin/env python3
#
# Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
# Simple tool for verifying that sources from the standalone embedder do not
# directly include sources from the VM or vice versa.
import os
import re
import sys
INCLUDE_DIRECTIVE_RE = re.compile(r'^#include "(.*)"')
PLATFORM_LAYER_RE = re.compile(r'^runtime/platform/')
VM_LAYER_RE = re.compile(r'^runtime/(vm|lib)/')
BIN_LAYER_RE = re.compile(r'^runtime/bin/')
# Tests that don't match the simple case of *_test.cc.
EXTRA_TEST_FILES = [
'runtime/bin/run_vm_tests.cc',
'runtime/bin/ffi_unit_test/run_ffi_unit_tests.cc',
'runtime/vm/libfuzzer/dart_libfuzzer.cc'
]
def CheckFile(sdk_root, path):
includes = set()
with open(os.path.join(sdk_root, path), encoding='utf-8') as file:
for line in file:
m = INCLUDE_DIRECTIVE_RE.match(line)
if m is not None:
header = os.path.join('runtime', m.group(1))
if os.path.isfile(os.path.join(sdk_root, header)):
includes.add(header)
errors = []
for include in includes:
if PLATFORM_LAYER_RE.match(path):
if VM_LAYER_RE.match(include):
errors.append(
'LAYERING ERROR: %s must not include %s' % (path, include))
elif BIN_LAYER_RE.match(include):
errors.append(
'LAYERING ERROR: %s must not include %s' % (path, include))
elif VM_LAYER_RE.match(path):
if BIN_LAYER_RE.match(include):
errors.append(
'LAYERING ERROR: %s must not include %s' % (path, include))
elif BIN_LAYER_RE.match(path):
if VM_LAYER_RE.match(include):
errors.append(
'LAYERING ERROR: %s must not include %s' % (path, include))
return errors
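# For example, assuming runtime/vm/object.h exists in the checkout, a line
# '#include "vm/object.h"' inside runtime/bin/main.cc would be reported as:
#   LAYERING ERROR: runtime/bin/main.cc must not include runtime/vm/object.h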
def CheckDir(sdk_root, dir):
errors = []
for file in os.listdir(dir):
path = os.path.join(dir, file)
if os.path.isdir(path):
errors += CheckDir(sdk_root, path)
elif path.endswith('test.cc') or path in EXTRA_TEST_FILES:
            pass  # Tests may violate layering.
elif path.endswith('.cc') or path.endswith('.h'):
errors += CheckFile(sdk_root, os.path.relpath(path, sdk_root))
return errors
def DoCheck(sdk_root):
return CheckDir(sdk_root, 'runtime')
if __name__ == '__main__':
errors = DoCheck('.')
print('\n'.join(errors))
if errors:
sys.exit(-1)
|
"""controller_ver3 controller."""
# You may need to import some classes of the controller module. Ex:
# from controller import Robot, Motor, DistanceSensor
from controller import Robot
from controller import InertialUnit
from controller import Gyro
from controller import Keyboard
from controller import Motor
import math
import time
#ROS#import rospy
import os
#ROS#from geometry_msgs.msg import Vector3
#ROS#from std_msgs.msg import Int16
#ROS#from std_msgs.msg import Float32
#ROS#from std_msgs.msg import String
#ROS#from sensor_msgs.msg import Image
# create the Robot instance.
robot = Robot()
pitch_disturbance = 0
roll_disturbance = 0
yaw_disturbance = 0
def crying_orangutan(data):
# pitch - data.data is either 1,0,-1
global pitch_disturbance
pitch_disturbance = 0.349066*data.data
pass
def crying_polar_bears(data):
global roll_disturbance
roll_disturbance = 0.349066*data.data
pass
def crying_animals_in_general(data):
global yaw_disturbance
yaw_disturbance = 5*data.data
pass
def crying_mother_earth_in_general(data):
global altitude
global target_altitude
    if data.data==1:
        target_altitude = altitude+0.1
    elif data.data==-1:
        target_altitude = altitude-0.1 # assumed: -1 means descend by the same 0.1 step
pass
# get the time step of the current world.
timeStep = int(robot.getBasicTimeStep())
print("Time Step is: "+str(timeStep))
# Get and enable devices.
#ROS#rospy.init_node('python_submarine_controller', anonymous=True) # node is called 'python_webots_controller'
#ROS#rospy.loginfo("Loading Webots Controller")
#ROS#pub = rospy.Publisher('imu_values_topic', Vector3, queue_size=10)
#ROS#depth_pub = rospy.Publisher('depth_topic', Float32, queue_size=10)
#ROS#log_pub = rospy.Publisher('python_submarine_logger', String, queue_size=10)
#ROS#camera_pub = rospy.Publisher('python_submarine_camera_images', Image, queue_size=10)
#ROS#rearcamera_pub = rospy.Publisher('python_submarine_rear_camera_images', Image, queue_size=10)
#ROS#bleh_pub = rospy.Publisher("python_submarine_heading_speed",Float32,queue_size=10)
#ROS#speed_pub = rospy.Publisher('python_submarine_speeds', Vector3, queue_size=10)
#ROS#rospy.Subscriber("pitch_control_input", Int16, crying_orangutan)
#ROS#rospy.Subscriber("roll_control_input", Int16, crying_polar_bears)
#ROS#rospy.Subscriber("heading_control_input", Int16, crying_animals_in_general)
#ROS#rospy.Subscriber("altitude_control_input", Int16, crying_mother_earth_in_general)
IMUsensor = robot.getInertialUnit('inertial unit') # inertial measurement unit
IMUsensor.enable(timeStep)
GPSsensor = robot.getGPS('gps')
GPSsensor.enable(timeStep)
GYROsensor = robot.getGyro("gyro")
GYROsensor.enable(timeStep)
KeyB = robot.getKeyboard()
KeyB.enable(timeStep)
front_left_motor = robot.getMotor("front left thruster")
front_right_motor = robot.getMotor("front right thruster")
rear_left_motor = robot.getMotor("rear left thruster")
rear_right_motor = robot.getMotor("rear right thruster")
front_left_motor.setPosition(float('inf'))
front_right_motor.setPosition(float('inf'))
rear_left_motor.setPosition(float('inf'))
rear_right_motor.setPosition(float('inf'))
front_left_motor.setVelocity(0.0)
front_right_motor.setVelocity(0.0)
rear_left_motor.setVelocity(0.0)
rear_right_motor.setVelocity(0.0)
camera = robot.getCamera("camera")
camera.enable(timeStep)
rearcamera = robot.getCamera("rearcamera")
rearcamera.enable(timeStep)
FL_wheel = robot.getMotor("front left wheel")
FR_wheel = robot.getMotor("front right wheel")
RL_wheel = robot.getMotor("rear left wheel")
RR_wheel = robot.getMotor("rear right wheel")
FL_wheel.setPosition(float('inf'))
FR_wheel.setPosition(float('inf'))
RL_wheel.setPosition(float('inf'))
RR_wheel.setPosition(float('inf'))
FL_wheel.setVelocity(0.0)
FR_wheel.setVelocity(0.0)
RL_wheel.setVelocity(0.0)
RR_wheel.setVelocity(0.0)
#fly_wheel = robot.getMotor("flywheel")
#fly_wheel.setPosition(float('inf'))
#fly_wheel.setVelocity(0.0)
k_roll_p = 600 # P constant of the roll PID.
k_pitch_p = 600 # P constant of the pitch PID.
k_yaw_p = 500
k_roll_d = 800
k_pitch_d = 800
k_yaw_d = 300
# You should insert a getDevice-like function in order to get the
# instance of a device of the robot. Something like:
# motor = robot.getMotor('motorname')
# ds = robot.getDistanceSensor('dsname')
# ds.enable(timestep)
target_altitude = 9.0
k_vertical_thrust = 398.3476#187.28#592.2569#398.3476#327.3495#128.1189 # with this thrust, the drone lifts.
#k_vertical_offset = 0.1 # Vertical offset where the robot actually targets to stabilize itself.
k_vertical_p = 300 # P constant of the vertical PID.
k_vertical_d = 1000
def CLAMP(value, low, high):
if value < low:
return low
elif value > high:
return high
return value
robot.step(timeStep)
xpos, altitude , zpos = GPSsensor.getValues()
xpos_old=xpos
altitude_old=altitude
zpos_old=zpos
drone_mode = True
atache_mode = False
car_mode = False
altitude_bool=False
angle_dist = 0.785398
angle_lock = -2.0944
logger=False
#ROS#depth_msg = Float32()
pi = math.pi
# Main loop:
# - perform simulation steps until Webots is stopping the controller
while robot.step(timeStep) != -1:
# Read the sensors:
roll, pitch, heading = IMUsensor.getRollPitchYaw()
xpos, altitude , zpos = GPSsensor.getValues()
roll_vel, bleh, pitch_vel =GYROsensor.getValues()
#print(str(roll_vel)+"\t"+str(pitch_vel))
littleTimeStep = timeStep/1000.0
xSpeed=(xpos-xpos_old)/littleTimeStep
ySpeed=(altitude-altitude_old)/littleTimeStep
zSpeed=(zpos-zpos_old)/littleTimeStep
#print(str(xSpeed)+"\t"+str(ySpeed)+"\t"+str(zSpeed))
xpos_old=xpos
altitude_old=altitude
zpos_old=zpos
# val = ds.getValue()
fl_wheel=0
fr_wheel=0
rl_wheel=0
rr_wheel=0
## Now we send some things to ros BELOW
#ROS# camera_image_msg = Image()
#ROS# camera_image_msg.width = 320
#ROS# camera_image_msg.height = 240
#ROS# camera_image_msg.encoding = "bgra8"
#ROS# camera_image_msg.is_bigendian = 1
#ROS# camera_image_msg.step = 1280
#ROS# camera_image_msg.data = camera.getImage()
#ROS# camera_pub.publish(camera_image_msg)
## Now we send some things to ros BELOW
#ROS# rearcamera_image_msg = Image()
#ROS# rearcamera_image_msg.width = 320
#ROS# rearcamera_image_msg.height = 240
#ROS# rearcamera_image_msg.encoding = "bgra8"
#ROS# rearcamera_image_msg.is_bigendian = 1
#ROS# rearcamera_image_msg.step = 1280
#ROS# rearcamera_image_msg.data = rearcamera.getImage()
#rearcamera_image_msg.data = flat_list
#ROS# rearcamera_pub.publish(rearcamera_image_msg)
#ROS# depth_msg.data = altitude
#ROS# depth_pub.publish(depth_msg)
radcoeff = 180.0/pi
# Process sensor data here.
#rospy.loginfo("Sending Simulated IMU Data. Roll: "+str(round(roll*radcoeff))+" Pitch: "+str(round(pitch*radcoeff))+" Heading: "+str(round(heading*radcoeff)))
#ROS# pub.publish(Vector3(roll*radcoeff*-1,pitch*radcoeff*-1,heading*radcoeff*-1))
#ROS# speed_pub.publish(Vector3(math.cos(heading)*xSpeed*-1+math.sin(heading)*zSpeed*-1,ySpeed,math.sin(heading)*xSpeed+math.cos(heading)*zSpeed))
#ROS# log_pub.publish(str(round(roll*radcoeff))+","+str(round(pitch*radcoeff))+","+str(round(heading*radcoeff))+","+str(altitude)+","+str(roll_vel)+","+str(bleh)+","+str(pitch_vel)+","+str(xSpeed)+","+str(ySpeed)+","+str(zSpeed))
#ROS# bleh_pub.publish(bleh)
drive = 0
side = 0
    car_turn=0
yaw_stop=1
key=KeyB.getKey()
while (key>0):
if (key==ord('W')):
drive = 1
if (key==ord('A')):
side = 1
if (key==ord('S')):
drive = -1
if (key==ord('D')):
side = -1
if (key==ord('Q')):
yaw_disturbance = heading+0.174533
            car_turn=1
if (key==ord('E')):
yaw_disturbance = heading-0.174533
            car_turn=-1
if (key==ord('Z')):
target_altitude = altitude+0.8
altitude_bool=True
if (key==ord('X')):
target_altitude = altitude-0.8
altitude_bool=True
if (key==ord('1')):
drone_mode = True
atache_mode = False
car_mode = False
print("Drone Mode")
if (key==ord('2')):
drone_mode = False
atache_mode = True
car_mode = False
print("atache Mode")
if (key==ord('3')):
drone_mode = False
atache_mode = False
car_mode = True
print("Car Mode")
if (key==ord('4')):
drone_mode = False
atache_mode = False
car_mode = False
if (key==ord('N')):
f = open("test2.txt", "w")
f.write("xpos,ypos,zpos\n")
logger=True
if (key==ord('M')):
f.close()
logger=False
key=KeyB.getKey()
# Process sensor data here.
if logger==True:
f.write(str(xpos)+","+str(altitude)+","+str(zpos)+"\n")
if (drone_mode==True):
#print(str(roll)+"\t"+str(pitch)+"\t"+str(heading))
#print(str(roll)+"\t"+str(roll_vel))
#if abs(roll_vel_old-roll_vel)>2:
# k_roll_d=0
#else:
# k_roll_d=0#10.0
roll_input = k_roll_p * (angle_dist*side-roll) - k_roll_d*roll_vel
pitch_input = (k_pitch_p *(angle_dist*drive-pitch) - k_pitch_d*pitch_vel)
if abs(roll_input)>20 or abs(pitch_input)>20:
yaw_stop=0
if (yaw_disturbance>(math.pi)):
yaw_disturbance=yaw_disturbance-2*math.pi
elif (yaw_disturbance<-(math.pi)):
yaw_disturbance=yaw_disturbance+2*math.pi
yaw_error=yaw_disturbance-heading
if (yaw_error>(math.pi)):
yaw_error=yaw_error-2*math.pi
elif (yaw_error<(-math.pi)):
yaw_error=yaw_error+2*math.pi
        yaw_input = yaw_stop*(k_yaw_p*yaw_error - k_yaw_d*bleh)
#print("pitch_input: "+str(pitch_input)+"\t velocity: "+str(pitch_vel))
#print(str(yaw_disturbance)+" \t"+str(heading)+" \t"+str(yaw_error))
        vertical_input = k_vertical_p *CLAMP(target_altitude - altitude, -2.0, 2.0) - k_vertical_d*ySpeed
if roll>math.pi/2 or roll<-math.pi/2:
vertical_input=-vertical_input
if altitude_bool==True:
target_altitude=altitude
altitude_bool=False
#print(str(vertical_input)+"\t"+str(target_altitude - altitude))
#vertical_input = 0# k_vertical_p * pow(clamped_difference_altitude, 3.0);
#0.2635 #0.266 #0.2635 #0.266 #roll distance
#0.3582 #0.3582 #0.3346 #0.3346 #pitch distance
#print(str(vertical_input)+"\t"+str(roll_input)+"\t"+str(pitch_input))
front_left_motor_input =k_vertical_thrust + vertical_input + roll_input + (0.321/0.246)*pitch_input - yaw_input
front_right_motor_input=k_vertical_thrust + vertical_input - roll_input + (0.321/0.246)*pitch_input + yaw_input
rear_left_motor_input =k_vertical_thrust + vertical_input + roll_input - (0.321/0.246)*pitch_input + yaw_input
rear_right_motor_input =k_vertical_thrust + vertical_input - roll_input - (0.321/0.246)*pitch_input - yaw_input
elif atache_mode==True:
if not(abs(drive) or abs(side)):
            vertical_input = k_vertical_p *CLAMP(target_altitude - altitude, -2.0, 2.0) - k_vertical_d*ySpeed
if roll>math.pi/2 or roll<-math.pi/2:
vertical_input=-vertical_input
if altitude_bool==True:
target_altitude=altitude
altitude_bool=False
lock_on=1
else:
vertical_input=0
lock_on=-2
roll_input = k_roll_p * (angle_lock*side-roll) - k_roll_d*roll_vel
pitch_input = (k_pitch_p *(-1.48353*drive-pitch) - k_pitch_d*pitch_vel)
#print("pitch_input: "+str(pitch_input)+"\t velocity: "+str(pitch_vel))
#print(str(yaw_disturbance)+" \t"+str(heading)+" \t"+str(yaw_error))
front_left_motor_input =lock_on*k_vertical_thrust + vertical_input + roll_input + (0.321/0.246)*pitch_input
front_right_motor_input=lock_on*k_vertical_thrust + vertical_input - roll_input + (0.321/0.246)*pitch_input
rear_left_motor_input =lock_on*k_vertical_thrust + vertical_input + roll_input - (0.321/0.246)*pitch_input
rear_right_motor_input =lock_on*k_vertical_thrust + vertical_input - roll_input - (0.321/0.246)*pitch_input
#print(str(k_vertical_thrust)+" \t"+str(roll_input))
elif car_mode==True:
        front_left_motor_input =-2*k_vertical_thrust-car_turn*400
        front_right_motor_input=-2*k_vertical_thrust+car_turn*400
        rear_left_motor_input =-2*k_vertical_thrust+car_turn*400
        rear_right_motor_input =-2*k_vertical_thrust-car_turn*400
fl_wheel=4*drive+4*side
fr_wheel=4*drive-4*side
rl_wheel=4*drive+4*side
rr_wheel=4*drive-4*side
#print(str(k_vertical_thrust)+" \t"+str(roll_input))
else :
front_left_motor_input =2*k_vertical_thrust
front_right_motor_input=2*k_vertical_thrust
rear_left_motor_input =2*k_vertical_thrust
rear_right_motor_input =2*k_vertical_thrust
clampval = 10000
#print(str(front_left_motor_input)+"\t"+str(front_right_motor_input)+"\t"+str(rear_left_motor_input)+"\t"+str(rear_right_motor_input))
if front_left_motor_input>1000 or front_right_motor_input>1000 or rear_left_motor_input>1000 or rear_right_motor_input>1000:
print("motor input maxed: "+str(int(time.time())))
front_left_motor.setVelocity(CLAMP(front_left_motor_input,-clampval,clampval))#positive is up #0.44467908653
front_right_motor.setVelocity(CLAMP(-front_right_motor_input,-clampval,clampval))#negative is up #0.44616503673
rear_left_motor.setVelocity(CLAMP(-rear_left_motor_input,-clampval,clampval))#negative is up #0.42589835641
rear_right_motor.setVelocity(CLAMP(rear_right_motor_input,-clampval,clampval))#positive is up #0.42744959936
#fly_wheel.setVelocity(spin_boy)
FL_wheel.setVelocity(fl_wheel)
FR_wheel.setVelocity(fr_wheel)
RL_wheel.setVelocity(rl_wheel)
RR_wheel.setVelocity(rr_wheel)
# Enter here functions to send actuator commands, like:
# motor.setPosition(10.0)
pass
# Enter here exit cleanup code.
|
#
# @lc app=leetcode.cn id=1122 lang=python3
#
# [1122] 数组的相对排序
#
# @lc code=start
from typing import List  # needed to run outside the LeetCode judge
class Solution:
    def relativeSortArray(self, arr1: List[int], arr2: List[int]) -> List[int]:
        rank = {x: i for i, x in enumerate(arr2)}
        def CMP(x):
            # elements of arr2 sort by their position; everything else sorts by value after them
            return rank.get(x, x + 1000)
        arr1.sort(key=CMP)
        return arr1
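# Example from the problem statement:
#   Solution().relativeSortArray([2,3,1,3,2,4,6,7,9,2,19], [2,1,4,3,9,6])
#   returns [2,2,2,1,1,4,3,3,9,6,7,19]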
# @lc code=end
|
#coding=utf-8
from __future__ import print_function  # keeps print() behaviour consistent under Python 2
from numpy import *
def loadTrainData(trainFile):
fp = open(trainFile)
lines = fp.readlines()
docLabel, docVec = [], []
for tmp in lines:
line = tmp.strip().split(' ')
docLabel.append(int(line[0]))
docVec.append([int(i) for i in line[1:]])
return docLabel, docVec #both are int type
def loadTestData(testFile):
fp = open(testFile)
lines = fp.readlines()
testLabel, testVec = [], []
for tmp in lines:
line = tmp.strip().split(' ')
testLabel.append(int(line[0]))
testVec.append([int(i) for i in line[1:]])
return testLabel, testVec #both are int type
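# Both loaders expect one document per line: an int label followed by k int feature
# values, all space-separated -- e.g. a hypothetical line for class 3 with k=5:
#   3 0 1 1 0 1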
def naiveBayes(trainFile, k): #k must match the number of features per class in the data files
docLabel, docVec = loadTrainData(trainFile)
docArr = array(docVec)
countNj = ones((10))+1 #one dimension array
length = len(docLabel)
countVecIJ = ones((10, k*10))
for j in range(length):
countNj[docLabel[j]] += 1 #count the docs of different classes
        countVecIJ[docLabel[j], :] += docArr[j,:] #add each row's feature vector into that class's count vector
pNJ = log(countNj / length) #return possibility of every class
pVecIJ = ones(shape(countVecIJ))
pNonVecIJ = ones(shape(countVecIJ)) #(10,300)
for i in range(10):
pVecIJ[i,:] = log(countVecIJ[i, :] / countNj[i]) #get conditional possibility
pNonVecIJ[i,:] = log(1.0 - (countVecIJ[i, :] / countNj[i]))
return pNJ, pVecIJ, pNonVecIJ
def test(pNJ, pVecIJ, pNonVecIJ, testFile, k): #k must match the number of features per class in the data files
testLabel, testVec = loadTestData(testFile)
num, right = len(testLabel), 0
testLabelArr, testArr = array(testLabel), array(testVec)
for i in range(num):
docArr = (testArr[i,:]*pVecIJ)+(1-testArr[i,:])*pNonVecIJ #docArr is two dimension
sumJ = array(zeros(10))
for j in range(10):
            sumJ[j] = sum(docArr[j,:]) + pNJ[j] #note: be careful with countVecIJ indexing here (flagged as a past bug source)
ans = sumJ.argsort()[-1]
if ans == testLabelArr[i]:
right += 1
    print('accuracy=', float(right) / num)
if __name__ == '__main__':
ks = [30,40,50,60,80,100]
for kNum in ks:
        print(kNum, 'Features start')
pNJ, pVecIJ, pNonVecIJ = naiveBayes('train_'+str(kNum), k=kNum) # call the train function
test(pNJ, pVecIJ, pNonVecIJ, 'test_'+str(kNum), k=kNum)
|
from django.urls import path
from alimentos.views import food, create_food, delete_food, update_food
urlpatterns = [
path('food/', food, name='food'),
path('create_food/', create_food, name='create_food'),
path('update_food/<int:id>', update_food, name="update_food"),
path('delete_food/<int:id>', delete_food, name="delete_food"),
]
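# Reversing these route names yields the expected paths (assuming this urlconf is
# mounted at the project root), e.g.:
#   reverse('update_food', args=[3])  ->  '/update_food/3'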
|
#!/usr/bin/python
from Emakefun_MotorHAT import Emakefun_MotorHAT, Emakefun_Servo
import time
mh = Emakefun_MotorHAT(addr=0x60)
myServo = mh.getServo(1)
speed = 9
while (True):
myServo.writeServoWithSpeed(0, speed)
time.sleep(1)
myServo.writeServoWithSpeed(90, speed)
time.sleep(1)
myServo.writeServoWithSpeed(180, speed)
time.sleep(1)
# for i in range (0, 181, 10):
# myServo.writeServo(i, 9)
# time.sleep(0.02)
# time.sleep(1)
# for i in range (180, -1, -10):
# myServo.writeServo(i, 9)
# time.sleep(0.02)
# time.sleep(1)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('basketball', '0019_auto_20150818_1944'),
]
operations = [
migrations.AlterField(
model_name='game',
name='team1',
field=models.ManyToManyField(to='basketball.Player', related_name='team1_set', default=[5]),
),
migrations.AlterField(
model_name='game',
name='team2',
field=models.ManyToManyField(to='basketball.Player', related_name='team2_set', default=[6]),
),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Filename: __init__.py
# @Date: 2020/3/6
# @Author: Mark Wang
# @Email: wangyouan@gamil.com
from .path_info import PathInfo
class Constants(PathInfo):
GVKEY = 'gvkey'
COUNTRY = 'country'
COUNTRY_ISO3 = 'country_iso3'
COUNTRY_ISO3N = 'country_iso3n'
YEAR = 'year'
MONTH = 'month'
CIK = 'cik'
CURRENCY = 'ioscur'
SIC = 'sic_code'
FISCAL_YEAR = 'fyear'
FORMAL = 'formal'
REAL = 'real'
|
from typing import List
from fastapi import APIRouter
from starlette.responses import JSONResponse
from app.response.tags import TagsAllRespose
from app.api.operation.tags import create_tag, get_tags
from app.schemas.tags import Tag, TagCreate
router = APIRouter()
@router.post("/add_tag/", response_model=Tag, tags=['tags'])
async def create_tags(tag: TagCreate):
return await create_tag(tag.name)
@router.get("/hot", response_model=TagsAllRespose, tags=['tags'])
async def all_tags():
tags = await get_tags()
data = [{"id": i.id, "tagName": i.name,"avatar":i.avatar, "createTime": i.created_at.strftime("%Y-%m-%d %H:%M:%S")} for i in tags]
return {"data": data}
|
import time
import Board
print('''
**********************************************************
***Function: Hiwonder (幻尔科技) Raspberry Pi expansion ***
***board, serial bus servo control example             ***
**********************************************************
----------------------------------------------------------
Official website:http://www.lobot-robot.com/pc/index/index
Online mall:https://lobot-zone.taobao.com/
----------------------------------------------------------
The commands below must all be run in an LX terminal, which
can be opened with ctrl+alt+t or via the black LX terminal
icon in the top bar.
----------------------------------------------------------
Usage:
    sudo python3 BusServoMove.py
----------------------------------------------------------
Version: --V1.0 2020/08/12
----------------------------------------------------------
Tips:
 * Press Ctrl+C to stop this run of the program; if that fails, try a few more times!
----------------------------------------------------------
''')
while True:
    # Arguments: 1: servo id; 2: position; 3: travel time
    Board.setBusServoPulse(2, 500, 500) # servo 2 moves to position 500, taking 500ms
    time.sleep(0.5) # sleep for the same duration as the move
    Board.setBusServoPulse(2, 200, 500) #servo range is 0-240 degrees mapped to pulse width 0-1000, so argument 2 ranges over 0-1000
time.sleep(0.5)
Board.setBusServoPulse(2, 500, 200)
time.sleep(0.2)
Board.setBusServoPulse(2, 200, 200)
time.sleep(0.2)
Board.setBusServoPulse(2, 500, 500)
Board.setBusServoPulse(3, 300, 500)
time.sleep(0.5)
Board.setBusServoPulse(2, 200, 500)
Board.setBusServoPulse(3, 500, 500)
time.sleep(0.5)
|
#!/usr/bin/python
'''
Cue_Control_Middleware.py
Written by Andy Carluccio
University of Virginia
This file is designed to run on a properly configured YUN Linux Environment
Extensive documentation is available at:
Good things to know about communication with the Arduino environment:
1. The first int sent is always the number of ints to follow (handled within the sendCommand method)
2. sendCommand([-1]) signals that load mode is being entered
3. sendCommand([-2]) signals that load mode is being exited
4. sendCommand([-3]) signals that a manual control packet follows
5. sendCommand([-4]) signals an incoming cue
6. sendCommand([-5]) is a short-hand soft-stop trigger
'''
#import statements for required libraries and bridge
#setup------------------------
import sys
import requests
import math
sys.path.insert(0, '/usr/lib/python2.7/bridge')
from time import sleep
from bridgeclient import BridgeClient as bridgeclient
value = bridgeclient()
#global variables and
#flags-------------------------------------------------------
oldTime = 0
controlCode = 0
loadFlag = False
cueFlag = False
manualFlag = False
runningCue = False
#URLs of server web
#pages---------------------------------------------------------
mainServerPage = "http://192.168.1.2:5000/api/home"
manualControlPage = "http://192.168.1.2:5000/api/xsend"
responsePage = "http://192.168.1.2:5000/api/encoder"
cuePage = "http://192.168.1.2:5000/api/cue"
#Stage Cue Data
#Structure---------------------------------------------------------
#A stage vector is a pair of distance and rotation values with acceleration,
#deceleration, and a maximum speed
#There is a way to create the vector using ordered pairs and acceleration,
#deceleration, and a maximum speed
class stageVector(object):
#Constructor
def __init__(self, dist, theta, accel, maxSpeed, deccel):
self.distance = dist
self.angle = theta
self.accel = accel
self.deccel = deccel
self.maxSpeed = maxSpeed
#Makeshift alternate constructor
    def makeFromPoints(self, x1, y1, x2, y2, acc, decc, vel):
        self.distance = math.sqrt((abs(x2 - x1) ** 2) + abs(y2 - y1) ** 2)
        #NOTE: this angle formula looks suspect (atan2(y2 - y1, x2 - x1) may be intended); kept as written
        self.angle = (180 / math.pi) * (math.atan(abs(x2 - x1) / self.distance))
        self.accel = acc
        self.deccel = decc
        self.maxSpeed = vel
#Getters
def get_distance(self):
return self.distance
def get_angle(self):
return self.angle
def get_maxSpeed(self):
return self.maxSpeed
def get_accel(self):
return self.accel
def get_deccel(self):
return self.deccel
#A Stage Cue is a list of stage vectors
class stageCue(object):
#Constructor
def __init__(self, id):
self.vectorList = []
self.cueID = id
#push a vector to the list
def appendVector(self, stageVec):
self.vectorList.append(stageVec)
#returns a list of the vectors
def getVectorsInCue(self):
return self.vectorList
def popVector(self):
return self.vectorList.pop()
def popThisVector(self, loc):
return self.vectorList.pop(loc)
def appendVectorList(self, newVecList):
for sv in newVecList:
            self.vectorList.append(sv)
def addVectorAtLocation(self, loc, vec):
        self.vectorList.insert(loc, vec)
def getID(self):
return self.cueID
def setID(self, newID):
self.cueID = newID
#A stage cue list is, well, a list of stage cues!
class stageCueList(object):
#Constructor
def __init__(self, id):
self.cueList = []
self.listID = id
def appendCue(self, stageQ):
self.cueList.append(stageQ)
def getCuesInList(self):
return self.cueList
def popCue(self):
return self.cueList.pop()
def popThisCue(self, loc):
return self.cueList.pop(loc)
def appendCueList(self, newCueList):
for q in newCueList:
            self.cueList.append(q)
def addCueAtLocation(self, loc, q):
        self.cueList.insert(loc, q)
def getCueAtLocation(self, loc):
allCues = self.cueList
return allCues[loc]
def getID(self):
return self.listID
def setID(self, newID):
self.listID = newID
#provided a file name, this will create a csv style save file for the cue
#list
    def saveToFile(self, fileName):
        with open(fileName, "w") as outFile:
            outFile.write(self.listID + "," + '\n')
            for c in self.getCuesInList():
                outFile.write(c.getID() + ",")
                for v in c.getVectorsInCue():
                    outFile.write(str(v.get_distance()) + "," + str(v.get_angle()) + "," + str(v.get_accel()) + "," + str(v.get_maxSpeed()) + "," + str(v.get_deccel()))
                outFile.write('\n')
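#Illustrative file layout written by saveToFile (values hypothetical): the list ID on
#its own line, then one line per cue with its ID followed by the
#distance,angle,accel,maxSpeed,deccel of each vector, e.g.:
#   Current YUN-Loaded Cue List,
#   Cue1,3.0,45.0,1.0,5.0,1.0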
#function for sending encoder data to the
#server------------------------------------------
def sendEncoderVals():
encoderValues = {"encoder1data": leftEncoderRunningSum, "encoder2data": rightEncoderRunningSum}
#should change the encoderReadings to the numbers
r = requests.post(responsePage, data = encoderValues)
#function for sending other data to the
#server-------------------------------------------
def sendMessage(message):
    payload = {"message": message}
    r = requests.post(responsePage, data = payload)
#function for sending data to YUN Arduino
#Processor--------------------------------------
def sendCommand(cmd):
chars = []
length = 0
#offset values to positive range
for instr in cmd:
chars.append(int(instr) + 127)
length+=1
#the console is what the Ardunio Processor reads from, must flush each time
#the first message must always be the number of messages to follow
print(length)
sys.stdout.flush()
loc = 0
while(loc < length):
print(chars[loc])
sys.stdout.flush()
loc+=1
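#For example, sendCommand([-3]) prints "1" (the count) and then "124" (-3 offset by
#+127), flushing after each line so the Arduino side of the bridge reads them in order.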
#function for pulling cues down from the web page and converting them into cues
#in local memory--------------------
def loadCues():
#Notify YUN Arduino Processor that new cues are being loaded (sets LED and
#causes a stop)
notifyLoadCommand = [-1]
sendCommand(notifyLoadCommand)
#This is where the code for loading cues into the data structure will go
cuesString = requests.get(cuePage)
#Tell the server we have completed the load
msg = {"message": str}
#should change the encoderReadings to the numbers
r = requests.post(responsePage, data = msg)
#Notify the YUN Arduino Processor that the new cues are loaded and it is
#safe to continue (sets LED, no drive)
notifyLoadCompleteCommand = [-2]
sendCommand(notifyLoadCompleteCommand)
def startCue(number):
    #Update global state shared with the main loop
    global runningCue, currentCueNum, cueControlArray
    runningCue = True
    currentCueNum = number
    notifyIncomingCue = [-4]
    sendCommand(notifyIncomingCue)
#calculate the cue's effective values
cueToRun = cueList.getCueAtLocation(number)
#TODO: DO SOME FANCY MATH TO MAKE A CONTROL ARRAY
fancyArray = []
#we also need to figure out what it means to go backwards a cue, or to skip
#a cue, or to do anything nonlinear!
cueControlArray = fancyArray
#set current and target encoder values
sendCommand(cueControlArray)
'''
How to run a cue:
set the flag
warn the arduino
calculate the cue's effective values and update global control array
calculate the target values for the encoders to report
send the command for the first time
check in the main loop for the flag
if the flag is set,
check the encoders current values against the targets
update the global encoder values
send the encoder values to the web
if we have not reached the destination yet
send the stored message of the current cue again
otherwise
turn off the running a cue flag
tell website we completed
tell the arduino to stop
continue to check the server's status throughout for a network stop command
'''
#Get updated encoder data from the Arduino's latest bridge report
def updateEncoderValues():
    global leftEncoderRunningSum, rightEncoderRunningSum
    #TODO: Implement the receiving of the encoder data over Bridge
    updateLeft = 0
    updateRight = 0
    #TODO: what happens when a wheel spins backwards?
    leftEncoderRunningSum = int(updateLeft - leftEncoderRunningSum) + int(leftEncoderRunningSum)
    rightEncoderRunningSum = int(updateRight - rightEncoderRunningSum) + int(rightEncoderRunningSum)
def handleRunningCue():
    global runningCue
    updateEncoderValues()
if(abs(leftEncoderRunningSum - leftEncoderTarget) > encoderErrorMargin and abs(rightEncoderRunningSum - rightEncoderTarget) > encoderErrorMargin):
sendCommand(cueControlArray)
else:
#TODO: notify server
runningCue = False
stop = [-5]
sendCommand(stop)
#MAIN CONTROL ALGORITHM----------------------------------------------------
lastCompletedCueNum = 0
currentCueNum = 0
cueControlArray = []
cueList = stageCueList("Current YUN-Loaded Cue List")
leftEncoderTarget = 0
rightEncoderTarget = 0
leftEncoderRunningSum = 0
rightEncoderRunningSum = 0
encoderErrorMargin = 1
#Runs forever (in theory!)
while(True):
#PHASE 0: Check if a cue is in progress:
    if(runningCue):
        handleRunningCue()
        mcp = requests.get(mainServerPage)
        codeLine = mcp.text.split('\n')
        controlData = codeLine[0][1:-1] #strip the wrapping brackets, mirroring the else branch below
        controlCode = controlData.split(',')
        if(int(controlCode[3]) == 1): #scraped values are strings
            stop = [-5]
            sendCommand(stop)
            runningCue = False
else:
#PHASE 1: Scrape the Main Control Page
mcp = requests.get(mainServerPage)
codeLine = mcp.text.split('\n')
mainLine = codeLine[0]
trimmed = mainLine[1:-1]
controlData = trimmed.split(',')
#PHASE 2: Determine what control mode we are in and act accordingly
        controlCode = int(controlData[0]) #scraped values are strings
        loadFlag = int(controlData[2])
        if (controlCode == 1):
            cueFlag = True
        elif(controlCode == 2):
            manualFlag = True
            loadFlag = 1
if(loadFlag == 1):
print("Loading...")
loadCues()
elif(cueFlag):
            if(int(codeLine[1]) != lastCompletedCueNum):
                startCue(int(codeLine[1]))
#Switch to manual drive mode
elif(manualFlag):
#notify the arduino that we are entering manual control
            sendCommand([-3])
#scrape the manual control webpage
resp = requests.get(manualControlPage)
#create an array of the movement values
codeLines = resp.text.split('\n')
importantLine = codeLines[0]
web_line = importantLine[1:-1]
listVals = web_line.split(',')
#if the data has not gone stale
if(int(listVals[8]) - int(oldTime) < 30):
driveCommand = [listVals[0],listVals[1], listVals[2], listVals[3], listVals[4], listVals[5], listVals[6], listVals[7]]
sendCommand(driveCommand)
oldTime = listVals[8]
#add just enough delay to keep the processors in sync
sleep(0.1)
print(controlCode)
print(cueFlag)
print(manualFlag)
#A message we should never see
print("We're in the endgame now...\n")
|
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import RobustScaler
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# TODO: generalize into Scaling(data_train, data_test, columns, method)
def Scaling_StandardScaler(data_train, data_test, columns):
SC = StandardScaler()
scaling_data = SC.fit_transform(data_train)
scaling_data_train = pd.DataFrame(scaling_data, columns=columns)
scaling_data_test = SC.transform(data_test)
return scaling_data_train, scaling_data_test
def Scaling_MinMaxScaler(data_train, data_test, columns):
MMS = MinMaxScaler()
scaling_data = MMS.fit_transform(data_train)
scaling_data_train = pd.DataFrame(scaling_data, columns=columns)
scaling_data_test = MMS.transform(data_test)
return scaling_data_train, scaling_data_test
def Scaling_RobustScaler(data_train, data_test, columns):
Roboust = RobustScaler()
scaling_data = Roboust.fit_transform(data_train)
scaling_data_train = pd.DataFrame(scaling_data, columns=columns)
scaling_data_test = Roboust.transform(data_test)
return scaling_data_train, scaling_data_test
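# Minimal usage sketch (hypothetical frames; the three scalers share the same shape).
# Note the *_test return value is a bare ndarray, not a DataFrame:
#   import pandas as pd
#   train = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [10.0, 20.0, 30.0]})
#   test = pd.DataFrame({'a': [1.5], 'b': [15.0]})
#   train_s, test_s = Scaling_StandardScaler(train, test, columns=list(train))
#   ScalingComparationScaling(train, train_s)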
def ScalingComparationScaling(data, scaling_data):
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(6, 5))
columns = list(data)
ax1.set_title('Before Scaling')
ax2.set_title('After Scaling')
for name in columns:
sns.kdeplot(data[name], ax=ax1)
sns.kdeplot(scaling_data[name], ax=ax2)
plt.show()
|
# GUESS GAME
def guess_game(s_no):
# The secret code will be stored here
secret_name = s_no
# The answer of the user will be stored here temporarily
guess = ""
# The number of times the user has guessed from beginning
guess_counts = 0
# The number of times the user can guess
guess_limit = 3
    # Whether the user has run out of guesses
out_of_guess = False
while guess != secret_name and not(out_of_guess):
if guess_counts < guess_limit:
guess = input('What is chocolate made of? : ')
guess_counts += 1
else:
out_of_guess = True
if out_of_guess:
print('You are out of guesses & YOU LOSE !')
else:
print('You won the GAME !')
# print(guess_game('coco'))
# Unlimited guesses
def guess_game_1(secret_no):
# The secret code will be stored here
secret_name = secret_no
user_guess = ""
# Condition
while user_guess != secret_name:
user_guess = input('What is chocolate made of? : ')
else:
print('You won the Game !!')
guess_game_1('coco')
|
"""This script is used to generate a BPSK or QPSK OFDM signal to transmit using USRPs.
Usage:
    - Generate a BPSK signal:
python3 generate_ofdm.py
    - Generate a QPSK signal:
python3 generate_ofdm.py --use_qpsk=True
"""
from __future__ import print_function, division
from utils import ofdm_util as ofdm
from utils import nonflat_channel_timing_error
import matplotlib.pyplot as plt
import numpy as np
import argparse
FLAGS = None
parser = argparse.ArgumentParser()
# NOTE: `type=bool` is an argparse pitfall (any non-empty string parses as
# True), so the option is exposed as a plain switch instead.
parser.add_argument('--use_qpsk', action='store_true',
                    help='Specify that the generated data is QPSK instead of BPSK.')
FLAGS, unparsed = parser.parse_known_args()
# Create the long training sequence (LTS). This is a random sequence of 64
# complex time domain samples.
seed_real = 5
seed_imag = 3
lts = ofdm.create_long_training_sequence(ofdm.NUM_SAMPLES_PER_PACKET, seed_real, seed_imag)
# Create the channel estimation sequence.
seed = 11
num_known_packets = 10
known_signal_freq_tx = ofdm.create_signal_freq_domain(
ofdm.NUM_SAMPLES_PER_PACKET,
num_known_packets,
seed,
pilot=True,
qpsk=FLAGS.use_qpsk)
known_signal_time_tx = ofdm.create_signal_time_domain(
ofdm.NUM_SAMPLES_PER_PACKET,
ofdm.NUM_SAMPLES_CYCLIC_PREFIX,
known_signal_freq_tx)
# Create the data to transmit.
seed = 10
data_freq_tx = ofdm.create_signal_freq_domain(ofdm.NUM_SAMPLES_PER_PACKET,
ofdm.NUM_PACKETS, seed, qpsk=FLAGS.use_qpsk)
data_time_tx = ofdm.create_signal_time_domain(ofdm.NUM_SAMPLES_PER_PACKET,
ofdm.NUM_SAMPLES_CYCLIC_PREFIX, data_freq_tx)
rms = np.sqrt(np.mean(np.square(np.abs(data_time_tx))))
# Zero padding
zero_pad = np.zeros(5000)
# Concatenate the LTS, channel estimation signal, and the data together and transmit.
signal_time_tx = np.concatenate((zero_pad, lts * rms, known_signal_time_tx, data_time_tx))
# Normalize to +/- 0.5.
signal_time_tx = 0.5 * signal_time_tx / np.max(np.abs(signal_time_tx))
# Interleave real and imaginary samples to transmit with USRP.
tmp = np.zeros(2 * signal_time_tx.shape[-1], dtype=np.float32)
tmp[::2] = signal_time_tx.real
tmp[1::2] = signal_time_tx.imag
plt.plot(tmp)
plt.show()
# Save to a binary file.
tmp.tofile('tx_data.dat')
# Save the components of the transmitted signal for analysis and correction at the receiver.
dest = 'tx_arrays.npz'
np.savez(dest, lts=lts, header_time=known_signal_time_tx,
data_time=data_time_tx, header_freq=known_signal_freq_tx,
data_freq=data_freq_tx)
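# Receiver-side sanity check (added sketch, not part of the original script):
# load the interleaved float32 file back and rebuild the complex samples.
raw = np.fromfile('tx_data.dat', dtype=np.float32)
recovered = raw[::2] + 1j * raw[1::2]
assert np.allclose(recovered, signal_time_tx), 'readback does not match the transmitted signal'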
|
import tools
import Chrome_driver
from time import sleep
import zipfile
import os
import shutil
from shutil import copyfile
import Changer_windows_info as changer
import db
from os.path import join, getsize
import luminati
def get_updateinfo():
print('======get_updateinfo')
sql_content = "select * from update_config;"
account = db.get_account(1)
# print(account)
conn,cursor = db.login_sql(account)
# for sql_content in sql_contents:
# print('\n\n\n')
# print(sql_content)
res = cursor.execute(sql_content)
    desc = cursor.description  # column metadata; entry [0] of each item is the column name (rename via SQL AS if needed)
    update_config = [dict(zip([col[0] for col in desc], row)) for row in cursor.fetchall()]  # assemble each row into a dict keyed by column name
# print(update_config[0])
db.login_out_sql(conn,cursor)
return update_config[0]
def get_code():
    # Retry the DB read a few times before giving up.
    for i in range(10):
        try:
            update_config = get_updateinfo()
            break
        except Exception:
            pass
# chrome_driver = Chrome_driver.get_chrome()
chrome_driver = Chrome_driver.get_chrome(headless=1)
# print(update_config)
url_back = update_config['url_back']
url_code = update_config['url_code']
for j in range(10):
try:
chrome_driver.get(url_back)
chrome_driver.find_element_by_xpath(update_config['xpath_username']).send_keys('18122710376')
chrome_driver.find_element_by_xpath(update_config['xpath_pwd']).send_keys('3r!i1@,JZQ27!')
sleep(2)
chrome_driver.find_element_by_name(update_config['xpath_checkbox']).click()
# chrome_driver.find_element_by_xpath(update_config['xpath_checkbox']).click()
sleep(1)
print('file 1.0')
chrome_driver.find_element_by_tag_name(update_config['xpath_button']).click()
sleep(1)
except Exception as e:
print(str(e))
continue
chrome_driver.refresh()
Chrome_driver.clean_download()
chrome_driver.get(url_code)
sleep(10)
for i in range(100):
flag = 0
modules = Chrome_driver.download_status()
names = update_config['zipname'].split(',')
names = [name+'.zip' for name in names]
module_name = ''
for module in modules:
if module in names:
module_name = module
print('Find zip src')
sleep(3)
flag = 1
chrome_driver.close()
chrome_driver.quit()
flag_zip = test_zip(module)
if flag_zip == 0:
return -1
delete_folder()
unfold_zip(module_name)
break
else:
sleep(2)
if flag == 1:
break
if flag == 1:
break
# sleep(1)
if flag != 1:
chrome_driver.close()
chrome_driver.quit()
return flag
def file_copy(module, module_name):
    # The original signature took an unused `flag` argument and referenced
    # undefined names; the zip file and extracted folder name are now passed in.
    flag_zip = test_zip(module)
    if flag_zip == 0:
        return -1
    delete_folder()
    unfold_zip(module_name)
    return 1
def test_zip(module):
path_download = Chrome_driver.get_dir()
print(path_download)
# module = 'emu_multi-src-master.zip'
zipfile_name = os.path.join(path_download,module)
size = getsize(zipfile_name)/1024/1024
print('The zipfile size:',size,'M')
flag = 0
if size>25:
print('size ok')
zFile = zipfile.ZipFile(zipfile_name, "r")
        # ZipFile.namelist() returns the names of every file inside the archive
for fileM in zFile.namelist():
try:
zFile.extract(fileM, path_download)
flag = 1
except:
flag = 0
zFile.close()
        print('zip archive opened and extracted OK, not a bad zip')
else:
print('size not ok, bad zip')
return flag
def unfold_zip(module):
path_download = Chrome_driver.get_dir()
# module = 'emu_multi-src-master.zip'
files_unzip = os.listdir(path_download)
for file in files_unzip:
if 'src' in file and '.zip' not in file:
file_unzip = file
print('folder name:',file_unzip)
path_folder = os.path.join(path_download,file_unzip)
modules = os.listdir(path_folder)
    path_folder_file = [os.path.join(path_folder, file) for file in modules]
    path_cur = os.getcwd()
for file in path_folder_file:
if '.' in file:
try:
print(file)
dirname,filename = os.path.split(file)
shutil.copyfile(file,os.path.join(path_cur,filename))
except Exception as e:
print('copyfile wrong:.........',file)
print(str(e))
folders_path = [file for file in path_folder_file if '.' not in file]
for file_folder in folders_path:
dirname,filename = os.path.split(file_folder)
new_folder = os.path.join(os.getcwd(),filename)
try:
shutil.copytree(file_folder,new_folder)
except Exception as e:
print(str(e))
print('copyfolder wrong:.........',file_folder)
def change_version():
file = r'ini\\VERSION.ini'
num_db = db.get_current_version()
num_db = str.join('.',num_db)
with open(file,'w') as f:
f.write(num_db)
# def copy_zip():
# path_download = Chrome_driver.get_dir()
# path_zip = os.path.join(path_download,'emu_multi-src-master.zip')
# copyfile(path_download,)
def move_all():
path_download = Chrome_driver.get_dir()
path_folder = os.path.join(path_download,'src-master')
modules = os.listdir(path_folder)
print(modules)
files = [file for file in modules if '.' in file]
folders = [file for file in modules if '.' not in file]
print(files)
print(folders)
# def move_folder(source_path,target_path):
# # delete_folder()
# if not os.path.exists(target_path):
# os.makedirs(target_path)
# if os.path.exists(source_path):
#         # root is the path of the directory currently being walked
#         # dirs is a list of the subdirectory names in that directory (no subdirectories of subdirectories)
#         # files is a list of the file names in that directory (no subdirectories)
# for root, dirs, files in os.walk(source_path):
# for file in files:
# src_file = os.path.join(root, file)
# shutil.copy(src_file, target_path)
# print(src_file)
# pass
def delete_folder():
    print('Start deleting folders...')
    path_src = os.getcwd()
    modules_path = os.listdir(path_src)
    modules_file = [os.path.join(path_src, file) for file in modules_path if '.' in file]
    print(modules_path)
    modules_folder = [os.path.join(path_src, file) for file in modules_path if '.' not in file]
    # Remove top-level files, keeping the updater itself, git metadata and the chromedriver.
    for file in modules_file:
        if 'Auto_update' not in file and '.git' not in file and 'chromedriver' not in file:
            os.remove(file)
    print(modules_folder)
    # Empty each remaining folder, then remove the folder itself.
    for folder in modules_folder:
        for name in os.listdir(folder):
            os.remove(os.path.join(folder, name))
        os.rmdir(folder)
def clean_ports():
account = db.get_account()
plan_id = account['plan_id']
plans = db.read_plans(plan_id)
print('read plan finished')
ports = [plan['port_lpm'] for plan in plans]
luminati.delete_port(ports)
def main():
    tools.killpid()
    flag = get_code()
    if flag != 1:
        print('Update failed!!!!!!!!')
        return
    print('Update success')
    # flag_update = file_copy(module, module_name)  # extraction is handled inside get_code()
    change_version()
    clean_ports()
    changer.Restart()
def test():
# source_path = os.getcwd()
# target_path = 'ini\\'
# move_folder(source_path,source_path)
delete_folder()
if __name__ == '__main__':
main()
|
from app.DAOs.MasterDAO import MasterDAO
from app.DAOs.AuditDAO import AuditDAO
from psycopg2 import sql, errors
class PhotoDAO(MasterDAO):
"""
All Methods in this DAO close connections upon proper completion.
Do not instantiate this class and assign it, as running a method
call will render it useless afterwards.
"""
def insertPhoto(self, photourl, uid, cursor):
"""
Attempt to insert a photo's url into the photos table; Does nothing if
        the photourl is either None or an empty string. DOES NOT COMMIT CHANGES.
:param photourl: a non-empty string or None
:type photourl: str
:param cursor: createEvent method call connection cursor to database.
:type cursor: psycopg2 cursor object
:return Tuple: the photoID of the photo in the Photos table, as an SQL result
"""
        if photourl is not None and photourl != "" and not photourl.isspace():
audit = AuditDAO()
tablename = "photos"
pkey = "photourl"
oldValue = audit.getTableValueByIntID(table=tablename, pkeyname=pkey, pkeyval=photourl, cursor=cursor)
            # NOTE: the original string fragments concatenated without spaces
            # ("(photourl)values", "%sreturning"), producing invalid SQL.
            query = sql.SQL("insert into {table1} "
                            "({insert_field}) "
                            "values (%s) on conflict(photourl) "
                            "do update set photourl=%s "
                            "returning {pkey1}").format(
                table1=sql.Identifier('photos'),
                insert_field=sql.Identifier('photourl'),
                pkey1=sql.Identifier('photoid'))
cursor.execute(query, (str(photourl), str(photourl)))
result = cursor.fetchone()
newValue = audit.getTableValueByIntID(table=tablename, pkeyname=pkey, pkeyval=photourl, cursor=cursor)
audit.insertAuditEntry(changedTable=tablename, changeType=audit.INSERTVALUE, oldValue=oldValue,
newValue=newValue, uid=uid, cursor=cursor)
else:
result = [None, None]
return result
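# Hypothetical usage sketch (names assumed, not defined in this file):
# insertPhoto leaves transaction control to the caller, which supplies the
# open cursor and commits once every related insert has succeeded:
#
#     dao = PhotoDAO()
#     cursor = dao.conn.cursor()  # assumes MasterDAO exposes a `conn` attribute
#     photoid = dao.insertPhoto(photourl="https://example.com/p.jpg",
#                               uid=1, cursor=cursor)
#     dao.conn.commit()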
|
from flask import Flask, request
from newspaper import Article
import tensorflow as tf
import numpy as np
from flask_cors import CORS
ml = 2500  # maximum sequence length (tokens) fed to the model
model_path = 'model/V3/'
# load vocab
dict_vocab = []
try:
with open(model_path + 'vocab.txt', 'r') as inf:
for line in inf:
dict_vocab.append(eval(line))
vocab = dict_vocab[0]
print(len(vocab))
except Exception:
    pass  # NOTE: if loading fails, `vocab` stays undefined and encoder() will fail later
# load the model architecture; a context manager makes sure the file is closed
with open(model_path + "model.json", "r") as f:
    model = tf.keras.models.model_from_json(f.read())
# load weights
model.load_weights(model_path + "model.h5")
# preprocess
def getDomain(url):
    # Extract the host between '//' and the next '/'. If the URL has no path,
    # find() returns -1 and the original slice silently dropped the last character.
    p1 = url.find('//') + 2
    p2 = url.find('/', p1)
    return url[p1:] if p2 == -1 else url[p1:p2]
app = Flask(__name__)
def encoder(data):
    # Map each whitespace-delimited token of each string to its vocab index;
    # unknown tokens map to index 0.
    res = []
    for i in data:
        k = []
        for j in i.split():
            try:
                k.append(vocab[j])
            except Exception:
                k.append(0)
        res.append(k)
    return res
def pre_progress(data, max_len):
    # Left-pad every encoded sequence with zeros up to max_len.
    res = []
    for i in data:
        zeros = [0] * (max_len - len(i))
        res.append(zeros + i)
    return np.array(res)
def create_predict_data(ddata, tdata, cdata):
res = []
for i in range(len(ddata)):
temp = ddata[i] + ' ' + tdata[i] + ' ' + cdata[i]
res.append(temp)
# print(res)
return pre_progress(encoder(res), ml)
def RorF(domain, title, content, model):
    if type(domain) == list:
        target = create_predict_data(domain, title, content)
    else:
        # encoder() expects a list of strings; passing the bare string would
        # iterate over its characters, so wrap the combined text in a list.
        temp = domain + ' ' + title + ' ' + content
        target = pre_progress(encoder([temp]), ml)
    res = model.predict(target)
    return res
@app.route('/cn')
def predict():
url_target = request.args.get('url')
article = Article(url_target, language='vi')
article.download()
article.parse()
# print(article.title)
domain = [getDomain(url_target)]
title = [article.title]
content = [article.text]
return str((RorF(domain, title, content, model))[0][0])
# return str(RorF(domain, title, content, model))
if __name__ == '__main__':
CORS(app)
app.run(debug=True, port=5000)
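# Example request (sketch): with the server running, /cn takes a news-article
# URL and returns the model's score as a string, e.g.
#   curl 'http://localhost:5000/cn?url=https://example.com/article'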
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-11-20 22:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Candidato',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=40)),
('rg', models.CharField(max_length=9)),
('cpf', models.CharField(max_length=11)),
('idade', models.CharField(max_length=3)),
],
),
migrations.CreateModel(
name='Eleitor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=40)),
('rg', models.CharField(max_length=9)),
('cpf', models.CharField(max_length=11)),
('idade', models.CharField(max_length=3)),
],
),
migrations.CreateModel(
name='Vaga',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=40)),
],
),
migrations.CreateModel(
name='Votacao',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('consistente', models.BooleanField(default=False, verbose_name='Voto em branco')),
('dataHora', models.DateTimeField()),
('candidato', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='eleicao.Candidato')),
('eleitor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='eleicao.Eleitor')),
],
),
migrations.AddField(
model_name='candidato',
name='vaga',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='eleicao.Vaga'),
),
]
|