Dataset schema (column: type, value/length range):
- hexsha: string, length 40
- size: int64, 4 to 1.02M
- ext: string, 8 classes
- lang: string, 1 class
- max_stars_repo_path: string, length 4 to 209
- max_stars_repo_name: string, length 5 to 121
- max_stars_repo_head_hexsha: string, length 40
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 191k
- max_stars_repo_stars_event_min_datetime: string, length 24
- max_stars_repo_stars_event_max_datetime: string, length 24
- max_issues_repo_path: string, length 4 to 209
- max_issues_repo_name: string, length 5 to 121
- max_issues_repo_head_hexsha: string, length 40
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, 1 to 67k
- max_issues_repo_issues_event_min_datetime: string, length 24
- max_issues_repo_issues_event_max_datetime: string, length 24
- max_forks_repo_path: string, length 4 to 209
- max_forks_repo_name: string, length 5 to 121
- max_forks_repo_head_hexsha: string, length 40
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, 1 to 105k
- max_forks_repo_forks_event_min_datetime: string, length 24
- max_forks_repo_forks_event_max_datetime: string, length 24
- content: string, length 4 to 1.02M
- avg_line_length: float64, 1.07 to 66.1k
- max_line_length: int64, 4 to 266k
- alphanum_fraction: float64, 0.01 to 1
808b83cd4a03ada23ba7b0d19c7fdff35b5b8ea3
| 978
|
py
|
Python
|
models_test.py
|
bmritz/spend-tracker
|
093ace1a3ec20b0d9e0d918ca9e074ecfd03734c
|
[
"Apache-2.0"
] | 1
|
2019-01-20T17:09:50.000Z
|
2019-01-20T17:09:50.000Z
|
models_test.py
|
bmritz/spend-tracker
|
093ace1a3ec20b0d9e0d918ca9e074ecfd03734c
|
[
"Apache-2.0"
] | null | null | null |
models_test.py
|
bmritz/spend-tracker
|
093ace1a3ec20b0d9e0d918ca9e074ecfd03734c
|
[
"Apache-2.0"
] | null | null | null |
"""Tests."""
from models import Message
def test_msg_to_sections():
msg_content = """---------- Forwarded message ---------
From: Personal Capital <support@personalcapital.com>
Date: Sat, Jan 12, 2019 at 9:28 AM
Subject: Your Personal Capital Daily Monitor Email
To: bmritz@indiana.edu <bmritz@indiana.edu>
*this
*Section 1*
content
Auth : Kevs Convenience Store
-$2.02
*Top Gainers*
*Top Losers*
No gainers to show
No losers to show
*Accounts That Need Your Attention*
1st Source Bank: Hsa - Individual - Ending in 8056
Discovery Benefits: Health Savings Account
"""
section_1 = 'content\nAuth : Kevs Convenience Store\n-$2.02'
msg = Message(content=msg_content)
sections = msg.sections()
assert all(x in sections.keys()
for x in ["Section 1", "Top Gainers", "Top Losers", "Accounts That Need Your Attention"])
assert sections['Section 1'] == section_1
| 21.733333
| 104
| 0.643149
|
c5cf133fb6229e37733476f874a7adca4849c3bd
| 4,845
|
py
|
Python
|
framework/communication/aiocoap/util/prettyprint.py
|
nidiascampos/smartgreen
|
d574d90918702ac3bd383ed77d673f871576c5b0
|
[
"Apache-2.0"
] | 1
|
2021-02-13T07:42:04.000Z
|
2021-02-13T07:42:04.000Z
|
framework/communication/aiocoap/util/prettyprint.py
|
nidiascampos/smartgreen
|
d574d90918702ac3bd383ed77d673f871576c5b0
|
[
"Apache-2.0"
] | null | null | null |
framework/communication/aiocoap/util/prettyprint.py
|
nidiascampos/smartgreen
|
d574d90918702ac3bd383ed77d673f871576c5b0
|
[
"Apache-2.0"
] | null | null | null |
# This file is part of the Python aiocoap library project.
#
# Copyright (c) 2012-2014 Maciej Wasilak <http://sixpinetrees.blogspot.com/>,
# 2013-2014 Christian Amsüss <c.amsuess@energyharvesting.at>
#
# aiocoap is free software, this file is published under the MIT license as
# described in the accompanying LICENSE file.
"""A pretty-printer for known mime types"""
import json
import pprint
import re
import cbor
import pygments
import pygments.formatters
import pygments.lexers
from aiocoap.numbers import media_types
from communication.aiocoap.util import _register
from communication.aiocoap.util import linkformat
_register()
MEDIATYPE_HEXDUMP = 'text/vnd.aiocoap.hexdump'
def lexer_for_mime(mime):
"""A wrapper around pygments.lexers.get_lexer_for_mimetype that takes
subtypes into consideration and catches the custom hexdump mime type."""
if mime == MEDIATYPE_HEXDUMP:
return pygments.lexers.HexdumpLexer()
if mime == 'text/plain;charset=utf8':
# We have fall-throughs in place anyway, no need to go through a no-op
# TextLexer
raise pygments.util.ClassNotFound
try:
return pygments.lexers.get_lexer_for_mimetype(mime)
except pygments.util.ClassNotFound:
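# Fallback: collapse a structured-syntax suffix to its base subtype, e.g. 'application/senml+json' -> 'application/json' (illustrative example).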
mime = re.sub('^([^/]+)/.*\\+([^;]+)(;.*)?$',
lambda args: args[1] + '/' + args[2], mime)
return pygments.lexers.get_lexer_for_mimetype(mime)
def pretty_print(message):
"""Given a CoAP message, reshape its payload into something human-readable.
The return value is a triple (infos, mime, text) where text represents the
payload, mime is a type that could be used to syntax-highlight the text
(not necessarily related to the original mime type, eg. a report of some
binary data that's shaped like Markdown could use a markdown mime type),
and infos is a list of lines with additional information (like the reason for a hex
dump or the original mime type).
"""
infos = []
info = lambda m: infos.append(m)
cf = message.opt.content_format
mime_type = media_types.get(cf, "type %s" % cf)
mime_type, *parameters = mime_type.split(';')
type, _, subtype = mime_type.partition('/')
show_hex = None
if linkformat is not None and mime_type == 'application/link-format':
try:
parsed = linkformat.link_header.parse(message.payload.decode('utf8'))
except ValueError:
pass
else:
info("application/link-format content was re-formatted")
prettyprinted = ",\n".join(str(l) for l in parsed.links)
return (infos, 'application/link-format', prettyprinted)
elif subtype == 'cbor' or subtype.endswith('+cbor'):
try:
parsed = cbor.loads(message.payload)
except ValueError:
show_hex = "CBOR value is invalid"
else:
info("CBOR message shown in naรฏve Python decoding")
# Formatting it via Python b/c that's reliably available (as
# opposed to JSON which might not round-trip well). The repr for
# tags might still not be parsable, but I think chances of good
# highlighting are best this way
formatted = pprint.pformat(parsed)
return (infos, 'text/x-python3', formatted)
elif subtype == 'json' or subtype.endswith('+json'):
try:
parsed = json.loads(message.payload.decode('utf8'))
except ValueError:
pass
else:
info("JSON re-formated and indented")
formatted = json.dumps(parsed, indent=4)
return (infos, 'application/json', formatted)
# That's about the formats we do for now.
if show_hex is None:
try:
text = message.payload.decode('utf8')
except UnicodeDecodeError:
show_hex = "Message can not be parsed as UTF-8"
else:
return (infos, 'text/plain;charset=utf8', text)
info("Showing hex dump of %s payload%s" % (
mime_type if cf is not None else "untyped",
": " + show_hex if show_hex is not None else ""))
data = message.payload
# Not the most efficient hex dumper, but we won't stream video over
# this anyway
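# Each output line: an 8-digit hex offset, two groups of eight space-separated hex bytes, and the printable-ASCII rendering between '|' characters (non-printable bytes shown as '.').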
formatted = []
offset = 0
while data:
line, data = data[:16], data[16:]
formatted.append("%08x " % offset + \
" ".join("%02x" % line[i] if i < len(line) else " " for i in range(8)) + " " + \
" ".join("%02x" % line[i] if i < len(line) else " " for i in range(8, 16)) + " |" + \
"".join(chr(x) if 32 <= x <= 127 else '.' for x in line) + \
"|\n")
offset += len(line)
if offset % 16 != 0:
formatted.append("%08x\n" % offset)
return (infos, MEDIATYPE_HEXDUMP, "".join(formatted))
| 36.704545
| 103
| 0.62807
|
a28f9571e669dd85d7d84017dcdb5dfd0a03f0af
| 129
|
py
|
Python
|
oktacli/exceptions.py
|
bousquf/okta-cli
|
8073ee171bd0ae690f087fa8f3260cbd24cefbda
|
[
"MIT"
] | 28
|
2019-02-10T00:10:36.000Z
|
2022-03-02T14:33:36.000Z
|
oktacli/exceptions.py
|
bousquf/okta-cli
|
8073ee171bd0ae690f087fa8f3260cbd24cefbda
|
[
"MIT"
] | 9
|
2020-03-27T03:39:08.000Z
|
2021-12-03T21:09:57.000Z
|
oktacli/exceptions.py
|
bousquf/okta-cli
|
8073ee171bd0ae690f087fa8f3260cbd24cefbda
|
[
"MIT"
] | 11
|
2019-04-30T06:26:41.000Z
|
2022-02-06T03:41:31.000Z
|
# OktaException is defined in okta.py :)
class CLIException(Exception):
pass
class ExitException(CLIException):
pass
| 12.9
| 40
| 0.728682
|
015a22c4577c9e770eee669f9a35b646e30e1397
| 7,274
|
py
|
Python
|
benchmarks/test_utils.py
|
tbeatty/edgetpu
|
14237f65ba07b7b1d8287e9f60dd20c88562871a
|
[
"Apache-2.0"
] | 1
|
2020-02-05T15:12:53.000Z
|
2020-02-05T15:12:53.000Z
|
benchmarks/test_utils.py
|
tbeatty/edgetpu
|
14237f65ba07b7b1d8287e9f60dd20c88562871a
|
[
"Apache-2.0"
] | null | null | null |
benchmarks/test_utils.py
|
tbeatty/edgetpu
|
14237f65ba07b7b1d8287e9f60dd20c88562871a
|
[
"Apache-2.0"
] | 1
|
2020-01-08T05:55:58.000Z
|
2020-01-08T05:55:58.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utils for benchmark and manual tests."""
import argparse
import collections
import contextlib
import csv
import os
import platform
import random
import urllib.parse
import numpy as np
from PIL import Image
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--enable_assertion', dest='enable_assertion',
action='store_true', default=False)
return parser.parse_args()
def check_cpu_scaling_governor_status():
"""Checks whether CPU scaling enabled."""
with open('/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor') as f:
status = f.read()
if 'performance' != status.strip():
print('************************ WARNING *****************************')
print('CPU scaling is enabled! Please switch to \'performance\' mode ')
print('**************************************************************')
def machine_info():
"""Gets platform info to choose reference value."""
machine = platform.machine()
if machine == 'armv7l':
with open('/proc/device-tree/model') as model_file:
board_info = model_file.read()
if 'Raspberry Pi 3 Model B Rev' in board_info:
machine = 'rp3b'
elif 'Raspberry Pi 3 Model B Plus Rev' in board_info:
machine = 'rp3b+'
elif 'Raspberry Pi 4 Model B Rev 1.1' in board_info:
machine = 'rp4b'
else:
machine = 'unknown'
return machine
TEST_DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'..', 'test_data')
REFERENCE_DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'reference')
BENCHMARK_RESULT_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'result')
def test_data_path(path, *paths):
"""Returns absolute path for a given test file."""
return os.path.abspath(os.path.join(TEST_DATA_DIR, path, *paths))
def reference_path(path, *paths):
"""Returns absolute path for a given benchmark reference file."""
return os.path.abspath(os.path.join(REFERENCE_DATA_DIR, path, *paths))
def benchmark_result_path(path, *paths):
"""Returns absolute path for a given benchmark result file."""
return os.path.abspath(os.path.join(BENCHMARK_RESULT_DIR, path, *paths))
@contextlib.contextmanager
def test_image(path, *paths):
"""Returns opened test image."""
with open(test_data_path(path, *paths), 'rb') as f:
with Image.open(f) as image:
yield image
def generate_random_input(seed, n):
"""Generates a list with n uint8 numbers."""
random.seed(a=seed)
return [random.randint(0, 255) for _ in range(n)]
def prepare_classification_data_set(filename):
"""Prepares classification data set.
Args:
filename: name of the csv file. It contains filenames of images and the
categories they belong to.
Returns:
Dict with format {category_name : list of filenames}
"""
ret = collections.defaultdict(list)
with open(filename, mode='r') as csv_file:
for row in csv.DictReader(csv_file):
if not row['URL']:
continue
url = urllib.parse.urlparse(row['URL'])
filename = os.path.basename(url.path)
ret[row['Category']].append(filename)
return ret
def prepare_images(image_list, directory, shape):
"""Reads images and converts them to numpy array with specified shape.
Args:
image_list: a list of strings storing file names.
directory: string, path of directory storing input images.
shape: a 2-D tuple represents the shape of required input tensor.
Returns:
A list of numpy.array.
"""
ret = []
for filename in image_list:
file_path = os.path.join(directory, filename)
if not os.path.isfile(file_path):
continue
with Image.open(file_path) as img:
img = img.resize(shape, Image.NEAREST)
flat_img = np.asarray(img).flatten()
if flat_img.shape[0] == shape[0] * shape[1] * 3:
ret.append(flat_img)
return np.array(ret)
def read_reference(file_name):
"""Reads reference from csv file.
Args:
file_name: string, name of the reference file.
Returns:
model_list: list of string.
reference: { environment : reference_time}, environment is a string tuple
while reference_time is a float number.
"""
model_list = set()
reference = {}
with open(reference_path(file_name), newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
# Drop first line(column names).
next(reader)
for row in reader:
reference[tuple(row[:-1])] = float(row[-1])
model_list.add(row[0])
return sorted(model_list), reference
def check_result(reference, result_list, enable_assertion):
"""Checks result, warns when latency is abnormal.
Args:
reference: { environment : reference_time}, environment is a string tuple
while reference_time is a float number.
result_list: a list of tuple.
enable_assertion: bool, raise an assertion when unexpected latency is detected.
"""
# Allow 30% variance.
variance_threshold = 0.30
print('******************** Check results *********************')
cnt = 0
# Drop first line(column name).
for result in result_list[1:]:
environment = result[:-1]
inference_time = result[-1]
if environment not in reference:
print(' * No matching record for [%s].' % (','.join(environment)))
cnt += 1
continue
reference_latency = reference[environment]
up_limit = reference_latency * (1 + variance_threshold)
down_limit = reference_latency * (1 - variance_threshold)
if inference_time > up_limit:
msg = ((' * Unexpected high latency! [%s]\n'
' Inference time: %s ms Reference time: %s ms') %
(','.join(environment), inference_time, reference_latency))
print(msg)
cnt += 1
if inference_time < down_limit:
msg = ((' * Unexpected low latency! [%s]\n'
' Inference time: %s ms Reference time: %s ms') %
(','.join(environment), inference_time, reference_latency))
print(msg)
cnt += 1
print('******************** Check finished! *******************')
if enable_assertion:
assert cnt == 0, 'Benchmark test failed!'
def save_as_csv(file_name, result):
"""Saves benchmark result as csv files.
Args:
file_name: string, name of the saved file.
result: A list of tuple.
"""
os.makedirs(BENCHMARK_RESULT_DIR, exist_ok=True)
with open(benchmark_result_path(file_name), 'w', newline='') as csv_file:
writer = csv.writer(
csv_file, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for line in result:
writer.writerow(line)
print(file_name, ' saved!')
| 32.328889
| 80
| 0.659747
|
e785c76ea0b95dd944a0359055ea67b92fd0e617
| 3,427
|
py
|
Python
|
ElevatorBot/commands/destiny/weapon.py
|
LukasSchmid97/destinyBloodoakStats
|
1420802ce01c3435ad5c283f44eb4531d9b22c38
|
[
"MIT"
] | 3
|
2019-10-19T11:24:50.000Z
|
2021-01-29T12:02:17.000Z
|
ElevatorBot/commands/destiny/weapon.py
|
LukasSchmid97/destinyBloodoakStats
|
1420802ce01c3435ad5c283f44eb4531d9b22c38
|
[
"MIT"
] | 29
|
2019-10-14T12:26:10.000Z
|
2021-07-28T20:50:29.000Z
|
ElevatorBot/commands/destiny/weapon.py
|
LukasSchmid97/destinyBloodoakStats
|
1420802ce01c3435ad5c283f44eb4531d9b22c38
|
[
"MIT"
] | 2
|
2019-10-13T17:11:09.000Z
|
2020-05-13T15:29:04.000Z
|
# from discord.ext.commands import Cog
# from discord_slash import cog_ext
# from discord_slash import SlashContext
# from discord_slash.utils.manage_commands import create_choice
# from discord_slash.utils.manage_commands import create_option
#
# from ElevatorBot.commandHelpers.optionTemplates import default_user_option
# from ElevatorBot.commandHelpers.optionTemplates import get_mode_choices
#
#
# class Weapon(Cog):
# def __init__(self, client):
# self.client = client
#
# @cog_ext.cog_slash(
# name="weapon",
# description="Shows weapon stats for the specified weapon with in-depth customisation",
# options=[
# create_option(
# name="weapon",
# description="The name of the weapon you want to see stats for",
# option_type=3,
# required=True,
# ),
# create_option(
# name="stat",
# description="Which stat you want to see for the weapon",
# option_type=3,
# required=False,
# choices=[
# create_choice(name="Kills (default)", value="kills"),
# create_choice(name="Precision Kills", value="precisionkills"),
# create_choice(name="% Precision Kills", value="precisionkillspercent"),
# ],
# ),
# create_option(
# name="graph",
# description="Default: 'False' - See a timeline of your weapon usage instead of an overview of key stats",
# option_type=5,
# required=False,
# ),
# create_option(
# name="class",
# description="You can restrict the class where the weapon stats count",
# option_type=3,
# required=False,
# choices=[
# create_choice(name="Warlock", value="2271682572"),
# create_choice(name="Hunter", value="671679327"),
# create_choice(name="Titan", value="3655393761"),
# ],
# ),
# create_option(
# name="starttime",
# description="Format: 'DD/MM/YY' - You can restrict the time from when the weapon stats start counting",
# option_type=3,
# required=False,
# ),
# create_option(
# name="endtime",
# description="Format: 'DD/MM/YY' - You can restrict the time up until which the weapon stats count",
# option_type=3,
# required=False,
# ),
# create_option(
# name="mode",
# description="You can restrict the game mode where the weapon stats count",
# option_type=3,
# required=False,
# choices=get_mode_choices(),
# ),
# create_option(
# name="activityhash",
# description="You can restrict the activity where the weapon stats count (advanced)",
# option_type=4,
# required=False,
# ),
# default_user_option(),
# ],
# )
# async def _weapon(self, ctx: SlashContext, **kwargs):
# pass
#
#
# def setup(client):
# Weapon(client)
| 39.390805
| 123
| 0.520572
|
98f3acd2ee3e29a26696c320111aba7876c64021
| 1,138
|
py
|
Python
|
setup.py
|
aprendizaje-de-maquinas/addict
|
54f00e3e3d32446571996f2050b831b5fe6f9a52
|
[
"MIT"
] | 1
|
2019-12-14T15:35:10.000Z
|
2019-12-14T15:35:10.000Z
|
setup.py
|
mbenhaddou/addict
|
cf29d47eab24a7d935cb6841d13eac686dcd6e86
|
[
"MIT"
] | null | null | null |
setup.py
|
mbenhaddou/addict
|
cf29d47eab24a7d935cb6841d13eac686dcd6e86
|
[
"MIT"
] | null | null | null |
from setuptools import setup
import addict
SHORT='Addict is a dictionary whose items can be set using both attribute and item syntax.'
LONG=('Addict is a module that exposes a dictionary subclass that allows items to be set like attributes. '
'Values are gettable and settable using both attribute and item syntax. '
'For more info check out the README at \'github.com/mewwts/addict\'.')
setup(
name='addict',
version=addict.__version__,
packages=['addict'],
url='https://github.com/mewwts/addict',
author=addict.__author__,
author_email='mats@plysjbyen.net',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
],
description=SHORT,
long_description=LONG,
test_suite='test_addict',
package_data={'': ['LICENSE']}
)
| 36.709677
| 107
| 0.666081
|
764e7d831f594f8fd48a09d869bad37feb65fb1b
| 19,987
|
py
|
Python
|
fireworks/core/message_test.py
|
kellylab/Fireworks
|
ff027cd8d1b8ce5eec6a37d786e7de675d8c0849
|
[
"MIT"
] | 9
|
2019-05-01T01:22:10.000Z
|
2020-12-08T15:41:13.000Z
|
fireworks/core/message_test.py
|
smk508/Fireworks
|
ff027cd8d1b8ce5eec6a37d786e7de675d8c0849
|
[
"MIT"
] | 53
|
2019-01-20T17:02:38.000Z
|
2019-03-24T18:00:08.000Z
|
fireworks/core/message_test.py
|
smk508/Fireworks
|
ff027cd8d1b8ce5eec6a37d786e7de675d8c0849
|
[
"MIT"
] | 4
|
2019-07-04T15:39:46.000Z
|
2021-08-17T04:59:25.000Z
|
from fireworks import message as messi
from fireworks import Message, TensorMessage
import torch
import os
import numpy as np
from itertools import product
import pandas as pd
from itertools import count
from io import BytesIO
import pickle
tensors = {
'a': torch.Tensor([1,2,3]),
'b': torch.Tensor([4,5,6]),
}
vectors = {
'c': np.array([7,8,9]),
'd': np.array([10,11,12]),
}
dtensors = {
'a': torch.Tensor([[1,2,3],[4,5,6],[7,8,9]]),
'b': torch.Tensor([[-1,-2,-3],[-4,-5,-6], [-7,-8,-9]]),
}
def test_compute_length():
l = messi.compute_length(tensors)
assert l == 3
l = messi.compute_length(vectors)
assert l == 3
def test_extract_tensors():
target = {**tensors, **vectors}
t, v = messi.extract_tensors(target)
assert t == tensors
assert v == vectors
t, v = messi.extract_tensors(tensors)
assert t == tensors
assert v == {}
t, v = messi.extract_tensors(vectors)
assert t == {}
assert v == vectors
def test_complement():
n = 10
index = 7
complement = messi.complement(index, n)
assert complement == [0,1,2,3,4,5,6,8,9]
index = slice(2,5)
complement = messi.complement(index, n)
assert complement == [0,1,5,6,7,8,9]
index = [2,4,6]
complement = messi.complement(index, n)
assert complement == [0,1,3,5,7,8,9]
def test_Message():
""" Test init, getitem, and len methopl. """
def attribute_test(message, length = 3):
assert len(message) == length
assert message[0].tensors() == {
'a': torch.Tensor([1]),
'b': torch.Tensor([4]),
}
assert message[0].dataframe().equals(pd.DataFrame({
'c': np.array([7]),
'd': np.array([10]),
}))
assert message[0] == Message({'a': torch.Tensor([1]),'b': torch.Tensor([4])}, pd.DataFrame({'c': np.array([7]),'d': np.array([10]),}))
assert message[1:3].tensors() == {
'a': torch.Tensor([2,3]),
'b': torch.Tensor([5,6]),
}
assert message[1:3].dataframe().equals(pd.DataFrame({
'c': np.array([8,9]),
'd': np.array([11,12]),
}))
assert (message['a'] == torch.Tensor([1,2,3])).all()
assert message[['a','c']] == Message({'a': torch.Tensor([1,2,3]), 'c': np.array([7,8,9])})
assert message[1:3] == Message({'a': torch.Tensor([2,3]),'b': torch.Tensor([5,6])}, pd.DataFrame({'c': np.array([8,9]),'d': np.array([11,12])}))
# Test length
assert len(message) == length
# Test __getitem__
# Init empty message
m = Message()
assert len(m) == 0
# Init message from tensor_dict / TensorMessage and dict of arrays / dataframe using positional arguments.
tensor_message = TensorMessage(tensors)
tensor_as_message = Message(tensors = tensors)
df = pd.DataFrame(vectors)
df_as_message = Message(df = vectors)
# Try every combination
tensor_options = [tensors, tensor_message, tensor_as_message]
vector_options = [vectors, df, df_as_message]
for t, v in product(tensor_options, vector_options):
m = Message(t, v)
attribute_test(m)
m = Message(tensors = t, df = v)
attribute_test(m)
# Test one sided Messages
for t in tensor_options:
m = Message(t, None)
assert len(m) == 3
assert m == Message(tensors)
for v in vector_options:
m = Message(None, v)
assert len(m) == 3
assert m == Message(vectors)
# Init message from a single dict
everything = {**tensors, **vectors}
m = Message(everything)
attribute_test(m)
def test_Message_from_objects():
v = vectors.copy()
t = tensors.copy()
v['c'] = np.array([1.,2.])
v['r'] = 'howdy'
t['a'] = torch.randn(5)
t['q'] = torch.randn([4,3])
combined = {**t, **v}
m = Message.from_objects(t, v)
assert (set(m.keys()) == set(['c','d','r','b','a','q']))
for key in ['c','d','b','a','q']:
assert (m[key][0] == combined[key]).all()
assert m['r'][0] == combined['r']
assert len(m) == 1
def test_getitem():
m = Message(tensors, vectors)
assert m[0] == Message({'a': torch.Tensor([1]), 'b': torch.Tensor([4])}, {'c': np.array([7]), 'd': np.array([10])})
assert m[[0,2]] == Message({'a': torch.Tensor([1,3]), 'b': torch.Tensor([4,6])}, {'c': np.array([7,9]), 'd': np.array([10,12])})
# Check that out of bounds index calls raise errors
try:
m[3]
assert False
except IndexError:
assert True
try:
m[3:5]
assert False
except IndexError:
assert True
def test_cache(): pass
def test_tensors():
m = Message(tensors, vectors)
t = m.tensors()
assert t == TensorMessage(tensors)
t = m.tensors(keys=['a'])
assert t == TensorMessage({'a': tensors['a']})
t = m.tensors(keys=['a','c'])
assert t == TensorMessage({'a': tensors['a'], 'c': torch.Tensor(vectors['c'])})
def test_df():
m = Message(tensors, vectors)
df = m.dataframe()
assert df.equals(pd.DataFrame(vectors))
df = m.dataframe(keys=['c'])
assert df.equals(pd.DataFrame({'c': vectors['c']}))
df = m.dataframe(keys=['c','a'])
assert (df == (pd.DataFrame({'c': vectors['c'], 'a': np.array(tensors['a'])}))).all().all()
def test_to_dataframe():
mo = Message(tensors,vectors)
# no = mo.to_dataframe()
# assert no.tensor_message == {}
# assert (no['a'] == mo['a']).all()
# assert (no['b'] == mo['b']).all()
# for letter in ['a','b','c','d']:
# assert letter in no.df
lo = Message(dtensors, vectors)
ok = lo.to_dataframe()
for i in range(3):
assert (ok['a'][i] == dtensors['a'][i].numpy()).all()
assert (ok['b'][i] == dtensors['b'][i].numpy()).all()
def test_cpu_gpu():
m = Message(tensors, vectors)
m.cpu()
assert set(m.tensors().keys()) == set(['a','b'])
for key, tensor in m.tensors().items():
assert tensor.device.type == 'cpu'
if torch.cuda.is_available():
m.cuda()
for key, tensor in m.tensors().items():
assert tensor.device.type == 'cuda'
m.cpu()
for key, tensor in m.tensors().items():
assert tensor.device.type == 'cpu'
def test_append():
t = tensors
v = vectors
m1 = Message(t, v)
m2 = Message(t, v)
m3 = Message(t)
m4 = TensorMessage(t)
m5 = Message(pd.DataFrame(v))
m6 = pd.DataFrame(v)
m0 = Message()
assert(len(m0) == 0)
m = m0.append(Message(t))
assert m == Message(t)
m = m0.append(Message(v))
assert m == Message(v)
m = m0.append(Message(t,v))
assert m == Message(t,v)
m = m1.append(m2)
assert len(m) == 6
assert m == Message({'a': torch.Tensor([1,2,3,1,2,3]), 'b': torch.Tensor([4,5,6,4,5,6])}, {'c': np.array([7,8,9,7,8,9]), 'd': np.array([10,11,12,10,11,12])})
m = m3.append(t)
assert len(m) == 6
assert m == Message({'a': torch.Tensor([1,2,3,1,2,3]), 'b': torch.Tensor([4,5,6,4,5,6])})
m = m3.append(m3)
assert len(m) == 6
assert m == Message({'a': torch.Tensor([1,2,3,1,2,3]), 'b': torch.Tensor([4,5,6,4,5,6])})
m = m3.append(m4)
assert len(m) == 6
assert m == Message({'a': torch.Tensor([1,2,3,1,2,3]), 'b': torch.Tensor([4,5,6,4,5,6])})
m = m4.append(t)
assert len(m) == 6
assert m == TensorMessage({'a': torch.Tensor([1,2,3,1,2,3]), 'b': torch.Tensor([4,5,6,4,5,6])})
m = m4.append(m3)
assert len(m) == 6
assert m == TensorMessage({'a': torch.Tensor([1,2,3,1,2,3]), 'b': torch.Tensor([4,5,6,4,5,6])})
m = m4.append(m4)
assert len(m) == 6
assert m == TensorMessage({'a': torch.Tensor([1,2,3,1,2,3]), 'b': torch.Tensor([4,5,6,4,5,6])})
m = m5.append(v)
assert len(m) == 6
assert m == Message({'c': np.array([7,8,9,7,8,9]), 'd': np.array([10,11,12,10,11,12])})
m = m5.append(m5)
assert len(m) == 6
assert m == Message({'c': np.array([7,8,9,7,8,9]), 'd': np.array([10,11,12,10,11,12])})
m = m5.append(m6)
assert len(m) == 6
assert m == Message({'c': np.array([7,8,9,7,8,9]), 'd': np.array([10,11,12,10,11,12])})
# Test type conversions on appending to TensorMessage
m = m4.append({'a': np.array([42]), 'b': np.array([24])})
assert len(m) == 4
assert m == TensorMessage({'a': torch.Tensor([1,2,3,42]), 'b': torch.Tensor([4,5,6,24])})
def test_join():
t = tensors
v = vectors
t2 = {'d': torch.Tensor([13,14,15])}
v2 = {'e': np.array([16,17,18])}
m1 = Message(t,v)
m2 = Message(t)
m2_t = TensorMessage(t)
m3 = Message(v)
m4 = Message(t2,v2)
m5 = Message(t2)
m5_t = TensorMessage(t2)
m6 = Message(v2)
m7 = Message(t,v2)
m8 = Message(t2, v)
# Test if a tensor message can be merged into a message and vice versa
assert m2.merge(m3) == m1
assert m3.merge(m2) == m1
assert m3.merge(m2_t) == m1
assert m3.merge(t) == m1
# Test if the tensors in messages can be merged
assert m2.merge(t2) == Message({**t, **t2})
assert m2.merge(m5) == Message({**t, **t2})
assert m2.merge(m5_t) == Message({**t, **t2})
assert m2_t.merge(t2) == TensorMessage({**t, **t2})
assert m2_t.merge(m5) == TensorMessage({**t, **t2})
assert m2_t.merge(m5_t) == TensorMessage({**t, **t2})
# Test if the dataframes in messages can be merged
assert m3.merge(m6) == Message({**v, **v2})
assert m6.merge(m3) == Message({**v, **v2})
assert m3.merge(v2) == Message({**v, **v2})
def test_Message_set_get():
# Test point updates
email = Message(tensors, vectors)
gmail = Message({'a':torch.Tensor([1,42,3]), 'b':torch.Tensor([4,43,6]), 'c': np.array([7,99,9]), 'd': np.array([10,100,12])})
replacement = {'a': torch.Tensor([42]), 'b': torch.Tensor([43]), 'c': np.array([99]), 'd': np.array([100])}
assert len(email) == 3
assert email != gmail
email[1] = replacement
assert email == gmail
# Test ranged updates
email = Message(tensors, vectors)
gmail = Message({'a':torch.Tensor([1,42,33]), 'b':torch.Tensor([4,43,66]), 'c': np.array([7,99,99]), 'd': np.array([10,100,122])})
replacement = {'a': torch.Tensor([42,33]), 'b': torch.Tensor([43,66]), 'c': np.array([99,99]), 'd': np.array([100,122])}
assert email != gmail
email[1:3] = replacement
assert email == gmail
# Test updates using lists as indexes
email = Message(tensors, vectors)
assert email != gmail
email[[1,2]] = replacement
assert email == gmail
# Test column updates
email['a'] = torch.Tensor([9,9,9])
assert torch.equal(email['a'], torch.Tensor([9,9,9]))
email['c'] = np.array([9,9,9])
assert email['c'].equals(pd.Series([9,9,9]))
# Test column updates that switch from df to tensor and vice-versa
email = Message(tensors, vectors)
assert set(email.columns) == set(['a','b','c','d'])
assert set(email.tensor_message.columns) == set(['a','b'])
assert set(email.df.columns) == set(['c','d'])
new_a = np.array([1,2,3]) # Switch from tensor to vector
email['a'] = new_a
assert set(email.columns) == set(['a','b','c','d'])
assert set(email.tensor_message.columns) == set(['b'])
assert set(email.df.columns) == set(['a','c','d'])
assert (email['a'] == new_a).all()
new_c = torch.Tensor([7,8,9])
email['c'] = new_c
assert set(email.columns) == set(['a','b','c','d'])
assert set(email.tensor_message.columns) == set(['b','c'])
assert set(email.df.columns) == set(['a','d'])
assert (email['c'] == new_c).all()
# Test column updates that end up clearing either self.df or self.tensor_message
email = Message(tensors, vectors)
df = email.dataframe(['a', 'b'])
assert len(email) == 3
assert len(email.tensor_message) == 3
assert len(email.df) == 3
email[['a','b']] = df
assert len(email) == 3
assert len(email.tensor_message) == 0
assert len(email.df) == 3
# TODO: Test the other way around
def test_Message_del():
t = {
'a': torch.Tensor([1,2,3]),
'b': torch.Tensor([4,5,6]),
}
v = {
'c': np.array([7,8,9]),
'd': np.array([10,11,12]),
}
t2 = {
'a': torch.Tensor([1,2]),
'b': torch.Tensor([4,5]),
}
v2 = {
'c': np.array([7,8]),
'd': np.array([10,11]),
}
t3 = {
'a': torch.Tensor([1]),
'b': torch.Tensor([4]),
}
v3 = {
'c': np.array([7]),
'd': np.array([10]),
}
# Test deletions for messages with only tensors, only df, and both
# Test point deletions
m = Message(t,v)
m1 = Message(t)
m2 = Message(v)
assert m != Message(t2,v2)
assert m1 != Message(t2)
assert m2 != Message(v2)
assert len(m) == 3
assert len(m1) == 3
assert len(m2) == 3
del m[2]
del m1[2]
del m2[2]
assert len(m) == 2
assert len(m1) == 2
assert len(m2) == 2
assert m == Message(t2,v2)
assert m1 == Message(t2)
assert m2 == Message(v2)
# Test range deletions
m = Message(t,v)
m1 = Message(t)
m2 = Message(v)
assert m != Message(t3,v3)
assert m1 != Message(t3)
assert m2 != Message(v3)
assert len(m) == 3
assert len(m1) == 3
assert len(m2) == 3
del m[1:3]
del m1[1:3]
del m2[1:3]
assert len(m) == 1
assert len(m1) == 1
assert len(m2) == 1
assert m == Message(t3,v3)
assert m1 == Message(t3)
assert m2 == Message(v3)
# Test list deletions
m = Message(t,v)
m1 = Message(t)
m2 = Message(v)
assert m != Message(t3,v3)
assert m1 != Message(t3)
assert m2 != Message(v3)
assert len(m) == 3
assert len(m1) == 3
assert len(m2) == 3
del m[[1,2]]
del m1[[1,2]]
del m2[[1,2]]
assert len(m) == 1
assert len(m1) == 1
assert len(m2) == 1
assert m == Message(t3,v3)
assert m1 == Message(t3)
assert m2 == Message(v3)
# Test column deletions
m = Message(t,v)
assert set(m.columns) == set(['a','b','c','d'])
del m['a']
assert set(m.columns)== set(['b','c','d'])
del m['c']
assert set(m.columns) == set(['b','d'])
def test_Message_iter():
m = Message(tensors, vectors)
l = len(m)
for x,i in zip(m, count()):
assert type(x) is Message
if i > l:
assert False
assert i == l - 1
t = TensorMessage(tensors)
l = len(t)
for x,i in zip(t, count()):
assert type(x) is TensorMessage
if i > l:
assert False
assert i == l - 1
def test_map(): pass
def test_TensorMessage():
a = [1,2,3]
b = [4, 5, 6]
# Test init
empty = TensorMessage()
assert len(empty) == 0
assert empty.keys() == {}.keys()
email = TensorMessage({'a': a, 'b':b})
# Test error cases
# TODO: test error cases
# Test length
assert len(email) == 3
# Test getitem
x = email[2]
assert set(x.keys()) == set(['a','b'])
assert (x['a'] == torch.Tensor([3])).all()
assert (x['b'] == torch.Tensor([6])).all()
x = email[0:2]
assert set(x.keys()) == set(['a','b'])
assert (x['a'] == torch.Tensor([1,2])).all()
assert (x['b'] == torch.Tensor([4,5])).all()
# Test for length 1 init
gmail = TensorMessage({'a':1, 'b': 80})
assert len(gmail) == 1
y = gmail[0]
assert set(y.keys()) == set(['a','b'])
assert (y['a'] == torch.Tensor([1])).all()
assert (y['b'] == torch.Tensor([80])).all()
# Test extend
yahoomail = email.append(gmail)
assert len(yahoomail) == 4
z = yahoomail[0:4]
assert set(z.keys()) == set(['a','b'])
assert (z['a'] == torch.Tensor([1,2,3,1])).all()
assert (z['b'] == torch.Tensor([4, 5, 6, 80])).all()
def test_TensorMessage_set_get_del():
a = [1,2,3]
b = [4, 5, 6]
email = TensorMessage({'a': a, 'b':b})
replacement = {'a': torch.Tensor([42]), 'b': torch.Tensor([43])}
gmail = TensorMessage({'a':torch.Tensor([42,2,3]), 'b': torch.Tensor([43,5,6])})
yahoomail = TensorMessage({'a':torch.Tensor([2,3]), 'b': torch.Tensor([5,6])})
assert email != gmail
email[0] = replacement
assert email == gmail
assert len(email) == 3
email['a'] = torch.Tensor([9,9,9])
assert torch.equal(email['a'], torch.Tensor([9,9,9]))
assert gmail != yahoomail
del gmail[0]
assert len(gmail) == 2
assert gmail == yahoomail
# Test column deletions
email = TensorMessage({'a': a, 'b':b})
assert set(email.columns) == set(['a','b'])
del email['a']
assert set(email.columns) == set(['b'])
# Test that out of bounds requests raise errors
try:
email[3]
assert False
except IndexError:
assert True
try:
email[3:5]
assert False
except IndexError:
assert True
# Test length adjustment if all columns are deleted
zohomail = TensorMessage({'a':a,'b':b})
assert len(zohomail) == 3
del zohomail['a']
assert len(zohomail) == 3
del zohomail['b']
assert len(zohomail) == 0
def test_TensorMessage_eq():
a = [1,2,3]
b = [4, 5, 6]
# Test init
email = TensorMessage({'a': a, 'b':b})
gmail = TensorMessage(email)
def test_cat():
m = Message(tensors, vectors)
m0 = m[0]
m1 = m[1]
m2 = m[2]
babaghanush = messi.cat([m0,m1,m2])
assert babaghanush == m
def test_TensorMessage_permute():
a = [1,2,3]
b = [4, 5, 6]
email = TensorMessage({'a': a, 'b':b})
gmail = email.permute([2,1,0])
assert gmail == TensorMessage({'a':[3,2,1], 'b':[6,5,4]})
gmail = email.permute([0,0,0])
assert gmail == TensorMessage({'a':[1,1,1], 'b':[4,4,4]})
def test_permute():
tensors = {
'a': torch.Tensor([1,2,3]),
'b': torch.Tensor([4,5,6]),
}
vectors = {
'c': np.array([7,8,9]),
'd': np.array([10,11,12]),
}
email = Message(tensors, vectors)
gmail = email.permute([2,1,0])
assert gmail == Message({'a':[3,2,1], 'b':[6,5,4]}, {'c': np.array([9,8,7]), 'd': np.array([12,11,10])})
gmail = email.permute([0,0,0])
assert gmail == Message({'a':[1,1,1], 'b':[4,4,4]}, {'c': np.array([7,7,7]), 'd': np.array([10,10,10])})
# Test with only tensors
email = Message(tensors)
gmail = email.permute([2,1,0])
assert gmail == Message({'a':[3,2,1], 'b':[6,5,4]}, {})
gmail = email.permute([0,0,0])
assert gmail == Message({'a':[1,1,1], 'b':[4,4,4]}, {})
# Test with only dataframes
email = Message(vectors)
gmail = email.permute([2,1,0])
assert gmail == Message({'c': np.array([9,8,7]), 'd': np.array([12,11,10])})
gmail = email.permute([0,0,0])
assert gmail == Message({'c': np.array([7,7,7]), 'd': np.array([10,10,10])})
def test_to_csv():
m = Message(tensors, vectors)
pass #TODO: Implement
def test_to_pickle():
m = Message(tensors, vectors)
pass #TODO: Implement
def test_to_sql():
m = Message(tensors, vectors)
pass #TODO: Implement
def test_to_dict():
m = Message(tensors, vectors)
md = m.to_dict()
assert type(md) is dict
assert (md['c'] == md['c'])
assert (md['d'] == md['d'])
assert (md['a'] == np.array(md['a'])).all()
assert (md['b'] == np.array(md['b'])).all()
def test_to_excel():
m = Message(tensors, vectors)
pass #TODO: Implement
def test_to_json():
m = Message(tensors, vectors)
pass #TODO: Implement
def test_to_string():
m = Message(tensors, vectors)
pass #TODO: Implement
def test_save_load():
m = Message(tensors, vectors)
test_path = 'test.fireworks'
m.save(test_path)
new_m = Message.load(test_path)
assert new_m == m
os.remove(test_path)
buffer = BytesIO()
m.save(buffer)
buffed_m = Message.load(buffer)
assert buffed_m == m
def test_pickle():
m = Message(tensors, vectors)
state = pickle.dumps(m)
new_m = pickle.loads(state)
assert new_m == m
| 30.100904
| 161
| 0.551609
|
014443d27280436fa9155d28fe25026c95ccd13f
| 4,361
|
py
|
Python
|
Spyder/newsSpyder-.py
|
Ironstarboy/DataSciBasic
|
6fb5af851388a3d6dfab7c3bcc6916f3e19ba654
|
[
"MIT"
] | 1
|
2021-04-30T12:53:03.000Z
|
2021-04-30T12:53:03.000Z
|
Spyder/newsSpyder-.py
|
Ironstarboy/DataSciBasic
|
6fb5af851388a3d6dfab7c3bcc6916f3e19ba654
|
[
"MIT"
] | null | null | null |
Spyder/newsSpyder-.py
|
Ironstarboy/DataSciBasic
|
6fb5af851388a3d6dfab7c3bcc6916f3e19ba654
|
[
"MIT"
] | 3
|
2021-03-06T07:55:26.000Z
|
2021-04-30T12:52:58.000Z
|
import datetime
from fake_useragent import UserAgent
import requests
import re
import os
from bs4 import BeautifulSoup as bs
import time
import random
def show_time(seconds):
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return "%d:%02d:%02d" % (h, m, s)
def get_random_header():  # random User-Agent header
ua = UserAgent()
user_agent = ua.random
return user_agent
def get_html_text(url):
'''Fetch the page source of the given URL.'''
sleepTime=random.uniform(1,2.33)  # wait time between requests; do not make it too small
time.sleep(sleepTime)
myheader=get_random_header()
try:
r=requests.request("GET",url,headers={'user-agent':myheader},timeout=3)
r.encoding='utf-8'
#r.apparent_encoding
return r.text
except Exception as e:
return ''
'''
<div class="box-result clearfix" data-sudaclick="blk_result_index_3">
<h2><a href="https://news.sina.com.cn/o/2019-12-26/doc-iihnzahk0127138.shtml" target="_blank">国家卫健委：目前全国传染病<font color="red">疫情</font>形势总体平稳</a>
<span class="fgray_time">中国新闻网 2019-12-26 15:23:32</span></h2>
<div class="r-img">
<a href="https://news.sina.com.cn/o/2019-12-26/doc-iihnzahk0127138.shtml" target="_blank" class="a-img"><img alt="" class="left_img" width="120" onload="a_r_i(this);" onerror="set_right_url3(this,'http:\/\/n.sinaimg.cn\/spider20191226\/145\/w540h405\/20191226\/1c81-imfiehq4029080.jpg');" src="http://n.sinaimg.cn/spider20191226/145/w540h405/20191226/1c81-imfiehq4029080.jpg" /></a>
</div>
<div class="r-info">
<p class="content"> 　　国家卫健委：目前中国传染病<font color="red">疫情</font>形势总体平稳 中新社北京12月26日电 (记者 李亚南)中国国家卫生健康委员会疾病预防控制局副局长王斌26日在北京表示</p>
</div>
</div>
'''
def getOutcomeHtmlText(htmltext):  # returns the list of HTML snippets containing the search results, in the format of the sample above
soup = bs(htmltext, 'html.parser',)
eachOutcomeText=soup.find_all('div',attrs={'class':"box-result clearfix"})  # the source block corresponding to each search result
return eachOutcomeText
def save_outcome_info2csv(htmlText,filename):
title = ''
jumpUrl=''
source_and_time=''
source=''
publish_time=''
try:
title = re.search('target="_blank">(.*?)</a>', htmlText).group(1).strip().replace('<font color="red">','').replace('</font>','')
jumpUrl = re.search('<a href="(.*?)" target="_blank">',htmlText).group(1).strip()
source_and_time=re.search('<span class="fgray_time">(.*?)</span>',htmlText).group(1).strip()
spaceIndex=source_and_time.index(' ')
source=source_and_time[:spaceIndex]
publish_time=source_and_time[spaceIndex+1:spaceIndex+11]
except Exception as e:
print(e)
with open(filename,'a+') as f:  # encoding errors may occur here
try:
f.write(title+',')
f.write(jumpUrl+',')
f.write(source+',')
f.write(publish_time+'\n')
except:
f.write('\n')
def urlParam(stime, etime, page, keyword='%e8%82%ba%e7%82%8e', my_range='title'):  # my_range: 'all' = full text, 'title' = title only
'''stime/etime format: 2020-01-01'''
out='https://search.sina.com.cn/?q={keyword}&c=news&range={my_range}&size=20&time=2020&stime={stime}%2000:00:00&etime={etime}%2023:59:59&num=10&page={page}'.format(keyword=keyword,my_range=my_range, stime=stime, etime=etime, page=page)
return out
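# Usage sketch (hypothetical values): urlParam('2020-03-18', '2020-03-18', '0') builds the Sina news search URL for that single day, page 0, using the default URL-encoded keyword (肺炎, "pneumonia").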
def timeitr(smonth,sday,emonth,eday,year=2020):  # iterate over the dates in the given range and return them as a list of date strings; both endpoints are inclusive
begin = datetime.date(year, smonth, sday)
end = datetime.date(year, emonth, eday)
outDaylst=[]
for i in range((end - begin).days + 1):
outday = begin + datetime.timedelta(days=i)
outDaylst.append(str(outday))
return outDaylst
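# For example, timeitr(3, 18, 3, 20, 2020) returns ['2020-03-18', '2020-03-19', '2020-03-20'].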
def run():
# edit the parameters here
keyword='肺炎'
my_range='all'  # 'all' = full text, 'title' = title only
fileName=r'test.csv'
days=timeitr(3,18,3,18,2020)  # inclusive range; split into two calls when spanning a year boundary
for ymd in days:#ymd:year month day
for page in range(1):
currentPageUrl=urlParam(ymd,ymd,str(page),keyword,my_range)
currentPageText=get_html_text(currentPageUrl)
outcomeTextList=getOutcomeHtmlText(currentPageText)
for i in range(len(outcomeTextList)):
text=str(outcomeTextList[i]).replace('\n','')
save_outcome_info2csv(text,fileName)
print(ymd+' done!')
print('done!')
if __name__=='__main__':
start_time = datetime.datetime.now()  # measure the main program's run time
run()
end_time = datetime.datetime.now()
seconds=(end_time - start_time).seconds
spendTime=show_time(seconds)
print(spendTime)
| 31.601449
| 387
| 0.649163
|
6497c3c632d6649abc1604de6e48ee44ba18552d
| 598
|
py
|
Python
|
outros-codigos/beautiful.py
|
Exterminus/WebScrapingSecomp
|
18a8a079dcb995f965e6a346724f4bbb585ce706
|
[
"MIT"
] | 1
|
2018-09-14T04:14:43.000Z
|
2018-09-14T04:14:43.000Z
|
outros-codigos/beautiful.py
|
Exterminus/WebScrapingSecomp
|
18a8a079dcb995f965e6a346724f4bbb585ce706
|
[
"MIT"
] | null | null | null |
outros-codigos/beautiful.py
|
Exterminus/WebScrapingSecomp
|
18a8a079dcb995f965e6a346724f4bbb585ce706
|
[
"MIT"
] | 1
|
2018-09-14T04:14:45.000Z
|
2018-09-14T04:14:45.000Z
|
from bs4 import BeautifulSoup
html_doc = """
<!DOCTYPE html>
<html lang='pt' dir="ltr">
<head>
<meta charset="utf-8">
<title>Título</title>
</head>
<body>
<div id= 'msg_1' class= 'mensagens'>
<p>Meu nome é Thrawn</p>
<div>
<div id= 'msg_2' class= 'mensagens' >
<p>Olá meu nome é Vader</p>
<a href="https://ufsj.edu.br">UFSJ</a>
</body>
</html>
"""
soup = BeautifulSoup(html_doc, 'html.parser')
print(soup.text)
print("Titulo:",soup.title)
# <title>The Dormouse's story</title>
print("Tag P:",soup.p)
#print("Div",soup.div)
print(soup.find_all("div",class_='mensa'))
| 22.148148
| 45
| 0.628763
|
98071f646a1fe2d1ea33a6d4c995d73bfc35f6b3
| 5,600
|
py
|
Python
|
prob_mbrl/envs/pendulum/env.py
|
Praneethsv/prob_mbrl
|
7b1adee6bff742b6f90e9b96ea243f12c9153b9b
|
[
"MIT"
] | 108
|
2018-10-24T07:59:14.000Z
|
2021-11-28T05:29:35.000Z
|
prob_mbrl/envs/pendulum/env.py
|
Praneethsv/prob_mbrl
|
7b1adee6bff742b6f90e9b96ea243f12c9153b9b
|
[
"MIT"
] | 8
|
2019-08-14T00:20:13.000Z
|
2019-10-18T01:45:29.000Z
|
prob_mbrl/envs/pendulum/env.py
|
Praneethsv/prob_mbrl
|
7b1adee6bff742b6f90e9b96ea243f12c9153b9b
|
[
"MIT"
] | 14
|
2019-06-27T10:10:08.000Z
|
2020-08-31T03:16:22.000Z
|
# Copyright (C) 2018, Anass Al
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>
"""Pendulum environment."""
import torch
import numpy as np
from gym import spaces
from .model import PendulumModel
from ..base import GymEnv
from ...utils import angles
class PendulumReward(torch.nn.Module):
def __init__(self,
pole_length=1.0,
target=torch.tensor([np.pi, 0]),
Q=4.0 * torch.eye(2),
R=1e-4 * torch.eye(1)):
super(PendulumReward, self).__init__()
self.Q = torch.nn.Parameter(Q, requires_grad=False)
self.R = torch.nn.Parameter(R, requires_grad=False)
if target.dim() == 1:
target = target.unsqueeze(0)
self.target = torch.nn.Parameter(target, requires_grad=False)
self.pole_length = torch.nn.Parameter(pole_length, requires_grad=False)
def forward(self, x, u):
if not isinstance(x, torch.Tensor):
x = torch.tensor(x)
if not isinstance(u, torch.Tensor):
u = torch.tensor(u)
x = x.to(device=self.Q.device, dtype=self.Q.dtype)
u = u.to(device=self.Q.device, dtype=self.Q.dtype)
if x.dim() == 1:
x = x.unsqueeze(0)
if u.dim() == 1:
u = u.unsqueeze(0)
# compute the distance between the tip of the pole and the target tip
# location
targeta = angles.to_complex(self.target, [0])
target_tip_xy = torch.cat([
self.pole_length * targeta[:, 1:2],
-self.pole_length * targeta[:, 2:3]
],
dim=-1)
if x.shape[-1] != targeta.shape[-1]:
xa = angles.to_complex(x, [0])
else:
xa = x
pole_tip_xy = torch.cat(
[self.pole_length * xa[:, 1:2], -self.pole_length * xa[:, 2:3]],
dim=-1)
# normalized distance so that cost at [0 ,0] is 1
delta = (pole_tip_xy - target_tip_xy)
delta = delta / (2 * self.pole_length)
# compute cost
cost = 0.5 * ((delta.mm(self.Q) * delta).sum(-1, keepdim=True) +
(u.mm(self.R) * u).sum(-1, keepdim=True))
# reward is negative cost.
# optimizing the exponential of the negative cost
reward = (-cost).exp()
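# Equivalently, reward = exp(-0.5 * (delta^T Q delta + u^T R u)); with the default positive-definite Q and R it lies in (0, 1].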
return reward
class Pendulum(GymEnv):
"""Open AI gym pendulum environment.
Based on the OpenAI gym Pendulum-v0 environment, but with more
custom dynamics for a better ground truth.
"""
metadata = {
"render.modes": ["human", "rgb_array"],
"video.frames_per_second": 30,
}
def __init__(self, model=None, reward_func=None, **kwargs):
if model is None:
model = PendulumModel()
# init parent class
reward_func = reward_func if callable(reward_func) else PendulumReward(
pole_length=model.l)
measurement_noise = torch.tensor([0.1, 0.01])
super(Pendulum, self).__init__(model,
reward_func,
measurement_noise,
angle_dims=[0],
**kwargs)
# init this class
high = np.array([2.5])
self.action_space = spaces.Box(low=-high, high=high, dtype=np.float32)
high = np.array([np.pi, np.finfo(np.float32).max])
if self.angle_dims is not None:
low = angles.to_complex(torch.tensor(-high),
self.angle_dims).numpy()
high = angles.to_complex(torch.tensor(high),
self.angle_dims).numpy()
else:
low = -high
self.observation_space = spaces.Box(low=low,
high=high,
dtype=np.float32)
def reset(self, init_state=np.array([0.0, 0.0]), init_state_std=1e-1):
return super(Pendulum, self).reset(init_state, init_state_std)
def render(self, mode="human"):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(500, 500)
self.viewer.window.set_vsync(False)
self.viewer.set_bounds(-2.2, 2.2, -2.2, 2.2)
rod = rendering.make_capsule(1.0 * self.model.l,
0.2 * torch.sqrt(self.model.m / 1.0))
rod.set_color(0.8, 0.3, 0.3)
self.pole_transform = rendering.Transform()
rod.add_attr(self.pole_transform)
self.viewer.add_geom(rod)
axle = rendering.make_circle(0.05)
axle.set_color(0, 0, 0)
self.viewer.add_geom(axle)
theta, _ = self.state
self.pole_transform.set_rotation(theta - np.pi / 2)
return self.viewer.render(return_rgb_array=mode == "rgb_array")
def close(self):
if self.viewer:
self.viewer.close()
| 36.842105
| 79
| 0.565179
|
0f9be3ddba8c0711e6374e85d5c57523c385c80d
| 2,276
|
py
|
Python
|
src/plotting/bin/modules/decodingPotential.py
|
WoutDavid/ST-nextflow-pipeline
|
8de3da218ec4f10f183e1163fe782c19fd8dd841
|
[
"MIT"
] | null | null | null |
src/plotting/bin/modules/decodingPotential.py
|
WoutDavid/ST-nextflow-pipeline
|
8de3da218ec4f10f183e1163fe782c19fd8dd841
|
[
"MIT"
] | null | null | null |
src/plotting/bin/modules/decodingPotential.py
|
WoutDavid/ST-nextflow-pipeline
|
8de3da218ec4f10f183e1163fe782c19fd8dd841
|
[
"MIT"
] | null | null | null |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
def plotDecodingPotential(decoded_genes: str, codebook: str):
decoded_genes = pd.read_csv(decoded_genes)
codebook = pd.read_csv(codebook)
# First, extract all possible "good" barcodes from the codebook
true_barcode_list = [str(i) for i in list(codebook['Barcode'])]
# Extract the called barcodes
called_barcodes_list = [str(i) for i in list(decoded_genes['Barcode'])]
total_nr_called_spots = len(called_barcodes_list)
# This function assumes that the sliced barcode list elements have the same length as the barcode excerpt being compared
intervals = range(1, len(true_barcode_list[0])+1)
# key = length of the intermediate barcode, value = list of barcodes of that length
sliced_barcode_dict = {str(n_rounds): [barcode[:n_rounds] for barcode in true_barcode_list] for n_rounds in intervals}
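# Hypothetical example: a codebook barcode '1234' yields the slices '1', '12', '123', '1234' for rounds 1 through 4.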
nr_future_matches_dict = {} # key = len of the barcode, value = number of spots that still represent a future possible barcodes
for n_rounds in intervals:
for spot in called_barcodes_list:
barcode_excerpt = spot[:n_rounds]
if barcode_excerpt in sliced_barcode_dict[str(n_rounds)]:
nr_future_matches_dict[n_rounds] = nr_future_matches_dict.get(n_rounds, 0) + 1
ratio_future_matches_dict = {k:round((v/total_nr_called_spots), 3)*100 for k,v in nr_future_matches_dict.items()}
# Make a pretty plot out of it
fig, ax = plt.subplots(1,1)
ax.set_title("Measurement of possible true barcode matches by round progression")
ax.set_xlabel("Round number")
ax.set_ylabel("Ratio of valid barcodes (%)")
ax.plot(ratio_future_matches_dict.keys(), ratio_future_matches_dict.values(), '-o')
ax.set_xticks(list(ratio_future_matches_dict.keys()))
for x,y in zip(ratio_future_matches_dict.keys(), ratio_future_matches_dict.values()):
label = "{:.2f}".format(y)
plt.annotate(label, # this is the text
(x,y), # this is the point to label
textcoords="offset points", # how to position the text
xytext=(0,10), # distance from text to points (x,y)
ha='center')
fig.tight_layout()
return plt
| 48.425532
| 139
| 0.696397
|
57fc0e4cfad715e917a9be463b7f1ab954225f82
| 3,956
|
py
|
Python
|
users/management/commands/load_sighting_data.py
|
maverick-labs-pune/wikirumours
|
51651aae651fd88468b54d08abb8ec28a93e65fa
|
[
"MIT"
] | null | null | null |
users/management/commands/load_sighting_data.py
|
maverick-labs-pune/wikirumours
|
51651aae651fd88468b54d08abb8ec28a93e65fa
|
[
"MIT"
] | null | null | null |
users/management/commands/load_sighting_data.py
|
maverick-labs-pune/wikirumours
|
51651aae651fd88468b54d08abb8ec28a93e65fa
|
[
"MIT"
] | null | null | null |
import csv
import datetime
import os
from django.contrib.gis.geos import Point
from django.core.management import BaseCommand, CommandError
from django.db import transaction
from geopy import Nominatim
from countries.models import Country
from report.models import Report, Sighting, ReportedViaChoice
from users.models import User
class Command(BaseCommand):
help = "load sighting data"
def handle(self, *args, **kwargs):
import_sightings()
def import_sightings():
with transaction.atomic():
dir_path = os.path.dirname(os.path.realpath(__file__))
# load users
with transaction.atomic():
sightings_csv_file_path = dir_path + '/../data/wikirumours_production_table_wr_rumour_sightings.csv'
with open(sightings_csv_file_path, "r") as file:
reader = csv.DictReader(x.replace('\0', '') for x in file)
for row in reader:
sighting_id = row["sighting_id"].strip()
public_id = row["public_id"].strip()
rumour_id = row["rumour_id"].strip()
details = row["details"].strip()
heard_on = row["heard_on"].strip()
country_id = row["country_id"].strip()
city = row["city"].strip()
location_type = row["location_type"].strip()
latitude = row["latitude"].strip()
longitude = row["longitude"].strip()
unable_to_geocode = row["unable_to_geocode"].strip()
source_id = row["source_id"].strip()
ipv4 = row["ipv4"].strip()
ipv6 = row["ipv6"].strip()
created_by = row["created_by"].strip()
entered_by = row["entered_by"].strip()
entered_on = row["entered_on"].strip()
user = User.objects.filter(id=created_by).first()
if user is None:
continue
report = Report.objects.filter(id=rumour_id).first()
if report is None:
continue
else:
latitude = float(latitude)
longitude = float(longitude)
address = city
if not heard_on or heard_on[0] == '0':
heard_on = None
else:
try:
heard_on = datetime.datetime.strptime(
heard_on, "%Y-%m-%d %H:%M:%S"
)
except:
heard_on = None
created_at = datetime.datetime.strptime(
entered_on, "%Y-%m-%d %H:%M:%S"
)
country = Country.objects.filter(
iso_code=country_id.replace('"', "")
).first()
reported_via = ReportedViaChoice.objects.filter(id=source_id).first()
sighting = Sighting()
sighting.id = int(sighting_id)
sighting.report = report
sighting.user = user
sighting.heard_on = heard_on
sighting.reported_via = reported_via
sighting.address = address
sighting.country = country
sighting.source = None
sighting.overheard_at = None
sighting.location = Point(longitude, latitude)
sighting.is_first_sighting = False
sighting.save()
sighting.created_at = created_at
sighting.save()
| 40.783505
| 112
| 0.476239
|
c38111916dbe94a58306d0cd5340e5b1aa38cb46
| 229
|
py
|
Python
|
app/core/tests/test_simple_function.py
|
mmansuri8701/recipe-app-api
|
ac587d203fd09dadc06f83e5419f034bcdbc93c7
|
[
"MIT"
] | null | null | null |
app/core/tests/test_simple_function.py
|
mmansuri8701/recipe-app-api
|
ac587d203fd09dadc06f83e5419f034bcdbc93c7
|
[
"MIT"
] | null | null | null |
app/core/tests/test_simple_function.py
|
mmansuri8701/recipe-app-api
|
ac587d203fd09dadc06f83e5419f034bcdbc93c7
|
[
"MIT"
] | null | null | null |
from unittest.mock import patch
from core import simple
from django.test import TestCase
class SimpleTest(TestCase):
def test_use_simple_function(self):
#result = simple.simple_function()
print(simple.simple_function())
| 22.9
| 36
| 0.79476
|
f5cd9bb57ce575298429e24c25dba42bc85929eb
| 441
|
py
|
Python
|
backend/work/migrations/0005_auto_20200504_0153.py
|
ecto0310/groupware
|
e1c9f76b19e6d1f6782f8e2b287ff75d1351fa83
|
[
"MIT"
] | 3
|
2020-03-23T19:18:00.000Z
|
2021-04-12T04:01:17.000Z
|
backend/work/migrations/0005_auto_20200504_0153.py
|
ecto0310/groupware
|
e1c9f76b19e6d1f6782f8e2b287ff75d1351fa83
|
[
"MIT"
] | 95
|
2020-03-07T12:29:38.000Z
|
2022-02-17T22:44:07.000Z
|
backend/work/migrations/0005_auto_20200504_0153.py
|
ecto0310/groupware
|
e1c9f76b19e6d1f6782f8e2b287ff75d1351fa83
|
[
"MIT"
] | 2
|
2021-12-27T16:50:36.000Z
|
2021-12-27T16:53:12.000Z
|
# Generated by Django 3.0.3 on 2020-05-03 16:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tool', '0006_delete_usertool'),
('work', '0004_auto_20200501_1600'),
]
operations = [
migrations.AlterField(
model_name='work',
name='tools',
field=models.ManyToManyField(blank=True, to='tool.Tool'),
),
]
| 22.05
| 69
| 0.594104
|
300d53333d50a06f1156024e4729e6af9b3b655d
| 4,521
|
py
|
Python
|
tests/test_sklearn_feature_union.py
|
MaxNoe/sklearn-onnx
|
698c9347e7c70cbb1a2c5bba1657e6548ff5897d
|
[
"MIT"
] | null | null | null |
tests/test_sklearn_feature_union.py
|
MaxNoe/sklearn-onnx
|
698c9347e7c70cbb1a2c5bba1657e6548ff5897d
|
[
"MIT"
] | null | null | null |
tests/test_sklearn_feature_union.py
|
MaxNoe/sklearn-onnx
|
698c9347e7c70cbb1a2c5bba1657e6548ff5897d
|
[
"MIT"
] | 1
|
2020-10-01T09:26:27.000Z
|
2020-10-01T09:26:27.000Z
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import unittest
import numpy as np
from sklearn.datasets import load_digits, load_iris
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.model_selection import train_test_split
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType, Int64TensorType
from test_utils import dump_data_and_model
class TestSklearnFeatureUnion(unittest.TestCase):
def test_feature_union_default(self):
data = load_iris()
X, y = data.data, data.target
X = X.astype(np.float32)
X_train, X_test, *_ = train_test_split(X, y, test_size=0.5,
random_state=42)
model = FeatureUnion([('standard', StandardScaler()),
('minmax', MinMaxScaler())]).fit(X_train)
model_onnx = convert_sklearn(
model, 'feature union',
[('input', FloatTensorType([None, X_test.shape[1]]))])
self.assertTrue(model_onnx is not None)
dump_data_and_model(X_test,
model,
model_onnx,
basename="SklearnFeatureUnionDefault")
def test_feature_union_transformer_weights_0(self):
data = load_iris()
X, y = data.data, data.target
X = X.astype(np.float32)
X_train, X_test, *_ = train_test_split(X, y, test_size=0.5,
random_state=42)
model = FeatureUnion([('standard', StandardScaler()),
('minmax', MinMaxScaler())],
transformer_weights={'standard': 2, 'minmax': 4}
).fit(X_train)
model_onnx = convert_sklearn(
model, 'feature union',
[('input', FloatTensorType([None, X_test.shape[1]]))])
self.assertTrue(model_onnx is not None)
dump_data_and_model(X_test,
model,
model_onnx,
basename="SklearnFeatureUnionTransformerWeights0")
def test_feature_union_transformer_weights_1(self):
data = load_digits()
X, y = data.data, data.target
X = X.astype(np.int64)
X_train, X_test, *_ = train_test_split(X, y, test_size=0.5,
random_state=42)
model = FeatureUnion([('pca', PCA()),
('svd', TruncatedSVD())],
transformer_weights={'pca': 10, 'svd': 3}
).fit(X_train)
model_onnx = convert_sklearn(
model, 'feature union',
[('input', Int64TensorType([None, X_test.shape[1]]))])
self.assertTrue(model_onnx is not None)
dump_data_and_model(
X_test,
model,
model_onnx,
basename="SklearnFeatureUnionTransformerWeights1-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
def test_feature_union_transformer_weights_2(self):
data = load_digits()
X, y = data.data, data.target
X = X.astype(np.float32)
X_train, X_test, *_ = train_test_split(X, y, test_size=0.5,
random_state=42)
model = FeatureUnion([('pca', PCA()),
('svd', TruncatedSVD())],
transformer_weights={'pca1': 10, 'svd2': 3}
).fit(X_train)
model_onnx = convert_sklearn(
model, 'feature union',
[('input', FloatTensorType([None, X_test.shape[1]]))])
self.assertTrue(model_onnx is not None)
dump_data_and_model(
X_test,
model,
model_onnx,
basename="SklearnFeatureUnionTransformerWeights2-Dec4",
allow_failure="StrictVersion("
"onnxruntime.__version__)"
"<= StrictVersion('0.2.1')",
)
if __name__ == "__main__":
unittest.main()
| 42.252336
| 78
| 0.538376
|
86c7fb53ec475248fd25329584e82d21855e3b7a
| 2,931
|
py
|
Python
|
project/code/analysis/bayes.py
|
cycomachead/info290
|
694361cfa755daec24c773e15d5bc965411d4caf
|
[
"BSD-2-Clause"
] | 2
|
2015-05-12T01:21:56.000Z
|
2015-07-04T21:14:06.000Z
|
project/code/analysis/bayes.py
|
cycomachead/info290
|
694361cfa755daec24c773e15d5bc965411d4caf
|
[
"BSD-2-Clause"
] | 1
|
2015-02-19T12:26:34.000Z
|
2015-05-14T12:42:56.000Z
|
project/code/analysis/bayes.py
|
cycomachead/info290
|
694361cfa755daec24c773e15d5bc965411d4caf
|
[
"BSD-2-Clause"
] | null | null | null |
#! /usr/bin/env python3
from pandas import *
import sklearn
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.grid_search import GridSearchCV
import numpy as np
import random
STYLE = "American_IPA"
""" Performs cross validation on data using a given method
Returns the average score.
Percent is the percentage of data to use as validation,
this should be an integer, not a decimal.
Rounds is the number of rounds of cv to run.
"""
def cross_val(data, labels, percent, rounds, method):
row_count = len(data.index)
scores = []
# Run `rounds` rounds of validation and take the average score
for _ in range(rounds):
# randomly select row indices for test/train sets
test_rows = []
for i in range(row_count//percent):
test_rows.append(random.randint(0, row_count-1))
test_rows.sort()
train_rows = [i for i in range(len(data.index))]
train_rows = [i for i in train_rows if i not in test_rows]
train_rows.sort()
# select test/train sets
test_data = data.drop(train_rows)
train_data = data.drop(test_rows)
test_labels = labels.drop(train_rows)
train_labels = labels.drop(test_rows)
# fit the supplied estimator on the training split
fit_cv = method.fit(train_data, train_labels)
# calculate score
score_cv = method.score(test_data, test_labels)
scores.append(score_cv)
return sum(scores)/len(scores)
data = read_pickle("processed/pandas/%s.pkl"%(STYLE))
labels = data['beer_id']
del data['beer_id']
data = data.fillna(0)
###########################
### Basic Bayes Methods ###
###########################
gnb = GaussianNB()
fit = gnb.fit(data, labels)
score = gnb.score(data, labels)
print('Gaussian NB')
print(score)
mbn = MultinomialNB()
fit = mbn.fit(data, labels)
score = mbn.score(data, labels)
print('Multinomial NB')
print(score)
########################
### Cross Validation ###
########################
# rounds = 2
# pct = 10
# # for c in criterion:
# # for t in trees:
# # for s in samples:
#
# param_grid = {'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000] }
# clf = GridSearchCV(LogisticRegression(penalty='l2'), param_grid)
# lr = LogisticRegression(C=1.0, intercept_scaling=1, dual=False,
# fit_intercept=True, penalty='l2', tol=0.0001)
#
# gs = GridSearchCV(cv=None, estimator=lr, param_grid=param_grid)
#
# # fit = clf.fit(data, labels)
# # score = clf.score(data, labels)
# # print('Grid Search Method')
# # print(score)
#
# print("===== Cross Validation ====")
# lr = LogisticRegression(C=1.0, intercept_scaling=1, dual=False,
# fit_intercept=True, penalty='l2', tol=0.0001)
# fit = clf.fit(data, labels)
# score = clf.score(data, labels)
# print("Training Score: %f "% score)
# print("Cross Validation Score: %f" % (cross_val(data, labels, pct, rounds, clf)))
#
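# Added for illustration (not part of the original analysis script): a small,
# hedged sketch that exercises the cross_val helper defined above with the same
# data/labels and the GaussianNB model already imported. The choice of percent=10
# (hold out roughly 1/10 of the rows per round) and rounds=3 is an assumption,
# not a tuned value.
print('Gaussian NB cross-validation (illustrative)')
print(cross_val(data, labels, 10, 3, GaussianNB()))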
| 28.182692
| 83
| 0.635278
|
bbd3d4f2166ef1066e6b144eae94c534f44e9065
| 7,696
|
py
|
Python
|
cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IETF_ATM2_PVCTRAP_MIB.py
|
bopopescu/ACI
|
dd717bc74739eeed4747b3ea9e36b239580df5e1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IETF_ATM2_PVCTRAP_MIB.py
|
bopopescu/ACI
|
dd717bc74739eeed4747b3ea9e36b239580df5e1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IETF_ATM2_PVCTRAP_MIB.py
|
bopopescu/ACI
|
dd717bc74739eeed4747b3ea9e36b239580df5e1
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-07-22T04:04:44.000Z
|
2020-07-22T04:04:44.000Z
|
""" CISCO_IETF_ATM2_PVCTRAP_MIB
This MIB Module is a supplement to the
ATM\-MIB.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class CISCOIETFATM2PVCTRAPMIB(Entity):
"""
.. attribute:: atmcurrentlyfailingpvcltable
A table indicating all VCLs for which there is an active row in the atmVclTable having an atmVclConnKind value of `pvc' and an atmVclOperStatus with a value other than `up'
**type**\: :py:class:`Atmcurrentlyfailingpvcltable <ydk.models.cisco_ios_xe.CISCO_IETF_ATM2_PVCTRAP_MIB.CISCOIETFATM2PVCTRAPMIB.Atmcurrentlyfailingpvcltable>`
"""
_prefix = 'CISCO-IETF-ATM2-PVCTRAP-MIB'
_revision = '1998-02-03'
def __init__(self):
super(CISCOIETFATM2PVCTRAPMIB, self).__init__()
self._top_entity = None
self.yang_name = "CISCO-IETF-ATM2-PVCTRAP-MIB"
self.yang_parent_name = "CISCO-IETF-ATM2-PVCTRAP-MIB"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("atmCurrentlyFailingPVclTable", ("atmcurrentlyfailingpvcltable", CISCOIETFATM2PVCTRAPMIB.Atmcurrentlyfailingpvcltable))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict()
self.atmcurrentlyfailingpvcltable = CISCOIETFATM2PVCTRAPMIB.Atmcurrentlyfailingpvcltable()
self.atmcurrentlyfailingpvcltable.parent = self
self._children_name_map["atmcurrentlyfailingpvcltable"] = "atmCurrentlyFailingPVclTable"
self._children_yang_names.add("atmCurrentlyFailingPVclTable")
self._segment_path = lambda: "CISCO-IETF-ATM2-PVCTRAP-MIB:CISCO-IETF-ATM2-PVCTRAP-MIB"
class Atmcurrentlyfailingpvcltable(Entity):
"""
A table indicating all VCLs for which there is an
active row in the atmVclTable having an atmVclConnKind
value of `pvc' and an atmVclOperStatus with a value
other than `up'.
.. attribute:: atmcurrentlyfailingpvclentry
Each entry in this table represents a VCL for which the atmVclRowStatus is `active', the atmVclConnKind is `pvc', and the atmVclOperStatus is other than `up'
**type**\: list of :py:class:`Atmcurrentlyfailingpvclentry <ydk.models.cisco_ios_xe.CISCO_IETF_ATM2_PVCTRAP_MIB.CISCOIETFATM2PVCTRAPMIB.Atmcurrentlyfailingpvcltable.Atmcurrentlyfailingpvclentry>`
"""
_prefix = 'CISCO-IETF-ATM2-PVCTRAP-MIB'
_revision = '1998-02-03'
def __init__(self):
super(CISCOIETFATM2PVCTRAPMIB.Atmcurrentlyfailingpvcltable, self).__init__()
self.yang_name = "atmCurrentlyFailingPVclTable"
self.yang_parent_name = "CISCO-IETF-ATM2-PVCTRAP-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("atmCurrentlyFailingPVclEntry", ("atmcurrentlyfailingpvclentry", CISCOIETFATM2PVCTRAPMIB.Atmcurrentlyfailingpvcltable.Atmcurrentlyfailingpvclentry))])
self._leafs = OrderedDict()
self.atmcurrentlyfailingpvclentry = YList(self)
self._segment_path = lambda: "atmCurrentlyFailingPVclTable"
self._absolute_path = lambda: "CISCO-IETF-ATM2-PVCTRAP-MIB:CISCO-IETF-ATM2-PVCTRAP-MIB/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(CISCOIETFATM2PVCTRAPMIB.Atmcurrentlyfailingpvcltable, [], name, value)
class Atmcurrentlyfailingpvclentry(Entity):
"""
Each entry in this table represents a VCL for which
the atmVclRowStatus is `active', the atmVclConnKind is
`pvc', and the atmVclOperStatus is other than `up'.
.. attribute:: ifindex (key)
**type**\: int
**range:** 1..2147483647
**refers to**\: :py:class:`ifindex <ydk.models.cisco_ios_xe.IF_MIB.IFMIB.Iftable.Ifentry>`
.. attribute:: atmvclvpi (key)
**type**\: int
**range:** 0..4095
**refers to**\: :py:class:`atmvclvpi <ydk.models.cisco_ios_xe.ATM_MIB.ATMMIB.Atmvcltable.Atmvclentry>`
.. attribute:: atmvclvci (key)
**type**\: int
**range:** 0..65535
**refers to**\: :py:class:`atmvclvci <ydk.models.cisco_ios_xe.ATM_MIB.ATMMIB.Atmvcltable.Atmvclentry>`
.. attribute:: atmcurrentlyfailingpvcltimestamp
The time at which this PVCL began to fail
**type**\: int
**range:** 0..4294967295
.. attribute:: atmpreviouslyfailedpvcltimestamp
The time at which this PVCL began to fail during the PVC Notification interval
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'CISCO-IETF-ATM2-PVCTRAP-MIB'
_revision = '1998-02-03'
def __init__(self):
super(CISCOIETFATM2PVCTRAPMIB.Atmcurrentlyfailingpvcltable.Atmcurrentlyfailingpvclentry, self).__init__()
self.yang_name = "atmCurrentlyFailingPVclEntry"
self.yang_parent_name = "atmCurrentlyFailingPVclTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['ifindex','atmvclvpi','atmvclvci']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('ifindex', YLeaf(YType.str, 'ifIndex')),
('atmvclvpi', YLeaf(YType.str, 'atmVclVpi')),
('atmvclvci', YLeaf(YType.str, 'atmVclVci')),
('atmcurrentlyfailingpvcltimestamp', YLeaf(YType.uint32, 'atmCurrentlyFailingPVclTimeStamp')),
('atmpreviouslyfailedpvcltimestamp', YLeaf(YType.uint32, 'atmPreviouslyFailedPVclTimeStamp')),
])
self.ifindex = None
self.atmvclvpi = None
self.atmvclvci = None
self.atmcurrentlyfailingpvcltimestamp = None
self.atmpreviouslyfailedpvcltimestamp = None
self._segment_path = lambda: "atmCurrentlyFailingPVclEntry" + "[ifIndex='" + str(self.ifindex) + "']" + "[atmVclVpi='" + str(self.atmvclvpi) + "']" + "[atmVclVci='" + str(self.atmvclvci) + "']"
self._absolute_path = lambda: "CISCO-IETF-ATM2-PVCTRAP-MIB:CISCO-IETF-ATM2-PVCTRAP-MIB/atmCurrentlyFailingPVclTable/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(CISCOIETFATM2PVCTRAPMIB.Atmcurrentlyfailingpvcltable.Atmcurrentlyfailingpvclentry, ['ifindex', 'atmvclvpi', 'atmvclvci', 'atmcurrentlyfailingpvcltimestamp', 'atmpreviouslyfailedpvcltimestamp'], name, value)
def clone_ptr(self):
self._top_entity = CISCOIETFATM2PVCTRAPMIB()
return self._top_entity
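# Added as a hedged illustration (not part of the generated bindings): constructing
# the top-level entity and one failing-PVCL entry uses only the classes and leaf
# names defined above; the interface index and VPI/VCI values are made up.
#
#   mib = CISCOIETFATM2PVCTRAPMIB()
#   entry = CISCOIETFATM2PVCTRAPMIB.Atmcurrentlyfailingpvcltable.Atmcurrentlyfailingpvclentry()
#   entry.ifindex = 1
#   entry.atmvclvpi = 0
#   entry.atmvclvci = 100
#   mib.atmcurrentlyfailingpvcltable.atmcurrentlyfailingpvclentry.append(entry)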
| 43.480226
| 244
| 0.633966
|
621137bd4a03944c4a8c965f6881cb12180e6a53
| 385
|
py
|
Python
|
conf/wsgi.py
|
almazkun/dup
|
f188d771c02e5b4da96131c09c74ad981280e7a5
|
[
"MIT"
] | null | null | null |
conf/wsgi.py
|
almazkun/dup
|
f188d771c02e5b4da96131c09c74ad981280e7a5
|
[
"MIT"
] | 1
|
2021-12-22T02:45:00.000Z
|
2021-12-22T02:45:00.000Z
|
conf/wsgi.py
|
almazkun/dup
|
f188d771c02e5b4da96131c09c74ad981280e7a5
|
[
"MIT"
] | null | null | null |
"""
WSGI config for conf project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "conf.settings")
application = get_wsgi_application()
| 22.647059
| 78
| 0.781818
|
6f8606d67bd82cc3cd92aebceaf4c27e6769369e
| 12,805
|
py
|
Python
|
tests/test_nat.py
|
abanu-ms/sonic-swss
|
2cab51d11f26fa673ab08210c9422b6a31168ac3
|
[
"Apache-2.0"
] | null | null | null |
tests/test_nat.py
|
abanu-ms/sonic-swss
|
2cab51d11f26fa673ab08210c9422b6a31168ac3
|
[
"Apache-2.0"
] | null | null | null |
tests/test_nat.py
|
abanu-ms/sonic-swss
|
2cab51d11f26fa673ab08210c9422b6a31168ac3
|
[
"Apache-2.0"
] | 1
|
2020-12-04T10:35:38.000Z
|
2020-12-04T10:35:38.000Z
|
import time
from dvslib.dvs_common import wait_for_result
class TestNat(object):
def setup_db(self, dvs):
self.app_db = dvs.get_app_db()
self.asic_db = dvs.get_asic_db()
self.config_db = dvs.get_config_db()
def set_interfaces(self, dvs):
fvs = {"NULL": "NULL"}
self.config_db.create_entry("INTERFACE", "Ethernet0|67.66.65.1/24", fvs)
self.config_db.create_entry("INTERFACE", "Ethernet4|18.18.18.1/24", fvs)
self.config_db.create_entry("INTERFACE", "Ethernet0", fvs)
self.config_db.create_entry("INTERFACE", "Ethernet4", fvs)
dvs.runcmd("config interface startup Ethernet0")
dvs.runcmd("config interface startup Ethernet4")
dvs.servers[0].runcmd("ip link set down dev eth0")
dvs.servers[0].runcmd("ip link set up dev eth0")
dvs.servers[0].runcmd("ifconfig eth0 67.66.65.2/24")
dvs.servers[0].runcmd("ip route add default via 67.66.65.1")
dvs.servers[1].runcmd("ip link set down dev eth0")
dvs.servers[1].runcmd("ip link set up dev eth0")
dvs.servers[1].runcmd("ifconfig eth0 18.18.18.2/24")
dvs.servers[1].runcmd("ip route add default via 18.18.18.1")
dvs.runcmd("config nat add interface Ethernet0 -nat_zone 1")
time.sleep(1)
def clear_interfaces(self, dvs):
dvs.servers[0].runcmd("ifconfig eth0 0.0.0.0")
dvs.servers[1].runcmd("ifconfig eth0 0.0.0.0")
time.sleep(1)
def test_NatGlobalTable(self, dvs, testlog):
# initialize
self.setup_db(dvs)
# enable NAT feature
dvs.runcmd("config nat feature enable")
dvs.runcmd("config nat set timeout 450")
dvs.runcmd("config nat set udp-timeout 360")
dvs.runcmd("config nat set tcp-timeout 900")
# check NAT global values in appdb
self.app_db.wait_for_n_keys("NAT_GLOBAL_TABLE", 1)
fvs = self.app_db.wait_for_entry("NAT_GLOBAL_TABLE", "Values")
assert fvs == {"admin_mode": "enabled", "nat_timeout": "450", "nat_udp_timeout": "360", "nat_tcp_timeout": "900"}
def test_NatInterfaceZone(self, dvs, testlog):
# initialize
self.setup_db(dvs)
self.set_interfaces(dvs)
# check NAT zone is set for interface in app db
fvs = self.app_db.wait_for_entry("INTF_TABLE", "Ethernet0")
zone = False
for f, v in fvs.items():
if f == "nat_zone" and v == '1':
zone = True
break
assert zone
def test_AddNatStaticEntry(self, dvs, testlog):
# initialize
self.setup_db(dvs)
# get neighbor and arp entry
dvs.servers[0].runcmd("ping -c 1 18.18.18.2")
# add a static nat entry
dvs.runcmd("config nat add static basic 67.66.65.1 18.18.18.2")
# check the entry in the config db
self.config_db.wait_for_n_keys("STATIC_NAT", 1)
fvs = self.config_db.wait_for_entry("STATIC_NAT", "67.66.65.1")
assert fvs == {"local_ip": "18.18.18.2"}
# check the entry in app db
self.app_db.wait_for_n_keys("NAT_TABLE", 2)
fvs = self.app_db.wait_for_entry("NAT_TABLE", "67.66.65.1")
assert fvs == {
"translated_ip": "18.18.18.2",
"nat_type": "dnat",
"entry_type": "static"
}
#check the entry in asic db, 3 keys = SNAT, DNAT and DNAT_Pool
keys = self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NAT_ENTRY", 3)
for key in keys:
if (key.find("dst_ip:67.66.65.1")) or (key.find("src_ip:18.18.18.2")):
assert True
else:
assert False
def test_DelNatStaticEntry(self, dvs, testlog):
# initialize
self.setup_db(dvs)
# delete a static nat entry
dvs.runcmd("config nat remove static basic 67.66.65.1 18.18.18.2")
# check the entry is not there in the config db
self.config_db.wait_for_n_keys("STATIC_NAT", 0)
# check the entry is not there in app db
self.app_db.wait_for_n_keys("NAT_TABLE", 0)
#check the entry is not there in asic db
self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NAT_ENTRY", 0)
def test_AddNaPtStaticEntry(self, dvs, testlog):
# initialize
self.setup_db(dvs)
# get neighbor and arp entry
dvs.servers[0].runcmd("ping -c 1 18.18.18.2")
# add a static nat entry
dvs.runcmd("config nat add static udp 67.66.65.1 670 18.18.18.2 180")
# check the entry in the config db
self.config_db.wait_for_n_keys("STATIC_NAPT", 1)
fvs = self.config_db.wait_for_entry("STATIC_NAPT", "67.66.65.1|UDP|670")
assert fvs == {"local_ip": "18.18.18.2", "local_port": "180"}
# check the entry in app db
self.app_db.wait_for_n_keys("NAPT_TABLE:UDP", 2)
fvs = self.app_db.wait_for_entry("NAPT_TABLE:UDP", "67.66.65.1:670")
assert fvs == {"translated_ip": "18.18.18.2", "translated_l4_port": "180", "nat_type": "dnat", "entry_type": "static"}
#check the entry in asic db, 3 keys = SNAT, DNAT and DNAT_Pool
keys = self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NAT_ENTRY", 3)
for key in keys:
if (key.find("dst_ip:67.66.65.1")) and (key.find("key.l4_dst_port:670")):
assert True
if (key.find("src_ip:18.18.18.2")) or (key.find("key.l4_src_port:180")):
assert True
else:
assert False
def test_DelNaPtStaticEntry(self, dvs, testlog):
# initialize
self.setup_db(dvs)
# delete a static nat entry
dvs.runcmd("config nat remove static udp 67.66.65.1 670 18.18.18.2 180")
# check the entry is not there in the config db
self.config_db.wait_for_n_keys("STATIC_NAPT", 0)
# check the entry is not there in app db
self.app_db.wait_for_n_keys("NAPT_TABLE:UDP", 0)
#check the entry is not there in asic db
self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NAT_ENTRY", 0)
def test_AddTwiceNatEntry(self, dvs, testlog):
# initialize
self.setup_db(dvs)
# get neighbor and arp entry
dvs.servers[0].runcmd("ping -c 1 18.18.18.2")
dvs.servers[1].runcmd("ping -c 1 67.66.65.2")
# add a twice nat entry
dvs.runcmd("config nat add static basic 67.66.65.2 18.18.18.1 -nat_type snat -twice_nat_id 9")
dvs.runcmd("config nat add static basic 67.66.65.1 18.18.18.2 -nat_type dnat -twice_nat_id 9")
# check the entry in the config db
self.config_db.wait_for_n_keys("STATIC_NAT", 2)
fvs = self.config_db.wait_for_entry("STATIC_NAT", "67.66.65.1")
assert fvs == {"nat_type": "dnat", "twice_nat_id": "9", "local_ip": "18.18.18.2"}
fvs = self.config_db.wait_for_entry("STATIC_NAT", "67.66.65.2")
assert fvs == {"nat_type": "snat", "twice_nat_id": "9", "local_ip": "18.18.18.1"}
# check the entry in app db
self.app_db.wait_for_n_keys("NAT_TWICE_TABLE", 2)
fvs = self.app_db.wait_for_entry("NAT_TWICE_TABLE", "67.66.65.2:67.66.65.1")
assert fvs == {"translated_src_ip": "18.18.18.1", "translated_dst_ip": "18.18.18.2", "entry_type": "static"}
fvs = self.app_db.wait_for_entry("NAT_TWICE_TABLE", "18.18.18.2:18.18.18.1")
assert fvs == {"translated_src_ip": "67.66.65.1", "translated_dst_ip": "67.66.65.2", "entry_type": "static"}
#check the entry in asic db, 4 keys = SNAT, DNAT and 2 DNAT_Pools
keys = self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NAT_ENTRY", 4)
for key in keys:
if (key.find("dst_ip:18.18.18.1")) or (key.find("src_ip:18.18.18.2")):
assert True
else:
assert False
def test_DelTwiceNatStaticEntry(self, dvs, testlog):
# initialize
self.setup_db(dvs)
# delete a static nat entry
dvs.runcmd("config nat remove static basic 67.66.65.2 18.18.18.1")
dvs.runcmd("config nat remove static basic 67.66.65.1 18.18.18.2")
# check the entry is not there in the config db
self.config_db.wait_for_n_keys("STATIC_NAT", 0)
# check the entry is not there in app db
self.app_db.wait_for_n_keys("NAT_TWICE_TABLE", 0)
#check the entry is not there in asic db
self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NAT_ENTRY", 0)
def test_AddTwiceNaPtEntry(self, dvs, testlog):
# initialize
self.setup_db(dvs)
# get neighbor and arp entry
dvs.servers[0].runcmd("ping -c 1 18.18.18.2")
dvs.servers[1].runcmd("ping -c 1 67.66.65.2")
# add a twice nat entry
dvs.runcmd("config nat add static udp 67.66.65.2 670 18.18.18.1 181 -nat_type snat -twice_nat_id 7")
dvs.runcmd("config nat add static udp 67.66.65.1 660 18.18.18.2 182 -nat_type dnat -twice_nat_id 7")
# check the entry in the config db
self.config_db.wait_for_n_keys("STATIC_NAPT", 2)
fvs = self.config_db.wait_for_entry("STATIC_NAPT", "67.66.65.1|UDP|660")
assert fvs == {"nat_type": "dnat", "local_ip": "18.18.18.2", "twice_nat_id": "7", "local_port": "182"}
fvs = self.config_db.wait_for_entry("STATIC_NAPT", "67.66.65.2|UDP|670")
assert fvs == {"nat_type": "snat", "local_ip": "18.18.18.1", "twice_nat_id": "7", "local_port": "181"}
# check the entry in app db
self.app_db.wait_for_n_keys("NAPT_TWICE_TABLE", 2)
fvs = self.app_db.wait_for_entry("NAPT_TWICE_TABLE", "UDP:67.66.65.2:670:67.66.65.1:660")
assert fvs == {"translated_src_ip": "18.18.18.1", "translated_src_l4_port": "181", "translated_dst_ip": "18.18.18.2", "translated_dst_l4_port": "182", "entry_type": "static"}
fvs = self.app_db.wait_for_entry("NAPT_TWICE_TABLE", "UDP:18.18.18.2:182:18.18.18.1:181")
assert fvs == {"translated_src_ip": "67.66.65.1", "translated_src_l4_port": "660", "translated_dst_ip": "67.66.65.2", "translated_dst_l4_port": "670", "entry_type": "static"}
#check the entry in asic db, 4 keys = SNAT, DNAT and 2 DNAT_Pools
keys = self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NAT_ENTRY", 4)
for key in keys:
if (key.find("src_ip:18.18.18.2")) or (key.find("l4_src_port:182")):
assert True
if (key.find("dst_ip:18.18.18.1")) or (key.find("l4_dst_port:181")):
assert True
else:
assert False
def test_DelTwiceNaPtStaticEntry(self, dvs, testlog):
# initialize
self.setup_db(dvs)
# delete a static nat entry
dvs.runcmd("config nat remove static udp 67.66.65.2 670 18.18.18.1 181")
dvs.runcmd("config nat remove static udp 67.66.65.1 660 18.18.18.2 182")
# check the entry is not there in the config db
self.config_db.wait_for_n_keys("STATIC_NAPT", 0)
# check the entry is not there in app db
self.app_db.wait_for_n_keys("NAPT_TWICE_TABLE", 0)
#check the entry is not there in asic db
self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NAT_ENTRY", 0)
# clear interfaces
self.clear_interfaces(dvs)
def test_VerifyConntrackTimeoutForNatEntry(self, dvs, testlog):
# get neighbor and arp entry
dvs.servers[0].runcmd("ping -c 1 18.18.18.2")
# add a static nat entry
dvs.runcmd("config nat add static basic 67.66.65.1 18.18.18.2")
# check the conntrack timeout for static entry
def _check_conntrack_for_static_entry():
output = dvs.runcmd("conntrack -j -L -s 18.18.18.2 -p udp -q 67.66.65.1")
if len(output) != 2:
return (False, None)
conntrack_list = list(output[1].split(" "))
src_exists = "src=18.18.18.2" in conntrack_list
dst_exists = "dst=67.66.65.1" in conntrack_list
proto_exists = "udp" in conntrack_list
if not src_exists or not dst_exists or not proto_exists:
return (False, None)
proto_index = conntrack_list.index("udp")
if int(conntrack_list[proto_index + 7]) > 432000 or int(conntrack_list[proto_index + 7]) < 431900:
return (False, None)
return (True, None)
wait_for_result(_check_conntrack_for_static_entry)
# delete a static nat entry
dvs.runcmd("config nat remove static basic 67.66.65.1 18.18.18.2")
# Add a dummy always-pass test at the end as a workaround
# for an issue where a flaky failure on the final test invokes module tear-down before retrying
def test_nonflaky_dummy():
pass
| 39.039634
| 182
| 0.620773
|
e6a5916da8516ca978c7505bb56075d47bacaa77
| 826
|
py
|
Python
|
tools/webcam/webcam_apis/nodes/__init__.py
|
ivmtorres/mmpose
|
662cb50c639653ae2fc19d3421ce10bd02246b85
|
[
"Apache-2.0"
] | 1
|
2022-02-13T12:27:40.000Z
|
2022-02-13T12:27:40.000Z
|
tools/webcam/webcam_apis/nodes/__init__.py
|
ivmtorres/mmpose
|
662cb50c639653ae2fc19d3421ce10bd02246b85
|
[
"Apache-2.0"
] | null | null | null |
tools/webcam/webcam_apis/nodes/__init__.py
|
ivmtorres/mmpose
|
662cb50c639653ae2fc19d3421ce10bd02246b85
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import NODES
from .faceswap_nodes import FaceSwapNode
from .frame_effect_nodes import (BackgroundNode, BugEyeNode, MoustacheNode,
NoticeBoardNode, PoseVisualizerNode,
SaiyanNode, SunglassesNode)
from .helper_nodes import ModelResultBindingNode, MonitorNode, RecorderNode
from .mmdet_nodes import DetectorNode
from .mmpose_nodes import TopDownPoseEstimatorNode
from .xdwendwen_nodes import XDwenDwenNode
__all__ = [
'NODES', 'PoseVisualizerNode', 'DetectorNode', 'TopDownPoseEstimatorNode',
'MonitorNode', 'BugEyeNode', 'SunglassesNode', 'ModelResultBindingNode',
'NoticeBoardNode', 'RecorderNode', 'FaceSwapNode', 'MoustacheNode',
'SaiyanNode', 'BackgroundNode', 'XDwenDwenNode'
]
| 45.888889
| 78
| 0.74092
|
c9be5f2f7eab533dd99ab06dfcdb9543e8e88a5d
| 5,558
|
py
|
Python
|
models/SSCNet.py
|
reinforcementdriving/JS3C-Net
|
40326fdbebc688c10a6247f46ed08463de0db206
|
[
"MIT"
] | 136
|
2020-12-07T16:05:13.000Z
|
2022-03-28T11:42:23.000Z
|
models/SSCNet.py
|
reinforcementdriving/JS3C-Net
|
40326fdbebc688c10a6247f46ed08463de0db206
|
[
"MIT"
] | 14
|
2021-01-14T13:06:06.000Z
|
2022-03-19T07:20:16.000Z
|
models/SSCNet.py
|
reinforcementdriving/JS3C-Net
|
40326fdbebc688c10a6247f46ed08463de0db206
|
[
"MIT"
] | 23
|
2020-12-26T12:01:12.000Z
|
2022-01-20T01:24:23.000Z
|
# -*- coding: utf-8 -*-
"""
Author: Jiantao Gao
File: complt_sscnet.py
Date: 2020/4/27 17:46
"""
import torch
import torch.nn as nn
from models import model_utils
import spconv
def get_model(config):
return SSCNet(config)
class SSCNet_Decoder(nn.Module):
def __init__(self, input_dim, nPlanes, classes):
super().__init__()
# Block 1
self.b1_conv1=nn.Sequential(nn.Conv3d(input_dim, 16, 7, 2, padding=3), nn.BatchNorm3d(16),nn.ReLU())
self.b1_conv2=nn.Sequential(nn.Conv3d(16, nPlanes[0], 3, 1, padding=1), nn.BatchNorm3d(nPlanes[0]),nn.ReLU())
self.b1_conv3=nn.Sequential(nn.Conv3d(nPlanes[0], nPlanes[0], 3, 1, padding=1), nn.BatchNorm3d(nPlanes[0]),nn.ReLU())
self.b1_res=nn.Sequential(nn.Conv3d(16, nPlanes[0], 3, 1,padding=1), nn.BatchNorm3d(nPlanes[0]),nn.ReLU())
self.pool1=nn.Sequential(nn.MaxPool3d(2, 2))
# Block 2
self.b2_conv1=nn.Sequential(nn.Conv3d(nPlanes[0], nPlanes[1], 3, 1, padding=1), nn.BatchNorm3d(nPlanes[1]),nn.ReLU())
self.b2_conv2=nn.Sequential(nn.Conv3d(nPlanes[1], nPlanes[1], 3, 1, padding=1), nn.BatchNorm3d(nPlanes[1]),nn.ReLU())
self.b2_res=nn.Sequential(nn.Conv3d(nPlanes[0], nPlanes[1], 3, 1, padding=1), nn.BatchNorm3d(nPlanes[1]),nn.ReLU())
# Block 3
self.b3_conv1=nn.Sequential(nn.Conv3d(nPlanes[1], nPlanes[2], 3, 1, padding=1), nn.BatchNorm3d(nPlanes[2]),nn.ReLU())
self.b3_conv2=nn.Sequential(nn.Conv3d(nPlanes[2], nPlanes[2], 3, 1, padding=1), nn.BatchNorm3d(nPlanes[2]),nn.ReLU())
# Block 4
self.b4_conv1=nn.Sequential(nn.Conv3d(nPlanes[2], nPlanes[3], 3, 1, dilation=2, padding=2), nn.BatchNorm3d(nPlanes[3]),nn.ReLU())
self.b4_conv2=nn.Sequential(nn.Conv3d(nPlanes[3], nPlanes[3], 3, 1, dilation=2, padding=2), nn.BatchNorm3d(nPlanes[3]),nn.ReLU())
# Block 5
self.b5_conv1=nn.Sequential(nn.Conv3d(nPlanes[3], nPlanes[4], 3, 1, dilation=2, padding=2), nn.BatchNorm3d(nPlanes[4]),nn.ReLU())
self.b5_conv2=nn.Sequential(nn.Conv3d(nPlanes[4], nPlanes[4], 3, 1, dilation=2, padding=2), nn.BatchNorm3d(nPlanes[4]),nn.ReLU())
# Prediction
self.pre_conv1=nn.Sequential(nn.Conv3d(nPlanes[2]+nPlanes[3]+nPlanes[4], int((nPlanes[2]+nPlanes[3]+nPlanes[4])/3*2), 1, 1),\
nn.BatchNorm3d(int((nPlanes[2]+nPlanes[3]+nPlanes[4])/3*2)),nn.ReLU())
self.pre_conv2=nn.Sequential(nn.Conv3d(int((nPlanes[2]+nPlanes[3]+nPlanes[4])/3*2), classes, 1, 1))
def forward(self, x):
# Block 1
x = self.b1_conv1(x)
res_x = self.b1_res(x)
x = self.b1_conv2(x)
x = self.b1_conv3(x)
x = x + res_x
# Block 2
res_x = self.b2_res(x)
x = self.b2_conv1(x)
x = self.b2_conv2(x)
x = x +res_x
# Block 3
b3_x1 = self.b3_conv1(x)
b3_x2 = self.b3_conv2(b3_x1)
b3_x = b3_x1 + b3_x2
# Block 4
b4_x1 = self.b4_conv1(b3_x)
b4_x2 = self.b4_conv2(b4_x1)
b4_x = b4_x1 +b4_x2
# Block 5
b5_x1 = self.b5_conv1(b4_x)
b5_x2 = self.b5_conv2(b5_x1)
b5_x = b5_x1 + b5_x2
# Concat b3,b4,b5
x = torch.cat((b3_x, b4_x, b5_x),dim=1)
# Prediction
x = self.pre_conv1(x)
x = self.pre_conv2(x)
return x
class SSCNet(nn.Module):
def __init__(self, args):
nn.Module.__init__(self)
self.args = args
classes = args['DATA']['classes_completion']
m = args['Completion']['m']
if args['Completion']['feeding'] == 'feat':
input_dim = args['Segmentation']['m']
elif args['Completion']['feeding'] == 'both':
input_dim = args['Segmentation']['m'] + args['DATA']['classes_seg']
else:
input_dim = args['DATA']['classes_seg']
self.Decoder = SSCNet_Decoder(input_dim=input_dim, nPlanes=[m, m, m, m, m], classes=classes)
self.upsample = nn.Sequential(nn.Conv3d(in_channels=classes, out_channels=classes * 8, kernel_size=1, stride=1),
nn.BatchNorm3d(classes * 8), nn.ReLU(), model_utils.PixelShuffle3D(upscale_factor=2))
if args['Completion']['interaction']:
self.interaction_module = model_utils.interaction_module(args,
self.args['Completion']['point_cloud_range'],
self.args['Completion']['voxel_size'],
self.args['Completion']['search_k'],
feat_relation=args['Completion']['feat_relation'])
def forward(self, feat):
x = feat.dense()
x = self.Decoder(x)
if self.args['Completion']['interaction']:
coord, features = model_utils.extract_coord_features(x)
if self.args['Completion']['feeding'] == 'both':
feat.features = feat.features[:, self.args['DATA']['classes_seg']:]
x = spconv.SparseConvTensor(features=features.float(),
indices=coord.int(),
spatial_shape=[int(s/2) for s in self.args['Completion']['full_scale']],
batch_size=self.args['TRAIN']['batch_size'])
x = self.interaction_module(feat, x)
x = self.upsample(x)
return [x]
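# Added for illustration: a hypothetical config dictionary covering the nested keys
# this module reads (args['DATA'], args['Segmentation'], args['Completion'],
# args['TRAIN']). Every value below is an assumption for the sketch, not a
# recommended or original setting.
_example_args = {
    'DATA': {'classes_completion': 20, 'classes_seg': 20},
    'Segmentation': {'m': 16},
    'TRAIN': {'batch_size': 2},
    'Completion': {'m': 16, 'feeding': 'both', 'interaction': False,
                   'full_scale': [256, 256, 32],
                   'point_cloud_range': [0, -25.6, -2, 51.2, 25.6, 4.4],
                   'voxel_size': 0.2, 'search_k': 8, 'feat_relation': False},
}
# model = get_model(_example_args)  # would build SSCNet with the decoder defined above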
| 44.111111
| 137
| 0.56765
|
1d73005832a418da8ae44cce561d109cbe46c10e
| 7,765
|
py
|
Python
|
GraphDefinitions.py
|
MojaveTom/HomeGraphing
|
d8e4e296f71bd153f86ce41432df2e41ce8b58c4
|
[
"MIT"
] | null | null | null |
GraphDefinitions.py
|
MojaveTom/HomeGraphing
|
d8e4e296f71bd153f86ce41432df2e41ce8b58c4
|
[
"MIT"
] | 1
|
2019-05-25T17:17:57.000Z
|
2019-05-25T17:17:57.000Z
|
GraphDefinitions.py
|
MojaveTom/HomeGraphing
|
d8e4e296f71bd153f86ce41432df2e41ce8b58c4
|
[
"MIT"
] | null | null | null |
'''
Define the schema for graph definition dictionary.
'''
import os # https://docs.python.org/3/library/os.html
import sys # https://docs.python.org/3/library/sys.html
import json # https://docs.python.org/3/library/json.html
import toml # https://github.com/uiri/toml https://github.com/toml-lang/toml
# comment json strips python and "//" comments form json before applying json.load routines.
import commentjson # https://github.com/vaidik/commentjson https://commentjson.readthedocs.io/en/latest/
# Lark is used by commentjson -- import commented out, but here for documentation.
# import lark # https://github.com/lark-parser/lark https://lark-parser.readthedocs.io/en/latest/
import logging # https://docs.python.org/3/library/logging.html
from progparams.GetLoggingDict import setConsoleLoggingLevel, setLogFileLoggingLevel, getConsoleLoggingLevel, getLogFileLoggingLevel
# https://github.com/keleshev/schema
from schema import Schema, And, Or, Use, Optional, SchemaError
# import matplotlib.cm as cm
import bokeh.palettes as bp
import bokeh.colors as bc
import glob
from itertools import chain
flatten = chain.from_iterable
logger = logging.getLogger(__name__)
debug = logger.debug
critical = logger.critical
info = logger.info
MyPath = os.path.dirname(os.path.realpath(__file__))
ProgName, ext = os.path.splitext(os.path.basename(sys.argv[0]))
ProgPath = os.path.dirname(os.path.realpath(sys.argv[0]))
# logger.debug('bokeh __palettes__ are: %s' % bp.__palettes__)
bokehKnownColors = bc.named.__all__
# This makes VSCode happy, but I don't think it will work as I intend.
# bokehPaletteFamilies = list(bp._PalettesModule.all_palettes)
## The following works for Python, but not for VSCode.
bokehPaletteFamilies = list(bp.all_palettes.keys())
palettesAndColors = bokehPaletteFamilies + bokehKnownColors
# logger.debug('bokeh palette families are: %s' % bokehPaletteFamilies)
# And(str, lambda s: s in cm.datad.keys())
# # And(str, lambda s: s in cm.datad.keys())
# Or(
gds = {str: {'GraphTitle': str
, 'DBHost': And(Use(str.lower), Use(str.lower), lambda s: s in ('rc', 'ss'), error='DBHost must be "RC" or "SS"')
, 'outputFile': str
, Optional('ShowGraph', default=True): bool
, Optional('graph_color_map', default='Dark2'): Or(
And(str, lambda s: s in bokehPaletteFamilies)
, None, error='Graph color map not defined.')
, 'XaxisTitle': str
, 'Yaxes': [{'title': str
, Optional('color_map', default=None): Or(
And(str, lambda s: s in palettesAndColors)
, None, error='Axis color map not defined.')
, Optional('color', default=None): Or(
And(str, lambda s: s in bokehKnownColors)
, None, error='Axis color not defined.')
, Optional('location', default='left'):
And(str, lambda s: s in ('left', 'right'), error='Axis location must be "left" or "right".')}]
, 'items': [ { 'query': Or(str, None)
, 'variableNames': [str]
, 'datafile': str
, 'dataname': str
, Optional('axisNum', default=0): Use(int)
, Optional('includeInLegend', default=True): Use(bool)
, Optional('lineType', default='line'):
And(str, lambda s: s in ('line', 'step'), error='Item lineType must be "line" or "step".')
, Optional('dataTimeZone', default='serverLocal'):
And(str, lambda s: s in ('serverLocal', 'UTC'), error='Item dataTimeZone must be "serverLocal" or "UTC".')
, Optional('lineMods', default={"glyph.line_width": "2", "muted_glyph.line_width": "4"}): {str: str}
, Optional('color_map', default=None): Or(
And(str, lambda s: s in bokehPaletteFamilies)
, None, error='Axis color map not defined.')
, Optional('color', default=None): Or(
And(str, lambda s: s in bokehKnownColors)
, None, error='Axis color not defined.') } ] } }
# Doesn't work since some of the keys are class objects defined in schema.
# Can't pickle gds either.
# with open(os.path.join(MyPath, "GraphDefsSchema.json"), 'w') as file:
# json.dump(gds, file, indent=2)
def GetGraphDefs(GraphDefFile=None, *args, **kwargs):
'''
Load a toml or json file with graph definitions in it.
The graph definitions dictionary is validated.
'''
logger.debug(f"Entered GetGraphDefs with argument {GraphDefFile}, and kwargs {kwargs!r}")
logger.debug('MyPath in GraphDefinitions.GetGraphDefs is: %s' % MyPath)
if kwargs.get('loggingLevel') is not None:
setConsoleLoggingLevel(kwargs.get('loggingLevel'))
pass
if kwargs.get('GraphDefFile') is not None:
fns = kwargs['GraphDefFile']
if isinstance(fns, str): fns = (fns,)
else:
if GraphDefFile is None: GraphDefFile = "OneLineGraph"
# Look for .toml, .jsonc and .json files with GraphDefFile in the main program's dir then in cwd.
fns = [ os.path.join(ProgPath, f"{GraphDefFile}") # First try un-adorned file name
, os.path.join(ProgPath, f"{GraphDefFile}*.toml")
, os.path.join(ProgPath, f"{GraphDefFile}*.jsonc")
, os.path.join(ProgPath, f"{GraphDefFile}*.json")
, f"{GraphDefFile}"
, f"{GraphDefFile}*.toml"
, f"{GraphDefFile}*.jsonc"
, f"{GraphDefFile}*.json"
]
debug(f"Looking for graph definition file in default locations.")
# glob process graph def paths
# Make a list of actual files to read.
fns = list(flatten([glob.glob(x) for x in fns]))
debug(f"Looking for first good JSON or TOML graph defs file in {fns!r}")
if not fns: return None, None # no graph definition files to read from.
for fn in fns:
try:
debug(f"Trying to load graph definitions from file: {fn}")
fnExt = os.path.splitext(fn)[1]
if fnExt == ".json" or fnExt == ".jsonc":
GraphDefs = commentjson.load(open(fn))
elif fnExt == ".toml":
GraphDefs = toml.load(fn)
else:
critical(f"Unrecognized file type from which to load graph definitions: {fnExt}")
return None, fn
debug(f"Successfully loaded GraphDefs: {GraphDefs}\n\nFrom file {fn}")
break # exit the for loop without doing the else clause.
except json.JSONDecodeError as e:
info(f"Json file: {fn} did not load successfully: {e}")
except FileNotFoundError as f:
info(f"Param file: {fn} does not exist. {f}")
except IsADirectoryError as d:
info(f"Param file: {fn} is a directory! {d}")
except toml.TomlDecodeError as t:
info(f"Toml file: {fn} did not load successfully: {t}")
else:
critical(f'No graph definitions file was found and loaded.')
return None, None
try:
debug(f'Validating the loaded graph definitions dictionary.')
GraphDefsSchema = Schema(gds, name = 'Graphing Schema')
GraphDefs = GraphDefsSchema.validate(GraphDefs)
logger.debug('Graph definitions file is valid.')
except SchemaError as e:
logger.critical('Graph definition dictionary is not valid. %s', e)
logger.debug('%s' % e.autos)
return None, fn
return GraphDefs, fn
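# A minimal usage sketch added for illustration; "MyGraphs" is a hypothetical base
# file name and nothing here is part of the original module's behaviour.
if __name__ == '__main__':
    graph_defs, defs_file = GetGraphDefs("MyGraphs")
    if graph_defs is None:
        print("No valid graph definitions file was found.")
    else:
        print(f"Loaded {len(graph_defs)} graph definition(s) from {defs_file}")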
| 49.775641
| 132
| 0.61056
|
02c385d0079dcd89765916768fc7d57338c3f284
| 918
|
py
|
Python
|
Pacote Dowload/CursoemVideo/ex 069.py
|
AMF1971/Cursoemvideo-Python
|
814ce748ab72e2d6b09a4e15f943bd72b0922f8c
|
[
"MIT"
] | null | null | null |
Pacote Dowload/CursoemVideo/ex 069.py
|
AMF1971/Cursoemvideo-Python
|
814ce748ab72e2d6b09a4e15f943bd72b0922f8c
|
[
"MIT"
] | null | null | null |
Pacote Dowload/CursoemVideo/ex 069.py
|
AMF1971/Cursoemvideo-Python
|
814ce748ab72e2d6b09a4e15f943bd72b0922f8c
|
[
"MIT"
] | null | null | null |
# Create a program that reads the age and sex of several people. For each person registered,
# the program should ask whether the user wants to continue or not. At the end, show:
# A) how many people are over 18 years old.
# B) how many men were registered.
# C) how many women are under 20 years old.
tot18 = totH = totM20 = 0
while True:
idade = int(input('Idade:'))
sexo = ' '
while sexo not in 'MF':
sexo = str(input('Sexo: [M/F]')).strip().upper()[0]
if idade >= 18:
tot18 += 1
if sexo == 'M':
totH += 1
if sexo == 'F' and idade < 20:
totM20 += 1
resp = ' '
while resp not in 'SN':
resp = str(input('Quer continuar? [S/N]')).strip().upper()[0]
if resp == 'N':
break
print(f'Total de pessoas com mais de 18 anos {tot18}')
print(f'Ao todo temos {totH} homens cadastrados')
print(f'E temos {totM20} mulheres com menos de 20 anos')
| 27.818182
| 89
| 0.608932
|
9d30dc0dbd2a639c06078643dae38c5fa0392ef7
| 11,672
|
py
|
Python
|
tools/engine/tester.py
|
jameslong95/FasterSeg
|
872e04964ea46494a6018d9915cee5476e361c27
|
[
"MIT"
] | 1
|
2020-05-11T00:41:43.000Z
|
2020-05-11T00:41:43.000Z
|
tools/engine/tester.py
|
jameslong95/FasterSeg
|
872e04964ea46494a6018d9915cee5476e361c27
|
[
"MIT"
] | null | null | null |
tools/engine/tester.py
|
jameslong95/FasterSeg
|
872e04964ea46494a6018d9915cee5476e361c27
|
[
"MIT"
] | null | null | null |
import os
import os.path as osp
import cv2
import numpy as np
import time
from tqdm import tqdm
import torch
import torch.nn.functional as F
import torch.multiprocessing as mp
from engine.logger import get_logger
from utils.pyt_utils import load_model, link_file, ensure_dir
from utils.img_utils import pad_image_to_shape, normalize
logger = get_logger()
class Tester(object):
def __init__(self, dataset, class_num, image_mean, image_std, network,
multi_scales, is_flip, devices=0, out_idx=0, threds=3, config=None, logger=None,
verbose=False, save_path=None, show_image=False):
self.dataset = dataset
self.ndata = self.dataset.get_length()
self.class_num = class_num
self.image_mean = image_mean
self.image_std = image_std
self.multi_scales = multi_scales
self.is_flip = is_flip
self.network = network
self.devices = devices
if type(self.devices) == int: self.devices = [self.devices]
self.out_idx = out_idx
self.threds = threds
self.config = config
self.logger = logger
self.context = mp.get_context('spawn')
self.val_func = None
self.results_queue = self.context.Queue(self.ndata)
self.verbose = verbose
self.save_path = save_path
if save_path is not None:
ensure_dir(save_path)
self.show_image = show_image
def run(self, model_path, model_indice, log_file, log_file_link):
"""There are four evaluation modes:
1.only eval a .pth model: -e *.pth
2.only eval a certain epoch: -e epoch
3.eval all epochs in a given section: -e start_epoch-end_epoch
4.eval all epochs from a certain started epoch: -e start_epoch-
"""
if '.pth' in model_indice:
models = [model_indice, ]
elif "-" in model_indice:
start_epoch = int(model_indice.split("-")[0])
end_epoch = model_indice.split("-")[1]
models = os.listdir(model_path)
models.remove("epoch-last.pth")
sorted_models = [None] * len(models)
model_idx = [0] * len(models)
for idx, m in enumerate(models):
num = m.split(".")[0].split("-")[1]
model_idx[idx] = num
sorted_models[idx] = m
model_idx = np.array([int(i) for i in model_idx])
down_bound = model_idx >= start_epoch
up_bound = [True] * len(sorted_models)
if end_epoch:
end_epoch = int(end_epoch)
assert start_epoch < end_epoch
up_bound = model_idx <= end_epoch
bound = up_bound * down_bound
model_slice = np.array(sorted_models)[bound]
models = [os.path.join(model_path, model) for model in
model_slice]
else:
models = [os.path.join(model_path,
'epoch-%s.pth' % model_indice), ]
results = open(log_file, 'a')
link_file(log_file, log_file_link)
for model in models:
logger.info("Load Model: %s" % model)
self.val_func = load_model(self.network, model)
result_line, mIoU = self.multi_process_evaluation()
results.write('Model: ' + model + '\n')
results.write(result_line)
results.write('\n')
results.flush()
results.close()
def run_online(self):
"""
eval during training
"""
self.val_func = self.network
self.single_process_evaluation()
def single_process_evaluation(self):
with torch.no_grad():
for idx in tqdm(range(self.ndata)):
dd = self.dataset[idx]
self.func_per_iteration(dd, self.devices[0], iter=idx)
def run_online_multiprocess(self):
"""
eval during training
"""
self.val_func = self.network
self.multi_process_single_gpu_evaluation()
def multi_process_single_gpu_evaluation(self):
# start_eval_time = time.perf_counter()
stride = int(np.ceil(self.ndata / self.threds))
# start multi-process on single-gpu
procs = []
for d in range(self.threds):
e_record = min((d + 1) * stride, self.ndata)
shred_list = list(range(d * stride, e_record))
device = self.devices[0]
logger.info('Thread %d handle %d data.' % (d, len(shred_list)))
p = self.context.Process(target=self.worker, args=(shred_list, device))
procs.append(p)
for p in procs:
p.start()
for p in procs:
p.join()
def multi_process_evaluation(self):
start_eval_time = time.perf_counter()
nr_devices = len(self.devices)
stride = int(np.ceil(self.ndata / nr_devices))
# start multi-process on multi-gpu
procs = []
for d in range(nr_devices):
e_record = min((d + 1) * stride, self.ndata)
shred_list = list(range(d * stride, e_record))
device = self.devices[d]
logger.info('GPU %s handle %d data.' % (device, len(shred_list)))
p = self.context.Process(target=self.worker, args=(shred_list, device))
procs.append(p)
for p in procs:
p.start()
for p in procs:
p.join()
def worker(self, shred_list, device):
start_load_time = time.time()
# logger.info('Load Model on Device %d: %.2fs' % (device, time.time() - start_load_time))
for idx in shred_list:
dd = self.dataset[idx]
results_dict = self.func_per_iteration(dd, device, iter=idx)
self.results_queue.put(results_dict)
def func_per_iteration(self, data, device, iter=None):
raise NotImplementedError
def compute_metric(self, results):
raise NotImplementedError
# evaluate the whole image at once
def whole_eval(self, img, output_size, input_size=None, device=None):
if input_size is not None:
img, margin = self.process_image(img, input_size)
else:
img = self.process_image(img, input_size)
pred = self.val_func_process(img, device)
if input_size is not None:
pred = pred[:, margin[0]:(pred.shape[1] - margin[1]),
margin[2]:(pred.shape[2] - margin[3])]
pred = pred.permute(1, 2, 0)
pred = pred.cpu().numpy()
if output_size is not None:
pred = cv2.resize(pred,
(output_size[1], output_size[0]),
interpolation=cv2.INTER_LINEAR)
pred = pred.argmax(2)
return pred
# slide the window to evaluate the image
def sliding_eval(self, img, crop_size, stride_rate, device=None):
ori_rows, ori_cols, c = img.shape
processed_pred = np.zeros((ori_rows, ori_cols, self.class_num))
for s in self.multi_scales:
img_scale = cv2.resize(img, None, fx=s, fy=s,
interpolation=cv2.INTER_LINEAR)
new_rows, new_cols, _ = img_scale.shape
processed_pred += self.scale_process(img_scale,
(ori_rows, ori_cols),
crop_size, stride_rate, device)
pred = processed_pred.argmax(2)
return pred
def scale_process(self, img, ori_shape, crop_size, stride_rate,
device=None):
new_rows, new_cols, c = img.shape
long_size = new_cols if new_cols > new_rows else new_rows
if long_size <= crop_size:
input_data, margin = self.process_image(img, crop_size)
score = self.val_func_process(input_data, device)
score = score[:, margin[0]:(score.shape[1] - margin[1]),
margin[2]:(score.shape[2] - margin[3])]
else:
stride = int(np.ceil(crop_size * stride_rate))
img_pad, margin = pad_image_to_shape(img, crop_size,
cv2.BORDER_CONSTANT, value=0)
pad_rows = img_pad.shape[0]
pad_cols = img_pad.shape[1]
r_grid = int(np.ceil((pad_rows - crop_size) / stride)) + 1
c_grid = int(np.ceil((pad_cols - crop_size) / stride)) + 1
data_scale = torch.zeros(self.class_num, pad_rows, pad_cols).cuda(
device)
count_scale = torch.zeros(self.class_num, pad_rows, pad_cols).cuda(
device)
for grid_yidx in range(r_grid):
for grid_xidx in range(c_grid):
s_x = grid_xidx * stride
s_y = grid_yidx * stride
e_x = min(s_x + crop_size, pad_cols)
e_y = min(s_y + crop_size, pad_rows)
s_x = e_x - crop_size
s_y = e_y - crop_size
img_sub = img_pad[s_y:e_y, s_x: e_x, :]
count_scale[:, s_y: e_y, s_x: e_x] += 1
input_data, tmargin = self.process_image(img_sub, crop_size)
temp_score = self.val_func_process(input_data, device)
temp_score = temp_score[:,
tmargin[0]:(temp_score.shape[1] - tmargin[1]),
tmargin[2]:(temp_score.shape[2] - tmargin[3])]
data_scale[:, s_y: e_y, s_x: e_x] += temp_score
# score = data_scale / count_scale
score = data_scale
score = score[:, margin[0]:(score.shape[1] - margin[1]),
margin[2]:(score.shape[2] - margin[3])]
score = score.permute(1, 2, 0)
data_output = cv2.resize(score.cpu().numpy(),
(ori_shape[1], ori_shape[0]),
interpolation=cv2.INTER_LINEAR)
return data_output
def val_func_process(self, input_data, device=None):
input_data = np.ascontiguousarray(input_data[None, :, :, :], dtype=np.float32)
input_data = torch.FloatTensor(input_data).cuda(device)
with torch.cuda.device(input_data.get_device()):
self.val_func.eval()
self.val_func.to(input_data.get_device())
with torch.no_grad():
score = self.val_func(input_data)
if (isinstance(score, tuple) or isinstance(score, list)) and len(score) > 1:
score = score[self.out_idx]
score = score[0] # a single image pass, ignore batch dim
if self.is_flip:
input_data = input_data.flip(-1)
score_flip = self.val_func(input_data)
score_flip = score_flip[0]
score += score_flip.flip(-1)
score = torch.exp(score)
# score = score.data
return score
def process_image(self, img, crop_size=None):
p_img = img
if img.shape[2] < 3:
im_b = p_img
im_g = p_img
im_r = p_img
p_img = np.concatenate((im_b, im_g, im_r), axis=2)
p_img = normalize(p_img, self.image_mean, self.image_std)
if crop_size is not None:
p_img, margin = pad_image_to_shape(p_img, crop_size, cv2.BORDER_CONSTANT, value=0)
p_img = p_img.transpose(2, 0, 1)
return p_img, margin
p_img = p_img.transpose(2, 0, 1)
return p_img
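# A minimal subclass sketch added for illustration: Tester leaves func_per_iteration
# and compute_metric abstract, so a concrete evaluator is expected to look roughly
# like this. The data dict keys ('data', 'label'), the crop size and the
# pixel-accuracy metric are assumptions, not the project's actual evaluator.
class _ExampleSegTester(Tester):
    def func_per_iteration(self, data, device, iter=None):
        img, label = data['data'], data['label']
        pred = self.sliding_eval(img, crop_size=769, stride_rate=2 / 3, device=device)
        return {'pred': pred, 'label': label}
    def compute_metric(self, results):
        correct = sum((r['pred'] == r['label']).sum() for r in results)
        total = sum(r['label'].size for r in results)
        return 'pixel acc: %.4f' % (correct / total)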
| 37.290735
| 97
| 0.558687
|
b58c882560a3be07791b4f51026b4399d6951a77
| 304
|
py
|
Python
|
gde/models/mnist/_convnet.py
|
MIPT-Oulu/greedy_ensembles_training
|
de72d8f84f151a0398c49aaf56c1cc9c709f79b7
|
[
"Apache-2.0"
] | 10
|
2021-06-01T05:15:18.000Z
|
2021-12-26T03:59:53.000Z
|
gde/models/mnist/_convnet.py
|
Oulu-IMEDS/greedy_ensembles_training
|
de72d8f84f151a0398c49aaf56c1cc9c709f79b7
|
[
"Apache-2.0"
] | null | null | null |
gde/models/mnist/_convnet.py
|
Oulu-IMEDS/greedy_ensembles_training
|
de72d8f84f151a0398c49aaf56c1cc9c709f79b7
|
[
"Apache-2.0"
] | 1
|
2021-06-06T07:08:43.000Z
|
2021-06-06T07:08:43.000Z
|
from torch import nn
from gde.models.cifar import PreResNet as PreResNetMeta
class PreResNet8(PreResNetMeta):
def __init__(self, num_classes=10):
super(PreResNet8, self).__init__(
num_classes=num_classes, depth=8, dropout_rate=0
)
self.avgpool = nn.AvgPool2d(7)
| 27.636364
| 60
| 0.697368
|
5f70a15b5073cda988a72f44b044dbddf9dd8331
| 13,330
|
py
|
Python
|
ndcube/tests/test_ndcube.py
|
DanRyanIrish/ndcube
|
f98f97ad9e65a8ddd79f047d76c596599cf94882
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
ndcube/tests/test_ndcube.py
|
DanRyanIrish/ndcube
|
f98f97ad9e65a8ddd79f047d76c596599cf94882
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
ndcube/tests/test_ndcube.py
|
DanRyanIrish/ndcube
|
f98f97ad9e65a8ddd79f047d76c596599cf94882
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
import astropy.units as u
import numpy as np
import pytest
from astropy.coordinates import SkyCoord, SpectralCoord
from astropy.wcs.wcsapi import BaseHighLevelWCS, BaseLowLevelWCS
from astropy.wcs.wcsapi.wrappers import SlicedLowLevelWCS
from ndcube.tests import helpers
def generate_data(shape):
data = np.arange(np.product(shape))
return data.reshape(shape)
def test_wcs_object(all_ndcubes):
assert isinstance(all_ndcubes.wcs.low_level_wcs, BaseLowLevelWCS)
assert isinstance(all_ndcubes.wcs, BaseHighLevelWCS)
@pytest.mark.parametrize("ndc, item",
(
("ndcube_3d_ln_lt_l", np.s_[:, :, 0]),
("ndcube_3d_ln_lt_l", np.s_[..., 0]),
("ndcube_3d_ln_lt_l", np.s_[1:2, 1:2, 0]),
("ndcube_3d_ln_lt_l", np.s_[..., 0]),
("ndcube_3d_ln_lt_l", np.s_[:, :, 0]),
("ndcube_3d_ln_lt_l", np.s_[1:2, 1:2, 0]),
("ndcube_4d_ln_lt_l_t", np.s_[:, :, 0, 0]),
("ndcube_4d_ln_lt_l_t", np.s_[..., 0, 0]),
("ndcube_4d_ln_lt_l_t", np.s_[1:2, 1:2, 1, 1]),
),
indirect=("ndc",))
def test_slicing_ln_lt(ndc, item):
sndc = ndc[item]
assert len(sndc.dimensions) == 2
assert set(sndc.wcs.world_axis_physical_types) == {"custom:pos.helioprojective.lat",
"custom:pos.helioprojective.lon"}
if sndc.uncertainty is not None:
assert np.allclose(sndc.data, sndc.uncertainty.array)
if sndc.mask is not None:
assert np.allclose(sndc.data > 0, sndc.mask)
if ndc.extra_coords and ndc.extra_coords.keys():
ec = sndc.extra_coords
assert set(ec.keys()) == {"time", "hello"}
wcs = sndc.wcs
assert isinstance(wcs, BaseHighLevelWCS)
assert isinstance(wcs.low_level_wcs, SlicedLowLevelWCS)
assert wcs.pixel_n_dim == 2
assert wcs.world_n_dim == 2
assert np.allclose(wcs.array_shape, sndc.data.shape)
assert np.allclose(sndc.wcs.axis_correlation_matrix, np.ones(2, dtype=bool))
@pytest.mark.parametrize("ndc, item",
(
("ndcube_3d_ln_lt_l", np.s_[0, 0, :]),
("ndcube_3d_ln_lt_l", np.s_[0, 0, ...]),
("ndcube_3d_ln_lt_l", np.s_[1, 1, 1:2]),
("ndcube_3d_ln_lt_l", np.s_[0, 0, :]),
("ndcube_3d_ln_lt_l", np.s_[0, 0, ...]),
("ndcube_3d_ln_lt_l", np.s_[1, 1, 1:2]),
("ndcube_4d_ln_lt_l_t", np.s_[0, 0, :, 0]),
("ndcube_4d_ln_lt_l_t", np.s_[0, 0, ..., 0]),
("ndcube_4d_ln_lt_l_t", np.s_[1, 1, 1:2, 1]),
),
indirect=("ndc",))
def test_slicing_wave(ndc, item):
sndc = ndc[item]
assert len(sndc.dimensions) == 1
assert set(sndc.wcs.world_axis_physical_types) == {"em.wl"}
if sndc.uncertainty is not None:
assert np.allclose(sndc.data, sndc.uncertainty.array)
if sndc.mask is not None:
assert np.allclose(sndc.data > 0, sndc.mask)
if ndc.extra_coords and ndc.extra_coords.keys():
ec = sndc.extra_coords
assert set(ec.keys()) == {"bye"}
wcs = sndc.wcs
assert isinstance(wcs, BaseHighLevelWCS)
assert isinstance(wcs.low_level_wcs, SlicedLowLevelWCS)
assert wcs.pixel_n_dim == 1
assert wcs.world_n_dim == 1
assert np.allclose(wcs.array_shape, sndc.data.shape)
assert np.allclose(sndc.wcs.axis_correlation_matrix, np.ones(1, dtype=bool))
@pytest.mark.parametrize("ndc, item",
(
("ndcube_3d_ln_lt_l", np.s_[0, :, :]),
("ndcube_3d_ln_lt_l", np.s_[0, ...]),
("ndcube_3d_ln_lt_l", np.s_[1, 1:2]),
("ndcube_3d_ln_lt_l", np.s_[0, :, :]),
("ndcube_3d_ln_lt_l", np.s_[0, ...]),
("ndcube_3d_ln_lt_l", np.s_[1, :, 1:2]),
("ndcube_4d_ln_lt_l_t", np.s_[0, :, :, 0]),
("ndcube_4d_ln_lt_l_t", np.s_[0, ..., 0]),
("ndcube_4d_ln_lt_l_t", np.s_[1, 1:2, 1:2, 1]),
),
indirect=("ndc",))
def test_slicing_split_celestial(ndc, item):
sndc = ndc[item]
assert len(sndc.dimensions) == 2
if sndc.uncertainty is not None:
assert np.allclose(sndc.data, sndc.uncertainty.array)
if sndc.mask is not None:
assert np.allclose(sndc.data > 0, sndc.mask)
if ndc.extra_coords and ndc.extra_coords.keys():
ec = sndc.extra_coords
assert set(ec.keys()) == {"hello", "bye"}
assert isinstance(sndc.wcs, BaseHighLevelWCS)
assert isinstance(sndc.wcs.low_level_wcs, SlicedLowLevelWCS)
wcs = sndc.wcs
assert wcs.pixel_n_dim == 2
assert wcs.world_n_dim == 3
assert np.allclose(wcs.array_shape, sndc.data.shape)
assert set(wcs.world_axis_physical_types) == {"custom:pos.helioprojective.lat",
"custom:pos.helioprojective.lon",
"em.wl"}
assert np.allclose(wcs.axis_correlation_matrix, np.array([[True, False],
[False, True],
[False, True]], dtype=bool))
@pytest.mark.parametrize("axes", ([-1], [2], ["em"]))
def test_axis_world_coords_single(axes, ndcube_3d_ln_lt_l):
coords = ndcube_3d_ln_lt_l.axis_world_coords_values(*axes)
assert u.allclose(coords, [1.02e-09, 1.04e-09, 1.06e-09, 1.08e-09]*u.m)
@pytest.mark.parametrize("axes", ([-1], [2], ["em"]))
def test_axis_world_coords_single_edges(axes, ndcube_3d_ln_lt_l):
coords = ndcube_3d_ln_lt_l.axis_world_coords_values(*axes, edges=True)
assert u.allclose(coords, [1.01e-09, 1.03e-09, 1.05e-09, 1.07e-09, 1.09e-09]*u.m)
@pytest.mark.parametrize("ndc, item",
(
("ndcube_3d_ln_lt_l", np.s_[0, 0, :]),
("ndcube_3d_ln_lt_l", np.s_[0, 0, ...]),
("ndcube_3d_ln_lt_l", np.s_[0, 0, :]),
("ndcube_3d_ln_lt_l", np.s_[0, 0, ...]),
),
indirect=("ndc",))
def test_axis_world_coords_sliced_all_3d(ndc, item):
coords = ndc[item].axis_world_coords_values()
assert u.allclose(coords, [1.02e-09, 1.04e-09, 1.06e-09, 1.08e-09] * u.m)
@pytest.mark.parametrize("ndc, item",
(
("ndcube_4d_ln_lt_l_t", np.s_[0, 0, :, 0]),
("ndcube_4d_ln_lt_l_t", np.s_[0, 0, ..., 0]),
),
indirect=("ndc",))
def test_axis_world_coords_sliced_all_4d(ndc, item):
coords = ndc[item].axis_world_coords_values()
expected = [2.0e-11, 4.0e-11, 6.0e-11, 8.0e-11, 1.0e-10,
1.2e-10, 1.4e-10, 1.6e-10, 1.8e-10, 2.0e-10] * u.m
assert u.allclose(coords, expected)
@pytest.mark.xfail
def test_axis_world_coords_all(ndcube_3d_ln_lt_l):
coords = ndcube_3d_ln_lt_l.axis_world_coord()
assert len(coords) == 2
assert isinstance(coords[0], SkyCoord)
assert u.allclose(coords[0].Tx, [[0.60002173, 0.59999127, 0.5999608],
[1., 1., 1.]] * u.deg)
assert u.allclose(coords[0].Ty, [[1.26915033e-05, 4.99987815e-01, 9.99962939e-01],
[1.26918126e-05, 5.00000000e-01, 9.99987308e-01]] * u.deg)
assert isinstance(coords[1], u.Quantity)
assert u.allclose(coords[1], [1.02e-09, 1.04e-09, 1.06e-09, 1.08e-09] * u.m)
def test_axis_world_coords_values_all(ndcube_3d_ln_lt_l):
coords = ndcube_3d_ln_lt_l.axis_world_coords_values()
assert len(coords) == 3
assert all(isinstance(c, u.Quantity) for c in coords)
assert u.allclose(coords[0], [[0.00277778, 0.00277778, 0.00277778],
[0.00555556, 0.00555556, 0.00555556]] * u.deg)
assert u.allclose(coords[1], [[-0.00555556, -0.00416667, -0.00277778],
[-0.00555556, -0.00416667, -0.00277778]] * u.deg)
assert u.allclose(coords[2], [1.02e-09, 1.04e-09, 1.06e-09, 1.08e-09] * u.m)
def test_array_axis_physical_types(ndcube_4d_ln_lt_l_t):
expected = [
('custom:pos.helioprojective.lon', 'custom:pos.helioprojective.lat'),
('custom:pos.helioprojective.lon', 'custom:pos.helioprojective.lat'),
('em.wl',), ('time',)]
output = ndcube_4d_ln_lt_l_t.array_axis_physical_types
for i in range(len(expected)):
assert all([physical_type in expected[i] for physical_type in output[i]])
def test_crop(ndcube_4d_ln_lt_l_t):
intervals = ndcube_4d_ln_lt_l_t.wcs.array_index_to_world([1, 2], [0, 1], [0, 1], [0, 2])
lower_corner = [coord[0] for coord in intervals]
upper_corner = [coord[-1] for coord in intervals]
expected = ndcube_4d_ln_lt_l_t[1:3, 0:2, 0:2, 0:3]
output = ndcube_4d_ln_lt_l_t.crop(lower_corner, upper_corner)
helpers.assert_cubes_equal(output, expected)
def test_crop_with_nones(ndcube_4d_ln_lt_l_t):
lower_corner = [None] * 3
upper_corner = [None] * 3
interval0 = ndcube_4d_ln_lt_l_t.wcs.array_index_to_world([1, 2], [0, 1], [0, 1], [0, 2])[0]
lower_corner[0] = interval0[0]
upper_corner[0] = interval0[-1]
expected = ndcube_4d_ln_lt_l_t[:, :, :, 0:3]
output = ndcube_4d_ln_lt_l_t.crop(lower_corner, upper_corner)
helpers.assert_cubes_equal(output, expected)
def test_crop_1d_independent(ndcube_4d_ln_lt_l_t):
cube_1d = ndcube_4d_ln_lt_l_t[0, 0, :, 0]
wl_range = SpectralCoord([3e-11, 4.5e-11], unit=u.m)
expected = cube_1d[0:2]
output = cube_1d.crop([wl_range[0]], [wl_range[-1]])
helpers.assert_cubes_equal(output, expected)
def test_crop_1d_dependent(ndcube_4d_ln_lt_l_t):
cube_1d = ndcube_4d_ln_lt_l_t[0, :, 0, 0]
sky_range = cube_1d.wcs.array_index_to_world([0, 1])
expected = cube_1d[0:2]
output = cube_1d.crop([sky_range[0]], [sky_range[-1]])
helpers.assert_cubes_equal(output, expected)
def test_crop_by_values(ndcube_4d_ln_lt_l_t):
intervals = ndcube_4d_ln_lt_l_t.wcs.array_index_to_world_values([1, 2], [0, 1], [0, 1], [0, 2])
units = [u.min, u.m, u.deg, u.deg]
lower_corner = [coord[0] * unit for coord, unit in zip(intervals, units)]
upper_corner = [coord[-1] * unit for coord, unit in zip(intervals, units)]
expected = ndcube_4d_ln_lt_l_t[1:3, 0:2, 0:2, 0:3]
output = ndcube_4d_ln_lt_l_t.crop_by_values(lower_corner, upper_corner)
helpers.assert_cubes_equal(output, expected)
def test_crop_by_coords_with_units(ndcube_4d_ln_lt_l_t):
intervals = ndcube_4d_ln_lt_l_t.wcs.array_index_to_world_values([1, 2], [0, 1], [0, 1], [0, 2])
units = [u.min, u.m, u.deg, u.deg]
lower_corner = [coord[0] for coord in intervals]
upper_corner = [coord[-1] for coord in intervals]
lower_corner[0] *= u.min
upper_corner[0] *= u.min
lower_corner[1] *= u.m
upper_corner[1] *= u.m
lower_corner[2] *= u.deg
units[0] = None
expected = ndcube_4d_ln_lt_l_t[1:3, 0:2, 0:2, 0:3]
output = ndcube_4d_ln_lt_l_t.crop_by_values(lower_corner, upper_corner, units=units)
helpers.assert_cubes_equal(output, expected)
def test_crop_by_values_with_nones(ndcube_4d_ln_lt_l_t):
lower_corner = [None] * 4
lower_corner[0] = 0.5 * u.min
upper_corner = [None] * 4
upper_corner[0] = 1.1 * u.min
expected = ndcube_4d_ln_lt_l_t[:, :, :, 0:3]
output = ndcube_4d_ln_lt_l_t.crop_by_values(lower_corner, upper_corner)
helpers.assert_cubes_equal(output, expected)
def test_crop_by_values_all_nones(ndcube_4d_ln_lt_l_t):
lower_corner = [None] * 4
upper_corner = [None] * 4
output = ndcube_4d_ln_lt_l_t.crop_by_values(lower_corner, upper_corner)
helpers.assert_cubes_equal(output, ndcube_4d_ln_lt_l_t)
def test_crop_by_values_indexerror(ndcube_4d_ln_lt_l_t):
intervals = ndcube_4d_ln_lt_l_t.wcs.array_index_to_world_values([1, 2], [0, 1], [0, 1], [0, 2])
units = [u.min, u.m, u.deg, u.deg]
lower_corner = [coord[0] * unit for coord, unit in zip(intervals, units)]
upper_corner = [coord[-1] * unit for coord, unit in zip(intervals, units)]
lower_corner[1] *= -1
upper_corner[1] *= -1
with pytest.raises(IndexError):
ndcube_4d_ln_lt_l_t.crop_by_values(lower_corner, upper_corner)
def test_crop_by_values_1d_dependent(ndcube_4d_ln_lt_l_t):
cube_1d = ndcube_4d_ln_lt_l_t[0, :, 0, 0]
print(cube_1d.array_axis_physical_types)
lat_range, lon_range = cube_1d.wcs.low_level_wcs.array_index_to_world_values([0, 1])
lower_corner = [lat_range[0] * u.deg, lon_range[0] * u.deg]
upper_corner = [lat_range[-1] * u.deg, lon_range[-1] * u.deg]
expected = cube_1d[0:2]
output = cube_1d.crop_by_values(lower_corner, upper_corner)
helpers.assert_cubes_equal(output, expected)
| 43.562092
| 99
| 0.597299
|
d4454778c9bc57cff49a6951be433982bb587624
| 31
|
py
|
Python
|
applied/encoders/__init__.py
|
ndoll1998/AppliedTransformers
|
76cbdef6fdd765b2178af71038a61e3e71e0cec9
|
[
"MIT"
] | 3
|
2020-09-02T03:51:49.000Z
|
2020-09-18T14:09:48.000Z
|
applied/encoders/__init__.py
|
ndoll1998/AppliedTransformers
|
76cbdef6fdd765b2178af71038a61e3e71e0cec9
|
[
"MIT"
] | null | null | null |
applied/encoders/__init__.py
|
ndoll1998/AppliedTransformers
|
76cbdef6fdd765b2178af71038a61e3e71e0cec9
|
[
"MIT"
] | 2
|
2021-01-30T12:37:43.000Z
|
2021-05-19T06:29:31.000Z
|
from .huggingface import BERT
| 15.5
| 30
| 0.806452
|
4d7ab152eb8554d25021a280124e4dc0b6d9eaf0
| 3,120
|
py
|
Python
|
athene/minigames/move_to_beacon/agent.py
|
alkurbatov/athene
|
867797f7f7888ffab73a041eb17ec1b3753199bc
|
[
"MIT"
] | 3
|
2018-08-27T10:49:41.000Z
|
2019-01-29T14:55:45.000Z
|
athene/minigames/move_to_beacon/agent.py
|
alkurbatov/athene
|
867797f7f7888ffab73a041eb17ec1b3753199bc
|
[
"MIT"
] | null | null | null |
athene/minigames/move_to_beacon/agent.py
|
alkurbatov/athene
|
867797f7f7888ffab73a041eb17ec1b3753199bc
|
[
"MIT"
] | null | null | null |
# The MIT License (MIT)
#
# Copyright (c) 2017-2018 Alexander Kurbatov
"""A simple agent to play in the MoveToBeacon minigame.
There is not much machine learning inside because the purpose is
to set up a very simple agent and test that everything works on a very simple
task.
Here we use a very simple state machine to complete the task in two iterations.
To run this code do:
$ python -m pysc2.bin.agent --map MoveToBeacon --agent athene.minigames.move_to_beacon.Agent
"""
from pysc2.agents import base_agent
from pysc2.lib import actions
from pysc2.lib import features
from pysc2.lib import units
from athene.api.actions import \
ACTION_DO_NOTHING, \
ACTION_MOVE_TO_BEACON, \
ACTION_SELECT_MARINE
from athene.api.geometry import DIAMETERS
from athene.api.screen import UnitPos
class Agent(base_agent.BaseAgent):
def __init__(self):
super().__init__()
self.smart_action = ACTION_DO_NOTHING
def step(self, obs):
super().step(obs)
if obs.first():
print('[INFO] Game started!')
self.smart_action = ACTION_SELECT_MARINE
return actions.FUNCTIONS.no_op()
if obs.last():
print('[INFO] Game Finished!')
return actions.FUNCTIONS.no_op()
if self.smart_action == ACTION_SELECT_MARINE:
unit_type = obs.observation.feature_screen.unit_type
marine_y, marine_x = (unit_type == units.Terran.Marine).nonzero()
if not marine_y.any():
# NOTE (alkurbatov): Sometimes we are too fast and the marine
# hasn't been placed on the screen yet.
return actions.FUNCTIONS.no_op()
if len(marine_y) < DIAMETERS.get(units.Terran.Marine):
# NOTE (alkurbatov): Sometimes we receive not fully formed
# marine coordinates probably because we are too fast again.
# Just ignore it.
return actions.FUNCTIONS.no_op()
# NOTE (alkurbatov): There is only one marine on the screen and
# no other objects around it so it is safe to select any point
# in the list.
marine = UnitPos(marine_x, marine_y)
self.smart_action = ACTION_MOVE_TO_BEACON
return actions.FUNCTIONS.select_point('select', marine.pos)
if self.smart_action == ACTION_MOVE_TO_BEACON:
if actions.FUNCTIONS.Move_screen.id not in obs.observation.available_actions:
print('[WARNING] Nothing selected?')
self.smart_action = ACTION_SELECT_MARINE
return actions.FUNCTIONS.no_op()
player_relative = obs.observation.feature_screen.player_relative
beacon_y, beacon_x = (player_relative == features.PlayerRelative.NEUTRAL).nonzero()
if not beacon_y.any():
print('[WARNING] Where is your beacon?')
return actions.FUNCTIONS.no_op()
beacon = UnitPos(beacon_x, beacon_y)
return actions.FUNCTIONS.Move_screen('now', beacon.pos)
return actions.FUNCTIONS.no_op()
| 35.454545
| 95
| 0.651923
|
0f5f04aef3b858789498988c0576b66c0b681a12
| 1,505
|
py
|
Python
|
nci-checker.py
|
kdruken/nci-checker-v2
|
e37d24e401c01f1bad348ab64160455020828810
|
[
"MIT"
] | null | null | null |
nci-checker.py
|
kdruken/nci-checker-v2
|
e37d24e401c01f1bad348ab64160455020828810
|
[
"MIT"
] | null | null | null |
nci-checker.py
|
kdruken/nci-checker-v2
|
e37d24e401c01f1bad348ab64160455020828810
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
Usage: python nci-checker.py <file> [--check <options>]
Options:
--help Print this usage message and exit
--check Specify specific checkers (optional, default is all)
'''
import checkers
import sys, os
from datetime import datetime
from output import Output
def main():
start_time = datetime.now()
path = []
for item in sys.argv[1:]:
if item in ['--help', '-help', '-h', '--h']:
print __doc__
sys.exit()
if os.path.exists(item):
path = item
if not path:
sys.exit('No file specified or path does not exist.')
print 'Checking: ', os.path.abspath(path), '\n'
checks = {
'cf': checkers.CFChecker(path),
'acdd': checkers.ACDDChecker(path),
'gdal': checkers.GDALChecker(path),
'h5': checkers.HDF5Checker(path),
'meta': checkers.NetCDF4Checker(path),
}
for item in checks.keys():
checks[item] = checks[item].check()
out = Output(path, checks)
if os.path.isfile(path):
out.simple_report()
out.single_file()
out.to_screen()
elif os.path.isdir(path):
# launch batch script result = batch(xxxx)
# print xxx
pass
# Display total duration for compliance check
end_time = datetime.now()
print "\n"*3
print "Duration: {}".format(end_time - start_time)
if __name__ == "__main__":
main()
| 19.294872
| 64
| 0.566777
|
84ba0fb7c6372e6bce7468e3c56e97d9619af051
| 1,303
|
py
|
Python
|
src/random_facts.py
|
tehwalris/open-data-backend
|
265dab05bdca16a3997e570db5ab99b5edfc04c7
|
[
"MIT"
] | null | null | null |
src/random_facts.py
|
tehwalris/open-data-backend
|
265dab05bdca16a3997e570db5ab99b5edfc04c7
|
[
"MIT"
] | null | null | null |
src/random_facts.py
|
tehwalris/open-data-backend
|
265dab05bdca16a3997e570db5ab99b5edfc04c7
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import json
from .path import get_path_from_root
from .memoize import memoize
from .response import json_from_df
source = {
"src_url": "https://data.stadt-zuerich.ch/dataset/prd_ssz_gang-dur-zueri_od1005",
"src_label": "Schul- und Sportdepartement, Stadt Zรผrich",
}
def _load_data():
df = pd.read_csv(
get_path_from_root("data/random_facts/data.csv"),
parse_dates=[],
dtype={},
)
df = df.rename(
columns={
"RaumNr": "placeId",
"Raum": "placeName",
"Oberthema": "category",
"Zahl": "value",
"Thema": "topic",
}
)
df = df.drop(columns=["Vergleichszahl", "Vergleichstext", "Bemerkungen"])
df["topic_slug"] = df["topic"].str.extract(r"^([^:]+)", expand=True)[0]
return df
load_data = memoize(_load_data)
def make_answer_fact(topic_slug, unit):
def answer_fact():
df = load_data()
df = df[df["topic_slug"] == topic_slug]
if df.empty:
raise ValueError("unknown topic")
df = df[df["placeId"] <= 12]
df = df[["placeId", "placeName", "value"]]
return {
"unit": unit,
"values": json.loads(json_from_df(df)),
}
return answer_fact
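# A minimal usage sketch of the closure factory above (hedged: "anzahl_wohnungen"
# and "apartments" are made-up examples; real topic slugs come from the "Thema"
# column of data/random_facts/data.csv).
def _example_answer_fact():
    answer_apartments = make_answer_fact("anzahl_wohnungen", "apartments")
    try:
        # returns {"unit": "apartments", "values": [...]} for a known slug
        return answer_apartments()
    except ValueError:
        # the closure raises ValueError when the slug is not present in the CSV
        return None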
| 25.057692
| 85
| 0.576362
|
1a3613dff1cdfc6a420222a030f7083c34976694
| 864
|
py
|
Python
|
Code Challenges/python/checkPalindrome_codesignal.py
|
lineality/Coding-Challenges-Study-Practice
|
76d868b11b42b3bd3634f9a62abecb2e1eaac76d
|
[
"MIT"
] | null | null | null |
Code Challenges/python/checkPalindrome_codesignal.py
|
lineality/Coding-Challenges-Study-Practice
|
76d868b11b42b3bd3634f9a62abecb2e1eaac76d
|
[
"MIT"
] | 1
|
2021-06-24T17:39:48.000Z
|
2021-06-24T17:39:48.000Z
|
Code Challenges/python/checkPalindrome_codesignal.py
|
lineality/Coding-Study
|
76d868b11b42b3bd3634f9a62abecb2e1eaac76d
|
[
"MIT"
] | null | null | null |
# note: list.reverse() reverses a list in place, so the comparison below needs a
# real copy of the input list (an alias of the same list would always match)
# (User's) Problem
# We have:
# a string
# We need:
# is that string a palindrome? yes/no
# We must:
# boolean output
# name of function is
# checkPalindrome
# Solution (Product)
# Strategy 1:
# turn string into a list(array)
# Make a compare_list which is the reverse order of
# the original list
# compare the two, if they are the same: true, else false
def checkPalindrome(inputString):
# make input a list
input_as_list = list(inputString)
    # make a reversed copy
    # (slice with [::-1] so the original list is left untouched; reversing an
    # alias in place would reverse both names and make the comparison always True)
    reverse_order = input_as_list[::-1]
# compare two lists
if input_as_list == reverse_order:
return True
else:
return False
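# A shorter equivalent of the function above (a sketch, same behavior): a slice
# with [::-1] already produces a new reversed sequence, so no explicit
# copy-then-reverse step is needed.
def checkPalindrome_slice(inputString):
    return inputString == inputString[::-1]
# e.g. checkPalindrome_slice("level") -> True, checkPalindrome_slice("spam") -> False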
| 24
| 66
| 0.664352
|
9d9a3f2dd8b1444db250b6addc9aec5fd940f3ca
| 7,724
|
py
|
Python
|
elegantrl/vrepdoggo/ddpg_steplength_difference.py
|
lotharelvin/ElegantRL
|
602b1cf8019bef107a5e0c0d6f655f4f5a42f3ce
|
[
"Apache-2.0"
] | null | null | null |
elegantrl/vrepdoggo/ddpg_steplength_difference.py
|
lotharelvin/ElegantRL
|
602b1cf8019bef107a5e0c0d6f655f4f5a42f3ce
|
[
"Apache-2.0"
] | null | null | null |
elegantrl/vrepdoggo/ddpg_steplength_difference.py
|
lotharelvin/ElegantRL
|
602b1cf8019bef107a5e0c0d6f655f4f5a42f3ce
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
import numpy as np
import numpy.random as rd
from collections import deque
import random
import os
import doggo_env
class ActorDPG(nn.Module):
def __init__(self, state_dim, action_dim, mid_dim):
super(ActorDPG, self).__init__()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.net = nn.Sequential(
nn.Linear(state_dim, mid_dim), nn.ReLU(),
nn.Linear(mid_dim, mid_dim), nn.ReLU(),
nn.Linear(mid_dim, action_dim), nn.Tanh(),
)
def forward(self, s, noise_std=0.0):
a = self.net(s)
return a if noise_std == 0.0 else self.add_noise(a, noise_std)
def add_noise(self, action, noise_std):
normal_noise = ((torch.rand_like(action, device=self.device)) * noise_std).clamp_(-0.5, 0.5)
a_noise = (action + normal_noise).clamp_(-1.0, 1.0)
return a_noise
class Critic(nn.Module):
def __init__(self, state_dim, action_dim, mid_dim):
super(Critic, self).__init__()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.net = nn.Sequential(
nn.Linear(state_dim + action_dim, mid_dim), nn.ReLU(),
nn.Linear(mid_dim, mid_dim), nn.ReLU(),
            nn.Linear(mid_dim, 1),  # output a single Q value, no activation
)
def forward(self, s, a):
        x = torch.cat((s, a), dim=1)  # s and a are batched 2-D tensors, so dim=1 concatenates them feature-wise
q = self.net(x)
return q
class AgentDDPG:
def __init__(self, state_dim, action_dim, net_dim):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
"network"
self.act = ActorDPG(state_dim, action_dim, net_dim).to(self.device)
self.act_optimizer = torch.optim.Adam(self.act.parameters(), lr=2e-4)
self.act_target = ActorDPG(state_dim, action_dim, net_dim).to(self.device)
self.act_target.load_state_dict(self.act.state_dict())
self.cri = Critic(state_dim, action_dim, net_dim)
self.cri_optimizer = torch.optim.Adam(self.cri.parameters(), lr=2e-4)
self.cri_target = Critic(state_dim, action_dim, net_dim)
self.cri_target.load_state_dict(self.cri.state_dict())
        self.criterion = nn.MSELoss()  # the critic is trained with an MSE loss
"training record"
self.step = 0
"extensions"
self.ou_noise = OrnsteinUhlenbeckProcess(size=action_dim, sigma=0.3)
"code below are memory"
self.replay_buffer = deque(maxlen=10000)
self.discount_factor = 0.99
def store_transition(self, state, action, reward, next_state):
self.replay_buffer.append((state, action, reward, next_state))
def select_action(self, states, explore_noise=0.0):
states = torch.tensor(states, dtype=torch.float32, device=self.device)
actions = self.act(states, explore_noise).cpu().data.numpy()
return actions
@staticmethod
def soft_target_update(target, source, tau=5e-3):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(tau * param.data + (1.0 - tau) * target_param.data)
def save_or_load_model(self, mod_dir, is_save,times):
act_save_path = '{}/actor{}.pth'.format(mod_dir,times)
cri_save_path = '{}/critic{}.pth'.format(mod_dir,times)
if is_save:
torch.save(self.act.state_dict(), act_save_path)
torch.save(self.cri.state_dict(), cri_save_path)
print("save act and cri:", mod_dir)
elif os.path.exists(act_save_path):
act_dict = torch.load(act_save_path, map_location=lambda storage, loc: storage)
self.act.load_state_dict(act_dict)
self.act_target.load_state_dict(act_dict)
cri_dict = torch.load(cri_save_path, map_location=lambda storage, loc: storage)
self.cri.load_state_dict(cri_dict)
self.cri_target.load_state_dict(cri_dict)
else:
print("FileNotFound when load_model: {}".format(mod_dir))
def update_parameters(self, batch_size):
loss_a_sum = 0.0
loss_c_sum = 0.0
if len(self.replay_buffer) < batch_size:
return
update_times = self.step
for _ in range(update_times):
with torch.no_grad():
replay_batch = random.sample(self.replay_buffer, batch_size)
states = [replay[0] for replay in replay_batch]
states = torch.tensor(states).float()
actions = [replay[1] for replay in replay_batch]
actions = torch.tensor(actions).float()
rewards = [[replay[2]] for replay in replay_batch]
rewards = torch.tensor(rewards).float()
next_states = [replay[3] for replay in replay_batch]
next_states = torch.tensor(next_states).float()
# data processing
next_action = self.act_target(next_states)
next_q_target = self.cri_target(next_states, next_action)
# print("next q target",next_q_target)
# print("rewards",rewards)
q_target = rewards + self.discount_factor * next_q_target
"critic loss"
q_eval = self.cri(states, actions)
# print("q_eval",q_eval)
critic_loss = self.criterion(q_eval, q_target)
loss_c_sum += critic_loss.item()
self.cri_optimizer.zero_grad()
critic_loss.backward()
self.cri_optimizer.step()
"actor loss"
action_cur = self.act(states)
            actor_loss = -self.cri(states, action_cur).mean()  # update the actor with the mean of -Q
            loss_a_sum += actor_loss.item()
self.act_optimizer.zero_grad()
actor_loss.backward()
self.act_optimizer.step()
self.soft_target_update(self.act_target, self.act)
self.soft_target_update(self.cri_target, self.cri)
loss_a_avg = loss_a_sum / update_times
loss_c_avg = loss_c_sum / update_times
return loss_a_avg, loss_c_avg
class OrnsteinUhlenbeckProcess:
def __init__(self, size, theta=0.15, sigma=0.3, x0=0.0, dt=1e-2):
self.theta = theta
self.sigma = sigma
self.x0 = x0
self.dt = dt
self.size = size
def __call__(self):
        noise = self.sigma * np.sqrt(self.dt) * rd.normal(size=self.size)
x = self.x0 - self.theta * self.x0 * self.dt + noise
self.x0 = x
return x
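# Minimal usage sketch for the noise process above (assumed shapes and values; note
# that the training loop below explores via ActorDPG.add_noise and never calls it):
def _example_ou_exploration(action, ou_process):
    # action: np.ndarray with the same size as ou_process; returns the clipped,
    # perturbed action in [-1, 1]
    return np.clip(action + ou_process(), -1.0, 1.0)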
if __name__ == "__main__":
env = doggo_env.VrepDoggo()
episode = 2000
score_list = []
state_dim = 4
action_dim = 4
agent = AgentDDPG(state_dim, action_dim, 32)
for i in range(episode):
env.pre_read()
env.start_new_simulation()
s = env.reset()
score = 0
while True:
a = agent.select_action(s, 0.1)
env.make_log("/Users/ouyangyikang/Downloads/CoppeliaSim_Edu_V4_1_0_Mac/models/logs.txt")
            # write a log entry before moving to the next state
next_s, reward, done, _ = env.step(0.01, a)
agent.store_transition(s, a, reward, next_s)
agent.update_parameters(64)
score += reward
s = next_s
agent.step += 1
if done:
score_list.append(score)
print("episode:", i, "score:", score)
break
if i % 50 == 0:
dir = '/Users/ouyangyikang/Downloads/CoppeliaSim_Edu_V4_1_0_Mac/models'
agent.save_or_load_model(dir,is_save=True, times=i)
# import matplotlib.pyplot as plt
#
# plt.plot(score_list, color='green')
# plt.show()
#
| 36.262911
| 100
| 0.611212
|
794dbd8fd6bd42e6b27c901deb0aafa877641475
| 584
|
py
|
Python
|
blousebrothers/confs/management/commands/clean_conf_images.py
|
sladinji/blousebrothers
|
461de3ba011c0aaed3f0014136c4497b6890d086
|
[
"MIT"
] | 1
|
2022-01-27T11:58:10.000Z
|
2022-01-27T11:58:10.000Z
|
blousebrothers/confs/management/commands/clean_conf_images.py
|
sladinji/blousebrothers
|
461de3ba011c0aaed3f0014136c4497b6890d086
|
[
"MIT"
] | 5
|
2021-03-19T00:01:54.000Z
|
2022-03-11T23:46:21.000Z
|
blousebrothers/confs/management/commands/clean_conf_images.py
|
sladinji/blousebrothers
|
461de3ba011c0aaed3f0014136c4497b6890d086
|
[
"MIT"
] | null | null | null |
from django.core.management.base import BaseCommand
from blousebrothers.confs.models import Conference
class Command(BaseCommand):
    help = 'Check conference images given its conference slug'
def add_arguments(self, parser):
# This is an optional argument
parser.add_argument('slug', nargs='+', type=str)
def handle(self, *args, **options):
print(options["slug"])
obj = Conference.objects.prefetch_related(
"questions__answers",
"questions__images",
).get(slug=options['slug'][0])
obj.check_images()
| 34.352941
| 62
| 0.666096
|
7ba86d8b37e0b4faac555e045f40c36f91a577d9
| 2,364
|
py
|
Python
|
retrieval/biencoder/sbert_scripts/generate_sbert_predictions_biencoder.py
|
viswavi/dataset-recommendation
|
8193e5ad5f4bad25852b565e96d943530d307422
|
[
"Apache-2.0"
] | null | null | null |
retrieval/biencoder/sbert_scripts/generate_sbert_predictions_biencoder.py
|
viswavi/dataset-recommendation
|
8193e5ad5f4bad25852b565e96d943530d307422
|
[
"Apache-2.0"
] | null | null | null |
retrieval/biencoder/sbert_scripts/generate_sbert_predictions_biencoder.py
|
viswavi/dataset-recommendation
|
8193e5ad5f4bad25852b565e96d943530d307422
|
[
"Apache-2.0"
] | null | null | null |
'''
python sbert_scripts/generate_sbert_predictions_biencoder.py \
--model-directory sbert_models/bert_hard_negatives\
--search-collection dataset_search_collection.jsonl \
--test-queries tevatron_data/test_queries.jsonl \
--output-file sbert_models/bert_hard_negatives/sbert.trec \
--results-limit 5
'''
import argparse
import jsonlines
import numpy as np
import os
import sys
import faiss
from sentence_transformers import SentenceTransformer
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from generate_knn_results import knn_search, write_hits_to_tsv
from prepare_tevatron_data import format_search_text
parser = argparse.ArgumentParser()
parser.add_argument('--model-directory', type=str, default="sbert_models/bert_hard_negatives")
parser.add_argument("--search-collection", type=str, default="dataset_search_collection.jsonl", help="Test collection of queries and documents")
parser.add_argument('--test-queries', type=str, default="test_queries.jsonl", help="List of newline-delimited queries")
parser.add_argument('--output-file', type=str, default="sbert_models/bert_hard_negatives/sbert.trec", help="Retrieval file, in TREC format")
parser.add_argument('--results-limit', type=int, default=5)
def construct_search_index(search_collection, model):
dataset_texts = []
dataset_ids = []
for dataset_row in jsonlines.open(search_collection):
dataset_texts.append(format_search_text(dataset_row))
dataset_ids.append(dataset_row["id"])
dataset_encodings = model.encode(dataset_texts)
vectors = np.array(dataset_encodings, dtype=np.float32)
index = faiss.IndexFlatL2(vectors.shape[1])
# index = faiss.GpuIndexFlatL2(vectors.shape[0])
index.add(vectors)
return index, dataset_ids
if __name__ == "__main__":
args = parser.parse_args()
model = SentenceTransformer(args.model_directory)
query_texts = []
for row in jsonlines.open(args.test_queries):
query_texts.append(row["text"])
query_encodings = model.encode(query_texts)
faiss_index, dataset_ids = construct_search_index(args.search_collection, model)
knn_distances, knn_indices = faiss_index.search(query_encodings, args.results_limit)
all_hits = knn_search(knn_distances, knn_indices, dataset_ids)
write_hits_to_tsv(args.output_file, all_hits, query_texts, args.results_limit)
| 41.473684
| 144
| 0.77242
|
58e0377456773f63ec9668c7d5f74eb5ab13b1e6
| 1,122
|
py
|
Python
|
test/optim/test.py
|
wxwoods/mctorch
|
7cd6eb51fdd01fa75ed9245039a4f145ba342de2
|
[
"BSD-3-Clause"
] | 1
|
2019-07-23T11:20:58.000Z
|
2019-07-23T11:20:58.000Z
|
test/optim/test.py
|
wxwoods/mctorch
|
7cd6eb51fdd01fa75ed9245039a4f145ba342de2
|
[
"BSD-3-Clause"
] | null | null | null |
test/optim/test.py
|
wxwoods/mctorch
|
7cd6eb51fdd01fa75ed9245039a4f145ba342de2
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import torch
import torch.legacy.optim as optim
def rosenbrock(tensor):
x, y = tensor
return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2
def drosenbrock(tensor):
x, y = tensor
return torch.DoubleTensor((-400 * x * (y - x ** 2) - 2 * (1 - x), 200 * (y - x ** 2)))
algorithms = {
'adadelta': optim.adadelta,
'adagrad': optim.adagrad,
'adam': optim.adam,
'adamax': optim.adamax,
'asgd': optim.asgd,
'cg': optim.cg,
'nag': optim.nag,
'rmsprop': optim.rmsprop,
'rprop': optim.rprop,
'sgd': optim.sgd,
'lbfgs': optim.lbfgs,
}
with open('tests.json', 'r') as f:
tests = json.loads(f.read())
for test in tests:
print(test['algorithm'] + '\t')
algorithm = algorithms[test['algorithm']]
for config in test['config']:
print('================================================================================\t')
params = torch.DoubleTensor((1.5, 1.5))
for i in range(100):
algorithm(lambda x: (rosenbrock(x), drosenbrock(x)), params, config)
print('{:.8f}\t{:.8f}\t'.format(params[0], params[1]))
| 27.365854
| 99
| 0.524064
|
a693d1b5b602c466a43d111ceb99a4bed86dea0e
| 6,142
|
py
|
Python
|
tests/test_dataset.py
|
whn09/deepar
|
7430057eb2de8c0f9b48983123e646fadadd92ce
|
[
"MIT"
] | null | null | null |
tests/test_dataset.py
|
whn09/deepar
|
7430057eb2de8c0f9b48983123e646fadadd92ce
|
[
"MIT"
] | null | null | null |
tests/test_dataset.py
|
whn09/deepar
|
7430057eb2de8c0f9b48983123e646fadadd92ce
|
[
"MIT"
] | null | null | null |
import pandas as pd
import unittest
from deepar.dataset.time_series import MockTs, TimeSeries
class TestRecurrentTs(unittest.TestCase):
def setUp(self):
self.data_to_pad = pd.DataFrame({'feature_1': [i for i in range(6)],
'feature_2': [i for i in range(6)],
'target': [i for i in range(6)]})
# print(self.data_to_pad)
self.input_data = pd.DataFrame({'feature_1': [i for i in range(100)],
'feature_2': [i for i in range(100)],
'target': [i for i in range(100)],
'category': [str(int(i//10 + 1)) for i in range(100)]})
# print(self.input_data)
self.data_to_pad_with_categorical = pd.DataFrame({'one_hot_yes': [1, 1, 1, 1, 1, 1],
'feature_2': [i for i in range(6)],
'one_hot_no': [0, 0, 0, 0, 0, 0],
'target': [i for i in range(6)]})
self.data_to_pad_with_multiple_categorical = pd.DataFrame({'one_hot_yes': [1, 1, 1, 1, 1, 1],
'feature_2': [i for i in range(6)],
'one_hot_no': [0, 0, 0, 0, 0, 0],
'other_no': [0, 0, 0, 0, 0, 0],
'other_yes': [1, 1, 1, 1, 1, 1],
'target': [i for i in range(6)]})
def test_len_padding(self):
rec_instance = TimeSeries(pandas_df=self.data_to_pad)
results = rec_instance._pad_ts(pandas_df=self.data_to_pad,
desired_len=10)
self.assertEqual(results.shape[0], 10)
def test_zero_len_padding(self):
rec_instance = TimeSeries(pandas_df=self.data_to_pad)
results = rec_instance._pad_ts(pandas_df=self.data_to_pad,
desired_len=6) # len is the same as the original time series
self.assertEqual(results.shape[0], 6)
def test_next_batch_production(self):
rec_ts = TimeSeries(self.input_data)
X_feature_space, y_target = rec_ts.next_batch(batch_size=4, n_steps=10)
self.assertEqual(len(X_feature_space), 4)
self.assertEqual(len(X_feature_space[0]), 10)
self.assertEqual(len(X_feature_space[0][0]), 2)
self.assertEqual(X_feature_space[3][0][0], y_target[3][0][0])
def test_padding_with_one_hot(self):
rec_ts = TimeSeries(pandas_df=self.data_to_pad_with_categorical,
one_hot_root_list=['one_hot'])
results = rec_ts._pad_ts(pandas_df=self.data_to_pad_with_categorical,
desired_len=10)
self.assertEqual(results.shape[0], 10)
self.assertEqual(results.one_hot_yes.values[0], 1)
self.assertEqual(results.one_hot_no.values[0], 0)
def test_padding_with_one_hot_multiple(self):
rec_ts = TimeSeries(pandas_df=self.data_to_pad_with_categorical,
one_hot_root_list=['one_hot', 'other'])
results = rec_ts._pad_ts(pandas_df=self.data_to_pad_with_multiple_categorical,
desired_len=10)
self.assertEqual(results.shape[0], 10)
self.assertEqual(results.one_hot_yes.values[0], 1)
self.assertEqual(results.one_hot_no.values[0], 0)
self.assertEqual(results.other_yes.values[0], 1)
self.assertEqual(results.other_no.values[0], 0)
def test_next_batch_covariates(self):
"""
Feature space is supplied in input if target_only is False (no need to lag y dataset)
"""
rec_ts = TimeSeries(self.input_data)
X_feature_space, y_target = rec_ts.next_batch(batch_size=1, n_steps=10)
print('X_feature_space:', X_feature_space.shape, X_feature_space)
print('y_target:', y_target.shape, y_target)
self.assertEqual(len(X_feature_space), 1)
self.assertEqual(len(X_feature_space[0][0]), 2)
def test_next_batch_covariates_2(self):
"""
Feature space is supplied in input if target_only is False (no need to lag y dataset)
"""
rec_ts = TimeSeries(self.input_data)
X_feature_space, y_target = rec_ts.next_batch(batch_size=2, n_steps=10)
print('X_feature_space:', X_feature_space.shape, X_feature_space)
print('y_target:', y_target.shape, y_target)
self.assertEqual(len(X_feature_space), 2)
self.assertEqual(len(X_feature_space[0][0]), 2)
def test_next_batch_covariates_3(self):
"""
Feature space is supplied in input if target_only is False (no need to lag y dataset)
"""
rec_ts = TimeSeries(self.input_data)
X_feature_space, y_target = rec_ts.next_batch(batch_size=2, n_steps=20)
print('X_feature_space:', X_feature_space.shape, X_feature_space)
print('y_target:', y_target.shape, y_target)
self.assertEqual(len(X_feature_space), 2)
self.assertEqual(len(X_feature_space[0][0]), 2)
def test_sample_ts(self):
"""
When the length of the pandas df is longer than required length the function should sample
from the time series and return that sample
"""
rec_instance = TimeSeries(pandas_df=self.data_to_pad)
results = rec_instance._sample_ts(pandas_df=self.data_to_pad,
desired_len=3)
self.assertEqual(results.shape[0], 3)
def test_mockts(self):
ts = MockTs()
batch = ts.next_batch(1, 20)
print('batch:', batch[0].shape, batch[1].shape)
print(batch)
test_data = ts.generate_test_data(20)
print('test_data:', len(test_data))
print(test_data)
if __name__ == '__main__':
unittest.main()
| 47.246154
| 102
| 0.569196
|
2a7873e0d060947c9b7b9499ca082294a5dfe49b
| 1,297
|
py
|
Python
|
setup.py
|
adeo/iwc-tfc-client
|
f2606d8d6f6d5499e41553abb53594ca830396e5
|
[
"MIT"
] | 9
|
2019-11-18T13:38:10.000Z
|
2021-09-24T21:59:10.000Z
|
setup.py
|
adeo/iwc-tfc-client
|
f2606d8d6f6d5499e41553abb53594ca830396e5
|
[
"MIT"
] | 10
|
2019-11-10T23:46:54.000Z
|
2022-03-30T15:46:56.000Z
|
setup.py
|
adeo/iwc-tfc-client
|
f2606d8d6f6d5499e41553abb53594ca830396e5
|
[
"MIT"
] | 4
|
2019-11-18T14:06:04.000Z
|
2021-11-09T15:42:44.000Z
|
import setuptools
from tfc_client import __version__
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="tfc_client",
version=__version__,
author="Alexandre Dath for ADEO",
author_email="alex.dath@gmail.com",
license="MIT",
keywords="API Terraform TFC",
description="A developer friendly Terraform Cloud API client",
long_description_content_type="text/markdown",
long_description=long_description,
url="https://github.com/adeo/iwc-tfc-client",
packages=setuptools.find_packages(),
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Libraries",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.7",
extras_require={
"dev": ["black", "twine", "wheel"],
"test": ["pytest", "coverage", "pytest-cov"],
},
tests_require=["pytest", "pytest-cov"],
install_requires=[
"requests",
"pydantic>=0.32.2",
"pydantic[email]",
"email-validator>=1.0.3",
"idna>=2.0.0",
"dnspython>=1.15.0",
"inflection",
],
)
| 28.822222
| 66
| 0.607556
|
1c0fa696533ffa9356f96de6451ad369c0cc5bec
| 37,870
|
py
|
Python
|
src/webpy1/src/spider/ganji.py
|
ptphp/PyLib
|
07ac99cf2deb725475f5771b123b9ea1375f5e65
|
[
"Apache-2.0"
] | 1
|
2020-02-17T08:18:29.000Z
|
2020-02-17T08:18:29.000Z
|
src/webpy1/src/spider/ganji.py
|
ptphp/PyLib
|
07ac99cf2deb725475f5771b123b9ea1375f5e65
|
[
"Apache-2.0"
] | null | null | null |
src/webpy1/src/spider/ganji.py
|
ptphp/PyLib
|
07ac99cf2deb725475f5771b123b9ea1375f5e65
|
[
"Apache-2.0"
] | null | null | null |
#coding=UTF-8
'''
Created on 2011-7-6
@author: Administrator
'''
from urlparse import urlparse
import cookielib
from pyquery.pyquery import PyQuery #@UnresolvedImport
import re
import datetime #@UnusedImport
import urllib2
from lxml import etree #@UnresolvedImport
from lxml.cssselect import CSSSelector #@UnresolvedImport
import simplejson as js #@UnusedImport @UnresolvedImport
from config import housetype, checkPath, makePath,fitment,toward,deposit
import threading
from BeautifulSoup import BeautifulSoup #@UnresolvedImport
from spider.globalvars import fetch_quere
import time
import gc
homepath="e:\\home\\spider\\"
class LinkCrawl(object):
def __init__(self,citycode="",kind=""):
cj = cookielib.MozillaCookieJar()
self.br=urllib2.build_opener(urllib2.HTTPHandler(),urllib2.HTTPCookieProcessor(cj),urllib2.HTTPRedirectHandler())
self.header={
"User-Agent":'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; GTB6.6; .NET CLR 3.5.30729)',
}
self.endtime=str(datetime.date.today() -datetime.timedelta(days=7))
self.clinks=[]
self.pn=[]
self.citycode=citycode
self.baseUrl="http://%s.ganji.com"%self.citycode
self.kind=kind
if kind=="1":#ๅบๅฎ
self.urlpath="/fang5/a1u2%s/"
self.folder="sell\\"
elif kind=="2":#ๅบ็ง
self.urlpath="/fang1/u2%s/"
self.folder="rent\\"
elif kind=="3":#ๆฑ่ดญ
self.urlpath="/fang4/u2f0/a1%s/"
self.folder="buy\\"
elif kind=="4":#ๆฑ็ง
self.urlpath="/fang2/u2f0/a1%s/"
self.folder="req\\"
def __getAllNeedLinks(self):
cond=True
idx=0
checkit="0"
while cond:
url=self.baseUrl+self.urlpath%("f"+str(idx*32))
#url="http://gz.ganji.com/fang2/u2f0/a1f768/"
print url
try:
req=urllib2.Request(url, None, self.header)
p=self.br.open(req).read()
except:
continue
else:
check=PyQuery(p)("ul.pageLink li a.c").text()
if check==None or check==checkit:
cond=False
break
else:
checkit=check
links=PyQuery(p)("div.list dl")
p=None
print len(links)
for link in links:
lk=self.baseUrl+PyQuery(link)(" a.list_title").attr("href")
if self.kind=="3" or self.kind=="4":
tm=PyQuery(link)("dd span.time").text()
if re.match('''\d{2}-\d{2}''', tm):
Y=int(time.strftime('%Y', time.localtime()))
tm="%s-%s"%(Y,tm.strip())
if tm<self.endtime:
break
elif "ๅ้" in tm:
pass
elif "ๅฐๆถ" in tm:
pass
else:
cond=False
break
if not checkPath(homepath,self.folder,lk):
fetch_quere.put({"mod":"ganji","link":lk,"citycode":self.citycode,"kind":self.kind})
# if lk not in self.clinks:
# self.clinks.append(lk)
idx=idx+1
print len(self.clinks)
def runme(self):
#self.__initPageNum()
self.__getAllNeedLinks()
class ContentCrawl(object):
def __init__(self,links,citycode,kind):
cj = cookielib.MozillaCookieJar()
self.br=urllib2.build_opener(urllib2.HTTPHandler(),urllib2.HTTPCookieProcessor(cj),urllib2.HTTPRedirectHandler())
self.pdb={}
self.header={
"User-Agent":'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; GTB6.6; .NET CLR 3.5.30729)',
}
self.urls=links
self.kind=kind
self.fd={}
self.citycode=citycode
if kind=="1":
self.folder="sell\\"
elif kind=="2":
self.folder="rent\\"
elif kind=="3":
self.folder="buy\\"
else:
self.folder="req\\"
#js resgx
self.xiaoqu_regex="xiaoqu : '(.*?)',"
self.address_regex="address : '(.*?)',"
        self.house_room_regex="(\d+)室"
        self.house_hall_regex="(\d+)厅"
        self.house_toilet_regex="(\d+)卫"
        self.house_desc_regex="房屋概况</p>(.*?)</p>"
        self.house_floor_regex="<li>楼层: 第(\d+)层/总(\d+)层</li>"
        self.house_totalarea_regex="<li>面积: (\d+) ㎡</li>"
        self.house_totalarea_regex_qiu="(\d+)㎡"
        self.house_type_regex3="<li>户型: (.*)</li>"
        self.house_toward_regex="<li>朝向: (.*)</li>"
        self.house_type_regex="<li>类型: (.*)</li>"
        self.cityarea_regex="<li>区域:([\s\S]*?)</li>"
        self.house_age_regex="<li>房龄: (\d+) 年</li>"
        self.house_fitment_regex="<li>装修: (.*)</li>"
        self.house_support_regex="<li>配置: (.*) </li>"
        self.house_price_regex="<li>售价: <span>(.*)</span>.*</li>"
        self.house_price_regex_2="<li>租金: <span>(.*)</span>.*</li>"
        self.borough_name_regex="<li>小区:(.*)</li>"
        self.house_deposit_regex="<li>租金: (.*)</li>"
        self.house_price_regex_zu = "<li>期望租金: (.*)</li>"
        self.borough_name_regex_reg = "<li>期望小区: (.*)</li>"
        self.house_addr_regex_reg = "<li>小区地址:(.*)</li>"
        self.house_price_regex_gou = "<li>期望售价: (.*)</li>"
def __addText(self,tag, no_tail=False):
text = []
if tag.text:
text.append(tag.text)
for child in tag.getchildren():
text.append(self.__addText(child))
if not no_tail and tag.tail:
text.append(tag.tail)
return "".join(text)
def getText(self,html):
text=[]
for tag in html:
text.append(self.__addText(tag, no_tail=True))
return ' '.join([t.strip() for t in text if t.strip()])
def sell(self,url):
request = urllib2.Request(url, None, self.header)
response = urllib2.urlopen(request).read()
tree = etree.HTML(response)
soup =BeautifulSoup(response)
self.fd['house_flag'] = 1
self.fd['belong']=0
detail_mer = soup.find('div',{'class':'detail_mer'})
        # not an individual (owner) listing: return
        if u"个人房源" not in str(detail_mer):return
Dname = detail_mer.find('span',{'class':'Dname'})
if Dname:
self.fd['owner_name'] = Dname.string
else:
self.fd['owner_name'] = None
ganji_phone_call_class = detail_mer.find('span',{'class':'ganji_phone_call_class'})
if ganji_phone_call_class:
self.fd['owner_phone'] = ganji_phone_call_class.contents[0]
if str(ganji_phone_call_class).find('src='):
self.fd['owner_phone'] = 'http://'+urlparse(url)[1]+ganji_phone_call_class.img['src']
else:
self.fd['owner_phone'] = None
else:
self.fd['owner_phone'] = None
        # no contact info: return
if not self.fd['owner_phone']:return
if re.search("<span class=\"city\"><a .*?>(.*?)</a>", response):
cityname=re.search("<span class=\"city\"><a .*?>(.*?)</a>", response).group(1)
self.fd['cityname'] = cityname
else:
return
if re.search(self.house_floor_regex, response):
house_floor=re.search(self.house_floor_regex, response).group(1)
house_topfloor=re.search(self.house_floor_regex, response).group(2)
self.fd['house_floor'] = house_floor
self.fd['house_topfloor'] = house_topfloor
else:
self.fd['house_floor'] = None
self.fd['house_topfloor'] = None
if re.search(self.house_totalarea_regex, response):
house_totalarea=re.search(self.house_totalarea_regex, response).group(1)
self.fd['house_totalarea'] = house_totalarea
else:
self.fd['house_totalarea'] = None
        # type
if re.search(self.house_type_regex, response):
house_type=re.search(self.house_type_regex, response).group(1)
self.fd['house_type'] = housetype(house_type)
else:
self.fd['house_type'] = None
if re.search(self.house_price_regex, response):
house_price=re.search(self.house_price_regex, response).group(1)
if house_price=="้ข่ฎฎ":
house_price="0"
self.fd['house_price'] = house_price
else:
self.fd['house_price'] = None
posttime=CSSSelector('span.pub_time')(tree)!=None and CSSSelector('span.pub_time')(tree)[0].text.strip() or None
if posttime:
Y=int(time.strftime('%Y', time.localtime()))
M=int(posttime.split(' ')[0].split('-')[0])
D=int(posttime.split(' ')[0].split('-')[1])
s = datetime.datetime(Y,M,D,0,0)
posttime=int(time.mktime(s.timetuple()))
self.fd['posttime'] =posttime
else:
self.fd['posttime'] =None
if re.search(self.house_room_regex, response):
house_room=re.search(self.house_room_regex, response).group(1)
self.fd['house_room'] = house_room
else:
self.fd['house_room'] = '0'
if re.search(self.house_hall_regex, response):
house_hall=re.search(self.house_hall_regex, response).group(1)
self.fd['house_hall'] = house_hall
else:
self.fd['house_hall'] = '0'
if re.search(self.house_toilet_regex, response):
house_toilet=re.search(self.house_toilet_regex, response).group(1)
self.fd['house_toilet'] = house_toilet
else:
self.fd['house_toilet'] = '0'
house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
self.fd['house_title'] = house_title
        # description
detail_box = soup.find('div',{'class':'detail_box'})
if detail_box:
house_desc = str(detail_box('p')[1])
            self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时请说明是从赶集网上看到的","",house_desc)
else:
self.fd['house_desc'] = None
d_i = soup.find('ul',{'class':'d_i'})
        # community (borough) name
        # handle the JS-embedded fields first
if re.search(self.xiaoqu_regex, response):
borough_name=re.search(self.xiaoqu_regex, response).group(1)
self.fd['borough_name'] = borough_name
if re.search(self.address_regex, response):
house_addr=re.search(self.address_regex, response).group(1)
self.fd['house_addr'] = house_addr
else:
if d_i.find(text="ๅฐๅบ: "):
borough_box = d_i.find(text="ๅฐๅบ: ").parent
borough_name = borough_box.find("a")
if borough_name:
self.fd['borough_name'] = borough_name.string
else:
self.fd['borough_name'] = None
                # address
if borough_name and borough_name.nextSibling:
house_addr = borough_name.nextSibling.string
self.fd['house_addr'] = re.sub("\(|\)| ","",house_addr)
else:
self.fd['house_addr'] = None
else:
if re.search(self.borough_name_regex, response):
borough_name=re.search(self.borough_name_regex, response).group(1)
self.fd['borough_name'] = re.sub("\(.*\)| ","",borough_name)
        # district
        area_box = d_i.find(text="区域: ").parent
area_a = area_box('a')
if area_a and len(area_a)>1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = area_a[1].string
elif area_a and len(area_a)==1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = None
else:
self.fd['cityarea'] = None
self.fd['section'] = None
if re.search(self.house_age_regex, response):
house_age=re.search(self.house_age_regex, response).group(1)
self.fd['house_age'] = house_age
else:
self.fd['house_age'] = None
        # orientation
if re.search(self.house_toward_regex, response):
house_toward=re.search(self.house_toward_regex, response).group(1)
self.fd['house_toward'] = toward(house_toward)
else:
self.fd['house_toward'] = None
if re.search(self.house_fitment_regex, response):
house_fitment=re.search(self.house_fitment_regex, response).group(1)
self.fd['house_fitment'] = fitment(house_fitment)
else:
self.fd['house_fitment'] = 2
request = None
response = None
soup=None
tree=None
del tree
del request
del response
del soup
def buy(self,url):
self.fd['city'] = self.citycode
self.fd['house_flag'] = 3
# self.fd['belong']="1"
request = urllib2.Request(url, None, self.header)
response = urllib2.urlopen(request).read()
tree = etree.HTML(response)
soup =BeautifulSoup(response)
detail_mer = soup.find('div',{'class':'detail_mer'})
        # not an individual (owner) listing: return
        if u"个人房源" not in str(detail_mer):return
Dname = detail_mer.find('span',{'class':'Dname'})
if Dname:
self.fd['owner_name'] = Dname.string
else:
self.fd['owner_name'] = None
ganji_phone_call_class = detail_mer.find('span',{'class':'ganji_phone_call_class'})
if ganji_phone_call_class:
self.fd['owner_phone'] = ganji_phone_call_class.contents[0]
if str(ganji_phone_call_class).find('src='):
self.fd['owner_phone'] = 'http://'+urlparse(url)[1]+ganji_phone_call_class.img['src']
else:
self.fd['owner_phone'] = None
else:
self.fd['owner_phone'] = None
        # no contact info: return
if not self.fd['owner_phone']:return
if re.search("<span class=\"city\"><a .*?>(.*?)</a>", response):
cityname=re.search("<span class=\"city\"><a .*?>(.*?)</a>", response).group(1)
self.fd['cityname'] = cityname
else:
return
self.fd['house_floor'] = 0
self.fd['house_topfloor'] = 0
self.fd['house_type'] = 0
self.fd['house_age'] = 0
self.fd['house_toward'] = 0
self.fd['house_fitment'] = 0
if re.search(self.house_totalarea_regex_qiu, response):
house_totalarea=re.search(self.house_totalarea_regex_qiu, response).group(1)
self.fd['house_totalarea'] = house_totalarea
self.fd['house_totalarea_max'] = house_totalarea
self.fd['house_totalarea_min'] = house_totalarea
else:
self.fd['house_totalarea'] = 0
self.fd['house_totalarea_max'] = 0
self.fd['house_totalarea_min'] = 0
if re.search(self.house_price_regex_gou, response):
house_price_zu = re.search(self.house_price_regex_gou, response).group(1)
            house_price_zu = house_price_zu.replace('万','')
            if house_price_zu.find("以上") != -1:
                self.fd['house_price_max'] = 0
                self.fd['house_price_min'] = house_price_zu.replace('以上','')
                self.fd['house_price'] = self.fd['house_price_min']
            elif house_price_zu.find("以下") != -1:
                self.fd['house_price_max'] = house_price_zu.replace('以下','')
                self.fd['house_price_min'] = 0
                self.fd['house_price'] = self.fd['house_price_max']
elif house_price_zu.find("-") != -1:
self.fd['house_price_max'] = house_price_zu.split('-')[1]
self.fd['house_price_min'] = house_price_zu.split('-')[0]
self.fd['house_price'] = house_price_zu.split('-')[1]
else:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = 0
self.fd['house_price'] = 0
else:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = 0
self.fd['house_price'] = 0
posttime=CSSSelector('span.pub_time')(tree)!=None and CSSSelector('span.pub_time')(tree)[0].text.strip() or None
if posttime:
Y=int(time.strftime('%Y', time.localtime()))
M=int(posttime.split(' ')[0].split('-')[0])
D=int(posttime.split(' ')[0].split('-')[1])
s = datetime.datetime(Y,M,D,0,0)
posttime=int(time.mktime(s.timetuple()))
self.fd['posttime'] =posttime
else:
self.fd['posttime'] =None
if re.search(self.house_room_regex, response):
house_room=re.search(self.house_room_regex, response).group(1)
self.fd['house_room'] = house_room
else:
self.fd['house_room'] = '0'
if re.search(self.house_hall_regex, response):
house_hall=re.search(self.house_hall_regex, response).group(1)
self.fd['house_hall'] = house_hall
else:
self.fd['house_hall'] = '0'
if re.search(self.house_toilet_regex, response):
house_toilet=re.search(self.house_toilet_regex, response).group(1)
self.fd['house_toilet'] = house_toilet
else:
self.fd['house_toilet'] = '0'
house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
self.fd['house_title'] = house_title
        # description
detail_box = soup.find('div',{'class':'detail_box'})
if detail_box:
house_desc = str(detail_box('p')[1])
            self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时请说明是从赶集网上看到的","",house_desc)
else:
self.fd['house_desc'] = None
d_i = soup.find('ul',{'class':'d_i'})
        # community (borough) name
        # handle the JS-embedded fields first
if re.search(self.xiaoqu_regex, response):
borough_name=re.search(self.xiaoqu_regex, response).group(1)
self.fd['borough_name'] = borough_name
if re.search(self.address_regex, response):
house_addr=re.search(self.address_regex, response).group(1)
self.fd['house_addr'] = house_addr
else:
if d_i.find(text="ๅฐๅบ: "):
borough_box = d_i.find(text="ๅฐๅบ: ").parent
borough_name = borough_box.find("a")
if borough_name:
self.fd['borough_name'] = borough_name.string
else:
self.fd['borough_name'] = None
else:
if re.search(self.borough_name_regex_reg, response):
borough_name=re.search(self.borough_name_regex_reg, response).group(1)
self.fd['borough_name'] = borough_name
if re.search(self.house_addr_regex_reg, response):
house_addr=re.search(self.house_addr_regex_reg, response).group(1)
self.fd['house_addr'] = house_addr
else:
self.fd['house_addr'] = ''
        # district
        area_box = d_i.find(text="区域: ").parent
area_a = area_box('a')
if area_a and len(area_a)>1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = area_a[1].string
elif area_a and len(area_a)==1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = None
else:
self.fd['cityarea'] = None
self.fd['section'] = None
request = None
response = None
soup=None
tree=None
del tree
del request
del response
del soup
def rent(self,url):
self.fd['city'] = urlparse(url)[1].replace('.ganji.com',"")
request = urllib2.Request(url, None, self.header)
response = urllib2.urlopen(request).read()
tree = etree.HTML(response)
if re.search("<span class=\"city\"><a .*?>(.*?)</a>", response):
cityname=re.search("<span class=\"city\"><a .*?>(.*?)</a>", response).group(1)
self.fd['cityname'] = cityname
else:
return
self.fd['house_flag'] = 2
self.fd['house_type'] = 0
self.fd['house_floor'] = ""
self.fd['house_topfloor'] = ""
soup =BeautifulSoup(response)
detail_mer = soup.find('div',{'class':'detail_mer'})
        # not an individual (owner) listing: return
        if u"个人房源" not in str(detail_mer):return
Dname = detail_mer.find('span',{'class':'Dname'})
if Dname:
self.fd['owner_name'] = Dname.string
else:
self.fd['owner_name'] = None
ganji_phone_call_class = detail_mer.find('span',{'class':'ganji_phone_call_class'})
if ganji_phone_call_class:
self.fd['owner_phone'] = ganji_phone_call_class.contents[0]
if str(ganji_phone_call_class).find('src='):
self.fd['owner_phone'] = 'http://'+urlparse(url)[1]+ganji_phone_call_class.img['src']
else:
self.fd['owner_phone'] = None
else:
self.fd['owner_phone'] = None
        # no contact info: return
if not self.fd['owner_phone']:return
if re.search(self.house_totalarea_regex, response):
house_totalarea=re.search(self.house_totalarea_regex, response).group(1)
self.fd['house_totalarea'] = house_totalarea
else:
self.fd['house_totalarea'] = None
if re.search(self.house_price_regex_2, response):
house_price=re.search(self.house_price_regex_2, response).group(1)
if house_price=="้ข่ฎฎ":
house_price="0"
self.fd['house_price'] = house_price
else:
self.fd['house_price'] = None
# house_price=tree.xpath("/html/body/div[2]/div/div/ul/li/span") and tree.xpath("/html/body/div[2]/div/div/ul/li/span")[0].text.strip() or None
# v['house_price'] = house_price
posttime=CSSSelector('span.pub_time')(tree)!=None and CSSSelector('span.pub_time')(tree)[0].text.strip() or None
if posttime:
Y=int(time.strftime('%Y', time.localtime()))
M=int(posttime.split(' ')[0].split('-')[0])
D=int(posttime.split(' ')[0].split('-')[1])
s = datetime.datetime(Y,M,D,0,0)
posttime=int(time.mktime(s.timetuple()))
self.fd['posttime'] =posttime
else:
self.fd['posttime'] =None
house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
self.fd['house_title'] = house_title
if re.search(self.house_room_regex, response):
house_room=re.search(self.house_room_regex, response).group(1)
self.fd['house_room'] = house_room
else:
self.fd['house_room'] = '0'
if re.search(self.house_hall_regex, response):
house_hall=re.search(self.house_hall_regex, response).group(1)
self.fd['house_hall'] = house_hall
else:
self.fd['house_hall'] = '0'
if re.search(self.house_toilet_regex, response):
house_toilet=re.search(self.house_toilet_regex, response).group(1)
self.fd['house_toilet'] = house_toilet
else:
self.fd['house_toilet'] = '0'
house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
self.fd['house_title'] = house_title
        # description
detail_box = soup.find('div',{'class':'detail_box'})
if detail_box:
house_desc = str(detail_box('p')[1])
            self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时请说明是从赶集网上看到的","",house_desc)
else:
self.fd['house_desc'] = None
d_i = soup.find('ul',{'class':'d_i'})
        # community (borough) name
        # handle the JS-embedded fields first
if re.search(self.xiaoqu_regex, response):
borough_name=re.search(self.xiaoqu_regex, response).group(1)
self.fd['borough_name'] = borough_name
if re.search(self.address_regex, response):
house_addr=re.search(self.address_regex, response).group(1)
self.fd['house_addr'] = house_addr
else:
if d_i.find(text="ๅฐๅบ: "):
borough_box = d_i.find(text="ๅฐๅบ: ").parent
borough_name = borough_box.find("a")
if borough_name:
self.fd['borough_name'] = borough_name.string
else:
self.fd['borough_name'] = None
                # address
if borough_name and borough_name.nextSibling:
house_addr = borough_name.nextSibling.string
self.fd['house_addr'] = re.sub("\(|\)| ","",house_addr)
else:
self.fd['house_addr'] = None
else:
if re.search(self.borough_name_regex, response):
borough_name=re.search(self.borough_name_regex, response).group(1)
self.fd['borough_name'] = re.sub("\(.*\)| ","",borough_name)
        # district
        area_box = d_i.find(text="区域: ").parent
area_a = area_box('a')
if area_a and len(area_a)>1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = area_a[1].string
elif area_a and len(area_a)==1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = None
else:
self.fd['cityarea'] = None
self.fd['section'] = None
if re.search(self.house_age_regex, response):
house_age=re.search(self.house_age_regex, response).group(1)
self.fd['house_age'] = house_age
else:
self.fd['house_age'] = None
        # orientation
if re.search(self.house_toward_regex, response):
house_toward=re.search(self.house_toward_regex, response).group(1)
self.fd['house_toward'] = toward(house_toward)
else:
self.fd['house_toward'] = None
if re.search(self.house_fitment_regex, response):
house_fitment=re.search(self.house_fitment_regex, response).group(1)
self.fd['house_fitment'] = fitment(house_fitment)
else:
self.fd['house_fitment'] = 2
if re.search(self.house_deposit_regex, response):
house_deposit=re.search(self.house_deposit_regex, response).group(1)
self.fd['house_deposit'] = deposit(house_deposit)
else:
self.fd['house_deposit'] = None
request = None
response = None
soup=None
tree=None
del tree
del request
del response
del soup
def require(self,url):
self.fd['city'] = urlparse(url)[1].replace('.ganji.com',"")
request = urllib2.Request(url, None, self.header)
response = urllib2.urlopen(request).read()
tree = etree.HTML(response)
if re.search("<span class=\"city\"><a .*?>(.*?)</a>", response):
cityname=re.search("<span class=\"city\"><a .*?>(.*?)</a>", response).group(1)
self.fd['cityname'] = cityname
else:
return
self.fd['house_flag'] = 4
self.fd['house_type'] = 0
self.fd['house_floor'] = ""
self.fd['house_topfloor'] = ""
self.fd['house_totalarea']=0
self.fd['house_age'] = 0
self.fd['house_toward'] = 0
self.fd['house_fitment'] = 0
self.fd['house_deposit'] = 0
self.fd['house_totalarea_max'] = 0
self.fd['house_totalarea_min'] = 0
self.fd['house_totalarea'] = 0
soup =BeautifulSoup(response)
detail_mer = soup.find('div',{'class':'detail_mer'})
        # not an individual (owner) listing: return
        if u"个人房源" not in str(detail_mer):return
Dname = detail_mer.find('span',{'class':'Dname'})
if Dname:
self.fd['owner_name'] = Dname.string
else:
self.fd['owner_name'] = None
ganji_phone_call_class = detail_mer.find('span',{'class':'ganji_phone_call_class'})
if ganji_phone_call_class:
self.fd['owner_phone'] = ganji_phone_call_class.contents[0]
if str(ganji_phone_call_class).find('src='):
self.fd['owner_phone'] = 'http://'+urlparse(url)[1]+ganji_phone_call_class.img['src']
else:
self.fd['owner_phone'] = None
else:
self.fd['owner_phone'] = None
        # no contact info: return
if not self.fd['owner_phone']:return
if re.search(self.house_price_regex_zu, response):
house_price_zu = re.search(self.house_price_regex_zu, response).group(1)
            house_price_zu = house_price_zu.replace('元/月','')
            if house_price_zu.find("以上") != -1:
                self.fd['house_price_max'] = 0
                self.fd['house_price_min'] = house_price_zu.replace('以上','')
                self.fd['house_price'] = house_price_zu.replace('以上','')
            elif house_price_zu.find("以下") != -1:
                self.fd['house_price_max'] = house_price_zu.replace('以下','')
                self.fd['house_price_min'] = 0
                self.fd['house_price'] = house_price_zu.replace('以下','')
elif house_price_zu.find("-") != -1:
self.fd['house_price_max'] = house_price_zu.split('-')[1]
self.fd['house_price_min'] = house_price_zu.split('-')[0]
self.fd['house_price'] = house_price_zu.split('-')[1]
else:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = 0
self.fd['house_price'] = 0
else:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = 0
self.fd['house_price'] = 0
posttime=CSSSelector('span.pub_time')(tree)!=None and CSSSelector('span.pub_time')(tree)[0].text.strip() or None
if posttime:
Y=int(time.strftime('%Y', time.localtime()))
M=int(posttime.split(' ')[0].split('-')[0])
D=int(posttime.split(' ')[0].split('-')[1])
s = datetime.datetime(Y,M,D,0,0)
posttime=int(time.mktime(s.timetuple()))
self.fd['posttime'] =posttime
else:
self.fd['posttime'] =None
house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
self.fd['house_title'] = house_title
if re.search(self.house_room_regex, response):
house_room=re.search(self.house_room_regex, response).group(1)
self.fd['house_room'] = house_room
else:
self.fd['house_room'] = '0'
if re.search(self.house_hall_regex, response):
house_hall=re.search(self.house_hall_regex, response).group(1)
self.fd['house_hall'] = house_hall
else:
self.fd['house_hall'] = '0'
if re.search(self.house_toilet_regex, response):
house_toilet=re.search(self.house_toilet_regex, response).group(1)
self.fd['house_toilet'] = house_toilet
else:
self.fd['house_toilet'] = '0'
house_title=CSSSelector("div.detail_title h1")(tree)[0] !=None and CSSSelector("div.detail_title h1")(tree)[0].text.strip() or None
self.fd['house_title'] = house_title
        # description
detail_box = soup.find('div',{'class':'detail_box'})
if detail_box:
house_desc = str(detail_box('p')[1])
            self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时请说明是从赶集网上看到的","",house_desc)
else:
self.fd['house_desc'] = None
d_i = soup.find('ul',{'class':'d_i'})
        # community (borough) name
        # handle the JS-embedded fields first
if re.search(self.xiaoqu_regex, response):
borough_name=re.search(self.xiaoqu_regex, response).group(1)
self.fd['borough_name'] = borough_name
if re.search(self.address_regex, response):
house_addr=re.search(self.address_regex, response).group(1)
self.fd['house_addr'] = house_addr
else:
if re.search(self.borough_name_regex_reg, response):
borough_name=re.search(self.borough_name_regex_reg, response).group(1)
self.fd['borough_name'] = borough_name
if re.search(self.house_addr_regex_reg, response):
house_addr=re.search(self.house_addr_regex_reg, response).group(1)
self.fd['house_addr'] = house_addr
else:
self.fd['house_addr'] = ''
# district
area_box = d_i.find(text="ๅบๅ: ").parent
area_a = area_box('a')
if area_a and len(area_a)>1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = area_a[1].string
elif area_a and len(area_a)==1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = None
else:
self.fd['cityarea'] = None
self.fd['section'] = None
request = None
response = None
soup=None
tree=None
del tree
del request
del response
del soup
def extractDict(self):
if checkPath(homepath,self.folder,self.urls):
pass
else:
try:
if self.kind=="1":
self.sell(self.urls)
elif self.kind=="2":
self.rent(self.urls)
elif self.kind=="3":
self.buy(self.urls)
else:
self.require(self.urls)
makePath(homepath,self.folder,self.urls)
# skip listings posted too long ago
if (time.time() -self.fd["posttime"]) > 7*24*36000:return
except:pass
self.fd["c"]="houseapi"
self.fd["a"]="savehouse"
self.fd["is_checked"] = 1
self.fd["web_flag"] = "gj"
return self.fd
if not self.fd["is_checked"]:
for i in self.fd.items():
print i[0],i[1]
print "*"*80
# if len(self.fd)==7 or len(self.fd)==17:
# print "#####################################"
# continue
# req=urllib2.Request("http://site.jjr360.com/app.php", urllib.urlencode(self.fd))
# p=self.br.open(req).read().strip()
# print p.decode('gbk')
# print "*"*80
class fetchData(threading.Thread):
def __init__(self,d):
threading.Thread.__init__(self)
self.d=d
def run(self):
lc=LinkCrawl(self.d["citycode"],self.d["kind"])
clinks=lc.runme()
cc=ContentCrawl(clinks,self.d["citycode"],self.d["kind"])
cc.extractDict()
class getLinksThread(threading.Thread):
def __init__(self,d):
threading.Thread.__init__(self)
self.d=d
def run(self):
gc.enable()
lc=LinkCrawl(self.d["citycode"],self.d["kind"])
lc.runme()
del gc.garbage[:]
def getLinks(d):
gc.enable()
lc=LinkCrawl(d["citycode"],d["kind"])
lc.runme()
del gc.garbage[:]
def getContent(clinks,citycode,kind):
gc.enable()
cc=ContentCrawl(clinks,citycode,kind)
fd=cc.extractDict()
del gc.garbage[:]
return fd
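# Illustrative usage sketch (editor's addition; citycode/kind values are assumed,
# mirroring the commented-out example in __main__ below):
#   lc = LinkCrawl(citycode="su", kind="1")      # kind: 1=sell, 2=rent, 3=buy, else=require
#   clinks = lc.runme()
#   cc = ContentCrawl(clinks, citycode="su", kind="1")
#   fd = cc.extractDict()                        # parsed field dict, ready to be posted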
if __name__=="__main__":
lc=LinkCrawl(citycode="su",kind="1")
lc.runme()#
#url1 = "http://su.ganji.com/fang5/11071015_233901.htm"
#url2 = "http://su.ganji.com/fang1/11071017_418972.htm"
#url3 = "http://su.ganji.com/fang4/11062413_4152.htm"
#url4 = "http://su.ganji.com/fang2/11070900_21214.htm"
#cc=ContentCrawl([url3],citycode="su",kind="3")
#cc.extractDict()
# while 1:
# for i in range(1,5):
# k = "%s" % str(i)
# try:
# lc=LinkCrawl(citycode="su",kind=k)
# clinks=lc.runme()
# cc=ContentCrawl(clinks,citycode="su",kind=k)
# cc.extractDict()
# except:
# pass
| 40.287234
| 154
| 0.528439
|
083d0746bfb5439520db3aa4334f131bb14eb840
| 504
|
py
|
Python
|
tests/test_class_oelint_file_nospaces.py
|
QuakeSaver/oelint-adv
|
e03617b51c7ebdeb8ea245eb61da3e3e03195b37
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_class_oelint_file_nospaces.py
|
QuakeSaver/oelint-adv
|
e03617b51c7ebdeb8ea245eb61da3e3e03195b37
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_class_oelint_file_nospaces.py
|
QuakeSaver/oelint-adv
|
e03617b51c7ebdeb8ea245eb61da3e3e03195b37
|
[
"BSD-2-Clause"
] | null | null | null |
import pytest
from base import TestBaseClass
class TestClassOelintFileNoSpaces(TestBaseClass):
@pytest.mark.parametrize('id', ['oelint.file.nospaces'])
@pytest.mark.parametrize('occurrence', [1])
@pytest.mark.parametrize('input',
[
{
'oelint adv-test.bb':
'''
VAR = "1"
'''
}
],
)
def test_bad(self, input, id, occurrence):
self.check_for_id(self._create_args(input), id, occurrence)
| 24
| 67
| 0.561508
|
c02048790bdcc79c365d23bbf2b8933b8ef39f8a
| 221
|
py
|
Python
|
timm/loss/__init__.py
|
chilung/dvit_repo
|
e2d51717c131048b860b5dfa61b85f2a9d3438db
|
[
"MIT"
] | 90
|
2021-03-28T17:33:03.000Z
|
2022-03-26T01:44:20.000Z
|
timm/loss/__init__.py
|
chilung/dvit_repo
|
e2d51717c131048b860b5dfa61b85f2a9d3438db
|
[
"MIT"
] | 7
|
2021-03-30T10:57:59.000Z
|
2021-12-19T13:40:12.000Z
|
timm/loss/__init__.py
|
chilung/dvit_repo
|
e2d51717c131048b860b5dfa61b85f2a9d3438db
|
[
"MIT"
] | 19
|
2021-04-09T06:27:50.000Z
|
2022-02-11T14:24:25.000Z
|
from .cross_entropy import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy, SoftTargetCrossEntropyCosReg
from .jsd import JsdCrossEntropy
from .asymmetric_loss import AsymmetricLossMultiLabel, AsymmetricLossSingleLabel
| 73.666667
| 107
| 0.909502
|
ed9108a60b1d5a6e468201edaa763c1004a18f0a
| 55,690
|
py
|
Python
|
tensorflow/c01/t9/retrain.py
|
tomsnail/opencv_tf_py
|
cf9aa7fa250546564cff56aa33b5a39991b0d8f1
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/c01/t9/retrain.py
|
tomsnail/opencv_tf_py
|
cf9aa7fa250546564cff56aa33b5a39991b0d8f1
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/c01/t9/retrain.py
|
tomsnail/opencv_tf_py
|
cf9aa7fa250546564cff56aa33b5a39991b0d8f1
|
[
"Apache-2.0"
] | 1
|
2020-05-22T09:19:56.000Z
|
2020-05-22T09:19:56.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# NOTICE: This work was derived from tensorflow/examples/image_retraining
# and modified to use TensorFlow Hub modules.
# pylint: disable=line-too-long
r"""Simple transfer learning with image modules from TensorFlow Hub.
This example shows how to train an image classifier based on any
TensorFlow Hub module that computes image feature vectors. By default,
it uses the feature vectors computed by Inception V3 trained on ImageNet.
For more options, search https://tfhub.dev for image feature vector modules.
The top layer receives as input a 2048-dimensional vector (assuming
Inception V3) for each image. We train a softmax layer on top of this
representation. If the softmax layer contains N labels, this corresponds
to learning N + 2048*N model parameters for the biases and weights.
Here's an example, which assumes you have a folder containing class-named
subfolders, each full of images for each label. The example folder flower_photos
should have a structure like this:
~/flower_photos/daisy/photo1.jpg
~/flower_photos/daisy/photo2.jpg
...
~/flower_photos/rose/anotherphoto77.jpg
...
~/flower_photos/sunflower/somepicture.jpg
The subfolder names are important, since they define what label is applied to
each image, but the filenames themselves don't matter. (For a working example,
download http://download.tensorflow.org/example_images/flower_photos.tgz
and run tar xzf flower_photos.tgz to unpack it.)
Once your images are prepared, and you have pip-installed tensorflow-hub and
a sufficiently recent version of tensorflow, you can run the training with a
command like this:
```bash
python retrain.py --image_dir ~/flower_photos
```
You can replace the image_dir argument with any folder containing subfolders of
images. The label for each image is taken from the name of the subfolder it's
in.
This produces a new model file that can be loaded and run by any TensorFlow
program, for example the tensorflow/examples/label_image sample code.
By default this script will use the highly accurate, but comparatively large and
slow Inception V3 model architecture. It's recommended that you start with this
to validate that you have gathered good training data, but if you want to deploy
on resource-limited platforms, you can try the `--tfhub_module` flag with a
Mobilenet model. For more information on Mobilenet, see
https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html
For example:
Run floating-point version of Mobilenet:
```bash
python retrain.py --image_dir ~/flower_photos \
--tfhub_module https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/feature_vector/1
```
Run Mobilenet, instrumented for quantization:
```bash
python retrain.py --image_dir ~/flower_photos/ \
--tfhub_module https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/quantops/feature_vector/1
```
These instrumented models can be converted to fully quantized mobile models via
TensorFlow Lite.
There are different Mobilenet models to choose from, with a variety of file
size and latency options.
- The first number can be '100', '075', '050', or '025' to control the number
of neurons (activations of hidden layers); the number of weights (and hence
to some extent the file size and speed) shrinks with the square of that
fraction.
- The second number is the input image size. You can choose '224', '192',
'160', or '128', with smaller sizes giving faster speeds.
To use with TensorBoard:
By default, this script will log summaries to /tmp/retrain_logs directory
Visualize the summaries with this command:
tensorboard --logdir /tmp/retrain_logs
To use with Tensorflow Serving, run this tool with --saved_model_dir set
to some increasingly numbered export location under the model base path, e.g.:
```bash
python retrain.py (... other args as before ...) \
--saved_model_dir=/tmp/saved_models/$(date +%s)/
tensorflow_model_server --port=9000 --model_name=my_image_classifier \
--model_base_path=/tmp/saved_models/
```
"""
# pylint: enable=line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
from datetime import datetime
import hashlib
import os.path
import random
import re
import sys
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
FLAGS = None
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M
# The location where variable checkpoints will be stored.
CHECKPOINT_NAME = './tmp/_retrain_checkpoint'
# A module is understood as instrumented for quantization with TF-Lite
# if it contains any of these ops.
FAKE_QUANT_OPS = ('FakeQuantWithMinMaxVars',
'FakeQuantWithMinMaxVarsPerChannel')
def create_image_lists(image_dir, testing_percentage, validation_percentage):
"""Builds a list of training images from the file system.
Analyzes the sub folders in the image directory, splits them into stable
training, testing, and validation sets, and returns a data structure
describing the lists of images for each label and their paths.
Args:
image_dir: String path to a folder containing subfolders of images.
testing_percentage: Integer percentage of the images to reserve for tests.
validation_percentage: Integer percentage of images reserved for validation.
Returns:
An OrderedDict containing an entry for each label subfolder, with images
split into training, testing, and validation sets within each label.
The order of items defines the class indices.
"""
if not tf.gfile.Exists(image_dir):
tf.logging.error("Image directory '" + image_dir + "' not found.")
return None
result = collections.OrderedDict()
sub_dirs = sorted(x[0] for x in tf.gfile.Walk(image_dir))
# The root directory comes first, so skip it.
is_root_dir = True
for sub_dir in sub_dirs:
if is_root_dir:
is_root_dir = False
continue
extensions = sorted(set(os.path.normcase(ext) # Smash case on Windows.
for ext in ['JPEG', 'JPG', 'jpeg', 'jpg', 'png']))
file_list = []
dir_name = os.path.basename(
# tf.gfile.Walk() returns sub-directory with trailing '/' when it is in
# Google Cloud Storage, which confuses os.path.basename().
sub_dir[:-1] if sub_dir.endswith('/') else sub_dir)
if dir_name == image_dir:
continue
tf.logging.info("Looking for images in '" + dir_name + "'")
for extension in extensions:
file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
file_list.extend(tf.gfile.Glob(file_glob))
if not file_list:
tf.logging.warning('No files found')
continue
if len(file_list) < 20:
tf.logging.warning(
'WARNING: Folder has less than 20 images, which may cause issues.')
elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
tf.logging.warning(
'WARNING: Folder {} has more than {} images. Some images will '
'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))
label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
training_images = []
testing_images = []
validation_images = []
for file_name in file_list:
base_name = os.path.basename(file_name)
# We want to ignore anything after '_nohash_' in the file name when
# deciding which set to put an image in, the data set creator has a way of
# grouping photos that are close variations of each other. For example
# this is used in the plant disease data set to group multiple pictures of
# the same leaf.
hash_name = re.sub(r'_nohash_.*$', '', file_name)
# This looks a bit magical, but we need to decide whether this file should
# go into the training, testing, or validation sets, and we want to keep
# existing files in the same set even if more files are subsequently
# added.
# To do that, we need a stable way of deciding based on just the file name
# itself, so we do a hash of that and then use that to generate a
# probability value that we use to assign it.
hash_name_hashed = hashlib.sha1(tf.compat.as_bytes(hash_name)).hexdigest()
percentage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_IMAGES_PER_CLASS + 1)) *
(100.0 / MAX_NUM_IMAGES_PER_CLASS))
if percentage_hash < validation_percentage:
validation_images.append(base_name)
elif percentage_hash < (testing_percentage + validation_percentage):
testing_images.append(base_name)
else:
training_images.append(base_name)
result[label_name] = {
'dir': dir_name,
'training': training_images,
'testing': testing_images,
'validation': validation_images,
}
return result
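# --- Editor's illustrative sketch (not part of the original script) ----------
# The hashing trick above keeps every file in the same training/testing/
# validation split across runs, even when new images are added later. A minimal
# standalone version of that decision, assuming the same percentage arguments:
def _example_split_category(file_name, testing_percentage=10,
                            validation_percentage=10):
  """Returns 'training', 'testing' or 'validation' for a given file name."""
  # Ignore anything after '_nohash_' so grouped variants stay in one split.
  hash_name = re.sub(r'_nohash_.*$', '', file_name)
  hash_name_hashed = hashlib.sha1(tf.compat.as_bytes(hash_name)).hexdigest()
  percentage_hash = ((int(hash_name_hashed, 16) %
                      (MAX_NUM_IMAGES_PER_CLASS + 1)) *
                     (100.0 / MAX_NUM_IMAGES_PER_CLASS))
  if percentage_hash < validation_percentage:
    return 'validation'
  elif percentage_hash < (testing_percentage + validation_percentage):
    return 'testing'
  return 'training'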
def get_image_path(image_lists, label_name, index, image_dir, category):
"""Returns a path to an image for a label at the given index.
Args:
image_lists: OrderedDict of training images for each label.
label_name: Label string we want to get an image for.
index: Int offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
if label_name not in image_lists:
tf.logging.fatal('Label does not exist %s.', label_name)
label_lists = image_lists[label_name]
if category not in label_lists:
tf.logging.fatal('Category does not exist %s.', category)
category_list = label_lists[category]
if not category_list:
tf.logging.fatal('Label %s has no images in the category %s.',
label_name, category)
mod_index = index % len(category_list)
base_name = category_list[mod_index]
sub_dir = label_lists['dir']
full_path = os.path.join(image_dir, sub_dir, base_name)
return full_path
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
category, module_name):
"""Returns a path to a bottleneck file for a label at the given index.
Args:
image_lists: OrderedDict of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
bottleneck_dir: Folder string holding cached files of bottleneck values.
category: Name string of set to pull images from - training, testing, or
validation.
module_name: The name of the image module being used.
Returns:
File system path string to an image that meets the requested parameters.
"""
module_name = (module_name.replace('://', '~') # URL scheme.
.replace('/', '~') # URL and Unix paths.
.replace(':', '~').replace('\\', '~')) # Windows paths.
return get_image_path(image_lists, label_name, index, bottleneck_dir,
category) + '_' + module_name + '.txt'
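# Editor's note (illustrative example; the flower file names are borrowed from
# the module docstring and the module URL from the --tfhub_module help text):
# for 'https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/feature_vector/1'
# the sanitized module_name becomes
#   'https~tfhub.dev~google~imagenet~mobilenet_v1_100_224~feature_vector~1'
# so the cached bottleneck for daisy/photo1.jpg is written to
#   <bottleneck_dir>/daisy/photo1.jpg_https~tfhub.dev~google~imagenet~mobilenet_v1_100_224~feature_vector~1.txt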
def create_module_graph(module_spec):
"""Creates a graph and loads Hub Module into it.
Args:
module_spec: the hub.ModuleSpec for the image module being used.
Returns:
graph: the tf.Graph that was created.
bottleneck_tensor: the bottleneck values output by the module.
resized_input_tensor: the input images, resized as expected by the module.
wants_quantization: a boolean, whether the module has been instrumented
with fake quantization ops.
"""
height, width = hub.get_expected_image_size(module_spec)
with tf.Graph().as_default() as graph:
resized_input_tensor = tf.placeholder(tf.float32, [None, height, width, 3])
m = hub.Module(module_spec)
bottleneck_tensor = m(resized_input_tensor)
wants_quantization = any(node.op in FAKE_QUANT_OPS
for node in graph.as_graph_def().node)
return graph, bottleneck_tensor, resized_input_tensor, wants_quantization
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor):
"""Runs inference on an image to extract the 'bottleneck' summary layer.
Args:
sess: Current active TensorFlow Session.
image_data: String of raw JPEG data.
image_data_tensor: Input data layer in the graph.
decoded_image_tensor: Output of initial image resizing and preprocessing.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: Layer before the final softmax.
Returns:
Numpy array of bottleneck values.
"""
# First decode the JPEG image, resize it, and rescale the pixel values.
resized_input_values = sess.run(decoded_image_tensor,
{image_data_tensor: image_data})
# Then run it through the recognition network.
bottleneck_values = sess.run(bottleneck_tensor,
{resized_input_tensor: resized_input_values})
bottleneck_values = np.squeeze(bottleneck_values)
return bottleneck_values
def ensure_dir_exists(dir_name):
"""Makes sure the folder exists on disk.
Args:
dir_name: Path string to the folder we want to create.
"""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor):
"""Create a single bottleneck file."""
tf.logging.debug('Creating bottleneck at ' + bottleneck_path)
image_path = get_image_path(image_lists, label_name, index,
image_dir, category)
if not tf.gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
image_data = tf.gfile.GFile(image_path, 'rb').read()
try:
bottleneck_values = run_bottleneck_on_image(
sess, image_data, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor)
except Exception as e:
raise RuntimeError('Error during processing file %s (%s)' % (image_path,
str(e)))
bottleneck_string = ','.join(str(x) for x in bottleneck_values)
with open(bottleneck_path, 'w') as bottleneck_file:
bottleneck_file.write(bottleneck_string)
def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,
category, bottleneck_dir, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor, module_name):
"""Retrieves or calculates bottleneck values for an image.
If a cached version of the bottleneck data exists on-disk, return that,
otherwise calculate the data and save it to disk for future use.
Args:
sess: The current active TensorFlow Session.
image_lists: OrderedDict of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be modulo-ed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of which set to pull images from - training, testing,
or validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: The tensor to feed loaded jpeg data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The output tensor for the bottleneck values.
module_name: The name of the image module being used.
Returns:
Numpy array of values produced by the bottleneck layer for the image.
"""
label_lists = image_lists[label_name]
sub_dir = label_lists['dir']
sub_dir_path = os.path.join(bottleneck_dir, sub_dir)
ensure_dir_exists(sub_dir_path)
bottleneck_path = get_bottleneck_path(image_lists, label_name, index,
bottleneck_dir, category, module_name)
if not os.path.exists(bottleneck_path):
create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
did_hit_error = False
try:
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
except ValueError:
tf.logging.warning('Invalid float found, recreating bottleneck')
did_hit_error = True
if did_hit_error:
create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
# Allow exceptions to propagate here, since they shouldn't happen after a
# fresh creation
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
return bottleneck_values
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name):
"""Ensures all the training, testing, and validation bottlenecks are cached.
Because we're likely to read the same image multiple times (if there are no
distortions applied during training) it can speed things up a lot if we
calculate the bottleneck layer values once for each image during
preprocessing, and then just read those cached values repeatedly during
training. Here we go through all the images we've found, calculate those
values, and save them off.
Args:
sess: The current active TensorFlow Session.
image_lists: OrderedDict of training images for each label.
image_dir: Root folder string of the subfolders containing the training
images.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: Input tensor for jpeg data from file.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The penultimate output layer of the graph.
module_name: The name of the image module being used.
Returns:
Nothing.
"""
how_many_bottlenecks = 0
ensure_dir_exists(bottleneck_dir)
for label_name, label_lists in image_lists.items():
for category in ['training', 'testing', 'validation']:
category_list = label_lists[category]
for index, unused_base_name in enumerate(category_list):
get_or_create_bottleneck(
sess, image_lists, label_name, index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name)
how_many_bottlenecks += 1
if how_many_bottlenecks % 100 == 0:
tf.logging.info(
str(how_many_bottlenecks) + ' bottleneck files created.')
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
bottleneck_dir, image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor, module_name):
"""Retrieves bottleneck values for cached images.
If no distortions are being applied, this function can retrieve the cached
bottleneck values directly from disk for images. It picks a random set of
images from the specified category.
Args:
sess: Current TensorFlow Session.
image_lists: OrderedDict of training images for each label.
how_many: If positive, a random sample of this size will be chosen.
If negative, all bottlenecks will be retrieved.
category: Name string of which set to pull from - training, testing, or
validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
image_dir: Root folder string of the subfolders containing the training
images.
jpeg_data_tensor: The layer to feed jpeg image data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
module_name: The name of the image module being used.
Returns:
List of bottleneck arrays, their corresponding ground truths, and the
relevant filenames.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
filenames = []
if how_many >= 0:
# Retrieve a random sample of bottlenecks.
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name)
bottlenecks.append(bottleneck)
ground_truths.append(label_index)
filenames.append(image_name)
else:
# Retrieve all bottlenecks.
for label_index, label_name in enumerate(image_lists.keys()):
for image_index, image_name in enumerate(
image_lists[label_name][category]):
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(
sess, image_lists, label_name, image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name)
bottlenecks.append(bottleneck)
ground_truths.append(label_index)
filenames.append(image_name)
return bottlenecks, ground_truths, filenames
def get_random_distorted_bottlenecks(
sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
distorted_image, resized_input_tensor, bottleneck_tensor):
"""Retrieves bottleneck values for training images, after distortions.
If we're training with distortions like crops, scales, or flips, we have to
recalculate the full model for every image, and so we can't use cached
bottleneck values. Instead we find random images for the requested category,
run them through the distortion graph, and then the full graph to get the
bottleneck results for each.
Args:
sess: Current TensorFlow Session.
image_lists: OrderedDict of training images for each label.
how_many: The integer number of bottleneck values to return.
category: Name string of which set of images to fetch - training, testing,
or validation.
image_dir: Root folder string of the subfolders containing the training
images.
input_jpeg_tensor: The input layer we feed the image data to.
distorted_image: The output node of the distortion graph.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
Returns:
List of bottleneck arrays and their corresponding ground truths.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_path = get_image_path(image_lists, label_name, image_index, image_dir,
category)
if not tf.gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
jpeg_data = tf.gfile.GFile(image_path, 'rb').read()
# Note that we materialize the distorted_image_data as a numpy array before
    # running inference on the image. This involves 2 memory copies and
# might be optimized in other implementations.
distorted_image_data = sess.run(distorted_image,
{input_jpeg_tensor: jpeg_data})
bottleneck_values = sess.run(bottleneck_tensor,
{resized_input_tensor: distorted_image_data})
bottleneck_values = np.squeeze(bottleneck_values)
bottlenecks.append(bottleneck_values)
ground_truths.append(label_index)
return bottlenecks, ground_truths
def should_distort_images(flip_left_right, random_crop, random_scale,
random_brightness):
"""Whether any distortions are enabled, from the input flags.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
Returns:
Boolean value indicating whether any distortions should be applied.
"""
return (flip_left_right or (random_crop != 0) or (random_scale != 0) or
(random_brightness != 0))
def add_input_distortions(flip_left_right, random_crop, random_scale,
random_brightness, module_spec):
"""Creates the operations to apply the specified distortions.
During training it can help to improve the results if we run the images
through simple distortions like crops, scales, and flips. These reflect the
kind of variations we expect in the real world, and so can help train the
model to cope with natural data more effectively. Here we take the supplied
parameters and construct a network of operations to apply them to an image.
Cropping
~~~~~~~~
Cropping is done by placing a bounding box at a random position in the full
image. The cropping parameter controls the size of that box relative to the
input image. If it's zero, then the box is the same size as the input and no
cropping is performed. If the value is 50%, then the crop box will be half the
width and height of the input. In a diagram it looks like this:
< width >
+---------------------+
| |
| width - crop% |
| < > |
| +------+ |
| | | |
| | | |
| | | |
| +------+ |
| |
| |
+---------------------+
Scaling
~~~~~~~
Scaling is a lot like cropping, except that the bounding box is always
centered and its size varies randomly within the given range. For example if
the scale percentage is zero, then the bounding box is the same size as the
input and no scaling is applied. If it's 50%, then the bounding box will be in
a random range between half the width and height and full size.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
module_spec: The hub.ModuleSpec for the image module being used.
Returns:
The jpeg input layer and the distorted result tensor.
"""
input_height, input_width = hub.get_expected_image_size(module_spec)
input_depth = hub.get_num_image_channels(module_spec)
jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
# Convert from full range of uint8 to range [0,1] of float32.
decoded_image_as_float = tf.image.convert_image_dtype(decoded_image,
tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
margin_scale = 1.0 + (random_crop / 100.0)
resize_scale = 1.0 + (random_scale / 100.0)
margin_scale_value = tf.constant(margin_scale)
resize_scale_value = tf.random_uniform(shape=[],
minval=1.0,
maxval=resize_scale)
scale_value = tf.multiply(margin_scale_value, resize_scale_value)
precrop_width = tf.multiply(scale_value, input_width)
precrop_height = tf.multiply(scale_value, input_height)
precrop_shape = tf.stack([precrop_height, precrop_width])
precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)
precropped_image = tf.image.resize_bilinear(decoded_image_4d,
precrop_shape_as_int)
precropped_image_3d = tf.squeeze(precropped_image, axis=[0])
cropped_image = tf.random_crop(precropped_image_3d,
[input_height, input_width, input_depth])
if flip_left_right:
flipped_image = tf.image.random_flip_left_right(cropped_image)
else:
flipped_image = cropped_image
brightness_min = 1.0 - (random_brightness / 100.0)
brightness_max = 1.0 + (random_brightness / 100.0)
brightness_value = tf.random_uniform(shape=[],
minval=brightness_min,
maxval=brightness_max)
brightened_image = tf.multiply(flipped_image, brightness_value)
distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult')
return jpeg_data, distort_result
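# Editor's note (worked example with assumed flag values, not from the original):
# for --random_crop=10 and --random_scale=20 the graph above computes
#   margin_scale = 1.0 + 10/100.0 = 1.1
#   resize_scale_value drawn uniformly from [1.0, 1.2]
# so scale_value lies in [1.1, 1.32]: the image is first resized to between
# 1.1x and 1.32x the module's input size and then randomly cropped back down
# to the input size, giving both a scale and a translation jitter.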
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def add_final_retrain_ops(class_count, final_tensor_name, bottleneck_tensor,
quantize_layer, is_training):
"""Adds a new softmax and fully-connected layer for training and eval.
We need to retrain the top layer to identify our new classes, so this function
adds the right operations to the graph, along with some variables to hold the
weights, and then sets up all the gradients for the backward pass.
The set up for the softmax and fully-connected layers is based on:
https://www.tensorflow.org/tutorials/mnist/beginners/index.html
Args:
class_count: Integer of how many categories of things we're trying to
recognize.
final_tensor_name: Name string for the new final node that produces results.
bottleneck_tensor: The output of the main CNN graph.
quantize_layer: Boolean, specifying whether the newly added layer should be
instrumented for quantization with TF-Lite.
is_training: Boolean, specifying whether the newly add layer is for training
or eval.
Returns:
The tensors for the training and cross entropy results, and tensors for the
bottleneck input and ground truth input.
"""
batch_size, bottleneck_tensor_size = bottleneck_tensor.get_shape().as_list()
assert batch_size is None, 'We want to work with arbitrary batch size.'
with tf.name_scope('input'):
bottleneck_input = tf.placeholder_with_default(
bottleneck_tensor,
shape=[batch_size, bottleneck_tensor_size],
name='BottleneckInputPlaceholder')
ground_truth_input = tf.placeholder(
tf.int64, [batch_size], name='GroundTruthInput')
# Organizing the following ops so they are easier to see in TensorBoard.
layer_name = 'final_retrain_ops'
with tf.name_scope(layer_name):
with tf.name_scope('weights'):
initial_value = tf.truncated_normal(
[bottleneck_tensor_size, class_count], stddev=0.001)
layer_weights = tf.Variable(initial_value, name='final_weights')
variable_summaries(layer_weights)
with tf.name_scope('biases'):
layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
variable_summaries(layer_biases)
with tf.name_scope('Wx_plus_b'):
logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
tf.summary.histogram('pre_activations', logits)
final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
# The tf.contrib.quantize functions rewrite the graph in place for
# quantization. The imported model graph has already been rewritten, so upon
# calling these rewrites, only the newly added final layer will be
# transformed.
if quantize_layer:
if is_training:
tf.contrib.quantize.create_training_graph()
else:
tf.contrib.quantize.create_eval_graph()
tf.summary.histogram('activations', final_tensor)
# If this is an eval graph, we don't need to add loss ops or an optimizer.
if not is_training:
return None, None, bottleneck_input, ground_truth_input, final_tensor
with tf.name_scope('cross_entropy'):
cross_entropy_mean = tf.losses.sparse_softmax_cross_entropy(
labels=ground_truth_input, logits=logits)
tf.summary.scalar('cross_entropy', cross_entropy_mean)
with tf.name_scope('train'):
optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
train_step = optimizer.minimize(cross_entropy_mean)
return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,
final_tensor)
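# Editor's note (worked example, assuming the default Inception V3 module with
# 2048-dimensional bottlenecks and, say, class_count=5): the layer added above
# learns
#   final_weights: 2048 * 5 = 10240 parameters
#   final_biases:  5 parameters
# i.e. 10245 trainable parameters in total, with logits = bottleneck @ W + b.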
def add_evaluation_step(result_tensor, ground_truth_tensor):
"""Inserts the operations we need to evaluate the accuracy of our results.
Args:
result_tensor: The new final node that produces results.
ground_truth_tensor: The node we feed ground truth data
into.
Returns:
Tuple of (evaluation step, prediction).
"""
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
prediction = tf.argmax(result_tensor, 1)
correct_prediction = tf.equal(prediction, ground_truth_tensor)
with tf.name_scope('accuracy'):
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', evaluation_step)
return evaluation_step, prediction
def run_final_eval(train_session, module_spec, class_count, image_lists,
jpeg_data_tensor, decoded_image_tensor,
resized_image_tensor, bottleneck_tensor):
"""Runs a final evaluation on an eval graph using the test data set.
Args:
train_session: Session for the train graph with the tensors below.
module_spec: The hub.ModuleSpec for the image module being used.
class_count: Number of classes
image_lists: OrderedDict of training images for each label.
jpeg_data_tensor: The layer to feed jpeg image data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_image_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
"""
test_bottlenecks, test_ground_truth, test_filenames = (
get_random_cached_bottlenecks(train_session, image_lists,
FLAGS.test_batch_size,
'testing', FLAGS.bottleneck_dir,
FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor,
bottleneck_tensor, FLAGS.tfhub_module))
(eval_session, _, bottleneck_input, ground_truth_input, evaluation_step,
prediction) = build_eval_session(module_spec, class_count)
test_accuracy, predictions = eval_session.run(
[evaluation_step, prediction],
feed_dict={
bottleneck_input: test_bottlenecks,
ground_truth_input: test_ground_truth
})
tf.logging.info('Final test accuracy = %.1f%% (N=%d)' %
(test_accuracy * 100, len(test_bottlenecks)))
if FLAGS.print_misclassified_test_images:
tf.logging.info('=== MISCLASSIFIED TEST IMAGES ===')
for i, test_filename in enumerate(test_filenames):
if predictions[i] != test_ground_truth[i]:
tf.logging.info('%70s %s' % (test_filename,
list(image_lists.keys())[predictions[i]]))
def build_eval_session(module_spec, class_count):
"""Builds an restored eval session without train operations for exporting.
Args:
module_spec: The hub.ModuleSpec for the image module being used.
class_count: Number of classes
Returns:
Eval session containing the restored eval graph.
The bottleneck input, ground truth, eval step, and prediction tensors.
"""
# If quantized, we need to create the correct eval graph for exporting.
eval_graph, bottleneck_tensor, resized_input_tensor, wants_quantization = (
create_module_graph(module_spec))
eval_sess = tf.Session(graph=eval_graph)
with eval_graph.as_default():
# Add the new layer for exporting.
(_, _, bottleneck_input,
ground_truth_input, final_tensor) = add_final_retrain_ops(
class_count, FLAGS.final_tensor_name, bottleneck_tensor,
wants_quantization, is_training=False)
# Now we need to restore the values from the training graph to the eval
# graph.
tf.train.Saver().restore(eval_sess, CHECKPOINT_NAME)
evaluation_step, prediction = add_evaluation_step(final_tensor,
ground_truth_input)
return (eval_sess, resized_input_tensor, bottleneck_input, ground_truth_input,
evaluation_step, prediction)
def save_graph_to_file(graph_file_name, module_spec, class_count):
"""Saves an graph to file, creating a valid quantized one if necessary."""
sess, _, _, _, _, _ = build_eval_session(module_spec, class_count)
graph = sess.graph
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
with tf.gfile.GFile(graph_file_name, 'wb') as f:
f.write(output_graph_def.SerializeToString())
def prepare_file_system():
# Set up the directory we'll write summaries to for TensorBoard
if tf.gfile.Exists(FLAGS.summaries_dir):
tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
tf.gfile.MakeDirs(FLAGS.summaries_dir)
if FLAGS.intermediate_store_frequency > 0:
ensure_dir_exists(FLAGS.intermediate_output_graphs_dir)
return
def add_jpeg_decoding(module_spec):
"""Adds operations that perform JPEG decoding and resizing to the graph..
Args:
module_spec: The hub.ModuleSpec for the image module being used.
Returns:
Tensors for the node to feed JPEG data into, and the output of the
preprocessing steps.
"""
input_height, input_width = hub.get_expected_image_size(module_spec)
input_depth = hub.get_num_image_channels(module_spec)
jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
# Convert from full range of uint8 to range [0,1] of float32.
decoded_image_as_float = tf.image.convert_image_dtype(decoded_image,
tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
resize_shape = tf.stack([input_height, input_width])
resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)
resized_image = tf.image.resize_bilinear(decoded_image_4d,
resize_shape_as_int)
return jpeg_data, resized_image
def export_model(module_spec, class_count, saved_model_dir):
"""Exports model for serving.
Args:
module_spec: The hub.ModuleSpec for the image module being used.
class_count: The number of classes.
saved_model_dir: Directory in which to save exported model and variables.
"""
# The SavedModel should hold the eval graph.
sess, in_image, _, _, _, _ = build_eval_session(module_spec, class_count)
with sess.graph.as_default() as graph:
tf.saved_model.simple_save(
sess,
saved_model_dir,
inputs={'image': in_image},
outputs={'prediction': graph.get_tensor_by_name('final_result:0')},
legacy_init_op=tf.group(tf.tables_initializer(), name='legacy_init_op')
)
def logging_level_verbosity(logging_verbosity):
"""Converts logging_level into TensorFlow logging verbosity value
Args:
logging_level: String value representing logging level: 'DEBUG', 'INFO',
'WARN', 'ERROR', 'FATAL'
"""
name_to_level = {
'FATAL': tf.logging.FATAL,
'ERROR': tf.logging.ERROR,
'WARN': tf.logging.WARN,
'INFO': tf.logging.INFO,
'DEBUG': tf.logging.DEBUG
}
try:
return name_to_level[logging_verbosity]
except Exception as e:
    raise RuntimeError('Unsupported logging verbosity (%s). Use one of %s.' %
(str(e), list(name_to_level)))
def main(_):
# Needed to make sure the logging output is visible.
# See https://github.com/tensorflow/tensorflow/issues/3047
logging_verbosity = logging_level_verbosity(FLAGS.logging_verbosity)
tf.logging.set_verbosity(logging_verbosity)
if not FLAGS.image_dir:
tf.logging.error('Must set flag --image_dir.')
return -1
# Prepare necessary directories that can be used during training
prepare_file_system()
# Look at the folder structure, and create lists of all the images.
image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,
FLAGS.validation_percentage)
class_count = len(image_lists.keys())
if class_count == 0:
tf.logging.error('No valid folders of images found at ' + FLAGS.image_dir)
return -1
if class_count == 1:
tf.logging.error('Only one valid folder of images found at ' +
FLAGS.image_dir +
' - multiple classes are needed for classification.')
return -1
# See if the command-line flags mean we're applying any distortions.
do_distort_images = should_distort_images(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness)
# Set up the pre-trained graph.
module_spec = hub.load_module_spec(FLAGS.tfhub_module)
graph, bottleneck_tensor, resized_image_tensor, wants_quantization = (
create_module_graph(module_spec))
# Add the new layer that we'll be training.
with graph.as_default():
(train_step, cross_entropy, bottleneck_input,
ground_truth_input, final_tensor) = add_final_retrain_ops(
class_count, FLAGS.final_tensor_name, bottleneck_tensor,
wants_quantization, is_training=True)
with tf.Session(graph=graph) as sess:
# Initialize all weights: for the module to their pretrained values,
# and for the newly added retraining layer to random initial values.
init = tf.global_variables_initializer()
sess.run(init)
# Set up the image decoding sub-graph.
jpeg_data_tensor, decoded_image_tensor = add_jpeg_decoding(module_spec)
if do_distort_images:
# We will be applying distortions, so set up the operations we'll need.
(distorted_jpeg_data_tensor,
distorted_image_tensor) = add_input_distortions(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness, module_spec)
else:
# We'll make sure we've calculated the 'bottleneck' image summaries and
# cached them on disk.
cache_bottlenecks(sess, image_lists, FLAGS.image_dir,
FLAGS.bottleneck_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor,
bottleneck_tensor, FLAGS.tfhub_module)
# Create the operations we need to evaluate the accuracy of our new layer.
evaluation_step, _ = add_evaluation_step(final_tensor, ground_truth_input)
# Merge all the summaries and write them out to the summaries_dir
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(
FLAGS.summaries_dir + '/validation')
# Create a train saver that is used to restore values into an eval graph
# when exporting models.
train_saver = tf.train.Saver()
# Run the training for as many cycles as requested on the command line.
for i in range(FLAGS.how_many_training_steps):
# Get a batch of input bottleneck values, either calculated fresh every
# time with distortions applied, or from the cache stored on disk.
if do_distort_images:
(train_bottlenecks,
train_ground_truth) = get_random_distorted_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.image_dir, distorted_jpeg_data_tensor,
distorted_image_tensor, resized_image_tensor, bottleneck_tensor)
else:
(train_bottlenecks,
train_ground_truth, _) = get_random_cached_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
FLAGS.tfhub_module)
# Feed the bottlenecks and ground truth into the graph, and run a training
# step. Capture training summaries for TensorBoard with the `merged` op.
train_summary, _ = sess.run(
[merged, train_step],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
train_writer.add_summary(train_summary, i)
# Every so often, print out how well the graph is training.
is_last_step = (i + 1 == FLAGS.how_many_training_steps)
if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
train_accuracy, cross_entropy_value = sess.run(
[evaluation_step, cross_entropy],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
tf.logging.info('%s: Step %d: Train accuracy = %.1f%%' %
(datetime.now(), i, train_accuracy * 100))
tf.logging.info('%s: Step %d: Cross entropy = %f' %
(datetime.now(), i, cross_entropy_value))
# TODO: Make this use an eval graph, to avoid quantization
# moving averages being updated by the validation set, though in
        # practice this makes a negligible difference.
validation_bottlenecks, validation_ground_truth, _ = (
get_random_cached_bottlenecks(
sess, image_lists, FLAGS.validation_batch_size, 'validation',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
FLAGS.tfhub_module))
# Run a validation step and capture training summaries for TensorBoard
# with the `merged` op.
validation_summary, validation_accuracy = sess.run(
[merged, evaluation_step],
feed_dict={bottleneck_input: validation_bottlenecks,
ground_truth_input: validation_ground_truth})
validation_writer.add_summary(validation_summary, i)
tf.logging.info('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %
(datetime.now(), i, validation_accuracy * 100,
len(validation_bottlenecks)))
# Store intermediate results
intermediate_frequency = FLAGS.intermediate_store_frequency
if (intermediate_frequency > 0 and (i % intermediate_frequency == 0)
and i > 0):
# If we want to do an intermediate save, save a checkpoint of the train
# graph, to restore into the eval graph.
train_saver.save(sess, CHECKPOINT_NAME)
intermediate_file_name = (FLAGS.intermediate_output_graphs_dir +
'intermediate_' + str(i) + '.pb')
tf.logging.info('Save intermediate result to : ' +
intermediate_file_name)
save_graph_to_file(intermediate_file_name, module_spec,
class_count)
# After training is complete, force one last save of the train checkpoint.
train_saver.save(sess, CHECKPOINT_NAME)
# We've completed all our training, so run a final test evaluation on
# some new images we haven't used before.
run_final_eval(sess, module_spec, class_count, image_lists,
jpeg_data_tensor, decoded_image_tensor, resized_image_tensor,
bottleneck_tensor)
# Write out the trained graph and labels with the weights stored as
# constants.
tf.logging.info('Save final result to : ' + FLAGS.output_graph)
if wants_quantization:
tf.logging.info('The model is instrumented for quantization with TF-Lite')
save_graph_to_file(FLAGS.output_graph, module_spec, class_count)
with tf.gfile.GFile(FLAGS.output_labels, 'w') as f:
f.write('\n'.join(image_lists.keys()) + '\n')
if FLAGS.saved_model_dir:
export_model(module_spec, class_count, FLAGS.saved_model_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--image_dir',
type=str,
default='',
help='Path to folders of labeled images.'
)
parser.add_argument(
'--output_graph',
type=str,
default='./tmp/output_graph.pb',
help='Where to save the trained graph.'
)
parser.add_argument(
'--intermediate_output_graphs_dir',
type=str,
default='./tmp/intermediate_graph/',
help='Where to save the intermediate graphs.'
)
parser.add_argument(
'--intermediate_store_frequency',
type=int,
default=0,
help="""\
How many steps to store intermediate graph. If "0" then will not
store.\
"""
)
parser.add_argument(
'--output_labels',
type=str,
default='./tmp/output_labels.txt',
help='Where to save the trained graph\'s labels.'
)
parser.add_argument(
'--summaries_dir',
type=str,
default='/tmp/retrain_logs',
help='Where to save summary logs for TensorBoard.'
)
parser.add_argument(
'--how_many_training_steps',
type=int,
default=4000,
help='How many training steps to run before ending.'
)
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='How large a learning rate to use when training.'
)
parser.add_argument(
'--testing_percentage',
type=int,
default=10,
help='What percentage of images to use as a test set.'
)
parser.add_argument(
'--validation_percentage',
type=int,
default=10,
help='What percentage of images to use as a validation set.'
)
parser.add_argument(
'--eval_step_interval',
type=int,
default=10,
help='How often to evaluate the training results.'
)
parser.add_argument(
'--train_batch_size',
type=int,
default=100,
help='How many images to train on at a time.'
)
parser.add_argument(
'--test_batch_size',
type=int,
default=-1,
help="""\
How many images to test on. This test set is only used once, to evaluate
the final accuracy of the model after training completes.
A value of -1 causes the entire test set to be used, which leads to more
stable results across runs.\
"""
)
parser.add_argument(
'--validation_batch_size',
type=int,
default=100,
help="""\
How many images to use in an evaluation batch. This validation set is
used much more often than the test set, and is an early indicator of how
accurate the model is during training.
A value of -1 causes the entire validation set to be used, which leads to
more stable results across training iterations, but may be slower on large
training sets.\
"""
)
parser.add_argument(
'--print_misclassified_test_images',
default=False,
help="""\
Whether to print out a list of all misclassified test images.\
""",
action='store_true'
)
parser.add_argument(
'--bottleneck_dir',
type=str,
default='./tmp/bottleneck',
help='Path to cache bottleneck layer values as files.'
)
parser.add_argument(
'--final_tensor_name',
type=str,
default='final_result',
help="""\
The name of the output classification layer in the retrained graph.\
"""
)
parser.add_argument(
'--flip_left_right',
default=False,
help="""\
Whether to randomly flip half of the training images horizontally.\
""",
action='store_true'
)
parser.add_argument(
'--random_crop',
type=int,
default=0,
help="""\
A percentage determining how much of a margin to randomly crop off the
training images.\
"""
)
parser.add_argument(
'--random_scale',
type=int,
default=0,
help="""\
A percentage determining how much to randomly scale up the size of the
training images by.\
"""
)
parser.add_argument(
'--random_brightness',
type=int,
default=0,
help="""\
A percentage determining how much to randomly multiply the training image
input pixels up or down by.\
"""
)
parser.add_argument(
'--tfhub_module',
type=str,
default=(
'./tfhub/1.tar.gz'),
help="""\
Which TensorFlow Hub module to use. For more options,
search https://tfhub.dev for image feature vector modules.\
""")
parser.add_argument(
'--saved_model_dir',
type=str,
default='',
help='Where to save the exported graph.')
parser.add_argument(
'--logging_verbosity',
type=str,
default='INFO',
choices=['DEBUG', 'INFO', 'WARN', 'ERROR', 'FATAL'],
help='How much logging output should be produced.')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 41.374443
| 99
| 0.697109
|
067fc185730d21a25cacf5ffb7e51c038af68d6e
| 16,606
|
py
|
Python
|
qiskit/opflow/state_fns/state_fn.py
|
ewinston/qiskit-sdk-py
|
4d64125aba4ff31f15d0054b90437bcef352782e
|
[
"Apache-2.0"
] | null | null | null |
qiskit/opflow/state_fns/state_fn.py
|
ewinston/qiskit-sdk-py
|
4d64125aba4ff31f15d0054b90437bcef352782e
|
[
"Apache-2.0"
] | 1
|
2018-06-15T08:15:47.000Z
|
2018-06-15T14:38:19.000Z
|
qiskit/opflow/state_fns/state_fn.py
|
ewinston/qiskit-sdk-py
|
4d64125aba4ff31f15d0054b90437bcef352782e
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" StateFn Class """
from typing import Callable, Dict, List, Optional, Set, Tuple, Union
import numpy as np
from qiskit import QuantumCircuit
from qiskit.circuit import Instruction, ParameterExpression
from qiskit.opflow.operator_base import OperatorBase
from qiskit.quantum_info import Statevector
from qiskit.result import Result
class StateFn(OperatorBase):
r"""
A class for representing state functions and measurements.
State functions are defined to be complex functions over a single binary string (as
compared to an operator, which is defined as a function over two binary strings, or a
function taking a binary function to another binary function). This function may be
called by the eval() method.
Measurements are defined to be functionals over StateFns, taking them to real values.
Generally, this real value is interpreted to represent the probability of some classical
state (binary string) being observed from a probabilistic or quantum system represented
by a StateFn. This leads to the equivalent definition, which is that a measurement m is
a function over binary strings producing StateFns, such that the probability of measuring
a given binary string b from a system with StateFn f is equal to the inner
product between f and m(b).
NOTE: State functions here are not restricted to wave functions, as there is
no requirement of normalization.
"""
def __init_subclass__(cls):
cls.__new__ = lambda cls, *args, **kwargs: super().__new__(cls)
@staticmethod
# pylint: disable=unused-argument
def __new__(cls,
primitive: Union[str, dict, Result,
list, np.ndarray, Statevector,
QuantumCircuit, Instruction,
OperatorBase] = None,
coeff: Union[complex, ParameterExpression] = 1.0,
is_measurement: bool = False) -> 'StateFn':
""" A factory method to produce the correct type of StateFn subclass
based on the primitive passed in. Primitive, coeff, and is_measurement arguments
        are passed into the subclass's __init__() as-is automatically by __new__().
Args:
primitive: The primitive which defines the behavior of the underlying State function.
coeff: A coefficient by which the state function is multiplied.
is_measurement: Whether the StateFn is a measurement operator
Returns:
The appropriate StateFn subclass for ``primitive``.
Raises:
TypeError: Unsupported primitive type passed.
"""
# Prevents infinite recursion when subclasses are created
if cls.__name__ != StateFn.__name__:
return super().__new__(cls)
# pylint: disable=cyclic-import
if isinstance(primitive, (str, dict, Result)):
from .dict_state_fn import DictStateFn
return DictStateFn.__new__(DictStateFn)
if isinstance(primitive, (list, np.ndarray, Statevector)):
from .vector_state_fn import VectorStateFn
return VectorStateFn.__new__(VectorStateFn)
if isinstance(primitive, (QuantumCircuit, Instruction)):
from .circuit_state_fn import CircuitStateFn
return CircuitStateFn.__new__(CircuitStateFn)
if isinstance(primitive, OperatorBase):
from .operator_state_fn import OperatorStateFn
return OperatorStateFn.__new__(OperatorStateFn)
raise TypeError('Unsupported primitive type {} passed into StateFn '
'factory constructor'.format(type(primitive)))
# TODO allow normalization somehow?
def __init__(self,
primitive: Union[str, dict, Result,
list, np.ndarray, Statevector,
QuantumCircuit, Instruction,
OperatorBase] = None,
coeff: Union[complex, ParameterExpression] = 1.0,
is_measurement: bool = False) -> None:
"""
Args:
primitive: The primitive which defines the behavior of the underlying State function.
coeff: A coefficient by which the state function is multiplied.
is_measurement: Whether the StateFn is a measurement operator
"""
super().__init__()
self._primitive = primitive
self._is_measurement = is_measurement
self._coeff = coeff
@property
def primitive(self):
""" The primitive which defines the behavior of the underlying State function. """
return self._primitive
@property
def coeff(self) -> Union[complex, ParameterExpression]:
""" A coefficient by which the state function is multiplied. """
return self._coeff
@property
def is_measurement(self) -> bool:
""" Whether the StateFn object is a measurement Operator. """
return self._is_measurement
def primitive_strings(self) -> Set[str]:
raise NotImplementedError
@property
def num_qubits(self) -> int:
raise NotImplementedError
def add(self, other: OperatorBase) -> OperatorBase:
raise NotImplementedError
def adjoint(self) -> OperatorBase:
raise NotImplementedError
def _expand_dim(self, num_qubits: int) -> 'StateFn':
raise NotImplementedError
def permute(self, permutation: List[int]) -> OperatorBase:
"""Permute the qubits of the state function.
Args:
permutation: A list defining where each qubit should be permuted. The qubit at index
j of the circuit should be permuted to position permutation[j].
Returns:
A new StateFn containing the permuted primitive.
"""
raise NotImplementedError
def equals(self, other: OperatorBase) -> bool:
if not isinstance(other, type(self)) or not self.coeff == other.coeff:
return False
return self.primitive == other.primitive
# Will return NotImplementedError if not supported
def mul(self, scalar: Union[complex, ParameterExpression]) -> OperatorBase:
if not isinstance(scalar, (int, float, complex, ParameterExpression)):
raise ValueError('Operators can only be scalar multiplied by float or complex, not '
'{} of type {}.'.format(scalar, type(scalar)))
return self.__class__(self.primitive,
coeff=self.coeff * scalar,
is_measurement=self.is_measurement)
def tensor(self, other: OperatorBase) -> OperatorBase:
r"""
Return tensor product between self and other, overloaded by ``^``.
Note: You must be conscious of Qiskit's big-endian bit printing
convention. Meaning, Plus.tensor(Zero)
produces a \|+โฉ on qubit 0 and a \|0โฉ on qubit 1, or \|+โฉโจ\|0โฉ, but
would produce a QuantumCircuit like
\|0โฉ--
\|+โฉ--
Because Terra prints circuits and results with qubit 0
at the end of the string or circuit.
Args:
other: The ``OperatorBase`` to tensor product with self.
Returns:
An ``OperatorBase`` equivalent to the tensor product of self and other.
"""
raise NotImplementedError
def tensorpower(self, other: int) -> Union[OperatorBase, int]:
if not isinstance(other, int) or other <= 0:
raise TypeError('Tensorpower can only take positive int arguments')
temp = StateFn(self.primitive,
coeff=self.coeff,
is_measurement=self.is_measurement) # type: OperatorBase
for _ in range(other - 1):
temp = temp.tensor(self)
return temp
def _expand_shorter_operator_and_permute(
self, other: OperatorBase, permutation: Optional[List[int]] = None
) -> Tuple[OperatorBase, OperatorBase]:
# pylint: disable=cyclic-import
from ..operator_globals import Zero
if self == StateFn({'0': 1}, is_measurement=True):
# Zero is special - we'll expand it to the correct qubit number.
return StateFn('0' * other.num_qubits, is_measurement=True), other
elif other == Zero:
# Zero is special - we'll expand it to the correct qubit number.
return self, StateFn('0' * self.num_qubits)
return super()._expand_shorter_operator_and_permute(other, permutation)
def to_matrix(self, massive: bool = False) -> np.ndarray:
raise NotImplementedError
def to_density_matrix(self, massive: bool = False) -> np.ndarray:
""" Return matrix representing product of StateFn evaluated on pairs of basis states.
Overridden by child classes.
Args:
massive: Whether to allow large conversions, e.g. creating a matrix representing
over 16 qubits.
Returns:
The NumPy array representing the density matrix of the State function.
Raises:
ValueError: If massive is set to False, and exponentially large computation is needed.
"""
raise NotImplementedError
def compose(self, other: OperatorBase,
permutation: Optional[List[int]] = None, front: bool = False) -> OperatorBase:
r"""
Composition (Linear algebra-style: A@B(x) = A(B(x))) is not well defined for states
in the binary function model, but is well defined for measurements.
Args:
other: The Operator to compose with self.
permutation: ``List[int]`` which defines permutation on other operator.
front: If front==True, return ``other.compose(self)``.
Returns:
An Operator equivalent to the function composition of self and other.
Raises:
ValueError: If self is not a measurement, it cannot be composed from the right.
"""
# TODO maybe allow outers later to produce density operators or projectors, but not yet.
if not self.is_measurement and not front:
raise ValueError(
'Composition with a Statefunction in the first operand is not defined.')
new_self, other = self._expand_shorter_operator_and_permute(other, permutation)
if front:
return other.compose(self)
# TODO maybe include some reduction here in the subclasses - vector and Op, op and Op, etc.
from ..primitive_ops.circuit_op import CircuitOp
if self.primitive == {'0' * self.num_qubits: 1.0} and isinstance(other, CircuitOp):
# Returning CircuitStateFn
return StateFn(other.primitive, is_measurement=self.is_measurement,
coeff=self.coeff * other.coeff)
from ..list_ops.composed_op import ComposedOp
return ComposedOp([new_self, other])
def power(self, exponent: int) -> OperatorBase:
""" Compose with Self Multiple Times, undefined for StateFns.
Args:
exponent: The number of times to compose self with self.
Raises:
ValueError: This function is not defined for StateFns.
"""
raise ValueError('Composition power over Statefunctions or Measurements is not defined.')
def __str__(self) -> str:
prim_str = str(self.primitive)
if self.coeff == 1.0:
return "{}({})".format('StateFunction' if not self.is_measurement
else 'Measurement', self.coeff)
else:
return "{}({}) * {}".format('StateFunction' if not self.is_measurement
else 'Measurement',
self.coeff,
prim_str)
def __repr__(self) -> str:
return "{}({}, coeff={}, is_measurement={})".format(self.__class__.__name__,
repr(self.primitive),
self.coeff, self.is_measurement)
def eval(
self,
front: Optional[
Union[str, Dict[str, complex], np.ndarray, OperatorBase, Statevector]
] = None,
) -> Union[OperatorBase, complex]:
raise NotImplementedError
@property
def parameters(self):
params = set()
if isinstance(self.primitive, (OperatorBase, QuantumCircuit)):
params.update(self.primitive.parameters)
if isinstance(self.coeff, ParameterExpression):
params.update(self.coeff.parameters)
return params
def assign_parameters(self, param_dict: dict) -> OperatorBase:
param_value = self.coeff
if isinstance(self.coeff, ParameterExpression):
unrolled_dict = self._unroll_param_dict(param_dict)
if isinstance(unrolled_dict, list):
from ..list_ops.list_op import ListOp
return ListOp([self.assign_parameters(param_dict) for param_dict in unrolled_dict])
if self.coeff.parameters <= set(unrolled_dict.keys()):
binds = {param: unrolled_dict[param] for param in self.coeff.parameters}
param_value = float(self.coeff.bind(binds))
return self.traverse(lambda x: x.assign_parameters(param_dict), coeff=param_value)
# Try collapsing primitives where possible. Nothing to collapse here.
def reduce(self) -> OperatorBase:
return self
def traverse(self,
convert_fn: Callable,
coeff: Optional[Union[complex, ParameterExpression]] = None
) -> OperatorBase:
r"""
Apply the convert_fn to the internal primitive if the primitive is an Operator (as in
the case of ``OperatorStateFn``). Otherwise do nothing. Used by converters.
Args:
convert_fn: The function to apply to the internal OperatorBase.
coeff: A coefficient to multiply by after applying convert_fn.
If it is None, self.coeff is used instead.
Returns:
The converted StateFn.
"""
if coeff is None:
coeff = self.coeff
if isinstance(self.primitive, OperatorBase):
return StateFn(convert_fn(self.primitive),
coeff=coeff, is_measurement=self.is_measurement)
else:
return self
def to_matrix_op(self, massive: bool = False) -> OperatorBase:
""" Return a ``VectorStateFn`` for this ``StateFn``.
Args:
massive: Whether to allow large conversions, e.g. creating a matrix representing
over 16 qubits.
Returns:
A VectorStateFn equivalent to self.
"""
# pylint: disable=cyclic-import
from .vector_state_fn import VectorStateFn
return VectorStateFn(self.to_matrix(massive=massive), is_measurement=self.is_measurement)
def to_circuit_op(self) -> OperatorBase:
""" Returns a ``CircuitOp`` equivalent to this Operator. """
raise NotImplementedError
# TODO to_dict_op
def sample(self,
shots: int = 1024,
massive: bool = False,
reverse_endianness: bool = False) -> Dict[str, float]:
""" Sample the state function as a normalized probability distribution. Returns dict of
bitstrings in order of probability, with values being probability.
Args:
shots: The number of samples to take to approximate the State function.
massive: Whether to allow large conversions, e.g. creating a matrix representing
over 16 qubits.
reverse_endianness: Whether to reverse the endianness of the bitstrings in the return
dict to match Terra's big-endianness.
Returns:
A dict containing pairs sampled strings from the State function and sampling
frequency divided by shots.
"""
raise NotImplementedError
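# --- Editor's illustrative sketch (not part of the original qiskit source) ---
# The factory ``__new__`` above dispatches on the type of ``primitive``. A
# minimal usage sketch, assuming a standard qiskit install that exposes
# ``qiskit.opflow.StateFn``; it is not executed when the module is imported.
if __name__ == '__main__':
    from qiskit.opflow import StateFn as _StateFn

    bell = QuantumCircuit(2)
    bell.h(0)
    bell.cx(0, 1)

    psi = _StateFn(bell)                             # dispatches to CircuitStateFn
    zero_meas = _StateFn('00', is_measurement=True)  # dispatches to DictStateFn

    # Composing the measurement with the state and evaluating gives the
    # amplitude <00|psi>, which for a Bell state is 1/sqrt(2).
    print((zero_meas @ psi).eval())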
| 41.10396
| 99
| 0.630917
|
1db4b1283512372db71b73b4beb3fb5090923aa3
| 12,897
|
py
|
Python
|
qt_widgets/slider.py
|
marl0ny/grids-on-the-complex-plane
|
7dfc635baad2c7cb13caf2b71055da18a57ea642
|
[
"MIT"
] | null | null | null |
qt_widgets/slider.py
|
marl0ny/grids-on-the-complex-plane
|
7dfc635baad2c7cb13caf2b71055da18a57ea642
|
[
"MIT"
] | null | null | null |
qt_widgets/slider.py
|
marl0ny/grids-on-the-complex-plane
|
7dfc635baad2c7cb13caf2b71055da18a57ea642
|
[
"MIT"
] | null | null | null |
"""
Slider widgets.
"""
from . import QtWidgets, QtCore, QtGui
from .labelled_line_edit import LabelWithLineEdit
from .editable_label import EditableLabel
from typing import Any, List
class Slider(QtWidgets.QSlider):
"""
Slider class
"""
def __init__(self, slider_id: Any,
orientation: QtCore.Qt.Orientation,
context: Any) -> None:
"""
Constructor.
Parameters:
slider_id: slider identification.
orientation: slider orientation.
context: the object that is using this slider.
"""
QtWidgets.QSlider.__init__(self, orientation, context)
self._slider_id = slider_id
self._observers = []
self._lim = [self.minimum(), self.maximum()]
self.setRange(0, 200)
self.valueChanged.connect(self.notify_change)
def set_observers(self, slider_observers: list) -> None:
"""
Set slider observers.
Parameters:
slider_observers: the objects that will observe this slider.
"""
self._observers = slider_observers
def add_observers(self, slider_observer: QtWidgets.QWidget) -> None:
"""
Add a slider observer.
Parameters:
slider_observer: an observer.
"""
self._observers.append(slider_observer)
def set_number_of_ticks(self, number_of_ticks: int) -> None:
"""
Set the total number of intervals in the slider.
Parameters:
number_of_ticks: total number of intervals.
"""
self.setRange(1, number_of_ticks)
def set_range(self, min_val: float, max_val: float) -> None:
"""
Set the range of the slider.
Parameters:
min_val: The lowest possible value that the slider can take.
max_val: The largest possible value that the slider can take.
"""
self._lim = [min_val, max_val]
def get_range(self) -> List[float]:
"""
Get the range of the slider.
Returns:
A list containing the
minimum and maximum value of the slider.
"""
return self._lim
def _transform(self, slider_val: int) -> float:
"""
Transform rules for the slider.
"""
lim = self._lim
slider_val = slider_val - self.minimum()
m = (lim[1] - lim[0])/(self.maximum() - self.minimum())
return m*slider_val + lim[0]
def set_value(self, value: float) -> None:
"""
Set a value for the slider.
Parameters:
value: the value to set the slider to.
"""
lim = self._lim
value = value - lim[0]
m = (self.maximum() - self.minimum())/(lim[1] - lim[0])
slider_float_value = m*value + self.minimum()
slider_value = int(slider_float_value)
if slider_float_value - slider_value > 0.5:
slider_value += 1
self.setSliderPosition(slider_value)
def notify_change(self, val: int) -> None:
"""
Notify to observers that the slider has changed.
Parameters:
val: the value that the slider changed to.
"""
val = self._transform(val)
for observer in self._observers:
observer.on_slider_changed({'value': val,
'id': self._slider_id})
def get_value(self) -> float:
"""
Get the value of the slider.
Returns:
the value of the slider.
"""
return self._transform(self.value())
def get_slider_info(self) -> dict:
"""
Get information about the slider.
Returns:
A dictionary containing information about the slider.
"""
val = self._transform(self.value())
return {'value': val, 'id': self._slider_id}
class SliderBoxRangeControls(QtWidgets.QFrame):
"""
A range control widget for the
HorizontalSliderBox class.
"""
def __init__(self, slider_lim: List[int],
number_of_ticks: int,
parent: "HorizontalSliderBox" = None) -> None:
"""
Constructor.
Parameters:
slider_lim: list containing the
initial minimum and maximum values of the slider
number_of_ticks: number of ticks of the slider.
parent: the parent HorizontalSliderBox widget.
"""
QtWidgets.QFrame.__init__(self, parent)
self._parent = parent
layout = QtWidgets.QVBoxLayout(self)
self._layout = layout
self.setLayout(layout)
self.setFrameShape(QtWidgets.QFrame.StyledPanel)
min_label = "min: "
max_label = "max: "
ticks_label = "number of ticks: "
min_label_line_edit = LabelWithLineEdit(min_label, self)
# min_label_line_edit.setFocusPolicy(QtCore.Qt.NoFocus)
min_label_line_edit.set_line_edit(str(slider_lim[0]))
max_label_line_edit = LabelWithLineEdit(max_label, self)
# max_label_line_edit.setFocusPolicy(QtCore.Qt.NoFocus)
max_label_line_edit.set_line_edit(str(slider_lim[1]))
tick_label_line_edit = LabelWithLineEdit(ticks_label, self)
# tick_label_line_edit.setFocusPolicy(QtCore.Qt.NoFocus)
tick_label_line_edit.set_line_edit(str(number_of_ticks))
layout.addWidget(min_label_line_edit)
layout.addWidget(max_label_line_edit)
layout.addWidget(tick_label_line_edit)
button = QtWidgets.QPushButton("Close")
if parent is not None:
button.clicked.connect(parent.close_range_controls)
layout.addWidget(button)
# self.setMinimumHeight(parent.height() if parent
# is not None else 100)
if parent is not None:
# parent.setMinimumHeight(2*parent.height() + self.height())
parent.setMinimumHeight(3*parent.height() + self.height())
def line_edit_returned(self, *args: Any) -> None:
"""
Perform an action when the line edit is returned.
"""
# TODO Need to improve this.
min_val = float(self._layout.itemAt(0).widget().text())
max_val = float(self._layout.itemAt(1).widget().text())
tick_val = int(self._layout.itemAt(2).widget().text())
if min_val >= max_val or tick_val <= 1 or tick_val > 65535:
return
if self._parent is not None:
value = self._parent.get_value()
self._parent.set_number_of_ticks(tick_val)
self._parent.set_range(min_val, max_val)
if value > max_val:
value = max_val
if value < min_val:
value = min_val
m = (tick_val - 1)/(max_val - min_val)
slider = self._parent.get_slider()
slider_info = self._parent.get_slider_info()
tick_float_value = m*(value - min_val)
if (tick_float_value % 1.0) >= 0.5:
tick_float_value += (1.0 -
tick_float_value % 1.0)
tick_value = int(tick_float_value + slider.minimum())
value = (tick_value - slider.minimum())/m + min_val
slider.setValue(tick_value)
slider_info['value'] = value
self._parent.on_slider_changed(slider_info)
class HorizontalSliderBox(QtWidgets.QFrame):
"""
GUI Box containing a slider as well as some other widgets.
"""
def __init__(self, context: Any,
slider_id: Any) -> None:
"""
Constructor.
Parameters:
context: the object that is using the widget.
slider_id: the id of the slider.
"""
# QtWidgets.QGroupBox.__init__(self)
QtWidgets.QFrame.__init__(self)
self.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.setMinimumWidth(250)
self.setMaximumWidth(300)
self.setMaximumHeight(100)
self._enable_range_controls = True
self._range_controls = None
self._varname_equals_string = "%s ="
self._string_format = self._varname_equals_string + " %.2f"
self._number_format = "%.2f"
self._layout = QtWidgets.QVBoxLayout()
self._label = EditableLabel("Set " + str(slider_id), parent=self)
self._slider = Slider(slider_id,
QtCore.Qt.Horizontal,
context)
self._layout.addWidget(self._label)
self._layout.addWidget(self._slider)
self.setLayout(self._layout)
def set_range(self, min_val: float, max_val: float) -> None:
"""
Set the range of the slider.
Parameters:
min_val: The lowest possible value that the slider can take.
max_val: The largest possible value that the slider can take.
"""
self._slider.set_range(min_val, max_val)
def set_number_of_ticks(self, number_of_ticks: int) -> None:
"""
Set the total number of intervals in the slider.
Parameters:
number_of_ticks: total number of intervals.
"""
self._slider.setRange(0, number_of_ticks - 1)
def get_slider(self) -> Slider:
"""
Getter for the slider.
Returns:
the slider.
"""
return self._slider
def set_value(self, value: float) -> None:
"""
Set a value for the slider.
Parameters:
value: the value to set the slider to.
"""
self._slider.set_value(value)
def get_value(self) -> float:
"""
Get the slider value.
Returns:
the slider value.
"""
return self._slider.get_value()
def set_observers(self,
slider_observers: list) -> None:
"""
Set slider observers.
Parameters:
slider_observers: the objects that will observe the slider.
"""
slider_observers.append(self)
self._slider.set_observers(slider_observers)
def set_value_string_format(self, string_format: str) -> None:
"""
Set the value string format.
Parameters:
string format: the string format to display the value
of the slider.
"""
self._number_format = string_format
self._string_format = self._varname_equals_string + ' ' + string_format
def on_slider_changed(self, slider_input: dict) -> None:
"""
Respond to changes in the slider.
Parameters:
slider_input: the changes from the slider.
"""
val = slider_input['value']
slider_id = slider_input['id']
self._label.set_line_edit_label(self._varname_equals_string
% slider_id)
self._label.setCurrentIndex(0)
self._label.set_line_edit(self._number_format % val)
self._label.setText(self._string_format % (slider_id, val))
def destroy_slider(self) -> None:
"""
Destroy the slider.
"""
self._layout.removeWidget(self._slider)
self._slider.destroy()
self._slider.close()
self.close()
def get_slider_info(self) -> dict:
"""
Get information about the slider.
Returns:
A dictionary containing information about the slider.
"""
return self._slider.get_slider_info()
def mousePressEvent(self, qt_event: QtGui.QMouseEvent) -> None:
"""
Respond to a mouse press event.
Parameters:
qt_event: the mouse event.
"""
if (self._enable_range_controls and
qt_event.buttons() == QtCore.Qt.RightButton
and not self._range_controls):
self._show_range_controls = True
q = QtWidgets.QMenu("menu", self)
q.addAction("Set range", self.build_range_controls)
q.exec_(QtCore.QPoint(QtGui.QCursor.pos()))
def toggle_range_controls(self) -> None:
"""
Toggle the range controls.
"""
self._enable_range_controls = \
not self._enable_range_controls
def build_range_controls(self, *arg: Any) -> None:
"""
Build the range control widgets.
"""
self.setMaximumHeight(220)
slider_lim = self._slider.get_range()
n_ticks = self._slider.maximum() \
- self._slider.minimum() + 1
self._range_controls = \
SliderBoxRangeControls(slider_lim, n_ticks, self)
self._layout.addWidget(self._range_controls)
def close_range_controls(self) -> None:
"""
Close the range control widgets.
"""
self.setMinimumHeight(0)
self.setMaximumHeight(100)
self._range_controls.line_edit_returned()
self._range_controls.close()
self._range_controls = None
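# --- Editor's illustrative sketch (not part of the original module) ---
# Minimal wiring of a HorizontalSliderBox to an observer. An observer only
# needs an ``on_slider_changed(info)`` method; ``info`` is the dict emitted by
# ``Slider.notify_change`` with ``'value'`` and ``'id'`` keys. ``DemoObserver``
# is a hypothetical name; in practice this module is imported as part of the
# qt_widgets package rather than run directly.
if __name__ == '__main__':
    import sys

    class DemoObserver:
        def on_slider_changed(self, info: dict) -> None:
            print("slider {} -> {:.2f}".format(info['id'], info['value']))

    app = QtWidgets.QApplication(sys.argv)
    box = HorizontalSliderBox(None, 'k')
    box.set_range(0.0, 10.0)
    box.set_number_of_ticks(201)
    box.set_observers([DemoObserver()])
    box.show()
    sys.exit(app.exec_())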
| 32.568182
| 79
| 0.593781
|
8bbb539954a8fd14a57150a79423cf45c7c0a58c
| 2,660
|
py
|
Python
|
src/dlqmc/mplext.py
|
noegroup/dlqmc-project
|
ed7561ec0156df6d6309e49c1276646173ec8641
|
[
"MIT"
] | 3
|
2020-12-22T16:26:36.000Z
|
2021-08-11T16:54:46.000Z
|
src/dlqmc/mplext.py
|
noegroup/dlqmc-project
|
ed7561ec0156df6d6309e49c1276646173ec8641
|
[
"MIT"
] | 5
|
2020-07-26T23:13:16.000Z
|
2020-07-26T23:13:45.000Z
|
src/dlqmc/mplext.py
|
noegroup/dlqmc-project
|
ed7561ec0156df6d6309e49c1276646173ec8641
|
[
"MIT"
] | 1
|
2021-06-18T05:00:39.000Z
|
2021-06-18T05:00:39.000Z
|
import matplotlib as mpl
import matplotlib.scale
import matplotlib.ticker
import matplotlib.transforms
import numpy as np
def corr_ene_tf(a):
with np.errstate(divide='ignore', invalid='ignore'):
out = -np.log10(1 - a)
out = np.where(a >= 1, 10, out)
return out
def corr_ene_inv_tf(a):
return 1 - 10 ** (-a)
class CorrelationEnergyTransform(mpl.transforms.Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def transform_non_affine(self, a):
return corr_ene_tf(a)
def inverted(self):
return InvertedCorrelationEnergyTransform()
class InvertedCorrelationEnergyTransform(mpl.transforms.Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def transform_non_affine(self, a):
return corr_ene_inv_tf(a)
def inverted(self):
return CorrelationEnergyTransform()
class CorrelationEnergyLocator(mpl.ticker.Locator):
def __init__(self, subs=1):
self.subs = subs
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
vmin = np.floor(corr_ene_tf(vmin))
vmax = np.ceil(corr_ene_tf(vmax))
bases = np.arange(vmin, vmax + 1e-10)
decades = corr_ene_inv_tf(bases)
ticks = np.concatenate(
[
np.arange(decades[i], decades[i + 1], 10 ** -bases[i] / self.subs)
for i in range(len(decades) - 1)
]
)
return ticks
def view_limits(self, vmin, vmax):
lims = corr_ene_tf(np.array([vmin, vmax]))
rng = lims[1] - lims[0]
lims = np.array([lims[0] - 0.05 * rng, lims[1] + 0.05 * rng])
return tuple(corr_ene_inv_tf(lims))
class CorrelationEnergyFormatter(mpl.ticker.Formatter):
def __call__(self, x, pos=None):
acc = max(0, int(np.round(corr_ene_tf(x))) - 2)
return f'{100 * x:.{acc}f}%'
class CorrelationEnegryScale(mpl.scale.ScaleBase):
name = 'corr_energy'
def __init__(self, axis, subs=10):
self.subs = subs
def get_transform(self):
return CorrelationEnergyTransform()
def set_default_locators_and_formatters(self, axis):
axis.set_major_locator(CorrelationEnergyLocator())
axis.set_minor_locator(CorrelationEnergyLocator(self.subs))
axis.set_major_formatter(CorrelationEnergyFormatter())
def limit_range_for_scale(self, vmin, vmax, minpos):
return min(vmin, 1 - 1e-10), min(vmax, 1 - 1e-10)
mpl.scale.register_scale(CorrelationEnegryScale)
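# --- Editor's illustrative sketch (not part of the original module) ---
# After registration the scale can be selected by its ``name`` on any axis; a
# value of 0.99 then sits two "decades" up (-log10(1 - 0.99) = 2) and is shown
# as a percentage tick label. A minimal sketch, assuming pyplot is available:
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.plot([1, 2, 3, 4], [0.9, 0.99, 0.999, 0.9999], 'o-')
    ax.set_yscale('corr_energy')
    plt.show()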
| 27.142857
| 82
| 0.649248
|
890d458c330348b158dace8db272ca96bf6d74d5
| 520
|
py
|
Python
|
scvi/model/__init__.py
|
SarahND97/scvi-tools
|
fbb4acf72b09cef6e4a9465255a7f95caf3f3eb5
|
[
"BSD-3-Clause"
] | null | null | null |
scvi/model/__init__.py
|
SarahND97/scvi-tools
|
fbb4acf72b09cef6e4a9465255a7f95caf3f3eb5
|
[
"BSD-3-Clause"
] | null | null | null |
scvi/model/__init__.py
|
SarahND97/scvi-tools
|
fbb4acf72b09cef6e4a9465255a7f95caf3f3eb5
|
[
"BSD-3-Clause"
] | null | null | null |
from ._amortizedlda import AmortizedLDA
from ._autozi import AUTOZI
from ._condscvi import CondSCVI
from ._destvi import DestVI
from ._hybridvi import HYBRIDVI
from ._linear_scvi import LinearSCVI
from ._multivi import MULTIVI
from ._peakvi import PEAKVI
from ._scanvi import SCANVI
from ._scvi import SCVI
from ._totalvi import TOTALVI
__all__ = [
"SCVI",
"TOTALVI",
"LinearSCVI",
"AUTOZI",
"SCANVI",
"PEAKVI",
"CondSCVI",
"DestVI",
"MULTIVI",
"AmortizedLDA",
"HYBRIDVI"
]
| 19.259259
| 39
| 0.709615
|
ec70417fd415953004a86eb97ef2732c6d1a1ee8
| 466
|
py
|
Python
|
src/computeCentroid.py
|
CANGA/MIRA
|
2f1214d34b884790fa8660b5208cd12495800f92
|
[
"BSD-3-Clause"
] | 2
|
2019-04-23T20:28:50.000Z
|
2021-08-12T15:09:49.000Z
|
src/computeCentroid.py
|
CANGA/Remapping-Intercomparison
|
2f1214d34b884790fa8660b5208cd12495800f92
|
[
"BSD-3-Clause"
] | 10
|
2020-03-18T17:08:39.000Z
|
2021-08-15T21:09:25.000Z
|
src/computeCentroid.py
|
CANGA/Remapping-Intercomparison
|
2f1214d34b884790fa8660b5208cd12495800f92
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 25 16:28:19 2019
@author: TempestGuerra
"""
import numpy as np
def computeCentroid(NP, cell):
# Centroid by averaging corner sphere-center vectors
centroid = np.mat([0.0, 0.0, 0.0])
for pp in range(NP):
centroid += cell[:, pp]
centroid *= 1.0 / NP
# Renormalize the centroid vector
RO = np.linalg.norm(centroid)
centroid *= 1.0 / RO
return centroid
| 18.64
| 56
| 0.622318
|
ad176f8936e9c20e41422f4eeb588e62d513965d
| 4,542
|
py
|
Python
|
grand/backends/backend.py
|
aplbrain/grand
|
d85669df17a40834a13478ae200e984e13b41650
|
[
"Apache-2.0"
] | 31
|
2020-10-16T16:46:02.000Z
|
2022-03-04T20:45:05.000Z
|
grand/backends/backend.py
|
aplbrain/grand
|
d85669df17a40834a13478ae200e984e13b41650
|
[
"Apache-2.0"
] | 15
|
2020-10-15T16:28:49.000Z
|
2022-02-10T16:41:32.000Z
|
grand/backends/backend.py
|
aplbrain/grand
|
d85669df17a40834a13478ae200e984e13b41650
|
[
"Apache-2.0"
] | null | null | null |
from typing import Hashable, Iterable
import abc
import pandas as pd
class Backend(abc.ABC):
"""
Abstract base class for the management of persisted graph structure.
Do not use this class directly.
"""
def __init__(self, directed: bool = False):
"""
Create a new Backend instance.
Arguments:
directed (bool: False): Whether to make the backend graph directed
Returns:
None
"""
...
def ingest_from_edgelist_dataframe(
self, edgelist: pd.DataFrame, source_column: str, target_column: str
) -> None:
"""
Ingest an edgelist from a Pandas DataFrame.
"""
...
def is_directed(self) -> bool:
"""
Return True if the backend graph is directed.
Arguments:
None
Returns:
bool: True if the backend graph is directed.
"""
...
def add_node(self, node_name: Hashable, metadata: dict):
"""
Add a new node to the graph.
Arguments:
node_name (Hashable): The ID of the node
metadata (dict: None): An optional dictionary of metadata
Returns:
Hashable: The ID of this node, as inserted
"""
...
def get_node_by_id(self, node_name: Hashable):
"""
Return the data associated with a node.
Arguments:
node_name (Hashable): The node ID to look up
Returns:
dict: The metadata associated with this node
"""
...
def all_nodes_as_iterable(self, include_metadata: bool = False) -> Iterable:
"""
Get a generator of all of the nodes in this graph.
Arguments:
include_metadata (bool: False): Whether to include node metadata in
the response
Returns:
Generator: A generator of all nodes (arbitrary sort)
"""
...
def has_node(self, u: Hashable) -> bool:
"""
Return true if the node exists in the graph.
Arguments:
u (Hashable): The ID of the node to check
Returns:
bool: True if the node exists
"""
...
def add_edge(self, u: Hashable, v: Hashable, metadata: dict):
"""
Add a new edge to the graph between two nodes.
If the graph is directed, this edge will start (source) at the `u` node
and end (target) at the `v` node.
Arguments:
u (Hashable): The source node ID
v (Hashable): The target node ID
metadata (dict): Optional metadata to associate with the edge
Returns:
Hashable: The edge ID, as inserted.
"""
...
def all_edges_as_iterable(self, include_metadata: bool = False) -> Iterable:
"""
Get a list of all edges in this graph, arbitrary sort.
Arguments:
include_metadata (bool: False): Whether to include edge metadata
Returns:
Generator: A generator of all edges (arbitrary sort)
"""
...
def get_edge_by_id(self, u: Hashable, v: Hashable):
"""
Get an edge by its source and target IDs.
Arguments:
u (Hashable): The source node ID
v (Hashable): The target node ID
Returns:
dict: Metadata associated with this edge
"""
...
def get_node_successors(
self, u: Hashable, include_metadata: bool = False
) -> Iterable:
return self.get_node_neighbors(u, include_metadata)
def get_node_neighbors(
self, u: Hashable, include_metadata: bool = False
) -> Iterable:
"""
Get a generator of all downstream nodes from this node.
Arguments:
u (Hashable): The source node ID
Returns:
Generator
"""
...
def get_node_predecessors(
self, u: Hashable, include_metadata: bool = False
) -> Iterable:
"""
Get a generator of all upstream nodes from this node.
Arguments:
u (Hashable): The source node ID
Returns:
Generator
"""
...
def get_node_count(self) -> Iterable:
"""
Get an integer count of the number of nodes in this graph.
Arguments:
None
Returns:
int: The count of nodes
"""
return len([i for i in self.all_nodes_as_iterable()])
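# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal in-memory subclass showing the shape of the contract above. This
# is only a sketch for illustration; grand ships its own concrete backends,
# which should be preferred in practice.
class _InMemoryBackend(Backend):
    def __init__(self, directed: bool = False):
        self._directed = directed
        self._nodes = {}   # node id -> metadata dict
        self._edges = {}   # (u, v) -> metadata dict

    def is_directed(self) -> bool:
        return self._directed

    def add_node(self, node_name: Hashable, metadata: dict):
        self._nodes[node_name] = metadata or {}
        return node_name

    def get_node_by_id(self, node_name: Hashable):
        return self._nodes[node_name]

    def has_node(self, u: Hashable) -> bool:
        return u in self._nodes

    def all_nodes_as_iterable(self, include_metadata: bool = False) -> Iterable:
        return (
            (n, m) if include_metadata else n
            for n, m in self._nodes.items()
        )

    def add_edge(self, u: Hashable, v: Hashable, metadata: dict):
        key = (u, v)
        self._edges[key] = metadata or {}
        return key

    def get_edge_by_id(self, u: Hashable, v: Hashable):
        return self._edges[(u, v)]

    def get_node_neighbors(self, u: Hashable, include_metadata: bool = False) -> Iterable:
        return (v for (s, v) in self._edges if s == u)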
| 23.292308
| 80
| 0.551299
|
f899e885a5bd42f51ccaf91875bccedc713efb56
| 1,181
|
py
|
Python
|
setup.py
|
teresam856/jbrowse-jupyter
|
977038e160cb6cb876aa6eb4467ed199c40c8807
|
[
"Apache-2.0"
] | 2
|
2021-11-10T23:07:51.000Z
|
2022-01-26T09:14:33.000Z
|
setup.py
|
teresam856/jbrowse-jupyter
|
977038e160cb6cb876aa6eb4467ed199c40c8807
|
[
"Apache-2.0"
] | 10
|
2021-11-08T22:28:01.000Z
|
2021-12-07T08:09:13.000Z
|
setup.py
|
teresam856/jbrowse-jupyter
|
977038e160cb6cb876aa6eb4467ed199c40c8807
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import setuptools
with open('requirements.txt') as f:
requires = f.read().splitlines()
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
# TODO: figure out install requires vs requirements.txt and look up classifiers
setuptools.setup(
name="jbrowse-jupyter",
version="0.0.2",
author="Teresa De Jesus Martinez",
author_email="tere486martinez@gmail.com",
maintainer="Teresa De Jesus Martinez; JBrowse Team",
maintainer_email="tere486martinez@gmail.com",
description="Jupyter interface to the JBrowse's Linear Genome View",
license="Apache-2.0",
include_package_data=True,
long_description=long_description,
install_requires=requires,
long_description_content_type="text/markdown",
url="https://github.com/teresam856/jbrowse-jupyter",
project_urls={
"Bug Tracker": "https://github.com/teresam856/jbrowse-jupyter/issues",
},
packages=['jbrowse_jupyter'],
python_requires=">=3.8",
classifiers=[
"Development Status :: 1 - Planning",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering",
],
)
| 32.805556
| 79
| 0.690093
|
280648b2a97664e8026a1eaefd6f52e3dcf1bd56
| 5,415
|
py
|
Python
|
data/p3BR/R2/benchmark/startQiskit_noisy64.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p3BR/R2/benchmark/startQiskit_noisy64.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p3BR/R2/benchmark/startQiskit_noisy64.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=3
# total number=10
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
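# --- Editor's note (illustrative, not part of the original script) ---
# Example behaviour of the two helpers above:
#   bitwise_dot('011', '110') == '1'    # 0*1 + 1*1 + 1*0 = 1 (mod 2)
#   bitwise_xor('011', '110') == '101'  # elementwise XOR, result reversed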
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.cx(input_qubit[0],input_qubit[2]) # number=7
prog.x(input_qubit[2]) # number=8
prog.cx(input_qubit[0],input_qubit[2]) # number=9
prog.cx(input_qubit[2],input_qubit[1]) # number=6
prog.cx(input_qubit[2],input_qubit[1]) # number=4
prog.z(input_qubit[2]) # number=3
prog.y(input_qubit[2]) # number=5
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
    sample_shot = 4000
writefile = open("../data/startQiskit_noisy64.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 29.112903
| 140
| 0.628624
|
64b03d7997f13938552987c62ce5a666a2801b1f
| 167
|
py
|
Python
|
test/python/input_readlines/wsgi.py
|
afxcn/unit
|
a336928e1027af92d0c9bb2ccb369a3f9b53abae
|
[
"Apache-2.0"
] | 2,633
|
2017-09-06T16:10:12.000Z
|
2022-03-24T07:18:45.000Z
|
test/python/input_readlines/wsgi.py
|
afxcn/unit
|
a336928e1027af92d0c9bb2ccb369a3f9b53abae
|
[
"Apache-2.0"
] | 637
|
2017-09-06T23:43:11.000Z
|
2022-03-31T19:28:46.000Z
|
test/python/input_readlines/wsgi.py
|
afxcn/unit
|
a336928e1027af92d0c9bb2ccb369a3f9b53abae
|
[
"Apache-2.0"
] | 365
|
2017-09-06T22:39:55.000Z
|
2022-03-29T13:06:38.000Z
|
def application(environ, start_response):
body = environ['wsgi.input'].readlines()
start_response('200', [('X-Lines-Count', str(len(body)))])
return body
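# --- Editor's note (illustrative, not part of the original test app) ---
# Posting a body such as b"line one\nline two\n" makes readlines() return two
# items, so the app responds with the header "X-Lines-Count: 2" and echoes the
# body back unchanged.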
| 27.833333
| 62
| 0.670659
|
255da472decf04a2e37315aca01e6a74140fae99
| 647
|
py
|
Python
|
custom_metrics.py
|
ivanlen/time_series_classification_and_ensembles
|
0a1d814927257a8c60fba332e7be339d46c40e00
|
[
"MIT"
] | 19
|
2018-12-05T20:34:27.000Z
|
2022-03-26T09:28:36.000Z
|
custom_metrics.py
|
ivanlen/time_series_classification_and_ensembles
|
0a1d814927257a8c60fba332e7be339d46c40e00
|
[
"MIT"
] | null | null | null |
custom_metrics.py
|
ivanlen/time_series_classification_and_ensembles
|
0a1d814927257a8c60fba332e7be339d46c40e00
|
[
"MIT"
] | 12
|
2019-01-21T07:33:04.000Z
|
2021-11-28T21:14:35.000Z
|
import tensorflow as tf
def as_keras_metric(method):
import functools
from keras import backend as K
import tensorflow as tf
@functools.wraps(method)
def wrapper(self, args, **kwargs):
""" Wrapper for turning tensorflow metrics into keras metrics """
value, update_op = method(self, args, **kwargs)
K.get_session().run(tf.local_variables_initializer())
with tf.control_dependencies([update_op]):
value = tf.identity(value)
return value
return wrapper
def auc_roc():
return as_keras_metric(tf.metrics.auc)
def recall():
return as_keras_metric(tf.metrics.recall)
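# --- Editor's note (illustrative, not part of the original module) ---
# The wrapper above keeps the unusual ``(self, args, **kwargs)`` signature of
# the original snippet: Keras calls a metric as ``metric(y_true, y_pred)``, so
# ``self`` receives ``y_true`` and ``args`` receives ``y_pred``. A minimal
# usage sketch (``model`` is a hypothetical compile-ready Keras model):
#
#     model.compile(optimizer='adam',
#                   loss='binary_crossentropy',
#                   metrics=[auc_roc(), recall()])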
| 30.809524
| 73
| 0.684699
|
c1c61db90c59e1732d29c608351ecc801bc95791
| 1,702
|
py
|
Python
|
ooobuild/lo/ucb/x_property_set_registry_factory.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/ucb/x_property_set_registry_factory.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/ucb/x_property_set_registry_factory.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.ucb
import typing
from abc import abstractmethod
from ..uno.x_interface import XInterface as XInterface_8f010a43
if typing.TYPE_CHECKING:
from .x_property_set_registry import XPropertySetRegistry as XPropertySetRegistry_c2e0e84
class XPropertySetRegistryFactory(XInterface_8f010a43):
"""
A factory for property set registries.
See Also:
`API XPropertySetRegistryFactory <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1ucb_1_1XPropertySetRegistryFactory.html>`_
"""
__ooo_ns__: str = 'com.sun.star.ucb'
__ooo_full_ns__: str = 'com.sun.star.ucb.XPropertySetRegistryFactory'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.ucb.XPropertySetRegistryFactory'
@abstractmethod
def createPropertySetRegistry(self, URL: str) -> 'XPropertySetRegistry_c2e0e84':
"""
creates a property set registry.
"""
__all__ = ['XPropertySetRegistryFactory']
| 36.212766
| 156
| 0.758519
|
2f33e381b3a5049bc342f8772b167fe36bf9654f
| 998
|
gyp
|
Python
|
gpu/command_buffer/command_buffer_nacl.gyp
|
iplo/Chain
|
8bc8943d66285d5258fffc41bed7c840516c4422
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 231
|
2015-01-08T09:04:44.000Z
|
2021-12-30T03:03:10.000Z
|
gpu/command_buffer/command_buffer_nacl.gyp
|
JasonEric/chromium
|
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2017-02-14T21:55:58.000Z
|
2017-02-14T21:55:58.000Z
|
gpu/command_buffer/command_buffer_nacl.gyp
|
JasonEric/chromium
|
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 268
|
2015-01-21T05:53:28.000Z
|
2022-03-25T22:09:01.000Z
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'includes': [
'../../build/common_untrusted.gypi',
'command_buffer.gypi',
],
'conditions': [
['disable_nacl==0 and disable_nacl_untrusted==0', {
'targets': [
{
'target_name': 'gles2_utils_nacl',
'type': 'none',
'variables': {
'gles2_utils_target': 1,
'nacl_untrusted_build': 1,
'nlib_target': 'libgles2_utils_nacl.a',
'build_glibc': 0,
'build_newlib': 0,
'build_irt': 1,
},
'dependencies': [
'../../native_client/tools.gyp:prep_toolchain',
'../../base/base_untrusted.gyp:base_untrusted',
'../../third_party/khronos/khronos.gyp:khronos_headers',
],
},
],
}],
],
}
| 26.972973
| 72
| 0.532064
|
b495b6699b5d02ca8c466c984820be5c497d626e
| 679
|
py
|
Python
|
python/paddle/fluid/trainer.py
|
xuezhong/Paddle
|
be9ec5208160bfed02e767bdb23db5aba9cf5eb0
|
[
"Apache-2.0"
] | 2
|
2019-04-03T05:36:17.000Z
|
2020-04-29T03:38:54.000Z
|
python/paddle/fluid/trainer.py
|
xuezhong/Paddle
|
be9ec5208160bfed02e767bdb23db5aba9cf5eb0
|
[
"Apache-2.0"
] | 1
|
2016-12-22T10:52:40.000Z
|
2016-12-22T13:28:20.000Z
|
python/paddle/fluid/trainer.py
|
xuezhong/Paddle
|
be9ec5208160bfed02e767bdb23db5aba9cf5eb0
|
[
"Apache-2.0"
] | 3
|
2019-01-07T06:50:29.000Z
|
2019-03-13T08:48:23.000Z
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: Trainer is moved into fluid.contrib.trainer.
__all__ = []
| 39.941176
| 74
| 0.756996
|
2ba293c5dfc3783f449b8bc6e0b060a90a4d0c3e
| 1,708
|
py
|
Python
|
examples/demo_meta_codepy.py
|
developmentseed/pyopencl
|
a36176cda33d125fe9cfb2b3221cbdee4cd81b03
|
[
"Apache-2.0"
] | null | null | null |
examples/demo_meta_codepy.py
|
developmentseed/pyopencl
|
a36176cda33d125fe9cfb2b3221cbdee4cd81b03
|
[
"Apache-2.0"
] | null | null | null |
examples/demo_meta_codepy.py
|
developmentseed/pyopencl
|
a36176cda33d125fe9cfb2b3221cbdee4cd81b03
|
[
"Apache-2.0"
] | null | null | null |
import pyopencl as cl
import numpy
import numpy.linalg as la
local_size = 256
thread_strides = 32
macroblock_count = 33
dtype = numpy.float32
total_size = local_size*thread_strides*macroblock_count
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
a = numpy.random.randn(total_size).astype(dtype)
b = numpy.random.randn(total_size).astype(dtype)
mf = cl.mem_flags
a_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a)
b_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=b)
c_buf = cl.Buffer(ctx, mf.WRITE_ONLY, b.nbytes)
from cgen import FunctionBody, \
FunctionDeclaration, Typedef, POD, Value, \
Pointer, Module, Block, Initializer, Assign, Const
from cgen.opencl import CLKernel, CLGlobal, \
CLRequiredWorkGroupSize
mod = Module([
FunctionBody(
CLKernel(CLRequiredWorkGroupSize((local_size,),
FunctionDeclaration(
Value("void", "add"),
arg_decls=[CLGlobal(Pointer(Const(POD(dtype, name))))
for name in ["tgt", "op1", "op2"]]))),
Block([
Initializer(POD(numpy.int32, "idx"),
"get_local_id(0) + %d * get_group_id(0)"
% (local_size*thread_strides))
]+[
Assign(
"tgt[idx+%d]" % (o*local_size),
"op1[idx+%d] + op2[idx+%d]" % (
o*local_size,
o*local_size))
for o in range(thread_strides)]))])
knl = cl.Program(ctx, str(mod)).build().add
knl(queue, (local_size*macroblock_count,), (local_size,),
c_buf, a_buf, b_buf)
c = numpy.empty_like(a)
cl.enqueue_copy(queue, c, c_buf).wait()
assert la.norm(c-(a+b)) == 0
| 29.964912
| 66
| 0.622365
|
cd642e316f035785758dd38ced43b59535bda9fe
| 398
|
py
|
Python
|
codango/userprofile/migrations/0003_auto_20151118_1454.py
|
andela-ooshodi/codango-debug
|
fa68f4305586c2d7f28307f10204c3b50f731fef
|
[
"MIT"
] | null | null | null |
codango/userprofile/migrations/0003_auto_20151118_1454.py
|
andela-ooshodi/codango-debug
|
fa68f4305586c2d7f28307f10204c3b50f731fef
|
[
"MIT"
] | null | null | null |
codango/userprofile/migrations/0003_auto_20151118_1454.py
|
andela-ooshodi/codango-debug
|
fa68f4305586c2d7f28307f10204c3b50f731fef
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('userprofile', '0002_auto_20151118_1451'),
]
operations = [
migrations.RenameField(
model_name='userprofile',
old_name='fb_id',
new_name='social_id',
),
]
| 19.9
| 51
| 0.605528
|
9fbce803ce7c6a336c7f168b5116921d65f42ad3
| 2,557
|
py
|
Python
|
tests/test_parser.py
|
ninoNinkovic/python-edl
|
c7f5cbb524194a070d892137a46902f7a89a930a
|
[
"MIT"
] | 3
|
2018-02-16T13:10:31.000Z
|
2021-03-09T15:51:19.000Z
|
tests/test_parser.py
|
ninoNinkovic/python-edl
|
c7f5cbb524194a070d892137a46902f7a89a930a
|
[
"MIT"
] | null | null | null |
tests/test_parser.py
|
ninoNinkovic/python-edl
|
c7f5cbb524194a070d892137a46902f7a89a930a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""
Module EDL
unit test suite
"""
import unittest
from edl import Parser
class ParserTestCase(unittest.TestCase):
"""tests the Parser
"""
def runTest(self):
print "Running Parser Tests"
self.test_pal()
self.test_ntsc()
self.test_24fps()
self.test_2398fps()
def test_24fps(self):
p = Parser('24')
with open('tests/test_data/test_24.edl') as f:
s = p.parse(f)
self.assertEqual(s.events[0].clip_name, 'clip 1',
'Failed clip name test')
self.assertEqual(s.events[0].src_length(), 1440,
'Wrong source frame length')
self.assertEqual(s.events[0].rec_length(), 1440,
'Wrong record frame length')
self.assertEqual(s.events[0].src_end_tc.frame_number, 87840,
'Wrong source end timecode')
self.assertEqual(s.events[0].rec_start_tc.frame_number, 0,
'Wrong record start timecode')
self.assertEqual(s.events[0].rec_end_tc.frame_number, 1440,
'Wrong record end timecode')
self.assertEqual(s.events[1].clip_name, 'clip #2',
'Failed clip name test char 2')
self.assertEqual(s.events[2].clip_name, 'clip -3',
'Failed clip name test char 3')
self.assertEqual(s.events[3].clip_name, 'clip $4',
'Failed clip name test char 4')
self.assertEqual(s.events[4].clip_name, 'clip &5',
'Failed clip name test char 5')
self.assertEqual(s.events[5].src_start_tc.frame_number, 697,
"Wrong Source start complex event")
self.assertEqual(s.events[5].src_end_tc.frame_number, 697,
"Wrong Source end complex event")
self.assertEqual(s.events[5].rec_start_tc.frame_number, 2857,
"Wrong Source start complex event")
self.assertEqual(s.events[5].rec_end_tc.frame_number, 2857,
"Wrong Source end complex event")
def test_pal(self):
p = Parser('25')
with open('tests/test_data/test_25.edl') as f:
s = p.parse(f)
def test_ntsc(self):
p = Parser('29.97')
with open('tests/test_data/test_2997NDF.edl') as f:
s = p.parse(f)
def test_2398fps(self):
p = Parser('23.98')
with open('tests/test_data/test_2398.edl') as f:
s = p.parse(f)
| 36.528571
| 69
| 0.558467
|
77fafa8a5f21ae33c7c956b171e9430478fe08bd
| 14,711
|
py
|
Python
|
mmseg/models/decode_heads/point_head.py
|
shuaizzZ/mmsegmentation
|
a6c6b348dbf8c4a0a39ffbdb832a1e82309c533c
|
[
"Apache-2.0"
] | null | null | null |
mmseg/models/decode_heads/point_head.py
|
shuaizzZ/mmsegmentation
|
a6c6b348dbf8c4a0a39ffbdb832a1e82309c533c
|
[
"Apache-2.0"
] | null | null | null |
mmseg/models/decode_heads/point_head.py
|
shuaizzZ/mmsegmentation
|
a6c6b348dbf8c4a0a39ffbdb832a1e82309c533c
|
[
"Apache-2.0"
] | null | null | null |
# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, normal_init
# TODO: not supported on win10, 2020.11.26
from mmcv.ops import point_sample
from mmseg.models.builder import HEADS
from mmseg.ops import resize
from ..losses import accuracy
from .cascade_decode_head import BaseCascadeDecodeHead
def calculate_uncertainty(seg_logits):
"""Estimate uncertainty based on seg logits.
For each location of the prediction ``seg_logits`` we estimate
uncertainty as the difference between top first and top second
predicted logits.
Args:
seg_logits (Tensor): Semantic segmentation logits,
shape (batch_size, num_classes, height, width).
Returns:
scores (Tensor): T uncertainty scores with the most uncertain
locations having the highest uncertainty score, shape (
batch_size, 1, height, width)
"""
top2_scores = torch.topk(seg_logits, k=2, dim=1)[0]
return (top2_scores[:, 1] - top2_scores[:, 0]).unsqueeze(1)
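# --- Editor's note (illustrative, not part of the original file) ---
# Example: for a location with per-class logits [2.0, 5.0, 1.0], the top-2
# values are 5.0 and 2.0, so the score is 2.0 - 5.0 = -3.0. Locations whose
# two best classes are nearly tied score close to 0, i.e. they are the most
# uncertain and are ranked highest by this function.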
@HEADS.register_module()
class PointHead(BaseCascadeDecodeHead):
"""A mask point head use in PointRend.
``PointHead`` use shared multi-layer perceptron (equivalent to
nn.Conv1d) to predict the logit of input points. The fine-grained feature
and coarse feature will be concatenate together for predication.
Args:
num_fcs (int): Number of fc layers in the head. Default: 3.
in_channels (int): Number of input channels. Default: 256.
fc_channels (int): Number of fc channels. Default: 256.
num_classes (int): Number of classes for logits. Default: 80.
class_agnostic (bool): Whether use class agnostic classification.
If so, the output channels of logits will be 1. Default: False.
coarse_pred_each_layer (bool): Whether concatenate coarse feature with
the output of each fc layer. Default: True.
conv_cfg (dict|None): Dictionary to construct and config conv layer.
Default: dict(type='Conv1d'))
norm_cfg (dict|None): Dictionary to construct and config norm layer.
Default: None.
loss_point (dict): Dictionary to construct and config loss layer of
point head. Default: dict(type='CrossEntropyLoss', use_mask=True,
loss_weight=1.0).
"""
def __init__(self,
num_fcs=3,
coarse_pred_each_layer=True,
conv_cfg=dict(type='Conv1d'),
norm_cfg=None,
act_cfg=dict(type='ReLU', inplace=False),
**kwargs):
super(PointHead, self).__init__(
input_transform='multiple_select',
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
**kwargs)
self.num_fcs = num_fcs
self.coarse_pred_each_layer = coarse_pred_each_layer
fc_in_channels = sum(self.in_channels) + self.num_classes
fc_channels = self.channels
self.fcs = nn.ModuleList()
for k in range(num_fcs):
fc = ConvModule(
fc_in_channels,
fc_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.fcs.append(fc)
fc_in_channels = fc_channels
fc_in_channels += self.num_classes if self.coarse_pred_each_layer \
else 0
self.fc_seg = nn.Conv1d(
fc_in_channels,
self.num_classes,
kernel_size=1,
stride=1,
padding=0)
if self.dropout_ratio > 0:
self.dropout = nn.Dropout(self.dropout_ratio)
delattr(self, 'conv_seg')
def init_weights(self):
"""Initialize weights of classification layer."""
normal_init(self.fc_seg, std=0.001)
def cls_seg(self, feat):
"""Classify each pixel with fc."""
if self.dropout is not None:
feat = self.dropout(feat)
output = self.fc_seg(feat)
return output
def forward(self, fine_grained_point_feats, coarse_point_feats):
x = torch.cat([fine_grained_point_feats, coarse_point_feats], dim=1)
for fc in self.fcs:
x = fc(x)
if self.coarse_pred_each_layer:
x = torch.cat((x, coarse_point_feats), dim=1)
return self.cls_seg(x)
def _get_fine_grained_point_feats(self, x, points):
"""Sample from fine grained features.
Args:
x (list[Tensor]): Feature pyramid from by neck or backbone.
points (Tensor): Point coordinates, shape (batch_size,
num_points, 2).
Returns:
fine_grained_feats (Tensor): Sampled fine grained feature,
shape (batch_size, sum(channels of x), num_points).
"""
fine_grained_feats_list = [
point_sample(_, points, align_corners=self.align_corners)
for _ in x
]
if len(fine_grained_feats_list) > 1:
fine_grained_feats = torch.cat(fine_grained_feats_list, dim=1)
else:
fine_grained_feats = fine_grained_feats_list[0]
return fine_grained_feats
def _get_coarse_point_feats(self, prev_output, points):
"""Sample from fine grained features.
Args:
prev_output (list[Tensor]): Prediction of previous decode head.
points (Tensor): Point coordinates, shape (batch_size,
num_points, 2).
Returns:
coarse_feats (Tensor): Sampled coarse feature, shape (batch_size,
num_classes, num_points).
"""
coarse_feats = point_sample(
prev_output, points, align_corners=self.align_corners)
return coarse_feats
def forward_train(self, inputs, prev_output, img_metas, gt_semantic_seg,
train_cfg):
"""Forward function for training.
Args:
inputs (list[Tensor]): List of multi-level img features.
prev_output (Tensor): The output of previous decode head.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
gt_semantic_seg (Tensor): Semantic segmentation masks
used if the architecture supports semantic segmentation task.
train_cfg (dict): The training config.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
x = self._transform_inputs(inputs)
with torch.no_grad():
points = self.get_points_train(
prev_output, calculate_uncertainty, cfg=train_cfg)
fine_grained_point_feats = self._get_fine_grained_point_feats(
x, points)
coarse_point_feats = self._get_coarse_point_feats(prev_output, points)
point_logits = self.forward(fine_grained_point_feats,
coarse_point_feats)
point_label = point_sample(
gt_semantic_seg.float(),
points,
mode='nearest',
align_corners=self.align_corners)
point_label = point_label.squeeze(1).long()
losses = self.losses(point_logits, point_label)
return losses
def forward_test(self, inputs, prev_output, img_metas, test_cfg):
"""Forward function for testing.
Args:
inputs (list[Tensor]): List of multi-level img features.
prev_output (Tensor): The output of previous decode head.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
test_cfg (dict): The testing config.
Returns:
Tensor: Output segmentation map.
"""
x = self._transform_inputs(inputs)
refined_seg_logits = prev_output.clone()
for _ in range(test_cfg.subdivision_steps):
refined_seg_logits = resize(
refined_seg_logits,
scale_factor=test_cfg.scale_factor,
mode='bilinear',
align_corners=self.align_corners)
batch_size, channels, height, width = refined_seg_logits.shape
point_indices, points = self.get_points_test(
refined_seg_logits, calculate_uncertainty, cfg=test_cfg)
fine_grained_point_feats = self._get_fine_grained_point_feats(
x, points)
coarse_point_feats = self._get_coarse_point_feats(
prev_output, points)
point_logits = self.forward(fine_grained_point_feats,
coarse_point_feats)
point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1)
refined_seg_logits = refined_seg_logits.reshape(
batch_size, channels, height * width)
refined_seg_logits = refined_seg_logits.scatter_(
2, point_indices, point_logits)
refined_seg_logits = refined_seg_logits.view(
batch_size, channels, height, width)
return refined_seg_logits
def losses(self, point_logits, point_label):
"""Compute segmentation loss."""
loss = dict()
loss['loss_point'] = self.loss_decode(
point_logits, point_label, ignore_index=self.ignore_index)
loss['acc_point'] = accuracy(point_logits, point_label)
return loss
def get_points_train(self, seg_logits, uncertainty_func, cfg):
"""Sample points for training.
Sample points in [0, 1] x [0, 1] coordinate space based on their
uncertainty. The uncertainties are calculated for each point using
'uncertainty_func' function that takes point's logit prediction as
input.
Args:
seg_logits (Tensor): Semantic segmentation logits, shape (
batch_size, num_classes, height, width).
uncertainty_func (func): uncertainty calculation function.
cfg (dict): Training config of point head.
Returns:
point_coords (Tensor): A tensor of shape (batch_size, num_points,
2) that contains the coordinates of ``num_points`` sampled
points.
"""
num_points = cfg.num_points
oversample_ratio = cfg.oversample_ratio
importance_sample_ratio = cfg.importance_sample_ratio
assert oversample_ratio >= 1
assert 0 <= importance_sample_ratio <= 1
batch_size = seg_logits.shape[0]
num_sampled = int(num_points * oversample_ratio)
point_coords = torch.rand(
batch_size, num_sampled, 2, device=seg_logits.device)
point_logits = point_sample(seg_logits, point_coords)
# It is crucial to calculate uncertainty based on the sampled
# prediction value for the points. Calculating uncertainties of the
# coarse predictions first and sampling them for points leads to
# incorrect results. To illustrate this: assume uncertainty func(
# logits)=-abs(logits), a sampled point between two coarse
# predictions with -1 and 1 logits has 0 logits, and therefore 0
# uncertainty value. However, if we calculate uncertainties for the
# coarse predictions first, both will have -1 uncertainty,
# and sampled point will get -1 uncertainty.
point_uncertainties = uncertainty_func(point_logits)
num_uncertain_points = int(importance_sample_ratio * num_points)
num_random_points = num_points - num_uncertain_points
idx = torch.topk(
point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
shift = num_sampled * torch.arange(
batch_size, dtype=torch.long, device=seg_logits.device)
idx += shift[:, None]
point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(
batch_size, num_uncertain_points, 2)
if num_random_points > 0:
rand_point_coords = torch.rand(
batch_size, num_random_points, 2, device=seg_logits.device)
point_coords = torch.cat((point_coords, rand_point_coords), dim=1)
return point_coords
def get_points_test(self, seg_logits, uncertainty_func, cfg):
"""Sample points for testing.
Find ``num_points`` most uncertain points from ``uncertainty_map``.
Args:
seg_logits (Tensor): A tensor of shape (batch_size, num_classes,
height, width) for class-specific or class-agnostic prediction.
uncertainty_func (func): uncertainty calculation function.
cfg (dict): Testing config of point head.
Returns:
point_indices (Tensor): A tensor of shape (batch_size, num_points)
that contains indices from [0, height x width) of the most
uncertain points.
point_coords (Tensor): A tensor of shape (batch_size, num_points,
2) that contains [0, 1] x [0, 1] normalized coordinates of the
most uncertain points from the ``height x width`` grid.
"""
num_points = cfg.subdivision_num_points
uncertainty_map = uncertainty_func(seg_logits)
batch_size, _, height, width = uncertainty_map.shape
h_step = 1.0 / height
w_step = 1.0 / width
uncertainty_map = uncertainty_map.view(batch_size, height * width)
num_points = min(height * width, num_points)
point_indices = uncertainty_map.topk(num_points, dim=1)[1]
point_coords = torch.zeros(
batch_size,
num_points,
2,
dtype=torch.float,
device=seg_logits.device)
point_coords[:, :, 0] = w_step / 2.0 + (point_indices %
width).float() * w_step
point_coords[:, :, 1] = h_step / 2.0 + (point_indices //
width).float() * h_step
return point_indices, point_coords
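# Illustrative sketch (not part of the original file): a minimal, self-contained toy
# version of the uncertainty-based sampling performed in get_points_train above. It
# assumes a toy uncertainty function -abs(logits) and uses plain torch only; the real
# code samples logits with point_sample and reads sizes from cfg.
if __name__ == '__main__':
    import torch
    torch.manual_seed(0)
    batch_size, num_sampled, num_points = 2, 12, 4
    # random candidate points in [0, 1] x [0, 1]
    candidate_coords = torch.rand(batch_size, num_sampled, 2)
    # toy per-point logits and the toy uncertainty -abs(logits)
    toy_logits = torch.randn(batch_size, 1, num_sampled)
    uncertainties = -toy_logits.abs()
    # keep the num_points most uncertain candidates per sample
    idx = uncertainties[:, 0, :].topk(num_points, dim=1)[1]
    shift = num_sampled * torch.arange(batch_size, dtype=torch.long)
    idx = idx + shift[:, None]
    picked = candidate_coords.view(-1, 2)[idx.view(-1)].view(batch_size, num_points, 2)
    print(picked.shape)  # torch.Size([2, 4, 2])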
| 41.911681
| 126
| 0.622935
|
be4484265a8d370098164d367f72475375f5aadb
| 953
|
py
|
Python
|
REST/delete-Interface-IOS_XE-restconf.py
|
AnkitDeshwal89/NETMIKO
|
81c164e9cff46d11b56612f6adc343b6bcdfe87a
|
[
"Apache-2.0"
] | null | null | null |
REST/delete-Interface-IOS_XE-restconf.py
|
AnkitDeshwal89/NETMIKO
|
81c164e9cff46d11b56612f6adc343b6bcdfe87a
|
[
"Apache-2.0"
] | null | null | null |
REST/delete-Interface-IOS_XE-restconf.py
|
AnkitDeshwal89/NETMIKO
|
81c164e9cff46d11b56612f6adc343b6bcdfe87a
|
[
"Apache-2.0"
] | null | null | null |
import requests
from pprint import pprint
from urllib3.exceptions import InsecureRequestWarning
import json
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
#Set up connection parameters
intf = input("Enter the Interface Name :")
router = {
"ip":"192.168.129.139",
"port":443,
"user":"ankit",
"password":"deshwal",
"interf":intf
}
#Set API headers. By default the device returns XML, so we request JSON instead
headers = {
"Accept": "application/yang-data+json",
"Content-Type":"application/yang-data+json"
}
payload = {}
url = f"https://{router['ip']}/restconf/data/ietf-interfaces:interfaces/interface={router['interf']}"
response = requests.request("DELETE",url,headers=headers,data=payload,auth=(router['user'],router['password']),verify=False)
print(response)
if response.ok:
print("Interface delete sucessfully")
#api_data =response.json()
#pprint(api_data)
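# Illustrative follow-up (not part of the original script): one way to confirm the
# deletion is a GET on the same RESTCONF URL; a 404 indicates the interface resource
# is gone. Uses the connection details defined above.
check_resp = requests.get(url, headers=headers, auth=(router['user'], router['password']), verify=False)
if check_resp.status_code == 404:
    print("Interface no longer present on the device")
else:
    print(f"Interface still present, status code: {check_resp.status_code}")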
| 29.78125
| 124
| 0.693599
|
b20509c492cbc6d0a3b28e1f17d1b64138e7c6a4
| 3,715
|
py
|
Python
|
alfred/nn/enc_lang.py
|
arjunakula/amazon_alfred_latest
|
e50c8572064f597b0a9f3c99ea12af3c52e3f820
|
[
"MIT"
] | 44
|
2021-04-28T08:32:01.000Z
|
2022-03-20T02:35:21.000Z
|
alfred/nn/enc_lang.py
|
arjunakula/amazon_alfred_latest
|
e50c8572064f597b0a9f3c99ea12af3c52e3f820
|
[
"MIT"
] | 6
|
2021-05-15T13:17:14.000Z
|
2021-11-18T01:27:31.000Z
|
alfred/nn/enc_lang.py
|
arjunakula/amazon_alfred_latest
|
e50c8572064f597b0a9f3c99ea12af3c52e3f820
|
[
"MIT"
] | 6
|
2021-06-08T19:01:38.000Z
|
2021-11-10T17:56:28.000Z
|
import os
import torch
import numpy as np
from torch import nn
from torch.nn import functional as F
from torch.nn.utils.rnn import pad_sequence
from alfred.gen import constants
from alfred.nn.encodings import PosLangEncoding, InstrLangEncoding
class EncoderLang(nn.Module):
def __init__(self, num_layers, args, embs_ann,
subgoal_token='<<instr>>', goal_token='<<goal>>'):
'''
transformer encoder for language inputs
'''
super(EncoderLang, self).__init__()
self.subgoal_token = subgoal_token
self.goal_token = goal_token
# transformer layers
encoder_layer = nn.TransformerEncoderLayer(
args.demb, args.encoder_heads, args.demb,
args.dropout['transformer']['encoder'])
if args.encoder_lang['shared']:
enc_transformer = nn.TransformerEncoder(
encoder_layer, num_layers)
self.enc_transformers = nn.ModuleDict({
data: enc_transformer
for data in embs_ann.keys()})
else:
self.enc_transformers = nn.ModuleDict({
data: nn.TransformerEncoder(
encoder_layer, num_layers)
for data in embs_ann.keys()})
# encodings
self.enc_pos = PosLangEncoding(args.demb) if args.encoder_lang['pos_enc'] else None
self.enc_instr = InstrLangEncoding(args.demb) if args.encoder_lang['instr_enc'] else None
self.enc_layernorm = nn.LayerNorm(args.demb)
self.enc_dropout = nn.Dropout(args.dropout['lang'], inplace=True)
def forward(self, lang_pad, embedder, vocab, pad):
'''
pass embedded inputs through embeddings and encode them using a transformer
'''
# pad the input language sequences and embed them with a linear layer
mask_pad = (lang_pad == pad)
emb_lang = embedder(lang_pad)
# add positional encodings
mask_token = EncoderLang.mask_token(
lang_pad, vocab, {self.subgoal_token, self.goal_token})
emb_lang = self.encode_inputs(emb_lang, mask_token, mask_pad)
# pass the inputs through the encoder
hiddens = EncoderLang.encoder(
self.enc_transformers, emb_lang, mask_pad, vocab)
lengths = (lang_pad != pad).sum(dim=1)
return hiddens, lengths
@staticmethod
def mask_token(lang_pad, vocab, tokens):
'''
returns mask of the tokens
'''
tokens_mask = torch.zeros_like(lang_pad).long()
for token in tokens:
tokens_mask += lang_pad == vocab.word2index(token)
return tokens_mask.bool()
@staticmethod
def encoder(encoders, emb_lang, mask_pad, vocab, mask_attn=None):
'''
compute encodings for all tokens using a normal flat encoder
'''
# skip mask: mask padded words
if mask_attn is None:
# attention mask: all tokens can attend to all others
mask_attn = torch.zeros(
(mask_pad.shape[1], mask_pad.shape[1]), device=mask_pad.device).float()
# encode the inputs
output = encoders[vocab.name](
emb_lang.transpose(0, 1),
mask_attn,
mask_pad).transpose(0, 1)
return output
def encode_inputs(self, emb_lang, mask_token, mask_pad):
'''
add positional encodings, apply layernorm and dropout
'''
emb_lang = self.enc_pos(emb_lang) if self.enc_pos else emb_lang
emb_lang = self.enc_instr(emb_lang, mask_token) if self.enc_instr else emb_lang
emb_lang = self.enc_dropout(emb_lang)
emb_lang = self.enc_layernorm(emb_lang)
return emb_lang
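# Illustrative sketch (not part of the original file): the token-mask logic inside
# EncoderLang.mask_token reduces to flagging positions whose ids match a set of
# special-token ids. The ids used below (1 for '<<goal>>', 2 for '<<instr>>') are
# invented for the example; the real ids come from vocab.word2index.
if __name__ == '__main__':
    import torch
    lang_pad = torch.tensor([[1, 5, 6, 2, 7, 0, 0]])  # 0 assumed to be the pad id
    tokens_mask = torch.zeros_like(lang_pad).long()
    for token_id in (1, 2):
        tokens_mask += (lang_pad == token_id).long()
    print(tokens_mask.bool())  # tensor([[ True, False, False,  True, False, False, False]])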
| 38.298969
| 97
| 0.632301
|
85a9995946a3562a57d8cc336ab34a603fd2978a
| 146
|
py
|
Python
|
maskrcnn_benchmark/data/__init__.py
|
meryusha/seeds_faster
|
a80cd144c2826cdee5dd929087005f57567ae367
|
[
"MIT"
] | 1
|
2021-12-06T10:47:31.000Z
|
2021-12-06T10:47:31.000Z
|
maskrcnn_benchmark/data/__init__.py
|
SilvioGiancola/seeds_faster
|
4c6a1f1fa71beff7c9d0722d134eb1291f57983e
|
[
"MIT"
] | null | null | null |
maskrcnn_benchmark/data/__init__.py
|
SilvioGiancola/seeds_faster
|
4c6a1f1fa71beff7c9d0722d134eb1291f57983e
|
[
"MIT"
] | 1
|
2019-07-18T13:57:07.000Z
|
2019-07-18T13:57:07.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .build import make_data_loader
from .build import make_data_loader_AL
| 48.666667
| 71
| 0.815068
|
4e56115382190d567432d61e445964b433d04681
| 3,020
|
py
|
Python
|
tests/test_sequential.py
|
ethereon/merlin
|
0babfed51e65197086d74479a1ca9150259b4f7f
|
[
"BSD-3-Clause"
] | 1
|
2019-08-15T16:22:20.000Z
|
2019-08-15T16:22:20.000Z
|
tests/test_sequential.py
|
ethereon/merlin
|
0babfed51e65197086d74479a1ca9150259b4f7f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_sequential.py
|
ethereon/merlin
|
0babfed51e65197086d74479a1ca9150259b4f7f
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
import tensorflow as tf
from merlin.modules.module import Module
from merlin.modules.util import Sequential
from merlin.util.testing import private_context
class ConstantAdder(Module):
def __init__(self, value, name=None):
super().__init__(name=name)
self.value = value
self.variable = None
def compute(self, input):
if self.variable is None:
# A contrived variable to test scoping
self.variable = tf.Variable(self.value, name='value')
return self.variable + input
def test_basic():
with Sequential(name='composite') as seq:
seq += ConstantAdder(value=42.)
seq += ConstantAdder(value=16.)
seq += [ConstantAdder(value=1.), ConstantAdder(value=2.)]
assert seq(12.).numpy() == 73.
@private_context
def test_scoping():
with Sequential(name='alpha') as seq:
adder = ConstantAdder(value=42., name='beta')
seq += adder
seq(0.)
assert adder.variable.name == 'alpha/beta/value:0'
@private_context
def test_scoping_explicit():
with Sequential(scoped=True) as seq:
adder = ConstantAdder(value=42., name='beta')
seq += adder
seq(0.)
assert adder.variable.name == 'sequential/beta/value:0'
@private_context
def test_no_scoping():
adder = ConstantAdder(value=42., name='beta')
seq = Sequential([adder])
seq(0.)
assert adder.variable.name == 'beta/value:0'
@private_context
def test_no_scoping_guard():
with pytest.raises(RuntimeError):
with Sequential() as seq:
seq += ConstantAdder(value=42., name='beta')
@private_context
def test_accessors():
alpha = ConstantAdder(value=42., name='alpha')
beta = ConstantAdder(value=16., name='beta')
seq = Sequential([alpha, beta])
assert len(seq) == 2
assert seq[0] is alpha
assert seq[1] is beta
assert seq['alpha'] is alpha
assert seq['beta'] is beta
assert seq / 'alpha' is alpha
assert seq / 'beta' is beta
assert seq.alpha is alpha
assert seq.beta is beta
def test_iteration():
alpha = ConstantAdder(value=42.)
beta = ConstantAdder(value=16.)
seq = Sequential([alpha, beta])
assert list(seq) == [alpha, beta]
def test_add_flattened():
a = ConstantAdder(value=1.)
b = ConstantAdder(value=2.)
c = ConstantAdder(value=3.)
d = ConstantAdder(value=4.)
e = ConstantAdder(value=5.)
f = ConstantAdder(value=6.)
seq = Sequential()
seq.add_flattened([a, [b, [c, d], e], f])
assert list(seq) == [a, b, c, d, e, f]
@private_context
def test_nested_access():
with Sequential(name='alpha') as alpha:
with Sequential(name='beta') as beta:
with Sequential(name='gamma') as gamma:
leaf = ConstantAdder(value=42., name='leaf')
gamma += leaf
beta += gamma
alpha += beta
alpha(0.)
assert alpha['beta/gamma/leaf'] == leaf
assert leaf.variable.name == 'alpha/beta/gamma/leaf/value:0'
| 26.491228
| 65
| 0.636093
|
54fdf15515eaa75f85d5b36df61b5c9cf0ed709f
| 7,094
|
py
|
Python
|
venv/lib/python3.8/site-packages/tensorflow/_api/v2/compat/v1/nn/__init__.py
|
JIANG-CX/data_labeling
|
8d2470bbb537dfc09ed2f7027ed8ee7de6447248
|
[
"MIT"
] | 1
|
2021-05-24T10:08:51.000Z
|
2021-05-24T10:08:51.000Z
|
venv/lib/python3.8/site-packages/tensorflow/_api/v2/compat/v1/nn/__init__.py
|
JIANG-CX/data_labeling
|
8d2470bbb537dfc09ed2f7027ed8ee7de6447248
|
[
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/tensorflow/_api/v2/compat/v1/nn/__init__.py
|
JIANG-CX/data_labeling
|
8d2470bbb537dfc09ed2f7027ed8ee7de6447248
|
[
"MIT"
] | null | null | null |
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Wrappers for primitive Neural Net (NN) Operations.
"""
from __future__ import print_function as _print_function
import sys as _sys
from . import rnn_cell
from tensorflow.python.ops.array_ops import depth_to_space
from tensorflow.python.ops.array_ops import space_to_batch
from tensorflow.python.ops.array_ops import space_to_depth
from tensorflow.python.ops.candidate_sampling_ops import all_candidate_sampler
from tensorflow.python.ops.candidate_sampling_ops import compute_accidental_hits
from tensorflow.python.ops.candidate_sampling_ops import fixed_unigram_candidate_sampler
from tensorflow.python.ops.candidate_sampling_ops import learned_unigram_candidate_sampler
from tensorflow.python.ops.candidate_sampling_ops import log_uniform_candidate_sampler
from tensorflow.python.ops.candidate_sampling_ops import uniform_candidate_sampler
from tensorflow.python.ops.ctc_ops import collapse_repeated
from tensorflow.python.ops.ctc_ops import ctc_beam_search_decoder
from tensorflow.python.ops.ctc_ops import ctc_beam_search_decoder_v2
from tensorflow.python.ops.ctc_ops import ctc_greedy_decoder
from tensorflow.python.ops.ctc_ops import ctc_loss
from tensorflow.python.ops.ctc_ops import ctc_loss_v2
from tensorflow.python.ops.ctc_ops import ctc_unique_labels
from tensorflow.python.ops.embedding_ops import embedding_lookup
from tensorflow.python.ops.embedding_ops import embedding_lookup_sparse
from tensorflow.python.ops.embedding_ops import safe_embedding_lookup_sparse
from tensorflow.python.ops.gen_math_ops import tanh
from tensorflow.python.ops.gen_nn_ops import conv3d_backprop_filter_v2
from tensorflow.python.ops.gen_nn_ops import conv3d_backprop_filter_v2 as conv3d_backprop_filter
from tensorflow.python.ops.gen_nn_ops import elu
from tensorflow.python.ops.gen_nn_ops import l2_loss
from tensorflow.python.ops.gen_nn_ops import lrn
from tensorflow.python.ops.gen_nn_ops import lrn as local_response_normalization
from tensorflow.python.ops.gen_nn_ops import quantized_avg_pool
from tensorflow.python.ops.gen_nn_ops import quantized_conv2d
from tensorflow.python.ops.gen_nn_ops import quantized_max_pool
from tensorflow.python.ops.gen_nn_ops import quantized_relu_x
from tensorflow.python.ops.gen_nn_ops import relu
from tensorflow.python.ops.gen_nn_ops import selu
from tensorflow.python.ops.gen_nn_ops import softsign
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import softplus
from tensorflow.python.ops.nn_impl import batch_norm_with_global_normalization
from tensorflow.python.ops.nn_impl import batch_normalization
from tensorflow.python.ops.nn_impl import compute_average_loss
from tensorflow.python.ops.nn_impl import depthwise_conv2d
from tensorflow.python.ops.nn_impl import fused_batch_norm
from tensorflow.python.ops.nn_impl import l2_normalize
from tensorflow.python.ops.nn_impl import log_poisson_loss
from tensorflow.python.ops.nn_impl import moments
from tensorflow.python.ops.nn_impl import nce_loss
from tensorflow.python.ops.nn_impl import normalize_moments
from tensorflow.python.ops.nn_impl import relu_layer
from tensorflow.python.ops.nn_impl import sampled_softmax_loss
from tensorflow.python.ops.nn_impl import scale_regularization_loss
from tensorflow.python.ops.nn_impl import separable_conv2d
from tensorflow.python.ops.nn_impl import sigmoid_cross_entropy_with_logits
from tensorflow.python.ops.nn_impl import sufficient_statistics
from tensorflow.python.ops.nn_impl import swish
from tensorflow.python.ops.nn_impl import swish as silu
from tensorflow.python.ops.nn_impl import weighted_cross_entropy_with_logits
from tensorflow.python.ops.nn_impl import weighted_moments
from tensorflow.python.ops.nn_impl import zero_fraction
from tensorflow.python.ops.nn_ops import atrous_conv2d
from tensorflow.python.ops.nn_ops import atrous_conv2d_transpose
from tensorflow.python.ops.nn_ops import avg_pool
from tensorflow.python.ops.nn_ops import avg_pool as avg_pool2d
from tensorflow.python.ops.nn_ops import avg_pool1d
from tensorflow.python.ops.nn_ops import avg_pool3d
from tensorflow.python.ops.nn_ops import avg_pool_v2
from tensorflow.python.ops.nn_ops import bias_add
from tensorflow.python.ops.nn_ops import conv1d
from tensorflow.python.ops.nn_ops import conv1d_transpose
from tensorflow.python.ops.nn_ops import conv2d
from tensorflow.python.ops.nn_ops import conv2d_backprop_filter
from tensorflow.python.ops.nn_ops import conv2d_backprop_input
from tensorflow.python.ops.nn_ops import conv2d_transpose
from tensorflow.python.ops.nn_ops import conv3d_transpose
from tensorflow.python.ops.nn_ops import conv3d_v1 as conv3d
from tensorflow.python.ops.nn_ops import conv_transpose
from tensorflow.python.ops.nn_ops import convolution
from tensorflow.python.ops.nn_ops import crelu
from tensorflow.python.ops.nn_ops import depthwise_conv2d_native
from tensorflow.python.ops.nn_ops import depthwise_conv2d_native_backprop_filter
from tensorflow.python.ops.nn_ops import depthwise_conv2d_native_backprop_filter as depthwise_conv2d_backprop_filter
from tensorflow.python.ops.nn_ops import depthwise_conv2d_native_backprop_input
from tensorflow.python.ops.nn_ops import depthwise_conv2d_native_backprop_input as depthwise_conv2d_backprop_input
from tensorflow.python.ops.nn_ops import dilation2d_v1 as dilation2d
from tensorflow.python.ops.nn_ops import dropout
from tensorflow.python.ops.nn_ops import erosion2d
from tensorflow.python.ops.nn_ops import fractional_avg_pool
from tensorflow.python.ops.nn_ops import fractional_max_pool
from tensorflow.python.ops.nn_ops import in_top_k
from tensorflow.python.ops.nn_ops import leaky_relu
from tensorflow.python.ops.nn_ops import log_softmax
from tensorflow.python.ops.nn_ops import max_pool
from tensorflow.python.ops.nn_ops import max_pool1d
from tensorflow.python.ops.nn_ops import max_pool2d
from tensorflow.python.ops.nn_ops import max_pool3d
from tensorflow.python.ops.nn_ops import max_pool_v2
from tensorflow.python.ops.nn_ops import max_pool_with_argmax_v1 as max_pool_with_argmax
from tensorflow.python.ops.nn_ops import pool
from tensorflow.python.ops.nn_ops import relu6
from tensorflow.python.ops.nn_ops import softmax
from tensorflow.python.ops.nn_ops import softmax_cross_entropy_with_logits
from tensorflow.python.ops.nn_ops import softmax_cross_entropy_with_logits_v2_helper as softmax_cross_entropy_with_logits_v2
from tensorflow.python.ops.nn_ops import sparse_softmax_cross_entropy_with_logits
from tensorflow.python.ops.nn_ops import top_k
from tensorflow.python.ops.nn_ops import with_space_to_batch
from tensorflow.python.ops.nn_ops import xw_plus_b
from tensorflow.python.ops.rnn import bidirectional_dynamic_rnn
from tensorflow.python.ops.rnn import dynamic_rnn
from tensorflow.python.ops.rnn import raw_rnn
from tensorflow.python.ops.rnn import static_bidirectional_rnn
from tensorflow.python.ops.rnn import static_rnn
from tensorflow.python.ops.rnn import static_state_saving_rnn
del _print_function
| 58.147541
| 124
| 0.880603
|
50ff78a395e011f6f4c2d0adc2687de1f076e9d1
| 5,593
|
py
|
Python
|
style_transfer/sub_networks/Vgg.py
|
haonguyen1107/style_transfer
|
8df9b20ce8ebc446cf2c0a67393001b3cf318fed
|
[
"MIT"
] | null | null | null |
style_transfer/sub_networks/Vgg.py
|
haonguyen1107/style_transfer
|
8df9b20ce8ebc446cf2c0a67393001b3cf318fed
|
[
"MIT"
] | 6
|
2021-05-21T16:38:24.000Z
|
2022-02-10T02:01:14.000Z
|
style_transfer/sub_networks/Vgg.py
|
haonguyen1107/style_transfer
|
8df9b20ce8ebc446cf2c0a67393001b3cf318fed
|
[
"MIT"
] | null | null | null |
from style_transfer.sub_networks.Sub_network import Sub_network
import tensorflow as tf
from style_transfer.layer import upsample_nearest
class VGG(Sub_network):
_VGG19 = [
('prep', 'prep', {}),
('conv', 'conv1_1', {'filters': 64}),
('conv', 'conv1_2', {'filters': 64}),
('pool', 'pool1', {}),
('conv', 'conv2_1', {'filters': 128}),
('conv', 'conv2_2', {'filters': 128}),
('pool', 'pool2', {}),
('conv', 'conv3_1', {'filters': 256}),
('conv', 'conv3_2', {'filters': 256}),
('conv', 'conv3_3', {'filters': 256}),
('conv', 'conv3_4', {'filters': 256}),
('pool', 'pool3', {}),
('conv', 'conv4_1', {'filters': 512}),
('conv', 'conv4_2', {'filters': 512}),
('conv', 'conv4_3', {'filters': 512}),
('conv', 'conv4_4', {'filters': 512}),
('pool', 'pool4', {}),
('conv', 'conv5_1', {'filters': 512}),
('conv', 'conv5_2', {'filters': 512}),
('conv', 'conv5_3', {'filters': 512}),
('conv', 'conv5_4', {'filters': 512}),
('pool', 'pool5', {})
]
_DECODER = [
('conv', 'conv4_1', {'filters': 256}),
('upsample', 'upsample3', {}),
('conv', 'conv3_4', {'filters': 256}),
('conv', 'conv3_3', {'filters': 256}),
('conv', 'conv3_2', {'filters': 256}),
('conv', 'conv3_1', {'filters': 128}),
('upsample', 'upsample2', {}),
('conv', 'conv2_2', {'filters': 128}),
('conv', 'conv2_1', {'filters': 64}),
('upsample', 'upsample1', {}),
('conv', 'conv1_2', {'filters': 64}),
('conv', 'conv1_1', {'filters': 3})
]
def build_subnetwork(self, inputs, weights,
last_layer='conv4_1'
):
definition = self._truncate(self._VGG19, [last_layer])
with tf.compat.v1.variable_scope('vgg'):
layers = self._build_net(definition, inputs, weights,
activation=tf.nn.relu, trainable=False)
return layers
def subnetwork_layer_params(self, layer):
for _, name, params in self._VGG19:
if name == layer:
return params
raise ValueError('Unknown layer: ' + layer)
def build_decoder(self, inputs, weights, trainable,
activation=tf.nn.relu):
with tf.compat.v1.variable_scope('decoder'):
layers = self._build_net(self._DECODER, inputs, weights,
activation=activation, trainable=trainable)
return layers['conv1_1']
def _build_net(self, definition, inputs, weights, activation, trainable):
layer, layers = inputs, {}
for type, name, params in definition:
if type == 'conv':
layer = tf.pad(tensor=layer, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]],
mode='reflect')
if weights: # pretrained weights provided
W_init = tf.compat.v1.constant_initializer(weights[name + '_W'])
b_init = tf.compat.v1.constant_initializer(weights[name + '_b'])
else:
W_init = tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform")
b_init = tf.compat.v1.zeros_initializer()
layer = tf.compat.v1.layers.conv2d(layer,
name=name,
padding='valid',
activation=activation,
kernel_size=3,
kernel_initializer=W_init,
bias_initializer=b_init,
trainable=trainable,
**params)
elif type == 'pool':
layer = tf.compat.v1.layers.max_pooling2d(layer,
name=name, strides=2, pool_size=2
)
elif type == 'upsample':
layer = upsample_nearest(layer, scale=2)
elif type == 'prep':
layer = self._vgg_preprocess(layer)
else:
raise ValueError('Unknown layer: %s' % type)
layers[name] = layer
return layers
def _truncate(self, definition, used_layers):
names = [name for _, name, _ in definition]
return definition[:max(names.index(name) for name in used_layers) + 1]
def _vgg_preprocess(self, inputs):
"""Preprocess image for the VGG network using the convolutional layer
The layer expects an RGB image with pixel values in [0,1].
The layer flips the channels (RGB -> BGR), scales the values to [0,255] range,
and subtracts the VGG mean pixel.
"""
#data_format = 'NCHW' if data_format == 'channels_first' else 'NHWC'
W = tf.Variable([[[
[0, 0, 255],
[0, 255, 0],
[255, 0, 0]
]]], trainable=False, dtype=tf.float32)
# VGG19 mean pixel value is taken from
# https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md
b = tf.Variable([-103.939, -116.779, -123.68], trainable=False, dtype=tf.float32)
conv2d = tf.nn.conv2d(input=inputs, filters=W, strides=(1, 1, 1, 1), padding='VALID')
return tf.nn.bias_add(conv2d, b)
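# Illustrative check (not part of the original file): the preprocessing layer above is
# equivalent to flipping RGB -> BGR, scaling to [0, 255] and subtracting the VGG mean
# pixel. A plain-numpy version of the same arithmetic for a single RGB pixel in [0, 1]:
if __name__ == '__main__':
    import numpy as np
    rgb = np.array([0.5, 0.25, 1.0])                    # R, G, B in [0, 1]
    bgr = rgb[::-1] * 255.0                             # flip channels and scale
    vgg_mean_bgr = np.array([103.939, 116.779, 123.68])
    print(bgr - vgg_mean_bgr)                           # matches the conv + bias_add output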
| 44.388889
| 127
| 0.495441
|
856496f4ff906d563584c3d40dc096c27dd8ddac
| 2,784
|
py
|
Python
|
core/funcoes.py
|
bruno-zaccariello/sgeheroku
|
c3d1a0292a33ffc3296746838dc8324c1496ff7e
|
[
"Apache-2.0"
] | null | null | null |
core/funcoes.py
|
bruno-zaccariello/sgeheroku
|
c3d1a0292a33ffc3296746838dc8324c1496ff7e
|
[
"Apache-2.0"
] | 4
|
2020-02-11T23:12:36.000Z
|
2021-11-15T17:47:44.000Z
|
core/funcoes.py
|
bruno-zaccariello/sgeheroku
|
c3d1a0292a33ffc3296746838dc8324c1496ff7e
|
[
"Apache-2.0"
] | null | null | null |
"""
Mรณdulo auxiliar com funรงรตes para views e outros
"""
from django.core.serializers import serialize
# from xml.etree import ElementTree
# import requests
from django.db.models import Q
from core.models import Produto, Pessoa
__all__ = ['filtra_produtos', 'filtra_pessoas',
'paginar', 'arruma_url_page', 'JSON']
def arruma_url_page(request):
""" Arruma a url de views que possuem paginaรงรฃo e pesquisa """
url = str(request.get_full_path())
if "&page=" in url:
url = url[:-7]
elif url.endswith('/'):
url += '?'
return url
def filtra_produtos(codigo, palavra_chave):
""" Funรงรฃo para fazer a filtragem de produtos """
return Produto.objects.filter(
Q(nomeproduto__icontains=palavra_chave) |
Q(descricao__icontains=palavra_chave),
codproduto__icontains=codigo,
hide=False
).order_by('codproduto')
def filtra_pessoas(codigo, palavraChave):
""" Funรงรฃo para fazer a filtragem de clientes """
return Pessoa.objects.filter(
Q(nomecompleto_razaosocial__icontains=palavraChave) |
Q(apelido_nomefantasia=palavraChave) |
Q(email=palavraChave),
hide=False,
pkid_pessoa__icontains=codigo
).order_by('pkid_pessoa')
def paginar(lista):
""" Funรงรฃo inutilizada """
page = 1
ctrl = 0
page_content = {1: []}
for i in lista:
page_content[page] += [i]
ctrl += 1
if ctrl == 10:
ctrl = 0
page += 1
page_content[page] = []
if page != 1 and not page_content[page]:
page_content.pop(page)
return page_content
def JSON(object):
return serialize('json', object)
# def calcula_frete(
# nCdEmpresa='',
# sDsSenha='',
# nCdServico="4014",
# sCepOrigem="",
# sCepDestino="",
# nVlPeso="0.5",
# nCdFormato=1,
# nVlComprimento=16,
# nVlAltura=2,
# nVlLargura=11,
# nVlDiametro="0",
# ):
# """ Funรงรฃo para consumir a API do correios """
# url = 'http://ws.correios.com.br/calculador/CalcPrecoPrazo.asmx/CalcPrecoPrazo?'
# url += f'nCdEmpresa={nCdEmpresa}'
# url += f'&sDsSenha={sDsSenha}'
# url += f'&nCdServico={nCdServico}'
# url += f'&sCepOrigem={sCepOrigem}'
# url += f'&sCepDestino={sCepDestino}'
# url += f'&nVlPeso={nVlPeso}'
# url += f'&nCdFormato={nCdFormato}'
# url += f'&nVlComprimento={nVlComprimento}'
# url += f'&nVlAltura={nVlAltura}'
# url += f'&nVlLargura={nVlLargura}'
# url += f'&nVlDiametro={nVlDiametro}'
# retorno = requests.get(url)
# tree = ElementTree.fromstring(retorno.content)
# dici = {}
# for child in tree.iter('*'):
# tag = child.tag.split('}')[1]
# dici[tag] = str(child.text)
# return dici
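# Illustrative sketch (not part of the original module): paginar splits a list into
# pages of 10 items keyed by page number, e.g.:
if __name__ == '__main__':
    pages = paginar(list(range(23)))
    print(len(pages))      # 3 pages
    print(pages[1][:3])    # [0, 1, 2]
    print(len(pages[3]))   # 3 items on the last page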
| 27.294118
| 86
| 0.613865
|
39bdc2fa3a09d6639107c22d13ef5363d40756d2
| 2,811
|
py
|
Python
|
tests/unit/test_music_handler.py
|
rbaltrusch/bach_generator
|
a5de2d55c982b94d22c62d2cbc8adecd25456069
|
[
"MIT"
] | null | null | null |
tests/unit/test_music_handler.py
|
rbaltrusch/bach_generator
|
a5de2d55c982b94d22c62d2cbc8adecd25456069
|
[
"MIT"
] | null | null | null |
tests/unit/test_music_handler.py
|
rbaltrusch/bach_generator
|
a5de2d55c982b94d22c62d2cbc8adecd25456069
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Tests for the music_handler module"""
from typing import Type
import music21
import pytest
from bach_generator.src import music_handler
SIXTEENTH = "16th"
EIGHT = "8th"
@pytest.mark.usefixtures("midi_file")
def test_extract_notes_from_part(midi_file):
score = music21.stream.Score()
for note_name in midi_file.notes:
note = music21.note.Note(nameWithOctave=note_name, type="16th")
score.append(note)
notes = music_handler.extract_notes_from_part(score)
assert [note.nameWithOctave for note in notes] == midi_file.notes
@pytest.mark.usefixtures("midi_file")
@pytest.mark.parametrize(
"handler_type", [music_handler.SimpleMusicHandler, music_handler.CopyMusicHandler]
)
def test_parse_file(midi_file, handler_type: Type[music_handler.BaseMusicHandler]):
handler = handler_type()
notes = handler.parse(midi_file.path)
assert notes == midi_file.notes
def test_instantiate_fail():
with pytest.raises(TypeError):
music_handler.BaseMusicHandler()
@pytest.mark.usefixtures("midi_file")
@pytest.mark.parametrize(
"handler_type, duration",
[
(music_handler.SimpleMusicHandler, SIXTEENTH),
(music_handler.CopyMusicHandler, EIGHT), # original duration
],
)
def test_regenerate_score(
handler_type: Type[music_handler.BaseMusicHandler], duration, midi_file
):
handler = handler_type()
notes = handler.parse(midi_file.path)
score = handler.generate_score(notes)
assert isinstance(score, music21.stream.Score)
notes = music_handler.extract_notes_from_part(score)
assert all(note.duration.type == duration for note in notes)
@pytest.mark.parametrize("note_names", [[], ["A2"], ["A5", "A3", "B4"]])
def test_simple_score_generation(note_names):
handler = music_handler.SimpleMusicHandler()
score = handler.generate_score(note_names)
notes = music_handler.extract_notes_from_part(score)
assert len(notes) == len(note_names)
assert all(note.nameWithOctave == name for note, name in zip(notes, note_names))
@pytest.mark.usefixtures("midi_file")
@pytest.mark.parametrize("note_names", [[], ["A2"], ["A5", "A3", "B4"]])
def test_copy_score_generation(note_names, midi_file):
handler = music_handler.CopyMusicHandler()
handler.parse(midi_file.path)
parts = list(handler.generate_score(note_names))
notes = music_handler.extract_notes_from_part(parts[0])
assert len(notes) == len(midi_file.notes)
assert all(note.nameWithOctave == name for note, name in zip(notes, note_names))
@pytest.mark.parametrize("note_names", [[], ["A2"], ["A5", "A3", "B4", "G4"]])
def test_copy_handler_without_part(note_names):
handler = music_handler.CopyMusicHandler()
with pytest.raises(TypeError):
handler.generate_score(note_names)
| 33.86747
| 86
| 0.73248
|
2fc29fa77d05da4b27a84e4ca3486451e1a411e0
| 727
|
py
|
Python
|
leetcode/python/208_implement_trie_prefix_tree.py
|
VVKot/leetcode-solutions
|
7d6e599b223d89a7861929190be715d3b3604fa4
|
[
"MIT"
] | 4
|
2019-04-22T11:57:36.000Z
|
2019-10-29T09:12:56.000Z
|
leetcode/python/208_implement_trie_prefix_tree.py
|
VVKot/coding-competitions
|
7d6e599b223d89a7861929190be715d3b3604fa4
|
[
"MIT"
] | null | null | null |
leetcode/python/208_implement_trie_prefix_tree.py
|
VVKot/coding-competitions
|
7d6e599b223d89a7861929190be715d3b3604fa4
|
[
"MIT"
] | null | null | null |
class Trie:
WORD_MARK = '*'
def __init__(self):
self.trie = {}
def insert(self, word: str) -> None:
trie = self.trie
for ch in word:
trie = trie.setdefault(ch, {})
trie[self.WORD_MARK] = self.WORD_MARK
def search(self, word: str) -> bool:
trie = self.trie
for ch in word:
if ch in trie:
trie = trie[ch]
else:
return False
return self.WORD_MARK in trie
def startsWith(self, prefix: str) -> bool:
trie = self.trie
for ch in prefix:
if ch in trie:
trie = trie[ch]
else:
return False
return bool(trie)
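# Illustrative usage (not part of the original solution), showing the expected
# LeetCode behaviour of the trie above:
if __name__ == '__main__':
    trie = Trie()
    trie.insert("apple")
    print(trie.search("apple"))     # True
    print(trie.search("app"))       # False
    print(trie.startsWith("app"))   # True
    trie.insert("app")
    print(trie.search("app"))       # True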
| 23.451613
| 46
| 0.480055
|
8a69407669d3d0a09192eb6e02c5528a4af944f4
| 4,280
|
py
|
Python
|
setup.py
|
SparXalt/deafwave-blockchain
|
579eac55d55285f750c622bf66a1aa30ed6d949d
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
SparXalt/deafwave-blockchain
|
579eac55d55285f750c622bf66a1aa30ed6d949d
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
SparXalt/deafwave-blockchain
|
579eac55d55285f750c622bf66a1aa30ed6d949d
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup
dependencies = [
"blspy==1.0.2", # Signature library
"chiavdf==1.0.1", # timelord and vdf verification
"chiabip158==1.0", # bip158-style wallet filters
"chiapos==1.0.2", # proof of space
"clvm==0.9.6",
"clvm_rs==0.1.7",
"clvm_tools==0.4.3",
"aiohttp==3.7.4", # HTTP server for full node rpc
"aiosqlite==0.17.0", # asyncio wrapper for sqlite, to store blocks
"bitstring==3.1.7", # Binary data management library
"colorlog==5.0.1", # Adds color to logs
"concurrent-log-handler==0.9.19", # Concurrently log and rotate logs
"cryptography==3.4.7", # Python cryptography library for TLS - keyring conflict
"keyring==23.0.1", # Store keys in MacOS Keychain, Windows Credential Locker
# Secure storage for keys on Linux (Will be replaced)
"keyrings.cryptfile==1.3.4",
# "keyrings.cryptfile==1.3.8", # Secure storage for keys on Linux (Will be replaced)
# See https://github.com/frispete/keyrings.cryptfile/issues/15
"PyYAML==5.4.1", # Used for config file format
"setproctitle==1.2.2", # Gives the deafwave processes readable names
"sortedcontainers==2.3.0", # For maintaining sorted mempools
"websockets==8.1.0", # For use in wallet RPC and electron UI
"click==7.1.2", # For the CLI
"dnspython==2.1.0", # Query DNS seeds
]
upnp_dependencies = [
"miniupnpc==2.1", # Allows users to open ports on their router
]
dev_dependencies = [
"pytest",
"pytest-asyncio",
"flake8",
"mypy",
"black",
"aiohttp_cors", # For blackd
"ipython", # For asyncio debugging
]
kwargs = dict(
name="deafwave-blockchain",
author="Mariano Sorgente",
author_email="mariano@deafwave.net",
description="Deafwave blockchain full node, farmer, timelord, and wallet.",
url="https://deafwave.net/",
license="Apache License",
python_requires=">=3.7, <4",
keywords="deafwave blockchain node",
install_requires=dependencies,
setup_requires=["setuptools_scm"],
extras_require=dict(
uvloop=["uvloop"],
dev=dev_dependencies,
upnp=upnp_dependencies,
),
packages=[
"build_scripts",
"deafwave",
"deafwave.cmds",
"deafwave.consensus",
"deafwave.daemon",
"deafwave.full_node",
"deafwave.timelord",
"deafwave.farmer",
"deafwave.harvester",
"deafwave.introducer",
"deafwave.plotting",
"deafwave.protocols",
"deafwave.rpc",
"deafwave.server",
"deafwave.simulator",
"deafwave.types.blockchain_format",
"deafwave.types",
"deafwave.util",
"deafwave.wallet",
"deafwave.wallet.puzzles",
"deafwave.wallet.rl_wallet",
"deafwave.wallet.cc_wallet",
"deafwave.wallet.did_wallet",
"deafwave.wallet.settings",
"deafwave.wallet.trading",
"deafwave.wallet.util",
"deafwave.ssl",
"mozilla-ca",
],
entry_points={
"console_scripts": [
"deafwave = deafwave.cmds.deafwave:main",
"deafwave_wallet = deafwave.server.start_wallet:main",
"deafwave_full_node = deafwave.server.start_full_node:main",
"deafwave_harvester = deafwave.server.start_harvester:main",
"deafwave_farmer = deafwave.server.start_farmer:main",
"deafwave_introducer = deafwave.server.start_introducer:main",
"deafwave_timelord = deafwave.server.start_timelord:main",
"deafwave_timelord_launcher = deafwave.timelord.timelord_launcher:main",
"deafwave_full_node_simulator = deafwave.simulator.start_simulator:main",
]
},
package_data={
"deafwave": ["pyinstaller.spec"],
"deafwave.wallet.puzzles": ["*.clvm", "*.clvm.hex"],
"deafwave.util": ["initial-*.yaml", "english.txt"],
"deafwave.ssl": ["deafwave_ca.crt", "deafwave_ca.key", "dst_root_ca.pem"],
"mozilla-ca": ["cacert.pem"],
},
use_scm_version={"fallback_version": "unknown-no-.git-directory"},
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
zip_safe=False,
)
if __name__ == "__main__":
setup(**kwargs)
| 35.966387
| 90
| 0.63014
|
985d59217598a6a84899984137a64db1fd9a8050
| 359
|
py
|
Python
|
adminapp/migrations/0010_alter_exhibit_options.py
|
mofresh27/MuseumExperience-Group2-Python-BE-1
|
d6ca7aceeddfcfdefdf112ab5e40cf74d6b472ce
|
[
"MIT"
] | null | null | null |
adminapp/migrations/0010_alter_exhibit_options.py
|
mofresh27/MuseumExperience-Group2-Python-BE-1
|
d6ca7aceeddfcfdefdf112ab5e40cf74d6b472ce
|
[
"MIT"
] | 1
|
2021-07-19T14:27:28.000Z
|
2021-07-19T14:27:28.000Z
|
adminapp/migrations/0010_alter_exhibit_options.py
|
mofresh27/MuseumExperience-Group2-Python-BE-1
|
d6ca7aceeddfcfdefdf112ab5e40cf74d6b472ce
|
[
"MIT"
] | 2
|
2021-07-14T21:56:46.000Z
|
2021-07-15T16:11:41.000Z
|
# Generated by Django 3.2.4 on 2021-07-10 20:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('adminapp', '0009_auto_20210709_0156'),
]
operations = [
migrations.AlterModelOptions(
name='exhibit',
options={'verbose_name_plural': 'Exhibit'},
),
]
| 19.944444
| 55
| 0.610028
|
4d8be3344c3be84c10da9ee8a613a7f1d460d285
| 9,831
|
py
|
Python
|
Student Registration Project Files/realtimedetector.py
|
mastadict/automated-student-register
|
43ea2f9cced1129a0a6cec88d791894d0a0c9a20
|
[
"MIT"
] | 1
|
2020-12-09T15:12:24.000Z
|
2020-12-09T15:12:24.000Z
|
Student Registration Project Files/realtimedetector.py
|
mastadict/automated-student-register
|
43ea2f9cced1129a0a6cec88d791894d0a0c9a20
|
[
"MIT"
] | 4
|
2021-06-08T19:44:04.000Z
|
2022-03-11T23:44:05.000Z
|
Student Registration Project Files/realtimedetector.py
|
khoisan25/automated-student-register
|
43ea2f9cced1129a0a6cec88d791894d0a0c9a20
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
'''
Author: Guido Diepen <gdiepen@deloitte.nl>
'''
#Import the OpenCV and dlib libraries
import cv2
import dlib
import threading
import time
#Initialize a face cascade using the frontal face haar cascade provided with
#the OpenCV library
#Make sure that you copy this file from the opencv project to the root of this
#project folder
faceCascade = cv2.CascadeClassifier('frontalface.xml')
#The desired output width and height
OUTPUT_SIZE_WIDTH = 575
OUTPUT_SIZE_HEIGHT = 400
#We are not really doing face recognition
def doRecognizePerson(faceNames, fid):
time.sleep(2)
faceNames[ fid ] = "Person " + str(fid)
def detectAndTrackMultipleFaces():
#Open the webcam device whose ID is read from resources/cam.txt
camID = int(open('resources/cam.txt').read())
capture = cv2.VideoCapture(camID)
#Create two opencv named windows
cv2.namedWindow("base-image", cv2.WINDOW_AUTOSIZE)
cv2.namedWindow("result-image", cv2.WINDOW_AUTOSIZE)
#Position the windows next to each other
cv2.moveWindow("base-image",0,100)
cv2.moveWindow("result-image",400,100)
#Start the window thread for the two windows we are using
cv2.startWindowThread()
#The color of the rectangle we draw around the face
rectangleColor = (0,255,0)
#variables holding the current frame number and the current faceid
frameCounter = 0
currentFaceID = 0
#Variables holding the correlation trackers and the name per faceid
faceTrackers = {}
faceNames = {}
try:
while True:
#Retrieve the latest image from the webcam
rc,fullSizeBaseImage = capture.read()
#Resize the image to 320x240
baseImage = cv2.resize( fullSizeBaseImage, ( 320, 240))
#Check if a key was pressed and if it was x, then break
#from the infinite loop
if cv2.waitKey(2) & 0xFF == ord('x'):
break
#Result image is the image we will show the user, which is a
#combination of the original image from the webcam and the
#overlaid rectangles for the tracked faces
resultImage = baseImage.copy()
#STEPS:
# * Update all trackers and remove the ones that are not
# relevant anymore
# * Every 10 frames:
# + Use face detection on the current frame and look
# for faces.
# + For each found face, check if centerpoint is within
# existing tracked box. If so, nothing to do
# + If centerpoint is NOT in existing tracked box, then
# we add a new tracker with a new face-id
#Increase the framecounter
frameCounter += 1
#Update all the trackers and remove the ones for which the update
#indicated the quality was not good enough
fidsToDelete = []
for fid in faceTrackers.keys():
trackingQuality = faceTrackers[ fid ].update( baseImage )
#If the tracking quality is not good enough, we must delete
#this tracker
if trackingQuality < 7:
fidsToDelete.append( fid )
for fid in fidsToDelete:
print("Removing fid " + str(fid) + " from list of trackers")
faceTrackers.pop( fid , None )
#Every 10 frames, we will have to determine which faces
#are present in the frame
if (frameCounter % 10) == 0:
#For the face detection, we need to make use of a gray
#colored image so we will convert the baseImage to a
#gray-based image
gray = cv2.cvtColor(baseImage, cv2.COLOR_BGR2GRAY)
#Now use the haar cascade detector to find all faces
#in the image
faces = faceCascade.detectMultiScale(gray, 1.5, 7)
#Loop over all faces and check if the area for this
#face is the largest so far
#We need to convert it to int here because of the
#requirement of the dlib tracker. If we omit the cast to
#int here, you will get cast errors since the detector
#returns numpy.int32 and the tracker requires an int
for (_x,_y,_w,_h) in faces:
x = int(_x)
y = int(_y)
w = int(_w)
h = int(_h)
#calculate the centerpoint
x_bar = x + 0.5 * w
y_bar = y + 0.5 * h
#Variable holding information which faceid we
#matched with
matchedFid = None
#Now loop over all the trackers and check if the
#centerpoint of the face is within the box of a
#tracker
for fid in faceTrackers.keys():
tracked_position = faceTrackers[fid].get_position()
t_x = int(tracked_position.left())
t_y = int(tracked_position.top())
t_w = int(tracked_position.width())
t_h = int(tracked_position.height())
#calculate the centerpoint
t_x_bar = t_x + 0.5 * t_w
t_y_bar = t_y + 0.5 * t_h
#check if the centerpoint of the face is within the
#rectangle of a tracker region. Also, the centerpoint
#of the tracker region must be within the region
#detected as a face. If both of these conditions hold
#we have a match
if ( ( t_x <= x_bar <= (t_x + t_w)) and
( t_y <= y_bar <= (t_y + t_h)) and
( x <= t_x_bar <= (x + w )) and
( y <= t_y_bar <= (y + h ))):
matchedFid = fid
#If no matched fid, then we have to create a new tracker
if matchedFid is None:
print("Creating new tracker " + str(currentFaceID))
#Create and store the tracker
tracker = dlib.correlation_tracker()
tracker.start_track(baseImage,
dlib.rectangle( x-10,
y-20,
x+w+10,
y+h+20))
faceTrackers[ currentFaceID ] = tracker
#Start a new thread that is used to simulate
#face recognition. This is not yet implemented in this
#version :)
t = threading.Thread( target = doRecognizePerson ,
args=(faceNames, currentFaceID))
t.start()
#Increase the currentFaceID counter
currentFaceID += 1
#Now loop over all the trackers we have and draw the rectangle
#around the detected faces. If we 'know' the name for this person
#(i.e. the recognition thread is finished), we print the name
#of the person, otherwise the message indicating we are detecting
#the name of the person
for fid in faceTrackers.keys():
tracked_position = faceTrackers[fid].get_position()
t_x = int(tracked_position.left())
t_y = int(tracked_position.top())
t_w = int(tracked_position.width())
t_h = int(tracked_position.height())
cv2.rectangle(resultImage, (t_x, t_y),
(t_x + t_w , t_y + t_h),
rectangleColor ,2)
if fid in faceNames.keys():
cv2.putText(resultImage, faceNames[fid] ,
(int(t_x + t_w/2), int(t_y)),
cv2.FONT_HERSHEY_SIMPLEX,
0.5, (255, 255, 255), 2)
else:
cv2.putText(resultImage, "Detecting..." ,
(int(t_x + t_w/2), int(t_y)),
cv2.FONT_HERSHEY_SIMPLEX,
0.5, (255, 255, 255), 2)
#Since we want to show something larger on the screen than the
#original 320x240, we resize the image again
#
#Note that it would also be possible to keep the large version
#of the baseimage and make the result image a copy of this large
#base image and use the scaling factor to draw the rectangle
#at the right coordinates.
largeResult = cv2.resize(resultImage,
(OUTPUT_SIZE_WIDTH,OUTPUT_SIZE_HEIGHT))
#Finally, we want to show the images on the screen
cv2.imshow("base-image", baseImage)
cv2.imshow("result-image", largeResult)
#To ensure we can also deal with the user pressing Ctrl-C in the console
#we have to check for the KeyboardInterrupt exception and break out of
#the main loop
except KeyboardInterrupt as e:
pass
#Destroy any OpenCV windows and exit the application
cv2.destroyAllWindows()
exit(0)
if __name__ == '__main__':
detectAndTrackMultipleFaces()
| 35.490975
| 79
| 0.522327
|
3930d034e0fae17ed3c63b0c3517864718c8f781
| 466
|
py
|
Python
|
sources/malshare.py
|
asrabon/MalFind
|
ca3fcf59b335f3bd0e1d4596f545a917c4a0e613
|
[
"MIT"
] | 1
|
2021-12-13T17:19:09.000Z
|
2021-12-13T17:19:09.000Z
|
sources/malshare.py
|
asrabon/MalFind
|
ca3fcf59b335f3bd0e1d4596f545a917c4a0e613
|
[
"MIT"
] | null | null | null |
sources/malshare.py
|
asrabon/MalFind
|
ca3fcf59b335f3bd0e1d4596f545a917c4a0e613
|
[
"MIT"
] | null | null | null |
import requests
SEARCH_URL = "https://malshare.com/api.php?api_key={}&action=search&query={}"
SAMPLE_URL = "https://malshare.com/sample.php?action=detail&hash={}"
def search(file_hash, api_key):
r = requests.get(
SEARCH_URL.format(api_key, file_hash),
timeout=30
)
search_submissions = r.json()
if len(search_submissions) > 0:
md5 = search_submissions[0]["md5"]
return SAMPLE_URL.format(md5)
return None
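# Illustrative usage (not part of the original file): the hash and API key below are
# placeholders, not real values.
if __name__ == '__main__':
    sample_url = search("d41d8cd98f00b204e9800998ecf8427e", api_key="YOUR_API_KEY")
    if sample_url:
        print("Sample found:", sample_url)
    else:
        print("No sample found for that hash")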
| 23.3
| 77
| 0.656652
|
4a7227c2fd3b323e97ab507ad9797313ca6afa00
| 1,085
|
py
|
Python
|
MangAdventure/urls.py
|
fossabot/MangAdventure
|
20e1f27056b8c4b9cb58ce6e815a5bb93739fe11
|
[
"MIT"
] | null | null | null |
MangAdventure/urls.py
|
fossabot/MangAdventure
|
20e1f27056b8c4b9cb58ce6e815a5bb93739fe11
|
[
"MIT"
] | null | null | null |
MangAdventure/urls.py
|
fossabot/MangAdventure
|
20e1f27056b8c4b9cb58ce6e815a5bb93739fe11
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.conf import settings
from .views import index, search, opensearch, contribute, robots
try:
from django.urls import include, re_path as url
except ImportError:
from django.conf.urls import include, url
urlpatterns = [
url(r'^$', index, name='index'),
url(r'^', include('config.urls')),
url(r'^search/$', search, name='search'),
url(r'^admin/', admin.site.urls),
url(r'^reader/', include('reader.urls')),
url(r'^api/', include('api.urls')),
url(r'^groups/', include('groups.urls')),
url(r'^user/', include('users.urls')),
url(r'^opensearch\.xml$', opensearch, name='opensearch'),
url(r'^contribute\.json$', contribute, name='contribute'),
url(r'^robots\.txt$', robots, name='robots')
]
if settings.DEBUG:
from django.conf.urls.static import static
urlpatterns += static(
settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
)
handler404 = 'MangAdventure.views.handler404'
handler500 = 'MangAdventure.views.handler500'
handler503 = 'MangAdventure.views.handler503'
| 31.911765
| 64
| 0.682028
|
33bdfb784d6a50ee474514775db5e5496b3f4c5b
| 1,700
|
py
|
Python
|
python/ecs/fargate-service-with-autoscaling/app.py
|
damshenas/aws-cdk-examples
|
85d247df404444cde6ef913aae31aaa47cd93daa
|
[
"Apache-2.0"
] | 1
|
2022-02-02T20:23:28.000Z
|
2022-02-02T20:23:28.000Z
|
python/ecs/fargate-service-with-autoscaling/app.py
|
damshenas/aws-cdk-examples
|
85d247df404444cde6ef913aae31aaa47cd93daa
|
[
"Apache-2.0"
] | null | null | null |
python/ecs/fargate-service-with-autoscaling/app.py
|
damshenas/aws-cdk-examples
|
85d247df404444cde6ef913aae31aaa47cd93daa
|
[
"Apache-2.0"
] | null | null | null |
from aws_cdk import (
aws_ec2 as ec2,
aws_ecs as ecs,
aws_ecs_patterns as ecs_patterns,
App, CfnOutput, Duration, Stack
)
from constructs import Construct
class AutoScalingFargateService(Stack):
def __init__(self, scope: Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# Create a cluster
vpc = ec2.Vpc(
self, "Vpc",
max_azs=2
)
cluster = ecs.Cluster(
self, 'fargate-service-autoscaling',
vpc=vpc
)
# Create Fargate Service
fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
self, "sample-app",
cluster=cluster,
task_image_options={
'image': ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample")
}
)
fargate_service.service.connections.security_groups[0].add_ingress_rule(
peer = ec2.Peer.ipv4(vpc.vpc_cidr_block),
connection = ec2.Port.tcp(80),
description="Allow http inbound from VPC"
)
# Setup AutoScaling policy
scaling = fargate_service.service.auto_scale_task_count(
max_capacity=2
)
scaling.scale_on_cpu_utilization(
"CpuScaling",
target_utilization_percent=50,
scale_in_cooldown=Duration.seconds(60),
scale_out_cooldown=Duration.seconds(60),
)
CfnOutput(
self, "LoadBalancerDNS",
value=fargate_service.load_balancer.load_balancer_dns_name
)
app = App()
AutoScalingFargateService(app, "aws-fargate-application-autoscaling")
app.synth()
| 28.333333
| 85
| 0.608235
|
38a2f8504722c74ebe7ae9c3e50877c9fd820c79
| 342
|
py
|
Python
|
bets/views.py
|
mattchere/letsmakebets
|
d34ffbc98022250eb9352ccddb63a5f25a92ab6a
|
[
"MIT"
] | null | null | null |
bets/views.py
|
mattchere/letsmakebets
|
d34ffbc98022250eb9352ccddb63a5f25a92ab6a
|
[
"MIT"
] | null | null | null |
bets/views.py
|
mattchere/letsmakebets
|
d34ffbc98022250eb9352ccddb63a5f25a92ab6a
|
[
"MIT"
] | null | null | null |
from .models import Bet, Bettor, Taker
from django.shortcuts import render
from django.views import generic
def index(request):
"""
View function for homepage of site.
"""
return render(request, 'index.html')
class BetListView(generic.ListView):
model = Bet
class BetDetailView(generic.DetailView):
model = Bet
| 17.1
| 40
| 0.710526
|
f640e87a9d13f9f365aef5d920bd1b7f59e0a591
| 22,038
|
py
|
Python
|
flink-python/pyflink/table/tests/test_table_environment_api.py
|
sundargates/flink
|
aa489269a1429f25136765af94b05d10ef5b7fd3
|
[
"Apache-2.0"
] | null | null | null |
flink-python/pyflink/table/tests/test_table_environment_api.py
|
sundargates/flink
|
aa489269a1429f25136765af94b05d10ef5b7fd3
|
[
"Apache-2.0"
] | null | null | null |
flink-python/pyflink/table/tests/test_table_environment_api.py
|
sundargates/flink
|
aa489269a1429f25136765af94b05d10ef5b7fd3
|
[
"Apache-2.0"
] | null | null | null |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import glob
import os
import pathlib
import sys
from py4j.protocol import Py4JJavaError
from pyflink.find_flink_home import _find_flink_source_root
from pyflink.java_gateway import get_gateway
from pyflink.dataset import ExecutionEnvironment
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.table import DataTypes, CsvTableSink, StreamTableEnvironment, EnvironmentSettings
from pyflink.table.descriptors import FileSystem, OldCsv, Schema
from pyflink.table.table_config import TableConfig
from pyflink.table.table_environment import BatchTableEnvironment
from pyflink.table.types import RowType
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase, PyFlinkBatchTableTestCase, \
PyFlinkBlinkBatchTableTestCase
from pyflink.util.exceptions import TableException
from pyflink.util.utils import get_j_env_configuration
class TableEnvironmentTest(object):
def test_set_sys_executable_for_local_mode(self):
jvm = get_gateway().jvm
actual_executable = get_j_env_configuration(self.t_env) \
.getString(jvm.PythonOptions.PYTHON_EXECUTABLE.key(), None)
self.assertEqual(sys.executable, actual_executable)
def test_explain(self):
schema = RowType()\
.add('a', DataTypes.INT())\
.add('b', DataTypes.STRING())\
.add('c', DataTypes.STRING())
t_env = self.t_env
t = t_env.from_elements([], schema)
result = t.select("1 + a, b, c")
actual = t_env.explain(result)
assert isinstance(actual, str)
def test_explain_with_extended(self):
schema = RowType() \
.add('a', DataTypes.INT()) \
.add('b', DataTypes.STRING()) \
.add('c', DataTypes.STRING())
t_env = self.t_env
t = t_env.from_elements([], schema)
result = t.select("1 + a, b, c")
actual = t_env.explain(result, True)
assert isinstance(actual, str)
def test_register_java_function(self):
t_env = self.t_env
t_env.register_java_function("scalar_func",
"org.apache.flink.table.expressions.utils.RichFunc0")
t_env.register_java_function(
"agg_func", "org.apache.flink.table.functions.aggfunctions.ByteMaxAggFunction")
t_env.register_java_function("table_func", "org.apache.flink.table.utils.TableFunc1")
actual = t_env.list_user_defined_functions()
expected = ['scalar_func', 'agg_func', 'table_func']
self.assert_equals(actual, expected)
class StreamTableEnvironmentTests(TableEnvironmentTest, PyFlinkStreamTableTestCase):
def test_register_table_source_scan(self):
t_env = self.t_env
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
source_path = os.path.join(self.tempdir + '/streaming.csv')
csv_source = self.prepare_csv_source(source_path, [], field_types, field_names)
t_env.register_table_source("Source", csv_source)
result = t_env.scan("Source")
self.assertEqual(
'CatalogTable: (identifier: [`default_catalog`.`default_database`.`Source`]'
', fields: [a, b, c])',
result._j_table.getQueryOperation().asSummaryString())
def test_register_table_sink(self):
t_env = self.t_env
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"Sinks",
source_sink_utils.TestAppendSink(field_names, field_types))
t_env.from_elements([(1, "Hi", "Hello")], ["a", "b", "c"]).insert_into("Sinks")
self.t_env.execute("test")
actual = source_sink_utils.results()
expected = ['1,Hi,Hello']
self.assert_equals(actual, expected)
def test_from_table_source(self):
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
source_path = os.path.join(self.tempdir + '/streaming.csv')
csv_source = self.prepare_csv_source(source_path, [], field_types, field_names)
result = self.t_env.from_table_source(csv_source)
self.assertEqual(
'TableSource: (fields: [a, b, c])',
result._j_table.getQueryOperation().asSummaryString())
def test_list_tables(self):
source_path = os.path.join(self.tempdir + '/streaming.csv')
field_names = ["a", "b", "c"]
field_types = [DataTypes.INT(), DataTypes.STRING(), DataTypes.STRING()]
data = []
csv_source = self.prepare_csv_source(source_path, data, field_types, field_names)
t_env = self.t_env
t_env.register_table_source("Orders", csv_source)
t_env.register_table_sink(
"Sinks",
source_sink_utils.TestAppendSink(field_names, field_types))
t_env.register_table_sink(
"Results",
source_sink_utils.TestAppendSink(field_names, field_types))
actual = t_env.list_tables()
expected = ['Orders', 'Results', 'Sinks']
self.assert_equals(actual, expected)
def test_temporary_tables(self):
t_env = self.t_env
t_env.connect(FileSystem().path(os.path.join(self.tempdir + '/temp_1.csv'))) \
.with_format(OldCsv()
.field_delimiter(',')
.field("a", DataTypes.INT())
.field("b", DataTypes.STRING())) \
.with_schema(Schema()
.field("a", DataTypes.INT())
.field("b", DataTypes.STRING())) \
.create_temporary_table("temporary_table_1")
t_env.connect(FileSystem().path(os.path.join(self.tempdir + '/temp_2.csv'))) \
.with_format(OldCsv()
.field_delimiter(',')
.field("a", DataTypes.INT())
.field("b", DataTypes.STRING())) \
.with_schema(Schema()
.field("a", DataTypes.INT())
.field("b", DataTypes.STRING())) \
.create_temporary_table("temporary_table_2")
actual = t_env.list_temporary_tables()
expected = ['temporary_table_1', 'temporary_table_2']
self.assert_equals(actual, expected)
t_env.drop_temporary_table("temporary_table_1")
actual = t_env.list_temporary_tables()
expected = ['temporary_table_2']
self.assert_equals(actual, expected)
def test_temporary_views(self):
t_env = self.t_env
t_env.create_temporary_view(
"temporary_view_1",
t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c']))
t_env.create_temporary_view(
"temporary_view_2",
t_env.from_elements([(1, 'Hi')], ['a', 'b']))
actual = t_env.list_temporary_views()
expected = ['temporary_view_1', 'temporary_view_2']
self.assert_equals(actual, expected)
t_env.drop_temporary_view("temporary_view_1")
actual = t_env.list_temporary_views()
expected = ['temporary_view_2']
self.assert_equals(actual, expected)
def test_from_path(self):
t_env = self.t_env
t_env.create_temporary_view(
"temporary_view_1",
t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c']))
result = t_env.from_path("temporary_view_1")
self.assertEqual(
'CatalogTable: (identifier: [`default_catalog`.`default_database`.`temporary_view_1`]'
', fields: [a, b, c])',
result._j_table.getQueryOperation().asSummaryString())
def test_insert_into(self):
t_env = self.t_env
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"Sinks",
source_sink_utils.TestAppendSink(field_names, field_types))
t_env.insert_into("Sinks", t_env.from_elements([(1, "Hi", "Hello")], ["a", "b", "c"]))
self.t_env.execute("test")
actual = source_sink_utils.results()
expected = ['1,Hi,Hello']
self.assert_equals(actual, expected)
def test_explain_with_multi_sinks(self):
t_env = self.t_env
source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"sink1",
source_sink_utils.TestAppendSink(field_names, field_types))
t_env.register_table_sink(
"sink2",
source_sink_utils.TestAppendSink(field_names, field_types))
t_env.sql_update("insert into sink1 select * from %s where a > 100" % source)
t_env.sql_update("insert into sink2 select * from %s where a < 100" % source)
actual = t_env.explain(extended=True)
assert isinstance(actual, str)
def test_sql_query(self):
t_env = self.t_env
source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"sinks",
source_sink_utils.TestAppendSink(field_names, field_types))
result = t_env.sql_query("select a + 1, b, c from %s" % source)
result.insert_into("sinks")
self.t_env.execute("test")
actual = source_sink_utils.results()
expected = ['2,Hi,Hello', '3,Hello,Hello']
self.assert_equals(actual, expected)
def test_sql_update(self):
t_env = self.t_env
source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"sinks",
source_sink_utils.TestAppendSink(field_names, field_types))
t_env.sql_update("insert into sinks select * from %s" % source)
self.t_env.execute("test_sql_job")
actual = source_sink_utils.results()
expected = ['1,Hi,Hello', '2,Hello,Hello']
self.assert_equals(actual, expected)
def test_create_table_environment(self):
table_config = TableConfig()
table_config.set_max_generated_code_length(32000)
table_config.set_null_check(False)
table_config.set_local_timezone("Asia/Shanghai")
env = StreamExecutionEnvironment.get_execution_environment()
t_env = StreamTableEnvironment.create(env, table_config)
readed_table_config = t_env.get_config()
self.assertFalse(readed_table_config.get_null_check())
self.assertEqual(readed_table_config.get_max_generated_code_length(), 32000)
self.assertEqual(readed_table_config.get_local_timezone(), "Asia/Shanghai")
def test_create_table_environment_with_blink_planner(self):
t_env = StreamTableEnvironment.create(
self.env,
environment_settings=EnvironmentSettings.new_instance().use_blink_planner().build())
planner = t_env._j_tenv.getPlanner()
self.assertEqual(
planner.getClass().getName(),
"org.apache.flink.table.planner.delegation.StreamPlanner")
t_env = StreamTableEnvironment.create(
environment_settings=EnvironmentSettings.new_instance().build())
planner = t_env._j_tenv.getPlanner()
self.assertEqual(
planner.getClass().getName(),
"org.apache.flink.table.planner.StreamPlanner")
t_env = StreamTableEnvironment.create(
environment_settings=EnvironmentSettings.new_instance().use_blink_planner().build())
planner = t_env._j_tenv.getPlanner()
self.assertEqual(
planner.getClass().getName(),
"org.apache.flink.table.planner.delegation.StreamPlanner")
def test_table_environment_with_blink_planner(self):
self.env.set_parallelism(1)
t_env = StreamTableEnvironment.create(
self.env,
environment_settings=EnvironmentSettings.new_instance().use_blink_planner().build())
source_path = os.path.join(self.tempdir + '/streaming.csv')
sink_path = os.path.join(self.tempdir + '/result.csv')
field_names = ["a", "b", "c"]
field_types = [DataTypes.INT(), DataTypes.STRING(), DataTypes.STRING()]
data = [(1, 'hi', 'hello'), (2, 'hello', 'hello')]
csv_source = self.prepare_csv_source(source_path, data, field_types, field_names)
t_env.register_table_source("source", csv_source)
t_env.register_table_sink(
"sink",
CsvTableSink(field_names, field_types, sink_path))
source = t_env.scan("source")
result = source.alias("a, b, c").select("1 + a, b, c")
result.insert_into("sink")
t_env.execute("blink_test")
results = []
with open(sink_path, 'r') as f:
results.append(f.readline())
results.append(f.readline())
self.assert_equals(results, ['2,hi,hello\n', '3,hello,hello\n'])
def test_set_jars(self):
self.verify_set_java_dependencies("pipeline.jars")
def test_set_classpaths(self):
self.verify_set_java_dependencies("pipeline.classpaths")
def verify_set_java_dependencies(self, config_key):
original_class_loader = \
get_gateway().jvm.Thread.currentThread().getContextClassLoader()
try:
jar_urls = []
func1_class_name = "org.apache.flink.python.util.TestScalarFunction1"
func2_class_name = "org.apache.flink.python.util.TestScalarFunction2"
func1_jar_pattern = "flink-python/target/func1/flink-python*-tests.jar"
func2_jar_pattern = "flink-python/target/func2/flink-python*-tests.jar"
self.ensure_jar_not_loaded(func1_class_name, func1_jar_pattern)
self.ensure_jar_not_loaded(func2_class_name, func2_jar_pattern)
jar_urls.extend(self.get_jar_url(func1_jar_pattern))
jar_urls.extend(self.get_jar_url(func2_jar_pattern))
# test setting the "pipeline.jars" multiple times
self.t_env.get_config().get_configuration().set_string(config_key, ";".join(jar_urls))
first_class_loader = get_gateway().jvm.Thread.currentThread().getContextClassLoader()
self.t_env.get_config().get_configuration().set_string(config_key, jar_urls[0])
self.t_env.get_config().get_configuration().set_string(config_key, ";".join(jar_urls))
second_class_loader = get_gateway().jvm.Thread.currentThread().getContextClassLoader()
self.assertEqual(first_class_loader, second_class_loader)
source = self.t_env.from_elements([(1, "Hi"), (2, "Hello")], ["a", "b"])
self.t_env.register_java_function("func1", func1_class_name)
self.t_env.register_java_function("func2", func2_class_name)
table_sink = source_sink_utils.TestAppendSink(
["a", "b"], [DataTypes.STRING(), DataTypes.STRING()])
self.t_env.register_table_sink("sink", table_sink)
source.select("func1(a, b), func2(a, b)").insert_into("sink")
self.t_env.execute("test")
actual = source_sink_utils.results()
expected = ['1 and Hi,1 or Hi', '2 and Hello,2 or Hello']
self.assert_equals(actual, expected)
finally:
get_gateway().jvm.Thread.currentThread().setContextClassLoader(original_class_loader)
def ensure_jar_not_loaded(self, func_class_name, jar_filename_pattern):
test_jars = glob.glob(os.path.join(_find_flink_source_root(), jar_filename_pattern))
if not test_jars:
self.fail("'%s' is not available. Please compile the test jars first."
% jar_filename_pattern)
try:
self.t_env.register_java_function("func", func_class_name)
except Py4JJavaError:
pass
else:
self.fail("The scalar function '%s' should not be able to be loaded. Please remove "
"the '%s' from the classpath of the PythonGatewayServer process." %
(func_class_name, jar_filename_pattern))
@staticmethod
def get_jar_url(jar_filename_pattern):
test_jars = glob.glob(os.path.join(_find_flink_source_root(), jar_filename_pattern))
return [pathlib.Path(jar_path).as_uri() for jar_path in test_jars]
class BatchTableEnvironmentTests(TableEnvironmentTest, PyFlinkBatchTableTestCase):
def test_explain_with_multi_sinks(self):
t_env = self.t_env
source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"sink1",
CsvTableSink(field_names, field_types, "path1"))
t_env.register_table_sink(
"sink2",
CsvTableSink(field_names, field_types, "path2"))
t_env.sql_update("insert into sink1 select * from %s where a > 100" % source)
t_env.sql_update("insert into sink2 select * from %s where a < 100" % source)
with self.assertRaises(TableException):
t_env.explain(extended=True)
def test_create_table_environment(self):
table_config = TableConfig()
table_config.set_max_generated_code_length(32000)
table_config.set_null_check(False)
table_config.set_local_timezone("Asia/Shanghai")
env = ExecutionEnvironment.get_execution_environment()
t_env = BatchTableEnvironment.create(env, table_config)
readed_table_config = t_env.get_config()
self.assertFalse(readed_table_config.get_null_check())
self.assertEqual(readed_table_config.get_max_generated_code_length(), 32000)
self.assertEqual(readed_table_config.get_local_timezone(), "Asia/Shanghai")
def test_create_table_environment_with_blink_planner(self):
t_env = BatchTableEnvironment.create(
environment_settings=EnvironmentSettings.new_instance().in_batch_mode()
.use_blink_planner().build())
planner = t_env._j_tenv.getPlanner()
self.assertEqual(
planner.getClass().getName(),
"org.apache.flink.table.planner.delegation.BatchPlanner")
def test_table_environment_with_blink_planner(self):
t_env = BatchTableEnvironment.create(
environment_settings=EnvironmentSettings.new_instance().in_batch_mode()
.use_blink_planner().build())
source_path = os.path.join(self.tempdir + '/streaming.csv')
sink_path = os.path.join(self.tempdir + '/results')
field_names = ["a", "b", "c"]
field_types = [DataTypes.INT(), DataTypes.STRING(), DataTypes.STRING()]
data = [(1, 'hi', 'hello'), (2, 'hello', 'hello')]
csv_source = self.prepare_csv_source(source_path, data, field_types, field_names)
t_env.register_table_source("source", csv_source)
t_env.register_table_sink(
"sink",
CsvTableSink(field_names, field_types, sink_path))
source = t_env.scan("source")
result = source.alias("a, b, c").select("1 + a, b, c")
result.insert_into("sink")
t_env.execute("blink_test")
results = []
for root, dirs, files in os.walk(sink_path):
for sub_file in files:
with open(os.path.join(root, sub_file), 'r') as f:
line = f.readline()
while line is not None and line != '':
results.append(line)
line = f.readline()
self.assert_equals(results, ['2,hi,hello\n', '3,hello,hello\n'])
class BlinkBatchTableEnvironmentTests(PyFlinkBlinkBatchTableTestCase):
def test_explain_with_multi_sinks(self):
t_env = self.t_env
source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.STRING()]
t_env.register_table_sink(
"sink1",
CsvTableSink(field_names, field_types, "path1"))
t_env.register_table_sink(
"sink2",
CsvTableSink(field_names, field_types, "path2"))
t_env.sql_update("insert into sink1 select * from %s where a > 100" % source)
t_env.sql_update("insert into sink2 select * from %s where a < 100" % source)
actual = t_env.explain(extended=True)
self.assertIsInstance(actual, str)
| 42.299424
| 100
| 0.637308
|
129c94f17fa4fe205481d0cdc728c2124db88cb2
| 256
|
py
|
Python
|
aioTelegramLogs/utils.py
|
AniWaffl/aioTelegramLogs
|
47819de02f8e7c1012fae8e274f7d1fac06d8603
|
[
"MIT"
] | 2
|
2020-11-28T19:47:01.000Z
|
2021-12-29T21:35:19.000Z
|
aioTelegramLogs/utils.py
|
AniWaffl/aioTelegramLogs
|
47819de02f8e7c1012fae8e274f7d1fac06d8603
|
[
"MIT"
] | null | null | null |
aioTelegramLogs/utils.py
|
AniWaffl/aioTelegramLogs
|
47819de02f8e7c1012fae8e274f7d1fac06d8603
|
[
"MIT"
] | null | null | null |
# This is so that the library makes proper requests to the server
def escape_html(text):
"""
Escapes all html characters in text
:param str text:
:rtype: str
"""
return text.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
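# Illustrative usage (editorial note, not part of the original module):
# '&' is escaped first on purpose, so the '&' introduced by '&lt;'/'&gt;'
# is not escaped a second time.
# >>> escape_html('<b>Tom & Jerry</b>')
# '&lt;b&gt;Tom &amp; Jerry&lt;/b&gt;'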
| 28.444444
| 79
| 0.605469
|
ffc37db1565f151250b189480f8eaa35db64d8ae
| 811
|
py
|
Python
|
server/problem_sets/static/static_problem_set.py
|
iiridescent/problem-sets
|
e906fe7509cd158ecdb5920853636339d4d531c3
|
[
"MIT"
] | null | null | null |
server/problem_sets/static/static_problem_set.py
|
iiridescent/problem-sets
|
e906fe7509cd158ecdb5920853636339d4d531c3
|
[
"MIT"
] | 5
|
2021-03-09T10:36:59.000Z
|
2022-02-26T14:36:08.000Z
|
server/problem_sets/static/static_problem_set.py
|
vinhowe/problem-sets
|
e906fe7509cd158ecdb5920853636339d4d531c3
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from problem_sets.serialization import serialize_recursive
from problem_sets.static.data.static_problem_set_entity import StaticProblemSetEntity
from problem_sets.static.static_problem import static_content_list_to_widget_list
@dataclass
class StaticProblemSet(StaticProblemSetEntity):
def serialize(self) -> dict:
data = self.__dict__.copy()
del data['instruction_contents']
del data['answer_contents']
data['instructionContents'] = serialize_recursive(
static_content_list_to_widget_list(self.instruction_contents))
data['answerContents'] = serialize_recursive(static_content_list_to_widget_list(self.answer_contents))
return data
@classmethod
def deserialize(cls, serialized: dict):
pass
| 33.791667
| 110
| 0.771887
|
b0ba7fd83fe7ba1a58f2737fbcdf7b27fed32730
| 2,676
|
py
|
Python
|
egs/librispeech/ASR/transducer_stateless/joiner.py
|
TIFOSI528/icefall
|
6f7860a0a60b53026216fa4ba19048955951333e
|
[
"Apache-2.0"
] | null | null | null |
egs/librispeech/ASR/transducer_stateless/joiner.py
|
TIFOSI528/icefall
|
6f7860a0a60b53026216fa4ba19048955951333e
|
[
"Apache-2.0"
] | null | null | null |
egs/librispeech/ASR/transducer_stateless/joiner.py
|
TIFOSI528/icefall
|
6f7860a0a60b53026216fa4ba19048955951333e
|
[
"Apache-2.0"
] | 1
|
2022-03-23T02:39:34.000Z
|
2022-03-23T02:39:34.000Z
|
# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
class Joiner(nn.Module):
def __init__(self, input_dim: int, output_dim: int):
super().__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.output_linear = nn.Linear(input_dim, output_dim)
def forward(
self,
encoder_out: torch.Tensor,
decoder_out: torch.Tensor,
encoder_out_len: torch.Tensor,
decoder_out_len: torch.Tensor,
) -> torch.Tensor:
"""
Args:
encoder_out:
Output from the encoder. Its shape is (N, T, self.input_dim).
decoder_out:
Output from the decoder. Its shape is (N, U, self.input_dim).
encoder_out_len:
A 1-D tensor of shape (N,) containing the number of valid frames
before padding in `encoder_out`.
decoder_out_len:
A 1-D tensor of shape (N,) containing the number of valid frames
before padding in `decoder_out`.
Returns:
Return a tensor of shape (sum_all_TU, self.output_dim).
"""
assert encoder_out.ndim == decoder_out.ndim == 3
assert encoder_out.size(0) == decoder_out.size(0)
assert encoder_out.size(2) == self.input_dim
assert decoder_out.size(2) == self.input_dim
N = encoder_out.size(0)
encoder_out_len = encoder_out_len.tolist()
decoder_out_len = decoder_out_len.tolist()
encoder_out_list = [
encoder_out[i, : encoder_out_len[i], :] for i in range(N)
]
decoder_out_list = [
decoder_out[i, : decoder_out_len[i], :] for i in range(N)
]
x = [
e.unsqueeze(1) + d.unsqueeze(0)
for e, d in zip(encoder_out_list, decoder_out_list)
]
x = [p.reshape(-1, self.input_dim) for p in x]
x = torch.cat(x)
activations = torch.tanh(x)
logits = self.output_linear(activations)
return logits
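# Illustrative shape check (editorial sketch, not part of the original file):
# for a batch of two utterances with valid lengths T = [3, 2] and U = [2, 2],
# the joiner returns sum(T_i * U_i) = 3*2 + 2*2 = 10 rows of logits.
#
#   joiner = Joiner(input_dim=4, output_dim=6)
#   encoder_out = torch.randn(2, 3, 4)   # (N, max_T, input_dim)
#   decoder_out = torch.randn(2, 2, 4)   # (N, max_U, input_dim)
#   logits = joiner(encoder_out, decoder_out,
#                   encoder_out_len=torch.tensor([3, 2]),
#                   decoder_out_len=torch.tensor([2, 2]))
#   assert logits.shape == (10, 6)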
| 32.634146
| 74
| 0.627055
|
66259119fa6c0a2f85a8e25f238fd108c2c5ae8b
| 422
|
py
|
Python
|
src/models/face_detection.py
|
monim67/openvino-computer-pointer-controller
|
5ea50b33ae37ee29f52252eb0db2cafd36fc6df4
|
[
"MIT"
] | null | null | null |
src/models/face_detection.py
|
monim67/openvino-computer-pointer-controller
|
5ea50b33ae37ee29f52252eb0db2cafd36fc6df4
|
[
"MIT"
] | null | null | null |
src/models/face_detection.py
|
monim67/openvino-computer-pointer-controller
|
5ea50b33ae37ee29f52252eb0db2cafd36fc6df4
|
[
"MIT"
] | null | null | null |
"""
model: face-detection-adas-binary-0001
input: BxCxHxW
input shape: (1, 3, 384, 672)
output: (image_id, label, conf, x_min, y_min, x_max, y_max)
output shape: (1, 1, N, 7)
"""
from .base_model import BaseModel
class FaceDetect(BaseModel):
model_name = "face-detection-adas-binary-0001"
precision_directory_dict = {
"FP32": "FP32-INT1",
"FP16": "FP32-INT1",
"INT8": "FP32-INT1",
}
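# Illustrative post-processing sketch (editorial note; the BaseModel inference
# API is not shown in this file, so the names below are assumptions). Each of
# the N rows of the (1, 1, N, 7) output is
# (image_id, label, conf, x_min, y_min, x_max, y_max) with coordinates
# normalised to [0, 1], so detections can be turned into pixel boxes like:
#
#   boxes = []
#   for _, _, conf, x_min, y_min, x_max, y_max in output[0][0]:
#       if conf >= 0.5:  # hypothetical confidence threshold
#           boxes.append((int(x_min * frame_width), int(y_min * frame_height),
#                         int(x_max * frame_width), int(y_max * frame_height)))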
| 22.210526
| 59
| 0.63981
|
e17bd98a135f31645ae528d7aa4882ad6711b460
| 1,659
|
py
|
Python
|
accounts/urls.py
|
SolomonMbak/3_a
|
d5d7656091e866efa2cd5dcc7bd5bc54627ac62a
|
[
"Apache-2.0"
] | 2
|
2019-08-07T05:50:25.000Z
|
2020-05-19T17:28:05.000Z
|
accounts/urls.py
|
SolomonMbak/3_a
|
d5d7656091e866efa2cd5dcc7bd5bc54627ac62a
|
[
"Apache-2.0"
] | 11
|
2020-02-12T01:19:56.000Z
|
2022-03-11T23:55:28.000Z
|
accounts/urls.py
|
SolomonMbak/3_a
|
d5d7656091e866efa2cd5dcc7bd5bc54627ac62a
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from . import views
from django.contrib.auth import views as auth_views
app_name = "accounts"
urlpatterns = [
path("register/", views.register, name="register"),
path("account/", views.account, name="account"),
path("logout/", views.logout_request, name="logout"),
path("login/", views.login_request, name="login"),
# path('accounts/password-reset/',
# auth_views.PasswordResetView.as_view(), name='password_reset'),
# path('accounts/password-reset/', auth_views.PasswordResetView.as_view(
# template_name='accounts/password_reset_form.html'), name='password_reset'),
# path('accounts/password-reset/done',
# auth_views.PasswordResetDoneView.as_view(), name='password_reset_done'),
# path('accounts/password-reset-confirm/<uidb64>/<token>/',
# auth_views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
# path('accounts/password-reset-complete/',
# auth_views.PasswordResetCompleteView.as_view(), name='password_reset_complete'),
# path("", views.index, name="index"),
# path("about/", views.about, name="about"),
# path("password-reset/", views.password_reset, name="password_reset"),
# path("change_password/", views.change_password, name="change_password"),
# path("account/login/", views.login_request, name="login"),
# path("<single_slug>", views.single_slug, name="single_slug"),
# path("privacy_policy/", views.privacy_policy, name="privacy_policy"),
# path("terms/", views.terms, name="terms"),
# path("publish_a_course/", views.publish_a_course, name="publish_a_course"),
]
| 40.463415
| 91
| 0.694394
|
2f96e2c6fd00231ad1221ec9869bf4db9125ac49
| 954
|
py
|
Python
|
doc/gauss/listings/generators/gauss.py
|
lijun99/pyre
|
004dfd4c06489b4ba5b32877338ca6440f2d523b
|
[
"BSD-3-Clause"
] | 3
|
2019-08-02T21:02:47.000Z
|
2021-09-08T13:59:43.000Z
|
doc/gauss/listings/generators/gauss.py
|
lijun99/pyre
|
004dfd4c06489b4ba5b32877338ca6440f2d523b
|
[
"BSD-3-Clause"
] | null | null | null |
doc/gauss/listings/generators/gauss.py
|
lijun99/pyre
|
004dfd4c06489b4ba5b32877338ca6440f2d523b
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2019 all rights reserved
#
def gauss():
"""
The driver for the generator based implementation
"""
from Disk import Disk
from Mersenne import Mersenne
# inputs
N = 10**5
box = [(0,1), (0,1)]
# the point cloud
cloud = Mersenne()
# the region of integration
disk = Disk(center=(0,0), radius=1)
# the integration algorithm
# build the point sample
sample = cloud.points(N, box)
# count the interior points
interior = count(disk.interior(sample))
# print the estimate of π
print("π: {:.8f}".format(4*interior/N))
return
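# Editorial note (not part of the original file): the estimate works because
# the sample is uniform on the unit square and the quarter of the unit disk
# inside that square has area π/4, so the fraction of points falling inside
# the disk converges to π/4 and 4*interior/N converges to π. For example,
# if 78540 of N = 100000 points land inside, the estimate printed is 3.14160000.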
def count(iterable): #@\label{line:driver:generators:count}@
"""
Count the entries of iterable
"""
counter = 0
for item in iterable:
counter += 1
return counter
# main
if __name__ == "__main__":
gauss()
# end of file
| 18.705882
| 60
| 0.606918
|
59c14eddfd03c5b034cb37c57da65e7575577d50
| 3,121
|
py
|
Python
|
example.py
|
boxblox/gdxtools
|
8d440a85f54c5a290be2cbe6d96c3d05a3f2ea44
|
[
"MIT"
] | 2
|
2019-11-01T01:05:36.000Z
|
2020-02-08T01:42:41.000Z
|
example.py
|
boxblox/gdxtools
|
8d440a85f54c5a290be2cbe6d96c3d05a3f2ea44
|
[
"MIT"
] | null | null | null |
example.py
|
boxblox/gdxtools
|
8d440a85f54c5a290be2cbe6d96c3d05a3f2ea44
|
[
"MIT"
] | null | null | null |
import pandas as pd
import gdxtools as gt
if __name__ == '__main__':
# create instance of gams gdx data
gdxin = gt.gdxrw.gdxReader('trnsport_output.gdx')
# get all symbols inside a GDX
gdxin.symbols
# get symbol types from a GDX file
gdxin.symbolType
# get symbol dimensions from a GDX file
gdxin.symbolDimension
# read in single items
i = gdxin.rgdx(name='i')
j = gdxin.rgdx(name='j')
# read in multiple items
m = gdxin.rgdx(name=['i', 'j'])
# read in parameter 'c' and turn it into a dataframe
c = gdxin.rgdx(name='c')
# create a simple index/value pandas dataframe
c_df = pd.DataFrame(data=zip(c['values'].keys(),
c['values'].values()), columns=['index', 'value'])
# might also be helpful to split out the index tuple into different columns
c_df2 = pd.DataFrame(data=c['values'].keys(), columns=c['domain'])
c_df2['value'] = c['values'].values()
# read in a variable and turn it into a dataframe
x = gdxin.rgdx(name='x')
x_df = pd.DataFrame(data=x['values'].keys(), columns=c['domain'])
x_df['LO'] = [x['values'][i]['lower'] for i in x['values'].keys()]
x_df['L'] = [x['values'][i]['level'] for i in x['values'].keys()]
x_df['UP'] = [x['values'][i]['upper'] for i in x['values'].keys()]
x_df['scale'] = [x['values'][i]['scale'] for i in x['values'].keys()]
x_df['M'] = [x['values'][i]['marginal'] for i in x['values'].keys()]
# --------------------------------------------------------------------------
# Write out another GDX with a structure similar to the original input GDX
# Does NOT support EQUATIONS or VARIABLES
# The result can be checked with gdxdiff
# --------------------------------------------------------------------------
gdxout = gt.gdxrw.gdxWriter('./trnsport_output_chk.gdx')
# add sets without domain checking (universe domain)
gdxout.add_set(gamssetname='i', toset=i['elements'], desc=i['text'])
gdxout.add_set(gamssetname='j', toset=j['elements'], desc=j['text'])
# there are no subsets in this example, but if you wanted to run domain checking you would use this:
# gdxout.add_set_dc(gamssetname=, domain=, toset=, desc=)
# add parameters and do the domain checking
a = gdxin.rgdx(name='a')
gdxout.add_parameter_dc(gamsparametername='a',
domain=a['domain'], toparameter=a['values'], desc=a['text'])
b = gdxin.rgdx(name='b')
gdxout.add_parameter_dc(gamsparametername='b',
domain=b['domain'], toparameter=b['values'], desc=b['text'])
d = gdxin.rgdx(name='d')
gdxout.add_parameter_dc(gamsparametername='d',
domain=d['domain'], toparameter=d['values'], desc=d['text'])
f = gdxin.rgdx(name='f')
gdxout.add_scalar(gamsparametername='f', toparameter=f['values'], desc=f['text'])
c = gdxin.rgdx(name='c')
gdxout.add_parameter_dc(gamsparametername='c',
domain=c['domain'], toparameter=c['values'], desc=c['text'])
gdxout.export_gdx()
| 38.060976
| 104
| 0.585069
|
c6193867a749b05d40016b87bbe8334564844a24
| 37
|
py
|
Python
|
examples/list_folder.py
|
mohan3d/PyOpenload
|
83222bd0c55b474c1bb3c27732a79d95455c5d28
|
[
"MIT"
] | 35
|
2016-09-13T21:29:00.000Z
|
2019-10-25T07:55:15.000Z
|
examples/list_folder.py
|
mohan3d/PyOpenload
|
83222bd0c55b474c1bb3c27732a79d95455c5d28
|
[
"MIT"
] | 15
|
2017-05-14T20:20:59.000Z
|
2019-09-22T11:10:44.000Z
|
examples/list_folder.py
|
mohan3d/PyOpenload
|
83222bd0c55b474c1bb3c27732a79d95455c5d28
|
[
"MIT"
] | 12
|
2017-01-28T17:45:54.000Z
|
2019-07-20T07:45:27.000Z
|
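# Editorial note: this snippet assumes `ol` is an already-initialised
# PyOpenload client instance (its construction is not shown in this file).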
resp = ol.list_folder()
print(resp)
| 9.25
| 23
| 0.702703
|
02dc1cfdd69bd6f884580c7338cbb3a281976a68
| 43,762
|
py
|
Python
|
src/sage/algebras/free_algebra.py
|
fredstro/sage
|
c936d2cda81ec7ec3552a3bdb29c994b40d1bb24
|
[
"BSL-1.0"
] | null | null | null |
src/sage/algebras/free_algebra.py
|
fredstro/sage
|
c936d2cda81ec7ec3552a3bdb29c994b40d1bb24
|
[
"BSL-1.0"
] | null | null | null |
src/sage/algebras/free_algebra.py
|
fredstro/sage
|
c936d2cda81ec7ec3552a3bdb29c994b40d1bb24
|
[
"BSL-1.0"
] | null | null | null |
"""
Free algebras
AUTHORS:
- David Kohel (2005-09)
- William Stein (2006-11-01): add all doctests; implemented many
things.
- Simon King (2011-04): Put free algebras into the category framework.
Reimplement free algebra constructor, using a
:class:`~sage.structure.factory.UniqueFactory` for handling
different implementations of free algebras. Allow degree weights
for free algebras in letterplace implementation.
EXAMPLES::
sage: F = FreeAlgebra(ZZ,3,'x,y,z')
sage: F.base_ring()
Integer Ring
sage: G = FreeAlgebra(F, 2, 'm,n'); G
Free Algebra on 2 generators (m, n) over Free Algebra on 3 generators (x, y, z) over Integer Ring
sage: G.base_ring()
Free Algebra on 3 generators (x, y, z) over Integer Ring
The above free algebra is based on a generic implementation. By
:trac:`7797`, there is a different implementation
:class:`~sage.algebras.letterplace.free_algebra_letterplace.FreeAlgebra_letterplace`
based on Singular's letterplace rings. It is currently restricted to
weighted homogeneous elements and is therefore not the default. But the
arithmetic is much faster than in the generic implementation.
Moreover, we can compute Groebner bases with degree bound for its
two-sided ideals, and thus provide ideal containment tests::
sage: F.<x,y,z> = FreeAlgebra(QQ, implementation='letterplace')
sage: F
Free Associative Unital Algebra on 3 generators (x, y, z) over Rational Field
sage: I = F*[x*y+y*z,x^2+x*y-y*x-y^2]*F
sage: I.groebner_basis(degbound=4)
Twosided Ideal (y*z*y*y - y*z*y*z + y*z*z*y - y*z*z*z, y*z*y*x + y*z*y*z + y*z*z*x + y*z*z*z, y*y*z*y - y*y*z*z + y*z*z*y - y*z*z*z, y*y*z*x + y*y*z*z + y*z*z*x + y*z*z*z, y*y*y - y*y*z + y*z*y - y*z*z, y*y*x + y*y*z + y*z*x + y*z*z, x*y + y*z, x*x - y*x - y*y - y*z) of Free Associative Unital Algebra on 3 generators (x, y, z) over Rational Field
sage: y*z*y*y*z*z + 2*y*z*y*z*z*x + y*z*y*z*z*z - y*z*z*y*z*x + y*z*z*z*z*x in I
True
Positive integral degree weights for the letterplace implementation
were introduced in :trac:`7797`::
sage: F.<x,y,z> = FreeAlgebra(QQ, implementation='letterplace', degrees=[2,1,3])
sage: x.degree()
2
sage: y.degree()
1
sage: z.degree()
3
sage: I = F*[x*y-y*x, x^2+2*y*z, (x*y)^2-z^2]*F
sage: Q.<a,b,c> = F.quo(I)
sage: TestSuite(Q).run()
sage: a^2*b^2
c*c
TESTS::
sage: F = FreeAlgebra(GF(5),3,'x')
sage: TestSuite(F).run()
sage: F is loads(dumps(F))
True
sage: F = FreeAlgebra(GF(5),3,'x', implementation='letterplace')
sage: TestSuite(F).run()
sage: F is loads(dumps(F))
True
::
sage: F.<x,y,z> = FreeAlgebra(GF(5),3)
sage: TestSuite(F).run()
sage: F is loads(dumps(F))
True
sage: F.<x,y,z> = FreeAlgebra(GF(5),3, implementation='letterplace')
sage: TestSuite(F).run()
sage: F is loads(dumps(F))
True
::
sage: F = FreeAlgebra(GF(5),3, ['xx', 'zba', 'Y'])
sage: TestSuite(F).run()
sage: F is loads(dumps(F))
True
sage: F = FreeAlgebra(GF(5),3, ['xx', 'zba', 'Y'], implementation='letterplace')
sage: TestSuite(F).run()
sage: F is loads(dumps(F))
True
::
sage: F = FreeAlgebra(GF(5),3, 'abc')
sage: TestSuite(F).run()
sage: F is loads(dumps(F))
True
sage: F = FreeAlgebra(GF(5),3, 'abc', implementation='letterplace')
sage: TestSuite(F).run()
sage: F is loads(dumps(F))
True
::
sage: F = FreeAlgebra(FreeAlgebra(ZZ,2,'ab'), 2, 'x')
sage: TestSuite(F).run()
sage: F is loads(dumps(F))
True
Note that the letterplace implementation can only be used if the corresponding
(multivariate) polynomial ring has an implementation in Singular::
sage: FreeAlgebra(FreeAlgebra(ZZ,2,'ab'), 2, 'x', implementation='letterplace')
Traceback (most recent call last):
...
NotImplementedError: The letterplace implementation is not available for the free algebra you requested
"""
#*****************************************************************************
# Copyright (C) 2005 David Kohel <kohel@maths.usyd.edu>
# Copyright (C) 2005,2006 William Stein <wstein@gmail.com>
# Copyright (C) 2011 Simon King <simon.king@uni-jena.de>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
import six
from sage.categories.rings import Rings
from sage.monoids.free_monoid import FreeMonoid
from sage.monoids.free_monoid_element import FreeMonoidElement
from sage.algebras.free_algebra_element import FreeAlgebraElement
import sage.structure.parent_gens
from sage.structure.factory import UniqueFactory
from sage.misc.cachefunc import cached_method
from sage.all import PolynomialRing
from sage.rings.ring import Algebra
from sage.rings.polynomial.multi_polynomial_libsingular import MPolynomialRing_libsingular
from sage.categories.algebras_with_basis import AlgebrasWithBasis
from sage.combinat.free_module import CombinatorialFreeModule, CombinatorialFreeModuleElement
from sage.combinat.words.word import Word
from sage.structure.category_object import normalize_names
class FreeAlgebraFactory(UniqueFactory):
"""
A constructor of free algebras.
See :mod:`~sage.algebras.free_algebra` for examples and corner cases.
EXAMPLES::
sage: FreeAlgebra(GF(5),3,'x')
Free Algebra on 3 generators (x0, x1, x2) over Finite Field of size 5
sage: F.<x,y,z> = FreeAlgebra(GF(5),3)
sage: (x+y+z)^2
x^2 + x*y + x*z + y*x + y^2 + y*z + z*x + z*y + z^2
sage: FreeAlgebra(GF(5),3, 'xx, zba, Y')
Free Algebra on 3 generators (xx, zba, Y) over Finite Field of size 5
sage: FreeAlgebra(GF(5),3, 'abc')
Free Algebra on 3 generators (a, b, c) over Finite Field of size 5
sage: FreeAlgebra(GF(5),1, 'z')
Free Algebra on 1 generators (z,) over Finite Field of size 5
sage: FreeAlgebra(GF(5),1, ['alpha'])
Free Algebra on 1 generators (alpha,) over Finite Field of size 5
sage: FreeAlgebra(FreeAlgebra(ZZ,1,'a'), 2, 'x')
Free Algebra on 2 generators (x0, x1) over Free Algebra on 1 generators (a,) over Integer Ring
Free algebras are globally unique::
sage: F = FreeAlgebra(ZZ,3,'x,y,z')
sage: G = FreeAlgebra(ZZ,3,'x,y,z')
sage: F is G
True
sage: F.<x,y,z> = FreeAlgebra(GF(5),3) # indirect doctest
sage: F is loads(dumps(F))
True
sage: F is FreeAlgebra(GF(5),['x','y','z'])
True
sage: copy(F) is F is loads(dumps(F))
True
sage: TestSuite(F).run()
By :trac:`7797`, we provide a different implementation of free
algebras, based on Singular's "letterplace rings". Our letterplace
wrapper allows for choosing positive integral degree weights for the
generators of the free algebra. However, only (weighted) homogeneous
elements are supported. Of course, isomorphic algebras in different
implementations are not identical::
sage: G = FreeAlgebra(GF(5),['x','y','z'], implementation='letterplace')
sage: F == G
False
sage: G is FreeAlgebra(GF(5),['x','y','z'], implementation='letterplace')
True
sage: copy(G) is G is loads(dumps(G))
True
sage: TestSuite(G).run()
::
sage: H = FreeAlgebra(GF(5),['x','y','z'], implementation='letterplace', degrees=[1,2,3])
sage: F != H != G
True
sage: H is FreeAlgebra(GF(5),['x','y','z'], implementation='letterplace', degrees=[1,2,3])
True
sage: copy(H) is H is loads(dumps(H))
True
sage: TestSuite(H).run()
Free algebras commute with their base ring.
::
sage: K.<a,b> = FreeAlgebra(QQ,2)
sage: K.is_commutative()
False
sage: L.<c> = FreeAlgebra(K,1)
sage: L.is_commutative()
False
sage: s = a*b^2 * c^3; s
a*b^2*c^3
sage: parent(s)
Free Algebra on 1 generators (c,) over Free Algebra on 2 generators (a, b) over Rational Field
sage: c^3 * a * b^2
a*b^2*c^3
"""
def create_key(self,base_ring, arg1=None, arg2=None,
sparse=False, order='degrevlex',
names=None, name=None,
implementation=None, degrees=None):
"""
Create the key under which a free algebra is stored.
TESTS::
sage: FreeAlgebra.create_key(GF(5),['x','y','z'])
(Finite Field of size 5, ('x', 'y', 'z'))
sage: FreeAlgebra.create_key(GF(5),['x','y','z'],3)
(Finite Field of size 5, ('x', 'y', 'z'))
sage: FreeAlgebra.create_key(GF(5),3,'xyz')
(Finite Field of size 5, ('x', 'y', 'z'))
sage: FreeAlgebra.create_key(GF(5),['x','y','z'], implementation='letterplace')
(Multivariate Polynomial Ring in x, y, z over Finite Field of size 5,)
sage: FreeAlgebra.create_key(GF(5),['x','y','z'],3, implementation='letterplace')
(Multivariate Polynomial Ring in x, y, z over Finite Field of size 5,)
sage: FreeAlgebra.create_key(GF(5),3,'xyz', implementation='letterplace')
(Multivariate Polynomial Ring in x, y, z over Finite Field of size 5,)
sage: FreeAlgebra.create_key(GF(5),3,'xyz', implementation='letterplace', degrees=[1,2,3])
((1, 2, 3), Multivariate Polynomial Ring in x, y, z, x_ over Finite Field of size 5)
"""
if arg1 is None and arg2 is None and names is None:
# this is used for pickling
if degrees is None:
return (base_ring,)
return tuple(degrees),base_ring
PolRing = None
# test if we can use libSingular/letterplace
if implementation is not None and implementation != 'generic':
try:
PolRing = PolynomialRing(base_ring, arg1, arg2,
sparse=sparse, order=order,
names=names, name=name,
implementation=implementation if implementation != 'letterplace' else None)
if not isinstance(PolRing, MPolynomialRing_libsingular):
if PolRing.ngens() == 1:
PolRing = PolynomialRing(base_ring, 1, PolRing.variable_names())
if not isinstance(PolRing, MPolynomialRing_libsingular):
raise TypeError
else:
raise TypeError
except (TypeError, NotImplementedError) as msg:
raise NotImplementedError("The letterplace implementation is not available for the free algebra you requested")
if PolRing is not None:
if degrees is None:
return (PolRing,)
from sage.all import TermOrder
T = PolRing.term_order() + TermOrder('lex',1)
varnames = list(PolRing.variable_names())
newname = 'x'
while newname in varnames:
newname += '_'
varnames.append(newname)
return tuple(degrees),PolynomialRing(PolRing.base(), varnames,
sparse=sparse, order=T,
implementation=implementation if implementation != 'letterplace' else None)
# normalise the generator names
from sage.all import Integer
if isinstance(arg1, (int, long, Integer)):
arg1, arg2 = arg2, arg1
if not names is None:
arg1 = names
elif not name is None:
arg1 = name
if arg2 is None:
arg2 = len(arg1)
names = normalize_names(arg2, arg1)
return base_ring, names
def create_object(self, version, key):
"""
Construct the free algebra that belongs to a unique key.
NOTE:
Of course, that method should not be called directly,
since it does not use the cache of free algebras.
TESTS::
sage: FreeAlgebra.create_object('4.7.1', (QQ['x','y'],))
Free Associative Unital Algebra on 2 generators (x, y) over Rational Field
sage: FreeAlgebra.create_object('4.7.1', (QQ['x','y'],)) is FreeAlgebra(QQ,['x','y'])
False
"""
if len(key) == 1:
from sage.algebras.letterplace.free_algebra_letterplace import FreeAlgebra_letterplace
return FreeAlgebra_letterplace(key[0])
if isinstance(key[0], tuple):
from sage.algebras.letterplace.free_algebra_letterplace import FreeAlgebra_letterplace
return FreeAlgebra_letterplace(key[1], degrees=key[0])
return FreeAlgebra_generic(key[0], len(key[1]), key[1])
FreeAlgebra = FreeAlgebraFactory('FreeAlgebra')
def is_FreeAlgebra(x):
"""
Return True if x is a free algebra; otherwise, return False.
EXAMPLES::
sage: from sage.algebras.free_algebra import is_FreeAlgebra
sage: is_FreeAlgebra(5)
False
sage: is_FreeAlgebra(ZZ)
False
sage: is_FreeAlgebra(FreeAlgebra(ZZ,100,'x'))
True
sage: is_FreeAlgebra(FreeAlgebra(ZZ,10,'x',implementation='letterplace'))
True
sage: is_FreeAlgebra(FreeAlgebra(ZZ,10,'x',implementation='letterplace', degrees=range(1,11)))
True
"""
from sage.algebras.letterplace.free_algebra_letterplace import FreeAlgebra_letterplace
return isinstance(x, (FreeAlgebra_generic,FreeAlgebra_letterplace))
class FreeAlgebra_generic(CombinatorialFreeModule, Algebra):
"""
The free algebra on `n` generators over a base ring.
INPUT:
- ``R`` -- a ring
- ``n`` -- an integer
- ``names`` -- the generator names
EXAMPLES::
sage: F.<x,y,z> = FreeAlgebra(QQ, 3); F
Free Algebra on 3 generators (x, y, z) over Rational Field
sage: mul(F.gens())
x*y*z
sage: mul([ F.gen(i%3) for i in range(12) ])
x*y*z*x*y*z*x*y*z*x*y*z
sage: mul([ F.gen(i%3) for i in range(12) ]) + mul([ F.gen(i%2) for i in range(12) ])
x*y*x*y*x*y*x*y*x*y*x*y + x*y*z*x*y*z*x*y*z*x*y*z
sage: (2 + x*z + x^2)^2 + (x - y)^2
4 + 5*x^2 - x*y + 4*x*z - y*x + y^2 + x^4 + x^3*z + x*z*x^2 + x*z*x*z
TESTS:
Free algebras commute with their base ring.
::
sage: K.<a,b> = FreeAlgebra(QQ)
sage: K.is_commutative()
False
sage: L.<c,d> = FreeAlgebra(K)
sage: L.is_commutative()
False
sage: s = a*b^2 * c^3; s
a*b^2*c^3
sage: parent(s)
Free Algebra on 2 generators (c, d) over Free Algebra on 2 generators (a, b) over Rational Field
sage: c^3 * a * b^2
a*b^2*c^3
"""
Element = FreeAlgebraElement
def __init__(self, R, n, names):
"""
The free algebra on `n` generators over a base ring.
EXAMPLES::
sage: F.<x,y,z> = FreeAlgebra(QQ, 3); F # indirect doctest
Free Algebra on 3 generators (x, y, z) over Rational Field
TEST:
Note that the following is *not* the recommended way to create
a free algebra::
sage: from sage.algebras.free_algebra import FreeAlgebra_generic
sage: FreeAlgebra_generic(ZZ, 3, 'abc')
Free Algebra on 3 generators (a, b, c) over Integer Ring
"""
if R not in Rings():
raise TypeError("Argument R must be a ring.")
self.__ngens = n
indices = FreeMonoid(n, names=names)
cat = AlgebrasWithBasis(R)
CombinatorialFreeModule.__init__(self, R, indices, prefix='F',
category=cat)
self._assign_names(indices.variable_names())
def one_basis(self):
"""
Return the index of the basis element `1`.
EXAMPLES::
sage: F = FreeAlgebra(QQ, 2, 'x,y')
sage: F.one_basis()
1
sage: F.one_basis().parent()
Free monoid on 2 generators (x, y)
"""
return self._indices.one()
def is_field(self, proof=True):
"""
Return True if this Free Algebra is a field, which is the case only if the
base ring is a field and there are no generators.
EXAMPLES::
sage: A = FreeAlgebra(QQ,0,'')
sage: A.is_field()
True
sage: A = FreeAlgebra(QQ,1,'x')
sage: A.is_field()
False
"""
if self.__ngens == 0:
return self.base_ring().is_field(proof)
return False
def is_commutative(self):
"""
Return True if this free algebra is commutative.
EXAMPLES::
sage: R.<x> = FreeAlgebra(QQ,1)
sage: R.is_commutative()
True
sage: R.<x,y> = FreeAlgebra(QQ,2)
sage: R.is_commutative()
False
"""
return self.__ngens <= 1 and self.base_ring().is_commutative()
def __cmp__(self, other):
"""
Two free algebras are considered the same if they have the same
base ring, number of generators and variable names, and the same
implementation.
EXAMPLES::
sage: F = FreeAlgebra(QQ,3,'x')
sage: F == FreeAlgebra(QQ,3,'x')
True
sage: F is FreeAlgebra(QQ,3,'x')
True
sage: F == FreeAlgebra(ZZ,3,'x')
False
sage: F == FreeAlgebra(QQ,4,'x')
False
sage: F == FreeAlgebra(QQ,3,'y')
False
Note that since :trac:`7797` there is a different
implementation of free algebras. Two corresponding free
algebras in different implementations are not equal, but there
is a coercion.
"""
if not isinstance(other, FreeAlgebra_generic):
return -1
c = cmp(self.base_ring(), other.base_ring())
if c: return c
c = cmp(self.__ngens, other.ngens())
if c: return c
c = cmp(self.variable_names(), other.variable_names())
if c: return c
return 0
def _repr_(self):
"""
Text representation of this free algebra.
EXAMPLES::
sage: F = FreeAlgebra(QQ,3,'x')
sage: F # indirect doctest
Free Algebra on 3 generators (x0, x1, x2) over Rational Field
sage: F.rename('QQ<<x0,x1,x2>>')
sage: F #indirect doctest
QQ<<x0,x1,x2>>
sage: FreeAlgebra(ZZ,1,['a'])
Free Algebra on 1 generators (a,) over Integer Ring
"""
return "Free Algebra on {} generators {} over {}".format(
self.__ngens, self.gens(), self.base_ring())
def _element_constructor_(self, x):
"""
Convert ``x`` into ``self``.
EXAMPLES::
sage: R.<x,y> = FreeAlgebra(QQ,2)
sage: R(3) # indirect doctest
3
TESTS::
sage: F.<x,y,z> = FreeAlgebra(GF(5),3)
sage: L.<x,y,z> = FreeAlgebra(ZZ,3,implementation='letterplace')
sage: F(x) # indirect doctest
x
sage: F.1*L.2
y*z
sage: (F.1*L.2).parent() is F
True
::
sage: K.<z> = GF(25)
sage: F.<a,b,c> = FreeAlgebra(K,3)
sage: L.<a,b,c> = FreeAlgebra(K,3, implementation='letterplace')
sage: F.1+(z+1)*L.2
b + (z+1)*c
Check that :trac:`15169` is fixed::
sage: A.<x> = FreeAlgebra(CC)
sage: A(2)
2.00000000000000
We check that the string coercions work correctly over
inexact fields::
sage: F.<x,y> = FreeAlgebra(CC)
sage: F('2')
2.00000000000000
sage: F('x')
1.00000000000000*x
Check that it also converts factorizations::
sage: f = Factorization([(x,2),(y,3)]); f
1.00000000000000*x^2 * 1.00000000000000*y^3
sage: F(f)
1.00000000000000*x^2*y^3
"""
if isinstance(x, FreeAlgebraElement):
P = x.parent()
if P is self:
return x
if P is not self.base_ring():
return self.element_class(self, x)
elif hasattr(x,'letterplace_polynomial'):
P = x.parent()
if self.has_coerce_map_from(P): # letterplace versus generic
ngens = P.ngens()
M = self._indices
def exp_to_monomial(T):
out = []
for i in xrange(len(T)):
if T[i]:
out.append((i%ngens,T[i]))
return M(out)
return self.element_class(self, dict([(exp_to_monomial(T),c) for T,c in x.letterplace_polynomial().dict().iteritems()]))
# ok, not a free algebra element (or should not be viewed as one).
if isinstance(x, six.string_types):
from sage.all import sage_eval
G = self.gens()
d = {str(v): G[i] for i,v in enumerate(self.variable_names())}
return self(sage_eval(x, locals=d))
R = self.base_ring()
# coercion from free monoid
if isinstance(x, FreeMonoidElement) and x.parent() is self._indices:
return self.element_class(self, {x: R.one()})
# coercion from the PBW basis
if isinstance(x, PBWBasisOfFreeAlgebra.Element) \
and self.has_coerce_map_from(x.parent()._alg):
return self(x.parent().expansion(x))
# Check if it's a factorization
from sage.structure.factorization import Factorization
if isinstance(x, Factorization):
return self.prod(f**i for f,i in x)
# coercion via base ring
x = R(x)
if x == 0:
return self.element_class(self, {})
return self.element_class(self, {self.one_basis(): x})
def _coerce_map_from_(self, R):
"""
Return ``True`` if there is a coercion from ``R`` into ``self`` and
``False`` otherwise. The things that coerce into ``self`` are:
- This free algebra.
- Anything with a coercion into ``self.monoid()``.
- Free algebras in the same variables over a base with a coercion
map into ``self.base_ring()``.
- The underlying monoid.
- The PBW basis of ``self``.
- Anything with a coercion into ``self.base_ring()``.
TESTS::
sage: F = FreeAlgebra(ZZ, 3, 'x,y,z')
sage: G = FreeAlgebra(QQ, 3, 'x,y,z')
sage: H = FreeAlgebra(ZZ, 1, 'y')
sage: F._coerce_map_from_(G)
False
sage: G._coerce_map_from_(F)
True
sage: F._coerce_map_from_(H)
False
sage: F._coerce_map_from_(QQ)
False
sage: G._coerce_map_from_(QQ)
True
sage: F._coerce_map_from_(G.monoid())
True
sage: F._coerce_map_from_(F.pbw_basis())
True
sage: F.has_coerce_map_from(PolynomialRing(ZZ, 3, 'x,y,z'))
False
sage: K.<z> = GF(25)
sage: F.<a,b,c> = FreeAlgebra(K,3)
sage: F._coerce_map_from_(ZZ)
True
sage: F._coerce_map_from_(QQ)
False
sage: F._coerce_map_from_(F.monoid())
True
sage: F._coerce_map_from_(F.pbw_basis())
True
sage: G = FreeAlgebra(ZZ, 3, 'a,b,c')
sage: F._coerce_map_from_(G)
True
sage: G._coerce_map_from_(F)
False
sage: L.<a,b,c> = FreeAlgebra(K,3, implementation='letterplace')
sage: F.1 + (z+1) * L.2
b + (z+1)*c
"""
if self._indices.has_coerce_map_from(R):
return True
# free algebras in the same variable over any base that coerces in:
if is_FreeAlgebra(R):
if R.variable_names() == self.variable_names():
return self.base_ring().has_coerce_map_from(R.base_ring())
if isinstance(R, PBWBasisOfFreeAlgebra):
return self.has_coerce_map_from(R._alg)
return self.base_ring().has_coerce_map_from(R)
def gen(self, i):
"""
The ``i``-th generator of the algebra.
EXAMPLES::
sage: F = FreeAlgebra(ZZ,3,'x,y,z')
sage: F.gen(0)
x
"""
if i < 0 or not i < self.__ngens:
raise IndexError("Argument i (= {}) must be between 0 and {}.".format(i, self.__ngens-1))
R = self.base_ring()
F = self._indices
return self.element_class(self, {F.gen(i): R.one()})
@cached_method
def algebra_generators(self):
"""
Return the algebra generators of ``self``.
EXAMPLES::
sage: F = FreeAlgebra(ZZ,3,'x,y,z')
sage: F.algebra_generators()
Finite family {'y': y, 'x': x, 'z': z}
"""
ret = {}
for i in range(self.__ngens):
x = self.gen(i)
ret[str(x)] = x
from sage.sets.family import Family
return Family(ret)
@cached_method
def gens(self):
"""
Return the generators of ``self``.
EXAMPLES::
sage: F = FreeAlgebra(ZZ,3,'x,y,z')
sage: F.gens()
(x, y, z)
"""
return tuple(self.gen(i) for i in range(self.__ngens))
def product_on_basis(self, x, y):
"""
Return the product of the basis elements indexed by ``x`` and ``y``.
EXAMPLES::
sage: F = FreeAlgebra(ZZ,3,'x,y,z')
sage: I = F.basis().keys()
sage: x,y,z = I.gens()
sage: F.product_on_basis(x*y, z*y)
x*y*z*y
"""
return self.monomial(x * y)
def quotient(self, mons, mats=None, names=None):
"""
Return a quotient algebra.
The quotient algebra is defined via the action of a free algebra
`A` on a (finitely generated) free module. The input for the quotient
algebra is a list of monomials (in the underlying monoid for `A`)
which form a free basis for the module of `A`, and a list of
matrices, which give the action of the free generators of `A` on this
monomial basis.
EXAMPLES:
Here is the quaternion algebra defined in terms of three generators::
sage: n = 3
sage: A = FreeAlgebra(QQ,n,'i')
sage: F = A.monoid()
sage: i, j, k = F.gens()
sage: mons = [ F(1), i, j, k ]
sage: M = MatrixSpace(QQ,4)
sage: mats = [M([0,1,0,0, -1,0,0,0, 0,0,0,-1, 0,0,1,0]), M([0,0,1,0, 0,0,0,1, -1,0,0,0, 0,-1,0,0]), M([0,0,0,1, 0,0,-1,0, 0,1,0,0, -1,0,0,0]) ]
sage: H.<i,j,k> = A.quotient(mons, mats); H
Free algebra quotient on 3 generators ('i', 'j', 'k') and dimension 4 over Rational Field
"""
if mats is None:
return super(FreeAlgebra_generic, self).quotient(mons, names)
import free_algebra_quotient
return free_algebra_quotient.FreeAlgebraQuotient(self, mons, mats, names)
quo = quotient
def ngens(self):
"""
The number of generators of the algebra.
EXAMPLES::
sage: F = FreeAlgebra(ZZ,3,'x,y,z')
sage: F.ngens()
3
"""
return self.__ngens
def monoid(self):
"""
The free monoid of generators of the algebra.
EXAMPLES::
sage: F = FreeAlgebra(ZZ,3,'x,y,z')
sage: F.monoid()
Free monoid on 3 generators (x, y, z)
"""
return self._indices
def g_algebra(self, relations, names=None, order='degrevlex', check=True):
"""
The `G`-Algebra derived from this algebra by relations.
By default, it is assumed that two variables commute.
.. TODO::
- Coercion doesn't work yet; there is some cheating about assumptions
- The optional argument ``check`` controls checking the degeneracy
conditions. Furthermore, the default values interfere with
non-degeneracy conditions.
EXAMPLES::
sage: A.<x,y,z> = FreeAlgebra(QQ,3)
sage: G = A.g_algebra({y*x: -x*y})
sage: (x,y,z) = G.gens()
sage: x*y
x*y
sage: y*x
-x*y
sage: z*x
x*z
sage: (x,y,z) = A.gens()
sage: G = A.g_algebra({y*x: -x*y+1})
sage: (x,y,z) = G.gens()
sage: y*x
-x*y + 1
sage: (x,y,z) = A.gens()
sage: G = A.g_algebra({y*x: -x*y+z})
sage: (x,y,z) = G.gens()
sage: y*x
-x*y + z
"""
from sage.matrix.constructor import Matrix
base_ring = self.base_ring()
n = self.__ngens
cmat = Matrix(base_ring, n)
dmat = Matrix(self, n)
for i in xrange(n):
for j in xrange(i+1,n):
cmat[i,j] = 1
for (to_commute,commuted) in relations.iteritems():
#This is dirty, coercion is broken
assert isinstance(to_commute, FreeAlgebraElement), to_commute.__class__
assert isinstance(commuted, FreeAlgebraElement), commuted
((v1,e1),(v2,e2)) = list(list(to_commute)[0][0])
assert e1 == 1
assert e2 == 1
assert v1 > v2
c_coef = None
d_poly = None
for (m,c) in commuted:
if list(m) == [(v2,1),(v1,1)]:
c_coef = c
#buggy coercion workaround
d_poly = commuted - self(c) * self(m)
break
assert not c_coef is None,list(m)
v2_ind = self.gens().index(v2)
v1_ind = self.gens().index(v1)
cmat[v2_ind,v1_ind] = c_coef
if d_poly:
dmat[v2_ind,v1_ind] = d_poly
from sage.rings.polynomial.plural import g_Algebra
return g_Algebra(base_ring, cmat, dmat, names = names or self.variable_names(),
order=order, check=check)
def poincare_birkhoff_witt_basis(self):
"""
Return the Poincare-Birkhoff-Witt (PBW) basis of ``self``.
EXAMPLES::
sage: F.<x,y> = FreeAlgebra(QQ, 2)
sage: F.poincare_birkhoff_witt_basis()
The Poincare-Birkhoff-Witt basis of Free Algebra on 2 generators (x, y) over Rational Field
"""
return PBWBasisOfFreeAlgebra(self)
pbw_basis = poincare_birkhoff_witt_basis
def pbw_element(self, elt):
"""
Return the element ``elt`` in the Poincare-Birkhoff-Witt basis.
EXAMPLES::
sage: F.<x,y> = FreeAlgebra(QQ, 2)
sage: F.pbw_element(x*y - y*x + 2)
2*PBW[1] + PBW[x*y]
sage: F.pbw_element(F.one())
PBW[1]
sage: F.pbw_element(x*y*x + x^3*y)
PBW[x*y]*PBW[x] + PBW[y]*PBW[x]^2 + PBW[x^3*y] + PBW[x^2*y]*PBW[x]
+ PBW[x*y]*PBW[x]^2 + PBW[y]*PBW[x]^3
"""
PBW = self.pbw_basis()
if elt == self.zero():
return PBW.zero()
l = {}
while elt: # != 0
lst = list(elt)
support = [i[0].to_word() for i in lst]
min_elt = support[0]
for word in support[1:len(support)-1]:
if min_elt.lex_less(word):
min_elt = word
coeff = lst[support.index(min_elt)][1]
min_elt = min_elt.to_monoid_element()
l[min_elt] = l.get(min_elt, 0) + coeff
elt = elt - coeff * self.lie_polynomial(min_elt)
return PBW.sum_of_terms([(k, v) for k,v in l.items() if v != 0], distinct=True)
def lie_polynomial(self, w):
"""
Return the Lie polynomial associated to the Lyndon word ``w``. If
``w`` is not Lyndon, then return the product of Lie polynomials of the
Lyndon factorization of ``w``.
INPUT:
- ``w`` -- a word or an element of the free monoid
EXAMPLES::
sage: F = FreeAlgebra(QQ, 3, 'x,y,z')
sage: M.<x,y,z> = FreeMonoid(3)
sage: F.lie_polynomial(x*y)
x*y - y*x
sage: F.lie_polynomial(y*x)
y*x
sage: F.lie_polynomial(x^2*y*x)
x^2*y*x - x*y*x^2
sage: F.lie_polynomial(y*z*x*z*x*z)
y*z*x*z*x*z - y*z*x*z^2*x - y*z^2*x^2*z + y*z^2*x*z*x
- z*y*x*z*x*z + z*y*x*z^2*x + z*y*z*x^2*z - z*y*z*x*z*x
TESTS:
We test some corner cases and alternative inputs::
sage: F.lie_polynomial(Word('xy'))
x*y - y*x
sage: F.lie_polynomial('xy')
x*y - y*x
sage: F.lie_polynomial(M.one())
1
sage: F.lie_polynomial(Word([]))
1
sage: F.lie_polynomial('')
1
"""
if not w:
return self.one()
M = self._indices
if len(w) == 1:
return self(M(w))
ret = self.one()
# We have to be careful about order here.
# Since the Lyndon factors appear from left to right
# we must multiply from left to right as well.
for factor in Word(w).lyndon_factorization():
if len(factor) == 1:
ret = ret * self(M(factor))
continue
x,y = factor.standard_factorization()
x = M(x)
y = M(y)
ret = ret * (self(x * y) - self(y * x))
return ret
class PBWBasisOfFreeAlgebra(CombinatorialFreeModule):
"""
The Poincare-Birkhoff-Witt basis of the free algebra.
EXAMPLES::
sage: F.<x,y> = FreeAlgebra(QQ, 2)
sage: PBW = F.pbw_basis()
sage: px, py = PBW.gens()
sage: px * py
PBW[x*y] + PBW[y]*PBW[x]
sage: py * px
PBW[y]*PBW[x]
sage: px * py^3 * px - 2*px * py
-2*PBW[x*y] - 2*PBW[y]*PBW[x] + PBW[x*y^3]*PBW[x] + PBW[y]*PBW[x*y^2]*PBW[x]
+ PBW[y]^2*PBW[x*y]*PBW[x] + PBW[y]^3*PBW[x]^2
We can convert between the two bases::
sage: p = PBW(x*y - y*x + 2); p
2*PBW[1] + PBW[x*y]
sage: F(p)
2 + x*y - y*x
sage: f = F.pbw_element(x*y*x + x^3*y + x + 3)
sage: F(PBW(f)) == f
True
sage: p = px*py + py^4*px^2
sage: F(p)
x*y + y^4*x^2
sage: PBW(F(p)) == p
True
Note that multiplication in the PBW basis agrees with multiplication
as monomials::
sage: F(px * py^3 * px - 2*px * py) == x*y^3*x - 2*x*y
True
TESTS:
Check that going between the two bases is the identity::
sage: F = FreeAlgebra(QQ, 2, 'x,y')
sage: PBW = F.pbw_basis()
sage: M = F.monoid()
sage: L = [j.to_monoid_element() for i in range(6) for j in Words('xy', i)]
sage: all(PBW(F(PBW(m))) == PBW(m) for m in L)
True
sage: all(F(PBW(F(m))) == F(m) for m in L)
True
"""
@staticmethod
def __classcall_private__(cls, R, n=None, names=None):
"""
Normalize input to ensure a unique representation.
EXAMPLES::
sage: from sage.algebras.free_algebra import PBWBasisOfFreeAlgebra
sage: PBW1 = FreeAlgebra(QQ, 2, 'x,y').pbw_basis()
sage: PBW2.<x,y> = PBWBasisOfFreeAlgebra(QQ)
sage: PBW3 = PBWBasisOfFreeAlgebra(QQ, 2, ['x','y'])
sage: PBW1 is PBW2 and PBW2 is PBW3
True
"""
if n is None and names is None:
if not isinstance(R, FreeAlgebra_generic):
raise ValueError("{} is not a free algebra".format(R))
alg = R
else:
if n is None:
n = len(names)
alg = FreeAlgebra(R, n, names)
return super(PBWBasisOfFreeAlgebra, cls).__classcall__(cls, alg)
def __init__(self, alg):
"""
Initialize ``self``.
EXAMPLES::
sage: PBW = FreeAlgebra(QQ, 2, 'x,y').pbw_basis()
sage: TestSuite(PBW).run()
"""
R = alg.base_ring()
self._alg = alg
category = AlgebrasWithBasis(R)
CombinatorialFreeModule.__init__(self, R, alg.monoid(), prefix='PBW',
category=category)
self._assign_names(alg.variable_names())
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: FreeAlgebra(QQ, 2, 'x,y').pbw_basis()
The Poincare-Birkhoff-Witt basis of Free Algebra on 2 generators (x, y) over Rational Field
"""
return "The Poincare-Birkhoff-Witt basis of {}".format(self._alg)
def _repr_term(self, w):
"""
Return a representation of term indexed by ``w``.
EXAMPLES::
sage: PBW = FreeAlgebra(QQ, 2, 'x,y').pbw_basis()
sage: x,y = PBW.gens()
sage: x*y # indirect doctest
PBW[x*y] + PBW[y]*PBW[x]
sage: y*x
PBW[y]*PBW[x]
sage: x^3
PBW[x]^3
sage: PBW.one()
PBW[1]
sage: 3*PBW.one()
3*PBW[1]
"""
if len(w) == 0:
return super(PBWBasisOfFreeAlgebra, self)._repr_term(w)
ret = ''
p = 1
cur = None
for x in w.to_word().lyndon_factorization():
if x == cur:
p += 1
else:
if len(ret) != 0:
if p != 1:
ret += "^{}".format(p)
ret += "*"
ret += super(PBWBasisOfFreeAlgebra, self)._repr_term(x.to_monoid_element())
cur = x
p = 1
if p != 1:
ret += "^{}".format(p)
return ret
def _element_constructor_(self, x):
"""
Convert ``x`` into ``self``.
EXAMPLES::
sage: F.<x,y> = FreeAlgebra(QQ, 2)
sage: R = F.pbw_basis()
sage: R(3)
3*PBW[1]
sage: R(x*y)
PBW[x*y] + PBW[y]*PBW[x]
"""
if isinstance(x, FreeAlgebraElement):
return self._alg.pbw_element(self._alg(x))
return CombinatorialFreeModule._element_constructor_(self, x)
def _coerce_map_from_(self, R):
"""
Return ``True`` if there is a coercion from ``R`` into ``self`` and
``False`` otherwise. The things that coerce into ``self`` are:
- Anything that coerces into the associated free algebra of ``self``
TESTS::
sage: F = FreeAlgebra(ZZ, 3, 'x,y,z').pbw_basis()
sage: G = FreeAlgebra(QQ, 3, 'x,y,z').pbw_basis()
sage: H = FreeAlgebra(ZZ, 1, 'y').pbw_basis()
sage: F._coerce_map_from_(G)
False
sage: G._coerce_map_from_(F)
True
sage: F._coerce_map_from_(H)
False
sage: F._coerce_map_from_(QQ)
False
sage: G._coerce_map_from_(QQ)
True
sage: F._coerce_map_from_(G._alg.monoid())
True
sage: F.has_coerce_map_from(PolynomialRing(ZZ, 3, 'x,y,z'))
False
sage: F.has_coerce_map_from(FreeAlgebra(ZZ, 3, 'x,y,z'))
True
"""
return self._alg.has_coerce_map_from(R)
def one_basis(self):
"""
Return the index of the basis element for `1`.
EXAMPLES::
sage: PBW = FreeAlgebra(QQ, 2, 'x,y').pbw_basis()
sage: PBW.one_basis()
1
sage: PBW.one_basis().parent()
Free monoid on 2 generators (x, y)
"""
return self._indices.one()
def algebra_generators(self):
"""
Return the generators of ``self`` as an algebra.
EXAMPLES::
sage: PBW = FreeAlgebra(QQ, 2, 'x,y').pbw_basis()
sage: gens = PBW.algebra_generators(); gens
(PBW[x], PBW[y])
sage: all(g.parent() is PBW for g in gens)
True
"""
return tuple(self.monomial(x) for x in self._indices.gens())
gens = algebra_generators
def gen(self, i):
"""
Return the ``i``-th generator of ``self``.
EXAMPLES::
sage: PBW = FreeAlgebra(QQ, 2, 'x,y').pbw_basis()
sage: PBW.gen(0)
PBW[x]
sage: PBW.gen(1)
PBW[y]
"""
return self.algebra_generators()[i]
def free_algebra(self):
"""
Return the associated free algebra of ``self``.
EXAMPLES::
sage: PBW = FreeAlgebra(QQ, 2, 'x,y').pbw_basis()
sage: PBW.free_algebra()
Free Algebra on 2 generators (x, y) over Rational Field
"""
return self._alg
def product(self, u, v):
"""
Return the product of two elements ``u`` and ``v``.
EXAMPLES::
sage: F = FreeAlgebra(QQ, 2, 'x,y')
sage: PBW = F.pbw_basis()
sage: x, y = PBW.gens()
sage: PBW.product(x, y)
PBW[x*y] + PBW[y]*PBW[x]
sage: PBW.product(y, x)
PBW[y]*PBW[x]
sage: PBW.product(y^2*x, x*y*x)
PBW[y]^2*PBW[x^2*y]*PBW[x] + PBW[y]^2*PBW[x*y]*PBW[x]^2 + PBW[y]^3*PBW[x]^3
TESTS:
Check that multiplication agrees with the multiplication in the
free algebra::
sage: F = FreeAlgebra(QQ, 2, 'x,y')
sage: PBW = F.pbw_basis()
sage: x, y = PBW.gens()
sage: F(x*y)
x*y
sage: F(x*y*x)
x*y*x
sage: PBW(F(x)*F(y)*F(x)) == x*y*x
True
"""
return self(self.expansion(u) * self.expansion(v))
def expansion(self, t):
"""
Return the expansion of the element ``t`` of the Poincare-Birkhoff-Witt
basis in the monomials of the free algebra.
EXAMPLES::
sage: F = FreeAlgebra(QQ, 2, 'x,y')
sage: PBW = F.pbw_basis()
sage: x,y = F.monoid().gens()
sage: PBW.expansion(PBW(x*y))
x*y - y*x
sage: PBW.expansion(PBW.one())
1
sage: PBW.expansion(PBW(x*y*x) + 2*PBW(x) + 3)
3 + 2*x + x*y*x - y*x^2
TESTS:
Check that we have the correct parent::
sage: PBW.expansion(PBW(x*y)).parent() is F
True
sage: PBW.expansion(PBW.one()).parent() is F
True
"""
return sum([i[1] * self._alg.lie_polynomial(i[0]) for i in list(t)],
self._alg.zero())
class Element(CombinatorialFreeModuleElement):
def expand(self):
"""
Expand ``self`` in the monomials of the free algebra.
EXAMPLES::
sage: F = FreeAlgebra(QQ, 2, 'x,y')
sage: PBW = F.pbw_basis()
sage: x,y = F.monoid().gens()
sage: f = PBW(x^2*y) + PBW(x) + PBW(y^4*x)
sage: f.expand()
x + x^2*y - x*y*x + y^4*x
"""
return self.parent().expansion(self)
| 33.329779
| 352
| 0.534939
|
599b33e35c1a23e92d28d82efc52732675fab326
| 5,687
|
py
|
Python
|
publ/cli.py
|
PlaidWeb/Publ
|
67efc5e32bf25dbac72a83d1167de038b79db5a7
|
[
"MIT"
] | 27
|
2018-11-30T21:32:26.000Z
|
2022-03-20T19:46:25.000Z
|
publ/cli.py
|
PlaidWeb/Publ
|
67efc5e32bf25dbac72a83d1167de038b79db5a7
|
[
"MIT"
] | 249
|
2018-09-30T07:04:37.000Z
|
2022-03-29T04:31:00.000Z
|
publ/cli.py
|
PlaidWeb/Publ
|
67efc5e32bf25dbac72a83d1167de038b79db5a7
|
[
"MIT"
] | 4
|
2019-03-01T06:46:13.000Z
|
2019-06-30T17:45:46.000Z
|
""" CLI utilities for Publ """
# pylint:disable=too-many-arguments
import itertools
import logging
import os.path
import re
import time
import arrow
import click
import slugify
from flask.cli import AppGroup, with_appcontext
from pony import orm
from . import queries
from .config import config
LOGGER = logging.getLogger(__name__)
publ_cli = AppGroup('publ', short_help="Publ-specific commands") # pylint:disable=invalid-name
@publ_cli.command('reindex', short_help="Reindex the content store")
@click.option('--quietly', '-q', 'quietly', is_flag=True, help="Quietly")
@click.option('--fresh', '-f', 'fresh', is_flag=True, help="Start with a fresh database")
@with_appcontext
def reindex_command(quietly, fresh):
""" Forces a reindex of the content store.
This is particularly useful to ensure that all content has been indexed
before performing another action, such as sending out notifications.
"""
from . import index, model
if fresh:
model.reset()
spinner = itertools.cycle('|/-\\')
index.scan_index(config.content_folder, False)
while index.in_progress():
if not quietly:
qlen = index.queue_size() or ''
print(f"\rIndexing... {next(spinner)} {qlen} ", end='', flush=True)
time.sleep(0.1)
if not quietly:
print("Done")
@publ_cli.command('token', short_help="Generate a bearer token")
@click.argument('identity')
@click.option('--scope', '-s', help="The token's permission scope")
@click.option('--lifetime', '-l', help="The token's lifetime (in seconds)", default=3600)
@with_appcontext
def token_command(identity, scope, lifetime):
""" Generates a bearer token for use with external applications. """
from . import tokens
print(tokens.get_token(identity, int(lifetime), scope))
@publ_cli.command('normalize', short_help="Normalize entry filenames")
@click.argument('category', nargs=-1)
@click.option('--recurse', '-r', 'recurse', is_flag=True,
help="Include subdirectories")
@click.option('--all', '-a', 'all_entries', is_flag=True,
help="Apply to all entries, not just reachable ones")
@click.option('--dry-run', '-n', 'dry_run', is_flag=True,
help="Show, but don't apply, changes")
@click.option('--format', '-f', 'format_str',
help="Filename format to use",
default="{date} {sid} {title}")
@click.option('--verbose', '-v', 'verbose', is_flag=True,
help="Show detailed actions")
@with_appcontext
@orm.db_session
def normalize_command(category, recurse, dry_run, format_str, verbose, all_entries):
""" Normalizes the filenames of content files based on a standardized format.
This will only normalize entries which are already in the content index.
If no categories are specified, it defaults to the root category. To include
the root category in a list of other categories, use an empty string parameter,
e.g.:
flask publ normalize '' blog
Available tokens for --format/-f:
{date} The entry's publish date, in YYYYMMDD format
{time} The entry's publish time, in HHMMSS format
{id} The entry's ID
{status} The entry's publish status
{sid} If the entry is reachable, the ID, otherwise the status
{title} The entry's title, normalized to filename-safe characters
{slug} The entry's slug text
{type} The entry's type
"""
# pylint:disable=too-many-locals
from .model import PublishStatus
entries = queries.build_query({
'category': category or '',
'recurse': recurse,
'_future': True,
'_all': all_entries,
})
fname_slugify = slugify.UniqueSlugify(max_length=100, safe_chars='-.', separator=' ')
for entry in entries:
path = os.path.dirname(entry.file_path)
basename, ext = os.path.splitext(os.path.basename(entry.file_path))
status = PublishStatus(entry.status)
eid = entry.id
if status == PublishStatus.DRAFT:
# Draft entries don't get a stable entry ID
eid = status.name
sid = entry.id if status in (PublishStatus.PUBLISHED,
PublishStatus.HIDDEN,
PublishStatus.SCHEDULED) else status.name
date = arrow.get(entry.local_date)
dest_basename = format_str.format(
date=date.format('YYYYMMDD'),
time=date.format('HHmmss'),
id=eid,
status=status.name,
sid=sid,
title=entry.title,
slug=entry.slug_text,
type=entry.entry_type).strip()
dest_basename = re.sub(r' +', ' ', dest_basename)
dest_basename = fname_slugify(dest_basename)
if dest_basename != basename:
dest_path = os.path.join(path, dest_basename + ext)
if verbose:
print(f'{entry.file_path} -> {dest_path}')
if not os.path.isfile(entry.file_path):
LOGGER.warning('File %s does not exist; is the index up-to-date?', entry.file_path)
elif os.path.exists(dest_path):
LOGGER.warning('File %s already exists', dest_path)
elif not dry_run:
try:
os.rename(entry.file_path, dest_path)
except OSError:
LOGGER.exception('Error moving %s to %s', entry.file_path, dest_path)
entry.file_path = dest_path
orm.commit()
def setup(app):
""" Register the CLI commands with the command parser """
app.cli.add_command(publ_cli)
| 33.452941
| 99
| 0.629506
|
599318540b498896b040f8b56f2ba355b4ec70f6
| 161
|
py
|
Python
|
python/Journal.LinesByBlock.py
|
BIMpraxis/Journalysis
|
af0c042b28d01ba5e44dafc2bbe9556434e897b8
|
[
"MIT"
] | 26
|
2017-11-23T19:30:03.000Z
|
2022-02-09T10:35:10.000Z
|
python/Journal.LinesByBlock.py
|
BIMpraxis/Journalysis
|
af0c042b28d01ba5e44dafc2bbe9556434e897b8
|
[
"MIT"
] | 51
|
2017-11-16T15:02:32.000Z
|
2022-03-01T13:51:58.000Z
|
python/Journal.LinesByBlock.py
|
BIMpraxis/Journalysis
|
af0c042b28d01ba5e44dafc2bbe9556434e897b8
|
[
"MIT"
] | 9
|
2017-11-20T09:20:01.000Z
|
2021-09-15T13:08:30.000Z
|
import clr
OUT = []
if IN[0].__repr__() == 'Journal':
if isinstance(IN[1], list): OUT = IN[0].GetLinesByBlocks(IN[1])
else: OUT = IN[0].GetLinesByBlock(IN[1])
| 26.833333
| 64
| 0.645963
|
75ff7101b226381d3bdb98df021396de19fd09e6
| 1,901
|
py
|
Python
|
backend/path-slice.py
|
marborkowski/nasa-space-apps-challenge
|
b8d1a57e7274de8dbecff1073ad56e80988c6593
|
[
"MIT"
] | null | null | null |
backend/path-slice.py
|
marborkowski/nasa-space-apps-challenge
|
b8d1a57e7274de8dbecff1073ad56e80988c6593
|
[
"MIT"
] | null | null | null |
backend/path-slice.py
|
marborkowski/nasa-space-apps-challenge
|
b8d1a57e7274de8dbecff1073ad56e80988c6593
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - Piotr Skonieczka
#
import Image
import pylab
def get_polygon(list_of_points):
return [(list_of_points[idx], list_of_points[idx+1]) for idx in xrange(len(list_of_points)-1)]
def get_track(x1, y1, x2, y2):
    # Bresenham algorithm
list_of_points = list()
dx = abs(x1 - x2)
dy = abs(y1 - y2)
x_step = 1 if x1 < x2 else -1
y_step = 1 if y1 < y2 else -1
error = dx - dy
while x1 != x2 and y1 != y2:
list_of_points.append((x1, y1))
doubled_error = 2 * error
if doubled_error > -dy:
error -= dy
x1 += x_step
if doubled_error < dx:
error += dx
y1 += y_step
list_of_points.append((x2, y2))
return list_of_points
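# Illustrative example (not part of the original script): get_track(0, 0, 3, 2)
# returns [(0, 0), (1, 1), (2, 1), (3, 2)] -- the rasterized pixel coordinates
# between the two endpoints, endpoints included.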
def get_track_of_polygon(polygon):
return [get_track(p1[0], p1[1], p2[0], p2[1]) for p1, p2 in polygon]
def get_image_cross_section(image, polygon):
crossection_points = reduce(list.__add__, get_track_of_polygon(polygon))
crossection_pixels = map(lambda point: image.getpixel(point), crossection_points)
return crossection_pixels
def draw_the_track_cross_section(image, crossection_points):
track_polygon = get_polygon(crossection_points)
cross_section = get_image_cross_section(image, track_polygon)
pylab.plot(map(lambda pixel: 255 - pixel[2], cross_section))
pylab.ylim(0, 255)
pylab.xlim(0, len(cross_section))
pylab.xticks([])
pylab.autoscale(False)
pylab.show()
def main():
demo_image = Image.open("temporary-image.jpg", "r")
# Some points
p1 = (62, 1022)
p2 = (30, 890)
p3 = (415, 555)
p4 = (252, 918)
track1 = [p1, p4]
track2 = [p1, p2, p3, p4]
draw_the_track_cross_section(demo_image, track1)
draw_the_track_cross_section(demo_image, track2)
if __name__ == '__main__':
main()
| 22.364706
| 98
| 0.642294
|
07132933343aa219a87da459baa1bf7e849944b1
| 5,450
|
py
|
Python
|
aispace/utils/str_utils.py
|
SmileGoat/AiSpace
|
35fc120667e4263c99b300815e0bf018f5064a40
|
[
"Apache-2.0"
] | 32
|
2020-01-16T07:59:03.000Z
|
2022-03-31T09:24:00.000Z
|
aispace/utils/str_utils.py
|
SmileGoat/AiSpace
|
35fc120667e4263c99b300815e0bf018f5064a40
|
[
"Apache-2.0"
] | 9
|
2020-06-05T03:27:06.000Z
|
2022-03-12T01:00:17.000Z
|
aispace/utils/str_utils.py
|
SmileGoat/AiSpace
|
35fc120667e4263c99b300815e0bf018f5064a40
|
[
"Apache-2.0"
] | 3
|
2020-06-09T02:22:50.000Z
|
2021-07-19T06:07:07.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2019-11-04 11:08
# @Author : yingyuankai
# @Email : yingyuankai@aliyun.com
# @File : str_utils.py
from typing import Union, List
import unicodedata
import six
import nltk
from nltk.util import ngrams
import re
from random import randint
from datetime import datetime
import hashlib
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
one token at a time. This makes more sense than truncating an equal
percent of tokens from each, since if one sequence is very short then
each token that's truncated likely contains more information than a
longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def preprocess_text(inputs, lower=False, remove_space=True, keep_accents=False):
if remove_space:
outputs = ' '.join(inputs.strip().split())
else:
outputs = inputs
outputs = outputs.replace("``", '"').replace("''", '"')
if six.PY2 and isinstance(outputs, str):
outputs = outputs.decode('utf-8')
if not keep_accents:
outputs = unicodedata.normalize('NFKD', outputs)
outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
if lower:
outputs = outputs.lower()
return outputs
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def mixed_segmentation(in_str, rm_punc=False):
"""
    split mixed Chinese and English text into a list of tokens
:param in_str:
:param rm_punc:
:return:
"""
in_str = str(in_str).lower().strip()
segs_out = []
temp_str = ""
sp_char = ['-', ':', '_', '*', '^', '/', '\\', '~', '`', '+', '=',
               '，', '。', '：', '？', '！', '“', '”', '；', '’', '《', '》', '……', '·', '、',
               '「', '」', '（', '）', '－', '～', '『', '』']
for char in in_str:
if rm_punc and char in sp_char:
continue
if re.search(r'[\u4e00-\u9fa5]', char) or char in sp_char:
if temp_str != "":
ss = nltk.word_tokenize(temp_str)
segs_out.extend(ss)
temp_str = ""
segs_out.append(char)
else:
temp_str += char
# handling last part
if temp_str != "":
ss = nltk.word_tokenize(temp_str)
segs_out.extend(ss)
return segs_out
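# Illustrative example (not part of the original module):
#   mixed_segmentation("深度learning模型") -> ['深', '度', 'learning', '模', '型']
# Chinese characters are emitted one at a time, while Latin-script spans are
# tokenized with nltk.word_tokenize.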
def remove_punctuation(in_str):
"""
remove punctuation
:param in_str:
:return:
"""
in_str = str(in_str).lower().strip()
sp_char = ['-', ':', '_', '*', '^', '/', '\\', '~', '`', '+', '=',
               '，', '。', '：', '？', '！', '“', '”', '；', '’', '《', '》', '……', '·', '、',
               '「', '」', '（', '）', '－', '～', '『', '』']
out_segs = []
for char in in_str:
if char in sp_char:
continue
else:
out_segs.append(char)
return ''.join(out_segs)
def find_lcs(s1, s2):
"""
    find the longest common substring of s1 and s2
:param s1:
:param s2:
:return:
"""
m = [[0 for i in range(len(s2) + 1)] for j in range(len(s1) + 1)]
mmax = 0
p = 0
for i in range(len(s1)):
for j in range(len(s2)):
if s1[i] == s2[j]:
m[i + 1][j + 1] = m[i][j] + 1
if m[i + 1][j + 1] > mmax:
mmax = m[i + 1][j + 1]
p = i + 1
return s1[p - mmax:p], mmax
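# Illustrative example (not part of the original module):
#   find_lcs("abcdef", "zcdem") -> ('cde', 3)
# i.e. the longest common substring and its length.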
def uuid_maker():
"""
    make a uuid from the current timestamp plus random digits
:return:
"""
return '{0:%Y%m%d%H%M%S%f}'.format(datetime.now()) + ''.join(
[str(randint(1, 10)) for i in range(5)])
def text_to_ngrams(sequence, n_gram=3):
result = []
if isinstance(sequence, list):
sequence = ''.join(sequence)
for i in range(1, n_gram + 1):
subword = [''.join(itm) for itm in ngrams(sequence, i)]
result.extend(subword)
return result
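# Illustrative example (not part of the original module):
#   text_to_ngrams("abcd", n_gram=2) -> ['a', 'b', 'c', 'd', 'ab', 'bc', 'cd']
# i.e. all character n-grams of length 1 up to n_gram, in order.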
def compute_md5_hash(my_string):
m = hashlib.md5()
m.update(my_string.encode('utf-8'))
return m.hexdigest()
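# Illustrative example (not part of the original module):
#   compute_md5_hash("abc") -> '900150983cd24fb0d6963f7d28e17f72'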
| 29.144385
| 85
| 0.535413
|
b65e923175e2be6b22dd2fa993f2b5ae7e9babba
| 39,856
|
py
|
Python
|
paprika/setup.py
|
jeff231li/pAPRika
|
babd0ec7cf7e9a982e814d44cbe3e0e1dd8f31a8
|
[
"BSD-3-Clause"
] | 3
|
2019-11-02T18:21:46.000Z
|
2019-12-03T22:47:41.000Z
|
paprika/setup.py
|
jeff231li/pAPRika
|
babd0ec7cf7e9a982e814d44cbe3e0e1dd8f31a8
|
[
"BSD-3-Clause"
] | null | null | null |
paprika/setup.py
|
jeff231li/pAPRika
|
babd0ec7cf7e9a982e814d44cbe3e0e1dd8f31a8
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This class contains a simulation setup wrapper for use with the OpenFF Evaluator.
"""
import logging
import os
import shutil
import subprocess as sp
from pathlib import Path
import numpy as np
import parmed as pmd
import pkg_resources
import pytraj as pt
import simtk.openmm as openmm
import simtk.unit as unit
from paprika import align
from paprika.restraints import static_DAT_restraint, DAT_restraint
from paprika.restraints.read_yaml import read_yaml
from paprika.restraints.restraints import create_window_list
logger = logging.getLogger(__name__)
_PI_ = np.pi
def _get_installed_benchmarks():
_installed_benchmarks = {}
for entry_point in pkg_resources.iter_entry_points(group="taproom.benchmarks"):
_installed_benchmarks[entry_point.name] = entry_point.load()
return _installed_benchmarks
def read_openmm_system_from_xml(filename):
with open(filename, "rb") as file:
return openmm.XmlSerializer.deserialize(file.read().decode())
class Setup(object):
"""
    The Setup class provides a wrapper around the preparation of the host-guest system and the application of restraints.
"""
def __init__(self, host, guest=None,
backend="openmm", directory_path="benchmarks",
additional_benchmarks=None, generate_gaff_files=False, gaff_version="gaff2",
guest_orientation=None, build=True):
self.host = host
self.guest = guest if guest is not None else "release"
self.backend = backend
self.directory = Path(directory_path).joinpath(self.host).joinpath(f"{self.guest}-{guest_orientation}" if
guest_orientation is not None else
f"{self.guest}")
self.desolvated_window_paths = []
self.window_list = []
if self.backend == "amber":
# Generate `frcmod` and dummy atom files.
raise NotImplementedError
self.directory.mkdir(parents=True, exist_ok=True)
installed_benchmarks = get_benchmarks()
if additional_benchmarks is not None:
installed_benchmarks.update(additional_benchmarks)
host_yaml, guest_yaml = self.parse_yaml(installed_benchmarks, guest_orientation)
self.benchmark_path = host_yaml.parent
self.host_yaml = read_yaml(host_yaml)
if guest:
self.guest_yaml = read_yaml(guest_yaml["yaml"])
if build:
# Here, we build desolvated windows and pass the files to the OpenFF Evaluator.
# These files are stored in `self.desolvated_window_paths`.
self.build_desolvated_windows(guest_orientation)
if generate_gaff_files:
generate_gaff(mol2_file=self.benchmark_path.joinpath(self.host_yaml["structure"]),
residue_name=self.host_yaml["resname"],
output_name=self.host,
directory_path=self.directory,
gaff=gaff_version)
if guest:
generate_gaff(mol2_file=self.benchmark_path.joinpath(
self.guest).joinpath(self.guest_yaml["structure"]),
output_name=self.guest,
residue_name=self.guest_yaml["name"],
directory_path=self.directory,
gaff=gaff_version)
if not build:
self.populate_window_list(input_pdb=os.path.join(self.directory, f"{self.host}-{self.guest}.pdb" if self.guest is not None
else f"{self.host}.pdb"))
def parse_yaml(self, installed_benchmarks, guest_orientation):
"""
Read the YAML recipe for the host and guest.
Returns
-------
"""
try:
if guest_orientation:
host_yaml = installed_benchmarks["host_guest_systems"][self.host]["yaml"][guest_orientation]
else:
host_yaml = installed_benchmarks["host_guest_systems"][self.host]["yaml"]["p"]
except KeyError:
logger.error(f"Cannot find YAML recipe for host: {self.host}")
logger.debug(installed_benchmarks)
raise FileNotFoundError
try:
guest_yaml = installed_benchmarks["host_guest_systems"][self.host][self.guest]
except KeyError:
if self.guest == "release":
guest_yaml = None
else:
logger.error(f"Cannot find YAML recipe for guest: {self.guest}")
logger.debug(installed_benchmarks)
raise FileNotFoundError
return host_yaml, guest_yaml
def align(self, input_pdb):
structure = pmd.load_file(str(input_pdb), structure=True)
intermediate_pdb = self.directory.joinpath(f"tmp.pdb")
destination_pdb = self.directory.joinpath(f"{self.host}-{self.guest}.pdb")
if not self.guest == "release":
# Align the host-guest complex so the first guest atom is at (0, 0, 0) and the second guest atom lies
# along the positive z-axis.
guest_angle_restraint_mask = self.guest_yaml["restraints"]["guest"][-1]["restraint"][
"atoms"
].split()
aligned_structure = align.zalign(
structure, guest_angle_restraint_mask[1], guest_angle_restraint_mask[2]
)
aligned_structure.save(str(intermediate_pdb), overwrite=True)
else:
# Create a PDB file just for the host.
host = pmd.load_file(str(input_pdb), structure=True)
host_coordinates = host[f":{self.host_yaml['resname'].upper()}"].coordinates
# Cheap way to get the center of geometry
offset_coordinates = pmd.geometry.center_of_mass(host_coordinates,
masses=np.ones(len(host_coordinates)))
# Find the principal components, take the two largest, and find the vector orthogonal to that
# (should be cross-product right hand rule, I think). Use that vector to align with the z-axis.
# This may not generalize to non-radially-symmetric host molecules.
aligned_coords = np.empty_like(structure.coordinates)
for atom in range(len(structure.atoms)):
aligned_coords[atom] = structure.coordinates[atom] - offset_coordinates
structure.coordinates = aligned_coords
inertia_tensor = np.dot(structure.coordinates.transpose(), structure.coordinates)
eigenvalues, eigenvectors = np.linalg.eig(inertia_tensor)
order = np.argsort(eigenvalues)
axis_3, axis_2, axis_1 = eigenvectors[:, order].transpose()
dummy_axis = np.cross(axis_1, axis_2)
self._add_dummy_to_PDB(input_pdb=input_pdb,
output_pdb=intermediate_pdb,
offset_coordinates=offset_coordinates,
dummy_atom_tuples=[(0, 0, 0),
(dummy_axis[0], dummy_axis[1], dummy_axis[2])])
structure = pmd.load_file(str(intermediate_pdb), structure=True)
for atom in structure.atoms:
atom.mass = 1.0
aligned_structure = align.zalign(
structure, ":DM1", ":DM2"
)
aligned_structure["!:DM1&!:DM2"].save(str(intermediate_pdb),
overwrite=True)
# Save aligned PDB file with CONECT records.
positions_pdb = openmm.app.PDBFile(str(intermediate_pdb))
topology_pdb = openmm.app.PDBFile(str(input_pdb))
positions = positions_pdb.positions
topology = topology_pdb.topology
with open(destination_pdb, "w") as file:
openmm.app.PDBFile.writeFile(topology, positions, file)
os.remove(intermediate_pdb)
def populate_window_list(self, input_pdb):
logger.debug("Setting up dummy restraint to build window list.")
_dummy_restraint = self._create_dummy_restraint(
initial_structure=str(input_pdb),
)
self.window_list = create_window_list([_dummy_restraint])
return _dummy_restraint
def build_desolvated_windows(self, guest_orientation):
if self.guest != "release":
if not guest_orientation:
initial_structure = self.benchmark_path.joinpath(self.guest).joinpath(
self.guest_yaml["complex"]
)
else:
base_name = Path(self.guest_yaml["complex"]).stem
orientation_structure = base_name + f"-{guest_orientation}.pdb"
initial_structure = self.benchmark_path.joinpath(self.guest).joinpath(
orientation_structure
)
else:
initial_structure = self.directory.joinpath(self.benchmark_path.joinpath(self.host_yaml["structure"]))
host = pt.iterload(str(initial_structure), str(initial_structure))
host.save(str(self.directory.joinpath(f"{self.host}.pdb")), overwrite=True, options='conect')
initial_structure = str(self.directory.joinpath(f"{self.host}.pdb"))
self.align(input_pdb=initial_structure)
_dummy_restraint = self.populate_window_list(input_pdb=initial_structure)
for window in self.window_list:
logger.debug(f"Translating guest in window {window}...")
self.directory.joinpath("windows").joinpath(window).mkdir(
parents=True, exist_ok=True
)
self.translate(window, topology_pdb=initial_structure, restraint=_dummy_restraint)
window_pdb_file_name = f"{self.host}-{self.guest}.pdb"
self.desolvated_window_paths.append(
str(
self.directory.joinpath("windows")
.joinpath(window)
.joinpath(window_pdb_file_name)
)
)
def _create_dummy_restraint(self, initial_structure):
if self.guest != "release":
windows = [
self.host_yaml["calculation"]["windows"]["attach"],
self.host_yaml["calculation"]["windows"]["pull"],
None,
]
else:
windows = [
None,
None,
self.host_yaml["calculation"]["windows"]["release"]
]
guest_restraint = DAT_restraint()
guest_restraint.auto_apr = True
guest_restraint.continuous_apr = True
guest_restraint.amber_index = False if self.backend == "openmm" else True
guest_restraint.topology = str(initial_structure)
guest_restraint.mask1 = "@1"
guest_restraint.mask2 = "@2"
if self.guest != "release":
restraint = self.guest_yaml["restraints"]["guest"][0]
guest_restraint.attach["target"] = restraint["restraint"]["attach"][
"target"
]
guest_restraint.attach["fc_final"] = restraint["restraint"]["attach"][
"force_constant"
]
guest_restraint.attach["fraction_list"] = self.host_yaml["calculation"][
"lambda"
]["attach"]
guest_restraint.pull["target_final"] = self.host_yaml["calculation"]["target"][
"pull"
]
guest_restraint.pull["num_windows"] = windows[1]
else:
# Remember, the purpose of this *fake* restraint is *only* to figure out how many windows to make,
            # so we can use the OpenFF Evaluator to solvate the structures for us. To figure out how many windows
# we need, just setting the lambda values should be sufficient.
guest_restraint.auto_apr = False
guest_restraint.continuous_apr = False
guest_restraint.release["target"] = 1.0
guest_restraint.release["fc_final"] = 1.0
guest_restraint.release["fraction_list"] = self.host_yaml["calculation"][
"lambda"
]["release"]
guest_restraint.initialize()
return guest_restraint
def translate(self, window, topology_pdb, restraint):
window_path = self.directory.joinpath("windows").joinpath(window)
if window[0] == "a":
# Copy the initial structure.
source_pdb = self.directory.joinpath(f"{self.host}-{self.guest}.pdb")
shutil.copy(source_pdb, window_path)
elif window[0] == "p":
# Translate the guest.
source_pdb = self.directory.joinpath(f"{self.host}-{self.guest}.pdb")
structure = pmd.load_file(str(source_pdb), structure=True)
target_difference = (
restraint.phase["pull"]["targets"][int(window[1:])]
- restraint.pull["target_initial"]
)
for atom in structure.atoms:
if atom.residue.name == self.guest.upper():
atom.xz += target_difference
intermediate_pdb = window_path.joinpath(f"tmp.pdb")
destination_pdb = window_path.joinpath(f"{self.host}-{self.guest}.pdb")
structure.save(str(intermediate_pdb), overwrite=True)
input_pdb = openmm.app.PDBFile(str(intermediate_pdb))
topology_pdb = openmm.app.PDBFile(str(topology_pdb))
positions = input_pdb.positions
topology = topology_pdb.topology
with open(destination_pdb, "w") as file:
openmm.app.PDBFile.writeFile(topology, positions, file)
os.remove(intermediate_pdb)
elif window[0] == "r":
try:
# Copy the final pull window, if it exists
source_pdb = (
self.directory.joinpath("windows")
.joinpath(f"p{self.host_yaml['calculation']['windows']['pull']:03d}")
.joinpath(f"{self.host}-{self.guest}.pdb")
)
shutil.copy(source_pdb, window_path)
except FileNotFoundError:
# Copy the initial structure, assuming we are doing a standalone release calculation.
shutil.copy(self.directory.joinpath(f"{self.host}-{self.guest}.pdb"),
window_path)
def _add_dummy_to_PDB(self, input_pdb, output_pdb, offset_coordinates,
dummy_atom_tuples):
input_pdb_file = openmm.app.PDBFile(input_pdb)
positions = input_pdb_file.positions
# When we pass in a guest, we have multiple coordinates and the function expects to address the first guest
# atom coordinates.
# When we pass in the center of mass of the host, we'll only have one set of coordinates.
if len(np.shape(offset_coordinates)) < 2:
offset_coordinates = [offset_coordinates, ]
for index, dummy_atom_tuple in enumerate(dummy_atom_tuples):
positions.append(
openmm.Vec3(
offset_coordinates[0][0] + dummy_atom_tuple[0],
offset_coordinates[0][1] + dummy_atom_tuple[1],
offset_coordinates[0][2] + dummy_atom_tuple[2],
)
* unit.angstrom
)
topology = input_pdb_file.topology
for dummy_index in range(len(dummy_atom_tuples)):
dummy_chain = topology.addChain(None)
dummy_residue = topology.addResidue(f"DM{dummy_index + 1}", dummy_chain)
topology.addAtom(f"DUM", None, dummy_residue)
with open(output_pdb, "w") as file:
openmm.app.PDBFile.writeFile(topology, positions, file)
def _add_dummy_to_System(self, system, dummy_atom_tuples):
[system.addParticle(mass=207) for _ in range(len(dummy_atom_tuples))]
for force_index in range(system.getNumForces()):
force = system.getForce(force_index)
if not isinstance(force, openmm.NonbondedForce):
continue
force.addParticle(0.0, 1.0, 0.0)
force.addParticle(0.0, 1.0, 0.0)
force.addParticle(0.0, 1.0, 0.0)
return system
def add_dummy_atoms(
self,
reference_pdb="reference.pdb",
solvated_pdb="output.pdb",
solvated_xml="system.xml",
dummy_pdb="output.pdb",
dummy_xml="output.xml",
):
reference_structure = pmd.load_file(reference_pdb, structure=True)
# Determine the offset coordinates for the new dummy atoms.
if self.guest == "release":
host_coordinates = reference_structure[f":{self.host_yaml['resname'].upper()}"].coordinates
# Cheap way to get the center of geometry
offset_coordinates = pmd.geometry.center_of_mass(host_coordinates,
masses=np.ones(len(host_coordinates)))
else:
guest_angle_restraint_mask = self.guest_yaml["restraints"]["guest"][-1]["restraint"][
"atoms"
].split()
offset_coordinates = reference_structure[f':{self.guest_yaml["name"].upper()} | :{self.host_yaml["resname"].upper()}']\
[guest_angle_restraint_mask[1]].coordinates
# First add dummy atoms to structure
logger.debug(f"Adding dummy atoms to {solvated_pdb}")
try:
self._add_dummy_to_PDB(solvated_pdb, dummy_pdb, offset_coordinates,
dummy_atom_tuples=[(0, 0, -6.0),
(0, 0, -9.0),
(0, 2.2, -11.2)])
except FileNotFoundError:
logger.warning(f"Missing {solvated_pdb}")
self._wrap(dummy_pdb)
# Add dummy atoms to System
if solvated_xml is not None:
try:
system = read_openmm_system_from_xml(solvated_xml)
system = self._add_dummy_to_System(system, dummy_atom_tuples=[(0, 0, -6.0),
(0, 0, -9.0),
(0, 2.2, -11.2)])
system_xml = openmm.XmlSerializer.serialize(system)
with open(dummy_xml, "w") as file:
file.write(system_xml)
except FileNotFoundError:
logger.warning(f"Missing {solvated_xml}")
@staticmethod
def _wrap(file, mask=":DM3"):
logging.info(f"Re-wrapping {file} to avoid pulling near periodic boundaries.")
structure = pmd.load_file(file, structure=True)
anchor = structure[mask]
anchor_z = anchor.atoms[0].xz
for atom in structure.atoms:
atom.xz -= anchor_z - 2.0
structure.save(file, overwrite=True)
def initialize_restraints(self, structure="output.pdb"):
if self.guest != "release":
windows = [
self.host_yaml["calculation"]["windows"]["attach"],
self.host_yaml["calculation"]["windows"]["pull"],
None,
]
else:
windows = [None,
None,
self.host_yaml["calculation"]["windows"]["release"]
]
static_restraints = []
for restraint in self.host_yaml["restraints"]["static"]:
static = static_DAT_restraint(
restraint_mask_list=restraint["restraint"]["atoms"].split(),
num_window_list=windows,
ref_structure=str(structure),
force_constant=restraint["restraint"]["force_constant"],
amber_index=False if self.backend == "openmm" else True,
)
static_restraints.append(static)
conformational_restraints = []
if self.host_yaml["restraints"]["conformational"]:
for conformational in self.host_yaml["restraints"][
"conformational"
]:
mask = conformational["restraint"]["atoms"].split()
conformational_restraint = DAT_restraint()
conformational_restraint.auto_apr = True
conformational_restraint.continuous_apr = True
conformational_restraint.amber_index = False if self.backend == "openmm" else True
conformational_restraint.topology = str(structure)
conformational_restraint.mask1 = mask[0]
conformational_restraint.mask2 = mask[1]
conformational_restraint.mask3 = mask[2] if len(mask) > 2 else None
conformational_restraint.mask4 = mask[3] if len(mask) > 3 else None
if self.guest != "release":
conformational_restraint.attach["target"] = conformational["restraint"][
"target"
]
conformational_restraint.attach["fc_final"] = conformational["restraint"][
"force_constant"
]
conformational_restraint.attach["fraction_list"] = self.host_yaml["calculation"][
"lambda"
]["attach"]
conformational_restraint.pull["target_final"] = conformational["restraint"][
"target"
]
conformational_restraint.pull["num_windows"] = windows[1]
else:
conformational_restraint.auto_apr = False
conformational_restraint.continuous_apr = False
conformational_restraint.release["target"] = conformational["restraint"][
"target"
]
conformational_restraint.release["fc_final"] = conformational["restraint"][
"force_constant"
]
conformational_restraint.release["fraction_list"] = self.host_yaml["calculation"][
"lambda"
]["release"]
conformational_restraint.initialize()
conformational_restraints.append(conformational_restraint)
else:
logger.debug("Skipping conformational restraints...")
symmetry_restraints = []
if self.guest != "release" and "symmetry_correction" in self.guest_yaml:
for symmetry in self.guest_yaml["symmetry_correction"]["restraints"]:
symmetry_restraint = DAT_restraint()
symmetry_restraint.auto_apr = True
symmetry_restraint.continuous_apr = True
symmetry_restraint.amber_index = False if self.backend == "openmm" else True
symmetry_restraint.topology = str(structure)
symmetry_restraint.mask1 = symmetry["atoms"].split()[0]
symmetry_restraint.mask2 = symmetry["atoms"].split()[1]
symmetry_restraint.mask3 = symmetry["atoms"].split()[2]
symmetry_restraint.attach["fc_final"] = symmetry["force_constant"]
symmetry_restraint.attach["fraction_list"] = [1.0] * len(self.host_yaml["calculation"][
"lambda"
]["attach"])
# This target should be overridden by the custom values.
symmetry_restraint.attach["target"] = 999.99
symmetry_restraint.custom_restraint_values["r2"] = 91
symmetry_restraint.custom_restraint_values["r3"] = 91
# 0 force constant between 91 degrees and 180 degrees.
symmetry_restraint.custom_restraint_values["rk3"] = 0.0
symmetry_restraint.initialize()
symmetry_restraints.append(symmetry_restraint)
else:
logger.debug("Skipping symmetry restraints...")
wall_restraints = []
if self.guest != "release" and "wall_restraints" in self.guest_yaml['restraints']:
for wall in self.guest_yaml["restraints"]["wall_restraints"]:
wall_restraint = DAT_restraint()
wall_restraint.auto_apr = True
wall_restraint.continuous_apr = True
wall_restraint.amber_index = False if self.backend == "openmm" else True
wall_restraint.topology = str(structure)
wall_restraint.mask1 = wall["restraint"]["atoms"].split()[0]
wall_restraint.mask2 = wall["restraint"]["atoms"].split()[1]
wall_restraint.attach["fc_final"] = wall["restraint"]["force_constant"]
wall_restraint.attach["fraction_list"] = [1.0] * len(self.host_yaml["calculation"][
"lambda"
]["attach"])
wall_restraint.attach["target"] = wall["restraint"]["target"]
# Minimum distance is 0 Angstrom
wall_restraint.custom_restraint_values["r1"] = 0
wall_restraint.custom_restraint_values["r2"] = 0
# Harmonic force constant beyond target distance.
wall_restraint.custom_restraint_values["rk2"] = wall["restraint"]["force_constant"]
wall_restraint.custom_restraint_values["rk3"] = wall["restraint"]["force_constant"]
wall_restraint.initialize()
wall_restraints.append(wall_restraint)
else:
logger.debug("Skipping wall restraints...")
guest_restraints = []
for restraint in [] if not hasattr(self, 'guest_yaml') else self.guest_yaml["restraints"]["guest"]:
mask = restraint["restraint"]["atoms"].split()
guest_restraint = DAT_restraint()
guest_restraint.auto_apr = True
guest_restraint.continuous_apr = True
guest_restraint.amber_index = False if self.backend == "openmm" else True
guest_restraint.topology = str(structure)
guest_restraint.mask1 = mask[0]
guest_restraint.mask2 = mask[1]
guest_restraint.mask3 = mask[2] if len(mask) > 2 else None
guest_restraint.mask4 = mask[3] if len(mask) > 3 else None
guest_restraint.attach["target"] = restraint["restraint"]["attach"][
"target"
]
guest_restraint.attach["fc_final"] = restraint["restraint"]["attach"][
"force_constant"
]
guest_restraint.attach["fraction_list"] = self.host_yaml["calculation"][
"lambda"
]["attach"]
guest_restraint.pull["target_final"] = restraint["restraint"]["pull"][
"target"
]
guest_restraint.pull["num_windows"] = windows[1]
guest_restraint.initialize()
guest_restraints.append(guest_restraint)
return (
static_restraints,
conformational_restraints,
symmetry_restraints,
wall_restraints,
guest_restraints,
)
def initialize_calculation(self, window, structure_path="output.pdb",
input_xml="system.xml", output_xml="system.xml"):
if self.backend == "amber":
# Write simulation input files in each directory
raise NotImplementedError
try:
system = read_openmm_system_from_xml(input_xml)
except FileNotFoundError:
logger.warning(f"Cannot read XML from {input_xml}")
# Apply the positional restraints.
structure = pmd.load_file(structure_path, structure=True)
for atom in structure.atoms:
if atom.name == "DUM":
positional_restraint = openmm.CustomExternalForce(
"k * ((x-x0)^2 + (y-y0)^2 + (z-z0)^2)"
)
positional_restraint.addPerParticleParameter("k")
positional_restraint.addPerParticleParameter("x0")
positional_restraint.addPerParticleParameter("y0")
positional_restraint.addPerParticleParameter("z0")
# I haven't found a way to get this to use ParmEd's unit library here.
                # ParmEd correctly reports `atom.positions` as units of Ångstroms.
# But then we can't access atom indices.
# Using `atom.xx` works for coordinates, but is unitless.
k = 50.0 * unit.kilocalories_per_mole / unit.angstroms ** 2
x0 = 0.1 * atom.xx * unit.nanometers
y0 = 0.1 * atom.xy * unit.nanometers
z0 = 0.1 * atom.xz * unit.nanometers
positional_restraint.addParticle(atom.idx, [k, x0, y0, z0])
system.addForce(positional_restraint)
positional_restraint.setForceGroup(15)
for restraint in self.static_restraints:
system = apply_openmm_restraints(system, restraint, window, ForceGroup=10)
for restraint in self.conformational_restraints:
system = apply_openmm_restraints(system, restraint, window, ForceGroup=11)
for restraint in self.guest_restraints:
system = apply_openmm_restraints(system, restraint, window, ForceGroup=12)
for restraint in self.symmetry_restraints:
system = apply_openmm_restraints(system, restraint, window, flat_bottom=True, ForceGroup=13)
for restraint in self.wall_restraints:
system = apply_openmm_restraints(system, restraint, window, flat_bottom=True, ForceGroup=14)
system_xml = openmm.XmlSerializer.serialize(system)
with open(output_xml, "w") as file:
file.write(system_xml)
def get_benchmarks():
"""
Determine the installed benchmarks.
"""
installed_benchmarks = _get_installed_benchmarks()
return installed_benchmarks
def apply_openmm_restraints(system, restraint, window, flat_bottom=False, ForceGroup=None):
if window[0] == "a":
phase = "attach"
elif window[0] == "p":
phase = "pull"
elif window[0] == "r":
phase = "release"
window_number = int(window[1:])
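    # e.g. window "a003" -> phase "attach", window_number 3; "p012" -> pull window 12;
    # "r000" -> release window 0.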
if flat_bottom and phase == "attach" and restraint.mask3:
flat_bottom_force = openmm.CustomAngleForce('step(-(theta - theta_0)) * k * (theta - theta_0)^2')
# If theta is greater than theta_0, then the argument to step is negative, which means the force is off.
flat_bottom_force.addPerAngleParameter("k")
flat_bottom_force.addPerAngleParameter("theta_0")
theta_0 = 91.0 * unit.degrees
k = (
restraint.phase[phase]["force_constants"][window_number]
* unit.kilocalories_per_mole
/ unit.radian ** 2
)
flat_bottom_force.addAngle(
restraint.index1[0],
restraint.index2[0],
restraint.index3[0],
[k, theta_0],
)
system.addForce(flat_bottom_force)
if ForceGroup:
flat_bottom_force.setForceGroup(ForceGroup)
return system
elif flat_bottom and phase == "attach" and not restraint.mask3:
flat_bottom_force = openmm.CustomBondForce('step((r - r_0)) * k * (r - r_0)^2')
# If x is greater than x_0, then the argument to step is positive, which means the force is on.
flat_bottom_force.addPerBondParameter("k")
flat_bottom_force.addPerBondParameter("r_0")
r_0 = restraint.phase[phase]["targets"][window_number] * unit.angstrom
k = (
restraint.phase[phase]["force_constants"][window_number]
* unit.kilocalories_per_mole
/ unit.radian ** 2
)
flat_bottom_force.addBond(
restraint.index1[0],
restraint.index2[0],
[k, r_0],
)
system.addForce(flat_bottom_force)
if ForceGroup:
flat_bottom_force.setForceGroup(ForceGroup)
return system
elif flat_bottom and phase == "pull":
return system
elif flat_bottom and phase == "release":
return system
if restraint.mask2 and not restraint.mask3:
if not restraint.group1 and not restraint.group2:
bond_restraint = openmm.CustomBondForce("k * (r - r_0)^2")
bond_restraint.addPerBondParameter("k")
bond_restraint.addPerBondParameter("r_0")
r_0 = restraint.phase[phase]["targets"][window_number] * unit.angstroms
k = (
restraint.phase[phase]["force_constants"][window_number]
* unit.kilocalories_per_mole
/ unit.angstrom ** 2
)
bond_restraint.addBond(restraint.index1[0], restraint.index2[0], [k, r_0])
system.addForce(bond_restraint)
else:
bond_restraint = openmm.CustomCentroidBondForce(
2, "k * (distance(g1, g2) - r_0)^2"
)
bond_restraint.addPerBondParameter("k")
bond_restraint.addPerBondParameter("r_0")
r_0 = restraint.phase[phase]["targets"][window_number] * unit.angstroms
k = (
restraint.phase[phase]["force_constants"][window_number]
* unit.kilocalories_per_mole
/ unit.angstrom ** 2
)
g1 = bond_restraint.addGroup(restraint.index1)
g2 = bond_restraint.addGroup(restraint.index2)
bond_restraint.addBond([g1, g2], [k, r_0])
system.addForce(bond_restraint)
if ForceGroup:
bond_restraint.setForceGroup(ForceGroup)
elif restraint.mask3 and not restraint.mask4:
if not restraint.group1 and not restraint.group2 and not restraint.group3:
angle_restraint = openmm.CustomAngleForce("k * (theta - theta_0)^2")
angle_restraint.addPerAngleParameter("k")
angle_restraint.addPerAngleParameter("theta_0")
theta_0 = restraint.phase[phase]["targets"][window_number] * unit.degrees
k = (
restraint.phase[phase]["force_constants"][window_number]
* unit.kilocalories_per_mole
/ unit.radian ** 2
)
angle_restraint.addAngle(
restraint.index1[0],
restraint.index2[0],
restraint.index3[0],
[k, theta_0],
)
system.addForce(angle_restraint)
else:
# Probably needs openmm.CustomCentroidAngleForce (?)
raise NotImplementedError
if ForceGroup:
angle_restraint.setForceGroup(ForceGroup)
elif restraint.mask4:
if (
not restraint.group1
and not restraint.group2
and not restraint.group3
and not restraint.group4
):
dihedral_restraint = openmm.CustomTorsionForce(f"k * min(min(abs(theta - theta_0), abs(theta - theta_0 + 2 * {_PI_})), abs(theta - theta_0 - 2 * {_PI_}))^2")
dihedral_restraint.addPerTorsionParameter("k")
dihedral_restraint.addPerTorsionParameter("theta_0")
theta_0 = restraint.phase[phase]["targets"][window_number] * unit.degrees
k = (
restraint.phase[phase]["force_constants"][window_number]
* unit.kilocalories_per_mole
/ unit.radian ** 2
)
dihedral_restraint.addTorsion(
restraint.index1[0],
restraint.index2[0],
restraint.index3[0],
restraint.index4[0],
[k, theta_0],
)
system.addForce(dihedral_restraint)
else:
# Probably needs openmm.CustomCentroidTorsionForce (?)
raise NotImplementedError
if ForceGroup:
dihedral_restraint.setForceGroup(ForceGroup)
return system
def generate_gaff(mol2_file, residue_name, output_name=None, need_gaff_atom_types=True, generate_frcmod=True,
directory_path="benchmarks", gaff="gaff2"):
if output_name is None:
output_name = mol2_file.stem
if need_gaff_atom_types:
_generate_gaff_atom_types(mol2_file=mol2_file,
residue_name=residue_name,
output_name=output_name,
gaff=gaff,
directory_path=directory_path)
logging.debug("Checking to see if we have a multi-residue MOL2 file that should be converted "
"to single-residue...")
structure = pmd.load_file(os.path.join(directory_path, f"{output_name}.{gaff}.mol2"), structure=True)
if len(structure.residues) > 1:
structure[":1"].save("tmp.mol2")
if os.path.exists("tmp.mol2"):
os.rename("tmp.mol2", os.path.join(directory_path, f"{output_name}.{gaff}.mol2"))
logging.debug("Saved single-residue MOL2 file for `tleap`.")
else:
raise RuntimeError("Unable to convert multi-residue MOL2 file to single-residue for `tleap`.")
if generate_frcmod:
_generate_frcmod(mol2_file=f'{output_name}.{gaff}.mol2',
gaff=gaff,
output_name=output_name,
directory_path=directory_path)
else:
raise NotImplementedError()
def _generate_gaff_atom_types(mol2_file, residue_name, output_name, gaff="gaff2", directory_path="benchmarks"):
p = sp.Popen(["antechamber", "-i", str(mol2_file), "-fi", "mol2",
"-o", f"{output_name}.{gaff}.mol2", "-fo", "mol2",
"-rn", f"{residue_name.upper()}",
"-at", f"{gaff}",
"-an", "no",
"-dr", "no",
"-pf", "yes"], cwd=directory_path)
p.communicate()
files = ["ANTECHAMBER_AC.AC", "ANTECHAMBER_AC.AC0",
"ANTECHAMBER_BOND_TYPE.AC", "ANTECHAMBER_BOND_TYPE.AC0",
"ATOMTYPE.INF"]
files = [directory_path.joinpath(i) for i in files]
for file in files:
if file.exists():
logger.debug(f"Removing temporary file: {file}")
file.unlink()
if not os.path.exists(f"{output_name}.{gaff}.mol2"):
# Try with the newer (AmberTools 19) version of `antechamber` which doesn't have the `-dr` flag
p = sp.Popen(["antechamber", "-i", str(mol2_file), "-fi", "mol2",
"-o", f"{output_name}.{gaff}.mol2", "-fo", "mol2",
"-rn", f"{residue_name.upper()}",
"-at", f"{gaff}",
"-an", "no",
"-pf", "yes"], cwd=directory_path)
p.communicate()
files = ["ANTECHAMBER_AC.AC", "ANTECHAMBER_AC.AC0",
"ANTECHAMBER_BOND_TYPE.AC", "ANTECHAMBER_BOND_TYPE.AC0",
"ATOMTYPE.INF"]
files = [directory_path.joinpath(i) for i in files]
for file in files:
if file.exists():
logger.debug(f"Removing temporary file: {file}")
file.unlink()
def _generate_frcmod(mol2_file, gaff, output_name, directory_path="benchmarks"):
sp.Popen(["parmchk2", "-i", str(mol2_file), "-f", "mol2",
"-o", f"{output_name}.{gaff}.frcmod",
"-s", f"{gaff}"
], cwd=directory_path)
| 42.948276
| 169
| 0.580465
|
9ebcb89749c04f152ab414d6e4c940e06282b368
| 1,761
|
py
|
Python
|
reinforcement_learning/rl_network_compression_ray_custom/src/tensorflow_resnet/compressor/train.py
|
jerrypeng7773/amazon-sagemaker-examples
|
c5ddecce1f739a345465b9a38b064983a129141d
|
[
"Apache-2.0"
] | 2,610
|
2020-10-01T14:14:53.000Z
|
2022-03-31T18:02:31.000Z
|
reinforcement_learning/rl_network_compression_ray_custom/src/tensorflow_resnet/compressor/train.py
|
jerrypeng7773/amazon-sagemaker-examples
|
c5ddecce1f739a345465b9a38b064983a129141d
|
[
"Apache-2.0"
] | 1,959
|
2020-09-30T20:22:42.000Z
|
2022-03-31T23:58:37.000Z
|
reinforcement_learning/rl_network_compression_ray_custom/src/tensorflow_resnet/compressor/train.py
|
jerrypeng7773/amazon-sagemaker-examples
|
c5ddecce1f739a345465b9a38b064983a129141d
|
[
"Apache-2.0"
] | 2,052
|
2020-09-30T22:11:46.000Z
|
2022-03-31T23:02:51.000Z
|
import logging
import math
import tensorflow as tf
def tensorflow_train(
estimator, data_dir, batch_size, input_function, epochs=None, epochs_between_evals=1
):
"""
This method will train a tensorflow model.
Args:
estimator: `tf.estimator.Estimator` object.
data_dir: Directory where data is stored.
batch_size: Mini batch size to train with.
input_function: A function that will return a `tf.data.FixedLengthRecordDataset`.
epochs: Number of epochs to train, if None will run eval only.
        epochs_between_evals: frequency of validation.
"""
def input_fn_train(num_epochs):
return input_function(
is_training=True,
data_dir=data_dir,
batch_size=batch_size,
num_epochs=num_epochs,
dtype=tf.float32,
)
def input_fn_eval():
return input_function(
is_training=False,
data_dir=data_dir,
batch_size=batch_size,
num_epochs=1,
dtype=tf.float32,
)
if epochs is None:
schedule, n_loops = [0], 1
else:
n_loops = math.ceil(epochs / epochs_between_evals)
schedule = [epochs_between_evals for _ in range(int(n_loops))]
schedule[-1] = epochs - sum(schedule[:-1])
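    # e.g. epochs=10, epochs_between_evals=3 -> n_loops=4 and schedule=[3, 3, 3, 1]:
    # three train/evaluate cycles of 3 epochs followed by a final cycle of 1 epoch.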
eval_results = None
for cycle_index, num_train_epochs in enumerate(schedule):
logging.info("Starting cycle: %d/%d", cycle_index, int(n_loops))
if num_train_epochs:
estimator.train(input_fn=lambda: input_fn_train(num_train_epochs))
logging.info("Starting to evaluate.")
eval_results = estimator.evaluate(input_fn=input_fn_eval)
logging.info(eval_results)
return eval_results
| 29.847458
| 89
| 0.645656
|
fc7fb926a4dd771ec9a2a88f5f589ac18145ffb1
| 753
|
py
|
Python
|
cloudmesh-exercises/e-cloudmesh-common-2.py
|
cybertraining-dsc/sp20-516-223
|
2e7188579a63e0cebf51880cd7c82307ae1b919c
|
[
"Apache-2.0"
] | 1
|
2020-04-05T17:53:51.000Z
|
2020-04-05T17:53:51.000Z
|
cloudmesh-exercises/e-cloudmesh-common-2.py
|
iumsds/sp20-516-223
|
c67f0a966d10387d51575d097fad663791b47a00
|
[
"Apache-2.0"
] | 1
|
2020-01-20T17:41:57.000Z
|
2020-01-20T17:41:57.000Z
|
cloudmesh-exercises/e-cloudmesh-common-2.py
|
iumsds/sp20-516-223
|
c67f0a966d10387d51575d097fad663791b47a00
|
[
"Apache-2.0"
] | 8
|
2020-02-02T23:18:26.000Z
|
2020-04-05T06:17:24.000Z
|
# fa20-516-223 E.Cloudmesh.Common.2
from cloudmesh.common.Shell import Shell
from cloudmesh.common.debug import VERBOSE
from cloudmesh.common.dotdict import dotdict
dist = Shell.distribution()
VERBOSE(dist)
# Convert the dict to dotdict.
dist = dotdict(dist)
print(f"Platform is {dist.platform}")
if dist.platform == 'linux':
if dist.ID == 'ubuntu':
print('Nice you have ubuntu')
if dist.VERSION_ID in ['"19.10"', '"18.04"']:
print('and you have the right version as well. Good Job!')
else:
print('but you do not have the right version. Try harder!!!')
else:
print("You should use ubuntu")
elif dist.platform == 'windows':
print("Good Luck!!!")
else:
print("Unknown version")
| 26.892857
| 73
| 0.656042
|
98877642ce5b60c2f78e398ff84ec6fa125d5c01
| 1,521
|
py
|
Python
|
open_catalyst/ocpmodels/common/relaxation/ml_relaxation.py
|
henrique/hpc
|
b796e7aec0339b8a2d33e7af3c875ebe74f038aa
|
[
"Apache-2.0"
] | 16
|
2020-10-26T15:35:20.000Z
|
2022-03-16T08:10:35.000Z
|
ocpmodels/common/relaxation/ml_relaxation.py
|
jg8610/ocp
|
5f16b64911e0dac3001d4cc7427d60469a967370
|
[
"MIT",
"BSD-3-Clause"
] | 23
|
2021-06-09T08:23:41.000Z
|
2022-03-14T17:37:24.000Z
|
ocpmodels/common/relaxation/ml_relaxation.py
|
jg8610/ocp
|
5f16b64911e0dac3001d4cc7427d60469a967370
|
[
"MIT",
"BSD-3-Clause"
] | 5
|
2021-01-11T22:17:54.000Z
|
2022-02-01T21:23:27.000Z
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from pathlib import Path
import torch
from ocpmodels.common.meter import mae, mae_ratio, mean_l2_distance
from ocpmodels.common.registry import registry
from .optimizers.lbfgs_torch import LBFGS, TorchCalc
def ml_relax(
batch,
model,
steps,
fmax,
relax_opt,
device="cuda:0",
transform=None,
):
"""
Runs ML-based relaxations.
Args:
batch: object
model: object
steps: int
Max number of steps in the structure relaxation.
fmax: float
Structure relaxation terminates when the max force
of the system is no bigger than fmax.
        relax_opt: dict
Optimizer and corresponding parameters to be used for structure relaxations.
"""
batch = batch[0]
ids = batch.sid
calc = TorchCalc(model, transform)
# Run ML-based relaxation
traj_dir = relax_opt.get("traj_dir", None)
optimizer = LBFGS(
batch,
calc,
maxstep=relax_opt.get("maxstep", 0.04),
memory=relax_opt["memory"],
damping=relax_opt.get("damping", 1.0),
alpha=relax_opt.get("alpha", 70.0),
device=device,
traj_dir=Path(traj_dir) if traj_dir is not None else None,
traj_names=ids,
)
relaxed_batch = optimizer.run(fmax=fmax, steps=steps)
return relaxed_batch
| 25.35
| 88
| 0.650888
|
e48a5aae4ecb69ead0115561c4bf64574b667bdd
| 7,657
|
py
|
Python
|
game_board.py
|
jeff012345/clue-part-duo
|
bd9ccd2ccdbc2fe358a696b31644b93e70ff874b
|
[
"MIT"
] | null | null | null |
game_board.py
|
jeff012345/clue-part-duo
|
bd9ccd2ccdbc2fe358a696b31644b93e70ff874b
|
[
"MIT"
] | null | null | null |
game_board.py
|
jeff012345/clue-part-duo
|
bd9ccd2ccdbc2fe358a696b31644b93e70ff874b
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import List, Set, Dict, Tuple, Optional
import pygame
import pygame_gui
from player import *
from definitions import Room
from ai_players import RLPlayer
from Clue import Director, GameStatus
from threading import Lock, Condition, Barrier
from log_book_ui import LogBookPanel
from panels import *
from game_board_util import scale_position, PlayerPiece
from player_roll import PlayerRoll
black = (0,0,0)
white = (255,255,255)
red = (255,0,0)
board_width = 882
display_width = LogBookPanel.PANEL_WIDTH + board_width
display_height = 865
def run(director: Director, run_game_lock: Lock, end_game_lock: Lock, human: HumanPlayer, turn_lock: Lock):
pygame.init()
pygame.display.set_caption('Clue')
game_display = pygame.display.set_mode((display_width, display_height))
manager = pygame_gui.UIManager((display_width, display_height), 'theme.json')
clock = pygame.time.Clock()
crashed = False
# load images
board_img = pygame.image.load('assets/board.jpg')
start_button_rect = pygame.Rect(((display_width / 2) - 50, (display_height / 2) - 25), (100, 50))
start_button = pygame_gui.elements.UIButton(relative_rect=start_button_rect,
text='Start Game',
manager=manager)
board_surface = pygame.Surface((board_width, display_height))
player_pieces: List[PlayerPiece] = list(map(lambda p: PlayerPiece(p, board_surface), director.players))
on_end_turn = lambda: end_turn(turn_lock)
player_roll = PlayerRoll(board_surface, human, on_end_turn)
log_book_ui = LogBookPanel(manager)
guess_panel = GuessPanel(manager, display_width, display_height, human, on_end_turn)
start_turn_menu = StartTurnPanel(manager, display_width, display_height, player_roll, guess_panel, human)
match_pick_panel = MatchPickPanel(manager, display_width, display_height, human, on_end_turn)
human.on_turn = lambda turn: on_player_turn(manager, turn, turn_lock, start_turn_menu, match_pick_panel, on_end_turn)
started = False
while end_game_lock.locked() is False and not started:
time_delta = clock.tick(60) / 1000.0
for event in pygame.event.get():
if event.type == pygame.QUIT:
quit_game(end_game_lock, run_game_lock, turn_lock)
break
elif event.type == pygame.USEREVENT:
if event.user_type == pygame_gui.UI_BUTTON_PRESSED:
if event.ui_element == start_button:
start_button.kill()
run_game_lock.release()
started = True
manager.process_events(event)
game_display.fill(white)
manager.update(time_delta)
manager.draw_ui(game_display)
pygame.display.update()
if started:
# start the game
log_book_ui.show()
while end_game_lock.locked() is False:
time_delta = clock.tick(60) / 1000.0
for event in pygame.event.get():
if event.type == pygame.QUIT:
quit_game(end_game_lock, run_game_lock, turn_lock)
break
start_turn_menu.process_events(event)
log_book_ui.process_events(event)
player_roll.process_events(event)
match_pick_panel.process_events(event)
guess_panel.process_events(event)
manager.process_events(event)
game_display.fill(white)
board_surface.blit(board_img, (0, 0))
if director.game_status == GameStatus.RUNNING:
for player in player_pieces:
player.draw()
player_roll.draw()
game_display.blit(board_surface, (LogBookPanel.PANEL_WIDTH, 0))
manager.update(time_delta)
manager.draw_ui(game_display)
pygame.display.update()
pygame.quit()
def on_player_turn(manager, turn_data: HumanTurn, lock: Lock, start_turn_menu: StartTurnPanel, \
match_pick_panel: MatchPickPanel, on_end_turn: Callable[[Lock]]):
print("player turn")
lock.acquire()
print("starting player turn")
if isinstance(turn_data, PickMatchTurn):
match_pick_panel.show(turn_data)
elif isinstance(turn_data, GuessOutcome):
if turn_data.match is None:
message = "No one showed a card!"
else:
player_name = turn_data.showing_player.character.pretty()
card: Enum = None
if turn_data.match.character is not None:
card = turn_data.match.character
elif turn_data.match.weapon is not None:
card = turn_data.match.weapon
else:
card = turn_data.match.room
message = player_name + " has showed you " + str(card)
rect = create_modal_rect(display_width, display_height, 300, 160)
EndTurnWindow(rect, manager, on_end_turn, "Guess Result", message)
elif isinstance(turn_data, AccusationOutcome):
message = None
if turn_data.correct:
message = 'You Win! Your accusation is correct!'
else:
message = 'You have lost! Your accusation is incorrect.'
message += '<br><br><strong>Solution:</strong> ' + str(turn_data.solution)
rect = create_modal_rect(display_width, display_height, 400, 200)
EndTurnWindow(rect, manager, on_end_turn, "Accusation Result", message)
elif isinstance(turn_data, OpponentGuess):
player_name = turn_data.opponent.character.pretty()
message = "Player " + player_name + " made a guess.<br><br >" + str(turn_data.guess)
rect = create_modal_rect(display_width, display_height, 400, 200)
EndTurnWindow(rect, manager, on_end_turn, "Opponent Guess", message)
elif isinstance(turn_data, DealCard):
message = "You have been dealt the following cards<br>"
for card in turn_data.cards:
message += str(card) + "<br>"
rect = create_modal_rect(display_width, display_height, 400, 200)
EndTurnWindow(rect, manager, on_end_turn, "Dealt Cards", message)
elif isinstance(turn_data, GameOver):
player_name = turn_data.winner.character.pretty()
message = "You have lost! " + player_name + " is the winner!"
message += "<br><strong>Solution:</strong> " + str(turn_data.solution)
if isinstance(turn_data.winner, RLPlayer):
message += "<br>Winner was the AI"
rect = create_modal_rect(display_width, display_height, 400, 200)
EndTurnWindow(rect, manager, on_end_turn, "Game Over", message)
else:
start_turn_menu.show()
def end_turn(lock: Lock):
print("player end turn")
lock.release()
def quit_game(end_game_lock, run_game_lock, turn_lock):
end_game_lock.acquire()
if run_game_lock.locked():
run_game_lock.release()
if turn_lock.locked():
turn_lock.release()
if __name__ == "__main__":
human = NaiveComputerPlayer()  # HumanPlayer()
players = [
NaiveComputerPlayer(),
NaiveComputerPlayer(),
NaiveComputerPlayer(),
NaiveComputerPlayer(),
NaiveComputerPlayer(),
human
]
run_game_lock = Lock()
end_game_lock = Lock()
turn_lock = Lock()
director = Director(end_game_lock, players, turn_lock)
run(director, run_game_lock, end_game_lock, human, turn_lock)
| 35.780374
| 121
| 0.642027
|
af53f01f0bd1e029b9942ba5a1f6d783de18f93f
| 556
|
py
|
Python
|
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-dev/testtemplateapp1234-23630
|
c3a1684aac0feca1ed5fbb9735f19aa05345d96e
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-dev/testtemplateapp1234-23630
|
c3a1684aac0feca1ed5fbb9735f19aa05345d96e
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-dev/testtemplateapp1234-23630
|
c3a1684aac0feca1ed5fbb9735f19aa05345d96e
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "testtemplateapp1234-23630.botics.co"
site_params = {
"name": "TesttemplateApp1234",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| 21.384615
| 61
| 0.669065
|
4854068268530eebecd0e9b87979ab494154ca4e
| 668
|
py
|
Python
|
home/migrations/0001_initial.py
|
d-shaktiranjan/E_Note_Book
|
2746392bc88f11ce5fb0d4bb3a1a911b3d2bc766
|
[
"Apache-2.0"
] | null | null | null |
home/migrations/0001_initial.py
|
d-shaktiranjan/E_Note_Book
|
2746392bc88f11ce5fb0d4bb3a1a911b3d2bc766
|
[
"Apache-2.0"
] | 1
|
2022-01-27T18:56:50.000Z
|
2022-01-27T18:56:50.000Z
|
home/migrations/0001_initial.py
|
d-shaktiranjan/E_Note_Book
|
2746392bc88f11ce5fb0d4bb3a1a911b3d2bc766
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.4 on 2021-01-03 15:46
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='NoteBook',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('noteName', models.CharField(max_length=10)),
('about', models.CharField(max_length=25)),
('teachers', models.CharField(max_length=20)),
('dateTime', models.DateTimeField()),
],
),
]
| 26.72
| 114
| 0.565868
|
d513d5aa8b9dcde68465fe954ffc5ac47706b888
| 378
|
py
|
Python
|
azkm/__main__.py
|
frogrammer/azure-knowledgemining-cli
|
15ffded9ebb6edc0009c6b77ddee64757be6fc7d
|
[
"Apache-2.0"
] | null | null | null |
azkm/__main__.py
|
frogrammer/azure-knowledgemining-cli
|
15ffded9ebb6edc0009c6b77ddee64757be6fc7d
|
[
"Apache-2.0"
] | null | null | null |
azkm/__main__.py
|
frogrammer/azure-knowledgemining-cli
|
15ffded9ebb6edc0009c6b77ddee64757be6fc7d
|
[
"Apache-2.0"
] | null | null | null |
"""azkm CLI entry point."""
from azkm.flight_checks import prereqs
import firehelper
import sys
from azkm.commands import * # noqa
def main():
"""azkm CLI.
"""
if len(sys.argv) == 1:
prereqs.confirm_cmd()
else:
prereqs.check_cmd()
start_cli()
def start_cli():
firehelper.start_fire_cli('azkm')
if __name__ == '__main__':
main()
| 16.434783
| 38
| 0.62963
|
dbc5545f33eccd1c63a3a0100dfc2adab717810d
| 2,098
|
py
|
Python
|
main.py
|
daniel4lee/PSO-car-simulator
|
b4aebca0fed614e33acc3e7d665085d55a67b82a
|
[
"MIT"
] | 1
|
2022-03-23T21:51:59.000Z
|
2022-03-23T21:51:59.000Z
|
main.py
|
daniel4lee/PSO-car-simulator
|
b4aebca0fed614e33acc3e7d665085d55a67b82a
|
[
"MIT"
] | 1
|
2018-10-08T12:53:42.000Z
|
2018-10-08T13:46:13.000Z
|
main.py
|
daniel4lee/PSO-car-simulator
|
b4aebca0fed614e33acc3e7d665085d55a67b82a
|
[
"MIT"
] | 2
|
2020-04-26T08:22:53.000Z
|
2021-05-18T09:51:24.000Z
|
"""
the main excution file
"""
import os
from os.path import join, isfile
from collections import namedtuple
import sys
from PyQt5.QtWidgets import QApplication
from PSO_system.GUI.gui_root import GuiRoot
import numpy as np
def main():
"""Read data as dictionary"""
sys.argv += ['--style', 'fusion']
app = QApplication(sys.argv)
gui_root = GuiRoot(read_file(), read_training_file())
sys.exit(app.exec_())
def read_file():
"""Read txt file in same location"""
road_map = namedtuple('road_map', ['start', 'x', 'y'])
datapath = join(os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))), "map_data")
folderfiles = os.listdir(datapath)
dataset = {}
paths = (join(datapath, f) for f in folderfiles if isfile(join(datapath, f)))
for idx, content in enumerate(list(map(lambda path: open(path, 'r'), paths))):
i = 0
for line in content:
if i == 0:
dataset[folderfiles[idx]] = road_map(list(map(float, line.split(','))), [], [])
else:
dataset[folderfiles[idx]].x.append(float(line.split(',')[0]))
dataset[folderfiles[idx]].y.append(float(line.split(',')[1]))
i += 1
return dataset
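# Illustrative sketch (assumed layout, not taken from the actual data files): given the
# parsing above, each file under map_data/ is expected to look roughly like
#   0,0,90        <- first line: starting pose, parsed into road_map.start as floats
#   -6,-3         <- every following line: an x,y pair appended to road_map.x / road_map.y
#   -6,22
#   ...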
def read_training_file():
"""Read txt file in same location"""
train_data = namedtuple('train_data', ['wheel_angle', 'v_x'])
datapath = join(os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))), "training_data")
folderfiles = os.listdir(datapath)
dataset = {}
paths = (join(datapath, f) for f in folderfiles if isfile(join(datapath, f)))
for idx, content in enumerate(list(map(lambda path: open(path, 'r'), paths))):
i = 0
for line in content:
if i == 0:
dataset[folderfiles[idx]] = train_data([], [])
dataset[folderfiles[idx]].wheel_angle.append(float(line.split(' ')[-1]))
t = line.split(' ')
del t[-1]
dataset[folderfiles[idx]].v_x.append(np.array(list(map(float, t))))
i += 1
return dataset
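# Illustrative sketch (assumed format, inferred from the loop above): each line of a
# training_data/ file holds space-separated floats, e.g.
#   22.0 8.4855 8.4855 -16.0709
# where the last value is appended to train_data.wheel_angle and the remaining values
# become the numpy state vector appended to train_data.v_x.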
main()
| 39.584906
| 108
| 0.608198
|
992348a73e9b9b5ff6df854b9656ee3f11ca9d77
| 9,030
|
py
|
Python
|
interactive.py
|
vliu15/dialogue-seq2seq
|
d78354cdb568963f8e85fce1202e85690535f01c
|
[
"MIT"
] | 27
|
2019-04-17T11:02:39.000Z
|
2021-12-16T09:42:41.000Z
|
interactive.py
|
lixinyu-up/dialogue-seq2seq
|
d78354cdb568963f8e85fce1202e85690535f01c
|
[
"MIT"
] | 1
|
2019-03-01T09:21:09.000Z
|
2019-03-02T22:49:48.000Z
|
interactive.py
|
vliu15/transformer-rnn-pytorch
|
d78354cdb568963f8e85fce1202e85690535f01c
|
[
"MIT"
] | 13
|
2019-03-31T05:16:49.000Z
|
2021-07-09T13:08:14.000Z
|
''' This script handles local interactive inference '''
import torch
import torch.nn as nn
import torch.nn.functional as F
import argparse
import numpy as np
import spacy
from seq2seq.Models import Seq2Seq
from seq2seq.Translator import Translator
from seq2seq.Beam import Beam
from seq2seq import Constants
class Interactive(Translator):
def __init__(self, opt):
super().__init__(opt)
def translate_batch(self, src_seq, src_pos):
''' Translation work in one batch '''
def get_inst_idx_to_tensor_position_map(inst_idx_list):
''' Indicate the position of an instance in a tensor. '''
return {inst_idx: tensor_position for tensor_position, inst_idx in enumerate(inst_idx_list)}
def collect_active_part(beamed_tensor, curr_active_inst_idx, n_prev_active_inst, n_bm):
''' Collect tensor parts associated to active instances. '''
_, *d_hs = beamed_tensor.size()
n_curr_active_inst = len(curr_active_inst_idx)
new_shape = (n_curr_active_inst * n_bm, *d_hs)
beamed_tensor = beamed_tensor.view(n_prev_active_inst, -1)
beamed_tensor = beamed_tensor.index_select(0, curr_active_inst_idx)
beamed_tensor = beamed_tensor.view(*new_shape)
return beamed_tensor
def collate_active_info(
src_seq, src_enc, inst_idx_to_position_map, active_inst_idx_list):
#- Active sentences are collected so the decoder will not run on completed sentences
n_prev_active_inst = len(inst_idx_to_position_map)
active_inst_idx = [inst_idx_to_position_map[k] for k in active_inst_idx_list]
active_inst_idx = torch.LongTensor(active_inst_idx).to(self.device)
active_src_seq = collect_active_part(src_seq, active_inst_idx, n_prev_active_inst, n_bm)
active_src_enc = collect_active_part(src_enc, active_inst_idx, n_prev_active_inst, n_bm)
active_inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(active_inst_idx_list)
return active_src_seq, active_src_enc, active_inst_idx_to_position_map
def beam_decode_step(
inst_dec_beams, len_dec_seq, src_seq, enc_output, inst_idx_to_position_map, n_bm):
''' Decode and update beam status, and then return active beam idx '''
def prepare_beam_dec_seq(inst_dec_beams, len_dec_seq):
dec_partial_seq = [b.get_current_state() for b in inst_dec_beams if not b.done]
dec_partial_seq = torch.stack(dec_partial_seq).to(self.device)
dec_partial_seq = dec_partial_seq.view(-1, len_dec_seq)
return dec_partial_seq
def prepare_beam_dec_pos(len_dec_seq, n_active_inst, n_bm):
dec_partial_pos = torch.arange(1, len_dec_seq + 1, dtype=torch.long, device=self.device)
dec_partial_pos = dec_partial_pos.unsqueeze(0).repeat(n_active_inst * n_bm, 1)
return dec_partial_pos
def predict_word(dec_seq, dec_pos, src_seq, enc_output, n_active_inst, n_bm):
dec_output, *_ = self.model.decoder(dec_seq, dec_pos, src_seq, enc_output)
dec_output = dec_output[:, -1, :] # Pick the last step: (bh * bm) * d_h
word_prob = self.model.tgt_word_prj(dec_output)
word_prob[:, Constants.UNK] = -float('inf')
word_prob = F.log_softmax(word_prob, dim=1)
word_prob = word_prob.view(n_active_inst, n_bm, -1)
return word_prob
def collect_active_inst_idx_list(inst_beams, word_prob, inst_idx_to_position_map):
active_inst_idx_list = []
for inst_idx, inst_position in inst_idx_to_position_map.items():
is_inst_complete = inst_beams[inst_idx].advance(word_prob[inst_position])
if not is_inst_complete:
active_inst_idx_list += [inst_idx]
return active_inst_idx_list
n_active_inst = len(inst_idx_to_position_map)
dec_seq = prepare_beam_dec_seq(inst_dec_beams, len_dec_seq)
dec_pos = prepare_beam_dec_pos(len_dec_seq, n_active_inst, n_bm)
word_prob = predict_word(dec_seq, dec_pos, src_seq, enc_output, n_active_inst, n_bm)
# Update the beam with predicted word prob information and collect incomplete instances
active_inst_idx_list = collect_active_inst_idx_list(
inst_dec_beams, word_prob, inst_idx_to_position_map)
return active_inst_idx_list
def collect_hypothesis_and_scores(inst_dec_beams, n_best):
all_hyp, all_scores = [], []
for inst_idx in range(len(inst_dec_beams)):
scores, tail_idxs = inst_dec_beams[inst_idx].sort_scores()
all_scores += [scores[:n_best]]
hyps = [inst_dec_beams[inst_idx].get_hypothesis(i) for i in tail_idxs[:n_best]]
all_hyp += [hyps]
return all_hyp, all_scores
with torch.no_grad():
#- Zero out hidden state to batch size 1
self.model.session.zero_lstm_state(1, self.device)
#- Encode
src_enc, *_ = self.model.encoder(src_seq, src_pos)
src_enc, *_ = self.model.session(src_enc)
#- Repeat data for beam search
n_bm = self.opt.beam_size
n_inst, len_s, d_h = src_enc.size()
src_seq = src_seq.repeat(1, n_bm).view(n_inst * n_bm, len_s)
src_enc = src_enc.repeat(1, n_bm, 1).view(n_inst * n_bm, len_s, d_h)
#- Prepare beams
inst_dec_beams = [Beam(n_bm, device=self.device) for _ in range(n_inst)]
#- Bookkeeping for active or not
active_inst_idx_list = list(range(n_inst))
inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(active_inst_idx_list)
#- Decode
for len_dec_seq in range(1, self.model_opt.max_subseq_len + 1):
active_inst_idx_list = beam_decode_step(
inst_dec_beams, len_dec_seq, src_seq, src_enc, inst_idx_to_position_map, n_bm)
if not active_inst_idx_list:
break # all instances have finished their path to <EOS>
src_seq, src_enc, inst_idx_to_position_map = collate_active_info(
src_seq, src_enc, inst_idx_to_position_map, active_inst_idx_list)
hyp, scores = collect_hypothesis_and_scores(inst_dec_beams, self.opt.n_best)
return hyp, scores
def interactive(opt):
def prepare_seq(seq, max_seq_len, word2idx, device):
''' Prepares sequence for inference '''
seq = nlp(seq)
seq = [token.text for token in seq[:max_seq_len]]
seq = [word2idx.get(w.lower(), Constants.UNK) for w in seq]
seq = [Constants.BOS] + seq + [Constants.EOS]
seq = np.array(seq + [Constants.PAD] * (max_seq_len - len(seq)))
pos = np.array([pos_i+1 if w_i != Constants.PAD else 0 for pos_i, w_i in enumerate(seq)])
seq = torch.LongTensor(seq).unsqueeze(0)
pos = torch.LongTensor(pos).unsqueeze(0)
return seq.to(device), pos.to(device)
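# Hypothetical example of what prepare_seq returns: for the input "hello there"
# with max_seq_len = 5, the tensors would look like
#   seq = [[BOS, idx('hello'), idx('there'), EOS, PAD]]   (shape 1 x 5, batch dim from unsqueeze)
#   pos = [[1,   2,            3,            4,   0  ]]   (positions are 1-based, PAD slots are 0)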
#- Load preprocessing file for vocabulary
prepro = torch.load(opt.prepro_file)
src_word2idx = prepro['dict']['src']
tgt_idx2word = {idx: word for word, idx in prepro['dict']['tgt'].items()}
del prepro # to save memory
#- Prepare interactive shell
nlp = spacy.blank('en')
s2s = Interactive(opt)
max_seq_len = s2s.model_opt.max_subseq_len
print('[Info] Model opts: {}'.format(s2s.model_opt))
#- Interact with console
console_input = ''
console_output = '[Seq2Seq](score:--.--) human , what do you have to say ( type \' exit \' to quit ) ?\n[Human] '
while True:
console_input = input(console_output) # get user input
if console_input == 'exit':
break
seq, pos = prepare_seq(console_input, max_seq_len, src_word2idx, s2s.device)
console_output, score = s2s.translate_batch(seq, pos)
console_output = console_output[0][0]
score = score[0][0]
console_output = '[Seq2Seq](score:{score:2.2f}) '.format(score=score.item()) + \
' '.join([tgt_idx2word.get(word, Constants.UNK_WORD) for word in console_output]) + '\n[Human] '
print('[Seq2Seq](score:--.--) thanks for talking with me !')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='translate.py')
parser.add_argument('-model', required=True, help='Path to model .chkpt file')
parser.add_argument('-prepro_file', required=True, help='Path to preprocessed data for vocab')
parser.add_argument('-beam_size', type=int, default=5, help='Beam size')
parser.add_argument('-no_cuda', action='store_true')
opt = parser.parse_args()
opt.cuda = not opt.no_cuda
opt.n_best = 1
interactive(opt)
| 45.15
| 117
| 0.653378
|
a44232881d599052a397795e687023a6bf1adc0a
| 2,503
|
py
|
Python
|
src/test/tinc/tincrepo/mpp/lib/regress/regress_gpdbverify.py
|
rodel-talampas/gpdb
|
9c955e350334abbd922102f289f782697eb52069
|
[
"PostgreSQL",
"Apache-2.0"
] | 9
|
2018-04-20T03:31:01.000Z
|
2020-05-13T14:10:53.000Z
|
src/test/tinc/tincrepo/mpp/lib/regress/regress_gpdbverify.py
|
rodel-talampas/gpdb
|
9c955e350334abbd922102f289f782697eb52069
|
[
"PostgreSQL",
"Apache-2.0"
] | 36
|
2017-09-21T09:12:27.000Z
|
2020-06-17T16:40:48.000Z
|
src/test/tinc/tincrepo/mpp/lib/regress/regress_gpdbverify.py
|
rodel-talampas/gpdb
|
9c955e350334abbd922102f289f782697eb52069
|
[
"PostgreSQL",
"Apache-2.0"
] | 32
|
2017-08-31T12:50:52.000Z
|
2022-03-01T07:34:53.000Z
|
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
import unittest2 as unittest
from tinctest.lib import local_path
from mpp.lib.PSQL import PSQL
from mpp.lib.gpdbverify import GpdbVerify
class GpdbVerifyRegressionTests(unittest.TestCase):
def __init__(self, methodName):
self.gpv = GpdbVerify()
super(GpdbVerifyRegressionTests, self).__init__(methodName)
def setUp(self):
PSQL.run_sql_command('create database gptest;', dbname='postgres')
def tearDown(self):
PSQL.run_sql_command('drop database gptest', dbname='postgres')
def test_gpcheckcat(self):
(a,b,c,d) = self.gpv.gpcheckcat()
self.assertIn(a,(0,1,2))
def test_gpcheckmirrorseg(self):
(res,fix_file) = self.gpv.gpcheckmirrorseg()
self.assertIn(res, (True,False))
def test_check_db_is_running(self):
self.assertTrue(self.gpv.check_db_is_running())
def test_run_repairscript(self):
repair_script = local_path('gpcheckcat_repair')
res = self.gpv.run_repair_script(repair_script)
self.assertIn(res, (True,False))
def test_ignore_extra_m(self):
fix_file = local_path('fix_file')
res = self.gpv.ignore_extra_m(fix_file)
self.assertIn(res, (True,False))
def test_cleanup_old_file(self):
old_time = int(time.strftime("%Y%m%d%H%M%S")) - 1005000
old_file = local_path('checkmirrorsegoutput_%s' % old_time)
open(old_file,'w')
self.gpv.cleanup_day_old_out_files(local_path(''))
self.assertFalse(os.path.isfile(old_file))
def test_not_cleanup_todays_file(self):
new_file = local_path('checkmirrorsegoutput_%s' % time.strftime("%Y%m%d%H%M%S"))
open(new_file,'w')
self.gpv.cleanup_day_old_out_files(local_path(''))
self.assertTrue(os.path.isfile(new_file))
| 35.253521
| 88
| 0.704754
|
88a868d18117582e3d77dc88677ec1a63e6b23e0
| 49,602
|
py
|
Python
|
application.py
|
Has3ong/KaKao_Suwon
|
ddba8ea5623f84893d0f62ad8afc985bb4bd786f
|
[
"MIT"
] | 1
|
2019-07-10T03:57:54.000Z
|
2019-07-10T03:57:54.000Z
|
application.py
|
Has3ong/KaKao_Suwon
|
ddba8ea5623f84893d0f62ad8afc985bb4bd786f
|
[
"MIT"
] | 2
|
2020-10-27T22:00:15.000Z
|
2021-06-02T00:36:00.000Z
|
application.py
|
Has3ong/KaKao_Suwon
|
ddba8ea5623f84893d0f62ad8afc985bb4bd786f
|
[
"MIT"
] | null | null | null |
# -- coding: utf-8 --
import os
import json
from flask import Flask, request, jsonify
from datetime import datetime
import requests
import time
import threading
import pymongo
ip = 'localhost'
port = 27017
connection = pymongo.MongoClient(ip, port)
database = connection.get_database('Suwon')
mongo = database.get_collection('Data')
from docs.Menu import oMenu
from docs.Dust import oDust
from docs.Weather import oWeather
from docs.PhoneBook import oPhoneBook
from docs.BusShuttle import oBusShuttle
from docs.Calendar import oCalendar
from docs.Notice import oNotice
from docs.sNotice import sNotice
app = Flask(__name__)
print("Menu")
#o_Menu = oMenu()
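# Note: while the line above stays commented out, o_Menu is never defined, so the
# cafeteria branches below that read o_Menu.JongHab and o_Menu.Amarense would raise
# a NameError if a user reached them.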
print("Weather")
o_Weather = oWeather()
print("Dust")
o_Dust = oDust()
print("PhoneBook")
o_PhoneBook = oPhoneBook()
print("BusShuttle")
o_BusShuttle = oBusShuttle()
print("Calendar")
o_Calendar = oCalendar()
print("Notice")
o_Notice = oNotice()
print("sNotice")
s_Notice = sNotice()
# 1day = 86400, 1hour = 3600
def Threading1d():
threading.Timer(86400, Threading1d).start()
o_Notice.Update()
def ThreadingWeather():
threading.Timer(43200, ThreadingWeather).start()
o_Weather.Update()
def Threading4h():
threading.Timer(14400, Threading4h).start()
today = datetime.today().weekday()
if today > 4:
return 0
#o_Menu.Update()
def Threading1h():
threading.Timer(3600, Threading1h).start()
o_Dust.Update()
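# Summary of the self-rearming timers above (intervals in seconds): notices refresh
# daily (86400), weather every 12 hours (43200), the (currently disabled) cafeteria
# menu every 4 hours on weekdays (14400), and fine-dust data hourly (3600); each
# helper re-registers itself with threading.Timer before running its update.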
@app.route('/keyboard')
def Keyboard():
dataSend = {
}
return jsonify(dataSend)
@app.route('/message', methods=['POST'])
def Message():
content = request.get_json()
content = content['userRequest']
content = content['utterance']
data = str(datetime.now().date())
"""
try:
mongo.insert_one(
{
"contents": content,
"date": data,
}
)
except Exception:
print("MongoDB Connection Failed")
"""
if content == u"์์ํ๊ธฐ":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"basicCard": {
"title": "",
"description": "์๋
ํ์ธ์. \n์์๋ํ๊ต ์๋ฆผ์ด ์
๋๋ค. \n์์๋ํ๊ต์ ๊ด๋ จ๋ ์ ๋ณด๋ฅผ ๊ฐ๋จํ๊ฒ ์๋ ค๋๋ฆด๊ฒ์!",
"thumbnail": {
"imageUrl": "https://proxy.goorm.io//service/5ccda9890e70de7aa094ede1_dbagmjvzeyafyjerlac.run.goorm.io/9080//file/load/App_Index.png?path=d29ya3NwYWNlJTJGU3V3b25Cb3QlMkZJbWFnZSUyRkFwcF9JbmRleC5wbmc=&docker_id=dbagmjvzeyafyjerlac&secure_session_id=ukvGkLMs6b_IfPgimh-pjWVtciFqdpSu"
}
}
}
],
"quickReplies": [
{
"label": "ํ๊ต์ ๋ณด",
"action": "message",
"messageText": "ํ๊ต์ ๋ณด"
},
{
"label": "๋ฒ์ค์
ํ",
"action": "message",
"messageText": "์
ํ๋ฒ์ค"
},
{
"label": "ํ์",
"action": "message",
"messageText": "ํ์"
},
{
"label": "๋ ์จ",
"action": "message",
"messageText": "๋ ์จ"
},
{
"label": "์ข
๊ฐ D-DAY",
"action": "message",
"messageText": "์ข
๊ฐ์ผ ๊ณ์ฐํด์ค"
}
]
}
}
elif content == u"ํ๊ต์ ๋ณด":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"basicCard": {
"title": "",
"description": "ํ๊ต์์ ๊ธํ ํ์ํ ๋ ์ฐพ๊ธฐ ํ๋ค์๋ ์ ๋ณด๋ฅผ ์๋ ค๋๋ฆด๊ฒ์!",
"thumbnail": {
"imageUrl": "https://proxy.goorm.io//service/5ccda9890e70de7aa094ede1_dbagmjvzeyafyjerlac.run.goorm.io/9080//file/load/App_Information.png?path=d29ya3NwYWNlJTJGU3V3b25Cb3QlMkZJbWFnZSUyRkFwcF9JbmZvcm1hdGlvbi5wbmc=&docker_id=dbagmjvzeyafyjerlac&secure_session_id=ukvGkLMs6b_IfPgimh-pjWVtciFqdpSu"
}
}
}
],
"quickReplies": [
{
"label": "๊ต๋ด์ ํ๋ฒํธ",
"action": "message",
"messageText": "๊ต๋ด์ ํ๋ฒํธ"
},
{
"label": "ํ์ฌ์ผ์ ",
"action": "message",
"messageText": "ํ์ฌ์ผ์ "
},
{
"label": "ํธ์์์ค",
"action": "message",
"messateText": "ํธ์์์ค"
}
# {
# "label": "๊ณต์ง์ฌํญ",
# "action": "message",
# "messageText": "๊ณต์ง์ฌํญ"
# }
]
}
}
elif content == u"ํ์":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"basicCard": {
"title": "ํ์",
"description": "์ข
ํฉ๊ฐ์๋, ์๋ง๋์ค ํ์ค ์ ํํด์ฃผ์ธ์. \n\n๋งํฌ : http://www.suwon.ac.kr/?menuno=762 \n๋งํฌ : http://www.suwon.ac.kr/?menuno=1793",
"thumbnail": {
"imageUrl": "https://proxy.goorm.io//service/5ccda9890e70de7aa094ede1_dbagmjvzeyafyjerlac.run.goorm.io/9080//file/load/App_Menu.png?path=d29ya3NwYWNlJTJGU3V3b25Cb3QlMkZJbWFnZSUyRkFwcF9NZW51LnBuZw==&docker_id=dbagmjvzeyafyjerlac&secure_session_id=ukvGkLMs6b_IfPgimh-pjWVtciFqdpSu"
}
}
}
]
}
}
elif content == u"์ข
ํฉ๊ฐ์๋ ํ์ ์๋ ค์ฃผ์ธ์":
today = datetime.today().weekday()
if today > 4:
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": "์ค๋์ ํด์ผ์
๋๋ค."
}
]
}
}
]
}
}
else:
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": o_Menu.JongHab[today][0]
},
{
"title": "",
"description": o_Menu.JongHab[today][1]
},
{
"title": "",
"description": o_Menu.JongHab[today][2]
},
{
"title": "",
"description": o_Menu.JongHab[today][3]
}
]
}
}
]
}
}
elif content == u"์๋ง๋์คํ ํ์ ์๋ ค์ฃผ์ธ์":
today = datetime.today().weekday()
if today > 4:
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": "์ค๋์ ํด์ผ์
๋๋ค."
}
]
}
}
]
}
}
else:
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": o_Menu.Amarense[today]
}
]
}
}
]
}
}
elif content == u"명령어":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"simpleText": {
"text": "์ฌ์ฉ๊ฐ๋ฅํ ๋ช
๋ น์ด๋ '์๊ฐ', '๋ฏธ์ธ๋จผ์ง', 'ํ์', '๋ ์จ', '๊ต๋ด์ ํ๋ฒํธ', '์
ํ๋ฒ์ค', 'ํ์ฌ์ผ์ '์
๋๋ค."
}
}
]
}
}
elif content == u"๋ ์จ":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": "",
"thumbnail": {
"imageUrl": "https://proxy.goorm.io//service/5ccda9890e70de7aa094ede1_dbagmjvzeyafyjerlac.run.goorm.io/9080//file/load/App_Today.png?path=d29ya3NwYWNlJTJGU3V3b25Cb3QlMkZJbWFnZSUyRkFwcF9Ub2RheS5wbmc=&docker_id=dbagmjvzeyafyjerlac&secure_session_id=ukvGkLMs6b_IfPgimh-pjWVtciFqdpSu"
},
"buttons": [
{
"action": "message",
"label": "์ค๋์ ๋ ์จ",
"messageText": "์ค๋ ๋ ์จ ์๋ ค์ค"
}
]
},
{
"title": "",
"description": "",
"thumbnail": {
"imageUrl": "https://proxy.goorm.io//service/5ccda9890e70de7aa094ede1_dbagmjvzeyafyjerlac.run.goorm.io/9080//file/load/App_Tomorow.png?path=d29ya3NwYWNlJTJGU3V3b25Cb3QlMkZJbWFnZSUyRkFwcF9Ub21vcm93LnBuZw==&docker_id=dbagmjvzeyafyjerlac&secure_session_id=ukvGkLMs6b_IfPgimh-pjWVtciFqdpSu"
},
"buttons": [
{
"action": "message",
"label": "๋ด์ผ์ ๋ ์จ",
"messageText": "๋ด์ผ ๋ ์จ ์๋ ค์ค"
}
]
}
]
}
}
]
}
}
'''
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type" : "basicCard",
"items": [
{
"title" : "์ค๋์ ๋ ์จ",
"description" : o_Weather.today + o_Dust.today,
"thumbnail" : {
"imageUrl" : "https://proxy.goorm.io//service/5ccda9890e70de7aa094ede1_dbagmjvzeyafyjerlac.run.goorm.io/9080//file/load/App_Today.png?path=d29ya3NwYWNlJTJGU3V3b25Cb3QlMkZJbWFnZSUyRkFwcF9Ub2RheS5wbmc=&docker_id=dbagmjvzeyafyjerlac&secure_session_id=ukvGkLMs6b_IfPgimh-pjWVtciFqdpSu"
}
},
{
"title" : "๋ด์ผ์ ๋ ์จ",
"description" : o_Weather.tomorrow,
"thumbnail" :{
"imageUrl" : "https://proxy.goorm.io//service/5ccda9890e70de7aa094ede1_dbagmjvzeyafyjerlac.run.goorm.io/9080//file/load/App_Tomorow.png?path=d29ya3NwYWNlJTJGU3V3b25Cb3QlMkZJbWFnZSUyRkFwcF9Ub21vcm93LnBuZw==&docker_id=dbagmjvzeyafyjerlac&secure_session_id=ukvGkLMs6b_IfPgimh-pjWVtciFqdpSu"
}
}
]
}
}
]
}
}
'''
elif content == u"์ค๋ ๋ ์จ ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"basicCard": {
"title": "",
"description": o_Weather.today + o_Dust.today
}
}
]
}
}
elif content == u"๋ด์ผ ๋ ์จ ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"basicCard": {
"title": "",
"description": o_Weather.tomorrow
}
}
]
}
}
elif content == u"종강일 계산해줘":
nowtime = datetime.now()
endtime = datetime(2020, 6, 22, 0, 0, 0)
d_days = (endtime - nowtime).days
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"basicCard": {
"title": "",
"description": "์ข
๊ฐ๊น์ง " + str(d_days) + " ์ผ ๋จ์์ต๋๋ค.๐",
"thumbnail": {
"imageUrl": "https://proxy.goorm.io//service/5ccda9890e70de7aa094ede1_dbagmjvzeyafyjerlac.run.goorm.io/9080//file/load/App_DDAY.png?path=d29ya3NwYWNlJTJGU3V3b25Cb3QlMkZJbWFnZSUyRkFwcF9EREFZLnBuZw==&docker_id=dbagmjvzeyafyjerlac&secure_session_id=ukvGkLMs6b_IfPgimh-pjWVtciFqdpSu"
}
}
}
]
}
}
elif content == u"ํ์ฌ์ผ์ ":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "ํ์ฌ์ผ์ ",
"description": "๋งํฌ : http://www.suwon.ac.kr/?menuno=727",
"thumbnail": {
"imageUrl": "https://proxy.goorm.io//service/5ccda9890e70de7aa094ede1_dbagmjvzeyafyjerlac.run.goorm.io/9080//file/load/App_Information.png?path=d29ya3NwYWNlJTJGU3V3b25Cb3QlMkZJbWFnZSUyRkFwcF9JbmZvcm1hdGlvbi5wbmc=&docker_id=dbagmjvzeyafyjerlac&secure_session_id=-eu90FRT1mUI5U8ZfBLyu-KBEQXB_1LN"
},
"buttons": [
{
"action": "message",
"label": "1ํ๊ธฐ ํ์ฌ์ผ์ ",
"messageText": "1ํ๊ธฐ ํ์ฌ์ผ์ ์๋ ค์ค"
},
{
"action": "message",
"label": "2ํ๊ธฐ ํ์ฌ์ผ์ ",
"messageText": "2ํ๊ธฐ ํ์ฌ์ผ์ ์๋ ค์ค"
},
]
}
]
}
}
]
}
}
elif content == u"1ํ๊ธฐ ํ์ฌ์ผ์ ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "1์ ํ์ฌ์ผ์ ๐",
"description": o_Calendar.Jan
},
{
"title": "2์ ํ์ฌ์ผ์ ๐",
"description": o_Calendar.Feb
},
{
"title": "3์ ํ์ฌ์ผ์ ๐",
"description": o_Calendar.Mar
},
{
"title": "4์ ํ์ฌ์ผ์ ๐",
"description": o_Calendar.Apr
},
{
"title": "5์ ํ์ฌ์ผ์ ๐",
"description": o_Calendar.May
},
{
"title": "6์ ํ์ฌ์ผ์ ๐",
"description": o_Calendar.June
}
]
}
}
]
}
}
elif content == u"2ํ๊ธฐ ํ์ฌ์ผ์ ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "7์ ํ์ฌ์ผ์ ๐",
"description": o_Calendar.July
},
{
"title": "8์ ํ์ฌ์ผ์ ๐",
"description": o_Calendar.Aug
},
{
"title": "9์ ํ์ฌ์ผ์ ๐",
"description": o_Calendar.Sep
},
{
"title": "10์ ํ์ฌ์ผ์ ๐",
"description": o_Calendar.Oct
},
{
"title": "11์ ํ์ฌ์ผ์ ๐",
"description": o_Calendar.Nov
},
{
"title": "12์ ํ์ฌ์ผ์ ๐",
"description": o_Calendar.Dec
}
]
}
}
]
}
}
elif content == u"ํธ์์์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "์นดํ๐ต",
"description": "ACE๊ต์ก๊ด - ์ตํฉ๋ฌธํ์์ ๋ํ - ์ฌํ๊ด",
"thumbnail": {
"imageUrl": "https://proxy.goorm.io//service/5ccda9890e70de7aa094ede1_dbagmjvzeyafyjerlac.run.goorm.io/9080//file/load/App_Cafe.png?path=d29ya3NwYWNlJTJGU3V3b25Cb3QlMkZJbWFnZSUyRkFwcF9DYWZlLnBuZw==&docker_id=dbagmjvzeyafyjerlac&secure_session_id=WK1r5NHmB5cAiSu-chkrikcJuyS6wE7a"
},
},
{
"title": "๋งค์ ๐ฉ",
"description": "์ฌํ๊ด - ๊ฑด๊ฐ๊ณผํ๋ํ - ๊ณต๊ณผ๋ํ\n์ 4๊ณตํ๊ด - ACE๊ต์ก๊ด - ์ตํฉ๋ฌธํ์์ ๋ํ - ๊ฒฝ์๋ํ",
"thumbnail": {
"imageUrl": "https://proxy.goorm.io//service/5ccda9890e70de7aa094ede1_dbagmjvzeyafyjerlac.run.goorm.io/9080//file/load/App_Store.png?path=d29ya3NwYWNlJTJGU3V3b25Cb3QlMkZJbWFnZSUyRkFwcF9TdG9yZS5wbmc=&docker_id=dbagmjvzeyafyjerlac&secure_session_id=WK1r5NHmB5cAiSu-chkrikcJuyS6wE7a"
},
},
{
"title": "๋ณต์ฌ์ค๐จ",
"description": "๋์๊ด 2์ธต - ์ธ๋ฌธ์ฌํ๋ํ - ๊ณต๊ณผ๋ํ",
"thumbnail": {
"imageUrl": "https://proxy.goorm.io//service/5ccda9890e70de7aa094ede1_dbagmjvzeyafyjerlac.run.goorm.io/9080//file/load/App_Boksa.png?path=d29ya3NwYWNlJTJGU3V3b25Cb3QlMkZJbWFnZSUyRkFwcF9Cb2tzYS5wbmc=&docker_id=dbagmjvzeyafyjerlac&secure_session_id=WK1r5NHmB5cAiSu-chkrikcJuyS6wE7a"
},
},
{
"title": "Link",
"description": "http://www.suwon.ac.kr/?menuno=763"
}
]
}
}
]
}
}
elif content == u"셔틀버스":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "์
ํ๋ฒ์ค",
"description": "",
"thumbnail": {
"imageUrl": "https://proxy.goorm.io//service/5ccda9890e70de7aa094ede1_dbagmjvzeyafyjerlac.run.goorm.io/9080//file/load/App_Bus1.png?path=d29ya3NwYWNlJTJGU3V3b25Cb3QlMkZJbWFnZSUyRkFwcF9CdXMxLnBuZw==&docker_id=dbagmjvzeyafyjerlac&secure_session_id=ukvGkLMs6b_IfPgimh-pjWVtciFqdpSu"
},
"buttons": [
{
"action": "message",
"label": "๊ต๋ด ์
ํ ์๊ฐํ",
"messageText": "๊ต๋ด ์
ํ ์๊ฐํ ์๋ ค์ค"
},
{
"action": "message",
"label": "์๋ก์ ์
ํ๋ฒ์ค",
"messageText": "์๋ก์ ์
ํ๋ฒ์ค ์๋ ค์ค"
},
{
"action": "message",
"label": "์ก๋ด ์
ํ๋ฒ์ค",
"messageText": "์ก๋ด ์
ํ๋ฒ์ค ์๋ ค์ค"
}
]
},
{
"title": "์
ํ๋ฒ์ค",
"description": "",
"thumbnail": {
"imageUrl": "https://proxy.goorm.io//service/5ccda9890e70de7aa094ede1_dbagmjvzeyafyjerlac.run.goorm.io/9080//file/load/App_Bus2.png?path=d29ya3NwYWNlJTJGU3V3b25Cb3QlMkZJbWFnZSUyRkFwcF9CdXMyLnBuZw==&docker_id=dbagmjvzeyafyjerlac&secure_session_id=-eu90FRT1mUI5U8ZfBLyu-KBEQXB_1LN"
},
"buttons": [
{
"action": "message",
"label": "๊ธ์ ์
ํ๋ฒ์ค",
"messageText": "๊ธ์ ์
ํ๋ฒ์ค ์๋ ค์ค"
},
{
"action": "message",
"label": "์ฑ๋จ(์ผํ) ์
ํ๋ฒ์ค",
"messageText": "์ฑ๋จ ์
ํ๋ฒ์ค ์๋ ค์ค"
},
{
"action": "message",
"label": "์์ ์
ํ๋ฒ์ค",
"messageText": "์์ ์
ํ๋ฒ์ค ์๋ ค์ค"
}
]
},
{
"title": "์
ํ๋ฒ์ค",
"description": "๋งํฌ : http://www.suwon.ac.kr/?menuno=655",
"thumbnail": {
"imageUrl": "https://proxy.goorm.io//service/5ccda9890e70de7aa094ede1_dbagmjvzeyafyjerlac.run.goorm.io/9080//file/load/App_Bus3.png?path=d29ya3NwYWNlJTJGU3V3b25Cb3QlMkZJbWFnZSUyRkFwcF9CdXMzLnBuZw==&docker_id=dbagmjvzeyafyjerlac&secure_session_id=vehspVjlqUmLG5081o_9ITtwVcY1zp64"
},
"buttons": [
{
"action": "message",
"label": "๊ฐ๋จ ์
ํ๋ฒ์ค",
"messageText": "๊ฐ๋จ ์
ํ๋ฒ์ค ์๋ ค์ค"
}
]
}
]
}
}
]
}
}
elif content == u"๊ต๋ด ์
ํ ์๊ฐํ ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": o_BusShuttle.InShuttle
}
]
}
}
]
}
}
elif content == u"์ก๋ด ์
ํ๋ฒ์ค ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": o_BusShuttle.OutShuttle_SongNae
}
]
}
}
]
}
}
elif content == u"์๋ก์ ์
ํ๋ฒ์ค ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": o_BusShuttle.OutShuttle_SangRokSu
}
]
}
}
]
}
}
elif content == u"๊ธ์ ์
ํ๋ฒ์ค ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": o_BusShuttle.OutShuttle_GeumJeong
}
]
}
}
]
}
}
elif content == u"์ฑ๋จ ์
ํ๋ฒ์ค ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": o_BusShuttle.OutShuttle_SeongNam
}
]
}
}
]
}
}
elif content == u"์์ ์
ํ๋ฒ์ค ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": o_BusShuttle.OutShuttle_Suwon
}
]
}
}
]
}
}
elif content == u"๊ฐ๋จ ์
ํ๋ฒ์ค ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": o_BusShuttle.OutShuttle_GangNam
}
]
}
}
]
}
}
# ์ ๋๋ฆผ ์
ํ๋ฒ์ค ์ฌ๋ผ์ง
elif content == u"์ ๋๋ฆผ ์
ํ๋ฒ์ค ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": o_BusShuttle.OutShuttle_SinDoRim
}
]
}
}
]
}
}
elif content == u"๊ต๋ด์ ํ๋ฒํธ":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": "๊ต๋ด ์๋ด 031-220-2114",
"thumbnail": {
"imageUrl": "https://proxy.goorm.io//service/5ccda9890e70de7aa094ede1_dbagmjvzeyafyjerlac.run.goorm.io/9080//file/load/App_Information.png?path=d29ya3NwYWNlJTJGU3V3b25Cb3QlMkZJbWFnZSUyRkFwcF9JbmZvcm1hdGlvbi5wbmc=&docker_id=dbagmjvzeyafyjerlac&secure_session_id=-eu90FRT1mUI5U8ZfBLyu-KBEQXB_1LN"
},
"buttons": [
{
"action": "message",
"label": "์ธ๋ฌธ์ฌํ๋ํ",
"messageText": "์ธ๋ฌธ์ฌํ๋ํ ์ ํ๋ฒํธ ์๋ ค์ค"
},
{
"action": "message",
"label": "๊ฒฝ์๋ํ",
"messageText": "๊ฒฝ์๋ํ ์ ํ๋ฒํธ ์๋ ค์ค"
},
{
"action": "message",
"label": "๊ณต๊ณผ๋ํ",
"messageText": "๊ณต๊ณผ๋ํ ์ ํ๋ฒํธ ์๋ ค์ค"
}
]
},
{
"title": "",
"description": "๊ต๋ด ์๋ด 031-220-2114",
"thumbnail": {
"imageUrl": "https://proxy.goorm.io//service/5ccda9890e70de7aa094ede1_dbagmjvzeyafyjerlac.run.goorm.io/9080//file/load/App_Information.png?path=d29ya3NwYWNlJTJGU3V3b25Cb3QlMkZJbWFnZSUyRkFwcF9JbmZvcm1hdGlvbi5wbmc=&docker_id=dbagmjvzeyafyjerlac&secure_session_id=-eu90FRT1mUI5U8ZfBLyu-KBEQXB_1LN"
},
"buttons": [
{
"action": "message",
"label": "ICT ์ตํฉ๋ํ",
"messageText": "ICT ์ตํฉ๋ํ ์ ํ๋ฒํธ ์๋ ค์ค"
},
{
"action": "message",
"label": "๋ฏธ์ ๋ํ",
"messageText": "๋ฏธ์ ๋ํ ์ ํ๋ฒํธ ์๋ ค์ค"
},
{
"action": "message",
"label": "์์
๋ํ",
"messageText": "์์
๋ํ ์ ํ๋ฒํธ ์๋ ค์ค"
}
]
},
{
"title": "",
"description": "๊ต๋ด ์๋ด 031-220-2114 \n๋งํฌ : http://www.suwon.ac.kr/?menuno=653",
"thumbnail": {
"imageUrl": "https://proxy.goorm.io//service/5ccda9890e70de7aa094ede1_dbagmjvzeyafyjerlac.run.goorm.io/9080//file/load/App_Information.png?path=d29ya3NwYWNlJTJGU3V3b25Cb3QlMkZJbWFnZSUyRkFwcF9JbmZvcm1hdGlvbi5wbmc=&docker_id=dbagmjvzeyafyjerlac&secure_session_id=-eu90FRT1mUI5U8ZfBLyu-KBEQXB_1LN"
},
"buttons": [
{
"action": "message",
"label": "์ตํฉ๋ฌธํ ์์ ๋ํ",
"messageText": "์ตํฉ๋ฌธํ ์์ ๋ํ ์ ํ๋ฒํธ ์๋ ค์ค"
},
{
"action": "message",
"label": "๊ฑด๊ฐ๊ณผํ๋ํ",
"messageText": "๊ฑด๊ฐ๊ณผํ๋ํ ์ ํ๋ฒํธ ์๋ ค์ค"
}
]
}
]
}
}
]
}
}
elif content == u"์ธ๋ฌธ์ฌํ๋ํ ์ ํ๋ฒํธ ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": o_PhoneBook.InMun1
}
]
}
}
]
}
}
elif content == u"๊ฒฝ์๋ํ ์ ํ๋ฒํธ ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": o_PhoneBook.GyungSang1
}
]
}
}
]
}
}
elif content == u"๊ณต๊ณผ๋ํ ์ ํ๋ฒํธ ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": o_PhoneBook.GongGwa1
}
]
}
}
]
}
}
elif content == u"ICT ์ตํฉ๋ํ ์ ํ๋ฒํธ ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": o_PhoneBook.ICT1
}
]
}
}
]
}
}
elif content == u"๋ฏธ์ ๋ํ ์ ํ๋ฒํธ ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": o_PhoneBook.Art1
}
]
}
}
]
}
}
elif content == u"์์
๋ํ ์ ํ๋ฒํธ ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": o_PhoneBook.Music
}
]
}
}
]
}
}
elif content == u"์ตํฉ๋ฌธํ ์์ ๋ํ ์ ํ๋ฒํธ ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": o_PhoneBook.MunHwa1
}
]
}
}
]
}
}
elif content == u"๊ฑด๊ฐ๊ณผํ๋ํ ์ ํ๋ฒํธ ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"carousel": {
"type": "basicCard",
"items": [
{
"title": "",
"description": o_PhoneBook.GunGang1
}
]
}
}
]
}
}
elif content == u"๊ณต์ง์ฌํญ":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"basicCard": {
"title": "",
"description": "ํ๊ต ๊ณต์ง์ฌํญ๊ณผ ์๋ฆผ์ด ๊ณต์ง์ฌํญ์ ์๋ ค๋๋ฆด๊ฒ์.",
"thumbnail": {
"imageUrl": "https://proxy.goorm.io//service/5ccda9890e70de7aa094ede1_dbagmjvzeyafyjerlac.run.goorm.io/9080//file/load/App_Notice.png?path=d29ya3NwYWNlJTJGU3V3b25Cb3QlMkZJbWFnZSUyRkFwcF9Ob3RpY2UucG5n&docker_id=dbagmjvzeyafyjerlac&secure_session_id=IlC0-R5MuofCrIMCXBNPinjASPWLUMb3"
}
}
}
],
"quickReplies": [
{
"label": "์์๋ ๊ณต์ง์ฌํญ",
"action": "message",
"messageText": "์์๋ ๊ณต์ง์ฌํญ ์๋ ค์ค"
},
{
"label": "์๋ฆผ์ด ๊ณต์ง์ฌํญ",
"action": "message",
"messageText": "์๋ฆผ์ด ๊ณต์ง์ฌํญ ์๋ ค์ค"
}
]
}
}
elif content == u"์์๋ ๊ณต์ง์ฌํญ ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"listCard": {
"header": {
"title": "์์๋ํ๊ต ๊ณต์ง์ฌํญ",
"imageUrl": "https://proxy.goorm.io//service/5ccda9890e70de7aa094ede1_dbagmjvzeyafyjerlac.run.goorm.io/9080//file/load/App_Banner.png?path=d29ya3NwYWNlJTJGU3V3b25Cb3QlMkZJbWFnZSUyRkFwcF9CYW5uZXIucG5n&docker_id=dbagmjvzeyafyjerlac&secure_session_id=IlC0-R5MuofCrIMCXBNPinjASPWLUMb3"
},
"items": [
{
"title": o_Notice.res[0],
},
{
"title": o_Notice.res[1]
},
{
"title": o_Notice.res[2]
},
{
"title": o_Notice.res[3]
},
{
"title": o_Notice.res[4]
},
],
"buttons": [
{
"label": "๊ตฌ๊ฒฝ๊ฐ๊ธฐ",
"action": "webLink",
"webLinkUrl": "http://www.suwon.ac.kr/?menuno=674"
}
]
}
}
],
"quickReplies": [
{
"label": "๋๋ณด๊ธฐ",
"action": "message",
"messageText": "์์๋ ๊ณต์ง์ฌํญ2 ์๋ ค์ค"
}
]
}
}
elif content == u"์์๋ ๊ณต์ง์ฌํญ2 ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"listCard": {
"header": {
"title": "์์๋ํ๊ต ๊ณต์ง์ฌํญ",
"imageUrl": "https://proxy.goorm.io//service/5ccda9890e70de7aa094ede1_dbagmjvzeyafyjerlac.run.goorm.io/9080//file/load/App_Banner.png?path=d29ya3NwYWNlJTJGU3V3b25Cb3QlMkZJbWFnZSUyRkFwcF9CYW5uZXIucG5n&docker_id=dbagmjvzeyafyjerlac&secure_session_id=IlC0-R5MuofCrIMCXBNPinjASPWLUMb3"
},
"items": [
{
"title": o_Notice.res[5],
},
{
"title": o_Notice.res[6]
},
{
"title": o_Notice.res[7]
},
{
"title": o_Notice.res[8]
},
{
"title": o_Notice.res[9]
},
],
"buttons": [
{
"label": "๊ตฌ๊ฒฝ๊ฐ๊ธฐ",
"action": "webLink",
"webLinkUrl": "http://www.suwon.ac.kr/?menuno=674"
}
]
}
}
]
}
}
elif content == u"์๋ฆผ์ด ๊ณต์ง์ฌํญ ์๋ ค์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"simpleText": {
"text": s_Notice.data
}
}
]
}
}
elif content == u"๊ฐ๋ฐ์ค":
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"simpleText": {
"text": "๐๐๐๐คฃ๐๐๐
๐๐ค๐๐ถ๐๐๐โบ๏ธ๐๐๐๐๐๐๐คจ๐ค๐๐๐๐ฅฐ๐คฉ๐๐ฃ๐ฅ๐ฎ๐ค๐ฏ๐ช๐คค๐๐๐๐๐ด๐ซ๐๐๐๐๐๐ค๐ฒ๐ข๐ค๐คฏ๐ฌ๐ฉ๐๐๐จ๐ง๐๐๐ฆโน๐ญ๐ฐ๐ฑ๐ฅต๐ฅถ๐ณ๐คช๐ต๐คข๐ฅบ๐๐ปโ โป๐ฅด๐ค๐ค๐ฅณ๐ค๐๐บ๐ง๐ค ๐ท๐คฌ๐๐คญ๐น๐คก๐คซ๐คง๐ ๐ก๐คฎ๐คฅ๐ฟ๐พ๐ฝ๐ค๐ฉ๐บ๐ธ๐น๐๐พ๐ฟ๐๐ฝ๐ผ๐ป๐ง๐ง๐จโโ๏ธ๐ฉโโ๏ธ๐จโ๐พ๐จโ๐ญ๐ฉโ๐ญ๐ฉโโ๐ฉโ๐ง๐จโโ๐ต๐ฆ๐ง๐ด๐ฉโ๐ซ๐จโ๐ง๐ฉโ๐ณ๐จโ๐ซ๐ง๐ถ๐๐ฉ๐ฉโ๐๐จโ๐ณ๐ฉโ๐พ๐จโ๐๐จ๐๐จโ๐ผ๐ฉโ๐ผ๐จโ๐ฌ๐ฉโ๐ฌ๐จโ๐ป๐ฉโ๐ป๐จโ๐ค๐ฉโ๐๐จโ๐๐ฉโโ๏ธ๐จโโ๏ธ๐ฉโ๐จ๐จโ๐จ๐ฉโ๐ค๐โโ๏ธ๐ต๏ธโโ๏ธ๐ต๏ธโโ๏ธ๐ฎโโ๏ธ๐ฎโโ๏ธ๐ฉโ๐๐จโ๐๐งโโ๏ธ๐งโโ๏ธ๐งโโ๏ธ๐โโ๏ธ๐โโ๏ธ๐โโ๏ธ๐โโ๏ธ๐โโ๏ธ๐โโ๏ธ๐โโ๏ธ๐โโ๏ธ๐โโ๏ธ๐
โโ๏ธ๐
๐ผโโ๏ธ๐คทโโ๏ธ๐คทโโ๏ธ๐คฆโโ๏ธ๐คฆโโ๏ธ๐โโ๏ธ๐โโ๏ธ๐โโ๏ธ๐ค๐๐ค๐๐๐๐๐๐งกโค๐๐๐โฃ๐๐๐๐๐๐ฃ๐ฏ๐จ๐ฅผ๐๐ข๐ฌ๐ฅฝ๐ถ๐ซ๐จ๐๐ณ๐ฆ๐ฅ๐ญ๐๐งฆ๐งฅ๐งค๐งฃ๐๐๐๐๐๐๐๐๐๐๐ก๐ ๐ฅฟ๐ฅพ๐๐๐๐งขโ๐๐ฉ๐๐๐ข๐ฟ๐๐๐๐งถ๐งตโโฃ๏ธ๐จ๐ผโฆ๏ธโฅ๏ธ๐ญ๐ดโ ๏ธ๐งฟ๐ฎ๐น๐ฐ๐ฒ๐งฉ๐งธ๐๐๐ฎ๐ฑ๐ฏ๐ฅ๐ท๐ฟ๐ฝ๐ฃโธโณ๐ฅ
๐ฅ๐ฅ๐ธ๐ฅ๐ณ๐๐๐๐ฅ๐๐พ๐๐๐๐๐ฅโพ๏ธโฝ๏ธ๐ฅ๐๐ซ๐๐โจ๐๐งจ๐๐๐ฅ๐ฅ๐๐๐๐๐๐๐
๐๐งง๐๐๐๐๐๐โ๐โ๏ธโ๐ฅโโฑ๐ง๐โกโโ๏ธ๐๐๐ง๐ฆ๐ฅ๐ฌ๐ซ๐คโ๐ช๐ฉโ
โ๏ธ๐จ๐ฐโฒโฑโฐ๐๐๐โฐ๐๐๐๐๐๐งฑ๐๐บ๐งญ๐๐๐๐๐๐ผ๐๐๐ต๐ข๐ถ๐๐ฃ๐ฏ๐๐น๐ธ๐ท๐ป๐ง๐ค๐๐โ๏ธ๐ฒ๐ฑ๐ฅ๐ป๐บ๐จ๐ฅ๐ป๐๐๐ ๐๐๐๐ฆ๐ฎ๐๐๐๐๐๐ด๐ฐ๐ท๐๐๐๐ฐ๐ฒโ๐ง๐จ๐ฉ๐ค๐ฅ๐๐๐๐๐๐
๐๐๐๐๐โโ๐ผ๐ฆ๐ซ๐ช๐ฌ๐ญ๐ฎ๐ณ๐๐โ๏ธ๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐๐โ๐๐งชโโ๐ก๐ ๐ฉ๐งฒ๐งฐ๐งโโ๐กโ๐๐น๐จ๐๐ซโ๐๐๐ก๐ญ๐ฌ๐งฌ๐งซ๐ช๐๐๐งบ๐งน๐งท๐โฐโฑโฑ๐ฟ๐งป๐ฝ๐ฟ๐งผ๐งฝ๐งด๐งด๐๐ธโ๐ซ๐ณ๐ณ๐ญ๐ฏ๐ฑโ ๏ธโ
โโญโโโโโณใฝ๏ธโฟโฐโโโโโผโโใฐ๏ธยฉ๏ธยฎ๏ธโ๏ธโโข๏ธ#๏ธโฃ*๏ธโฃโโ๐ ๐ป๐บ๏ธ๐น๏ธ๐ธ๏ธ๐ท๏ธ๐ถ๏ธ๐๐ฒ๐ณ๐ด๐ตโชโซโฌ๐ก๐ฅ๐ฉ๐ช๐ฅ ๐๐ฐ๐ฅก๐ฆ๐ง๐ฅง๐ง๐จ๐ซโ๐ฅ๐ผ๐ฏ๐ฎ๐ญ๐ฌ๐ต๐ถ๐พ๐ท๐ธ๐น๐น๐บ๐บ๐น๐ธ๐ท๐พ๐ถ๐ต๐ป๐ฅ๐ฅ๐ฅค๐ฝ๐ฅข๐ด๐จ"
}
}
]
}
}
else:
dataSend = {
"version": "2.0",
"template": {
"outputs": [
{
"simpleText": {
"text": "์์ง ๊ณต๋ถํ๊ณ ์์ต๋๋ค."
}
}
]
}
}
return jsonify(dataSend)
"""
"buttons": [
{
"action": "message",
"label": "์ข
ํฉ๊ฐ์๋",
"messageText": "์ข
ํฉ๊ฐ์๋ ํ์ ์๋ ค์ฃผ์ธ์"
},
{
"action": "message",
"label": "์๋ง๋์ค ํ",
"messageText": "์๋ง๋์คํ ํ์ ์๋ ค์ฃผ์ธ์"
}
]
"""
if __name__ == "__main__":
ThreadingWeather()
Threading1d()
Threading1h()
Threading4h()
app.run(host='0.0.0.0', port=8888)
| 40.19611
| 828
| 0.283799
|