text
stringlengths 8
6.05M
|
|---|
import os
import json
import subprocess
from shutil import copyfile
from subprocess import PIPE, run as subprocess_run
from typing import List, Dict
from src.contracts.secret.secret_contract import swap_json
from src.util.config import Config
from src.util.logger import get_logger
logger = get_logger(logger_name="SecretCLI")
def query_encrypted_error(tx_hash: str):
    """Fetch the (decrypted) compute-tx result and return its "output_error" field."""
    response = run_secret_cli(['secretcli', 'q', 'compute', 'tx', tx_hash])
    return json.loads(response)["output_error"]
def sign_tx(unsigned_tx_path: str, multi_sig_account_addr: str, account_name: str):
    """Produce a signature-only multisig signature for the given unsigned tx file."""
    command = [
        'secretcli', 'tx', 'sign', unsigned_tx_path,
        '--signature-only',
        '--multisig', multi_sig_account_addr,
        '--from', account_name,
    ]
    return run_secret_cli(command)
def multisig_tx(unsigned_tx_path: str, multi_sig_account_name: str, *signed_tx):
    """Combine individual signature files into a multisig-signed transaction."""
    command = ['secretcli', 'tx', 'multisign', unsigned_tx_path, multi_sig_account_name]
    command.extend(signed_tx)
    return run_secret_cli(command)
def create_unsigned_tx(secret_contract_addr: str, transaction_data: Dict, chain_id: str, enclave_key: str,
                       code_hash: str, multisig_acc_addr: str) -> str:
    """Generate (without broadcasting) a compute-execute tx for the given contract."""
    command = [
        'secretcli', 'tx', 'compute', 'execute', secret_contract_addr,
        json.dumps(transaction_data),
        '--generate-only',
        '--chain-id', str(chain_id),
        '--enclave-key', enclave_key,
        '--code-hash', code_hash,
        '--from', multisig_acc_addr,
    ]
    return run_secret_cli(command)
def broadcast(signed_tx_path: str) -> str:
    """Broadcast a fully signed transaction file to the network."""
    return run_secret_cli(['secretcli', 'tx', 'broadcast', signed_tx_path])
def decrypt(data: str) -> str:
    """Ask the node to decrypt the given compute-output blob."""
    return run_secret_cli(['secretcli', 'query', 'compute', 'decrypt', data])
def query_scrt_swap(nonce: int, contract_addr: str) -> str:
    """Query the swap contract for the swap record with the given nonce.

    Raises subprocess.CalledProcessError on a non-zero exit (check=True).
    """
    query = swap_json(nonce)
    command = ['secretcli', 'query', 'compute', 'query', contract_addr, query]
    completed = subprocess_run(command, stdout=PIPE, stderr=PIPE, check=True)
    return completed.stdout.decode()
def query_tx(tx_hash: str):
    """Query a transaction by hash and return the raw CLI output."""
    return run_secret_cli(['secretcli', 'query', 'tx', tx_hash])
def query_data_success(tx_hash: str):
    """Return the decoded "output_data_as_string" of a compute tx, or {} on any failure.

    Used only to test success of transactions, so every error (CLI failure,
    bad JSON, missing key) is treated as "tx is not on-chain".
    """
    try:
        raw = run_secret_cli(['secretcli', 'query', 'compute', 'tx', tx_hash])
        outer = json.loads(raw)
        return json.loads(outer["output_data_as_string"])
    except (RuntimeError, json.JSONDecodeError, KeyError):
        return {}
def run_secret_cli(cmd: List[str]) -> str:
    """Execute a secretcli command and return its decoded stdout.

    Raises:
        RuntimeError: carrying the process stdout when the command exits non-zero
        (stderr is logged; the original exception context is suppressed).
    """
    try:
        logger.debug(f'Running command: {cmd}')
        completed = subprocess.run(cmd, stdout=PIPE, stderr=PIPE, check=True)
    except subprocess.CalledProcessError as e:
        logger.error(f'Failed: stderr: {e.stderr.decode()}, stdout: {e.stdout.decode()}')
        raise RuntimeError(e.stdout.decode()) from None
    logger.debug('Success')
    return completed.stdout.decode()
def configure_secretcli(config: Config):
    """One-time secretcli setup: CLI defaults, multisig keys, private-key import,
    transaction-key copy, and a final connectivity check.

    Idempotent entry point: returns immediately if keys already exist.

    Raises:
        RuntimeError: if any secretcli invocation fails.
        EnvironmentError: if the private-key import reports an error on stderr.
    """
    # Check if the CLI is already set up: a configured node has at least one key.
    result = run_secret_cli(['secretcli', 'keys', 'list'])
    if result.strip() != '[]':  # sometimes \n is added to the result
        logger.info(f"{result}")
        logger.info("CLI already set up")
        return
    _configure_cli_defaults(config)
    _setup_multisig_keys(config)
    _import_private_key(config)
    _copy_transaction_key(config)
    _verify_configuration(config)


def _configure_cli_defaults(config: Config):
    """Set global secretcli configuration (output format, node, chain, keyring)."""
    settings = [
        ('output', 'json'),
        ('indent', 'true'),
        ('trust-node', 'true'),
        ('node', config['secret_node']),
        ('chain-id', config['chain_id']),
        ('keyring-backend', 'test'),
    ]
    for key, value in settings:
        run_secret_cli(['secretcli', 'config', key, value])


def _setup_multisig_keys(config: Config):
    """Register each signer's public key, then create the combined multisig key."""
    signers = []
    for i, key in enumerate(config["secret_signers"]):
        name = f'ms_signer{i}'
        signers.append(name)
        run_secret_cli(['secretcli', 'keys', 'add', name, f'--pubkey={key}'])
    run_secret_cli(['secretcli', 'keys', 'add', f'{config["multisig_key_name"]}',
                    f"--multisig={','.join(signers)}",
                    '--multisig-threshold', f'{config["signatures_threshold"]}'])


def _import_private_key(config: Config):
    """Import the node's private key, feeding the password on stdin.

    Raises EnvironmentError (now with a message) if secretcli writes to stderr.
    """
    logger.debug(f'importing private key from {config["secret_key_file"]} with name {config["secret_key_name"]}')
    key_path = os.path.join(f'{config["KEYS_BASE_PATH"]}', f'{config["secret_key_file"]}')
    process = subprocess.Popen(
        ['secretcli', 'keys', 'import', f'{config["secret_key_name"]}', key_path],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    password = config["secret_key_password"]
    _, stderrdata = process.communicate(input=(password + "\n").encode())
    if stderrdata:
        logger.error(f"Error importing secret key: {stderrdata}")
        # Fix: raise with a message instead of a bare EnvironmentError.
        raise EnvironmentError(f"Failed to import secret key: {stderrdata}")


def _copy_transaction_key(config: Config):
    """Copy the shared transaction key into the secretcli home directory."""
    logger.debug("copying transaction key..")
    src_key_path = os.path.join(f'{config["KEYS_BASE_PATH"]}', 'id_tx_io.json')
    dst_key_path = os.path.join(f'{config["SECRETCLI_HOME"]}', 'id_tx_io.json')
    copyfile(src_key_path, dst_key_path)


def _verify_configuration(config: Config):
    """Sanity-check the setup by querying the multisig account and network params."""
    run_secret_cli(['secretcli', 'query', 'account', config['multisig_acc_addr']])
    run_secret_cli(['secretcli', 'query', 'register', 'secret-network-params'])
|
def innerProd(l1, l2):
    """Return the inner (dot) product of two equal-length sequences.

    Raises:
        ValueError: if the sequences differ in length (the previous version
        silently fell off the end and returned None in that case).
    """
    if len(l1) != len(l2):
        raise ValueError("innerProd requires sequences of equal length")
    return sum(a * b for a, b in zip(l1, l2))
def listScalarProd(l, s):
    """Scale every element of list `l` by `s` in place and return `l`.

    Bug fix: the body previously referenced an undefined name `l1` instead of
    the parameter `l`, so every call raised NameError.
    """
    for i in range(len(l)):
        l[i] = s * l[i]
    return l
|
#!/usr/bin/env python
# -*-encoding:UTF-8-*-
from myutils.api import serializers
from myutils.api._serializers import UsernameSerializer
from .models import Announcement
class CreateAnnouncementSerializer(serializers.Serializer):
    """Input serializer for creating an announcement."""
    title = serializers.CharField(max_length=64)
    content = serializers.CharField(max_length=1024 * 1024 * 8)  # up to 8 MiB of content
    visible = serializers.BooleanField()
class AnnouncementSerializer(serializers.ModelSerializer):
    """Read serializer for announcements; exposes the creator's username."""
    created_by = UsernameSerializer()

    class Meta:
        model = Announcement
        fields = "__all__"
class EditAnnouncementSerializer(serializers.Serializer):
    """Input serializer for editing an existing announcement."""
    id = serializers.IntegerField()
    title = serializers.CharField(max_length=64)
    content = serializers.CharField(max_length=1024*1024*8)  # up to 8 MiB of content
    visible = serializers.BooleanField()
|
def decrypt(encrypted_text, n):
    """Undo n rounds of `encrypt` (the every-second-character shuffle).

    Per the kata spec, a null/empty input or n < 1 is returned unchanged.
    Fix: a None input with n >= 1 previously raised TypeError on len().
    """
    if not encrypted_text or n < 1:
        return encrypted_text
    output = ""
    mid = len(encrypted_text) // 2
    e_text = encrypted_text
    for x in range(n):
        # Re-interleave: second half holds the former odd-position chars.
        for i in range(mid):
            output += e_text[mid + i]
            output += e_text[i]
        e_text = output
        if x != n - 1:
            output = ""
        elif len(encrypted_text) % 2 != 0:
            # The last char of an odd-length string is invariant under encrypt.
            output += encrypted_text[-1]
    return output
def encrypt(text, n):
    """Apply n rounds of: every-2nd char first, then the remaining chars.

    Per the kata spec, a null/empty input or n <= 0 is returned unchanged.
    Fix: a None input with n >= 1 previously raised TypeError in enumerate().
    """
    if not text or n <= 0:
        return text
    output = text
    for _ in range(n):
        # Odd-index characters, then even-index characters (slicing form of
        # the original enumerate/i%2 loop).
        output = output[1::2] + output[::2]
    return output
# Kata description (reference spec for encrypt/decrypt above).
'''
For building the encrypted string:
Take every 2nd char from the string, then the other chars,
that are not every 2nd char, and concat them as new String.
Do this n times!
Examples:
"This is a test!", 1 -> "hsi  etTi sats!"
"This is a test!", 2 -> "hsi  etTi sats!" -> "s eT ashi tist!"
Write two methods:
def encrypt(text, n)
def decrypt(encrypted_text, n)
For both methods:
If the input-string is null or empty return exactly this value!
If n is <= 0 then return the input text.
'''
|
from django.http import HttpResponse
from django.shortcuts import render
# from rest_framework import status
from rest_framework import mixins
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework import filters
from rest_framework.pagination import PageNumberPagination # 分页功能
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from .models import Goods,GoodsCategory
from .filters import GoodsFilter
from .serializers import GoodsSerializer,CategorySerializer
# Create your views here.
# drf的modelserializer实现商品列表页功能
# class GoodsListView(APIView):
# """
# List all snippets, or create a new snippet.
# """
#
# def get(self, request, format=None):
# print(111)
# goods = Goods.objects.all()[:10]
# goods_serializer = GoodsSerializer(goods, many=True)
# return Response(goods_serializer.data)
#
# def post(self, request, format=None):
# serializer = GoodsSerializer(data=request.data)
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data, status=status.HTTP_201_CREATED)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# 自定义分页
class GoodsPagination(PageNumberPagination):
    """Custom page-number pagination for goods listings."""
    page_size = 12                        # default items per page
    page_size_query_param = 'page_size'   # client may override page size
    page_query_param = 'page'             # query param selecting the page
    max_page_size = 100                   # hard cap on client-requested size
# GenericView方式实现商品列表页和分页功能
# class GoodsListView(generics.ListAPIView):
# """
# 商品列表页
# """
# queryset = Goods.objects.all()
# serializer_class = GoodsSerializer
# pagination_class = GoodsPagination
# def get(self, request, *args, **kwargs):
# return self.list(request, *args, **kwargs)
# viewsets和router完成商品列表页
class GoodsListViewSet(mixins.ListModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet):
    """Goods list page: pagination, search, filtering and ordering."""
    queryset = Goods.objects.all()
    serializer_class = GoodsSerializer
    pagination_class = GoodsPagination
    authentication_classes = (TokenAuthentication,)
    filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
    # NOTE(review): `filter_class` is the pre-2.0 django-filter attribute name;
    # newer versions expect `filterset_class` — confirm the installed version.
    filter_class = GoodsFilter
    search_fields = ('name', 'goods_brief', 'goods_desc')
    ordering_fields = ('sold_num', 'shop_price')
class CategoryViewset(mixins.ListModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet):
    """Goods category list data (top-level categories only, category_type=1)."""
    queryset = GoodsCategory.objects.filter(category_type=1)
    serializer_class = CategorySerializer
|
"""
J7 VPAC specific configurations and methods.
"""
import os
import inspect
REPORT_CALLER = True    # prefix error messages with the caller's name and line number
WAIT_ON_EXIT = False    # pause for user input before exiting on a fatal error
AWB_MAX_IMG_CNT = 20    # maximum number of AWB calibration image slots
def init_params(sys_params):
    """Initialize the nested parameter dictionaries used by the tuning tools.

    Populates the LSC, AWB, AE, GAMMA, LUT3D and NOISE_PIPE sub-dicts of
    `sys_params` in place. Returns None.
    """
    # INITIALIZATION START
    sys_params['LSC'] = {}
    sys_params['AWB'] = {}
    sys_params['AE'] = {'IMAGE': {}, 'VIDEO': {}, 'WIGHTTABLE': {}}
    # Fix idiom: the original used `_` as a real loop variable/dict key.
    for idx in range(4):
        sys_params['LSC'][idx] = {}
    for idx in range(AWB_MAX_IMG_CNT):
        sys_params['AWB'][idx] = {}
    sys_params['LUT3D'] = {}
    sys_params['GAMMA'] = {'VIDEO': {}, 'IMAGE': {}}
    for usecase in ('IMAGE', 'VIDEO'):
        for region in ('OUTDOOR', 'INDOOR', 'INDOOR_DARK'):
            sys_params['GAMMA'][usecase][region] = {}
    sys_params['NOISE_PIPE'] = {}
    for idx in range(6):
        sys_params['NOISE_PIPE'][idx] = {}
    sys_params['LSC']['FILECNT'] = 0
    sys_params['AWB']['FILECNT'] = 0
    sys_params['INPUT_TYPE'] = '.raw'
    # INITIALIZATION FINISH
    return
def error(message, skip=False, warn=False):
    """
    Print error message and quit if not skip.
    Print warning instead of error if warn.
    """
    if REPORT_CALLER:
        try:
            # Identify the frame two levels up (the code that triggered the error).
            caller = inspect.stack()[2]
            message = '[%s:%d] %s' % (caller[3], caller[2], message)
        except IndexError:
            pass  # stack too shallow (e.g. interactive use): keep plain message
    if warn:
        message = '[WARNING] %s' % message
    else:
        message = '[ERROR] %s' % message
    print(message)
    if not skip:
        if WAIT_ON_EXIT:
            # Bug fix: raw_input() is Python 2 only and raises NameError on
            # Python 3 (which this file targets via print()); use input().
            input("Press ENTER to exit")
        os.sys.exit(1)
def get_params(fname: str, params: dict) -> int:
    """
    Load configuration from file into specified dict.
    Lines beginning with # will be ignored as comments.
    Anything after the KEY VALUE double will be ignored.
    Single or multiple whitespace is exclusive delimiter.

    Returns 0 on success, 1 if the file could not be opened.
    `params` is mutated in place and is expected to have been pre-populated
    (e.g. by init_params) with the LSC/AWB/AE/GAMMA/NOISE_PIPE sub-dicts.
    """
    try:
        config_file = open(fname, 'r')
    except IOError as err:
        error('Params file not found: %s' % err.filename, skip=True)
        return 1
    fileindex = 0
    for line in config_file:
        # Skip comment lines.
        if line[0] == '#':
            continue
        # Only the first 10 whitespace-separated fields are significant.
        config = line.split()[:10]
        """
        Config Length 2 for Sensor Parameters
                      > 2 for LSC, AWB
        """
        # AE KEY VALUE  (2 value fields)  or  AE USECASE KEY VALUE
        comp_name_idx = line.find("AE")
        if (comp_name_idx == 0):
            if len(config) == 3:
                params['AE'][config[1]] = config[2].strip()
            else:
                usecase = config[1]
                params['AE'][usecase][config[2]] = config[3]
            continue
        # LUT3D TGT_REF_IDX
        comp_name_idx = line.find("LUT3D")
        if (comp_name_idx == 0):
            params['LUT3D']['TGT_REF_IDX'] = config[1].strip()
            continue
        # Plain KEY VALUE sensor parameter.
        if len(config) == 2:
            try:
                params[config[0]] = config[1].strip()
            except ValueError:
                # NOTE(review): a plain assignment cannot raise ValueError;
                # this fallback appears to be dead code.
                params[config[0]] = config[1]
            continue
        # LSC IMG_TYPE TEMP FILENAME %GECORRECT VALID
        comp_name_idx = line.find("LSC")
        if (comp_name_idx == 0):
            val = int(config[5])  # VALID flag: entry is stored only when 1
            temperature = int(config[2].strip())
            ShdCorrect = int(config[4].strip())
            if val == 1:
                fileindex = params['LSC']['FILECNT']
                params['LSC'][fileindex]['TEMP'] = temperature
                try:
                    params['LSC'][fileindex]['FILENAME'] = config[3]
                except ValueError:
                    # NOTE(review): both branches are identical — dead code.
                    params['LSC'][fileindex]['FILENAME'] = config[3]
                params['LSC'][fileindex]['SHDCORECT'] = ShdCorrect
                params['LSC']['FILECNT'] = params['LSC']['FILECNT'] + 1
            continue
        # # AWB STD/ADL TEMP GAIN EXP FLASH APERTURE FILENAME VALID
        comp_name_idx = line.find("AWB")
        if (comp_name_idx == 0):
            val = int(config[9])  # VALID flag: entry is stored only when 1
            ImageType = config[1].strip()
            temperature = int(config[3].strip())
            Gain = int(config[4].strip())
            Exposure = int(config[5].strip())
            Flash = int(config[6].strip())
            Aperture = int(config[7].strip())
            if val == 1:
                fileindex = params['AWB']['FILECNT']
                params['AWB'][fileindex]['IMG_TYP'] = ImageType
                params['AWB'][fileindex]['TEMP'] = temperature
                params['AWB'][fileindex]['GAIN'] = Gain
                params['AWB'][fileindex]['EXP'] = Exposure
                params['AWB'][fileindex]['FLASH'] = Flash
                params['AWB'][fileindex]['APERTURE'] = Aperture
                params['AWB'][fileindex]['FILENAME'] = config[8]
                params['AWB']['FILECNT'] = params['AWB']['FILECNT'] + 1
            continue
        # GAMMA VIDEO/IMAGE OUTDOOR/INDOOR/INDOORDARK GAMMAFILE VALID
        comp_name_idx = line.find("GAMMA")
        if (comp_name_idx == 0):
            usecase = config[1].strip()
            region = config[2].strip()
            params['GAMMA'][usecase][region]['FILENAME'] = config[3].strip()
            continue
        comp_name_idx = line.find("NOISE_PIPE")
        # NOISE_PIPE OUTDOOR?INDOOR OUTDOOR/HIGH/MID_HIGH/MID/MID_LOW/LOW NOISE SHARPNESS
        # Maps the region token to its fixed slot index in params['NOISE_PIPE'].
        RegionList = {'OUTDOOR': 0, 'HIGH': 1, 'MID_HIGH': 2, 'MID': 3, 'MID_LOW': 4, 'LOW': 5}
        if (comp_name_idx == 0):
            key = str(config[2].strip())
            RegionIdx = RegionList[key]
            params['NOISE_PIPE'][RegionIdx]['NOISE'] = int(config[3].strip())
            params['NOISE_PIPE'][RegionIdx]['SHARPNESS'] = int(config[4].strip())
            continue
    config_file.close()
    return 0
|
"""
The MyPaas setup script.
"""
import os
try:
import setuptools # noqa, analysis:ignore
except ImportError:
pass # setuptools allows for "develop", but it's not essential
from distutils.core import setup
def get_version_and_doc(filename):
    """Extract __version__ and the module docstring from a Python source file.

    Parses the file textually (without importing it): the __version__
    assignment line is exec'd, and the lines between the opening and closing
    triple-quote markers are collected as the docstring.

    Returns:
        (version, doc) tuple of strings.

    Raises:
        RuntimeError: if no __version__ assignment is found.
    """
    ns = dict(__version__="", __doc__="")
    docstatus = 0  # 0: not started, 1: inside docstring, 2: done
    # Fix: read via a context manager so the file handle is always closed.
    with open(filename, "rb") as fh:
        lines = fh.read().decode().splitlines()
    for line in lines:
        if line.startswith("__version__"):
            exec(line.strip(), ns, ns)
        elif line.startswith('"""'):
            if docstatus == 0:
                docstatus = 1
                line = line.lstrip('"')
            elif docstatus == 1:
                docstatus = 2
        if docstatus == 1:
            ns["__doc__"] += line.rstrip() + "\n"
    if not ns["__version__"]:
        raise RuntimeError("Could not find __version__")
    return ns["__version__"], ns["__doc__"]
# Get version and docstring (i.e. long description)
version, doc = get_version_and_doc(
    os.path.join(os.path.dirname(__file__), "mypaas", "__init__.py")
)

# Client dependencies are always installed; the server set only via the
# "server" extra (pip install mypaas[server]).
client_requires = ["cryptography", "requests", "pyperclip", "toml"]
server_requires = ["uvicorn", "asgineer", "psutil", "fastuaparser", "pscript"]

setup(
    name="mypaas",
    version=version,
    author="Almar Klein",
    author_email="",
    license="2-clause BSD",
    url="https://github.com/almarklein/mypaas",
    keywords="paas, saas, deployment, traefik, docker",
    description="Run your own PaaS using Docker, Traefik, and great monitoring",
    long_description=doc,
    platforms="any",
    python_requires=">=3.6",
    install_requires=client_requires,
    extras_require={"server": server_requires},
    packages=[
        "mypaas",
        "mypaas.utils",
        "mypaas.client",
        "mypaas.server",
        "mypaas.daemon",
        "mypaas.stats",
    ],
    entry_points={"console_scripts": ["mypaas = mypaas.__main__:main"]},
    zip_safe=True,
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Topic :: Internet :: WWW/HTTP",
        "License :: OSI Approved :: BSD License",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
    ],
)
|
from enums import Direction, State, Symbol
def parse(f):
    """Parse a Turing-machine transition file into a nested dict.

    Returns {State: {Symbol: (new_State, new_Symbol, Direction)}}.
    The first blank-line-separated chunk (header) is printed and skipped.

    Fix: replaces a bare `except:` (which swallowed every exception and
    crashed on chunks of 4+ lines) with explicit line-count handling.
    """
    dic = {}
    content = read(f)
    transitions = content.split("\n\n")
    print(transitions[0])
    for t in transitions[1:]:
        lines = t.split("\n")
        if len(lines) == 1:
            # Trailing chunk (e.g. final newline): nothing left to parse.
            break
        src, dst = lines[0], lines[1]  # any extra trailing lines are ignored
        state, symbol = src.split(",")
        state_key = State.by_rep(state)
        symbol_key = Symbol.by_rep(symbol)
        if state_key not in dic:
            dic[state_key] = {}
        dic[state_key].update({symbol_key: make_tuple(dst)})
    return dic
def make_tuple(dst):
    """Build a (new_state, new_symbol, direction) target from a "s,y,d" string."""
    new_state, new_symbol, move = dst.split(",")
    return (State.by_rep(new_state), Symbol.by_rep(new_symbol), Direction.by_rep(move))
def read(f):
    """Return the full text content of file `f`.

    Fix: the previous version caught FileNotFoundError/PermissionError/
    UnicodeDecodeError/IsADirectoryError only to re-raise them unchanged;
    letting them propagate naturally is equivalent and simpler.
    """
    with open(f, "r") as fd:
        return fd.read()
def display(dic):
    """Pretty-print each state's transition table."""
    widths = (
        Symbol.max_len("Symbol"),
        State.max_len("New State"),
        Symbol.max_len("New Symbol"),
        Direction.max_len("Direction"),
    )
    total = sum(widths) + 11  # 11 = separators and padding around 4 cells
    border = "+" + "-" * total + "+"
    for state, transitions in dic.items():
        print(f"State: {state.name}")
        print(border)
        pprint("Symbol", widths[0])
        pprint("New State", widths[1])
        pprint("New Symbol", widths[2])
        pprint("Direction", widths[3], last=True)
        for symbol, target in transitions.items():
            pprint(symbol.name[4], widths[0])
            pprint(target[0].name, widths[1])
            pprint(target[1].name[4], widths[2])
            pprint(target[2].name, widths[3], last=True)
        print(border)
        print("")
def pprint(txt, nb, last=False):
    """Print one right-aligned table cell of width `nb`; `last` closes the row."""
    cell = "| " + txt.rjust(nb) + " "
    if last:
        print(cell + "|")
    else:
        print(cell, end='')
|
# Generated by Django 3.0.4 on 2021-03-07 00:07
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Allows the answer/comment many-to-many fields to be left empty (blank=True).

    dependencies = [
        ('api', '0011_auto_20210307_0004'),
    ]

    operations = [
        migrations.AlterField(
            model_name='answer',
            name='comments',
            field=models.ManyToManyField(blank=True, to='api.Comment'),
        ),
        migrations.AlterField(
            model_name='answer',
            name='upvotes',
            field=models.ManyToManyField(blank=True, related_name='answers_upvoted', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='comment',
            name='upvotes',
            field=models.ManyToManyField(blank=True, related_name='comments_upvoted', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
from math import sin, cos
import numpy as np
base = 10  # ratio between consecutive sample-part sizes (parts hold base**k elements)
def exact_sin_sum(K):
    """Closed-form reference value for the sin-series sum (see samples_sin)."""
    cot_half = cos(.5) / sin(.5)
    return .5 * (sin(K) - cot_half * cos(K) + cot_half) / K
def exact_sum(K):
    """Exact value of the sum of all sample elements (the parts sum to 1)."""
    return 1.
def samples_sin(N):
    """Sample of sin(i)/N for i = 1..N, returned in random order."""
    # Build the values lazily, then shuffle them.
    values = np.fromiter((sin(k) / N for k in range(1, N + 1)), dtype=float, count=N)
    return np.random.permutation(values)
def samples_abs(N):
    """Sample of sin(i)/N for i = 1..N, ordered by increasing absolute value."""
    ordered = sorted((sin(k) / N for k in range(1, N + 1)), key=abs)
    return np.array(ordered)
def Kahan_sum(x):
    """Compensated (Kahan) summation of the elements of x."""
    total = 0.0         # running sum
    compensation = 0.0  # accumulated rounding error
    for value in x:
        adjusted = value - compensation        # correct the addend by the stored error
        new_total = total + adjusted           # low-order bits of `adjusted` may be lost here
        compensation = (new_total - total) - adjusted  # recover the lost bits
        total = new_total
    return total
def relative_error(x0, x):
    """Error of x relative to the reference value x0.

    NOTE: normalizes by |x| (the approximation), not |x0| (the exact value).
    """
    difference = np.abs(x0 - x)
    return difference / np.abs(x)
def samples(K):
    """Shuffled sample of K parts: part k holds base**k copies of base**(-k)/K.

    Every part sums to 1/K, so the whole sample sums to 1.
    """
    parts = [np.full((base ** k,), float(base) ** (-k) / K) for k in range(K)]
    combined = np.concatenate(parts)
    return np.random.permutation(combined)
def direct_sum(x):
    """Sequential (left-to-right) sum of all elements of vector x."""
    total = 0.
    for value in x:
        total += value
    return total
def number_of_samples(K):
    """Total number of elements in the sample: sum of base**k for k < K."""
    part_sizes = [base ** k for k in range(K)]
    return np.sum(part_sizes)
# Experiment 1: compare summation algorithms on the power-of-base sample.
K = 7  # number of sample parts (number of terms)
direct, kahan, sort = [], [], []
for i in range(10):
    x = samples(K)  # draw the shuffled sample
    exact_sum_for_x = exact_sum(K)  # reference value with near-machine accuracy
    direct_sum_for_x = direct_sum(x)
    direct.append(relative_error(exact_sum_for_x, direct_sum_for_x))
    sorted_x = x[np.argsort(x)]
    sorted_sum_for_x = direct_sum(sorted_x)
    sort.append(relative_error(exact_sum_for_x, sorted_sum_for_x))
    Kahan_sum_for_x = Kahan_sum(x)  # compensated sum of all elements in order
    kahan.append(relative_error(exact_sum_for_x, Kahan_sum_for_x))
print('Алгоритм Кэхэна:', sum(kahan)/len(kahan))
print('Сортировка по возрастанию:', sum(sort)/len(sort))
print('Прямой проход:', sum(direct)/len(direct))
# Experiment 2: the same algorithms on an alternating-sign (sin) sample.
direct, kahan, sort, sorted_kahan_increase, abs_kahan = [], [], [], [], []
N = 1111111
for i in range(10):
    x = samples_sin(N)
    y = samples_abs(N)
    exact_sum_for_x = exact_sin_sum(N)  # reference value with near-machine accuracy
    Kahan_sum_for_x = Kahan_sum(x)
    direct_sum_for_x = direct_sum(x)
    direct.append(relative_error(exact_sum_for_x, direct_sum_for_x))
    kahan.append(relative_error(exact_sum_for_x, Kahan_sum_for_x))
    sorted_x = x[np.argsort(x)]
    sorted_sum_for_x = direct_sum(sorted_x)
    sorted_kahan = Kahan_sum(sorted_x)
    sort.append(relative_error(exact_sum_for_x, sorted_sum_for_x))
    sorted_kahan_increase.append(relative_error(exact_sum_for_x, sorted_kahan))
    abs_sort_kahan = Kahan_sum(y)  # Kahan over values sorted by absolute value
    abs_kahan.append(relative_error(exact_sum_for_x, abs_sort_kahan))
print('Для знакопеременной последовательности:')
print('Алгоритм Кэхэна:', sum(kahan)/len(kahan))
print('Сортировка по возрастанию:', sum(sort)/len(sort))
print('Прямой проход:', sum(direct)/len(direct))
print('Алгоритм Кэхэна с сортировкой по возрастанию:', sum(sorted_kahan_increase)/len(sorted_kahan_increase))
print('Алгоритм Кэхэна с сортировкой по возрастанию абсолютных значений:', sum(abs_kahan)/len(abs_kahan))
# Sample parameters for the variance experiment.
mean = 1e6    # mean value
delta = 1e-5  # deviation from the mean
def samples2(N_over_two):
    """Generate 2*N_over_two shuffled values with the given mean:
    half at mean+delta, half at mean-delta."""
    values = np.full((2 * N_over_two,), mean, dtype=np.double)
    values[:N_over_two] += delta
    values[N_over_two:] -= delta
    return np.random.permutation(values)
def exact_mean():
    """Arithmetic mean of the samples2 sample, accurate to machine precision."""
    return mean
def exact_variance():
    """Variance estimate of the samples2 sample, accurate to machine precision."""
    return delta ** 2
# Experiment 3: mean and variance of a large nearly-constant sample.
x = samples2(1000000)
y = x[np.argsort(x)]  # sorted copy (appears unused below)
print("Размер выборки:", len(x))
print("Среднее значение:", exact_mean())
print("Оценка дисперсии:", exact_variance())
print("Ошибка среднего для встроенной функции:", relative_error(exact_mean(), np.mean(x)))
print("Ошибка дисперсии для встроенной функции:", relative_error(exact_variance(), np.var(x)))
def direct_mean(x):
    """Arithmetic mean of x computed via plain sequential summation."""
    count = len(x)
    return direct_sum(x) / count
def kahan_mean(x):
    """Arithmetic mean of x computed via compensated (Kahan) summation."""
    count = len(x)
    return Kahan_sum(x) / count
def kahan_second_var(x):
    """Second variance estimate, E[x^2] - E[x]^2, using Kahan means."""
    mean_of_squares = kahan_mean(x ** 2)
    square_of_mean = kahan_mean(x) ** 2
    return mean_of_squares - square_of_mean
def kahan_first_var(x):
    """First variance estimate, E[(x - E[x])^2], using Kahan means."""
    centered = x - kahan_mean(x)
    return kahan_mean(centered ** 2)
def direct_first_var(x):
    """First variance estimate, E[(x - E[x])^2], via sequential summation."""
    centered = x - direct_mean(x)
    return direct_mean(centered ** 2)
def direct_second_var(x):
    """Second variance estimate, E[x^2] - E[x]^2, via sequential summation."""
    mean_of_squares = direct_mean(x ** 2)
    square_of_mean = direct_mean(x) ** 2
    return mean_of_squares - square_of_mean
def online_second_var(x):
    """Second variance estimate computed in a single streaming pass over x."""
    running_mean = x[0]         # accumulated mean
    running_mean_sq = x[0] ** 2  # accumulated mean of squares
    for n in range(1, len(x)):
        running_mean = (running_mean * (n - 1) + x[n]) / n
        running_mean_sq = (running_mean_sq * (n - 1) + x[n] ** 2) / n
    return running_mean_sq - running_mean ** 2
# Error of the first variance estimate under Kahan summation.
print('Ошибка для алгоритма Кэхэна', relative_error(exact_variance(), kahan_first_var(x)))
def welford(x):
    """Welford's online algorithm: return the sum of squared deviations (M2) of x.

    Divide the result by len(x) for the biased variance (cf. welford2).

    Bug fix: the previous version seeded m2 with x[0]**2 and updated the mean
    with `m += m2 / n` instead of `m += delta / (n + 1)`, so it did not
    implement Welford's recurrence and returned a meaningless value.
    """
    m = x[0]   # running mean
    m2 = 0.0   # running sum of squared deviations
    for n in range(1, len(x)):
        delta = x[n] - m
        m += delta / (n + 1)        # n + 1 elements seen so far
        delta2 = x[n] - m           # deviation from the updated mean
        m2 += delta * delta2
    return m2
def welford2(x):
    """Single-pass computation of (mean, biased variance) of x."""
    running_sum = x[0]  # sum of the elements seen so far
    m2 = 0              # accumulated sum of squared deviations
    for n in range(1, len(x)):
        m2 += (n * x[n] - running_sum) ** 2 / (n * (n + 1))
        running_sum += x[n]
    size = len(x)
    return running_sum / size, m2 / size
# Compare the remaining variance estimators on the same sample.
print("Ошибка для алгоритма Уэлфорта:", relative_error(exact_variance(), welford2(x)[1]))
print("Ошибка второй оценки дисперсии для последовательного суммирования:",
      relative_error(exact_variance(), direct_second_var(x)))
print("Ошибка второй оценки дисперсии для однопроходного суммирования:",
      relative_error(exact_variance(), online_second_var(x)))
print("Ошибка первой оценки дисперсии для последовательного суммирования:",
      relative_error(exact_variance(), direct_first_var(x)))
|
import numpy as np
from scipy.interpolate import RectBivariateSpline
# Scratch experiments with array assignment and concatenation.
z = np.ones((5,10))
z[3,3:5] = [1,2]  # NOTE(review): this z is discarded by the reassignment below
p = np.ones((2,1))
z = np.concatenate((p,p), axis=1)  # z becomes a (2, 2) array of ones
print(z)
# x=np.arange(5)
# y=np.arange(7)
#
# func = RectBivariateSpline(x,y,z)
#
# x1 = np.linspace(1,4,3)
# y1 = np.linspace(2,5,5)
#
# print(x1)
# z1=func.__call__(x1,y1)
# # z2=func.ev(x1,y1,0,1)
# print(z1,'-------')
|
"""API v2 tests."""
from django.core.files.base import ContentFile
from django.urls import reverse
from django.utils.encoding import force_str
from rest_framework.authtoken.models import Token
from modoboa.admin import factories, models, constants
from modoboa.core import models as core_models
from modoboa.lib.tests import ModoAPITestCase
class DomainViewSetTestCase(ModoAPITestCase):
    """API v2 tests for the domain endpoints (CRUD, admins, CSV import/export)."""

    @classmethod
    def setUpTestData(cls):  # NOQA:N802
        """Create test data."""
        super().setUpTestData()
        factories.populate_database()
        # Token of a domain administrator, used for permission checks.
        cls.da_token = Token.objects.create(
            user=core_models.User.objects.get(username="admin@test.com"))

    def test_create(self):
        """Creating a domain with an admin also creates a mailbox and postmaster alias."""
        url = reverse("v2:domain-list")
        data = {
            "name": "domain.tld",
            "domain_admin": {
                "username": "admin",
                "with_mailbox": True,
                "with_aliases": True
            }
        }
        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 201)
        dom = models.Domain.objects.get(pk=resp.json()["pk"])
        self.assertEqual(len(dom.admins), 1)
        admin = dom.admins.first()
        self.assertTrue(hasattr(admin, "mailbox"))
        self.assertTrue(
            models.Alias.objects.filter(
                address="postmaster@domain.tld").exists()
        )

    def test_update(self):
        """Switching a domain to a relay domain stores the transport settings."""
        domain = models.Domain.objects.get(name="test2.com")
        data = {
            "name": "test2.com",
            "type": "relaydomain",
            "transport": {
                "service": "relay",
                "settings": {
                    "relay_target_port": 25,
                    "relay_target_host": "localhost"
                }
            }
        }
        url = reverse("v2:domain-detail", args=[domain.pk])
        resp = self.client.put(url, data, format="json")
        self.assertEqual(resp.status_code, 200)
        domain.refresh_from_db()
        self.assertEqual(domain.transport.service, data["transport"]["service"])
        self.assertEqual(
            domain.transport._settings["relay_target_host"],
            data["transport"]["settings"]["relay_target_host"]
        )
        # A second update with an extra transport flag must also succeed.
        data["transport"]["relay_verify_recipients"] = True
        resp = self.client.put(url, data, format="json")
        self.assertEqual(resp.status_code, 200)

    def test_update_resources(self):
        """Updating per-domain resource limits through the detail endpoint."""
        self.set_global_parameter("enable_domain_limits", True, app="limits")
        domain = models.Domain.objects.get(name="test2.com")
        data = {
            "name": "test2.com",
            "resources": [
                {"name": "domain_aliases", "max_value": 20}
            ]
        }
        url = reverse("v2:domain-detail", args=[domain.pk])
        resp = self.client.put(url, data, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            domain.domainobjectlimit_set.get(name="domain_aliases").max_value, 20
        )

    def test_delete(self):
        """A domain admin gets 403 on delete; the super admin gets 204."""
        self.client.credentials(
            HTTP_AUTHORIZATION="Token " + self.da_token.key)
        domain = models.Domain.objects.get(name="test2.com")
        url = reverse("v2:domain-delete", args=[domain.pk])
        resp = self.client.post(url)
        self.assertEqual(resp.status_code, 403)
        domain = models.Domain.objects.get(name="test.com")
        url = reverse("v2:domain-delete", args=[domain.pk])
        resp = self.client.post(url)
        self.assertEqual(resp.status_code, 403)
        # Switch to the super-admin token: deletion is now allowed.
        self.client.credentials(HTTP_AUTHORIZATION="Token " + self.token.key)
        url = reverse("v2:domain-delete", args=[domain.pk])
        resp = self.client.post(url)
        self.assertEqual(resp.status_code, 204)

    def test_administrators(self):
        """Listing the administrators of a domain."""
        domain = models.Domain.objects.get(name="test.com")
        url = reverse("v2:domain-administrators", args=[domain.pk])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(resp.json()), 1)

    def test_administrator(self):
        """Adding an administrator to a domain."""
        domain = models.Domain.objects.get(name="test.com")
        url = reverse("v2:domain-add-administrator", args=[domain.pk])
        account = core_models.User.objects.get(username="user@test.com")
        data = {"account": account.pk}
        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 200)

    def test_remove_adminstrator(self):
        """Removing an administrator from a domain."""
        domain = models.Domain.objects.get(name="test.com")
        url = reverse("v2:domain-remove-administrator", args=[domain.pk])
        account = core_models.User.objects.get(username="user@test.com")
        data = {"account": account.pk}
        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 200)

    def test_domains_import(self):
        """Importing domains and domain aliases from a CSV file."""
        f = ContentFile(b"""domain; domain1.com; 1000; 100; True
domain; domain2.com; 1000; 200; False
domainalias; domalias1.com; domain1.com; True
""", name="domains.csv")
        self.client.post(
            reverse("v2:domain-import-from-csv"), {
                "sourcefile": f
            }
        )
        admin = core_models.User.objects.get(username="admin")
        dom = models.Domain.objects.get(name="domain1.com")
        self.assertEqual(dom.quota, 1000)
        self.assertEqual(dom.default_mailbox_quota, 100)
        self.assertTrue(dom.enabled)
        self.assertTrue(admin.is_owner(dom))
        domalias = models.DomainAlias.objects.get(name="domalias1.com")
        self.assertEqual(domalias.target, dom)
        self.assertTrue(dom.enabled)
        self.assertTrue(admin.is_owner(domalias))
        dom = models.Domain.objects.get(name="domain2.com")
        self.assertEqual(dom.default_mailbox_quota, 200)
        self.assertFalse(dom.enabled)
        self.assertTrue(admin.is_owner(dom))

    def test_export_domains(self):
        """Check domain export."""
        dom = models.Domain.objects.get(name="test.com")
        factories.DomainAliasFactory(name="alias.test", target=dom)
        response = self.client.get(reverse("v2:domain-export"))
        expected_response = [
            "domain,test.com,50,10,True",
            "domainalias,alias.test,test.com,True",
            "domain,test2.com,0,0,True",
        ]
        self.assertCountEqual(
            expected_response,
            force_str(response.content.strip()).split("\r\n")
        )
class AccountViewSetTestCase(ModoAPITestCase):
    """Tests for the v2 account API (creation, update, deletion, actions)."""

    @classmethod
    def setUpTestData(cls): # NOQA:N802
        """Create test data."""
        super().setUpTestData()
        factories.populate_database()

    def test_create(self):
        """A simple account with an alias can be created."""
        url = reverse("v2:account-list")
        data = {
            "username": "toto@test.com",
            "role": "SimpleUsers",
            "mailbox": {
                "use_domain_quota": True
            },
            "password": "Toto12345",
            "language": "fr",
            "aliases": ["alias3@test.com"]
        }
        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 201)
        self.assertTrue(
            models.Alias.objects.filter(
                address="alias3@test.com").exists()
        )

    def test_create_admin(self):
        """Creating a super administrator with aliases must fail."""
        url = reverse("v2:account-list")
        data = {
            "username": "superadmin",
            "role": "SuperAdmins",
            "password": "Toto12345",
            "language": "fr",
            "aliases": ["alias3@test.com"]
        }
        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 400)

    def test_create_with_bad_password(self):
        """Missing or too-weak passwords must be rejected with an error."""
        url = reverse("v2:account-list")
        data = {
            "username": "superadmin",
            "role": "SuperAdmins",
        }
        # No password at all
        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 400)
        self.assertIn("password", resp.json())
        # Password not matching the policy
        data["password"] = "Toto"
        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 400)
        self.assertIn("password", resp.json())

    def test_validate(self):
        """Test validate and throttling."""
        data = {"username": "toto@test.com"}
        url = reverse("v2:account-validate")
        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 204)

    def test_random_password(self):
        """The random-password endpoint returns a generated password."""
        url = reverse("v2:account-random-password")
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("password", resp.json())

    def test_delete(self):
        """Deleting an account removes the underlying user object."""
        account = core_models.User.objects.get(username="user@test.com")
        url = reverse("v2:account-delete", args=[account.pk])
        resp = self.client.post(url)
        self.assertEqual(resp.status_code, 204)
        with self.assertRaises(core_models.User.DoesNotExist):
            account.refresh_from_db()

    def test_update(self):
        """A partial update of an existing account succeeds."""
        account = core_models.User.objects.get(username="user@test.com")
        url = reverse("v2:account-detail", args=[account.pk])
        data = {
            "username": "user@test.com",
            "role": "SimpleUsers",
            "password": "Toto12345",
            "mailbox": {
                "quota": 10
            },
            "aliases": [
                "aliasupdate1@test.com"
            ]
        }
        resp = self.client.patch(url, data, format="json")
        self.assertEqual(resp.status_code, 200)

    def test_mailbox_options_update(self):
        """Mailbox options persist across updates that don't mention them."""
        account = core_models.User.objects.get(username="user@test.com")
        url = reverse("v2:account-detail", args=[account.pk])
        data = {
            "username": "user@test.com",
            "role": "SimpleUsers",
            "password": "Toto12345",
            "mailbox": {
                "message_limit": 10,
                "is_send_only": True,
            }
        }
        resp = self.client.patch(url, data, format="json")
        self.assertEqual(resp.status_code, 200)
        account.refresh_from_db()
        self.assertEqual(account.mailbox.message_limit, 10)
        self.assertEqual(account.mailbox.is_send_only, True)
        # An update not mentioning message_limit must keep the old value
        data = {
            "username": "user@test.com",
            "role": "SimpleUsers",
            "password": "Toto12345",
            "mailbox": {
                "quota": 10
            }
        }
        resp = self.client.patch(url, data, format="json")
        self.assertEqual(resp.status_code, 200)
        account.refresh_from_db()
        self.assertEqual(account.mailbox.message_limit, 10)
        # Explicitly resetting the options must work too
        data = {
            "username": "user@test.com",
            "role": "SimpleUsers",
            "password": "Toto12345",
            "mailbox": {
                "message_limit": None,
                "is_send_only": False,
            }
        }
        resp = self.client.patch(url, data, format="json")
        self.assertEqual(resp.status_code, 200)
        account.refresh_from_db()
        self.assertEqual(account.mailbox.message_limit, None)
        self.assertEqual(account.mailbox.is_send_only, False)

    def test_update_aliases(self):
        """Aliases can be added, shared between accounts and removed."""
        account = core_models.User.objects.get(username="user@test.com")
        url = reverse("v2:account-detail", args=[account.pk])
        data = {
            "username": "user@test.com",
            "role": "SimpleUsers",
            "password": "Toto12345",
            "mailbox": {
                "quota": 10
            },
            "aliases": [
                "aliasupdate1@test.com"
            ]
        }
        resp = self.client.patch(url, data, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(
            models.Alias.objects.filter(address="aliasupdate1@test.com").exists())
        # Create an alias for another user
        url = reverse("v2:account-list")
        data = {
            "username": "toto@test.com",
            "role": "SimpleUsers",
            "mailbox": {
                "use_domain_quota": True
            },
            "password": "Toto12345",
            "language": "fr",
            "aliases": ["totoalias@test.com"]
        }
        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 201)
        # Try updating existing account with this newly created alias
        url = reverse("v2:account-detail", args=[account.pk])
        data = {
            "username": "user@test.com",
            "role": "SimpleUsers",
            "password": "Toto12345",
            "mailbox": {
                "quota": 10
            },
            "aliases": [
                "totoalias@test.com",
                "aliasupdate1@test.com",
            ]
        }
        resp = self.client.patch(url, data, format="json")
        self.assertEqual(resp.status_code, 200)
        alias = models.Alias.objects.filter(address="totoalias@test.com")
        self.assertTrue(alias.exists())
        alias_recipients = list(alias.first().recipients)
        self.assertIn("toto@test.com", alias_recipients)
        self.assertIn("user@test.com", alias_recipients)
        # Try deleting the aliases
        data = {
            "username": "user@test.com",
            "role": "SimpleUsers",
            "mailbox": {
                "quota": 10
            },
            "aliases": []
        }
        resp = self.client.patch(url, data, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertFalse(
            models.Alias.objects.filter(address="aliasupdate1@test.com").exists())
        # The shared alias must only lose this account as a recipient
        alias_recipients = list(models.Alias.objects.filter(
            address="totoalias@test.com").first().recipients
        )
        self.assertEqual(alias_recipients, ["toto@test.com"])

    def test_update_admin(self):
        """Renaming an admin also renames its mailbox address."""
        account = core_models.User.objects.get(username="admin")
        url = reverse("v2:account-detail", args=[account.pk])
        data = {
            "username": "superadmin@test.com",
            "role": "SuperAdmins",
            "password": "Toto12345",
            "mailbox": {
                "quota": 10
            }
        }
        resp = self.client.patch(url, data, format="json")
        self.assertEqual(resp.status_code, 200)
        account.refresh_from_db()
        self.assertEqual(account.email, data["username"])
        self.assertEqual(account.mailbox.full_address, data["username"])

    def test_update_resources(self):
        """Resource limits can be updated through the account endpoint."""
        account = core_models.User.objects.get(username="admin@test.com")
        url = reverse("v2:account-detail", args=[account.pk])
        data = {
            "resources": [
                {"name": "mailboxes", "max_value": 10},
                {"name": "mailbox_aliases", "max_value": 10}
            ]
        }
        resp = self.client.patch(url, data, format="json")
        self.assertEqual(resp.status_code, 200)
        limit = account.userobjectlimit_set.get(name="mailboxes")
        self.assertEqual(limit.max_value, 10)

    def test_get_with_resources(self):
        """Account detail includes the resources list."""
        account = core_models.User.objects.get(username="admin@test.com")
        url = reverse("v2:account-detail", args=[account.pk])
        resp = self.client.get(url, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(resp.json()["resources"]), 2)
class IdentityViewSetTestCase(ModoAPITestCase):
    """Tests for the v2 identities API (list, CSV import/export)."""

    @classmethod
    def setUpTestData(cls): # NOQA:N802
        """Create test data."""
        super().setUpTestData()
        factories.populate_database()

    def test_list(self):
        """All identities are returned."""
        url = reverse("v2:identities-list")
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(resp.json()), 8)

    def test_import(self):
        """Importing a CSV file creates accounts, aliases and lists."""
        f = ContentFile("""
account; user1@test.com; toto; User; One; True; SimpleUsers; user1@test.com; 0
account; Truc@test.com; toto; René; Truc; True; DomainAdmins; truc@test.com; 5; test.com
alias; alias1@test.com; True; user1@test.com
forward; alias2@test.com; True; user1+ext@test.com
forward; fwd1@test.com; True; user@extdomain.com
dlist; dlist@test.com; True; user1@test.com; user@extdomain.com
""", name="identities.csv") # NOQA:E501
        self.client.post(
            reverse("v2:identities-import-from-csv"),
            {"sourcefile": f, "crypt_password": True}
        )
        admin = core_models.User.objects.get(username="admin")
        u1 = core_models.User.objects.get(username="user1@test.com")
        mb1 = u1.mailbox
        self.assertTrue(admin.is_owner(u1))
        self.assertEqual(u1.email, "user1@test.com")
        self.assertEqual(u1.first_name, "User")
        self.assertEqual(u1.last_name, "One")
        self.assertTrue(u1.is_active)
        self.assertEqual(u1.role, "SimpleUsers")
        self.assertTrue(mb1.use_domain_quota)
        self.assertEqual(mb1.quota, 0)
        self.assertTrue(admin.is_owner(mb1))
        self.assertEqual(mb1.full_address, "user1@test.com")
        # crypt_password=True: the clear-text password must allow login
        self.assertTrue(
            self.client.login(username="user1@test.com", password="toto")
        )
        da = core_models.User.objects.get(username="truc@test.com")
        damb = da.mailbox
        self.assertEqual(da.first_name, u"René")
        self.assertEqual(da.role, "DomainAdmins")
        self.assertEqual(damb.quota, 5)
        self.assertFalse(damb.use_domain_quota)
        self.assertEqual(damb.full_address, "truc@test.com")
        dom = models.Domain.objects.get(name="test.com")
        self.assertIn(da, dom.admins)
        u = core_models.User.objects.get(username="user@test.com")
        self.assertTrue(da.can_access(u))
        al = models.Alias.objects.get(address="alias1@test.com")
        self.assertTrue(
            al.aliasrecipient_set
            .filter(r_mailbox=u1.mailbox).exists()
        )
        self.assertTrue(admin.is_owner(al))
        fwd = models.Alias.objects.get(address="fwd1@test.com")
        # External forward: recipient has no local mailbox nor local alias
        self.assertTrue(
            fwd.aliasrecipient_set
            .filter(
                address="user@extdomain.com", r_mailbox__isnull=True,
                r_alias__isnull=True)
            .exists()
        )
        self.assertTrue(admin.is_owner(fwd))
        dlist = models.Alias.objects.get(address="dlist@test.com")
        self.assertTrue(
            dlist.aliasrecipient_set
            .filter(r_mailbox=u1.mailbox).exists()
        )
        self.assertTrue(
            dlist.aliasrecipient_set.filter(address="user@extdomain.com")
            .exists()
        )
        self.assertTrue(admin.is_owner(dlist))

    def test_export(self):
        """Identity export matches the expected CSV content."""
        response = self.client.get(reverse("v2:identities-export"))
        expected_response = "account,admin,,,,True,SuperAdmins,,\r\naccount,admin@test.com,{PLAIN}toto,,,True,DomainAdmins,admin@test.com,10,test.com\r\naccount,admin@test2.com,{PLAIN}toto,,,True,DomainAdmins,admin@test2.com,10,test2.com\r\naccount,user@test.com,{PLAIN}toto,,,True,SimpleUsers,user@test.com,10\r\naccount,user@test2.com,{PLAIN}toto,,,True,SimpleUsers,user@test2.com,10\r\nalias,alias@test.com,True,user@test.com\r\nalias,forward@test.com,True,user@external.com\r\nalias,postmaster@test.com,True,test@truc.fr,toto@titi.com\r\n" # NOQA:E501
        received_content = force_str(response.content.strip()).split("\r\n")
        # Empty admin password because it is hashed using SHA512-CRYPT
        admin_row = received_content[0].split(",")
        admin_row[2] = ""
        received_content[0] = ",".join(admin_row)
        self.assertCountEqual(
            expected_response.strip().split("\r\n"),
            received_content
        )
class AliasViewSetTestCase(ModoAPITestCase):
    """Tests for the v2 alias API (validation, random address)."""

    @classmethod
    def setUpTestData(cls): # NOQA:N802
        """Create test data."""
        super().setUpTestData()
        factories.populate_database()

    def test_validate(self):
        """Unknown domain -> 400, existing alias -> 409, free address -> 204."""
        url = reverse("v2:alias-validate")
        data = {"address": "alias@unknown.com"}
        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 400)
        data = {"address": "alias@test.com"}
        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 409)
        # A conflict response carries the pk of the existing alias
        al_id = models.Alias.objects.get(address="alias@test.com").pk
        self.assertEqual(resp.json()["id"], al_id)
        data = {"address": "alias2@test.com"}
        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 204)

    def test_random_address(self):
        """The random-address endpoint returns an address."""
        url = reverse("v2:alias-random-address")
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("address", resp.json())
class UserAccountViewSetTestCase(ModoAPITestCase):
    """Tests for the authenticated user's own account endpoints."""

    @classmethod
    def setUpTestData(cls): # NOQA:N802
        """Create test data."""
        super().setUpTestData()
        factories.populate_database()
        # Authenticate as a domain administrator for these tests
        cls.da = core_models.User.objects.get(username="admin@test.com")
        cls.da_token = Token.objects.create(user=cls.da)

    def test_forward(self):
        """Forwarding can be set, kept with copies, then reduced."""
        self.client.credentials(
            HTTP_AUTHORIZATION="Token " + self.da_token.key)
        url = reverse("v2:account-forward")
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertNotIn("recipients", resp.json())
        data = {"recipients": "user@domain.ext"}
        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(self.da.mailbox.aliasrecipient_set.count(), 1)
        # keepcopies results in a second alias on the same address
        data = {"recipients": "user@domain.ext", "keepcopies": True}
        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            models.Alias.objects.filter(address=self.da.username).count(),
            2
        )
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(resp.json()["keepcopies"])
        # Disabling keepcopies removes the extra alias
        data = {"keepcopies": False}
        resp = self.client.post(url, data, format="json")
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(
            models.Alias.objects.filter(address=self.da.username).count(),
            1
        )
class AlarmViewSetTestCase(ModoAPITestCase):
    """Tests for the v2 alarm API (list, switch, delete, bulk delete)."""

    @classmethod
    def setUpTestData(cls): # NOQA:N802
        """Create test data."""
        super().setUpTestData()
        factories.populate_database()
        factories.AlarmFactory(
            domain__name="test.com", mailbox=None, title="Test alarm")
        # Token of a domain administrator with restricted permissions
        cls.da_token = Token.objects.create(
            user=core_models.User.objects.get(username="admin@test.com"))

    def test_list(self):
        """Only visible alarms are listed."""
        url = reverse("v2:alarm-list")
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(resp.json()["results"]), 1)

    def test_update_alarm(self):
        """Try updating alarm status and delete it afterward."""
        domain = models.Domain.objects.get(name="test.com")
        # Try performing action on restricted domains
        self.client.credentials(
            HTTP_AUTHORIZATION="Token " + self.da_token.key)
        domain = models.Domain.objects.get(name="test2.com")
        alarm_restricted = models.Alarm.objects.create(
            domain=domain, mailbox=None, title="Test alarm 2")
        alarm_restricted.save()
        url = reverse("v2:alarm-switch", args=[alarm_restricted.pk])
        resp = self.client.post(url)
        self.assertEqual(resp.status_code, 405)
        url = reverse("v2:alarm-detail", args=[alarm_restricted.pk])
        resp = self.client.delete(url)
        self.assertEqual(resp.status_code, 404)
        # Perform actions as SuperAdmin
        self.client.credentials(HTTP_AUTHORIZATION="Token " + self.token.key)
        alarm = models.Alarm.objects.create(
            domain=domain, mailbox=None, title="Test alarm 3")
        alarm.save()
        # Switch status of the alarm to close
        url = reverse("v2:alarm-switch", args=[alarm.pk])
        resp = self.client.patch(url, {"status": constants.ALARM_CLOSED})
        self.assertEqual(resp.status_code, 204)
        # Check actual status
        url = reverse("v2:alarm-detail", args=[alarm.pk])
        resp = self.client.get(url)
        self.assertEqual(resp.json()["status"], constants.ALARM_CLOSED)
        # Switch status back to open
        url = reverse("v2:alarm-switch", args=[alarm.pk])
        resp = self.client.patch(url, {"status": constants.ALARM_OPENED})
        self.assertEqual(resp.status_code, 204)
        # Check actual status
        url = reverse("v2:alarm-detail", args=[alarm.pk])
        resp = self.client.get(url)
        self.assertEqual(resp.json()["status"], constants.ALARM_OPENED)
        # Try to set an non-existant status
        url = reverse("v2:alarm-switch", args=[alarm.pk])
        resp = self.client.patch(url, {"status": 10})
        self.assertEqual(resp.status_code, 400)
        # Delete the alarm
        url = reverse("v2:alarm-detail", args=[alarm.pk])
        resp = self.client.delete(url)
        self.assertEqual(resp.status_code, 204)

    def test_bulk_delete(self):
        """Bulk delete validates ids and removes the given alarms."""
        url = reverse("v2:alarm-bulk-delete")
        resp = self.client.delete(url)
        self.assertEqual(resp.status_code, 400)
        # Non-integer ids are rejected
        resp = self.client.delete(f"{url}?ids[]=toto")
        self.assertEqual(resp.status_code, 400)
        alarm1 = factories.AlarmFactory(
            domain__name="test.com", mailbox=None, title="Test alarm")
        alarm2 = factories.AlarmFactory(
            domain__name="test.com", mailbox=None, title="Test alarm")
        resp = self.client.delete(f"{url}?ids[]={alarm1.pk}&ids[]={alarm2.pk}")
        self.assertEqual(resp.status_code, 204)
        with self.assertRaises(models.Alarm.DoesNotExist):
            alarm1.refresh_from_db()
        with self.assertRaises(models.Alarm.DoesNotExist):
            alarm2.refresh_from_db()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 28 22:35:20 2019
@author: andrewbartels1
"""
import time
def tic():
    #Homemade version of matlab tic and toc functions
    """Record the current time in a module global, read back by toc()."""
    global startTime_for_tictoc
    startTime_for_tictoc = time.time()
def toc():
    """Print the elapsed time since the last tic() call, MATLAB-style."""
    if 'startTime_for_tictoc' not in globals():
        # tic() was never called in this process
        print("Toc: start time not set")
        return
    elapsed = time.time() - startTime_for_tictoc
    print("Elapsed time is " + str(elapsed) + " seconds.")
|
# Multiplication-table ("tabuada") printer: repeatedly asks the user for a
# number and prints its 1..10 table; a negative number ends the program.
while True:
    num = int(input('Quer ver a tabuada de qual valor? '))
    if num < 0:
        break
    print('-' * 30)
    for mult in range(1, 11, 1):
        print(f'{num} x {mult} = {num * mult}')
    print('-' * 30)
print('PROGRAMA TABUADA ENCERRADO. Volte sempre!')
|
"""
Converts pre-trained word embedding from stanford into hdf5 file.
http://nlp.stanford.edu/projects/glove/
"""
import sys
import h5py
import hashlib
import numpy as np
f = h5py.File("data/word_embeddint.hdf5", "w")
for p in sys.argv[1:]:
with open(p) as word_file:
for text_line in word_file:
line = text_line.split()
key = line[0]
vec = np.array(map(lambda e: float(e), line[1:]))
normalized = hashlib.md5(key).hexdigest()
f.create_dataset(normalized, data=vec)
f.close();
|
from Bio.Align.Applications import MuscleCommandline
from Bio import AlignIO
from Bio.Phylo.Applications import PhymlCommandline
from Bio import Phylo
import pylab
from Bio import SeqUtils
from Bio.SeqUtils import CodonUsageIndices, CodonUsage
import os
from Bio import SeqIO
def align(infile, outfile, clustal):
    """Align the sequences of *infile* with MUSCLE, writing to *outfile*.

    *clustal* toggles CLUSTAL (clw) output format.
    """
    runner = MuscleCommandline(
        "tools/muscle3.8.31_i86win32.exe",
        input=infile, out=outfile, clw=clustal,
    )
    runner()
def infer_tree(clwfile, phyfile):
    """Convert a CLUSTAL alignment to relaxed PHYLIP and run PhyML on it.

    Uses the WAG amino-acid model with estimated alpha and no bootstrap.
    """
    AlignIO.convert(clwfile, "clustal", phyfile, "phylip-relaxed")
    runner = PhymlCommandline(
        "tools/PhyML_3.0/PhyML_3.0_win32.exe",
        input=phyfile, datatype='aa', model='WAG', alpha='e', bootstrap=-1,
    )
    out_log, err_log = runner()
def view_tree(phyfile):
    """Load the PhyML output tree for *phyfile* and display it.

    Prints an ASCII rendering first, then opens a matplotlib window.
    """
    tree = Phylo.read(phyfile + "_phyml_tree.txt", "newick")
    Phylo.draw_ascii(tree, column_width=300)
    Phylo.draw(tree, do_show=True, show_confidence=False)
def annotate(tree_file):
    """Placeholder for tree annotation; not implemented yet."""
    pass
def GC_content(fasta_file):
    # Print the average GC percentage of the sequences in fasta_file,
    # followed by the name of the first sequence (tab separated).
    sequences = SeqUtils.quick_FASTA_reader(fasta_file)
    GCs = [SeqUtils.GC(k[1]) for k in sequences]
##    for i in range(len(sequences)):
##        print str(GCs[i]) + '\t' + sequences[i][0]
    #print "AVERAGE: " + str(float(sum(GCs))/len(GCs))
    print str(float(sum(GCs))/len(GCs)) + '\t' + sequences[0][0]
def codons(CDS):
    # Count codon occurrences over every FASTA record in file CDS.
    # NOTE(review): this definition is shadowed by the second `codons`
    # definition below, so it is effectively dead code.
    usage = CodonUsage.CodonAdaptationIndex()
    CodonsDict = SeqUtils.CodonUsage.CodonsDict
    handle = open(CDS)
    usage.codon_count = CodonsDict.copy()
    for cur_record in SeqIO.parse(handle, "fasta"):
        # make sure the sequence is lower case
        if str(cur_record.seq).islower():
            dna_sequence = str(cur_record.seq).upper()
        else:
            dna_sequence = str(cur_record.seq)
        for i in range(0, len(dna_sequence), 3):
            codon = dna_sequence[i:i+3]
            if codon in usage.codon_count:
                usage.codon_count[codon] += 1
            else:
                # Reject sequences containing non-standard triplets
                raise TypeError("illegal codon %s in gene: %s" % (codon, cur_record.id))
    handle.close()
    return usage.codon_count
def codons(CDS):
    # Return per-codon frequencies for the raw DNA string CDS.
    # Unlike the first definition above, unknown triplets are silently
    # skipped and counts are normalised by the total number of codons.
    usage = CodonUsage.CodonAdaptationIndex()
    CodonsDict = SeqUtils.CodonUsage.CodonsDict
    #handle = open(CDS)
    usage.codon_count = CodonsDict.copy()
    print usage.codon_count
    #for cur_record in SeqIO.parse(handle, "fasta"):
    # make sure the sequence is lower case
##    if str(cur_record.seq).islower():
##        dna_sequence = str(cur_record.seq).upper()
##    else:
##        dna_sequence = str(cur_record.seq)
    dna_sequence = CDS
    print dna_sequence
    num_codons = len(dna_sequence)/3
    print num_codons
    for i in range(0, len(dna_sequence), 3):
        codon = dna_sequence[i:i+3]
        #print codon
        if codon.upper() in usage.codon_count:
            usage.codon_count[codon.upper()] += 1
        else:
            pass
            #raise TypeError("illegal codon %s in gene: %s" % (codon, cur_record.id))
    #handle.close()
    # Normalise raw counts into frequencies
    for i in usage.codon_count: usage.codon_count[i] = float(usage.codon_count[i])/num_codons
    #print usage.codon_count['CTT']
    #print sum([usage.codon_count[i] for i in usage.codon_count])
    return usage.codon_count
# Script entry: most pipeline steps are commented out; only tree viewing runs.
#align("data/set3/set3_blc_names.fasta", "data/set3/set3_blc_names.aln", True)
#infer_tree("data/set3/set3_blc_names.aln", "data/set3/set3_blc_names.phy")
view_tree("data/set3/set3_blc_names_pretty.phy")
#GC_content("data/set3/genomes/Streptococcus_anginosus_F0211.fna")
#"data/set3/CDS/Streptococcus_anginosus_F0211.PATRIC.ffn"
##CDS_list = os.listdir("data/set3/CDS")
##for i in range(len(CDS_list)):
##    print "\n \n"
##    print CDS_list[i][0:-11]
##    print codons("data/set3/CDS/"+CDS_list[i])
#genomes = os.listdir("data/set3/genomes")
#for i in range(len(genomes)):
#    codons("data/set3/genomes/"+genomes[i], "data/set3/CDS/"+CDS_list[i])
#CDS = "atgactaattttcaacaaacattgtcaacaattacagactacatcaaggctgacatctttccaggtgccagccttgcactttatgacggacaagattggcaggagcattatctaggaactcaggatggaactattccggtagttccaggacttacctatgatctagctagtgtttccaaggtagtcggagttgggaccctctgtcttttctacttgcaagcgggaaaattggacttggatgagaaactatcgacctattatcctgaagttgtggataagacgttaacccttcgtcagcttctttctcacagtagcggaattgatccttttattccaaatcgtgatgacttagaccaagctggcttgattgctgctattaatgccatcaaagttaaggctgataaacctttcttgtatacagatataaattttattctcctaggtttgatgcttgaaaaagtctctggacaaacactggataagctctttgactccgagatttttcaaccgtttggtatgtctgaaacacagtttggcccagttgaggtagctgttccaacagttaagggggttacgggagggactgttcatgatcccaaggctcgagttctcaaagaacatacaggttcagcaggtctcttttcaacactaaaagacttagagatttttgttaaccattatttgacggatgattttgctaaaaatatgacgcagaatatcagccaatccaacaaggaacgcagtgtcgcttgggacctccaaggcgagtggattcttcacactggttatacaggtacctttgtgcttatcaatatccctcgtcaacgtgcggcgattttcctcagtaatcgtacttactacaaggatgagagagctcaatggattaaggacagagatgttttaattgagataatgaagaaggagctgattcgtgaaacggtcgagtag"
#print codons(CDS)
|
import unreal
import AssetFunctions
# Each *_EXAMPLE function below demonstrates one AssetFunctions helper call.
# (Python 2 syntax: bare print statements.)
def showAssetsInContentBrowser_EXAMPLE():
    paths = ['/Game/Textures/MyTexture', '/Game/SkeletalMeshes/MySkeletalMesh', '/Game/Sounds/MySound']
    AssetFunctions.showAssetsInContentBrowser(paths)

def openAssets_EXAMPLE():
    paths = ['/Game/Textures/MyTexture', '/Game/SkeletalMeshes/MySkeletalMesh', '/Game/Sounds/MySound']
    AssetFunctions.openAssets(paths)

def createDirectory_EXAMPLE():
    operation_succeeded = AssetFunctions.createDirectory('/Game/MyDirectory')
    print operation_succeeded

def duplicateDirectory_EXAMPLE():
    operation_succeeded = AssetFunctions.duplicateDirectory('/Game/MyDirectory', '/Game/MyDirectory_Duplicated')
    print operation_succeeded

def deleteDirectory_EXAMPLE():
    operation_succeeded = AssetFunctions.deleteDirectory('/Game/MyDirectory')
    print operation_succeeded

def directoryExist_EXAMPLE():
    exist = AssetFunctions.directoryExist('/Game/MyDirectory')
    print exist

def renameDirectory_EXAMPLE():
    operation_succeeded = AssetFunctions.renameDirectory('/Game/MyDirectory', '/Game/MyDirectory_Renamed')
    print operation_succeeded

def duplicateAsset_EXAMPLE():
    operation_succeeded = AssetFunctions.duplicateAsset('/Game/MyAsset', '/Game/MyAsset_Duplicated')
    print operation_succeeded

def deleteAsset_EXAMPLE():
    operation_succeeded = AssetFunctions.deleteAsset('/Game/MyAsset')
    print operation_succeeded

def assetExist_EXAMPLE():
    exist = AssetFunctions.assetExist('/Game/MyAsset')
    print exist

def renameAsset_EXAMPLE():
    operation_succeeded = AssetFunctions.renameAsset('/Game/MyAsset', '/Game/MyAsset_Renamed')
    print operation_succeeded

def duplicateAssetDialog_EXAMPLE():
    operation_succeeded = AssetFunctions.duplicateAssetDialog('/Game/MyAsset', '/Game/MyAsset_Duplicated', True)
    print operation_succeeded
def renameAssetDialog_EXAMPLE():
operation_succeeded = AssetFunctions.duplicateAssetDialog('/Game/MyAsset', '/Game/MyAsset_Renamed', True)
print operation_succeeded
def saveAsset_EXAMPLE():
    operation_succeeded = AssetFunctions.saveAsset('/Game/MyAsset', True)
    print operation_succeeded

def saveDirectory_EXAMPLE():
    operation_succeeded = AssetFunctions.saveDirectory('/Game/MyDirectory', True, True)
    print operation_succeeded

def importMyAssets_EXAMPLE():
    # Build one import task per asset type; meshes/animations need options.
    texture_task = AssetFunctions.buildImportTask('C:/Path/To/Assets/Texture.TGA', '/Game/Textures')
    sound_task = AssetFunctions.buildImportTask('C:/Path/To/Assets/Sound.WAV', '/Game/Sounds')
    static_mesh_task = AssetFunctions.buildImportTask('C:/Path/To/Assets/StaticMesh.FBX', '/Game/StaticMeshes', AssetFunctions.buildStaticMeshImportOptions())
    skeletal_mesh_task = AssetFunctions.buildImportTask('C:/Path/To/Assets/SkeletalMesh.FBX', '/Game/SkeletalMeshes', AssetFunctions.buildSkeletalMeshImportOptions())
    animation_task = AssetFunctions.buildImportTask('C:/Path/To/Assets/Animation.FBX', '/Game/Animations', AssetFunctions.buildAnimationImportOptions('/Game/SkeletalMeshes/SkeletalMesh'))
    print AssetFunctions.executeImportTasks([texture_task, sound_task, static_mesh_task, skeletal_mesh_task])
    # Not executing the animation_task at the same time of the skeletal_mesh_task because it look like it does not work if it's the case. Pretty sure it's not normal.
    print AssetFunctions.executeImportTasks([animation_task])
# NOTE(review): WorldFunctions, PythonHelpers, QtFunctions and the bare
# helpers (getSelectedActors, selectActors, focusViewportOnActor,
# createGenericAsset) are used but never imported in this file — presumably
# provided by an enclosing environment; verify before running standalone.
def spawnBlueprintActor_EXAMPLE():
    path = '/Game/MyBlueprint'
    location = unreal.Vector(1000.0, 400.0, 0.0)
    rotation = unreal.Rotator(90.0, 0.0, 0.0)
    scale = unreal.Vector(1.0, 1.0, 5.0)
    properties = {'tags': ['MyFirstTag', 'MySecondTag'], 'hidden': False}
    print WorldFunctions.spawnBlueprintActor(path, location, rotation, scale, None, properties)

def executeSlowTask_EXAMPLE():
    quantity_steps_in_slow_task = 1000
    with unreal.ScopedSlowTask(quantity_steps_in_slow_task, 'My Slow Task Text ...') as slow_task:
        slow_task.make_dialog(True)
        for x in range(quantity_steps_in_slow_task):
            # NOTE(review): should_cancel_EXAMPLE looks like a typo for
            # slow_task.should_cancel() — confirm against the unreal API.
            if slow_task.should_cancel_EXAMPLE():
                break
            slow_task.enter_progress_frame(1, 'My Slow Task Text ... ' + str(x) + ' / ' + str(quantity_steps_in_slow_task))
            # Execute slow logic here
            print 'Executing Slow Task'

def cast_EXAMPLE():
    obj = unreal.load_asset('/Game/Textures/MyTexture')
    if PythonHelpers.cast(obj, unreal.Texture2D):
        print 'Cast Succeeded'
    else:
        print 'Cast Failed'

def spawnQtWindow_EXAMPLE():
    import QtWindowOne
    print QtFunctions.spawnQtWindow(QtWindowOne.QtWindowOne)

def getAllActors_EXAMPLE():
    print WorldFunctions.getAllActors(False, unreal.StaticMeshActor, 'MyTag', None)

def getSelectedActors_EXAMPLE():
    print getSelectedActors()

def selectActors_EXAMPLE():
    # NOTE(review): builds actors_to_select but never calls selectActors()
    # with it — the selection is never applied.
    all_actors = getAllActors()
    actors_to_select = []
    for x in range(len(all_actors)):
        if x % 2:
            actors_to_select.append(all_actors[x])

def clearActorSelection_EXAMPLE():
    # Calling selectActors() with no arguments clears the selection
    selectActors()

def focusAllViewportsOnSelectedActors_EXAMPLE():
    focusViewportOnActor(False)

def focusActiveViewportOnRandomActor_EXAMPLE():
    actors_in_world = unreal.GameplayStatics.get_all_actors_of_class(unreal.EditorLevelLibrary.get_editor_world(), unreal.Actor)
    random_actor_in_world = actors_in_world[random.randrange(len(actors_in_world))]
    focusViewportOnActor(True, random_actor_in_world)

def createGenericAsset_EXAMPLE():
    base_path = '/Game/GenericAssets/'
    generic_assets = [
        [base_path + 'sequence', unreal.LevelSequence, unreal.LevelSequenceFactoryNew()],
        [base_path + 'material', unreal.Material, unreal.MaterialFactoryNew()],
        [base_path + 'world', unreal.World, unreal.WorldFactory()],
        [base_path + 'particle_system', unreal.ParticleSystem, unreal.ParticleSystemFactoryNew()],
        [base_path + 'paper_flipbook', unreal.PaperFlipbook, unreal.PaperFlipbookFactory()],
        [base_path + 'data_table', unreal.DataTable, unreal.DataTableFactory()], # Will not work
    ]
    for asset in generic_assets:
        print createGenericAsset(asset[0], True, asset[1], asset[2])
# Cpp ########################################################################################################################################################################################
# Examples backed by C++-exposed helpers (see section marker above).
def getSelectedAssets_EXAMPLE():
    print AssetFunctions.getSelectedAssets()

def setSelectedAssets_EXAMPLE():
    asset_paths = ['/Game/Textures/TX_LightSpotMove', '/Game/SkeletalMeshes/TutorialTPP_Mat', '/Game/Sounds/S_CompileSuccess', '/Game/Map/MyNewLevel']
    AssetFunctions.setSelectedAssets(asset_paths)

def getSelectedFolders_EXAMPLE():
    print AssetFunctions.getSelectedFolders()

def setSelectedFolders_EXAMPLE():
    folder_paths = ['/Game/Textures', '/Game/SkeletalMeshes', '/Game/Sounds', '/Game/Map']
    AssetFunctions.setSelectedFolders(folder_paths)

def getAllOpenedAssets_EXAMPLE():
    print AssetFunctions.getAllOpenedAssets()

def closeAssets_EXAMPLE():
    asset_objects = AssetFunctions.getAllOpenedAssets()
    AssetFunctions.closeAssets(asset_objects)

def setDirectoryColor_EXAMPLE():
    base_path = '/Game/PythonGenerated/'
    path = base_path + 'BasicLinearColor'
    color = unreal.LinearColor(0, 1, 1, 1)
    AssetFunctions.setDirectoryColor(path, color)
    AssetFunctions.createDirectory(path) # Note: Only call this line if the folder is not already created

def setDirectoryColorGradient_EXAMPLE():
    base_path = '/Game/PythonGenerated/'
    for x in range(100, 400):
        # Get Gradient Color (piecewise r->g->b ramp over 300 steps)
        z = x - 100
        if z < 100:
            r = 1.0 - z / 100.0
            g = 0.0 + z / 100.0
            b = 0.0
        elif z < 200:
            r = 0.0
            g = 1.0 - (z - 100) / 100.0
            b = 0.0 + (z - 100) / 100.0
        else:
            r = 0.0 + (z - 200) / 100.0
            g = 0.0
            b = 1.0 - (z - 200) / 100.0
        color = unreal.LinearColor(r, g, b, 1)
        # Set Directory Color
        path = base_path + str(x)
        AssetFunctions.setDirectoryColor(path, color)
        AssetFunctions.createDirectory(path) # Note: Only call this line if the folder is not already created

def executeConsoleCommand_EXAMPLE():
    console_commands = ['r.ScreenPercentage 0.1', 'r.Color.Max 6', 'stat fps', 'stat unit']
    for x in console_commands:
        EditorFunctions.executeConsoleCommand(x)

def getAllProperties_EXAMPLE():
    obj = unreal.Actor()
    object_class = obj.get_class()
    for x in PythonHelpers.getAllProperties(object_class):
        # Right-align the property name to 50 characters before printing
        y = x
        while len(y) < 50:
            y = ' ' + y
        print y + ' : ' + str(obj.get_editor_property(x))

def setViewportLocationAndRotation_EXAMPLE():
    viewport_index = getActiveViewportIndex()
    setViewportLocationAndRotation(viewport_index, unreal.Vector(0.0, 0.0, 0.0), unreal.Rotator(0.0, 90.0, 0.0))

def snapViewportToActor_EXAMPLE():
    actors_in_world = unreal.GameplayStatics.get_all_actors_of_class(unreal.EditorLevelLibrary.get_editor_world(), unreal.Actor)
    random_actor_in_world = actors_in_world[random.randrange(len(actors_in_world))]
    viewport_index = getActiveViewportIndex()
    snapViewportToActor(viewport_index, random_actor_in_world)
|
# Generated by Django 3.1 on 2020-08-18 08:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make several Post fields optional (allow blank/null values)."""

    dependencies = [
        ('posts', '0002_auto_20200817_1946'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='channelUsername',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='post',
            name='image',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='post',
            name='likeCount',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='post',
            name='parentId',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='post',
            name='senderUsername',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
    ]
|
from unittest import TestCase
from phi import field
from phi.field import Noise, CenteredGrid, StaggeredGrid
from phi.field._point_cloud import distribute_points
from phi.physics import advect
def _test_advection(adv):
    """Check that advecting by zero time (or by a zero velocity over one
    time unit) leaves noise-filled centered and staggered grids unchanged."""
    s = CenteredGrid(Noise(), x=4, y=3)
    v = CenteredGrid(Noise(vector='x,y'), x=4, y=3)
    field.assert_close(s, adv(s, v, 0), adv(s, v * 0, 1))
    sv = StaggeredGrid(Noise(), x=4, y=3)
    field.assert_close(s, adv(s, sv, 0), adv(s, sv * 0, 1))
    field.assert_close(sv, adv(sv, sv, 0), adv(sv, sv * 0, 1))
class TestAdvect(TestCase):
    """Run the zero-advection sanity check on every advection scheme."""

    def test_advect(self):
        _test_advection(advect.advect)

    def test_semi_lagrangian(self):
        _test_advection(advect.semi_lagrangian)

    def test_mac_cormack(self):
        _test_advection(advect.mac_cormack)

    def test_advect_points_euler(self):
        """Point clouds advected by zero velocity/time stay in place."""
        v = distribute_points(1, points_per_cell=2, x=4, y=3) * (1, -1)
        field.assert_close(v, advect.points(v, v, 0), advect.points(v, v*0, 0))

    def test_advect_points_rk4(self):
        """Same check using the RK4 and finite-RK4 integrators."""
        v = distribute_points(1, points_per_cell=2, x=4, y=3) * (1, -1)
        field.assert_close(v, advect.points(v, v, 0, advect.rk4), advect.points(v, v*0, 0, advect.rk4))
        field.assert_close(v, advect.points(v, v, 0, advect.finite_rk4), advect.points(v, v*0, 0, advect.finite_rk4))
|
# NAME: Derek Haugen
# CLASS: Compiler Construction - Hamer
# ASSIGN#: Assignment 1
# DESC: This is a simple lexical analyzer written to tokenize
# A subset of the C language. The lexer primarily utilizes
# regular expressions to match tokens from the input stream.
#
import re, sys, os
#These should be enumerated types but I have declared them as strings
#so that they can also be used for the print statements. They serve
#the same purpose as the enumeration.
# Token tag names (strings doubling as display labels; see file header note).
AssignT = 'assignop'
MulT = 'mulop'
AddT = 'addop'
RelationT = 'relop'
RParent = 'rightparT'
LParent = 'leftparT'
RBracket = 'rightbracketT'
LBracket = 'leftbracketT'
RBrace = 'rightbraceT'
LBrace = 'leftbraceT'
CommaT = 'commaT'
PeriodT = 'periodT'
QuoteT = 'quoteT'
Reserved = 'reservedT'
Value = 'numT'
ValueR = 'numRealT'
Identifier = 'idT'
Semicolon = "semiT"
IF = 'ifT'
ELSE = 'elseT'
WHILE = 'whileT'
INT = 'integerT'
FLOAT = 'floatT'
CHAR = 'charT'
BREAK = 'breakT'
CONTINUE = 'continueT'
VOID = 'voidT'
STRLITERAL = "strliteralT"
EOF="eofT"
NewLine = "Increment Line Count"
Comment = "Increment with how many newlines found"
Const = 'constantT'
# Current source line, maintained by lexer()
Linecount = 1

# (pattern, tag) pairs tried in order; the FIRST pattern matching at the
# current position wins, so ordering is significant (e.g. two-character
# relational operators must precede single-character ones).
tokenFormats = [
    (r'[ \t]+', None), #Whitespace
    (r'[\n]',NewLine),
    (r'\/\*(\*(?!\/)|[^*])*\*\/', Comment), #Comment
    (r'\(', LParent),
    (r'\)', RParent),
    (r'\[', LBracket),
    (r'\]', RBracket),
    (r'\{', LBrace),
    (r'\}', RBrace),
    (r'\,', CommaT),
    (r'"{1}.*?"', STRLITERAL),
    # Real literals must be tried BEFORE the period token: the pattern's
    # optional integer part ([0-9]*) shows literals like ".5" are intended
    # to be reals, but with PeriodT first they lexed as period + int.
    (r'[0-9]*\.[0-9]+', ValueR),
    (r'\.', PeriodT),
    (r'\+', AddT),
    (r'-', AddT),
    (r'\|\|', AddT),
    (r';', Semicolon),
    (r'\*', MulT),
    (r'/', MulT),
    (r'\%', MulT),
    (r'&&', MulT),
    (r'==', RelationT),
    (r'!=', RelationT),
    (r'<=', RelationT),
    (r'>=', RelationT),
    (r'>', RelationT),
    (r'<', RelationT),
    (r'\=', AssignT),
    (r'if(?=\s|\()', IF),
    (r'else(?=\s|\{)', ELSE),
    (r'while(?=\s|\()', WHILE),
    (r'float(?=\s)', FLOAT),
    (r'int(?=\s)', INT),
    (r'char(?=\s)', CHAR),
    (r'break(?=\s|\;)', BREAK), #watch that semicolon
    (r'continue(?=\s|\;)', CONTINUE),
    (r'void(?=\s)', VOID),
    (r'[0-9]+', Value),
    (r'[A-Za-z][A-Za-z0-9_]*', Identifier),
]
#handles the reading from the file to get characters for analysis
def Lexer(inFile):
    """Read the whole source file and return the token list from lexer().

    inFile -- path of the file to tokenize.
    """
    # Use a context manager so the handle is closed even if read() raises
    # (the original opened the file without ever guaranteeing a close).
    with open(inFile) as source:
        inLines = source.read()
    return lexer(inLines, tokenFormats)
#Main function that takes the list of regular expressions and uses them
#to match for expected tokens
def lexer(input, tokenFormats):
    """Tokenize *input* using the ordered (pattern, tag) pairs in *tokenFormats*.

    Returns a list of (lexeme, tag, attribute, line-number) tuples,
    terminated by an EOF token. Unknown characters are reported on stderr
    and skipped. Updates the module-global Linecount as newlines and
    multi-line comments are consumed.
    """
    global Linecount
    # Compile each pattern once instead of on every character position
    # (the original re-compiled all ~40 patterns at every input offset).
    compiled = [(re.compile(pattern), tag) for pattern, tag in tokenFormats]
    pos = 0
    tokens = []
    while pos < len(input):
        match = None
        for regex, tag in compiled:
            match = regex.match(input, pos)
            if match:
                lexeme = match.group(0)
                if tag:
                    if tag == Identifier and len(str(lexeme)) > 27:
                        # Identifiers are capped at 27 characters.
                        sys.stderr.write('Illegal length for identifier: %s\n' % lexeme)
                        break
                    if tag == NewLine:
                        Linecount = Linecount + 1
                        break
                    if tag == Comment:
                        # A block comment may span lines; count the newlines it swallowed.
                        Linecount = Linecount + str(lexeme).count('\n')
                        break
                    attr = checkForAttribute(lexeme, tag)
                    tokens.append((lexeme, tag, attr, Linecount))
                    break
                else:
                    # Whitespace (tag None): consume silently.
                    break
        if not match:
            sys.stderr.write('Illegal or unknown character: %s\n' % input[pos])
            pos = pos + 1
        else:
            pos = match.end(0)
    tokens.append(("", EOF, "", Linecount))
    return tokens
#function to print found token
def printToken(token, lines):
    """Print one token row; after 20 rows, pause, clear the screen and
    re-print the column header before the row.

    token -- (lexeme, tag, attribute, linenumber) tuple.
    lines -- rows printed since the last header.
    Returns the updated row count (0 after a screen refresh).
    """
    row = "{0:20}{1:15}{2:10}{3:10}"
    if lines > 20:
        # Page is full: wait for the user, wipe the console, restart the table.
        input("Press Enter To Continue...")
        os.system('cls' if os.name == 'nt' else 'clear')
        print(row.format("Lexeme", "Token", "Attribute", "Linenumber"))
        print(row.format(token[0], token[1], token[2], token[3]))
        return 0
    print(row.format(token[0], token[1], token[2], token[3]))
    return lines + 1
#function that checks if a specific
#token has an attribute associated
def checkForAttribute(val, tag):
    """Return the attribute a token carries: the numeric value for integer
    and real tokens, the raw text for string literals, "" otherwise."""
    converters = {Value: int, ValueR: float, STRLITERAL: lambda text: text}
    convert = converters.get(tag)
    return convert(val) if convert is not None else ""
|
#!/usr/bin/env python
import sys
import groovesparkb
from twisted.internet import reactor, defer
@defer.inlineCallbacks
def main(token):
gs = groovespark.GroovesharkAPI()
yield gs.initialize()
result = yield gs.send('getSongFromToken', dict(token=token), "more.php")
print result['SongID']
reactor.stop()
if __name__ == "__main__":
main(*sys.argv[1:])
reactor.run()
|
####################################################
##
## Projects suggested by CupOfCode01
## Name Generator using random, string
##
####################################################
import random, string, pprint, sys
#### quick create random string lowercase ascii of user selectable length
# length = input("how many letters?")
# name = ""
# for i in range(int(length)):
# name += random.choice(string.ascii_lowercase)
# print(name)
#
#
# #### generate variable number of names where user can select vowels and consonants
# def generator(length):
#
# vowel = "aeiou"
# cons = "bcdfghjklmnpqrstvwxyz"
# name = ""
#
# while length > 0:
# type = input("Do you want a vowel or a consonant? (v, c)")
# if type == "v":
# choice = random.choice(vowel)
# print("you got", choice)
# name += choice
# length -= 1
# elif type == "c":
# choice = random.choice(cons)
# print("you got", choice)
# name += choice
# length -= 1
# else:
# print("That's not a letter. Please try again")
# return name
#
#
# length = input("How long a name do you want: ")
# name = generator(int(length))
#
# print("\nThe name you generated is", name.capitalize())
#
#
# #### user can select number of names generated and provide a mask for vowels and consonants
# #### e.g. cvvvccv
def nameGen(length, number, mask=""):
    """Generate *number* random names following *mask*.

    Mask characters: 'v' -> random vowel, 'c' -> random consonant,
    'l' -> any random lowercase letter; any other character is copied
    verbatim. The generated length is len(mask); *length* is unused and
    kept only for backward compatibility with existing callers.
    Returns a list of capitalized names.
    """
    vowels = "aeiou"
    consonants = "bcdfghjklmnpqrstvwxyz"
    names = []
    for _ in range(int(number)):
        name = ""
        # Bug fix: the inner loop variable shadowed the outer loop's `i`;
        # use a distinct name for the mask character.
        for ch in mask:
            if ch == "v":
                name += random.choice(vowels)
            elif ch == "c":
                name += random.choice(consonants)
            elif ch == "l":
                name += random.choice(string.ascii_lowercase)
            else:
                name += ch
        names.append(name.capitalize())
    return names
# Interactive driver: keep prompting for masks until the user declines.
while True:
    number = input("How many suggestions do you want?: ")
    mask = input("Enter a mask where 'c' = consonants, 'v' = vowels, and 'l'= a random letter\ne.g. vcvvclson\n: ")
    print("\nThis will create", str(number), "names that are", str(len(mask)), "letters long.")
    correct = input("Is this correct?")
    if correct.lower() not in ("y", "yes"):
        break
    pprint.pprint(nameGen(len(mask), number, mask))
    print("\nYou can keep generating more names,\nor press CTRL+C to end the program\n")
#### Generate a 5 letter name where user can specify a letter or v=vowel, c=consonant, l=random letter
# def userinput(length):
# vowel = "aeiou"
# cons = "bcdfghjklmnpqrstvwxyz"
# name = ""
#
# for i in range(int(length)):
# print("Chose a letter... \n'v' for vowel, 'c' for consonant, 'l' for any other letter")
# letter = input()
# if letter.lower() == "v":
# letter = random.choice(vowel)
# name += letter
# elif letter.lower() == "c":
# letter = random.choice(cons)
# name += letter
# elif letter.lower() == "l":
# letter = random.choice(string.ascii_lowercase)
# name += letter
# else:
# name += letter
# return name
#
# length = input("how long a name do you want?\n:")
# print(userinput(length))
|
import time
# Hard dependencies: tell the user what to install and exit if missing.
# Catch only ImportError -- a bare except would also swallow Ctrl-C and
# genuine errors raised while importing.
try:
    import mysql.connector
except ImportError:
    print("mysql-connector-python (installation reqd. - yes)")
    time.sleep(3)
    quit()
try:
    import stdiomask
except ImportError:
    print("stdiomask (installation reqd. - yes)")
    time.sleep(3)
    quit()
import random
# Usage reminder printed whenever update() rejects a malformed query tuple.
syner = ("Syntax for updates : updates=[(<table1>,<targetcolumn1>,<newdata1 (int or string)>,"
         "<conditioncolumn1 (can be blank by entering '')>,<pattern1> (can be blank by entering '')),\n"
         "          (<table2>,<targetcolumn2>,<newdata2 (int or string)>,"
         "<conditioncolumn2 (can be blank by entering '')>,<pattern2> (can be blank by entering '')),\n"
         "          ...(<table'n'>,<targetcolumn'n'>,<newdata'n' (int or string)>,"
         "<conditioncolumn'n'  (can be blank by entering '')>,<pattern'n'> (can be blank by entering ''))]")
def help():
    """Print the URL of the sqlops usage documentation.

    Note: intentionally shadows the builtin help() within this module.
    """
    link = "\nLink : https://github.com/siddhanth78/interact_module/blob/master/sqlops.txt\n"
    print(link)
def login(sqlhost="localhost",sqluser="root",sqlpass="",sqldb="",dbtable="credentials"):
    """Interactively prompt for a username/password and validate them
    against the *dbtable* table of MySQL database *sqldb*.

    Returns (m_id, username, password, valid) where valid is 1 on a match
    and 0 otherwise. If sqldb is blank, validation is skipped entirely and
    valid stays 0.

    NOTE(review): passwords are stored and compared in plain text, and the
    table name is interpolated into the SQL string (injection risk) --
    only use with trusted configuration.
    """
    usern = ""
    passw = ""
    valid = 0
    mid=''
    # Re-prompt until a non-blank username is entered.
    while True:
        usern = input("Username : ")
        if usern.strip() == "":
            print("\nUsername must be filled.\n")
            continue
        break
    # Re-prompt until a non-blank password is entered (input is masked).
    while True:
        passw = stdiomask.getpass(prompt="Password : ")
        if passw.strip() == "":
            print("\nPassword must be filled.\n")
            continue
        break
    if sqldb.strip()=="":
        pass
    else:
        try:
            mydb = mysql.connector.connect(
                host = sqlhost.strip(),
                user = sqluser.strip(),
                passwd = sqlpass,
                database = sqldb.strip()
            )
            cursor = mydb.cursor()
        except:
            print("\nError. Couldn't connect to database. Probably because database doesn't exist, invalid credentials or invalid database name.\n")
            valid = 0
        else:
            # Make sure the credentials table exists, then scan every row
            # for a matching (username, password) pair. The for/else prints
            # the failure message only when no row matched.
            cursor.execute(f"create table if not exists {dbtable.strip()} (m_id varchar(5) not null unique,Username varchar(20) not null unique , Password varchar(20) not null unique)")
            cursor.execute(f"select * from {dbtable.strip()}")
            for i in cursor:
                mid,us,pa = i
                if usern == us and passw == pa:
                    print("\nCredentials valid.\n")
                    valid = 1
                    break
            else:
                print("\nInvalid credentials.\n")
                valid = 0
    return(mid , usern , passw ,valid)
def select(sqlhost="localhost",sqluser="root",sqlpass="",sqldb="",dbtable="",reqcol="*",wherecol="",pattern="",multicond=[]):
    """Run a SELECT and return the matching rows as a list of tuples
    (or None after printing an error message).

    Three modes:
      * no condition            -- wherecol=="" and multicond==[]
      * single LIKE condition   -- wherecol + pattern
      * several LIKE conditions -- multicond=[(column, pattern), ...], ANDed

    NOTE(review): identifiers and patterns are interpolated into the SQL
    text, so this is open to SQL injection -- only use with trusted input.
    """
    itemlist = []
    try:
        mydb = mysql.connector.connect(
            host = sqlhost.strip(),
            user = sqluser.strip(),
            passwd = sqlpass,
            database = sqldb.strip()
        )
        cursor = mydb.cursor()
    except Exception:
        print("\nError. Couldn't connect to database. Probably because database doesn't exist, invalid credentials or invalid database name.\n")
        return
    if wherecol.strip()=="" and multicond==[]:
        # Unconditional select.
        try:
            cursor.execute(f"select {reqcol.strip()} from {dbtable.strip()}")
        except Exception:
            print("\nError. Check your arguments again.\n")
            return
        for item in cursor:
            itemlist.append(item)
        return itemlist
    elif wherecol.strip()!="" and multicond==[]:
        # Single LIKE condition; a pattern is mandatory.
        if pattern.strip()=="":
            print("\nPattern required.\n")
            return
        try:
            cursor.execute(f"select {reqcol.strip()} from {dbtable.strip()} where {wherecol.strip()} like '{pattern}'")
        except Exception:
            print("\nError. Check your arguments again.\n")
            return
        for item in cursor:
            itemlist.append(item)
        return itemlist
    elif wherecol.strip()=="" and multicond!=[]:
        # Multiple LIKE conditions joined with AND; the bare pattern
        # argument must not also be supplied.
        if pattern.strip()!="":
            print("\nPattern not required.\n")
            return
        try:
            sqlbase = f"select {reqcol.strip()} from {dbtable.strip()} where "
            com = ""
            for n, (where, patt) in enumerate(multicond, start=1):
                # Bug fix: patterns are now quoted exactly like the
                # single-condition branch (they previously went in bare
                # when more than one condition was given).
                com = com + where.strip() + f" like '{patt}'"
                if n < len(multicond):
                    com = com + " and "
            # Bug fix: this line referenced an undefined name `sql`
            # (NameError at runtime); execute the assembled statement.
            cursor.execute(sqlbase + com)
        except Exception:
            print("\nError. Check your arguments again.\n")
            return
        for item in cursor:
            itemlist.append(item)
        return itemlist
def insert(sqlhost="localhost",sqluser="root",sqlpass="",sqldb="",dbtable="",newdata=[]):
    """Insert each row of *newdata* (a list of iterables) into *dbtable*.

    Every element of a row is quoted as a string in the generated
    "insert into ... values (...)" statement. Errors are reported per
    1-based query number and the batch continues. Returns None.

    NOTE(review): values and the table name are interpolated directly into
    the SQL text (injection risk), and every value is sent quoted as a
    string -- the server is relied on to coerce numeric columns.
    """
    if isinstance(newdata , list) == False:
        print("ArgTypeError : multi_update(<string> , <string> , <string> , <list>)")
        return
    try:
        mydb = mysql.connector.connect(
            host = sqlhost.strip(),
            user = sqluser.strip(),
            passwd = sqlpass,
            database = sqldb.strip()
        )
        cursor = mydb.cursor()
    except:
        print("\nError. Couldn't connect to database. Probably because database doesn't exist, invalid credentials or invalid database name.\n")
        return
    else:
        number = 1  # 1-based query counter used in the error messages
        for new in newdata:
            if str(new).strip() == "":
                # Only catches an empty entry such as ""; a non-empty tuple
                # always stringifies to non-blank text.
                print(f"\nError in query {number}. New data can't be blank.\n")
                number+=1
                continue
            else:
                # Build a comma-separated, quoted value list from the row.
                ndat = ""
                for ne in new:
                    if ndat=="":
                        ndat=ndat+f"'{str(ne)}'"
                    else:
                        ndat=ndat+","+f"'{str(ne)}'"
                try:
                    cursor.execute(f"insert into {dbtable.strip()} values ({ndat.strip()})")
                    mydb.commit()
                except:
                    print(f"\nError in query {number}. Check your arguments again.\n")
                    number+=1
                    continue
                else:
                    number+=1
                    continue
def exec(sqlhost="localhost",sqluser="root",sqlpass="",comm=[]):
    """Execute each raw SQL statement in *comm* against the server.

    No database is selected and nothing is committed here, so data-changing
    statements must handle their own commit semantics. Errors are reported
    per 1-based query number and execution continues. Returns None.

    NOTE: intentionally shadows the builtin exec() within this module.
    """
    try:
        mydb = mysql.connector.connect(
            host = sqlhost.strip(),
            user = sqluser.strip(),
            passwd = sqlpass,
        )
        cursor = mydb.cursor()
    except:
        print("\nError. Couldn't connect to server. Probably invalid credentials.\n")
        return
    else:
        no=1  # 1-based query counter for error reporting
        for c in comm:
            try:
                cursor.execute(c)
            except:
                print(f"\nError in query {no}.\n")
                no+=1
            else:
                no+=1
def update(sqlhost="localhost",sqluser="root",sqlpass="",sqldb="",updates=[]):
    """Apply a batch of UPDATE queries.

    Each entry of *updates* is either
      (table, column, newdata)                     -- update every row, or
      (table, column, newdata, wherecol, pattern)  -- rows WHERE wherecol LIKE pattern.
    Errors are reported per 1-based query number and the batch continues.
    Returns None.

    NOTE(review): identifiers and data are interpolated into the SQL text,
    so this is open to SQL injection -- only use with trusted input.
    (updates=[] as a default is safe here: the list is never mutated.)
    """
    global syner
    if isinstance(updates , list) == False:
        print("ArgTypeError : multi_update(<string> , <string> , <string> , <list>)")
        return
    try:
        mydb = mysql.connector.connect(
            host = sqlhost.strip(),
            user = sqluser.strip(),
            passwd = sqlpass,
            database = sqldb.strip()
        )
        cursor = mydb.cursor()
    except Exception:
        print("\nError. Couldn't connect to database. Probably because database doesn't exist, invalid credentials or invalid database name.\n")
        return
    number = 1
    for up in updates:
        # Bug fix: a tuple whose length is neither 3 nor 5 previously fell
        # through the unpacking silently and reused stale locals; reject it.
        if len(up) not in (3, 5):
            print(f"\nError in query {number}. Probably missing arguments or invalid data.\n")
            print(syner)
            number+=1
            continue
        if len(up)==3:
            dbtable,reqcol,newdata = up
            wherecol,pattern = "",""
        else:
            dbtable,reqcol,newdata,wherecol,pattern = up
        # Bug fix: the original tested str(updates) -- the whole batch --
        # so a blank newdata was never caught. Test this entry's value.
        if str(newdata).strip() == "":
            print(f"\nError in query {number}. New data can't be blank.\n")
            print(syner)
            number+=1
            continue
        if len(up)==3:
            try:
                cursor.execute(f"update {dbtable.strip()} set {reqcol.strip()} = '{newdata}'")
                mydb.commit()
            except Exception:
                print(f"\nError in query {number}. Check your arguments again.\n")
                print(syner)
        else:
            if pattern.strip()=="":
                print(f"\nError in query {number}. Pattern required.\n")
                print(syner)
                number+=1
                continue
            # Bug fix: the error handling around this execute was commented
            # out, so a failed UPDATE raised instead of reporting the query.
            try:
                cursor.execute(f"update {dbtable.strip()} set {reqcol.strip()} = '{newdata}' where {wherecol.strip()} like '{pattern}'")
                mydb.commit()
            except Exception:
                print(f"\nError in query {number}. Check your arguments again.\n")
                print(syner)
        number+=1
def sign_up(sqlhost="localhost",sqluser="root",sqlpass="",sqldb="",dbtable="credentials"):
    """Interactively register a new user in the *dbtable* table of *sqldb*.

    Prompts for a username and a password (entered twice for confirmation),
    generates a random 5-character member id of the form L#L## (letter,
    digit, letter, two digits), and inserts the row. Returns
    (m_id, username, password, valid) with valid=1 on success, 0 otherwise.
    If sqldb is blank, registration is skipped entirely and valid stays 0.

    NOTE(review): passwords are stored in plain text and the table name is
    interpolated into the SQL string (injection risk). The m_id is random
    but not checked for uniqueness before the INSERT; a collision surfaces
    as the generic registration error.
    """
    usern = ""
    passw = ""
    valid = 0
    mid=''
    # Re-prompt until a non-blank username is entered.
    while True:
        usern = input("Username : ")
        if usern.strip() == "":
            print("\nUsername must be filled.\n")
            continue
        break
    # Re-prompt until a non-blank password is entered (input is masked).
    while True:
        passw = stdiomask.getpass(prompt="Password : ")
        if passw.strip() == "":
            print("\nPassword must be filled.\n")
            continue
        break
    # Confirmation loop: the second entry must match the first exactly.
    while True:
        repassw = stdiomask.getpass(prompt="Confirm password : ")
        if repassw.strip() == "":
            print("\nPassword must be filled.\n")
            continue
        if repassw != passw:
            print("\nPasswords do not match.\n")
            continue
        elif repassw == passw:
            break
    if sqldb.strip()=="":
        pass
    else:
        try:
            mydb = mysql.connector.connect(
                host = sqlhost.strip(),
                user = sqluser.strip(),
                passwd = sqlpass,
                database = sqldb.strip()
            )
            cursor = mydb.cursor()
        except:
            print("\nError. Couldn't connect to database. Probably because database doesn't exist, invalid credentials or invalid database name.\n")
            valid = 0
        else:
            try:
                # Build the random member id: letter, 1 digit, letter, 2 digits.
                mid=""
                no1=random.randint(0,9)
                no2=random.randint(10,99)
                ch1=random.randint(65,90)
                ch2=random.randint(65,90)
                mid=str(chr(ch1))+str(no1)+str(chr(ch2))+str(no2)
                print(f"\nYour m_id : {mid}\n")
                cursor.execute(f"create table if not exists {dbtable.strip()} (m_id varchar(5) not null unique,Username varchar(20) not null , Password varchar(20) not null unique)")
                cursor.execute(f"insert into {dbtable.strip()} values('{mid}','{usern}','{passw}')")
                mydb.commit()
            except:
                print("\nError. Couldn't register. Possibly because username/password/m_id already exists or invalid username/password (No special characters other than '_' allowed).\n")
                valid = 0
            else:
                print("\nYou have been registered. Welcome.\n")
                valid = 1
    return(mid , usern , passw ,valid)
|
# Fill n with the cycle 0..t-1 repeated until it holds exactly 1000 items,
# then print every element as "N[i] = value".
n = []
t = int(input())
# Bug fix: with t <= 0 the inner for-loop never appended anything, so the
# outer while spun forever. Guard against non-positive input.
if t > 0:
    while len(n) < 1000:
        for c in range(0, t):
            if len(n) < 1000:
                n.append(c)
for i in range(len(n)):
    print('N[{}] = {}'.format(i, n[i]))
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 29 22:44:40 2020
@author: HP

Read three numbers and print the greatest of them.
"""
a=float(input("enter the first number:"))
b=float(input("enter the second number:"))
c=float(input("enter the third number:"))
# Bug fix: the original used strict > comparisons, so a tie between the two
# largest values (e.g. 5, 5, 1) fell through to the wrong branch. Using >=
# handles ties. Also fixed the "greather" typo in the messages.
if(a>=b and a>=c):
    print("first number is greater",a)
elif(b>=c and b>=a):
    print("second number is greater",b)
else:
    print("the third number is greater",c)
|
# NOTE: Python 2 script (raw_input / print statements).
# Reads a string of digits, shows them sorted and reverse-sorted, then
# prints the product of the first four digits.
vvod = raw_input("input 4 numbers: ")
l = str(vvod)  # raw_input already returns a string; kept for clarity
# sorted() on a string yields a list of its characters in ascending order.
sort = sorted(l)
print "sort of numbers: {}".format(sort)
a = sort[::-1]  # reversed copy of the sorted character list
print "revers of sort: {}".format(a)
# Product of the first four input characters interpreted as digits.
# NOTE(review): assumes at least 4 numeric characters were entered;
# anything else raises IndexError/ValueError.
res = int(l[0])*int(l[1])*int(l[2])*int(l[3])
print "mnozenna: {}".format(res)
|
from string import ascii_uppercase as alph
def caeser(message, key):
    """Encrypt *message* with a Caesar shift of *key*.

    Output is upper-case; letters are rotated by key (mod 26) and any
    non-alphabetic character passes through unchanged.
    """
    encrypted = []
    for ch in message.upper():
        if ch.isalpha():
            encrypted.append(alph[(alph.index(ch) + key) % 26])
        else:
            encrypted.append(ch)
    return ''.join(encrypted)
'''
You have invented a time-machine which has taken you back to ancient Rome. Caeser is impressed with
your programming skills and has appointed you to be the new information security officer.
Caeser has ordered you to write a Caeser cipher to prevent Asterix and Obelix from reading his emails.
A Caeser cipher shifts the letters in a message by the value dictated by the encryption key. Since Caeser's
emails are very important, he wants all encryptions to have upper-case output, for example:
If key = 3 "hello" -> KHOOR If key = 7 "hello" -> OLSSV
Input will consist of the message to be encrypted and the encryption key.
'''
|
"""Construct a profile with two hosts for testing ping
Instructions:
Wait for the profile to start,
run setupHost.sh on node1,
start additional monitoring if desired,
run experiment on node1,
collect data.
"""
# Boiler plate
import geni.portal as portal
import geni.rspec.pg as rspec
# Build the RSpec request object this profile will populate.
request = portal.context.makeRequestRSpec()
# Get nodes
node1 = request.RawPC("node1")
node2 = request.RawPC("node2")
# Force hardware type for consistency
node1.hardware_type = "m510"
node2.hardware_type = "m510"
# Single link joining the two nodes.
link1 = request.Link(members = [node1, node2])
# Set scripts from repo
# node1.addService(rspec.Execute(shell="sh", command="/local/repository/initDocker.sh"))
# Boiler plate: emit the finished request RSpec.
portal.context.printRequestRSpec()
|
"""
Robotritons testing version of compass navigation.
Purpose: Use a magnetometer to reliably steer the vehicle.
Requirements: An InvenSense MPU-9250. The python modules logging, sys, spidev, time, math, navio.util, VehiclePWMModule, and navio.mpu9250_better
Use: Input a desired direction and the vehicle will try to turn itself that way. Place the vehicle facing north. Instantiate an imu object, then
initialize it, then calibrate N,E,S,W, finally call the read_mag() method to update the list of magnetometer_data.
The program will calculate the vehicles current heading and the bearing to the desired angle. The vehicle will steer towards the angle.
Updates:
- September 10, 2016. calibrateMagNorth() -> calibrateMag(). Updated so sweep vehicle through all angles, don't just hold at cardinal directions.
Tested steering. +-35 is enough for sharp steer in any direction. Medium steer is hard to do because +-20 doesn't change after 30 and
+-15 doesn't change after center.
- September 9, 2016. Attempted to add basic PID (instead of in VehiclePWMModule) for steering towards the bearing, by increasing the steering angle
by 5 more than the bearing angle. Removed it because the wheels' friction prevents precise movement. In order to keep some control
the old modular steering was reinstated.
- September 8, 2016. Cleaned compiling bugs and tested inside, finding that its basic ability to steer towards a degree works!
Replaced print statements with output from the logging library. root_log prints to the console, data_log formats csv and prints to a file.
In the 'Logging Setup' section, change the message levelname to control what types of log messages are printed
- September 5, 2016. Heading now measurable in degrees by incorporating arctan and declination formulas based on adafruits datasheet. Also incorporated steering towards a degree direction.
- August 5, 2016. Created the file.
Resources:
https://docs.emlid.com/navio/Navio-dev/mpu9250-imu/
https://store.invensense.com/datasheets/invensense/MPU9250REV1.0.pdf
https://shahriar.svbtle.com/importing-star-in-python
Resources Logging:
https://docs.python.org/2.7/howto/logging.html#advanced-logging-tutorial
https://docs.python.org/2.7/howto/logging-cookbook.html#logging-to-multiple-destinations
https://docs.python.org/2.7/library/logging.html#logger-objects
Resources Headings:
https://cdn-shop.adafruit.com/datasheets/AN203_Compass_Heading_Using_Magnetometers.pdf
https://docs.python.org/2/library/math.html (look for atan2())
http://aviation.stackexchange.com/questions/8000/what-are-the-differences-between-bearing-vs-course-vs-direction-vs-heading-vs-tr
http://cache.freescale.com/files/sensors/doc/app_note/AN4248.pdf
Resources Bugs:
https://community.emlid.com/t/bug-magnetometer-reading-process-fail/688 #Community is aware of problem, but only C++ fixes are implemented
https://github.com/ArduPilot/ardupilot/pull/2487 #The immediate solution was to stop the AK8963 device reset. This is the solution I mimicked with "mpu9250_better"
https://github.com/ArduPilot/ardupilot/pull/2493 #Emlid developers have fixed the C++ driver
https://github.com/ArduPilot/ardupilot/pull/2504 #Someone else did it their own way for C++
"""
import logging
import sys
import spidev
import time
import math
import navio.util
from navio.mpu9250_better import MPU9250
import VehiclePWMModule
# ----- Logging Setup -----
#1) Loggers create log records. They are the outermost interface included in application code. root logger is default.
#2) Handlers send the log records to particular destinations.
#3) Formatters specify the layout of the final output
#Add a basic handler to the root (parent) logger. This handler's destination is the console stream
logging.basicConfig(level=logging.INFO,format='%(levelname)-6s %(name)-6s %(message)s')#Change debug level to control console output <----------
log_root = logging.getLogger('')#Assign an easy name to the root logger
#Create a separate logger for general output
#log_console = logging.getLogger('console')
#Create a separate logger for raw data from the magnetometer
log_mag = logging.getLogger('magnetometer')
log_mag.setLevel(logging.DEBUG) #Change debug level to control file output <----------
#Create a handler that outputs to a csv file
handler_mag = logging.FileHandler('compassCheckData/magnetometerData.csv',mode='w')
handler_mag.setLevel(logging.DEBUG) #This handler handles all outputs to the file
#Create a formatter that labels the data
format_mag = logging.Formatter('%(levelname)-8s,%(message)s')
#Add format to handler, then handler to logger
handler_mag.setFormatter(format_mag)
log_mag.addHandler(handler_mag)
#Make sure the mag logger does not propagate messages to the root ancestor
#Propagation does not consider the message levelnames of the ancestor
#so we need to avoid flooding the console with mag logs of all levelnames
log_mag.propagate = False
#Example in text logging call
#log_mag.info('This is some data %f' % variable)
# ----- End Log Setup -----
# Hardware bring-up: verify we are on APM-capable hardware, then start the IMU.
navio.util.check_apm()
imu = MPU9250()
log_root.info('Connection established: %s' %(imu.testConnection()))
#print "Connection established: ", imu.testConnection()
#initialize communication
imu.initialize()
time.sleep(1)  # give the IMU a moment to settle after initialization
#initialize the servo and esc
vehicle_servo = VehiclePWMModule.vehiclePWM("servo")
vehicle_esc = VehiclePWMModule.vehiclePWM("esc")
#Initialize the angle from north to the target, this is the course angle.
target = 90  # degrees clockwise from north
def calibrateMag():
    """Sample the magnetometer while the operator sweeps the vehicle
    through a full rotation, and return the centre of the readings.

    Servo twitches signal progress to the operator: a left-right wiggle
    marks the start and end, and 1/2/3 single steers mark the quarter
    points of the 600-sample sweep. Each (x, y) sample is written to the
    magnetometer CSV logger.

    Returns {'x': xMean, 'y': yMean} -- the per-axis means used later to
    re-centre raw readings on the origin.
    """
    time.sleep(5) #5 Seconds before calibration begins
    log_root.info('calibrateMag')
    #Indicate start of calibration
    vehicle_servo.steer(35)
    time.sleep(0.5)
    vehicle_servo.steer(-35)
    time.sleep(0.5)
    vehicle_servo.center()
    #Capture about 1000 points for the whole sweep
    xSet = []
    ySet = []
    for x in xrange(600):
        imu.read_mag()
        xSet.append(imu.magnetometer_data[0])
        ySet.append(imu.magnetometer_data[1])
        log_mag.info('%f,%f' %(xSet[x],ySet[x]))
        if (x == 150):
            #Indicate 1/4 done with 1 steer
            vehicle_servo.steer(35)
            time.sleep(0.5)
            vehicle_servo.center()
        elif (x == 300):
            log_root.info('1/2 calibrateMag')
            #Indicate 2/4 done with 2 steers
            vehicle_servo.steer(35)
            time.sleep(0.5)
            vehicle_servo.center()
            time.sleep(0.5)
            vehicle_servo.steer(35)
            time.sleep(0.5)
            vehicle_servo.center()
        elif (x == 450):
            #Indicate 3/4 done with 3 steers
            vehicle_servo.steer(35)
            time.sleep(0.5)
            vehicle_servo.center()
            time.sleep(0.5)
            vehicle_servo.steer(35)
            time.sleep(0.5)
            vehicle_servo.center()
            time.sleep(0.5)
            vehicle_servo.steer(35)
            time.sleep(0.5)
            vehicle_servo.center()
        else:
            time.sleep(0.05)  # ~20 Hz sampling between progress signals
    log_root.info('End calibrateMag')
    log_mag.info('End calibrateMag')
    #Indicate end of calibration
    vehicle_servo.steer(35)
    time.sleep(0.5)
    vehicle_servo.steer(-35)
    time.sleep(0.5)
    vehicle_servo.center()
    #Mean values are the coordinates in the center of all readings (zero in the adafruit datasheet)
    xMean = float(sum(xSet))/max(len(xSet),1)
    yMean = float(sum(ySet))/max(len(ySet),1)
    #Y holds similar values for NORTH and SOUTH
    #X holds similar values for EAST and WEST
    #If Y is inside a small threshold of its median the vehicle faces NORTH or SOUTH
    #Otherwise
    # Y points NORTH and reads above its median, meaning the vehicle faces mostly WEST
    # Y points SOUTH and reads below its median, meaning the vehicle faces mostly EAST
    return {'x':xMean,'y':yMean}
#Store our calibrated mean values
magMeans = calibrateMag() #Y's values are most useful
#Start vehicle stopped
vehicle_esc.stop()
vehicle_esc.rest()
vehicle_servo.rest()
# Main control loop: read the compass, compute the heading and the relative
# bearing to `target`, then steer towards it until interrupted with Ctrl-C.
while True:
    try:
        #Read our magnetometer
        #	Note: The magnetometer data is stored as a list ordered [x,y,z]
        #	Note: x+ is directed towards the front of the RPI2/Navio+ and y+ is directed towards the right of the RPI2/Navio+
        #	Note: all calculations assume x is the vertical axis and y is horizontal. Upsidedown vehicle reverses E<->W
        imu.read_mag()
        xRaw = imu.magnetometer_data[0] #print >> f, "X raw, %f" % (imu.magnetometer_data[0])
        yRaw = imu.magnetometer_data[1] #print >> f, "Y raw, %f" % (imu.magnetometer_data[1])
        log_mag.debug('%f,%f' % (xRaw,yRaw))
        #Translate current reading so that it lies on a circle centered on the origin
        yCtrd = yRaw-magMeans['y']#Current readings minus the mean
        xCtrd = xRaw-magMeans['x']
        #Calculate the heading counterclockwise. (angle between the vehicle and NORTH)
        #If the vehicle faces WEST report 90 degrees from north (1/2 pi)
        #If the vehicle faces EAST report -90 degrees from north (-1/2 pi)
        headRadSign = math.atan2(yCtrd,xCtrd) #atan2 in python takes (y, x). This is opposite to excel
        headDegSign = headRadSign*(180/math.pi)
        #Convert the heading to range from 0-360
        #If the vehicle faces WEST report 90 degrees from north (1/2 pi)
        #If the vehicle faces EAST report 270 degrees from north (3/2 pi)
        headRad = headRadSign%math.pi #Good for debugging, but unnecessary to calculate heading
        headDeg = headDegSign%360 #Good for debugging, but unnecessary to calculate heading
        #print 'Radians heading from north: %f' % (headRad)
        log_root.debug('Degrees heading from North: %f' %(headDeg))
        #Find the ccw angle between vehicle's heading and target, this is the Relative Bearing.
        #This uses our vehicle as the reference "0" degree and equivalently reorients the target around the perspective of the vehicle
        bearBasic = (target-headDegSign)%360
        #bearRel = (target-headDeg)%360. Has more roundoff error
        #bearRel = (headDeg-target)%360. Uses target as the reference "0" degree and equivalently reorients the vehicle around the target's perspective.
        #Also, the angle between the target and north is the Magnetic Heading.
        #Finally, useful Relative Bearings are <180 and include a sign to denote direction. Subtracting by 360 adds that sign.
        if (bearBasic>180):
            bearRel=bearBasic-360
        else:
            bearRel=bearBasic
        log_root.debug('bearRel: %f' % (bearRel))
        #If not heading in correct direction (8-degree dead band around the target)
        if (abs(bearRel)>8):
            if (bearRel > 0): #If bearing is to the right of target
                #Turn left
                if (bearRel < 45):
                    vehicle_servo.steer(20)
                    print '15'
                #elif (bearRel < 90):
                #	vehicle_servo.steer(25)
                #	print '25'
                else:
                    vehicle_servo.steer(35)
                    print '35'
            else: #If bearing is to the left of target
                #Turn right
                if (bearRel > -45):
                    vehicle_servo.steer(-20)
                    print '-15'
                #elif (bearRel > -90):
                #	vehicle_servo.steer(-25)
                #	print '-25'
                else:
                    vehicle_servo.steer(-35)
                    print '-35'
            #Convert bearing angle to possible steering angle
            #vehicle_servo.steer(bearRel*35/180) #steer(+-35) is largest value and bearRel is signed
        else:#Stay centered
            vehicle_servo.center()
            print 'center'
        time.sleep(0.05)
    except KeyboardInterrupt:
        # Ctrl-C: stop the motor, relax the servo, and exit cleanly.
        vehicle_esc.stop()
        vehicle_servo.rest()
        sys.exit()
|
# GETTING HELPER FUNCTION AND LIBRARIES
from prediction_helper import *
import cv2 as cv
# reading image
path = input("Enter the path of the image : ")
path = f"{path}"  # no-op conversion; path is already a string
img = cv.imread(path)
# Downscale very large images so the display window stays manageable.
# NOTE(review): cv.imread returns None for a bad path, which would make
# img.shape raise AttributeError here -- path is assumed valid.
if img.shape[0] > 1080 and img.shape[1] > 1920:
    img = cv.resize(img,(img.shape[1]//3,img.shape[0]//3))
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
# face tracker (Haar cascade bundled with the deployment)
face_tracker = cv.CascadeClassifier("Deployment/xmls/haarcascade_frontalface_default.xml")
# faces: list of (x, y, w, h) bounding boxes
faces = face_tracker.detectMultiScale(gray,1.2,5)
print(f"faces found {faces}")
predictions = []
# getting and predicting on faces
for (x,y,w,h) in faces:
    # Crop the face and resize to the 48x48 input the models expect.
    cropped_image = gray[y:y+h,x:x+w]
    cropped_image = cv.resize(cropped_image, (48, 48))
    # predictions (helpers come from prediction_helper)
    age = makePredictionAge(cropped_image)
    gender = makePredictionGender(cropped_image)
    emotion = makePredictionEmotion(cropped_image)
    #ethnicity = makePredictionEthnicity(cropped_image)
    predictions.extend([age,gender,emotion])
    # Annotate the face box and the three predictions on the image.
    cv.rectangle(img, (x,y), (x + w, y + h), (255, 255, 255))
    cv.putText(img,f"Approximate age is {age}",(x-40,y-20),cv.FONT_HERSHEY_COMPLEX,0.7,(255,0,0),1)
    cv.putText(img, f"Gender is {gender}", (x-40, y+5), cv.FONT_HERSHEY_COMPLEX, 0.7, (255,0, 0),1)
    cv.putText(img, f"Emotion is {emotion}", (x-40, y+30), cv.FONT_HERSHEY_COMPLEX, 0.7, (255,0, 0),1)
    #cv.putText(img, f"Ethnicity hoepfully is {ethnicity}", (10, y+55), cv.FONT_HERSHEY_COMPLEX, 0.7, (255,0, 0))
# Show the annotated image until a key is pressed.
cv.imshow("Press q to quit",img)
if cv.waitKey(0) & 0xFF == ord("q"):
    cv.destroyAllWindows()
print(f"Predictions made are : {predictions}")
|
from django.core.management.base import BaseCommand
from django.db import connection
from organisations.models import (
DivisionGeographySubdivided,
OrganisationGeographySubdivided,
)
class Command(BaseCommand):
    """Management command that rebuilds the subdivided geography tables."""

    help = "Populate the subdivided tables"

    def handle(self, *args, **options):
        with connection.cursor() as cursor:
            self.stdout.write("Orgs")
            cursor.execute(OrganisationGeographySubdivided.POPULATE_SQL)
            self.stdout.write("Divs")
            # Bug fix: the division populate statement (and its "Divs"
            # message) was duplicated, running the same SQL twice.
            cursor.execute(DivisionGeographySubdivided.POPULATE_SQL)
|
class Solution:
    def minDeletion(self, nums: list[int]) -> int:
        """Return the minimum deletions so that, in the remaining array,
        no element at an even index equals its successor and the final
        length is even (LeetCode 2216 "beautiful array").

        Scan left to right tracking deletions made so far (`res`); `i - res`
        is an element's index after those deletions. When an element at an
        effective even index equals its right neighbour, one of the pair
        must go -- either choice is equivalent for the rest of the array,
        so we simply count one deletion.

        Note: the annotation uses the builtin `list[int]` so the class does
        not depend on an unimported `typing.List`.
        """
        n = len(nums)
        res = 0
        for i in range(n):
            if (i - res) % 2 == 0 and (i != n - 1 and nums[i] == nums[i + 1]):
                res += 1
        # If the surviving length is odd, delete one trailing element too.
        return res if (n - res) % 2 == 0 else res + 1
|
#!/usr/bin/env python
"""
Author: Patrick Monnahan
Purpose: This script creates a bed file of NON-genic regions to be excluded in structural variant discovery
Takes the following arguments:
-gff : Full path to gff file containing gene locations (can contain other elements as well...these will just be ignored)
-b : Buffer on either side of gene boundary. E.g Gene1 ends at 100bp and Gene2 starts at 500bp. If b=10, then the non-genic region will be 110-490')
"""
# Import necessary arguments
import argparse
# Specify arguments to be read from the command line
parser = argparse.ArgumentParser()
parser.add_argument('-gff', type=str, metavar='gff_path', required=True, help='path to gff file')
parser.add_argument('-b', type=int, metavar='buffer', default=2000, help='Buffer on either side of gene boundary. E.g Gene1 ends at 100bp and Gene2 starts at 500bp. If b=10, then the non-genic region will be 110-490')
args = parser.parse_args()
first_gene = True   # True until the first gene of the file has been emitted
old_stop = 0        # upper boundary of the most recent gene seen
# Begin looping over lines in the gff file; emits BED-style
# "chrom<TAB>start<TAB>end" lines for the NON-genic gaps between genes.
with open(args.gff, 'r') as gff:
    for i, line in enumerate(gff):
        if line[0] != "#": # Ignore all lines in the header of the gff
            if line.split()[1] != "wareLab": # This is a catch for the B73 gff because it has a wierd first line
                line = line.strip("\n").split()
                # NOTE(review): rewrites "M" -> "chr" in the chromosome
                # name; presumably normalizes this assembly's naming --
                # confirm against the gff files actually used.
                chrom = line[0].replace("M","chr")
                start = line[3] # Lower boundary of entry
                stop = line[4] # Upper boundary of entry
                # Initialize current_chrom to catch when we move on to a different chromosome
                if first_gene is True:
                    current_chrom = chrom
                # this catches a gene entry on same chomosome as before
                if line[2] == "gene" and chrom == current_chrom:
                    if int(start) - int(old_stop) >= args.b * 2 : # If buffer regions overlap, then we consider the space between genes to be GENIC.
                        if first_gene is True:
                            # Gap from the chromosome start to the first gene's buffer.
                            print(f"{chrom}\t0\t{int(start) - args.b}")
                            first_gene = False
                        else:
                            print(f"{chrom}\t{int(old_stop) + args.b}\t{int(start) - args.b}")
                # This catches a gene entry after moving on to new chromosome
                elif line[2] == "gene" and chrom != current_chrom:
                    print(f"{current_chrom}\t{int(old_stop) + args.b}\t999999999") # the final non-genic region is the end of the last gene stop point all the way to the end of the chromosome (999999999)
                    if int(start) - args.b > 0: # Does buffer region of first gene on new chromosome extend off the beginning of the chromosome?
                        print(f"{chrom}\t0\t{int(start) - args.b}")
                    else:
                        print(f"{chrom}\t0\t{start}")
                    current_chrom = chrom
                old_stop = stop # Store upper boundary of current gene
|
#!/usr/bin/env python
#------------------------------------------------------------------------------
# Copyright 2008-2011 Istituto Nazionale di Fisica Nucleare (INFN)
#
# Licensed under the EUPL, Version 1.1 only (the "Licence").
# You may not use this work except in compliance with the Licence.
# You may obtain a copy of the Licence at:
#
# http://www.osor.eu/eupl/european-union-public-licence-eupl-v.1.1
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the Licence is
# distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied.
# See the Licence for the specific language governing
# permissions and limitations under the Licence.
#------------------------------------------------------------------------------
class Database:
    """Lightweight container bundling a DB session with its table registry.

    Attributes:
        session: the database session handle passed by the caller.
        tables: mapping of table name -> table object ('Compute' only).
        get_next_id: callable supplied by the caller for id generation.
    """

    def __init__(self, sess, compute_table, get_next_id):
        self.session = sess
        # Expose the compute table by name so callers can look it up.
        self.tables = {'Compute': compute_table}
        self.get_next_id = get_next_id
|
import dash_bootstrap_components as dbc
from dash import html
# A ten-page Bootstrap pagination control wrapped in a plain Div.
pagination = html.Div(
    children=dbc.Pagination(max_value=10),
)
|
#
# This file is part of LUNA.
#
# Adapted from lambdasoc.
# This file includes content Copyright (C) 2020 LambdaConcept.
#
# Per our BSD license, derivative files must include this license disclaimer.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" Peripheral helpers for LUNA devices. """
from contextlib import contextmanager
from amaranth import Module, Elaboratable
from amaranth import tracer
from amaranth.utils import log2_int
from amaranth_soc import csr, wishbone
from amaranth_soc.memory import MemoryMap
from amaranth_soc.csr.wishbone import WishboneCSRBridge
from lambdasoc.periph.base import PeripheralBridge
from lambdasoc.periph.event import EventSource
import lambdasoc
__all__ = ["Peripheral", "CSRBank", "PeripheralBridge"]
# Note:
#
# The following are thin wrappers around LambdaSoC's Peripheral and
# CSRBank classes.
#
# The primary reason this abstraction exists is to allow us to support
# auto-generation of register documentation from Peripherals.
#
# The intention is to either upstream this at a future point in time
# or use LambdaSoC's facilities if/when it should gain them.
class Peripheral(lambdasoc.periph.base.Peripheral):
    """Thin LambdaSoC Peripheral wrapper that records documentation metadata.

    Banks and events created through this class carry a ``desc`` attribute
    used for auto-generated register documentation.
    """

    def csr_bank(self, *, name=None, addr=None, alignment=None, desc=None):
        """Request a CSR bank, attaching optional documentation.

        Arguments
        ---------
        name : str or None
            Optional bank name.
        addr : int or None
            Address of the bank; ``None`` selects the implicit next address.
        alignment : int or None
            Bank alignment; falls back to the bridge alignment when ``None``.
        desc : str or None
            Documentation text stored on the bank.

        Return value
        ------------
        The newly created :class:`CSRBank`.
        """
        new_bank = CSRBank(name=name)
        new_bank.desc = desc
        # Same registration triple the LambdaSoC bridge expects.
        self._csr_banks.append((new_bank, addr, alignment))
        return new_bank

    def event(self, *, mode="level", name=None, src_loc_at=0, desc=None):
        """Request an event source, attaching optional documentation.

        ``desc`` is stored on the event for documentation generation; the
        remaining arguments are forwarded to the LambdaSoC implementation.
        Returns the new :class:`EventSource`.
        """
        if name is None:
            # Infer the event name from the variable it is assigned to.
            name = tracer.get_var_name(depth=2 + src_loc_at).lstrip("_")
        source = super().event(mode=mode, name=name, src_loc_at=src_loc_at)
        source.desc = desc
        return source
class CSRBank(lambdasoc.periph.base.CSRBank):
    """Thin LambdaSoC CSRBank wrapper whose registers carry documentation."""

    def csr(self, width, access, *, addr=None, alignment=None, name=None,
            src_loc_at=0, desc=None):
        """Request a CSR register and attach *desc* for doc generation.

        Parameters
        ----------
        width : int
            Register width (see :class:`amaranth_soc.csr.Element`).
        access : :class:`Access`
            Register access mode (see :class:`amaranth_soc.csr.Element`).
        addr : int or None
            Register address (see :meth:`amaranth_soc.csr.Multiplexer.add`).
        alignment : int or None
            Register alignment (see :class:`amaranth_soc.csr.Multiplexer`).
        name : str or None
            Register name; inferred from the assigned variable when ``None``.
        desc : str or None
            Documentation text stored on the element.

        Return value
        ------------
        The new :class:`amaranth_soc.csr.Element`.
        """
        if name is None:
            # Infer the register name from the variable it is assigned to.
            name = tracer.get_var_name(depth=2 + src_loc_at).lstrip("_")
        element = super().csr(width, access, addr=addr, alignment=alignment,
                              name=name, src_loc_at=src_loc_at)
        element.desc = desc
        return element
|
import requests, json, datetime
#records the initial game data
def record_data(game, date):
    """Append *game* to the list of games recorded under *date*.

    Reads the accumulated history from ``data.json``; a missing file is
    treated as an empty history (previously this crashed on first run).
    Returns the updated mapping of date -> list of games.
    """
    try:
        with open('data.json', 'r') as f:
            data = json.load(f)
    except FileNotFoundError:
        data = {}  # first run: no history recorded yet
    data.setdefault(date, []).append(game)
    return data
#logs the summoner info and prints it out
def sum_info(sum_ids, endpoint, api_key, data, date):
    """Fetch and print summoner/league info for both teams of the live game.

    Appends each player's combined summoner + league info under
    ``data[date][-1]['team 1' / 'team 2']`` and returns the updated data.
    Assumes sum_ids has exactly two team entries iterated in team order —
    TODO confirm (dict ordering relied upon).
    """
    path_to_sum0 = '/lol/summoner/v3/summoners/'
    path_to_league = '/lol/league/v3/positions/by-summoner/'
    number = 'team 1'
    for team, sums in sum_ids.items():
        for i in sums:
            # One request per player for profile and ranked-league data.
            summoner = requests.get('%s%s%s?api_key=%s'%(endpoint, path_to_sum0, i, api_key)).json()
            league_info = requests.get('%s%s%s?api_key=%s'%(endpoint, path_to_league, i, api_key)).json()
            info = summoner.copy()
            try:
                info.update(league_info[0])
            except IndexError:
                pass  # unranked player: no league entry to merge
            try:
                # Attach the player to the most recently recorded game.
                data[date][len(data[date])-1][number].append(info)
            except KeyError:
                data[date][len(data[date])-1][number] = [info]
            try:
                print('%s:\t%s\t%s/%s\t%s %s'%(summoner['name'], summoner['summonerLevel'], league_info[0]['wins'], league_info[0]['losses'], league_info[0]['tier'], league_info[0]['rank']))
            except IndexError:
                # Unranked: print profile info only.
                print('%s:\t%s\tno league info'%(summoner['name'], summoner['summonerLevel']))
        # After the first team is printed, subsequent players go to team 2.
        number = 'team 2'
        print('\n')
    return data
#saves the final game data to the file
def save(data):
    """Persist *data* to ``data.json``, then wait for a keypress."""
    with open('data.json', 'w') as handle:
        json.dump(data, handle)
    input()  # keep the console window open until the user confirms
def main():
    """Look up a live game for a hard-coded summoner and record all players.

    Reads the Riot API key from ``config.ini``, finds the active game for
    'BoxofJuice' on EUNE, groups participants by team, records the game and
    per-player info, then saves everything to ``data.json``.
    """
    with open('config.ini', 'r') as f:
        api_key = f.read()
    endpoint = 'https://eun1.api.riotgames.com'
    path_to_sum = '/lol/summoner/v3/summoners/by-name/'
    sum_name = 'BoxofJuice'
    summoner = requests.get('%s%s%s?api_key=%s'%(endpoint, path_to_sum, sum_name, api_key)).json()
    sum_id = summoner['id'] #gets the summoner id for the game search
    path_to_game = '/lol/spectator/v3/active-games/by-summoner/%s'%sum_id
    game = requests.get('%s%s?api_key=%s'%(endpoint, path_to_game, api_key)).json()
    participants = game['participants'] #get list of players' summoner IDs
    sum_ids = {}
    for i in participants:
        # Group summoner ids by team id (EAFP: first player creates the list).
        try:
            sum_ids[i['teamId']].append(i['summonerId'])
        except KeyError:
            sum_ids[i['teamId']] = [i['summonerId']]
    now = datetime.datetime.now()
    date = str(now)[:10]  # ISO date prefix (YYYY-MM-DD) used as the record key
    data = record_data(game, date)
    data = sum_info(sum_ids, endpoint, api_key, data, date)
    save(data)
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
|
import sys
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.ticker
import seaborn as sns
# Output PDF named after this script.
plotfile = sys.argv[0].replace('.py', '.pdf')
sns.set_style('ticks')
fig, axes = plt.subplots(2, 3, figsize=(9, 6), sharex=True, sharey=True)
# One panel per inclination bin, uniform in sin(i) so bins are equal-probability.
sin_inc_edges = np.linspace(0.0, 1.0, len(axes.flat)+1)
sin_incs = 0.5*(sin_inc_edges[:-1] + sin_inc_edges[1:])
incs_deg = np.degrees(np.arcsin(sin_incs))
# Grid of intrinsic (Rc, R90) pairs; R90 axis reversed so it decreases downward.
ny, nx = 41, 41
Rcs = np.linspace(0.5, 4.5, nx)
R90s = np.linspace(0.5, 4.5, ny)[::-1]
Rc_grid = Rcs[None, :]*np.ones_like(R90s[:, None])
R90_grid = R90s[:, None]*np.ones_like(Rcs[None, :])
# Conic parameter implied by each (Rc, R90) pair.
Tc_grid = 2*Rc_grid - R90_grid**2
cols = sns.color_palette('magma', n_colors=ny)
def Rc_prime(inc, Tc, Rc):
    """Apparent (projected) Rc of a conic viewed at inclination *inc* (radians)."""
    t2 = np.tan(inc)**2
    f = np.sqrt(1.0 + Tc*t2)
    numerator = Rc * (1 + t2)
    denominator = f * (1.0 + Rc*(f - 1.0) / Tc)
    return numerator / denominator
def Tc_prime(inc, Tc):
    """Apparent (projected) conic parameter Tc at inclination *inc* (radians)."""
    t2 = np.tan(inc)**2
    return Tc * (1.0 + t2) / (1.0 + Tc*t2)
def R90_prime(inc, Tc, Rc):
    """Apparent R90 implied by the projected planitude and conic parameter."""
    return np.sqrt(2.0*Rc_prime(inc, Tc, Rc) - Tc_prime(inc, Tc))
def qratio(inc, Tc, Rc):
    """Ratio of apparent to true apex scale at inclination *inc* (radians)."""
    f = np.sqrt(1.0 + Tc*np.tan(inc)**2)
    scale = 1.0 + Rc*(f - 1.0) / Tc
    return scale * np.cos(inc)
# One panel per inclination: background shading for the physically allowed
# region, then the projected (Rc', R90') scatter colored by Tc.
for ax, inc_deg in zip(axes.flat, incs_deg):
    Rc_grid2 = np.linspace(0.0, 10.0, 2000)
    # Boundary curves R90 = sqrt(2 Rc - Tc) for Tc = 0 and Tc = 1.
    R90_T0_grid = np.sqrt(2*Rc_grid2)
    R90_T1_grid = np.sqrt(2*Rc_grid2 - 1.0)
    R90_T1_grid[~np.isfinite(R90_T1_grid)] = 0.0
    ax.fill_between(Rc_grid2, R90_T1_grid, R90_T0_grid, color='k', alpha=0.2)
    ax.fill_between(Rc_grid2, R90_T0_grid, color='k', alpha=0.1)
    ax.plot(Rc_grid2, R90_T0_grid, c='k', lw=0.5)
    # Reference lines at unity and the diagonal.
    ax.axhline(1.0, lw=0.5, alpha=0.5, color='k', zorder=-1)
    ax.axvline(1.0, lw=0.5, alpha=0.5, color='k', zorder=-1)
    ax.plot([0.0, 10.0], [0.0, 10.0], lw=0.5, alpha=0.5, color='k', zorder=-1)
    inc = np.radians(inc_deg)
    thetaQ = 0.5*np.pi - inc
    Tc_crit = -np.tan(thetaQ)**2
    # Shade the band between the Tc=0 and critical-Tc boundaries in green.
    R90_Tcrit_grid = np.sqrt(2*Rc_grid2 - Tc_crit)
    ax.fill_between(Rc_grid2, R90_T0_grid, R90_Tcrit_grid, color='g', alpha=0.1)
    Rcp = Rc_prime(inc, Tc_grid, Rc_grid).ravel()
    R90p = R90_prime(inc, Tc_grid, Rc_grid).ravel()
    R0p = qratio(inc, Tc_grid, Rc_grid).ravel()
    # Marker size scales with the apparent/true apex ratio; color encodes Tc.
    ax.scatter(Rcp, R90p, c=Tc_grid.ravel(), s=15*R0p,
               vmin=Tc_grid.min(), vmax=Tc_grid.max(),
               edgecolors='none',
               cmap='magma', marker='.', alpha=0.8)
    # ax.axhspan(0.0, 10.0, alpha=0.1, facecolor='k', zorder=-1)
    # ax.axhline(1.0, ls='--', lw=0.5, c='k', zorder=0)
    # ax.axvline(1.0, ls='--', lw=0.5, c='k', zorder=0)
    ax.plot([1.0], [1.0], 'x', c='k')
    ax.text(2.5, 0.5, rf'$|i| = {inc_deg:.0f}^\circ$',
            bbox={'facecolor': 'w', 'alpha': 0.8, 'edgecolor': 'none'})
    # NOTE(review): adjustable='box-forced' was removed in Matplotlib 3.0
    # (use 'box' there) — confirm the targeted Matplotlib version.
    ax.set_aspect('equal', adjustable='box-forced')
# Shared axes: configuring the lower-left panel styles all panels.
axes[-1, 0].set(
    yscale='linear',
    xlim=[0.0, 5.1],
    ylim=[0.0, 5.1],
    xticks=range(5),
    yticks=range(5),
    xlabel=r"$\Pi'$",
    ylabel=r"$\Lambda'$",
)
sns.despine()
fig.tight_layout()
fig.savefig(plotfile, dpi=300)
print(plotfile, end='')
|
# -*- coding: utf-8 -*-
{
'name': "Boxwise Point-of-Sale",
'summary': """
POS for Free Shops
""",
'author': "Humanilog",
'website': "www.humanilog.org",
'category': 'Uncategorized',
'version': '11.0.1.0.0',
'depends': [
'pos',
],
'data': []
}
|
from flask import Blueprint, request, jsonify
from utils.decorators import ErrorHandler
from flask_jwt_extended import (
jwt_required,
jwt_refresh_token_required
)
from .responses import AuthenticationResponse, TokenResponse
from .permissions import admin_required, prohibitted
import logging
from flaskr import jwt
from flask_cors import CORS
# Blueprint grouping every authentication endpoint; CORS-enabled so that
# browser clients on other origins can call these routes.
authentication = Blueprint('authentication', __name__,
                           template_folder='templates')
CORS(authentication)
# Application-wide logger shared by all handlers below.
logger = logging.getLogger('app')
# Create your end-points here.
@authentication.route('/auth/token/valid', methods=['GET'])
@jwt_required
@ErrorHandler(logger, authentication)
def is_valid_token():
    """Report whether the caller's JWT is still valid."""
    body, status = AuthenticationResponse(request).is_valid()
    return body, status
@authentication.route('/login', methods=['POST'])
@ErrorHandler(logger, authentication)
def login():
    """Authenticate the caller and issue tokens."""
    body, status = AuthenticationResponse(request).login()
    return body, status
@authentication.route('/refresh', methods=['POST'])
# @jwt_refresh_token_required
@ErrorHandler(logger, authentication)
@prohibitted
def refresh():
    """Refresh endpoint (guarded by ``prohibitted``; JWT check disabled above)."""
    body, status = AuthenticationResponse(request).refresh()
    return body, status
@authentication.route('/logout', methods=['DELETE'])
@jwt_required
@ErrorHandler(logger, authentication)
def logout():
    """Log the caller out, invalidating their token."""
    body, status = AuthenticationResponse(request).logout()
    return body, status
# Provide a way for a user to look at their tokens
@authentication.route('/auth/token/list', methods=['GET'])
@jwt_required
@ErrorHandler(logger, authentication)
@admin_required
def get_tokens():
    """List tokens (admin only)."""
    body, status = TokenResponse().lists()
    return body, status
# Provide a way for a user to revoke/unrevoke their tokens
@authentication.route('/logout', methods=['POST'])
@jwt_required
@ErrorHandler(logger, authentication)
@admin_required
def delete_token():
    """Admin POST variant of /logout; delegates to the same logout response."""
    body, status = AuthenticationResponse(request).logout()
    return body, status
@authentication.route('/auth/token/<token_id>', methods=['PUT'])
@jwt_required
@ErrorHandler(logger, authentication)
@prohibitted
def modify_token(token_id):
    """Revoke/unrevoke the token identified by *token_id*."""
    body, status = AuthenticationResponse(request).revoke(token_id)
    return body, status
|
import sys
from pygments.formatters import HtmlFormatter
class CustomFormatter(HtmlFormatter):
    """HtmlFormatter that escapes spaces and backticks as template markers."""

    def quote(self, tokensource):
        # Replace characters that would clash with downstream template
        # syntax with {{{space}}} / {{{backtick}}} placeholders.
        for token_type, token_text in tokensource:
            quoted = token_text.replace(' ', '{{{space}}}')
            yield token_type, quoted.replace('`', "{{{backtick}}}")

    def format(self, tokensource, outfile):
        formatted = self._format_lines(self.quote(tokensource))
        for _, fragment in formatted:
            # NOTE(review): writes UTF-8 bytes; under Python 3 outfile must
            # accept bytes (binary stream) — confirm against callers.
            outfile.write(fragment.encode('utf-8'))
|
from initial_prediction import *
from objective_function.main import *
import pandas as pd
"""
mark the data without optimization
"""
def mark2csv(filename, output_name, label):
    """Score every protein listed in *filename* and write the results to CSV.

    Each line of the input names a structure.  For every structure the
    pipeline computes structure/hydrophobic factors and a Q value; rows are
    tagged with *label* (e.g. 1 for TM, -1 for non-TM).  Structures that
    fail any pipeline stage are skipped.
    """
    with open(filename, 'r') as f_read:
        names = [line.strip('\n') for line in f_read]
    marked_content = []
    for name in names:
        try:
            structure, helices = get_structure(name)
            normal = predict_normal(structure, helices)
            res_list = split_protein_into_slice(structure, normal)
            structure_factor, hydrophobic_factor, q_value = objective_function(
                structure, (normal[1], normal[2]), normal[0])
        except Exception:
            # Best effort: skip structures the pipeline cannot process.
            # (Was a bare `except: pass`, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            continue
        marked_content.append([name, structure_factor, hydrophobic_factor,
                               q_value, label])
    df = pd.DataFrame(marked_content,
                      columns=['Name', 'structure_factor',
                               'hydrophobic_factor', 'Q_value', 'label'])
    df.to_csv(output_name, index=False)
# Score the transmembrane (label 1) and non-transmembrane (label -1) sets.
mark2csv("pdbtm_alpha_clean.txt","marked_Tm_without_opti.csv",1)
mark2csv("random_proteins.txt","marked_nonTm_without_opti.csv",-1)
|
import pickle

# Load the pre-computed temperature table.  The pickle holds a 4-tuple:
#   temperatures : list of grain temperatures
#   grainsizes   : list of grain sizes
#   radii        : list of radii
#   Tdict        : dict keyed by temperature; the value is the (radius,
#                  grainsize) pair used to compute that temperature.
# Temperatures were computed by interpolating between flux in and flux out
# of a grain; emissivity is folded into the flux as a function of grain size.
# (Fix: the file is now closed via a context manager instead of leaking.)
with open('temptable.50-50', 'rb') as infile:
    temperatures, grainsizes, radii, Tdict = pickle.load(infile)

# Example: retrieve the (radius, grainsize) for every temperature
# for T in temperatures:
#     print(Tdict[T])
|
import json
from kafka import KafkaConsumer
TOPIC_NAME = 'test-topic'

# Consume the latest JSON messages with auto-committed offsets.
# (Pass enable_auto_commit=False to disable auto-commit.)
consumer = KafkaConsumer(
    TOPIC_NAME,
    group_id='test-group',
    bootstrap_servers=['localhost:9092'],
    value_deserializer=lambda m: json.loads(m.decode('ascii')),
)

for message in consumer:
    # Keys (and undeserialized values) are raw bytes -- decode if necessary,
    # e.g. message.key.decode('utf-8') for unicode.
    print("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition,
                                         message.offset, message.key,
                                         message.value))
|
import json
# Load cached route dumps, one JSON file per transit agency.  The number in
# each filename is the agency id used by the API.
with open('bus_routes/(1)kingCountyMetro.json') as kc_metro:
    kc_data = json.load(kc_metro)
with open('bus_routes/(3)pierceTransit.json') as pierce:
    pt_data = json.load(pierce)
with open('bus_routes/(19)intercityTransit.json') as intercity:
    it_data = json.load(intercity)
with open('bus_routes/(23)seattleStreetCar.json') as street_car:
    sc_data = json.load(street_car)
with open('bus_routes/(29)communityTransit.json') as c_transit:
    ct_data = json.load(c_transit)
with open('bus_routes/(40)soundTransit.json') as s_transit:
    st_data = json.load(s_transit)
with open('bus_routes/(95)washingtonStateFerries.json') as ferries:
    ferry_data = json.load(ferries)
with open('bus_routes/(96)seattleMonorail.json') as sea_mono:
    sm_data = json.load(sea_mono)
with open('bus_routes/(97)everettTransit.json') as e_transit:
    et_data = json.load(e_transit)
with open('bus_routes/(98)seattleChildrens.json') as childrens:
    child_data = json.load(childrens)
with open('bus_routes/(KMD)kingCountyMarine.json') as king_marine:
    kmd_data = json.load(king_marine)
# Agency metadata (names, coordinates) used by hash_agency().
with open('bus_routes/rawAgencies.json') as ag_obj:
    agencies = json.load(ag_obj)
def hash_routes():
    """Build and print one route-name -> route-id map across all agencies.

    Names are keyed on each route's short name (long name for ferries,
    agency id "95").  A name already present in the map gets the agency's
    suffix letter appended before insertion.
    """
    def add_unique_keys(bus_dict, route_list, suffix):
        # Mutates and returns bus_dict.
        for route in route_list:
            route_name = route['shortName']
            route_id = route['id']
            if route["agencyId"] == "95":
                # Ferries are identified by their long name instead.
                route_name = route['longName']
            if route_name not in bus_dict:
                bus_dict[route_name] = route_id
            else:
                route_name += suffix
                bus_dict[route_name] = route_id
                print('already in there: ', route_name)
        return bus_dict

    # Agencies in the same order the original chained additions used.
    sources = [
        (kc_data, 'kcm'), (ct_data, 'N'), (et_data, 'N'), (st_data, 'st'),
        (sm_data, 'sm'), (sc_data, 'sc'), (pt_data, 'pt'), (it_data, 'it'),
        (kmd_data, 'kmd'), (ferry_data, 'ferry'), (child_data, 'child'),
    ]
    combined = {}
    for payload, suffix in sources:
        combined = add_unique_keys(combined, payload['data']['list'], suffix)
    print(combined)
def hash_agency():
    """Combine agency names (from references) with coordinates and print."""
    agency_list = agencies['data']['list']
    agency_ref = agencies['data']['references']['agencies']
    combined = {}
    # Seed each agency id with its display name.
    for agency in agency_ref:
        combined[agency['id']] = {"name": agency['name']}
    # Attach coordinates to the seeded records.
    for agency in agency_list:
        record = combined[agency['agencyId']]
        record["lat"] = agency['lat']
        record["lon"] = agency['lon']
    print(combined)
# Entry point: print the combined route map (agency hashing disabled).
if __name__ == "__main__":
    hash_routes()
    # hash_agency()
|
import pdb
import random
import pylab as pl
from scipy.optimize import fmin_bfgs
import numpy as np
from gradDescent import basic_gradient_descent, approximate_gradient_descent
# X is an array of N data points (one dimensional for now), that is, NX1
# Y is a Nx1 column vector of data values
# order is the order of the highest order polynomial in the basis functions
def regressionPlot(X, Y, order, regression_method):
    """Fit a polynomial of the given order and plot data plus fitted curve.

    X, Y are Nx1 column matrices; regression_method(X, Y, phi) must return
    the weight column vector, which is also returned to the caller.
    """
    pl.plot(X.T.tolist()[0],Y.T.tolist()[0], 'gs')
    # Construct the design matrix (Bishop 3.16); the 0th column is just 1s.
    phi = designMatrix(X, order)
    # compute the weight vector
    w = regression_method(X, Y, phi)
    # Evaluate the fitted polynomial on a dense grid for plotting.
    # NOTE(review): min(X)/max(X) on a 2-D matrix yield rows, not scalars —
    # relies on linspace accepting them; confirm intended behavior.
    pts = np.array([[p] for p in pl.linspace(min(X), max(X), 100)])
    Yp = pl.dot(w.T, designMatrix(pts, order).T)
    pl.plot(pts, Yp.tolist()[0])
    return w
def designMatrix(X, order):
    """Vandermonde-style design matrix: column j holds X**j for j = 0..order."""
    columns = [X[:, 0] ** power for power in range(order + 1)]
    return np.array(columns).transpose()
def regressionFit(X, Y, phi):
    """Maximum-likelihood weights w = (phi^T phi)^-1 phi^T Y (Bishop 3.15).

    Solves the normal equations with a linear solve instead of an explicit
    matrix inverse for numerical stability.  X is unused but kept for
    interface compatibility with the other fit methods.
    """
    gram = np.dot(phi.transpose(), phi)
    moment = np.dot(phi.transpose(), Y)
    return np.linalg.solve(gram, moment)
def gradientFit(X, Y, phi):
    """Fit weights by numerical gradient descent on the sum-squared error."""
    initial = np.array([[value] for value in phi[0]])
    fitted, _count, _score = approximate_gradient_descent(
        initial,
        lambda w: SSE(phi, Y, w),
    )
    return fitted
def SSE(phi, Y, weights):
    """Sum of squared errors between Y and the linear model phi·weights."""
    total = 0
    targets = Y[:, 0]
    for row_index in range(len(targets)):
        row = phi[row_index]
        prediction = sum(weights[col][0] * row[col] for col in range(len(row)))
        total += (targets[row_index] - prediction) ** 2
    return total
def SSE_prime(phi, Y, weights):
    """Forward-difference approximation of the SSE gradient at *weights*."""
    base = SSE(phi, Y, weights)
    step = 0.001
    perturbation = np.zeros(weights.shape)
    gradient = np.zeros(weights.shape)
    for idx in range(weights.shape[0]):
        # Perturb one coordinate at a time and difference against the base.
        perturbation[idx] = step
        gradient[idx] = (SSE(phi, Y, weights + perturbation) - base) / step
        perturbation[idx] = 0
    return gradient
def getData(name):
    """Load a two-row text file and return (X, Y) as Nx1 column matrices."""
    raw = pl.loadtxt(name)
    # Row 0 holds the inputs, row 1 the targets; transpose to columns.
    return raw[0:1].T, raw[1:2].T
def bishopCurveData():
    """Bishop's curve-fitting set: y = sin(2 pi x) + N(0, 0.3)."""
    return getData('curvefitting.txt')

def regressAData():
    """Training set A."""
    return getData('regressA_train.txt')

def regressBData():
    """Training set B."""
    return getData('regressB_train.txt')

def validateData():
    """Held-out validation set."""
    return getData('regress_validate.txt')
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from typing import ClassVar
from pants.bsp.protocol import BSPHandlerMapping
from pants.bsp.spec.lifecycle import (
BuildServerCapabilities,
CompileProvider,
DebugProvider,
InitializeBuildParams,
InitializeBuildResult,
RunProvider,
TestProvider,
)
from pants.engine.rules import collect_rules, rule
from pants.engine.unions import UnionMembership, UnionRule, union
from pants.version import VERSION
# Version of BSP supported by Pants.
BSP_VERSION = "2.0.0"
@union
class BSPLanguageSupport:
    """Union exposed by language backends to inform BSP core rules of capabilities to advertise to
    clients."""

    # BSP language id advertised in the capability lists.
    language_id: ClassVar[str]
    # Capability flags aggregated by `bsp_build_initialize` into the
    # corresponding providers of BuildServerCapabilities.
    can_compile: bool = False
    can_test: bool = False
    can_run: bool = False
    can_debug: bool = False
    can_provide_resources: bool = False
# -----------------------------------------------------------------------------------------------
# Initialize Build Request
# See https://build-server-protocol.github.io/docs/specification.html#initialize-build-request
# -----------------------------------------------------------------------------------------------
class InitializeBuildHandlerMapping(BSPHandlerMapping):
    """Maps the BSP "build/initialize" method to its request/response types."""
    method_name = "build/initialize"
    request_type = InitializeBuildParams
    response_type = InitializeBuildResult
@rule
async def bsp_build_initialize(
    _request: InitializeBuildParams, union_membership: UnionMembership
) -> InitializeBuildResult:
    """Answer build/initialize by aggregating language-backend capabilities."""
    backends = union_membership.get(BSPLanguageSupport)
    # Collect, per capability, the language ids of backends that support it.
    compile_ids = [b.language_id for b in backends if b.can_compile]
    test_ids = [b.language_id for b in backends if b.can_test]
    run_ids = [b.language_id for b in backends if b.can_run]
    debug_ids = [b.language_id for b in backends if b.can_debug]
    resources_provider = any(b.can_provide_resources for b in backends)
    return InitializeBuildResult(
        display_name="Pants",
        version=VERSION,
        bsp_version=BSP_VERSION,  # TODO: replace with an actual BSP version
        capabilities=BuildServerCapabilities(
            compile_provider=CompileProvider(
                language_ids=tuple(sorted(compile_ids))
            ),
            test_provider=TestProvider(language_ids=tuple(sorted(test_ids))),
            run_provider=RunProvider(language_ids=tuple(sorted(run_ids))),
            debug_provider=DebugProvider(language_ids=tuple(sorted(debug_ids))),
            inverse_sources_provider=None,
            dependency_sources_provider=True,
            dependency_modules_provider=True,
            resources_provider=resources_provider,
            can_reload=None,
            build_target_changed_provider=None,
        ),
        data=None,
    )
def rules():
    """Expose this module's rules plus the initialize-handler registration."""
    return (
        *collect_rules(),
        UnionRule(BSPHandlerMapping, InitializeBuildHandlerMapping),
    )
|
# ---------------------------------------------------------------------------
# extract_basin_prism_values.py
# Created on: 2014-07-22 18:30:25.00000 (generated by ArcGIS/ModelBuilder)
# Description: extract prism data from raster using basin shapefiles as mask
# and output a csv file for each basin
# UPDATED (8/4/2014): calculate basin mean temperature or precipitation
# Created and Modified by Ryan Spies (ryan.spies@amec.com)
# ---------------------------------------------------------------------------
# NOTE: Python 2 script (print statements, raw_input); requires ArcGIS arcpy.
print 'Importing modules...'
# Import modules
import arcpy
import os
import csv
os.chdir("../..")
maindir = os.getcwd()
################### User Input ###########################################################
RFC = 'MBRFC_FY2015'
variable = 'recharge' # choices: 'recharge' or 'trans'
basins_folder = maindir + '\\' + RFC + '\\Shapefiles\\calb_basins\\'
# if you only want to run specific basins -> list them below
# otherwise set it equal to empty list (basins_overwrite = [])
basins_overwrite = []
output_dir = maindir + '\\' + RFC + '\\GW_' + variable + '\\' # this must contain a folder for each basin (eg. FONN7)
################# End User Input ##########################################################
# Sanity-check required directories; warn and pause rather than abort.
if not os.path.exists('C:\\NWS\\python\\temp_output\\'):
    print "Missing directory: 'C:\\NWS\\python\\temp_output\\' -> please create"
    raw_input("Press enter to continue processing...")
if not os.path.exists(output_dir):
    print "Missing directory: " + output_dir + " -> please create"
    raw_input("Press enter to continue processing...")
# Process: output csv file ('ab' appends to any existing csv).
print 'Creating '+ RFC + '_recharge.csv file...'
recharge_csv = open(output_dir + RFC + '_' + variable + '.csv', 'ab')
csvFile = csv.writer(recharge_csv) #output csv
# location of PRISM Raster (CONUS)
if variable == 'recharge':
    Recharge_Dataset = 'D:\\GIS Library\\rech48grd\\rech48grd'
    csvFile.writerow(['Basin','Mean Annual Recharge (mm)', 'Mean Annual Recharge (in)'])
if variable == 'trans':
    Recharge_Dataset = maindir + '\\SERFC\\TransmissivityMap_data_USGS\\transidw'
    csvFile.writerow(['Basin','log base 10 ft2 per day'])
print Recharge_Dataset
# Check out any necessary licenses
arcpy.CheckOutExtension("spatial")
arcpy.env.overwriteOutput = True
# Set Geoprocessing environments
#arcpy.env.scratchWorkspace = "P:\\NWS\\GIS\\Models\\10_0_tools\\Model_Output.gdb" # temporary file storage directory
#arcpy.env.parallelProcessingFactor = "50"
print 'ok so far...'
#################################################################################
# find all basins in the RFC task, or only run the specified basin overwrite list
basin_files = os.listdir(basins_folder) # list all basin shapefiles in the above specified directory
if len(basins_overwrite) != 0:
    basin_files = basins_overwrite # use the basins_overwrite variable to run only specified basins instead of all RFC basins
basins = []
check_dir = os.listdir(output_dir) # list all folders in output_dir
# Deduplicate basin names (a shapefile is several files sharing one stem).
for each in basin_files:
    if each.split('.')[0] not in basins:
        basins.append(each.split('.')[0])
print basins
print 'Identified ' + str(len(basins)) + ' basins in ' + RFC + ' input directory...'
all_data = {}
# loop through basins: mask the raster with each basin and record its mean
for basin in basins:
    ## Script arguments
    Basin_Boundary = basins_folder + '\\' + basin + '.shp'
    # location of PRISM Raster (CONUS)
    #Recharge_Dataset = 'Q:\\GISLibrary\\rech48grd\\rech48grd'
    print basin
    #Out_text = output_dir + basin + '_prism' + '.csv'
    ## Local variables:
    Basin_Raster = 'C:\\NWS\\python\\temp_output\\' + basin
    ## Process: Extract by Mask
    print 'Extracting by mask...'
    arcpy.gp.ExtractByMask_sa(Recharge_Dataset, Basin_Boundary, Basin_Raster)
    ## Process: Calculate mean of raster
    print 'Raster to point...'
    result = arcpy.GetRasterProperties_management(Basin_Raster, "MEAN")
    print result
    all_data[basin]=str(result)
    # For recharge, also convert mm -> inches for the second column.
    if variable == 'recharge':
        csvFile.writerow([basin,all_data[basin],float(all_data[basin])/25.4])
    if variable == 'trans':
        csvFile.writerow([basin,all_data[basin]])
# Process: output csv file
#print 'Creating '+ basin + '_recharge.csv file...'
#recharge_csv = open(output_dir + RFC + '_recharge' + '.csv', 'wb')
#csvFile = csv.writer(recharge_csv) #output csv
#csvFile.writerow(['Basin','Mean Annual Recharge (mm)', 'Mean Annual Recharge (in)'])
#for each in all_data:
#    csvFile.writerow([each,all_data[each],all_data[each]/25.4])
recharge_csv.close()
print 'Completed grid extraction!'
|
monty_python = "Monty Python"
# Show the phrase as-is, then lowercased, then uppercased.
for render in (str, str.lower, str.upper):
    print(render(monty_python))
|
# -*- coding: utf-8 -*-
import uuid, datetime,psycopg2,inject
from model.systems.offices.offices import Offices
from model.systems.assistance.date import Date
class Issue:
date = inject.attr(Date)
offices = inject.attr(Offices)
# -----------------------------------------------------------------------------------
# ---------------- VISIBILIDAD DE LOS PEDIDOS QUE PUEDO VER -------------------------
# -----------------------------------------------------------------------------------
'''
issues.visibility_group_owner {
id VARCHAR NOT NULL PRIMARY KEY,
request_id VARCHAR NOT NULL REFERENCES issues.request(id),
office_id VARCHAR NOT NULL REFERENCES offices.offices (id),
created TIMESTAMPTZ NOT NULL default now(),
tree boolean default true
}
'''
def _convertVisibilityOfficeToDict(self,visibility,type='OFFICE'):
return {'id':visibility[0],'issue_id':visibility[1],'office_id':visibility[2],'created':visibility[3],'tree':visibility[4],'type':type}
def getVisibilitiesOfficesView(self,con,issue_id):
cur = con.cursor()
cur.execute('select id,request_id,office_id,created,tree from issues.visibility_group_owner where request_id = %s',(issue_id,))
if cur.rowcount <= 0:
return []
visibilities = []
for v in cur:
visibilities.append(self._convertVisibilityOfficeToDict(v))
return visibilities
'''
obtiene los issues_id que puede ver la oficina office_id
'''
def findIssuesByOffice_View(self,con,office_id):
ids = []
cur = con.cursor()
cur.execute('select DISTINCT request_id from issues.visibility_group_owner where office_id = %s',(office_id,))
for i in cur:
ids.append(i[0])
return ids
def removeVisibilityOffice_View(self,con,id):
if id is None:
return
cur = con.cursor()
cur.execute('delete from issues.visibility_group_owner where id = %s',(id,))
def removeAllVisibilityOffice_View(self,con,issue_id):
if id is None:
return
cur = con.cursor()
cur.execute('delete from issues.visibility_group_owner where request_id = %s',(issue_id,))
def createVisibilityOffice_View(self,con,issue_id,office_id,tree=True,created=None):
if issue_id is None or office_id is None:
return None
cur = con.cursor()
id = str(uuid.uuid4())
if created is None:
created = self.date.now()
if tree is None:
tree = True
createdUtc = self.date.awareToUtc(created)
params = (id,issue_id,office_id,created,tree)
cur.execute('set timezone to %s',('UTC',))
cur.execute('insert into issues.visibility_group_owner (id,request_id,office_id,created,tree) values(%s,%s,%s,%s,%s)',params)
return id
# -----------------------------------------------------------------------------------
# -------------------------- ESTADO DEL PEDIDO --------------------------------------
# -----------------------------------------------------------------------------------
def _convertStateToDict(self,state):
return {'created':state[0],'state':state[1],'creator':state[2]}
'''
Obtiene el ultimo estado del pedido
'''
def getState(self,con,issue_id):
cur = con.cursor()
cur.execute('select created,state,user_id from issues.state where request_id = %s order by created desc limit 1',(issue_id,))
if cur.rowcount <= 0:
return None
return self._convertStateToDict(cur.fetchone())
'''
Crea un nuevo estado, por defecto lo pone como ...
'''
def updateState(self,con,issue_id,creator_id,created,state='PENDING'):
if issue_id is None or creator_id is None:
return
cur = con.cursor()
if created is None:
created = self.date.now()
createdUtc = self.date.awareToUtc(created)
params = (state,createdUtc,creator_id,issue_id)
cur.execute('set timezone to %s',('UTC',))
cur.execute('insert into issues.state (state,created,user_id,request_id) values(%s,%s,%s,%s)',params)
# -----------------------------------------------------------------------------------
# --------------------------------- PEDIDO ------------------------------------------
# -----------------------------------------------------------------------------------
def _convertToDict(self,issue,state,visibilities):
return {'id':issue[0],'created':issue[1],
'request':issue[2],'creator':issue[3],
'parent_id':issue[4],'assigned_id':issue[5],
'priority':issue[6],'office_id':issue[7],
'visibilities':visibilities,'state':state['state']}
def _getParamsPersistIssue(self, issue, id, userId):
return (issue['created'],
issue['request'] if 'request' in issue and issue['request'] is not None else '',
userId,
issue['parent_id'] if 'parent_id' in issue else None,
issue['assigned_id'] if 'assigned_id' in issue else None,
issue['priority'] if 'priority' in issue and issue['priority'] is not None else 0,
issue['office_id'],
id
)
'''
Crea un nuevo issue
visibilities = [{'type':OFFICE|USER},'office_id':id office o 'user_id' si es USER,'tree':True]
'''
def create(self,con,issue,userId,visibilities,state='PENDING'):
    """Create a new issue with an initial state and office visibilities.

    Args:
        con: open database connection.
        issue: dict with at least 'office_id'; optional 'created',
            'request', 'parent_id', 'assigned_id', 'priority'.
        userId: id of the creating user.
        visibilities: list of dicts like
            {'type': 'OFFICE', 'office_id': ..., 'tree': bool}.
        state: initial state label.

    Returns:
        The generated issue id (uuid4 string), or None on invalid input.
    """
    if issue is None or userId is None or visibilities is None or 'office_id' not in issue:
        return None
    id = str(uuid.uuid4())
    if 'created' in issue and issue['created'] is not None:
        issue['created'] = self.date.parse(issue['created'])
    else:
        issue['created'] = self.date.now()
    params = self._getParamsPersistIssue(issue,id,userId)
    cur = con.cursor()
    cur.execute('set timezone to %s',('UTC',))
    cur.execute('insert into issues.request (created,request,requestor_id,related_request_id,assigned_id,priority,office_id,id) values(%s,%s,%s,%s,%s,%s,%s,%s)',params)
    # record the initial state with the same creation timestamp
    self.updateState(con,id,userId,issue['created'],state)
    # only OFFICE-type visibilities are persisted here
    for v in visibilities:
        if v['type'] == 'OFFICE':
            self.createVisibilityOffice_View(con,id,v['office_id'],v['tree'])
    return id
'''
Actualiza el issuer
'''
def updateData(self,con,issue,userId):
    """Update an existing issue's data, state and visibilities.

    Args:
        con: open database connection.
        issue: dict with 'id' plus the fields to store; must contain
            'office_id' and 'visibilities'.
        userId: id of the user performing the update (stored as requestor).

    Returns:
        The issue id, or None on invalid input.
    """
    if issue is None or userId is None or 'id' not in issue or issue['id'] is None:
        return None
    params = (userId,
            issue['parent_id'] if 'parent_id' in issue else None,
            issue['assigned_id'] if 'assigned_id' in issue else None,
            issue['priority'] if 'priority' in issue and issue['priority'] is not None else 0,
            issue['office_id'],
            issue['id']
            )
    cur = con.cursor()
    cur.execute('set timezone to %s',('UTC',))
    cur.execute('update issues.request set requestor_id = %s, related_request_id = %s, assigned_id = %s, priority = %s, office_id = %s where id = %s',(params))
    # update the state (a new state row is appended, not overwritten)
    if 'state' in issue and issue['state'] is not None:
        state = issue['state']
        self.updateState(con,issue['id'],userId,None,state)
    # drop the visibilities it already has
    self.removeAllVisibilityOffice_View(con,issue['id'])
    # re-create the visibilities from the incoming data
    for v in issue['visibilities']:
        if v['type'] == 'OFFICE':
            self.createVisibilityOffice_View(con,issue['id'],v['office_id'],v['tree'])
    return issue['id']
'''
Elimina el issue y sus hijos
'''
def delete(self,con,id):
    """Delete an issue and, recursively, all of its children.

    Args:
        con: open database connection.
        id: issue id; no-op (returns None) when None.

    Returns:
        True on success, None for a None id.
    """
    if id is None:
        return None
    # delete the whole subtree bottom-up so no orphans remain
    childrens = self._getChildrens(con,id)
    if len(childrens) > 0:
        for child in childrens:
            self.delete(con,child['id'])
    cur = con.cursor()
    # delete the state rows
    cur.execute('delete from issues.state where request_id = %s',(id,))
    # delete the visibilities
    self.removeAllVisibilityOffice_View(con,id)
    # delete the issue itself
    cur.execute('delete from issues.request where id = %s',(id,))
    return True
'''
Obtiene los hijos
'''
def _getChildrens(self,con,id):
    """Return the child issues of *id* as dicts, each with its own
    'childrens' list filled recursively.

    Args:
        con: open database connection.
        id: parent issue id; an empty list is returned when None or when
            the issue has no children.
    """
    if id is None:
        return []
    # (removed a dead 'pids' accumulator that was built but never read)
    childrens = []
    cur = con.cursor()
    cur.execute('select id,created,request,requestor_id,related_request_id,assigned_id,priority,office_id from issues.request where related_request_id = %s',(id,))
    if cur.rowcount <= 0:
        return []
    for cIss in cur:
        cId = cIss[0]
        state = self.getState(con,cId)
        visibilities = self.getVisibilitiesOfficesView(con,cId)
        obj = self._convertToDict(cIss,state,visibilities)
        # recurse one level per child; depth is bounded by the issue tree
        obj['childrens'] = self._getChildrens(con,cId)
        childrens.append(obj)
    return childrens
def _includeIssue(self,issue_id,issues):
for iss in issues:
if iss['id'] == issue_id:
return True
if self._includeIssue(issue_id,iss['childrens']):
return True
return False
'''
Retorna todas las issues solicitadas por el usuario
'''
def getIssues(self,con,userId):
    """Return all issues visible to the user through their offices.

    Issues are gathered from the user's offices (direct visibility) and
    from parent offices whose visibility has tree=true; duplicates and
    issues already reachable as children of another result are dropped.

    Args:
        con: open database connection.
        userId: id of the requesting user; None returns None.

    Returns:
        list of issue dicts (each with nested 'childrens'), or None.
    """
    if userId is None:
        return None
    # (removed an unused cursor that was created and never executed)
    # ---- issues visible through the user's offices -----
    offices = self.offices.getOfficesByUser(con,userId,True)
    issues = []
    for o in offices:
        aux = self.findIssuesByOffice_View(con,o['id'])
        for i in aux:
            issues.append(self.findIssue(con,i))
        issues.extend(self._getIssuesByParentsOffice(con,o['parent']))
    # keep only top-level issues: drop any issue that also appears as a
    # child of another issue already collected
    issuesRet = []
    while len(issues) > 0:
        issue = issues[0]
        issues.remove(issue)
        if not self._includeIssue(issue['id'],issues) and not self._includeIssue(issue['id'],issuesRet):
            issuesRet.append(issue)
    # self.filterIssuesByVisibilityOffices(issues,offices)
    return issuesRet
# obtiene los issues de la oficina y de todos los padres que tengan el tree como true
def _getIssuesByParentsOffice(self,con,officeId):
    """Return issues visible from *officeId* with tree=true, walking up
    the office hierarchy recursively; such issues are marked read-only.

    Args:
        con: open database connection.
        officeId: office to start from; None yields an empty list.
    """
    issues = []
    if officeId is None:
        return issues
    office = self.offices.findOffice(con,officeId)
    cur = con.cursor()
    cur.execute('select DISTINCT request_id from issues.visibility_group_owner where office_id = %s and tree = true',(officeId,))
    for i in cur:
        issue = self.findIssue(con,i[0])
        # inherited visibility grants read access only
        issue['readOnly'] = True
        issues.append(issue)
    # recurse into the parent office chain
    issues.extend(self._getIssuesByParentsOffice(con,office['parent']))
    return issues
def filterIssuesByVisibilityOffices(self,issues,offices):
    """Remove, in place, issues whose visibilities match none of *offices*;
    surviving issues have their 'childrens' filtered recursively.

    Args:
        issues: list of issue dicts (mutated in place).
        offices: list of office dicts with an 'id' key.
    """
    removeIssues = []
    for issue in issues:
        for v in issue['visibilities']:
            if self._includeOffices(v['office_id'],offices):
                break
        else:
            # for/else: no visibility matched any office -> drop the issue
            # and skip filtering its children (continue)
            removeIssues.append(issue)
            continue
        self.filterIssuesByVisibilityOffices(issue['childrens'],offices)
    # removal is deferred so the list is not mutated while iterating
    for i in removeIssues:
        issues.remove(i)
def _includeOffices(self,id,offices):
for o in offices:
if o['id'] == id:
return True
return False
def findIssue(self,con,id):
    """Load a single issue by id, including its current state,
    visibilities and recursive children.

    Args:
        con: open database connection.
        id: issue id.

    Returns:
        issue dict (with 'childrens'), or None when not found.
    """
    cur = con.cursor()
    cur.execute('select id,created,request,requestor_id,related_request_id,assigned_id,priority,office_id from issues.request where id = %s',(id,))
    issue = cur.fetchone()
    if issue:
        state = self.getState(con,issue[0])
        visibilities = self.getVisibilitiesOfficesView(con,issue[0])
        obj = self._convertToDict(issue,state,visibilities)
        childrens = self._getChildrens(con,issue[0])
        obj['childrens'] = childrens
        return obj
    else:
        return None
def _include(self,issue,issue2):
if issue['id'] == issue2['id']:
return True
for iss in issue2['childrens']:
if self._include(issue,iss):
return True
return False
'''
Retorna todas las issues asignadas al usuario
'''
def getIssuesAdmin(self,con,userId):
    """Return all issues assigned to the offices of the given user.

    Args:
        con: open database connection.
        userId: id of the requesting user; None returns None.

    Returns:
        list of top-level issue dicts, or None.
    """
    if userId is None:
        return None
    # (removed an unused cursor that was created and never executed)
    # ---- issues assigned to the user's offices -----
    offices = self.offices.getOfficesByUser(con,userId,True)
    issues = []
    for o in offices:
        iss = self.findIssuesByOffice(con,o['id'])
        issues.extend(iss)
    # keep only top-level issues: drop any issue that also appears as a
    # child of another issue already collected
    issuesRet = []
    while len(issues) > 0:
        issue = issues[0]
        issues.remove(issue)
        if not self._includeIssue(issue['id'],issues) and not self._includeIssue(issue['id'],issuesRet):
            issuesRet.append(issue)
    # self.filterIssuesByVisibilityOffices(issues,offices)
    return issuesRet
def findIssuesByOffice(self,con,officeId):
    """Return all issues belonging to an office, with states,
    visibilities and recursive children loaded.

    Args:
        con: open database connection.
        officeId: office id; None yields an empty list.
    """
    if officeId is None:
        return []
    cur = con.cursor()
    cur.execute('select id,created,request,requestor_id,related_request_id,assigned_id,priority,office_id from issues.request where office_id = %s',(officeId,))
    if cur.rowcount <= 0:
        return []
    issues = []
    for issue in cur:
        state = self.getState(con,issue[0])
        visibilities = self.getVisibilitiesOfficesView(con,issue[0])
        obj = self._convertToDict(issue,state,visibilities)
        childrens = self._getChildrens(con,issue[0])
        obj['childrens'] = childrens
        issues.append(obj)
    return issues
|
import pandas as pd
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Dropout, BatchNormalization
from sklearn.metrics import accuracy_score
from keras.utils import to_categorical
import numpy as np
# Load the iris data set (expects ./data/iris.csv without a header row)
colnames = ['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth', 'Name']
iris_data = pd.read_csv("./data/iris.csv", names= colnames, encoding='utf-8')
# Split the iris data into the label column and the input features
y = iris_data.loc[:, "Name"]
x = iris_data.loc[:, ["SepalLength", "SepalWidth", "PetalLength", "PetalWidth"]]
# string one hot encoding
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
encoder.fit(y)
y = encoder.transform(y)
y = to_categorical(y).astype(int)
# Split into training and test sets (80/20, shuffled)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, train_size = 0.8, shuffle=True)
# Train: a single wide hidden layer followed by a 3-class softmax
model = Sequential()
model.add(Dense(1000, input_dim = 4))
model.add(Dense(3, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'] )
model.fit(x_train, y_train, epochs=50, batch_size=1)
# Evaluate: predict class indices and map them back to label names
y_pred = model.predict(x_test)
y_pred = np.argmax(y_pred, axis=1)
print(y_pred)
y_pred = encoder.inverse_transform(y_pred)
print(y_pred)
acc = model.evaluate(x_test, y_test, batch_size=1)
print("정답률: ",acc[1] )
|
from . import admin, image_upload
|
# Copyright (c) 2017 UFCG-LSD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from monitor import exceptions as ex
from monitor.service import api
import kubejobs_cost
import kubejobs
class MonitorBuilder:
    """Factory mapping a plugin name onto a monitor plugin instance."""

    def __init__(self):
        pass

    def get_monitor(self, plugin, app_id, plugin_info):
        """Instantiate the monitor plugin named by *plugin*.

        Raises ex.BadRequestException for unknown plugin names.
        """
        if plugin == "kubejobs":
            return kubejobs.PLUGIN(
                app_id, plugin_info, retries=api.retries)
        if plugin == "kubejobs_cost":
            return kubejobs_cost.PLUGIN(
                app_id, plugin_info)
        raise ex.BadRequestException()
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
""" Regular packages
Import Base Store / BaseStoreMetaData
"""
from .base import BaseStores
__all__ = [
'BaseStores',
]
|
class Solution(object):
    def generateMatrix(self, n):
        """Return an n x n matrix filled with 1..n*n in clockwise spiral order.

        Walks one cell at a time, turning clockwise whenever the next cell
        (with wrap-around) is already filled.
        BUGFIX: replaced Python-2-only xrange with range (NameError on Py3).
        """
        A = [[0] * n for _ in range(n)]
        i, j, di, dj = 0, 0, 0, 1
        for k in range(n * n):
            A[i][j] = k + 1
            # turn clockwise when the next (wrapped) cell is already filled
            if A[(i + di) % n][(j + dj) % n]:
                di, dj = dj, -di
            i += di
            j += dj
        return A
class Solution(object):
    def generateMatrix(self, n):
        """Return an n x n matrix filled with 1..n*n in clockwise spiral order.

        Fills layer by layer: top row left-to-right, right column downward,
        bottom row right-to-left, left column upward.
        BUGFIX: removed the bare ``except:`` that silently swallowed every
        error (including KeyboardInterrupt) and returned [] instead.
        """
        result = [[0] * n for _ in range(n)]
        if not n:
            return result
        for rep in range((n + 1) // 2):
            # top row of this layer, left to right
            for lr in range(rep, n - rep):
                result[rep][lr] = result[rep][lr - 1] + 1
            # right column, top to bottom
            for ub in range(rep + 1, n - rep):
                result[ub][-1 - rep] = result[ub - 1][-1 - rep] + 1
            # bottom row, right to left
            for rl in range(rep + 2, n - rep + 1):
                result[-1 - rep][-rl] = result[-1 - rep][1 - rl] + 1
            # left column, bottom to top
            for bu in range(rep + 2, n - rep):
                result[-bu][rep] = result[1 - bu][rep] + 1
        return result
|
from mongo_db import MongoDB
def main():
    """Demonstrate CRUD operations against a MongoDB 'business' collection."""
    # connect to mongodb
    # NOTE(review): credentials are hardcoded in source — move them to
    # environment variables or a secrets store before shipping.
    mongodb = MongoDB("iamr0b0tx", "DJ0Qb8XqulWFUXQK", "Cluster0", "business")
    # mongodb = MongoDB("ds", "gg", "Cluster0", "business")
    # create businesses
    print(mongodb.create({'name': 'Kitchen', 'rating': 1, 'cuisine': 'Pizza'}))
    # read business
    print(mongodb.read({'rating': 1}))
    # update business
    print(mongodb.update({'rating': 1}, {'rating': 5}))
    # delete business
    print(mongodb.delete({'rating': 5}))
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
"""unhang_console_by_Threads_SIGALRM.py
Author: Joseph Lin
Email : joseph.lin@aliyun.com
Social:
https://github.com/RDpWTeHM
https://blog.csdn.net/qq_29757283
Note:
signal.alarm(integer) ==> SIGALRM
alarm may can't not work well with sleep in some python version.
"""
# import sys
import os
# import atexit
import signal
# from random import randint
from time import sleep
import threading
# Shared result map filled by the worker thread: site URL -> True/False.
request_pages_result = {}
# PID of this process; sigalrm_handler uses it to deliver SIGINT to us.
prog_pid = os.getpid()
if __debug__:
    print("[Debug] prog_pid <- os.getpid(): {}".format(prog_pid))
def sigalrm_handler(signo, frame):
    """SIGALRM handler: the time budget expired.

    Prints whatever results have been collected so far, then delivers
    SIGINT to this very process so sigint_handler performs the shutdown.
    """
    global request_pages_result
    global prog_pid
    print("\nOooooops time up!")
    print("[sigalrm_handler] request pages result: \n", request_pages_result)
    # hand off to sigint_handler for the actual exit
    os.kill(prog_pid, signal.SIGINT)
def sigint_handler(signo, frame):
    """SIGINT handler: print the collected results and exit with status 0."""
    global request_pages_result
    print("\nGoodbay Cruel World.....")
    print("[sigint_handler] before exit program, show you the result:\n", request_pages_result)
    # SystemExit(0) unwinds the main thread cleanly
    raise SystemExit(0)
def prog_init():
    """Install the SIGALRM and SIGINT handlers (call once at startup)."""
    signal.signal(signal.SIGALRM, sigalrm_handler)
    signal.signal(signal.SIGINT, sigint_handler)
def request_pages():
    """Fetch a fixed list of sites and record, per site, whether the
    response was HTTP 200 in the global request_pages_result dict.

    The first exception (DNS failure, timeout, ...) aborts the remaining
    requests and is reported on stderr.
    """
    global request_pages_result
    sites = ("https://www.baidu.com",
             "https://www.bing.com",
             "https://www.yahoo.com",
             "http://www.so.com")
    # BUGFIX: 'import sys' was commented out at the top of the file, so the
    # except branch below raised NameError; import it locally instead.
    import sys
    import requests
    try:
        for site in sites:
            r = requests.get(site)
            if r.status_code != 200:
                request_pages_result[site] = False
            else:
                request_pages_result[site] = True
    except Exception as e:
        print("[Error] request_pages(): ", e, file=sys.stderr)
def main():
    """Start the page-requesting worker thread, arm a 5-second alarm, and
    spin a progress indicator until a signal ends the program.

    This function never returns normally: sigalrm_handler fires after 5s,
    sends SIGINT, and sigint_handler raises SystemExit.
    """
    global request_pages_result
    global prog_pid
    prog_init()
    # daemon thread: it must not keep the process alive after SystemExit
    t = threading.Thread(target=request_pages)
    t.setDaemon(True)
    t.start()
    print("request pages result: \n", request_pages_result)
    print("start the function which will cost lots of time.")
    # 5 second later, run the register function which will show the result.
    signal.alarm(5)
    # main thread keep doing things
    i = 0
    circle = ('|', '/', '-', '\\')
    while True:
        sleep(0.2)
        print("\rmain thread processing... {}".format(circle[i]), end='')
        i = 0 if i == 3 else i + 1
if __name__ == '__main__':
main()
|
'''
Class TextVisualization is defined in mesa/visualization
TextVisualization: Class meant to wrap around a Model object and render it in some way using Elements, in turn, renders a particular piece of information as text.
TextData: Uses getattr to get the value of a particular property of a model and prints it, along with its name.
TextGrid: Prints a grid, assuming that the value of each cell maps to exactly one ASCII character via a converter method. This (as opposed to a dictionary) is used so as to allow the method to access Agent internals, as well to potentially render a cell based on several values (e.g. an Agent grid and a Patch value grid).
'''
from mesa.visualization.TextVisualization import TextData, TextGrid, TextVisualization
from model import Schelling
class SchellingTextVisualization(TextVisualization):
    '''
    ASCII visualization for schelling model
    '''

    def __init__(self, model):
        '''
        Create new Schelling ASCII visualization

        Renders the model grid as ASCII plus the model's 'happy' counter.
        '''
        super().__init__(model)
        grid_viz = TextGrid(self.model.grid, self.print_ascii_agent)
        happy_viz = TextData(self.model, 'happy')
        self.elements = [grid_viz, happy_viz]

    @staticmethod
    def print_ascii_agent(a):
        '''
        Minority agents are X, Majority are 0.

        Note: agents with any other type value fall through and yield None.
        '''
        if a.type == 0:
            return '0'
        if a.type == 1:
            return 'X'
if __name__ == '__main__':
    # Model configuration for a 20x20 Schelling run.
    model_params ={
        'height': 20,
        'width': 20,
        # Agent density, from 0.8 to 1.0
        'density': 0.8,
        # Fraction minority, from 0.2 to 1.0
        'minority_pc': 0.2,
        # homophily: 3
        'homophily': 3,
    }
    model = Schelling(**model_params)
    viz = SchellingTextVisualization(model)
    # step the model 10 times, printing the ASCII rendering each step
    for i in range(10):
        print('Step', i)
        viz.step()
        print('---')
|
# ****************************************************************** #
# ************************* Byte of Python ************************* #
# ****************************************************************** #
########################
# using_sys
########################
# import sys
# print("The command line arguments are:")
# for i in sys.argv:
# print(i)
# print("\n\nThe PYTHONPATH is", sys.path, "\n")
########################
# using_name
########################
# if __name__ == "__main__":
# print("This program is being run by itself")
# else:
# print("I am being imported from another module")
########################
# mymodule
########################
# def sayhi():
# print("Hi, this is mymodule speaking.")
# __version__ = "0.1"
# ****************************************************************** #
# ********************* Programming in python ********************** #
# ****************************************************************** #
import random
# Demonstrate the random module: a die roll and a random fruit pick.
x = random.randint(1, 6)
y = random.choice(['apple', 'banana', 'cherry', 'durian'])
print(x)
print(y)
|
import unittest
from katas.kyu_6.bit_counting import countBits
class CountBitsTestCase(unittest.TestCase):
    """Unit tests for countBits (population count of an integer)."""

    def test_equals(self):
        self.assertEqual(countBits(0), 0)

    def test_equals_2(self):
        self.assertEqual(countBits(4), 1)

    def test_equals_3(self):
        self.assertEqual(countBits(7), 3)

    def test_equals_4(self):
        self.assertEqual(countBits(9), 2)

    def test_equals_5(self):
        self.assertEqual(countBits(10), 2)
|
'''
Identity Service tokens
'''
from . import credentials
def get():
    """ Retrieve a keystone token """
    # the keystone client caches/refreshes the session; we expose its token
    return credentials.keystone().auth_token
|
# Directory where experiment outputs are written.
expDir = '../exp'
# Number of worker threads to use.
nThreads = 4
|
# Import the random package to radomly select individuals
import random
# Import the superclass (also called base class), which is an abstract class,
# to implement the subclass ThresholdSelection
from SelectionOperator import *
# The subclass that inherits of SelectionOperator
# The subclass that inherits of SelectionOperator
class RouletteWheelSelection(SelectionOperator):
    """Fitness-proportionate (roulette wheel) selection.

    Fitness values are min-max normalised per generation so negative
    objectives are handled; preProcess must run once per generation
    before any selection.
    """

    # Constructor
    def __init__(self):
        super().__init__("Roulette wheel selection")
        self.sum_fitness = 0.0
        self.min_fitness = float('inf')
        self.max_fitness = -float('inf')
        # SystemRandom draws from the OS entropy source
        self.system_random = random.SystemRandom()

    def _normalised(self, individual):
        """Return the individual's objective scaled into [0, 1].

        BUGFIX: when every individual shares the same fitness the range is
        zero; the original code raised ZeroDivisionError. Weight everyone
        equally (1.0) in that case, which makes the selection uniform.
        """
        # BUGFIX: the original shadowed the builtin 'range' with this value
        fitness_range = self.max_fitness - self.min_fitness
        if fitness_range == 0:
            return 1.0
        return (individual.getObjective() - self.min_fitness) / fitness_range

    # Sum the fitness of all the individuals (run once per generation before any selection is done)
    # anIndividualSet: The set of individual to choose from
    def preProcess(self, anIndividualSet):
        """Recompute min/max fitness and the normalised fitness sum."""
        self.sum_fitness = 0.0
        self.min_fitness = float('inf')
        self.max_fitness = -float('inf')
        # Normalise the fitness values between 0 and 1 in case some are negative.
        for individual in anIndividualSet:
            self.min_fitness = min(self.min_fitness, individual.getObjective())
            self.max_fitness = max(self.max_fitness, individual.getObjective())
        for individual in anIndividualSet:
            self.sum_fitness += self._normalised(individual)

    # Select an idividual
    # anIndividualSet: The set of individual to choose from
    # aFlag == True for selecting good individuals,
    # aFlag == False for selecting bad individuals,
    def __select__(self, anIndividualSet, aFlag):
        """Return the index of a fitness-proportionally selected individual."""
        if aFlag == False:
            raise NotImplementedError("Selecting a bad individual is not implemented in RouletteWheelSelection!")

        # Random number between(0 - self.sum_fitness)
        random_number = self.system_random.uniform(0.0, self.sum_fitness)

        # Walk the wheel until the accumulated weight passes the draw
        accumulator = 0.0
        for index, individual in enumerate(anIndividualSet):
            accumulator += self._normalised(individual)
            if accumulator >= random_number:
                return index
        # BUGFIX: floating-point rounding could leave the accumulator just
        # below random_number, making the original fall through and return
        # None; fall back to the last individual instead.
        return len(anIndividualSet) - 1
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Forseti Database Objects. """
from sqlalchemy import create_engine
from sqlalchemy import Column
from sqlalchemy import String
from sqlalchemy import Text
from sqlalchemy import BigInteger
from sqlalchemy import Date
from sqlalchemy import desc
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql.elements import literal_column
# TODO: The next editor must remove this disable and correct issues.
# pylint: disable=missing-type-doc,missing-return-type-doc,missing-return-doc
# pylint: disable=missing-param-doc,missing-yield-doc,missing-yield-type-doc
# Declarative base shared by every ORM model in this module.
BASE = declarative_base()
# Cache of generated per-snapshot table classes, keyed by snapshot timestamp.
TABLE_CACHE = {}
# Batch size used when streaming query results.
PER_YIELD = 1024
# pylint: disable=too-many-locals
class SnapshotState(object):
    """Possible states for Forseti snapshots."""

    SUCCESS = "SUCCESS"
    RUNNING = "RUNNING"
    FAILURE = "FAILURE"
    PARTIAL_SUCCESS = "PARTIAL_SUCCESS"
    TIMEOUT = "TIMEOUT"
class Snapshot(BASE):
    """Represents a Forseti snapshot row."""

    __tablename__ = 'snapshot_cycles'

    id = Column(BigInteger(), primary_key=True)
    start_time = Column(Date)
    complete_time = Column(Date)
    # one of the SnapshotState values
    status = Column(String)
    schema_version = Column(String(255))
    # timestamp string used to namespace the per-snapshot tables
    cycle_timestamp = Column(String(255))

    def __repr__(self):
        return """<Snapshot(id='{}', version='{}', timestamp='{}')>""".format(
            self.id, self.schema_version, self.cycle_timestamp)
def create_table_names(timestamp, schema_version):
    """Forseti tables are namespaced via snapshot timestamp.

    This function generates the appropriate classes to abstract the access
    to a single snapshot.

    Args:
        timestamp (str): snapshot cycle timestamp used as a table suffix.
        schema_version (str): snapshot schema version; folder policies are
            only generated for schemas >= 2.0.

    Returns:
        tuple: (organization class, folders class,
                [(resource type, table class), ...],
                [policy table classes], [membership class, groups class]).
        Results are memoized per timestamp in TABLE_CACHE.
    """
    schema_number = float(schema_version)
    if timestamp in TABLE_CACHE:
        return TABLE_CACHE[timestamp]

    class Project(BASE):
        """Represents a GCP project row under the organization."""

        __tablename__ = 'projects_%s' % timestamp
        id = Column(BigInteger(), primary_key=True)
        project_number = Column(BigInteger())
        project_id = Column(String(255))
        project_name = Column(String(255))
        lifecycle_state = Column(String(255))
        parent_type = Column(String(255))
        parent_id = Column(String(255))
        raw_project = Column(Text())
        create_time = Column(Date)

        def __repr__(self):
            """String representation."""
            return """<Project(id='{}', project_name='{}')>""".format(
                self.id, self.project_name)

    class ProjectPolicy(BASE):
        """Represents a GCP project policy row under the organization."""

        __tablename__ = 'raw_project_iam_policies_%s' % timestamp
        id = Column(BigInteger(), primary_key=True)
        project_number = Column(BigInteger())
        iam_policy = Column(Text)

        def __repr__(self):
            """String representation."""
            return """<Policy(id='{}', type='{}', name='{}'>""".format(
                self.id, 'project', self.project_number)

        def get_resource_reference(self):
            """Return a reference to the resource in the form (type, id)."""
            return 'project', self.project_number

        def get_policy(self):
            """Return the corresponding IAM policy."""
            return self.iam_policy

    class OrganizationPolicy(BASE):
        """Represents a GCP organization policy row."""

        __tablename__ = 'raw_org_iam_policies_%s' % timestamp
        id = Column(BigInteger(), primary_key=True)
        org_id = Column(BigInteger())
        iam_policy = Column(Text)

        def __repr__(self):
            """String representation."""
            return """<Policy(id='{}', type='{}', name='{}'>""".format(
                self.id, "organization", self.org_id)

        def get_resource_reference(self):
            """Return a reference to the resource in the form (type, id)"""
            return 'organization', self.org_id

        def get_policy(self):
            """Return the corresponding IAM policy."""
            return self.iam_policy

    class Bucket(BASE):
        """Represents a GCS bucket item."""

        __tablename__ = 'buckets_%s' % timestamp
        id = Column(BigInteger(), primary_key=True)
        project_number = Column(BigInteger())
        bucket_id = Column(String(255))
        bucket_name = Column(String(255))
        bucket_kind = Column(String(255))
        bucket_storage_class = Column(String(255))
        bucket_location = Column(String(255))
        bucket_create_time = Column(Date)
        bucket_update_time = Column(Date)
        bucket_selflink = Column(String(255))
        bucket_lifecycle_raw = Column(Text)
        raw_bucket = Column(Text)

        def __repr__(self):
            """String representation."""
            return """<Bucket(id='{}', name='{}', location='{}')>""".format(
                self.bucket_id, self.bucket_name, self.bucket_location)

    class Organization(BASE):
        """Represents a GCP organization."""

        __tablename__ = 'organizations_%s' % timestamp
        org_id = Column(BigInteger(), primary_key=True)
        name = Column(String(255))
        display_name = Column(String(255))
        lifecycle_state = Column(String(255))
        raw_org = Column(Text)
        creation_time = Column(Date)

        def __repr__(self):
            """String representation."""
            fmt_s = "<Organization(id='{}', name='{}', display_name='{}')>"
            return fmt_s.format(
                self.org_id,
                self.name,
                self.display_name)

    class GroupMembers(BASE):
        """Represents Gsuite group membership."""

        __tablename__ = 'group_members_%s' % timestamp
        id = Column(BigInteger(), primary_key=True)
        group_id = Column(String(32))
        member_role = Column(String(128))
        member_type = Column(String(128))
        member_status = Column(String(128))
        member_id = Column(String(128))
        member_email = Column(String(128))
        raw_member = Column(Text())

        def __repr__(self):
            """String representation."""
            # BUGFIX: member_status was passed to format() with no matching
            # placeholder and silently dropped; include it in the output.
            fmt_s = "<GroupMember(gid='{}', role='{}', email='{}', status='{}')>"
            return fmt_s.format(
                self.group_id,
                self.member_role,
                self.member_email,
                self.member_status)

    class Groups(BASE):
        """Represents a Gsuite group."""

        __tablename__ = 'groups_%s' % timestamp
        id = Column(BigInteger(), primary_key=True)
        group_id = Column(String(127))
        group_email = Column(String(127))
        group_kind = Column(String(127))
        direct_member_count = Column(BigInteger())
        raw_group = Column(Text())

        def __repr__(self):
            """String representation."""
            fmt_s = "<Group(gid='{}', email='{}', kind='{}', members='{}')>"
            return fmt_s.format(
                self.group_id,
                self.group_email,
                self.group_kind,
                self.direct_member_count)

    class Folders(BASE):
        """Represents a folder."""

        __tablename__ = 'folders_%s' % timestamp
        folder_id = Column(BigInteger(), primary_key=True)
        name = Column(String(255))
        display_name = Column(String(255))
        lifecycle_state = Column(String(255))
        parent_type = Column(String(255))
        parent_id = Column(Text())

        def __repr__(self):
            """String representation."""
            fmt_s = "<Folder(fid='{}', name='{}', display_name='{}')>"
            return fmt_s.format(
                self.folder_id,
                self.name,
                self.display_name)

    # Folder IAM policies only exist from schema 2.0 onwards
    if schema_number >= 2.0:
        class FolderPolicy(BASE):
            """Represents a GCP folder policy row under the organization."""

            __tablename__ = 'raw_folder_iam_policies_%s' % timestamp
            id = Column(BigInteger(), primary_key=True)
            folder_id = Column(BigInteger())
            iam_policy = Column(Text)

            def __repr__(self):
                """String representation."""
                return """<Policy(id='{}', type='{}', name='{}'>""".format(
                    self.id, 'folder', self.folder_id)

            def get_resource_reference(self):
                """Return a reference to the resource in the form (type, id).
                """
                return 'folder', self.folder_id

            def get_policy(self):
                """Return the corresponding IAM policy."""
                return self.iam_policy

    class CloudSqlInstances(BASE):
        """Represents a Cloud SQL instance."""

        __tablename__ = 'cloudsql_instances_%s' % timestamp
        id = Column(BigInteger(), primary_key=True)
        project_number = Column(BigInteger())
        name = Column(String(255))

        def __repr__(self):
            """String representation."""
            fmt_s = "<CloudSQL(id='{}', name='{}'>"
            return fmt_s.format(
                self.id,
                self.name)

    class Instances(BASE):
        """Represents a Cloud GCE instance."""

        __tablename__ = 'instances_%s' % timestamp
        id = Column(BigInteger(), primary_key=True)
        project_id = Column(String(255))
        name = Column(String(255))
        service_accounts = Column(Text())
        raw_instance = Column(Text())

        def __repr__(self):
            """String representation."""
            fmt_s = "<Instance(id='{}', name='{}'>"
            return fmt_s.format(
                self.id,
                self.name)

    class InstanceGroups(BASE):
        """Represents a Cloud GCE instance group."""

        __tablename__ = 'instance_groups_%s' % timestamp
        id = Column(BigInteger(), primary_key=True)
        project_id = Column(String(255))
        name = Column(String(255))
        raw_instance_group = Column(Text())

        def __repr__(self):
            """String representation."""
            fmt_s = "<Instance Group(id='{}', name='{}'>"
            return fmt_s.format(
                self.id,
                self.name)

    class BigqueryDatasets(BASE):
        """Represents a Cloud Bigquery dataset."""

        __tablename__ = 'bigquery_datasets_%s' % timestamp
        id = Column(BigInteger(), primary_key=True)
        project_id = Column(String(255))
        dataset_id = Column(String(255))
        raw_access_map = Column(Text())

        def __repr__(self):
            """String representation."""
            fmt_s = "<Bigquery Dataset(id='{}', name='{}'>"
            return fmt_s.format(
                self.id,
                self.dataset_id)

    class BackendServices(BASE):
        """Represents a Cloud Backend Service."""

        __tablename__ = 'backend_services_%s' % timestamp
        id = Column(BigInteger(), primary_key=True)
        project_id = Column(String(255))
        name = Column(String(255))
        raw_backend_service = Column(Text())

        def __repr__(self):
            """String representation."""
            # BUGFIX: this repr was copy-pasted from BigqueryDatasets and
            # mislabelled the object as "Bigquery Dataset".
            fmt_s = "<Backend Service(id='{}', name='{}'>"
            return fmt_s.format(
                self.id,
                self.name)

    supported_policies = [OrganizationPolicy, ProjectPolicy]
    if schema_number >= 2.0:
        supported_policies.append(FolderPolicy)

    result = (Organization,
              Folders,
              [('projects', Project),
               ('buckets', Bucket),
               ('cloudsqlinstances', CloudSqlInstances),
               ('instances', Instances),
               ('instancegroups', InstanceGroups),
               ('bigquerydatasets', BigqueryDatasets),
               ('backendservices', BackendServices)],
              supported_policies,
              [GroupMembers, Groups])
    TABLE_CACHE[timestamp] = result
    return result
class Importer(object):
    """Forseti data importer to iterate the inventory and policies."""

    SUPPORTED_SCHEMAS = ['1.0', '2.0']

    def __init__(self, db_connect_string):
        """Connect to the Forseti database and open a session.

        Args:
            db_connect_string (str): SQLAlchemy connection string.
        """
        engine = create_engine(db_connect_string, pool_recycle=3600)
        BASE.metadata.create_all(engine)
        session = sessionmaker(bind=engine)
        self.session = session()
        self.engine = engine

    def _table_exists_or_raise(self, table, context_msg=None):
        """Raises exception if table does not exists.

        Args:
            table (object): Table to check for existence
            context_msg (str): Additional information

        Raises:
            Exception: Indicate that the table does not exist
        """
        table_name = table.__tablename__
        if not self.engine.has_table(table_name):
            msg = 'Table not found: {}'.format(table_name)
            if context_msg:
                msg = '{}, hint: {}'.format(msg, context_msg)
            raise Exception(msg)

    def _get_latest_snapshot(self):
        """Find the latest snapshot from the database.

        Returns:
            object: Forseti snapshot description table.
        """
        return (
            self.session.query(Snapshot)
            .filter(Snapshot.status == SnapshotState.SUCCESS)
            .filter(Snapshot.schema_version.in_(self.SUPPORTED_SCHEMAS))
            .order_by(Snapshot.start_time.desc())
            .first())

    def __iter__(self):
        """Main interface to get the data, returns assets and then policies.

        Yields:
            tuple: (resource type string, row object / payload) pairs for
            the organization, folders, resources, groups, memberships and
            finally IAM policies of the latest successful snapshot.
        """
        snapshot = self._get_latest_snapshot()
        organization, folders, tables, policies, group_membership = \
            create_table_names(snapshot.cycle_timestamp,
                               snapshot.schema_version)

        # Organizations
        self._table_exists_or_raise(organization)
        forseti_org = self.session.query(organization).one()
        yield "organizations", forseti_org

        # Folders, walked level by level from the organization root
        self._table_exists_or_raise(folders)
        folder_set = (
            self.session.query(folders)
            .filter(folders.parent_type == 'organization')
            .all())
        while folder_set:
            for folder in folder_set:
                yield 'folders', folder
            folder_set = (
                self.session.query(folders)
                .filter(folders.parent_type == 'folder')
                .filter(folders.parent_id.in_(
                    [f.folder_id for f in folder_set]))
                .all()
            )

        # Plain resource tables, streamed in batches
        for res_type, table in tables:
            for item in self.session.query(table).yield_per(PER_YIELD):
                yield res_type, item

        # Groups and membership
        membership, groups = group_membership
        hint = 'Did you enable Forseti group collection?'
        self._table_exists_or_raise(membership, hint)
        self._table_exists_or_raise(groups, hint)
        query_groups = (
            self.session.query(groups)
            .with_entities(literal_column("'GROUP'"), groups.group_email))
        principals = query_groups.distinct()
        for kind, email in principals.yield_per(PER_YIELD):
            yield kind.lower(), email

        # Memberships are grouped per member: rows arrive ordered by
        # member_email, so an email change closes the current group.
        query = (
            self.session.query(membership, groups)
            .filter(membership.group_id == groups.group_id)
            .order_by(desc(membership.member_email))
            .distinct())
        cur_member = None
        member_groups = []
        for member, group in query.yield_per(PER_YIELD):
            if cur_member and cur_member.member_email != member.member_email:
                yield 'membership', (cur_member, member_groups)
                member_groups = []
            cur_member = member
            member_groups.append(group)
        # BUGFIX: the final member's accumulated groups were never yielded
        # once the loop ended, silently dropping the last membership.
        if cur_member:
            yield 'membership', (cur_member, member_groups)

        # Policies last
        for policy_table in policies:
            self._table_exists_or_raise(policy_table)
            for policy in self.session.query(policy_table).all():
                yield 'policy', policy
|
# For each test case: read an n x m grid of '*' and '.', then count the
# total number of "spaceship" shapes: each '*' contributes 1 plus the
# height of the largest downward-growing triangle below it.
t = int(input())
while t > 0:
    n,m = map(int,input().split())
    arr = []
    for k in range(n):
        a = list(map(str,input().split()))[:m]
        arr.append(a)
    # convert the character grid to 1 ('*') / 0 ('.')
    for i in range(n):
        for j in range(m):
            if arr[i][j] == '*':
                arr[i][j] = 1
            elif arr[i][j] == '.':
                arr[i][j] = 0
    # bottom-up DP: a cell extends a triangle by the minimum of the three
    # cells below it (border rows/columns cannot extend, so are skipped)
    for i in range(n-2,-1,-1):
        for j in range(m-2,0,-1):
            if arr[i][j] == 1:
                arr[i][j] = 1+(min(arr[i+1][j-1],arr[i+1][j],arr[i+1][j+1]))
    # the answer is the sum of all DP values
    ans = 0
    for i in range(n):
        for j in range(m):
            ans = ans+arr[i][j]
    print(ans)
    t = t-1
|
"""added sentiment column
Revision ID: 2c7915255466
Revises:
Create Date: 2019-07-16 12:14:55.838697
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2c7915255466'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the 'mobiles' table and add sentiment/count_words columns to 'review'."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('mobiles',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('mobile_url', sa.String(), nullable=False),
    sa.Column('mobile_name', sa.String(), nullable=False),
    sa.Column('mobile_id', sa.String(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.add_column('review', sa.Column('count_words', sa.Integer(), nullable=True))
    op.add_column('review', sa.Column('sentiment', sa.Text(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the ``sentiment`` and ``count_words``
    columns from ``review`` and drop the ``mobiles`` table (reverse order
    of upgrade())."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('review', 'sentiment')
    op.drop_column('review', 'count_words')
    op.drop_table('mobiles')
    # ### end Alembic commands ###
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 11 22:18:46 2018
@author: ck
"""
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import time
import random
import tweepy
import os
import json
import io
from datetime import datetime
import re
# Twitter developer credentials.
# SECURITY NOTE(review): real API keys and tokens are hard-coded and committed
# here; they should be loaded from environment variables or a config file, and
# the exposed credentials should be revoked.
consumer_key = "4oqZjge7qM0n3WNftJiKHFtOF"
consumer_secret = "CZOzvRcdwFOzPZFoM5igXVGBbOBp7lQWBBtCRe76wuv738equP"
access_token = "1004411169568747520-7NBYDlDKlGXX9q5gjXasgRRo5p3HtT"
access_token_secret = "b3BSPhEfHGYCxuIaNPg1CFcJtKkCWnjIZESooDgT99GWL"
# Authenticate with the Twitter API.
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, retry_delay=10)
#트윗 크롤링을 날짜 기준으로 저장합니다.
def get_tweets(keyword, num_limit):
    """Search tweets for *keyword* and append each one, as an indented JSON
    object followed by a comma, to a per-day file under tmp_twitter/<keyword>/.

    :param keyword: search query string; also used as the output directory name.
    :param num_limit: maximum number of tweets to fetch.
    """
    out_root = "/Users/junha_lee/Documents/Junha/School/Projects/SentimentName/sentiment/tmp_twitter/" + keyword
    # Create the per-keyword output directory once, up front (the original
    # re-checked it on every tweet).
    if not os.path.isdir(out_root):
        os.mkdir(out_root)
    for tweet in tweepy.Cursor(api.search, q=keyword, since='2018-11-01', until='2018-12-10', lang="ko").items(num_limit):
        status = tweet._json
        record = {
            'id': status['id_str'],
            'permalink': "",
            'username': status['user']['name'],
            'text': status['text'],
            'date': status['created_at'],
            'retweets': status['retweet_count'],
            'favorites': status['favorite_count'],
            'mentions': status['entities']['user_mentions'],
            'hashtags': status['entities']['hashtags'],
            'geo': status['geo'],
        }
        # Convert Twitter's "Mon Nov 12 03:14:15 +0000 2018" timestamp into a
        # YYYY-MM-DD date used as the output file name.  The "+0000 " offset
        # is stripped first, as in the original, before strptime parsing.
        raw_date = re.sub(r"\+\d+\s", "", status['created_at'])
        formatted = "{:%Y-%m-%d}".format(datetime.strptime(raw_date, "%a %b %d %H:%M:%S %Y"))
        out_path = os.path.join(out_root, "{}.json".format(formatted))
        with open(out_path, 'a+', encoding="utf-8") as make_file:
            make_file.write(json.dumps(record, ensure_ascii=False, indent=2) + ',')
#크롤링 대상을 명시합니다.
def crawl():
    """Prompt for a keyword on stdin and fetch up to 100 matching tweets."""
    get_tweets(input(), 100)


# Script entry point.
if __name__ == '__main__':
    crawl()
|
# Reads numbers until the user answers 'n', then reports how many were
# entered, their average, and the largest and smallest values.
total = 0
count = 0
highest = 0
lowest = 0
answer = ''
while answer != 'n':
    value = int(input('Digite um número: '))
    count += 1
    total += value
    if count == 1:
        # The first value initializes both extremes.
        highest = lowest = value
    else:
        highest = max(highest, value)
        lowest = min(lowest, value)
    answer = str(input('Quer continuar? [S/N] ')).lower().strip()
    while answer not in ('s', 'n'):
        print('Opção inválida! Tente novamente.')
        answer = str(input('Quer continuar? [S/N] ')).lower().strip()
media = total / count
print(f'Você digitou {count} números e a média foi {media}')
print(f'O maior valor foi {highest} e o menor foi {lowest}')
|
#------------------------ LIBRERÍAS --------------------------------
import numpy as np
from PIL import Image
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup
from urllib.request import Request
from tabulate import tabulate
from itertools import zip_longest
import requests
import urllib.request
from io import BytesIO
import time
import os
from IPython.display import clear_output
#--------------------------------------------------------------------
# -------------------------- FUNCIONES ------------------------------
def Presentacion():
    """Print the ASCII-art splash screen, hold it for 7 seconds, then clear
    the notebook output."""
    print(" _____ _ _ ")
    print("|_ _| | | | | _ ")
    print(" | | _ __ | |_ ___ __ _ _ __ __ _ _ __ | |_ ___ ___ (_)")
    print(" | | | '_ \ | __| / _ \ / _` || '__| / _` || '_ \ | __| / _ \/ __| ")
    print(" _| |_ | | | || |_ | __/| (_| || | | (_| || | | || |_ | __/\__ \ _ ")
    print(" \___/ |_| |_| \__| \___| \__, ||_| \__,_||_| |_| \__| \___||___/(_)")
    print(" __/ | ")
    print(" |___/ ")
    print(" _ _ _ ___ _ ")
    print(" / | /_\ _ _ __| | _ _ ___ ___ | _ \ _ _ __ _ __| | ___ ")
    print(" | | _ / _ \ | ' \ / _` | | '_| / -_) (_-< | _/ | '_| / _` | / _` | / _ \ ")
    print(" |_|(_)/_/ \_\ |_||_| \__,_| |_| \___| /__/ |_| |_| \__,_| \__,_| \___/ ")
    print(" ___ _ _ _ ___ _ ")
    print("|_ ) _ | | ___ _ _ __ _ | |_ | |_ __ _ _ _ | _ \ _ _ (_) ___")
    print(" / / _ | || | / _ \ | ' \ / _` | | _| | ' \ / _` | | ' \ | / | || | | | |_ /")
    print("/___|(_) \__/ \___/ |_||_| \__,_| \__| |_||_| \__,_| |_||_| |_|_\ \_,_| |_| /__|")
    print(" ____ _ _ _____ ")
    print("|__ / | \| | ___ _ _ ___ ___ ___ _ _ |_ _| ___ _ _ ___ _ __ ___ __ _ ")
    print(" |_ \ _ | .` | / -_) | || | (_-< (_-< / -_) | '_| | | / -_) | ' \ / -_) | ' \ / -_) / _` |")
    print("|___/(_)|_|\_| \___| \_, | /__/ /__/ \___| |_| |_| \___| |_||_| \___| |_|_|_| \___| \__,_|")
    print(" |__/ ")
    # Keep the splash visible briefly, then wipe the notebook cell output.
    time.sleep(7)
    clear_output()
def installFont():
    """Ensure the 'bankgthd.ttf' font used by the word cloud is present in the
    working directory, downloading it once if missing."""
    if os.path.isfile('bankgthd.ttf'):
        print('')
    else:
        # BUG FIX: the original used the IPython shell magic `!wget ...`,
        # which is a syntax error in plain Python; download via the stdlib
        # instead (urllib.request is already imported at module level).
        urllib.request.urlretrieve(
            'https://www.fontsupply.com//fonts/bankgthd.ttf', 'bankgthd.ttf')
    clear_output()
def mapearMascara(val):
    """Remap a mask pixel value for WordCloud: 0 becomes 255 (the value
    WordCloud treats as masked-out); any other value passes through."""
    return 255 if val == 0 else val
def datosUser():
    """Print the found user's profile URL, display name and ID.

    Reads the module-level globals ``soup``, ``URL`` and ``userIdent``, and
    appends the scraped display name to the global ``names`` list.
    """
    name = soup.find('div', {'class': 'grid--cell fw-bold'}).text
    names.append(name)
    found_banner = [[f'USUARIO ENCONTRADO - URL\n {URL}']]
    user_banner = [[f'Nombre del Usuario: {name} \n ID: {userIdent}']]
    # fancy_grid is tabulate's boxed table style.
    print(tabulate(found_banner, tablefmt='fancy_grid', stralign='center'))
    print(tabulate(user_banner, tablefmt='fancy_grid', stralign='center') + '\n')
def buscarEtiquetas():
    """Scrape every page of the user's tags tab, collecting each tag with a
    positive score into the module-level ``listaTags`` / ``listaVotos`` lists."""
    # Pagination links of the tags tab; used to find how many pages exist.
    pagination = soupTag.find_all('a', class_='s-pagination--item js-pagination-item')
    if (len(pagination) == 0):  # no pagination widget -> single page
        ultimaPag = 1
    else:
        # The last element is the "next" link, so [-2] holds the final page number.
        ultimaPag = int(pagination[-2].text)
    paginaEtiquetas = URL + '?tab=tags&sort=votes&page='  # base URL of the tags tab
    iterator = 1
    while iterator <= ultimaPag:
        numPaginas = str(iterator)
        mostrarTags = requests.get(paginaEtiquetas + numPaginas)
        soupMostrar = BeautifulSoup(mostrarTags.content, 'html.parser')
        # The first table on the page holds the tag cells ('td' elements).
        for row in soupMostrar.findAll('table')[0].tbody.findAll('td'):
            vte = row.findAll('div', class_='answer-votes')[0]  # score cell
            ttb = row.findAll('a', class_='post-tag')[0]  # tag-name cell
            try:  # is the score a plain number or a "1k"-style string?
                vte2 = int(vte.text)
                validarNumero = True
            except Exception as e:
                vte2 = vte.text
                validarNumero = False
            if (validarNumero == False):
                # "Nk" -> N*1000: take the leading digit and scale it.
                ch = int(vte2[ 0 ])
                ch *= 1000
                vte2 = int(vte2.replace(vte2, f'{ch}'))
            if vte2 > 0:  # drop zero and negative scores
                listaVotos.append(vte2)
                listaTags.append(ttb.text)
        iterator += 1
def mostrarEtiquetas():
    """Render the collected tag scores as a 4-column 'score : tag' table.

    Reads the module-level globals ``listaVotos``, ``listaTags``, ``names``
    and ``URL`` filled in by datosUser()/buscarEtiquetas().
    """
    m3 = []
    # Reshape each flat list into rows of four, padding short rows with ' '.
    listaVtsModified = [iter(listaVotos)] * 4
    Matriz1 = list(zip_longest(*listaVtsModified, fillvalue=' '))
    listTagsModified = [iter(listaTags)] * 4
    Matriz2 = list(zip_longest(*listTagsModified, fillvalue=' '))
    print()
    ck = [f'PUNTUACIONES ETIQUETAS: {names[0]}']
    print(tabulate(ck, tablefmt='plain', stralign='center')+'\n')
    print(tabulate([[ f'Ver Usuario: {URL}'+'?tab=tags']],tablefmt='fancy_grid', stralign='center'))
    for i in range((int(len(listaVotos) / 4))):
        m3.append([])
        for j in range(4):
            # Pair each score with its tag as "score : tag".
            m3[i].append(f'{(Matriz1[i][j])}'+ ' : ' + str(Matriz2[i][j]))
    print(tabulate(m3,tablefmt='fancy_grid',stralign='center'))
def generarNube():
    """Build and display a word cloud of the user's tags, weighted by score,
    shaped by a cloud-icon mask image downloaded at call time."""
    maxWords = len(listaVotos)  # cap the cloud at one word per collected tag
    dic = dict(zip(listaTags, listaVotos))  # tag -> score frequencies
    url2 = 'https://image.flaticon.com/icons/png/512/23/23796.png'  # mask image
    response = requests.get(url2)
    cd = Image.open(BytesIO(response.content))
    cloudPNG = np.array(cd)  # image -> numpy array
    # WordCloud needs 255 where the mask is "outside"; remap 0 -> 255 via
    # mapearMascara (some mask images only vary between 0 and 1).
    maskCloud = np.ndarray((cloudPNG.shape[ 0 ], cloudPNG.shape[ 1 ]), np.int32)
    for i in range(len(cloudPNG)):
        maskCloud[ i ] = list(map(mapearMascara, cloudPNG[ i ]))
    wc = WordCloud(font_path='bankgthd.ttf', background_color='white', max_words = maxWords, mask=maskCloud).generate_from_frequencies(dic)
    plt.figure(figsize=[ 20,10])  # figure size for the rendered cloud
    userName = [[f'Nube de palabras del usuario: {names[0]}']]
    print(tabulate(userName, tablefmt='fancy_grid', stralign='center')+ '\n')
    plt.imshow(wc)  # draw the word cloud into the figure
    plt.axis("off")  # hide the axes/border
    plt.show()  # display the resulting figure
#-----------------------------------------------------------------------
# ------------- MAIN --------------------------------------------------
# Entry point: ask for a Stack Overflow (es) user id, scrape the profile, then
# loop over a small menu to show the tag table or the tag word cloud.
try:
    installFont()
    Presentacion()  # splash screen
    try :
        userid = input('Ingrese el ID del usuario\n ')
        entrada = int(userid)
        if entrada > 0:
            userIdent = str(userid)
            listaVotos = list()
            listaTags = list()
            names = list()
            URL = 'https://es.stackoverflow.com/users/' + userIdent  # profile URL
            pagina = requests.get(URL)
            pagTab = requests.get(URL + '?tab=tags&sort=votes&page=1')
            soup = BeautifulSoup(pagina.content, 'html.parser')
            soupTag = BeautifulSoup(pagTab.content, 'html.parser')
            # Placeholder div present when the profile has no content.
            userValidation = soup.find_all('div', class_='grid--cell mb16 profile-placeholder--image')
            try:
                # Raises (404) when the user does not exist.
                fhand = urllib.request.urlopen(URL+'?tab=tags')
                if len(userValidation) == 1:
                    print('El usuario no cuenta con etiquetas')
                else:
                    datosUser()
                    buscarEtiquetas()
                    if len(listaVotos) != 0:
                        try:
                            opcion = int(input('\n 1: Ver etiquetas \n 2: Ver nube de etiquetas \n 0: salir \n OPCION: '))
                            clear_output()
                            while opcion != 0:
                                if opcion == 1:
                                    mostrarEtiquetas()
                                if opcion == 2:
                                    generarNube()
                                opcion = int(input('\n 1: Ver etiquetas \n 2: Ver nube de etiquetas \n 0: salir \n OPCION: '))
                                clear_output()
                        except ValueError:
                            print('No válido, solo opción 1 | 2 | 0')
                    else:
                        print('El usuario tiene etiquetas, pero no posee puntuaciones.')
            except Exception as e:
                print('Error 404 - Página no encontrada')
    except ValueError :
        print('Solo ingrese dígitos enteros')
except KeyboardInterrupt:
    exit()
#--------------------------------------------------------------------------------------
|
# Generated by Django 3.0.6 on 2020-05-24 16:13
from django.db import migrations
class Migration(migrations.Migration):
    """Set the plural display name of the ``family`` model to 'entries'."""

    dependencies = [
        ('mdpapp', '0002_auto_20200524_1557'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='family',
            options={'verbose_name_plural': 'entries'},
        ),
    ]
|
def test_HDL_analysis_normal():
    """HDL of 80 mg/dL is classified as 'normal'."""
    from chol_analysis import HDL_analysis
    answer = HDL_analysis(80)
    expected = "normal"
    assert answer == expected


def test_HDL_analysis_borderline_low():
    """HDL of 40 mg/dL is classified as 'borderline low'.

    BUG FIX: both tests were previously named ``test_HDL_analysis``, so the
    second definition shadowed the first and only one of them ever ran.
    """
    from chol_analysis import HDL_analysis
    answer = HDL_analysis(40)
    expected = "borderline low"
    assert answer == expected
|
def triangular(n):
    """Return the nth triangular number, or 0 for out-of-range (n <= 0) values.

    Triangular numbers count the dots in an equilateral triangle laid out
    row by row:

        1st (1)   2nd (3)   3rd (6)
          *         **        ***
                    *         **
                              *

    Examples:
        triangular(0) == 0
        triangular(2) == 3
        triangular(3) == 6
        triangular(-10) == 0
    """
    # n*(n+1)//2 keeps the result an exact int; the previous (n**2+n)/2
    # returned a float, which loses precision for very large n.
    return n * (n + 1) // 2 if n > 0 else 0
|
from unittest import TestCase
class TestBase(TestCase):
    """Common base class for test cases; currently adds nothing beyond
    unittest.TestCase."""
    pass
|
# Classify an integer read from stdin as zero ("neutro"), positive, or negative.
value = int(input("Ingrese un numero: "))
if value == 0:
    print("Es neutro")
elif value > 0:
    print("Es positivo")
else:
    print("Es negativo")
|
#!/usr/bin/env python3
# ----------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016, Heiko Möllerke
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ----------------------------------------------------------------------
"""This module provides functions to work withs graphics. """
from operator import truediv
from jumpy.r2 import ParaxialRectangle, Vector
class Camera:
    """Transforms between display- and world-coordinates.

    The camera keeps a world-space rectangle whose size is the display size
    divided component-wise by the zoom scale; the rectangle's center is the
    point the camera looks at.
    """

    def __init__(self, display_size, scale=Vector(1, 1), look_at=Vector(0, 0)):
        world_size = map(truediv, display_size, scale)
        self._scale = Vector(*scale)
        self.rectangle = ParaxialRectangle((0, 0), world_size)
        self.rectangle.center = look_at

    @property
    def look_at(self):  # -> Vector
        """The world-coordinates the camera looks at."""
        return self.rectangle.center

    @look_at.setter
    def look_at(self, vector):
        """Set the world-coordinates the camera looks at."""
        self.rectangle.center = vector

    def transform_world_coordinate(self, vector):  # -> Vector
        """Transform a world-coordinate into the appropriate
        display-coordinate."""
        world_x, world_y = vector
        origin_x, origin_y = self.rectangle.bottom_left  # translation
        return Vector((world_x - origin_x) * self._scale.x,
                      (world_y - origin_y) * self._scale.y)

    def transform_display_coordinate(self, vector):  # -> Vector
        """Transform a display-coordinate into the appropriate
        world-coordinate."""
        display_x, display_y = vector
        origin_x, origin_y = self.rectangle.bottom_left  # translation
        return Vector(display_x / self._scale.x + origin_x,
                      display_y / self._scale.y + origin_y)
|
#insertion sort demo
def insertionsort(arr):
    """Sort *arr* in place in ascending order via insertion sort; returns arr."""
    for idx in range(1, len(arr)):
        current = arr[idx]
        pos = idx
        # Shift larger elements one slot right until current's spot is found.
        while pos > 0 and arr[pos - 1] > current:
            arr[pos] = arr[pos - 1]
            pos -= 1
        arr[pos] = current
    return arr
def sort(arr, type):
    """Dispatch to the named sorting algorithm; any unrecognized name returns
    arr unchanged.  (The parameter name 'type' shadows the builtin but is kept
    for caller compatibility.)"""
    return insertionsort(arr) if type == 'insertionsort' else arr
def main():
    """Read N integers from stdin, sort them, and print them space-separated."""
    count = int(input("get N: "))
    values = [int(input(str(idx) + ": ")) for idx in range(count)]
    print('Sorting :\n')
    values = sort(values, 'insertionsort')
    for value in values:
        print(value, end=' ')


main()
|
import os
import platform

###########################
# Initial input
#
# The faculty list is the initial input for the whole program.  It is built
# manually to ensure accuracy; university names and faculty names must match
# the names stored in the Scopus database.
# (The narrative below was previously stored as no-op string literals with
# typos; converted to comments — behavior is unchanged.)
faculty_list = "data/faculty_list.txt"
num_of_school = 101  # number of schools that will be analyzed (up to 101)

###########################
# Saved database
#
# Results fetched from Scopus are cached locally.  University information
# (university name and Scopus ID), faculty information (name, personal Scopus
# ID, affiliation name/ID, publication/citation counts, H-index) and search
# status (success or failure) are saved in chemrank.db.  Publication
# information (author, journal name, affiliation, year) is saved in
# publication.json.
db_fname = 'data/chemrank.db'
pub_fname = 'data/publication.json'

###########################
# Data-processing configuration
save_result = True
start_time = 2000  # starting year for "calc_medi_publication_part" and "calc_medi_high_impact_journals", default value is 2000
end_time = 2019  # ending year for "calc_medi_publication_part" and "calc_medi_high_impact_journals", default value is 2019

###########################
# Scopus API search key: apply for one on the Scopus API website and paste it
# here; more detail can be found at https://dev.elsevier.com/
apikey = "*********************"
|
# coding=utf-8
"""Version related views."""
# Module-level logger.  The original declared this import/getLogger pair twice
# back to back; the exact duplicate was removed (the surviving line is
# identical, so nothing can break).
import logging
logger = logging.getLogger(__name__)
import re
import zipfile
import StringIO
import pypandoc
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.views.generic import (
ListView,
CreateView,
DeleteView,
DetailView,
UpdateView,
RedirectView)
from django.http import HttpResponseRedirect, HttpResponse
from braces.views import LoginRequiredMixin, StaffuserRequiredMixin
from pure_pagination.mixins import PaginationMixin
from ..models import Version
from ..forms import VersionForm
class VersionMixin(object):
    """Mixin for all views to inherit which sets some standard properties."""
    model = Version  # implies -> queryset = Entry.objects.all()
    form_class = VersionForm
class VersionCreateUpdateMixin(VersionMixin, LoginRequiredMixin):
    """Mixin for views that do create or update operations."""

    def get_context_data(self, **kwargs):
        """Get the context data which is passed to a template.

        :param kwargs: Any arguments to pass to the superclass.
        :type kwargs: dict

        :returns: Context data which will be passed to the template.
        :rtype: dict
        """
        # BUG FIX: super() previously named VersionMixin, which skips this
        # class's own position in the MRO; cooperative super() calls must name
        # the defining class.
        context = super(VersionCreateUpdateMixin, self).get_context_data(
            **kwargs)
        return context

    def form_invalid(self, form):
        """Behaviour for invalid forms.

        :param form: Form which is being validated.
        :type form: ModelForm
        """
        return self.render_to_response(self.get_context_data(form=form))
class VersionListView(VersionMixin, PaginationMixin, ListView):
    """View for the paginated list of versions."""
    context_object_name = 'versions'
    template_name = 'version/list.html'
    paginate_by = 10

    def get_context_data(self, **kwargs):
        """Get the context data which is passed to a template.

        Adds ``num_versions`` (total count) and ``unapproved`` (False here —
        this is the approved-facing list; see PendingVersionListView).

        :param kwargs: Any arguments to pass to the superclass.
        :type kwargs: dict

        :returns: Context data which will be passed to the template.
        :rtype: dict
        """
        context = super(VersionListView, self).get_context_data(**kwargs)
        context['num_versions'] = self.get_queryset().count()
        context['unapproved'] = False
        return context

    def get_queryset(self):
        """Get the queryset for this view.

        NOTE(review): despite the auto-generated docstring it replaced, no
        approval filter is applied here — every Version is returned.

        :rtype: QuerySet
        """
        versions_qs = Version.objects.all()
        return versions_qs
class VersionDetailView(VersionMixin, DetailView):
    """A tabular list style view for a version."""
    context_object_name = 'version'
    template_name = 'version/detail.html'

    def get_context_data(self, **kwargs):
        """Get the context data which is passed to a template.

        :param kwargs: Any arguments to pass to the superclass.
        :type kwargs: dict

        :returns: Context data which will be passed to the template.
        :rtype: dict
        """
        context = super(VersionDetailView, self).get_context_data(**kwargs)
        return context

    def get_queryset(self):
        """Get the queryset for this view.

        NOTE(review): no approval filter is applied here — all versions are
        visible, contrary to the auto-generated docstring it replaced.

        :rtype: QuerySet
        """
        versions_qs = Version.objects.all()
        return versions_qs

    def get_object(self, queryset=None):
        """Get the object referenced by this view, tagging it with the
        requesting user (``request_user`` — presumably consumed downstream by
        templates; confirm before removing).

        :param queryset: An optional queryset from which the object should be
            retrieved.
        :type queryset: QuerySet

        :returns: A Version instance.
        :rtype: Version
        """
        obj = super(VersionDetailView, self).get_object(queryset)
        obj.request_user = self.request.user
        return obj
class VersionMarkdownView(VersionDetailView):
    """Return a version detail rendered as markdown (served as a download)."""
    template_name = 'version/detail.md'

    def render_to_response(self, context, **response_kwargs):
        """Render this version as markdown.

        :param context: Context data to use with template.
        :type context: dict
        :param response_kwargs: A dict of arguments to pass to the renderer.
        :type response_kwargs: dict

        :returns: A rendered template with mime type application/text.
        :rtype: HttpResponse

        NOTE(review): the ``mimetype`` kwarg only exists on old Django
        releases (newer ones use ``content_type``) — confirm before any
        framework upgrade.
        """
        response = super(VersionMarkdownView, self).render_to_response(
            context,
            mimetype='application/text',
            **response_kwargs)
        response['Content-Disposition'] = 'attachment; filename="foo.md"'
        return response
class VersionThumbnailView(VersionMixin, DetailView):
    """A contact sheet style list of thumbs per entry."""
    context_object_name = 'version'
    template_name = 'version/detail-thumbs.html'

    def get_context_data(self, **kwargs):
        """Get the context data which is passed to a template.

        :param kwargs: Any arguments to pass to the superclass.
        :type kwargs: dict

        :returns: Context data which will be passed to the template.
        :rtype: dict
        """
        context = super(VersionThumbnailView, self).get_context_data(**kwargs)
        return context

    def get_queryset(self):
        """Get the queryset for this view.

        NOTE(review): no approval filter is applied here — all versions are
        visible, contrary to the auto-generated docstring it replaced.

        :rtype: QuerySet
        """
        versions_qs = Version.objects.all()
        return versions_qs

    def get_object(self, queryset=None):
        """Get the object referenced by this view, tagging it with the
        requesting user (``request_user``).

        :param queryset: An optional queryset from which the object should be
            retrieved.
        :type queryset: QuerySet

        :returns: A Version instance.
        :rtype: Version
        """
        obj = super(VersionThumbnailView, self).get_object(queryset)
        obj.request_user = self.request.user
        return obj
class VersionDeleteView(LoginRequiredMixin, VersionMixin, DeleteView):
    """A view for deleting version objects.

    BUG FIX: LoginRequiredMixin was previously listed AFTER DeleteView, which
    places it after the generic view in the MRO so its dispatch() override
    never ran and the view was effectively unprotected; access mixins must
    precede the generic view.
    """
    context_object_name = 'version'
    template_name = 'version/delete.html'

    def get_success_url(self):
        """Get the url for when the operation was successful.

        :returns: A url.
        :rtype: str
        """
        return reverse('version-list')

    def get_queryset(self):
        """Versions deletable by this user: staff may delete any version,
        other users only the ones they created.

        :rtype: QuerySet
        """
        qs = Version.all_objects.all()
        if self.request.user.is_staff:
            return qs
        else:
            return qs.filter(creator=self.request.user)
class VersionCreateView(
        VersionCreateUpdateMixin, LoginRequiredMixin, CreateView):
    """A view for creating version objects.

    BUG FIX: LoginRequiredMixin was previously listed AFTER CreateView, so in
    the MRO its dispatch() check came after the generic view's and never ran;
    access mixins must precede the generic view.
    """
    context_object_name = 'version'
    template_name = 'version/create.html'

    def get_success_url(self):
        """Get the url for when the operation was successful.

        :returns: A url.
        :rtype: str
        """
        return reverse('pending-version-list')

    def form_valid(self, form):
        """Save the new version and redirect to the success url.

        :param form: A form object.
        :type form: model.Form

        :returns: HttpResponse object. The user is redirected to success url
            if the form is valid.
        :rtype: HttpResponse.
        """
        self.object = form.save(commit=False)
        self.object.save()
        return HttpResponseRedirect(self.get_success_url())
class VersionUpdateView(VersionCreateUpdateMixin, UpdateView):
    """View to update an existing version."""
    context_object_name = 'version'
    template_name = 'version/update.html'

    def get_form_kwargs(self):
        """Get the arguments passed to the form object.

        :returns: A dictionary of form arguments.
        :rtype: dict
        """
        kwargs = super(VersionUpdateView, self).get_form_kwargs()
        return kwargs

    def get_queryset(self):
        """Get the queryset for this view.

        NOTE(review): returns the manager itself rather than ``.all()`` and
        applies no approval filter, contrary to the auto-generated docstring
        it replaced.

        :rtype: QuerySet
        """
        versions_qs = Version.objects
        return versions_qs

    def get_success_url(self):
        """Get the url for when the operation was successful.

        :returns: A url.
        :rtype: str
        """
        return reverse('version-list')
class PendingVersionListView(
        VersionMixin, PaginationMixin, ListView, StaffuserRequiredMixin):
    """List all unapproved versions - staff see all.

    NOTE(review): StaffuserRequiredMixin is listed AFTER ListView, so its
    dispatch() check sits behind the generic view in the MRO and is never
    reached — this view is not actually staff-restricted.  The else-branch in
    get_queryset suggests non-staff users may be meant to see their own
    pending versions; confirm the intent before reordering the bases.
    """
    context_object_name = 'versions'
    template_name = 'version/list.html'
    paginate_by = 10

    def get_context_data(self, **kwargs):
        """Get the context data which is passed to a template.

        Adds ``num_versions`` (count of this queryset) and ``unapproved``
        (True — drives the template's pending presentation).

        :param kwargs: Any arguments to pass to the superclass.
        :type kwargs: dict

        :returns: Context data which will be passed to the template.
        :rtype: dict
        """
        context = super(PendingVersionListView, self).get_context_data(**kwargs)
        context['num_versions'] = self.get_queryset().count()
        context['unapproved'] = True
        return context

    def get_queryset(self):
        """Unapproved versions: all of them for staff, only the requesting
        user's own for everyone else.

        :rtype: QuerySet
        """
        versions_qs = Version.unapproved_objects.all()
        if self.request.user.is_staff:
            return versions_qs
        else:
            return versions_qs.filter(creator=self.request.user)
class ApproveVersionView(VersionMixin, StaffuserRequiredMixin, RedirectView):
    """A view to allow staff users to approve a given version."""
    permanent = False
    query_string = True
    pattern_name = 'pending-version-list'

    def get_redirect_url(self, pk):
        """Mark the version identified by *pk* as approved (404 if it is not
        currently unapproved), then redirect back to the pending list.

        :param pk: The primary key of the object being approved.
        :type pk: int

        :returns: A url.
        :rtype: str
        """
        version_qs = Version.unapproved_objects.all()
        version = get_object_or_404(version_qs, pk=pk)
        version.approved = True
        version.save()
        return reverse(self.pattern_name)
class VersionDownload(VersionMixin, StaffuserRequiredMixin, DetailView):
    """A view to allow staff users to download a version page in RST format,
    zipped together with the images it references."""
    template_name = 'version/detail-content.html'

    def render_to_response(self, context, **response_kwargs):
        """
        Returns a ZIP archive containing an RST document for a project
        version page plus its referenced images.
        """
        version_obj = context.get('version')
        # set the context flag for 'rst_download'
        context['rst_download'] = True
        # render the template
        myDocument = self.response_class(
            request=self.request,
            template=self.get_template_names(),
            context=context,
            **response_kwargs
        )
        # convert the html to rst
        converted_doc = pypandoc.convert(
            myDocument.rendered_content, 'rst', format='html')
        converted_doc = converted_doc.replace('/media/images/', 'images/')
        # prepare the ZIP file
        myZipFile = self._prepare_zip_archive(converted_doc, version_obj)
        # Grab the ZIP file from memory, make response with correct MIME-type
        response = HttpResponse(
            myZipFile.getvalue(), mimetype="application/x-zip-compressed")
        # ..and correct content-disposition
        response['Content-Disposition'] = (
            'attachment; filename="{}-{}.zip"'.format(
                version_obj.project.name, version_obj.name)
        )
        return response

    def _prepare_zip_archive(self, document, version_obj):
        """
        For the given *document* (RST text) prepare an in-memory ZIP file with
        the document and its referenced images.

        BUG FIX: this helper previously referenced ``converted_doc`` and
        ``version_obj``, which only existed as locals of render_to_response,
        so every call raised NameError; the needed values are now parameters.
        """
        # create in memory file-like object
        myTmpFile = StringIO.StringIO()
        # grab all of the images from the document
        myImages = re.findall(r'images.+', document)
        # create the ZIP file
        with zipfile.ZipFile(myTmpFile, 'w') as myzip:
            # write all of the image files (read from disk)
            for image in myImages:
                myzip.write(
                    './media/{0}'.format(image),
                    '{0}'.format(image)
                )
            # write the actual RST document
            myzip.writestr(
                '{}-{}.rst'.format(
                    version_obj.project.name, version_obj.name),
                document)
        return myTmpFile
|
def multiplication_table(n):
    """Print the multiplication table of *n* from 1*n up to 10*n."""
    for factor in range(1, 11):
        print(factor, '*', n, '=', factor * n)
# Prompt for a number and print its table via multiplication_table().
number = int(input("Enter the number"))
print("The multiplication Table of %d is" % number)
multiplication_table(number)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'

# Parameterized UPDATE template for public.nvl_polygon: $1 is the location id
# and $2 the user id (driver-level positional parameters); the bare {}
# placeholder is presumably filled with extra SET clauses via str.format
# before execution — confirm at the call site.
update_nvl_polygon_element_query = """
UPDATE public.nvl_polygon AS npg SET
user_id = $2::BIGINT,
{}
deleted = TRUE
WHERE npg.location_id = $1::BIGINT RETURNING *;
"""
|
import csv
class Csvreader:
    """Parses an apartment-consumption CSV (Python 2) into a dict keyed by
    each apartment's ISE number."""
    def __init__(self,csvfile):
        # csvfile: path to the CSV file to read.
        self.csvfile = csvfile
    def reader(self):
        """Return a csv.reader over the file ('rb' is the Python 2 csv idiom).

        NOTE(review): the file handle opened here is never closed.
        """
        csvfileopen = open(self.csvfile,'rb')
        reader = csv.reader(csvfileopen)
        return reader
    def dictOfAllApartments(self):
        """Build {ise: {...}} with building, yearly consumption figures, room
        count and residence type for every apartment row.  '#' rows are
        skipped and 'BATIMENT n' rows switch the current building."""
        dictOfFlats = {}
        read = self.reader()
        batiment = 0
        for line in read:
            if line[0].startswith("#"): continue  # comment row
            if line[0].startswith("BATIMENT"):
                # Remember the current building number for following rows.
                batiment = line[0].split(" ")[1]
                continue
            print line[0], line[1]
            if not len(line) ==9:
                print("line too short")
                continue
            if line[1].startswith("Blanchisserie"): continue  # skip laundry room
            apartment_ise = line[2] #line[1].split(" ")[1]
            dictOfFlats[apartment_ise] = {}
            dictOfFlats[apartment_ise]["building"] = batiment
            dictOfFlats[apartment_ise]["ISE"] = line[2]
            dictOfFlats[apartment_ise]["number"] = line[1].split(" ")[1]
            # Strip ',' (presumably thousands separators) before float().
            dictOfFlats[apartment_ise]["conso_2014"] = float(line[3].replace(",",""))
            dictOfFlats[apartment_ise]["conso_2015"] = float(line[4].replace(",",""))
            dictOfFlats[apartment_ise]["conso_2016"] = float(line[5].replace(",",""))
            dictOfFlats[apartment_ise]["conso_2017"] = float(line[6].replace(",",""))
            rooms = line[7]
            if line[7].startswith("Studio"):
                rooms=1
            elif line[7].startswith("Aucun"):
                rooms = 0
            else:
                rooms=line[7].split(" ")[0]  # e.g. "3 pieces" -> "3"
            type = ""
            if len(line[8].split(" ") )== 2:
                if line[8].split(" ")[1] == "principale":
                    type = "principale"
                elif line[8].split(" ")[1] == "secondaire":
                    type = "secondaire"
            dictOfFlats[apartment_ise]["type"] = type
            dictOfFlats[apartment_ise]["rooms"] = rooms
        return dictOfFlats
|
# *_* coding=utf8 *_*
#!/usr/bin/env python
# Default application settings as (key, value) pairs, consumed as a dict.
config = [
    ("redis_host", "127.0.0.1"),
    ("debug", True),
    ("redis_cache_db", 0),
    ("redis_session_db", 1),
    ("redis_port", 6379),
    ("backend_expire_seconds", 300),
    ("http_listen_port", 80),
    ("site_host", "www.gg654.com"),
    ("session_expire_seconds", 3600),
    ("mysql_host", "127.0.0.1"),
    ("mysql_user", "root"),
    # NOTE(review): key is misspelled ("pasword"), but callers look it up by
    # this exact spelling — fix both sides together if renaming.
    ("mysql_pasword", "tang"),
    ("mysql_db", "unreal"),
]
class Config(object):
    """Attribute-style view over the module-level ``config`` key/value table;
    unknown attribute names resolve to None."""

    def __init__(self):
        self.data = dict(config)

    def __getattr__(self, name):
        # __getattr__ only fires for attributes not found normally, so
        # 'data' itself is served by regular lookup; missing config keys
        # fall back to None via dict.get.
        return self.data.get(name)


# Shared singleton used throughout the application.
CONF = Config()
|
# -*- coding: utf-8 -*-
__author__ = 'lish'
import bs4
import re,json,os,codecs,hashlib
import time,datetime
import urllib2,requests,MySQLdb
import StringIO, gzip
import sys
reload(sys)
sys.setdefaultencoding('utf8')
# base_path='/opt/www/ec_con'
# Public base URL that generated guide assets are served from.
base_url='http://s.haohuojun.com/'
# Directory containing this script; used as the root for output files.
base_path=os.path.split( os.path.realpath( sys.argv[0] ) )[0]
# print base_path
# NOTE(review): 'global' at module scope is a no-op; kept as-is.
global headers
# Browser-like HTTP headers reused by every request in this module.
headers={
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.8',
};
def gzdecode(data) :
compressedstream = StringIO.StringIO(data)
gziper = gzip.GzipFile(fileobj=compressedstream)
data2 = gziper.read()
return data2
# Connect to the MySQL database.
def linkSQL(host,user,passwd,db):
    """Open a MySQL connection (utf8) and expose both the connection and a
    cursor as module globals for the other helpers; returns the connection."""
    global cursor,conn
    conn=MySQLdb.connect(host=host,user=user,passwd=passwd,charset="utf8",db=db)
    cursor = conn.cursor()
    return conn
def release_cdn(cdnfile,type):
    """POST a cache-refresh request for *cdnfile* to the DNion CDN push API.

    type: 0 when cdnfile is a directory, 1 when it is a single URL.

    NOTE(review): the API secret is hard-coded here (double-hashed below);
    it should live in configuration instead.
    """
    passwd=hashlib.md5(hashlib.sha1('KEUswIa+Tc5/L').hexdigest()).hexdigest()
    url = 'http://push.dnion.com/cdnUrlPush.do'
    data = dict(
    username ='51ss',
    password = passwd,
    url = cdnfile,
    type=str(type) # flag: url is a directory (type=0) or a URL (type=1)
    )
    r = requests.post(url,data=data)
    # print r.headers
    print r.status_code
# Parses one guide's structure and the related basic information.
def AnalyzeGuides(guide_id,isChange=False):
    """Fetch guide *guide_id* from the liwushuo API, download its cover image,
    extract the goods ids it references, and append guide/goods records to
    flat files under <base_path>/guides/.

    Returns the list of goods ids; when *isChange* is truthy, returns
    (goods_ids, info_fields) instead and skips writing the infos file.
    On any error an empty list is returned.
    """
    try:
        # mycreated_at=''
        # if len(guide_id)==2 and isinstance(guide_id,basestring)==False:
        #     new_guide_id= tuple(guide_id)
        #     guide_id=new_guide_id[0]
        #     mycreated_at=new_guide_id[1]
        goods_ids=[]
        guide_id=str(guide_id)
        print 'guide_id:', guide_id
        guide_path=base_path+'/guides/'+str(guide_id)+'/img'
        isExists=os.path.exists(guide_path)
        if not isExists:
            os.makedirs(guide_path)
        guide_base_path=base_path+'/guides/'+str(guide_id)
        # Fetch the guide detail page from the API.
        guidepage_url='http://api.liwushuo.com/v2/posts/'+str(guide_id)
        # print guidepage_url
        req = urllib2.Request(guidepage_url,headers=headers)
        guidepage_content = urllib2.urlopen(req).read()
        guidepage_bejsons=json.loads(guidepage_content)
        guidepage_data=guidepage_bejsons['data']
        guide_cover_image_url=guidepage_data['cover_image_url']
        # Download the guide's cover image.
        cover_image_path=guide_path+'/cover'+str(guide_id)+'.jpg'
        # isExists=os.path.exists(cover_image_path)
        # if not isExists:
        #     os.makedirs(cover_image_path)
        # print cover_image_url
        cover_content= urllib2.urlopen(guide_cover_image_url)
        fec = open(cover_image_path,'wb')
        fec.write(cover_content.read())
        fec.close()
        guide_content_html =guidepage_data['content_html']
        # Basic information of the guide page.
        check_guide_id=guidepage_data['id']
        if str(check_guide_id)==str(guide_id):
            comments_count=guidepage_data['comments_count']
            liked=guidepage_data['liked']
            likes_count=guidepage_data['likes_count']
            created_at=guidepage_data['created_at']
            share_msg=guidepage_data['share_msg'].replace('礼物说','好货君')
            # shares_count=guidepage_data['shares_count']
            short_title=guidepage_data['short_title']
            status=guidepage_data['status']
            template=guidepage_data['template']
            title=guidepage_data['title']
            updated_at=guidepage_data['updated_at']
            guide_cover_url=base_url+'guides/'+str(guide_id)+'/img/cover'+str(guide_id)+'.jpg'
            guidescontent_path=base_path+'/guides/guidescontent'
            soup=bs4.BeautifulSoup(guide_content_html,"html.parser")
            # print soup.prettify()
            if soup.findAll('div','like-hert-widget')!=[]:
                i=0
                for para1 in soup.findAll('div','like-hert-widget'):
                    # Goods id referenced by this guide widget.
                    goods_id=str(para1['data-goods-id'])
                    goods_ids+=[str(goods_id)]
                    i+=1
                    guides_content=guide_id+'|'+str(goods_id)+'|'+str(i)
                    # print guides_content
                    fec=codecs.open(guidescontent_path,'a+','utf-8')
                    fec.write(guides_content+'\n')
                    fec.close()
            elif soup.findAll('div','item-info')!=[]:
                try:
                    j=0
                    for para1 in soup.findAll('div','item-info'):
                        # Goods id referenced by this guide item.
                        goods_id=str(para1['data-id'])
                        goods_ids+=[str(goods_id)]
                        j+=1
                        guides_content=guide_id+'|'+str(goods_id)+'|'+str(j)
                        # print guides_content
                        fec=codecs.open(guidescontent_path,'a+','utf-8')
                        fec.write(guides_content+'\n')
                        fec.close()
                    # print goods_ids
                except Exception, e:
                    print '攻略的商品ID列表获取失败!!!!!!'
                    print e
            if goods_ids!=[]:
                realcreated_at=created_at
                # print realcreated_at,created_at,mycreated_at
                infos=str(comments_count)+'|'+str(guide_id)+'|'+str(liked)+'|'+str(likes_count)+'|'+str(realcreated_at)+'|'+str(share_msg)+'|'+str(short_title)+'|'+str(status)+'|'+str(template)+'|'+str(title)+'|'+str(updated_at)+'|'+str(guide_cover_url)
                if isChange==False :
                    infos_path=base_path+'/guides/infos'
                    # print infos
                    fi=open(infos_path,'a+')
                    fi.write(infos+'\n')
                    fi.close()
                    return goods_ids
                else:
                    return goods_ids,infos.split('|')
            else:
                goods_ids=[]
                return goods_ids
    except Exception, e:
        print e
        goods_ids=[]
        return goods_ids
# To keep guide HTML files uniform, the guide pages are regenerated from the
# database (after guides/goods have been imported) by this function.
def creatGuidesHtml(guideids):
    """Generate a standard-format HTML page for every guide id in *guideids*.

    For each guide, reads its ordered goods (name, brief, price, banner
    count) from MySQL, renders one <section> per product, writes
    base_path/guides/html/<id>.html and finally asks the CDN to refresh the
    guides/html directory once.

    NOTE(review): all queries go through the module-global `cursor`; the
    `conn` created from linkSQL() below is never used directly - confirm the
    global cursor targets the same database.
    """
    host="100.98.73.21"
    user="commerce"
    passwd="Vd9ZcDSoo8eHCAVfcUYQ"
    conn=linkSQL(host,user,passwd,'ec_con')
    for guideid in guideids:
        section_cont=''
        # Ordered goods of this guide joined with each good's banner count.
        ssql="""
        SELECT aa.bannernum, bb.*
        FROM ( SELECT goods_id, count(DISTINCT banner_url) bannernum
        FROM con_goods_banner GROUP BY goods_id) aa,
        ( SELECT b.rn, a.goods_id, a.goods_name, a.goods_brief, a.goods_price
        FROM con_goods a,
        ( SELECT content_id, rn FROM public_db.tmp_con_guide_content WHERE guide_id = '"""+str(guideid)+"""' ORDER BY rn) b
        WHERE a.goods_id = b.content_id) bb
        WHERE aa.goods_id = bb.goods_id ORDER BY bb.rn
        """
        n = cursor.execute(ssql)
        goods_num=0
        for row in cursor.fetchall():
            img_num=row[0]
            goods_num+=1
            goodsid=row[2]
            goodsname=row[3]
            goodsbrief=row[4]
            goodsprice=row[5]
            banners_sql = 'select DISTINCT banner_url from ec_con.con_goods_banner where goods_id='+str(goodsid)
            n = cursor.execute(banners_sql)
            bannerurls=[row[0] for row in cursor.fetchall()]
            img_cont=''
            imglog=0
            # At most six banner images per product, lazy-loaded via data-src.
            while imglog < img_num and imglog<=5:
                img_cont+='<img src="http://m.haohuojun.com/src/img/detailImg.png" data-src="'+str(bannerurls[imglog])+'" >'
                imglog+=1
            # One numbered product section; goodsprice is stored in cents.
            section_cont+="""
            <section class="m-introBK" >
            <h2 class="title" >
            <span class="number" >"""+str(goods_num)+"""</span>
            <span >"""+str(goodsname)+"""</span>
            </h2>
            <p class="brief" >"""+str(goodsbrief)+"""</p>
            <div class="j_imgsWrap imgsWrap">
            <a href="haohuojun:///content?type=1&content_id="""+str(goodsid)+"""" class="f-clearfix">
            """+img_cont+"""
            </a>
            <div class="swipeNav"></div>
            </div>
            <div class="bar f-clearfix" >
            <span class="price f-fl" >¥"""+str(float(goodsprice)/100)+"""</span>
            <a href="haohuojun:///content?type=1&content_id="""+str(goodsid)+"""" class="u-btn f-fr" >查看详情</a>
            </div>
            </section>
            """
        html_guide_content="""
        <!DOCTYPE html>
        <html>
        <head>
        <meta charset="UTF-8">
        <meta name="viewport" content="width=device-width,initial-scale=1,minimum-scale=1,maximum-scale=1,user-scalable=no" />
        <title>好货君攻略详情</title>
        <link rel="stylesheet" type="text/css" href="http://m.haohuojun.com/src/css/temp.css">
        </head>
        <body>
        <div >
        """+str(section_cont)+"""
        </div>
        <script src="http://m.haohuojun.com/src/js/modules/swipe.js" type="text/javascript" charset="utf-8"></script>
        <script src="http://m.haohuojun.com/src/js/template.js" type="text/javascript" charset="utf-8"></script>
        </body>
        </html>
        """
        # Only write the file when at least one product section was rendered.
        if "section" in html_guide_content:
            path=base_path+'/guides/html/'+str(guideid)+'.html'
            f=open(path,'w')
            f.write(html_guide_content)
            f.close()
    # One CDN refresh for the whole directory after all pages are written.
    release_cdn(base_url+'guides/html',0)
# Parse one goods item: download its images, rebuild its detail HTML, and
# record its basic metadata.
def AnalyzeGoods(goods_id):
    """Fetch goods *goods_id* from the liwushuo API.

    Side effects: downloads banner and detail images under
    base_path/goods/<id>/img, rewrites the detail HTML to lazy-load the
    locally hosted copies, writes base_path/goods/html/<id>.html and appends
    the pipe-separated metadata line to base_path/goods/infos.

    Returns the metadata as a list of fields; returns None (implicitly) when
    the returned id does not match or when any error occurs (printed).
    """
    try:
        goods=str(goods_id)
        print '正在处理goods_id:',goods_id
        goods_path =base_path+'/goods/'+str(goods_id)+'/img'
        isExists=os.path.exists(goods_path)
        if not isExists:
            os.makedirs(goods_path)
        ## Goods detail API endpoint.
        goods_url='http://api.liwushuo.com/v2/items/'+str(goods_id)
        req = urllib2.Request(goods_url,headers=headers)
        goods_page_content = urllib2.urlopen(req,timeout=3).read()
        goodspage_bejson=json.loads(goods_page_content)
        goodspage_data=goodspage_bejson['data']
        check_goodid=goodspage_data['id']
        if str(check_goodid)==str(goods_id):
            ### Download the goods banner images.
            goodsbanner_urls =goodspage_data['image_urls']
            i=1
            for goodsbanner_url in goodsbanner_urls:
                try:
                    goodsbanner_path=goods_path+'/bannar'+str(i)+'.jpg'
                    goodsbanner_cont= urllib2.urlopen(goodsbanner_url,timeout=3).read()
                    fgb = open(goodsbanner_path,'wb')
                    fgb.write(goodsbanner_cont)
                    fgb.close()
                    # Record the public URL of each saved banner image.
                    fgbu = open(base_path+'/goods/bannarurl','a+')
                    fgbu.write(str(goods_id)+'|'+base_url+'goods/'+str(goods_id)+'/img/bannar'+str(i)+'.jpg\n')
                    fgbu.close()
                    i+=1
                except Exception, e:
                    print e
            ##### Basic goods metadata. '|' is the field separator in the infos
            ##### file, so free-text fields have '|' and newlines replaced.
            category_id=goodspage_data['category_id']
            comments_count=goodspage_data['comments_count']
            cover_image_url=base_url+'goods/'+str(goods_id)+'/img/bannar1.jpg'
            created_at=goodspage_data['created_at']
            description=goodspage_data['description'].replace('\n','<br>').replace('|','&')
            favorited=goodspage_data['favorited']
            favorites_count=goodspage_data['favorites_count']
            liked=goodspage_data['liked']
            likes_count=goodspage_data['likes_count']
            name=goodspage_data['name'].replace('\n','<br>').replace('|','&')
            price=goodspage_data['price']
            purchase_id=goodspage_data['purchase_id']
            purchase_status=goodspage_data['purchase_status']
            purchase_type=goodspage_data['purchase_type']
            purchase_url=goodspage_data['purchase_url']
            shares_count=goodspage_data['shares_count']
            updated_at=goodspage_data['updated_at']
            source_type=goodspage_data['source']['type']
            subcategory_id=goodspage_data['subcategory_id']
            ### Rebuild the detail HTML around the locally hosted images.
            goodsdetail_oldhtml= goodspage_data['detail_html']
            soup=bs4.BeautifulSoup(goodsdetail_oldhtml,'lxml')
            goodsdetail_htmlbody=soup.body.findAll('div','detail-container')[0]
            i=0
            j=0
            for body_imgurl in goodsdetail_htmlbody.findAll('img'):
                i+=1
                good_img_url= body_imgurl['src']
                if good_img_url!='':
                    #### Download each detail image (skip when already present).
                    good_img_path=goods_path+'/'+str(i)+'.jpg'
                    isExists=os.path.exists(good_img_path)
                    if not isExists:
                        try:
                            good_img= urllib2.urlopen(good_img_url,timeout=3)
                            f = open(good_img_path,'wb')
                            f.write(good_img.read())
                            f.close()
                        except Exception, e:
                            # Dead image link: fall back to the next unused banner image.
                            if j<len(goodsbanner_urls):
                                j+=1
                                good_img_url=goodsbanner_urls[j-1]
                                good_img= urllib2.urlopen(good_img_url,timeout=3)
                                f = open(good_img_path,'wb')
                                f.write(good_img.read())
                                f.close()
                                print '图片链接地址无效!已使用banner 图替代'
                            else:
                                print '图片链接地址无效!已没有多余图片替代!!',good_img_url
                    # Point src at a placeholder and data-src at our copy (lazy load).
                    body_imgurl['data-src']=base_url+'goods/'+str(goods_id)+'/img/'+str(i)+'.jpg'
                    body_imgurl['src']="http://m.haohuojun.com/src/img/detailImg.png"
            goodsdetail_htmlbodyscript='<script src="http://m.haohuojun.com/src/js/template.js" type="text/javascript" charset="utf-8"></script>'
            goodsdetail_htmlbodyscript=bs4.BeautifulSoup(goodsdetail_htmlbodyscript,'lxml')
            goodsdetail_htmlbodyscript= goodsdetail_htmlbodyscript.head.script
            goodsdetail_html='<!DOCTYPE html><html><head><meta charset="utf-8"/> <meta content="webkit" name="renderer"/> <meta content="telephone=no" name="format-detection"/> <meta content="IE=Edge" http-equiv="X-UA-Compatible"/> <meta content="yes" name="apple-mobile-web-app-capable"/> <meta content="black" name="apple-mobile-web-app-status-bar-style"/> <meta content="width=device-width, user-scalable=no,maximum-scale=1.0,initial-scale=1" id="vp" name="viewport"/> <title> 商品详情 </title> <link href="http://m.haohuojun.com/src/css/temp.css" rel="stylesheet" type="text/css"/></head><body></body></html>'
            goodsdetail_html=bs4.BeautifulSoup(goodsdetail_html,'lxml')
            goodsdetail_html.body.append(goodsdetail_htmlbody)
            goodsdetail_html.body.append(goodsdetail_htmlbodyscript)
            # Flatten to one line; strip '|' because it is the infos separator.
            goodsdetail_htmlcont=str(goodsdetail_html).replace('\n','').replace('\r','').replace('|','&')
            html_path=base_path+'/goods/html/'+str(goods_id)+'.html'
            fh=open(html_path,'w')
            fh.write(goodsdetail_htmlcont)
            fh.close()
            infos=str(category_id)+'|'+str(comments_count)+'|'+str(cover_image_url)+'|'+str(created_at)+'|'+str(description)+'|'+str(favorited)+'|'+str(favorites_count)+'|'+str(goods_id)+'|'+str(liked)+'|'+str(likes_count)+'|'+str(name)+'|'+str(price)+'|'+str(purchase_id)+'|'+str(purchase_status)+'|'+str(purchase_type)+'|'+str(purchase_url)+'|'+str(shares_count)+'|'+str(source_type)+'|'+str(subcategory_id)+'|'+str(updated_at)
            #### Append the metadata line to the shared infos file.
            infos_path=base_path+'/goods/infos'
            fi=open(infos_path,'a+')
            fi.write(infos+'\n')
            fi.close()
            return infos.split('|')
    except Exception, e:
        print e
# Parse one topic (collection): its metadata and the guides it contains;
# backs the topic-related rows in the database and files on the server.
def AnalyzeTopic(topic_id,location):
    """Fetch topic *topic_id*, record its metadata and contained guides.

    *location* tags where the topic was found (1 = selection-page long
    banner, 2 = square banner, 3 = class page) and is embedded in the cover
    image filename. Appends metadata to topics/infos and one
    "topic|guide|position" line per guide to topics/topicscontent.

    Returns the guide ids of the last processed page (empty when the topic
    is filtered out). Re-raises any exception.
    """
    try:
        topicsinfos_path=base_path+'/topics/infos'
        topicscontent_path=base_path+'/topics/topicscontent'
        guide_ids=[]
        i=0
        # NOTE(review): range(0,10,10) yields a single offset (0), so only the
        # first page of posts is fetched - confirm whether paging was intended.
        for offset in range(0,10,10):
            topic_url='http://api.liwushuo.com/v2/collections/'+str(topic_id)+'/posts?gender=1&generation=1&limit=10&offset='+str(offset)
            topic_content=requests.get(topic_url,timeout=3).text
            topicpage_bejson=json.loads(topic_content)
            topicpage_date=topicpage_bejson['data']
            #### Download the topic cover image (only once).
            cover_image_url=topicpage_date['cover_image_url']
            cover_image_path=base_path+'/topics/img/topic_'+str(location)+'_'+str(topic_id)+'.jpg'
            isExists=os.path.exists(cover_image_path)
            if not isExists:
                cover_content= urllib2.urlopen(cover_image_url)
                ftc = open(cover_image_path,'wb')
                ftc.write(cover_content.read())
                ftc.close()
            ### Topic metadata ('|' is the field separator, hence the replaces).
            posts_count=topicpage_date['posts_count']
            status=topicpage_date['status']
            subtitle=topicpage_date['subtitle'].replace('\n','<br>').replace('|','&')
            title=topicpage_date['title'].replace('\n','<br>').replace('|','&')
            updated_at=topicpage_date['updated_at']
            check_topicid=topicpage_date['id']
            cover_image_url=base_url+'topics/img/topic_'+str(location)+'_'+str(topic_id)+'.jpg'
            # Skip "话题" (discussion) topics and mismatched ids.
            if '话题' not in title and str(topic_id) == str(check_topicid):
                collection_infos_content=str(topic_id)+'|'+str(title)+'|'+str(posts_count)+'|'+str(subtitle)+'|'+str(status)+'|'+str(cover_image_url)+'|'+str(updated_at)+'|'+str(location)
                fti=codecs.open(topicsinfos_path,'a+','utf-8')
                fti.write(collection_infos_content+'\n')
                fti.close()
                #### The guides listed under this topic.
                topicpage_data_posts=topicpage_date['posts']
                guide_ids=[]
                for topicpage_data_post in topicpage_data_posts:
                    guide_id= topicpage_data_post['id']
                    guide_ids+=[str(guide_id)]
                    i+=1
                    topic_content=str(topic_id)+'|'+str(guide_id)+'|'+str(i)
                    ftc=codecs.open(topicscontent_path,'a+','utf-8')
                    ftc.write(topic_content+'\n')
                    ftc.close()
                    print '专题ID:'+str(topic_id)+'-攻略ID:'+str(guide_id)
        return guide_ids
    except Exception, e:
        raise e
# Parses the liwushuo "selection" page, which we split into three blocks:
# block1: the rectangular banner topics
# block2: the square banner topics
# block3: everything else (the mixed guide/topic feed); to limit the crawl it
#         takes a look-back window parameter tdays.
class crawlSelectionGuides(object):
    """Crawler for the selection page; each dealBlockN returns guide ids."""
    def dealBlock1(self):
        """Topics behind the rectangular banners.

        Downloads each banner image once and analyzes each topic.
        NOTE(review): returns only the LAST banner's guide list, and raises
        UnboundLocalError when there are no banners - confirm intended.
        """
        print '处理精选页面中的长方形专题中的攻略...'
        topics_path=base_path+'/topics/img'
        block1_url='http://api.liwushuo.com/v2/banners?'
        block1_content = requests.get(block1_url,timeout=3).text
        block1page_bejson=json.loads(block1_content)
        block1_databanners=block1page_bejson['data']['banners']
        for block1_databanner in block1_databanners:
            topic_id=block1_databanner['target_id']
            selection_block1_guides=AnalyzeTopic(str(topic_id),1)
            # Save the banner image once per topic.
            selection_block1_imgurl=block1_databanner['image_url']
            selection_block1_coverpath=topics_path+'/topic_1_'+str(topic_id)+'.jpg'
            isExistsTopic=os.path.exists(selection_block1_coverpath)
            if not isExistsTopic:
                selection_block1_covercont= urllib2.urlopen(selection_block1_imgurl)
                fb = open(selection_block1_coverpath,'wb')
                fb.write(selection_block1_covercont.read())
                fb.close()
        return selection_block1_guides
    def dealBlock2(self):
        """Topics behind the square (secondary) banners; separate endpoint.

        Only banners whose target_url points at a topic are crawled.
        NOTE(review): like dealBlock1, returns only the LAST topic's guides
        and raises UnboundLocalError when no banner links to a topic.
        """
        print '处理精选页面中的正方形专题中的攻略...'
        topics_path=base_path+'/topics/img'
        block2_url='http://api.liwushuo.com/v2/secondary_banners?gender=1&generation=1'
        block2_content=requests.get(block2_url,timeout=3).text
        block2page_bejson=json.loads(block2_content)
        block2page_data2rybanners=block2page_bejson['data']['secondary_banners']
        for block2page_data2rybanner in block2page_data2rybanners:
            target_url=block2page_data2rybanner['target_url']
            if 'type=topic' in str(target_url):
                # The topic id is embedded in the banner's target URL.
                topic_id=re.findall('type=topic\&topic_id=(\d+)',target_url)[0]
                selection_block2_guides=AnalyzeTopic(topic_id,2)
                selection_block2_imgurl=block2page_data2rybanner['image_url']
                selection_block2_coverpath=topics_path+'/topic_2_'+str(topic_id)+'.jpg'
                isExistsTopic=os.path.exists(selection_block2_coverpath)
                if not isExistsTopic:
                    selection_block2_covercont= urllib2.urlopen(selection_block2_imgurl)
                    fb = open(selection_block2_coverpath,'wb')
                    fb.write(selection_block2_covercont.read())
                    fb.close()
        return selection_block2_guides
    def dealBlock3(self,tdays=200):
        """Guides from the main selection feed, pages of 20, limited to items
        created within the last *tdays* days; stops at the first empty page."""
        print '处理精选页面中的攻略中...'
        selection_block3_guides=[]
        for offset in range(0,int(tdays)*20,20):
            block3_url='http://api.liwushuo.com/v2/channels/100/items?ad=2&gender=1&generation=1&limit=20&offset='+str(offset)
            block3_content = requests.get(block3_url,timeout=3).text
            block3page_bejson=json.loads(block3_content)
            block3page_dataitems=block3page_bejson['data']['items']
            if block3page_dataitems!=[]:
                for block3page_dataitem in block3page_dataitems:
                    guide_id=block3page_dataitem['id']
                    created_at=block3page_dataitem['created_at']
                    # Cutoff timestamp: now minus tdays.
                    DaysAgo = (datetime.datetime.now() - datetime.timedelta(days = tdays))
                    timeStamp = int(time.mktime(DaysAgo.timetuple()))
                    if int(created_at)>=timeStamp:
                        guide_path=base_path+'/guides/'+str(guide_id)+'/img'
                        isExists=os.path.exists(guide_path)
                        if not isExists:
                            os.makedirs(guide_path)
                        selection_block3_guides+=[str(guide_id)]
                        # Save the guide cover once.
                        block3_coverurl=block3page_dataitem['cover_image_url']
                        block3_coverpath=guide_path+'/cover_'+str(guide_id)+'.jpg'
                        isExistsTopic=os.path.exists(block3_coverpath)
                        if not isExistsTopic:
                            block3_covercont= urllib2.urlopen(block3_coverurl)
                            fb = open(block3_coverpath,'wb')
                            fb.write(block3_covercont.read())
                            fb.close()
                    else:
                        # Feed is newest-first: stop this page at the first old item.
                        break
            else:
                break
        print
        return selection_block3_guides
    def gainAllGuide(self):
        """Run all three blocks and concatenate their guide ids.

        NOTE(review): builds a fresh instance instead of using self; the
        result is identical because the methods keep no state, but
        self.dealBlockN() would be the conventional call.
        """
        main=crawlSelectionGuides()
        selection_block1_guides=main.dealBlock1()
        selection_block2_guides=main.dealBlock2()
        selection_block3_guides=main.dealBlock3()
        selection_guide_ids=selection_block1_guides+selection_block2_guides+selection_block3_guides
        return selection_guide_ids
#这个函数是用来解析礼物说热门页面(popularpage),这里我总页就是一个商品的展示柜。
def crawlPopularGoodss(tdays=100):
try:
popular_goodsids=[]
for offset in range(0,int(tdays)*50,50):
pop_url='http://api.liwushuo.com/v2/items?gender=1&generation=1&limit=50&offset='+str(offset)
pop_content=requests.get(pop_url,timeout=3).text
poppage_bejson=json.loads(pop_content)
poppage_dataitems=poppage_bejson['data']['items']
# print poppage_dataitems
if poppage_dataitems!=[]:
for poppage_dataitem in poppage_dataitems:
goods_id=poppage_dataitem['data']['id']
popular_goodsids+=[str(goods_id)]
else:
break
# print popular_goodsids
return popular_goodsids
except Exception, e:
raise e
# Parses the liwushuo class page, which we split into two blocks:
# block1: the topic banners (including the full "view all" listing); a
#         default window of tdays=10 limits how much is crawled.
# block2: everything else, i.e. the item channels (e.g. gifts, outfits, food...).
class crawlClassGuides(object):
    """Crawler for the class page; gainAllGuide unions block1 and block2."""
    def dealBlock1(self,tdays=10):
        """Guide ids from the class-page topic collections, 10 per page."""
        block1_guideids=[]
        for offset in range(0,int(tdays)*10,10):
            block1_url='http://api.liwushuo.com/v2/collections?limit=10&offset='+str(offset)
            block1_content=requests.get(block1_url,timeout=3).text
            block1page_bejson=json.loads(block1_content)
            block1page_datacollections=block1page_bejson['data']['collections']
            if block1page_datacollections!=[]:
                for block1page_datacollection in block1page_datacollections:
                    topic_id=block1page_datacollection['id']
                    created_at=block1page_datacollection['created_at']
                    # Cutoff computed but the created_at filter is currently
                    # disabled: every topic is analyzed.
                    DaysAgo = (datetime.datetime.now() - datetime.timedelta(days = tdays))
                    timeStamp = int(time.mktime(DaysAgo.timetuple()))
                    block1_guideids+=AnalyzeTopic(topic_id,3)
        return block1_guideids
    def dealBlock2(self,tdays=10):
        """Guide ids from every channel of every channel group.

        Also records channel metadata to items/itemsinfos and one
        "channel|guide|position" line per guide to items/itemscontent.
        """
        items_infos_path=base_path+'/items/itemsinfos'
        items_content_path=base_path+'/items/itemscontent'
        block2_url='http://api.liwushuo.com/v2/channel_groups/all'
        block2_content=requests.get(block2_url,timeout=3).text
        block2page_bejson=json.loads(block2_content)
        block2page_data_channelgroups=block2page_bejson['data']['channel_groups']
        block2_guide_ids=[]
        for block2page_data_channelgroup in block2page_data_channelgroups:
            block2page_data_channelgroup_channels=block2page_data_channelgroup['channels']
            channel_groups_id=block2page_data_channelgroup['id']
            for block2page_data_channelgroup_channel in block2page_data_channelgroup_channels:
                channel_cover_image_url=block2page_data_channelgroup_channel['cover_image_url']
                channel_icon_url=block2page_data_channelgroup_channel['icon_url']
                channel_id=block2page_data_channelgroup_channel['id']
                channel_items_count=block2page_data_channelgroup_channel['items_count']
                channel_name=block2page_data_channelgroup_channel['name']
                channel_status=block2page_data_channelgroup_channel['status']
                # Rewrite the icon URL to the locally hosted copy.
                channel_icon_url=base_url+'items/img/item'+str(channel_id)+'_icon.jpg'
                channel_infos_content=str(channel_groups_id)+'|'+str(channel_id)+'|'+str(channel_name)+'|'+str(channel_items_count)+'|'+str(channel_status)+'|'+str(channel_icon_url)
                # NOTE(review): opened with 'w', so itemsinfos is rewritten for
                # every channel and ends up holding only the last one; sibling
                # files use 'a+' - confirm whether append was intended here.
                fci=codecs.open(items_infos_path,'w','utf-8')
                fci.write(channel_infos_content+'\n')
                fci.close()
                location=0
                for offset in range(0,int(tdays)*10,10):
                    channel_url='http://api.liwushuo.com/v2/channels/'+str(channel_id)+'/items?limit=10&offset='+str(offset)
                    channel_content=requests.get(channel_url,timeout=3).text
                    channelpage_bejson=json.loads(channel_content)
                    channelpage_dataitems=channelpage_bejson['data']['items']
                    if channelpage_dataitems !=[]:
                        for channelpage_dataitem in channelpage_dataitems:
                            block2_guideid=channelpage_dataitem['id']
                            block2_guide_ids+=[str(block2_guideid)]
                            location+=1
                            item_infos_content=str(channel_id)+'|'+str(block2_guideid)+'|'+str(location)
                            fti=codecs.open(items_content_path,'a+','utf-8')
                            fti.write(item_infos_content+'\n')
                            fti.close()
        return block2_guide_ids
    def dealBlock3(self):
        """Dump the "columns" listing to columnsinfos, one pipe-joined line
        per column. Returns None; columns_content_path is computed but never
        written to."""
        columns_infos_path=base_path+'/columns/columnsinfos'
        columns_content_path=base_path+'/columns/columnscontent'
        columns_url='http://api.liwushuo.com/v2/columns?limit=11&offset=0'
        columns_contents=requests.get(columns_url,timeout=3).text
        bejson=json.loads(columns_contents)
        f=open(columns_infos_path,'w+')
        for column in bejson['data']['columns']:
            title= column['title']
            cover_image_url= column['cover_image_url']
            description= column['description']
            id= column['id']
            subtitle= column['subtitle']
            f.writelines(str(title)+'|'+str(cover_image_url)+'|'+str(description)+'|'+str(id)+'|'+str(subtitle)+'\n')
        f.close()
    def gainAllGuide(self):
        """Deduplicated union of block1 and block2 guide ids (block3 only
        writes files and contributes nothing here)."""
        block1_guides=self.dealBlock1()
        block2_guides=self.dealBlock2()
        guide_ids=list(set(block1_guides+block2_guides))
        return guide_ids
def clearInfosFile():
try:
print 'clearing the content of files!'
fgiude=open(base_path+'/guides/infos','r+')
fgiude.truncate()
fgiude.close()
fguidescont=open(base_path+'/guides/guidescontent','r+')
fguidescont.truncate()
fguidescont.close()
fgoods=open(base_path+'/goods/infos','r+')
fgoods.truncate()
fgoods.close()
fgbu = open(base_path+'/goods/bannarurl','r+')
fgbu.truncate()
fgbu.close()
fti = open(base_path+'/collections/infos','r+')
fti.truncate()
fti.close()
fte = open(base_path+'/collections/collectionscontent','r+')
fte.truncate()
fte.close()
fti = open(base_path+'/items/itemsinfos','r+')
fti.truncate()
fti.close()
fte = open(base_path+'/items/itemscontent','r+')
fte.truncate()
fte.close()
fte = open(base_path+'/topics/infos','r+')
fte.truncate()
fte.close()
fte = open(base_path+'/topics/topicscontent','r+')
fte.truncate()
fte.close()
print 'cleared!'
except Exception, e:
print '初次运行,文件还未生成,无需清空'
def main():
    """Ad-hoc crawl driver: enable/disable the individual steps below.

    Currently only runs crawlClassGuides.dealBlock2 with a 1-day window;
    errors are printed and re-raised.
    """
    try:
        # clearInfosFile()
        # selectguides=crawlSelectionGuides()
        # selectguides.dealBlock3(1)
        # Selectinguides=selectguides.gainAllGuide()
        # # print Selectinguides
        classguides=crawlClassGuides()
        classguides.dealBlock2(1)
        # classguides.dealBlock2()
        # classguides.dealBlock3()
        # Classguides=classguides.gainAllGuide()
        # # print Classguides
        # crawlPopularGoodss(1)
        # AnalyzeTopic('311',3)
        # AnalyzeGoods('1002919')
        # release_cdn(base_url+'goods/html/1002918.html',1)
        # AnalyzeGuides('1044070')
        # creatGuidesHtml(['17'])
        # release_cdn(base_url+'guides/html',0)
    except Exception,e :
        print e
        raise e
# Script entry point.
if __name__ == '__main__':
    main()
|
'''
Author: Aditi Nair (asn264)
Date: November 3rd 2015
'''
import sys
import math
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
class HealthDataAnalyzer(object):
    '''Each instance of this class represents a tool for analyzing the NYC DoH data. It relies on the data-cleaning and grade-evaluation functions as static
    methods because their behavior is not dependent on the state of the HealthDataAnalyzer object, but is obviously associated with it.
    Each instance of the class is associated with two dataframes, described below, and functions that compute various statistics or create graphs with respect to them.

    NOTE(review): written for Python 2 and a legacy pandas release -
    DataFrame.sort, Series.sum(level=...) and Series.iteritems used below
    were removed in modern pandas; confirm the pinned environment before
    modernizing.'''
    def __init__(self):
        #This is just a cleaned version of the provided csv.
        self.health_grades = self.clean_health_data()
        #Here we sort the data by date, then grouping by BORO and camis_id, take a list of the grades.
        #For each list of grades (corresponding to a unique boro-camis_id pair), apply the function test_grades and store the result as a series.
        self.progress_evaluation_by_camis = self.health_grades.sort('GRADE DATE').groupby(['BORO', 'CAMIS'])['GRADE'].apply(lambda x: x.tolist()).apply(self.test_grades)
    @staticmethod
    def clean_health_data():
        '''This function cleans and loads the data into a data frame. It removes any rows where the GRADE is not A, B, or C and also
        removes any rows where BORO is Missing. It also converts the GRADE DATE column to be of datetime type which is useful later. Because
        Git does not allow you to upload files beyond a certain size, you may run into an IOError here.
        (According to NYC DOH the only valid grades are A, B, or C: http://www.nyc.gov/html/doh/downloads/pdf/rii/how-we-score-grade.pdf)
        '''
        try:
            #Load the data, dropping rows where there are NaN values.
            health_grades = pd.read_csv('DOHMH_New_York_City_Restaurant_Inspection_Results.csv', low_memory=False).dropna()
            #The only valid health grades are A, B, C. Throw out any other values.
            health_grades = health_grades[health_grades['GRADE'].isin(['A', 'B', 'C'])]
            #Currently the dates in column GRADE DATE are of type 'object'. Convert to type datetime.
            health_grades['GRADE DATE'] = pd.to_datetime(health_grades['GRADE DATE'])
            #Drop the columns where the Borough is 'Missing'
            health_grades = health_grades[health_grades['BORO'] != 'Missing']
            return health_grades
        except IOError:
            sys.exit("Please download the file 'DOHMH_New_York_City_Restaurant_Inspection_Results.csv' into this directory and try again.")
    @staticmethod
    def test_grades(grades):
        '''This function accepts a list of grades and returns 1 if they are improving, 0 if they remain the same and -1 if they are getting worse.
        It converts grades to integer values, evaluates differences between consecutive grades, and then takes the weighted average of
        differences, giving greater weight to more recent changes. Please consult README.txt for a full explanation of the methodology.
        Assumes grades is a chronologically sorted list.'''
        #If there is only one grade, then the grades are neither improving or declining
        if len(grades) == 1:
            return 0
        else:
            grade_to_int = {'A':3, 'B':2, 'C':1}
            #Transform the list of letter grades to a list of integer values
            ints = [grade_to_int.get(i) for i in grades]
            #The difference between each consecutive grade indicates whether the grade is increasing, decreasing, or staying the same between inspections
            differences = np.diff(ints)
            #Now take the weighted average of the differences, weighting later
            #differences more heavily (weight i+1 for the i-th difference).
            length = len(differences)
            avg = sum([(i+1)*differences[i] for i in range(length)])/float((length*(length+1))/2)
            #Collapse the weighted average to a direction: improving/steady/declining.
            if avg > 0:
                return 1
            elif avg < 0:
                return -1
            else:
                return 0
    def test_restaurant_grades(self, camis_id):
        '''Returns the value of test_grades for a list of chronological grades for a single CAMIS id by
        looking at the series self.progress_evaluation_by_camis and returning the row corresponding to the right CAMIS id'''
        return self.progress_evaluation_by_camis.loc[self.progress_evaluation_by_camis.index.get_level_values('CAMIS')==camis_id][0]
    def sum_test_results_by_boro(self):
        '''Computes the sum of the test results in each borough by taking the sum of self.progress_evaluation_by_camis grouping
        by the index at level='BORO'''
        return self.progress_evaluation_by_camis.sum(level='BORO')
    def print_test_results(self):
        '''Prints the results of sum_test_results_by_boro and then prints a sum of the whole series boro_sums to compute the
        sum of results for the whole city.'''
        boro_sums = self.sum_test_results_by_boro()
        for boro, test_sum in boro_sums.iteritems():
            print "The sum of the scores of all restaurants in", boro, ":", test_sum
        print "The sum of the scores of all the restaurants in NYC:", boro_sums.sum()
    def graph_grade_improvement_nyc(self):
        '''Create a graph that plots the number of restaurants having grades A, B, and C over time, and saves it to PDF.
        Plots the number of restaurants on the y-axis and the dates on the x-axis, with a different line for each grade.'''
        #Counts per (date, grade), pivoted so each grade is a column, sorted by date.
        count_by_date = self.health_grades.groupby(['GRADE DATE', 'GRADE']).size().unstack(level=1).sort()
        count_by_date.plot(kind='line')
        plt.title("Grade Distribution NYC")
        plt.ylabel("Number of Restaurants")
        plt.savefig("grade_improvement_nyc.pdf")
    def graph_grade_improvement_by_boro(self):
        '''For each borough, create a graph that plots the number of restaurants having grades A, B, or C over time, and saves it to PDF.
        Plots the number of restaurants on the y-axis and the dates on the x-axis, with a different line for each grade.'''
        count_by_date_by_boro = self.health_grades.groupby(['BORO', 'GRADE DATE', 'GRADE']).size().unstack(level=2).sort()
        #One figure (and PDF) per borough.
        for boro in count_by_date_by_boro.index.get_level_values('BORO').unique():
            count_by_date_by_boro.loc[boro].plot(kind='line')
            plt.title("Grade Distribution " + str(boro))
            plt.ylabel("Number of Restaurants")
            plt.savefig("grade_improvement_"+str(boro)+".pdf")
|
from django.db import models
from dataprocessing.models import Items
|
from flask import Flask, render_template,redirect,url_for,request
import twitterscraper
from twitterscraper import query_tweets
import datetime as dt
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import os
app = Flask(__name__)
@app.route('/')
def student():
    """Serve the landing page containing the app-name query form."""
    template_name = 'student.html'
    return render_template(template_name)
@app.route('/result',methods=['POST','GET'])
def result():
    """Scrape the last day's tweets for the submitted name and render stats.

    On POST: queries tweets mentioning the form's Name field, tallies likes,
    replies and retweets, runs VADER sentiment over the tweet texts and
    renders index.html with the aggregate dictionary.
    Fix: the counters/text list were initialized twice (the second, identical
    initialization sat inside the `if tweet:` branch); the duplicate is
    removed with no behavior change.
    NOTE(review): a plain GET falls through and returns None - confirm GET
    should be allowed on this route at all.
    """
    if request.method =='POST':
        app_name = request.form['Name']

        def get_tweet(app):
            # Tweets from the last 24 hours, English only, fetched in parallel.
            return query_tweets(app, limit=None,
                                begindate=dt.date.today()-dt.timedelta(days=1),
                                enddate=dt.date.today(),
                                poolsize=20,
                                lang='en'
                                )

        tweet = get_tweet(app=app_name)
        length = len(tweet)
        likes = 0
        reply = 0
        retweets = 0
        text = []
        if tweet:
            for line in tweet:
                likes += int(line.likes)
                reply += int(line.replies)
                retweets += int(line.retweets)
                if line.text:
                    text.append(line.text)
        analyzer = SentimentIntensityAnalyzer()
        pos = []
        for i in text:
            output = analyzer.polarity_scores(i)
            pos.append(output['pos'])
        # Count tweets with a strictly positive "pos" sentiment component.
        pos_value = [a for a in pos if a > 0]
        dic = {'tweets':length,'likes':likes,'reply':reply,'retweets':retweets,'positive sentiment':len(pos_value)}
        return render_template('index.html',result = dic)
if __name__ == '__main__':
    # Bind to the platform-assigned port (e.g. a PaaS PORT env var), default 5000.
    port = int(os.environ.get('PORT',5000))
    app.run(host='0.0.0.0',port=port)
|
from django.contrib import admin
from .models import FullSizeNPC, HeaderImage, Dialogue
@admin.register(FullSizeNPC, HeaderImage, Dialogue)
class MainAdmin(admin.ModelAdmin):
    """Register FullSizeNPC, HeaderImage and Dialogue with the default ModelAdmin behavior."""
    pass
|
from django.db import models
from django.core.validators import FileExtensionValidator
# Create your models here.
class UploadData(models.Model):
    """
    Model storing an uploaded file. With upload_to='' the file is saved
    directly under MEDIA_ROOT (not in a MEDIA_ROOT/uploads/ subfolder, as
    this docstring previously claimed).
    """
    upload = models.FileField(upload_to='')
|
#!/bin/env python
"""
ng -- gets the nearest galaxies
USAGE:
./ng.py [ra]
"""
import csv
import os
import sqlite3
import numpy, math
import geohash2
from math import sin, cos, radians, sqrt, atan2, degrees
import copy
__author__ = "Josh Bloom"
__version__ = "10 Nov 2008"
# Resolve the catalog data directory from the TCP_DIR environment variable
# when set (Python 2 dict.has_key API); otherwise use the working directory.
if os.environ.has_key("TCP_DIR"):
    DATADIR = os.environ.get("TCP_DIR") + "Data/"
else:
    DATADIR = ""
# Default minimum geohash/geoindex depth used by GalGetter lookups.
def_min_depth = 11
class GalGetter:
dbname = "gal.db"
rez = []
query = ""
index_type="index"
    def __init__(self,inname=DATADIR + "200MpcGalaxyCatalog_v2.dat",dbname=DATADIR + "gal_v2.db",max_rows=-1,make_db_on_instance=True,verbose=False):
        # inname: '|'-delimited source catalog; dbname: sqlite file to build/reuse.
        self.inname = inname
        self.dbname = dbname
        self.verbose = verbose
        # Optionally build (or reuse, see make_db's clobber logic) the DB now.
        if make_db_on_instance:
            self.make_db(max_rows=max_rows)
    # NOTE: 20090227: dstarr changed some default values so this works with
    # the normal feature extractor.
    def make_db(self,in_memory=False,clobber=False,skip_headers=[''],\
        intnames=['pgc'],default_hash_depth=18,max_rows=-1):
        """Build the 'galaxies' sqlite table from the catalog file self.inname.

        Reads the '|'-delimited catalog, keeps the columns not listed in
        skip_headers, converts RA from hours to degrees, and inserts one row
        per galaxy plus a geohash/geoindex string whose depth shrinks as the
        galaxy's angular size (logd25) grows. Returns the db path, or None
        when the input catalog is missing; an existing db is reused unless
        clobber is True.

        NOTE(review): per-row failures are swallowed by a bare except (only
        counted in verbose mode); skip_headers/intnames are mutable defaults -
        harmless here since never mutated, but fragile.
        """
        if not os.path.exists(self.inname):
            if self.verbose:
                print "inname = %s does not exist" % self.inname
            return None
        r = csv.reader(open(self.inname), delimiter="|")
        ## figure out the headers
        tmp = numpy.array([s.strip() for s in r.next()])
        use_indices = []
        for i in range(len(tmp)):
            if not (tmp[i] in skip_headers):
                use_indices.append(i)
        tmp = list(tmp.take(use_indices))
        self.headers = copy.copy(tmp)
        self.headers.append('ghash')
        # Column positions of RA (hours), Dec, and log10 of the D25 diameter.
        ra_ind = tmp.index('al2000')
        dec_ind = tmp.index('de2000')
        d_ind = tmp.index('logd25')
        if clobber:
            if not in_memory:
                if os.path.exists(self.dbname):
                    os.remove(self.dbname)
        else:
            # Reuse an existing database file as-is.
            if os.path.exists(self.dbname):
                return self.dbname
        if in_memory:
            self.dbname = ":memory:"
        # Make the DB: one real column per kept header, int for intnames.
        conn = sqlite3.connect(self.dbname)
        c = conn.cursor()
        tmp1 = []
        for s in tmp:
            if s in intnames:
                tmp1.append(s + " int")
            else:
                tmp1.append(s + " real")
        tmp1.append("ghash text")
        tmp1 = ",".join(tmp1)
        c.execute('''create table galaxies (%s)''' % tmp1)
        i=0
        min_depth = 35
        # Index constructor: Geohash or Geoindex depending on index_type.
        if self.index_type == "hash":
            g = geohash2.Geohash
        else:
            g = geohash2.Geoindex
        for l in r:
            if i > max_rows and max_rows > 1:
                break
            try:
                tmp = numpy.array([s.strip() for s in l])
                tmp = list(tmp.take(use_indices))
                # The catalog stores RA in hours; convert to degrees.
                tmp[ra_ind] = str(float(tmp[ra_ind])*15.0)
                hashpos = (float(tmp[ra_ind]),float(tmp[dec_ind]))
                if float(tmp[d_ind]) > 0:
                    # Larger galaxies get a shallower (coarser) hash depth.
                    hashpos_depth = int(math.floor(18.25707 - \
                        3.333333*math.log10(60.0*10**(float(tmp[d_ind])*0.1))))
                else:
                    hashpos_depth = default_hash_depth
                if hashpos_depth < min_depth:
                    min_depth = hashpos_depth
                tmp.append("'%s'" % str(g(hashpos,depth=hashpos_depth)))
                tmp = ",".join(tmp)
                c.execute("""insert into galaxies values (%s)""" % tmp)
                i+=1
            except:
                # Malformed catalog rows are skipped silently unless verbose.
                if self.verbose:
                    print "row %i" % i
                i+=1
                continue
        conn.commit()
        conn.close()
        if self.verbose:
            print "min_depth ", min_depth
        return self.dbname
def getgi(self,pos=(None,None),error=1.0,min_depth=def_min_depth):
mult = 3600.0
depth =int(math.floor(18.25707 - 3.333333*math.log10(mult*error)))
if depth > min_depth:
depth = min_depth
#depth=18
return geohash2.Geoindex(pos,depth=depth)
def getgh(self,pos=(None,None),error=1.0,min_depth=def_min_depth):
mult = 3600.0
depth =int(math.floor(18.25707 - 3.333333*math.log10(mult*error)))
if depth > min_depth:
depth = min_depth
#depth=18
return geohash2.Geohash(pos,depth=depth)
def getgals(self,pos=(49.362750 , 41.405417),radius=5,min_depth=def_min_depth,sort_by='dist',max_d=500.0):
if self.index_type == "hash":
g = self.getgh
else:
g = self.getgi
## radius in degrees
## max d in Mpc
dm_max = 5.0*math.log10(max_d*1e5)
gh= g(pos=pos,error=radius*3,min_depth=min_depth)
#gh1 = self.getgh(pos=(pos[0],pos[1]+radius),error=radius,min_depth=min_depth)
#gh2 = self.getgh(pos=(pos[0],pos[1]-radius),error=radius,min_depth=min_depth)
#gh3 = self.getgh(pos=(pos[0]-radius*cos(radians(pos[1])),pos[1]),error=radius,min_depth=min_depth)
#gh4 = self.getgh(pos=(pos[0]+radius*cos(radians(pos[1])),pos[1]),error=radius,min_depth=min_depth)
#print gh.bbox()
#print gh.point()
#print gh1.bbox(), (pos[0],pos[1]+radius), str(gh1)
#print gh2.bbox(), (pos[0],pos[1]-radius), str(gh2)
#print gh3.bbox(), (pos[0]-radius*cos(radians(pos[1])),pos[1]), str(gh3)
#print gh4.bbox(), (pos[0]+radius*cos(radians(pos[1])),pos[1]), str(gh4)
conn = sqlite3.connect(self.dbname)
c = conn.cursor()
self.query = 'pos = %s, radius = %f, sort_by=%s max_d=%f \nselect * from galaxies where galaxies.ghash glob %s and galaxies.mucin < %s' % \
(repr(pos), radius, sort_by, max_d, str(gh)[:-2] + "*",dm_max)
c.execute('select * from galaxies where galaxies.ghash glob ? and galaxies.mucin < ?', (str(gh)[:-2] + "*",dm_max))
tmp = c.fetchall()
d_ind = self.headers.index('logd25')
r_ind = self.headers.index('logr25')
pa_ind = self.headers.index('pa')
mucin = self.headers.index('mucin')
muc = self.headers.index('mup')
#semim = 60 * 10.**(r1[0][d_ind])*0.1
#semimin = semim / 10**r1[0][r_ind]
#pa = r1[0][pa_ind]
tmp1 = []
for r in tmp:
d = self.distance(pos[0],pos[1],r[1],r[2])
if d < radius:
dl = self.distlight(pos[0],pos[1],r[1],r[2],60 * 10.**(r[d_ind])*0.1,60 * 10.**(r[d_ind])*0.1/(10**r[r_ind]),r[pa_ind])
if r[muc] > 0:
off = 1e3*radians(d)*1e-5*10**(r[muc]/5.0)
else:
off = 1e3*radians(d)*1e-5*10**(r[mucin]/5.0)
#print off, d, r[mucin]
tmp1.append( (r,d,dl,off))
tmp = tmp1
#print len(tmp)
if sort_by == 'dist':
tmp.sort(key=lambda x: x[1])
elif sort_by == 'dm':
tmp.sort(key=lambda x: x[0][self.headers.index('mucin')])
elif sort_by == 'mag':
## get the distance modulus and the b-band mag corrected
tmp.sort(key=lambda x: x[0][self.headers.index('btc')] - x[0][self.headers.index('mucin')])
elif sort_by == 'light':
## distance in light units
tmp.sort(key=lambda x: x[2][0])
elif sort_by == 'phys':
tmp.sort(key=lambda x: x[3])
self.rez = tmp
conn.close()
def grab_rez(self,retkey="light",prefix="closest_in_"):
try:
r1=self.rez[0]
if retkey == 'light':
val= r1[2][0]
sb = "light"
units = "galaxy_surface_brightness"
alt_dict = {prefix + sb + "_physical_offset_in_kpc": r1[3], prefix + sb + "_angular_offset_in_arcmin": r1[1]*60}
elif retkey == 'phys':
val= r1[3]
sb = "physical_offset_in_kpc"
units = "kpc"
alt_dict = {prefix + sb + "_light": r1[2][0], prefix + sb + "_angular_offset_in_arcmin": r1[1]*60}
elif retkey == "dist":
val= r1[1]*60
sb = "angular_offset_in_arcmin"
units = "arcmin"
alt_dict = {prefix + sb + "_light": r1[2][0], prefix + sb + "_physical_offset_in_kpc": r1[3]}
else:
return {}
d_ind = self.headers.index('logd25')
r_ind = self.headers.index('logr25')
pa_ind = self.headers.index('pa')
mucin = self.headers.index('mucin')
muc = self.headers.index('mup')
t = self.headers.index('t')
te = self.headers.index('e_t')
b = self.headers.index('btc')
ra = self.headers.index('al2000')
dec = self.headers.index('de2000')
smj,sminor,pa = (60 * 10.**(r1[0][d_ind])*0.1, 60 * 10.**(r1[0][d_ind])*0.1/(10**r1[0][r_ind]), r1[0][pa_ind])
dm = r1[0][muc] if r1[0][muc] > 0 else r1[0][mucin]
if smj == 6.0e-99:
smj = None
sminor = None
## look at the t-type
ttype = r1[0][t] if r1[0][te] < 3 and r1[0][t] != -99.0 else None
## look at the absolute mag of the closest galaxy
absb = r1[0][b] - dm if r1[0][b] > 5.0 else None
## angle from major
angle_major = r1[2][1] or None
## position (for internal purposes if we want it)
ra, dec = r1[0][ra], r1[0][dec]
except:
return {}
alt_dict.update({prefix + sb: val, prefix + sb + "_units": units, prefix + sb + "_semimajor_r25_arcsec": smj, \
prefix + sb + "_semiminor_r25_arcsec": sminor, prefix + sb + "_dm": dm, \
prefix + sb + "_angle_from_major_axis": angle_major, prefix + sb + "_ttype": ttype, \
prefix + sb + "_absolute_bmag": absb, prefix + sb + "_galaxy_position": (ra, dec)})
return copy.copy(alt_dict)
def __str__(self):
a = "%s\n%s\n%s\n" % ("*"*50, self.query,"*"*50)
a += "dist(') offset(kpc) dist(light) angle_from_major"
for h in self.headers:
a += "%9s" % h
a += "\n"
for r1 in self.rez:
r = r1[0]
#print r1[2]
a += "%7.4f %7.4f %7.4f %7.1f " % (r1[1]*60, r1[3], r1[2][0], r1[2][1] or -999)
a += " ".join([str(x) for x in r]) + " \n"
# a += "%f %f %f %f %s\n" % (r[1], r[2], r[7], r1[1], r[-1])
return a
def writeds9(self,fname='ds9.reg'):
ra_ind = self.headers.index('al2000')
dec_ind = self.headers.index('de2000')
d_ind = self.headers.index('logd25')
r_ind = self.headers.index('logr25')
pa_ind = self.headers.index('pa')
pgc_ind = self.headers.index('pgc')
mucin = self.headers.index('mucin')
muc = self.headers.index('mup')
f = open(fname,'w')
f.write("# Region file\n")
f.write('global color=green font="helvetica 10 normal" select=1 highlite=1 edit=1 move=1 delete=1 include=1 fixed=0 source\nfk5\n')
mind = 100
maxd = 0
for r1 in self.rez:
if r1[0][muc] != -99:
m = r1[0][muc]
else:
m = r1[0][mucin]
if m < mind: mind = m
if m > maxd: maxd = m
# max sure that maxd and mind aren't the same
if mind == maxd:
mind /= 1.02
for r1 in self.rez:
d = r1[0]
semim = 60 * 10.**(r1[0][d_ind])*0.1
semimin = semim / 10**r1[0][r_ind]
pa = r1[0][pa_ind]
if r1[0][muc] != -99:
m = r1[0][muc]
else:
m = r1[0][mucin]
#print r1, m, mind, maxd
cc = str(hex(int(255.0 - 255.0*(m - mind)/(maxd - mind)))).split("x")[1]
if len(cc) == 1: cc = "0" + cc
col = '"#' + cc*2 + '44"'
width = int(5 - 4.0*(m - mind)/(maxd - mind))
d= 1e-5*10**(m/5.0)
f.write('ellipse(%f,%f,%f",%f",%f) # text={pgc=%i, d=%4.1f Mpc} color=%s width=%i\n' % (r1[0][ra_ind],r1[0][dec_ind],\
semim,semimin,pa - 90.0,r1[0][pgc_ind],d,col,width))
f.close()
def distance(self,lon0, lat0, lon, lat):
"""
Calculates the distance between two points (decimal)
"""
d_lat = radians(lat0 - lat)
d_lon = radians(lon0 - lon)
x = sin(d_lat/2) ** 2 + \
cos(radians(lat0)) * cos(radians(lat)) *\
sin(d_lon/2) ** 2
y = 2 * atan2(sqrt(x), sqrt(1.0 - x))
distance = y*180.0/math.pi
return distance
def distlight(self,lon, lat, lon0, lat0,semimajor,semiminor,pa,assumed_size_if_none=15.0):
"""assumed size = 15.0 arcsec"""
d = self.distance(lon0,lat0,lon,lat)
if pa == -99.0 and semimajor == 6e-99:
if self.verbose:
print "bad pa or size: returning %f %f" % (d/(assumed_size_if_none/3600.0),d)
return (d/(assumed_size_if_none/3600.0), None)
## get the angle from the center of this galaxy to the source
dra = self.distance(lon0,lat0,lon,lat0)
ddec = self.distance(lon0,lat0,lon0,lat)
if lat < lat0:
ddec *= -1
if lon < lon0:
dra *= -1
#if ((lon - lon0) > -180.0) and ((lon - lon0) < 180.0):
# dra *= -1
## this is the angle between the center of the galaxy and the source (east of North)
a = atan2(dra,ddec)
## relative to the semi-major axis the angle is
t = a - radians(pa)
## here's the r25 along this direction
r = numpy.sqrt((semimajor**2)*(semiminor**2)/( (semimajor*numpy.sin(t))**2 + (semiminor*numpy.cos(t))**2))
#print r, d, d*3600.0/r, lon0, lat0, lon, lat
#print "*"*50
return (d*3600.0/r, degrees(t))
def test():
    """Smoke test: build the catalog DB, run the default query, print the
    results and write a ds9 region file.  Stores the GalGetter instance in
    the module global `ddd` for interactive poking."""
    global ddd
    ddd = GalGetter(max_rows=-1)
    ddd.getgals()
    print ddd
    ddd.writeds9()
def test1():
    """Dump every galaxy row from the database.  Relies on the module global
    `ddd` having been set (e.g. by test())."""
    conn = sqlite3.connect(ddd.dbname)
    c = conn.cursor()
    c.execute('select * from galaxies')
    for r in c: print r
def get_closest_by_light(pos=(None,None),max_d=300.0,radius=1.0):
    """To be called by the extractors.

    Returns the grab_rez() feature dict for the galaxy closest in isophotal
    light units within `radius` degrees and `max_d` Mpc of `pos`.
    BUGFIX: the `radius` argument was previously ignored (a hard-coded 1.0
    was always passed to getgals); it is now honored.
    """
    ddd = GalGetter(verbose=False)
    ddd.getgals(pos=pos,radius=radius,sort_by="light",max_d=max_d)
    return ddd.grab_rez("light")
def get_closest_by_physical_offset(pos=(None,None),max_d=300.0,radius=1.0):
    """To be called by the extractors; offsets in kpc.

    Returns the grab_rez() feature dict for the galaxy with the smallest
    projected physical offset within `radius` degrees and `max_d` Mpc.
    BUGFIX: the `radius` argument was previously ignored (a hard-coded 1.0
    was always passed to getgals); it is now honored.
    """
    ddd = GalGetter(verbose=False)
    ddd.getgals(pos=pos,radius=radius,sort_by="phys",max_d=max_d)
    return ddd.grab_rez("phys")
def get_closest_by_angular_offset(pos=(None,None),max_d=300.0,radius=1.0):
    """To be called by the extractors; offsets in arcmin.

    Returns the grab_rez() feature dict for the galaxy with the smallest
    angular offset within `radius` degrees and `max_d` Mpc.
    BUGFIX: the `radius` argument was previously ignored (a hard-coded 1.0
    was always passed to getgals); it is now honored.
    """
    ddd = GalGetter(verbose=False)
    ddd.getgals(pos=pos,radius=radius,sort_by="dist",max_d=max_d)
    return ddd.grab_rez("dist")
if __name__ == "__main__":
    # Command-line driver: search for the nearest galaxies around -p RA DEC.
    from optparse import OptionParser
    usage = "usage: %prog [options] -p ra dec\n"
    parser = OptionParser(usage)
    parser.add_option("--ds9name", dest="ds9name", \
        help="Name of the output ds9 region file",\
        default="ds9.reg")
    parser.add_option("--nds9",dest="no_ds9", action="store_true",\
        help="dont write the ds9 file",default=False)
    parser.add_option("--radius",dest="radius",\
        help="Search radius in arcmin",type="float",default=60.0)
    parser.add_option("--maxd",dest="max_d",\
        help="Maximum distance to search (in Mpc); default = 500 Mpc",type="float",default=500.0)
    parser.add_option("-v","--verbose",dest="verbose",action="store_true",\
        help="Be verbose",default=False)
    # NOTE(review): action="store_false" together with default=False means
    # --ntop can never change the value -- the top result is always printed.
    # Probably default=True was intended; left as-is here.
    parser.add_option("--ntop",dest="ntop",action="store_false",\
        help="Dont print the top result",default=False)
    parser.add_option("--sortkey",dest="sortkey",choices = ['dist', 'dm', 'mag', 'light','phys'], \
        help="Sort key: (dist; default) angular distance from source, " + \
        "(dm) proximity to Earth in Mpc, (mag) galaxy absolute mag, (light) light units, (phys) offset in kpc",default="dist")
    parser.add_option("-p", type="float", nargs=2, dest="pos")
    (options, args) = parser.parse_args()
    # if len(args) != 2:
    #     print "You must supply RA and DEC in decimal degrees"
    #     print usage
    #     parser.parse_args(['-h'])
    if not options.pos:
        # No position supplied: show the help text and exit.
        parser.parse_args(['-h'])
    ddd = GalGetter(verbose=options.verbose)
    # The --radius option is in arcmin; getgals expects degrees.
    ddd.getgals(pos=options.pos,radius=float(options.radius)/60.,sort_by=options.sortkey,max_d=options.max_d)
    if not options.no_ds9:
        ddd.writeds9(fname=options.ds9name)
    if not options.ntop:
        print ddd
|
import os

# Walk the current directory tree, deleting every .png file and printing a
# running count of deletions as each one happens.
deleted = 0
for dirpath, _subdirs, filenames in os.walk('./'):
    pngs = [f for f in filenames if f.endswith(".png")]
    for filename in pngs:
        deleted += 1
        print(deleted)
        os.remove(os.path.join(dirpath, filename))
|
# Read two strings and a number from one whitespace-separated line; print
# "yes" if the strings differ in exactly that many positions (positions are
# compared over the length of the first string).
first, second, expected_raw = input().split()
expected = int(expected_raw)
mismatches = sum(1 for i in range(len(first)) if first[i] != second[i])
print("yes" if mismatches == expected else "no")
|
import logging
import numpy as np
class EnsembledModel(object):
    """Ensemble of multiple models.

    Predictions are combined by simple (unweighted) averaging of each
    member's softmax output.  All members are assumed to share the same
    label space and test split."""
    def __init__(self):
        self._models = []
        """`list` of `model.Model`: List of models to be ensembled."""
    def add_model(self, model):
        """Adds model to the list of models to be ensembled.
        Args:
            model (`model.Model`): Model to be added.
        """
        self._models.append(model)
    def test_data(self):
        """Returns the test data loaded in the models constituting the ensemble
        Returns:
            `numpy.ndarray`, `numpy.ndarray`, `numpy.ndarray`: The test inputs,
            their true labels, and their sequence lengths.
            Falls back to (None, None, None) when no models have been added.
        """
        try:
            return self._models[0].x_test, self._models[0].y_test, \
                self._models[0].seq_lens_test
        except IndexError:
            return None, None, None
    def predict(self, input, k=1):
        """Calculates top-`k` predictions for the supplied `input`.
        The top-`k` predictions are obtained by ensembling the predictions of
        all the models. Simple averaging of softmax-predictions is used for
        ensembling.
        `k` = 0 returns a sorted list of all predictions.
        Args:
            input (str): Description of recipe.
            k (int, optional): Number of top predictions to be returned.
                Defaults to 1.
        Returns:
            `list` of (`str`,`float`): Sorted list of top-`k` predictions for
                the given description, along with the assigned probabilities.
                Predictions are in the form of labels represented by strings
                (such as "new_photo_post" for a Trigger Function).
        """
        # NOTE(review): `input` shadows the builtin, but renaming it would
        # break keyword callers, so the signature is left unchanged.
        prediction = self._averaged_predictions(np.array([input]),
                                                preprocess=True).reshape((-1,))
        logging.debug("Averaged prediction %s", prediction)
        # argpartition places the k largest entries in the last k slots.
        ids = np.argpartition(prediction, -k)[-k:]
        # Ids of top-k labels.
        top_k_indices = ids[np.argsort(prediction[ids])][::-1]
        logging.debug("Top k=%s labels' ids %s", k, top_k_indices)
        # Convert label-ids to readable label-strings using
        # `Model.label_reverse_map`.
        labels_reverse_map = self._models[0].labels_reverse_map
        top_k_predictions = []
        for idx in top_k_indices:
            tup = (labels_reverse_map[idx], prediction[idx])
            top_k_predictions.append(tup)
        return top_k_predictions
    def evaluate(self):
        """Evaluates the ensemble of models on the test set.
        The test set used is the one loaded in the first model. It is assumed
        that all models are loaded with the same subset of the full test set.
        """
        logging.debug("Starting evaluation of ensembled model on test data.")
        inputs = self._models[0].x_test
        labels = self._models[0].y_test
        seq_lens = self._models[0].seq_lens_test
        mistakes = self.prediction_mistakes(inputs, labels, seq_lens)
        # Mean of the boolean mistake mask == classification error rate.
        error = np.mean(mistakes)
        logging.info("Test Error = %s", error)
    def prediction_mistakes(self, inputs, labels, seq_lens):
        """Computes averaged predictions of the models and identifies
        the instances where the model makes a mistake.
        Args:
            inputs (`numpy.ndarray`): List of input descriptions in tokenized
                form, i.e., as a 2D numpy array of tokens
            labels (`numpy.ndarray`): True labels in the form of a 2D numpy
                array. Each row is a one-hot vector for the label.
            seq_lens (`list` of `int`, optional): The list of lengths of
                descriptions as returned by
                `dataset.Dataset.description_lengths_before_padding`.
        Returns:
            numpy.ndarray: An array containing `True` and `False`, `True`
                representing correct prediction by the model for that input.
        """
        averaged_predictions = self._averaged_predictions(inputs, seq_lens,
                                                          preprocess=False)
        # Calculate classification error.
        p = np.argmax(averaged_predictions, axis=1)
        l = np.argmax(labels, axis=1)
        mistakes = np.not_equal(p, l)
        return mistakes
    def prediction_confidences(self, inputs, labels, seq_lens):
        """Computes averaged predictions of the models and returns confidences
        of top prediction for each input in `inputs`
        Args:
            inputs (`numpy.ndarray`): List of input descriptions in tokenized
                form, i.e., as a 2D numpy array of tokens
            labels (`numpy.ndarray`): True labels in the form of a 2D numpy
                array. Each row is a one-hot vector for the label.
            seq_lens (`list` of `int`, optional): The list of lengths of
                descriptions as returned by
                `dataset.Dataset.description_lengths_before_padding`.
        Returns:
            numpy.ndarray: An array containing confidence of top prediction for
                each input in `inputs`.
        """
        averaged_predictions = self._averaged_predictions(inputs, seq_lens,
                                                          preprocess=False)
        return np.max(averaged_predictions, axis=1)
    def _averaged_predictions(self, inputs, seq_lens=None, preprocess=True):
        """Computes average of softmax-prediction output of all the models.
        Each model is supplied with provided `inputs`, optionally with a request
        to pre-process the `inputs`. The models softmax output is then used.
        Args:
            inputs (`list` of `str` or `numpy.ndarray`): List of input
                descriptions. The descriptions can either be tokenized -- as a
                2D numpy array of tokens -- in which case `preprocess` should be
                `False` or raw strings of texts -- `list` of `str` -- in which
                case `preprocess` should be `True`.
            seq_lens (`list` of `int`, optional): The list of lengths of
                descriptions as returned by
                `dataset.Dataset.description_lengths_before_padding`. This is
                required if `preprocess` is `False`. Defaults to `None`, which
                works with `preprocess` set to True.
            preprocess (bool, optional): Set to `True` if the `inputs` needs to
                be pre-processed. Defaults to `True`.
        Returns:
            numpy.ndarray: Mean of softmax outputs of all the models of shape
                (num_inputs, num_classes)
        """
        predictions = []
        for model in self._models:
            preds = model.predictions(inputs, seq_lens, preprocess)
            predictions.append(preds)
        # Element-wise mean across models (axis 0 = the model dimension).
        averaged_predictions = np.mean(predictions, axis=0)
        return averaged_predictions
|
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from Database import Database
class MovieDialog(Gtk.Dialog):
    """Modal header-bar dialog that adds (or deletes) a movie by name."""

    def __init__(self, parent, action):
        # `action` (e.g. "Add"/"Delete") is used both for the window title
        # and, lower-cased, to select the behaviour in the callback.
        Gtk.Dialog.__init__(self, action + " a Movie", parent, Gtk.DialogFlags.MODAL, use_header_bar = True)
        self.db = Database(Database.location)
        self.type = action.lower()
        self.area = self.get_content_area() # area is a Gtk.Box
        self.area.get_style_context().add_class("linked")
        self.area.set_orientation(Gtk.Orientation.HORIZONTAL)
        self.entry = Gtk.Entry(text = "Enter the name of a movie to " + action.lower())
        self.entry.grab_focus()
        self.area.pack_start(self.entry, True, False, 0)
        self.enterButton = Gtk.Button(label = "Enter")
        self.enterButton.connect("clicked", self.enterButton_cb)
        self.area.pack_end(self.enterButton, True, False, 0)
        self.show_all()
    # if the action is deleting, create an autocompletion tree

    def enterButton_cb(self, enterButton):
        """Enter-button handler: add the typed movie, then close the dialog."""
        if (self.type == 'add') and (self.entry.get_text() != 'Enter the name of a movie to add'):
            print('Movie to add:', self.entry.get_text())
            try:
                self.db.newMovie(self.entry.get_text())
                print(self.entry.get_text(), 'added as', self.db.movies[-1].title)
            # BUGFIX: was a bare `except:` that also swallowed SystemExit /
            # KeyboardInterrupt and hid the actual failure reason.
            except Exception as e:
                print('Error adding', self.entry.get_text(), '-', e)
        self.destroy()
|
# KeypointCapture.py
# For storing captured keypoints from OpenPose
# Primarly taken from the work of Damon Gwinn, Clarkson University
import numpy as np
import glob
import json
import copy
import pdb
# Globals that define the order of the keypoints
# Joint-name order for the 25-point OpenPose body model: index i in this list
# corresponds to keypoint triplet i of "pose_keypoints_2d" in the JSON output.
# NOTE(review): assumed to match OpenPose's BODY_25 layout -- confirm against
# the OpenPose version used for capture.
ORDERED_KEYPOINTS_BODY = [
    "Nose",
    "Neck",
    "RShoulder",
    "RElbow",
    "RWrist",
    "LShoulder",
    "LElbow",
    "LWrist",
    "MidHip",
    "RHip",
    "RKnee",
    "RAnkle",
    "LHip",
    "LKnee",
    "LAnkle",
    "REye",
    "LEye",
    "REar",
    "LEar",
    "LBigToe",
    "LSmallToe",
    "LHeel",
    "RBigToe",
    "RSmallToe",
    "RHeel"
]
# Joint-name order for each 21-point OpenPose hand model (used for both the
# left and right hand keypoint arrays).
ORDERED_KEYPOINTS_HAND =[
    "Wrist",
    "Thumb1",
    "Thumb2",
    "Thumb3",
    "Thumb4",
    "Index1",
    "Index2",
    "Index3",
    "Index4",
    "Middle1",
    "Middle2",
    "Middle3",
    "Middle4",
    "Ring1",
    "Ring2",
    "Ring3",
    "Ring4",
    "Pinky1",
    "Pinky2",
    "Pinky3",
    "Pinky4"
]
#POINT_TUPLE = collections.namedtuple('Keypoint', 'x y conf')
# Object to store the keypoints for a single capture
# All keypoint variables are lists of dictionaries with the keys as ORDERED_KEYPOINTS_*
# and the values as [x, y, conf]
class KeypointCapture:
    """Keypoints for a single OpenPose capture.

    Each *_keypoints attribute is a list with one entry per frame; every
    entry is a dict mapping a joint name (see ORDERED_KEYPOINTS_*) to a
    [x, y, confidence] list.
    """
    def __init__(self):
        self.capture_name = ""          # human-readable capture name
        self.capture_id = ""            # unique capture identifier
        self.right_hand_keypoints = []  # per-frame dicts for the right hand
        self.left_hand_keypoints = []   # per-frame dicts for the left hand
        self.body_keypoints = []        # per-frame dicts for the body
        self.num_frames = 0

    def GetKeypointOrdering(self):
        """Canonical joint-name ordering: body, then Left_*, then Right_* hand."""
        return copy.copy(ORDERED_KEYPOINTS_BODY) + ["Left_" + x for x in ORDERED_KEYPOINTS_HAND] + ["Right_" + x for x in ORDERED_KEYPOINTS_HAND]

    def GetAllKeypointsList(self):
        """
        Returns all of the keypoints as a concatenated list
        (body frames, then left-hand frames, then right-hand frames).

        BUGFIX: the original built this list but never returned it, so the
        method always yielded None.
        """
        return copy.copy(self.body_keypoints) + copy.copy(self.left_hand_keypoints) + copy.copy(self.right_hand_keypoints)

    def GetFrameKeypointsAsOneDict(self, frame):
        """
        Return the keypoints as a dict indexed by the joint name (prefaced by Right or Left for hands) with the value as the current list
        This should be used for copying only
        args
        ----------
        frame : int
            The index in the capture you wish to get a copy of, not error checked
        """
        output_dict = {}
        for keypoint in self.body_keypoints[frame].keys():
            output_dict[keypoint] = self.body_keypoints[frame][keypoint]
        for keypoint in self.left_hand_keypoints[frame].keys():
            output_dict["Left_{}".format(keypoint)] = self.left_hand_keypoints[frame][keypoint]
        for keypoint in self.right_hand_keypoints[frame].keys():
            output_dict["Right_{}".format(keypoint)] = self.right_hand_keypoints[frame][keypoint]
        return output_dict

    def DeepCopy(self):
        """Return a deep copy of this KeypointCapture.

        BUGFIX: the original populated the copy's right hand from
        self.left_hand_keypoints and vice versa, swapping the hands.
        """
        k_copy = KeypointCapture()
        k_copy.capture_name = copy.copy(self.capture_name)
        k_copy.capture_id = copy.copy(self.capture_id)
        k_copy.num_frames = self.num_frames
        k_copy.body_keypoints = [None] * self.num_frames
        k_copy.right_hand_keypoints = [None] * self.num_frames
        k_copy.left_hand_keypoints = [None] * self.num_frames
        for i in range(self.num_frames):
            body_keypoints = {}
            for keypoint in self.body_keypoints[i].keys():
                body_keypoints[keypoint] = copy.copy(self.body_keypoints[i][keypoint])
            right_keypoints = {}
            for keypoint in self.right_hand_keypoints[i].keys():
                right_keypoints[keypoint] = copy.copy(self.right_hand_keypoints[i][keypoint])
            left_keypoints = {}
            for keypoint in self.left_hand_keypoints[i].keys():
                left_keypoints[keypoint] = copy.copy(self.left_hand_keypoints[i][keypoint])
            k_copy.body_keypoints[i] = body_keypoints
            k_copy.right_hand_keypoints[i] = right_keypoints
            k_copy.left_hand_keypoints[i] = left_keypoints
        return k_copy
#def GetKeypointsList(self, frame):
# keypoint_dict = self.GetFrameKeypointsDict(frame)
# for key in sorted(keypoint_dict.keys())
# Parses an entire folder of 2D json keypoint frames
def Read2DJsonPath(jsonFolder, captureName, captureId):
    """Load every OpenPose 2D JSON frame in *jsonFolder* (sorted by filename)
    and parse them into a single KeypointCapture."""
    if jsonFolder[-1] != "/":
        jsonFolder = jsonFolder + "/"
    frame_list = []
    for path in sorted(glob.glob(jsonFolder + "*")):
        with open(path, 'r') as handle:
            frame_list.append(json.loads(handle.read()))
    return Parse2DJsonFrames(frame_list, captureName, captureId)
# Parses a 2D Json collection of frames into a KeypointCapture
def Parse2DJsonFrames(jsonFrames, captureName, captureId):
    """Parse a list of OpenPose 2D JSON frame dicts into a KeypointCapture.

    Each frame contributes one dict per body part, mapping joint names (from
    ORDERED_KEYPOINTS_*) to [x, y, confidence] triplets.  Only the first
    person in each frame is used.
    CLEANUP: removed the dead `set_of_body/lefthand/righthand` locals that
    were allocated every frame but never read.

    Args:
        jsonFrames: list of decoded OpenPose JSON frame dicts.
        captureName, captureId: metadata stored on the returned capture.
    Returns:
        KeypointCapture with one entry per input frame.
    """
    # Pre-size the per-frame lists for performance.
    keypoint_capture = KeypointCapture()
    keypoint_capture.right_hand_keypoints = [None] * len(jsonFrames)
    keypoint_capture.left_hand_keypoints = [None] * len(jsonFrames)
    keypoint_capture.body_keypoints = [None] * len(jsonFrames)
    for i in range(len(jsonFrames)):
        json_frame = jsonFrames[i]
        # TODO: Generalize to any number of people in scene
        person = json_frame["people"][0]
        body_keypoints = person["pose_keypoints_2d"]
        left_hand_keypoints = person["hand_left_keypoints_2d"]
        right_hand_keypoints = person["hand_right_keypoints_2d"]
        # Body points: the flat array packs (x, y, conf) triplets.
        body_dict = {}
        for j in range(len(body_keypoints) // 3):
            x_body = body_keypoints[3*j]
            y_body = body_keypoints[3*j+1]
            conf_body = body_keypoints[3*j+2]
            body_dict[ORDERED_KEYPOINTS_BODY[j]] = [x_body, y_body, conf_body]
        # Hand points (both hands share the same joint ordering).
        left_hand_dict = {}
        right_hand_dict = {}
        for j in range(len(ORDERED_KEYPOINTS_HAND)):
            x_left = left_hand_keypoints[3*j]
            y_left = left_hand_keypoints[3*j+1]
            conf_left = left_hand_keypoints[3*j+2]
            left_hand_dict[ORDERED_KEYPOINTS_HAND[j]] = [x_left, y_left, conf_left]
            x_right = right_hand_keypoints[3*j]
            y_right = right_hand_keypoints[3*j+1]
            conf_right = right_hand_keypoints[3*j+2]
            right_hand_dict[ORDERED_KEYPOINTS_HAND[j]] = [x_right, y_right, conf_right]
        keypoint_capture.body_keypoints[i] = body_dict
        keypoint_capture.left_hand_keypoints[i] = left_hand_dict
        keypoint_capture.right_hand_keypoints[i] = right_hand_dict
    keypoint_capture.capture_name = captureName
    keypoint_capture.capture_id = captureId
    keypoint_capture.num_frames = len(jsonFrames)
    return keypoint_capture
|
# Python program for implementation
# of Bisection Method for
# solving equations
from sympy import *
import os.path
def func(expr, value, x):
    """Evaluate *expr* by substituting *value* for the symbol *x*."""
    substituted = expr.subs(x, value)
    return substituted
# Prints root of func(x)
# with error of EPSILON
def bisection(a, b, expr, maxIteration, Epsilon, x):
    """Bisection root finder for the sympy expression *expr* on [a, b].

    Prints an iteration table, logs each iteration to ../View/values.txt,
    and returns "iterations ): root" on success, or None when f(a) and f(b)
    do not bracket a root.  Stops on the relative-error criterion Epsilon,
    an exact root, or maxIteration.

    Fixes vs. the original:
    - the log file is managed with a `with` block (the original opened it
      up front and leaked the handle when returning early on a bad bracket);
    - the existence check probed '../Viewss/values.txt' (typo) and only ran
      after the file had already been created/truncated, so it always said
      "File exist"; it now checks the real path before anything is written.
    """
    print("In bisectiooon")
    if os.path.isfile('../View/values.txt'):
        print("File exist")
    else:
        print("File not exist")
    # A root is only bracketed when f(a) and f(b) have opposite signs.
    if (func(expr, a, x) * func(expr, b, x) >= 0):
        print("You have not assumed right a and b\n")
        return
    c = a
    step = 1
    with open("../View/values.txt", "w") as log_file:
        while (step < maxIteration):
            print('Iteration(%d): X(lower) | f(X-lower) | X(upper) | f(X-upper) | X(mid) | f(X-mid)' % step)
            print("\t\t\t %.6f \t %.6f \t %.6f \t %.6f \t %.6f \t %.6f \n" % (
                a, func(expr, a, x), b, func(expr, b, x), c, func(expr, c, x)))
            log_file.write("%.6f %.6f %.6f %.6f %.6f %.6f \n" % (
                a, func(expr, a, x), b, func(expr, b, x), c, func(expr, c, x)))
            c_old = c
            # Find middle point
            c = (a+b)/2
            # Relative-error stopping criterion.
            if (abs((c-c_old)/c) < Epsilon):
                break
            # Check if middle point is root
            if (func(expr, c, x) == 0.0):
                break
            # Decide the side to repeat the steps: keep the half-interval
            # whose endpoints still bracket the root.
            if (func(expr, c, x)*func(expr, a, x) < 0):
                b = c
            else:
                a = c
            step += 1
    finalIteration = step
    return "%d ): %.6f" % (finalIteration, c)
# Main code
# Main code
def mainFunc(function, maxIteration, epsilon, a, b):
    """Parse *function* (a string) with sympy and run the bisection solver."""
    # the possible variable names must be known beforehand...
    parsed_expr = sympify(function)
    symbol_x = var('x')
    return bisection(a, b, parsed_expr, maxIteration, epsilon, symbol_x)
|
from ABC.Instruction import Instruction
from ABC.NodeAST import NodeAST
from ST.Exception import Exception
from ST.SymbolTable import SymbolTable
from ST.Symbol import Symbol
from ST.Type import TYPE
from ST.Type import getTypeString
import copy
class ArrayDeclarationType2(Instruction):
    """AST instruction: array declaration with an explicit initializer list
    (a "type 2" declaration, e.g. `tipo id[] = {expr, ...}`).

    Note: `Exception` here is the project's ST.Exception error type (it
    shadows the builtin), used as an error return value, not raised.
    """

    def __init__(self, typeDeclaration, listDimension, id, listExpression, line, column):
        self.typeDeclaration = typeDeclaration  # element TYPE of the array
        self.listDimension = listDimension      # declared dimensions
        self.id = id                            # variable name
        self.listExpression = listExpression    # (possibly nested) initializer expressions
        self.line = line
        self.column = column
        self.array = False
        self.arrayDim = 0
        self.arraySize = 0                      # total element count, filled by __checkArray
        self.ReportSymbol = None                # symbol-report entry, filled on interpret

    def interpreter(self, tree, table):
        """Evaluate the initializer, build the Symbol and insert it in *table*.

        Returns the project Exception instance on any semantic error,
        otherwise None.
        """
        value = self.__checkArray(copy.copy(self.listExpression), tree, table)
        if isinstance(value, Exception): return value
        self.type = self.typeDeclaration
        self.array = True
        symbol = Symbol(str(self.id), self.typeDeclaration, True, self.line, self.column, value[0])
        symbol.setArrayDimensions(self.listDimension)
        symbol.setArraySize(self.arraySize)
        self.ReportSymbol = ({"ID": self.id,
                              "TYPE": 'ARRAY',
                              "TYPE2": getTypeString(self.typeDeclaration),
                              "ENTORNO": "Declaracion Arreglo Tipo 2",
                              "VALOR": self.arraySize,
                              "LINE": self.line,
                              "COLUMN": self.column,
                              })
        result = table.setTable(symbol)
        if isinstance(result, Exception): return result
        return

    def __checkArray(self, listExpression, tree, table):
        """Recursively interpret the initializer list, type-checking each leaf.

        Returns the evaluated (nested) value list, or the project Exception
        on a type mismatch / evaluation error.  Side effect: accumulates the
        total element count in self.arraySize.
        """
        array = []
        for expression in listExpression:
            if isinstance(expression, list):
                value = self.__checkArray(expression, tree, table)
                if isinstance(value, Exception): return value
                array.append(value)
            else:
                self.arraySize += 1
                value = expression.interpreter(tree, table)
                if isinstance(value, Exception): return value
                if expression.type != self.typeDeclaration:
                    return Exception("Semantico", "Tipos de datos diferentes en arreglo \""+self.id+"\" ", expression.line, expression.column)
                array.append(value)
        return array

    def getNode(self):
        """Build the AST-viewer node for this declaration."""
        node = NodeAST("DECLARACION ARREGLO")
        node.addValueChild(getTypeString(self.typeDeclaration))
        node.addValueChild(str(self.listDimension))
        node.addValueChild(str(self.id))
        nodeExpressions = NodeAST("EXPRESION DE LAS DIMENSIONES")
        # FIX (idiom): compare against None with `is not`, not `!=`.
        if self.listExpression is not None:
            self.__generateNode(self.listExpression, nodeExpressions)
        node.addChild(nodeExpressions)
        return node

    def __generateNode(self, listExpression, rootNode):
        """Attach each initializer expression's node beneath *rootNode*."""
        for expression in listExpression:
            if isinstance(expression, list):
                rootNode = self.__generateNode(expression, rootNode)
            else:
                rootNode.addChild(expression.getNode())
        return rootNode
|
class TicTacToe:
    """Minimal 3x3 tic-tac-toe game state.

    `player` is whichever of "X"/"O" moves next; `gf` is the 3x3 game field
    holding "X", "O" or None per cell.

    BUGFIX: string comparisons now use `==` instead of `is`.  Identity
    comparison against str literals only works by CPython's interning
    accident and breaks for equal-but-distinct string objects; `is` is kept
    only for the None checks, where identity is correct.
    """

    def __init__(self, beginner):
        self.player = beginner
        self.gf = [[None, None, None],
                   [None, None, None],
                   [None, None, None]]

    def swap_player(self):
        """Toggle the player to move between "X" and "O"."""
        if self.player == "X":
            self.player = "O"
        elif self.player == "O":
            self.player = "X"

    def set_obj(self, row, column):
        """Place the current player's mark at (row, column).

        Returns True (and swaps the turn) when the move is legal, i.e. the
        cell is empty and nobody has won yet; otherwise False.
        """
        if self.gf[row][column] is None and self.check_player_has_won() is None:
            self.gf[row][column] = self.player
            self.swap_player()
            return True
        return False

    def check_player_has_won(self):
        """Return "X" or "O" if that player has three in a line, else None."""
        # check rows
        for row in self.gf:
            if row[0] == row[1] and row[0] == row[2] and row[0] is not None:
                return row[0]
        # check columns
        for i in range(3):
            if self.gf[0][i] == self.gf[1][i] and self.gf[0][i] == self.gf[2][i] and self.gf[0][i] is not None:
                return self.gf[0][i]
        # check top left to bottom right
        if self.gf[0][0] == self.gf[1][1] and self.gf[0][0] == self.gf[2][2] and self.gf[0][0] is not None:
            return self.gf[0][0]
        # check bottom left to top right
        if self.gf[2][0] == self.gf[1][1] and self.gf[2][0] == self.gf[0][2] and self.gf[2][0] is not None:
            return self.gf[2][0]
        # return None if no one has won
        return None

    def check_draw(self):
        """True when every cell is filled (check for a winner separately)."""
        for row in self.gf:
            if row[0] is None or row[1] is None or row[2] is None:
                return False
        return True

    def restart(self):
        """Reset the board; the player who did NOT begin last game starts."""
        self.swap_player()
        self.__init__(self.player)

    def set_player(self, player):
        """Force whose turn it is."""
        self.player = player
|
from django import template

register = template.Library()


def include_filter(value, values):
    """Template filter: True when *value* equals any element of *values*
    after coercing each element to int via int(str(x)).

    The str() round-trip lets *values* hold a mix of ints and numeric
    strings, as template variables often do.
    CLEANUP: dropped the redundant `True if ... else False` around the
    boolean `in` expression.
    """
    return value in [int(str(x)) for x in values]


register.filter('include', include_filter)
|
def most_frequent_item_count(collection):
    """Return how many times the most frequent item occurs (0 for empty input).

    PERF: the original called collection.count(i) for every element, an
    O(n^2) scan; collections.Counter tallies everything in one O(n) pass.
    """
    from collections import Counter  # local import keeps this one-function module self-contained
    return max(Counter(collection).values()) if collection else 0
'''
Complete the function to find the count of the most frequent item of an array.
You can assume that input is an array of integers. For an empty array return 0
Example
input array: [3, -1, -1, -1, 2, 3, -1, 3, -1, 2, 4, 9, 3]
output: 5
The most frequent number in the array is -1 and it occurs 5 times.
'''
|
import telegram
import logging
import json
import os
from flask import Flask
from flask import request
app = Flask(__name__)
# NOTE(review): secret key is hard-coded in source -- rotate it and load it
# from the environment like BOT_TOKEN / CHAT_ID below.
app.secret_key = 'aYT>.L$kk2h>!'
# Telegram credentials come from the environment (KeyError at startup if unset).
bot = telegram.Bot(token=os.environ["BOT_TOKEN"])
chatID = os.environ["CHAT_ID"]
@app.route('/alert', methods=['POST'])
def postAlertmanager():
    """Receive an Alertmanager webhook POST and forward each alert to Telegram.

    Builds one human-readable message per alert (name, status, firing/resolved
    timestamp, instance, description, summary) and sends it to the configured
    chat.  Always answers HTTP 200; failures are reported into the chat itself.
    """
    content = json.loads(request.get_data())
    # with open("Output.txt", "w") as text_file:
    #     text_file.write("{0}".format(content))
    try:
        for alert in content['alerts']:
            message = """Alertname: """+alert['labels']['alertname']+""" \n"""
            message += """Status: """+alert['status']+""" \n"""
            if alert['status'] == "firing":
                message += """Detected: """+alert['startsAt']+""" \n"""
            if alert['status'] == "resolved":
                message += """Resolved: """+alert['endsAt']+""" \n"""
            # Container alerts carry a `name` label; include it when present.
            if 'name' in alert['labels']:
                message += """Instance: """+alert['labels']['instance']+"""("""+alert['labels']['name']+""") \n"""
            else:
                message += """Instance: """+alert['labels']['instance']+""" \n"""
            message += """\n"""+alert['annotations']['description']+""""""
            message += """\n"""+alert['annotations']['summary']+""""""
            message += "\n------ END OF MESSAGE ------"
            bot.sendMessage(chat_id=chatID, text=message)
        return "Alert OK", 200
    except Exception as e:
        # Broad catch is deliberate at this webhook boundary: report the
        # failure into the chat and still answer 200 to Alertmanager.
        print(e)
        bot.sendMessage(chat_id=chatID, text="Error! %s" % e)
        return "Alert nOK %s" %e, 200
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger -- do not expose
    # this in production.
    app.run(host='0.0.0.0', port=9119, debug=True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.