text stringlengths 8 6.05M |
|---|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
from cuisine import group_check as get
from cuisine import group_create as create
from cuisine import group_ensure as ensure
from cuisine import group_user_add as user_add
from cuisine import group_user_check as user_check
from cuisine import group_user_ensure as user_ensure
|
# URL routes for this app: each entry maps a URL suffix to its view.
from django.urls import path
from . import views

urlpatterns = [
    path('', views.home, name="home"),                     # landing page
    path('flashcards', views.flashcards, name="flash"),    # flashcard review
    path('questions', views.questions, name="questions"),  # question bank
    path('interview', views.interview, name="interview"),  # interview practice
]
import hashlib
import math
import pickle
import numpy as np
from pathlib import Path
from picp.util.geometry import intersection_between_segments, normalize
from picp.util.pose import Pose
from picp.util.position import Position
class SensorModel:
    """Abstract interface for range-sensor noise models."""

    def apply_noise(self, origin: Position, intersection: Position):
        """Return `intersection` perturbed by sensor noise, as measured from `origin`."""
        raise NotImplementedError("Abstract function.")
class LMS151(SensorModel):
    """Gaussian range-noise model for the SICK LMS151 laser scanner."""

    def apply_noise(self, origin: Position, intersection: Position):
        """Return `intersection` displaced along the beam by Gaussian range noise.

        Noise modeled based on "Noise characterization of depth sensors for
        surface inspections", 2015.  Returns `origin` unchanged for a
        zero-length beam, where normalization would be undefined.
        """
        v = intersection - origin
        dist = v.norm
        # Guard moved before the noise computation: the original computed the
        # noisy distance first and only then checked for a degenerate beam.
        if dist == 0.0:
            return origin
        # Constant noise floor below ~1.646 m, distance-proportional above it.
        # (The original wrapped this choice in a dead `if True: ... else:`
        # debug scaffold, removed here.)
        sigma_r = (6.8 * dist + 0.81) / 1000
        sigma_d = 0.012
        sigma = sigma_d if dist < 1.646 else sigma_r
        noisy_dist = sigma * np.random.randn() + dist
        return origin + normalize(v) * noisy_dist
class ScanGenerator:
    """Simulates a 2D laser scan by ray-casting beams against wall segments.

    Generated scans are cached on disk under ~/.cache/picp, keyed by an md5
    hash of the pose, the walls and the beam count.
    """

    def __init__(self, walls, nb_beam=180, sensor_model: SensorModel = None):
        self.walls = walls
        self.nb_beam = nb_beam
        self.max_range_beam = 100  # beams are cast as segments of this length
        # Default created per-instance; the original used `LMS151()` as the
        # default argument, which is evaluated once at class definition and
        # shared by every instance.
        self.sensor_model = sensor_model if sensor_model is not None else LMS151()

    def generate(self, pose: Pose, check_cache=False):
        """Generate an (N, 2) scan taken from `pose`; optionally reuse the cache."""
        if check_cache and self.scan_exist(pose):
            return self.load_scan(pose)
        origin = pose.position
        # The first point taken by the sensor is parallel to the y axis.
        orientation = pose.orientation + np.pi / 2
        pts = []
        for beam_id in range(self.nb_beam):
            angle_rad = beam_id / self.nb_beam * 2 * math.pi + orientation
            end_beam = origin + Position.from_angle(angle_rad, norm=self.max_range_beam)
            closest_inter = None
            for wall in self.walls:
                inter = intersection_between_segments(origin, end_beam, wall.p1, wall.p2)
                if inter is None:
                    continue
                # Keep only the nearest hit along this beam.
                if closest_inter is None or (closest_inter - origin).norm > (inter - origin).norm:
                    closest_inter = inter
            if closest_inter is not None:
                point = self.sensor_model.apply_noise(origin, closest_inter) - origin
                # Express the hit in the scanner's own frame.
                scan_frame_point = point.rotate(-pose.orientation)
                pts.append(scan_frame_point.to_tuple())
        scan = np.array(pts)
        self.save_scan(pose, scan)
        return scan

    def scan_exist(self, pose):
        """Return True when a cached scan exists for this pose/walls/beam count."""
        if not self._cache_root().exists():
            return False
        return self._path_to_cache(pose).exists()

    def load_scan(self, pose):
        """Load a previously cached scan for `pose`."""
        path = self._path_to_cache(pose)
        print(f"Loading from cache {path}")
        # Context manager closes the handle (the original leaked it).
        with open(path, "rb") as f:
            return pickle.load(f)

    def save_scan(self, pose, scan):
        """Persist `scan` in the cache directory, creating it if needed."""
        folder = self._cache_root()
        # parents/exist_ok: no longer fails when ~/.cache itself is missing
        # or when another process created the directory concurrently.
        folder.mkdir(parents=True, exist_ok=True)
        path = self._path_to_cache(pose)
        print(f"Saving scan into the cache {path}")
        with open(path, "wb") as f:
            pickle.dump(scan, f)

    def _generate_hash(self, pose):
        """Cache key: md5 over the pose, the walls and the beam count."""
        value = str(pose) + "".join(str(w) for w in self.walls) + str(self.nb_beam)
        return hashlib.md5(value.encode()).hexdigest()

    def _cache_root(self):
        return Path.home() / '.cache' / 'picp'

    def _path_to_cache(self, pose):
        return self._cache_root() / self._generate_hash(pose)
|
from django.contrib import admin
from .models import *

# Register your models here.
# UserAccount/Manager registrations are left commented out.
#admin.site.register(UserAccount)
#admin.site.register(Manager)
admin.site.register(Employee)
admin.site.register(Judge)
admin.site.register(LowOfficer)
admin.site.register(Case)
# NOTE(review): model name looks misspelled ("Schedule"?) — it must match the
# model class, so confirm against models.py before renaming.
admin.site.register(Shedule)
admin.site.register(Comment)
admin.site.register(Decision)
#!/usr/bin/env python3
# Advent of code Year 2019 Day 25 solution
# Author = seven
# Date = December 2019
import sys
from os import path
sys.path.insert(0, path.dirname(path.dirname(path.abspath(__file__))))
from shared import vm
# Read the puzzle input that sits next to this script.
# Bug fix: the original used `__file__.rstrip("code.py") + "input.txt"`, but
# str.rstrip strips any trailing characters from the set {c,o,d,e,.,p,y} —
# not the literal suffix — which corrupts the path for other file names.
with open(path.join(path.dirname(path.abspath(__file__)), "input.txt"), 'r') as input_file:
    program = input_file.read()
class Crawl(vm.VM):
    """Interactive ASCII adventure driver built on top of the Intcode VM."""

    def __init__(self, program: str):
        self.input_buffer = ''
        self.input_buffer_read_pos = 0
        self.output_buffer = ''
        super().__init__(program=program, input=vm.IO(), output=vm.IO())

    def load_from_input(self, a: vm.Param):
        """Feed the next buffered character to the VM as its input value."""
        pos = self.input_buffer_read_pos
        self.input.value = ord(self.input_buffer[pos])
        self.input_buffer_read_pos = pos + 1
        super().load_from_input(a)

    def store_to_output(self, a: vm.Param):
        """Collect ASCII output; on a 'Command?' prompt, read a line from the user."""
        super().store_to_output(a)
        code = self.output.value
        if code >= 256:
            # Non-ASCII values (e.g. the final score) are ignored here.
            return
        if code != 10:
            self.output_buffer += chr(code)
            return
        # Newline: either answer the prompt or flush the buffered line.
        if self.output_buffer == 'Command?':
            command = input('>')
            self.input_buffer = command + '\n'
            self.input_buffer_read_pos = 0
        else:
            print(self.output_buffer)
        self.output_buffer = ''
# Run the crawler interactively against the loaded Intcode program.
crawler = Crawl(program=program)
crawler.run()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 9 04:47:12 2020
@author: lifecell
"""
def hello():
    """Print a fixed greeting."""
    greeting = "Hello World!!"
    print(greeting)
def hi(name):
    """Print a greeting addressed to `name`."""
    message = f"Hello {name}!!!"
    print(message)
def hi2(name='Nahush'):
    """Print a greeting for `name`, defaulting to 'Nahush'."""
    message = f"Hello {name}!!!"
    print(message)
def FibNum(num=20):
    '''Calculates and returns the fibonacci series from 1 to num'''
    series = []
    current, following = 0, 1
    # Emit `num` terms starting from 0, 1, 1, 2, ...
    for _ in range(num):
        series.append(current)
        current, following = following, current + following
    return series
def calcMean(first, *remainder):
    """Return the arithmetic mean of `first` and any additional values."""
    values = (first,) + remainder
    return sum(values) / len(values)
def fib2(n):
    """Return the n-th Fibonacci number (fib2(0) == 0, fib2(1) == 1).

    Rewritten iteratively: the original naive double recursion was O(2^n)
    and hit the recursion limit for moderate n; this is O(n) time, O(1)
    space, with identical results for all n >= 0.

    Raises ValueError for negative n (the original recursed forever).
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
|
import numpy as np
import operator
import matplotlib.pyplot as plt
allYears = ["09", "10", "11", "12", "13", "14", "15", "16"]
def make_the_plot(number_langs, sum_col, my_data, year_):
    """Plot the five most-used languages of one year as a bar chart.

    :param number_langs: number of rows (languages) in `my_data`
    :param sum_col: per-column totals used to normalise each language's counts
    :param my_data: records of (language name, per-column usage counts)
    :param year_: two-digit year string, used in the title and output file name
    """
    # Was hard-coded to 7; derive the column count from the data instead.
    n_cols = len(sum_col)
    usage = []
    for i in range(number_langs):
        name_lang = my_data[i][0]
        # Average share of this language across the columns, as a percentage.
        total = sum(v / sum_col[idx] for idx, v in enumerate(my_data[i][1]))
        usage.append((name_lang, total / n_cols * 100))
    ranked = sorted(usage, key=lambda tup: tup[1], reverse=True)
    first_five = ranked[:5]
    five_langs = [x[0] for x in first_five]
    five_langs.append("Other " + str(number_langs - 5))
    five_percentages = [x[1] for x in first_five]
    # Everything outside the top five is lumped into a single remainder bar.
    # (The original computed this with a redundant `sum(x for x in ...)`.)
    five_percentages.append(100 - sum(five_percentages))
    plot_values = list(range(1, 7))
    plt.xticks(plot_values, five_langs)
    plt.bar(plot_values, five_percentages)
    plt.title("Year" + year_)
    plt.savefig('/home/tiaghoul/PycharmProjects/iic-GoogleJamStudy/images/best_five_each_year/best_five_' + year_ + '.png')
    plt.close()
# For each contest year, load the per-language counts and plot the top five.
for year in allYears:
    file_name = '/home/tiaghoul/PycharmProjects/iic-GoogleJamStudy/langsPerYear/langs_year_' + year + ".csv"
    # Each CSV row: a language name followed by 7 integer usage columns.
    data = np.genfromtxt(file_name, comments="?", dtype=[('lang', np.str_, 50), ('values', np.int32, (7,))], skip_header=1, delimiter=',')
    sum_by_columns = np.nansum(data["values"], axis=0)
    n_langs = len(data)
    make_the_plot(n_langs, sum_by_columns, data, year)
|
from django.conf.urls import url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from . import views

### FLOW
## url is entered -> matches url -> goes into views calls a function -> Function does some computation -> Renders a new page
urlpatterns = [
    url(r'^$', views.index, name='index'),
    # Per-user points page; captures <username> (word chars plus . @ + -).
    url(r'^users/(?P<username>[\w.@+-]+)/$',views.displaypoints,name = 'displaypoints'),
    url(r'^portalapp/logissue/$',views.logissue,name='logissue'),
    url(r'^portalapp/report/$',views.report,name='report'),
    url(r'^portalapp/uploadhandle/$',views.uploadhandle,name='uploadhandle'),
    # NOTE(review): the ".html" inside this URL pattern looks unintentional —
    # confirm against the templates before changing it.
    url(r'^registration/passwordreset.html/$',views.passwordreset,name='passwordreset'),
]
# Serve static files in development.
urlpatterns += staticfiles_urlpatterns()
# -*- coding: utf-8 -*-
class Solution:
    def fairCandySwap(self, A, B):
        """Return [a, b] such that swapping Alice's a for Bob's b equalises totals."""
        # Alice must give away half of the (signed) difference in totals.
        half_gap = (sum(A) - sum(B)) // 2
        bobs = set(B)
        for candy in A:
            target = candy - half_gap
            if target in bobs:
                return [candy, target]
# Smoke tests: each pair of bars has a known fair swap.
if __name__ == "__main__":
    solution = Solution()
    assert [1, 2] == solution.fairCandySwap([1, 1], [2, 2])
    assert [1, 2] == solution.fairCandySwap([1, 2], [2, 3])
    assert [2, 3] == solution.fairCandySwap([2], [1, 3])
    assert [5, 4] == solution.fairCandySwap([1, 2, 5], [2, 4])
|
import requests
import re
import time
import csv
import os
from bs4 import BeautifulSoup
# Scrape Naver Finance sector listings and save the chosen sector to CSV.
url1 = "https://finance.naver.com/sise/sise_group.nhn?type=upjong"
res1 = requests.get(url1)
succece = res1.status_code
if succece == 200:
    soup1 = BeautifulSoup(res1.text, "lxml")
    jongAll = soup1.find_all("td", attrs={"style":re.compile("^padding")})
    print("-------전체 종목-------")
    for jong in jongAll:
        print(jong.get_text())
    selecteJong = input("종목을 입력하세요. :")
    day = time.strftime("_%Y_%m_%d", time.localtime(time.time()))
    filepath = "./infos"
    if not os.path.exists("./infos"):
        os.mkdir("./infos")
    filename = "./infos/" + selecteJong + day + ".csv"
    # `with` guarantees the CSV handle is closed even if a request below
    # raises (the original never closed it).
    with open(filename, "w", encoding="utf-8-sig", newline="") as f:
        writer = csv.writer(f)
        for jong in jongAll:
            if jong.get_text() == selecteJong:
                link = "https://finance.naver.com/" + jong.a["href"]
                res2 = requests.get(link)
                res2.raise_for_status()
                soup2 = BeautifulSoup(res2.text, "lxml")
                title = "종목명	현재가	전일비	등락률	매수호가	매도호가	거래량	거래대금	전일거래량".split("\t")
                writer.writerow(title)
                dataTable = soup2.find("tbody").find_all("tr")
                for datas_row in dataTable:
                    datas = datas_row.find_all("td")
                    data = [column.get_text().strip() for column in datas]
                    # Skip spacer rows that carry no real columns.
                    if len(data) <= 1:
                        continue
                    writer.writerow(data)
                break
        else:
            # for/else: runs only when no sector matched (no `break`); the
            # original printed this message on every non-matching iteration.
            print("Cant Found Stock Item!! Your's Enter :", selecteJong)
else:
    print("Error! requests failed!")
    # Bug fix: the original referenced undefined names `url` and `res`
    # (NameError on the failure path); the request objects are url1/res1.
    print("url =", url1, "code =", res1.status_code)
|
class Solution(object):
    def maxProfit(self, prices, fee):
        """
        :type prices: List[int]
        :type fee: int
        :rtype: int
        """
        # Greedy: track the effective purchase price; bank profit whenever a
        # sale beats the fee, then allow "re-buying" at price - fee so a later
        # higher sale only pays the fee once.
        profit = 0
        effective_buy = float('inf')
        for current in prices:
            if current < effective_buy:
                effective_buy = current
            elif current > effective_buy + fee:
                profit += current - effective_buy - fee
                effective_buy = current - fee
        return profit
|
from __future__ import with_statement
import os
import unittest
try:
from unittest import mock
except ImportError:
import mock # < PY33
from flake8 import engine
from flake8.util import is_windows
class IntegrationTestCase(unittest.TestCase):
    """Integration style tests to exercise different command line options."""

    def this_file(self):
        """Return the real path of this file."""
        this_file = os.path.realpath(__file__)
        # Map a compiled .pyc path back to the .py source.
        if this_file.endswith("pyc"):
            this_file = this_file[:-1]
        return this_file

    def check_files(self, arglist=None, explicit_stdin=False, count=0):
        """Run flake8's check_files over this file (or stdin) and assert the error count.

        `arglist` now defaults to None instead of a mutable `[]` (the
        shared-mutable-default anti-pattern); behavior is unchanged for
        all existing callers.
        """
        arglist = [] if arglist is None else arglist
        if explicit_stdin:
            target_file = "-"
        else:
            target_file = self.this_file()
        argv = ['flake8'] + arglist + [target_file]
        with mock.patch("sys.argv", argv):
            style_guide = engine.get_style_guide(parse_argv=True)
            report = style_guide.check_files()
            self.assertEqual(report.total_errors, count)
            return style_guide, report

    def test_no_args(self):
        # assert there are no reported errors
        self.check_files()

    def _job_tester(self, jobs):
        # mock stdout.flush so we can count the number of jobs created
        with mock.patch('sys.stdout.flush') as mocked:
            guide, report = self.check_files(arglist=['--jobs=%s' % jobs])
            if is_windows():
                # The code path where guide.options.jobs gets converted to an
                # int is not run on windows. So, do the int conversion here.
                self.assertEqual(int(guide.options.jobs), jobs)
                # On windows, call count is always zero.
                self.assertEqual(mocked.call_count, 0)
            else:
                self.assertEqual(guide.options.jobs, jobs)
                self.assertEqual(mocked.call_count, jobs)

    def test_jobs(self):
        self._job_tester(2)
        self._job_tester(10)

    def test_stdin(self):
        self.count = 0

        def fake_stdin():
            self.count += 1
            with open(self.this_file(), "r") as f:
                return f.read()

        with mock.patch("pycodestyle.stdin_get_value", fake_stdin):
            guide, report = self.check_files(arglist=['--jobs=4'],
                                             explicit_stdin=True)
        # The stdin provider must be consulted exactly once despite --jobs=4.
        self.assertEqual(self.count, 1)

    def test_stdin_fail(self):
        def fake_stdin():
            return "notathing\n"

        with mock.patch("pycodestyle.stdin_get_value", fake_stdin):
            # only assert needed is in check_files
            guide, report = self.check_files(arglist=['--jobs=4'],
                                             explicit_stdin=True,
                                             count=1)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import easy_thumbnails.fields
class Migration(migrations.Migration):
    """Initial schema for the store ("loja") app: Cliente, ItemPedido, Pedido,
    Produto, Recebimento and Remessa, plus the relations linking them.

    Auto-generated by Django's makemigrations; do not hand-edit applied
    operations — add a new migration instead.
    """

    dependencies = [
        ('localizacao', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Cliente',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('nome', models.CharField(max_length=100)),
                ('sobrenome', models.CharField(max_length=100, null=True, blank=True)),
                ('email', models.EmailField(max_length=254, verbose_name=b'E-mail')),
                ('telefone', models.CharField(default=b'', max_length=14)),
                ('endereco', models.ForeignKey(verbose_name=b'Endere\xc3\xa7o', to='localizacao.Endereco')),
            ],
        ),
        migrations.CreateModel(
            name='ItemPedido',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('quantidade', models.PositiveSmallIntegerField()),
                ('preco_unitario', models.DecimalField(verbose_name=b'Pre\xc3\xa7o Unit\xc3\xa1rio', max_digits=10, decimal_places=2)),
                ('preco_total', models.DecimalField(verbose_name=b'Pre\xc3\xa7o Total', max_digits=10, decimal_places=2)),
            ],
            options={
                'verbose_name': 'Item Pedido',
                'verbose_name_plural': 'Itens Pedido',
            },
        ),
        migrations.CreateModel(
            name='Pedido',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('data', models.DateTimeField(auto_now_add=True)),
                ('valor_total', models.DecimalField(max_digits=10, decimal_places=2)),
                ('status', models.CharField(default=b'CRIADO', max_length=10, choices=[(b'ATENDIDO', b'ATENDIDO'), (b'CANCELADO', b'CANCELADO'), (b'CRIADO', b'CRIADO'), (b'PENDENTE', b'PENDENTE'), (b'RECEBIDO', b'RECEBIDO')])),
                ('observacoes', models.TextField(null=True, verbose_name=b'Observa\xc3\xa7\xc3\xb5es', blank=True)),
                ('cliente', models.ForeignKey(to='loja.Cliente')),
            ],
        ),
        migrations.CreateModel(
            name='Produto',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('nome', models.CharField(max_length=100)),
                ('descricao', models.TextField(verbose_name=b'Descri\xc3\xa7\xc3\xa3o', blank=True)),
                ('preco', models.DecimalField(verbose_name=b'Pre\xc3\xa7o', max_digits=10, decimal_places=2)),
                ('imagem', easy_thumbnails.fields.ThumbnailerField(default=b'imagens/produtos/default.jpg', upload_to=b'imagens/produtos')),
            ],
        ),
        migrations.CreateModel(
            name='Recebimento',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('data', models.DateTimeField(auto_now_add=True)),
                ('receptor', models.CharField(max_length=100, verbose_name=b'Recebido por')),
                ('observacao', models.TextField(verbose_name=b'Observa\xc3\xa7\xc3\xa3o', blank=True)),
            ],
            options={
                'verbose_name': 'Recebimento',
                'verbose_name_plural': 'Recebimentos',
            },
        ),
        migrations.CreateModel(
            name='Remessa',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('remetente', models.CharField(max_length=100)),
                ('data', models.DateTimeField(auto_now_add=True)),
                ('observacao', models.TextField(verbose_name=b'Observa\xc3\xa7\xc3\xa3o', blank=True)),
                ('pedido', models.ForeignKey(to='loja.Pedido')),
                ('responsavel', models.OneToOneField(verbose_name=b'Respons\xc3\xa1vel', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Remessa',
                'verbose_name_plural': 'Remessas',
            },
        ),
        # Cross-model links added after all models exist to avoid forward
        # references.
        migrations.AddField(
            model_name='recebimento',
            name='remessa',
            field=models.OneToOneField(to='loja.Remessa'),
        ),
        migrations.AddField(
            model_name='pedido',
            name='itens',
            field=models.ManyToManyField(to='loja.Produto', through='loja.ItemPedido'),
        ),
        migrations.AddField(
            model_name='itempedido',
            name='pedido',
            field=models.ForeignKey(to='loja.Pedido'),
        ),
        migrations.AddField(
            model_name='itempedido',
            name='produto',
            field=models.ForeignKey(to='loja.Produto'),
        ),
    ]
|
from django.conf.urls import url
from django.contrib import admin
from django.urls import path
from MyApp import views as App_views

# No routes are wired up yet; add path()/url() entries here as views land.
urlpatterns = [
]
|
# First tried topo-sort + sorting — habitual thinking; it works, but it is
# simpler to BFS from each node, recording it as an ancestor of every node it
# can reach (children and deeper descendants), de-duplicating with a set.
# Visiting source nodes in increasing order makes each ancestor list sorted.
class Solution:
    def getAncestors(self, n: int, edges: list[list[int]]) -> list[list[int]]:
        """Return, for each node of a DAG, the sorted list of its ancestors.

        BFS from every node i along its out-edges: every node reachable from
        i has i as an ancestor.  Because i is visited in increasing order,
        each ancestor list comes out already sorted.
        """
        # Local imports: the surrounding file never imports these names
        # (the original raised NameError); `list[...]` likewise replaces the
        # un-imported `List` annotation.
        from collections import defaultdict, deque

        g = defaultdict(list)
        for u, v in edges:
            g[u].append(v)
        ans = [[] for _ in range(n)]
        for i in range(n):
            q = deque(g[i])
            seen = set(g[i])
            while q:
                u = q.popleft()          # node reachable from i
                ans[u].append(i)         # so i is one of u's ancestors
                for v in g[u]:
                    if v not in seen:
                        seen.add(v)
                        q.append(v)
        return ans
# Copyright (c) 2017-2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
from collections.abc import Sequence as Seq
from os import PathLike
from typing import (
AsyncContextManager,
BinaryIO,
Callable,
Collection,
Literal,
Optional,
Sequence,
Tuple,
Union,
overload,
)
from ..ledger.aio import Connection
from ..prim import Party
__all__ = ["connect_with_new_party"]
NameGenFn = Callable[[int], Optional[str]]
# `connect_with_new_party` allocates `party_count` fresh parties and opens a
# connection for each.  The Literal overloads below give callers a precise
# tuple arity for counts 1–4; the final overload covers any other int with a
# plain Sequence.
@overload
def connect_with_new_party(
    *,
    party_count: Literal[1] = 1,
    url: Optional[str] = None,
    read_as: Union[None, Party, Collection[Party]] = None,
    act_as: Union[None, Party, Collection[Party]] = None,
    admin: Optional[bool] = False,
    ledger_id: Optional[str] = None,
    dar: Union[None, str, bytes, PathLike, BinaryIO] = None,
    identifier_hint: Union[None, str, NameGenFn] = None,
    display_name: Union[None, str, NameGenFn] = None,
) -> AsyncContextManager[ConnectionWithParty]: ...
@overload
def connect_with_new_party(
    *,
    party_count: Literal[2],
    url: Optional[str] = None,
    read_as: Union[None, Party, Collection[Party]] = None,
    act_as: Union[None, Party, Collection[Party]] = None,
    admin: Optional[bool] = False,
    ledger_id: Optional[str] = None,
    dar: Union[None, str, bytes, PathLike, BinaryIO] = None,
    identifier_hint: Union[None, str, NameGenFn] = None,
    display_name: Union[None, str, NameGenFn] = None,
) -> AsyncContextManager[Tuple[ConnectionWithParty, ConnectionWithParty]]: ...
@overload
def connect_with_new_party(
    *,
    party_count: Literal[3],
    url: Optional[str] = None,
    read_as: Union[None, Party, Collection[Party]] = None,
    act_as: Union[None, Party, Collection[Party]] = None,
    admin: Optional[bool] = False,
    ledger_id: Optional[str] = None,
    dar: Union[None, str, bytes, PathLike, BinaryIO] = None,
    identifier_hint: Union[None, str, NameGenFn] = None,
    display_name: Union[None, str, NameGenFn] = None,
) -> AsyncContextManager[Tuple[ConnectionWithParty, ConnectionWithParty, ConnectionWithParty]]: ...
@overload
def connect_with_new_party(
    *,
    party_count: Literal[4],
    url: Optional[str] = None,
    read_as: Union[None, Party, Collection[Party]] = None,
    act_as: Union[None, Party, Collection[Party]] = None,
    admin: Optional[bool] = False,
    ledger_id: Optional[str] = None,
    dar: Union[None, str, bytes, PathLike, BinaryIO] = None,
    identifier_hint: Union[None, str, NameGenFn] = None,
    display_name: Union[None, str, NameGenFn] = None,
) -> AsyncContextManager[
    Tuple[ConnectionWithParty, ConnectionWithParty, ConnectionWithParty, ConnectionWithParty]
]: ...
# Fallback overload: any party count, returned as a Sequence.
@overload
def connect_with_new_party(
    *,
    party_count: int,
    url: Optional[str] = None,
    read_as: Union[None, Party, Collection[Party]] = None,
    act_as: Union[None, Party, Collection[Party]] = None,
    admin: Optional[bool] = False,
    ledger_id: Optional[str] = None,
    dar: Union[None, str, bytes, PathLike, BinaryIO] = None,
    identifier_hint: Union[None, str, NameGenFn] = None,
    display_name: Union[None, str, NameGenFn] = None,
) -> AsyncContextManager[Sequence[ConnectionWithParty]]: ...
class ConnectionWithParty(Seq["ConnectionWithParty"]):
    """Pairs a ledger Connection with the Party it acts as.

    Also behaves as a length-1 sequence of itself (see __len__), so single-
    and multi-party call sites can be iterated uniformly.
    """
    def __init__(self, connection: Connection, party: Party): ...
    @property
    def connection(self) -> Connection: ...
    @property
    def party(self) -> Party: ...
    def __getitem__(self, item): ...
    def __len__(self) -> Literal[1]: ...
|
# -*- coding: utf-8 -*-
'''
Fetches the users of the main database that belong to the "Cedlas" office and
synchronises them with the local Linux accounts.
Also updates each user's password in Samba.
'''
import connection
import users
import groups
import systems
import logging
import datetime
import jsonpickle
if __name__ == '__main__':
    # By default, synchronise everything changed within the last year.
    lastSinc = datetime.datetime.now() - datetime.timedelta(days=365)
    try:
        with open('/tmp/sinc.dat', 'r') as f:
            lastSinc = jsonpickle.decode(f.read())
    except Exception as e:
        # First run or unreadable state file: keep the one-year default.
        # (logging.warn is deprecated; use warning.)
        logging.warning(e)

    logging.getLogger().setLevel(logging.INFO)
    con = connection.getConnection()
    try:
        # Collect the user ids belonging to the 'Cedlas' office.
        usersToSinc = []
        offices = groups.OfficeDAO.findAll(con)
        for oid in offices:
            office = groups.OfficeDAO.findById(con, oid)
            if office.name == 'Cedlas':
                usersToSinc = office.users
                break

        import subprocess
        from subprocess import PIPE
        for uid in usersToSinc:
            ups = users.UserPasswordDAO.findByUserId(con, uid)
            if not ups:
                continue
            up = ups[0]
            if up.updated <= lastSinc:
                # Only update accounts modified since the last synchronisation.
                logging.warning('usuario {} clave ya actualizada {}'.format(up.username, up.updated))
                continue
            logging.info('sincronizando : {}'.format(up.username))
            # Argument list (no shell) avoids command injection via username.
            cp = subprocess.run(['useradd', up.username])
            logging.info(cp.returncode)
            # SECURITY NOTE(review): chpasswd still goes through a shell with
            # interpolated credentials; a crafted password could inject
            # commands.  Consider piping to chpasswd via stdin instead.
            cmd = "echo \"{}:{}\" | chpasswd".format(up.username, up.password)
            cp = subprocess.run(cmd, shell=True)
            logging.info(cp.returncode)
            # Feed the password twice to `smbpasswd -a -s` on stdin.
            cp = subprocess.Popen(['/usr/bin/smbpasswd', '-a', '-s', up.username],
                                  stdout=PIPE, stdin=PIPE, stderr=PIPE, universal_newlines=True)
            o = cp.communicate(input='{0}\n{0}\n'.format(up.password))
            logging.info(cp.returncode)
            logging.info(o)
    finally:
        connection.closeConnection(con)

    # Persist the timestamp of this synchronisation for the next run.
    lastSinc = datetime.datetime.now()
    with open('/tmp/sinc.dat', 'w') as f:
        f.write(jsonpickle.encode(lastSinc))
|
class Solution:
    def final_function(self, input):
        """Integer-break-style value built from powers of 3.

        Returns 0 when `input` < 1 or when `input` has no proper divisor
        (i.e. is prime or < 4); otherwise breaks `input` into threes with the
        usual remainder-1/remainder-2 adjustments.  Note `/` (true division)
        keeps the original's float exponents, so composite inputs yield floats.
        """
        if input < 1:
            return 0
        # Composite test: any proper divisor in [2, input).
        has_divisor = any(input % i == 0 for i in range(2, input))
        if not has_divisor:
            return 0
        remainder = input % 3
        if remainder == 0:
            return 3 ** (input / 3)
        if remainder == 1:
            # Prefer 2*2 over 3*1: split off a 4 when one 1 would remain.
            return 2 * 2 * 3 ** ((input - 4) / 3)
        return 2 * 3 ** ((input - 2) / 3)
|
import tensorflow as tf
from utils import read_data_file, creat_word_embedding, data_parse_one_direction
from CNN import Text_CNN
# Hyper-parameters and data paths (TensorFlow 1.x flag API).
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('embedding_dim', 100, 'dimension of word embedding')
tf.app.flags.DEFINE_integer('batch_size', 64, 'number of example per batch')
tf.app.flags.DEFINE_integer('n_classes', 2, 'number of distinct class')
tf.app.flags.DEFINE_integer('max_sentence_len', 20, 'max number of tokens per sentence')
tf.app.flags.DEFINE_float('l2_reg', 0.01, 'l2 regularization')
tf.app.flags.DEFINE_integer('n_iters', 10, 'number of train iter')
tf.app.flags.DEFINE_float('learning_rate', 0.001, 'learning rate')
tf.app.flags.DEFINE_string('train_file_path', 'data/restaurant/train.txt', 'training file')
tf.app.flags.DEFINE_string('test_file_path', 'data/restaurant/test.txt', 'testing file')
def main(_):
    """Load train/test data, build word embeddings, then train and evaluate the CNN."""
    print('loading data...')
    tr_revs, tr_revs_content = read_data_file(FLAGS.train_file_path)
    # Vocabulary and embedding matrix are built from the training corpus only.
    word_idx_map, w2v = creat_word_embedding(tr_revs_content, FLAGS.embedding_dim)
    tr_x, _, tr_y = data_parse_one_direction(tr_revs, word_idx_map, FLAGS.max_sentence_len)
    te_revs, _ = read_data_file(FLAGS.test_file_path)
    # The test set reuses the training vocabulary (unknown words handled by
    # data_parse_one_direction).
    te_x, _, te_y = data_parse_one_direction(te_revs, word_idx_map, FLAGS.max_sentence_len)
    cnn = Text_CNN(
        max_len=2 * FLAGS.max_sentence_len + 1,
        n_classes=FLAGS.n_classes
    )
    print('start training...')
    cnn.learn(
        word_idx_map, w2v, (tr_x, tr_y), (te_x, te_y),
        n_iters=FLAGS.n_iters, batch_size=FLAGS.batch_size, learning_rate=FLAGS.learning_rate
    )

if __name__ == '__main__':
    tf.app.run()
# Module imports
import pygame
from random import randrange

# Constants
WINDOW_SIZE = WINDOW_WIDTH, WINDOW_HEIGHT = (300, 300)
OBJECT_SIZE = 10

# Variables and initialisation
pygame.init()
screen = pygame.display.set_mode(WINDOW_SIZE)
# Snake starts on a random cell aligned to the OBJECT_SIZE grid.
x = randrange(0, WINDOW_WIDTH, OBJECT_SIZE)
y = randrange(0, WINDOW_HEIGHT, OBJECT_SIZE)
body_snake = [(x, y)]
length_snake = 1
dx, dy = 0, 0
fps = 7
apple = randrange(0, WINDOW_WIDTH, OBJECT_SIZE), randrange(0, WINDOW_HEIGHT, OBJECT_SIZE)
# Movement direction table (unit grid steps per key)
traffic_dict = {"W": (0, -1), "S": (0,1), "A": (-1,0), "D": (1,0)}
# Main game loop
while True:
    # Clear the screen to black
    screen.fill(pygame.Color('black'))
    # Draw the snake
    for i, j in body_snake:
        pygame.draw.rect(screen, pygame.Color('green'), (i, j, OBJECT_SIZE, OBJECT_SIZE))
    # Draw the apple
    pygame.draw.rect(screen, pygame.Color('red'), (*apple, OBJECT_SIZE, OBJECT_SIZE))
    # Advance the snake's head and trim the tail to the current length
    x += dx * OBJECT_SIZE
    y += dy * OBJECT_SIZE
    body_snake.append((x, y))
    body_snake = body_snake[-length_snake:]
    # Eating the apple: respawn it, grow the snake, speed up
    if body_snake[-1] == apple:
        apple = randrange(0, WINDOW_WIDTH, OBJECT_SIZE), randrange(0, WINDOW_HEIGHT, OBJECT_SIZE)
        length_snake += 1
        fps += 1
    # Read the currently pressed keys
    key = pygame.key.get_pressed()
    # Direction changes (reversing straight onto yourself is not allowed)
    if key[pygame.K_w] and (dx, dy) != traffic_dict["S"]:
        dx, dy = traffic_dict["W"]
    if key[pygame.K_s] and (dx, dy) != traffic_dict["W"]:
        dx, dy = traffic_dict["S"]
    if key[pygame.K_a] and (dx, dy) != traffic_dict["D"]:
        dx, dy = traffic_dict["A"]
    if key[pygame.K_d] and (dx, dy) != traffic_dict["A"]:
        dx, dy = traffic_dict["D"]
    # Leaving the screen bounds ends the game
    if x <0 or x > WINDOW_WIDTH or y < 0 or y > WINDOW_HEIGHT:
        break
    # The snake running into itself ends the game
    if len(body_snake) != len(set(body_snake)):
        break
    # Window-close handling
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            exit()
    # Refresh the display
    pygame.display.flip()
    # Frame-rate control
    # NOTE(review): a new Clock is created on every frame; the usual pattern is
    # one Clock before the loop — confirm before changing behavior.
    clock = pygame.time.Clock()
    # Cap loop iterations per second
    clock.tick(2*fps)
|
from rest_framework import generics
from django.views.generic import TemplateView
from .models import Deputy, PoliticalParty
from .serializers import DeputySerializer, PoliticalPartySerializer
class IndexView(TemplateView):
    """Render the static landing page."""
    template_name = 'index.html'
class DeputiesList(generics.ListCreateAPIView):
    """GET lists all deputies; POST creates a new one."""
    queryset = Deputy.objects.all()
    serializer_class = DeputySerializer
class PartiesList(generics.ListCreateAPIView):
    """GET lists all political parties; POST creates a new one."""
    queryset = PoliticalParty.objects.all()
    serializer_class = PoliticalPartySerializer
#Central Limit Theorem
from math import e, erf, pi, sqrt
def f(x):
    """Standard normal probability density at x."""
    numerator = e ** ((x ** 2) / -2)
    return numerator / sqrt(2 * pi)
def normal_distribution(mean, standard_deviation, x):
    """Normal(mean, sd) density at x, via the standard-normal density f."""
    scale = 1 / standard_deviation
    z = (x - mean) / standard_deviation
    return scale * f(z)
def central_limit(mean, standard_deviation, n, x):
    """Normal-approximation (CLT) density at `x` for the sum of `n` iid samples.

    Bug fix: the original evaluated the density at an undefined name
    `perbox` and could never run (NameError); the evaluation point is now
    the explicit parameter `x`.  The sum of n samples has mean n*mean and
    standard deviation sqrt(n)*standard_deviation.
    """
    total_mean = mean * n
    total_sd = standard_deviation * sqrt(n)
    return normal_distribution(total_mean, total_sd, x)
|
class Calculator:
    """Four-function calculator that records every intermediate result.

    Bug fix: `result` and `intermediate_results` were *class* attributes, so
    the history list was shared by every instance (and `clear()` on one
    instance wiped the others).  Both are now per-instance state set in
    __init__; the public interface is unchanged.
    """

    def __init__(self):
        self.result = 0
        self.intermediate_results = []

    def add(self, a, b):
        """Return a + b and record it in the history."""
        value = a + b
        self.intermediate_results.append(value)
        return value

    def subtract(self, a, b):
        """Return a - b and record it in the history."""
        value = a - b
        self.intermediate_results.append(value)
        return value

    def divide(self, a, b):
        """Return a / b truncated toward zero and record it.

        Raises ZeroDivisionError when b == 0 (unchanged behavior).
        """
        value = int(a / b)
        self.intermediate_results.append(value)
        return value

    def results(self):
        """Return the (live) list of recorded results."""
        return self.intermediate_results

    def clear(self):
        """Forget all recorded results and reset `result`."""
        self.intermediate_results.clear()
        self.result = 0
|
from IrreversibleDataType import explanations
class FileWriter(object):
    """
    Writes lines to file
    """
    @staticmethod
    def writeToFile(path, lines):
        """
        Write lines to file
        :param path: to file
        :param lines: of converted file
        """
        with open(path, mode='w+', encoding='utf-8') as myfile:
            myfile.write(''.join(lines))

    @staticmethod
    def convertToString(listOfIrreversibles):
        """
        Formats information about failed conversions to string
        :return: formatted report string

        Bug fix: the original did `list += str`, which extends the list one
        *character* at a time and returned a list of single characters.  The
        report is now built with append and returned as a real string —
        equivalent for the ``''.join(...)`` consumer in writeToFile.
        """
        parts = []
        # get same file names
        fileNames = set(ir.fileName for ir in listOfIrreversibles)
        for name in fileNames:
            # get list of irreversible data from same file
            sameFile = [ir for ir in listOfIrreversibles if ir.fileName == name]
            parts.append("_" * 100 + "\n")
            parts.append("Could not fully convert file: " + name + "\n")
            parts.append("_" * 90 + "\n")
            for values in sameFile:
                parts.append(
                    "In lines: {0} - {1}: {2} expression could not be converted \n\n"
                    "Old code:\n{3}Results:\n{4}\n".format(values.lineStart + 1, values.lineEnd + 1,
                                                           values.type.value[0], "".join(values.oldValue),
                                                           "-" * 25 + "\n" + explanations.get(values.type, "") +
                                                           "".join(values.codeReplacment) + "-" * 50))
        return "".join(parts)
|
"""
fucntions that work on uPub data, but do not look at the data itself
"""
def get_paths_of_files( path ):
    """
    - gets the paths of 5 files:
        - uPub document
        - supplement document
        - GFF/GTF
        - FASTA/FNA
        - PEP/PEP/FAA
    - returns dictionary
    - INPUT: path to publishable model
    - OUTPUT: dictionary of absolute paths to 5 files

    Cleanup: removed the unused `os`/`pprint` imports and flattened the
    branch ladder; classification behavior is unchanged.
    NOTE(review): only "GFF" is matched although the docstring mentions GTF,
    and paths are split on "/" (POSIX-only) — confirm both before changing.
    """
    file_list = getListOfFiles( path )
    file_dict = {}
    for file_path in file_list:
        file_name = file_path.split("/")[-1].upper()
        if "~" in file_name:
            # skip editor backup/lock files
            continue
        if "GFF" in file_name:
            file_dict["gff"] = file_path
        elif "FASTA" in file_name or "FNA" in file_name:
            file_dict["fna"] = file_path
        elif "PEP" in file_name or "FAA" in file_name:
            file_dict["faa"] = file_path
        elif "DOCX" in file_name:
            # Word documents: the main uPub manuscript or its supplement.
            if "UPUB" in file_name:
                file_dict["uPub_path"] = file_path
            elif "SUPP" in file_name:
                file_dict["supp_path"] = file_path
    return( file_dict )
def getListOfFiles( dirName ):
    """Recursively collect the paths of all files under `dirName`.

    Directories are expanded depth-first in os.listdir order; only file
    paths appear in the returned list.
    (adapted from: https://thispointer.com/python-how-to-get-list-of-files-in-directory-and-sub-directories/)
    """
    import os
    collected = list()
    for entry in os.listdir(dirName):
        full_path = os.path.join(dirName, entry)
        if os.path.isdir(full_path):
            # Recurse into subdirectories and splice in their files.
            collected.extend(getListOfFiles(full_path))
        else:
            collected.append(full_path)
    return( collected )
def purge( dir_path ):
    """
    - purges a particular directory of all files and subdirectories.
    - to be used for testing purposes and cleaning the in_progress folder only

    Fixes: the bare `except: pass` silently swallowed *every* error (e.g.
    PermissionError) only to crash on the mkdir below; now only a missing
    directory is tolerated.  The 1-second sleep between rmtree and mkdir is
    removed — shutil.rmtree is synchronous, so there is nothing to wait for.
    """
    import shutil  # to remove directory completely
    import os      # to make a new directory
    try:
        shutil.rmtree( dir_path )
        print( "Purged:\t{0}".format( dir_path ) )
    except FileNotFoundError:
        # Nothing to purge; we still create the directory below.
        pass
    os.mkdir( dir_path )
    print( "Created new directory:\t{0}".format( dir_path ) )
|
import os
import warnings
import numpy as np
from manimlib.constants import *
from manimlib.mobject.mobject import Mobject
from manimlib.mobject.geometry import Circle
from manimlib.mobject.svg.drawings import ThoughtBubble
from manimlib.mobject.svg.svg_mobject import SVGMobject
from manimlib.mobject.svg.tex_mobject import TextMobject
from manimlib.mobject.types.vectorized_mobject import VGroup
from manimlib.mobject.types.vectorized_mobject import VMobject
from manimlib.utils.config_ops import digest_config
from manimlib.utils.space_ops import get_norm
from manimlib.utils.space_ops import normalize
MY_CREATURE_DIR = r"C:\Users\jiage\Desktop\animation\manim\media\svg_images"
class MyCreature(SVGMobject):
    """SVG-backed cartoon creature (manim), assembled from named sub-parts.

    The SVG file is looked up as
    ``{file_name_prefix}_{creature_name}_{creature_action}.svg`` under
    MY_CREATURE_DIR; on failure a plain default design is loaded instead.
    """
    CONFIG = {
        "color": None,
        "file_name_prefix": "mycreatures",
        "stroke_width": 0,
        "stroke_color": BLACK,
        "fill_opacity": 1.0,
        "height": 3,
        "corner_scale_factor": 0.75,
        "flip_at_start": False,
        "is_looking_direction_purposeful": False,
        "start_corner": None,
        # Range of proportions along body where arms are
        "right_arm_range": [0.55, 0.7],
        "left_arm_range": [.34, .462],
        "pupil_to_eye_width_ratio": 0.4,
        "pupil_dot_to_pupil_width_ratio": 0.3,
        # Indices of the creature's parts within self.submobjects; they must
        # match the layer order of the SVG file.
        "body_index": 1,
        "head_index": 2,
        "head_color":BLUE,
        "body_color":BLUE,
        "eye_left_out_index": 3,
        "eye_right_out_index": 4,
        "eye_left_in_index": 5,
        "eye_right_in_index": 6,
        "mouth_index": 7,
    }
    def __init__(self, mode="plain", creature_name = "tau",creature_action = "ur", height = 3,**kwargs):#mode: look to the upper left;look to the top right corner;look in the lower left corner;look to the lower right corner
        self.height = height
        digest_config(self, kwargs)
        self.mode = mode
        self.parts_named = False
        try:
            svg_file = os.path.join(
                MY_CREATURE_DIR,
                "%s_%s_%s.svg" % (self.file_name_prefix, creature_name, creature_action)
            )
            SVGMobject.__init__(self, file_name=svg_file, **kwargs)
        except Exception:
            # Fall back to the plain default design when the requested
            # name/action combination has no SVG file.
            warnings.warn("No %s design with mode %s" %
                          (self.file_name_prefix, mode))
            svg_file = os.path.join(
                MY_CREATURE_DIR,
                "mycreature_tau_plain.svg",
            )
            # NOTE(review): `mode=` is passed through to SVGMobject here but
            # not in the primary call above — confirm SVGMobject accepts it.
            SVGMobject.__init__(self, mode="plain", file_name=svg_file, **kwargs)
    def name_parts(self):
        """Bind the configured submobject indices to named attributes."""
        self.head = self.submobjects[self.head_index]
        self.body = self.submobjects[self.body_index]
        self.eye_left_out = self.submobjects[self.eye_left_out_index]
        self.eye_right_out = self.submobjects[self.eye_right_out_index]
        self.eye_left_in = self.submobjects[self.eye_left_in_index]
        self.eye_right_in = self.submobjects[self.eye_right_in_index]
        self.mouth = self.submobjects[self.mouth_index]
        self.mouth.stroke_width = 1
        self.parts_named = True
    def init_colors(self):
        """Apply the default fill colors to each named part."""
        SVGMobject.init_colors(self)
        self.name_parts()
        self.head.set_fill(self.head_color, opacity = 1)
        self.body.set_fill(self.body_color, opacity = 1)
        # Eyes: white sclera, black pupils.
        self.eye_left_out.set_fill(WHITE, opacity = 1)
        self.eye_right_out.set_fill(WHITE, opacity = 1)
        self.eye_left_in.set_fill(BLACK, opacity = 1)
        self.eye_right_in.set_fill(BLACK, opacity = 1)
|
# Generated by Django 3.0.5 on 2020-04-20 03:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a boolean ``deleted`` field (default ``False``) to the ``todo``
    model."""
    dependencies = [
        ('todo', '0002_auto_20200419_1926'),
    ]
    operations = [
        migrations.AddField(
            model_name='todo',
            name='deleted',
            field=models.BooleanField(default=False),
        ),
    ]
|
#!/usr/bin/env python3
from pwn import *
"""
Apparently, the stub code is:
xor rax,rax
xor rbx,rbx
xor rcx,rcx
xor rdx,rdx
xor rsi,rsi
xor rdi,rdi
xor rbp,rbp
xor r8,r8
xor r9,r9
xor r10,r10
xor r11,r11
xor r12,r12
xor r13,r13
xor r14,r14
xor r15,r15
nop
I examined that first, which just showed that it clears the registers
such that one cannot cheat.
Thus, we just need to open, read and write the contents of the flag file
"""
# Set up pwntools for the correct architecture
exe = context.binary = ELF('./asm')
# Open GDB panes in a horizontal tmux split when debugging.
context.terminal = ["tmux", "splitw", "-h"]
# Many built-in settings can be controlled on the command-line and show up
# in "args". For example, to dump all data sent/received, and disable ASLR
# for all created processes...
# ./exploit.py DEBUG NOASLR
def start():
    """Start the exploit against the target.

    Dispatch order: GDB debug session if requested, then a plain local
    process, otherwise the remote service.
    """
    if args.GDB:
        return gdb.debug([exe.path], gdbscript=gdbscript)
    if args.LOCAL:
        return process([exe.path])
    return remote('pwnable.kr', 9026)
# Specify your GDB script here for debugging
# GDB will be launched if the exploit is run via e.g.
# ./exploit.py GDB
gdbscript = '''
# Just before calling supplied shellcode
break *(main+0x143)
continue
'''.format(**locals())
# ===========================================================
# EXPLOIT GOES HERE
# ===========================================================
# Arch: amd64-64-little
# RELRO: Partial RELRO
# Stack: No canary found
# NX: NX enabled
# PIE: PIE enabled
# The remote flag file has an intentionally enormous name; spelled out here
# exactly as the challenge requires.
flag_filename = 'this_is_pwnable.kr_flag_file_please_read_' \
                'this_file.sorry_the_file_name_is_very_looo' \
                'oooooooooooooooooooooooooooooooooooooooooo' \
                'ooooooooooooooooooooooooooooooo00000000000' \
                '00000000000000ooooooooooooooooooooooo00000' \
                '0000000o0o0o0o0o0o0ong'
if args.LOCAL or args.GDB:
    flag_filename = __file__ # read this file instead
io = start()
# Shellcode: open the flag, read it onto the stack, write it to stdout.
# open() leaves the fd in rax, which read() consumes; read() leaves the byte
# count in rax, which write() uses as the length.
shellcode = asm(
    shellcraft.open(flag_filename) +
    shellcraft.read('rax', 'rsp', 0x1000) +
    shellcraft.write(1, 'rsp', 'rax')
)
io.recvuntil('give me your x64 shellcode: ')
io.send(shellcode)
success(io.recvall().decode('utf8'))
|
#Anton Danylenko
#SoftDev1 pd8
#16 No Trouble
#2018-10-05
import sqlite3 #enable control of an sqlite database
import csv #facilitates CSV I/O
DB_FILE = "discobandit.db"

db = sqlite3.connect(DB_FILE)  # open if file exists, otherwise create
c = db.cursor()  # facilitate db ops

# ==========================================================
# INSERT YOUR POPULATE CODE IN THIS ZONE

# courses.csv -> courses table
with open('raw/courses.csv') as csvfile:  # open csv file and store as DictReader
    reader = csv.DictReader(csvfile)  # {header: element, header2: element2, ...}
    c.execute('CREATE TABLE courses (code TEXT, id INTEGER, mark INTEGER)')
    # Parameterized INSERTs let sqlite3 handle quoting/escaping; the original
    # string-formatted SQL broke on values containing quotes.
    for lines in reader:
        c.execute('INSERT INTO courses VALUES (?, ?, ?)',
                  (lines['code'], int(lines['id']), int(lines['mark'])))

# occupations.csv -> occupations table
with open('raw/occupations.csv') as csvfile:
    reader = csv.DictReader(csvfile)
    # The column name contains a space and must be quoted: the original
    # unquoted form created a column literally named "Job" with declared type
    # "Class TEXT". Percentage holds fractional values, so REAL is the
    # correct affinity (it was declared INTEGER while floats were inserted).
    c.execute('CREATE TABLE occupations ("Job Class" TEXT, Percentage REAL)')
    for lines in reader:
        cleaned_job = lines['Job Class'].strip('\"')
        c.execute('INSERT INTO occupations VALUES (?, ?)',
                  (cleaned_job, float(lines['Percentage'])))

# peeps.csv -> peeps table
with open('raw/peeps.csv') as csvfile:
    reader = csv.DictReader(csvfile)
    c.execute('CREATE TABLE peeps (name TEXT, age INTEGER, id INTEGER)')
    for lines in reader:
        c.execute('INSERT INTO peeps VALUES (?, ?, ?)',
                  (lines['name'], int(lines['age']), int(lines['id'])))

# ==========================================================
db.commit()  # save changes
db.close()   # close database
|
"""
IQR Search sub-application module
"""
import base64
from io import BytesIO
import json
import os
import os.path as osp
import random
import shutil
import zipfile
import six
import flask
import PIL.Image
import requests
from smqtk.iqr import IqrSession
from smqtk.representation import get_data_set_impls
from smqtk.representation.data_element.file_element import DataFileElement
from smqtk.utils import Configurable
from smqtk.utils import SmqtkObject
from smqtk.utils import plugin
from smqtk.utils.file_utils import safe_create_dir
from smqtk.utils.mimetype import get_mimetypes
from smqtk.utils.preview_cache import PreviewCache
from smqtk.web.search_app.modules.file_upload import FileUploadMod
from smqtk.web.search_app.modules.static_host import StaticDirectoryHost
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
MT = get_mimetypes()
class IqrSearch (SmqtkObject, flask.Flask, Configurable):
"""
IQR Search Tab blueprint
Components:
* Data-set, from which base media data is provided
* Descriptor generator, which provides descriptor generation services
for user uploaded data.
* NearestNeighborsIndex, from which descriptors are queried from user
input data. This index should contain descriptors that were
generated by the same descriptor generator configuration above (same
dimensionality, etc.).
* RelevancyIndex, which is populated by an initial query, and then
iterated over within the same user session. A new instance and model
is generated every time a new session is created (or new data is
uploaded by the user).
Assumes:
* DescriptorElement related to a DataElement have the same UUIDs.
"""
# TODO: User access white/black-list? See ``search_app/__init__.py``:L135
@classmethod
def get_default_config(cls):
d = super(IqrSearch, cls).get_default_config()
# Remove parent_app slot for later explicit specification.
del d['parent_app']
d['iqr_service_url'] = None
# fill in plugin configs
d['data_set'] = plugin.make_config(get_data_set_impls())
return d
# noinspection PyMethodOverriding
@classmethod
def from_config(cls, config, parent_app):
"""
Instantiate a new instance of this class given the configuration
JSON-compliant dictionary encapsulating initialization arguments.
:param config: JSON compliant dictionary encapsulating
a configuration.
:type config: dict
:param parent_app: Parent containing flask app instance
:type parent_app: smqtk.web.search_app.app.search_app
:return: Constructed instance from the provided config.
:rtype: IqrSearch
"""
merged = cls.get_default_config()
merged.update(config)
# construct nested objects via configurations
merged['data_set'] = \
plugin.from_plugin_config(merged['data_set'],
get_data_set_impls())
return cls(parent_app, **merged)
def __init__(self, parent_app, iqr_service_url, data_set,
working_directory):
"""
Initialize a generic IQR Search module with a single descriptor and
indexer.
:param parent_app: Parent containing flask app instance
:type parent_app: smqtk.web.search_app.IqrSearchDispatcher
:param iqr_service_url: Base URL to the IQR service to use for this
application interface. Any trailing slashes will be striped.
:type iqr_service_url: str
:param data_set: DataSet of the content described by indexed descriptors
in the linked IQR service.
:type data_set: smqtk.representation.DataSet
:param working_directory: Directory in which to place working files.
These may be considered temporary and may be removed between
executions of this app.
:type working_directory: str
:raises ValueError: Invalid Descriptor or indexer type
"""
super(IqrSearch, self).__init__(
import_name=__name__,
static_folder=os.path.join(SCRIPT_DIR, "static"),
template_folder=os.path.join(SCRIPT_DIR, "templates"),
)
self._parent_app = parent_app
self._data_set = data_set
self._iqr_service = IqrServiceProxy(iqr_service_url.rstrip('/'))
# base directory that's transformed by the ``work_dir`` property into
# an absolute path.
self._working_dir = working_directory
# Directory to put things to allow them to be statically available to
# public users.
self._static_data_prefix = "static/data"
self._static_data_dir = osp.join(self.work_dir, 'static')
# Custom static host sub-module
self.mod_static_dir = StaticDirectoryHost('%s_static' % self.name,
self._static_data_dir,
self._static_data_prefix)
self.register_blueprint(self.mod_static_dir)
# Uploader Sub-Module
self.upload_work_dir = os.path.join(self.work_dir, "uploads")
self.mod_upload = FileUploadMod('%s_uploader' % self.name, parent_app,
self.upload_work_dir,
url_prefix='/uploader')
self.register_blueprint(self.mod_upload)
self.register_blueprint(parent_app.module_login)
# Mapping of session IDs to their work directory
#: :type: dict[str, str]
self._iqr_work_dirs = {}
# Mapping of session ID to a dictionary of the custom example data for
# a session (uuid -> DataElement)
#: :type: dict[str, dict[collections.Hashable, smqtk.representation.DataElement]]
self._iqr_example_data = {}
# Preview Image Caching
self._preview_cache = PreviewCache(osp.join(self._static_data_dir,
"previews"))
# Cache mapping of written static files for data elements
self._static_cache = {}
self._static_cache_element = {}
#
# Routing
#
@self.route("/")
@self._parent_app.module_login.login_required
def index():
# Stripping left '/' from blueprint modules in order to make sure
# the paths are relative to our base.
r = {
"module_name": self.name,
"uploader_url": self.mod_upload.url_prefix.lstrip('/'),
"uploader_post_url":
self.mod_upload.upload_post_url().lstrip('/'),
}
self._log.debug("Uploader URL: %s", r['uploader_url'])
# noinspection PyUnresolvedReferences
return flask.render_template("iqr_search_index.html", **r)
@self.route('/iqr_session_info', methods=["GET"])
@self._parent_app.module_login.login_required
def iqr_session_info():
"""
Get information about the current IRQ session
"""
sid = self.get_current_iqr_session()
get_r = self._iqr_service.get('session', sid=sid)
get_r.raise_for_status()
return flask.jsonify(get_r.json())
@self.route('/get_iqr_state')
@self._parent_app.module_login.login_required
def iqr_session_state():
"""
Get IQR session state information composed of positive and negative
descriptor vectors.
We append to the state received from the service in order to produce
a state byte package that is compatible with the
``IqrSession.set_state_bytes`` method. This way state bytes received
from this function can be directly consumed by the IQR service or
other IqrSession instances.
"""
sid = self.get_current_iqr_session()
# Get the state base64 from the underlying service.
r_get = self._iqr_service.get('state', sid=sid)
r_get.raise_for_status()
state_b64 = r_get.json()['state_b64']
state_bytes = base64.b64decode(state_b64)
# Load state dictionary from base-64 ZIP payload from service
# - GET content is base64, so decode first and then read as a
# ZipFile buffer.
# - `r_get.content` is `byte` type so it can be passed directly to
# base64 decode.
state_dict = json.load(
zipfile.ZipFile(
BytesIO(state_bytes),
'r',
IqrSession.STATE_ZIP_COMPRESSION
).open(IqrSession.STATE_ZIP_FILENAME)
)
r_get.close()
# Wrap service state with our UI state: uploaded data elements.
# Data elements are stored as a dictionary mapping UUID to MIMETYPE
# and data byte string.
working_data = {}
sid_data_elems = self._iqr_example_data.get(sid, {})
for uid, workingElem in six.iteritems(sid_data_elems):
working_data[uid] = {
'content_type': workingElem.content_type(),
'bytes_base64':
base64.b64encode(workingElem.get_bytes()),
}
state_dict["working_data"] = working_data
state_json = json.dumps(state_dict)
z_wrapper_buffer = BytesIO()
z_wrapper = zipfile.ZipFile(z_wrapper_buffer, 'w',
IqrSession.STATE_ZIP_COMPRESSION)
z_wrapper.writestr(IqrSession.STATE_ZIP_FILENAME, state_json)
z_wrapper.close()
z_wrapper_buffer.seek(0)
return flask.send_file(
z_wrapper_buffer,
mimetype='application/octet-stream',
as_attachment=True,
attachment_filename="%s.IqrState" % sid
)
@self.route('/set_iqr_state', methods=['PUT'])
@self._parent_app.module_login.login_required
def set_iqr_session_state():
"""
Set the current state based on the given state file.
"""
sid = self.get_current_iqr_session()
fid = flask.request.form.get('fid', None)
return_obj = {
'success': False,
}
#
# Load in state zip package, prepare zip package for service
#
if fid is None:
return_obj['message'] = 'No file ID provided.'
self._log.debug("[%s::%s] Getting temporary filepath from "
"uploader module", sid, fid)
upload_filepath = self.mod_upload.get_path_for_id(fid)
self.mod_upload.clear_completed(fid)
# Load ZIP package back in, then remove the uploaded file.
try:
z = zipfile.ZipFile(
upload_filepath,
compression=IqrSession.STATE_ZIP_COMPRESSION
)
with z.open(IqrSession.STATE_ZIP_FILENAME) as f:
state_dict = json.load(f)
z.close()
finally:
os.remove(upload_filepath)
#
# Consume working data UUID/bytes
#
# Reset this server's resources for an SID
self.reset_session_local(sid)
# - Dictionary of data UUID (SHA1) to {'content_type': <str>,
# 'bytes_base64': <str>} dictionary.
#: :type: dict[str, dict]
working_data = state_dict['working_data']
del state_dict['working_data']
# - Write out base64-decoded files to session-specific work
# directory.
# - Update self._iqr_example_data with DataFileElement instances
# referencing the just-written files.
for uuid_sha1 in working_data:
data_mimetype = working_data[uuid_sha1]['content_type']
data_b64 = str(working_data[uuid_sha1]['bytes_base64'])
# Output file to working directory on disk.
data_filepath = os.path.join(
self._iqr_work_dirs[sid],
'%s%s' % (uuid_sha1, MT.guess_extension(data_mimetype))
)
with open(data_filepath, 'wb') as f:
f.write(base64.urlsafe_b64decode(data_b64))
# Create element reference and store it for the current session.
data_elem = DataFileElement(data_filepath, readonly=True)
self._iqr_example_data[sid][uuid_sha1] = data_elem
#
# Re-package service state as a ZIP payload.
#
service_zip_buffer = BytesIO()
service_zip = zipfile.ZipFile(service_zip_buffer, 'w',
IqrSession.STATE_ZIP_COMPRESSION)
service_zip.writestr(IqrSession.STATE_ZIP_FILENAME,
json.dumps(state_dict))
service_zip.close()
service_zip_base64 = \
base64.b64encode(service_zip_buffer.getvalue())
# Update service state
self._iqr_service.put('state',
sid=sid,
state_base64=service_zip_base64)
return flask.jsonify(return_obj)
@self.route("/check_current_iqr_session")
@self._parent_app.module_login.login_required
def check_current_iqr_session():
"""
Check that the current IQR session exists and is initialized.
Return JSON:
success
Always True if the message returns.
"""
# Getting the current IQR session ensures that one has been
# constructed for the current session.
_ = self.get_current_iqr_session()
return flask.jsonify({
"success": True
})
@self.route("/get_data_preview_image", methods=["GET"])
@self._parent_app.module_login.login_required
def get_ingest_item_image_rep():
"""
Return the base64 preview image data link for the data file
associated with the give UID (plus some other metadata).
"""
uid = flask.request.args['uid']
info = {
"success": True,
"message": None,
"shape": None, # (width, height)
"static_file_link": None,
"static_preview_link": None,
}
# Try to find a DataElement by the given UUID in our indexed data
# or in the session's example data.
if self._data_set.has_uuid(uid):
#: :type: smqtk.representation.DataElement
de = self._data_set.get_data(uid)
else:
sid = self.get_current_iqr_session()
#: :type: smqtk.representation.DataElement | None
de = self._iqr_example_data[sid].get(uid, None)
if not de:
info["success"] = False
info["message"] = "UUID '%s' not part of the base or working " \
"data set!" % uid
else:
# Preview_path should be a path within our statically hosted
# area.
preview_path = self._preview_cache.get_preview_image(de)
img = PIL.Image.open(preview_path)
info["shape"] = img.size
if de.uuid() not in self._static_cache:
self._static_cache[de.uuid()] = \
de.write_temp(self._static_data_dir)
self._static_cache_element[de.uuid()] = de
# Need to format links by transforming the generated paths to
# something usable by webpage:
# - make relative to the static directory, and then pre-pending
# the known static url to the
info["static_preview_link"] = \
self._static_data_prefix + '/' + \
os.path.relpath(preview_path, self._static_data_dir)
info['static_file_link'] = \
self._static_data_prefix + '/' + \
os.path.relpath(self._static_cache[de.uuid()],
self._static_data_dir)
return flask.jsonify(info)
@self.route('/iqr_ingest_file', methods=['POST'])
@self._parent_app.module_login.login_required
def iqr_ingest_file():
"""
Ingest the file with the given UID, getting the path from the
uploader.
:return: string of data/descriptor element's UUID
:rtype: str
"""
# TODO: Add status dict with a "GET" method branch for getting that
# status information.
fid = flask.request.form['fid']
sid = self.get_current_iqr_session()
self._log.debug("[%s::%s] Getting temporary filepath from "
"uploader module", sid, fid)
upload_filepath = self.mod_upload.get_path_for_id(fid)
self.mod_upload.clear_completed(fid)
self._log.debug("[%s::%s] Moving uploaded file",
sid, fid)
sess_upload = osp.join(self._iqr_work_dirs[sid],
osp.basename(upload_filepath))
os.rename(upload_filepath, sess_upload)
# Record uploaded data as user example data for this session.
upload_data = DataFileElement(sess_upload)
uuid = upload_data.uuid()
self._iqr_example_data[sid][uuid] = upload_data
# Extend session ingest -- modifying
self._log.debug("[%s::%s] Adding new data to session "
"external positives", sid, fid)
data_b64 = base64.b64encode(upload_data.get_bytes())
data_ct = upload_data.content_type()
r = self._iqr_service.post('add_external_pos', sid=sid,
base64=data_b64, content_type=data_ct)
r.raise_for_status()
return str(uuid)
@self.route("/iqr_initialize", methods=["POST"])
@self._parent_app.module_login.login_required
def iqr_initialize():
"""
Initialize IQR session working index based on current positive
examples and adjudications.
"""
sid = self.get_current_iqr_session()
# (Re)Initialize working index
post_r = self._iqr_service.post('initialize', sid=sid)
post_r.raise_for_status()
return flask.jsonify(post_r.json())
@self.route("/get_example_adjudication", methods=["GET"])
@self._parent_app.module_login.login_required
def get_example_adjudication():
"""
Get positive/negative status for a data/descriptor in our example
set.
:return: {
is_pos: <bool>,
is_neg: <bool>
}
"""
# TODO: Collapse example and index adjudication endpoints.
elem_uuid = flask.request.args['uid']
sid = self.get_current_iqr_session()
get_r = self._iqr_service.get('adjudicate', sid=sid, uid=elem_uuid)
get_r.raise_for_status()
get_r_json = get_r.json()
return flask.jsonify({
"is_pos": get_r_json['is_pos'],
"is_neg": get_r_json['is_neg'],
})
@self.route("/get_index_adjudication", methods=["GET"])
@self._parent_app.module_login.login_required
def get_index_adjudication():
"""
Get the adjudication status of a particular data/descriptor element
by UUID.
This should only ever return a dict where one of the two, or
neither, are labeled True.
:return: {
is_pos: <bool>,
is_neg: <bool>
}
"""
# TODO: Collapse example and index adjudication endpoints.
elem_uuid = flask.request.args['uid']
sid = self.get_current_iqr_session()
get_r = self._iqr_service.get('adjudicate', sid=sid, uid=elem_uuid)
get_r.raise_for_status()
get_r_json = get_r.json()
return flask.jsonify({
"is_pos": get_r_json['is_pos'],
"is_neg": get_r_json['is_neg'],
})
@self.route("/adjudicate", methods=["POST"])
@self._parent_app.module_login.login_required
def adjudicate():
"""
Update adjudication for this session. This should specify UUIDs of
data/descriptor elements in our working index.
:return: {
success: <bool>,
message: <str>
}
"""
pos_to_add = json.loads(flask.request.form.get('add_pos', '[]'))
pos_to_remove = json.loads(flask.request.form.get('remove_pos',
'[]'))
neg_to_add = json.loads(flask.request.form.get('add_neg', '[]'))
neg_to_remove = json.loads(flask.request.form.get('remove_neg',
'[]'))
msg = "Adjudicated Positive{+%s, -%s}, " \
"Negative{+%s, -%s} " \
% (pos_to_add, pos_to_remove,
neg_to_add, neg_to_remove)
self._log.debug(msg)
sid = self.get_current_iqr_session()
to_neutral = list(set(pos_to_remove) | set(neg_to_remove))
post_r = self._iqr_service.post('adjudicate',
sid=sid,
pos=json.dumps(pos_to_add),
neg=json.dumps(neg_to_add),
neutral=json.dumps(to_neutral))
post_r.raise_for_status()
return flask.jsonify({
"success": True,
"message": msg
})
@self.route("/iqr_refine", methods=["POST"])
@self._parent_app.module_login.login_required
def iqr_refine():
"""
Classify current IQR session indexer, updating ranking for
display.
Fails gracefully if there are no positive[/negative] adjudications.
"""
sid = self.get_current_iqr_session()
post_r = self._iqr_service.post('refine', sid=sid)
post_r.raise_for_status()
return flask.jsonify({
"success": True,
"message": "Completed refinement",
})
@self.route("/iqr_ordered_results", methods=['GET'])
@self._parent_app.module_login.login_required
def get_ordered_results():
"""
Get ordered (UID, probability) pairs in between the given indices,
[i, j). If j Is beyond the end of available results, only available
results are returned.
This may be empty if no refinement has yet occurred.
Return format:
{
results: [ (uid, probability), ... ]
}
"""
i = flask.request.args.get('i', None)
j = flask.request.args.get('j', None)
params = {
'sid': self.get_current_iqr_session(),
}
if i is not None:
params['i'] = int(i)
if j is not None:
params['j'] = int(j)
get_r = self._iqr_service.get('get_results', **params)
get_r.raise_for_status()
return flask.jsonify(get_r.json())
@self.route("/reset_iqr_session", methods=["GET"])
@self._parent_app.module_login.login_required
def reset_iqr_session():
"""
Reset the current IQR session
"""
sid = self.get_current_iqr_session()
# Reset service
put_r = self._iqr_service.put('session', sid=sid)
put_r.raise_for_status()
# Reset local server resources
self.reset_session_local(sid)
return flask.jsonify({"success": True})
@self.route("/get_random_uids")
@self._parent_app.module_login.login_required
def get_random_uids():
"""
Return to the client a list of data/descriptor IDs available in the
configured data set (NOT descriptor/NNI set).
Thus, we assume that the nearest neighbor index that is searchable
is from at least this set of data.
:return: {
uids: list[str]
}
"""
all_ids = list(self._data_set.uuids())
random.shuffle(all_ids)
return flask.jsonify({
"uids": all_ids
})
@self.route('/is_ready')
def is_ready():
""" Simple 'I'm alive' endpoint """
return flask.jsonify({
"alive": True,
})
def __del__(self):
for wdir in self._iqr_work_dirs.values():
if os.path.isdir(wdir):
shutil.rmtree(wdir)
def get_config(self):
return {
'iqr_service_url': self._iqr_service.url,
'working_directory': self._working_dir,
'data_set': plugin.to_plugin_config(self._data_set),
}
@property
def work_dir(self):
"""
:return: Common work directory for this instance.
:rtype: str
"""
return osp.expanduser(osp.abspath(self._working_dir))
    def get_current_iqr_session(self):
        """
        Get the current IQR Session UUID, creating service-side session and
        local working state for it on first use.

        :rtype: str
        """
        sid = str(flask.session.sid)
        # Ensure there is an initialized session on the configured service.
        created_session = False
        get_r = self._iqr_service.get('session_ids')
        get_r.raise_for_status()
        if sid not in get_r.json()['session_uuids']:
            # Service does not know this session yet -- create it remotely.
            post_r = self._iqr_service.post('session', sid=sid)
            post_r.raise_for_status()
            created_session = True
        if created_session or (sid not in self._iqr_work_dirs):
            # Dictionaries not initialized yet for this UUID.
            self._iqr_work_dirs[sid] = osp.join(self.work_dir, sid)
            self._iqr_example_data[sid] = {}
            safe_create_dir(self._iqr_work_dirs[sid])
        return sid
def reset_session_local(self, sid):
"""
Reset elements of this server for a given session ID.
A given ``sid`` must have been created first. This happens in the
``get_current_iqr_session`` method.
This does not affect the linked IQR service.
:param sid: Session ID to reset for.
:type sid: str
:raises KeyError: ``sid`` not recognized. Probably not initialized
first.
"""
# Also clear work sub-directory and example data state
if os.path.isdir(self._iqr_work_dirs[sid]):
shutil.rmtree(self._iqr_work_dirs[sid])
safe_create_dir(self._iqr_work_dirs[sid])
self._iqr_example_data[sid].clear()
class IqrServiceProxy (object):
    """
    Helper class for interacting with the IQR service over HTTP.

    Thin wrapper around ``requests`` that fixes the base URL once and exposes
    the four HTTP verbs as methods taking an endpoint plus keyword parameters.
    """

    def __init__(self, url):
        """
        :param url: URL to base requests on. ``http://`` is prepended when no
            scheme is present.
        :type url: str
        """
        # Append http:// to the head of the URL if neither http(s) are present
        if not (url.startswith('http://') or url.startswith('https://')):
            url = 'http://' + url
        self.url = url

    def _compose(self, endpoint):
        """Join ``endpoint`` onto the base URL."""
        return '/'.join([self.url, endpoint])

    # ``params or None`` replaces the obsolete ``x and x or None`` idiom:
    # pass None instead of an empty dict so requests omits the parameters.
    def get(self, endpoint, **params):
        return requests.get(self._compose(endpoint), params or None)

    def post(self, endpoint, **params):
        return requests.post(self._compose(endpoint), data=params or None)

    def put(self, endpoint, **params):
        return requests.put(self._compose(endpoint), data=params or None)

    def delete(self, endpoint, **params):
        return requests.delete(self._compose(endpoint), data=params or None)
|
# coding=utf-8
__author__ = 'Hanzhiyun'

# Calculate the factorial of a user-supplied non-negative integer.
# BUG FIX: in Python 3, input() returns a string; the original passed the raw
# string to range(), which raises TypeError. Convert to int first.
number = int(input("Enter a non-negative integer to take the factorial of: "))
product = 1
for i in range(number):
    product *= (i + 1)
print(product)
|
import json
import base64
import cv2
import sys
import datetime
import time
import subprocess
import collections as cl
import csv
def res_cmd_lfeed(cmd):
    """Run ``cmd`` in a shell and return stdout as raw byte-string lines
    (trailing newlines included)."""
    return subprocess.Popen(
        cmd, stdout=subprocess.PIPE,
        shell=True).stdout.readlines()


def res_cmd_no_lfeed(cmd):
    """Run ``cmd`` and return its stdout lines as text without newlines.

    BUG FIX: the original applied ``str()`` to each bytes line, producing
    literals like ``"b'person: ...\\n'"`` whose trailing newline was never
    stripped and whose prefix broke downstream string comparisons. Decode the
    bytes instead.
    """
    return [x.decode().rstrip("\n") for x in res_cmd_lfeed(cmd)]
def read_input(input):
    """Read the file at path ``input`` and return its raw bytes.

    :param input: Path of the file to read (note: name shadows the builtin).
    :return: File contents as ``bytes``.
    """
    #message = cv2.imread(input)
    # Context manager closes the handle deterministically; the original left
    # it open until garbage collection.
    with open(input, 'rb') as f:
        return f.read()
def getTime():
    """Return the current local time formatted as ``YYYYMMDD-HH_MM_SS``."""
    return datetime.datetime.now().strftime('%Y%m%d-%H_%M_%S')
def dictYOLO(name, score, log):
    """Pack parallel detection lists into a nested ordered mapping.

    :param name: Class label per detection.
    :param score: Confidence value per detection.
    :param log: Bounding-box data per detection.
    :return: ``{"yolo": {label: {"score": ..., "box": ...}, ...}}``
    """
    classdata = cl.OrderedDict()
    for i in range(len(log)):
        entry = cl.OrderedDict()
        entry["score"] = score[i]
        entry["box"] = log[i]
        classdata[name[i]] = entry
    dictdata = cl.OrderedDict()
    dictdata["yolo"] = classdata
    return dictdata
def jsonData(subdata, yolodata):
    """Build the JSON-serializable record for one frame.

    :param subdata: Dict with keys 'img' (bytes), 'type', 'ts', 'size'.
    :param yolodata: Detection mapping to merge in (e.g. from dictYOLO).
    :return: Dict with the frame metadata, detections, and the base64-encoded
        image inserted last.
    """
    encoded_img = base64.b64encode(subdata['img']).decode('utf-8')
    record = {
        "type": subdata['type'],
        "ts": subdata['ts'],
        "size": subdata['size'],
    }
    record.update(yolodata)
    # Image payload goes in last so the bulky field trails the record.
    record.update({"img": encoded_img})
    return record
def writeData(fname, data):
    """Write ``data`` bytes to ``fname``.

    Uses a context manager so the handle is flushed and closed
    deterministically (the original left it open until garbage collection).
    """
    with open(fname, 'wb') as f:
        f.write(data)
def setLOG(fname):
    # Open the log file for appending and return a CSV writer bound to it.
    # NOTE(review): the file handle is intentionally never closed here -- the
    # returned writer would become unusable if it were. The handle lives for
    # the remainder of the process; callers own the writer from here on.
    f = open(fname, 'a')
    writer = csv.writer(f, lineterminator='\n')
    return writer
if __name__ == '__main__':
    # Expect exactly one argument: the input image name without extension.
    args = sys.argv
    if len(args) != 2:
        # NOTE(review): "prgoram" typo is inside a runtime string; left as-is.
        print("Usage:python prgoram.py [Input file name (without file extension)]")
        exit(1)
    fname = args[1] + ".png"
    #YOLOv3 full spec
    #cmd = ("./darknet detect cfg/yolov3.cfg yolov3.weights " + fname)
    #YOLOv3 tiny version
    cmd = ("./darknet detect cfg/yolov3-tiny.cfg yolov3-tiny.weights " + fname)
    # Time the external darknet detection run end-to-end.
    start = time.time()
    result = res_cmd_no_lfeed(cmd)
    end = time.time()
    log = []
    name = []
    score = []
    output = []
    flag = 0
    # Parse each darknet stdout line; first token is taken as the label,
    # second as the score -- assumes lines like "<label> <score> ...";
    # TODO confirm against actual darknet output format.
    for i in range(len(result)):
        log.append(result[i].split())
        name.append(log[i][0])
        score.append(log[i][1])
        # NOTE(review): pop(0) then pop(1) removes the ORIGINAL indices 0 and
        # 2 (the list shifts after the first pop) -- verify this is intended.
        log[i].pop(0)
        log[i].pop(1)
        if name[i] == "person":
            flag = 1
    #dictyolo = dictYOLO(name, score, log)
    detectTime = end - start
    if flag == 1:
        print("person detected")
    else:
        print("No person")
    print("Detection time: {0} (sec)".format(detectTime))
    #cmd = ('rm -rf ' + fname)
    #res = res_cmd_no_lfeed(cmd)
|
#!/usr/bin/env python
# coding: utf-8
import cv2
from dataset_explorer.io import FileType
from dataset_explorer.plugins import ImagePlugin
class LaplacianPlugin(ImagePlugin):
    """Dataset-explorer image plugin applying the Laplacian derivative."""

    def __init__(self):
        super(LaplacianPlugin, self).__init__(
            "Laplacian Derivative", FileType.IMAGE, icon="border_clear")

    def process(self, data, outFilename):
        """Compute the 32-bit float Laplacian of ``data`` and write it to
        ``outFilename``."""
        result = cv2.Laplacian(data, cv2.CV_32F)
        cv2.imwrite(outFilename, result)
|
'''
Created on Dec 21, 2010
Mpl examples:
http://matplotlib.sourceforge.net/examples/user_interfaces/index.html
'''
import tempfile
import logging
import numpy as np
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.backends import backend_wx
from matplotlib.figure import Figure
import wx
from .._wxutils import ID, Icon
from .frame import EelbrainFrame
class FigureCanvasPanel(FigureCanvasWxAgg):
    """wx.Panel with a matplotlib figure

    Notes
    -----
    Subclass of mpl's Canvas to allow for more interaction with Eelbrain (such
    as copying the contents to the clipboard).
    """
    # When True, Copy() places a rendered PDF file on the clipboard instead of
    # a bitmap of the canvas.
    _copy_as_pdf = True
    def __init__(self, parent, *args, **kwargs):
        """wx.Panel with a matplotlib figure

        Parameters
        ----------
        figsize : tuple
            Figure dimensions (width, height) in inches
        dpi : int
            Dots per inch.
        facecolor : mpl color
            The figure patch facecolor; defaults to rc ``figure.facecolor``
        edgecolor : mpl color
            The figure patch edge color; defaults to rc ``figure.edgecolor``
        linewidth : scalar
            The figure patch edge linewidth; the default linewidth of the frame
        frameon : bool
            If ``False``, suppress drawing the figure frame
        subplotpars :
            A :class:`SubplotParams` instance, defaults to rc
        tight_layout : bool | dict
            If ``False`` use ``subplotpars``; if ``True`` adjust subplot
            parameters using :meth:`tight_layout` with default padding.
            When providing a dict containing the keys `pad`, `w_pad`, `h_pad`
            and `rect`, the default :meth:`tight_layout` paddings will be
            overridden. Defaults to rc ``figure.autolayout``.
        """
        # All positional/keyword arguments are forwarded to the mpl Figure.
        self.figure = Figure(*args, **kwargs)
        FigureCanvasWxAgg.__init__(self, parent, wx.ID_ANY, self.figure)
        self.Bind(wx.EVT_ENTER_WINDOW, self.ChangeCursor)
    def CanCopy(self):
        # Part of the surrounding frame's copy-protocol interface.
        return True
    def bufferHasChanged(self):
        return True
    def ChangeCursor(self, event):
        "http://matplotlib.sourceforge.net/examples/user_interfaces/wxcursor_demo.html"
        # NOTE(review): wx.StockCursor is deprecated in wxPython Phoenix
        # (wx.Cursor is the modern spelling) -- confirm target wx version.
        self.SetCursor(wx.StockCursor(wx.CURSOR_CROSS))
    # def OnPaint(self, event):
    # self.draw()
    def Copy(self):
        # Copy the figure to the system clipboard, either as a temporary PDF
        # file reference (_copy_as_pdf) or as a rendered bitmap.
        if self._copy_as_pdf:
            try:
                if wx.TheClipboard.Open():
                    # same code in mpl_tools
                    path = tempfile.mktemp('.pdf') # , text=True)
                    logging.debug("Temporary file created at: %s" % path)
                    self.figure.savefig(path)
                    # copy path
                    do = wx.FileDataObject()
                    do.AddFile(path)
                    wx.TheClipboard.SetData(do)
                    wx.TheClipboard.Close()
            except wx._core.PyAssertionError:
                # Ensure the clipboard is released even when wx asserts.
                wx.TheClipboard.Close()
        else:
            # Render on a white background before grabbing the bitmap.
            self.figure.set_facecolor((1, 1, 1))
            self.draw()
            self.Copy_to_Clipboard()
    def redraw(self, axes=[], artists=[]):
        # Blit-based partial redraw over the stored background (see
        # store_canvas). NOTE(review): mutable default arguments are shared
        # across calls -- safe only while callers never mutate them.
        self.restore_region(self._background)
        for ax in axes:
            ax.draw_artist(ax)
            extent = ax.get_window_extent()
            self.blit(extent)
        for artist in artists:
            ax = artist.get_axes()
            # FIXME:
            # ax.draw_artist(artist)
            # extent = artist.get_window_extent(self.get_renderer()) # or self?
            # substitute redrawing whole ax
            ax.draw_artist(ax)
            extent = ax.get_window_extent()
            # end substitute
            self.blit(extent)
    def store_canvas(self):
        # Snapshot the current canvas for later blit-based partial redraws.
        self._background = self.copy_from_bbox(self.figure.bbox)
class CanvasFrame(EelbrainFrame):
    """
    Top-level wx frame hosting a FigureCanvasPanel, with optional status bar,
    Eelbrain toolbar and matplotlib navigation toolbar.
    after:
    http://matplotlib.sourceforge.net/examples/user_interfaces/embedding_in_wx2.html
    """
    def __init__(self, parent=None, title="Matplotlib Frame",
                 eelfigure=None,
                 statusbar=True, toolbar=True, mpl_toolbar=False,
                 *args, **kwargs):
        # Remaining *args/**kwargs are forwarded to the matplotlib Figure
        # through FigureCanvasPanel.
        wx.Frame.__init__(self, parent, -1, title=title)
        # set up the canvas
        # prepare the plot panel
        self.sizer = sizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(sizer)
        self.canvas = FigureCanvasPanel(self, *args, **kwargs)
        sizer.Add(self.canvas, 1, wx.EXPAND)
        # get figure
        self.figure = self.canvas.figure
        if statusbar:
            self.CreateStatusBar()
        if toolbar:
            tb = self.CreateToolBar(wx.TB_HORIZONTAL)
            tb.SetToolBitmapSize(size=(32, 32))
            self.FillToolBar(tb, eelfigure)
            tb.Realize()
        if mpl_toolbar:
            self.add_mpl_toolbar()
        sizer.Fit(self)
        # Kept for OnAttach/OnClose; creates a circular reference that
        # OnClose breaks explicitly.
        self._eelfigure = eelfigure
        self.Bind(wx.EVT_CLOSE, self.OnClose)
    def FillToolBar(self, tb, eelfigure):
        "subclasses should call this after adding their own items"
        # Only offer "Attach" when the parent (e.g. a shell) supports it.
        if hasattr(self.Parent, 'attach'):
            tb.AddLabelTool(ID.ATTACH, "Attach", Icon("actions/attach"))
            self.Bind(wx.EVT_TOOL, self.OnAttach, id=ID.ATTACH)
        tb.AddLabelTool(wx.ID_SAVE, "Save", Icon("tango/actions/document-save"))
        self.Bind(wx.EVT_TOOL, self.OnSaveAs, id=wx.ID_SAVE)
        self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateUISave, id=wx.ID_SAVE)
        # intermediate, custom part
        if eelfigure is not None:
            eelfigure._fill_toolbar(tb)
        # right-most part
        # AddStretchableSpace is only available from wx 2.9 on.
        if wx.__version__ >= '2.9':
            tb.AddStretchableSpace()
        else:
            tb.AddSeparator()
        # tb.AddLabelTool(wx.ID_HELP, 'Help', Icon("tango/apps/help-browser"))
        # self.Bind(wx.EVT_TOOL, self.OnHelp, id=wx.ID_HELP)
    def add_mpl_toolbar(self):
        # Standard matplotlib pan/zoom navigation toolbar.
        self.toolbar = backend_wx.NavigationToolbar2Wx(self.canvas)
        self.toolbar.Realize()
        if 0:  # wx.Platform == '__WXMAC__':
            # Mac platform (OSX 10.3, MacPython) does not seem to cope with
            # having a toolbar in a sizer. This work-around gets the buttons
            # back, but at the expense of having the toolbar at the top
            self.SetToolBar(self.toolbar)
        else:
            # On Windows platform, default window size is incorrect, so set
            # toolbar width to figure width.
            tw, th = self.toolbar.GetSizeTuple()
            fw, fh = self.canvas.GetSizeTuple()
            # By adding toolbar in sizer, we are able to put it at the bottom
            # of the frame - so appearance is closer to GTK version.
            # As noted above, doesn't work for Mac.
            self.toolbar.SetSize(wx.Size(fw, th))
            self.Sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
        # update the axes menu on the toolbar
        self.toolbar.update()
    def OnAttach(self, event):
        # Expose the eelfigure to the parent shell's namespace as 'p'.
        items = {'p': self._eelfigure}
        self.Parent.attach(items, detach=False, _internal_call=True)
        self.Parent.Raise()
    def OnClose(self, event):
        # remove circular reference
        if hasattr(self, '_eelfigure') and self._eelfigure:
            del self._eelfigure._frame
            del self._eelfigure
        event.Skip()
    # def OnHelp(self, event):
    #     app = wx.GetApp()
    #     shell = getattr(app, 'shell', None)
    #     if hasattr(shell, 'help_lookup'):
    #         shell.help_lookup(self._eelfigure)
    #     else:
    #         print self.__doc__
    def OnSave(self, event):
        self.OnSaveAs(event)
    def OnSaveAs(self, event):
        # Suggest "<window title>.pdf" as filename; mpl infers the format
        # from the chosen extension.
        default_file = '%s.pdf' % self.GetTitle().replace(': ', ' - ')
        dlg = wx.FileDialog(self, "If no file type is selected below, it is "
                            "inferred from the extension.",
                            defaultFile=default_file,
                            wildcard="Any (*.*)|*.*|PDF (*.pdf)|*.pdf|PNG (*.png)|*.png",
                            style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
        if dlg.ShowModal() == wx.ID_OK:
            self.figure.savefig(dlg.GetPath())
        dlg.Destroy()
    def OnUpdateUISave(self, event):
        # Saving is always possible for a canvas frame.
        event.Enable(True)
    def OnUpdateUISaveAs(self, event):
        event.Enable(True)
    def redraw(self, axes=[], artists=[]):
        # Delegate partial (blit) redrawing to the canvas panel.
        self.canvas.redraw(axes=axes, artists=artists)
    def store_canvas(self):
        self.canvas.store_canvas()
class TestCanvas(CanvasFrame):
    "This is a minimal CanvasFrame subclass"
    def __init__(self, effect=10, mpl_toolbar=True):
        # ``effect`` is passed straight to wx.Window.Show below.
        CanvasFrame.__init__(self, title="Test MPL Frame", mpl_toolbar=mpl_toolbar)
        self.plot()
        self.Show(effect)
    def plot(self):
        # Demo content: three periods of a sine wave.
        self.axes = self.figure.add_subplot(111)
        t = np.arange(0.0, 3.0, 0.01)
        s = np.sin(2 * np.pi * t)
        self.axes.plot(t, s)
|
from .king_bot import king_bot
from .settings import settings
|
#
#
#
#
# Functions to calculate the volume of a set of rules
#
#
#
#
from operator import itemgetter
# FUNCTION TO CALCULATE THE VOLUME OF A PARAMETER
def parameter_volume(parameter):
    """Return the 1-D extent ("volume") of one rule parameter.

    A bare number has zero extent; a collection (tuple/set/list of numbers)
    spans ``abs(max - min)``.
    """
    # isinstance instead of ``type(x) == int`` (idiomatic, and also accepts
    # int/float subclasses).
    if isinstance(parameter, (int, float)):
        return 0
    return abs(max(parameter) - min(parameter))
#print(parameter_volume((5,)))
#print(parameter_volume((11,13,16)))
#print(parameter_volume({11,13,16}))
#Function to calculate the volume of a rule
def rule_volume(rule):
    """Return ``[volume, dimension]`` for one rule.

    The last entry of ``rule`` is its class label and is skipped.  ``volume``
    is the sum of the per-parameter extents; ``dimension`` counts the
    parameters with a non-zero extent.
    """
    # Slice instead of the original ``range(0, len(rule) - 1)`` index loop.
    contributions = [parameter_volume(p) for p in rule[:-1]]
    dimension = sum(1 for c in contributions if c != 0)
    return [sum(contributions), dimension]
#print(rule_volume([{12}, {10, 13}, 'B']))
#print(rule_volume([{12,13}, {10, 13}, 'B']))
#print(rule_volume([{8}, (3,), 'A']))
#print(rule_volume([8, (5,), 'B']))
#print(rule_volume([{8}, (7,), 'A']))
#-------------------------------------------------------------------
# Function that receives an array with the [volume,dimension] for a set of rules
# and return the global [volume, dimension] for each dimension
# [ VOLUME, DIMENSION ]
# [ VOLUME, DIMENSION ]
# [ VOLUME, DIMENSION ]
def sum_equal_dimensions(volumes):
    """Group ``[volume, dimension]`` pairs by dimension and sum the volumes.

    Returns one ``[total_volume, dimension]`` entry per distinct dimension,
    in order of first appearance.  Unlike the original implementation, the
    input list is left intact (it used to be emptied element by element via
    ``list.remove``, an O(n^2) loop that also corrupted the caller's data).
    """
    totals = {}
    for volume, dimension in volumes:
        if dimension in totals:
            totals[dimension][0] += volume
        else:
            # Fresh accumulator list so the input entries are never aliased.
            totals[dimension] = [volume, dimension]
    # dict preserves insertion order, matching the original output order.
    return list(totals.values())
#print(sum_equal_dimensions( [ [4.0, 2], [3.0, 1], [1.0, 1] ] ) )
#print(sum_equal_dimensions([[3.0, 1], [4.0, 2], [1.0, 1]]) )
#print(sum_equal_dimensions([[0,0],[0,0],[0,0]]))
#print(sum_equal_dimensions([[2, 1], [0, 0], [2, 1], [0, 0], [0, 0], [0, 0]]))
#-------------------------------------------------------------------
#Function that takes an array of arrays and sort them by its second entrance
#from operator import itemgetter
def sort_volumes(volumes):
    """Return the ``[volume, dimension]`` entries ordered by dimension."""
    return sorted(volumes, key=lambda entry: entry[1])
#sort_volumes([[4.0, 1], [4.0, 2]])
#sort_volumes([[4.0, 2], [4.0, 1]])
#print(sort_volumes([[1.0, 2], [3.0, 2], [4.0, 4]]))
#print(sort_volumes([[1.0, 2], [3.0, 2], [4.0, 4]]) )
#print(sort_volumes([[0, 0]]))
#-------------------------------------------------------------------
#Function to calculate the volume of a set of rules
# For each rule in the set
#calculate its volume
#retur the sum of the volumes
def volume_of_the_ruleset(rules):
    """Total volume of a rule set.

    Computes each rule's ``[volume, dimension]``, merges entries that share
    a dimension, and returns them sorted by dimension.
    """
    per_rule = [rule_volume(rule) for rule in rules]
    merged = sum_equal_dimensions(per_rule)
    return sort_volumes(merged)
#print(partition_volume([[(12,), {10, 13}, 'B'], [{11, 13}, {11, 13}, 'D'], [{12,13},(10,),'B'] ]))
#print(volume_of_the_ruleset( [[(6,), {4, 6}, 'A'], [(8,), 5, 'B'], [(10,), {4, 6}, 'A'], [{8}, (3,), 'A'], [8, (5,), 'B'], [{8}, (7,), 'A'] ] ) )
#
#from calculate_volume_of_a_ruleset import volume_of_the_ruleset, sum_equal_dimensions
def partition_volumes(combinations):
    """Compute the dimension-merged volume of every combination.

    Each combination is a list of partitions (one per rule); each partition
    is itself a rule set.  Returns one merged ``[volume, dimension]`` list
    per combination.
    """
    combinations_volumes = []
    for combination in combinations:
        combination_volume = []
        # Each partition is the partition of a rule.
        for partition in combination:
            # ``extend`` replaces the original side-effecting list
            # comprehension (which built and discarded a list of Nones).
            combination_volume.extend(volume_of_the_ruleset(partition))
        combinations_volumes.append(sum_equal_dimensions(combination_volume))
    return combinations_volumes
# -*- coding: utf-8 -*-
"""
Function that takes a set of sets of rules, each one corresponding
to a different partition of an original connected set
e.g set1, set2, set3 . . .
and return the set (partition) with greater "volume".
"""
from copy import deepcopy
#-----------------------------------------------
# Function "compare" takes two single volumes
# that could have different volume and dimension
# [volume1, dimension1] [volume2, domension2]
# and returns the maximum considering volume
# and dimension.
#-----------------------------------------------
def compare(vol1, vol2):
    """Compare two ``[volume, dimension]`` pairs.

    Dimension dominates; volume breaks ties.  Returns 1 if ``vol1`` wins,
    2 if ``vol2`` wins, 3 on a complete tie.
    """
    # Check dimension (index 1) first, then volume (index 0).
    for idx in (1, 0):
        if vol1[idx] != vol2[idx]:
            return 1 if vol1[idx] > vol2[idx] else 2
    return 3
#print(compare([4,1],[3,1]))
#-------------------------------------------------
# This function takes two volumes (one beguins being the
# winner and the other the contendent) that may have
# different dimensions e.g [[0,0],[2,1],[2,4]] and [[3,1],[1,4]]
# and returns:
# 1 if the winner has the bigger volume.
# 2 if the contendent has the bigger volume.
# 3 if they are tie .
# They fight to see which one winns.
#-------------------------------------------------
def fight(winner, contendent):
    """Decide which of two volume lists is "bigger".

    Compares the entries from the highest dimension downwards until one side
    wins; ties (including running out of entries) go to ``winner``.  Returns
    a deep copy of the winning list.

    Fix: the original popped elements off the *caller's* lists while
    comparing, destroying them as a side effect; it also contained a no-op
    ``if result == 3: result = 3`` and an unreachable ``else`` branch.
    Here the comparison walks private copies and the inputs stay intact.
    """
    winner_copy = deepcopy(winner)
    contendent_copy = deepcopy(contendent)
    # Shallow working copies are enough: we only pop, never mutate entries.
    hands1 = list(winner)
    hands2 = list(contendent)
    result = 3
    while result == 3 and hands1 and hands2:
        result = compare(hands1.pop(), hands2.pop())
    if result == 2:
        return contendent_copy
    # result == 1, or a tie after exhausting either list: winner prevails.
    return winner_copy
#print( fight( [[0,0],[5,1],[4,2]], [[10,1],[4,2]]) )
#print(fight([[0,0],[1,2]],[[0,0]]))
#print(fight( [[4, 1], [0, 0]], [[8, 1], [0, 0]]))
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# This function receives an array with the volumes of the different
# combinations created from the different partitions of the rules
# -the different ways of solving the contradictions-
# and return the index of the bigest one
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#def find_combination_with_maximum_volume(volumes_of_the_combinations):
# if volumes_of_the_combinations:
# maximum_volume = volumes_of_the_combinations[0]
# for i in range(len(volumes_of_the_combinations)):
# maximum_volume = fight(maximum_volume,volumes_of_the_combinations[i])
# for i, j in enumerate(volumes_of_the_combinations):
# if j == maximum_volume:
# index = i
# print(i)
# return i
def find_combination_with_maximum_volume(volumes_of_the_combinations):
    """Return the index of the combination with the largest volume.

    Runs a knockout ``fight`` of the current maximum against every
    candidate, then locates the surviving volume in the input.

    Fixes: copies are handed to ``fight`` because it may consume its
    arguments, which previously mutilated the stored volume lists and could
    make the final equality scan fail; the dead ``index`` variable is gone;
    an empty input now returns None explicitly.
    """
    if not volumes_of_the_combinations:
        return None
    if len(volumes_of_the_combinations) == 1:
        return 0
    maximum_volume = deepcopy(volumes_of_the_combinations[0])
    for candidate in volumes_of_the_combinations:
        # Pass a copy so fight() cannot corrupt the stored candidate.
        maximum_volume = fight(maximum_volume, deepcopy(candidate))
    for index, volume in enumerate(volumes_of_the_combinations):
        if volume == maximum_volume:
            return index
|
import sqlite3, hashlib
import os
DIR = os.path.dirname(__file__)
DIR += '/'
m = DIR + "../data/database.db"
# Login - Returns true if successful, false otherwise
def login(username, password):
print "THIS IS M " + m
db = sqlite3.connect(m)
c = db.cursor()
c.execute("SELECT username, password FROM profiles WHERE username = '%s';" % (username));
for account in c:
user = account[0]
passw = account[1]
# Check if user and encrypted password match
print username + " " + user
print passw + " " + encrypt_password(password)
if username == user and encrypt_password(password) == passw:
print "Successful Login"
db.commit()
db.close()
return True
print "Login Failed"
db.commit()
db.close()
return False
# Encrypt password - SHA256
def encrypt_password(password):
    # NOTE: this is an unsalted SHA-256 *hash*, not encryption.
    digest = hashlib.sha256(password)
    return digest.hexdigest()
# Create account - Returns true if successful, false otherwise
def create_account(username, password, fullname, account):
db = sqlite3.connect(m)
c = db.cursor()
if not does_username_exist(username):
# Add user to profiles table
c.execute("INSERT INTO profiles VALUES('%s', '%s', '%s', '%s');" % (username, encrypt_password(password), fullname, account))
db.commit()
db.close()
print "Create Account Successful"
return True
print "Create Account Failed"
db.commit()
db.close()
return False
# Checks if username exists - Returns true if username exists, false otherwise
def does_username_exist(username):
db = sqlite3.connect(m)
c = db.cursor()
c.execute("SELECT username FROM profiles WHERE username = '%s';" % (username))
for account in c:
# Username exists
print "Username exists"
db.commit()
db.close()
return True
print "Username does not exist"
db.commit()
db.close()
return False
# Returns account type of a specific user - else returns False if failed
def get_account(username):
db = sqlite3.connect(m)
c = db.cursor()
if does_username_exist(username):
c.execute("SELECT account FROM profiles WHERE username = '%s';" % (username))
for account in c:
db.commit()
db.close()
return account[0]
print "Username does not exist"
db.commit()
db.close()
return False
# Returns full name of a specific user - else returns False if failed
def get_name(username):
db = sqlite3.connect(m)
c = db.cursor()
if does_username_exist(username):
c.execute("SELECT fullname FROM profiles WHERE username = '%s';" % (username))
for account in c:
db.commit()
db.close()
return account[0]
print "Username does not exist"
db.commit()
db.close()
return False
# Returns class type of a specific course - else Returns False if failed
def get_classtype(coursecode):
db = sqlite3.connect(m)
c = db.cursor()
if not does_course_exist(coursecode):
c.execute("SELECT type FROM classes WHERE coursecode = '%s';" % (coursecode))
for course in c:
print "Class Type Returned: " + str(course)
db.commit()
db.close()
return course[0]
print "Course does not exist"
db.commit()
db.close()
return False
# Returns a list of leaders of a specific course - else Returns False if failed
def get_leaders(coursecode):
db = sqlite3.connect(m)
c = db.cursor()
if does_course_exist(coursecode):
c.execute("SELECT leader FROM leaders WHERE coursecode = '%s';" % (coursecode))
leaders = []
for course in c:
leaders.append(course[0])
print "Leaders Returned: " + str(leaders)
db.commit()
db.close()
return leaders
print "Course does not exist"
db.commit()
db.close()
return False
# Returns a list of students enrolled in a specific course - else Returns False if failed
def get_students(coursecode):
db = sqlite3.connect(m)
c = db.cursor()
if does_course_exist(coursecode):
c.execute("SELECT student FROM enrollment WHERE coursecode = '%s';" % (coursecode))
students = []
for student in c:
students.append(student[0])
print "Students Returned: " + str(students)
db.commit()
db.close()
return students
print "Course does not exist"
db.commit()
db.close()
return False
# Authorizes student into the class
def authorize_class(coursecode, password):
db = sqlite3.connect(m)
c = db.cursor()
c.execute("SELECT coursecode, password FROM classes WHERE coursecode = '%s';" % (coursecode));
for course in c:
ccode = course[0]
passw = course[1]
# Check if ccode and encrypted password match
if coursecode == ccode and encrypt_password(password) == passw:
print "Successful Authorization Into Class"
db.commit()
db.close()
return True
print "Class Authorization Failed"
db.commit()
db.close()
return False
# Adds unexcused attendance if DNE, else excuses with reason
def add_attendance(username, course, day, type, reason):
db = sqlite3.connect(m)
c = db.cursor()
if type == 'E':
c.execute("UPDATE attendance SET type = 'E', reason = '%s' WHERE username = '%s' AND day = '%s' AND course = '%s';" % (reason, username, day, course))
print "Attendance updated to excused"
db.commit()
db.close()
return True
else:
c.execute("INSERT INTO attendance VALUES('%s', '%s', '%s', 'U', '');" % (username, day, course))
print "Attendance added"
db.commit()
db.close()
return True
db.commit()
db.close()
print "Attendance didn't work"
return False
# Returns whether or not the class exists
def does_course_exist(coursecode):
db = sqlite3.connect(m)
c = db.cursor()
c.execute("SELECT coursecode FROM classes WHERE coursecode = '%s';" % (coursecode))
for course in c:
# course exists
print "Course exists"
db.commit()
db.close()
return True
print "Course does not exist"
db.commit()
db.close()
return False
# Creates class if class does not exist - Returns true if successful or false if not
def create_class(teacher, coursecode, password):
db = sqlite3.connect(m)
c = db.cursor()
if not does_course_exist(coursecode):
# Add course to classes table
c.execute("INSERT INTO classes VALUES('%s', '%s', '%s');" % (teacher, coursecode, encrypt_password(password)))
db.commit()
db.close()
print "Create Course Successful"
return True
print "Create Course Failed"
db.commit()
db.close()
return False
# Gets all the available classes
def get_classes(username):
db = sqlite3.connect(m)
c = db.cursor()
if get_account(username) == 'T':
c.execute("SELECT coursecode FROM classes WHERE teacher='%s';" %(username))
classes = []
for course in c:
classes.append(course[0])
if get_account(username) == 'L':
c.execute("SELECT coursecode FROM leaders WHERE leader='%s';" %(username))
classes = []
for course in c:
classes.append(course[0])
print "Classes Returned: " + str(classes)
db.commit()
db.close()
return classes
# Adds leader to the class - Returns true if successful or false if not
def add_leader(coursecode, username):
db = sqlite3.connect(m)
c = db.cursor()
if does_course_exist(coursecode) and does_username_exist(username):
# Add leader to leaders table
c.execute("INSERT INTO leaders VALUES('%s', '%s');" % (coursecode, username))
c.execute("UPDATE profiles SET account='L' WHERE username='%s';" %(username))
db.commit()
db.close()
print "Add Leader Successful"
return True
print "add Leader Failed"
db.commit()
db.close()
return False
# Removes leader from the class - Returns true if successful or false if not
def remove_leader(coursecode, username):
db = sqlite3.connect(m)
c = db.cursor()
if not does_course_exist(coursecode) and not does_username_exist(username):
# Add leader to leaders table
c.execute("DELETE FROM leaders WHERE coursecode = '%s' AND username = '%s';" % (coursecode, username))
db.commit()
db.close()
print "Deleted Leader Successful"
return True
print "Deleted Leader Failed"
db.commit()
db.close()
return False
# Adds student to the class - Returns true if successful or false if not
def add_student(coursecode, username, fullname):
db = sqlite3.connect(m)
c = db.cursor()
if does_course_exist(coursecode) and does_username_exist(username):
# Add student to enrollment table
print '2'
c.execute("INSERT INTO enrollment VALUES('%s', '%s', '%s', NULL);" % (coursecode, username, fullname))
db.commit()
db.close()
print "Add Student Successful"
return True
print "Add Student Failed"
db.commit()
db.close()
return False
# Removes student from the class - Returns true if successful or false if not
def remove_student(coursecode, username):
db = sqlite3.connect(m)
c = db.cursor()
if does_course_exist(coursecode) and does_username_exist(username):
# Add student to enrollment table
c.execute("DELETE FROM enrollment WHERE coursecode = '%s' AND student = '%s';" % (coursecode, username))
db.commit()
db.close()
print "Deleted Student Successful"
return True
print "Deleted Student Failed"
db.commit()
db.close()
return False
# Get grade for student in class - Returns the value
def get_grade(coursecode, username):
db = sqlite3.connect(m)
c = db.cursor()
if does_course_exist(coursecode) and does_username_exist(username):
c.execute("SELECT grade FROM enrollment WHERE coursecode = '%s' AND student = '%s';" % (coursecode, username))
for grade in c:
print "Grade Returned: " + str(grade)
db.commit()
db.close()
return grade[0]
db.commit()
db.close()
return 'not yet inputted'
# Changes grade for student in class - Returns true if successful or false if not
def change_grade(coursecode, username, grade):
db = sqlite3.connect(m)
c = db.cursor()
if does_course_exist(coursecode) and does_username_exist(username):
remove_student(coursecode, username)
c.execute("INSERT INTO enrollment VALUES('%s', '%s', '%s', %d);" % (coursecode, username, get_name(username), grade))
db.commit()
db.close()
print "Changed Grade Successful"
return True
print "Changed Grade Failed"
db.commit()
db.close()
return False
# Counts number of unexcused absences for a student
def count_unexcused(username):
db = sqlite3.connect(m)
c = db.cursor()
ans = 0
if does_username_exist(username):
c.execute("SELECT type FROM attendance WHERE username = '%s' AND type = 'U';" % (username))
for grade in c:
ans += 1
print "Unexcused: " + str(ans)
db.commit()
db.close()
return ans
# Counts number of excused absences for a student
def count_excused(username):
db = sqlite3.connect(m)
c = db.cursor()
ans = 0
if does_username_exist(username):
c.execute("SELECT type FROM attendance WHERE username = '%s' AND type = 'E';" % (username))
for grade in c:
ans += 1
print "Unexcused: " + str(ans)
db.commit()
db.close()
return ans
# Gets all the classes that a student is enrolled in
def get_studentclass(username):
db = sqlite3.connect(m)
c = db.cursor()
c.execute("SELECT coursecode FROM enrollment WHERE student = '%s';" %(username))
classes = []
for course in c:
classes.append(course[0])
print "Classes Returned: " + str(classes)
db.commit()
db.close()
return classes
# Checks if student was present on a given day for a given course - Returns true if absent, false if not
def student_present(username, date, course):
db = sqlite3.connect(m)
c = db.cursor()
if does_course_exist(course) and does_username_exist(username):
c.execute("SELECT type FROM attendance WHERE username='%s' AND day='%s' AND course='%s';" % (username, date, course))
for account in c:
db.commit()
db.close()
print "Absent"
return False
print "Present"
db.commit()
db.close()
return True
# Checks if student was absent on a given day for a given course - Returns true if absent, false if not
def check_attendance(username, date, course):
db = sqlite3.connect(m)
c = db.cursor()
if does_course_exist(course) and does_username_exist(username):
c.execute("SELECT type FROM attendance WHERE username='%s' AND day='%s' AND course='%s';" % (username, date, course))
for account in c:
db.commit()
db.close()
print "Absence recorded"
return False
print "No absence recorded"
db.commit()
db.close()
return True
# Removes attendance for those marked present - Returns true if removed, false if not
def delete_attendance(username, date, course):
db = sqlite3.connect(m)
c = db.cursor()
if does_course_exist(course) and does_username_exist(username):
c.execute("DELETE FROM attendance WHERE username='%s' AND day='%s' AND course='%s';" % (username, date, course))
db.commit()
db.close()
print "Absence removed"
return False
print "No absence removed"
db.commit()
db.close()
return True
|
nums=[10,202,12,121]
# for/else demo: the else clause runs only when the loop finishes WITHOUT
# hitting break.  Here 10 % 5 == 0, so 10 is printed and the loop breaks,
# skipping the else branch.
for num in nums:
    if num%5==0:
        print(num)
        break
else:
    print("Not Found")
#!/usr/bin/env python
"""
scdist.py
=============
Used from Opticks scdist- bash functions.
"""
import os, logging, argparse
log = logging.getLogger(__name__)
from opticks.bin.dist import Dist
class SCDist(Dist):
    """Opticks source-code distribution.

    Dist specialization that packages the directories in ``bases`` and
    filters out ``.log`` files.
    """
    # No directory names are excluded for this distribution.
    exclude_dir_name = []
    # Top-level directories included in the archive.
    bases = [
        'bin',
        'geocache',
        'rngcache',
        'metadata',
    ]
    extras = []
    def __init__(self, distprefix, distname):
        Dist.__init__(self, distprefix, distname, extra_bases=[])
    def exclude_file(self, name):
        """Return True for files that must not enter the distribution."""
        return name.endswith(".log")
if __name__ == '__main__':
    # CLI entry point: parse distribution name/prefix, configure logging,
    # then build the distribution via SCDist.
    parser = argparse.ArgumentParser(__doc__)
    parser.add_argument( "--distname", help="Distribution name including the extension, expect .tar or .tar.gz" )
    parser.add_argument( "--distprefix", help="Distribution prefix, ie the top level directory structure within distribution file." )
    parser.add_argument( "--level", default="info", help="logging level" )
    args = parser.parse_args()
    # Verbose log format including pid and source location.
    fmt = '[%(asctime)s] p%(process)s {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s'
    logging.basicConfig(level=getattr(logging,args.level.upper()), format=fmt)
    log.info("distprefix %s distname %s " % (args.distprefix, args.distname))
    dist = SCDist(args.distprefix, args.distname )
|
from django.core.files import File
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from django.views.generic.edit import FormView
from django.utils import timezone
from .forms import FileUploadform
from .models import UploadData
from .pdf_utils import MergePDFs
import json
import ast
# Create your views here.
def file_upload(request):
    """Handle PDF uploads for the merge workflow.

    POST: validate the form and save every file in the ``file`` field as an
    ``UploadData`` row, then re-render the upload view with the stored files.
    GET: render an empty form, best-effort clearing any uploads left over
    from a previous session.
    """
    if request.method == "POST":
        form = FileUploadform(request.POST, request.FILES)
        if form.is_valid():
            # Multi-file upload: one UploadData row per submitted file.
            # (Debug prints of chunks/instances removed.)
            for file_data in request.FILES.getlist('file'):
                instance = UploadData(upload=file_data)
                instance.save()
            file_count = UploadData.objects.all().count()
            return render(request, 'pdfApp/upload_view.html',
                          {'form': form, 'fnames_pk': UploadData.objects.all(),
                           'file_count': file_count})
        return render(request, 'pdfApp/upload_view.html',
                      {'form': form, 'fnames_pk': False})
    form = FileUploadform()
    # Fresh visit: drop leftovers from a previous merge session.
    try:
        if UploadData.objects.count() > 0:
            UploadData.objects.all().delete()
    except Exception:
        # Narrowed from a bare ``except:`` -- cleanup remains best-effort.
        print("Data Not found .....")
    return render(request, 'pdfApp/upload_view.html',
                  {'form': form, 'fnames_pk': False})
def file_download(request):
    """Merge the uploaded PDFs and stream the result back as a download."""
    # Fetch the data from UploadData Table and display it on the console.
    query = UploadData.objects.all()
    print(query)
    # Merge the uploaded pdfs; MergePDFs writes '../test.pdf'.
    # NOTE(review): a CWD-relative output path is fragile -- confirm.
    MergePDFs()
    # ``with`` guarantees the handle is closed (the original leaked it if
    # constructing the response raised).
    with open('../test.pdf', 'rb') as f:
        pdf = f.read()
    data = HttpResponse(pdf, content_type='application/pdf')
    data['Content-Disposition'] = f'filename={timezone.now()}.pdf'
    return data
def remove_upload(request,pk):
    # Delete one uploaded file record, then re-render the upload view.
    # NOTE(review): the URL ``pk`` is only printed; the lookup uses the
    # ``file_id`` query parameter instead -- confirm this is intentional.
    # A missing ``file_id`` makes ``get(pk=None)`` raise.
    print(f"File ID in remove_upload = {pk}")
    file_id = request.GET.get("file_id", None)
    file_op = UploadData.objects.get(pk=file_id)
    file_op.delete()
    return render(request,'pdfApp/upload_view.html')
"""Treadmill app configurator daemon, subscribes to eventmgr events.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import click
from treadmill import appcfgmgr
def init():
    """Top level command handler.

    Builds and returns the ``run`` click command so the CLI framework can
    register it.
    """
    @click.command()
    @click.option('--approot', type=click.Path(exists=True),
                  envvar='TREADMILL_APPROOT', required=True)
    @click.option('--runtime', envvar='TREADMILL_RUNTIME', required=True)
    def run(approot, runtime):
        """Starts appcfgmgr process."""
        # Blocks in the manager's event loop until terminated.
        mgr = appcfgmgr.AppCfgMgr(approot, runtime)
        mgr.run()
    return run
|
class Solution(object):
    def max_profit(self, prices):
        """Best time to buy and sell stock (single transaction).

        :type prices: List[int]
        :rtype: int -- maximum profit from one buy/sell pair; 0 if the
            price only falls or ``prices`` is empty.
        """
        # Empty input: no transaction is possible.
        if not prices:
            return 0
        # Removed: per-iteration debug prints and the dead ``total``
        # accumulator, plus commented-out alternatives.
        profit = 0
        min_price = prices[0]
        for price in prices:
            # Cheapest buy price seen so far.
            min_price = min(price, min_price)
            # Best profit if we sold today after buying at that minimum.
            profit = max(profit, price - min_price)
        return profit
# Demo run: the best single transaction here is buy at 1, sell at 6 -> 5.
prices = [7, 1, 5, 3, 6, 4]
obj = Solution()
result = obj.max_profit(prices)
print(result)
|
import io
import csv
from beancount.core.number import D
from beancount.core import data
def identify(rd: io.TextIOBase, dialect: str, fields: list) -> bool:
    """Return True when the first CSV row of *rd* contains exactly *fields*.

    Only the header row is examined; comparison is order-insensitive (set
    equality, so duplicates collapse).  An input with no rows identifies as
    True, preserving the original fall-through behaviour.

    Fix: the annotation ``[str]`` was a list literal, not a type; use
    ``list`` (the file's typing style predates ``list[str]``).
    """
    reader = csv.reader(rd, dialect=dialect)
    for row in reader:
        # Decide on the very first row only.
        return set(row) == set(fields)
    return True
def make_posting(account, amount=None):
    """Build a minimal beancount Posting: only account and amount are set."""
    unset = (None, None, None, None)
    return data.Posting(account, amount, *unset)
def parse_amount(s):
    """Parse a decimal-comma number string into an EUR beancount Amount."""
    # Normalize the decimal separator before handing off to Decimal.
    normalized = s.replace(',', '.')
    return data.Amount(D(normalized), 'EUR')
|
import json
from rest_framework import permissions
from django.core import serializers
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse, HttpResponseBadRequest, JsonResponse
from django.utils.datastructures import MultiValueDictKeyError
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from rest_framework.views import APIView
from django.http import Http404
from rest_framework.response import Response
from .models import Cafe, Item, Order
from .serializers import CafeSerializer, OrderSerializer
from users.models import User
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
class CsrfExemptSessionAuthentication(SessionAuthentication):
    # Session authentication with the CSRF check disabled.
    # NOTE(review): this removes CSRF protection for session-authenticated
    # requests -- confirm that is acceptable for the endpoints using it.
    def enforce_csrf(self, request):
        return None
class CafeList(APIView):
    def get(self, request):
        """
        Return the list of all cafes.
        """
        cafe_queryset = Cafe.objects.all()
        serializer = CafeSerializer(cafe_queryset, many=True)
        # safe=False because the serialized payload is a list, not a dict.
        return JsonResponse(serializer.data, safe=False)
class CafeDetail(APIView):
    def get_object(self, pk):
        # Fetch a cafe by primary key, translating a miss into Http404.
        try:
            return Cafe.objects.get(pk=pk)
        except Cafe.DoesNotExist:
            raise Http404
    def get(self, request, pk):
        """
        Retrieve the full information for one cafe.
        ---
        Parameters:
        * pk - ID of the cafe to fetch
        ---
        Returned dictionary:
        * cafe_id - cafe ID
        * cafe_name - cafe name
        * cafe_description - cafe description
        * cafe_rating - cafe rating
        * lat - cafe latitude
        * lon - cafe longitude
        * cafe_owner - the cafe owner, a :model:`cafes.Owner` object serialized as a dict with fields:\n
            * owner_id - owner ID
            * owner_name - owner name
            * owner_phone_number - owner phone number
            * owner_email - owner e-mail
        * cafe_menu - the cafe menu, a list of :model:`cafes.Item` objects, each a dict with fields:\n
            * item_id - item ID
            * item_name - item name
            * item_description - item description
            * item_time - item preparation time
            * item_icon - item icon
            * item_image - item photo
            * item_cost - item price
        * cafe_opening_hours - a list:\n
            * element 0 - opening time
            * element 1 - closing time
        * add_time - when the cafe was added to the system
        """
        try:
            cafe = self.get_object(pk)
        except Http404:
            return HttpResponse(status=404)
        serializer = CafeSerializer(cafe)
        return JsonResponse(serializer.data)
class CafeName(APIView):
    def get(self, request):
        """
        Look a cafe up by its name.
        ---
        Parameters:
        cafe_name - cafe name, string
        ---
        Returned dictionary:
        * cafe_id - cafe ID
        * cafe_name - cafe name
        * cafe_description - cafe description
        * cafe_rating - cafe rating
        * lat - cafe latitude
        * lon - cafe longitude
        * cafe_owner - the cafe owner, a :model:`cafes.Owner` object serialized as a dict with fields:\n
            * owner_id - owner ID
            * owner_name - owner name
            * owner_phone_number - owner phone number
            * owner_email - owner e-mail
        * cafe_menu - the cafe menu, a list of :model:`cafes.Item` objects, each a dict with fields:\n
            * item_id - item ID
            * item_name - item name
            * item_description - item description
            * item_time - item preparation time
            * item_icon - item icon
            * item_image - item photo
            * item_cost - item price
        * cafe_opening_hours - a list:\n
            * element 0 - opening time
            * element 1 - closing time
        * add_time - when the cafe was added to the system
        """
        try:
            cafes = Cafe.objects.get(cafe_name=request.GET["cafe_name"])
        except Cafe.DoesNotExist:
            return HttpResponse(status=404)
        except MultiValueDictKeyError:
            return HttpResponse(status=400)
        # NOTE(review): always true inside a get() handler; and since
        # ``objects.get`` returns a single Cafe, the many=True branch
        # below appears unreachable -- confirm before cleaning up.
        if request.method == "GET":
            if type(cafes) == Cafe:
                serializer = CafeSerializer(cafes)
            else:
                serializer = CafeSerializer(cafes, many=True)
            return JsonResponse(serializer.data, safe=False)
class CafeCoordinates(APIView):
    def get(self, request):
        """Return cafes whose coordinates lie within radius r of (lat, lon).

        Query params:
            lat (float): latitude of the search center.
            lon (float): longitude of the search center.
            r (float): search radius.

        Returns:
            JSON list of dicts with keys: icon, cafe_name,
            cafe_coordinates {lat, lon}, cafe_description, cafe_id.
            400 when a parameter is missing or not numeric.

        Note: the distance test is plain Euclidean in raw degrees — no
        geodesic correction is applied.
        """
        try:
            lat = float(request.GET["lat"])
            lon = float(request.GET["lon"])
            r = float(request.GET["r"])
        except (KeyError, ValueError):
            # ValueError: parameter present but not a number — previously
            # this fell through and produced a 500.
            return HttpResponseBadRequest("lat, lon or r parameter is invalid")
        r2 = r ** 2
        cafes = []
        for cafe in Cafe.objects.all():
            if (cafe.cafe_coordinates.lat - lat) ** 2 + (cafe.cafe_coordinates.lon - lon) ** 2 <= r2:
                cafes.append(
                    {
                        "icon": cafe.icon.url,
                        "cafe_name": cafe.cafe_name,
                        "cafe_coordinates": {
                            "lat": cafe.cafe_coordinates.lat,
                            "lon": cafe.cafe_coordinates.lon
                        },
                        "cafe_description": cafe.cafe_description,
                        "cafe_id": cafe.cafe_id
                    }
                )
        return JsonResponse(cafes, safe=False)
class ItemDetail(APIView):
    def get(self, request):
        """Fetch a single menu item by its ID.

        Query params:
            id (int): item_id of the :model:`cafes.Item` to fetch
                (the docstring previously said `item_id`, but the code
                reads the `id` parameter).

        Returns:
            The Django-serialized Item, or 400 when `id` is missing,
            non-numeric, or matches no item.
        """
        try:
            item = Item.objects.get(item_id=int(request.GET["id"]))
        except (KeyError, ValueError):
            # ValueError: `id` present but not an integer — previously
            # this escaped and produced a 500.
            return HttpResponseBadRequest("No id in request")
        except ObjectDoesNotExist:
            return HttpResponseBadRequest("No object with your id")
        serialized_obj = serializers.serialize('json', [item, ])
        return HttpResponse(serialized_obj)
class OrdersList(APIView):
    def get(self, request):
        """List every not-yet-taken order for one cafe.

        Query params:
            cafe_id (int): ID of the cafe whose pending orders are listed.

        Returns:
            JSON list of serialized orders (id, customer, on_time,
            order_time, items, done, taken, cafe_id), or 400 when
            cafe_id is missing.
        """
        try:
            cafe_id = request.GET["cafe_id"]
        except KeyError as e:
            return Response("No " + e.args[0] + " field", status=400)
        pending_orders = Order.objects.filter(
            cafe_id=cafe_id,
            taken=False
        )
        serializer = OrderSerializer(pending_orders, many=True)
        return JsonResponse(serializer.data, safe=False)
class OrderCreation(APIView):
    permission_classes = (permissions.IsAuthenticated,)
    authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)

    def post(self, request):
        # TODO: accept a list of item ids in `items` instead of a single id.
        """Create an order for the authenticated user.

        POST params:
            on_time: time the order is scheduled for.
            items: ID of the ordered item (single id for now).
            cafe_id: ID of the cafe where the order was placed.

        Returns:
            200 on success; 400 with a message for a missing field or an
            unknown item/cafe id.
        """
        try:
            customer = User.objects.get(username=request.user)
            on_time = request.POST["on_time"]
            item_id = request.POST["items"]
            cafe_id = request.POST["cafe_id"]
        except KeyError as e:
            return Response("No " + e.args[0] + " field", status=400)
        try:
            # Resolve the foreign keys up front so a bad id yields a clean
            # 400 instead of an unhandled DoesNotExist (HTTP 500).
            item = Item.objects.get(item_id=item_id)
            cafe = Cafe.objects.get(cafe_id=cafe_id)
        except ObjectDoesNotExist:
            return Response("No object with your id", status=400)
        order = Order(
            customer=customer,
            on_time=on_time,
            items=item,
            cafe_id=cafe
        )
        order.save()
        return Response(status=200)
class ChangingOrderStatus(APIView):
    # TODO: add permission classes for this endpoint.
    def post(self, request):
        """Mark an order as done or taken.

        POST params:
            order_id: ID of the order to update.
            status_type: either "done" or "taken".

        Returns:
            200 on success; 400 for a missing field, unknown order id,
            or an unsupported status_type.
        """
        try:
            order_id = request.data["order_id"]
            status_type = request.data["status_type"]
        except KeyError as e:
            return Response("No " + e.args[0] + " field", status=400)
        try:
            order = Order.objects.get(id=order_id)
        except Order.DoesNotExist:
            # Previously a broad `except Exception` reused the
            # "No ... field" template, producing a misleading message.
            return Response("No order with id " + str(order_id), status=400)
        if status_type == "done":
            order.done = True
        elif status_type == "taken":
            order.taken = True
        else:
            return Response("Bad status_type", status=400)
        order.save()
        return Response(status=200)
|
'''
Created on 2018/03/11

@author: yasushi
'''
import tensorflow as tf

# TF1-style smoke test: build a constant op and evaluate it in a session.
hello = tf.constant('Hello, TensorFlow!')
# Context manager guarantees the session (and its resources) is released
# even if run() raises — the original never closed it.
with tf.Session() as sess:
    print(sess.run(hello))
import re
# 要匹配的字符串对象
import time
#
# a="被赋予汗水和欢笑的日子将会成为她成长道路上最宝贵的回忆,祝福母校未来一定会更好!合照东北石油大学六十周年华诞倒计时49天飞扬的歌声,吟唱难忘的岁月,凝聚心头不变的情节;今天,我们用歌声共同挽起友爱的臂膀,让明天来倾听我们爱心旋律的唱响;期盼您下期观看《报答》。策划:吴波,郭连峰(校友),彭艳秋(校友)导演:吴波协调:曹建刚剪辑:吴波,王霖(助理团),李天赐(助理团)文字:郭雨仙,李泽月(助理团),张悦琳(助理团)" \
# "大 学 生 新 闻 中 心 出 品\xa0图文编辑:刘\xa0\xa0\xa0岩责任编辑:王超颖审\u3000\u3000核:苍留松"
#
# # 匹配列表
# pattern=['(?<=图文编辑:).*?(?=责任编辑)','(?<=原创文字:).*?(?=责任编辑)','(?<=摄影记者:).*?(?=责任编辑)','(?<=视频制作:).*?(?=责任编辑)',
# '(?<=图文编辑:).*?(?=原创文字)','(?<=原创文字:).*?(?=视频制作)','(?<=视频制作:).*?(?=图文编辑)',
# '(?<=图文编辑:).*?(?=视频制作)','(?<=视频制作:).*?(?=摄影记者)','(?<=摄影记者:).*?(?=视频制作)',
#
# ]
# # comment = re.compile('(?<=图文编辑:).*?(?=责任编辑)',re.S)
# name=[]
# for i in pattern:
# comment = re.compile(i,re.S)
# comment1 = comment.findall(a)
# if len(comment1)>0:
# name.append(comment1[0])
#
# name[0].replace('\xa0','')
# print(str(a).__contains__(comment))
#
# localtime = time.localtime(1595071935)
# dt = time.strftime('%Y-%m-%d %H:%M:%S', localtime)
import datetime
def get_date_interval(now, end):
    """Return how many days `now` falls after `end` (both 'YYYY-MM-DD')."""
    later = datetime.datetime.strptime(now, '%Y-%m-%d')
    earlier = datetime.datetime.strptime(end, '%Y-%m-%d')
    delta = later - earlier
    # Log the comparison (message kept verbatim from the original).
    print('{} 比 {} 晚:{}天'.format(later, earlier, delta.days))
    return delta.days
# Smoke-test: the interval between these dates should be non-negative.
if get_date_interval('2020-07-20','2020-3-1')>=0:
    print('0k')
import pandas as pd
info1 =pd.read_excel('量化统计.xls')
info2=pd.read_excel('量化统计.xls')
# NOTE(review): concatenating info2 with itself discards the freshly read
# info1 — presumably [info1, info2] was intended; confirm before relying on it.
info1 = pd.concat([info2,info2])
info1 =info1.drop_duplicates().reset_index(drop=True)
import os
# Existence check of the Weibo export; the result is unused (scratch code).
os.path.exists('/opt/mq/pytorch_study/东油微博舆情监督/weibo.xls')
import jieba
# Load the custom name dictionary so jieba can segment person names.
jieba.load_userdict('姓名.txt')
def get_name(data):
    """Extract editor names from a WeChat-article footer string.

    Finds the span between the labels 图文编辑 and 责任编辑, strips label
    tokens, colons and spaces, then segments the remainder with jieba
    (custom name dictionary loaded at module level) and returns the names
    joined with leading spaces ('' when no span is found).
    """
    # Tokens stripped from the matched span; the original list carried a
    # duplicate '责任编辑' and an unused `temp_name` variable.
    noise_tokens = ['图文编辑', '责任编辑', '原创文字', '摄影记者', '视频制作', '原创图片', ':', ':', ' ', ' ']
    end_name = ''
    comment = re.compile('(?<=图文编辑:).*?(?=责任编辑)', re.S)
    matches = comment.findall(data)
    if len(matches) > 0:
        # Collapse all whitespace first, then remove label tokens.
        p_name = ''.join(str(matches[0]).split())
        for token in noise_tokens:
            p_name = p_name.replace(token, '')
        for part in jieba.lcut(p_name):
            end_name = end_name + " " + part
    return end_name
# Quick manual check of get_name() on a sample footer.
m = '图文编辑:向 波 责任编辑'
a = get_name(m)
# all = pd.read_excel('专业统计.xlsx')
# allname=all['姓名']
# allname.to_csv('姓名.txt',index=None,header=None)
# Codeforces "A. Dungeon": monsters with healths a, b, c; every third hit
# deals damage to all three.  A win is possible iff the total health is a
# positive multiple of 9 and each monster can absorb its share.
t = int(input())
for _ in range(t):
    a, b, c = map(int, input().split(' '))
    total = a + b + c
    share = total // 9
    # Total must be >= 9 and divisible by 9, and no monster may die before
    # absorbing `share` triple-hits.
    winnable = total > 8 and total % 9 == 0 and min(a, b, c) >= share
    print('YES' if winnable else 'NO')
|
# Python 2 string-slicing demo.  NOTE(review): `str` shadows the builtin.
str = 'Hello World!'
print str #prints the whole string
print str[0] #prints the first character: 'H'
print str[2:5] #prints characters 3 through 5: 'llo' (the old comment was wrong)
print str[2:] #prints all characters from 3rd position onwards
print str + "Test" #prints the string concatenated with "Test"
|
#!/usr/bin/env python
from flask import Flask, escape, request
from wgraph.summary import go
from wgraph.graph import load, apply_styles
app = Flask(__name__)
GRAPH = load("../graph.tsv")
@app.route("/")
def home():
return """
<form method="POST">
<input name="word">
<input type="submit" value="Enter a word">
</form>
"""
def sumup(word):
    """Build and return an HTML page embedding the etymology graph SVG for `word`."""
    # TODO - return error if no word specified
    graph = go(
        graph=GRAPH,
        word=word,
        max_depth=5,
        max_nodes=50,
        group_by_origin=True,
    )
    # render("output") writes the drawing to disk; presumably the renderer
    # appends the format suffix to produce "output.svg" — TODO confirm.
    apply_styles(word, graph).render("output")
    with open("output.svg") as inputs:
        svg = inputs.read()
    # Drop everything before the <svg> element (XML prolog/doctype) so the
    # markup can be inlined directly into the page body.
    start = svg.find("<svg ")
    if start != -1:
        svg = svg[start:]
    # TODO - return svg MIME type
    return f"""
    <!DOCTYPE html>
    <html lang="en">
    <head>
    <meta charset="UTF-8" />
    <title>Wgraph {escape(word)}</title>
    </head>
    <body>
    <div style="max-width: 100%">
    {svg}
    </div>
    </body>
    </html>
    """
@app.route("/", methods=["POST"])
def form():
word = request.form["word"]
return sumup(word)
@app.route("/summary", methods=["GET"])
def summary():
word = request.args.get("word")
return sumup(word)
|
def solution(n):
    """Convert n to '124-country' notation (base 3 written with digits 1, 2, 4)."""
    digits = []
    while n:
        remainder = n % 3
        if remainder:
            digits.append(str(remainder))
            n //= 3
        else:
            # A zero digit is written as '4' and borrows one from the next
            # higher position.
            digits.append("4")
            n = n // 3 - 1
    return ''.join(reversed(digits))
from collections import deque

# NOTE(review): abandoned second attempt, kept verbatim — it is broken:
#   * country[-1] raises IndexError on an empty deque (e.g. n == 3);
#   * `answer` is never populated before being reversed, so int('') raises
#     ValueError at the end.
# The other definitions of solution() in this file shadow this one.
def solution(n):
    answer = ''
    country = deque()
    while n:
        if n % 3 == 0:
            if country[-1] == '1':
                country.pop()
                country.append(4)
            else:
                country[-1] -= 1
                country.append(4)
        country.append(str(n % 3))
        n //= 3
    answer = list(answer[::-1])
    for i in range(1, len(answer)):
        if answer[i] == '0':
            answer[i - 1] = str(int(answer[i - 1]) - 1)
            answer[i] = '4'
    return str(int(''.join(answer)))
# (translated) Compute the base-K digits and substitute 4 for every 0 digit —
# in the end this is just a base-3 conversion problem.
def solution(n):
    # NOTE(review): debug scratch work — prints n%3 and n//3, then overwrites
    # the argument with n = 12 and always returns ''.  Kept verbatim; the
    # first definition in this file is the working one.
    answer = ''
    print(n % 3)
    print(n // 3)
    A = []
    n = 12
    while True:
        if n // 3 == 0:
            break
        A.append(n % 3)
        n = n // 3
    A.reverse()
    print(A)
    return answer
#The range() Function — demonstrations of start/stop/step and friends.
for i in range (5):
    print (i)
print ('______________________________________')
# Explicit start and stop.
for i in range(5, 10):
    print(i)
print ('______________________________________')
# Positive step.
for i in range(0, 10, 3):
    print(i)
print ('______________________________________')
# Negative step counts downward.
for i in range(-10, -100, -30):
    print(i)
print ('______________________________________')
a= ['Mary', 'had', 'a', 'little', 'lamb']
# Index-based iteration (enumerate() is the idiomatic alternative).
for i in range(len(a)):
    print(i, a[i])
print ('______________________________________')
print(range(10))      # range objects are lazy: printing shows the spec only
print(sum(range(4)))  # 0 + 1 + 2 + 3 == 6
import sys
def parse_http_datetime(s):
    """Convert an HTTP date string to "YYYY-MM-DD HH:MM:SS".

    Handles the three formats allowed by RFC 7231:
      * RFC 1123 / IMF-fixdate: "Sun, 06 Nov 1994 08:49:37 GMT"
      * RFC 850:                "Sunday, 06-Nov-94 08:49:37 GMT"
      * asctime():              "Sun Nov  6 08:49:37 1994"

    Returns "" for a string matching none of the three shapes.
    """
    months = ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]

    def month_num(name):
        # 1-based month number, zero-padded to two digits.
        return "%02d" % (months.index(name) + 1)

    # split() with no argument collapses runs of spaces.  This fixes the
    # asctime form: days < 10 are space-padded ("Nov  6"), so the original
    # split(" ") produced an empty field there and shifted every index,
    # crashing with IndexError for days >= 10.
    a = s.split()
    if "GMT" in s and "-" not in s:
        # RFC 1123: ['Sun,', '06', 'Nov', '1994', '08:49:37', 'GMT']
        return a[3] + "-" + month_num(a[2]) + "-" + a[1] + " " + a[4]
    elif "GMT" in s and "-" in s:
        # RFC 850: date field is 'dd-Mon-yy'; two-digit years are assumed
        # to be 19xx (matches the original behaviour).
        d, mon, yy = a[1].split("-")
        return "19" + yy + "-" + month_num(mon) + "-" + d + " " + a[2]
    elif "GMT" not in s and "-" not in s:
        # asctime: ['Sun', 'Nov', '6', '08:49:37', '1994']
        return a[4] + "-" + month_num(a[1]) + "-" + ("%02d" % int(a[2])) + " " + a[3]
    return ""
# Filter: convert each stdin line to the normalized format until the first
# empty line terminates the input.
while True:
    line = sys.stdin.readline()
    line = line.strip()
    if line == '':
        break
    print(parse_http_datetime(line))
|
# -*- coding: utf-8 -*-
from ProxyIP.utils.useragent import UAPOOL
# Scrapy settings for ProxyIP project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'ProxyIP'

SPIDER_MODULES = ['ProxyIP.spiders']
NEWSPIDER_MODULE = 'ProxyIP.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'ProxyIP (+http://www.yourdomain.com)'

# Obey robots.txt rules
# ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 16

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
# }

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    'ProxyIP.middlewares.ProxyipSpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
#    'ProxyIP.middlewares.ProxyipDownloaderMiddleware': 543,
# }

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'ProxyIP.pipelines.ProxyipPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# LOG_LEVEL = 'ERROR'

# Redis connection settings (was: REDIS配置信息)
REDIS_HOST = 'zqhd5'
REDIS_PORT = 6379
REDIS_PROXYIP_BASENAME = 'tianshu_proxyip'
REDIS_PROXYIP_EXPIRE_TIME = 172800  # proxies expire after 48 hours (seconds)

# Endpoints used to verify that a scraped free proxy actually works
# (was: 免费IP认证接口)
AUTH_URLS_INFO = [{'name': "kuaishou", "url": "http://live.kuaishou.com/m_graphql",
                   'body': {"operationName":"searchHotQuery","variables":{"limit":5},"query":"query searchHotQuery($limit: Int) {\n pcSearchHot(limit: $limit)\n}\n"}
                   }]

# Page range crawled on each proxy-list site (was: 翻页数)
SPIDER_PAGE_START = 1
SPIDER_PAGE_END = 50

# Timeout for proxy checks, in seconds (was: 超时时间)
# RETRY_ENABLED = True
# RETRY_TIMES = 1
DOWNLOAD_TIMEOUT = 5
|
# Read "cuburi.txt": the first line holds the cube count, each later line is
# "<size> <colour>".  Sort cubes by decreasing size, then write to "turn.txt"
# a tower that skips any cube whose colour matches the one directly beneath.
L=[]
with open("cuburi.txt") as f:
    k=1
    for i in f.readlines():
        if k==1:
            n=int(i)  # first line: declared cube count (not used afterwards)
            k+=1
        else:
            j=i.split()
            L.append((int(j[0]),j[1].strip("\n")))
# Sort by size, largest first (negated key).
L=sorted(L,key= lambda e: -e[0])
print (L)
with open("turn.txt","w") as g:
    g.write(str(L[0])+"\n")
    cc=L[0][1]  # colour of the cube currently on top of the tower
    for i in L[1:]:
        if i[1]!=cc:
            g.write(str(i)+"\n")
            cc=i[1]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-28 15:59
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Client.notice to Client.alarm_active (column rename only)."""

    dependencies = [
        ('client_config', '0002_client_notice'),
    ]

    operations = [
        migrations.RenameField(
            model_name='client',
            old_name='notice',
            new_name='alarm_active',
        ),
    ]
|
from tkinter import *
import pickle
from tkinter import messagebox
from tkinter import filedialog
from tkinter.filedialog import askopenfilename
from reviewUtil import *
from clusterUtil import *
def Cluster():
    """Cluster the review typed into the top text box and show related games."""
    game = textBox.get("1.0", END)
    if len(game) > 1:
        clusterResult = list(clusterTestGame(game))
        # kmeans is stochastic: retry up to 4 more times when no related
        # games come back (replaces the original 4-deep copy-pasted ifs).
        for _ in range(4):
            if len(clusterResult) >= 2:
                break
            clusterResult = list(clusterTestGame(game))
        resultStr = ""
        if "Test Game Title.txt" in clusterResult:
            clusterResult.remove("Test Game Title.txt")
        for item in clusterResult:
            resultStr += item[:-4] + ", "   # strip ".txt" suffix
        if len(clusterResult) > 0:
            message = resultStr[:-2]        # drop trailing ", "
        else:
            message = "Related games not found"
        messagebox.showinfo(title="Cluster Results", message=message)
def ClusterFile():
    """Cluster the review loaded from a file (lower text box) and show related games."""
    game = textBox2.get("1.0", END)
    if len(game) > 1:
        clusterResult = list(clusterTestGame(game))
        # kmeans is stochastic: retry up to 4 more times when no related
        # games come back (replaces the original 4-deep copy-pasted ifs).
        for _ in range(4):
            if len(clusterResult) >= 2:
                break
            clusterResult = list(clusterTestGame(game))
        resultStr = ""
        if "Test Game Title.txt" in clusterResult:
            clusterResult.remove("Test Game Title.txt")
        for item in clusterResult:
            resultStr += item[:-4] + ", "   # strip ".txt" suffix
        if len(clusterResult) > 0:
            message = resultStr[:-2]        # drop trailing ", "
        else:
            message = "Related games not found"
        messagebox.showinfo(title="Cluster Results", message=message)
def Analyze():
    """Classify the review in the top text box as positive or negative."""
    game = textBox.get("1.0", END)
    if len(game) <= 1:
        return
    rating = classifyGame(game)
    if rating == 1:
        messagebox.showinfo(title="Analysis Results", message="This was a positive review :)")
    elif rating == 0:
        messagebox.showinfo(title="Analysis Results", message="This was a negative review :(")
def AnalyzeEntities():
    """Chart the named entities found in the file-loaded review (lower box)."""
    game = textBox2.get("1.0",END)
    if len(game) > 1:
        makeChart(getEntities(game))
def AnalyzeEntitiesRaw():
    """Chart the named entities found in the user-typed review (top box)."""
    game = textBox.get("1.0",END)
    if len(game) > 1:
        makeChart(getEntities(game))
def AnalyzeFile():
    """Pick a review file, classify it, and load its text into the lower box."""
    name = filedialog.askopenfilename(title = "Select a video game review", initialdir="sampleReviews",filetypes=(("text files","*.txt"),("all files","*.*")))
    rating = classifyGameReviewFile(name)
    textBox2.delete("1.0",END)
    # `with` guarantees the file handle is closed even if reading fails
    # (the original used bare open/close); read() == ''.join(readlines()).
    with open(name, 'r') as f:
        fText = f.read()
    textBox2.insert(INSERT,fText)
    if rating == 1:
        messagebox.showinfo(title="Analysis Results", message="This was a positive review :)")
    elif rating == 0:
        messagebox.showinfo(title="Analysis Results", message="This was a negative review :(")
# --- Main window and layout -------------------------------------------------
root = Tk()
hi = Label(root,text = "Video Game Review Datamining",padx=30,pady=10,justify=CENTER,font="Verdana 24 bold")
hi.grid(row=0,column=1)

# Border and frame spacing stuff
border = Frame(root, height=10)
border.grid(row=1)
border2 = Frame(root, height=10)
border2.grid(row=6)
border3 = Frame(root, height=10)
border3.grid(row=11)
col = Frame(root, width=20)
col.grid(column=2)
col2 = Frame(root, width=20)
col2.grid(column=4)

# User-provided review that we perform sentiment analysis on
reviewLb = Label(root, text="Analyze a Review", padx=20,pady=10,justify=LEFT,font=16)
reviewLb.grid(row=3,rowspan=3)
textBox = Text(root, wrap=WORD,height=20,width=120,borderwidth=2,font="14",padx=5,pady=5,highlightbackground="GRAY",highlightthickness="0.5")
textBox.grid(row=3,column=1,rowspan=3)
analyzeButton = Button(root,text='Analyze Review',command=Analyze,width=20,pady=10,font="16")
analyzeButton.grid(row=3,column=3,sticky=W)
analyzeButton4 = Button(root,text='Analyze Entities',command=AnalyzeEntitiesRaw,width=20,pady=10,font="16")
analyzeButton4.grid(row=4,column=3,sticky=W)
classifyButton = Button(root,text='Cluster',command=Cluster,width=20,pady=10,font="16")
classifyButton.grid(row=5,column=3,sticky=W)

# File provided review that we perform sentiment analysis on
# NOTE(review): `reviewLb` is re-bound to the second label, dropping the
# reference to the first — harmless for Tk, but a distinct name would be clearer.
reviewLb = Label(root, text="Analyze a Review from a file", padx=20,pady=10,justify=LEFT,font=16)
reviewLb.grid(row=8,rowspan=3)
textBox2= Text(root, wrap=WORD,height=20,width=120,borderwidth=1,font="14",padx=5,pady=5,highlightbackground="GRAY",highlightthickness="0.5")
textBox2.grid(row=8,column=1,rowspan=3)
analyzeButton2 = Button(root,text='Analyze Review File',command=AnalyzeFile,width=20,pady=10,font="16")
analyzeButton2.grid(row=8,column=3,sticky=W)
analyzeButton3 = Button(root,text='Analyze Entities',command=AnalyzeEntities,width=20,pady=10,font="16")
analyzeButton3.grid(row=9,column=3,sticky=W)
classifyButton2 = Button(root,text='Cluster',command=ClusterFile,width=20,pady=10,font="16")
classifyButton2.grid(row=10,column=3,sticky=W)

# And run it! :D
root.mainloop()
|
import unittest
from TFExamplesUtils import *
class TestTFExamplesUtils(unittest.TestCase):
    """Smoke tests for TFExamplesUtils feature builders.

    NOTE(review): these tests only print the resulting protos instead of
    asserting on them, so they can never fail on wrong output.
    """

    def setUp(self):
        # Fresh helper instance per test.
        self.tf_example_utils = TFExamplesUtils()

    def test_bytes_feature(self):
        a = self.tf_example_utils.bytes_feature(b"a")
        print(a)

    def test_float_feature(self):
        a = self.tf_example_utils.float_feature(3.14)
        print(a)

    def test_int64_feature(self):
        a = self.tf_example_utils.int64_feature(30)
        print(a)

    @unittest.skip("create_feature_maps -> function not implemented yet")
    def test_create_feature_maps(self):
        self.fail()
|
from gensim import corpora, models, similarities
from gensim.models import word2vec
import logging
import gensim
import time
import random
from numpy.random import RandomState
from random import randint
import numpy
import pandas as pd
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
from sklearn import svm
#from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
#from sklearn.metrics import classification_report
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import cosine_similarity
from nltk.corpus import stopwords
# Train a 300-dimensional skip-gram word2vec model on the Amazon title
# corpus and persist it; wall-clock training time is printed at the end.
start_time = time.time()
sentences = word2vec.Text8Corpus('AmazonParser/source/title_text8.txt')
# sg=1 selects skip-gram (vs CBOW); size=300 is the embedding dimension.
model = word2vec.Word2Vec(sentences, size=300, window=5, min_count=5, workers=4,sg=1)
model.save('model/word2vec300w2_skip.model')
print("--- %s seconds ---" % (time.time() - start_time))
#coding:utf-8

def caesar_shift(text, key):
    """Caesar-cipher `text` by `key` positions, wrapping inside the alphabet.

    Letters keep their case; non-letters pass through unchanged.  The
    original version did `chr(ord(x) + key)` on every character, which
    walked past 'z'/'Z' and mangled punctuation despite promising a
    0~26 key.
    """
    out = []
    for ch in text:
        if 'a' <= ch <= 'z':
            out.append(chr((ord(ch) - ord('a') + key) % 26 + ord('a')))
        elif 'A' <= ch <= 'Z':
            out.append(chr((ord(ch) - ord('A') + key) % 26 + ord('A')))
        else:
            out.append(ch)
    return ''.join(out)

if __name__ == "__main__":
    print("Please input plain text")
    plain = input()
    print("Please input key(0~26)")
    key = int(input())
    print(caesar_shift(plain, key))
|
"""Parses webcomic pages to identify their navigation links and locate archived and new comics"""
import urllib
from contextlib import closing
from lxml import etree
from lxml import html
def findLinks(source, target):
    """ Locates all links from the source URL to the target URL, returning a list
    of 2-element tuples with each link's xpath and inner HTML """
    links = []
    # closing() guarantees the HTTP socket is released (Python 2 urllib).
    with closing(urllib.urlopen(source)) as source_socket:
        absolute_tree = html.document_fromstring(source_socket.read(), base_url=source)
    # Make every href absolute so they can be compared against `target`.
    absolute_tree.make_links_absolute()
    # Walk only the <a> elements, in document order.
    context = etree.iterwalk(absolute_tree, tag="a")
    for action, elem in context:
        if "href" in elem.attrib:
            url = elem.attrib["href"]
            if url == target:
                links.append((etree.ElementTree(elem).getpath(elem), elem.text))
    return links
def findXpathFor(source, target):
    """ Locates a target within the source URL and returns an xpath of the container element """
    with closing(urllib.urlopen(source)) as source_socket:
        absolute_tree = html.document_fromstring(source_socket.read(), base_url=source)
    absolute_tree.make_links_absolute()
    context = etree.iterwalk(absolute_tree)
    stripped_target = target.strip()
    for action, elem in context:
        # Compare whitespace-stripped text so surrounding formatting is ignored.
        if elem.text and (elem.text.strip() == stripped_target):
            return etree.ElementTree(elem).getpath(elem)
    # Falls through to an implicit None when the text is not found.
def getTextForXpath(source, xpath):
    """ Returns the space-stripped text from the element that matches that xpath on the source URL """
    with closing(urllib.urlopen(source)) as source_socket:
        tree = html.parse(source_socket, base_url=source)
    # Raises IndexError when the xpath matches nothing.
    return tree.xpath(xpath)[0].text_content().strip()
def getNext(source, xpath, expected_html):
    """ If the link in the xpath contains a link with the expected html and an href that does
    point somewhere else (i.e., not "#"), returns its href
    (which should be the next comic, if that source is a non-last webcomic episode)."""
    next = None
    # Use closing() like the sibling helpers: the original closed the socket
    # manually, leaking it whenever read()/parsing raised.
    with closing(urllib.urlopen(source)) as source_socket:
        source_html = html.document_fromstring(source_socket.read(), base_url=source)
    source_html.make_links_absolute()
    links = source_html.xpath(xpath)
    if len(links) == 1:
        link = links[0]
        if link.text == expected_html and \
           "href" in link.attrib and \
           link.attrib["href"].strip("# \n") != source.strip("# \n"):
            next = link.attrib["href"]
    return next
def removePrefix(url):
    """Return the final path component of `url` (everything after the last '/')."""
    return url.split("/")[-1]
|
#!/usr/bin/env python
# Solution for http://adventofcode.com/2016/
def is_triange(a, b, c):
    """True when sides a, b, c form a non-degenerate triangle.

    Equivalent to sorting and checking max < sum of the other two:
    sum > 2 * max  <=>  max < (sum - max).
    """
    return a + b + c > 2 * max(a, b, c)
# Sanity check: 5, 10, 25 cannot form a triangle (prints False; Python 2).
print is_triange(5, 10, 25)

# Advent of Code 2016 day 3, part 2: the input has three 5-character-wide
# numeric columns, and each triangle is read *vertically* — one side from
# each of three consecutive lines.
triangles = 0
with open('advent_2016_3.txt') as fp:
    line1 = fp.readline()
    line2 = fp.readline()
    line3 = fp.readline()
    while line1 and line1 != '':
        if is_triange(int(line1[0:5]), int(line2[0:5]), int(line3[0:5])):
            triangles += 1
        if is_triange(int(line1[5:10]), int(line2[5:10]), int(line3[5:10])):
            triangles += 1
        if is_triange(int(line1[10:15]), int(line2[10:15]), int(line3[10:15])):
            triangles += 1
        line1 = fp.readline()
        line2 = fp.readline()
        line3 = fp.readline()
print triangles
|
#!/usr/bin/python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Provides read access to buildbot's global_variables.json .
"""
import json
import svn
# Cache for the parsed global_variables.json; populated lazily by Get().
_global_vars = None

class NoSuchGlobalVariable(KeyError):
    """Raised by Get() when the requested variable name is absent."""
    pass
def Get(var_name):
    '''Return the value associated with this name in global_variables.json.

    Raises NoSuchGlobalVariable if there is no variable with that name.'''
    global _global_vars
    # Fetch and parse the JSON at most once per process; later calls hit
    # the module-level cache.
    if not _global_vars:
        _global_vars = json.loads(svn.Cat('http://skia.googlecode.com/svn/'
                                          'buildbot/site_config/'
                                          'global_variables.json'))
    try:
        return _global_vars[var_name]['value']
    except KeyError:
        # Either the variable name or its 'value' key is missing.
        raise NoSuchGlobalVariable(var_name)
|
import sys
from pyspark.sql import SparkSession
def multiply(r, d):
    """One sparse matrix-vector product term.

    `r` is a (row, col, value) record of strings; `d` maps column index to
    the current vector entry.  Returns [(row, value * d[col])] or [] when
    the column is absent from `d`.
    """
    col = int(r[1])
    if col not in d:
        return []
    return [(int(r[0]), float(r[2]) * d[col])]
# PageRank-style power iteration over a space-separated "row col value"
# edge file, run for 50 fixed iterations on a 4-entry vector.
if len(sys.argv) != 2:
    print("Zle argumenty")
    exit(1)
sp = SparkSession.builder.appName("zad1").getOrCreate()
file = sys.argv[1]
v_num = 4  # fixed vector size; assumes vertex ids 0..3 — TODO confirm
dane = sp.read.csv(file, header=False, sep=' ').rdd
# Start from the uniform vector.
res = {}
for i in range(v_num):
    res[i] = float(1.0 / v_num)
print(res)
for i in range(50):
    # NOTE(review): the lambda closes over `res` by name; this works only
    # because collectAsMap() forces evaluation before `res` is rebound —
    # confirm behaviour if the pipeline ever becomes lazy across iterations.
    res = dane.flatMap(lambda row: multiply(row, res))\
        .reduceByKey(lambda x, y: x + y).collectAsMap()
    print("Result at iteration ", i+1, ": ", res)
|
from django.contrib import admin
from django.utils.text import truncate_words
from snippets.models import Image, Markup, MediaType, Style
class ImageAdmin(admin.ModelAdmin):
    """Admin list view for Image snippets with a computed dimensions column."""
    list_display = ('__unicode__', 'author', 'title', 'dimensions', 'format')

    def dimensions(self, image):
        """Render 'WxH' when both sides are known, otherwise an empty string."""
        if not (image.width and image.height):
            return ''
        return '%dx%d' % (image.width, image.height)
class MarkupAdmin(admin.ModelAdmin):
    """Admin list view for Markup snippets with a truncated content column."""
    list_display = ('__unicode__', 'author', 'title', 'content_preview')

    def content_preview(self, markup):
        """First 80 characters of the content, with an ellipsis when truncated."""
        content = markup.content
        if len(content) > 80:
            return "%s..." % content[:80]
        return content
    content_preview.short_description = "content"
class MediaTypeAdmin(admin.ModelAdmin):
    """Admin list view for MediaType with a word-truncated description column."""
    list_display = ('name', 'description_preview')

    def description_preview(self, media_type):
        """Description shortened to its first 20 words."""
        return truncate_words(media_type.description, 20)
    description_preview.short_description = "description"
class StyleAdmin(admin.ModelAdmin):
    """Admin list view for Style snippets: truncated content plus media target."""
    list_display = ('__unicode__', 'author', 'title', 'content_preview', 'style_media')

    def content_preview(self, markup):
        """First 80 characters of the content, with an ellipsis when truncated."""
        content = markup.content
        if len(content) > 80:
            return "%s..." % content[:80]
        return content
    content_preview.short_description = "content"

    def style_media(self, style):
        """Expose the style's media attribute as a list column."""
        return style.media
    style_media.short_description = "media"
# Attach the customized admin classes to their models.
admin.site.register(Image, ImageAdmin)
admin.site.register(Markup, MarkupAdmin)
admin.site.register(MediaType, MediaTypeAdmin)
admin.site.register(Style, StyleAdmin)
|
from django.contrib import admin
# Register your models here.
from . import models
# Register your models here.
@admin.register(models.Tm_Service)
class Tm_ServiceAdim(admin.ModelAdmin):
    """Admin configuration for Tm_Service.

    NOTE(review): "Adim" looks like a typo for "Admin"; renaming is safe
    only if nothing imports the class by name.
    """
    list_display = ('id', 'department', 'service_name', 'upload_file', 'order', 'created', 'modified')
    ordering = ('id',)
    fields = ('department', 'service_name', 'upload_file', 'order')
@admin.register(models.Tm_Workflow)
class Tm_WorkflowAdim(admin.ModelAdmin):
    """Admin configuration for Tm_Workflow (route and step count)."""
    list_display = ('id', 'department', 'workflow_route', 'workflow_count', 'order', 'created', 'modified')
    ordering = ('id',)
    fields = ('department', 'workflow_route', 'workflow_count', 'order')
@admin.register(models.Tm_Authorizer)
class Tm_AuthorizerAdim(admin.ModelAdmin):
    """Admin configuration for Tm_Authorizer (per-step approver)."""
    list_display = ('id', 'workflow', 'workflow_count', 'author', 'order', 'created', 'modified')
    ordering = ('id',)
    fields = ('workflow', 'workflow_count', 'author', 'order')
@admin.register(models.Tm_Workflow_Conditions)
class Tm_Workflow_ConditionsAdim(admin.ModelAdmin):
    """Admin configuration for Tm_Workflow_Conditions (amount/rate bounds)."""
    list_display = ('id', 'workflow', 'service', 'order', 'created', 'modified')
    ordering = ('id',)
    fields = ('workflow', 'service', 'order', 'amount_min', 'amount_max', 'rate_min', 'rate_max')
|
import numpy as np
import argparse
import copy
import os
import sys
import re
import projector
import pretrained_networks
from training import dataset
from training import misc
import dnnlib
from dnnlib import EasyDict
import dnnlib.tflib as tflib
import os
import glob
from metrics.metric_defaults import metric_defaults
def run(dataset, data_dir, result_dir, config_id, num_gpus, total_kimg, gamma, mirror_augment, metrics):
    """Configure and submit a StyleGAN2 training run.

    Args:
        dataset: TFRecord dataset name under `data_dir`.
        data_dir: root directory containing the dataset.
        result_dir: directory receiving the run subdirectory.
        config_id: one of `_valid_configs` (config-a .. config-f variants).
        num_gpus: 1, 2, 4 or 8.
        total_kimg: training length in thousands of real images.
        gamma: optional R1 regularization weight override (None keeps default).
        mirror_augment: enable horizontal-flip augmentation.
        metrics: list of metric names looked up in `metric_defaults`.
    """
    train = EasyDict(run_func_name='training.training_loop.training_loop') # Options for training loop.
    G = EasyDict(func_name='training.networks_stylegan2.G_main')       # Options for generator network.
    D = EasyDict(func_name='training.networks_stylegan2.D_stylegan2')  # Options for discriminator network.
    G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)              # Options for generator optimizer.
    D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)              # Options for discriminator optimizer.
    G_loss = EasyDict(func_name='training.loss.G_logistic_ns_pathreg') # Options for generator loss.
    D_loss = EasyDict(func_name='training.loss.D_logistic_r1')         # Options for discriminator loss.
    sched = EasyDict()                                                 # Options for TrainingSchedule.
    grid = EasyDict(size='8k', layout='random')                        # Options for setup_snapshot_image_grid().
    sc = dnnlib.SubmitConfig()                                         # Options for dnnlib.submit_run().
    tf_config = {'rnd.np_random_seed': 1000}                           # Options for tflib.init_tf().
    train.data_dir = data_dir
    train.total_kimg = total_kimg
    train.mirror_augment = mirror_augment
    train.image_snapshot_ticks = train.network_snapshot_ticks = 10
    sched.G_lrate_base = sched.D_lrate_base = 0.002
    sched.minibatch_size_base = 32
    sched.minibatch_gpu_base = 4
    D_loss.gamma = 10
    # Rebinds the parameter: names are resolved to metric config dicts.
    metrics = [metric_defaults[x] for x in metrics]
    desc = 'stylegan2'
    desc += '-' + dataset
    dataset_args = EasyDict(tfrecord_dir=dataset)
    assert num_gpus in [1, 2, 4, 8]
    sc.num_gpus = num_gpus
    desc += '-%dgpu' % num_gpus
    assert config_id in _valid_configs
    desc += '-' + config_id
    # Configs A-E: Shrink networks to match original StyleGAN.
    if config_id != 'config-f':
        G.fmap_base = D.fmap_base = 8 << 10
    # Config E: Set gamma to 100 and override G & D architecture.
    if config_id.startswith('config-e'):
        D_loss.gamma = 100
        if 'Gorig' in config_id: G.architecture = 'orig'
        if 'Gskip' in config_id: G.architecture = 'skip' # (default)
        if 'Gresnet' in config_id: G.architecture = 'resnet'
        if 'Dorig' in config_id: D.architecture = 'orig'
        if 'Dskip' in config_id: D.architecture = 'skip'
        if 'Dresnet' in config_id: D.architecture = 'resnet' # (default)
    # Configs A-D: Enable progressive growing and switch to networks that support it.
    if config_id in ['config-a', 'config-b', 'config-c', 'config-d']:
        sched.lod_initial_resolution = 8
        sched.G_lrate_base = sched.D_lrate_base = 0.001
        sched.G_lrate_dict = sched.D_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
        sched.minibatch_size_base = 32 # (default)
        sched.minibatch_size_dict = {8: 256, 16: 128, 32: 64, 64: 32}
        sched.minibatch_gpu_base = 4 # (default)
        sched.minibatch_gpu_dict = {8: 32, 16: 16, 32: 8, 64: 4}
        G.synthesis_func = 'G_synthesis_stylegan_revised'
        D.func_name = 'training.networks_stylegan2.D_stylegan'
    # Configs A-C: Disable path length regularization.
    if config_id in ['config-a', 'config-b', 'config-c']:
        G_loss = EasyDict(func_name='training.loss.G_logistic_ns')
    # Configs A-B: Disable lazy regularization.
    if config_id in ['config-a', 'config-b']:
        train.lazy_regularization = False
    # Config A: Switch to original StyleGAN networks.
    if config_id == 'config-a':
        G = EasyDict(func_name='training.networks_stylegan.G_style')
        D = EasyDict(func_name='training.networks_stylegan.D_basic')
    if gamma is not None:
        D_loss.gamma = gamma
    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True
    kwargs = EasyDict(train)
    # NOTE(review): this passes the module-level pickled networks `_G`/`_D`
    # (loaded from `network_pkl` below) as G_args/D_args instead of the
    # local `G`/`D` option dicts built above — presumably a transfer-learning
    # hack to resume from the snapshot; confirm this is intentional.
    kwargs.update(G_args=_G, D_args=_D, G_opt_args=G_opt, D_opt_args=D_opt, G_loss_args=G_loss, D_loss_args=D_loss)
    kwargs.update(dataset_args=dataset_args, sched_args=sched, grid_args=grid, metric_arg_list=metrics, tf_config=tf_config)
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_dir_root = result_dir
    kwargs.submit_config.run_desc = desc
    print("--------------------i am here----------------------")
    # NOTE(review): `network_pkl` is a module-level global assigned *below*
    # this function definition; the call at the bottom of the file makes
    # this work, but it is fragile under reordering.
    print(network_pkl)
    dnnlib.submit_run(**kwargs)
# Accepted values for run()'s config_id argument (see StyleGAN2 paper tables).
_valid_configs = [
    # Table 1
    'config-a', # Baseline StyleGAN
    'config-b', # + Weight demodulation
    'config-c', # + Lazy regularization
    'config-d', # + Path length regularization
    'config-e', # + No growing, new G & D arch.
    'config-f', # + Large networks (default)
    # Table 2
    'config-e-Gorig-Dorig',   'config-e-Gorig-Dresnet',   'config-e-Gorig-Dskip',
    'config-e-Gresnet-Dorig', 'config-e-Gresnet-Dresnet', 'config-e-Gresnet-Dskip',
    'config-e-Gskip-Dorig',   'config-e-Gskip-Dresnet',   'config-e-Gskip-Dskip',
]
# network_pkl="results/00014-stylegan2-DeepFashon-8gpu-config-f/network-snapshot-005406.pkl"
# network_pkl="results/Retrain/00008-stylegan2-DeepFashon-8gpu-config-f/network-snapshot-000614.pkl"
#network_pkl="results/Transfer/00003-stylegan2-DeepFashion_Full_1024-8gpu-config-f/network-snapshot-002826.pkl"
#findes latest snapshot
def findlatestBuild(path,positionstart,positionEnd):
    """Return the glob match of *path* whose characters
    [positionstart:positionEnd] parse as the largest integer.

    Returns "" when nothing matches. Ties keep the later match in glob
    order (preserving the original `>=` behavior). Raises ValueError if a
    matched name has non-digits in the sliced range, as before.
    """
    highestint = 0
    highestfile = ""
    for f in glob.glob(path):
        build = int(f[positionstart:positionEnd])
        # `>=` (not `>`) so that among equal build numbers the last file wins.
        if build >= highestint:
            highestint = build
            highestfile = f
    return highestfile
# Pick the newest transfer-learning run directory, then the newest snapshot
# file inside it. The integer offsets (17,22 and 86,92) index into the path
# strings where the run/snapshot numbers sit — brittle if paths change length.
print("first step",findlatestBuild("results/Transfer/*",17,22))
network_pkl= findlatestBuild(findlatestBuild("results/Transfer/*",17,22)+"/network-snapshot*",86,92)
_G, _D, Gs = pretrained_networks.load_networks(network_pkl)
# Reference signature of run() kept from an earlier revision:
# run(dataset="clothing", data_dir="DataSet", result_dir="results/RetrainTest",config="config-f",num-gpus=8, total_kimg=25000, gamma=None, mirror-augment=True, metrics='fid50k'):
# parser.add_argument('--metrics', help='Comma-separated list of metrics or "none" (default: %(default)s)', default='fid50k', type=_parse_comma_sep)
# metric =('fid50k',type=_parse_comma_sep)
run("DeepFashion_Full_1024", "DataSet", "results/Transfer","config-f",8, 25000, None, True,['fid50k'])
|
## Read the number of players, their names, and which column's result to trace
pn = int(input('게임에 참가할 사람 수는?:'))
z = int(input('몇 번째 항목의 결과를 확인하시겠습니까?:'))
people = []
result = []
for i in range (0,pn):
    name = str(input('게임에 참가하는 사람의 이름을 입력해주세요:'))
    people.append(name)
def ladders():
    # Draw a "ghost-leg" (ladder) lottery with turtle graphics and trace the
    # chosen column to its outcome. Relies on turtle's global pen state, so
    # statement order matters throughout.
    import turtle
    import random
    global pn
    ## Draw the vertical lines
    def drawcol(k):
        # Write the player's name, draw one vertical rail downwards, then
        # label the bottom: column 0 is the winner, the rest lose.
        turtle.write(people[k], False, "center",('바탕', 10, 'bold'))
        turtle.penup()
        turtle.forward(50)
        turtle.pendown()
        turtle.forward(400)
        turtle.penup()
        turtle.forward(50)
        turtle.pendown()
        if k == 0:
            turtle.write('당첨!', False, "center",('바탕', 10, 'bold'))
        else:
            turtle.write('꽝!', False, "center",('바탕', 10, 'bold'))
    turtle.right(90)
    for j in range (0,pn):
        turtle.penup()
        turtle.goto(((j*100)-150),250)
        turtle.pendown()
        drawcol(j)
    turtle.left(90)
    points = []
    ## Draw the horizontal rungs at random
    def drawrow():
        # Each rung starts at a random (x, y) on a rail and spans 100 px to
        # the next rail; its left endpoint is recorded in `points`.
        global pn
        x = random.randrange(-150,(100*(pn-1)-150),100)
        y = random.randrange(-190,200,10)
        turtle.penup()
        turtle.goto(x,y)
        turtle.pendown()
        turtle.forward(100)
        t = (x,y)
        points.append(t)
    for l in range (0,3*pn):
        drawrow()
    turtle.right(90)
    global z
    turtle.penup()
    turtle.goto(((z-1)*100-150),200)
    ## Follow the ladder path
    def followline():
        # Advance 1 px down; if the current point is a rung's left endpoint,
        # sidestep right; if it is 100 px right of one, sidestep left.
        global pn
        turtle.pendown()
        turtle.pencolor('red')
        if (int(turtle.xcor()),int(turtle.ycor())) in points:
            turtle.left(90)
            turtle.forward(100)
            turtle.right(90)
            turtle.forward(1)
        elif (int(int(turtle.xcor())-100),int(turtle.ycor())) in points:
            turtle.right(90)
            turtle.forward(100)
            turtle.left(90)
            turtle.forward(1)
        else:
            turtle.forward(1)
    ## Repeat until the bottom of the ladder is reached
    while True:
        followline()
        if int(turtle.ycor()) == -200:
            break
    turtle.penup()
    turtle.left(90)
ladders()
|
import sys
import os
# Redirect stdin so later input() calls read from a prepared file.
# Hard-coded Windows path: assumes this exact file exists on this machine.
f = open("C:/Users/user/Documents/python/other/import.txt","r")
sys.stdin = f
# -*- coding: utf-8 -*-
cand = [1,2,3,4]
def dfs (i):
    """Return all length-(i+1) sequences of digit strings over `cand`.

    dfs(0) is the base alphabet [["1"],["2"],["3"],["4"]]; dfs(i) appends one
    more digit to every sequence of dfs(i-1), giving 4**(i+1) sequences.
    """
    if i <= 0:
        return [["1"],["2"],["3"],["4"]]
    # Bug fix: the original called dfs(i-1) inside BOTH loops, recomputing the
    # whole sub-result 4**(i+1) times (exponential slowdown). Compute it once.
    prev = dfs(i-1)
    temp = []
    for seq in prev:
        for k in range(4):
            temp.append(seq + list(str(cand[k])))
    return temp
# Print every generated 4-digit sequence, one per line (plus the trailing
# blank line the original's final "\n" + print produced).
v = dfs(3)
# Renamed from `str`, which shadowed the builtin; join is also linear instead
# of the original quadratic string concatenation.
output = "".join("".join(seq) + "\n" for seq in v)
print(output)
|
import numpy as np
# The signals initially arrive in a noisy state, so they must first be preprocessed using a
# moving average filter to smooth the signals and enable them to be used for feature extraction.
def movingavg(signal, N=100):
    """Smooth *signal* with an order-N moving average (cumulative-sum trick).

    Generalized: N used to be hard-coded to 100; it is now a keyword
    parameter with the same default, so existing callers are unaffected.

    Returns an array of len(signal) - N + 1 window means; empty when the
    signal is shorter than N.
    """
    # Prepend a 0 so adjacent cumsum differences equal exact window sums.
    temp_signal = np.insert(signal, 0, 0)
    cumsum = np.cumsum(temp_signal)
    filtered = (cumsum[N:] - cumsum[:-N])/N
    return filtered
def preprocess(ppg, gsr):
    """Smooth both raw sensor signals with the moving-average filter."""
    return movingavg(ppg), movingavg(gsr)
|
import speech_recognition as sr
import pyttsx3
# Speech recognizer plus a text-to-speech engine using the second installed voice.
listener = sr.Recognizer()
socio = pyttsx3.init()
voices = socio.getProperty('voices')
# Bug fix: pyttsx3's property for selecting the active voice is 'voice'
# (singular). The original set 'voices', which the engine ignores, so
# voices[1] never took effect.
socio.setProperty('voice', voices[1].id)
def talk(text):
    # Speak *text* aloud through the configured pyttsx3 engine (blocks until done).
    socio.say(text)
    socio.runAndWait()
def take_command():
    """Listen on the default microphone and return the recognized phrase.

    Returns "" when listening or recognition fails. Bug fix: the original
    returned the local `command` after a bare `except: pass`, so any failure
    raised UnboundLocalError instead of degrading gracefully; the bare except
    also swallowed KeyboardInterrupt/SystemExit.
    """
    command = ""
    try:
        with sr.Microphone() as source:
            print('listening...')
            voice = listener.listen(source)
            command = listener.recognize_google(voice)
            print(command)
    except Exception:
        pass
    return command
def run_socio():
command = take_command() |
## How to convert between data types
a = 10
b = 20
z = "10"
# Type-conversion builtins:
# - bool()
# - int()
# - str()
# - float()
# For the input value, 0 (and empty data such as "") evaluates to False;
# every non-zero / non-empty value evaluates to True.
print(bool(""))
from django.contrib import admin
from .models import Plan, ThumbnailSize
@admin.register(Plan)
class PlanAdmin(admin.ModelAdmin):
    # Columns shown on the Plan changelist. `display_available_thumbnail_sizes`
    # is presumably a method on Plan or this admin defined outside this file —
    # confirm it exists before renaming.
    list_display = (
        "name",
        "has_access_to_org_img",
        "can_generate_expiring_links",
        "display_available_thumbnail_sizes",
    )
@admin.register(ThumbnailSize)
class ThumbnailSizeAdmin(admin.ModelAdmin):
    # Single-column changelist: just the thumbnail size value.
    list_display = ("size",)
|
"""RUTINA DPLL Y UNIT PROPAGATE"""
import copy
def is_unit_cl(lista):
    """True if any clause in *lista* is a unit clause.

    A unit clause is either a single positive literal (length 1) or a negated
    literal stored as a length-2 sequence beginning with '-'.
    """
    return any(
        len(clause) == 1 or (len(clause) == 2 and clause[0] == "-")
        for clause in lista
    )
def complemento(n):
    """Return the complementary literal: '-p' -> 'p' and 'p' -> '-p'.

    NOTE: for a negative literal only the single character after '-' is
    returned, matching the file's single-character-atom convention.
    """
    if n.startswith('-'):
        return n[1]
    return '-' + n
def unit_propagate(S, I):
    # Input: S, a clause set in clausal form (from Tseitin); I, a dict of
    # interpretations (usually empty on entry).
    # Output: the simplified clause set S and the extended interpretation I.
    c_vacia = []
    aux = is_unit_cl(S)
    # Loop while there is still a unit clause and the empty clause has not appeared.
    while(c_vacia not in S and aux):
        # Scan all of S; `l` ends up as the literal of the LAST unit clause
        # found (there is no break), which is still a valid unit to propagate.
        for n in S:
            if len(n) == 1:
                l = n[0]
            elif len(n) == 2 and n[0] == "-":
                l = n[0] + n[1]
        # Keep only the clauses NOT satisfied by l.
        T = []
        for y in S:
            if l not in y:
                T.append(y)
        S = copy.deepcopy(T)
        # Remove the complement of l from the surviving clauses.
        for w in S:
            if complemento(l) in w:
                w.remove(complemento(l))
        # Record the forced assignment: atom -> 0 for a negative literal, 1 otherwise.
        if l[0] == '-':
            I[l[1]] = 0
        else:
            I[l] = 1
        aux = is_unit_cl(S)
    return S, I
def DPLL(S, I):
    """DPLL satisfiability procedure.

    Input: S, a clause set in clausal form (from Tseitin); I, a dict of
    interpretations (usually empty on entry).
    Output: ("Satisfacible", I) or ("Insatisfacible", {}); returns None when
    no unassigned literal remains (should not occur on well-formed input).
    The stray debug print of len(I) was removed.
    """
    #1: unit propagation
    S, I = unit_propagate(S, I)
    #2: empty clause in S -> unsatisfiable
    c_vacia = []
    if c_vacia in S:
        return "Insatisfacible", {}
    #3: S empty -> satisfiable
    elif len(S) == 0:
        return "Satisfacible", I
    #4: pick an unassigned literal. The inner break only exits the inner loop,
    # so the scan keeps the LAST candidate found — any unassigned literal is a
    # valid branching choice, so behavior is preserved.
    l = ""
    for clausula in S:
        for literal in clausula:
            if literal not in I.keys():
                l = literal
                break
    if l == "":
        # Bug fix: this guard now runs BEFORE complemento(l); the original
        # computed complemento("") first, which raised IndexError.
        #print("Error: ya se obtuvo una interpretacion")
        return None
    lBarra = complemento(l)
    #5: S' = S without the clauses containing l, and without -l in the rest
    S1P = copy.deepcopy(S)
    S1P = [clausula for clausula in S1P if not l in clausula]
    for c in S1P:
        if lBarra in c:
            c.remove(lBarra)
    Ip = copy.deepcopy(I)
    #6: extend I so that VI(l) = 1
    if l[0] == '-':
        Ip[l[1]] = 0  # l is negative: its atom gets 0
    else:
        Ip[l] = 1     # l is positive: its atom gets 1
    #7: recurse on (S', I'); propagate a satisfiable answer
    S1, I1 = DPLL(S1P, Ip)
    if S1 == "Satisfacible":
        return S1, I1
    #8: otherwise branch on the complement
    else:
        #9: S'' = S without the clauses containing -l, and without l in the rest.
        # Bug fix: the original removed elements from S2P while iterating it,
        # which silently skips clauses; rebuild with a comprehension instead.
        S2P = [clausula for clausula in copy.deepcopy(S)
               if complemento(l) not in clausula]
        for clausula in S2P:
            if l in clausula:
                clausula.remove(l)
        #10: extend I so that V(-l) = 1
        Ipp = copy.deepcopy(I)
        if l[0] == '-':
            Ipp[l[1]] = 1
        else:
            Ipp[l] = 0
        #11: return DPLL(S'', I'')
        return DPLL(S2P, Ipp)
def conjunto_de_formulas(FNC):
    """Split a CNF string such as 'pOqYqOiO-r' into clause strings.

    'Y' separates clauses, 'O' separates literals inside one; the 'O'
    separators are simply dropped: 'pOqYqOiO-r' -> ['pq', 'qi-r'].
    (Kept for compatibility; ultimately unused.)
    """
    return [chunk.replace("O", "") for chunk in FNC.split("Y")]
|
# Read a natural number and print its factorial (Portuguese prompts).
n = 1
while n > 0:
    n = int(input("Digite um número natural:"))
    fatorial = 1
    if n < 0:
        print ("O número digitado não é um número natural!")
    else:
        # n itself is consumed as the countdown counter, so after one valid
        # computation n reaches 0 and the outer while exits — presumably
        # intentional "run once" behavior; confirm before changing.
        while n >= 1:
            fatorial = n*fatorial
            n = n - 1
        print (fatorial)
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
#DEBUG = False
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'u1$&h3pz1js3pqoe37zt3r!5s457nr9dw@h5_bj!ra@ai_749k'
ALLOWED_HOSTS = [
    '127.0.0.1',
    '35.165.202.83',
    'blog.skokov.tk',
]
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sitemaps',
    # 'django.contrib.sites',
    'blog',
    'ckeditor',
    # sites framework enabled here (the commented entry above is a leftover).
    'django.contrib.sites',
    'django_comments_xtd',
    'django_comments',
    'grappelli',
    'filebrowser',
    #'disqus',
]
SITE_ID=1
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# SQLite during development; MySQL in production.
if DEBUG is True:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        }
    }
else:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql', # One of 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
            'NAME': 'blog', # database name
            # the settings below are not used with sqlite3
            'USER': 'userblog', # database user
            'PASSWORD': 'userblog', # user's password (NOTE(review): committed credential)
            'HOST': '127.0.0.1', # host address
            'PORT': '', # database port; empty uses the default (3306 for mysql)
        }
    }
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'ru-RU'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
if DEBUG is True:
    STATIC_ROOT = os.path.join(BASE_DIR, 'blog/static')
    MEDIA_ROOT = os.path.join(STATIC_ROOT, 'uploads')
else:
    STATIC_ROOT = os.path.join(BASE_DIR, 'static')
    MEDIA_ROOT = os.path.join(STATIC_ROOT, 'uploads')
#FILEBROWSER_DIRECTORY = MEDIA_ROOT
#DIRECTORY = STATIC_ROOT
# CKEditor rich-text widget: two toolbar rows plus the codesnippet plugin.
CKEDITOR_CONFIGS = {
    'default': {
        'toolbar': [
            ['CodeSnippet', 'Undo', 'Redo',
             '-', 'Bold', 'Italic', 'Underline',
             '-', 'Link', 'Unlink', 'Anchor',
             '-', 'Format',
             '-', 'Maximize',
             '-', 'Table',
             '-', 'Image',
             '-', 'Source',
             '-', 'NumberedList', 'BulletedList',
            ],
            ['JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock',
             '-', 'Font', 'FontSize', 'TextColor',
             '-', 'Outdent', 'Indent',
             '-', 'HorizontalRule',
             '-', 'Blockquote'
            ]
        ],
        'height': 500,
        'width': '100%',
        'removePlugins': 'stylesheetparser',
        'extraPlugins': 'codesnippet',
    },
}
CKEDITOR_UPLOAD_PATH = "uploads/"
CKEDITOR_JQUERY_URL = '//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js'
# django-comments-xtd: threaded comments with email confirmation.
COMMENTS_APP = 'django_comments_xtd'
COMMENTS_XTD_MAX_THREAD_LEVEL = 2
COMMENTS_XTD_CONFIRM_EMAIL = True
# NOTE(review): placeholder SMTP credentials committed here — load from the
# environment before deploying.
EMAIL_HOST = "smtp.mail.com"
EMAIL_PORT = "587"
EMAIL_HOST_USER = "alias@mail.com"
EMAIL_HOST_PASSWORD = "yourpassword"
DEFAULT_FROM_EMAIL = "Helpdesk <helpdesk@yourdomain>"
# Mid-file imports kept deliberately: filebrowser's site object must be
# configured after the settings above are defined.
from filebrowser.sites import site
site.directory = "uploads/"
from django.conf import settings
PRIVATE_DIR = getattr(settings, "PRIVATE_DIR", None)
import os
import logging
logger = logging.getLogger(__name__)
registry = {}
def backend(*args, **kwargs):
    """Decorator that registers a backend factory under a name.

    The factory is CALLED at decoration time and its instance stored in the
    module-level registry. An environment variable REGISTRATOR_<NAME> set to
    true/1/enabled (or false/0/disabled) overrides the declared `enabled`
    keyword, allowing backends to be toggled without code changes.
    """
    def deco_backend(f):
        enabled = kwargs.get('enabled', True)
        if 'name' in kwargs:
            env_name = 'REGISTRATOR_{!s}'.format(kwargs['name'].upper())
            override = os.environ.get(env_name, str(enabled)).lower()
            if override in ('true', 'enabled', '1'):
                enabled = True
            elif override in ('false', 'disabled', '0'):
                enabled = False
        if enabled:
            # Instantiate immediately; the registry holds instances, not factories.
            registry[kwargs.get('name', f.__name__)] = f()
        return f
    return deco_backend
def get_backends():
    """Return all registered (enabled) backend instances."""
    return registry.values()
|
import board
import neopixel
import time
# Drive a 20-LED NeoPixel strip on GPIO pin D18; light pixel 5 in dim red.
# (`time` is imported but unused here — presumably left over from a delay loop.)
pixels = neopixel.NeoPixel(board.D18, 20)
pixels[5] = (10,0,0)
|
#!/usr/bin/env python
import argparse
import json
import os
import re
import sys
import time
#
# Global
#
# Log locations, overridable via environment variables; {DATE}/{DAEMON}
# placeholders are substituted by logfile() below.
CEPH_LOG_DIR = os.environ.get('CEPH_LOG_DIR') or \
    '/var/log/ceph'
CEPHSTATS_LOG_DIR = os.environ.get('CEPHSTATS_LOG_DIR') or \
    CEPH_LOG_DIR
CEPHSTATS_LOG_FILE = os.environ.get('CEPHSTATS_LOG_FILE') or \
    CEPHSTATS_LOG_DIR + '/ceph-stats.{DATE}.log'
CEPHPERF_LOG_FILE = os.environ.get('CEPHPERF_LOG_FILE') or \
    CEPHSTATS_LOG_DIR + '/ceph-stats.{DAEMON}.perf.{DATE}.log'
# Default date is today (YYYY-MM-DD).
CEPHSTATS_DATE = os.environ.get('CEPHSTATS_DATE') or \
    time.strftime("%F")
#
# Functions
#
def parse_args():
    """Parse the command line: optional daemon/date/pretty flags, a required
    statistics name, and zero or more space-separated key paths."""
    parser = argparse.ArgumentParser(
        description='process stats from logs generated by ceph-stats'
    )
    parser.add_argument(
        '-D', '--daemon',
        metavar='mon.x|osd.x',
        help='process stats from daemon',
        default=None,
    )
    parser.add_argument(
        '-d', '--date',
        metavar='YYYY-MM-DD',
        help='date to parse data for',
        default=CEPHSTATS_DATE,
    )
    parser.add_argument(
        '-p', '--json-pretty',
        action='store_true',
        default=False,
        help='json-prettify output',
    )
    parser.add_argument(
        'name',
        help='statistics name to look for',
    )
    # Defaults to [''] so main() always has at least one (empty) key to walk.
    parser.add_argument(
        'key',
        nargs='*',
        help='key in statistics to look for',
        default=['']
    )
    args = parser.parse_args()
    return args
def logfile(ctx):
    """Resolve the log-file path for the parsed arguments.

    With a daemon, the per-daemon perf-log pattern is used; otherwise the
    plain stats-log pattern. {DATE}/{DAEMON} placeholders are substituted.
    """
    if ctx.daemon:
        return CEPHPERF_LOG_FILE.replace('{DATE}', ctx.date).replace('{DAEMON}', ctx.daemon)
    return CEPHSTATS_LOG_FILE.replace('{DATE}', ctx.date)
def main():
    # Python 2 script. Either lists daemons that have perf logs ('list' mode)
    # or scans a stats log, extracting the requested keys per matching line.
    ctx = parse_args()
    if ctx.daemon and ctx.name == 'list':
        # 'list' mode: glob via `ls` and print each daemon id found in the filenames.
        ls = os.popen("ls %s 2>/dev/null" % (logfile(ctx))).read().strip().split("\n");
        r = re.compile('^.*\.((osd|mon)\..*)\.perf\..*$')
        for f in ls:
            m = r.match(f)
            if m:
                print m.group(1)
        return
    f = open(logfile(ctx), 'r')
    # Log lines look like: 'YYYY-MM-DD HH:MM:SS [<name>] <json payload>'.
    r = re.compile('^(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d) \[%s\] \s*(.*)$' % (ctx.name))
    if not ctx.json_pretty:
        print '#"date" "time"', ' '.join(['"' + x + '"' for x in ctx.key])
    n = 0
    for line in f:
        n += 1
        m = r.match(line)
        if not m:
            continue
        t = m.group(1)
        try:
            val = json.loads(m.group(2))
        except ValueError as e:
            print >> sys.stderr, "line %d:" % n, e.message
            continue
        print t,
        if ctx.json_pretty:
            print ctx.name
        for key in ctx.key:
            # Walk the JSON payload along the space-separated key path;
            # all-digit components index into lists. Missing keys yield '-'.
            v = val
            for k in key.split():
                if k.isdigit():
                    k = int(k)
                try:
                    v = v[k]
                except:
                    v = None
                    break
            if ctx.json_pretty:
                print key, ' = ', json.dumps(v, sort_keys=True, indent=4, separators=(',', ': '))
            else:
                print v is None and '-' or v,
        print
main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-25 07:38
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated migration (Django 1.11): replaces the single
    # School_events_calendar model with twelve identical per-month event
    # models. Do not hand-edit generated operations; create a follow-up
    # migration for schema changes instead.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('catalogues', '0029_school_general_information_address_icon'),
    ]
    operations = [
        migrations.CreateModel(
            name='School_events_april',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('event_title', models.CharField(max_length=200)),
                ('event_description', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('event_date', models.DateTimeField(blank=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='School_events_august',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('event_title', models.CharField(max_length=200)),
                ('event_description', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('event_date', models.DateTimeField(blank=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='School_events_december',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('event_title', models.CharField(max_length=200)),
                ('event_description', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('event_date', models.DateTimeField(blank=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='School_events_february',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('event_title', models.CharField(max_length=200)),
                ('event_description', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('event_date', models.DateTimeField(blank=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='School_events_january',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('event_title', models.CharField(max_length=200)),
                ('event_description', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('event_date', models.DateTimeField(blank=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='School_events_july',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('event_title', models.CharField(max_length=200)),
                ('event_description', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('event_date', models.DateTimeField(blank=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='School_events_june',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('event_title', models.CharField(max_length=200)),
                ('event_description', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('event_date', models.DateTimeField(blank=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='School_events_march',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('event_title', models.CharField(max_length=200)),
                ('event_description', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('event_date', models.DateTimeField(blank=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='School_events_may',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('event_title', models.CharField(max_length=200)),
                ('event_description', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('event_date', models.DateTimeField(blank=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='School_events_november',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('event_title', models.CharField(max_length=200)),
                ('event_description', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('event_date', models.DateTimeField(blank=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='School_events_october',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('event_title', models.CharField(max_length=200)),
                ('event_description', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('event_date', models.DateTimeField(blank=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='School_events_september',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('event_title', models.CharField(max_length=200)),
                ('event_description', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('event_date', models.DateTimeField(blank=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.RemoveField(
            model_name='school_events_calendar',
            name='author',
        ),
        migrations.DeleteModel(
            name='School_events_calendar',
        ),
    ]
|
def dpMakeChange(coinValueList, change, minCoins):
    """Dynamic-programming minimum-coin count for *change*.

    Fills minCoins[1..change] in place (minCoins must have change+1 slots)
    and returns minCoins[change]. minCoins[c] defaults to c when no coin fits.
    """
    for cents in range(1, change + 1):
        options = [minCoins[cents - coin] + 1
                   for coin in coinValueList if coin <= cents]
        minCoins[cents] = min([cents] + options)
    return minCoins[change]
def recMC(coinValueList, change):
    """Bottom-up minimum number of coins needed to make *change*.

    (Name kept from the original; despite 'rec' it is iterative.)
    """
    knowChangeCoins = [0] * (change + 1)
    for amount in range(1, change + 1):
        options = [knowChangeCoins[amount - coin] + 1
                   for coin in coinValueList if coin <= amount]
        knowChangeCoins[amount] = min([amount] + options)
    return knowChangeCoins[change]
if __name__ == '__main__':
    # Both implementations agree: 63 cents with a 21-cent coin takes 3 coins.
    print(dpMakeChange([1, 5, 10, 21, 25], 63, [0]*64))
    print(recMC([1, 5, 21, 10, 25], 63))
|
import pandas as pd
import requests
from multiprocessing import Pool
from urllib.parse import urljoin
# Fantasy Premier League API constants.
FPL_URL = 'https://fantasy.premierleague.com/'
API = 'api/'
ENTRY = 'entry/'
HISTORY = 'history/'
SHARDS = 1024            # number of output CSV shards
CURR_WEEK = 6            # gameweek whose cumulative stats are read
NUM_PARTICIPANTS = 7338639
def get_entry_history(entry_id):
    """Absolute URL of the season-history API endpoint for one entry."""
    relative = '{}{}{}/{}'.format(API, ENTRY, entry_id, HISTORY)
    return urljoin(FPL_URL, relative)
def entry_overview(entry_id):
    """Fetch (entry_id, overall_rank, total_points) for one FPL entry.

    Returns (entry_id, -1, -1) on any request/parse failure so that the
    multiprocessing map in main() never raises. The bare `except:` was
    narrowed to Exception so KeyboardInterrupt/SystemExit still propagate.
    """
    entry_url = get_entry_history(entry_id=entry_id)
    try:
        content = requests.get(entry_url).json()
        current_season_stats = content['current']
        # Gameweeks are 1-based in the UI, 0-based in the 'current' list.
        week_stats = current_season_stats[CURR_WEEK - 1]
        overall_rank = week_stats['overall_rank']
        total_points = week_stats['total_points']
        return entry_id, overall_rank, total_points
    except Exception:
        return entry_id, -1, -1
def main():
    # Scrape rankings for every participant, writing one CSV per shard.
    # Resumes from shard 29 with a hard-coded starting entry id (a checkpoint
    # from a previous partial run — adjust when restarting from scratch).
    entries_per_shard = NUM_PARTICIPANTS // SHARDS
    start = 200649
    for shard in range(29, SHARDS + 1):
        # The last shard absorbs the remainder of the integer division.
        if shard == SHARDS:
            entries = NUM_PARTICIPANTS - entries_per_shard * (SHARDS - 1)
        else:
            entries = entries_per_shard
        with Pool(processes=512) as pool:
            res = pool.map(entry_overview, range(start, start + entries))
        start = start + entries
        overview_df = pd.DataFrame(
            res, columns=['Entry ID', 'Overall Rank', 'Total Points']
        )
        overview_df = overview_df.set_index('Entry ID')
        # Zero-padded shard number keeps the CSV filenames sortable.
        overview_df.to_csv(f'./data/rankings-{str(shard).zfill(len(str(SHARDS)))}.csv')
if __name__ == "__main__":
    main()
|
from django.db import models
# Create your models here.
# Gender choices: stored value -> human-readable label.
TYPE_SELECT = (('0', 'Female'),('1', 'male'),)
class Student(models.Model):
    # Basic student record with contact details, marks, and an optional photo.
    name = models.CharField(max_length=255,blank=True)
    roll_no = models.IntegerField(null=True)
    email = models.EmailField(unique=True)
    mobile = models.CharField(max_length=20)
    marks = models.FloatField()
    create_date = models.DateTimeField(auto_now_add=True)   # set once on insert
    update_date = models.DateTimeField(auto_now=True)       # refreshed on every save
    picture = models.ImageField(upload_to='upload',blank=True)
    # NOTE(review): storing passwords in a plain CharField suggests no hashing —
    # verify against the auth flow that writes this field.
    password = models.CharField(max_length=255,null=True)
    # NOTE(review): default=0 is an int while TYPE_SELECT keys are the strings
    # '0'/'1'; consider default='0' for consistency.
    gender = models.CharField(choices=TYPE_SELECT,max_length=20,default=0)
    course = models.CharField(max_length=20,blank=True)
    class Meta:
        db_table='student'
|
import pandas as pd
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier
# df_train = pd.read_csv('Kaggle_Datasets/Facebook/train.csv')
# df_test = pd.read_csv('https://s3-us-west-2.amazonaws.com/fbdataset/test.csv')
def run():
    # Python 2 script: train a RandomForest on Facebook check-in data and
    # print hold-out accuracy. sklearn.cross_validation is the pre-0.18
    # module name (removed in modern scikit-learn).
    print 'Loading DataFrame'
    df_train = pd.read_csv('Kaggle_Datasets/Facebook/train.csv')
    # df_train = df_train.loc[(df_train.x <= 0.5) & (df_train.y <= 0.5), :]
    print 'Splitting train and test data'
    train, test = train_test_split(df_train, test_size=0.2)
    df = train
    features = ['x', 'y', 'accuracy', 'hour', 'day', 'week', 'month', 'year']
    # Derive cyclic time features from the raw minute counter.
    df.loc[:, 'hours'] = df.time / float(60)
    df.loc[:, 'hour'] = df.hours % 24
    df.loc[:, 'days'] = df.time / float(60*24)
    df.loc[:, 'day'] = df.days % 7
    df.loc[:, 'weeks'] = df.time / float(60*24*7)
    df.loc[:, 'week'] = df.weeks % 52
    df.loc[:, 'months'] = df.time / float(60*24*30)
    df.loc[:, 'month'] = df.months % 12
    df.loc[:, 'year'] = df.time / float(60*24*365)
    model = RandomForestClassifier(n_jobs=1, warm_start=True)
    train_df = df.loc[:, features]
    values = df.loc[:, 'place_id']
    print 'Fitting Model'
    model.fit(train_df, values)
    # NOTE(review): the derived time columns above were only added to `train`,
    # so selecting `features` from `test` here likely raises a KeyError —
    # confirm against the actual dataset columns.
    wdf = test.sort_values('row_id').set_index('row_id')
    expected = wdf.place_id
    wdf = wdf.loc[:, features]
    predictions = model.predict(wdf)
    actual = predictions
    print dict(zip(wdf.index, predictions))
    expect = pd.Series(expected)
    actual = pd.Series(actual)
    # Percentage of hold-out rows predicted exactly right.
    print (sum(expect == actual)/float(len(expected))) * 100
|
#Functions take files and plots contents
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.patches import Circle
from pylab import savefig
#DECLARE FILE
# Index of the simulation snapshot to plot; builds the input path from it.
filenumber = 41
text = 'positions/position_test%d.txt' % filenumber
def Parameter_builder(text):
    """Parse a whitespace-separated position file into column lists.

    Each line must hold 7 fields: x y z v_x v_y v_z k. Values are kept as
    strings; callers convert to float as needed. Returns the 7 columns as
    [x, y, z, v_x, v_y, v_z, k].
    """
    with open(text, 'r') as handle:
        rows = [raw.strip('\n').split(' ') for raw in handle.readlines()]
    columns = [[], [], [], [], [], [], []]
    for row in rows:
        for idx in range(7):
            columns[idx].append(row[idx])
    return columns
parameters = Parameter_builder(text)
x = parameters[0]
y = parameters[1]
z = parameters[2]
r = parameters[6]
#Radius
# Convert radii to floats; the largest body is assumed to be Daphnis.
lo = []
for word in r:
    lo.append(float(word))
daphnis = lo.index(max(lo)) #Find Daphnis
n = lo.pop(daphnis) #Remove Daphnis
l = np.array(lo)/max(lo) #Scale without Daphnis
#CONVERTING STRINGS TO FLOATS
# (Daphnis' row is dropped from each coordinate list; it is drawn separately.)
i = []
for word in x:
    i.append(float(word))
i.pop(daphnis)
j = []
for word in y:
    j.append(float(word))
j.pop(daphnis)
k = []
for word in z:
    k.append(float(word))
k.pop(daphnis)
#Y VS X Graph (POSITION GRAPH)
cmap = plt.cm.autumn
fig = plt.figure(1)
ax = fig.add_subplot(111, aspect='equal')
ax.set_xlabel('Y positions')
ax.set_ylabel('X positions')
ax.set_title('Y vs X positions of particles')
ax.axis([min(j), max(j), 0, max(i)])
#Plot particles
# Color encodes squared scaled radius; circle size is the scaled radius itself.
for x, y, c, h in zip(j, i, l, l):
    ax.add_artist(Circle(xy=(x, y), radius = h, alpha = 0.5, color = cmap(c**2)))
#Plot Daphnis
ax.add_artist(Circle(xy=(0, 0), radius = n, alpha = 0.5, color = plt.cm.winter(0)))
#savefig('xyz_graphs/positions_YX_%d.png' % filenumber, bbox_inches='tight')
#Y VS Z GRAPH (WAKE GRAPH)
cmap = plt.cm.winter
fig = plt.figure(2)
ax = fig.add_subplot(111, aspect='equal')
ax.set_xlabel('Y positions')
ax.set_ylabel('Z positions')
ax.set_title('Azimuthal Displacement (Wakes)')
ax.axis([min(j), max(j), min(k) + min(k)/10, max(k)+max(k)/10])
#Plot particles
for x, y, c, h in zip(j, k, l, l):
    ax.add_artist(Circle(xy=(x, y), radius = h, alpha = 0.5, color = cmap(c**2)))
#Plot Daphnis
ax.add_artist(Circle(xy=(0, 0), radius = n, alpha = 0.5, color = plt.cm.autumn(0)))
#savefig('xyz_graphs/positions_YZ_%d.png' % filenumber, bbox_inches='tight')
plt.show()
#print('***DONE*** Check xyz_graphs folder')
|
import copy
def fibHelper(x):
    """Return the x-th Fibonacci number (fib(0)=0, fib(1)=1).

    Rewritten iteratively: the original naive double recursion was O(phi^x)
    and blew the stack/time for modest x. Same results for x >= 0; negative
    x now returns 0 instead of recursing forever.
    """
    a, b = 0, 1
    for _ in range(x):
        a, b = b, a + b
    return a
def fib(x):
    """Public entry point kept for compatibility with existing callers."""
    return fibHelper(x)
def reverseStringHelper(s, reversedString):
    """Recursively move the head of *s* onto the front of *reversedString*."""
    if not s:
        return reversedString
    return reverseStringHelper(s[1:], s[0] + reversedString)
def reverseString(string):
    """Return *string* reversed (recursive accumulator implementation)."""
    return reverseStringHelper(string, "")
def isSortedHelper(lst, sortedLst):
    """Recursive selection sort: repeatedly move min(lst) onto sortedLst.

    NOTE: consumes (empties) the list it is given — callers must pass a copy.
    """
    if len(lst) >= 1:
        minimum = min(lst)
        sortedLst.append(minimum)
        lst.remove(minimum)
        return isSortedHelper(lst, sortedLst)
    return sortedLst
def isSorted(lst):
    """True iff *lst* is already in non-decreasing order.

    Bug fix: the original passed the caller's list straight into the helper,
    which emptied it as a side effect; we now hand the helper a copy so the
    caller's list is left intact.
    """
    return lst == isSortedHelper(copy.copy(lst), [])
def countOccHelper(lst, item, timesOccurred):
    """Recursively count occurrences of *item*, accumulating the total."""
    if not lst:
        return timesOccurred
    bump = 1 if lst[0] == item else 0
    return countOccHelper(lst[1:], item, timesOccurred + bump)
def countOcc(lst, item):
    """Number of times *item* appears in *lst*."""
    return countOccHelper(lst, item, 0)
def main():
    # Smoke tests for the recursive exercises defined above.
    print(reverseString("hello")) #should be "olleh"
    print(isSorted([3, 5, 2])) #should be False
    print(isSorted([3, 4, 9])) #should be True
    print(countOcc([4, 5, 6, 7, 8, 8, 2], 8)) #should be 2
    for i in range(10):
        print(fib(i)) #should come out as 0, 1, 1, 2, 3, 5, 8, 13, 21, 34
#You can run more tests if you want
main()
|
## install python libraries from terminal for pulling data out of HTML files.
# pip install bs4
# pip install requests
# step 1: import modules
# step 2: make requests instance and pass into URL
# step 3: Pass the requests into a BeautifulSoup() function
# step 4: Use 'img' tag to find them all tag ('src')
import requests
from bs4 import BeautifulSoup
def image_scraping(url):
    """GET *url* and return the raw HTML of the response as text."""
    response = requests.get(url)
    return response.text
# Fetch the page once, then print the src attribute of every <img> tag found.
data = image_scraping("https://www.briantracy.com/blog/personal-success/26-motivational-quotes-for-success/")
soup = BeautifulSoup(data, 'html.parser')
for item in soup.find_all('img'):
    # A KeyError here would mean an <img> without src — assumed absent on this page.
    print(item['src'])
|
from Bio.Blast import NCBIXML
import sys
import argparse
from glob import glob
#from joblib import Parallel, delayed
import os.path
# HSPs with an e-value larger than this are discarded everywhere below.
significant_e_value = 0.001
# Alignments covering a smaller fraction of the query are discarded.
significant_query_fraction = 0.1
# If best_hit_query_coverage * significant_ratio < other_hit_query_coverage and
# the hits are of different types (e.g. plasmid vs chromosome), the query is
# considered ambiguous.
significant_ratio = 0.9
def parse_args(args):
    ###### Command Line Argument Parser
    # NOTE(review): the *args* parameter is ignored — parse_args() always reads
    # sys.argv. If callers pass an argument list this should probably be
    # parser.parse_args(args); confirm the call sites before changing.
    parser = argparse.ArgumentParser(description="Check the help flag")
    parser.add_argument('-i', help='Path to file with XML output')
    parser.add_argument('-o', help='Path to output folder')
    return parser.parse_args()
###### Return total length of alignment with respect of significant_e_value (0.001) threshold
#
def get_total_len(alignment):
    """Sum of alignment lengths over all HSPs passing the e-value cutoff."""
    return sum(hsp.align_length
               for hsp in alignment.hsps
               if hsp.expect < significant_e_value)
def get_query_coverage (alignment):
    """Return the number of query positions covered by at least one
    significant HSP (e-value below ``significant_e_value``).

    Uses a sweep line: each significant HSP contributes a (+1) event at
    its query start and a (-1) event at its query end; stretches where
    at least one interval is open are counted once, so overlapping HSPs
    are not double counted.
    """
    global significant_e_value
    total_len = 0
    coords = []
    for hsp in alignment.hsps:
        if hsp.expect < significant_e_value:
            # open/close events for the sweep line
            coords.append([hsp.query_start, 1])
            coords.append([hsp.query_end, -1])
    coords.sort()
    if len(coords) > 0:
        layers = coords[0][1]  # number of intervals currently open
        for j in range (1, len(coords)):
            if layers > 0:
                total_len += coords[j][0] - coords[j - 1][0]
            layers += coords[j][1]
    return total_len
def get_identity(alignment):
    """Total identity count over the HSPs that pass the e-value cutoff."""
    return sum(hsp.identities
               for hsp in alignment.hsps
               if hsp.expect < significant_e_value)
def get_hsp_count(alignment):
    """Number of HSPs in *alignment* that pass the e-value cutoff."""
    return sum(1 for hsp in alignment.hsps
               if hsp.expect < significant_e_value)
def parse_name(seq):
    """Classify a BLAST hit title as Virus, Plasmid, Chromosome or Other.

    Matching uses case-tolerant substrings ('hage' catches Phage/phage,
    'irus' catches Virus/virus, and so on); the first category whose
    substring appears wins.
    """
    if 'hage' in seq or 'irus' in seq:
        return "Virus"
    if 'lasmid' in seq:
        return "Plasmid"
    if 'hromosome' in seq or "omplete genome" in seq:
        return "Chromosome"
    return "Other"
def report_file(file, query, alignment):
    """Write one classification record to *file*: the query id on its own
    line, then the hit title with query coverage, identity fraction and
    HSP count.  *alignment* is the 7-item list built in parser():
    [coverage, title, type, total_len, identities, hsp_count, query_len].
    """
    coverage = 1.0 * alignment[0] / alignment[6]
    identity = 1.0 * alignment[4] / alignment[3]
    file.write(query + '\n')
    file.write(alignment[1]
               + "\t query coverage: " + str(coverage)
               + "\t identity: " + str(identity)
               + "\t hsp_num: " + str(alignment[5]))
    file.write('\n')
    return
def parser(f, out_dir):
    """Parse one BLAST XML report and classify each query sequence as
    chromosome, plasmid, virus, other, or not significant, appending the
    query names to per-category ``.names`` files in *out_dir*.

    A hit counts only when its significant-HSP query coverage reaches
    ``significant_query_fraction`` of the query length.  If a close
    runner-up (within ``significant_ratio`` of the best coverage) is of
    type Virus or Plasmid while the best hit is not, the query is
    reported under the runner-up's category instead.
    """
    global significant_query_fraction
    global significant_ratio
    scat_out={}  # NOTE(review): never written or read below — appears dead
    xml_file = open(f,"r")
    # Output files are named after the input file, inside out_dir.
    name = os.path.basename(f)
    name = os.path.join(out_dir, name)
    # print(name)
    # NOTE(review): none of these handles is ever closed; consider
    # context managers or contextlib.ExitStack.
    nosig = open(name[:-4]+"_no_significant.names", "w")
    chrom = open(name[:-4]+"_chromosome.names", "w")
    plasmids = open(name[:-4]+"_plasmid.names", "w")
    other = open(name[:-4]+"_other.names", "w")
    records= NCBIXML.parse(xml_file)
    viral = open(name[:-4]+"_viruses.names", "w")
    files = {"Virus": viral, "Plasmid": plasmids, "Chromosome": chrom, "Other": other}
    for item in records:
        pl_len = item.query_length
        ###### No alignment - put in non-significant
        if len(item.alignments) == 0:
            # print ("No Significant")
            nosig.write(item.query + '\n\n')
            continue
        good_aln = []       # NOTE(review): unused
        scat_good_aln = []  # NOTE(review): unused
        alignments = []
        for alignment in item.alignments:
            seq_type = parse_name(alignment.title) # Virus, Plasmid, Chromosome or Other
            query_coverage = get_query_coverage(alignment)
            # Drop hits covering less than the required fraction of the query.
            if get_query_coverage(alignment) < significant_query_fraction * pl_len:
                continue
            # [coverage, title, type, total_len, identities, hsp_count, query_len]
            alignments.append([get_query_coverage(alignment), alignment.title, parse_name(alignment.title), get_total_len(alignment), get_identity(alignment), get_hsp_count(alignment), pl_len])
        if len(alignments)== 0:
            # Alignments exist, but none covers enough of the query.
            nosig.write(item.query + '\n')
            al = item.alignments[0]
            nosig.write(al.title + "\t" + "total query cov " + str(get_query_coverage(al)) + " of " + str(pl_len))
            nosig.write('\n')
        else:
            # Best coverage first.
            alignments.sort()
            alignments.reverse()
            type = alignments[0][2]
            best_query_cov = alignments[0][0]
            #ambigous = 0
            for i in range (1, len(alignments)):
                if alignments[i][0] < best_query_cov * significant_ratio:
                    break
                if type != alignments[i][2]:
                    if alignments[i][2] == "Virus" or alignments[i][2] == "Plasmid":
                        report_file(files[alignments[i][2]], item.query,alignments[0])
                        break
            else:
                # for/else: no break above, so the best hit's type stands.
                # NOTE(review): a break on the coverage-ratio test also skips
                # this report, leaving the query unreported — confirm intended.
                # print (type)
                report_file(files[type], item.query,alignments[0])
    return
def main():
    """Entry point: classify every BLAST XML report found under -i.

    Bug fix: the globbed file list was previously ignored and the raw -i
    value (a directory) was passed to parser() directly.  Now each
    discovered ``*.xml`` file is processed; if the glob finds nothing,
    -i is assumed to name a single XML file, preserving the old
    single-file behaviour.
    """
    parsed_args = parse_args(sys.argv[1:])
    files = glob(str(parsed_args.i)+"/*.xml")
    # print (files)
    if files:
        for infile in files:
            parser(infile, parsed_args.o)
    else:
        # -i may point at a single XML file rather than a directory.
        parser(parsed_args.i, parsed_args.o)
    # Parallel(n_jobs=30)(delayed(parser) (infile, parsed_args.o) for infile in files)
    #Parallel(n_jobs=30)(delayed(parser) (args) for args in files)
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
hismask.py: HisMask
========================
::
In [12]: from opticks.ana.hismask import HisMask
In [13]: hm = HisMask()
In [16]: hm.label(2114)
Out[16]: 'BT|SD|SI'
"""
import os, datetime, logging, sys
log = logging.getLogger(__name__)
import numpy as np
from opticks.ana.base import PhotonMaskFlags
from opticks.ana.seq import MaskType, SeqTable, SeqAna
from opticks.ana.nbase import count_unique_sorted
from opticks.ana.nload import A
class HisMask(MaskType):
    """MaskType specialised for photon-history flags.

    Decodes an OR-ed photon flag mask into abbreviation labels such as
    'BT|SD|SI' (see the module docstring for an example).
    """
    def __init__(self):
        log.debug("HisMask.__init__")
        # The flag definitions and their abbreviations come from
        # PhotonMaskFlags; MaskType provides code()/label() on top of them.
        flags = PhotonMaskFlags()
        MaskType.__init__(self, flags, flags.abbrev)
        log.debug("HisMask.__init__ DONE")
def test_HisMask(af):
    """Round-trip a label through af.code() and af.label(), logging the result."""
    label_in = "TO BT SD"
    mask = af.code(label_in)
    label_out = af.label(mask)
    log.info( " %30s -> %d -> %10s " % (label_in, mask, label_out ))
def test_HisMask_SeqTable(aa, af):
    """Tabulate the unique photon-history flag masks found in array *aa*.

    Assumes aa is a photon record array where slot [:,3,3] carries the
    packed history flags — TODO confirm against the producing code.
    """
    hflags = aa[:,3,3].view(np.uint32)  # reinterpret the float slot as uint32 flags
    cu = count_unique_sorted(hflags)
    st = SeqTable(cu, af)
    print(st)
def test_HisMask_SeqAna(aa, af):
    """Run SeqAna over the packed history flags of *aa* and print its table.

    Same flag-slot assumption as test_HisMask_SeqTable (aa[:,3,3]).
    """
    hflags = aa[:,3,3].view(np.uint32)  # reinterpret the float slot as uint32 flags
    sa = SeqAna(hflags, af)
    print(sa.table)
if __name__ == '__main__':
    from opticks.ana.main import opticks_main
    #ok = opticks_main(src="torch", tag="10", det="PmtInBox")
    ok = opticks_main()
    af = HisMask()
    test_HisMask(af)
    # Hits ("ht") and final photons ("ox") are loaded independently; each
    # may be absent for a given source/tag/detector combination.
    try:
        ht = A.load_("ht",ok.src,ok.tag,ok.det, pfx=ok.pfx)
        log.info("loaded ht %s %s shape %s " % (ht.path, ht.stamp, repr(ht.shape)))
        #test_HisMask_SeqTable(ht, af)
        test_HisMask_SeqAna(ht, af)
    except IOError as err:
        log.warning("no ht")
    try:
        ox = A.load_("ox",ok.src,ok.tag,ok.det, pfx=ok.pfx)
        log.info("loaded ox %s %s shape %s " % (ox.path, ox.stamp, repr(ox.shape)))
        #test_HisMask_SeqTable(ox, af)
        test_HisMask_SeqAna(ox, af)
    except IOError as err:
        # Bug fix: this branch previously logged "no ht"; it concerns ox.
        log.warning("no ox")
|
#!/usr/bin/env python3
r'''
./p01B-missing-number-sorted
This program finds the missing number in a list of integers. This
implementation is optimized for sorted lists through use of binary
search.
* by: Leomar Duran <https://github.com/lduran2>
* date: 2019-06-28T23:53ZQ
* for: https://dev.to/javinpaul/50-data-structure-and-algorithms-\
problems-from-coding-interviews-4lh2
'''
import random # for picking the missing number
import logging # for logging
import sys # for command line arguments
def main(
    min = 1, # the minimum value in the array to generate
    max = 100, # the maximum value in the array to generate
    print = print # the printing function (injectable for testing)
):
    r'''
    Tests the find_missing function on a generated array from 1 to 100.
    NOTE(review): despite the original wording ("shuffled"), the list is
    produced in ascending order — binary search requires it sorted.
    Shadows the builtins min/max/print by design, so they can be
    overridden by a caller.
    '''
    # generate the list of numbers
    NUMBERS = generate_missing_number_list(min, max)
    # find the missing number
    MISSING = find_missing(NUMBERS, min, max)
    # The output:
    print('The array:')
    print(NUMBERS)
    print()
    print('The missing number is ', MISSING, '.', sep='')
def find_missing(numbers, min, max):
    r'''
    Finds the number missing in the sorted array of integers @numbers
    whose elements are in the interval [min, max].
    {1} Calculates the rightmost index for a binary search: the list
        holds (max - min) elements (one value missing), so the last
        index is ((max - min) - 1).
    {2} Uses binary search to find the number just before the missing one.
    {3} The missing number is one more than the number found.
    '''
    RIGHT = ((max - min) - 1) #{1}
    INDEX = binary_search(numbers, RIGHT, #{2}
        missing_number_arithpredicate)
    MISSING = (numbers[INDEX] + 1) #{3}
    return MISSING
def missing_number_arithpredicate(subject):
    r'''
    Direction predicate for the missing-number binary search.

    *subject* is the tuple (index, list, offset, length) supplied by
    binary_search.  An element list[index] is "expected" when it equals
    (list[0] + index).  Returns a negative value when both the current
    and the next element are as expected (search right), 0 when the
    current element is expected but its successor is not — or when the
    current element is the last one (search done), and a positive value
    when the current element is already past its expected value
    (search left).
    '''
    idx, seq, _offset, length = subject
    expected = seq[0] + idx
    verdict = seq[idx] - expected
    if verdict > 0:
        # Current element overshoots: the gap lies to the left.
        return verdict
    succ = idx + 1
    if succ == length:
        # Last element; nothing to compare against, treat as found.
        return 0
    # Gap check: a successor more than one above the current element
    # means the missing number sits right here.
    gap = seq[succ] - (seq[idx] + 2)
    return 0 if gap > 0 else gap
def binary_search(list, right, arithpredicate, left = 0, default = -1):
    r'''
    Binary-searches list[left..right] for an element satisfying the
    given arithpredicate.

    The arithpredicate receives the tuple (mid, list, left, length) and
    returns a negative value to continue rightwards, a positive value
    to continue leftwards, and zero on a match.

    params:
        list -- the list to search
        right -- the index of the rightmost endpoint of the list
        arithpredicate -- decides the direction of the search
        left -- the index of the leftmost endpoint of the list
        default -- the index to return when nothing matches
    returns:
        the index of an element satisfying the arithpredicate, or
        $default (-1 by default) if no such element is found
    '''
    size = (right + 1)  # length of the entire list
    logging.debug(r'Searching through list of size: %d...', size)
    while (left <= right):
        middle = ((left + right) >> 1)
        logging.debug(r'    Midpoint: %d', middle)
        verdict = arithpredicate((middle, list, left, size))
        if (verdict < 0):
            left = (middle + 1)       # element too low: go right
        elif (verdict > 0):
            right = (middle - 1)      # element too high: go left
        else:
            return middle             # satisfied
    return default                    # empty sublist: not found
def generate_missing_number_list(min, max):
    r'''
    Builds the ascending list of integers in the interval [min, max]
    with a single randomly chosen value left out.  The omitted value is
    drawn from [min, max[, so max itself is always present.  (The list
    is ordered, not shuffled: the binary search in find_missing
    requires sorted input.)
    params:
        min -- the minimum value in the list
        max -- the maximum value in the list
    '''
    omitted = random.randrange(min, max)
    logging.debug(r'Missing number chosen: %d', omitted)
    return [value for value in range(min, (max + 1)) if (value != omitted)]
if (r'__main__' == __name__):
    # if the '--debug' flag was given in the command line arguments,
    # place the logger in debug mode before running the demonstration
    if (r'--debug' in sys.argv):
        logging.basicConfig(level=logging.DEBUG)
    main()
|
from config import ConfigurationError
from config import PropertyFormatError
from config import PropertyNotExistError
from config import parse_config
from config import validate
class TestAdminConfig:
    """Validation tests for the admin configuration parser."""
    def test_valid_config(self):
        # A well-formed config must yield no validation errors.
        errors = validate(parse_config('tests/fixtures/valid_config.yaml'))
        assert (len(list(errors)) == 0)
    def test_invalid_config(self):
        # The invalid fixture is expected to trigger exactly six errors,
        # emitted in a deterministic order by validate().
        errors = list(validate(parse_config('tests/fixtures/invalid_config.yaml')))
        assert (len(errors) == 6)
        assert isinstance(errors[0], PropertyNotExistError)
        assert isinstance(errors[1], ConfigurationError)
        assert isinstance(errors[2], PropertyFormatError)
        assert isinstance(errors[3], PropertyFormatError)
        assert isinstance(errors[4], PropertyFormatError)
        assert isinstance(errors[5], PropertyFormatError)
|
import sys
import matplotlib.pyplot as plt
import numpy as np
#PATH="\Users\maxen\Desktop\FORMATION\2A\INF442\projet\code\connected-components\data\"
#print(PATH)
PATH="data/"
#print(PATH)
def distance_2D(ax, ay, bx, by):
    """Euclidean distance between the points (ax, ay) and (bx, by)."""
    dx = bx - ax
    dy = by - ay
    return np.sqrt(dx ** 2 + dy ** 2)
def create_graph(n, p):
    """Generate a random 2-D graph: *n* Gaussian points, each directed
    edge (i, j) kept with probability *p* and weighted by the Euclidean
    distance between its endpoints.

    Writes coordinates to PATH/graph_2D_coordinates.txt and the edge
    list (two header lines, then "i j weight" lines) to PATH/graph_2D.txt.
    Returns 0.
    """
    # Save coordinates (seeded so the generated data is reproducible)
    np.random.seed(0)
    xn = np.random.randn(n)
    yn = np.random.randn(n)
    with open(PATH + "graph_2D_coordinates.txt", "w") as f:
        for i in range(n):
            f.write(str(xn[i]) + ' ' + str(yn[i]) + "\n")
    # Set edges
    distances = np.ones((n, n)) * -1   # -1 marks "no edge"
    nb_edge = 0
    for i in range(n):
        for j in range(n):
            # binomial(1, p) is 0 or 1, so "> 0.5" keeps the edge iff the draw was 1
            if (i != j and np.random.binomial(1, p) > 0.5):
                nb_edge += 1
                # Bug fix: distance_2D already returns the Euclidean distance;
                # the previous np.sqrt(...) wrapper stored sqrt(distance)
                # instead of the distance itself.
                distances[i][j] = distance_2D(xn[i], yn[i], xn[j], yn[j])
    # Save edges
    with open(PATH + "graph_2D.txt", "w") as f:
        f.write(str(1) + ' ' + str(1) + '\n')
        f.write(str(n) + ' ' + str(nb_edge) + '\n')
        for i in range(n):
            for j in range(n):
                if distances[i][j] > 0:
                    f.write(str(i) + ' ' + str(j) + ' ' + str(distances[i][j]) + '\n')
    return 0
if __name__ == '__main__':
    # Usage: graph_2D_output_coords_et_format.py nb_points proba_edges
    if len(sys.argv) != 3:
        print('Usage: python graph_2D_output_coords_et_format.py nb_points proba_edges')
        sys.exit(1)
    n=int(sys.argv[1])      # number of points
    p=float(sys.argv[2])    # per-edge keep probability
    create_graph(n, p)
|
# -*- coding: utf-8 -*-
import inject
import logging
import asyncio
from asyncio import coroutine
from autobahn.asyncio.wamp import ApplicationSession
from model.registry import Registry
from model.connection import connection
from model.positions.positions import Position
from model.serializer.utils import MySerializer, JSONSerializable
class PositionsWamp(ApplicationSession):
    """WAMP session exposing position lookups over autobahn/asyncio.

    Registers the ``positions.getPosition`` RPC on join; the blocking
    database work runs in the default executor so the event loop is
    never blocked.
    """
    def __init__(self, config=None):
        logging.debug('instanciando')
        ApplicationSession.__init__(self, config)
        # Connection pool configured from the 'dcsys' registry entry.
        reg = inject.instance(Registry)
        self.conn = connection.Connection(reg.getRegistry('dcsys'))
    @coroutine
    def onJoin(self, details):
        """Register the RPC endpoints once the WAMP session is established."""
        logging.debug('registering methods')
        yield from self.register(self.getPosition_async, 'positions.getPosition')
        # yield from self.register(self.updatePosition_async, 'positions.updatePosition')
    def getPosition(self, userIds):
        """Blocking lookup of positions for *userIds*.

        The pooled connection is always returned, even when the query raises.
        """
        con = self.conn.get()
        try:
            positions = Position.findByUser(con, userIds)
            return positions
        finally:
            self.conn.put(con)
    @coroutine
    def getPosition_async(self, userIds):
        """Coroutine wrapper: run the blocking getPosition in an executor."""
        loop = asyncio.get_event_loop()
        r = yield from loop.run_in_executor(None, self.getPosition, userIds)
        return r
|
import datetime
#the following function takes a letter and returns a number for its position.
def alphabet_position(letter):
    """Return the 0-based alphabet index of *letter* (A/a -> 0 ... Z/z -> 25),
    or None when the character is not an ASCII letter."""
    alpha = "AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz"
    for ctr in range(len(alpha)):
        if alpha[ctr] == letter:
            return int(ctr/2)
    return None  # explicit: previously fell off the end and returned None implicitly

#rotates a single character according to a certain code.
def rotate_character(char, rot):
    """Rotate a single character.

    Letters are shifted *rot* places around the 26-letter alphabet,
    preserving the input's case.  Digits are Caesar-shifted by a fixed
    +3 modulo 10.  Anything else passes through unchanged.

    Bug fix: the old digit wrap subtracted 9 on overflow, which mapped
    both '0' and '9' to '3' — the cipher was not invertible for '9'.
    Mod-10 arithmetic makes every digit recoverable.
    """
    if char.isalpha():
        position = alphabet_position(char)
        if position is None:
            # Non-ASCII letters (e.g. accented characters) previously
            # crashed with a TypeError; pass them through unchanged.
            return char
        rotated = (position + rot) % 26
        result = "abcdefghijklmnopqrstuvwxyz"[rotated]
        return result.upper() if char.isupper() else result
    elif char.isnumeric():
        return str((int(char) + 3) % 10)
    else:
        return char

#this code uses the other functions to encrypt a message.
def encrypt(message, code):
    """Encrypt *message* by rotating each letter by successive digits of
    *code* (cycling) and each digit by +3 mod 10.  Characters that are
    neither letters nor digits are copied and do not consume a code digit."""
    ctr = 0
    answ = ""
    for elem in message:
        if elem.isalpha() or elem.isnumeric():
            answ += rotate_character(elem, int(code[ctr]))
            ctr += 1
            if ctr == len(code):
                ctr = 0
        else:
            answ += elem
    return answ

#this code is used to decrypt a message. It essentially reverses encrypt().
def decrypt(message, code):
    """Invert encrypt(): letters are rotated backwards by the code digits,
    digits are shifted -3 mod 10 (matching the corrected encryption)."""
    ctr = 0
    answ = ""
    for elem in message:
        if elem.isalpha():
            answ += rotate_character(elem, (-1 * int(code[ctr])))
            ctr += 1
            if ctr == len(code):
                ctr = 0
        elif elem.isnumeric():
            # Bug fix: reverse of +3 mod 10; the old "+9 on underflow"
            # wrap could never recover an original '9'.
            answ += str((int(elem) - 3) % 10)
            ctr += 1
            if ctr == len(code):
                ctr = 0
        else:
            answ += elem
    return answ
#this function retrieves the date and time and returns it as a string.
def retrieve_time():
    """Return the current date-time as a digits-only string.

    Takes the ISO timestamp of "now" and strips every non-digit
    character; the result is used as the cipher key elsewhere.
    """
    stamp = datetime.datetime.now().isoformat()
    return "".join(ch for ch in stamp if ch.isnumeric())
#this is the main function. It encrypts or decrypts based on user input.
def main():
    """Interactive entry point.

    Option 1 encrypts a message to ShadowDataOut.txt using a key derived
    from the current time, storing the key prefixed with a fixed marker;
    option 2 reads that file back and prints the decrypted message.
    """
    codin = retrieve_time()
    quer = input("Please enter 1 to encrypt or 2 to decrypt: ")
    if quer == "1":
        print("Encryption:")
        msgin = input("What message would you like to encrypt? ")
        # Re-read the clock so the key reflects the moment of encryption.
        codin = retrieve_time()
        encmsg = encrypt(msgin, codin)
        # "12252001" is a fixed 8-character marker prepended to the stored
        # key; decryption strips it with aline[8:-1] below.
        codtot = "12252001" + codin
        outfile = open("ShadowDataOut.txt", "w")
        outfile.write(encmsg + "\n")
        outfile.write(codtot + "\n")
        outfile.close()
    if quer == "2":
        print("Decryption:")
        infile = open("ShadowDataOut.txt", "r")
        aline = infile.readline()
        recmsg = aline[0:-1]   # drop the trailing newline
        aline = infile.readline()
        rectim = aline[8:-1]   # skip the 8-character marker, drop the newline
        infile.close()
        dcpmsg = decrypt(recmsg, rectim)
        print("The translation is:", dcpmsg)
## THE GHOST OF THE SHADOW ##
if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
from typing import List
class Solution:
    def shuffle(self, nums: List[int], n: int) -> List[int]:
        """Interleave the two halves of *nums*:
        [x1..xn, y1..yn] -> [x1, y1, x2, y2, ..., xn, yn]."""
        interleaved: List[int] = []
        for i in range(n):
            interleaved.append(nums[i])
            interleaved.append(nums[i + n])
        return interleaved
if __name__ == "__main__":
solution = Solution()
assert [2, 3, 5, 4, 1, 7] == solution.shuffle([2, 5, 1, 3, 4, 7], 3)
assert [1, 4, 2, 3, 3, 2, 4, 1] == solution.shuffle([1, 2, 3, 4, 4, 3, 2, 1], 4)
assert [1, 2, 1, 2] == solution.shuffle([1, 1, 2, 2], 2)
|
# -*- coding: utf-8 -*-
import pytest
from django.template import Template, Context
from chloroform.models import Configuration
@pytest.mark.django_db
def test_chloroform_ttag():
    """The bare {% chloroform %} tag renders the 'default' configuration's form."""
    Configuration.objects.create(name='default')
    t = Template('{% load chloroform %}{% chloroform %}')
    rendered = t.render(Context())
    assert 'form' in rendered
    assert 'chloroform-default' in rendered
@pytest.mark.django_db
def test_chloroform_ttag_alternative():
    """{% chloroform "name" %} selects the named configuration explicitly."""
    Configuration.objects.create(name='alternative')
    t = Template('{% load chloroform %}{% chloroform "alternative" %}')
    rendered = t.render(Context())
    assert 'form' in rendered
    assert 'chloroform-alternative' in rendered
|
# Re-import the object-counter UI module and force a reload (Python 2
# builtin `reload`) so code edits are picked up in a live session,
# then (re)build the UI.
from Tools.Utilities.ObjectCounter import objectCounterUi as oCounter
reload(oCounter)
oCounter.create()
|
from .direct import DirectMethod
from .wilson import WilsonMethod
from .eratosthene import EratostheneMethod
|
def merge(parents, ranks, src, dst):
    """Union by rank, resolving roots with the path-compressing get_parent.

    Returns False when *src* and *dst* are already in the same set,
    True after linking the two roots.
    """
    src_parent = get_parent(parents, src)
    dst_parent = get_parent(parents, dst)
    if src_parent == dst_parent:
        return False
    # Attach the shallower tree under the deeper root.
    if ranks[src_parent] > ranks[dst_parent]:
        parents[dst_parent] = src_parent
    else:
        parents[src_parent] = dst_parent
        # Equal ranks: the merged tree grows one level deeper.
        if ranks[src_parent] == ranks[dst_parent]:
            ranks[dst_parent] = ranks[dst_parent] + 1
    return True
def merge_1(parents, ranks, src, dst):
    """Same union-by-rank as merge(), but resolves the roots through
    get_parent_1 (the variant intended to skip path compression).

    Returns False when the two elements already share a root, True
    after linking them.
    """
    src_parent = get_parent_1(parents, src)
    dst_parent = get_parent_1(parents, dst)
    if src_parent == dst_parent:
        return False
    if ranks[src_parent] > ranks[dst_parent]:
        parents[dst_parent] = src_parent
    else:
        parents[src_parent] = dst_parent
        if ranks[src_parent] == ranks[dst_parent]:
            ranks[dst_parent] = ranks[dst_parent] + 1
    return True
def get_parent(parents, i):
    """Find the root of *i*, compressing the path so that every node
    visited ends up pointing directly at the root.

    Iterative two-pass variant of the recursive original: first walk up
    to the root, then re-point each node on the path — same final state,
    no recursion-depth limit.
    """
    root = i
    while parents[root] != root:
        root = parents[root]
    node = i
    while parents[node] != root:
        parents[node], node = root, parents[node]
    return root
def get_parent_1(parents, i):
    """Find the root of *i* WITHOUT path compression.

    Bug fix: the recursive call previously went to get_parent(), which
    compresses the remainder of the chain — defeating the purpose of
    this non-mutating variant.  It now recurses into itself and leaves
    the parents array untouched.
    """
    if parents[i] != i:
        return get_parent_1(parents, parents[i])
    return parents[i]
def test():
    """Union the numbers 1..60 with their small multiples and print the
    rank of every resulting root."""
    parents = [0] * 61
    ranks = [0] * 61
    for i in range(1, 61):
        parents[i] = i
        ranks[i] = 0
    # Union i with its multiples by 2, 3 and 5 (while 2i/3i/5i <= 60).
    for i in range(1, 31):
        merge(parents, ranks, i, 2*i)
    for i in range(1, 21):
        merge(parents, ranks, i, 3*i)
    for i in range(1, 13):
        merge(parents, ranks, i, 5*i)
    # Fully compress every chain so parents holds roots only.
    for i in range(1, 61):
        get_parent(parents, i)
    set_parents = set(parents)
    set_parents.remove(0)  # index 0 is unused
    for i in set_parents:
        print(ranks[i])
def test1():
    """Replay a fixed merge sequence on elements 1..12 using the
    non-compressing finder, then print each recorded parent's rank.

    NOTE(review): without a final find pass, parents may still contain
    non-root entries, so some printed ranks belong to interior nodes.
    """
    parents = [0] * 13
    ranks = [0] * 13
    for i in range(1, 13):
        parents[i] = i
        ranks[i] = 0
    merge_1(parents, ranks, 2, 10)
    merge_1(parents, ranks, 7, 5)
    merge_1(parents, ranks, 6, 1)
    merge_1(parents, ranks, 3, 4)
    merge_1(parents, ranks, 5, 11)
    merge_1(parents, ranks, 7, 8)
    merge_1(parents, ranks, 7, 3)
    merge_1(parents, ranks, 12, 2)
    merge_1(parents, ranks, 9, 6)
    set_parents = set(parents)
    set_parents.remove(0)  # index 0 is unused
    for i in set_parents:
        print(ranks[i])
def test2(n):
    """Chain the elements 0..n-1 into a single set and print the ranks,
    the parents array, and each distinct recorded parent with its rank."""
    parents = [0] * n
    ranks = [0] * n
    for i in range(n):
        parents[i] = i
        ranks[i] = 0
    # Successive unions collapse everything into one component.
    for i in range(n-1):
        merge(parents, ranks, i, i+1)
    set_parents = set(parents)
    print(*ranks)
    print(*parents)
    for i in set_parents:
        print(i, ranks[i])
if __name__ == "__main__":
    test()
# Demonstrate OpenCV bitwise operations by combining an image with a
# white rectangle drawn on a black canvas.
import cv2
import numpy as np
# Black canvas of 576x277 px (presumably matching image_1.jpg — the
# bitwise operations below require identical shapes; TODO confirm),
# with a filled white 100x100 rectangle.
img1 = np.zeros((277, 576, 3), np.uint8)
img1 = cv2.rectangle(img1, (238, 0), (338, 100), (255, 255, 255), -1)
img2 = cv2.imread('image_1.jpg')
# Pixel-wise combinations of the mask and the image.
bit_and = cv2.bitwise_and(img2, img1)
bit_or = cv2.bitwise_or(img2, img1)
bit_xor = cv2.bitwise_xor(img2, img1)
bit_not1 = cv2.bitwise_not(img1)
bit_not2 = cv2.bitwise_not(img2)
# Display every result until a key is pressed.
cv2.imshow("img1", img1)
cv2.imshow("img2", img2)
cv2.imshow("bit_and", bit_and)
cv2.imshow("bit_or", bit_or)
cv2.imshow("bit_xor", bit_xor)
cv2.imshow("bit_not1", bit_not1)
cv2.imshow("bit_not2", bit_not2)
cv2.waitKey(0)
cv2.destroyAllWindows()
import os, re
from twisted.python.filepath import FilePath
from twisted.internet.inotify import IN_MODIFY, IN_CREATE, INotify
from twisted.internet import reactor
import gallery
WATCH_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
WATCH_DIR_LEN = len(WATCH_DIRECTORY)
galleries = {}
call_ids = {}
CALL_DELAY = 30
"""Seconds to wait before creating gallery."""
THUMBNAIL_PREFIX = 't_'
THUMBNAILS_DIR = 't'
OUTPUT_FILE = 'index.md'
IGNORE_LIST = (THUMBNAILS_DIR, THUMBNAIL_PREFIX, OUTPUT_FILE)
"""Do not trigger the if file is likely to be created by the process."""
def process_handler(gallery_root):
    """Build (and cache in the module-level dict) the Gallery object for
    *gallery_root*, then regenerate that gallery."""
    if gallery_root not in galleries:
        galleries[gallery_root] = gallery.Gallery(gallery_root)
    return galleries[gallery_root].process_gallery(
        WATCH_DIRECTORY, WATCH_DIR_LEN)
def find_root(pathname, dir_len=WATCH_DIR_LEN):
    """Return the first-level directory under the watch root containing
    *pathname* — i.e. the gallery root for a changed file."""
    head, tail = os.path.split(pathname)
    oldhead = head
    # Walk upwards until head shrinks to the watch directory itself
    # (compared by length, since pathname starts with it).
    while head and tail and dir_len != len(head):
        oldhead = head
        head, tail = os.path.split(head)
    return oldhead
def greed_controller(pathname):
    """Delayed-call target: regenerate the gallery rooted at *pathname*."""
    print 'WORKING:', pathname
    process_handler(pathname)
    print 'DONE:', pathname
def created(ignored, path, mask):
    """inotify callback: debounce filesystem events per gallery root.

    Events for dotfiles, the thumbnail directory ('t'), thumbnail files
    ('t_*') and the generated index.md are ignored (they are produced by
    the gallery process itself).  Anything else schedules gallery
    processing after CALL_DELAY seconds, resetting the timer when more
    activity arrives for the same root.
    """
    basename = path.basename()
    if re.match(r'^\.|^t$|^t_|^index\.md$', basename):
        return
    pathname = path.path
    pathname = find_root(pathname)
    if pathname not in call_ids or not call_ids[pathname].active():
        print 'SET:', pathname
        call_ids[pathname] = reactor.callLater(CALL_DELAY, greed_controller, pathname)
    else:
        print 'RESET:', pathname
        call_ids[pathname].reset(CALL_DELAY)
# Watch the script's own directory tree recursively, reacting to new and
# modified files via the created() callback, then run the reactor forever.
notifier = INotify()
notifier.watch(
    FilePath(WATCH_DIRECTORY),
    mask=IN_MODIFY|IN_CREATE,
    callbacks=[created],
    autoAdd=True,      # automatically watch newly created subdirectories
    recursive=True
)
notifier.startReading()
reactor.run()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.